diff --git a/api/go.mod b/api/go.mod
index c9c73feb6c..f190b495a2 100644
--- a/api/go.mod
+++ b/api/go.mod
@@ -3,7 +3,7 @@ module github.com/openshift/hypershift/api
 go 1.23.0
 
 require (
-	github.com/openshift/api v0.0.0-20250124212313-a770960d61e0
+	github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f
 	k8s.io/api v0.32.2
 	k8s.io/apimachinery v0.32.2
 )
@@ -25,7 +25,7 @@ require (
 	golang.org/x/text v0.21.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
+	k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
diff --git a/api/go.sum b/api/go.sum
index 09358e12b2..75cae5d11e 100644
--- a/api/go.sum
+++ b/api/go.sum
@@ -28,8 +28,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/openshift/api v0.0.0-20250124212313-a770960d61e0 h1:dCvNfygMrPLVNQ06bpHXrxKfrXHiprO4+etHrRUqI8g=
-github.com/openshift/api v0.0.0-20250124212313-a770960d61e0/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
+github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f h1:HH+BqfxONWB2kknhCLAjiAievsyb56WvLGXdNOseI8g=
+github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -87,8 +87,7 @@ k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ=
 k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
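Everything below follows from the `github.com/openshift/api` bump above. A quick way to confirm which revision actually links into a build is to read the embedded module info; this is an illustrative snippet, not part of the PR:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

// Print the resolved version of github.com/openshift/api so the pinned
// pseudo-version in go.mod can be checked against what a binary carries.
func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no module build info available")
		return
	}
	for _, dep := range info.Deps {
		if dep.Path == "github.com/openshift/api" {
			fmt.Println(dep.Path, dep.Version)
		}
	}
}
```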
diff --git a/api/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/api/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
index 4a6823640d..a447adb9f4 100644
--- a/api/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
+++ b/api/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -53,6 +53,8 @@ type ClusterOperatorStatus struct {
 	// conditions describes the state of the operator's managed and monitored components.
 	// +patchMergeKey=type
 	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
 	// +optional
 	Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
 
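The two added markers declare `status.conditions` an associative list keyed by `type`, so server-side apply merges entries per condition type instead of replacing the whole slice, matching the existing strategic-merge-patch markers above them. A hypothetical helper, not part of this diff, showing the same key in action:

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// setCondition updates or appends a condition by its Type — the same key that
// +listMapKey=type tells server-side apply to merge this list on.
func setCondition(conds []configv1.ClusterOperatorStatusCondition, c configv1.ClusterOperatorStatusCondition) []configv1.ClusterOperatorStatusCondition {
	for i := range conds {
		if conds[i].Type == c.Type {
			conds[i] = c
			return conds
		}
	}
	return append(conds, c)
}

func main() {
	conds := setCondition(nil, configv1.ClusterOperatorStatusCondition{
		Type:   configv1.OperatorAvailable,
		Status: configv1.ConditionTrue,
	})
	conds = setCondition(conds, configv1.ClusterOperatorStatusCondition{
		Type:   configv1.OperatorAvailable,
		Status: configv1.ConditionFalse,
	})
	fmt.Println(len(conds)) // still 1: merged by type, not appended
}
```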
diff --git a/api/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/api/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
index 0293603d78..21fe0e13e9 100644
--- a/api/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
+++ b/api/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -528,18 +528,22 @@ type AWSPlatformStatus struct {
 
 // AWSResourceTag is a tag to apply to AWS resources created for the cluster.
 type AWSResourceTag struct {
-	// key is the key of the tag
+	// key sets the key of the AWS resource tag key-value pair. Key is required when defining an AWS resource tag.
+	// Key should consist of between 1 and 128 characters, and may
+	// contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'.
 	// +kubebuilder:validation:MinLength=1
 	// +kubebuilder:validation:MaxLength=128
-	// +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+	// +kubebuilder:validation:XValidation:rule=`self.matches('^[0-9A-Za-z_.:/=+-@ ]+$')`,message="invalid AWS resource tag key. The string can contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', '@'"
 	// +required
 	Key string `json:"key"`
-	// value is the value of the tag.
+	// value sets the value of the AWS resource tag key-value pair. Value is required when defining an AWS resource tag.
+	// Value should consist of between 1 and 256 characters, and may
+	// contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'.
 	// Some AWS service do not support empty values. Since tags are added to resources in many services, the
 	// length of the tag value must meet the requirements of all services.
 	// +kubebuilder:validation:MinLength=1
 	// +kubebuilder:validation:MaxLength=256
-	// +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$`
+	// +kubebuilder:validation:XValidation:rule=`self.matches('^[0-9A-Za-z_.:/=+-@ ]+$')`,message="invalid AWS resource tag value. The string can contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', '@'"
 	// +required
 	Value string `json:"value"`
 }
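The Pattern marker is replaced by a CEL XValidation rule whose character class adds a space. CEL's `matches` uses RE2, the same engine as Go's `regexp`, so the rule can be reproduced locally; a minimal sketch of what the new constraints accept (note that inside the class, `+-@` is parsed as a character range from '+' to '@', not as three literal characters):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the CEL rule above; '+-@' spans the ASCII range 0x2B-0x40,
// which is why '-' matches even though it reads as list punctuation.
var awsTagChars = regexp.MustCompile(`^[0-9A-Za-z_.:/=+-@ ]+$`)

// validAWSTag mirrors the MinLength/MaxLength and XValidation markers.
func validAWSTag(key, value string) bool {
	return len(key) >= 1 && len(key) <= 128 &&
		len(value) >= 1 && len(value) <= 256 &&
		awsTagChars.MatchString(key) &&
		awsTagChars.MatchString(value)
}

func main() {
	fmt.Println(validAWSTag("kubernetes.io/cluster/example", "owned")) // true
	fmt.Println(validAWSTag("spaced key", "now allowed"))              // true: space was added to the class
	fmt.Println(validAWSTag("bad#key", "owned"))                       // false: '#' is outside the class
}
```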
diff --git a/api/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/api/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
index 0ac9c7ccd2..cbe5cdd80f 100644
--- a/api/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
+++ b/api/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
@@ -1198,8 +1198,8 @@ func (AWSPlatformStatus) SwaggerDoc() map[string]string {
 
 var map_AWSResourceTag = map[string]string{
 	"":      "AWSResourceTag is a tag to apply to AWS resources created for the cluster.",
-	"key":   "key is the key of the tag",
-	"value": "value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.",
+	"key":   "key sets the key of the AWS resource tag key-value pair. Key is required when defining an AWS resource tag. Key should consist of between 1 and 128 characters, and may contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'.",
+	"value": "value sets the value of the AWS resource tag key-value pair. Value is required when defining an AWS resource tag. Value should consist of between 1 and 256 characters, and may contain only the set of alphanumeric characters, space (' '), '_', '.', '/', '=', '+', '-', ':', and '@'. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.",
 }
 
 func (AWSResourceTag) SwaggerDoc() map[string]string {
diff --git a/api/vendor/modules.txt b/api/vendor/modules.txt
index d343d2dce8..0abd0aa28b 100644
--- a/api/vendor/modules.txt
+++ b/api/vendor/modules.txt
@@ -23,7 +23,7 @@ github.com/modern-go/concurrent
 # github.com/modern-go/reflect2 v1.0.2
 ## explicit; go 1.12
 github.com/modern-go/reflect2
-# github.com/openshift/api v0.0.0-20250124212313-a770960d61e0
+# github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f
 ## explicit; go 1.23.0
 github.com/openshift/api/config/v1
 # github.com/rogpeppe/go-internal v1.13.1
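The swagger map is regenerated from the godoc edited above and is served through the type's generated SwaggerDoc() method; for illustration:

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	docs := configv1.AWSResourceTag{}.SwaggerDoc()
	fmt.Println(docs[""])    // type-level description
	fmt.Println(docs["key"]) // the reworded field doc from this bump
}
```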
diff --git a/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequest.go b/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequest.go
index 5f1639b073..2444ac3dbf 100644
--- a/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequest.go
+++ b/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequest.go
@@ -18,6 +18,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	v1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
diff --git a/client/applyconfiguration/hypershift/v1beta1/autonode.go b/client/applyconfiguration/hypershift/v1beta1/autonode.go
index 374b390290..7b5cf06808 100644
--- a/client/applyconfiguration/hypershift/v1beta1/autonode.go
+++ b/client/applyconfiguration/hypershift/v1beta1/autonode.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // AutoNodeApplyConfiguration represents an declarative configuration of the AutoNode type for use
 // with apply.
 type AutoNodeApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/awscloudproviderconfig.go b/client/applyconfiguration/hypershift/v1beta1/awscloudproviderconfig.go
index 4672ab1c22..bf35cabf73 100644
--- a/client/applyconfiguration/hypershift/v1beta1/awscloudproviderconfig.go
+++ b/client/applyconfiguration/hypershift/v1beta1/awscloudproviderconfig.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // AWSCloudProviderConfigApplyConfiguration represents an declarative configuration of the AWSCloudProviderConfig type for use
 // with apply.
 type AWSCloudProviderConfigApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/awskmsspec.go b/client/applyconfiguration/hypershift/v1beta1/awskmsspec.go
index f728155d44..a8f49f1078 100644
--- a/client/applyconfiguration/hypershift/v1beta1/awskmsspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/awskmsspec.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // AWSKMSSpecApplyConfiguration represents an declarative configuration of the AWSKMSSpec type for use
 // with apply.
 type AWSKMSSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/awsnodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/awsnodepoolplatform.go
index d9caedfabf..8426f503af 100644
--- a/client/applyconfiguration/hypershift/v1beta1/awsnodepoolplatform.go
+++ b/client/applyconfiguration/hypershift/v1beta1/awsnodepoolplatform.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // AWSNodePoolPlatformApplyConfiguration represents an declarative configuration of the AWSNodePoolPlatform type for use
 // with apply.
 type AWSNodePoolPlatformApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/awsplatformspec.go b/client/applyconfiguration/hypershift/v1beta1/awsplatformspec.go
index cc11a94976..37def802db 100644
--- a/client/applyconfiguration/hypershift/v1beta1/awsplatformspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/awsplatformspec.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // AWSPlatformSpecApplyConfiguration represents an declarative configuration of the AWSPlatformSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/awsresourcereference.go b/client/applyconfiguration/hypershift/v1beta1/awsresourcereference.go
index f67e8456d2..f07cbf7627 100644
--- a/client/applyconfiguration/hypershift/v1beta1/awsresourcereference.go
+++ b/client/applyconfiguration/hypershift/v1beta1/awsresourcereference.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // AWSResourceReferenceApplyConfiguration represents an declarative configuration of the AWSResourceReference type for use
 // with apply.
 type AWSResourceReferenceApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/awssharedvpc.go b/client/applyconfiguration/hypershift/v1beta1/awssharedvpc.go
index 70a0a57507..82a5b74595 100644
--- a/client/applyconfiguration/hypershift/v1beta1/awssharedvpc.go
+++ b/client/applyconfiguration/hypershift/v1beta1/awssharedvpc.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // AWSSharedVPCApplyConfiguration represents an declarative configuration of the AWSSharedVPC type for use
 // with apply.
 type AWSSharedVPCApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/azurekmsspec.go b/client/applyconfiguration/hypershift/v1beta1/azurekmsspec.go
index 9a0f54940c..e8e7a2e098 100644
--- a/client/applyconfiguration/hypershift/v1beta1/azurekmsspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/azurekmsspec.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // AzureKMSSpecApplyConfiguration represents an declarative configuration of the AzureKMSSpec type for use
 // with apply.
 type AzureKMSSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/azurenodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/azurenodepoolplatform.go
index 602e5897c5..f989c3b447 100644
--- a/client/applyconfiguration/hypershift/v1beta1/azurenodepoolplatform.go
+++ b/client/applyconfiguration/hypershift/v1beta1/azurenodepoolplatform.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // AzureNodePoolPlatformApplyConfiguration represents an declarative configuration of the AzureNodePoolPlatform type for use
 // with apply.
 type AzureNodePoolPlatformApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/azureplatformspec.go b/client/applyconfiguration/hypershift/v1beta1/azureplatformspec.go
index f1b63cec7c..a7e73e97e2 100644
--- a/client/applyconfiguration/hypershift/v1beta1/azureplatformspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/azureplatformspec.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // AzurePlatformSpecApplyConfiguration represents an declarative configuration of the AzurePlatformSpec type for use
 // with apply.
 type AzurePlatformSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/azureresourcemanagedidentities.go b/client/applyconfiguration/hypershift/v1beta1/azureresourcemanagedidentities.go
index 15fa0ebc6a..7d89cf0f29 100644
--- a/client/applyconfiguration/hypershift/v1beta1/azureresourcemanagedidentities.go
+++ b/client/applyconfiguration/hypershift/v1beta1/azureresourcemanagedidentities.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // AzureResourceManagedIdentitiesApplyConfiguration represents an declarative configuration of the AzureResourceManagedIdentities type for use
 // with apply.
 type AzureResourceManagedIdentitiesApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/azurevmimage.go b/client/applyconfiguration/hypershift/v1beta1/azurevmimage.go
index 3cbf2cfaf8..817eee831d 100644
--- a/client/applyconfiguration/hypershift/v1beta1/azurevmimage.go
+++ b/client/applyconfiguration/hypershift/v1beta1/azurevmimage.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // AzureVMImageApplyConfiguration represents an declarative configuration of the AzureVMImage type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/clusternetworking.go b/client/applyconfiguration/hypershift/v1beta1/clusternetworking.go
index 5051f97cdc..e9ed686263 100644
--- a/client/applyconfiguration/hypershift/v1beta1/clusternetworking.go
+++ b/client/applyconfiguration/hypershift/v1beta1/clusternetworking.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // ClusterNetworkingApplyConfiguration represents an declarative configuration of the ClusterNetworking type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/controlplanemanagedidentities.go b/client/applyconfiguration/hypershift/v1beta1/controlplanemanagedidentities.go
index 421ca49b47..5c91d343ea 100644
--- a/client/applyconfiguration/hypershift/v1beta1/controlplanemanagedidentities.go
+++ b/client/applyconfiguration/hypershift/v1beta1/controlplanemanagedidentities.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // ControlPlaneManagedIdentitiesApplyConfiguration represents an declarative configuration of the ControlPlaneManagedIdentities type for use
 // with apply.
 type ControlPlaneManagedIdentitiesApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/diagnostics.go b/client/applyconfiguration/hypershift/v1beta1/diagnostics.go
index d7c6a94285..df5b402351 100644
--- a/client/applyconfiguration/hypershift/v1beta1/diagnostics.go
+++ b/client/applyconfiguration/hypershift/v1beta1/diagnostics.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // DiagnosticsApplyConfiguration represents an declarative configuration of the Diagnostics type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/etcdspec.go b/client/applyconfiguration/hypershift/v1beta1/etcdspec.go
index 840435e400..4d0cdb156e 100644
--- a/client/applyconfiguration/hypershift/v1beta1/etcdspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/etcdspec.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // EtcdSpecApplyConfiguration represents an declarative configuration of the EtcdSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedcluster.go b/client/applyconfiguration/hypershift/v1beta1/hostedcluster.go
index 65f3904709..0ff601ceed 100644
--- a/client/applyconfiguration/hypershift/v1beta1/hostedcluster.go
+++ b/client/applyconfiguration/hypershift/v1beta1/hostedcluster.go
@@ -18,6 +18,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedclusterspec.go b/client/applyconfiguration/hypershift/v1beta1/hostedclusterspec.go
index ba18e9adac..3a4b6eb311 100644
--- a/client/applyconfiguration/hypershift/v1beta1/hostedclusterspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/hostedclusterspec.go
@@ -20,6 +20,7 @@ package v1beta1
 import (
 	v1 "github.com/openshift/api/config/v1"
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 	corev1 "k8s.io/api/core/v1"
 )
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go b/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go
index 433804746c..c2ad61336f 100644
--- a/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go
+++ b/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
 )
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplane.go b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplane.go
index dfd2ecc322..5bffb5bfa8 100644
--- a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplane.go
+++ b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplane.go
@@ -18,6 +18,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanespec.go b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanespec.go
index 5b2e7836e3..810211ff1f 100644
--- a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanespec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanespec.go
@@ -20,6 +20,7 @@ package v1beta1
 import (
 	v1 "github.com/openshift/api/config/v1"
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 	corev1 "k8s.io/api/core/v1"
 )
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanestatus.go b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanestatus.go
index bd78944b3c..2835183942 100644
--- a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanestatus.go
+++ b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanestatus.go
@@ -18,6 +18,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	metav1 "k8s.io/client-go/applyconfigurations/meta/v1"
diff --git a/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsauthspec.go b/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsauthspec.go
index e9aaa9d44f..6f843362ea 100644
--- a/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsauthspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsauthspec.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // IBMCloudKMSAuthSpecApplyConfiguration represents an declarative configuration of the IBMCloudKMSAuthSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsspec.go b/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsspec.go
index 90ca5a990c..7f65476618 100644
--- a/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/ibmcloudkmsspec.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // IBMCloudKMSSpecApplyConfiguration represents an declarative configuration of the IBMCloudKMSSpec type for use
 // with apply.
 type IBMCloudKMSSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/karpenterconfig.go b/client/applyconfiguration/hypershift/v1beta1/karpenterconfig.go
index cc2c078a92..bf35f79d1e 100644
--- a/client/applyconfiguration/hypershift/v1beta1/karpenterconfig.go
+++ b/client/applyconfiguration/hypershift/v1beta1/karpenterconfig.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // KarpenterConfigApplyConfiguration represents an declarative configuration of the KarpenterConfig type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/kmsspec.go b/client/applyconfiguration/hypershift/v1beta1/kmsspec.go
index eece69f45e..33a5cf0543 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kmsspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kmsspec.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // KMSSpecApplyConfiguration represents an declarative configuration of the KMSSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtmanualstoragedriverconfig.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtmanualstoragedriverconfig.go
index 5176990201..7b0328b0a5 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtmanualstoragedriverconfig.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtmanualstoragedriverconfig.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // KubevirtManualStorageDriverConfigApplyConfiguration represents an declarative configuration of the KubevirtManualStorageDriverConfig type for use
 // with apply.
 type KubevirtManualStorageDriverConfigApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go
index 88d6679275..1caac99872 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // KubevirtNodePoolPlatformApplyConfiguration represents an declarative configuration of the KubevirtNodePoolPlatform type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolstatus.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolstatus.go
index bf1f45e965..e02897816b 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolstatus.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolstatus.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // KubeVirtNodePoolStatusApplyConfiguration represents an declarative configuration of the KubeVirtNodePoolStatus type for use
 // with apply.
 type KubeVirtNodePoolStatusApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformcredentials.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformcredentials.go
index b0c8ad7962..8a93b3ae98 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformcredentials.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformcredentials.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // KubevirtPlatformCredentialsApplyConfiguration represents an declarative configuration of the KubevirtPlatformCredentials type for use
 // with apply.
 type KubevirtPlatformCredentialsApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformspec.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformspec.go
index 3c5714926a..ea21de4a30 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtplatformspec.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // KubevirtPlatformSpecApplyConfiguration represents an declarative configuration of the KubevirtPlatformSpec type for use
 // with apply.
 type KubevirtPlatformSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtrootvolume.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtrootvolume.go
index bda9105ba8..2802a493b9 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtrootvolume.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtrootvolume.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // KubevirtRootVolumeApplyConfiguration represents an declarative configuration of the KubevirtRootVolume type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtstoragedriverspec.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtstoragedriverspec.go
index 974490912d..8162f08702 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtstoragedriverspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtstoragedriverspec.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // KubevirtStorageDriverSpecApplyConfiguration represents an declarative configuration of the KubevirtStorageDriverSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtvolume.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtvolume.go
index 78fa0dcedb..6c0b485aba 100644
--- a/client/applyconfiguration/hypershift/v1beta1/kubevirtvolume.go
+++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtvolume.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // KubevirtVolumeApplyConfiguration represents an declarative configuration of the KubevirtVolume type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/managedetcdspec.go b/client/applyconfiguration/hypershift/v1beta1/managedetcdspec.go
index c233269317..8b5cfad9f9 100644
--- a/client/applyconfiguration/hypershift/v1beta1/managedetcdspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/managedetcdspec.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // ManagedEtcdSpecApplyConfiguration represents an declarative configuration of the ManagedEtcdSpec type for use
 // with apply.
 type ManagedEtcdSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/managedetcdstoragespec.go b/client/applyconfiguration/hypershift/v1beta1/managedetcdstoragespec.go
index 8c632ef1f7..60885da80e 100644
--- a/client/applyconfiguration/hypershift/v1beta1/managedetcdstoragespec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/managedetcdstoragespec.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // ManagedEtcdStorageSpecApplyConfiguration represents an declarative configuration of the ManagedEtcdStorageSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/networkfilter.go b/client/applyconfiguration/hypershift/v1beta1/networkfilter.go
index 6b9b3679d1..525275c05a 100644
--- a/client/applyconfiguration/hypershift/v1beta1/networkfilter.go
+++ b/client/applyconfiguration/hypershift/v1beta1/networkfilter.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // NetworkFilterApplyConfiguration represents an declarative configuration of the NetworkFilter type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/networkparam.go b/client/applyconfiguration/hypershift/v1beta1/networkparam.go
index d859ec4cb5..cf80f73afe 100644
--- a/client/applyconfiguration/hypershift/v1beta1/networkparam.go
+++ b/client/applyconfiguration/hypershift/v1beta1/networkparam.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // NetworkParamApplyConfiguration represents an declarative configuration of the NetworkParam type for use
 // with apply.
 type NetworkParamApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/nodepool.go b/client/applyconfiguration/hypershift/v1beta1/nodepool.go
index 666d301d68..6fd3c768d5 100644
--- a/client/applyconfiguration/hypershift/v1beta1/nodepool.go
+++ b/client/applyconfiguration/hypershift/v1beta1/nodepool.go
@@ -18,6 +18,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
diff --git a/client/applyconfiguration/hypershift/v1beta1/nodepoolmanagement.go b/client/applyconfiguration/hypershift/v1beta1/nodepoolmanagement.go
index a23475b359..46335069d8 100644
--- a/client/applyconfiguration/hypershift/v1beta1/nodepoolmanagement.go
+++ b/client/applyconfiguration/hypershift/v1beta1/nodepoolmanagement.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // NodePoolManagementApplyConfiguration represents an declarative configuration of the NodePoolManagement type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/nodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/nodepoolplatform.go
index a6bec424f4..83b5829bc4 100644
--- a/client/applyconfiguration/hypershift/v1beta1/nodepoolplatform.go
+++ b/client/applyconfiguration/hypershift/v1beta1/nodepoolplatform.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // NodePoolPlatformApplyConfiguration represents an declarative configuration of the NodePoolPlatform type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/nodepoolplatformstatus.go b/client/applyconfiguration/hypershift/v1beta1/nodepoolplatformstatus.go
index ae60c60f26..848f464391 100644
--- a/client/applyconfiguration/hypershift/v1beta1/nodepoolplatformstatus.go
+++ b/client/applyconfiguration/hypershift/v1beta1/nodepoolplatformstatus.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // NodePoolPlatformStatusApplyConfiguration represents an declarative configuration of the NodePoolPlatformStatus type for use
 // with apply.
 type NodePoolPlatformStatusApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/nodepoolspec.go b/client/applyconfiguration/hypershift/v1beta1/nodepoolspec.go
index 7483baeb2a..ca49fc92b6 100644
--- a/client/applyconfiguration/hypershift/v1beta1/nodepoolspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/nodepoolspec.go
@@ -18,6 +18,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
diff --git a/client/applyconfiguration/hypershift/v1beta1/nodepoolstatus.go b/client/applyconfiguration/hypershift/v1beta1/nodepoolstatus.go
index e47b7fac6d..dd196ed612 100644
--- a/client/applyconfiguration/hypershift/v1beta1/nodepoolstatus.go
+++ b/client/applyconfiguration/hypershift/v1beta1/nodepoolstatus.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // NodePoolStatusApplyConfiguration represents an declarative configuration of the NodePoolStatus type for use
 // with apply.
 type NodePoolStatusApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/openstacknodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/openstacknodepoolplatform.go
index 98ee398f0d..c65bdb6b4e 100644
--- a/client/applyconfiguration/hypershift/v1beta1/openstacknodepoolplatform.go
+++ b/client/applyconfiguration/hypershift/v1beta1/openstacknodepoolplatform.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // OpenStackNodePoolPlatformApplyConfiguration represents an declarative configuration of the OpenStackNodePoolPlatform type for use
 // with apply.
 type OpenStackNodePoolPlatformApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/openstackplatformspec.go b/client/applyconfiguration/hypershift/v1beta1/openstackplatformspec.go
index 66ba3c8ae3..2d55c84405 100644
--- a/client/applyconfiguration/hypershift/v1beta1/openstackplatformspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/openstackplatformspec.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // OpenStackPlatformSpecApplyConfiguration represents an declarative configuration of the OpenStackPlatformSpec type for use
 // with apply.
 type OpenStackPlatformSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/platformspec.go b/client/applyconfiguration/hypershift/v1beta1/platformspec.go
index a5213aa0e4..170941258d 100644
--- a/client/applyconfiguration/hypershift/v1beta1/platformspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/platformspec.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // PlatformSpecApplyConfiguration represents an declarative configuration of the PlatformSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/platformstatus.go b/client/applyconfiguration/hypershift/v1beta1/platformstatus.go
index d56dc2979e..25a20590eb 100644
--- a/client/applyconfiguration/hypershift/v1beta1/platformstatus.go
+++ b/client/applyconfiguration/hypershift/v1beta1/platformstatus.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // PlatformStatusApplyConfiguration represents an declarative configuration of the PlatformStatus type for use
 // with apply.
 type PlatformStatusApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/portspec.go b/client/applyconfiguration/hypershift/v1beta1/portspec.go
index 8b3b6b2b5a..5a5a44ff4f 100644
--- a/client/applyconfiguration/hypershift/v1beta1/portspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/portspec.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // PortSpecApplyConfiguration represents an declarative configuration of the PortSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/powervsnodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/powervsnodepoolplatform.go
index 1e248bfe5e..c2e9223ad7 100644
--- a/client/applyconfiguration/hypershift/v1beta1/powervsnodepoolplatform.go
+++ b/client/applyconfiguration/hypershift/v1beta1/powervsnodepoolplatform.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 	intstr "k8s.io/apimachinery/pkg/util/intstr"
 )
diff --git a/client/applyconfiguration/hypershift/v1beta1/powervsplatformspec.go b/client/applyconfiguration/hypershift/v1beta1/powervsplatformspec.go
index 8047a8e89c..9e924fe431 100644
--- a/client/applyconfiguration/hypershift/v1beta1/powervsplatformspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/powervsplatformspec.go
@@ -18,6 +18,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 	v1 "k8s.io/api/core/v1"
 )
diff --git a/client/applyconfiguration/hypershift/v1beta1/provisionerconfig.go b/client/applyconfiguration/hypershift/v1beta1/provisionerconfig.go
index 7d972793ff..7864341b04 100644
--- a/client/applyconfiguration/hypershift/v1beta1/provisionerconfig.go
+++ b/client/applyconfiguration/hypershift/v1beta1/provisionerconfig.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // ProvisionerConfigApplyConfiguration represents an declarative configuration of the ProvisionerConfig type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/replaceupgrade.go b/client/applyconfiguration/hypershift/v1beta1/replaceupgrade.go
index 4411fe53f2..cff2bc40ca 100644
--- a/client/applyconfiguration/hypershift/v1beta1/replaceupgrade.go
+++ b/client/applyconfiguration/hypershift/v1beta1/replaceupgrade.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // ReplaceUpgradeApplyConfiguration represents an declarative configuration of the ReplaceUpgrade type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/routerfilter.go b/client/applyconfiguration/hypershift/v1beta1/routerfilter.go
index c3a52f2777..5b2b982581 100644
--- a/client/applyconfiguration/hypershift/v1beta1/routerfilter.go
+++ b/client/applyconfiguration/hypershift/v1beta1/routerfilter.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // RouterFilterApplyConfiguration represents an declarative configuration of the RouterFilter type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/routerparam.go b/client/applyconfiguration/hypershift/v1beta1/routerparam.go
index 1d3469c20c..d834ae574f 100644
--- a/client/applyconfiguration/hypershift/v1beta1/routerparam.go
+++ b/client/applyconfiguration/hypershift/v1beta1/routerparam.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // RouterParamApplyConfiguration represents an declarative configuration of the RouterParam type for use
 // with apply.
 type RouterParamApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/secretencryptionspec.go b/client/applyconfiguration/hypershift/v1beta1/secretencryptionspec.go
index 558e494826..fb3068fefa 100644
--- a/client/applyconfiguration/hypershift/v1beta1/secretencryptionspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/secretencryptionspec.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // SecretEncryptionSpecApplyConfiguration represents an declarative configuration of the SecretEncryptionSpec type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategy.go b/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategy.go
index 6f47aa7726..e3003802d5 100644
--- a/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategy.go
+++ b/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategy.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // ServicePublishingStrategyApplyConfiguration represents an declarative configuration of the ServicePublishingStrategy type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategymapping.go b/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategymapping.go
index 22bfe60303..e2dad876e8 100644
--- a/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategymapping.go
+++ b/client/applyconfiguration/hypershift/v1beta1/servicepublishingstrategymapping.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // ServicePublishingStrategyMappingApplyConfiguration represents an declarative configuration of the ServicePublishingStrategyMapping type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/subnetfilter.go b/client/applyconfiguration/hypershift/v1beta1/subnetfilter.go
index 893dc3e324..8abfc7745b 100644
--- a/client/applyconfiguration/hypershift/v1beta1/subnetfilter.go
+++ b/client/applyconfiguration/hypershift/v1beta1/subnetfilter.go
@@ -19,6 +19,7 @@ package v1beta1
 
 import (
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
 )
 
 // SubnetFilterApplyConfiguration represents an declarative configuration of the SubnetFilter type for use
diff --git a/client/applyconfiguration/hypershift/v1beta1/subnetparam.go b/client/applyconfiguration/hypershift/v1beta1/subnetparam.go
index 6754f748da..5fa2173ee0 100644
--- a/client/applyconfiguration/hypershift/v1beta1/subnetparam.go
+++ b/client/applyconfiguration/hypershift/v1beta1/subnetparam.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // SubnetParamApplyConfiguration represents an declarative configuration of the SubnetParam type for use
 // with apply.
 type SubnetParamApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/subnetspec.go b/client/applyconfiguration/hypershift/v1beta1/subnetspec.go
index 016c4cd98e..6408f73890 100644
--- a/client/applyconfiguration/hypershift/v1beta1/subnetspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/subnetspec.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // SubnetSpecApplyConfiguration represents an declarative configuration of the SubnetSpec type for use
 // with apply.
 type SubnetSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/hypershift/v1beta1/unmanagedetcdspec.go b/client/applyconfiguration/hypershift/v1beta1/unmanagedetcdspec.go
index 09f0acf3c7..84f51a30ec 100644
--- a/client/applyconfiguration/hypershift/v1beta1/unmanagedetcdspec.go
+++ b/client/applyconfiguration/hypershift/v1beta1/unmanagedetcdspec.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1beta1
 
+import (
+	v1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
+)
+
 // UnmanagedEtcdSpecApplyConfiguration represents an declarative configuration of the UnmanagedEtcdSpec type for use
 // with apply.
 type UnmanagedEtcdSpecApplyConfiguration struct {
diff --git a/client/applyconfiguration/internal/internal.go b/client/applyconfiguration/internal/internal.go
index 765bec46ab..289f7f35f4 100644
--- a/client/applyconfiguration/internal/internal.go
+++ b/client/applyconfiguration/internal/internal.go
@@ -18,9 +18,6 @@ limitations under the License.
 package internal
 
 import (
-	"fmt"
-	"sync"
-
 	typed "sigs.k8s.io/structured-merge-diff/v4/typed"
 )
diff --git a/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfiguration.go b/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfiguration.go
index 5b1f541d83..42feefa671 100644
--- a/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfiguration.go
+++ b/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfiguration.go
@@ -18,6 +18,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	v1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	v1 "k8s.io/client-go/applyconfigurations/meta/v1"
diff --git a/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfigurationspec.go b/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfigurationspec.go
index deb0fd83af..0afb04d962 100644
--- a/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfigurationspec.go
+++ b/client/applyconfiguration/scheduling/v1alpha1/clustersizingconfigurationspec.go
@@ -18,6 +18,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	v1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1"
 	resource "k8s.io/apimachinery/pkg/api/resource"
 )
diff --git a/client/applyconfiguration/scheduling/v1alpha1/effects.go b/client/applyconfiguration/scheduling/v1alpha1/effects.go
index ba77d9bdaa..066fcf9b12 100644
--- a/client/applyconfiguration/scheduling/v1alpha1/effects.go
+++ b/client/applyconfiguration/scheduling/v1alpha1/effects.go
@@ -18,6 +18,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	v1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1"
 	resource "k8s.io/apimachinery/pkg/api/resource"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
diff --git a/client/applyconfiguration/scheduling/v1alpha1/sizeconfiguration.go b/client/applyconfiguration/scheduling/v1alpha1/sizeconfiguration.go
index 0928fbf153..4efe73a8c0 100644
--- a/client/applyconfiguration/scheduling/v1alpha1/sizeconfiguration.go
+++ b/client/applyconfiguration/scheduling/v1alpha1/sizeconfiguration.go
@@ -17,6 +17,10 @@ limitations under the License.
 
 package v1alpha1
 
+import (
+	v1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1"
+)
+
 // SizeConfigurationApplyConfiguration represents an declarative configuration of the SizeConfiguration type for use
 // with apply.
 type SizeConfigurationApplyConfiguration struct {
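All of the applyconfiguration churn above is generated scaffolding for server-side apply. Assuming the standard client-gen conventions (the `HostedCluster(name, namespace)` constructor, the typed `Apply` method, and the "clusters" namespace are conventions assumed for illustration, not shown in this diff), usage looks roughly like:

```go
package example

import (
	"context"

	applyv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
	hypershiftclient "github.com/openshift/hypershift/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

// applyExample server-side-applies a minimal HostedCluster, owning only the
// fields this manager sets.
func applyExample(cfg *rest.Config) error {
	cs, err := hypershiftclient.NewForConfig(cfg)
	if err != nil {
		return err
	}
	hc := applyv1beta1.HostedCluster("example", "clusters")
	_, err = cs.HypershiftV1beta1().HostedClusters("clusters").
		Apply(context.TODO(), hc, metav1.ApplyOptions{FieldManager: "example-controller", Force: true})
	return err
}
```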
diff --git a/client/clientset/clientset/clientset.go b/client/clientset/clientset/clientset.go
index 8e910c9216..c2d8b72351 100644
--- a/client/clientset/clientset/clientset.go
+++ b/client/clientset/clientset/clientset.go
@@ -18,9 +18,6 @@ limitations under the License.
 package clientset
 
 import (
-	"fmt"
-	"net/http"
-
 	certificatesv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/certificates/v1alpha1"
 	hypershiftv1beta1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1beta1"
 	schedulingv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/scheduling/v1alpha1"
diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/certificaterevocationrequest.go b/client/clientset/clientset/typed/certificates/v1alpha1/certificaterevocationrequest.go
index b241c32c89..ffb87ce82f 100644
--- a/client/clientset/clientset/typed/certificates/v1alpha1/certificaterevocationrequest.go
+++ b/client/clientset/clientset/typed/certificates/v1alpha1/certificaterevocationrequest.go
@@ -18,10 +18,7 @@ limitations under the License.
 package v1alpha1
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
 	certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1"
diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/certificates_client.go b/client/clientset/clientset/typed/certificates/v1alpha1/certificates_client.go
index 6fca6b0654..a95f41b4b5 100644
--- a/client/clientset/clientset/typed/certificates/v1alpha1/certificates_client.go
+++ b/client/clientset/clientset/typed/certificates/v1alpha1/certificates_client.go
@@ -18,8 +18,6 @@ limitations under the License.
 package v1alpha1
 
 import (
-	"net/http"
-
 	v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
 	"github.com/openshift/hypershift/client/clientset/clientset/scheme"
 	rest "k8s.io/client-go/rest"
diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/certificatesigningrequestapproval.go b/client/clientset/clientset/typed/certificates/v1alpha1/certificatesigningrequestapproval.go
index e71feed455..09e3d57c71 100644
--- a/client/clientset/clientset/typed/certificates/v1alpha1/certificatesigningrequestapproval.go
+++ b/client/clientset/clientset/typed/certificates/v1alpha1/certificatesigningrequestapproval.go
@@ -18,10 +18,7 @@ limitations under the License.
 package v1alpha1
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
 	certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1"
diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificaterevocationrequest.go b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificaterevocationrequest.go
index da045ce90f..19f9e1b272 100644
--- a/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificaterevocationrequest.go
+++ b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificaterevocationrequest.go
@@ -18,9 +18,7 @@ limitations under the License.
 package fake
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
 
 	v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
 	certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1"
diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificatesigningrequestapproval.go b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificatesigningrequestapproval.go
index fd365c96c3..b36f51d26c 100644
--- a/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificatesigningrequestapproval.go
+++ b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificatesigningrequestapproval.go
@@ -18,9 +18,7 @@ limitations under the License.
 package fake
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
 
 	v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
 	certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1"
 package v1beta1
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
 	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_certificatesigningrequestapproval.go b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_certificatesigningrequestapproval.go
index 6d9e62e34d..85b369353c 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_certificatesigningrequestapproval.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_certificatesigningrequestapproval.go
@@ -18,9 +18,7 @@ limitations under the License.
 package fake
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
 
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
 	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcluster.go b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcluster.go
index 569c9acf28..238d75fd84 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcluster.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcluster.go
@@ -18,9 +18,7 @@ limitations under the License.
 package fake
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
 
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
 	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcontrolplane.go b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcontrolplane.go
index 2f381b4d3a..0d2fa9253b 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcontrolplane.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hostedcontrolplane.go
@@ -18,9 +18,7 @@ limitations under the License.
 package fake
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
 
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
 	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_nodepool.go b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_nodepool.go
index 4d15f10169..39db40392b 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_nodepool.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_nodepool.go
@@ -18,9 +18,7 @@ limitations under the License.
 package fake
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
 
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
 	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/hostedcluster.go b/client/clientset/clientset/typed/hypershift/v1beta1/hostedcluster.go
index e4eff190c0..8886f8249a 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/hostedcluster.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/hostedcluster.go
@@ -18,10 +18,7 @@ limitations under the License.
 package v1beta1
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
 	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/hostedcontrolplane.go b/client/clientset/clientset/typed/hypershift/v1beta1/hostedcontrolplane.go
index d838e1e960..966223dd6a 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/hostedcontrolplane.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/hostedcontrolplane.go
@@ -18,10 +18,7 @@ limitations under the License.
 package v1beta1
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
 	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/hypershift_client.go b/client/clientset/clientset/typed/hypershift/v1beta1/hypershift_client.go
index 7ce2da0f94..891bd63f4a 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/hypershift_client.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/hypershift_client.go
@@ -18,8 +18,6 @@ limitations under the License.
 package v1beta1
 
 import (
-	"net/http"
-
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
 	"github.com/openshift/hypershift/client/clientset/clientset/scheme"
 	rest "k8s.io/client-go/rest"
diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/nodepool.go b/client/clientset/clientset/typed/hypershift/v1beta1/nodepool.go
index d8dc3b5f9e..6653c7a7a3 100644
--- a/client/clientset/clientset/typed/hypershift/v1beta1/nodepool.go
+++ b/client/clientset/clientset/typed/hypershift/v1beta1/nodepool.go
@@ -18,10 +18,7 @@ limitations under the License.
 package v1beta1
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
 	hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1"
diff --git a/client/clientset/clientset/typed/scheduling/v1alpha1/clustersizingconfiguration.go b/client/clientset/clientset/typed/scheduling/v1alpha1/clustersizingconfiguration.go
index 85f707895e..1d66ed9d8d 100644
--- a/client/clientset/clientset/typed/scheduling/v1alpha1/clustersizingconfiguration.go
+++ b/client/clientset/clientset/typed/scheduling/v1alpha1/clustersizingconfiguration.go
@@ -18,10 +18,7 @@ limitations under the License.
 package v1alpha1
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
-	"time"
 
 	v1alpha1 "github.com/openshift/hypershift/api/scheduling/v1alpha1"
 	schedulingv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1"
diff --git a/client/clientset/clientset/typed/scheduling/v1alpha1/fake/fake_clustersizingconfiguration.go b/client/clientset/clientset/typed/scheduling/v1alpha1/fake/fake_clustersizingconfiguration.go
index 1ad988f887..dd0e845d70 100644
--- a/client/clientset/clientset/typed/scheduling/v1alpha1/fake/fake_clustersizingconfiguration.go
+++ b/client/clientset/clientset/typed/scheduling/v1alpha1/fake/fake_clustersizingconfiguration.go
@@ -18,9 +18,7 @@ limitations under the License.
 package fake
 
 import (
-	"context"
 	json "encoding/json"
-	"fmt"
 
 	v1alpha1 "github.com/openshift/hypershift/api/scheduling/v1alpha1"
 	schedulingv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/scheduling/v1alpha1"
diff --git a/client/clientset/clientset/typed/scheduling/v1alpha1/scheduling_client.go b/client/clientset/clientset/typed/scheduling/v1alpha1/scheduling_client.go
index 91bf604baa..878aa7fb13 100644
--- a/client/clientset/clientset/typed/scheduling/v1alpha1/scheduling_client.go
+++ b/client/clientset/clientset/typed/scheduling/v1alpha1/scheduling_client.go
@@ -18,8 +18,6 @@ limitations under the License.
 package v1alpha1
 
 import (
-	"net/http"
-
 	v1alpha1 "github.com/openshift/hypershift/api/scheduling/v1alpha1"
 	"github.com/openshift/hypershift/client/clientset/clientset/scheme"
 	rest "k8s.io/client-go/rest"
diff --git a/client/informers/externalversions/certificates/v1alpha1/certificaterevocationrequest.go b/client/informers/externalversions/certificates/v1alpha1/certificaterevocationrequest.go
index b79ab1e28e..c03a3cc344 100644
--- a/client/informers/externalversions/certificates/v1alpha1/certificaterevocationrequest.go
+++ b/client/informers/externalversions/certificates/v1alpha1/certificaterevocationrequest.go
@@ -18,7 +18,6 @@ limitations under the License.
 package v1alpha1
 
 import (
-	"context"
 	time "time"
 
 	certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
diff --git a/client/informers/externalversions/certificates/v1alpha1/certificatesigningrequestapproval.go b/client/informers/externalversions/certificates/v1alpha1/certificatesigningrequestapproval.go
index 98eda861a8..0a2df613db 100644
--- a/client/informers/externalversions/certificates/v1alpha1/certificatesigningrequestapproval.go
+++ b/client/informers/externalversions/certificates/v1alpha1/certificatesigningrequestapproval.go
@@ -18,7 +18,6 @@ limitations under the License.
 package v1alpha1
 
 import (
-	"context"
 	time "time"
 
 	certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
diff --git a/client/informers/externalversions/hypershift/v1beta1/certificatesigningrequestapproval.go b/client/informers/externalversions/hypershift/v1beta1/certificatesigningrequestapproval.go
index bd82a48460..5d74c45312 100644
--- a/client/informers/externalversions/hypershift/v1beta1/certificatesigningrequestapproval.go
+++ b/client/informers/externalversions/hypershift/v1beta1/certificatesigningrequestapproval.go
@@ -18,7 +18,6 @@ limitations under the License.
 package v1beta1
 
 import (
-	"context"
 	time "time"
 
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
diff --git a/client/informers/externalversions/hypershift/v1beta1/hostedcluster.go b/client/informers/externalversions/hypershift/v1beta1/hostedcluster.go
index 1432ad978a..e06d22481f 100644
--- a/client/informers/externalversions/hypershift/v1beta1/hostedcluster.go
+++ b/client/informers/externalversions/hypershift/v1beta1/hostedcluster.go
@@ -18,7 +18,6 @@ limitations under the License.
 package v1beta1
 
 import (
-	"context"
 	time "time"
 
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
diff --git a/client/informers/externalversions/hypershift/v1beta1/hostedcontrolplane.go b/client/informers/externalversions/hypershift/v1beta1/hostedcontrolplane.go
index 2da1f3feab..12c6e2f4b8 100644
--- a/client/informers/externalversions/hypershift/v1beta1/hostedcontrolplane.go
+++ b/client/informers/externalversions/hypershift/v1beta1/hostedcontrolplane.go
@@ -18,7 +18,6 @@ limitations under the License.
 package v1beta1
 
 import (
-	"context"
 	time "time"
 
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
diff --git a/client/informers/externalversions/hypershift/v1beta1/nodepool.go b/client/informers/externalversions/hypershift/v1beta1/nodepool.go
index 9572114de8..262275882f 100644
--- a/client/informers/externalversions/hypershift/v1beta1/nodepool.go
+++ b/client/informers/externalversions/hypershift/v1beta1/nodepool.go
@@ -18,7 +18,6 @@ limitations under the License.
 package v1beta1
 
 import (
-	"context"
 	time "time"
 
 	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
diff --git a/client/informers/externalversions/scheduling/v1alpha1/clustersizingconfiguration.go b/client/informers/externalversions/scheduling/v1alpha1/clustersizingconfiguration.go
index da86755963..1a3b5d59b6 100644
--- a/client/informers/externalversions/scheduling/v1alpha1/clustersizingconfiguration.go
+++ b/client/informers/externalversions/scheduling/v1alpha1/clustersizingconfiguration.go
@@ -18,7 +18,6 @@ limitations under the License.
 package v1alpha1
 
 import (
-	"context"
 	time "time"
 
 	schedulingv1alpha1 "github.com/openshift/hypershift/api/scheduling/v1alpha1"
diff --git a/go.mod b/go.mod
index 0e75e1b20f..41d3c5c2fe 100644
--- a/go.mod
+++ b/go.mod
@@ -35,7 +35,7 @@ require (
 	github.com/go-jose/go-jose/v3 v3.0.3
 	github.com/go-logr/logr v1.4.2
 	github.com/go-logr/zapr v1.3.0
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
+	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8
 	github.com/google/cel-go v0.22.0
 	github.com/google/go-cmp v0.6.0
 	github.com/google/gofuzz v1.2.0
@@ -46,9 +46,9 @@ require (
 	github.com/onsi/gomega v1.36.2
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0
-	github.com/openshift/api v0.0.0-20250124212313-a770960d61e0
-	github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7
-	github.com/openshift/cloud-credential-operator v0.0.0-20240814063301-26d835e0fa5e
+	github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f
+	github.com/openshift/client-go v0.0.0-20250131180035-f7ec47e2d87a
+	github.com/openshift/cloud-credential-operator v0.0.0-20250120201329-db5f2531a5b4
 	github.com/openshift/cluster-api-provider-agent/api v0.0.0-20230918065757-81658c4ddf2f
 	github.com/openshift/cluster-node-tuning-operator v0.0.0-20250123060244-3b77fb55b835
 	github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87
@@ -91,7 +91,7 @@ require (
 	k8s.io/kube-scheduler v0.32.2
 	k8s.io/kubectl v0.32.2
 	k8s.io/pod-security-admission v0.32.2
-	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
+	k8s.io/utils v0.0.0-20241210054802-24370beab758
 	kubevirt.io/api v1.4.0
 	kubevirt.io/containerized-data-importer-api v1.61.1
 	sigs.k8s.io/cluster-api v1.9.4
diff --git a/go.sum b/go.sum
index 4af5613518..3e38e73f57 100644
--- a/go.sum
+++ b/go.sum
@@ -295,8 +295,8 @@ github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w
 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
 github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
 github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
@@ -495,12 +495,12 @@ github.com/opencontainers/runc v1.1.14 h1:rgSuzbmgz5DUJjeSnw337TxDbRuqjs6iqQck/2
 github.com/opencontainers/runc v1.1.14/go.mod h1:E4C2z+7BxR7GHXp0hAY53mek+x49X1LjPNeMTfRGvOA=
 github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
 github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
-github.com/openshift/api v0.0.0-20250124212313-a770960d61e0 h1:dCvNfygMrPLVNQ06bpHXrxKfrXHiprO4+etHrRUqI8g=
-github.com/openshift/api v0.0.0-20250124212313-a770960d61e0/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
-github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7 h1:4iliLcvr1P9EUMZgIaSNEKNQQzBn+L6PSequlFOuB6Q=
-github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7/go.mod h1:2tcufBE4Cu6RNgDCxcUJepa530kGo5GFVfR9BSnndhI=
-github.com/openshift/cloud-credential-operator v0.0.0-20240814063301-26d835e0fa5e h1:X0et0+WqSmnW0CS//XdLFLvJvzt8TFJzfFE1XJ2rXNg=
-github.com/openshift/cloud-credential-operator v0.0.0-20240814063301-26d835e0fa5e/go.mod h1:4AWWBNPuWzPtT77xDONlObrazPlBCKXd+16lupnIrQc=
+github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f h1:HH+BqfxONWB2kknhCLAjiAievsyb56WvLGXdNOseI8g=
+github.com/openshift/api v0.0.0-20250213010142-f5b09d13c01f/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
+github.com/openshift/client-go v0.0.0-20250131180035-f7ec47e2d87a h1:duO3JMrUOqVx50QhzxvDeOYIwTNOB8/EEuRLPyvAMBg=
+github.com/openshift/client-go v0.0.0-20250131180035-f7ec47e2d87a/go.mod h1:Qw3ThpzVZ0bfTILpBNYg4LGyjtNxfyCiGh/uDLOOTP8=
+github.com/openshift/cloud-credential-operator v0.0.0-20250120201329-db5f2531a5b4 h1:nrD3npDGt5bvwNXZKTzzEuZTI/4Uo5PbrkpAjfxhxtE=
+github.com/openshift/cloud-credential-operator v0.0.0-20250120201329-db5f2531a5b4/go.mod h1:Lzu29TMne5LsgPnyw2n9jrPiD5t6uyG5aE6KFy8cz6w=
 github.com/openshift/cluster-api-provider-agent/api v0.0.0-20230918065757-81658c4ddf2f h1:YWIaRFHMNzxi26FbT0TE9zl4pBzfeiZjQJv0JUzibUc=
 github.com/openshift/cluster-api-provider-agent/api v0.0.0-20230918065757-81658c4ddf2f/go.mod h1:RNzrOK4TA6LdyrgrEvb1TXjOnhRC+iBmnBoAa/4ivBw=
 github.com/openshift/cluster-node-tuning-operator v0.0.0-20250123060244-3b77fb55b835 h1:AN2ZaBNt5YrJmO7QbE4ULW4UD+8RnJRtpLuchcLVAno=
@@ -955,8 +955,8 @@ k8s.io/pod-security-admission v0.32.2 h1:zDfAb/t0LbNU3z0ZMHtCb1zp8x05gWCGhmBYpUp
 k8s.io/pod-security-admission v0.32.2/go.mod h1:yxMPB3i1pGMLfxbe4BiWMuowMD7cdHR32y4nCj4wH+s=
 k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
+k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd h1:KJXBX9dOmRTUWduHg1gnWtPGIEl+GMh8UHdrBEZgOXE=
 knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd/go.mod h1:36cYnaOVHkzmhgybmYX6zDaTl3PakFeJQJl7wi6/RLE=
 kubevirt.io/api v1.4.0 h1:dDLyQLSp9obzsDrv3cyL1olIc/66IWVaGiR3gfPfgT0=
diff --git a/hack/tools/go.mod b/hack/tools/go.mod
index bf5e8adf03..8ee6c5791c 100644
--- a/hack/tools/go.mod
+++ b/hack/tools/go.mod
@@ -4,56 +4,63 @@ go 1.23.0
 
 require (
 	github.com/ahmetb/gen-crd-api-reference-docs v0.3.0
-	github.com/golangci/golangci-lint v1.62.0
-	github.com/openshift/api/tools v0.0.0-20241014171745-f199b1f2660c
+	github.com/golangci/golangci-lint v1.63.4
+	github.com/openshift/api/tools v0.0.0-20250213010142-f5b09d13c01f
 	gotest.tools/gotestsum v1.12.0
 	honnef.co/go/tools v0.5.1
-	k8s.io/code-generator v0.31.1
-	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+	k8s.io/code-generator v0.32.1
+	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
 )
 
 require (
 	4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
 	4d63.com/gochecknoglobals v0.2.1 // indirect
+	cel.dev/expr v0.18.0 // indirect
 	dario.cat/mergo v1.0.0 // indirect
-	github.com/4meepo/tagalign v1.3.4 // indirect
+	github.com/4meepo/tagalign v1.4.1 // indirect
 	github.com/Abirdcfly/dupword v0.1.3 // indirect
 	github.com/Antonboom/errname v1.0.0 // indirect
-	github.com/Antonboom/nilnil v1.0.0 // indirect
-	github.com/Antonboom/testifylint v1.5.0 // indirect
+	github.com/Antonboom/nilnil v1.0.1 // indirect
+	github.com/Antonboom/testifylint v1.5.2 // indirect
 	github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
-	github.com/Crocmagnon/fatcontext v0.5.2 // indirect
+	github.com/Crocmagnon/fatcontext v0.5.3 // indirect
 	github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
 	github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect
+	github.com/JoelSpeed/kal v0.0.0-20250121143106-304c215e474e // indirect
 	github.com/Masterminds/semver/v3 v3.3.0 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect
 	github.com/ProtonMail/go-crypto v1.1.3 // indirect
 	github.com/a8m/envsubst v1.4.2 // indirect
-	github.com/alecthomas/go-check-sumtype v0.2.0 // indirect
+	github.com/alecthomas/go-check-sumtype v0.3.1 // indirect
 	github.com/alecthomas/participle/v2 v2.1.1 // indirect
 	github.com/alexkohler/nakedret/v2 v2.0.5 // indirect
 	github.com/alexkohler/prealloc v1.0.0 // indirect
 	github.com/alingse/asasalint v0.0.11 // indirect
+	github.com/alingse/nilnesserr v0.1.1 // indirect
+	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
+	github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
 	github.com/ashanbrown/forbidigo v1.6.0 // indirect
-	github.com/ashanbrown/makezero v1.1.1 // indirect
+	github.com/ashanbrown/makezero v1.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/bitfield/gotestdox v0.2.2 // indirect
 	github.com/bkielbasa/cyclop v1.2.3 // indirect
+	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/blizzy78/varnamelen v0.8.0 // indirect
-	github.com/bombsimon/wsl/v4 v4.4.1 // indirect
+	github.com/bombsimon/wsl/v4 v4.5.0 // indirect
 	github.com/breml/bidichk v0.3.2 // indirect
 	github.com/breml/errchkjson v0.4.0 // indirect
-	github.com/butuzov/ireturn v0.3.0 // indirect
-	github.com/butuzov/mirror v1.2.0 // indirect
+	github.com/butuzov/ireturn v0.3.1 // indirect
+	github.com/butuzov/mirror v1.3.0 // indirect
 	github.com/catenacyber/perfsprint v0.7.1 // indirect
 	github.com/ccojocar/zxcvbn-go v1.0.2 // indirect
+	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/charithe/durationcheck v0.0.10 // indirect
 	github.com/chavacava/garif v0.1.0 // indirect
-	github.com/ckaznocha/intrange v0.2.1 // indirect
+	github.com/ckaznocha/intrange v0.3.0 // indirect
 	github.com/cloudflare/circl v1.3.7 // indirect
-	github.com/curioswitch/go-reassign v0.2.0 // indirect
+	github.com/curioswitch/go-reassign v0.3.0 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.5 // indirect
 	github.com/daixiang0/gci v0.13.5 // indirect
 	github.com/dave/dst v0.27.3 // indirect
@@ -67,6 +74,7 @@ require (
 	github.com/ettle/strcase v0.2.0 // indirect
 	github.com/fatih/color v1.18.0 // indirect
 	github.com/fatih/structtag v1.2.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/firefart/nonamedreturns v1.0.5 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
@@ -78,9 +86,10 @@ require (
 	github.com/go-git/go-billy/v5 v5.6.0 // indirect
 	github.com/go-git/go-git/v5 v5.13.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
-	github.com/go-openapi/jsonpointer v0.20.0 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/swag v0.22.4 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
 	github.com/go-toolsmith/astcast v1.1.0 // indirect
 	github.com/go-toolsmith/astcopy v1.1.0 // indirect
 	github.com/go-toolsmith/astequal v1.2.0 // indirect
@@ -89,45 +98,49 @@ require (
 	github.com/go-toolsmith/strparse v1.1.0 // indirect
 	github.com/go-toolsmith/typep v1.1.0 // indirect
 	github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
-	github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect
+	github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect
 	github.com/gobuffalo/flect v1.0.2 // indirect
 	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/goccy/go-json v0.10.3 // indirect
-	github.com/goccy/go-yaml v1.11.3 // indirect
+	github.com/goccy/go-yaml v1.13.0 // indirect
 	github.com/gofrs/flock v0.12.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
 	github.com/golangci/go-printf-func-name v0.1.0 // indirect
-	github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 // indirect
+	github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9 // indirect
 	github.com/golangci/misspell v0.6.0 // indirect
-	github.com/golangci/modinfo v0.3.4 // indirect
 	github.com/golangci/plugin-module-register v0.1.1 // indirect
 	github.com/golangci/revgrep v0.5.3 // indirect
 	github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect
+	github.com/google/cel-go v0.22.0 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/gordonklaus/ineffassign v0.1.0 // indirect
 	github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
 	github.com/gostaticanalysis/comment v1.4.2 // indirect
 	github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
 	github.com/gostaticanalysis/nilerr v0.1.1 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect
 	github.com/hashicorp/go-version v1.7.0 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/hexops/gotextdiff v1.0.3 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
-	github.com/jessevdk/go-flags v1.5.0 // indirect
+	github.com/jessevdk/go-flags v1.6.1 // indirect
 	github.com/jgautheron/goconst v1.7.1 // indirect
 	github.com/jingyugao/rowserrcheck v1.1.1 // indirect
 	github.com/jinzhu/copier v0.4.0 // indirect
-	github.com/jjti/go-spancheck v0.6.2 // indirect
+	github.com/jjti/go-spancheck v0.6.4 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/julz/importas v0.1.0 // indirect
+	github.com/julz/importas v0.2.0 // indirect
 	github.com/karamaru-alpha/copyloopvar v1.1.0 // indirect
 	github.com/kevinburke/ssh_config v1.2.0 // indirect
 	github.com/kisielk/errcheck v1.8.0 // indirect
@@ -137,8 +150,11 @@ require (
 	github.com/kunwardeep/paralleltest v1.0.10 // indirect
 	github.com/kyoh86/exportloopref v0.1.11 // indirect
 	github.com/lasiar/canonicalheader v1.1.2 // indirect
-	github.com/ldez/gomoddirectives v0.2.4 // indirect
-	github.com/ldez/tagliatelle v0.5.0 // indirect
+	github.com/ldez/exptostd v0.3.1 // indirect
+	github.com/ldez/gomoddirectives v0.6.0 // indirect
+	github.com/ldez/grignotin v0.7.0 // indirect
+	github.com/ldez/tagliatelle v0.7.1 // indirect
+	github.com/ldez/usetesting v0.4.2 // indirect
 	github.com/leonklingele/grouper v1.1.2 // indirect
 	github.com/macabu/inamedparam v0.1.3 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
@@ -149,8 +165,8 @@ require (
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.16 // indirect
-	github.com/mgechev/revive v1.5.0 // indirect
-	github.com/mikefarah/yq/v4 v4.44.2 // indirect
+	github.com/mgechev/revive v1.5.1 // indirect
+	github.com/mikefarah/yq/v4 v4.44.5 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -160,15 +176,15 @@ require (
 	github.com/nakabonne/nestif v0.3.1 // indirect
 	github.com/nishanths/exhaustive v0.12.0 // indirect
 	github.com/nishanths/predeclared v0.2.2 // indirect
-	github.com/nunnatsa/ginkgolinter v0.18.0 // indirect
+	github.com/nunnatsa/ginkgolinter v0.18.4 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
-	github.com/openshift/crd-schema-checker v0.0.0-20240927092539-11d809bb1183 // indirect
+	github.com/openshift/crd-schema-checker v0.0.0-20241113192003-573763d3107a // indirect
 	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
 	github.com/pjbgf/sha1cd v0.3.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/polyfloyd/go-errorlint v1.6.0 // indirect
+	github.com/polyfloyd/go-errorlint v1.7.0 // indirect
 	github.com/prometheus/client_golang v1.19.1 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
 	github.com/prometheus/common v0.55.0 // indirect
@@ -178,17 +194,17 @@ require (
 	github.com/quasilyte/gogrep v0.5.0 // indirect
 	github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
 	github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
-	github.com/raeperd/recvcheck v0.1.2 // indirect
+	github.com/raeperd/recvcheck v0.2.0 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/rogpeppe/go-internal v1.13.1 // indirect
 	github.com/russross/blackfriday v2.0.0+incompatible // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/ryancurrah/gomodguard v1.3.5 // indirect
 	github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect
-	github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect
-	github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
+	github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect
+	github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect
 	github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
-	github.com/sashamelentyev/usestdlibvars v1.27.0 // indirect
+	github.com/sashamelentyev/usestdlibvars v1.28.0 // indirect
 	github.com/securego/gosec/v2 v2.21.4 // indirect
 	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
 	github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
@@ -206,20 +222,21 @@ require (
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/spf13/viper v1.12.0 // indirect
 	github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
-	github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect
+	github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect
+	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/stretchr/testify v1.10.0 // indirect
 	github.com/subosito/gotenv v1.4.1 // indirect
-	github.com/tdakkota/asciicheck v0.2.0 // indirect
-	github.com/tetafro/godot v1.4.18 // indirect
-	github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect
+	github.com/tdakkota/asciicheck v0.3.0 // indirect
+	github.com/tetafro/godot v1.4.20 // indirect
+	github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 // indirect
 	github.com/timonwong/loggercheck v0.10.1 // indirect
-	github.com/tomarrell/wrapcheck/v2 v2.9.0 // indirect
+	github.com/tomarrell/wrapcheck/v2 v2.10.0 // indirect
 	github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
-	github.com/ultraware/funlen v0.1.0 // indirect
-	github.com/ultraware/whitespace v0.1.1 // indirect
-	github.com/uudashr/gocognit v1.1.3 // indirect
-	github.com/uudashr/iface v1.2.0 // indirect
+	github.com/ultraware/funlen v0.2.0 // indirect
+	github.com/ultraware/whitespace v0.2.0 // indirect
+	github.com/uudashr/gocognit v1.2.0 // indirect
+	github.com/uudashr/iface v1.3.0 // indirect
 	github.com/vmware-archive/yaml-patch v0.0.11 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xanzy/ssh-agent v0.3.3 // indirect
@@ -231,40 +248,56 @@ require (
 	gitlab.com/bosi/decorder v0.4.2 // indirect
 	go-simpler.org/musttag v0.13.0 // indirect
 	go-simpler.org/sloglint v0.7.2 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
+	go.opentelemetry.io/otel v1.29.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
+	go.opentelemetry.io/otel/metric v1.29.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.28.0 // indirect
+	go.opentelemetry.io/otel/trace v1.29.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 	go.uber.org/automaxprocs v1.6.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	go.uber.org/zap v1.26.0 // indirect
+	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/crypto v0.31.0 // indirect
 	golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
-	golang.org/x/exp/typeparams v0.0.0-20240909161429-701f63a606c0 // indirect
+	golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f // indirect
 	golang.org/x/mod v0.22.0 // indirect
 	golang.org/x/net v0.33.0 // indirect
+	golang.org/x/oauth2 v0.23.0 // indirect
 	golang.org/x/sync v0.10.0 // indirect
 	golang.org/x/sys v0.28.0 // indirect
 	golang.org/x/term v0.27.0 // indirect
 	golang.org/x/text v0.21.0 // indirect
-	golang.org/x/tools v0.27.0 // indirect
-	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
-	google.golang.org/protobuf v1.34.2 // indirect
+	golang.org/x/time v0.7.0 // indirect
+	golang.org/x/tools v0.28.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect
+	google.golang.org/grpc v1.67.0 // indirect
+	google.golang.org/protobuf v1.35.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/api v0.31.1 // indirect
-	k8s.io/apiextensions-apiserver v0.31.1 // indirect
-	k8s.io/apimachinery v0.31.1 // indirect
+	k8s.io/api v0.32.1 // indirect
+	k8s.io/apiextensions-apiserver v0.32.1 // indirect
+	k8s.io/apimachinery v0.32.1 // indirect
+	k8s.io/apiserver v0.32.1 // indirect
+	k8s.io/client-go v0.32.1 // indirect
+	k8s.io/component-base v0.32.1 // indirect
 	k8s.io/gengo v0.0.0-20240404160639-a0386bf69313 // indirect
-	k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
+	k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect
 	k8s.io/klog v1.0.0 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
+	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
 	mvdan.cc/gofumpt v0.7.0 // indirect
 	mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect
+	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
 	sigs.k8s.io/controller-tools v0.15.0 // indirect
-	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
diff --git a/hack/tools/go.sum b/hack/tools/go.sum
index 0050e38f41..d15bc6887d 100644
--- a/hack/tools/go.sum
+++ b/hack/tools/go.sum
@@ -2,26 +2,30 @@
 4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs=
 4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc=
 4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU=
+cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
+cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
 dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
-github.com/4meepo/tagalign v1.3.4 h1:P51VcvBnf04YkHzjfclN6BbsopfJR5rxs1n+5zHt+w8=
-github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0=
+github.com/4meepo/tagalign v1.4.1 h1:GYTu2FaPGOGb/xJalcqHeD4il5BiCywyEYZOA55P6J4=
+github.com/4meepo/tagalign v1.4.1/go.mod h1:2H9Yu6sZ67hmuraFgfZkNcg5Py9Ch/Om9l2K/2W1qS4=
 github.com/Abirdcfly/dupword v0.1.3 h1:9Pa1NuAsZvpFPi9Pqkd93I7LIYRURj+A//dFd5tgBeE=
 github.com/Abirdcfly/dupword v0.1.3/go.mod h1:8VbB2t7e10KRNdwTVoxdBaxla6avbhGzb8sCTygUMhw=
 github.com/Antonboom/errname v1.0.0 h1:oJOOWR07vS1kRusl6YRSlat7HFnb3mSfMl6sDMRoTBA=
 github.com/Antonboom/errname v1.0.0/go.mod h1:gMOBFzK/vrTiXN9Oh+HFs+e6Ndl0eTFbtsRTSRdXyGI=
-github.com/Antonboom/nilnil v1.0.0 h1:n+v+B12dsE5tbAqRODXmEKfZv9j2KcTBrp+LkoM4HZk=
-github.com/Antonboom/nilnil v1.0.0/go.mod h1:fDJ1FSFoLN6yoG65ANb1WihItf6qt9PJVTn/s2IrcII=
-github.com/Antonboom/testifylint v1.5.0 h1:dlUIsDMtCrZWUnvkaCz3quJCoIjaGi41GzjPBGkkJ8A=
-github.com/Antonboom/testifylint v1.5.0/go.mod h1:wqaJbu0Blb5Wag2wv7Z5xt+CIV+eVLxtGZrlK13z3AE=
+github.com/Antonboom/nilnil v1.0.1 h1:C3Tkm0KUxgfO4Duk3PM+ztPncTFlOf0b2qadmS0s4xs=
+github.com/Antonboom/nilnil v1.0.1/go.mod h1:CH7pW2JsRNFgEh8B2UaPZTEPhCMuFowP/e8Udp9Nnb0=
+github.com/Antonboom/testifylint v1.5.2 h1:4s3Xhuv5AvdIgbd8wOOEeo0uZG7PbDKQyKY5lGoQazk=
+github.com/Antonboom/testifylint v1.5.2/go.mod h1:vxy8VJ0bc6NavlYqjZfmp6EfqXMtBgQ4+mhCojwC1P8=
 github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
 github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
-github.com/Crocmagnon/fatcontext v0.5.2 h1:vhSEg8Gqng8awhPju2w7MKHqMlg4/NI+gSDHtR3xgwA=
-github.com/Crocmagnon/fatcontext v0.5.2/go.mod h1:87XhRMaInHP44Q7Tlc7jkgKKB7kZAOPiDkFMdKCC+74=
+github.com/Crocmagnon/fatcontext v0.5.3 h1:zCh/wjc9oyeF+Gmp+V60wetm8ph2tlsxocgg/J0hOps=
+github.com/Crocmagnon/fatcontext v0.5.3/go.mod h1:XoCQYY1J+XTfyv74qLXvNw4xFunr3L1wkopIIKG7wGM=
 github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
 github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
 github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU=
 github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao=
+github.com/JoelSpeed/kal v0.0.0-20250121143106-304c215e474e h1:H7jNmU1/bzAi4YFVpRPUzE3hzcrOCk1H6qJpLVCAPaI=
+github.com/JoelSpeed/kal v0.0.0-20250121143106-304c215e474e/go.mod h1:zptAqBcJGHljQfJR7oQXVKS/E6KVUFZDkE7OJDQeKOw=
 github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
 github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
@@ -35,10 +39,10 @@ github.com/a8m/envsubst v1.4.2 h1:4yWIHXOLEJHQEFd4UjrWDrYeYlV7ncFWJOCBRLOZHQg=
 github.com/a8m/envsubst v1.4.2/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY=
 github.com/ahmetb/gen-crd-api-reference-docs v0.3.0 h1:+XfOU14S4bGuwyvCijJwhhBIjYN+YXS18jrCY2EzJaY=
 github.com/ahmetb/gen-crd-api-reference-docs v0.3.0/go.mod h1:TdjdkYhlOifCQWPs1UdTma97kQQMozf5h26hTuG70u8=
-github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0=
-github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ=
-github.com/alecthomas/go-check-sumtype v0.2.0 h1:Bo+e4DFf3rs7ME9w/0SU/g6nmzJaphduP8Cjiz0gbwY=
-github.com/alecthomas/go-check-sumtype v0.2.0/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ=
+github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0=
+github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
+github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU=
+github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E=
 github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6icjJvbsmV8=
 github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c=
 github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc=
@@ -49,50 +53,64 @@ github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pO
 github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
 github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw=
 github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
+github.com/alingse/nilnesserr v0.1.1 h1:7cYuJewpy9jFNMEA72Q1+3Nm3zKHzg+Q28D5f2bBFUA=
+github.com/alingse/nilnesserr v0.1.1/go.mod h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg=
 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
+github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI=
+github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY=
 github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
-github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
-github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
+github.com/ashanbrown/makezero v1.2.0 h1:/2Lp1bypdmK9wDIq7uWBlDF1iMUpIIS4A+pF6C9IEUU=
+github.com/ashanbrown/makezero v1.2.0/go.mod h1:dxlPhHbDMC6N6xICzFBSK+4njQDdK8euNO0qjQMtGY4=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bitfield/gotestdox v0.2.2 h1:x6RcPAbBbErKLnapz1QeAlf3ospg8efBsedU93CDsnE=
 github.com/bitfield/gotestdox v0.2.2/go.mod h1:D+gwtS0urjBrzguAkTM2wodsTQYFHdpx8eqRJ3N+9pY=
 github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w=
 github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M=
 github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
-github.com/bombsimon/wsl/v4 v4.4.1 h1:jfUaCkN+aUpobrMO24zwyAMwMAV5eSziCkOKEauOLdw=
-github.com/bombsimon/wsl/v4 v4.4.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo=
+github.com/bombsimon/wsl/v4 v4.5.0 h1:iZRsEvDdyhd2La0FVi5k6tYehpOR/R7qIUjmKk7N74A=
+github.com/bombsimon/wsl/v4 v4.5.0/go.mod h1:NOQ3aLF4nD7N5YPXMruR6ZXDOAqLoM0GEpLwTdvmOSc=
 github.com/breml/bidichk v0.3.2 h1:xV4flJ9V5xWTqxL+/PMFF6dtJPvZLPsyixAoPe8BGJs=
 github.com/breml/bidichk v0.3.2/go.mod h1:VzFLBxuYtT23z5+iVkamXO386OB+/sVwZOpIj6zXGos=
 github.com/breml/errchkjson v0.4.0 h1:gftf6uWZMtIa/Is3XJgibewBm2ksAQSY/kABDNFTAdk=
 github.com/breml/errchkjson v0.4.0/go.mod h1:AuBOSTHyLSaaAFlWsRSuRBIroCh3eh7ZHh5YeelDIk8=
-github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0=
-github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA=
-github.com/butuzov/mirror v1.2.0 h1:9YVK1qIjNspaqWutSv8gsge2e/Xpq1eqEkslEUHy5cs=
-github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ=
+github.com/butuzov/ireturn v0.3.1 h1:mFgbEI6m+9W8oP/oDdfA34dLisRFCj2G6o/yiI1yZrY=
+github.com/butuzov/ireturn v0.3.1/go.mod h1:ZfRp+E7eJLC0NQmk1Nrm1LOrn/gQlOykv+cVPdiXH5M=
+github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc=
+github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI=
 github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc=
 github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50=
 github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg=
 github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4=
 github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
 github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=
 github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww=
-github.com/ckaznocha/intrange v0.2.1 h1:M07spnNEQoALOJhwrImSrJLaxwuiQK+hA2DeajBlwYk=
-github.com/ckaznocha/intrange v0.2.1/go.mod h1:7NEhVyf8fzZO5Ds7CRaqPEm52Ut83hsTiL5zbER/HYk=
+github.com/ckaznocha/intrange v0.3.0 h1:VqnxtK32pxgkhJgYQEeOArVidIPg+ahLP7WBOXZd5ZY=
+github.com/ckaznocha/intrange v0.3.0/go.mod h1:+I/o2d2A1FBHgGELbGxzIcyd3/9l9DuwjM8FsbSS3Lo=
 github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
 github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
-github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
+github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs=
+github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88=
 github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo=
 github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c=
@@ -109,6 +127,8 @@ github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42
 github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
+github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
 github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk=
 github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE=
 github.com/elazarl/goproxy v1.2.1 h1:njjgvO6cRG9rIqN2ebkqy6cQz2Njkx7Fsfv/zIZqgug=
@@ -127,6 +147,8 @@ github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
 github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
 github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4=
 github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA=
 github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw=
 github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
@@ -137,6 +159,8 @@ github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
 github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
+github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
+github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghostiam/protogetter v0.3.8 h1:LYcXbYvybUyTIxN2Mj9h6rHrDZBDwZloPoKctWrFyJY=
@@ -155,22 +179,25 @@ github.com/go-git/go-git/v5 v5.13.0 h1:vLn5wlGIh/X78El6r3Jr+30W16Blk0CTcxTYcYPWi
 github.com/go-git/go-git/v5 v5.13.0/go.mod h1:Wjo7/JyVKtQgUNdXYXIepzWfJQkUEIGvkvVkiXRR/zw=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
-github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
 github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
 github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
-github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
-github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
-github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
-github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA=
+github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
 github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
 github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
@@ -196,16 +223,16 @@ github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUN
 github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig=
 github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
 github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
-github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U=
-github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
+github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY=
+github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
 github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA=
 github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
 github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
-github.com/goccy/go-yaml v1.11.3 h1:B3W9IdWbvrUu2OYQGwvU1nZtvMQJPBKgBUuweJjLj6I=
-github.com/goccy/go-yaml v1.11.3/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
+github.com/goccy/go-yaml v1.13.0 h1:0Wtp0FZLd7Sm8gERmR9S6Iczzb3vItJj7NaHmFg8pTs=
+github.com/goccy/go-yaml v1.13.0/go.mod h1:IjYwxUiJDoqpx2RmbdjMUceGHZwYLon3sfOGl5Hi9lc=
 github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
 github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -218,20 +245,22 @@ github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9
 github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
 github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU=
 github.com/golangci/go-printf-func-name v0.1.0/go.mod h1:wqhWFH5mUdJQhweRnldEywnR5021wTdZSNgwYceV14s=
-github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 h1:/1322Qns6BtQxUZDTAT4SdcoxknUki7IAoK4SAXr8ME=
-github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9/go.mod h1:Oesb/0uFAyWoaw1U1qS5zyjCg5NP9C9iwjnI4tIsXEE=
-github.com/golangci/golangci-lint v1.62.0 h1:/G0g+bi1BhmGJqLdNQkKBWjcim8HjOPc4tsKuHDOhcI=
-github.com/golangci/golangci-lint v1.62.0/go.mod h1:jtoOhQcKTz8B6dGNFyfQV3WZkQk+YvBDewDtNpiAJts=
+github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9 h1:t5wybL6RtO83VwoMOb7U/Peqe3gGKQlPIC66wXmnkvM=
+github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9/go.mod h1:Ag3L7sh7E28qAp/5xnpMMTuGYqxLZoSaEHZDkZB1RgU=
+github.com/golangci/golangci-lint v1.63.4 h1:bJQFQ3hSfUto597dkL7ipDzOxsGEpiWdLiZ359OWOBI=
+github.com/golangci/golangci-lint v1.63.4/go.mod h1:Hx0B7Lg5/NXbaOHem8+KU+ZUIzMI6zNj/7tFwdnn10I=
 github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs=
 github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo=
-github.com/golangci/modinfo v0.3.4 h1:oU5huX3fbxqQXdfspamej74DFX0kyGLkw1ppvXoJ8GA=
-github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM=
 github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c=
 github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc=
 github.com/golangci/revgrep v0.5.3 h1:3tL7c1XBMtWHHqVpS5ChmiAAoe4PF/d5+ULzV9sLAzs=
 github.com/golangci/revgrep v0.5.3/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k=
 github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs=
 github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
+github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
 github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -247,10 +276,12 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA=
-github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s=
 github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
 github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
@@ -263,11 +294,21 @@ github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3
 github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk=
 github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A=
 github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M=
-github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY=
-github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU=
+github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8=
+github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo=
+github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
 github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
@@ -276,22 +317,22 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
-github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
+github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4=
+github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc=
 github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5Jkk=
 github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
 github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
 github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
 github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
 github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
-github.com/jjti/go-spancheck v0.6.2 h1:iYtoxqPMzHUPp7St+5yA8+cONdyXD3ug6KK15n7Pklk=
-github.com/jjti/go-spancheck v0.6.2/go.mod h1:+X7lvIrR5ZdUTkxFYqzJ0abr8Sb5LOo80uOhWNqIrYA=
+github.com/jjti/go-spancheck v0.6.4 h1:Tl7gQpYf4/TMU7AT84MN83/6PutY21Nb9fuQjFTpRRc=
+github.com/jjti/go-spancheck v0.6.4/go.mod h1:yAEYdKJ2lRkDA8g7X+oKUHXOWVAXSBJRv04OhF+QUjk=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY=
-github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
+github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ=
+github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY=
 github.com/karamaru-alpha/copyloopvar v1.1.0 h1:x7gNyKcC2vRBO1H2Mks5u1VxQtYvFiym7fCjIP8RPos=
 github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k=
 github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
@@ -321,12 +362,18 @@ github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/
 github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
 github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4=
 github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI=
-github.com/ldez/gomoddirectives v0.2.4 h1:j3YjBIjEBbqZ0NKtBNzr8rtMHTOrLPeiwTkfUJZ3alg=
-github.com/ldez/gomoddirectives v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g=
-github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo=
-github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4=
-github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/ldez/exptostd v0.3.1 h1:90yWWoAKMFHeovTK8uzBms9Ppp8Du/xQ20DRO26Ymrw=
+github.com/ldez/exptostd v0.3.1/go.mod h1:iZBRYaUmcW5jwCR3KROEZ1KivQQp6PHXbDPk9hqJKCQ=
+github.com/ldez/gomoddirectives v0.6.0 h1:Jyf1ZdTeiIB4dd+2n4qw+g4aI9IJ6JyfOZ8BityWvnA=
+github.com/ldez/gomoddirectives v0.6.0/go.mod h1:TuwOGYoPAoENDWQpe8DMqEm5nIfjrxZXmxX/CExWyZ4=
+github.com/ldez/grignotin v0.7.0 h1:vh0dI32WhHaq6LLPZ38g7WxXuZ1+RzyrJ7iPG9JMa8c=
+github.com/ldez/grignotin v0.7.0/go.mod h1:uaVTr0SoZ1KBii33c47O1M8Jp3OP3YDwhZCmzT9GHEk=
+github.com/ldez/tagliatelle v0.7.1 h1:bTgKjjc2sQcsgPiT902+aadvMjCeMHrY7ly2XKFORIk=
+github.com/ldez/tagliatelle v0.7.1/go.mod h1:3zjxUpsNB2aEZScWiZTHrAXOl1x25t3cRmzfK1mlo2I=
+github.com/ldez/usetesting v0.4.2 h1:J2WwbrFGk3wx4cZwSMiCQQ00kjGR0+tuuyW0Lqm4lwA=
+github.com/ldez/usetesting v0.4.2/go.mod h1:eEs46T3PpQ+9RgN9VjpY6qWdiw2/QmfiDeWmdZdrjIQ=
+github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
+github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
 github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY=
 github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA=
 github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk=
@@ -353,10 +400,10 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
 github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mgechev/revive v1.5.0 h1:oaSmjA7rP8+HyoRuCgC531VHwnLH1AlJdjj+1AnQceQ=
-github.com/mgechev/revive v1.5.0/go.mod h1:L6T3H8EoerRO86c7WuGpvohIUmiploGiyoYbtIWFmV8=
-github.com/mikefarah/yq/v4 v4.44.2 h1:J+ezWCDTg+SUs0jXdcE0HIPH1+rEr0Tbn9Y1SwiWtH0=
-github.com/mikefarah/yq/v4 v4.44.2/go.mod h1:9bnz36uZJDEyxdIjRronBcqStS953k3y3DrSRXr4F/w=
+github.com/mgechev/revive v1.5.1 h1:hE+QPeq0/wIzJwOphdVyUJ82njdd8Khp4fUIHGZHW3M=
+github.com/mgechev/revive v1.5.1/go.mod h1:lC9AhkJIBs5zwx8wkudyHrU+IJkrEKmpCmGMnIJPk4o=
+github.com/mikefarah/yq/v4 v4.44.5 h1:/Xm1dM1BfyDJMg+yIpnl2AgpmLFQg3Lcm/kuyYgHEXE=
+github.com/mikefarah/yq/v4 v4.44.5/go.mod h1:rpn3xGVz+2pDuLJTlCvzatCwTmmUeHcm7MbkbtHdvkc=
 github.com/mitchellh/go-homedir
v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -376,24 +423,24 @@ github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhK github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= -github.com/nunnatsa/ginkgolinter v0.18.0 h1:ZXO1wKhPg3A6LpbN5dMuqwhfOjN5c3ous8YdKOuqk9k= -github.com/nunnatsa/ginkgolinter v0.18.0/go.mod h1:vPrWafSULmjMGCMsfGA908if95VnHQNAahvSBOjTuWs= +github.com/nunnatsa/ginkgolinter v0.18.4 h1:zmX4KUR+6fk/vhUFt8DOP6KwznekhkmVSzzVJve2vyM= +github.com/nunnatsa/ginkgolinter v0.18.4/go.mod h1:AMEane4QQ6JwFz5GgjI5xLUM9S/CylO+UyM97fN2iBI= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= -github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= -github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= -github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= -github.com/openshift/api/tools v0.0.0-20241014171745-f199b1f2660c h1:Yqb9U5t5VW0nY2lfhxJ3dkpcOipAdCpQKfnplg5y8PI= -github.com/openshift/api/tools v0.0.0-20241014171745-f199b1f2660c/go.mod h1:AOqVox+u6bxjXoSTHI421YuBBY5YIkhI/2AcdbRpPxU= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/openshift/api/tools v0.0.0-20250213010142-f5b09d13c01f h1:6+KhkIK8YWxh4n+qD5aypR43Vozm4QWfNwXI5yWJzTw= +github.com/openshift/api/tools v0.0.0-20250213010142-f5b09d13c01f/go.mod h1:NDQL+y3cc/O4/qgC8nEo9+RPDx47NPqI6SiVKLydzBQ= github.com/openshift/controller-tools v0.0.0-20240809124726-b901265f16d5 h1:ImjAJqnPX8ILYw1iaZkToByrUqBHhCmYw+Bk1uEVKRo= github.com/openshift/controller-tools v0.0.0-20240809124726-b901265f16d5/go.mod h1:80xsUppuf2iNgiThH2bzDIN5204p5E93z+YtNnAJlHA= -github.com/openshift/crd-schema-checker v0.0.0-20240927092539-11d809bb1183 h1:5QvKMDlTFpY9+Md+qYv1vVXT0Rt/VhAqtXeoauyzNxc= -github.com/openshift/crd-schema-checker v0.0.0-20240927092539-11d809bb1183/go.mod h1:sTxJ4ZFW9r9fEdbW2v0yMRi6NcyTbx0fII4p83IQ+L8= +github.com/openshift/crd-schema-checker v0.0.0-20241113192003-573763d3107a h1:gBheMF1vVwxfnuGHJ82f/nmUATdtewq60/yhbBqD4+M= +github.com/openshift/crd-schema-checker v0.0.0-20241113192003-573763d3107a/go.mod h1:sTxJ4ZFW9r9fEdbW2v0yMRi6NcyTbx0fII4p83IQ+L8= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/copy v1.14.0 
h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= @@ -415,8 +462,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v1.6.0 h1:tftWV9DE7txiFzPpztTAwyoRLKNj9gpVm2cg8/OwcYY= -github.com/polyfloyd/go-errorlint v1.6.0/go.mod h1:HR7u8wuP1kb1NeN1zqTd1ZMlqUKPPHF+Id4vIPvDqVw= +github.com/polyfloyd/go-errorlint v1.7.0 h1:Zp6lzCK4hpBDj8y8a237YK4EPrMXQWvOe3nGoH4pFrU= +github.com/polyfloyd/go-errorlint v1.7.0/go.mod h1:dGWKu85mGHnegQ2SWpEybFityCg3j7ZbwsVUxAOk9gY= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= @@ -437,8 +484,8 @@ github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= -github.com/raeperd/recvcheck v0.1.2 h1:SjdquRsRXJc26eSonWIo8b7IMtKD3OAT2Lb5G3ZX1+4= -github.com/raeperd/recvcheck v0.1.2/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= +github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74UI= +github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -454,14 +501,14 @@ github.com/ryancurrah/gomodguard v1.3.5 h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE= github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= -github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= -github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= -github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= -github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0= +github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= 
github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= -github.com/sashamelentyev/usestdlibvars v1.27.0 h1:t/3jZpSXtRPRf2xr0m63i32ZrusyurIGT9E5wAvXQnI= -github.com/sashamelentyev/usestdlibvars v1.27.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= +github.com/sashamelentyev/usestdlibvars v1.28.0 h1:jZnudE2zKCtYlGzLVreNp5pmCdOxXUzwsMDBkR21cyQ= +github.com/sashamelentyev/usestdlibvars v1.28.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= github.com/securego/gosec/v2 v2.21.4 h1:Le8MSj0PDmOnHJgUATjD96PaXRvCpKC+DGJvwyy0Mlk= github.com/securego/gosec/v2 v2.21.4/go.mod h1:Jtb/MwRQfRxCXyCm1rfM1BEiiiTfUOdyzzAhlr6lUTA= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= @@ -499,8 +546,10 @@ github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= -github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4= +github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -519,30 +568,30 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= -github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= +github.com/tdakkota/asciicheck v0.3.0 h1:LqDGgZdholxZMaJgpM6b0U9CFIjDCbFdUF00bDnBKOQ= +github.com/tdakkota/asciicheck v0.3.0/go.mod h1:KoJKXuX/Z/lt6XzLo8WMBfQGzak0SrAKZlvRr4tg8Ac= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.18 h1:ouX3XGiziKDypbpXqShBfnNLTSjR8r3/HVzrtJ+bHlI= -github.com/tetafro/godot v1.4.18/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= -github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= -github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= 
+github.com/tetafro/godot v1.4.20 h1:z/p8Ek55UdNvzt4TFn2zx2KscpW4rWqcnUrdmvWJj7E= +github.com/tetafro/godot v1.4.20/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 h1:y4mJRFlM6fUyPhoXuFg/Yu02fg/nIPFMOY8tOqppoFg= +github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= github.com/timonwong/loggercheck v0.10.1 h1:uVZYClxQFpw55eh+PIoqM7uAOHMrhVcDoWDery9R8Lg= github.com/timonwong/loggercheck v0.10.1/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= -github.com/tomarrell/wrapcheck/v2 v2.9.0 h1:801U2YCAjLhdN8zhZ/7tdjB3EnAoRlJHt/s+9hijLQ4= -github.com/tomarrell/wrapcheck/v2 v2.9.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= +github.com/tomarrell/wrapcheck/v2 v2.10.0 h1:SzRCryzy4IrAH7bVGG4cK40tNUhmVmMDuJujy4XwYDg= +github.com/tomarrell/wrapcheck/v2 v2.10.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= -github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= -github.com/ultraware/whitespace v0.1.1 h1:bTPOGejYFulW3PkcrqkeQwOd6NKOOXvmGD9bo/Gk8VQ= -github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= -github.com/uudashr/gocognit v1.1.3 h1:l+a111VcDbKfynh+airAy/DJQKaXh2m9vkoysMPSZyM= -github.com/uudashr/gocognit v1.1.3/go.mod h1:aKH8/e8xbTRBwjbCkwZ8qt4l2EpKXl31KMHgSS+lZ2U= -github.com/uudashr/iface v1.2.0 h1:ECJjh5q/1Zmnv/2yFpWV6H3oMg5+Mo+vL0aqw9Gjazo= -github.com/uudashr/iface v1.2.0/go.mod h1:Ux/7d/rAF3owK4m53cTVXL4YoVHKNqnoOeQHn2xrlp0= +github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= +github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA= +github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g= +github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= +github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= +github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= +github.com/uudashr/iface v1.3.0 h1:zwPch0fs9tdh9BmL5kcgSpvnObV+yHjO4JjVBl8IA10= +github.com/uudashr/iface v1.3.0/go.mod h1:4QvspiRd3JLPAEXBQ9AiZpLbJlrWWgRChOKDJEuQTdg= github.com/vmware-archive/yaml-patch v0.0.11 h1:9o4FGgkpLD88A5O7BOOXs7UBeeymRT9atLsKmHJ2wWs= github.com/vmware-archive/yaml-patch v0.0.11/go.mod h1:mHWEn1O1CU3yBnN6iPFeAwAqzUibF2X+9EltQ28w+Vs= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -574,14 +623,38 @@ go-simpler.org/musttag v0.13.0 h1:Q/YAW0AHvaoaIbsPj3bvEI5/QFP7w696IMUpnKXQfCE= go-simpler.org/musttag v0.13.0/go.mod h1:FTzIGeK6OkKlUDVpj0iQUXZLUO1Js9+mvykDQy9C5yM= go-simpler.org/sloglint v0.7.2 h1:Wc9Em/Zeuu7JYpl+oKoYOsQSy2X560aVueCW/m6IijY= go-simpler.org/sloglint v0.7.2/go.mod h1:US+9C80ppl7VsThQclkM7BkCHQAzuz8kHLsW3ppuluo= +go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0= +go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= +go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q= +go.etcd.io/etcd/client/pkg/v3 v3.5.16/go.mod h1:V8acl8pcEK0Y2g19YlOV9m9ssUe6MgiDSobSoaBAM0E= +go.etcd.io/etcd/client/v3 
v3.5.16 h1:sSmVYOAHeC9doqi0gv7v86oY/BTld0SEFGaxsU9eRhE= +go.etcd.io/etcd/client/v3 v3.5.16/go.mod h1:X+rExSGkyqxvu276cr2OwPLBaeqFu1cIl4vmRjAD/50= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -590,6 +663,7 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.11.0/go.mod 
h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= @@ -598,13 +672,12 @@ golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWB golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20240909161429-701f63a606c0 h1:bVwtbF629Xlyxk6xLQq2TDYmqP0uiWaet5LwRebuY0k= -golang.org/x/exp/typeparams v0.0.0-20240909161429-701f63a606c0/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f h1:WTyX8eCCyfdqiPYkRGm0MqElSfYFH3yR1+rl/mct9sA= +golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= @@ -612,6 +685,7 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= @@ -631,13 +705,17 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net 
v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -646,6 +724,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= @@ -655,9 +734,7 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -676,6 +753,7 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -688,9 +766,11 @@ golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term 
v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= @@ -703,12 +783,15 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -721,35 +804,41 @@ golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= 
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= -golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= -golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f h1:jTm13A2itBi3La6yTGqn8bVSrc3ZZ1r8ENHlIXBfnRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240930140551-af27646dc61f/go.mod h1:CLGoBuH1VHxAUXVPP8FfPwPEVJB6lz3URE5mY2SuayE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 h1:N9BgCIAUvn/M+p4NJccWPWb3BWh88+zyL0ll9HgbEeM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= @@ -773,37 +862,45 @@ gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= -k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= -k8s.io/api v0.31.1/go.mod 
h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= -k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= -k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= -k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= -k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= +k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= +k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= +k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= +k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= +k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak= +k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw= +k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= +k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= k8s.io/code-generator v0.30.1 h1:ZsG++q5Vt0ScmKCeLhynUuWgcwFGg1Hl1AGfatqPJBI= k8s.io/code-generator v0.30.1/go.mod h1:hFgxRsvOUg79mbpbVKfjJvRhVz1qLoe40yZDJ/hwRH4= +k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= +k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20240404160639-a0386bf69313 h1:wBIDZID8ju9pwOiLlV22YYKjFGtiNSWgHf5CnKLRUuM= k8s.io/gengo v0.0.0-20240404160639-a0386bf69313/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= -k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= +k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4= +k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= mvdan.cc/gofumpt v0.7.0 
h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU= mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo= mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U= mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/tools/vendor/cel.dev/expr/.bazelversion b/hack/tools/vendor/cel.dev/expr/.bazelversion new file mode 100644 index 0000000000..26bc914a3b --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/.bazelversion @@ -0,0 +1,2 @@ +7.0.1 +# Keep this pinned version in parity with cel-go diff --git a/hack/tools/vendor/cel.dev/expr/.gitattributes b/hack/tools/vendor/cel.dev/expr/.gitattributes new file mode 100644 index 0000000000..3de1ec213a --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/.gitattributes @@ -0,0 +1,2 @@ +*.pb.go linguist-generated=true +*.pb.go -diff -merge diff --git a/hack/tools/vendor/cel.dev/expr/.gitignore b/hack/tools/vendor/cel.dev/expr/.gitignore new file mode 100644 index 0000000000..0d4fed27c9 --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/.gitignore @@ -0,0 +1,2 @@ +bazel-* +MODULE.bazel.lock diff --git a/hack/tools/vendor/cel.dev/expr/BUILD.bazel b/hack/tools/vendor/cel.dev/expr/BUILD.bazel new file mode 100644 index 0000000000..37d8adc950 --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/BUILD.bazel @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) # Apache 2.0 + +go_library( + name = "expr", + srcs = [ + "checked.pb.go", + "eval.pb.go", + "explain.pb.go", + "syntax.pb.go", + "value.pb.go", + ], + importpath = "cel.dev/expr", + visibility = ["//visibility:public"], + deps = [ + "@org_golang_google_genproto_googleapis_rpc//status:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//runtime/protoimpl", + "@org_golang_google_protobuf//types/known/anypb", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + ], +) + +alias( + name = "go_default_library", + 
actual = ":expr", + visibility = ["//visibility:public"], +) diff --git a/hack/tools/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/hack/tools/vendor/cel.dev/expr/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..59908e2d8e --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/CODE_OF_CONDUCT.md @@ -0,0 +1,25 @@ +# Contributor Code of Conduct +## Version 0.1.1 (adapted from 0.3b-angular) + +As contributors and maintainers of the Common Expression Language +(CEL) project, we pledge to respect everyone who contributes by +posting issues, updating documentation, submitting pull requests, +providing feedback in comments, and any other activities. + +Communication through any of CEL's channels (GitHub, Gitter, IRC, +mailing lists, Google+, Twitter, etc.) must be constructive and never +resort to personal attacks, trolling, public or private harassment, +insults, or other unprofessional conduct. + +We promise to extend courtesy and respect to everyone involved in this +project regardless of gender, gender identity, sexual orientation, +disability, age, race, ethnicity, religion, or level of experience. We +expect anyone contributing to the project to do the same. + +If any member of the community violates this code of conduct, the +maintainers of the CEL project may take action, removing issues, +comments, and PRs or blocking accounts as deemed appropriate. + +If you are subject to or witness unacceptable behavior, or have any +other concerns, please email us at +[cel-conduct@google.com](mailto:cel-conduct@google.com). diff --git a/hack/tools/vendor/cel.dev/expr/CONTRIBUTING.md b/hack/tools/vendor/cel.dev/expr/CONTRIBUTING.md new file mode 100644 index 0000000000..8f5fd5c31f --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/CONTRIBUTING.md @@ -0,0 +1,32 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are a +few guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## What to expect from maintainers + +Expect maintainers to respond to new issues or pull requests within a week. +For outstanding and ongoing issues and particularly for long-running +pull requests, expect the maintainers to review within a week of a +contributor asking for a new review. There is no commitment to resolution -- +merging or closing a pull request, or fixing or closing an issue -- because some +issues will require more discussion than others. diff --git a/hack/tools/vendor/cel.dev/expr/GOVERNANCE.md b/hack/tools/vendor/cel.dev/expr/GOVERNANCE.md new file mode 100644 index 0000000000..0a525bc17d --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/GOVERNANCE.md @@ -0,0 +1,43 @@ +# Project Governance + +This document defines the governance process for the CEL language. 
CEL is +Google-developed, but openly governed. Major contributors to the CEL +specification and its corresponding implementations constitute the CEL +Language Council. New members may be added by a unanimous vote of the +Council. + +The MAINTAINERS.md file lists the members of the CEL Language Council, and +unofficially indicates the "areas of expertise" of each member with respect +to the publicly available CEL repos. + +## Code Changes + +Code changes must follow the standard pull request (PR) model documented in the +CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a +maintainer. The maintainer reserves the right to request that any feature +request (FR) or PR be reviewed by the language council. + +## Syntax and Semantic Changes + +Syntactic and semantic changes must be reviewed by the CEL Language Council. +Maintainers may also request language council review at their discretion. + +The review process is as follows: + +- Create a Feature Request in the CEL-Spec repo. The feature description will + serve as an abstract for the detailed design document. +- Co-develop a design document with the Language Council. +- Once the proposer gives the design document approval, the document will be + linked to the FR in the CEL-Spec repo and opened for comments to members of + the cel-lang-discuss@googlegroups.com. +- The Language Council will review the design doc at the next council meeting + (once every three weeks) and the council decision included in the document. + +If the proposal is approved, the spec will be updated by a maintainer (if +applicable) and a rationale will be included in the CEL-Spec wiki to ensure +future developers may follow CEL's growth and direction over time. + +Approved proposals may be implemented by the proposer or by the maintainers as +the parties see fit. At the discretion of the maintainer, changes from the +approved design are permitted during implementation if they improve the user +experience and clarity of the feature. diff --git a/hack/tools/vendor/cel.dev/expr/LICENSE b/hack/tools/vendor/cel.dev/expr/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/hack/tools/vendor/cel.dev/expr/MAINTAINERS.md b/hack/tools/vendor/cel.dev/expr/MAINTAINERS.md new file mode 100644 index 0000000000..1ed2eb8ab3 --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/MAINTAINERS.md @@ -0,0 +1,13 @@ +# CEL Language Council + +| Name | Company | Area of Expertise | +|-----------------|--------------|-------------------| +| Alfred Fuller | Facebook | cel-cpp, cel-spec | +| Jim Larson | Google | cel-go, cel-spec | +| Matthais Blume | Google | cel-spec | +| Tristan Swadell | Google | cel-go, cel-spec | + +## Emeritus + +* Sanjay Ghemawat (Google) +* Wolfgang Grieskamp (Facebook) diff --git a/hack/tools/vendor/cel.dev/expr/MODULE.bazel b/hack/tools/vendor/cel.dev/expr/MODULE.bazel new file mode 100644 index 0000000000..9794266f56 --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/MODULE.bazel @@ -0,0 +1,70 @@ +module( + name = "cel-spec", +) + +bazel_dep( + name = "bazel_skylib", + version = "1.7.1", +) +bazel_dep( + name = "gazelle", + version = "0.36.0", + repo_name = "bazel_gazelle", +) +bazel_dep( + name = "googleapis", + version = "0.0.0-20240819-fe8ba054a", + repo_name = "com_google_googleapis", +) +bazel_dep( + name = "protobuf", + version = "26.0", + repo_name = "com_google_protobuf", +) +bazel_dep( + name = "rules_cc", + version = "0.0.9", +) +bazel_dep( + name = "rules_go", + version = "0.49.0", + repo_name = "io_bazel_rules_go", +) +bazel_dep( + name = "rules_java", + version = "7.6.5", +) +bazel_dep( + name = "rules_proto", + version = "6.0.0", +) +bazel_dep( + name = "rules_python", + version = "0.35.0", +) + +### PYTHON ### +python = use_extension("@rules_python//python/extensions:python.bzl", "python") +python.toolchain( + ignore_root_user_error = True, + python_version = "3.11", +) + +switched_rules = use_extension("@com_google_googleapis//:extensions.bzl", "switched_rules") +switched_rules.use_languages( + cc = True, + go = True, + java = True, +) +use_repo(switched_rules, "com_google_googleapis_imports") + +go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk") +go_sdk.download(version = "1.21.1") + +go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps") +go_deps.from_file(go_mod = "//:go.mod") +use_repo( + go_deps, + "org_golang_google_genproto_googleapis_rpc", + "org_golang_google_protobuf", +) diff --git a/hack/tools/vendor/cel.dev/expr/README.md b/hack/tools/vendor/cel.dev/expr/README.md new file mode 100644 index 0000000000..7930c0b755 --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/README.md @@ -0,0 +1,73 @@ +# Common Expression Language + +The Common Expression Language (CEL) implements common semantics for expression +evaluation, enabling different applications to more easily interoperate. + +Key Applications + +* Security policy: organizations have complex infrastructure and need common + tooling to reason about the system as a whole +* Protocols: expressions are a useful data type and require interoperability + across programming languages and platforms. + + +Guiding philosophy: + +1. Keep it small & fast. + * CEL evaluates in linear time, is mutation free, and not Turing-complete. + This limitation is a feature of the language design, which allows the + implementation to evaluate orders of magnitude faster than equivalently + sandboxed JavaScript. +2. Make it extensible. + * CEL is designed to be embedded in applications, and allows for + extensibility via its context which allows for functions and data to be + provided by the software that embeds it. +3. Developer-friendly. 
+ * The language is approachable to developers. The initial spec was based + on the experience of developing Firebase Rules and usability testing + many prior iterations. + * The library itself and the accompanying tooling should be easy to adopt by + teams that seek to integrate CEL into their platforms. + + The required components of a system that supports CEL are: + + * The textual representation of an expression as written by a developer. It is + similar in syntax to expressions in C/C++/Java/JavaScript. + * A representation of the program's abstract syntax tree (AST). + * A compiler library that converts the textual representation to the binary + representation. This can be done ahead of time (in the control plane) or + just before evaluation (in the data plane). + * A context containing one or more typed variables, often protobuf messages. + Most use cases will use `attribute_context.proto`. + * An evaluator library that takes the binary format and the context and + produces a result, usually a Boolean. + + For use cases that require persistence or cross-process communication, it is + highly recommended to serialize the type-checked expression as a protocol + buffer. The CEL team maintains canonical protocol buffers for ASTs and + will keep these versions identical and wire-compatible in perpetuity: + + * [CEL canonical](https://github.com/google/cel-spec/tree/master/proto/cel/expr) + * [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1) + + + Example of boolean conditions and object construction: + + ``` c + // Condition + account.balance >= transaction.withdrawal + || (account.overdraftProtection + && account.overdraftLimit >= transaction.withdrawal - account.balance) + + // Object construction + common.GeoPoint{ latitude: 10.0, longitude: -5.5 } + ``` + + For more detail, see: + + * [Introduction](doc/intro.md) + * [Language Definition](doc/langdef.md) + + Released under the [Apache License](LICENSE). + + Disclaimer: This is not an official Google product.
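The vendored README above describes the compile/check/eval pipeline in the abstract. As a concrete illustration (not part of this vendored package), the Go implementation at github.com/google/cel-go, which consumes the ASTs defined in this repository, can compile and evaluate a condition like the one in the README's example roughly as follows. This is a minimal sketch; the variable names and inputs are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare the typed variables the expression is allowed to reference.
	env, err := cel.NewEnv(
		cel.Variable("account", cel.MapType(cel.StringType, cel.DynType)),
		cel.Variable("transaction", cel.MapType(cel.StringType, cel.DynType)),
	)
	if err != nil {
		panic(err)
	}

	// Parse and type-check the expression (control plane, ahead of time).
	ast, iss := env.Compile(`account.balance >= transaction.withdrawal`)
	if iss.Err() != nil {
		panic(iss.Err())
	}

	// Plan the program once, then evaluate it against concrete inputs
	// (data plane).
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{
		"account":     map[string]any{"balance": 500.0},
		"transaction": map[string]any{"withdrawal": 100.0},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```

Serializing `ast` as the `CheckedExpr` proto defined in this package is what allows the compile and evaluate steps to run in separate processes, as the README recommends.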
diff --git a/hack/tools/vendor/cel.dev/expr/WORKSPACE b/hack/tools/vendor/cel.dev/expr/WORKSPACE new file mode 100644 index 0000000000..b6dc9ed673 --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/WORKSPACE @@ -0,0 +1,145 @@ +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +http_archive( + name = "io_bazel_rules_go", + sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip", + "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip", + ], +) + +http_archive( + name = "bazel_gazelle", + sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz", + ], +) + +http_archive( + name = "rules_proto", + sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d", + strip_prefix = "rules_proto-4.0.0-3.20.0", + urls = [ + "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz", + ], +) + +# googleapis as of 09/16/2024 +http_archive( + name = "com_google_googleapis", + strip_prefix = "googleapis-4082d5e51e8481f6ccc384cacd896f4e78f19dee", + sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8", + urls = [ + "https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz", + ], +) + +# protobuf +http_archive( + name = "com_google_protobuf", + sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2", + strip_prefix = "protobuf-3.21.5", + urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"], +) + +# googletest +http_archive( + name = "com_google_googletest", + urls = ["https://github.com/google/googletest/archive/master.zip"], + strip_prefix = "googletest-master", +) + +# gflags +http_archive( + name = "com_github_gflags_gflags", + sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe", + strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a", + urls = [ + "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + ], +) + +# glog +http_archive( + name = "com_google_glog", + sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21", + strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8", + urls = [ + "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz", + "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz", + ], +) + +# absl +http_archive( + name = "com_google_absl", + strip_prefix = "abseil-cpp-master", + urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"], +) + +load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains") +load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") +load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language") +load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains") +load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") + +switched_rules_by_language( + name = 
"com_google_googleapis_imports", + cc = True, +) + +# Do *not* call *_dependencies(), etc, yet. See comment at the end. + +# Generated Google APIs protos for Golang +# Generated Google APIs protos for Golang 08/26/2024 +go_repository( + name = "org_golang_google_genproto_googleapis_api", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/api", + sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=", + version = "v0.0.0-20240826202546-f6391c0de4c7", +) + +# Generated Google APIs protos for Golang 08/26/2024 +go_repository( + name = "org_golang_google_genproto_googleapis_rpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/rpc", + sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=", + version = "v0.0.0-20240826202546-f6391c0de4c7", +) + +# gRPC deps +go_repository( + name = "org_golang_google_grpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/grpc", + tag = "v1.49.0", +) + +go_repository( + name = "org_golang_x_net", + importpath = "golang.org/x/net", + sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=", + version = "v0.0.0-20190311183353-d8887717615a", +) + +go_repository( + name = "org_golang_x_text", + importpath = "golang.org/x/text", + sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=", + version = "v0.3.2", +) + +# Run the dependencies at the end. These will silently try to import some +# of the above repositories but at different versions, so ours must come first. +go_rules_dependencies() +go_register_toolchains(version = "1.19.1") +gazelle_dependencies() +rules_proto_dependencies() +rules_proto_toolchains() +protobuf_deps() diff --git a/hack/tools/vendor/cel.dev/expr/WORKSPACE.bzlmod b/hack/tools/vendor/cel.dev/expr/WORKSPACE.bzlmod new file mode 100644 index 0000000000..e69de29bb2 diff --git a/hack/tools/vendor/cel.dev/expr/checked.pb.go b/hack/tools/vendor/cel.dev/expr/checked.pb.go new file mode 100644 index 0000000000..bb225c8ab3 --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/checked.pb.go @@ -0,0 +1,1432 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/checked.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Type_PrimitiveType int32 + +const ( + Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0 + Type_BOOL Type_PrimitiveType = 1 + Type_INT64 Type_PrimitiveType = 2 + Type_UINT64 Type_PrimitiveType = 3 + Type_DOUBLE Type_PrimitiveType = 4 + Type_STRING Type_PrimitiveType = 5 + Type_BYTES Type_PrimitiveType = 6 +) + +// Enum value maps for Type_PrimitiveType. 
+var ( + Type_PrimitiveType_name = map[int32]string{ + 0: "PRIMITIVE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "UINT64", + 4: "DOUBLE", + 5: "STRING", + 6: "BYTES", + } + Type_PrimitiveType_value = map[string]int32{ + "PRIMITIVE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "UINT64": 3, + "DOUBLE": 4, + "STRING": 5, + "BYTES": 6, + } +) + +func (x Type_PrimitiveType) Enum() *Type_PrimitiveType { + p := new(Type_PrimitiveType) + *p = x + return p +} + +func (x Type_PrimitiveType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_checked_proto_enumTypes[0].Descriptor() +} + +func (Type_PrimitiveType) Type() protoreflect.EnumType { + return &file_cel_expr_checked_proto_enumTypes[0] +} + +func (x Type_PrimitiveType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_PrimitiveType.Descriptor instead. +func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0} +} + +type Type_WellKnownType int32 + +const ( + Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0 + Type_ANY Type_WellKnownType = 1 + Type_TIMESTAMP Type_WellKnownType = 2 + Type_DURATION Type_WellKnownType = 3 +) + +// Enum value maps for Type_WellKnownType. +var ( + Type_WellKnownType_name = map[int32]string{ + 0: "WELL_KNOWN_TYPE_UNSPECIFIED", + 1: "ANY", + 2: "TIMESTAMP", + 3: "DURATION", + } + Type_WellKnownType_value = map[string]int32{ + "WELL_KNOWN_TYPE_UNSPECIFIED": 0, + "ANY": 1, + "TIMESTAMP": 2, + "DURATION": 3, + } +) + +func (x Type_WellKnownType) Enum() *Type_WellKnownType { + p := new(Type_WellKnownType) + *p = x + return p +} + +func (x Type_WellKnownType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor { + return file_cel_expr_checked_proto_enumTypes[1].Descriptor() +} + +func (Type_WellKnownType) Type() protoreflect.EnumType { + return &file_cel_expr_checked_proto_enumTypes[1] +} + +func (x Type_WellKnownType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_WellKnownType.Descriptor instead. 
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1} +} + +type CheckedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` + ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"` + Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"` +} + +func (x *CheckedExpr) Reset() { + *x = CheckedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckedExpr) ProtoMessage() {} + +func (x *CheckedExpr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead. +func (*CheckedExpr) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{0} +} + +func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference { + if x != nil { + return x.ReferenceMap + } + return nil +} + +func (x *CheckedExpr) GetTypeMap() map[int64]*Type { + if x != nil { + return x.TypeMap + } + return nil +} + +func (x *CheckedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +func (x *CheckedExpr) GetExprVersion() string { + if x != nil { + return x.ExprVersion + } + return "" +} + +func (x *CheckedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +type Type struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to TypeKind: + // + // *Type_Dyn + // *Type_Null + // *Type_Primitive + // *Type_Wrapper + // *Type_WellKnown + // *Type_ListType_ + // *Type_MapType_ + // *Type_Function + // *Type_MessageType + // *Type_TypeParam + // *Type_Type + // *Type_Error + // *Type_AbstractType_ + TypeKind isType_TypeKind `protobuf_oneof:"type_kind"` +} + +func (x *Type) Reset() { + *x = Type{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type) ProtoMessage() {} + +func (x *Type) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use Type.ProtoReflect.Descriptor instead. +func (*Type) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1} +} + +func (m *Type) GetTypeKind() isType_TypeKind { + if m != nil { + return m.TypeKind + } + return nil +} + +func (x *Type) GetDyn() *emptypb.Empty { + if x, ok := x.GetTypeKind().(*Type_Dyn); ok { + return x.Dyn + } + return nil +} + +func (x *Type) GetNull() structpb.NullValue { + if x, ok := x.GetTypeKind().(*Type_Null); ok { + return x.Null + } + return structpb.NullValue(0) +} + +func (x *Type) GetPrimitive() Type_PrimitiveType { + if x, ok := x.GetTypeKind().(*Type_Primitive); ok { + return x.Primitive + } + return Type_PRIMITIVE_TYPE_UNSPECIFIED +} + +func (x *Type) GetWrapper() Type_PrimitiveType { + if x, ok := x.GetTypeKind().(*Type_Wrapper); ok { + return x.Wrapper + } + return Type_PRIMITIVE_TYPE_UNSPECIFIED +} + +func (x *Type) GetWellKnown() Type_WellKnownType { + if x, ok := x.GetTypeKind().(*Type_WellKnown); ok { + return x.WellKnown + } + return Type_WELL_KNOWN_TYPE_UNSPECIFIED +} + +func (x *Type) GetListType() *Type_ListType { + if x, ok := x.GetTypeKind().(*Type_ListType_); ok { + return x.ListType + } + return nil +} + +func (x *Type) GetMapType() *Type_MapType { + if x, ok := x.GetTypeKind().(*Type_MapType_); ok { + return x.MapType + } + return nil +} + +func (x *Type) GetFunction() *Type_FunctionType { + if x, ok := x.GetTypeKind().(*Type_Function); ok { + return x.Function + } + return nil +} + +func (x *Type) GetMessageType() string { + if x, ok := x.GetTypeKind().(*Type_MessageType); ok { + return x.MessageType + } + return "" +} + +func (x *Type) GetTypeParam() string { + if x, ok := x.GetTypeKind().(*Type_TypeParam); ok { + return x.TypeParam + } + return "" +} + +func (x *Type) GetType() *Type { + if x, ok := x.GetTypeKind().(*Type_Type); ok { + return x.Type + } + return nil +} + +func (x *Type) GetError() *emptypb.Empty { + if x, ok := x.GetTypeKind().(*Type_Error); ok { + return x.Error + } + return nil +} + +func (x *Type) GetAbstractType() *Type_AbstractType { + if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok { + return x.AbstractType + } + return nil +} + +type isType_TypeKind interface { + isType_TypeKind() +} + +type Type_Dyn struct { + Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"` +} + +type Type_Null struct { + Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Type_Primitive struct { + Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"` +} + +type Type_Wrapper struct { + Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"` +} + +type Type_WellKnown struct { + WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"` +} + +type Type_ListType_ struct { + ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"` +} + +type Type_MapType_ struct { + MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"` +} + +type Type_Function struct { + Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"` +} + +type Type_MessageType struct { + MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"` +} + +type Type_TypeParam struct { + TypeParam string 
`protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"` +} + +type Type_Type struct { + Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"` +} + +type Type_Error struct { + Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"` +} + +type Type_AbstractType_ struct { + AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"` +} + +func (*Type_Dyn) isType_TypeKind() {} + +func (*Type_Null) isType_TypeKind() {} + +func (*Type_Primitive) isType_TypeKind() {} + +func (*Type_Wrapper) isType_TypeKind() {} + +func (*Type_WellKnown) isType_TypeKind() {} + +func (*Type_ListType_) isType_TypeKind() {} + +func (*Type_MapType_) isType_TypeKind() {} + +func (*Type_Function) isType_TypeKind() {} + +func (*Type_MessageType) isType_TypeKind() {} + +func (*Type_TypeParam) isType_TypeKind() {} + +func (*Type_Type) isType_TypeKind() {} + +func (*Type_Error) isType_TypeKind() {} + +func (*Type_AbstractType_) isType_TypeKind() {} + +type Decl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are assignable to DeclKind: + // + // *Decl_Ident + // *Decl_Function + DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"` +} + +func (x *Decl) Reset() { + *x = Decl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl) ProtoMessage() {} + +func (x *Decl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl.ProtoReflect.Descriptor instead. 
+func (*Decl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2} +} + +func (x *Decl) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Decl) GetDeclKind() isDecl_DeclKind { + if m != nil { + return m.DeclKind + } + return nil +} + +func (x *Decl) GetIdent() *Decl_IdentDecl { + if x, ok := x.GetDeclKind().(*Decl_Ident); ok { + return x.Ident + } + return nil +} + +func (x *Decl) GetFunction() *Decl_FunctionDecl { + if x, ok := x.GetDeclKind().(*Decl_Function); ok { + return x.Function + } + return nil +} + +type isDecl_DeclKind interface { + isDecl_DeclKind() +} + +type Decl_Ident struct { + Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"` +} + +type Decl_Function struct { + Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"` +} + +func (*Decl_Ident) isDecl_DeclKind() {} + +func (*Decl_Function) isDecl_DeclKind() {} + +type Reference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Reference) Reset() { + *x = Reference{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Reference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Reference) ProtoMessage() {} + +func (x *Reference) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Reference.ProtoReflect.Descriptor instead. +func (*Reference) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{3} +} + +func (x *Reference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Reference) GetOverloadId() []string { + if x != nil { + return x.OverloadId + } + return nil +} + +func (x *Reference) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +type Type_ListType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"` +} + +func (x *Type_ListType) Reset() { + *x = Type_ListType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_ListType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_ListType) ProtoMessage() {} + +func (x *Type_ListType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead. 
+func (*Type_ListType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Type_ListType) GetElemType() *Type { + if x != nil { + return x.ElemType + } + return nil +} + +type Type_MapType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` + ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` +} + +func (x *Type_MapType) Reset() { + *x = Type_MapType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_MapType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_MapType) ProtoMessage() {} + +func (x *Type_MapType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead. +func (*Type_MapType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Type_MapType) GetKeyType() *Type { + if x != nil { + return x.KeyType + } + return nil +} + +func (x *Type_MapType) GetValueType() *Type { + if x != nil { + return x.ValueType + } + return nil +} + +type Type_FunctionType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"` +} + +func (x *Type_FunctionType) Reset() { + *x = Type_FunctionType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_FunctionType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_FunctionType) ProtoMessage() {} + +func (x *Type_FunctionType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead. 
+func (*Type_FunctionType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Type_FunctionType) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Type_FunctionType) GetArgTypes() []*Type { + if x != nil { + return x.ArgTypes + } + return nil +} + +type Type_AbstractType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"` +} + +func (x *Type_AbstractType) Reset() { + *x = Type_AbstractType{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_AbstractType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_AbstractType) ProtoMessage() {} + +func (x *Type_AbstractType) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead. +func (*Type_AbstractType) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Type_AbstractType) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Type_AbstractType) GetParameterTypes() []*Type { + if x != nil { + return x.ParameterTypes + } + return nil +} + +type Decl_IdentDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_IdentDecl) Reset() { + *x = Decl_IdentDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_IdentDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_IdentDecl) ProtoMessage() {} + +func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead. 
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *Decl_IdentDecl) GetType() *Type { + if x != nil { + return x.Type + } + return nil +} + +func (x *Decl_IdentDecl) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +func (x *Decl_IdentDecl) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +type Decl_FunctionDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"` +} + +func (x *Decl_FunctionDecl) Reset() { + *x = Decl_FunctionDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_FunctionDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_FunctionDecl) ProtoMessage() {} + +func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead. +func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload { + if x != nil { + return x.Overloads + } + return nil +} + +type Decl_FunctionDecl_Overload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"` + TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"` + ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"` + Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_FunctionDecl_Overload) Reset() { + *x = Decl_FunctionDecl_Overload{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_checked_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_FunctionDecl_Overload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_FunctionDecl_Overload) ProtoMessage() {} + +func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_checked_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead. 
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) { + return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0} +} + +func (x *Decl_FunctionDecl_Overload) GetOverloadId() string { + if x != nil { + return x.OverloadId + } + return "" +} + +func (x *Decl_FunctionDecl_Overload) GetParams() []*Type { + if x != nil { + return x.Params + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string { + if x != nil { + return x.TypeParams + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool { + if x != nil { + return x.IsInstanceFunction + } + return false +} + +func (x *Decl_FunctionDecl_Overload) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +var File_cel_expr_checked_proto protoreflect.FileDescriptor + +var file_cel_expr_checked_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, + 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, + 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, + 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61, + 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65, + 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 
0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69, + 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, + 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, + 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c, + 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, + 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 
0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b, + 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 
0x2e, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, + 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59, + 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44, + 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c, + 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, + 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f, + 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65, + 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 
0x6c, + 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a, + 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, + 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, + 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49, + 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64, + 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63, + 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cel_expr_checked_proto_rawDescOnce sync.Once + file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc +) + +func file_cel_expr_checked_proto_rawDescGZIP() []byte { + file_cel_expr_checked_proto_rawDescOnce.Do(func() { + file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData) + }) + return file_cel_expr_checked_proto_rawDescData +} + +var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_cel_expr_checked_proto_goTypes = []interface{}{ + (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType + (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType + (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr + (*Type)(nil), // 3: cel.expr.Type + (*Decl)(nil), // 4: cel.expr.Decl + (*Reference)(nil), // 5: cel.expr.Reference + nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry + nil, // 7: cel.expr.CheckedExpr.TypeMapEntry + (*Type_ListType)(nil), // 8: cel.expr.Type.ListType + 
(*Type_MapType)(nil), // 9: cel.expr.Type.MapType + (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType + (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType + (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl + (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl + (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload + (*SourceInfo)(nil), // 15: cel.expr.SourceInfo + (*Expr)(nil), // 16: cel.expr.Expr + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty + (structpb.NullValue)(0), // 18: google.protobuf.NullValue + (*Constant)(nil), // 19: cel.expr.Constant +} +var file_cel_expr_checked_proto_depIdxs = []int32{ + 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry + 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry + 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo + 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr + 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty + 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue + 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType + 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType + 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType + 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType + 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType + 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType + 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type + 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty + 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType + 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl + 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl + 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant + 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference + 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type + 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type + 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type + 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type + 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type + 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type + 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type + 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type + 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant + 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload + 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type + 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name +} + +func init() { file_cel_expr_checked_proto_init() } +func file_cel_expr_checked_proto_init() { + if File_cel_expr_checked_proto != nil { + return + } + file_cel_expr_syntax_proto_init() + if !protoimpl.UnsafeEnabled { + 
file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Reference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_ListType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_MapType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_FunctionType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_AbstractType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_IdentDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl_Overload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Type_Dyn)(nil), + (*Type_Null)(nil), + (*Type_Primitive)(nil), + (*Type_Wrapper)(nil), + (*Type_WellKnown)(nil), + (*Type_ListType_)(nil), + (*Type_MapType_)(nil), + (*Type_Function)(nil), + (*Type_MessageType)(nil), + (*Type_TypeParam)(nil), + (*Type_Type)(nil), + (*Type_Error)(nil), + (*Type_AbstractType_)(nil), + } + file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Decl_Ident)(nil), + (*Decl_Function)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_cel_expr_checked_proto_rawDesc, + NumEnums: 2, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_checked_proto_goTypes, + DependencyIndexes: file_cel_expr_checked_proto_depIdxs, + EnumInfos: file_cel_expr_checked_proto_enumTypes, + MessageInfos: file_cel_expr_checked_proto_msgTypes, + }.Build() + File_cel_expr_checked_proto = out.File + file_cel_expr_checked_proto_rawDesc = nil + file_cel_expr_checked_proto_goTypes = nil + file_cel_expr_checked_proto_depIdxs = nil +} diff --git a/hack/tools/vendor/cel.dev/expr/cloudbuild.yaml b/hack/tools/vendor/cel.dev/expr/cloudbuild.yaml new file mode 100644 index 0000000000..c40881f122 --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/cloudbuild.yaml @@ -0,0 +1,9 @@ +steps: +- name: 'gcr.io/cloud-builders/bazel:7.0.1' + entrypoint: bazel + args: ['build', '...'] + id: bazel-build + waitFor: ['-'] +timeout: 15m +options: + machineType: 'N1_HIGHCPU_32' diff --git a/hack/tools/vendor/cel.dev/expr/eval.pb.go b/hack/tools/vendor/cel.dev/expr/eval.pb.go new file mode 100644 index 0000000000..8f651f9cc6 --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/eval.pb.go @@ -0,0 +1,490 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/eval.proto + +package expr + +import ( + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type EvalState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *EvalState) Reset() { + *x = EvalState{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvalState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvalState) ProtoMessage() {} + +func (x *EvalState) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvalState.ProtoReflect.Descriptor instead. 
+func (*EvalState) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{0} +} + +func (x *EvalState) GetValues() []*ExprValue { + if x != nil { + return x.Values + } + return nil +} + +func (x *EvalState) GetResults() []*EvalState_Result { + if x != nil { + return x.Results + } + return nil +} + +type ExprValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *ExprValue_Value + // *ExprValue_Error + // *ExprValue_Unknown + Kind isExprValue_Kind `protobuf_oneof:"kind"` +} + +func (x *ExprValue) Reset() { + *x = ExprValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExprValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExprValue) ProtoMessage() {} + +func (x *ExprValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead. +func (*ExprValue) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{1} +} + +func (m *ExprValue) GetKind() isExprValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *ExprValue) GetValue() *Value { + if x, ok := x.GetKind().(*ExprValue_Value); ok { + return x.Value + } + return nil +} + +func (x *ExprValue) GetError() *ErrorSet { + if x, ok := x.GetKind().(*ExprValue_Error); ok { + return x.Error + } + return nil +} + +func (x *ExprValue) GetUnknown() *UnknownSet { + if x, ok := x.GetKind().(*ExprValue_Unknown); ok { + return x.Unknown + } + return nil +} + +type isExprValue_Kind interface { + isExprValue_Kind() +} + +type ExprValue_Value struct { + Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"` +} + +type ExprValue_Error struct { + Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"` +} + +type ExprValue_Unknown struct { + Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"` +} + +func (*ExprValue_Value) isExprValue_Kind() {} + +func (*ExprValue_Error) isExprValue_Kind() {} + +func (*ExprValue_Unknown) isExprValue_Kind() {} + +type ErrorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"` +} + +func (x *ErrorSet) Reset() { + *x = ErrorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorSet) ProtoMessage() {} + +func (x *ErrorSet) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead. 
+func (*ErrorSet) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{2} +} + +func (x *ErrorSet) GetErrors() []*status.Status { + if x != nil { + return x.Errors + } + return nil +} + +type UnknownSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"` +} + +func (x *UnknownSet) Reset() { + *x = UnknownSet{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnknownSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnknownSet) ProtoMessage() {} + +func (x *UnknownSet) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead. +func (*UnknownSet) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{3} +} + +func (x *UnknownSet) GetExprs() []int64 { + if x != nil { + return x.Exprs + } + return nil +} + +type EvalState_Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EvalState_Result) Reset() { + *x = EvalState_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_eval_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvalState_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvalState_Result) ProtoMessage() {} + +func (x *EvalState_Result) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_eval_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead. 
+func (*EvalState_Result) Descriptor() ([]byte, []int) { + return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *EvalState_Result) GetExpr() int64 { + if x != nil { + return x.Expr + } + return 0 +} + +func (x *EvalState_Result) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +var File_cel_expr_eval_proto protoreflect.FileDescriptor + +var file_cel_expr_eval_proto_rawDesc = []byte{ + 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, + 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, + 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, + 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 
0x45, 0x76, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_cel_expr_eval_proto_rawDescOnce sync.Once + file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc +) + +func file_cel_expr_eval_proto_rawDescGZIP() []byte { + file_cel_expr_eval_proto_rawDescOnce.Do(func() { + file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData) + }) + return file_cel_expr_eval_proto_rawDescData +} + +var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cel_expr_eval_proto_goTypes = []interface{}{ + (*EvalState)(nil), // 0: cel.expr.EvalState + (*ExprValue)(nil), // 1: cel.expr.ExprValue + (*ErrorSet)(nil), // 2: cel.expr.ErrorSet + (*UnknownSet)(nil), // 3: cel.expr.UnknownSet + (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result + (*Value)(nil), // 5: cel.expr.Value + (*status.Status)(nil), // 6: google.rpc.Status +} +var file_cel_expr_eval_proto_depIdxs = []int32{ + 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue + 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result + 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value + 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet + 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet + 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_cel_expr_eval_proto_init() } +func file_cel_expr_eval_proto_init() { + if File_cel_expr_eval_proto != nil { + return + } + file_cel_expr_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExprValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnknownSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ExprValue_Value)(nil), + (*ExprValue_Error)(nil), + (*ExprValue_Unknown)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_eval_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_eval_proto_goTypes, + DependencyIndexes: file_cel_expr_eval_proto_depIdxs, + MessageInfos: file_cel_expr_eval_proto_msgTypes, + }.Build() + File_cel_expr_eval_proto = out.File + file_cel_expr_eval_proto_rawDesc = nil + file_cel_expr_eval_proto_goTypes = nil + file_cel_expr_eval_proto_depIdxs = nil +} diff --git a/hack/tools/vendor/cel.dev/expr/explain.pb.go b/hack/tools/vendor/cel.dev/expr/explain.pb.go new file mode 100644 index 0000000000..79fd5443b9 --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/explain.pb.go @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/explain.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Deprecated: Do not use. +type Explain struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"` +} + +func (x *Explain) Reset() { + *x = Explain{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_explain_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Explain) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Explain) ProtoMessage() {} + +func (x *Explain) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_explain_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Explain.ProtoReflect.Descriptor instead. 
+func (*Explain) Descriptor() ([]byte, []int) { + return file_cel_expr_explain_proto_rawDescGZIP(), []int{0} +} + +func (x *Explain) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +func (x *Explain) GetExprSteps() []*Explain_ExprStep { + if x != nil { + return x.ExprSteps + } + return nil +} + +type Explain_ExprStep struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"` +} + +func (x *Explain_ExprStep) Reset() { + *x = Explain_ExprStep{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_explain_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Explain_ExprStep) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Explain_ExprStep) ProtoMessage() {} + +func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_explain_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead. +func (*Explain_ExprStep) Descriptor() ([]byte, []int) { + return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Explain_ExprStep) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Explain_ExprStep) GetValueIndex() int32 { + if x != nil { + return x.ValueIndex + } + return 0 +} + +var File_cel_expr_explain_proto protoreflect.FileDescriptor + +var file_cel_expr_explain_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65, + 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, + 0x65, 0x76, 0x2f, 
0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_explain_proto_rawDescOnce sync.Once
+ file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
+)
+
+func file_cel_expr_explain_proto_rawDescGZIP() []byte {
+ file_cel_expr_explain_proto_rawDescOnce.Do(func() {
+ file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
+ })
+ return file_cel_expr_explain_proto_rawDescData
+}
+
+var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cel_expr_explain_proto_goTypes = []interface{}{
+ (*Explain)(nil), // 0: cel.expr.Explain
+ (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
+ (*Value)(nil), // 2: cel.expr.Value
+}
+var file_cel_expr_explain_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value
+ 1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_explain_proto_init() }
+func file_cel_expr_explain_proto_init() {
+ if File_cel_expr_explain_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain_ExprStep); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_explain_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_explain_proto_goTypes,
+ DependencyIndexes: file_cel_expr_explain_proto_depIdxs,
+ MessageInfos: file_cel_expr_explain_proto_msgTypes,
+ }.Build()
+ File_cel_expr_explain_proto = out.File
+ file_cel_expr_explain_proto_rawDesc = nil
+ file_cel_expr_explain_proto_goTypes = nil
+ file_cel_expr_explain_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/cel.dev/expr/regen_go_proto.sh b/hack/tools/vendor/cel.dev/expr/regen_go_proto.sh
new file mode 100644
index 0000000000..fdcbb3ce25
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/regen_go_proto.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr/conformance/...
+files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n"))
+for src in "${files[@]}";
+do
+ dst=$(echo "$src" | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/')
+ echo "copying $dst"
+ cp "$src" "$dst"
+done
diff --git a/hack/tools/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/hack/tools/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
new file mode 100644
index 0000000000..9a13479e40
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr:all
+
+rm -vf ./*.pb.go
+
+files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") )
+for src in "${files[@]}";
+do
+ cp -v "${src}" ./
+done
diff --git a/hack/tools/vendor/cel.dev/expr/syntax.pb.go b/hack/tools/vendor/cel.dev/expr/syntax.pb.go
new file mode 100644
index 0000000000..48a952872e
--- /dev/null
+++ b/hack/tools/vendor/cel.dev/expr/syntax.pb.go
@@ -0,0 +1,1633 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/syntax.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type SourceInfo_Extension_Component int32
+
+const (
+ SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0
+ SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1
+ SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2
+ SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3
+)
+
+// Enum value maps for SourceInfo_Extension_Component.
+var (
+ SourceInfo_Extension_Component_name = map[int32]string{
+ 0: "COMPONENT_UNSPECIFIED",
+ 1: "COMPONENT_PARSER",
+ 2: "COMPONENT_TYPE_CHECKER",
+ 3: "COMPONENT_RUNTIME",
+ }
+ SourceInfo_Extension_Component_value = map[string]int32{
+ "COMPONENT_UNSPECIFIED": 0,
+ "COMPONENT_PARSER": 1,
+ "COMPONENT_TYPE_CHECKER": 2,
+ "COMPONENT_RUNTIME": 3,
+ }
+)
+
+func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component {
+ p := new(SourceInfo_Extension_Component)
+ *p = x
+ return p
+}
+
+func (x SourceInfo_Extension_Component) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_syntax_proto_enumTypes[0].Descriptor()
+}
+
+func (SourceInfo_Extension_Component) Type() protoreflect.EnumType {
+ return &file_cel_expr_syntax_proto_enumTypes[0]
+}
+
+func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead.
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0} +} + +type ParsedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"` + SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` +} + +func (x *ParsedExpr) Reset() { + *x = ParsedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParsedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParsedExpr) ProtoMessage() {} + +func (x *ParsedExpr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead. +func (*ParsedExpr) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0} +} + +func (x *ParsedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +func (x *ParsedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +type Expr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to ExprKind: + // + // *Expr_ConstExpr + // *Expr_IdentExpr + // *Expr_SelectExpr + // *Expr_CallExpr + // *Expr_ListExpr + // *Expr_StructExpr + // *Expr_ComprehensionExpr + ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"` +} + +func (x *Expr) Reset() { + *x = Expr{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr) ProtoMessage() {} + +func (x *Expr) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr.ProtoReflect.Descriptor instead. 
+func (*Expr) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1} +} + +func (x *Expr) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr) GetExprKind() isExpr_ExprKind { + if m != nil { + return m.ExprKind + } + return nil +} + +func (x *Expr) GetConstExpr() *Constant { + if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok { + return x.ConstExpr + } + return nil +} + +func (x *Expr) GetIdentExpr() *Expr_Ident { + if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok { + return x.IdentExpr + } + return nil +} + +func (x *Expr) GetSelectExpr() *Expr_Select { + if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok { + return x.SelectExpr + } + return nil +} + +func (x *Expr) GetCallExpr() *Expr_Call { + if x, ok := x.GetExprKind().(*Expr_CallExpr); ok { + return x.CallExpr + } + return nil +} + +func (x *Expr) GetListExpr() *Expr_CreateList { + if x, ok := x.GetExprKind().(*Expr_ListExpr); ok { + return x.ListExpr + } + return nil +} + +func (x *Expr) GetStructExpr() *Expr_CreateStruct { + if x, ok := x.GetExprKind().(*Expr_StructExpr); ok { + return x.StructExpr + } + return nil +} + +func (x *Expr) GetComprehensionExpr() *Expr_Comprehension { + if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok { + return x.ComprehensionExpr + } + return nil +} + +type isExpr_ExprKind interface { + isExpr_ExprKind() +} + +type Expr_ConstExpr struct { + ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"` +} + +type Expr_IdentExpr struct { + IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"` +} + +type Expr_SelectExpr struct { + SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"` +} + +type Expr_CallExpr struct { + CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"` +} + +type Expr_ListExpr struct { + ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"` +} + +type Expr_StructExpr struct { + StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"` +} + +type Expr_ComprehensionExpr struct { + ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"` +} + +func (*Expr_ConstExpr) isExpr_ExprKind() {} + +func (*Expr_IdentExpr) isExpr_ExprKind() {} + +func (*Expr_SelectExpr) isExpr_ExprKind() {} + +func (*Expr_CallExpr) isExpr_ExprKind() {} + +func (*Expr_ListExpr) isExpr_ExprKind() {} + +func (*Expr_StructExpr) isExpr_ExprKind() {} + +func (*Expr_ComprehensionExpr) isExpr_ExprKind() {} + +type Constant struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ConstantKind: + // + // *Constant_NullValue + // *Constant_BoolValue + // *Constant_Int64Value + // *Constant_Uint64Value + // *Constant_DoubleValue + // *Constant_StringValue + // *Constant_BytesValue + // *Constant_DurationValue + // *Constant_TimestampValue + ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"` +} + +func (x *Constant) Reset() { + *x = Constant{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Constant) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Constant) ProtoMessage() {} + +func (x *Constant) ProtoReflect() 
protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Constant.ProtoReflect.Descriptor instead. +func (*Constant) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2} +} + +func (m *Constant) GetConstantKind() isConstant_ConstantKind { + if m != nil { + return m.ConstantKind + } + return nil +} + +func (x *Constant) GetNullValue() structpb.NullValue { + if x, ok := x.GetConstantKind().(*Constant_NullValue); ok { + return x.NullValue + } + return structpb.NullValue(0) +} + +func (x *Constant) GetBoolValue() bool { + if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Constant) GetInt64Value() int64 { + if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Constant) GetUint64Value() uint64 { + if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Constant) GetDoubleValue() float64 { + if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Constant) GetStringValue() string { + if x, ok := x.GetConstantKind().(*Constant_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Constant) GetBytesValue() []byte { + if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok { + return x.BytesValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetDurationValue() *durationpb.Duration { + if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok { + return x.DurationValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetTimestampValue() *timestamppb.Timestamp { + if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +type isConstant_ConstantKind interface { + isConstant_ConstantKind() +} + +type Constant_NullValue struct { + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Constant_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Constant_Int64Value struct { + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Constant_Uint64Value struct { + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Constant_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Constant_StringValue struct { + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Constant_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Constant_DurationValue struct { + // Deprecated: Do not use. + DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"` +} + +type Constant_TimestampValue struct { + // Deprecated: Do not use. 
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"` +} + +func (*Constant_NullValue) isConstant_ConstantKind() {} + +func (*Constant_BoolValue) isConstant_ConstantKind() {} + +func (*Constant_Int64Value) isConstant_ConstantKind() {} + +func (*Constant_Uint64Value) isConstant_ConstantKind() {} + +func (*Constant_DoubleValue) isConstant_ConstantKind() {} + +func (*Constant_StringValue) isConstant_ConstantKind() {} + +func (*Constant_BytesValue) isConstant_ConstantKind() {} + +func (*Constant_DurationValue) isConstant_ConstantKind() {} + +func (*Constant_TimestampValue) isConstant_ConstantKind() {} + +type SourceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"` + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"` + Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *SourceInfo) Reset() { + *x = SourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo) ProtoMessage() {} + +func (x *SourceInfo) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead. 
+func (*SourceInfo) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3} +} + +func (x *SourceInfo) GetSyntaxVersion() string { + if x != nil { + return x.SyntaxVersion + } + return "" +} + +func (x *SourceInfo) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *SourceInfo) GetLineOffsets() []int32 { + if x != nil { + return x.LineOffsets + } + return nil +} + +func (x *SourceInfo) GetPositions() map[int64]int32 { + if x != nil { + return x.Positions + } + return nil +} + +func (x *SourceInfo) GetMacroCalls() map[int64]*Expr { + if x != nil { + return x.MacroCalls + } + return nil +} + +func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension { + if x != nil { + return x.Extensions + } + return nil +} + +type Expr_Ident struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Expr_Ident) Reset() { + *x = Expr_Ident{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Ident) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Ident) ProtoMessage() {} + +func (x *Expr_Ident) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead. +func (*Expr_Ident) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Expr_Ident) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type Expr_Select struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` + Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` + TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"` +} + +func (x *Expr_Select) Reset() { + *x = Expr_Select{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Select) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Select) ProtoMessage() {} + +func (x *Expr_Select) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead. 
+func (*Expr_Select) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Expr_Select) GetOperand() *Expr { + if x != nil { + return x.Operand + } + return nil +} + +func (x *Expr_Select) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *Expr_Select) GetTestOnly() bool { + if x != nil { + return x.TestOnly + } + return false +} + +type Expr_Call struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"` + Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` +} + +func (x *Expr_Call) Reset() { + *x = Expr_Call{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Call) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Call) ProtoMessage() {} + +func (x *Expr_Call) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead. +func (*Expr_Call) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Expr_Call) GetTarget() *Expr { + if x != nil { + return x.Target + } + return nil +} + +func (x *Expr_Call) GetFunction() string { + if x != nil { + return x.Function + } + return "" +} + +func (x *Expr_Call) GetArgs() []*Expr { + if x != nil { + return x.Args + } + return nil +} + +type Expr_CreateList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` + OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"` +} + +func (x *Expr_CreateList) Reset() { + *x = Expr_CreateList{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateList) ProtoMessage() {} + +func (x *Expr_CreateList) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead. 
+func (*Expr_CreateList) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Expr_CreateList) GetElements() []*Expr { + if x != nil { + return x.Elements + } + return nil +} + +func (x *Expr_CreateList) GetOptionalIndices() []int32 { + if x != nil { + return x.OptionalIndices + } + return nil +} + +type Expr_CreateStruct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"` + Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *Expr_CreateStruct) Reset() { + *x = Expr_CreateStruct{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct) ProtoMessage() {} + +func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead. +func (*Expr_CreateStruct) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4} +} + +func (x *Expr_CreateStruct) GetMessageName() string { + if x != nil { + return x.MessageName + } + return "" +} + +func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { + if x != nil { + return x.Entries + } + return nil +} + +type Expr_Comprehension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` + IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` + AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` + AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` + LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` + LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` + Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"` +} + +func (x *Expr_Comprehension) Reset() { + *x = Expr_Comprehension{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Comprehension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Comprehension) ProtoMessage() {} + +func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead. 
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5} +} + +func (x *Expr_Comprehension) GetIterVar() string { + if x != nil { + return x.IterVar + } + return "" +} + +func (x *Expr_Comprehension) GetIterRange() *Expr { + if x != nil { + return x.IterRange + } + return nil +} + +func (x *Expr_Comprehension) GetAccuVar() string { + if x != nil { + return x.AccuVar + } + return "" +} + +func (x *Expr_Comprehension) GetAccuInit() *Expr { + if x != nil { + return x.AccuInit + } + return nil +} + +func (x *Expr_Comprehension) GetLoopCondition() *Expr { + if x != nil { + return x.LoopCondition + } + return nil +} + +func (x *Expr_Comprehension) GetLoopStep() *Expr { + if x != nil { + return x.LoopStep + } + return nil +} + +func (x *Expr_Comprehension) GetResult() *Expr { + if x != nil { + return x.Result + } + return nil +} + +type Expr_CreateStruct_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are assignable to KeyKind: + // + // *Expr_CreateStruct_Entry_FieldKey + // *Expr_CreateStruct_Entry_MapKey + KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"` + Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"` +} + +func (x *Expr_CreateStruct_Entry) Reset() { + *x = Expr_CreateStruct_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct_Entry) ProtoMessage() {} + +func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead. 
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0} +} + +func (x *Expr_CreateStruct_Entry) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind { + if m != nil { + return m.KeyKind + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetFieldKey() string { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok { + return x.FieldKey + } + return "" +} + +func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok { + return x.MapKey + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetValue() *Expr { + if x != nil { + return x.Value + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool { + if x != nil { + return x.OptionalEntry + } + return false +} + +type isExpr_CreateStruct_Entry_KeyKind interface { + isExpr_CreateStruct_Entry_KeyKind() +} + +type Expr_CreateStruct_Entry_FieldKey struct { + FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"` +} + +type Expr_CreateStruct_Entry_MapKey struct { + MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"` +} + +func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {} + +func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {} + +type SourceInfo_Extension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"` + Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *SourceInfo_Extension) Reset() { + *x = SourceInfo_Extension{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension) ProtoMessage() {} + +func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead. 
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2} +} + +func (x *SourceInfo_Extension) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component { + if x != nil { + return x.AffectedComponents + } + return nil +} + +func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version { + if x != nil { + return x.Version + } + return nil +} + +type SourceInfo_Extension_Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` +} + +func (x *SourceInfo_Extension_Version) Reset() { + *x = SourceInfo_Extension_Version{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_syntax_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension_Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension_Version) ProtoMessage() {} + +func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_syntax_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead. +func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) { + return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0} +} + +func (x *SourceInfo_Extension_Version) GetMajor() int64 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *SourceInfo_Extension_Version) GetMinor() int64 { + if x != nil { + return x.Minor + } + return 0 +} + +var File_cel_expr_syntax_proto protoreflect.FileDescriptor + +var file_cel_expr_syntax_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, + 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22, + 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, + 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, + 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78, + 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, + 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, + 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c, + 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09, + 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69, + 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, + 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, + 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 
0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c, + 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, + 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab, + 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, + 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, + 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70, + 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, + 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a, + 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, + 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65, + 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x0e, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, + 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, + 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, + 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, + 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, + 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, + 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, + 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, + 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, + 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, + 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, + 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 
0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, + 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, + 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06, + 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, + 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61, + 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12, + 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, + 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50, + 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, + 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, + 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, + 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, + 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c, + 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79, + 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, + 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_cel_expr_syntax_proto_rawDescOnce sync.Once + file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc +) + +func file_cel_expr_syntax_proto_rawDescGZIP() []byte { + file_cel_expr_syntax_proto_rawDescOnce.Do(func() { + file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData) + }) + return file_cel_expr_syntax_proto_rawDescData +} + +var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_cel_expr_syntax_proto_goTypes = []interface{}{ + (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component + (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr + (*Expr)(nil), // 2: cel.expr.Expr + (*Constant)(nil), // 3: cel.expr.Constant + (*SourceInfo)(nil), // 4: cel.expr.SourceInfo + (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident + (*Expr_Select)(nil), // 6: cel.expr.Expr.Select + 
(*Expr_Call)(nil), // 7: cel.expr.Expr.Call + (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList + (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct + (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension + (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry + nil, // 12: cel.expr.SourceInfo.PositionsEntry + nil, // 13: cel.expr.SourceInfo.MacroCallsEntry + (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension + (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version + (structpb.NullValue)(0), // 16: google.protobuf.NullValue + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp +} +var file_cel_expr_syntax_proto_depIdxs = []int32{ + 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr + 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo + 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant + 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident + 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select + 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call + 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList + 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct + 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension + 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue + 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration + 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp + 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry + 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry + 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension + 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr + 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr + 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr + 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr + 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry + 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr + 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr + 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr + 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr + 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr + 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr + 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr + 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr + 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component + 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_cel_expr_syntax_proto_init() } +func file_cel_expr_syntax_proto_init() { + 
if File_cel_expr_syntax_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParsedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Constant); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Ident); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Select); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Call); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Comprehension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension_Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{ + 
(*Expr_ConstExpr)(nil), + (*Expr_IdentExpr)(nil), + (*Expr_SelectExpr)(nil), + (*Expr_CallExpr)(nil), + (*Expr_ListExpr)(nil), + (*Expr_StructExpr)(nil), + (*Expr_ComprehensionExpr)(nil), + } + file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Constant_NullValue)(nil), + (*Constant_BoolValue)(nil), + (*Constant_Int64Value)(nil), + (*Constant_Uint64Value)(nil), + (*Constant_DoubleValue)(nil), + (*Constant_StringValue)(nil), + (*Constant_BytesValue)(nil), + (*Constant_DurationValue)(nil), + (*Constant_TimestampValue)(nil), + } + file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{ + (*Expr_CreateStruct_Entry_FieldKey)(nil), + (*Expr_CreateStruct_Entry_MapKey)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_syntax_proto_rawDesc, + NumEnums: 1, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_syntax_proto_goTypes, + DependencyIndexes: file_cel_expr_syntax_proto_depIdxs, + EnumInfos: file_cel_expr_syntax_proto_enumTypes, + MessageInfos: file_cel_expr_syntax_proto_msgTypes, + }.Build() + File_cel_expr_syntax_proto = out.File + file_cel_expr_syntax_proto_rawDesc = nil + file_cel_expr_syntax_proto_goTypes = nil + file_cel_expr_syntax_proto_depIdxs = nil +} diff --git a/hack/tools/vendor/cel.dev/expr/value.pb.go b/hack/tools/vendor/cel.dev/expr/value.pb.go new file mode 100644 index 0000000000..e5e29228c2 --- /dev/null +++ b/hack/tools/vendor/cel.dev/expr/value.pb.go @@ -0,0 +1,653 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: cel/expr/value.proto + +package expr + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Kind: + // + // *Value_NullValue + // *Value_BoolValue + // *Value_Int64Value + // *Value_Uint64Value + // *Value_DoubleValue + // *Value_StringValue + // *Value_BytesValue + // *Value_EnumValue + // *Value_ObjectValue + // *Value_MapValue + // *Value_ListValue + // *Value_TypeValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (x *Value) Reset() { + *x = Value{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. 
+func (*Value) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{0} +} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Value) GetNullValue() structpb.NullValue { + if x, ok := x.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return structpb.NullValue(0) +} + +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetInt64Value() int64 { + if x, ok := x.GetKind().(*Value_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Value) GetUint64Value() uint64 { + if x, ok := x.GetKind().(*Value_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Value) GetDoubleValue() float64 { + if x, ok := x.GetKind().(*Value_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Value) GetBytesValue() []byte { + if x, ok := x.GetKind().(*Value_BytesValue); ok { + return x.BytesValue + } + return nil +} + +func (x *Value) GetEnumValue() *EnumValue { + if x, ok := x.GetKind().(*Value_EnumValue); ok { + return x.EnumValue + } + return nil +} + +func (x *Value) GetObjectValue() *anypb.Any { + if x, ok := x.GetKind().(*Value_ObjectValue); ok { + return x.ObjectValue + } + return nil +} + +func (x *Value) GetMapValue() *MapValue { + if x, ok := x.GetKind().(*Value_MapValue); ok { + return x.MapValue + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x, ok := x.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +func (x *Value) GetTypeValue() string { + if x, ok := x.GetKind().(*Value_TypeValue); ok { + return x.TypeValue + } + return "" +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_Int64Value struct { + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Value_Uint64Value struct { + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Value_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Value_StringValue struct { + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Value_EnumValue struct { + EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"` +} + +type Value_ObjectValue struct { + ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"` +} + +type Value_MapValue struct { + MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"` +} + +type Value_TypeValue struct { + TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"` +} + +func 
(*Value_NullValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_Int64Value) isValue_Kind() {} + +func (*Value_Uint64Value) isValue_Kind() {} + +func (*Value_DoubleValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BytesValue) isValue_Kind() {} + +func (*Value_EnumValue) isValue_Kind() {} + +func (*Value_ObjectValue) isValue_Kind() {} + +func (*Value_MapValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (*Value_TypeValue) isValue_Kind() {} + +type EnumValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EnumValue) Reset() { + *x = EnumValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValue) ProtoMessage() {} + +func (x *EnumValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead. +func (*EnumValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{1} +} + +func (x *EnumValue) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EnumValue) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +type ListValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ListValue) Reset() { + *x = ListValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +type MapValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *MapValue) Reset() { + *x = MapValue{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue) ProtoMessage() {} + +func (x *MapValue) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue.ProtoReflect.Descriptor instead. +func (*MapValue) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{3} +} + +func (x *MapValue) GetEntries() []*MapValue_Entry { + if x != nil { + return x.Entries + } + return nil +} + +type MapValue_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MapValue_Entry) Reset() { + *x = MapValue_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_cel_expr_value_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue_Entry) ProtoMessage() {} + +func (x *MapValue_Entry) ProtoReflect() protoreflect.Message { + mi := &file_cel_expr_value_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead. 
+func (*MapValue_Entry) Descriptor() ([]byte, []int) { + return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *MapValue_Entry) GetKey() *Value { + if x != nil { + return x.Key + } + return nil +} + +func (x *MapValue_Entry) GetValue() *Value { + if x != nil { + return x.Value + } + return nil +} + +var File_cel_expr_value_proto protoreflect.FileDescriptor + +var file_cel_expr_value_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69, + 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, + 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, + 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, + 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 
0x65, 0x6c, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, + 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75, + 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65, + 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, + 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_cel_expr_value_proto_rawDescOnce sync.Once + file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc +) + +func file_cel_expr_value_proto_rawDescGZIP() []byte { + file_cel_expr_value_proto_rawDescOnce.Do(func() { + file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData) + }) + return file_cel_expr_value_proto_rawDescData +} + +var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_cel_expr_value_proto_goTypes = []interface{}{ + (*Value)(nil), // 0: cel.expr.Value + (*EnumValue)(nil), // 1: cel.expr.EnumValue + (*ListValue)(nil), // 2: cel.expr.ListValue + (*MapValue)(nil), // 3: cel.expr.MapValue + (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry + (structpb.NullValue)(0), // 5: google.protobuf.NullValue + (*anypb.Any)(nil), // 6: google.protobuf.Any +} +var 
file_cel_expr_value_proto_depIdxs = []int32{ + 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue + 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any + 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue + 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue + 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value + 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry + 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value + 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_cel_expr_value_proto_init() } +func file_cel_expr_value_proto_init() { + if File_cel_expr_value_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Value_NullValue)(nil), + (*Value_BoolValue)(nil), + (*Value_Int64Value)(nil), + (*Value_Uint64Value)(nil), + (*Value_DoubleValue)(nil), + (*Value_StringValue)(nil), + (*Value_BytesValue)(nil), + (*Value_EnumValue)(nil), + (*Value_ObjectValue)(nil), + (*Value_MapValue)(nil), + (*Value_ListValue)(nil), + (*Value_TypeValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_cel_expr_value_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_cel_expr_value_proto_goTypes, + DependencyIndexes: file_cel_expr_value_proto_depIdxs, + MessageInfos: file_cel_expr_value_proto_msgTypes, + }.Build() + File_cel_expr_value_proto = out.File + file_cel_expr_value_proto_rawDesc = nil + file_cel_expr_value_proto_goTypes = nil + file_cel_expr_value_proto_depIdxs = nil +} diff --git a/hack/tools/vendor/github.com/4meepo/tagalign/.gitignore b/hack/tools/vendor/github.com/4meepo/tagalign/.gitignore index e37bb52e49..1c6218ee29 100644 
--- a/hack/tools/vendor/github.com/4meepo/tagalign/.gitignore
+++ b/hack/tools/vendor/github.com/4meepo/tagalign/.gitignore
@@ -17,6 +17,7 @@
 *.test
 
 .vscode
+.idea/
 
 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out
diff --git a/hack/tools/vendor/github.com/4meepo/tagalign/.goreleaser.yml b/hack/tools/vendor/github.com/4meepo/tagalign/.goreleaser.yml
index e7b6f6800e..37dfec7c88 100644
--- a/hack/tools/vendor/github.com/4meepo/tagalign/.goreleaser.yml
+++ b/hack/tools/vendor/github.com/4meepo/tagalign/.goreleaser.yml
@@ -1,4 +1,4 @@
----
+version: 2
 project_name: tagalign
 
 release:
@@ -29,4 +29,4 @@ builds:
         goarch: 386
       - goos: freebsd
         goarch: arm64
-    main: ./cmd/tagalign/
\ No newline at end of file
+    main: ./cmd/tagalign/
diff --git a/hack/tools/vendor/github.com/4meepo/tagalign/options.go b/hack/tools/vendor/github.com/4meepo/tagalign/options.go
index ddec98da73..2a78592465 100644
--- a/hack/tools/vendor/github.com/4meepo/tagalign/options.go
+++ b/hack/tools/vendor/github.com/4meepo/tagalign/options.go
@@ -2,13 +2,6 @@ package tagalign
 
 type Option func(*Helper)
 
-// WithMode specify the mode of tagalign.
-func WithMode(mode Mode) Option {
-	return func(h *Helper) {
-		h.mode = mode
-	}
-}
-
 // WithSort enable tags sort.
 // fixedOrder specify the order of tags, the other tags will be sorted by name.
 // Sort is disabled by default.
diff --git a/hack/tools/vendor/github.com/4meepo/tagalign/tagalign.go b/hack/tools/vendor/github.com/4meepo/tagalign/tagalign.go
index 4734b56661..8161a0aa7f 100644
--- a/hack/tools/vendor/github.com/4meepo/tagalign/tagalign.go
+++ b/hack/tools/vendor/github.com/4meepo/tagalign/tagalign.go
@@ -1,27 +1,20 @@
 package tagalign
 
 import (
+	"cmp"
 	"fmt"
 	"go/ast"
 	"go/token"
-	"log"
 	"reflect"
-	"sort"
+	"slices"
 	"strconv"
 	"strings"
 
 	"github.com/fatih/structtag"
 	"golang.org/x/tools/go/analysis"
 )
 
-type Mode int
-
-const (
-	StandaloneMode Mode = iota
-	GolangciLintMode
-)
-
 type Style int
 
 const (
@@ -44,11 +36,14 @@ func NewAnalyzer(options ...Option) *analysis.Analyzer {
 	}
 }
 
-func Run(pass *analysis.Pass, options ...Option) []Issue {
-	var issues []Issue
+func Run(pass *analysis.Pass, options ...Option) {
 	for _, f := range pass.Files {
+		filename := getFilename(pass.Fset, f)
+		if !strings.HasSuffix(filename, ".go") {
+			continue
+		}
+
 		h := &Helper{
-			mode:  StandaloneMode,
 			style: DefaultStyle,
 			align: true,
 		}
@@ -63,22 +58,19 @@ func Run(pass *analysis.Pass, options ...Option) []Issue {
 
 		if !h.align && !h.sort {
 			// do nothing
-			return nil
+			return
 		}
 
 		ast.Inspect(f, func(n ast.Node) bool {
 			h.find(pass, n)
 			return true
 		})
+
 		h.Process(pass)
-		issues = append(issues, h.issues...)
 	}
-	return issues
 }
 
 type Helper struct {
-	mode Mode
-
 	style Style
 
 	align bool // whether enable tags align.
@@ -87,19 +79,6 @@ type Helper struct {
 	singleFields            []*ast.Field
 	consecutiveFieldsGroups [][]*ast.Field // fields in this group, must be consecutive in struct.
-	issues []Issue
-}
-
-// Issue is used to integrate with golangci-lint's inline auto fix.
-type Issue struct { - Pos token.Position - Message string - InlineFix InlineFix -} -type InlineFix struct { - StartCol int // zero-based - Length int - NewString string } func (w *Helper) find(pass *analysis.Pass, n ast.Node) { @@ -159,42 +138,28 @@ func (w *Helper) find(pass *analysis.Pass, n ast.Node) { split() } -func (w *Helper) report(pass *analysis.Pass, field *ast.Field, startCol int, msg, replaceStr string) { - if w.mode == GolangciLintMode { - iss := Issue{ - Pos: pass.Fset.Position(field.Tag.Pos()), - Message: msg, - InlineFix: InlineFix{ - StartCol: startCol, - Length: len(field.Tag.Value), - NewString: replaceStr, - }, - } - w.issues = append(w.issues, iss) - } - - if w.mode == StandaloneMode { - pass.Report(analysis.Diagnostic{ - Pos: field.Tag.Pos(), - End: field.Tag.End(), - Message: msg, - SuggestedFixes: []analysis.SuggestedFix{ - { - Message: msg, - TextEdits: []analysis.TextEdit{ - { - Pos: field.Tag.Pos(), - End: field.Tag.End(), - NewText: []byte(replaceStr), - }, +func (w *Helper) report(pass *analysis.Pass, field *ast.Field, msg, replaceStr string) { + pass.Report(analysis.Diagnostic{ + Pos: field.Tag.Pos(), + End: field.Tag.End(), + Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: msg, + TextEdits: []analysis.TextEdit{ + { + Pos: field.Tag.Pos(), + End: field.Tag.End(), + NewText: []byte(replaceStr), }, }, }, - }) - } + }, + }) } -func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit +//nolint:gocognit,gocyclo,nestif +func (w *Helper) Process(pass *analysis.Pass) { // process grouped fields for _, fields := range w.consecutiveFieldsGroups { offsets := make([]int, len(fields)) @@ -220,7 +185,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit tag, err := strconv.Unquote(field.Tag.Value) if err != nil { // if tag value is not a valid string, report it directly - w.report(pass, field, column, errTagValueSyntax, field.Tag.Value) + w.report(pass, field, errTagValueSyntax, field.Tag.Value) fields = removeField(fields, i) continue } @@ -228,7 +193,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit tags, err := structtag.Parse(tag) if err != nil { // if tag value is not a valid struct tag, report it directly - w.report(pass, field, column, err.Error(), field.Tag.Value) + w.report(pass, field, err.Error(), field.Tag.Value) fields = removeField(fields, i) continue } @@ -241,7 +206,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit cp[i] = tag } notSortedTagsGroup = append(notSortedTagsGroup, cp) - sortBy(w.fixedTagOrder, tags) + sortTags(w.fixedTagOrder, tags) } for _, t := range tags.Tags() { addKey(t.Key) @@ -252,7 +217,7 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit } if w.sort && StrictStyle == w.style { - sortAllKeys(w.fixedTagOrder, uniqueKeys) + sortKeys(w.fixedTagOrder, uniqueKeys) maxTagNum = len(uniqueKeys) } @@ -340,27 +305,26 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit msg := "tag is not aligned, should be: " + unquoteTag - w.report(pass, field, offsets[i], msg, newTagValue) + w.report(pass, field, msg, newTagValue) } } // process single fields for _, field := range w.singleFields { - column := pass.Fset.Position(field.Tag.Pos()).Column - 1 tag, err := strconv.Unquote(field.Tag.Value) if err != nil { - w.report(pass, field, column, errTagValueSyntax, field.Tag.Value) + w.report(pass, field, errTagValueSyntax, field.Tag.Value) continue } tags, err := structtag.Parse(tag) if err != nil { - w.report(pass, field, column, 
err.Error(), field.Tag.Value) + w.report(pass, field, err.Error(), field.Tag.Value) continue } originalTags := append([]*structtag.Tag(nil), tags.Tags()...) if w.sort { - sortBy(w.fixedTagOrder, tags) + sortTags(w.fixedTagOrder, tags) } newTagValue := fmt.Sprintf("`%s`", tags.String()) @@ -371,85 +335,47 @@ func (w *Helper) Process(pass *analysis.Pass) { //nolint:gocognit msg := "tag is not aligned , should be: " + tags.String() - w.report(pass, field, column, msg, newTagValue) + w.report(pass, field, msg, newTagValue) } } -// Issues returns all issues found by the analyzer. -// It is used to integrate with golangci-lint. -func (w *Helper) Issues() []Issue { - log.Println("tagalign 's Issues() should only be called in golangci-lint mode") - return w.issues -} - -// sortBy sorts tags by fixed order. +// sortTags sorts tags by fixed order. // If a tag is not in the fixed order, it will be sorted by name. -func sortBy(fixedOrder []string, tags *structtag.Tags) { - // sort by fixed order - sort.Slice(tags.Tags(), func(i, j int) bool { - ti := tags.Tags()[i] - tj := tags.Tags()[j] - - oi := findIndex(fixedOrder, ti.Key) - oj := findIndex(fixedOrder, tj.Key) - - if oi == -1 && oj == -1 { - return ti.Key < tj.Key - } - - if oi == -1 { - return false - } - - if oj == -1 { - return true - } - - return oi < oj +func sortTags(fixedOrder []string, tags *structtag.Tags) { + slices.SortFunc(tags.Tags(), func(a, b *structtag.Tag) int { + return compareByFixedOrder(fixedOrder)(a.Key, b.Key) }) } -func sortAllKeys(fixedOrder []string, keys []string) { - sort.Slice(keys, func(i, j int) bool { - oi := findIndex(fixedOrder, keys[i]) - oj := findIndex(fixedOrder, keys[j]) +func sortKeys(fixedOrder []string, keys []string) { + slices.SortFunc(keys, compareByFixedOrder(fixedOrder)) +} + +func compareByFixedOrder(fixedOrder []string) func(a, b string) int { + return func(a, b string) int { + oi := slices.Index(fixedOrder, a) + oj := slices.Index(fixedOrder, b) if oi == -1 && oj == -1 { - return keys[i] < keys[j] + return strings.Compare(a, b) } if oi == -1 { - return false + return 1 } if oj == -1 { - return true + return -1 } - return oi < oj - }) -} - -func findIndex(s []string, e string) int { - for i, a := range s { - if a == e { - return i - } + return cmp.Compare(oi, oj) } - return -1 } func alignFormat(length int) string { return "%" + fmt.Sprintf("-%ds", length) } -func max(a, b int) int { - if a > b { - return a - } - return b -} - func removeField(fields []*ast.Field, index int) []*ast.Field { if index < 0 || index >= len(fields) { return fields @@ -457,3 +383,12 @@ func removeField(fields []*ast.Field, index int) []*ast.Field { return append(fields[:index], fields[index+1:]...) 
} + +func getFilename(fset *token.FileSet, file *ast.File) string { + filename := fset.PositionFor(file.Pos(), true).Filename + if !strings.HasSuffix(filename, ".go") { + return fset.PositionFor(file.Pos(), false).Filename + } + + return filename +} diff --git a/hack/tools/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go b/hack/tools/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go index 5507d95462..703cc1c39f 100644 --- a/hack/tools/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go +++ b/hack/tools/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go @@ -125,7 +125,7 @@ const ( ) func (n *nilNil) isDangerNilType(t types.Type) (bool, zeroValue) { - switch v := t.(type) { + switch v := types.Unalias(t).(type) { case *types.Pointer: return n.checkedTypes.Contains(ptrType), zeroValueNil diff --git a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/encoded_compare.go b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/encoded_compare.go index 53c74ac453..1464fd640b 100644 --- a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/encoded_compare.go +++ b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/encoded_compare.go @@ -48,7 +48,7 @@ func (checker EncodedCompare) Check(pass *analysis.Pass, call *CallMeta) *analys switch { case aIsExplicitJSON, bIsExplicitJSON, isJSONStyleExpr(pass, a), isJSONStyleExpr(pass, b): proposed = "JSONEq" - case isYAMLStyleExpr(a), isYAMLStyleExpr(b): + case isYAMLStyleExpr(pass, a), isYAMLStyleExpr(pass, b): proposed = "YAMLEq" } diff --git a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/formatter.go b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/formatter.go index 896b6bf5fa..7ff4de470a 100644 --- a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/formatter.go +++ b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/formatter.go @@ -115,7 +115,11 @@ func (checker Formatter) checkFmtAssertion(pass *analysis.Pass, call *CallMeta) func isPrintfLikeCall(pass *analysis.Pass, call *CallMeta) (int, bool) { msgAndArgsPos := getMsgAndArgsPosition(call.Fn.Signature) - if msgAndArgsPos < 0 { + if msgAndArgsPos <= 0 { + return -1, false + } + + if !(len(call.ArgsRaw) > msgAndArgsPos && hasStringType(pass, call.ArgsRaw[msgAndArgsPos])) { return -1, false } @@ -123,7 +127,7 @@ func isPrintfLikeCall(pass *analysis.Pass, call *CallMeta) (int, bool) { return -1, false } - return msgAndArgsPos, len(call.ArgsRaw) > msgAndArgsPos + return msgAndArgsPos, true } func assertHasFormattedAnalogue(pass *analysis.Pass, call *CallMeta) bool { diff --git a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_basic_type.go b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_basic_type.go index 9b43e914cc..b4bb563219 100644 --- a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_basic_type.go +++ b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_basic_type.go @@ -166,6 +166,12 @@ func hasBytesType(pass *analysis.Pass, e ast.Expr) bool { return ok && el.Kind() == types.Uint8 } +// hasStringType returns true if the expression is of `string` type. 
+func hasStringType(pass *analysis.Pass, e ast.Expr) bool { + basicType, ok := pass.TypesInfo.TypeOf(e).(*types.Basic) + return ok && basicType.Kind() == types.String +} + // untype returns v from type(v) expression or v itself if there is no type conversion. func untype(e ast.Expr) ast.Expr { ce, ok := e.(*ast.CallExpr) diff --git a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_encoded.go b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_encoded.go index 35a497a724..c366f85635 100644 --- a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_encoded.go +++ b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_encoded.go @@ -11,13 +11,15 @@ import ( ) var ( + wordsRe = regexp.MustCompile(`[A-Z]+(?:[a-z]*|$)|[a-z]+`) // NOTE(a.telyshev): ChatGPT. + jsonIdentRe = regexp.MustCompile(`json|JSON|Json`) - yamlIdentRe = regexp.MustCompile(`yaml|YAML|Yaml|yml|YML|Yml`) + yamlWordRe = regexp.MustCompile(`yaml|YAML|Yaml|^(yml|YML|Yml)$`) ) func isJSONStyleExpr(pass *analysis.Pass, e ast.Expr) bool { if isIdentNamedAfterPattern(jsonIdentRe, e) { - return true + return hasBytesType(pass, e) || hasStringType(pass, e) } if t, ok := pass.TypesInfo.Types[e]; ok && t.Value != nil { @@ -35,6 +37,20 @@ func isJSONStyleExpr(pass *analysis.Pass, e ast.Expr) bool { return false } -func isYAMLStyleExpr(e ast.Expr) bool { - return isIdentNamedAfterPattern(yamlIdentRe, e) +func isYAMLStyleExpr(pass *analysis.Pass, e ast.Expr) bool { + id, ok := e.(*ast.Ident) + return ok && (hasBytesType(pass, e) || hasStringType(pass, e)) && hasWordAfterPattern(id.Name, yamlWordRe) +} + +func hasWordAfterPattern(s string, re *regexp.Regexp) bool { + for _, w := range splitIntoWords(s) { + if re.MatchString(w) { + return true + } + } + return false +} + +func splitIntoWords(s string) []string { + return wordsRe.FindAllString(s, -1) } diff --git a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/config/config.go b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/config/config.go index a8812e6d01..23b673428e 100644 --- a/hack/tools/vendor/github.com/Antonboom/testifylint/internal/config/config.go +++ b/hack/tools/vendor/github.com/Antonboom/testifylint/internal/config/config.go @@ -133,7 +133,7 @@ func BindToFlags(cfg *Config, fs *flag.FlagSet) { "to enable go vet's printf checks") fs.BoolVar(&cfg.Formatter.RequireFFuncs, "formatter.require-f-funcs", false, - "to require f-assertions if format string is used") + "to require f-assertions (e.g. 
assert.Equalf) if format string is used, even if there are no variable-length variables.") fs.BoolVar(&cfg.GoRequire.IgnoreHTTPHandlers, "go-require.ignore-http-handlers", false, diff --git a/hack/tools/vendor/github.com/Crocmagnon/fatcontext/pkg/analyzer/analyzer.go b/hack/tools/vendor/github.com/Crocmagnon/fatcontext/pkg/analyzer/analyzer.go index a65efbba89..7b88bf56e9 100644 --- a/hack/tools/vendor/github.com/Crocmagnon/fatcontext/pkg/analyzer/analyzer.go +++ b/hack/tools/vendor/github.com/Crocmagnon/fatcontext/pkg/analyzer/analyzer.go @@ -59,7 +59,7 @@ func run(pass *analysis.Pass) (interface{}, error) { { Pos: assignStmt.Pos(), End: assignStmt.End(), - NewText: []byte(suggested), + NewText: suggested, }, }, }) @@ -215,10 +215,10 @@ func getRootIdent(pass *analysis.Pass, node ast.Node) *ast.Ident { } // render returns the pretty-print of the given node -func render(fset *token.FileSet, x interface{}) (string, error) { +func render(fset *token.FileSet, x interface{}) ([]byte, error) { var buf bytes.Buffer if err := printer.Fprint(&buf, fset, x); err != nil { - return "", fmt.Errorf("printing node: %w", err) + return nil, fmt.Errorf("printing node: %w", err) } - return buf.String(), nil + return buf.Bytes(), nil } diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/.custom-gcl.yml b/hack/tools/vendor/github.com/JoelSpeed/kal/.custom-gcl.yml new file mode 100644 index 0000000000..f31f0effa8 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/.custom-gcl.yml @@ -0,0 +1,6 @@ +version: v1.62.0 +name: golangci-kal +destination: ./bin +plugins: +- module: 'github.com/JoelSpeed/kal' + path: ./ diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/.gitignore b/hack/tools/vendor/github.com/JoelSpeed/kal/.gitignore new file mode 100644 index 0000000000..1fc7cb9297 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/.gitignore @@ -0,0 +1,21 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin +testbin/* + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ +.vscode/ diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/.golangci.yaml b/hack/tools/vendor/github.com/JoelSpeed/kal/.golangci.yaml new file mode 100644 index 0000000000..9b090a0113 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/.golangci.yaml @@ -0,0 +1,97 @@ +linters: + disable-all: true + # Enable specific linter + # https://golangci-lint.run/usage/linters/#enabled-by-default-linters + enable: + # Default linters + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - typecheck + - unused + # Additional linters + - asciicheck + - bidichk + - bodyclose + - contextcheck + - cyclop + - dogsled + - dupl + - durationcheck + - errname + - errorlint + - exhaustive + - exportloopref + - forcetypeassert + - funlen + - gochecknoglobals + - gocognit + - goconst + - gocritic + - gocyclo + - godot + - goerr113 + - gofmt + - goimports + - goprintffuncname + - gosec + - importas + - makezero + - misspell + - nakedret + - nestif + - nilerr + - nilnil + - nlreturn + - noctx + - nolintlint + - prealloc + - predeclared + - revive + - stylecheck + - tagliatelle + - tenv + - unconvert + - unparam + - wastedassign + - whitespace + - wrapcheck + - wsl +linters-settings: + nlreturn: + block-size: 2 + revive: + confidence: 0 + rules: + - name: exported + severity: warning + disabled: false + arguments: + - "checkPrivateReceivers" + - 
"disableStutteringCheck" + stylecheck: + # https://staticcheck.io/docs/options#checks + checks: ["all", "-ST1000"] + dot-import-whitelist: + - "github.com/onsi/ginkgo/v2" + - "github.com/onsi/gomega" +issues: + exclude: + - Analyzer is a global variable + exclude-use-default: false + exclude-rules: + # Exclude some linters from running on tests files. + - path: _test\.go + linters: + - gocyclo + - dupl + - gosec + - gochecknoglobals + - goerr113 + - funlen + - path: testdata + linters: + - all + diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/LICENSE b/hack/tools/vendor/github.com/JoelSpeed/kal/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/Makefile b/hack/tools/vendor/github.com/JoelSpeed/kal/Makefile new file mode 100644 index 0000000000..a93bd95495 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/Makefile @@ -0,0 +1,66 @@ +PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) +GOLANGCI_LINT = go run ${PROJECT_DIR}/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint + +VERSION ?= $(shell git describe --always --abbrev=7) + +.PHONY: all +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk commands is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: vendor +vendor: ## Ensure the vendor directory is up to date. + go mod tidy + go mod vendor + go mod verify + +.PHONY: lint +lint: ## Run golangci-lint over the codebase. + ${GOLANGCI_LINT} run ./... --timeout 5m -v + +.PHONY: test +test: fmt vet unit ## Run tests. + +.PHONY: unit +unit: ## Run unit tests. + go test ./... + +##@ Build + +.PHONY: build +build: fmt vet ## Build KAL binary. + go build -o bin/kal ./cmd/kal + +.PHONY: build-golangci +build-golangci: ## Run golangci-lint over the codebase. 
+	${GOLANGCI_LINT} custom
+
+.PHONY: verify-%
+verify-%:
+	make $*
+	git diff --exit-code
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/README.md b/hack/tools/vendor/github.com/JoelSpeed/kal/README.md
new file mode 100644
index 0000000000..06a3c11bbb
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/README.md
@@ -0,0 +1,298 @@
+# KAL - The Kubernetes API Linter
+
+KAL is a Golang-based linter for Kubernetes API types. It checks for common mistakes and enforces best practices.
+The rules implemented by KAL are based on the [Kubernetes API Conventions][api-conventions].
+
+KAL is aimed at being an assistant to API review: it catches the mechanical elements of API review, allowing reviewers to focus on the more complex aspects of API design.
+
+[api-conventions]: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md
+
+## Installation
+
+KAL currently comes in two flavours: a standalone binary and a golangci-lint plugin.
+
+### Standalone Binary
+
+To install the standalone binary, run the following command:
+
+```shell
+go install github.com/JoelSpeed/kal/cmd/kal@latest
+```
+
+The standalone binary can be run with the following command:
+
+```shell
+kal path/to/api/types
+```
+
+`kal` currently accepts no complex configuration, and will run all checks considered to be default.
+
+Individual linters can be disabled with the flag corresponding to the linter name. For example, to disable the `commentstart` linter, run the following command:
+
+```shell
+kal -commentstart=false path/to/api/types
+```
+
+Where fixes are available, these can be applied automatically with the `-fix` flag.
+Note: automatic fixing is currently only available via the standalone binary, and is not available via the `golangci-lint` plugin.
+
+```shell
+kal -fix path/to/api/types
+```
+
+Other standard Golang linter flags implemented by [multichecker][multichecker]-based linters are also supported.
+
+[multichecker]: https://pkg.go.dev/golang.org/x/tools/go/analysis/multichecker
+
+### Golangci-lint Plugin
+
+To install the `golangci-lint` plugin, first you must have `golangci-lint` installed.
+If you do not have `golangci-lint` installed, review the `golangci-lint` [install guide][golangci-lint-install].
+
+[golangci-lint-install]: https://golangci-lint.run/welcome/install/
+
+You will need to create a `.custom-gcl.yml` file to describe the custom linters you want to run. The following is an example of a `.custom-gcl.yml` file:
+
+```yaml
+version: v1.62.0
+name: golangci-kal
+destination: ./bin
+plugins:
+- module: 'github.com/JoelSpeed/kal'
+  version: 'v0.0.0' # Replace with the latest version
+```
+
+Once you have created the custom configuration file, you can run the following command to build the custom `golangci-kal` binary:
+
+```shell
+golangci-lint custom
+```
+
+The output binary will be a combination of the initial `golangci-lint` binary and the KAL linters.
+This means that you can use any of the standard `golangci-lint` configuration or flags to run the binary, but may also include the KAL linters.
+
+If you wish to only use the KAL linters, you can configure your `.golangci.yml` file to only run the KAL linters:
+
+```yaml
+linters-settings:
+  custom:
+    kal:
+      type: "module"
+      description: KAL is the Kube-API-Linter and lints Kube like APIs based on API conventions and best practices.
+      settings:
+        linters: {}
+        lintersConfig: {}
+linters:
+  disable-all: true
+  enable:
+    - kal
+
+# To only run KAL on a specific path
+issues:
+  exclude-rules:
+    - path-except: "api/*"
+      linters:
+        - kal
+```
+
+The settings for KAL are based on the [GolangCIConfig][golangci-config-struct] struct and allow for finer control over the linter rules.
+This finer control over linter rules is not currently available outside of the plugin-based version of KAL.
+
+If you wish to use the KAL linters in conjunction with other linters, you can enable the KAL linters in the `.golangci.yml` file by ensuring that `kal` is in the `linters.enabled` list.
+To provide further configuration, add the `custom.kal` section to your `linter-settings` as per the example above.
+
+[golangci-config-struct]: https://pkg.go.dev/github.com/JoelSpeed/kal/pkg/config#GolangCIConfig
+
+#### VSCode integration
+
+Since VSCode already integrates with `golangci-lint` via the [Go][vscode-go] extension, you can use the `golangci-kal` binary as a linter in VSCode.
+If your project authors are already using VSCode and have the configuration to lint their code when saving, this can be a seamless integration.
+
+Ensure that your project setup includes building the `golangci-kal` binary, and then configure the `go.lintTool` and `go.alternateTools` settings in your project `.vscode/settings.json` file.
+
+[vscode-go]: https://code.visualstudio.com/docs/languages/go
+
+```json
+{
+    "go.lintTool": "golangci-lint",
+    "go.alternateTools": {
+        "golangci-lint": "${workspaceFolder}/bin/golangci-kal",
+    }
+}
+```
+
+Alternatively, you can also replace the binary with a script that runs the `golangci-kal` binary,
+allowing for customisation or automatic compilation of the project should it not already exist.
+
+```json
+{
+    "go.lintTool": "golangci-lint",
+    "go.alternateTools": {
+        "golangci-lint": "${workspaceFolder}/hack/golangci-lint.sh",
+    }
+}
+```
+
+# Linters
+
+## Conditions
+
+The `conditions` linter checks that `Conditions` fields in the API types are correctly formatted.
+The `Conditions` field should be a slice of `metav1.Condition` with the following tags and markers:
+
+```go
+// +listType=map
+// +listMapKey=type
+// +patchStrategy=merge
+// +patchMergeKey=type
+// +optional
+Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,opt,name=conditions"`
+```
+
+Conditions are idiomatically the first field within the status struct, and the linter will highlight when the Conditions are not the first field.
+
+### Configuration
+
+```yaml
+lintersConfig:
+  conditions:
+    isFirstField: Warn | Ignore # The policy for the Conditions field being the first field. Defaults to `Warn`.
+    useProtobuf: SuggestFix | Warn | Ignore # The policy for the protobuf tag on the Conditions field. Defaults to `SuggestFix`.
+```
+
+### Fixes (via standalone binary only)
+
+The `conditions` linter can automatically fix the tags on the `Conditions` field.
+When they do not match the expected format, the linter will suggest to update the tags to match the expected format.
+
+For CRDs, protobuf tags are not expected. By setting the `useProtobuf` configuration to `Ignore`, the linter will not suggest to add the protobuf tag to the `Conditions` field tags.
+
+The linter will also suggest to add missing markers.
+If any of the 5 markers in the example above are missing, the linter will suggest to add them directly above the field, as illustrated in the sketch below.
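+
+As an illustrative sketch (the `FooStatus` type and its fields are hypothetical, not taken from the linter's test data), a status struct like the following would be flagged on all three counts: `Conditions` is not the first field, all five markers are missing, and the struct tag does not match the expected form.
+
+```go
+type FooStatus struct {
+	// observedGeneration is the most recent generation observed by the controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+	// conditions represent the current state observations for Foo.
+	Conditions []metav1.Condition `json:"conditions"`
+}
+```
+
+Running the standalone binary with the `-fix` flag should then insert the missing markers directly above the field and rewrite the struct tag into the expected form shown at the start of this section.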
+
+## CommentStart
+
+The `commentstart` linter checks that all comments in the API types start with the serialized form of the type they are commenting on.
+This helps to ensure that generated documentation reflects the most common usage of the field, the serialized YAML form.
+
+### Fixes (via standalone binary only)
+
+The `commentstart` linter can automatically fix comments that do not start with the serialized form of the type.
+
+When the `json` tag is present, and matches the first word of the field comment in all but casing, the linter will suggest that the comment be updated to match the `json` tag.
+
+## Integers
+
+The `integers` linter checks for usage of unsupported integer types.
+Only `int32` and `int64` types should be used in APIs; other integer types, including unsigned integers, are forbidden.
+
+## JSONTags
+
+The `jsontags` linter checks that all fields in the API types have a `json` tag, and that those tags are correctly formatted.
+The `json` tag for a field within a Kubernetes API type should use a camel case version of the field name.
+
+The `jsontags` linter checks the tag name against the regex `"^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$"`, which allows consecutive upper case characters to allow for acronyms, e.g. `requestTTL`.
+
+### Configuration
+
+```yaml
+lintersConfig:
+  jsonTags:
+    jsonTagRegex: "^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$" # Provide a custom regex, which the json tag must match.
+```
+
+## MaxLength
+
+The `maxlength` linter checks that string and array fields in the API are bounded by a maximum length.
+
+For strings, this means they have a `+kubebuilder:validation:MaxLength` marker.
+
+For arrays, this means they have a `+kubebuilder:validation:MaxItems` marker.
+
+For arrays of strings, the array element should also have a `+kubebuilder:validation:MaxLength` marker if the array element is a type alias,
+or `+kubebuilder:validation:items:MaxLength` if the array is an element of the built-in string type.
+
+Adding maximum lengths to strings and arrays not only ensures that the API is not abused (used to store overly large data, reduces DDoS exposure, etc.),
+but also allows CEL validation cost estimations to be kept within reasonable bounds.
+
+## NoBools
+
+The `nobools` linter checks that fields in the API types do not contain a `bool` type.
+
+Booleans are limited and do not evolve well over time.
+It is recommended instead to create a string alias with meaningful values, as an enum.
+
+## NoPhase
+
+The `nophase` linter checks that the fields in the API types don't contain a 'Phase', or any field which contains 'Phase' as a substring, e.g. MachinePhase.
+
+## OptionalOrRequired
+
+The `optionalorrequired` linter checks that all fields in the API types are either optional or required, and are marked explicitly as such.
+
+The linter expects to find a comment marker `// +optional` or `// +required` within the comment for the field.
+
+It also supports the `// +kubebuilder:validation:Optional` and `// +kubebuilder:validation:Required` markers, but will suggest to use the `// +optional` and `// +required` markers instead.
+
+If you prefer to use the Kubebuilder markers instead, you can change the preference in the configuration.
+
+### Configuration
+
+```yaml
+lintersConfig:
+  optionalOrRequired:
+    preferredOptionalMarker: optional | kubebuilder:validation:Optional # The preferred optional marker to use, fixes will suggest to use this marker. Defaults to `optional`.
+ preferredRequiredMarker: required | kubebuilder:validation:Required # The preferred required marker to use, fixes will suggest to use this marker. Defaults to `required`. +``` + +### Fixes (via standalone binary only) + +The `optionalorrequired` linter can automatically fix fields that are using the incorrect form of either the optional or required marker. + +It will also remove the secondary marker where both the preferred and secondary marker are present on a field. + +## RequiredFields + +The `requiredfields` linter checks that fields that are marked as required, follow the convention of not being pointers, +and not having an `omitempty` value in their `json` tag. + +### Configuration + +```yaml +lintersConfig: + requiredFields: + pointerPolicy: Warn | SuggestFix # The policy for pointers in required fields. Defaults to `SuggestFix`. +``` + +### Fixes (via standalone binary only) + +The `requiredfields` linter can automatically fix fields that are marked as required, but are pointers. + +It will suggest to remove the pointer from the field, and update the `json` tag to remove the `omitempty` value. + +If you prefer not to suggest fixes for pointers in required fields, you can change the `pointerPolicy` to `Warn`. +The linter will then only suggest to remove the `omitempty` value from the `json` tag. + +## StatusSubresource + +The `statussubresource` linter checks that the status subresource is configured correctly for +structs marked with the `kubebuilder:object:root:=true` marker. Correct configuration is that +when there is a status field the `kubebuilder:subresource:status` marker is present on the struct +OR when the `kubebuilder:subresource:status` marker is present on the struct there is a status field. + +This linter is not enabled by default as it is only applicable to CustomResourceDefinitions. + +### Fixes (via standalone binary only) + +In the case where there is a status field present but no `kubebuilder:subresource:status` marker, the +linter will suggest adding the comment `// +kubebuilder:subresource:status` above the struct. + +# Contributing + +New linters can be added by following the [New Linter][new-linter] guide. + +[new-linter]: docs/new-linter.md + +# License + +KAL is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/cmd/kal/main.go b/hack/tools/vendor/github.com/JoelSpeed/kal/cmd/kal/main.go new file mode 100644 index 0000000000..410053395e --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/cmd/kal/main.go @@ -0,0 +1,18 @@ +package main + +import ( + kalanalysis "github.com/JoelSpeed/kal/pkg/analysis" + "github.com/JoelSpeed/kal/pkg/config" + "golang.org/x/tools/go/analysis/multichecker" +) + +func main() { + analyzers, err := kalanalysis.NewRegistry().InitializeLinters(config.Linters{}, config.LintersConfig{}) + if err != nil { + panic(err) + } + + multichecker.Main( + analyzers..., + ) +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/doc.go new file mode 100644 index 0000000000..3b14dbe2be --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/doc.go @@ -0,0 +1,34 @@ +/* +KAL is a linter for Kubernetes API types, that implements API conventions and best practices. + +This package provides a GolangCI-Lint plugin that can be used to build a custom linter for Kubernetes API types. 
+The custom golangci-lint binary can be built by checking out the KAL repository and running `make build-golangci`. +This will generate a custom golangci-lint binary in the `bin` directory. + +The custom golangci-lint binary can be run with the `run` command, and the KAL linters can be enabled by setting the `kal` linter in the `.golangci.yml` configuration file. + +Example `.golangci.yml` configuration file: + + linters-settings: + custom: + kal: + type: "module" + description: KAL is the Kube-API-Linter and lints Kube like APIs based on API conventions and best practices. + settings: + linters: + enabled: [] + disabled: [] + lintersConfig: + jsonTags: + jsonTagRegex: "" + optionalOrRequired: + preferredOptionalMarker: "" + preferredRequiredMarker: "" + linters: + disable-all: true + enable: + - kal + +New linters can be added in the [github.com/JoelSpeed/kal/pkg/analysis] package. +*/ +package kal diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/analyzer.go new file mode 100644 index 0000000000..57e2af6783 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/analyzer.go @@ -0,0 +1,108 @@ +package commentstart + +import ( + "errors" + "fmt" + "go/ast" + "go/token" + "strings" + + "github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const name = "commentstart" + +var ( + errCouldNotGetInspector = errors.New("could not get inspector") + errCouldNotGetJSONTags = errors.New("could not get json tags") +) + +// Analyzer is the analyzer for the commentstart package. +// It checks that all struct fields in an API have a godoc, and that the godoc starts with the serialised field name. +var Analyzer = &analysis.Analyzer{ + Name: name, + Doc: "Check that all struct fields in an API have a godoc, and that the godoc starts with the serialised field name", + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer, extractjsontags.Analyzer}, +} + +func run(pass *analysis.Pass) (interface{}, error) { + inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if !ok { + return nil, errCouldNotGetInspector + } + + jsonTags, ok := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags) + if !ok { + return nil, errCouldNotGetJSONTags + } + + // Filter to structs so that we can iterate over fields in a struct. 
+	nodeFilter := []ast.Node{
+		(*ast.StructType)(nil),
+	}
+
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		sTyp, ok := n.(*ast.StructType)
+		if !ok {
+			return
+		}
+
+		if sTyp.Fields == nil {
+			return
+		}
+
+		for _, field := range sTyp.Fields.List {
+			checkField(pass, field, jsonTags)
+		}
+	})
+
+	return nil, nil //nolint:nilnil
+}
+
+func checkField(pass *analysis.Pass, field *ast.Field, jsonTags extractjsontags.StructFieldTags) {
+	if field == nil || len(field.Names) == 0 {
+		return
+	}
+
+	fieldName := field.Names[0].Name
+
+	tagInfo := jsonTags.FieldTags(field)
+
+	if tagInfo.Name == "" {
+		return
+	}
+
+	if field.Doc == nil {
+		pass.Reportf(field.Pos(), "field %s is missing godoc comment", fieldName)
+		return
+	}
+
+	firstLine := field.Doc.List[0]
+	if !strings.HasPrefix(firstLine.Text, "// "+tagInfo.Name+" ") {
+		if strings.HasPrefix(strings.ToLower(firstLine.Text), strings.ToLower("// "+tagInfo.Name+" ")) {
+			// The comment start is correct, apart from the casing, we can fix that.
+			pass.Report(analysis.Diagnostic{
+				Pos:     firstLine.Pos(),
+				Message: fmt.Sprintf("godoc for field %s should start with '%s ...'", fieldName, tagInfo.Name),
+				SuggestedFixes: []analysis.SuggestedFix{
+					{
+						Message: fmt.Sprintf("should replace first word with `%s`", tagInfo.Name),
+						TextEdits: []analysis.TextEdit{
+							{
+								Pos:     firstLine.Pos(),
+								End:     firstLine.Pos() + token.Pos(len(tagInfo.Name)) + token.Pos(4),
+								NewText: []byte("// " + tagInfo.Name + " "),
+							},
+						},
+					},
+				},
+			})
+		} else {
+			pass.Reportf(field.Doc.List[0].Pos(), "godoc for field %s should start with '%s ...'", fieldName, tagInfo.Name)
+		}
+	}
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/doc.go
new file mode 100644
index 0000000000..f50d57b0d3
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/doc.go
@@ -0,0 +1,19 @@
+/*
+commentstart is a simple analysis tool that checks if the first line of a comment is the same as the json tag.
+
+By convention in Go, comments typically start with the name of the item being described.
+In the case of field names, this would mean for a field Foo, the comment should look like:
+
+	// Foo is a field that does something.
+	Foo string `json:"foo"`
+
+However, in Kubernetes API types, the json tag is often used to generate documentation.
+In this case, the comment should start with the json tag, like so:
+
+	// foo is a field that does something.
+	Foo string `json:"foo"`
+
+This ensures that for any generated documentation, the documentation refers to the serialized field name.
+We expect most readers of Kubernetes API documentation will be more familiar with the serialized field names than the Go field names.
+*/
+package commentstart
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/initializer.go
new file mode 100644
index 0000000000..370106fd04
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/commentstart/initializer.go
@@ -0,0 +1,30 @@
+package commentstart
+
+import (
+	"github.com/JoelSpeed/kal/pkg/config"
+	"golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+	return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+	return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+	return Analyzer, nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+	return true
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/analyzer.go
new file mode 100644
index 0000000000..9c48375bf2
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/analyzer.go
@@ -0,0 +1,312 @@
+package conditions
+
+import (
+	"errors"
+	"fmt"
+	"go/ast"
+	"go/token"
+	"strings"
+
+	"github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags"
+	"github.com/JoelSpeed/kal/pkg/analysis/helpers/markers"
+	"github.com/JoelSpeed/kal/pkg/config"
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+	name = "conditions"
+
+	listTypeMarkerID      = "listType"
+	listMapKeyMarkerID    = "listMapKey"
+	patchStrategyMarkerID = "patchStrategy"
+	patchMergeKeyMarkerID = "patchMergeKey"
+
+	listTypeMap       = "listType=map"
+	listMapKeyType    = "listMapKey=type"
+	patchStrategy     = "patchStrategy=merge"
+	patchMergeKeyType = "patchMergeKey=type"
+	optional          = "optional"
+
+	expectedTagWithProtobufFmt = "`json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,%d,rep,name=conditions\"`"
+	expectedTagWithoutProtobuf = "`json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"`"
+)
+
+func init() {
+	markers.DefaultRegistry().Register(
+		listTypeMarkerID,
+		listMapKeyMarkerID,
+		patchStrategyMarkerID,
+		patchMergeKeyMarkerID,
+		optional,
+	)
+}
+
+var (
+	errCouldNotGetInspector = errors.New("could not get inspector")
+	errCouldNotGetMarkers   = errors.New("could not get markers")
+)
+
+type analyzer struct {
+	isFirstField config.ConditionsFirstField
+	useProtobuf  config.ConditionsUseProtobuf
+}
+
+// newAnalyzer creates a new analyzer.
+func newAnalyzer(cfg config.ConditionsConfig) *analysis.Analyzer {
+	defaultConfig(&cfg)
+
+	a := &analyzer{
+		isFirstField: cfg.IsFirstField,
+		useProtobuf:  cfg.UseProtobuf,
+	}
+
+	return &analysis.Analyzer{
+		Name:     name,
+		Doc:      `Checks that all conditions type fields conform to the required conventions.`,
+		Run:      a.run,
+		Requires: []*analysis.Analyzer{inspect.Analyzer, markers.Analyzer, extractjsontags.Analyzer},
+	}
+}
+
+func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) {
+	inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	if !ok {
+		return nil, errCouldNotGetInspector
+	}
+
+	markersAccess, ok := pass.ResultOf[markers.Analyzer].(markers.Markers)
+	if !ok {
+		return nil, errCouldNotGetMarkers
+	}
+
+	// Filter to structs so that we can iterate over fields in a struct.
+	// We need a struct here so that we can tell where in the struct the field is.
+ nodeFilter := []ast.Node{ + (*ast.StructType)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + sTyp, ok := n.(*ast.StructType) + if !ok { + return + } + + if sTyp.Fields == nil { + return + } + + for i, field := range sTyp.Fields.List { + fieldMarkers := markersAccess.FieldMarkers(field) + + a.checkField(pass, i, field, fieldMarkers) + } + }) + + return nil, nil //nolint:nilnil +} + +func (a *analyzer) checkField(pass *analysis.Pass, index int, field *ast.Field, fieldMarkers markers.MarkerSet) { + if !fieldIsCalledConditions(field) { + return + } + + if !isSliceMetaV1Condition(field) { + pass.Reportf(field.Pos(), "Conditions field must be a slice of metav1.Condition") + return + } + + checkFieldMarkers(pass, field, fieldMarkers) + a.checkFieldTags(pass, index, field) + + if a.isFirstField == config.ConditionsFirstFieldWarn && index != 0 { + pass.Reportf(field.Pos(), "Conditions field must be the first field in the struct") + } +} + +func checkFieldMarkers(pass *analysis.Pass, field *ast.Field, fieldMarkers markers.MarkerSet) { + missingMarkers := []string{} + + if !fieldMarkers.HasWithValue(listTypeMap) { + missingMarkers = append(missingMarkers, listTypeMap) + } + + if !fieldMarkers.HasWithValue(listMapKeyType) { + missingMarkers = append(missingMarkers, listMapKeyType) + } + + if !fieldMarkers.HasWithValue(patchStrategy) { + missingMarkers = append(missingMarkers, patchStrategy) + } + + if !fieldMarkers.HasWithValue(patchMergeKeyType) { + missingMarkers = append(missingMarkers, patchMergeKeyType) + } + + if !fieldMarkers.Has(optional) { + missingMarkers = append(missingMarkers, optional) + } + + if len(missingMarkers) != 0 { + pass.Report(analysis.Diagnostic{ + Pos: field.Pos(), + End: field.End(), + Message: "Conditions field is missing the following markers: " + strings.Join(missingMarkers, ", "), + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Add missing markers", + TextEdits: []analysis.TextEdit{ + { + Pos: field.Pos(), + End: token.NoPos, + NewText: getNewMarkers(missingMarkers), + }, + }, + }, + }, + }) + } +} + +func getNewMarkers(missingMarkers []string) []byte { + var out string + + for _, marker := range missingMarkers { + out += "// +" + marker + "\n" + } + + return []byte(out) +} + +func (a *analyzer) checkFieldTags(pass *analysis.Pass, index int, field *ast.Field) { + if field.Tag == nil { + expectedTag := getExpectedTag(a.useProtobuf, a.isFirstField, index) + + pass.Report(analysis.Diagnostic{ + Pos: field.Pos(), + End: field.End(), + Message: "Conditions field is missing tags, should be: " + expectedTag, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Add missing tags", + TextEdits: []analysis.TextEdit{ + { + Pos: field.End(), + End: token.NoPos, + NewText: []byte(expectedTag), + }, + }, + }, + }, + }) + + return + } + + asExpected, shouldFix := tagIsAsExpected(field.Tag.Value, a.useProtobuf, a.isFirstField, index) + if !asExpected { + expectedTag := getExpectedTag(a.useProtobuf, a.isFirstField, index) + + if !shouldFix { + pass.Reportf(field.Tag.ValuePos, "Conditions field has incorrect tags, should be: %s", expectedTag) + } else { + pass.Report(analysis.Diagnostic{ + Pos: field.Tag.ValuePos, + End: field.Tag.End(), + Message: "Conditions field has incorrect tags, should be: " + expectedTag, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Update tags", + TextEdits: []analysis.TextEdit{ + { + Pos: field.Tag.ValuePos, + End: field.Tag.End(), + NewText: []byte(expectedTag), + }, + }, + }, + }, + }) + } + } +} + +func 
getExpectedTag(useProtobuf config.ConditionsUseProtobuf, isFirstField config.ConditionsFirstField, index int) string {
+	if useProtobuf == config.ConditionsUseProtobufSuggestFix || useProtobuf == config.ConditionsUseProtobufWarn {
+		i := 1
+		if isFirstField == config.ConditionsFirstFieldIgnore {
+			i = index + 1
+		}
+
+		return fmt.Sprintf(expectedTagWithProtobufFmt, i)
+	}
+
+	return expectedTagWithoutProtobuf
+}
+
+func tagIsAsExpected(tag string, useProtobuf config.ConditionsUseProtobuf, isFirstField config.ConditionsFirstField, index int) (bool, bool) {
+	switch useProtobuf {
+	case config.ConditionsUseProtobufSuggestFix:
+		return tag == getExpectedTag(config.ConditionsUseProtobufSuggestFix, isFirstField, index), true
+	case config.ConditionsUseProtobufWarn:
+		return tag == getExpectedTag(config.ConditionsUseProtobufWarn, isFirstField, index), false
+	case config.ConditionsUseProtobufIgnore:
+		return tag == getExpectedTag(config.ConditionsUseProtobufIgnore, isFirstField, index) || tag == getExpectedTag(config.ConditionsUseProtobufSuggestFix, isFirstField, index), true
+	default:
+		panic("unexpected useProtobuf value")
+	}
+}
+
+func fieldIsCalledConditions(field *ast.Field) bool {
+	if field == nil {
+		return false
+	}
+
+	return len(field.Names) != 0 && field.Names[0] != nil && field.Names[0].Name == "Conditions"
+}
+
+func isSliceMetaV1Condition(field *ast.Field) bool {
+	if field == nil {
+		return false
+	}
+
+	// Field is not an array type.
+	arr, ok := field.Type.(*ast.ArrayType)
+	if !ok {
+		return false
+	}
+
+	// Array element is not imported.
+	selector, ok := arr.Elt.(*ast.SelectorExpr)
+	if !ok {
+		return false
+	}
+
+	pkg, ok := selector.X.(*ast.Ident)
+	if !ok {
+		return false
+	}
+
+	// Array element is not imported from metav1.
+	if selector.X == nil || pkg.Name != "metav1" {
+		return false
+	}
+
+	// Array element is not a metav1.Condition.
+	if selector.Sel == nil || selector.Sel.Name != "Condition" {
+		return false
+	}
+
+	return true
+}
+
+func defaultConfig(cfg *config.ConditionsConfig) {
+	if cfg.IsFirstField == "" {
+		cfg.IsFirstField = config.ConditionsFirstFieldWarn
+	}
+
+	if cfg.UseProtobuf == "" {
+		cfg.UseProtobuf = config.ConditionsUseProtobufSuggestFix
+	}
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/doc.go
new file mode 100644
index 0000000000..456c97abf4
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/doc.go
@@ -0,0 +1,25 @@
+/*
+conditions is a linter that verifies that the conditions field within the struct is correctly defined.
+
+conditions fields in Kubernetes API types are expected to be a slice of metav1.Condition.
+This linter verifies that the field is a slice of metav1.Condition and that it is correctly annotated with the required markers and tags.
+
+The expected condition field should look like this:
+
+	// +listType=map
+	// +listMapKey=type
+	// +patchStrategy=merge
+	// +patchMergeKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+
+Where the tags and markers are incorrect, the linter will suggest fixes to improve the field definition.
+
+Conditions are also idiomatically the first item in the struct; the linter will highlight when the conditions field is not the first field in the struct.
+If this is not a desired behaviour, set the linter config option `isFirstField` to `Ignore`.
+
+Protobuf tags are required for in-tree API types, but not for CRDs.
+When linting CRD-based types, set the `useProtobuf` config option to `Ignore`.
+*/
+package conditions
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/initializer.go
new file mode 100644
index 0000000000..1855ed87cb
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/conditions/initializer.go
@@ -0,0 +1,30 @@
+package conditions
+
+import (
+	"github.com/JoelSpeed/kal/pkg/config"
+	"golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+	return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+	return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+	return newAnalyzer(cfg.Conditions), nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+	return true
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/doc.go
new file mode 100644
index 0000000000..d98c51013c
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/doc.go
@@ -0,0 +1,53 @@
+/*
+analysis provides a linter registry and a set of linters that can be used to analyze Go code.
+The linters in this package are focused on Kubernetes API types and implement API conventions
+and best practices.
+
+To use the linters provided by KAL, initialize an instance of the Registry and then initialize
+the linters within, by passing the required configuration.
+
+Example:
+
+	registry := analysis.NewRegistry()
+
+	// Initialize the linters
+	linters, err := registry.InitLinters(
+		config.Linters{
+			Enabled: []string{
+				"commentstart",
+				"jsontags",
+				"optionalorrequired",
+			},
+			Disabled: []string{
+				...
+			},
+		},
+		config.LintersConfig{
+			JSONTags: config.JSONTagsConfig{
+				JSONTagRegex: `^[-_a-zA-Z0-9]+$`,
+			},
+			OptionalOrRequired: config.OptionalOrRequiredConfig{
+				PreferredOptionalMarker: optionalorrequired.OptionalMarker,
+				PreferredRequiredMarker: optionalorrequired.RequiredMarker,
+			},
+		},
+	)
+
+The provided list of analyzers can be used with `multichecker.Main()` from the `golang.org/x/tools/go/analysis/multichecker` package,
+or as part of a custom analysis pipeline, e.g. via the golangci-lint plugin system.
+
+Linters provided by KAL:
+  - [commentstart]: Linter to ensure that comments start with the serialized version of the field name.
+  - [jsontags]: Linter to ensure that JSON tags are present on struct fields, and that they match a given regex.
+  - [optionalorrequired]: Linter to ensure that all fields are marked as either optional or required.
+
+When adding new linters, ensure that they are added to the registry in the `NewRegistry` function.
+Linters should not depend on other linters, unless that linter has no configuration and is always enabled;
+see the helpers package.
+
+Any common or shared functionality to extract data from types should be added as a helper function in the helpers package.
+The available helpers are:
+  - extractjsontags: Extracts JSON tags from struct fields and returns the information in a structured format.
+  - markers: Extracts marker information from types and returns the information in a structured format.
+*/
+package analysis
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags/analyzer.go
new file mode 100644
index 0000000000..0dafe82eaa
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags/analyzer.go
@@ -0,0 +1,152 @@
+package extractjsontags
+
+import (
+	"errors"
+	"go/ast"
+	"go/token"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+var (
+	errCouldNotGetInspector          = errors.New("could not get inspector")
+	errCouldNotCreateStructFieldTags = errors.New("could not create new structFieldTags")
+)
+
+// StructFieldTags is used to find information about
+// json tags on fields within a struct.
+type StructFieldTags interface {
+	FieldTags(*ast.Field) FieldTagInfo
+}
+
+type structFieldTags struct {
+	fieldTags map[*ast.Field]FieldTagInfo
+}
+
+func newStructFieldTags() StructFieldTags {
+	return &structFieldTags{
+		fieldTags: make(map[*ast.Field]FieldTagInfo),
+	}
+}
+
+func (s *structFieldTags) insertFieldTagInfo(field *ast.Field, tagInfo FieldTagInfo) {
+	s.fieldTags[field] = tagInfo
+}
+
+// FieldTags finds the tag information for the given field within a struct.
+func (s *structFieldTags) FieldTags(field *ast.Field) FieldTagInfo {
+	return s.fieldTags[field]
+}
+
+// Analyzer is the analyzer for the extractjsontags package.
+// It iterates over all fields in structs and extracts their json tags.
+var Analyzer = &analysis.Analyzer{
+	Name:       "extractjsontags",
+	Doc:        "Iterates over all fields in structs and extracts their json tags.",
+	Run:        run,
+	Requires:   []*analysis.Analyzer{inspect.Analyzer},
+	ResultType: reflect.TypeOf(newStructFieldTags()),
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	if !ok {
+		return nil, errCouldNotGetInspector
+	}
+
+	// Filter to fields so that we can extract the json tags for each field in a struct.
+	nodeFilter := []ast.Node{
+		(*ast.Field)(nil),
+	}
+
+	results, ok := newStructFieldTags().(*structFieldTags)
+	if !ok {
+		return nil, errCouldNotCreateStructFieldTags
+	}
+
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		field, ok := n.(*ast.Field)
+		if !ok {
+			return
+		}
+
+		results.insertFieldTagInfo(field, extractTagInfo(field.Tag))
+	})
+
+	return results, nil
+}
+
+func extractTagInfo(tag *ast.BasicLit) FieldTagInfo {
+	if tag == nil || tag.Value == "" {
+		return FieldTagInfo{Missing: true}
+	}
+
+	rawTag, err := strconv.Unquote(tag.Value)
+	if err != nil {
+		// This means the way the AST is treating tags has changed.
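+		// Struct tags parsed from valid Go source are always quoted string
+		// literals, so a failure here is not expected in practice (an
+		// assumption based on the behaviour of go/parser).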
+		panic(err)
+	}
+
+	tagValue, ok := reflect.StructTag(rawTag).Lookup("json")
+	if !ok {
+		return FieldTagInfo{Missing: true}
+	}
+
+	if tagValue == "" {
+		return FieldTagInfo{}
+	}
+
+	pos := tag.Pos() + token.Pos(strings.Index(tag.Value, tagValue))
+	end := pos + token.Pos(len(tagValue))
+
+	tagValues := strings.Split(tagValue, ",")
+
+	if len(tagValues) == 2 && tagValues[0] == "" && tagValues[1] == "inline" {
+		return FieldTagInfo{
+			Inline:   true,
+			RawValue: tagValue,
+			Pos:      pos,
+			End:      end,
+		}
+	}
+
+	tagName := tagValues[0]
+
+	return FieldTagInfo{
+		Name:      tagName,
+		OmitEmpty: len(tagValues) == 2 && tagValues[1] == "omitempty",
+		RawValue:  tagValue,
+		Pos:       pos,
+		End:       end,
+	}
+}
+
+// FieldTagInfo contains information about a field's json tag.
+// This is used to pass information about a field's json tag between analyzers.
+type FieldTagInfo struct {
+	// Name is the name of the field extracted from the json tag.
+	Name string
+
+	// OmitEmpty is true if the field has the omitempty option in the json tag.
+	OmitEmpty bool
+
+	// Inline is true if the field has the inline option in the json tag.
+	Inline bool
+
+	// Missing is true when the field had no json tag.
+	Missing bool
+
+	// RawValue is the raw value from the json tag.
+	RawValue string
+
+	// Pos marks the starting position of the json tag value.
+	Pos token.Pos
+
+	// End marks the end of the json tag value.
+	End token.Pos
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags/doc.go
new file mode 100644
index 0000000000..3fa3a58a65
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags/doc.go
@@ -0,0 +1,34 @@
+/*
+extractjsontags is a helper package that extracts JSON tags from a struct field.
+
+It returns data behind the interface [StructFieldTags] which is used to find information about JSON tags on fields within a struct.
+
+Data about the json tags for a field within a struct can be accessed by calling the `FieldTags` method on the interface.
+
+Example:
+
+	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	jsonTags := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags)
+
+	// Filter to fields so that we can iterate over fields in a struct.
+	nodeFilter := []ast.Node{
+		(*ast.Field)(nil),
+	}
+
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		field, ok := n.(*ast.Field)
+		if !ok {
+			return
+		}
+
+		tagInfo := jsonTags.FieldTags(field)
+
+		...
+
+	})
+
+For each field, tag information is returned as a [FieldTagInfo] struct.
+This can be used to determine the name of the field, as per the json tag, whether the
+field is inline, has omitempty or is missing completely.
+*/
+package extractjsontags
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/analyzer.go
new file mode 100644
index 0000000000..b6cf847c80
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/analyzer.go
@@ -0,0 +1,381 @@
+package markers
+
+import (
+	"errors"
+	"go/ast"
+	"go/token"
+	"reflect"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+// UnnamedExpression is the expression key used
+// when parsing markers that don't have a specific
+// named expression.
+//
+// An example of a marker without a named expression
+// is "kubebuilder:default:=foo".
+//
+// An example of a marker with named expressions
+// is "kubebuilder:validation:XValidation:rule='...',message='...'".
const UnnamedExpression = ""
+
+var (
+	errCouldNotGetInspector  = errors.New("could not get inspector")
+	errCouldNotCreateMarkers = errors.New("could not create markers")
+)
+
+// Markers allows access to markers extracted from the
+// go types.
+type Markers interface {
+	// FieldMarkers returns markers associated to the field.
+	FieldMarkers(*ast.Field) MarkerSet
+
+	// StructMarkers returns markers associated to the given struct.
+	StructMarkers(*ast.StructType) MarkerSet
+
+	// TypeMarkers returns markers associated to the given type.
+	TypeMarkers(*ast.TypeSpec) MarkerSet
+}
+
+func newMarkers() Markers {
+	return &markers{
+		fieldMarkers:  make(map[*ast.Field]MarkerSet),
+		structMarkers: make(map[*ast.StructType]MarkerSet),
+		typeMarkers:   make(map[*ast.TypeSpec]MarkerSet),
+	}
+}
+
+// markers implements the storage for the Markers interface.
+type markers struct {
+	fieldMarkers  map[*ast.Field]MarkerSet
+	structMarkers map[*ast.StructType]MarkerSet
+	typeMarkers   map[*ast.TypeSpec]MarkerSet
+}
+
+// FieldMarkers returns the appropriate MarkerSet for the field,
+// or an empty MarkerSet if the appropriate MarkerSet isn't found.
+func (m *markers) FieldMarkers(field *ast.Field) MarkerSet {
+	fMarkers := m.fieldMarkers[field]
+
+	return NewMarkerSet(fMarkers.UnsortedList()...)
+}
+
+// StructMarkers returns the appropriate MarkerSet if found, else
+// it returns an empty MarkerSet.
+func (m *markers) StructMarkers(sTyp *ast.StructType) MarkerSet {
+	sMarkers := m.structMarkers[sTyp]
+
+	return NewMarkerSet(sMarkers.UnsortedList()...)
+}
+
+// TypeMarkers returns the appropriate MarkerSet for the type,
+// or an empty MarkerSet if the appropriate MarkerSet isn't found.
+func (m *markers) TypeMarkers(typ *ast.TypeSpec) MarkerSet {
+	tMarkers := m.typeMarkers[typ]
+
+	return NewMarkerSet(tMarkers.UnsortedList()...)
+}
+
+func (m *markers) insertFieldMarkers(field *ast.Field, ms MarkerSet) {
+	m.fieldMarkers[field] = ms
+}
+
+func (m *markers) insertStructMarkers(sTyp *ast.StructType, ms MarkerSet) {
+	m.structMarkers[sTyp] = ms
+}
+
+func (m *markers) insertTypeMarkers(typ *ast.TypeSpec, ms MarkerSet) {
+	m.typeMarkers[typ] = ms
+}
+
+// Analyzer is the analyzer for the markers package.
+// It iterates over declarations within a package and parses the comments to extract markers.
+var Analyzer = &analysis.Analyzer{
+	Name:       "markers",
+	Doc:        "Iterates over declarations within a package and parses the comments to extract markers",
+	Run:        run,
+	Requires:   []*analysis.Analyzer{inspect.Analyzer},
+	ResultType: reflect.TypeOf(newMarkers()),
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	if !ok {
+		return nil, errCouldNotGetInspector
+	}
+
+	nodeFilter := []ast.Node{
+		// In order to get the godoc comments from a type
+		// definition as such:
+		//
+		//	// comment
+		//	type Foo struct {...}
+		//
+		// We need to use the ast.GenDecl type instead of the
+		// ast.TypeSpec type. The ast.TypeSpec.Doc field will only
+		// be populated if types are defined as such:
+		//
+		//	type(
+		//		// comment
+		//		Foo struct {...}
+		//	)
+		//
+		// For more information, see https://github.com/golang/go/issues/27477
+		(*ast.GenDecl)(nil),
+		(*ast.Field)(nil),
+	}
+
+	results, ok := newMarkers().(*markers)
+	if !ok {
+		return nil, errCouldNotCreateMarkers
+	}
+
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		switch typ := n.(type) {
+		case *ast.GenDecl:
+			extractGenDeclMarkers(typ, results)
+		case *ast.Field:
+			extractFieldMarkers(typ, results)
+		}
+	})
+
+	return results, nil
+}
+
+func extractGenDeclMarkers(typ *ast.GenDecl, results *markers) {
+	declMarkers := NewMarkerSet()
+
+	if typ.Doc != nil {
+		for _, comment := range typ.Doc.List {
+			if marker := extractMarker(comment); marker.Identifier != "" {
+				declMarkers.Insert(marker)
+			}
+		}
+	}
+
+	if len(typ.Specs) == 0 {
+		return
+	}
+
+	tSpec, ok := typ.Specs[0].(*ast.TypeSpec)
+	if !ok {
+		return
+	}
+
+	results.insertTypeMarkers(tSpec, declMarkers)
+
+	if sTyp, ok := tSpec.Type.(*ast.StructType); ok {
+		results.insertStructMarkers(sTyp, declMarkers)
+	}
+}
+
+func extractFieldMarkers(field *ast.Field, results *markers) {
+	if field == nil || field.Doc == nil {
+		return
+	}
+
+	fieldMarkers := NewMarkerSet()
+
+	for _, comment := range field.Doc.List {
+		if marker := extractMarker(comment); marker.Identifier != "" {
+			fieldMarkers.Insert(marker)
+		}
+	}
+
+	results.insertFieldMarkers(field, fieldMarkers)
+}
+
+func extractMarker(comment *ast.Comment) Marker {
+	if !strings.HasPrefix(comment.Text, "// +") {
+		return Marker{}
+	}
+
+	markerContent := strings.TrimPrefix(comment.Text, "// +")
+	id, expressions := extractMarkerIDAndExpressions(DefaultRegistry(), markerContent)
+
+	return Marker{
+		Identifier:  id,
+		Expressions: expressions,
+		RawComment:  comment.Text,
+		Pos:         comment.Pos(),
+		End:         comment.End(),
+	}
+}
+
+func extractMarkerIDAndExpressions(knownMarkers Registry, marker string) (string, map[string]string) {
+	if id, ok := knownMarkers.Match(marker); ok {
+		return extractKnownMarkerIDAndExpressions(id, marker)
+	}
+
+	return extractUnknownMarkerIDAndExpressions(marker)
+}
+
+func extractKnownMarkerIDAndExpressions(id string, marker string) (string, map[string]string) {
+	return id, extractExpressions(strings.TrimPrefix(marker, id))
+}
+
+func extractExpressions(expressions string) map[string]string {
+	expressionsMap := map[string]string{}
+
+	// Do some normalization work to ensure we can parse expressions in
+	// a standard way. Trim any lingering colons (:) and replace all ':='s with '='.
+	expressions = strings.TrimPrefix(expressions, ":")
+	expressions = strings.ReplaceAll(expressions, ":=", "=")
+
+	// Split the expression string on commas (,) to handle multiple expressions
+	// in a single marker.
+	chainedExpressions := strings.Split(expressions, ",")
+	for _, chainedExpression := range chainedExpressions {
+		exps := strings.SplitN(chainedExpression, "=", 2)
+		if len(exps) < 2 {
+			continue
+		}
+
+		expressionsMap[exps[0]] = exps[1]
+	}
+
+	return expressionsMap
+}
+
+func extractUnknownMarkerIDAndExpressions(marker string) (string, map[string]string) {
+	// If there is only a single "=", split on the equal sign and trim any
+	// dangling ":" characters.
+	if strings.Count(marker, "=") == 1 {
+		splits := strings.Split(marker, "=")
+		// Trim any dangling ":" characters on the identifier to handle
+		// cases like +kubebuilder:object:root:=true
+		identifier := strings.TrimSuffix(splits[0], ":")
+
+		// If there is a single "=" sign that means the left side of the
+		// marker is the identifier and there is no named expression key.
+		expressions := map[string]string{UnnamedExpression: splits[1]}
+
+		return identifier, expressions
+	}
+
+	// split on :
+	separators := strings.Split(marker, ":")
+
+	identifier := ""
+	expressionString := ""
+
+	for _, item := range separators {
+		// Not an expression
+		if strings.Count(item, "=") == 0 {
+			if identifier == "" {
+				identifier = item
+
+				continue
+			}
+
+			identifier = strings.Join([]string{identifier, item}, ":")
+
+			continue
+		}
+
+		// The item is likely an expression, join it with any existing expression string.
+		// While something like 'foo:bar=baz:value=something' isn't a valid marker based on our
+		// current understanding, this logic should ensure we are joining expressions appropriately
+		// in a scenario like this.
+		if expressionString == "" {
+			expressionString = item
+			continue
+		}
+
+		expressionString = strings.Join([]string{expressionString, item}, ",")
+	}
+
+	expressions := extractExpressions(expressionString)
+
+	return identifier, expressions
+}
+
+// Marker represents a marker extracted from a comment on a declaration.
+type Marker struct {
+	// Identifier is the value of the marker once the leading comment, '+', and expressions are trimmed.
+	Identifier string
+
+	// Expressions are the set of expressions that have been specified for the marker.
+	Expressions map[string]string
+
+	// RawComment is the raw comment line, unfiltered.
+	RawComment string
+
+	// Pos is the starting position in the file for the comment line containing the marker.
+	Pos token.Pos
+
+	// End is the ending position in the file for the comment line containing the marker.
+	End token.Pos
+}
+
+// MarkerSet is a set implementation for Markers that uses
+// the Marker identifier as the key, but returns all full Markers
+// with that identifier as the result.
+type MarkerSet map[string][]Marker
+
+// NewMarkerSet initialises a new MarkerSet with the provided values.
+// If any markers have the same identifier, they will both be added to
+// the list of markers for that identifier. No duplication checks are implemented.
+func NewMarkerSet(markers ...Marker) MarkerSet {
+	ms := make(MarkerSet)
+
+	ms.Insert(markers...)
+
+	return ms
+}
+
+// Insert adds the given markers to the MarkerSet.
+// If any markers have the same identifier, they are appended to the existing
+// list for that identifier; no duplication checks are implemented.
+func (ms MarkerSet) Insert(markers ...Marker) {
+	for _, marker := range markers {
+		ms[marker.Identifier] = append(ms[marker.Identifier], marker)
+	}
+}
+
+// Has returns whether marker(s) with the given identifier are present in the
+// MarkerSet. If Has returns true, there is at least one marker
+// with this identifier.
+func (ms MarkerSet) Has(identifier string) bool {
+	_, ok := ms[identifier]
+	return ok
+}
+
+// HasWithValue returns whether marker(s) with the given identifier and
+// expression values (e.g. "kubebuilder:object:root:=true") are present
+// in the MarkerSet.
+func (ms MarkerSet) HasWithValue(marker string) bool {
+	return ms.HasWithExpressions(extractMarkerIDAndExpressions(DefaultRegistry(), marker))
+}
+
+// HasWithExpressions returns whether marker(s) with the identifier and
+// expressions are present in the MarkerSet.
+func (ms MarkerSet) HasWithExpressions(identifier string, expressions map[string]string) bool {
+	markers, ok := ms[identifier]
+	if !ok {
+		return false
+	}
+
+	for _, marker := range markers {
+		if reflect.DeepEqual(marker.Expressions, expressions) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// UnsortedList returns a list of the markers, in no particular order.
+func (ms MarkerSet) UnsortedList() []Marker {
+	markers := []Marker{}
+
+	for _, marker := range ms {
+		markers = append(markers, marker...)
+	}
+
+	return markers
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/doc.go
new file mode 100644
index 0000000000..bb10107469
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/doc.go
@@ -0,0 +1,53 @@
+/*
+markers is a helper used to extract marker information from types.
+A marker is a comment line prefixed with `+` that indicates to a generator something about the field or type.
+
+The package returns a [Markers] interface, which can be used to access markers associated with a struct or a field within a struct.
+
+Example:
+
+	inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	markersAccess := pass.ResultOf[markers.Analyzer].(markers.Markers)
+
+	// Filter to structs so that we can iterate over fields in a struct.
+	nodeFilter := []ast.Node{
+		(*ast.StructType)(nil),
+	}
+
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		sTyp, ok := n.(*ast.StructType)
+		if !ok {
+			return
+		}
+
+		if sTyp.Fields == nil {
+			return
+		}
+
+		for _, field := range sTyp.Fields.List {
+			if field == nil || len(field.Names) == 0 {
+				continue
+			}
+
+			structMarkers := markersAccess.StructMarkers(sTyp)
+			fieldMarkers := markersAccess.FieldMarkers(field)
+
+			...
+		}
+	})
+
+The result of StructMarkers or FieldMarkers is a [MarkerSet] which can be used to determine the presence of a marker, and the value of the marker.
+The MarkerSet is indexed by the identifier of the marker, once the prefix `+` is removed.
+
+Additional information about the marker can be found in the [Marker] struct, for each marker on the field.
+
+Example:
+
+	fieldMarkers := markersAccess.FieldMarkers(field)
+
+	if fieldMarkers.Has("required") {
+		requiredMarker := fieldMarkers["required"]
+		...
+	}
+*/
+package markers
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/registry.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/registry.go
new file mode 100644
index 0000000000..615f2e2f41
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/helpers/markers/registry.go
@@ -0,0 +1,83 @@
+package markers
+
+import (
+	"strings"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// Registry is a thread-safe set of known marker identifiers.
+type Registry interface {
+	// Register adds the provided identifiers to the Registry.
+	Register(ids ...string)
+
+	// Match performs a greedy match to determine if an input
+	// marker string matches a known marker identifier. It returns
+	// the matched identifier and a boolean representing if a match was
+	// found. If no match is found, the returned identifier will be an
+	// empty string.
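+	//
+	// For example (an illustrative case): if both "kubebuilder:validation"
+	// and "kubebuilder:validation:XValidation" are registered, an input of
+	// "kubebuilder:validation:XValidation:rule='...'" matches the longer
+	// identifier.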
+	Match(in string) (string, bool)
+}
+
+var defaultRegistry = NewRegistry() //nolint:gochecknoglobals
+
+// DefaultRegistry is a global registry for known markers.
+// New linters should register the markers they care about during
+// an init() function.
+func DefaultRegistry() Registry {
+	return defaultRegistry
+}
+
+// registry is the default, thread-safe implementation of the Registry interface.
+type registry struct {
+	identifiers sets.Set[string]
+	mu          sync.Mutex
+}
+
+// NewRegistry creates a new Registry.
+func NewRegistry() Registry {
+	return &registry{
+		identifiers: sets.New[string](),
+		mu:          sync.Mutex{},
+	}
+}
+
+// Register adds the provided identifiers to the Registry.
+func (r *registry) Register(ids ...string) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.identifiers.Insert(ids...)
+}
+
+// Match performs a greedy match to determine if an input
+// marker string matches a known marker identifier. It returns
+// the matched identifier and a boolean representing if a match was
+// found. If no match is found, the returned identifier will be an
+// empty string.
+func (r *registry) Match(in string) (string, bool) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	// If there is an exact match, return early.
+	// This is likely when using markers with no expressions like
+	// optional, required, kubebuilder:validation:Required, etc.
+	if ok := r.identifiers.Has(in); ok {
+		return in, true
+	}
+
+	// Look for the longest matching known identifier
+	bestMatch := ""
+	foundMatch := false
+
+	for _, id := range r.identifiers.UnsortedList() {
+		if strings.HasPrefix(in, id) {
+			if len(bestMatch) < len(id) {
+				bestMatch = id
+				foundMatch = true
+			}
+		}
+	}
+
+	return bestMatch, foundMatch
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/analyzer.go
new file mode 100644
index 0000000000..000052cacb
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/analyzer.go
@@ -0,0 +1,64 @@
+package integers
+
+import (
+	"errors"
+	"go/ast"
+
+	"github.com/JoelSpeed/kal/pkg/analysis/utils"
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+const name = "integers"
+
+var (
+	errCouldNotGetInspector = errors.New("could not get inspector")
+)
+
+// Analyzer is the analyzer for the integers package.
+// It checks that no struct fields or type aliases are `int`, or unsigned integers.
+var Analyzer = &analysis.Analyzer{
+	Name:     name,
+	Doc:      "All integers should be explicit about their size, int32 and int64 should be used over plain int. Unsigned ints are not allowed.",
+	Run:      run,
+	Requires: []*analysis.Analyzer{inspect.Analyzer},
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	if !ok {
+		return nil, errCouldNotGetInspector
+	}
+
+	// Filter to fields so that we can look at fields within structs.
+	// Filter typespecs so that we can look at type aliases.
+	nodeFilter := []ast.Node{
+		(*ast.StructType)(nil),
+		(*ast.TypeSpec)(nil),
+	}
+
+	typeChecker := utils.NewTypeChecker(checkIntegers)
+
+	// Preorder visits all the nodes of the AST in depth-first order. It calls
+	// f(n) for each node n before it visits n's children.
+	//
+	// We use the filter defined above, ensuring we only look at struct fields and type declarations.
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		typeChecker.CheckNode(pass, n)
+	})
+
+	return nil, nil //nolint:nilnil
+}
+
+// checkIntegers looks for known integer types that do not match the allowed `int32` or `int64` requirements.
+func checkIntegers(pass *analysis.Pass, ident *ast.Ident, node ast.Node, prefix string) {
+	switch ident.Name {
+	case "int32", "int64":
+		// Valid cases
+	case "int", "int8", "int16":
+		pass.Reportf(node.Pos(), "%s should not use an int, int8 or int16. Use int32 or int64 depending on bounding requirements", prefix)
+	case "uint", "uint8", "uint16", "uint32", "uint64":
+		pass.Reportf(node.Pos(), "%s should not use unsigned integers, use only int32 or int64 and apply validation to ensure the value is positive", prefix)
+	}
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/doc.go
new file mode 100644
index 0000000000..2cbe3691ff
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/doc.go
@@ -0,0 +1,15 @@
+/*
+integers is an analyzer that checks for usage of unsupported integer types.
+
+According to the API conventions, only int32 and int64 types should be used in Kubernetes APIs.
+
+int32 is preferred and should be used in most cases, unless the use case requires representing
+values larger than int32.
+
+The conventions also state that unsigned integers should be replaced with signed integers, and that numeric
+lower bounds should be added to prevent negative values.
+
+Succinctly, this analyzer checks for int, int8, int16, uint, uint8, uint16, uint32 and uint64 types
+and highlights that they should not be used.
+*/
+package integers
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/initializer.go
new file mode 100644
index 0000000000..dc41ec663e
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/integers/initializer.go
@@ -0,0 +1,30 @@
+package integers
+
+import (
+	"github.com/JoelSpeed/kal/pkg/config"
+	"golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+	return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+	return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+	return Analyzer, nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+	return true
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/analyzer.go
new file mode 100644
index 0000000000..156f945a7d
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/analyzer.go
@@ -0,0 +1,122 @@
+package jsontags
+
+import (
+	"errors"
+	"fmt"
+	"go/ast"
+	"regexp"
+
+	"github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags"
+	"github.com/JoelSpeed/kal/pkg/config"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+	// camelCaseRegex is a regular expression that matches camel case strings.
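+	// For example, "name" and "requestTTLSeconds" match, while "snake_case"
+	// and "UpperCamel" do not (illustrative examples derived from the
+	// pattern below).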
+	camelCaseRegex = "^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$"
+
+	name = "jsontags"
+)
+
+var (
+	errCouldNotGetInspector = errors.New("could not get inspector")
+	errCouldNotGetJSONTags  = errors.New("could not get json tags")
+)
+
+type analyzer struct {
+	jsonTagRegex *regexp.Regexp
+}
+
+// newAnalyzer creates a new analyzer with the given json tag regex.
+func newAnalyzer(cfg config.JSONTagsConfig) (*analysis.Analyzer, error) {
+	defaultConfig(&cfg)
+
+	jsonTagRegex, err := regexp.Compile(cfg.JSONTagRegex)
+	if err != nil {
+		return nil, fmt.Errorf("could not compile json tag regex: %w", err)
+	}
+
+	a := &analyzer{
+		jsonTagRegex: jsonTagRegex,
+	}
+
+	return &analysis.Analyzer{
+		Name:     name,
+		Doc:      "Check that all struct fields in an API are tagged with json tags",
+		Run:      a.run,
+		Requires: []*analysis.Analyzer{inspect.Analyzer, extractjsontags.Analyzer},
+	}, nil
+}
+
+func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) {
+	inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	if !ok {
+		return nil, errCouldNotGetInspector
+	}
+
+	jsonTags, ok := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags)
+	if !ok {
+		return nil, errCouldNotGetJSONTags
+	}
+
+	// Filter to structs so that we can iterate over fields in a struct.
+	nodeFilter := []ast.Node{
+		(*ast.StructType)(nil),
+	}
+
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		sTyp, ok := n.(*ast.StructType)
+		if !ok {
+			return
+		}
+
+		if sTyp.Fields == nil {
+			return
+		}
+
+		for _, field := range sTyp.Fields.List {
+			a.checkField(pass, field, jsonTags)
+		}
+	})
+
+	return nil, nil //nolint:nilnil
+}
+
+func (a *analyzer) checkField(pass *analysis.Pass, field *ast.Field, jsonTags extractjsontags.StructFieldTags) {
+	tagInfo := jsonTags.FieldTags(field)
+
+	var prefix string
+	if len(field.Names) > 0 && field.Names[0] != nil {
+		prefix = fmt.Sprintf("field %s", field.Names[0].Name)
+	} else if ident, ok := field.Type.(*ast.Ident); ok {
+		prefix = fmt.Sprintf("embedded field %s", ident.Name)
+	}
+
+	if tagInfo.Missing {
+		pass.Reportf(field.Pos(), "%s is missing json tag", prefix)
+		return
+	}
+
+	if tagInfo.Inline {
+		return
+	}
+
+	if tagInfo.Name == "" {
+		pass.Reportf(field.Pos(), "%s has empty json tag", prefix)
+		return
+	}
+
+	matched := a.jsonTagRegex.Match([]byte(tagInfo.Name))
+	if !matched {
+		pass.Reportf(field.Pos(), "%s json tag does not match pattern %q: %s", prefix, a.jsonTagRegex.String(), tagInfo.Name)
+	}
+}
+
+func defaultConfig(cfg *config.JSONTagsConfig) {
+	if cfg.JSONTagRegex == "" {
+		cfg.JSONTagRegex = camelCaseRegex
+	}
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/doc.go
new file mode 100644
index 0000000000..1e3ad31cd2
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/doc.go
@@ -0,0 +1,15 @@
+/*
+jsontags provides a linter to ensure that JSON tags are present on struct fields, and that they match a given regex.
+
+Kubernetes API types should have JSON tags on all fields, to ensure that the fields are correctly serialized and deserialized.
+The JSON tags should be camelCase, with a lower case first letter, and should match the field name in all but capitalization.
+There should be no hyphens or underscores in either the field name or the JSON tag.
+
+The linter can be configured with a regex to match the JSON tags against.
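+
+For example (an illustrative configuration snippet, not a complete file):
+
+	config.JSONTagsConfig{
+		JSONTagRegex: `^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]+)*$`,
+	}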
+By default, the regex is `^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$`, which allows for camelCase JSON tags, with consecutive capital letters,
+to allow, for example, for fields like `requestTTLSeconds`.
+
+To disallow consecutive capital letters, the regex can be set to `^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]+)*$`.
+The regex can be configured with the JSONTagRegex field in the JSONTagsConfig struct.
+*/
+package jsontags
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/initializer.go
new file mode 100644
index 0000000000..75844302a8
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/jsontags/initializer.go
@@ -0,0 +1,30 @@
+package jsontags
+
+import (
+	"github.com/JoelSpeed/kal/pkg/config"
+	"golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+	return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+	return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+	return newAnalyzer(cfg.JSONTags)
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+	return true
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/analyzer.go
new file mode 100644
index 0000000000..dc967f87e1
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/analyzer.go
@@ -0,0 +1,225 @@
+package maxlength
+
+import (
+	"errors"
+	"fmt"
+	"go/ast"
+
+	"github.com/JoelSpeed/kal/pkg/analysis/helpers/markers"
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+	name = "maxlength"
+
+	kubebuilderMaxLength = "kubebuilder:validation:MaxLength"
+	kubebuilderEnum      = "kubebuilder:validation:Enum"
+	kubebuilderFormat    = "kubebuilder:validation:Format"
+
+	kubebuilderItemsMaxLength = "kubebuilder:validation:items:MaxLength"
+	kubebuilderItemsEnum      = "kubebuilder:validation:items:Enum"
+	kubebuilderItemsFormat    = "kubebuilder:validation:items:Format"
+
+	kubebuilderMaxItems = "kubebuilder:validation:MaxItems"
+)
+
+var (
+	errCouldNotGetInspector = errors.New("could not get inspector")
+	errCouldNotGetMarkers   = errors.New("could not get markers")
+)
+
+// Analyzer is the analyzer for the maxlength package.
+// It checks that strings and arrays have maximum lengths and maximum items respectively.
+var Analyzer = &analysis.Analyzer{
+	Name:     name,
+	Doc:      "Checks that all string fields are marked with a maximum length, and that arrays are marked with max items.",
+	Run:      run,
+	Requires: []*analysis.Analyzer{inspect.Analyzer, markers.Analyzer},
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	if !ok {
+		return nil, errCouldNotGetInspector
+	}
+
+	markersAccess, ok := pass.ResultOf[markers.Analyzer].(markers.Markers)
+	if !ok {
+		return nil, errCouldNotGetMarkers
+	}
+
+	// Filter to structs so that we can iterate over the fields in a struct.
+	nodeFilter := []ast.Node{
+		(*ast.StructType)(nil),
+	}
+
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		sTyp, ok := n.(*ast.StructType)
+		if !ok {
+			return
+		}
+
+		if sTyp.Fields == nil {
+			return
+		}
+
+		for _, field := range sTyp.Fields.List {
+			checkField(pass, field, markersAccess)
+		}
+	})
+
+	return nil, nil //nolint:nilnil
+}
+
+func checkField(pass *analysis.Pass, field *ast.Field, markersAccess markers.Markers) {
+	if len(field.Names) == 0 || field.Names[0] == nil {
+		return
+	}
+
+	fieldName := field.Names[0].Name
+	prefix := fmt.Sprintf("field %s", fieldName)
+
+	checkTypeExpr(pass, field.Type, field, nil, markersAccess, prefix, kubebuilderMaxLength, needsStringMaxLength)
+}
+
+func checkIdent(pass *analysis.Pass, ident *ast.Ident, node ast.Node, aliases []*ast.TypeSpec, markersAccess markers.Markers, prefix, marker string, needsMaxLength func(markers.MarkerSet) bool) {
+	if ident.Obj == nil { // Built-in type
+		checkString(pass, ident, node, aliases, markersAccess, prefix, marker, needsMaxLength)
+
+		return
+	}
+
+	tSpec, ok := ident.Obj.Decl.(*ast.TypeSpec)
+	if !ok {
+		return
+	}
+
+	checkTypeSpec(pass, tSpec, node, append(aliases, tSpec), markersAccess, fmt.Sprintf("%s type", prefix), marker, needsMaxLength)
+}
+
+func checkString(pass *analysis.Pass, ident *ast.Ident, node ast.Node, aliases []*ast.TypeSpec, markersAccess markers.Markers, prefix, marker string, needsMaxLength func(markers.MarkerSet) bool) {
+	if ident.Name != "string" {
+		return
+	}
+
+	markers := getCombinedMarkers(markersAccess, node, aliases)
+
+	if needsMaxLength(markers) {
+		pass.Reportf(node.Pos(), "%s must have a maximum length, add %s marker", prefix, marker)
+	}
+}
+
+func checkTypeSpec(pass *analysis.Pass, tSpec *ast.TypeSpec, node ast.Node, aliases []*ast.TypeSpec, markersAccess markers.Markers, prefix, marker string, needsMaxLength func(markers.MarkerSet) bool) {
+	if tSpec.Name == nil {
+		return
+	}
+
+	typeName := tSpec.Name.Name
+	prefix = fmt.Sprintf("%s %s", prefix, typeName)
+
+	checkTypeExpr(pass, tSpec.Type, node, aliases, markersAccess, prefix, marker, needsMaxLength)
+}
+
+func checkTypeExpr(pass *analysis.Pass, typeExpr ast.Expr, node ast.Node, aliases []*ast.TypeSpec, markersAccess markers.Markers, prefix, marker string, needsMaxLength func(markers.MarkerSet) bool) {
+	switch typ := typeExpr.(type) {
+	case *ast.Ident:
+		checkIdent(pass, typ, node, aliases, markersAccess, prefix, marker, needsMaxLength)
+	case *ast.StarExpr:
+		checkTypeExpr(pass, typ.X, node, aliases, markersAccess, prefix, marker, needsMaxLength)
+	case *ast.ArrayType:
+		checkArrayType(pass, typ, node, aliases, markersAccess, prefix)
+	}
+}
+
+func checkArrayType(pass *analysis.Pass, arrayType *ast.ArrayType, node ast.Node, aliases []*ast.TypeSpec, markersAccess markers.Markers, prefix string) {
+	if arrayType.Elt != nil {
+		if ident, ok := arrayType.Elt.(*ast.Ident); ok {
+			checkArrayElementIdent(pass, ident, node, aliases, markersAccess, fmt.Sprintf("%s array element", prefix))
+		}
+	}
+
+	markers := getCombinedMarkers(markersAccess, node, aliases)
+
+	if !markers.Has(kubebuilderMaxItems) {
+		pass.Reportf(node.Pos(), "%s must have a maximum number of items, add %s marker", prefix, kubebuilderMaxItems)
+	}
+}
+
+func checkArrayElementIdent(pass *analysis.Pass, ident *ast.Ident, node ast.Node, aliases []*ast.TypeSpec, markersAccess markers.Markers, prefix string) {
+	if ident.Obj == nil { // Built-in type
+		checkString(pass, ident, node, aliases, markersAccess, prefix, kubebuilderItemsMaxLength, needsItemsMaxLength)
+
+		return
+	}
+
+	tSpec, ok := ident.Obj.Decl.(*ast.TypeSpec)
+	if !ok {
+		return
+	}
+
+	// If the array element wasn't directly a string, allow a string alias to be used
+	// with either the items style markers, or with the markers on the alias itself.
+	checkTypeSpec(pass, tSpec, node, append(aliases, tSpec), markersAccess, fmt.Sprintf("%s type", prefix), kubebuilderMaxLength, func(ms markers.MarkerSet) bool {
+		return needsStringMaxLength(ms) && needsItemsMaxLength(ms)
+	})
+}
+
+func getCombinedMarkers(markersAccess markers.Markers, node ast.Node, aliases []*ast.TypeSpec) markers.MarkerSet {
+	base := markers.NewMarkerSet(getMarkers(markersAccess, node).UnsortedList()...)
+
+	for _, a := range aliases {
+		base.Insert(getMarkers(markersAccess, a).UnsortedList()...)
+	}
+
+	return base
+}
+
+func getMarkers(markersAccess markers.Markers, node ast.Node) markers.MarkerSet {
+	switch t := node.(type) {
+	case *ast.Field:
+		return markersAccess.FieldMarkers(t)
+	case *ast.TypeSpec:
+		return markersAccess.TypeMarkers(t)
+	}
+
+	return nil
+}
+
+// needsStringMaxLength returns true if the field needs a maximum length.
+// Fields do not need a maximum length if they are already marked with a maximum length,
+// or if they are an enum, or if they are a date, date-time or duration.
+func needsStringMaxLength(markerSet markers.MarkerSet) bool {
+	switch {
+	case markerSet.Has(kubebuilderMaxLength),
+		markerSet.Has(kubebuilderEnum),
+		markerSet.HasWithValue(kubebuilderFormatWithValue("date")),
+		markerSet.HasWithValue(kubebuilderFormatWithValue("date-time")),
+		markerSet.HasWithValue(kubebuilderFormatWithValue("duration")):
+		return false
+	}
+
+	return true
+}
+
+func needsItemsMaxLength(markerSet markers.MarkerSet) bool {
+	switch {
+	case markerSet.Has(kubebuilderItemsMaxLength),
+		markerSet.Has(kubebuilderItemsEnum),
+		markerSet.HasWithValue(kubebuilderItemsFormatWithValue("date")),
+		markerSet.HasWithValue(kubebuilderItemsFormatWithValue("date-time")),
+		markerSet.HasWithValue(kubebuilderItemsFormatWithValue("duration")):
+		return false
+	}
+
+	return true
+}
+
+func kubebuilderFormatWithValue(value string) string {
+	return fmt.Sprintf("%s:=%s", kubebuilderFormat, value)
+}
+
+func kubebuilderItemsFormatWithValue(value string) string {
+	return fmt.Sprintf("%s:=%s", kubebuilderItemsFormat, value)
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/doc.go
new file mode 100644
index 0000000000..25cf68de9a
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/doc.go
@@ -0,0 +1,19 @@
+/*
+maxlength is an analyzer that checks that all string fields have a maximum length, and that all array fields have a maximum number of items.
+
+String fields that are not otherwise bound in length, through being an enum or formatted in a certain way, should have a maximum length.
+This ensures that CEL validations on the field are not overly costly in terms of time and memory.
+
+Array fields should have a maximum number of items.
+This ensures that any CEL validations on the field are not overly costly in terms of time and memory.
+Where arrays are used to represent a list of structures, CEL rules may exist within the array.
+Limiting the array length ensures the cardinality of the rules within the array is not unbounded.
+
+For strings, the maximum length can be set using the `kubebuilder:validation:MaxLength` marker.
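+For example, a bounded string field might look like this (an illustrative
+sketch, not taken from a real API):
+
+	// +kubebuilder:validation:MaxLength=256
+	// +optional
+	DisplayName string `json:"displayName,omitempty"`
+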
+For arrays, the maximum number of items can be set using the `kubebuilder:validation:MaxItems` marker.
+
+For arrays of strings, the maximum length of each string can be set using the `kubebuilder:validation:items:MaxLength` marker
+on the array field itself.
+Or, if the array uses a string type alias, the `kubebuilder:validation:MaxLength` marker can be used on the alias.
+*/
+package maxlength
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/initializer.go
new file mode 100644
index 0000000000..53ff89ac81
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/maxlength/initializer.go
@@ -0,0 +1,30 @@
+package maxlength
+
+import (
+	"github.com/JoelSpeed/kal/pkg/config"
+	"golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+	return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+	return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+	return Analyzer, nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+	return false // For now, CRD only, and so not on by default.
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/analyzer.go
new file mode 100644
index 0000000000..afdc928f3f
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/analyzer.go
@@ -0,0 +1,58 @@
+package nobools
+
+import (
+	"errors"
+	"go/ast"
+
+	"github.com/JoelSpeed/kal/pkg/analysis/utils"
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+const name = "nobools"
+
+var (
+	errCouldNotGetInspector = errors.New("could not get inspector")
+)
+
+// Analyzer is the analyzer for the nobools package.
+// It checks that no struct fields are `bool`.
+var Analyzer = &analysis.Analyzer{
+	Name:     name,
+	Doc:      "Boolean values cannot evolve over time, use an enum with meaningful values instead",
+	Run:      run,
+	Requires: []*analysis.Analyzer{inspect.Analyzer},
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	if !ok {
+		return nil, errCouldNotGetInspector
+	}
+
+	// Filter to fields so that we can look at fields within structs.
+	// Filter typespecs so that we can look at type aliases.
+	nodeFilter := []ast.Node{
+		(*ast.StructType)(nil),
+		(*ast.TypeSpec)(nil),
+	}
+
+	typeChecker := utils.NewTypeChecker(checkBool)
+
+	// Preorder visits all the nodes of the AST in depth-first order. It calls
+	// f(n) for each node n before it visits n's children.
+	//
+	// We use the filter defined above, ensuring we only look at struct fields and type declarations.
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		typeChecker.CheckNode(pass, n)
+	})
+
+	return nil, nil //nolint:nilnil
+}
+
+func checkBool(pass *analysis.Pass, ident *ast.Ident, node ast.Node, prefix string) {
+	if ident.Name == "bool" {
+		pass.Reportf(node.Pos(), "%s should not use a bool. Use a string type with meaningful constant values as an enum.", prefix)
+	}
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/doc.go
new file mode 100644
index 0000000000..2165e6a424
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/doc.go
@@ -0,0 +1,15 @@
+/*
+nobools is an analyzer that checks for usage of bool types.
+
+Boolean values can only ever have 2 states, true or false.
+Over time, needs may change, and with a bool type, there is no way to add additional states.
+This problem then leads to pairs of bools, where values of one are only valid given the value of another.
+This is confusing and error-prone.
+
+It is recommended instead to use a string type with a set of constants to represent the different states,
+creating an enum.
+
+By using an enum, not only can you provide meaningful names for the various states of the API,
+but you can also add additional states in the future without breaking the API.
+*/
+package nobools
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/initializer.go
new file mode 100644
index 0000000000..e12ac15893
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nobools/initializer.go
@@ -0,0 +1,32 @@
+package nobools
+
+import (
+	"github.com/JoelSpeed/kal/pkg/config"
+	"golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+	return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+	return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+	return Analyzer, nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+	// Avoidance of bools in the Kube conventions is not a must.
+	// Make this opt in depending on the project's own preference.
+	return false
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/analyzer.go
new file mode 100644
index 0000000000..bdec59f1c4
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/analyzer.go
@@ -0,0 +1,85 @@
+package nophase
+
+import (
+	"errors"
+	"go/ast"
+	"strings"
+
+	"github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags"
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+const name = "nophase"
+
+var (
+	errCouldNotGetInspector = errors.New("could not get inspector")
+	errCouldNotGetJSONTags  = errors.New("could not get json tags")
+)
+
+// Analyzer is the analyzer for the nophase package.
+// It checks that no struct fields named 'phase', or containing 'phase' as a
+// substring, are present.
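+//
+// For example, both a field named Phase and a field named FooPhase would be
+// reported, as would a field whose json tag contains "phase".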
+var Analyzer = &analysis.Analyzer{
+	Name:     name,
+	Doc:      "phase fields are deprecated and conditions should be preferred, avoid phase like enum fields",
+	Run:      run,
+	Requires: []*analysis.Analyzer{inspect.Analyzer, extractjsontags.Analyzer},
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+	inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	if !ok {
+		return nil, errCouldNotGetInspector
+	}
+
+	jsonTags, ok := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags)
+	if !ok {
+		return nil, errCouldNotGetJSONTags
+	}
+
+	// Filter to fields so that we can iterate over fields in a struct.
+	nodeFilter := []ast.Node{
+		(*ast.Field)(nil),
+	}
+
+	// Preorder visits all the nodes of the AST in depth-first order. It calls
+	// f(n) for each node n before it visits n's children.
+	//
+	// We use the filter defined above, ensuring we only look at struct fields.
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		field, ok := n.(*ast.Field)
+		if !ok {
+			return
+		}
+
+		if field == nil || len(field.Names) == 0 {
+			return
+		}
+
+		fieldName := field.Names[0].Name
+
+		// First check if the struct field name contains 'phase'
+		if strings.Contains(strings.ToLower(fieldName), "phase") {
+			pass.Reportf(field.Pos(),
+				"field %s: phase fields are deprecated and conditions should be preferred, avoid phase like enum fields",
+				fieldName,
+			)
+
+			return
+		}
+
+		// Then check if the json serialization of the field contains 'phase'
+		tagInfo := jsonTags.FieldTags(field)
+
+		if strings.Contains(strings.ToLower(tagInfo.Name), "phase") {
+			pass.Reportf(field.Pos(),
+				"field %s: phase fields are deprecated and conditions should be preferred, avoid phase like enum fields",
+				fieldName,
+			)
+		}
+	})
+
+	return nil, nil //nolint:nilnil
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/doc.go
new file mode 100644
index 0000000000..edeb3f6f2e
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/doc.go
@@ -0,0 +1,11 @@
+/*
+nophase provides a linter to ensure that structs do not contain a Phase field.
+
+Phase fields are deprecated and conditions should be preferred. Avoid phase like enum
+fields.
+
+The linter will flag any struct field containing the substring 'phase'. This means both
+Phase and FooPhase will be flagged.
+*/
+package nophase
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/initializer.go
new file mode 100644
index 0000000000..0554aecb59
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/nophase/initializer.go
@@ -0,0 +1,30 @@
+package nophase
+
+import (
+	"github.com/JoelSpeed/kal/pkg/config"
+	"golang.org/x/tools/go/analysis"
+)
+
+// Initializer returns the AnalyzerInitializer for this
+// Analyzer so that it can be added to the registry.
+func Initializer() initializer {
+	return initializer{}
+}
+
+// initializer implements the AnalyzerInitializer interface.
+type initializer struct{}
+
+// Name returns the name of the Analyzer.
+func (initializer) Name() string {
+	return name
+}
+
+// Init returns the initialized Analyzer.
+func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) {
+	return Analyzer, nil
+}
+
+// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool {
+	return true
+}
diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/analyzer.go
new file mode 100644
index 0000000000..74b866eb29
--- /dev/null
+++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/analyzer.go
@@ -0,0 +1,243 @@
+package optionalorrequired
+
+import (
+	"errors"
+	"fmt"
+	"go/ast"
+
+	"github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags"
+	"github.com/JoelSpeed/kal/pkg/analysis/helpers/markers"
+	"github.com/JoelSpeed/kal/pkg/config"
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+)
+
+const (
+	name = "optionalorrequired"
+
+	// OptionalMarker is the marker that indicates that a field is optional.
+	OptionalMarker = "optional"
+
+	// RequiredMarker is the marker that indicates that a field is required.
+	RequiredMarker = "required"
+
+	// KubebuilderOptionalMarker is the marker that indicates that a field is optional in kubebuilder.
+	KubebuilderOptionalMarker = "kubebuilder:validation:Optional"
+
+	// KubebuilderRequiredMarker is the marker that indicates that a field is required in kubebuilder.
+	KubebuilderRequiredMarker = "kubebuilder:validation:Required"
+)
+
+func init() {
+	markers.DefaultRegistry().Register(
+		OptionalMarker,
+		RequiredMarker,
+		KubebuilderOptionalMarker,
+		KubebuilderRequiredMarker,
+	)
+}
+
+var (
+	errCouldNotGetInspector = errors.New("could not get inspector")
+	errCouldNotGetMarkers   = errors.New("could not get markers")
+	errCouldNotGetJSONTags  = errors.New("could not get jsontags")
+)
+
+type analyzer struct {
+	primaryOptionalMarker   string
+	secondaryOptionalMarker string
+
+	primaryRequiredMarker   string
+	secondaryRequiredMarker string
+}
+
+// newAnalyzer creates a new analyzer with the given configuration.
+func newAnalyzer(cfg config.OptionalOrRequiredConfig) *analysis.Analyzer {
+	defaultConfig(&cfg)
+
+	a := &analyzer{}
+
+	switch cfg.PreferredOptionalMarker {
+	case OptionalMarker:
+		a.primaryOptionalMarker = OptionalMarker
+		a.secondaryOptionalMarker = KubebuilderOptionalMarker
+	case KubebuilderOptionalMarker:
+		a.primaryOptionalMarker = KubebuilderOptionalMarker
+		a.secondaryOptionalMarker = OptionalMarker
+	}
+
+	switch cfg.PreferredRequiredMarker {
+	case RequiredMarker:
+		a.primaryRequiredMarker = RequiredMarker
+		a.secondaryRequiredMarker = KubebuilderRequiredMarker
+	case KubebuilderRequiredMarker:
+		a.primaryRequiredMarker = KubebuilderRequiredMarker
+		a.secondaryRequiredMarker = RequiredMarker
+	}
+
+	return &analysis.Analyzer{
+		Name:     name,
+		Doc:      "Checks that all struct fields are marked either with the optional or required markers.",
+		Run:      a.run,
+		Requires: []*analysis.Analyzer{inspect.Analyzer, markers.Analyzer, extractjsontags.Analyzer},
+	}
+}
+
+func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) {
+	inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+	if !ok {
+		return nil, errCouldNotGetInspector
+	}
+
+	markersAccess, ok := pass.ResultOf[markers.Analyzer].(markers.Markers)
+	if !ok {
+		return nil, errCouldNotGetMarkers
+	}
+
+	jsonTags, ok := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags)
+	if !ok {
+		return nil, errCouldNotGetJSONTags
+	}
+
+	// Filter to structs so that we can iterate over fields in a struct.
+	nodeFilter := []ast.Node{
+		(*ast.StructType)(nil),
+	}
+
+	inspect.Preorder(nodeFilter, func(n ast.Node) {
+		sTyp, ok := n.(*ast.StructType)
+		if !ok {
+			return
+		}
+
+		if sTyp.Fields == nil {
+			return
+		}
+
+		for _, field := range sTyp.Fields.List {
+			fieldMarkers := markersAccess.FieldMarkers(field)
+			fieldTagInfo := jsonTags.FieldTags(field)
+
+			a.checkField(pass, field, fieldMarkers, fieldTagInfo)
+		}
+	})
+
+	return nil, nil //nolint:nilnil
+}
+
+//nolint:cyclop
+func (a *analyzer) checkField(pass *analysis.Pass, field *ast.Field, fieldMarkers markers.MarkerSet, fieldTagInfo extractjsontags.FieldTagInfo) {
+	if fieldTagInfo.Inline {
+		// Inline fields would have no effect if they were marked as optional/required.
+		return
+	}
+
+	var prefix string
+	if len(field.Names) > 0 && field.Names[0] != nil {
+		prefix = fmt.Sprintf("field %s", field.Names[0].Name)
+	} else if ident, ok := field.Type.(*ast.Ident); ok {
+		prefix = fmt.Sprintf("embedded field %s", ident.Name)
+	}
+
+	hasPrimaryOptional := fieldMarkers.Has(a.primaryOptionalMarker)
+	hasPrimaryRequired := fieldMarkers.Has(a.primaryRequiredMarker)
+
+	hasSecondaryOptional := fieldMarkers.Has(a.secondaryOptionalMarker)
+	hasSecondaryRequired := fieldMarkers.Has(a.secondaryRequiredMarker)
+
+	hasEitherOptional := hasPrimaryOptional || hasSecondaryOptional
+	hasEitherRequired := hasPrimaryRequired || hasSecondaryRequired
+
+	hasBothOptional := hasPrimaryOptional && hasSecondaryOptional
+	hasBothRequired := hasPrimaryRequired && hasSecondaryRequired
+
+	switch {
+	case hasEitherOptional && hasEitherRequired:
+		pass.Reportf(field.Pos(), "%s must not be marked as both optional and required", prefix)
+	case hasSecondaryOptional:
+		marker := fieldMarkers[a.secondaryOptionalMarker]
+		if hasBothOptional {
+			pass.Report(reportShouldRemoveSecondaryMarker(field, marker, a.primaryOptionalMarker, a.secondaryOptionalMarker, prefix))
+		} else {
+			pass.Report(reportShouldReplaceSecondaryMarker(field, marker, a.primaryOptionalMarker, a.secondaryOptionalMarker, prefix))
+		}
+	case hasSecondaryRequired:
+		marker := fieldMarkers[a.secondaryRequiredMarker]
+		if hasBothRequired {
+			pass.Report(reportShouldRemoveSecondaryMarker(field, marker, a.primaryRequiredMarker, a.secondaryRequiredMarker, prefix))
+		} else {
+			pass.Report(reportShouldReplaceSecondaryMarker(field, marker, a.primaryRequiredMarker, a.secondaryRequiredMarker, prefix))
+		}
+	case hasPrimaryOptional || hasPrimaryRequired:
+		// This is the correct state.
+ default: + pass.Reportf(field.Pos(), "%s must be marked as %s or %s", prefix, a.primaryOptionalMarker, a.primaryRequiredMarker) + } +} + +func reportShouldReplaceSecondaryMarker(field *ast.Field, marker []markers.Marker, primaryMarker, secondaryMarker, prefix string) analysis.Diagnostic { + textEdits := make([]analysis.TextEdit, len(marker)) + + for i, m := range marker { + if i == 0 { + textEdits[i] = analysis.TextEdit{ + Pos: m.Pos, + End: m.End, + NewText: []byte(fmt.Sprintf("// +%s", primaryMarker)), + } + + continue + } + + textEdits[i] = analysis.TextEdit{ + Pos: m.Pos, + End: m.End + 1, // Add 1 to position to include the new line + NewText: nil, + } + } + + return analysis.Diagnostic{ + Pos: field.Pos(), + Message: fmt.Sprintf("%s should use marker %s instead of %s", prefix, primaryMarker, secondaryMarker), + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: fmt.Sprintf("should replace `%s` with `%s`", secondaryMarker, primaryMarker), + TextEdits: textEdits, + }, + }, + } +} + +func reportShouldRemoveSecondaryMarker(field *ast.Field, marker []markers.Marker, primaryMarker, secondaryMarker, prefix string) analysis.Diagnostic { + textEdits := make([]analysis.TextEdit, len(marker)) + + for i, m := range marker { + textEdits[i] = analysis.TextEdit{ + Pos: m.Pos, + End: m.End + 1, // Add 1 to position to include the new line + NewText: nil, + } + } + + return analysis.Diagnostic{ + Pos: field.Pos(), + Message: fmt.Sprintf("%s should use only the marker %s, %s is not required", prefix, primaryMarker, secondaryMarker), + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: fmt.Sprintf("should remove `// +%s`", secondaryMarker), + TextEdits: textEdits, + }, + }, + } +} + +func defaultConfig(cfg *config.OptionalOrRequiredConfig) { + if cfg.PreferredOptionalMarker == "" { + cfg.PreferredOptionalMarker = OptionalMarker + } + + if cfg.PreferredRequiredMarker == "" { + cfg.PreferredRequiredMarker = RequiredMarker + } +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/doc.go new file mode 100644 index 0000000000..2898daf6d8 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/doc.go @@ -0,0 +1,24 @@ +/* +optionalorrequired is a linter to ensure that all fields are marked as either optional or required. +By default, it searches for the `+optional` and `+required` markers, and ensures that all fields are marked +with at least one of these markers. + +The linter can be configured to use different markers, by setting the `PreferredOptionalMarker` and `PreferredRequiredMarker`. +The default values are `+optional` and `+required`, respectively. +The available alternate values for each marker are: + +For PreferredOptionalMarker: + - `+optional`: The standard Kubernetes marker for optional fields. + - `+kubebuilder:validation:Optional`: The Kubebuilder marker for optional fields. + +For PreferredRequiredMarker: + - `+required`: The standard Kubernetes marker for required fields. + - `+kubebuilder:validation:Required`: The Kubebuilder marker for required fields. + +When a field is marked with both the Kubernetes and Kubebuilder markers, the linter will suggest to remove the Kubebuilder marker. +When a field is marked only with the Kubebuilder marker, the linter will suggest to use the Kubernetes marker instead. 
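+ +For example, with the default configuration, a field declared as follows (the field name here is illustrative) + +	// +kubebuilder:validation:Optional +	Name string `json:"name,omitempty"` + +is reported with a suggested fix replacing the marker with `+optional`, while a field carrying both markers is reported with a suggested fix removing the Kubebuilder one.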
+This behaviour is reversed when the `PreferredOptionalMarker` and `PreferredRequiredMarker` are set to the Kubebuilder markers. + +Use the linter fix option to automatically apply suggested fixes. +*/ +package optionalorrequired diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/initializer.go new file mode 100644 index 0000000000..6d671628c9 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired/initializer.go @@ -0,0 +1,30 @@ +package optionalorrequired + +import ( + "github.com/JoelSpeed/kal/pkg/config" + "golang.org/x/tools/go/analysis" +) + +// Initializer returns the AnalyzerInitializer for this +// Analyzer so that it can be added to the registry. +func Initializer() initializer { + return initializer{} +} + +// initializer implements the AnalyzerInitializer interface. +type initializer struct{} + +// Name returns the name of the Analyzer. +func (initializer) Name() string { + return name +} + +// Init returns the initialized Analyzer. +func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) { + return newAnalyzer(cfg.OptionalOrRequired), nil +} + +// Default determines whether this Analyzer is on by default, or not. +func (initializer) Default() bool { + return true +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/registry.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/registry.go new file mode 100644 index 0000000000..eda7daeefa --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/registry.go @@ -0,0 +1,122 @@ +package analysis + +import ( + "fmt" + + "github.com/JoelSpeed/kal/pkg/analysis/commentstart" + "github.com/JoelSpeed/kal/pkg/analysis/conditions" + "github.com/JoelSpeed/kal/pkg/analysis/integers" + "github.com/JoelSpeed/kal/pkg/analysis/jsontags" + "github.com/JoelSpeed/kal/pkg/analysis/maxlength" + "github.com/JoelSpeed/kal/pkg/analysis/nobools" + "github.com/JoelSpeed/kal/pkg/analysis/nophase" + "github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired" + "github.com/JoelSpeed/kal/pkg/analysis/requiredfields" + "github.com/JoelSpeed/kal/pkg/analysis/statussubresource" + "github.com/JoelSpeed/kal/pkg/config" + "golang.org/x/tools/go/analysis" + + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +// AnalyzerInitializer is used to initialize analyzers. +type AnalyzerInitializer interface { + // Name returns the name of the analyzer initialized by this initializer. + Name() string + + // Init returns the newly initialized analyzer. + // It will be passed the complete LintersConfig and is expected to rely only on its own configuration. + Init(config.LintersConfig) (*analysis.Analyzer, error) + + // Default determines whether the initializer initializes an analyzer that should be + // on by default, or not. + Default() bool +} + +// Registry is used to fetch and initialize analyzers. +type Registry interface { + // DefaultLinters returns the names of linters that are enabled by default. + DefaultLinters() sets.Set[string] + + // AllLinters returns the names of all registered linters. + AllLinters() sets.Set[string] + + // InitializeLinters returns a set of newly initialized linters based on the + // provided configuration.
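+ // The '*' wildcard is honoured: '*' in Enable selects every registered linter, + // while '*' in Disable restricts the run to the explicitly enabled set. Linters + // that are on by default run unless they are named in Disable.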
+ InitializeLinters(config.Linters, config.LintersConfig) ([]*analysis.Analyzer, error) +} + +type registry struct { + initializers []AnalyzerInitializer +} + +// NewRegistry returns a new registry, from which analyzers can be fetched. +func NewRegistry() Registry { + return ®istry{ + initializers: []AnalyzerInitializer{ + conditions.Initializer(), + commentstart.Initializer(), + integers.Initializer(), + jsontags.Initializer(), + maxlength.Initializer(), + nobools.Initializer(), + nophase.Initializer(), + optionalorrequired.Initializer(), + requiredfields.Initializer(), + statussubresource.Initializer(), + }, + } +} + +// DefaultLinters returns the list of linters that are registered +// as being enabled by default. +func (r *registry) DefaultLinters() sets.Set[string] { + defaultLinters := sets.New[string]() + + for _, initializer := range r.initializers { + if initializer.Default() { + defaultLinters.Insert(initializer.Name()) + } + } + + return defaultLinters +} + +// AllLinters returns the list of all known linters that are known +// to the registry. +func (r *registry) AllLinters() sets.Set[string] { + linters := sets.New[string]() + + for _, initializer := range r.initializers { + linters.Insert(initializer.Name()) + } + + return linters +} + +// InitializeLinters returns a list of initialized linters based on the provided config. +func (r *registry) InitializeLinters(cfg config.Linters, lintersCfg config.LintersConfig) ([]*analysis.Analyzer, error) { + analyzers := []*analysis.Analyzer{} + errs := []error{} + + enabled := sets.New(cfg.Enable...) + disabled := sets.New(cfg.Disable...) + + allEnabled := enabled.Len() == 1 && enabled.Has(config.Wildcard) + allDisabled := disabled.Len() == 1 && disabled.Has(config.Wildcard) + + for _, initializer := range r.initializers { + if !disabled.Has(initializer.Name()) && (allEnabled || enabled.Has(initializer.Name()) || !allDisabled && initializer.Default()) { + a, err := initializer.Init(lintersCfg) + if err != nil { + errs = append(errs, fmt.Errorf("failed to initialize linter %s: %w", initializer.Name(), err)) + continue + } + + analyzers = append(analyzers, a) + } + } + + return analyzers, kerrors.NewAggregate(errs) +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/analyzer.go new file mode 100644 index 0000000000..6aa50c3407 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/analyzer.go @@ -0,0 +1,157 @@ +package requiredfields + +import ( + "errors" + "fmt" + "go/ast" + "strings" + + "github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags" + "github.com/JoelSpeed/kal/pkg/analysis/helpers/markers" + "github.com/JoelSpeed/kal/pkg/config" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const ( + name = "requiredfields" + + requiredMarker = "required" + kubebuilderRequiredMarker = "kubebuilder:validation:Required" +) + +func init() { + markers.DefaultRegistry().Register(requiredMarker, kubebuilderRequiredMarker) +} + +var ( + errCouldNotGetInspector = errors.New("could not get inspector") + errCouldNotGetMarkers = errors.New("could not get markers") + errCouldNotGetJSONTags = errors.New("could not get json tags") +) + +type analyzer struct { + pointerPolicy config.RequiredFieldPointerPolicy +} + +// newAnalyzer creates a new analyzer. 
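+// The analyzer flags required fields that carry the omitempty tag or that are pointers; +// cfg.PointerPolicy controls whether pointer findings also carry a suggested fix.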
+func newAnalyzer(cfg config.RequiredFieldsConfig) *analysis.Analyzer { + defaultConfig(&cfg) + + a := &analyzer{ + pointerPolicy: cfg.PointerPolicy, + } + + return &analysis.Analyzer{ + Name: name, + Doc: "Checks that all required fields are not pointers, and do not have the omitempty tag.", + Run: a.run, + Requires: []*analysis.Analyzer{inspect.Analyzer, markers.Analyzer, extractjsontags.Analyzer}, + } +} + +func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { + inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if !ok { + return nil, errCouldNotGetInspector + } + + markersAccess, ok := pass.ResultOf[markers.Analyzer].(markers.Markers) + if !ok { + return nil, errCouldNotGetMarkers + } + + jsonTags, ok := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags) + if !ok { + return nil, errCouldNotGetJSONTags + } + + // Filter to fields so that we can iterate over fields in a struct. + nodeFilter := []ast.Node{ + (*ast.Field)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + field, ok := n.(*ast.Field) + if !ok { + return + } + + fieldMarkers := markersAccess.FieldMarkers(field) + fieldTagInfo := jsonTags.FieldTags(field) + + a.checkField(pass, field, fieldMarkers, fieldTagInfo) + }) + + return nil, nil //nolint:nilnil +} + +func (a *analyzer) checkField(pass *analysis.Pass, field *ast.Field, fieldMarkers markers.MarkerSet, fieldTagInfo extractjsontags.FieldTagInfo) { + if field == nil || len(field.Names) == 0 { + return + } + + fieldName := field.Names[0].Name + + if !fieldMarkers.Has(requiredMarker) && !fieldMarkers.Has(kubebuilderRequiredMarker) { + // The field is not marked required, so we don't need to check it. + return + } + + if fieldTagInfo.OmitEmpty { + pass.Report(analysis.Diagnostic{ + Pos: field.Pos(), + Message: fmt.Sprintf("field %s is marked as required, but has the omitempty tag", fieldName), + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "should remove the omitempty tag", + TextEdits: []analysis.TextEdit{ + { + Pos: fieldTagInfo.Pos, + End: fieldTagInfo.End, + NewText: []byte(strings.Replace(fieldTagInfo.RawValue, ",omitempty", "", 1)), + }, + }, + }, + }, + }) + } + + if field.Type == nil { + // The field has no type? We can't check if it's a pointer. + return + } + + if starExpr, ok := field.Type.(*ast.StarExpr); ok { + var suggestedFixes []analysis.SuggestedFix + + switch a.pointerPolicy { + case config.RequiredFieldPointerWarn: + // Do not suggest a fix. 
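+ // For illustration (hypothetical field): with SuggestFix, a required field declared as + // `Replicas *int32` receives a text edit deleting the '*', leaving `Replicas int32`; + // with Warn, only the diagnostic below is reported.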
+ case config.RequiredFieldPointerSuggestFix: + suggestedFixes = append(suggestedFixes, analysis.SuggestedFix{ + Message: "should remove the pointer", + TextEdits: []analysis.TextEdit{ + { + Pos: starExpr.Pos(), + End: starExpr.X.Pos(), + NewText: nil, + }, + }, + }) + } + + pass.Report(analysis.Diagnostic{ + Pos: field.Pos(), + Message: fmt.Sprintf("field %s is marked as required, should not be a pointer", fieldName), + SuggestedFixes: suggestedFixes, + }) + } +} + +func defaultConfig(cfg *config.RequiredFieldsConfig) { + if cfg.PointerPolicy == "" { + cfg.PointerPolicy = config.RequiredFieldPointerSuggestFix + } +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/doc.go new file mode 100644 index 0000000000..9eb82f368c --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/doc.go @@ -0,0 +1,11 @@ +/* +requiredFields is a linter to check that fields that are marked as required are not pointers, and do not have the omitempty tag. +The linter will check for fields that are marked as required using the +required marker, or the +kubebuilder:validation:Required marker. + +The linter will suggest to remove the omitempty tag from fields that are marked as required, but have the omitempty tag. +The linter will suggest to remove the pointer type from fields that are marked as required. + +If you have a large, existing codebase, you may not want to automatically fix all of the pointer issues. +In this case, you can configure the linter not to suggest fixing the pointer issues by setting the `pointerPolicy` option to `Warn`. +*/ +package requiredfields diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/initializer.go new file mode 100644 index 0000000000..30779c00ad --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/requiredfields/initializer.go @@ -0,0 +1,30 @@ +package requiredfields + +import ( + "github.com/JoelSpeed/kal/pkg/config" + "golang.org/x/tools/go/analysis" +) + +// Initializer returns the AnalyzerInitializer for this +// Analyzer so that it can be added to the registry. +func Initializer() initializer { + return initializer{} +} + +// initializer implements the AnalyzerInitializer interface. +type initializer struct{} + +// Name returns the name of the Analyzer. +func (initializer) Name() string { + return name +} + +// Init returns the initialized Analyzer. +func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) { + return newAnalyzer(cfg.RequiredFields), nil +} + +// Default determines whether this Analyzer is on by default, or not.
+func (initializer) Default() bool { + return true +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/analyzer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/analyzer.go new file mode 100644 index 0000000000..68b5e02335 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/analyzer.go @@ -0,0 +1,151 @@ +package statussubresource + +import ( + "errors" + "fmt" + "go/ast" + "go/token" + + "github.com/JoelSpeed/kal/pkg/analysis/helpers/extractjsontags" + "github.com/JoelSpeed/kal/pkg/analysis/helpers/markers" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const ( + name = "statussubresource" + + statusJSONTag = "status" + + // kubebuilderRootMarker is the marker that indicates that a struct is the object root for code and CRD generation. + kubebuilderRootMarker = "kubebuilder:object:root:=true" + + // kubebuilderStatusSubresourceMarker is the marker that indicates that the CRD generated for a struct should include the /status subresource. + kubebuilderStatusSubresourceMarker = "kubebuilder:subresource:status" +) + +var ( + errCouldNotGetInspector = errors.New("could not get inspector") + errCouldNotGetMarkers = errors.New("could not get markers") + errCouldNotGetJSONTags = errors.New("could not get json tags") +) + +type analyzer struct{} + +// newAnalyzer creates a new analyzer with the given configuration. +func newAnalyzer() *analysis.Analyzer { + a := &analyzer{} + + return &analysis.Analyzer{ + Name: name, + Doc: "Checks that a type marked with kubebuilder:object:root:=true and containing a status field is marked with kubebuilder:subresource:status", + Run: a.run, + Requires: []*analysis.Analyzer{inspect.Analyzer, markers.Analyzer, extractjsontags.Analyzer}, + } +} + +func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { + inspect, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if !ok { + return nil, errCouldNotGetInspector + } + + markersAccess, ok := pass.ResultOf[markers.Analyzer].(markers.Markers) + if !ok { + return nil, errCouldNotGetMarkers + } + + jsonTags, ok := pass.ResultOf[extractjsontags.Analyzer].(extractjsontags.StructFieldTags) + if !ok { + return nil, errCouldNotGetJSONTags + } + + // Filter to type specs so we can get the names of types + nodeFilter := []ast.Node{ + (*ast.TypeSpec)(nil), + } + + inspect.Preorder(nodeFilter, func(n ast.Node) { + typeSpec, ok := n.(*ast.TypeSpec) + if !ok { + return + } + + // we only care about struct types + sTyp, ok := typeSpec.Type.(*ast.StructType) + if !ok { + return + } + + // no identifier on the type + if typeSpec.Name == nil { + return + } + + structMarkers := markersAccess.StructMarkers(sTyp) + a.checkStruct(pass, sTyp, typeSpec.Name.Name, structMarkers, jsonTags) + }) + + return nil, nil //nolint:nilnil +} + +func (a *analyzer) checkStruct(pass *analysis.Pass, sTyp *ast.StructType, name string, structMarkers markers.MarkerSet, jsonTags extractjsontags.StructFieldTags) { + if sTyp == nil { + return + } + + if !structMarkers.HasWithValue(kubebuilderRootMarker) { + return + } + + hasStatusSubresourceMarker := structMarkers.Has(kubebuilderStatusSubresourceMarker) + hasStatusField := hasStatusField(sTyp, jsonTags) + + switch { + case (hasStatusSubresourceMarker && hasStatusField), (!hasStatusSubresourceMarker && !hasStatusField): + // acceptable state + case hasStatusSubresourceMarker && !hasStatusField: + // 
Might be able to have some suggested fixes here, but it is likely much more complex + // so for now leave it with a descriptive failure message. + pass.Reportf(sTyp.Pos(), "root object type %q is marked to enable the status subresource with marker %q but has no status field", name, kubebuilderStatusSubresourceMarker) + case !hasStatusSubresourceMarker && hasStatusField: + // In this case we can suggest the autofix to add the status subresource marker + pass.Report(analysis.Diagnostic{ + Pos: sTyp.Pos(), + Message: fmt.Sprintf("root object type %q has a status field but does not have the marker %q to enable the status subresource", name, kubebuilderStatusSubresourceMarker), + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "should add the kubebuilder:subresource:status marker", + TextEdits: []analysis.TextEdit{ + // go one line above the struct and add the marker + { + // sTyp.Pos() is the beginning of the 'struct' keyword. Subtract + // the length of the struct name + 7 (2 for spaces surrounding type name, 4 for the 'type' keyword, + // and 1 for the newline) to position at the end of the line above the struct + // definition. + Pos: sTyp.Pos() - token.Pos(len(name)+7), + // prefix with a newline to ensure we aren't appending to a previous comment + NewText: []byte("\n// +kubebuilder:subresource:status"), + }, + }, + }, + }, + }) + } +} + +func hasStatusField(sTyp *ast.StructType, jsonTags extractjsontags.StructFieldTags) bool { + if sTyp == nil || sTyp.Fields == nil || sTyp.Fields.List == nil { + return false + } + + for _, field := range sTyp.Fields.List { + info := jsonTags.FieldTags(field) + if info.Name == statusJSONTag { + return true + } + } + + return false +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/doc.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/doc.go new file mode 100644 index 0000000000..b601b65301 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/doc.go @@ -0,0 +1,10 @@ +// statussubresource is a linter to check that the status subresource is configured correctly for +// structs marked with the 'kubebuilder:object:root:=true' marker. Correct configuration is that +// when there is a status field the 'kubebuilder:subresource:status' marker is present on the struct +// OR when the 'kubebuilder:subresource:status' marker is present on the struct there is a status field. +// +// In the case where there is a status field present but no 'kubebuilder:subresource:status' marker, the +// linter will suggest adding the comment '// +kubebuilder:subresource:status' above the struct. +// +// This linter is not enabled by default as it is only applicable to CustomResourceDefinitions. +package statussubresource diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/initializer.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/initializer.go new file mode 100644 index 0000000000..6a03facdd1 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/statussubresource/initializer.go @@ -0,0 +1,31 @@ +package statussubresource + +import ( + "github.com/JoelSpeed/kal/pkg/config" + "golang.org/x/tools/go/analysis" +) + +// Initializer returns the AnalyzerInitializer for this +// Analyzer so that it can be added to the registry. +func Initializer() initializer { + return initializer{} +} + +// intializer implements the AnalyzerInitializer interface. 
+type initializer struct{} + +// Name returns the name of the Analyzer. +func (initializer) Name() string { + return name +} + +// Init returns the intialized Analyzer. +func (initializer) Init(cfg config.LintersConfig) (*analysis.Analyzer, error) { + return newAnalyzer(), nil +} + +// Default determines whether this Analyzer is on by default, or not. +func (initializer) Default() bool { + // This check only applies to CRDs so should not be on by default. + return false +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/utils/type_check.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/utils/type_check.go new file mode 100644 index 0000000000..4cf7c2f30c --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/analysis/utils/type_check.go @@ -0,0 +1,101 @@ +package utils + +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/go/analysis" +) + +// TypeChecker is an interface for checking types in an AST. +// It is used to check the underlying, built-in types of fields within structs, and raw type declarations. +// It is up to the implementer to provide a function that will be called when a built-in type is found, +// and to provide the logic for providing analysis about this type. +type TypeChecker interface { + CheckNode(pass *analysis.Pass, node ast.Node) +} + +// NewTypeChecker returns a new TypeChecker with the provided checkFunc. +func NewTypeChecker(checkFunc func(pass *analysis.Pass, ident *ast.Ident, node ast.Node, prefix string)) TypeChecker { + return &typeChecker{ + checkFunc: checkFunc, + } +} + +type typeChecker struct { + checkFunc func(pass *analysis.Pass, ident *ast.Ident, node ast.Node, prefix string) +} + +// CheckNode checks the provided node for built-in types. +// It will iterate through fields within structs, and raw type declarations. +// Calling the checkFunc when a built-in type is found. +func (t *typeChecker) CheckNode(pass *analysis.Pass, node ast.Node) { + switch n := node.(type) { + case *ast.StructType: + if n.Fields == nil { + return + } + + for _, field := range n.Fields.List { + t.checkField(pass, field) + } + case *ast.Field: + t.checkField(pass, n) + case *ast.TypeSpec: + t.checkTypeSpec(pass, n, node, "type") + } +} + +func (t *typeChecker) checkField(pass *analysis.Pass, field *ast.Field) { + if field == nil || len(field.Names) == 0 || field.Names[0] == nil { + return + } + + fieldName := field.Names[0].Name + prefix := fmt.Sprintf("field %s", fieldName) + + t.checkTypeExpr(pass, field.Type, field, prefix) +} + +func (t *typeChecker) checkTypeSpec(pass *analysis.Pass, tSpec *ast.TypeSpec, node ast.Node, prefix string) { + if tSpec.Name == nil { + return + } + + typeName := tSpec.Name.Name + prefix = fmt.Sprintf("%s %s", prefix, typeName) + + t.checkTypeExpr(pass, tSpec.Type, node, prefix) +} + +func (t *typeChecker) checkTypeExpr(pass *analysis.Pass, typeExpr ast.Expr, node ast.Node, prefix string) { + switch typ := typeExpr.(type) { + case *ast.Ident: + t.checkIdent(pass, typ, node, prefix) + case *ast.StarExpr: + t.checkTypeExpr(pass, typ.X, node, fmt.Sprintf("%s pointer", prefix)) + case *ast.ArrayType: + t.checkTypeExpr(pass, typ.Elt, node, fmt.Sprintf("%s array element", prefix)) + case *ast.MapType: + t.checkTypeExpr(pass, typ.Key, node, fmt.Sprintf("%s map key", prefix)) + t.checkTypeExpr(pass, typ.Value, node, fmt.Sprintf("%s map value", prefix)) + } +} + +// checkIdent calls the checkFunc with the ident, when we have hit a built-in type. 
+// If the ident is not a built in, we look at the underlying type until we hit a built-in type. +func (t *typeChecker) checkIdent(pass *analysis.Pass, ident *ast.Ident, node ast.Node, prefix string) { + if ident.Obj == nil || ident.Obj.Decl == nil { + // We've hit a built-in type, no need to check further. + t.checkFunc(pass, ident, node, prefix) + return + } + + tSpec, ok := ident.Obj.Decl.(*ast.TypeSpec) + if !ok { + return + } + + // The field is using a type alias, check if the alias is an int. + t.checkTypeSpec(pass, tSpec, node, fmt.Sprintf("%s type", prefix)) +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/config.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/config.go new file mode 100644 index 0000000000..9e83bf4e1a --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/config.go @@ -0,0 +1,12 @@ +package config + +// GolangCIConfig is the complete configuration for the KAL +// linter when built as an integration into golangci-lint. +type GolangCIConfig struct { + // Linters allows the user to configure which linters should, + // and should not be enabled. + Linters Linters `mapstructure:"linters"` + + // LintersConfig contains configuration for individual linters. + LintersConfig LintersConfig `mapstructure:"lintersConfig"` +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/linters.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/linters.go new file mode 100644 index 0000000000..237f1ea298 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/linters.go @@ -0,0 +1,25 @@ +package config + +const ( + // Wildcard is used to imply all linters should be enabled/disabled. + Wildcard = "*" +) + +// Linters allows the user to configure which linters should, and +// should not be enabled. +type Linters struct { + // Enable is used to enable specific linters. + // Use '*' to enable all known linters. + // When using '*', it should be the only value in the list. + // Values in this list will be added to the linters enabled by default. + // Values should not appear in both 'enable' and 'disable'. + Enable []string `mapstructure:"enable"` + + // Disable is used to disable specific linters. + // Use '*' to disable all known linters. When all linters are disabled, + // only those explicitly called out in 'Enable' will be enabled. + // When using '*', it should be the only value in the list. + // Values in this list will be added to the linters disabled by default. + // Values should not appear in both 'enable' and 'disable'. + Disable []string `mapstructure:"disable"` +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/linters_config.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/linters_config.go new file mode 100644 index 0000000000..b0987c1ee9 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/config/linters_config.go @@ -0,0 +1,101 @@ +package config + +// LintersConfig contains configuration for individual linters. +type LintersConfig struct { + // conditions contains configuration for the conditions linter. + Conditions ConditionsConfig `json:"conditions"` + + // jsonTags contains configuration for the jsontags linter. + JSONTags JSONTagsConfig `json:"jsonTags"` + + // optionalOrRequired contains configuration for the optionalorrequired linter. + OptionalOrRequired OptionalOrRequiredConfig `json:"optionalOrRequired"` + + // requiredFields contains configuration for the requiredfields linter. 
+ RequiredFields RequiredFieldsConfig `json:"requiredFields"` +} + +// ConditionsFirstField is the policy for the conditions linter. +type ConditionsFirstField string + +const ( + // ConditionsFirstFieldWarn indicates that the conditions should be the first field in the struct. + ConditionsFirstFieldWarn ConditionsFirstField = "Warn" + + // ConditionsFirstFieldIgnore indicates that the conditions do not need to be the first field in the struct. + ConditionsFirstFieldIgnore ConditionsFirstField = "Ignore" +) + +// ConditionsUseProtobuf is the policy for the conditions linter. +type ConditionsUseProtobuf string + +const ( + // ConditionsUseProtobufSuggestFix indicates that the linter will emit a warning if the conditions are not using protobuf tags and suggest a fix. + ConditionsUseProtobufSuggestFix ConditionsUseProtobuf = "SuggestFix" + + // ConditionsUseProtobufWarn indicates that the linter will emit a warning if the conditions are not using protobuf tags. + ConditionsUseProtobufWarn ConditionsUseProtobuf = "Warn" + + // ConditionsUseProtobufIgnore indicates that the linter will not emit a warning if the conditions are not using protobuf tags. + ConditionsUseProtobufIgnore ConditionsUseProtobuf = "Ignore" +) + +// ConditionsConfig contains configuration for the conditions linter. +type ConditionsConfig struct { + // isFirstField indicates whether the conditions should be the first field in the struct. + // Valid values are Warn and Ignore. + // When set to Warn, the linter will emit a warning if the conditions are not the first field in the struct. + // When set to Ignore, the linter will not emit a warning if the conditions are not the first field in the struct. + // When otherwise not specified, the default value is Warn. + IsFirstField ConditionsFirstField `json:"isFirstField"` + + // useProtobuf indicates whether the linter should use protobuf tags. + // Valid values are SuggestFix, Warn and Ignore. + // When set to SuggestFix, the linter will emit a warning if the conditions are not using protobuf tags and suggest a fix. + // When set to Warn, the linter will emit a warning if the conditions are not using protobuf tags. + // When set to Ignore, the linter will not emit a warning if the conditions are not using protobuf tags. + // When otherwise not specified, the default value is SuggestFix. + UseProtobuf ConditionsUseProtobuf `json:"useProtobuf"` +} + +// JSONTagsConfig contains configuration for the jsontags linter. +type JSONTagsConfig struct { + // jsonTagRegex is the regular expression used to validate that json tags are in a particular format. + // By default, the regex used is "^[a-z][a-z0-9]*(?:[A-Z][a-z0-9]*)*$" and is used to check for + // camel case like string. + JSONTagRegex string `json:"jsonTagRegex"` +} + +// OptionalOrRequiredConfig contains configuration for the optionalorrequired linter. +type OptionalOrRequiredConfig struct { + // preferredOptionalMarker is the preferred marker to use for optional fields. + // If this field is not set, the default value is "optional". + // Valid values are "optional" and "kubebuilder:validation:Optional". + PreferredOptionalMarker string `json:"preferredOptionalMarker"` + + // preferredRequiredMarker is the preferred marker to use for required fields. + // If this field is not set, the default value is "required". + // Valid values are "required" and "kubebuilder:validation:Required". 
+ PreferredRequiredMarker string `json:"preferredRequiredMarker"` +} + +// RequiredFieldPointerPolicy is the policy for pointers in required fields. +type RequiredFieldPointerPolicy string + +const ( + // RequiredFieldPointerWarn indicates that the linter will emit a warning if a required field is a pointer. + RequiredFieldPointerWarn RequiredFieldPointerPolicy = "Warn" + + // RequiredFieldPointerSuggestFix indicates that the linter will emit a warning if a required field is a pointer and suggest a fix. + RequiredFieldPointerSuggestFix RequiredFieldPointerPolicy = "SuggestFix" +) + +// RequiredFieldsConfig contains configuration for the requiredfields linter. +type RequiredFieldsConfig struct { + // pointerPolicy is the policy for pointers in required fields. + // Valid values are "Warn" and "SuggestFix". + // When set to "Warn", the linter will emit a warning if a required field is a pointer. + // When set to "SuggestFix", the linter will emit a warning if a required field is a pointer and suggest a fix. + // When otherwise not specified, the default value is "SuggestFix". + PointerPolicy RequiredFieldPointerPolicy `json:"pointerPolicy"` +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/config.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/config.go new file mode 100644 index 0000000000..37a8bcdf15 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/config.go @@ -0,0 +1,21 @@ +package validation + +import ( + "github.com/JoelSpeed/kal/pkg/config" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateGolangCIConfig is used to validate the provided configuration once +// extracted from golangci-lint. +func ValidateGolangCIConfig(g config.GolangCIConfig, fldPath *field.Path) error { + if fldPath == nil { + fldPath = field.NewPath("") + } + + var fieldErrors field.ErrorList + + fieldErrors = append(fieldErrors, ValidateLinters(g.Linters, fldPath.Child("linters"))...) + fieldErrors = append(fieldErrors, ValidateLintersConfig(g.LintersConfig, fldPath.Child("lintersConfig"))...) + + return fieldErrors.ToAggregate() +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/linters.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/linters.go new file mode 100644 index 0000000000..e73fa2d95d --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/linters.go @@ -0,0 +1,49 @@ +package validation + +import ( + "fmt" + "strings" + + "github.com/JoelSpeed/kal/pkg/analysis" + "github.com/JoelSpeed/kal/pkg/config" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateLinters is used to validate the configuration in the config.Linters struct. +// +//nolint:cyclop +func ValidateLinters(l config.Linters, fldPath *field.Path) field.ErrorList { + fieldErrors := field.ErrorList{} + + enable := sets.New(l.Enable...) 
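+ // Three rules are enforced on each list below: entries must be unique, the '*' + // wildcard must stand alone, and every name must match a registered linter. A final + // check rejects any overlap between 'enable' and 'disable'.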
+ enablePath := fldPath.Child("enable") + + switch { + case len(enable) != len(l.Enable): + fieldErrors = append(fieldErrors, field.Invalid(enablePath, l.Enable, "values in 'enable' must be unique")) + case enable.Has(config.Wildcard) && enable.Len() != 1: + fieldErrors = append(fieldErrors, field.Invalid(enablePath, l.Enable, "wildcard ('*') must not be specified with other values")) + case !enable.Has(config.Wildcard) && enable.Difference(analysis.NewRegistry().AllLinters()).Len() > 0: + fieldErrors = append(fieldErrors, field.Invalid(enablePath, l.Enable, fmt.Sprintf("unknown linters: %s", strings.Join(enable.Difference(analysis.NewRegistry().AllLinters()).UnsortedList(), ",")))) + } + + disable := sets.New(l.Disable...) + disablePath := fldPath.Child("disable") + + switch { + case len(disable) != len(l.Disable): + fieldErrors = append(fieldErrors, field.Invalid(disablePath, l.Disable, "values in 'disable' must be unique")) + case disable.Has(config.Wildcard) && disable.Len() != 1: + fieldErrors = append(fieldErrors, field.Invalid(disablePath, l.Disable, "wildcard ('*') must not be specified with other values")) + case !disable.Has(config.Wildcard) && disable.Difference(analysis.NewRegistry().AllLinters()).Len() > 0: + fieldErrors = append(fieldErrors, field.Invalid(disablePath, l.Disable, fmt.Sprintf("unknown linters: %s", strings.Join(disable.Difference(analysis.NewRegistry().AllLinters()).UnsortedList(), ",")))) + } + + if enable.Intersection(disable).Len() > 0 { + fieldErrors = append(fieldErrors, field.Invalid(fldPath, l, fmt.Sprintf("values in 'enable' and 'disable' may not overlap; overlapping values: %s", strings.Join(enable.Intersection(disable).UnsortedList(), ",")))) + } + + return fieldErrors +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/linters_config.go b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/linters_config.go new file mode 100644 index 0000000000..591bb8e343 --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/pkg/validation/linters_config.go @@ -0,0 +1,87 @@ +package validation + +import ( + "fmt" + "regexp" + + "github.com/JoelSpeed/kal/pkg/analysis/optionalorrequired" + "github.com/JoelSpeed/kal/pkg/config" + + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// ValidateLintersConfig is used to validate the configuration in the config.LintersConfig struct. +func ValidateLintersConfig(lc config.LintersConfig, fldPath *field.Path) field.ErrorList { + fieldErrors := field.ErrorList{} + + fieldErrors = append(fieldErrors, validateConditionsConfig(lc.Conditions, fldPath.Child("conditions"))...) + fieldErrors = append(fieldErrors, validateJSONTagsConfig(lc.JSONTags, fldPath.Child("jsonTags"))...) + fieldErrors = append(fieldErrors, validateOptionalOrRequiredConfig(lc.OptionalOrRequired, fldPath.Child("optionalOrRequired"))...) + fieldErrors = append(fieldErrors, validateRequiredFieldsConfig(lc.RequiredFields, fldPath.Child("requiredFields"))...) + + return fieldErrors +} + +// validateConditionsConfig is used to validate the configuration in the config.ConditionsConfig struct.
+func validateConditionsConfig(cc config.ConditionsConfig, fldPath *field.Path) field.ErrorList { + fieldErrors := field.ErrorList{} + + switch cc.IsFirstField { + case "", config.ConditionsFirstFieldWarn, config.ConditionsFirstFieldIgnore: + default: + fieldErrors = append(fieldErrors, field.Invalid(fldPath.Child("isFirstField"), cc.IsFirstField, fmt.Sprintf("invalid value, must be one of %q, %q or omitted", config.ConditionsFirstFieldWarn, config.ConditionsFirstFieldIgnore))) + } + + switch cc.UseProtobuf { + case "", config.ConditionsUseProtobufSuggestFix, config.ConditionsUseProtobufWarn, config.ConditionsUseProtobufIgnore: + default: + fieldErrors = append(fieldErrors, field.Invalid(fldPath.Child("useProtobuf"), cc.UseProtobuf, fmt.Sprintf("invalid value, must be one of %q, %q, %q or omitted", config.ConditionsUseProtobufSuggestFix, config.ConditionsUseProtobufWarn, config.ConditionsUseProtobufIgnore))) + } + + return fieldErrors +} + +// validateJSONTagsConfig is used to validate the configuration in the config.JSONTagsConfig struct. +func validateJSONTagsConfig(jtc config.JSONTagsConfig, fldPath *field.Path) field.ErrorList { + fieldErrors := field.ErrorList{} + + if jtc.JSONTagRegex != "" { + if _, err := regexp.Compile(jtc.JSONTagRegex); err != nil { + fieldErrors = append(fieldErrors, field.Invalid(fldPath.Child("jsonTagRegex"), jtc.JSONTagRegex, fmt.Sprintf("invalid regex: %v", err))) + } + } + + return fieldErrors +} + +// validateOptionalOrRequiredConfig is used to validate the configuration in the config.OptionalOrRequiredConfig struct. +func validateOptionalOrRequiredConfig(oorc config.OptionalOrRequiredConfig, fldPath *field.Path) field.ErrorList { + fieldErrors := field.ErrorList{} + + switch oorc.PreferredOptionalMarker { + case "", optionalorrequired.OptionalMarker, optionalorrequired.KubebuilderOptionalMarker: + default: + fieldErrors = append(fieldErrors, field.Invalid(fldPath.Child("preferredOptionalMarker"), oorc.PreferredOptionalMarker, fmt.Sprintf("invalid value, must be one of %q, %q or omitted", optionalorrequired.OptionalMarker, optionalorrequired.KubebuilderOptionalMarker))) + } + + switch oorc.PreferredRequiredMarker { + case "", optionalorrequired.RequiredMarker, optionalorrequired.KubebuilderRequiredMarker: + default: + fieldErrors = append(fieldErrors, field.Invalid(fldPath.Child("preferredRequiredMarker"), oorc.PreferredRequiredMarker, fmt.Sprintf("invalid value, must be one of %q, %q or omitted", optionalorrequired.RequiredMarker, optionalorrequired.KubebuilderRequiredMarker))) + } + + return fieldErrors +} + +// validateRequiredFieldsConfig is used to validate the configuration in the config.RequiredFieldsConfig struct. 
+func validateRequiredFieldsConfig(rfc config.RequiredFieldsConfig, fldPath *field.Path) field.ErrorList { + fieldErrors := field.ErrorList{} + + switch rfc.PointerPolicy { + case "", config.RequiredFieldPointerWarn, config.RequiredFieldPointerSuggestFix: + default: + fieldErrors = append(fieldErrors, field.Invalid(fldPath.Child("pointerPolicy"), rfc.PointerPolicy, fmt.Sprintf("invalid value, must be one of %q, %q or omitted", config.RequiredFieldPointerWarn, config.RequiredFieldPointerSuggestFix))) + } + + return fieldErrors +} diff --git a/hack/tools/vendor/github.com/JoelSpeed/kal/plugin.go b/hack/tools/vendor/github.com/JoelSpeed/kal/plugin.go new file mode 100644 index 0000000000..ef77aead5c --- /dev/null +++ b/hack/tools/vendor/github.com/JoelSpeed/kal/plugin.go @@ -0,0 +1,55 @@ +package kal + +import ( + "fmt" + + kalanalysis "github.com/JoelSpeed/kal/pkg/analysis" + "github.com/JoelSpeed/kal/pkg/config" + "github.com/JoelSpeed/kal/pkg/validation" + "github.com/golangci/plugin-module-register/register" + "golang.org/x/tools/go/analysis" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func init() { + register.Plugin("kal", New) +} + +// New creates a new golangci-lint plugin based on the KAL analyzers. +func New(settings any) (register.LinterPlugin, error) { + s, err := register.DecodeSettings[config.GolangCIConfig](settings) + if err != nil { + return nil, fmt.Errorf("error decoding settings: %w", err) + } + + return &GolangCIPlugin{config: s}, nil +} + +// GolangCIPlugin constructs a new plugin for the golangci-lint +// plugin pattern. +// This allows golangci-lint to build a version of itself, containing +// all of the analyzers included in KAL. +type GolangCIPlugin struct { + config config.GolangCIConfig +} + +// BuildAnalyzers returns all of the analyzers to run, based on the configuration. +func (f *GolangCIPlugin) BuildAnalyzers() ([]*analysis.Analyzer, error) { + if err := validation.ValidateGolangCIConfig(f.config, field.NewPath("")); err != nil { + return nil, fmt.Errorf("error in KAL configuration: %w", err) + } + + registry := kalanalysis.NewRegistry() + + analyzers, err := registry.InitializeLinters(f.config.Linters, f.config.LintersConfig) + if err != nil { + return nil, fmt.Errorf("error initializing analyzers: %w", err) + } + + return analyzers, nil +} + +// GetLoadMode implements the golangci-lint plugin interface.
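+// LoadModeTypesInfo requests that golangci-lint load packages with full type +// information, which these analyzers require.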
+func (f *GolangCIPlugin) GetLoadMode() string { + return register.LoadModeTypesInfo +} diff --git a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/.golangci.yml b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/.golangci.yml new file mode 100644 index 0000000000..758ae1a9e4 --- /dev/null +++ b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/.golangci.yml @@ -0,0 +1,92 @@ +run: + tests: true + +output: + print-issued-lines: false + +linters: + enable-all: true + disable: + - cyclop + - depguard + - dupl + - dupword + - err113 + - errorlint + - exhaustive + - exhaustruct + - exportloopref + - forcetypeassert + - funlen + - gci + - gochecknoglobals + - gocognit + - goconst + - gocyclo + - godot + - godox + - gofumpt + - govet + - ireturn + - lll + - maintidx + - mnd + - mnd + - musttag + - nestif + - nilnil + - nlreturn + - nolintlint + - nonamedreturns + - paralleltest + - perfsprint + - predeclared + - revive + - stylecheck + - testableexamples + - testpackage + - thelper + - varnamelen + - wrapcheck + - wsl + +linters-settings: + govet: + enable: + - shadow + gocyclo: + min-complexity: 10 + dupl: + threshold: 100 + goconst: + min-len: 8 + min-occurrences: 3 + forbidigo: + exclude-godoc-examples: false + #forbid: + # - (Must)?NewLexer$ + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + exclude-use-default: false + exclude-dirs: + - _examples + exclude: + # Captured by errcheck. + - "^(G104|G204):" + # Very commonly not checked. + - 'Error return value of .(.*\.Help|.*\.MarkFlagRequired|(os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked' + - 'exported method (.*\.MarshalJSON|.*\.UnmarshalJSON|.*\.EntityURN|.*\.GoString|.*\.Pos) should have comment or be unexported' + - "composite literal uses unkeyed fields" + - 'declaration of "err" shadows declaration' + - "should not use dot imports" + - "Potential file inclusion via variable" + - "should have comment or be unexported" + - "comment on exported var .* should be of the form" + - "at least one file in a package should have a package comment" + - "string literal contains the Unicode" + - "methods on the same type should have the same receiver name" + - "_TokenType_name should be _TokenTypeName" + - "`_TokenType_map` should be `_TokenTypeMap`" + - "rewrite if-else to switch statement" diff --git a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/README.md b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/README.md index 2ccec4e848..287aa68b7f 100644 --- a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/README.md +++ b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/README.md @@ -92,6 +92,12 @@ passing checks, set the `-default-signifies-exhasutive=false` flag. As a special case, if the type switch statement contains a `default` clause that always panics, then exhaustiveness checks are still performed. +By default, `go-check-sumtype` will not include shared interfaces in the exhaustiviness check. +This can be changed by setting the `-include-shared-interfaces=true` flag. +When this flag is set, `go-check-sumtype` will not require that all concrete structs +are listed in the switch statement, as long as the switch statement is exhaustive +with respect to interfaces the structs implement. + ## Details and motivation Sum types are otherwise known as discriminated unions. 
That is, a sum type is diff --git a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/check.go b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/check.go index 1a0a32517b..ff7fec728a 100644 --- a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/check.go +++ b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/check.go @@ -29,7 +29,7 @@ func (e inexhaustiveError) Error() string { // Names returns a sorted list of names corresponding to the missing variant // cases. func (e inexhaustiveError) Names() []string { - var list []string + list := make([]string, 0, len(e.Missing)) for _, o := range e.Missing { list = append(list, o.Name()) } @@ -92,6 +92,10 @@ func missingVariantsInSwitch( ) (*sumTypeDef, []types.Object) { asserted := findTypeAssertExpr(swtch) ty := pkg.TypesInfo.TypeOf(asserted) + if ty == nil { + panic(fmt.Sprintf("no type found for asserted expression: %v", asserted)) + } + def := findDef(defs, ty) if def == nil { // We couldn't find a corresponding sum type, so there's @@ -103,11 +107,11 @@ func missingVariantsInSwitch( // A catch-all case defeats all exhaustiveness checks. return def, nil } - var variantTypes []types.Type + variantTypes := make([]types.Type, 0, len(variantExprs)) for _, expr := range variantExprs { variantTypes = append(variantTypes, pkg.TypesInfo.TypeOf(expr)) } - return def, def.missing(variantTypes) + return def, def.missing(variantTypes, config.IncludeSharedInterfaces) } // switchVariants returns all case expressions found in a type switch. This diff --git a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/config.go b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/config.go index 759176eb7d..5c722b75c4 100644 --- a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/config.go +++ b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/config.go @@ -2,4 +2,7 @@ package gochecksumtype type Config struct { DefaultSignifiesExhaustive bool + // IncludeSharedInterfaces in the exhaustiviness check. If true, we do not need to list all concrete structs, as long + // as the switch statement is exhaustive with respect to interfaces the structs implement. + IncludeSharedInterfaces bool } diff --git a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/def.go b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/def.go index 24729ac01b..71bdf2f72d 100644 --- a/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/def.go +++ b/hack/tools/vendor/github.com/alecthomas/go-check-sumtype/def.go @@ -71,7 +71,7 @@ type sumTypeDef struct { // sum type declarations. If no such sum type definition could be found for // any of the given declarations, then an error is returned. func findSumTypeDefs(decls []sumTypeDecl) ([]sumTypeDef, []error) { - var defs []sumTypeDef + defs := make([]sumTypeDef, 0, len(decls)) var errs []error for _, decl := range decls { def, err := newSumTypeDef(decl.Package.Types, decl) @@ -104,7 +104,7 @@ func newSumTypeDef(pkg *types.Package, decl sumTypeDecl) (*sumTypeDef, error) { return nil, notInterfaceError{decl} } hasUnexported := false - for i := 0; i < iface.NumMethods(); i++ { + for i := range iface.NumMethods() { if !iface.Method(i).Exported() { hasUnexported = true break @@ -145,7 +145,7 @@ func (def *sumTypeDef) String() string { // missing returns a list of variants in this sum type that are not in the // given list of types. 
-func (def *sumTypeDef) missing(tys []types.Type) []types.Object { +func (def *sumTypeDef) missing(tys []types.Type, includeSharedInterfaces bool) []types.Object { // TODO(ag): This is O(n^2). Fix that. /shrug var missing []types.Object for _, v := range def.Variants { @@ -155,15 +155,29 @@ func (def *sumTypeDef) missing(tys []types.Type) []types.Object { ty = indirect(ty) if types.Identical(varty, ty) { found = true + break + } + if includeSharedInterfaces && implements(varty, ty) { + found = true + break } } - if !found { + if !found && !isInterface(varty) { + // we do not include interfaces extending the sumtype, as the + // all implementations of those interfaces are already covered + // by the sumtype. missing = append(missing, v) } } return missing } +func isInterface(ty types.Type) bool { + underlying := indirect(ty).Underlying() + _, ok := underlying.(*types.Interface) + return ok +} + // indirect dereferences through an arbitrary number of pointer types. func indirect(ty types.Type) types.Type { if ty, ok := ty.(*types.Pointer); ok { @@ -171,3 +185,11 @@ func indirect(ty types.Type) types.Type { } return ty } + +func implements(varty, interfaceType types.Type) bool { + underlying := interfaceType.Underlying() + if interf, ok := underlying.(*types.Interface); ok { + return types.Implements(varty, interf) || types.Implements(types.NewPointer(varty), interf) + } + return false +} diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/.gitignore b/hack/tools/vendor/github.com/alingse/nilnesserr/.gitignore new file mode 100644 index 0000000000..6f72f89261 --- /dev/null +++ b/hack/tools/vendor/github.com/alingse/nilnesserr/.gitignore @@ -0,0 +1,25 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work +go.work.sum + +# env file +.env diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/.golangci.yaml b/hack/tools/vendor/github.com/alingse/nilnesserr/.golangci.yaml new file mode 100644 index 0000000000..1a2a270a66 --- /dev/null +++ b/hack/tools/vendor/github.com/alingse/nilnesserr/.golangci.yaml @@ -0,0 +1,66 @@ +linters: + enable-all: true + disable: + - wsl + - varnamelen + - nilnil + - ireturn + - gochecknoglobals + - nolintlint + +linters-settings: + depguard: + rules: + main: + list-mode: lax + files: + - $all + allow: + - $gostd + - github.com/alingse/nilnesserr + +issues: + exclude-rules: + - path: internal/typeparams + linters: + - nonamedreturns + - nlreturn + - intrange + - mnd + - forcetypeassert + - exhaustruct + - exhaustive + - err113 + - gofumpt + - prealloc + - funclen + - gocritic + - funlen + - cyclop + - gocognit + + - path: nilness.go + linters: + - nonamedreturns + - nlreturn + - nilnil + - mnd + - forcetypeassert + - gochecknoglobals + - nestif + - funlen + - godox + - gocognit + - gofumpt + - exhaustive + - cyclop + - unparam + - gocyclo + + - text: "analysis." 
+ linters: + - exhaustruct + + - text: "newAnalyzer" + linters: + - unparam diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/LICENSE b/hack/tools/vendor/github.com/alingse/nilnesserr/LICENSE new file mode 100644 index 0000000000..6caf1ea1c6 --- /dev/null +++ b/hack/tools/vendor/github.com/alingse/nilnesserr/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 alingse + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/README.md b/hack/tools/vendor/github.com/alingse/nilnesserr/README.md new file mode 100644 index 0000000000..e01bb40244 --- /dev/null +++ b/hack/tools/vendor/github.com/alingse/nilnesserr/README.md @@ -0,0 +1,74 @@ +# nilnesserr + +nilnesserr = nilness + nilerr + +`nilnesserr` is a linter that reports returning a nil error in Go. It combines the features of [nilness](https://cs.opensource.google/go/x/tools/+/refs/tags/v0.28.0:go/analysis/passes/nilness/nilness.go) and [nilerr](https://github.com/gostaticanalysis/nilerr), providing a concise way to detect returning an unrelated or nil-valued error. + +## Case + +case 1 +```go +err := do() +if err != nil { + return err +} +err2 := do2() +if err2 != nil { + return err // should return err2 after checking `err2 != nil`, but this returns a nil-valued error instead +} +``` + + +## Some Real Bugs + +- https://github.com/alingse/sundrylint/issues/4 +- https://github.com/alingse/nilnesserr/issues/1 + +We use https://github.com/alingse/go-linter-runner to run the linter on GitHub Actions against public Go repos. + +## Install + +```bash +go install github.com/alingse/nilnesserr/cmd/nilnesserr@latest +``` + + +## TODO + +case 2 + +```go +err := do() +if err != nil { + return err +} +_, ok := do2() +if !ok { + return err +} + +``` + +case 3 + +```go +err := do() +if err != nil { + return err +} +_, ok := do2() +if !ok { + return errors.Wrap(err) +} +``` + +This may also be a bug: the code should return a non-nil error after the `if`. + +## License + +This project is licensed under the MIT License. See the LICENSE file for details. + +This project incorporates source code from two different libraries: + +1. [nilness](https://cs.opensource.google/go/x/tools/+/refs/tags/v0.28.0:go/analysis/passes/nilness/nilness.go) licensed under the BSD license. +2. [nilerr](https://github.com/gostaticanalysis/nilerr) licensed under the MIT license.
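A minimal sketch of the corrected form of case 1 above, keeping the README's hypothetical do/do2 helpers: the error that was checked is the one that gets returned.

	if err := do(); err != nil {
		return err
	}
	if err2 := do2(); err2 != nil {
		return err2 // return the error that was actually checked, not the stale nil err
	}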
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/coretype.go b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/coretype.go new file mode 100644 index 0000000000..7a744d123b --- /dev/null +++ b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/coretype.go @@ -0,0 +1,122 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. +func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. + ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. 
For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, NormalTerms returns ErrEmptyTypeSet. +// +// NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func NormalTerms(typ types.Type) ([]*types.Term, error) { + switch typ := typ.Underlying().(type) { + case *types.TypeParam: + return StructuralTerms(typ) + case *types.Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*types.Term{types.NewTerm(false, typ)}, nil + } +} diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/normalize.go b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/normalize.go new file mode 100644 index 0000000000..0302872f47 --- /dev/null +++ b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/normalize.go @@ -0,0 +1,200 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" +) + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// +// type T[P interface{~int; m()}] int +// +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. 
A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func UnionTermSet(union *types.Union) ([]*types.Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*types.Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*types.Term + for _, term := range tset.terms { + terms = append(terms, types.NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +var ErrNilType = errors.New("nil type") +var ErrUnreachable = errors.New("unreachable") + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + return nil, ErrNilType + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. + tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. 
+ tset.terms = allTermlist + for i := 0; i < u.NumEmbeddeds(); i++ { + embedded := u.EmbeddedType(i) + if _, ok := embedded.Underlying().(*types.TypeParam); ok { + return nil, fmt.Errorf("invalid embedded type %T", embedded) + } + tset2, err := computeTermSetInternal(embedded, seen, depth+1) + if err != nil { + return nil, err + } + tset.terms = tset.terms.intersect(tset2.terms) + } + case *types.Union: + // The term set of a union is the union of term sets of its terms. + tset.terms = nil + for i := 0; i < u.Len(); i++ { + t := u.Term(i) + var terms termlist + switch t.Type().Underlying().(type) { + case *types.Interface: + tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) + if err != nil { + return nil, err + } + terms = tset2.terms + case *types.TypeParam, *types.Union: + // A stand-alone type parameter or union is not permitted as union + // term. + return nil, fmt.Errorf("invalid union term %T", t) + default: + if t.Type() == types.Typ[types.Invalid] { + continue + } + terms = termlist{{t.Tilde(), t.Type()}} + } + tset.terms = tset.terms.union(terms) + if len(tset.terms) > maxTermCount { + return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) + } + } + case *types.TypeParam: + return nil, ErrUnreachable + default: + // For all other types, the term set is just a single non-tilde term + // holding the type itself. + if u != types.Typ[types.Invalid] { + tset.terms = termlist{{false, t}} + } + } + return tset, nil +} + +// under is a facade for the go/types internal function of the same name. It is +// used by typeterm.go. +func under(t types.Type) types.Type { + return t.Underlying() +} diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/termlist.go b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/termlist.go new file mode 100644 index 0000000000..cbd12f8013 --- /dev/null +++ b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/termlist.go @@ -0,0 +1,163 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import ( + "bytes" + "go/types" +) + +// A termlist represents the type set represented by the union +// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. +// A termlist is in normal form if all terms are disjoint. +// termlist operations don't require the operands to be in +// normal form. +type termlist []*term + +// allTermlist represents the set of all types. +// It is in normal form. +var allTermlist = termlist{new(term)} + +// String prints the termlist exactly (without normalization). +func (xl termlist) String() string { + if len(xl) == 0 { + return "∅" + } + var buf bytes.Buffer + for i, x := range xl { + if i > 0 { + buf.WriteString(" | ") + } + buf.WriteString(x.String()) + } + return buf.String() +} + +// isEmpty reports whether the termlist xl represents the empty set of types. +func (xl termlist) isEmpty() bool { + // If there's a non-nil term, the entire list is not empty. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil { + return false + } + } + return true +} + +// isAll reports whether the termlist xl represents the set of all types. +func (xl termlist) isAll() bool { + // If there's a 𝓤 term, the entire list is 𝓤. + // If the termlist is in normal form, this requires at most + // one iteration. 
+ for _, x := range xl { + if x != nil && x.typ == nil { + return true + } + } + return false +} + +// norm returns the normal form of xl. +func (xl termlist) norm() termlist { + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + used := make([]bool, len(xl)) + var rl termlist + for i, xi := range xl { + if xi == nil || used[i] { + continue + } + for j := i + 1; j < len(xl); j++ { + xj := xl[j] + if xj == nil || used[j] { + continue + } + if u1, u2 := xi.union(xj); u2 == nil { + // If we encounter a 𝓤 term, the entire list is 𝓤. + // Exit early. + // (Note that this is not just an optimization; + // if we continue, we may end up with a 𝓤 term + // and other terms and the result would not be + // in normal form.) + if u1.typ == nil { + return allTermlist + } + xi = u1 + used[j] = true // xj is now unioned into xi - ignore it in future iterations + } + } + rl = append(rl, xi) + } + return rl +} + +// union returns the union xl ∪ yl. +func (xl termlist) union(yl termlist) termlist { + return append(xl, yl...).norm() +} + +// intersect returns the intersection xl ∩ yl. +func (xl termlist) intersect(yl termlist) termlist { + if xl.isEmpty() || yl.isEmpty() { + return nil + } + + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + var rl termlist + for _, x := range xl { + for _, y := range yl { + if r := x.intersect(y); r != nil { + rl = append(rl, r) + } + } + } + return rl.norm() +} + +// equal reports whether xl and yl represent the same type set. +func (xl termlist) equal(yl termlist) bool { + // TODO(gri) this should be more efficient + return xl.subsetOf(yl) && yl.subsetOf(xl) +} + +// includes reports whether t ∈ xl. +func (xl termlist) includes(t types.Type) bool { + for _, x := range xl { + if x.includes(t) { + return true + } + } + return false +} + +// supersetOf reports whether y ⊆ xl. +func (xl termlist) supersetOf(y *term) bool { + for _, x := range xl { + if y.subsetOf(x) { + return true + } + } + return false +} + +// subsetOf reports whether xl ⊆ yl. +func (xl termlist) subsetOf(yl termlist) bool { + if yl.isEmpty() { + return xl.isEmpty() + } + + // each term x of xl must be a subset of yl + for _, x := range xl { + if !yl.supersetOf(x) { + return false // x is not a subset yl + } + } + return true +} diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/typeterm.go b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/typeterm.go new file mode 100644 index 0000000000..35c66003d6 --- /dev/null +++ b/hack/tools/vendor/github.com/alingse/nilnesserr/internal/typeparams/typeterm.go @@ -0,0 +1,166 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. 
+ +package typeparams + +import "go/types" + +// A term describes elementary type sets: +// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t +type term struct { + tilde bool // valid if typ != nil + typ types.Type +} + +func (x *term) String() string { + switch { + case x == nil: + return "∅" + case x.typ == nil: + return "𝓤" + case x.tilde: + return "~" + x.typ.String() + default: + return x.typ.String() + } +} + +// equal reports whether x and y represent the same type set. +func (x *term) equal(y *term) bool { + // easy cases + switch { + case x == nil || y == nil: + return x == y + case x.typ == nil || y.typ == nil: + return x.typ == y.typ + } + // ∅ ⊂ x, y ⊂ 𝓤 + + return x.tilde == y.tilde && types.Identical(x.typ, y.typ) +} + +// union returns the union x ∪ y: zero, one, or two non-nil terms. +func (x *term) union(y *term) (_, _ *term) { + // easy cases + switch { + case x == nil && y == nil: + return nil, nil // ∅ ∪ ∅ == ∅ + case x == nil: + return y, nil // ∅ ∪ y == y + case y == nil: + return x, nil // x ∪ ∅ == x + case x.typ == nil: + return x, nil // 𝓤 ∪ y == 𝓤 + case y.typ == nil: + return y, nil // x ∪ 𝓤 == 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return x, y // x ∪ y == (x, y) if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∪ ~t == ~t + // ~t ∪ T == ~t + // T ∪ ~t == ~t + // T ∪ T == T + if x.tilde || !y.tilde { + return x, nil + } + return y, nil +} + +// intersect returns the intersection x ∩ y. +func (x *term) intersect(y *term) *term { + // easy cases + switch { + case x == nil || y == nil: + return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ + case x.typ == nil: + return y // 𝓤 ∩ y == y + case y.typ == nil: + return x // x ∩ 𝓤 == x + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return nil // x ∩ y == ∅ if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∩ ~t == ~t + // ~t ∩ T == T + // T ∩ ~t == T + // T ∩ T == T + if !x.tilde || y.tilde { + return x + } + return y +} + +// includes reports whether t ∈ x. +func (x *term) includes(t types.Type) bool { + // easy cases + switch { + case x == nil: + return false // t ∈ ∅ == false + case x.typ == nil: + return true // t ∈ 𝓤 == true + } + // ∅ ⊂ x ⊂ 𝓤 + + u := t + if x.tilde { + u = under(u) + } + return types.Identical(x.typ, u) +} + +// subsetOf reports whether x ⊆ y. +func (x *term) subsetOf(y *term) bool { + // easy cases + switch { + case x == nil: + return true // ∅ ⊆ y == true + case y == nil: + return false // x ⊆ ∅ == false since x != ∅ + case y.typ == nil: + return true // x ⊆ 𝓤 == true + case x.typ == nil: + return false // 𝓤 ⊆ y == false since y != 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return false // x ⊆ y == false if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ⊆ ~t == true + // ~t ⊆ T == false + // T ⊆ ~t == true + // T ⊆ T == true + return !x.tilde || y.tilde +} + +// disjoint reports whether x ∩ y == ∅. +// x.typ and y.typ must not be nil. 
+func (x *term) disjoint(y *term) bool {
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !types.Identical(ux, uy)
+}
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/linter.go b/hack/tools/vendor/github.com/alingse/nilnesserr/linter.go
new file mode 100644
index 0000000000..b622fa694f
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/linter.go
@@ -0,0 +1,48 @@
+package nilnesserr
+
+import (
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+)
+
+const (
+ linterName = "nilnesserr"
+ linterDoc = `This linter reports code that checks for err != nil but then returns a different, nil-valued error.
+powered by nilness and nilerr.`
+
+ linterMessage = "return a nil value error after check error"
+)
+
+type LinterSetting struct{}
+
+func NewAnalyzer(setting LinterSetting) (*analysis.Analyzer, error) {
+ a, err := newAnalyzer(setting)
+ if err != nil {
+ return nil, err
+ }
+
+ return &analysis.Analyzer{
+ Name: linterName,
+ Doc: linterDoc,
+ Run: a.run,
+ Requires: []*analysis.Analyzer{
+ buildssa.Analyzer,
+ },
+ }, nil
+}
+
+type analyzer struct {
+ setting LinterSetting
+}
+
+func newAnalyzer(setting LinterSetting) (*analyzer, error) {
+ a := &analyzer{setting: setting}
+
+ return a, nil
+}
+
+func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) {
+ _, _ = a.checkNilnesserr(pass)
+
+ return nil, nil
+}
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/nilerr.go b/hack/tools/vendor/github.com/alingse/nilnesserr/nilerr.go
new file mode 100644
index 0000000000..c05ec90031
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/nilerr.go
@@ -0,0 +1,83 @@
+// some code was copied from https://github.com/gostaticanalysis/nilerr/blob/master/nilerr.go
+
+package nilnesserr
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/ssa"
+)
+
+var errType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) // nolint: forcetypeassert
+
+func isErrType(res ssa.Value) bool {
+ return types.Implements(res.Type(), errType)
+}
+
+func isConstNil(res ssa.Value) bool {
+ v, ok := res.(*ssa.Const)
+ if ok && v.IsNil() {
+ return true
+ }
+
+ return false
+}
+
+func extractCheckedErrorValue(binOp *ssa.BinOp) ssa.Value {
+ if isErrType(binOp.X) && isConstNil(binOp.Y) {
+ return binOp.X
+ }
+ if isErrType(binOp.Y) && isConstNil(binOp.X) {
+ return binOp.Y
+ }
+
+ return nil
+}
+
+type errFact fact
+
+func findLastNonnilValue(errors []errFact, res ssa.Value) ssa.Value {
+ if len(errors) == 0 {
+ return nil
+ }
+
+ for j := len(errors) - 1; j >= 0; j-- {
+ last := errors[j]
+ if last.value == res {
+ return nil
+ } else if last.nilness == isnonnil {
+ return last.value
+ }
+ }
+
+ return nil
+}
+
+func checkNilnesserr(pass *analysis.Pass, b *ssa.BasicBlock, errors []errFact, isNilness func(value ssa.Value) bool) {
+ for i := range b.Instrs {
+ instr, ok := b.Instrs[i].(*ssa.Return)
+ if !ok {
+ continue
+ }
+
+ for _, res := range instr.Results {
+ if !isErrType(res) || isConstNil(res) || !isNilness(res) {
+ continue
+ }
+ // find the last checked error value that is known to be non-nil
+ lastValue := findLastNonnilValue(errors, res)
+ if lastValue == nil {
+ continue
+ }
+ // report
+ pos := instr.Pos()
+ if pos.IsValid() {
+ pass.Report(analysis.Diagnostic{
+ Pos: pos,
+ Message: linterMessage,
+ })
+ }
+ }
+ }
+}
diff --git a/hack/tools/vendor/github.com/alingse/nilnesserr/nilness.go
b/hack/tools/vendor/github.com/alingse/nilnesserr/nilness.go
new file mode 100644
index 0000000000..6c99041e90
--- /dev/null
+++ b/hack/tools/vendor/github.com/alingse/nilnesserr/nilness.go
@@ -0,0 +1,374 @@
+// This file was copied from https://cs.opensource.google/go/x/tools/+/master:go/analysis/passes/nilness/nilness.go
+// I modified some parts to check the error return
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package nilnesserr
+
+import (
+ "go/token"
+ "go/types"
+
+ "github.com/alingse/nilnesserr/internal/typeparams"
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+ "golang.org/x/tools/go/ssa"
+)
+
+func (a *analyzer) checkNilnesserr(pass *analysis.Pass) (interface{}, error) {
+ ssainput := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
+ for _, fn := range ssainput.SrcFuncs {
+ runFunc(pass, fn)
+ }
+ return nil, nil
+}
+
+func runFunc(pass *analysis.Pass, fn *ssa.Function) {
+ // visit visits reachable blocks of the CFG in dominance order,
+ // maintaining a stack of dominating nilness facts.
+ //
+ // By traversing the dom tree, we can pop facts off the stack as
+ // soon as we've visited a subtree. Had we traversed the CFG,
+ // we would need to retain the set of facts for each block.
+ seen := make([]bool, len(fn.Blocks)) // seen[i] means visit should ignore block i
+
+ var visit func(b *ssa.BasicBlock, stack []fact, errors []errFact)
+
+ visit = func(b *ssa.BasicBlock, stack []fact, errors []errFact) {
+ if seen[b.Index] {
+ return
+ }
+ seen[b.Index] = true
+
+ // check whether this block returns a nil value error
+ checkNilnesserr(
+ pass, b,
+ errors,
+ func(v ssa.Value) bool {
+ return nilnessOf(stack, v) == isnil
+ })
+
+ // For nil comparison blocks, report an error if the condition
+ // is degenerate, and push a nilness fact on the stack when
+ // visiting its true and false successor blocks.
+ if binop, tsucc, fsucc := eq(b); binop != nil {
+ // extract the error value checked by err != nil or err == nil
+ errValue := extractCheckedErrorValue(binop)
+
+ xnil := nilnessOf(stack, binop.X)
+ ynil := nilnessOf(stack, binop.Y)
+
+ if ynil != unknown && xnil != unknown && (xnil == isnil || ynil == isnil) {
+ // Degenerate condition:
+ // the nilness of both operands is known,
+ // and at least one of them is nil.
+
+ // If tsucc's or fsucc's sole incoming edge is impossible,
+ // it is unreachable. Prune traversal of it and
+ // all the blocks it dominates.
+ // (We could be more precise with full dataflow
+ // analysis of control-flow joins.)
+ var skip *ssa.BasicBlock
+ if xnil == ynil {
+ skip = fsucc
+ } else {
+ skip = tsucc
+ }
+ for _, d := range b.Dominees() {
+ if d == skip && len(d.Preds) == 1 {
+ continue
+ }
+
+ visit(d, stack, errors)
+ }
+
+ return
+ }
+
+ // "if x == nil" or "if nil == y" condition; x, y are unknown.
+ if xnil == isnil || ynil == isnil {
+ var newFacts facts
+ if xnil == isnil {
+ // x is nil, y is unknown:
+ // t successor learns y is nil.
+ newFacts = expandFacts(fact{binop.Y, isnil})
+ } else {
+ // y is nil, x is unknown:
+ // t successor learns x is nil.
+ newFacts = expandFacts(fact{binop.X, isnil})
+ }
+
+ for _, d := range b.Dominees() {
+ // Successor blocks learn a fact
+ // only at non-critical edges.
+ // (We could be more precise with full dataflow
+ // analysis of control-flow joins.)
+ s := stack
+ errs := errors
+ if len(d.Preds) == 1 {
+ if d == tsucc {
+ s = append(s, newFacts...)
+ // add nil error + if errValue != nil { + errs = append(errs, errFact{value: errValue, nilness: isnil}) + } + } else if d == fsucc { + s = append(s, newFacts.negate()...) + // add non-nil error + if errValue != nil { + errs = append(errs, errFact{value: errValue, nilness: isnonnil}) + } + } + } + + visit(d, s, errs) + } + return + } + } + + // In code of the form: + // + // if ptr, ok := x.(*T); ok { ... } else { fsucc } + // + // the fsucc block learns that ptr == nil, + // since that's its zero value. + if If, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If); ok { + // Handle "if ok" and "if !ok" variants. + cond, fsucc := If.Cond, b.Succs[1] + if unop, ok := cond.(*ssa.UnOp); ok && unop.Op == token.NOT { + cond, fsucc = unop.X, b.Succs[0] + } + + // Match pattern: + // t0 = typeassert (pointerlike) + // t1 = extract t0 #0 // ptr + // t2 = extract t0 #1 // ok + // if t2 goto tsucc, fsucc + if extract1, ok := cond.(*ssa.Extract); ok && extract1.Index == 1 { + if assert, ok := extract1.Tuple.(*ssa.TypeAssert); ok && + isNillable(assert.AssertedType) { + for _, pinstr := range *assert.Referrers() { + if extract0, ok := pinstr.(*ssa.Extract); ok && + extract0.Index == 0 && + extract0.Tuple == extract1.Tuple { + for _, d := range b.Dominees() { + if len(d.Preds) == 1 && d == fsucc { + visit(d, append(stack, fact{extract0, isnil}), errors) + } + } + } + } + } + } + } + + for _, d := range b.Dominees() { + visit(d, stack, errors) + } + } + + // Visit the entry block. No need to visit fn.Recover. + if fn.Blocks != nil { + visit(fn.Blocks[0], make([]fact, 0, 20), nil) // 20 is plenty + } +} + +// A fact records that a block is dominated +// by the condition v == nil or v != nil. +type fact struct { + value ssa.Value + nilness nilness +} + +func (f fact) negate() fact { return fact{f.value, -f.nilness} } + +type nilness int + +const ( + isnonnil = -1 + unknown nilness = 0 + isnil = 1 +) + +var nilnessStrings = []string{"non-nil", "unknown", "nil"} + +func (n nilness) String() string { return nilnessStrings[n+1] } + +// nilnessOf reports whether v is definitely nil, definitely not nil, +// or unknown given the dominating stack of facts. +func nilnessOf(stack []fact, v ssa.Value) nilness { + switch v := v.(type) { + // unwrap ChangeInterface and Slice values recursively, to detect if underlying + // values have any facts recorded or are otherwise known with regard to nilness. + // + // This work must be in addition to expanding facts about + // ChangeInterfaces during inference/fact gathering because this covers + // cases where the nilness of a value is intrinsic, rather than based + // on inferred facts, such as a zero value interface variable. That + // said, this work alone would only inform us when facts are about + // underlying values, rather than outer values, when the analysis is + // transitive in both directions. + case *ssa.ChangeInterface: + if underlying := nilnessOf(stack, v.X); underlying != unknown { + return underlying + } + case *ssa.MakeInterface: + // A MakeInterface is non-nil unless its operand is a type parameter. + tparam, ok := types.Unalias(v.X.Type()).(*types.TypeParam) + if !ok { + return isnonnil + } + + // A MakeInterface of a type parameter is non-nil if + // the type parameter cannot be instantiated as an + // interface type (#66835). 
+ if terms, err := typeparams.NormalTerms(tparam.Constraint()); err == nil && len(terms) > 0 {
+ return isnonnil
+ }
+
+ // If the type parameter can be instantiated as an
+ // interface (and thus also as a concrete type),
+ // we can't determine the nilness.
+
+ case *ssa.Slice:
+ if underlying := nilnessOf(stack, v.X); underlying != unknown {
+ return underlying
+ }
+ case *ssa.SliceToArrayPointer:
+ nn := nilnessOf(stack, v.X)
+ if slice2ArrayPtrLen(v) > 0 {
+ if nn == isnil {
+ // We know that *(*[1]byte)(nil) is going to panic because of the
+ // conversion. So return unknown to the caller to prevent useless
+ // nil dereference reporting due to the * operator.
+ return unknown
+ }
+ // Otherwise, the conversion will yield a non-nil pointer to array.
+ // Note that the instruction can still panic if the array length is greater
+ // than the slice length. If the value is used by another instruction,
+ // that instruction can assume the panic did not happen when that
+ // instruction is reached.
+ return isnonnil
+ }
+ // If the array length is zero, the conversion result depends on the nilness of the slice.
+ if nn != unknown {
+ return nn
+ }
+ }
+
+ // Is value intrinsically nil or non-nil?
+ switch v := v.(type) {
+ case *ssa.Alloc,
+ *ssa.FieldAddr,
+ *ssa.FreeVar,
+ *ssa.Function,
+ *ssa.Global,
+ *ssa.IndexAddr,
+ *ssa.MakeChan,
+ *ssa.MakeClosure,
+ *ssa.MakeMap,
+ *ssa.MakeSlice:
+ return isnonnil
+
+ case *ssa.Const:
+ if v.IsNil() {
+ return isnil // nil or zero value of a pointer-like type
+ } else {
+ return unknown // non-pointer
+ }
+ }
+
+ // Search dominating control-flow facts.
+ for _, f := range stack {
+ if f.value == v {
+ return f.nilness
+ }
+ }
+ return unknown
+}
+
+func slice2ArrayPtrLen(v *ssa.SliceToArrayPointer) int64 {
+ return v.Type().(*types.Pointer).Elem().Underlying().(*types.Array).Len()
+}
+
+// If b ends with an equality comparison, eq returns the operation and
+// its true (equal) and false (not equal) successors.
+func eq(b *ssa.BasicBlock) (op *ssa.BinOp, tsucc, fsucc *ssa.BasicBlock) {
+ if If, ok := b.Instrs[len(b.Instrs)-1].(*ssa.If); ok {
+ if binop, ok := If.Cond.(*ssa.BinOp); ok {
+ switch binop.Op {
+ case token.EQL:
+ return binop, b.Succs[0], b.Succs[1]
+ case token.NEQ:
+ return binop, b.Succs[1], b.Succs[0]
+ }
+ }
+ }
+ return nil, nil, nil
+}
+
+// expandFacts takes a single fact and returns the set of facts that can be
+// known about it or any of its related values. Some operations, like
+// ChangeInterface, have transitive nilness, such that if you know the
+// underlying value is nil, you also know the value itself is nil, and vice
+// versa. This operation allows callers to match on any of the related values
+// in analyses, rather than just the one form of the value that happened to
+// appear in a comparison.
+//
+// This work must be in addition to unwrapping values within nilnessOf because
+// while this work helps give facts about transitively known values based on
+// inferred facts, the recursive check within nilnessOf covers cases where
+// nilness facts are intrinsic to the underlying value, such as zero-value
+// interface variables.
+//
+// ChangeInterface is the only expansion currently supported, but others, like
+// Slice, could be added. At this time, this tool does not check slice
+// operations in a way this expansion could help. See
+// https://play.golang.org/p/mGqXEp7w4fR for an example.
+func expandFacts(f fact) []fact { + ff := []fact{f} + +Loop: + for { + switch v := f.value.(type) { + case *ssa.ChangeInterface: + f = fact{v.X, f.nilness} + ff = append(ff, f) + default: + break Loop + } + } + + return ff +} + +type facts []fact + +func (ff facts) negate() facts { + nn := make([]fact, len(ff)) + for i, f := range ff { + nn[i] = f.negate() + } + return nn +} + +func isNillable(t types.Type) bool { + // TODO(adonovan): CoreType (+ case *Interface) looks wrong. + // This should probably use Underlying, and handle TypeParam + // by computing the union across its normal terms. + switch t := typeparams.CoreType(t).(type) { + case *types.Pointer, + *types.Map, + *types.Signature, + *types.Chan, + *types.Interface, + *types.Slice: + return true + case *types.Basic: + return t == types.Typ[types.UnsafePointer] + } + return false +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/.gitignore new file mode 100644 index 0000000000..38ea34ff51 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/.gitignore @@ -0,0 +1,18 @@ +### Go template + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + + +# Go workspace file +go.work + +# No Goland stuff in this repo +.idea diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/LICENSE new file mode 100644 index 0000000000..a22292eb5a --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither name of copyright holders nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/README.md b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/README.md new file mode 100644 index 0000000000..03e5b83eb1 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/README.md @@ -0,0 +1,54 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/antlr4-go/antlr?style=flat-square)](https://goreportcard.com/report/github.com/antlr4-go/antlr) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/github.com/antlr4-go/antlr)](https://pkg.go.dev/github.com/antlr4-go/antlr) +[![Release](https://img.shields.io/github/v/release/antlr4-go/antlr?sort=semver&style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest) +[![Release](https://img.shields.io/github/go-mod/go-version/antlr4-go/antlr?style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest) +[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg?style=flat-square)](https://github.com/antlr4-go/antlr/commit-activity) +[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) +[![GitHub stars](https://img.shields.io/github/stars/antlr4-go/antlr?style=flat-square&label=Star&maxAge=2592000)](https://GitHub.com/Naereen/StrapDown.js/stargazers/) +# ANTLR4 Go Runtime Module Repo + +IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here. + + - Do not submit PRs or any change requests to this repo + - This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR + - This repo contains the Go runtime that your generated projects should import + +## Introduction + +This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained +at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create +the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here. + +The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically. + +### Why? + +The `go get` command is unable to retrieve the Go runtime when it is embedded so +deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime, +does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and +causes confusion. + +For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4` is retrieved by go get as: + +```sh +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc +) +``` + +Where you would expect to see: + +```sh +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0 +) +``` + +The decision was taken to create a separate org in a separate repo to hold the official Go runtime for ANTLR and +from whence users can expect `go get` to behave as expected. + + +# Documentation +Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on +migrating existing projects to use the new module location and for information on how to use the Go runtime in +general. 
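To make the intended outcome concrete, a minimal sketch of a consumer's `go.mod` against the relocated module (the module name and version below are illustrative):

```sh
module example.com/myparser

go 1.20

require github.com/antlr4-go/antlr/v4 v4.13.0
```

The matching import in Go source is then `import "github.com/antlr4-go/antlr/v4"`.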
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go new file mode 100644 index 0000000000..3bb4fd7c4e --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go @@ -0,0 +1,102 @@ +/* +Package antlr implements the Go version of the ANTLR 4 runtime. + +# The ANTLR Tool + +ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, +or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. +From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface +(or visitor) that makes it easy to respond to the recognition of phrases of interest. + +# Go Runtime + +At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime +source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of +ANTLR4 that it is compatible with (I.E. uses the /v4 path). + +However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root +of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code. +This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not +list the release tag such as @4.12.0 - this was confusing, to say the least. + +As of 4.12.1, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr` +(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information, +which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs]. + +This means that if you are using the source code without modules, you should also use the source code in the [new repo]. +Though we highly recommend that you use go modules, as they are now idiomatic for Go. + +I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good. + +Go runtime author: [Jim Idle] jimi@idle.ws + +# Code Generation + +ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a +runtime library, written specifically to support the generated code in the target language. This library is the +runtime for the Go target. + +To generate code for the go target, it is generally recommended to place the source grammar files in a package of +their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory +it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean +that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other +way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in +your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as +it was at any point in its history. + +Here is a general/recommended template for an ANTLR based recognizer in Go: + + . 
+ ├── parser
+ │ ├── mygrammar.g4
+ │ ├── antlr-4.12.1-complete.jar
+ │ ├── generate.go
+ │ └── generate.sh
+ ├── parsing - generated code goes here
+ │ └── error_listeners.go
+ ├── go.mod
+ ├── go.sum
+ ├── main.go
+ └── main_test.go
+
+Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in.
+
+The generate.go file then looks like this:
+
+ package parser
+
+ //go:generate ./generate.sh
+
+And the generate.sh file will look similar to this:
+
+ #!/bin/sh
+
+ alias antlr4='java -Xmx500M -cp "./antlr4-4.12.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+ antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4
+
+depending on whether you want visitors or listeners or any other ANTLR options. Note that another option here
+is to generate the code into a different package or location of your choosing.
+
+From the command line at the root of your source package (location of go.mod) you can then simply issue the command:
+
+ go generate ./...
+
+This will generate the code for the parser and place it in the parsing package. You can then use the generated code
+by importing the parsing package.
+
+There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like.
+
+# Copyright Notice
+
+Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
+
+Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
+
+[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
+[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
+[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md
+[new repo]: https://github.com/antlr4-go/antlr
+[Jim Idle]: https://github.com/jimidle
+[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md
+*/
+package antlr
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn.go
new file mode 100644
index 0000000000..cdeefed247
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "sync"
+
+// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
+// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
+var ATNInvalidAltNumber int
+
+// ATN represents an “[Augmented Transition Network]”; in general ANTLR uses the term
+// “Augmented Recursive Transition Network”, though there are some descriptions of “[Recursive Transition Network]”
+// in existence.
+//
+// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)].
+//
+// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network
+// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
+// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
+type ATN struct {
+
+ // DecisionToState is the decision points for all rules, sub-rules, optional
+ // blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we
+ // can go back later and build DFA predictors for them. This includes
+ // all the rules, sub-rules, optional blocks, ()+, ()* etc...
+ DecisionToState []DecisionState + + // grammarType is the ATN type and is used for deserializing ATNs from strings. + grammarType int + + // lexerActions is referenced by action transitions in the ATN for lexer ATNs. + lexerActions []LexerAction + + // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN. + maxTokenType int + + modeNameToStartState map[string]*TokensStartState + + modeToStartState []*TokensStartState + + // ruleToStartState maps from rule index to starting state number. + ruleToStartState []*RuleStartState + + // ruleToStopState maps from rule index to stop state number. + ruleToStopState []*RuleStopState + + // ruleToTokenType maps the rule index to the resulting token type for lexer + // ATNs. For parser ATNs, it maps the rule index to the generated bypass token + // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was + // specified, and otherwise is nil. + ruleToTokenType []int + + // ATNStates is a list of all states in the ATN, ordered by state number. + // + states []ATNState + + mu sync.Mutex + stateMu sync.RWMutex + edgeMu sync.RWMutex +} + +// NewATN returns a new ATN struct representing the given grammarType and is used +// for runtime deserialization of ATNs from the code generated by the ANTLR tool +func NewATN(grammarType int, maxTokenType int) *ATN { + return &ATN{ + grammarType: grammarType, + maxTokenType: maxTokenType, + modeNameToStartState: make(map[string]*TokensStartState), + } +} + +// NextTokensInContext computes and returns the set of valid tokens that can occur starting +// in state s. If ctx is nil, the set of tokens will not include what can follow +// the rule surrounding s. In other words, the set will be restricted to tokens +// reachable staying within the rule of s. +func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet { + return NewLL1Analyzer(a).Look(s, nil, ctx) +} + +// NextTokensNoContext computes and returns the set of valid tokens that can occur starting +// in state s and staying in same rule. [antlr.Token.EPSILON] is in set if we reach end of +// rule. +func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { + a.mu.Lock() + defer a.mu.Unlock() + iset := s.GetNextTokenWithinRule() + if iset == nil { + iset = a.NextTokensInContext(s, nil) + iset.readOnly = true + s.SetNextTokenWithinRule(iset) + } + return iset +} + +// NextTokens computes and returns the set of valid tokens starting in state s, by +// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil). 
+func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet { + if ctx == nil { + return a.NextTokensNoContext(s) + } + + return a.NextTokensInContext(s, ctx) +} + +func (a *ATN) addState(state ATNState) { + if state != nil { + state.SetATN(a) + state.SetStateNumber(len(a.states)) + } + + a.states = append(a.states, state) +} + +func (a *ATN) removeState(state ATNState) { + a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice +} + +func (a *ATN) defineDecisionState(s DecisionState) int { + a.DecisionToState = append(a.DecisionToState, s) + s.setDecision(len(a.DecisionToState) - 1) + + return s.getDecision() +} + +func (a *ATN) getDecisionState(decision int) DecisionState { + if len(a.DecisionToState) == 0 { + return nil + } + + return a.DecisionToState[decision] +} + +// getExpectedTokens computes the set of input symbols which could follow ATN +// state number stateNumber in the specified full parse context ctx and returns +// the set of potentially valid input symbols which could follow the specified +// state in the specified context. This method considers the complete parser +// context, but does not evaluate semantic predicates (i.e. all predicates +// encountered during the calculation are assumed true). If a path in the ATN +// exists from the starting state to the RuleStopState of the outermost context +// without Matching any symbols, Token.EOF is added to the returned set. +// +// A nil ctx defaults to ParserRuleContext.EMPTY. +// +// It panics if the ATN does not contain state stateNumber. +func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet { + if stateNumber < 0 || stateNumber >= len(a.states) { + panic("Invalid state number.") + } + + s := a.states[stateNumber] + following := a.NextTokens(s, nil) + + if !following.contains(TokenEpsilon) { + return following + } + + expected := NewIntervalSet() + + expected.addSet(following) + expected.removeOne(TokenEpsilon) + + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := a.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + + following = a.NextTokens(rt.(*RuleTransition).followState, nil) + expected.addSet(following) + expected.removeOne(TokenEpsilon) + ctx = ctx.GetParent().(RuleContext) + } + + if following.contains(TokenEpsilon) { + expected.addOne(TokenEOF) + } + + return expected +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config.go new file mode 100644 index 0000000000..a83f25d349 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config.go @@ -0,0 +1,335 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +const ( + lexerConfig = iota // Indicates that this ATNConfig is for a lexer + parserConfig // Indicates that this ATNConfig is for a parser +) + +// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic +// context). The syntactic context is a graph-structured stack node whose +// path(s) to the root is the rule invocation(s) chain used to arrive in the +// state. The semantic context is the tree of semantic predicates encountered +// before reaching an ATN state. 
+type ATNConfig struct { + precedenceFilterSuppressed bool + state ATNState + alt int + context *PredictionContext + semanticContext SemanticContext + reachesIntoOuterContext int + cType int // lexerConfig or parserConfig + lexerActionExecutor *LexerActionExecutor + passedThroughNonGreedyDecision bool +} + +// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only +func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig { + return NewATNConfig5(state, alt, context, SemanticContextNone) +} + +// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context +func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") // TODO: Necessary? + } + + pac := &ATNConfig{} + pac.state = state + pac.alt = alt + pac.context = context + pac.semanticContext = semanticContext + pac.cType = parserConfig + return pac +} + +// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only +func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig { + return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) +} + +// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context +func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig { + return NewATNConfig(c, state, c.GetContext(), semanticContext) +} + +// NewATNConfig2 creates a new ATNConfig instance given an existing config, and a context only +func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig { + return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext) +} + +// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only +func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig { + return NewATNConfig(c, state, context, c.GetSemanticContext()) +} + +// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context and a semantic context, other 'constructors' +// are just wrappers around this one. 
+func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") // TODO: Remove this - probably put here for some bug that is now fixed + } + b := &ATNConfig{} + b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext) + b.cType = parserConfig + return b +} + +func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) { + + a.state = state + a.alt = alt + a.context = context + a.semanticContext = semanticContext + a.reachesIntoOuterContext = c.GetReachesIntoOuterContext() + a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed() +} + +func (a *ATNConfig) getPrecedenceFilterSuppressed() bool { + return a.precedenceFilterSuppressed +} + +func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) { + a.precedenceFilterSuppressed = v +} + +// GetState returns the ATN state associated with this configuration +func (a *ATNConfig) GetState() ATNState { + return a.state +} + +// GetAlt returns the alternative associated with this configuration +func (a *ATNConfig) GetAlt() int { + return a.alt +} + +// SetContext sets the rule invocation stack associated with this configuration +func (a *ATNConfig) SetContext(v *PredictionContext) { + a.context = v +} + +// GetContext returns the rule invocation stack associated with this configuration +func (a *ATNConfig) GetContext() *PredictionContext { + return a.context +} + +// GetSemanticContext returns the semantic context associated with this configuration +func (a *ATNConfig) GetSemanticContext() SemanticContext { + return a.semanticContext +} + +// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration +func (a *ATNConfig) GetReachesIntoOuterContext() int { + return a.reachesIntoOuterContext +} + +// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration +func (a *ATNConfig) SetReachesIntoOuterContext(v int) { + a.reachesIntoOuterContext = v +} + +// Equals is the default comparison function for an ATNConfig when no specialist implementation is required +// for a collection. +// +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. +func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool { + switch a.cType { + case lexerConfig: + return a.LEquals(o) + case parserConfig: + return a.PEquals(o) + default: + panic("Invalid ATNConfig type") + } +} + +// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required +// for a collection. +// +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. 
+func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool { + var other, ok = o.(*ATNConfig) + + if !ok { + return false + } + if a == other { + return true + } else if other == nil { + return false + } + + var equal bool + + if a.context == nil { + equal = other.context == nil + } else { + equal = a.context.Equals(other.context) + } + + var ( + nums = a.state.GetStateNumber() == other.state.GetStateNumber() + alts = a.alt == other.alt + cons = a.semanticContext.Equals(other.semanticContext) + sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed + ) + + return nums && alts && cons && sups && equal +} + +// Hash is the default hash function for a parser ATNConfig, when no specialist hash function +// is required for a collection +func (a *ATNConfig) Hash() int { + switch a.cType { + case lexerConfig: + return a.LHash() + case parserConfig: + return a.PHash() + default: + panic("Invalid ATNConfig type") + } +} + +// PHash is the default hash function for a parser ATNConfig, when no specialist hash function +// is required for a collection +func (a *ATNConfig) PHash() int { + var c int + if a.context != nil { + c = a.context.Hash() + } + + h := murmurInit(7) + h = murmurUpdate(h, a.state.GetStateNumber()) + h = murmurUpdate(h, a.alt) + h = murmurUpdate(h, c) + h = murmurUpdate(h, a.semanticContext.Hash()) + return murmurFinish(h, 4) +} + +// String returns a string representation of the ATNConfig, usually used for debugging purposes +func (a *ATNConfig) String() string { + var s1, s2, s3 string + + if a.context != nil { + s1 = ",[" + fmt.Sprint(a.context) + "]" + } + + if a.semanticContext != SemanticContextNone { + s2 = "," + fmt.Sprint(a.semanticContext) + } + + if a.reachesIntoOuterContext > 0 { + s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext) + } + + return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3) +} + +func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.state = state + lac.alt = alt + lac.context = context + lac.semanticContext = SemanticContextNone + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = c.lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = c.lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +//goland:noinspection GoUnusedExportedFunction +func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.state = state + lac.alt = alt + lac.context = context + lac.semanticContext = SemanticContextNone + lac.cType = lexerConfig + return lac +} + +// LHash is the default hash 
function for Lexer ATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (a *ATNConfig) LHash() int { + var f int + if a.passedThroughNonGreedyDecision { + f = 1 + } else { + f = 0 + } + h := murmurInit(7) + h = murmurUpdate(h, a.state.GetStateNumber()) + h = murmurUpdate(h, a.alt) + h = murmurUpdate(h, a.context.Hash()) + h = murmurUpdate(h, a.semanticContext.Hash()) + h = murmurUpdate(h, f) + h = murmurUpdate(h, a.lexerActionExecutor.Hash()) + h = murmurFinish(h, 6) + return h +} + +// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool { + var otherT, ok = other.(*ATNConfig) + if !ok { + return false + } else if a == otherT { + return true + } else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision { + return false + } + + switch { + case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil: + return true + case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil: + if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) { + return false + } + default: + return false // One but not both, are nil + } + + return a.PEquals(otherT) +} + +func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool { + var ds, ok = target.(DecisionState) + + return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go new file mode 100644 index 0000000000..52dbaf8064 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go @@ -0,0 +1,301 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +// ATNConfigSet is a specialized set of ATNConfig that tracks information +// about its elements and can combine similar configurations using a +// graph-structured stack. +type ATNConfigSet struct { + cachedHash int + + // configLookup is used to determine whether two ATNConfigSets are equal. We + // need all configurations with the same (s, i, _, semctx) to be equal. A key + // effectively doubles the number of objects associated with ATNConfigs. All + // keys are hashed by (s, i, _, pi), not including the context. Wiped out when + // read-only because a set becomes a DFA state. + configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]] + + // configs is the added elements that did not match an existing key in configLookup + configs []*ATNConfig + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves re-computation. Can we track conflicts as they + // are added to save scanning configs later? + conflictingAlts *BitSet + + // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates + // we hit a pred while computing a closure operation. Do not make a DFA state + // from the ATNConfigSet in this case. TODO: How is this used by parsers? + dipsIntoOuterContext bool + + // fullCtx is whether it is part of a full context LL prediction. Used to + // determine how to merge $. It is a wildcard with SLL, but not for an LL + // context merge. + fullCtx bool + + // Used in parser and lexer. 
In lexer, it indicates we hit a pred
+	// while computing a closure operation. Don't make a DFA state from this set.
+	hasSemanticContext bool
+
+	// readOnly is whether it is read-only. Do not
+	// allow any code to manipulate the set if true because DFA states will point at
+	// sets and those must not change. If not, protect other fields; conflictingAlts
+	// in particular, which is assigned after readOnly.
+	readOnly bool
+
+	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+	// info together because it saves re-computation. Can we track conflicts as they
+	// are added to save scanning configs later?
+	uniqueAlt int
+}
+
+// Alts returns the combined set of alts for all the configurations in this set.
+func (b *ATNConfigSet) Alts() *BitSet {
+	alts := NewBitSet()
+	for _, it := range b.configs {
+		alts.add(it.GetAlt())
+	}
+	return alts
+}
+
+// NewATNConfigSet creates a new ATNConfigSet instance.
+func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
+	return &ATNConfigSet{
+		cachedHash:   -1,
+		configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
+		fullCtx:      fullCtx,
+	}
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _),
+// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
+// 'pi' is the [ATNConfig].semanticContext.
+//
+// We use (s,i,pi) as the key.
+// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
+	if b.readOnly {
+		panic("set is read-only")
+	}
+
+	if config.GetSemanticContext() != SemanticContextNone {
+		b.hasSemanticContext = true
+	}
+
+	if config.GetReachesIntoOuterContext() > 0 {
+		b.dipsIntoOuterContext = true
+	}
+
+	existing, present := b.configLookup.Put(config)
+
+	// The config was not already in the set
+	//
+	if !present {
+		b.cachedHash = -1
+		b.configs = append(b.configs, config) // Track order here
+		return true
+	}
+
+	// Merge a previous (s, i, pi, _) with it and save the result
+	rootIsWildcard := !b.fullCtx
+	merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+	// No need to check for existing.context because config.context is in the cache,
+	// since the only way to create new graphs is the "call rule" and here. We cache
+	// at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) + + // Preserve the precedence filter suppression during the merge + if config.getPrecedenceFilterSuppressed() { + existing.setPrecedenceFilterSuppressed(true) + } + + // Replace the context because there is no need to do alt mapping + existing.SetContext(merged) + + return true +} + +// GetStates returns the set of states represented by all configurations in this config set +func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] { + + // states uses the standard comparator and Hash() provided by the ATNState instance + // + states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()") + + for i := 0; i < len(b.configs); i++ { + states.Put(b.configs[i].GetState()) + } + + return states +} + +func (b *ATNConfigSet) GetPredicates() []SemanticContext { + predicates := make([]SemanticContext, 0) + + for i := 0; i < len(b.configs); i++ { + c := b.configs[i].GetSemanticContext() + + if c != SemanticContextNone { + predicates = append(predicates, c) + } + } + + return predicates +} + +func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { + if b.readOnly { + panic("set is read-only") + } + + // Empty indicate no optimization is possible + if b.configLookup == nil || b.configLookup.Len() == 0 { + return + } + + for i := 0; i < len(b.configs); i++ { + config := b.configs[i] + config.SetContext(interpreter.getCachedContext(config.GetContext())) + } +} + +func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool { + for i := 0; i < len(coll); i++ { + b.Add(coll[i], nil) + } + + return false +} + +// Compare The configs are only equal if they are in the same order and their Equals function returns true. +// Java uses ArrayList.equals(), which requires the same order. 
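Before the comparison helpers, a hedged sketch of the `Add` path described above, reusing the imports from the previous example. Two configs that differ only in their alternative produce two distinct `(s, i, pi)` keys, so both are kept and no context merge is attempted (contexts stay nil purely to keep the sketch small):

```go
set := antlr.NewATNConfigSet(false) // SLL-style set: the stack root acts as a wildcard

state := antlr.NewBasicState()
state.SetStateNumber(3)

set.Add(antlr.NewATNConfig6(state, 1, nil), nil) // new (s, i, pi) key: stored
set.Add(antlr.NewATNConfig6(state, 2, nil), nil) // different alt: second entry

fmt.Println(set.Alts()) // both alternatives are represented, e.g. {1, 2}
```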
+func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool { + if len(b.configs) != len(bs.configs) { + return false + } + for i := 0; i < len(b.configs); i++ { + if !b.configs[i].Equals(bs.configs[i]) { + return false + } + } + + return true +} + +func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool { + if b == other { + return true + } else if _, ok := other.(*ATNConfigSet); !ok { + return false + } + + other2 := other.(*ATNConfigSet) + var eca bool + switch { + case b.conflictingAlts == nil && other2.conflictingAlts == nil: + eca = true + case b.conflictingAlts != nil && other2.conflictingAlts != nil: + eca = b.conflictingAlts.equals(other2.conflictingAlts) + } + return b.configs != nil && + b.fullCtx == other2.fullCtx && + b.uniqueAlt == other2.uniqueAlt && + eca && + b.hasSemanticContext == other2.hasSemanticContext && + b.dipsIntoOuterContext == other2.dipsIntoOuterContext && + b.Compare(other2) +} + +func (b *ATNConfigSet) Hash() int { + if b.readOnly { + if b.cachedHash == -1 { + b.cachedHash = b.hashCodeConfigs() + } + + return b.cachedHash + } + + return b.hashCodeConfigs() +} + +func (b *ATNConfigSet) hashCodeConfigs() int { + h := 1 + for _, config := range b.configs { + h = 31*h + config.Hash() + } + return h +} + +func (b *ATNConfigSet) Contains(item *ATNConfig) bool { + if b.readOnly { + panic("not implemented for read-only sets") + } + if b.configLookup == nil { + return false + } + return b.configLookup.Contains(item) +} + +func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool { + return b.Contains(item) +} + +func (b *ATNConfigSet) Clear() { + if b.readOnly { + panic("set is read-only") + } + b.configs = make([]*ATNConfig, 0) + b.cachedHash = -1 + b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()") +} + +func (b *ATNConfigSet) String() string { + + s := "[" + + for i, c := range b.configs { + s += c.String() + + if i != len(b.configs)-1 { + s += ", " + } + } + + s += "]" + + if b.hasSemanticContext { + s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) + } + + if b.uniqueAlt != ATNInvalidAltNumber { + s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) + } + + if b.conflictingAlts != nil { + s += ",conflictingAlts=" + b.conflictingAlts.String() + } + + if b.dipsIntoOuterContext { + s += ",dipsIntoOuterContext" + } + + return s +} + +// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair +// for use in lexers. +func NewOrderedATNConfigSet() *ATNConfigSet { + return &ATNConfigSet{ + cachedHash: -1, + // This set uses the standard Hash() and Equals() from ATNConfig + configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"), + fullCtx: false, + } +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go new file mode 100644 index 0000000000..bdb30b3622 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go @@ -0,0 +1,62 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import "errors" + +var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false} + +type ATNDeserializationOptions struct { + readOnly bool + verifyATN bool + generateRuleBypassTransitions bool +} + +func (opts *ATNDeserializationOptions) ReadOnly() bool { + return opts.readOnly +} + +func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) { + if opts.readOnly { + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) + } + opts.readOnly = readOnly +} + +func (opts *ATNDeserializationOptions) VerifyATN() bool { + return opts.verifyATN +} + +func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) { + if opts.readOnly { + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) + } + opts.verifyATN = verifyATN +} + +func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool { + return opts.generateRuleBypassTransitions +} + +func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) { + if opts.readOnly { + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) + } + opts.generateRuleBypassTransitions = generateRuleBypassTransitions +} + +//goland:noinspection GoUnusedExportedFunction +func DefaultATNDeserializationOptions() *ATNDeserializationOptions { + return NewATNDeserializationOptions(&defaultATNDeserializationOptions) +} + +func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions { + o := new(ATNDeserializationOptions) + if other != nil { + *o = *other + o.readOnly = false + } + return o +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go new file mode 100644 index 0000000000..2dcb9ae11b --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go @@ -0,0 +1,684 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
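Before moving into the deserializer itself, the options type above is worth a sketch: it follows a copy-on-construct pattern, where the package-level default is frozen (its `readOnly` flag is true) and `NewATNDeserializationOptions` always hands back a mutable copy:

```go
opts := antlr.DefaultATNDeserializationOptions() // mutable copy of the frozen defaults
fmt.Println(opts.VerifyATN())                    // true, inherited from the default

opts.SetGenerateRuleBypassTransitions(true) // allowed: the copy starts out writable
opts.SetReadOnly(true)                      // freeze this instance
// opts.SetVerifyATN(false)                 // would panic: the options are now immutable
```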
+ +package antlr + +import ( + "fmt" + "strconv" +) + +const serializedVersion = 4 + +type loopEndStateIntPair struct { + item0 *LoopEndState + item1 int +} + +type blockStartStateIntPair struct { + item0 BlockStartState + item1 int +} + +type ATNDeserializer struct { + options *ATNDeserializationOptions + data []int32 + pos int +} + +func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { + if options == nil { + options = &defaultATNDeserializationOptions + } + + return &ATNDeserializer{options: options} +} + +//goland:noinspection GoUnusedFunction +func stringInSlice(a string, list []string) int { + for i, b := range list { + if b == a { + return i + } + } + + return -1 +} + +func (a *ATNDeserializer) Deserialize(data []int32) *ATN { + a.data = data + a.pos = 0 + a.checkVersion() + + atn := a.readATN() + + a.readStates(atn) + a.readRules(atn) + a.readModes(atn) + + sets := a.readSets(atn, nil) + + a.readEdges(atn, sets) + a.readDecisions(atn) + a.readLexerActions(atn) + a.markPrecedenceDecisions(atn) + a.verifyATN(atn) + + if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser { + a.generateRuleBypassTransitions(atn) + // Re-verify after modification + a.verifyATN(atn) + } + + return atn + +} + +func (a *ATNDeserializer) checkVersion() { + version := a.readInt() + + if version != serializedVersion { + panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").") + } +} + +func (a *ATNDeserializer) readATN() *ATN { + grammarType := a.readInt() + maxTokenType := a.readInt() + + return NewATN(grammarType, maxTokenType) +} + +func (a *ATNDeserializer) readStates(atn *ATN) { + nstates := a.readInt() + + // Allocate worst case size. + loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates) + endStateNumbers := make([]blockStartStateIntPair, 0, nstates) + + // Preallocate states slice. 
+ atn.states = make([]ATNState, 0, nstates) + + for i := 0; i < nstates; i++ { + stype := a.readInt() + + // Ignore bad types of states + if stype == ATNStateInvalidType { + atn.addState(nil) + continue + } + + ruleIndex := a.readInt() + + s := a.stateFactory(stype, ruleIndex) + + if stype == ATNStateLoopEnd { + loopBackStateNumber := a.readInt() + + loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber}) + } else if s2, ok := s.(BlockStartState); ok { + endStateNumber := a.readInt() + + endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber}) + } + + atn.addState(s) + } + + // Delay the assignment of loop back and end states until we know all the state + // instances have been initialized + for _, pair := range loopBackStateNumbers { + pair.item0.loopBackState = atn.states[pair.item1] + } + + for _, pair := range endStateNumbers { + pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState)) + } + + numNonGreedyStates := a.readInt() + for j := 0; j < numNonGreedyStates; j++ { + stateNumber := a.readInt() + + atn.states[stateNumber].(DecisionState).setNonGreedy(true) + } + + numPrecedenceStates := a.readInt() + for j := 0; j < numPrecedenceStates; j++ { + stateNumber := a.readInt() + + atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true + } +} + +func (a *ATNDeserializer) readRules(atn *ATN) { + nrules := a.readInt() + + if atn.grammarType == ATNTypeLexer { + atn.ruleToTokenType = make([]int, nrules) + } + + atn.ruleToStartState = make([]*RuleStartState, nrules) + + for i := range atn.ruleToStartState { + s := a.readInt() + startState := atn.states[s].(*RuleStartState) + + atn.ruleToStartState[i] = startState + + if atn.grammarType == ATNTypeLexer { + tokenType := a.readInt() + + atn.ruleToTokenType[i] = tokenType + } + } + + atn.ruleToStopState = make([]*RuleStopState, nrules) + + for _, state := range atn.states { + if s2, ok := state.(*RuleStopState); ok { + atn.ruleToStopState[s2.ruleIndex] = s2 + atn.ruleToStartState[s2.ruleIndex].stopState = s2 + } + } +} + +func (a *ATNDeserializer) readModes(atn *ATN) { + nmodes := a.readInt() + atn.modeToStartState = make([]*TokensStartState, nmodes) + + for i := range atn.modeToStartState { + s := a.readInt() + + atn.modeToStartState[i] = atn.states[s].(*TokensStartState) + } +} + +func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet { + m := a.readInt() + + // Preallocate the needed capacity. 
+ if cap(sets)-len(sets) < m { + isets := make([]*IntervalSet, len(sets), len(sets)+m) + copy(isets, sets) + sets = isets + } + + for i := 0; i < m; i++ { + iset := NewIntervalSet() + + sets = append(sets, iset) + + n := a.readInt() + containsEOF := a.readInt() + + if containsEOF != 0 { + iset.addOne(-1) + } + + for j := 0; j < n; j++ { + i1 := a.readInt() + i2 := a.readInt() + + iset.addRange(i1, i2) + } + } + + return sets +} + +func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) { + nedges := a.readInt() + + for i := 0; i < nedges; i++ { + var ( + src = a.readInt() + trg = a.readInt() + ttype = a.readInt() + arg1 = a.readInt() + arg2 = a.readInt() + arg3 = a.readInt() + trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) + srcState = atn.states[src] + ) + + srcState.AddTransition(trans, -1) + } + + // Edges for rule stop states can be derived, so they are not serialized + for _, state := range atn.states { + for _, t := range state.GetTransitions() { + var rt, ok = t.(*RuleTransition) + + if !ok { + continue + } + + outermostPrecedenceReturn := -1 + + if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule { + if rt.precedence == 0 { + outermostPrecedenceReturn = rt.getTarget().GetRuleIndex() + } + } + + trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn) + + atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1) + } + } + + for _, state := range atn.states { + if s2, ok := state.(BlockStartState); ok { + // We need to know the end state to set its start state + if s2.getEndState() == nil { + panic("IllegalState") + } + + // Block end states can only be associated to a single block start state + if s2.getEndState().startState != nil { + panic("IllegalState") + } + + s2.getEndState().startState = state + } + + if s2, ok := state.(*PlusLoopbackState); ok { + for _, t := range s2.GetTransitions() { + if t2, ok := t.getTarget().(*PlusBlockStartState); ok { + t2.loopBackState = state + } + } + } else if s2, ok := state.(*StarLoopbackState); ok { + for _, t := range s2.GetTransitions() { + if t2, ok := t.getTarget().(*StarLoopEntryState); ok { + t2.loopBackState = state + } + } + } + } +} + +func (a *ATNDeserializer) readDecisions(atn *ATN) { + ndecisions := a.readInt() + + for i := 0; i < ndecisions; i++ { + s := a.readInt() + decState := atn.states[s].(DecisionState) + + atn.DecisionToState = append(atn.DecisionToState, decState) + decState.setDecision(i) + } +} + +func (a *ATNDeserializer) readLexerActions(atn *ATN) { + if atn.grammarType == ATNTypeLexer { + count := a.readInt() + + atn.lexerActions = make([]LexerAction, count) + + for i := range atn.lexerActions { + actionType := a.readInt() + data1 := a.readInt() + data2 := a.readInt() + atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2) + } + } +} + +func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) { + count := len(atn.ruleToStartState) + + for i := 0; i < count; i++ { + atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 + } + + for i := 0; i < count; i++ { + a.generateRuleBypassTransition(atn, i) + } +} + +func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { + bypassStart := NewBasicBlockStartState() + + bypassStart.ruleIndex = idx + atn.addState(bypassStart) + + bypassStop := NewBlockEndState() + + bypassStop.ruleIndex = idx + atn.addState(bypassStop) + + bypassStart.endState = bypassStop + + atn.defineDecisionState(&bypassStart.BaseDecisionState) + + bypassStop.startState = bypassStart + + 
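+	// The bypass start/stop pair built above wraps the entire rule so that a single
+	// synthetic bypass token can stand in for the rule body; upstream ANTLR uses this
+	// primarily for parse-tree pattern matching, and the whole pass only runs when
+	// ATNDeserializationOptions enables rule bypass transitions.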
var excludeTransition Transition + var endState ATNState + + if atn.ruleToStartState[idx].isPrecedenceRule { + // Wrap from the beginning of the rule to the StarLoopEntryState + endState = nil + + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + if a.stateIsEndStateFor(state, idx) != nil { + endState = state + excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0] + + break + } + } + + if excludeTransition == nil { + panic("Couldn't identify final state of the precedence rule prefix section.") + } + } else { + endState = atn.ruleToStopState[idx] + } + + // All non-excluded transitions that currently target end state need to target + // blockEnd instead + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + for j := 0; j < len(state.GetTransitions()); j++ { + transition := state.GetTransitions()[j] + + if transition == excludeTransition { + continue + } + + if transition.getTarget() == endState { + transition.setTarget(bypassStop) + } + } + } + + // All transitions leaving the rule start state need to leave blockStart instead + ruleToStartState := atn.ruleToStartState[idx] + count := len(ruleToStartState.GetTransitions()) + + for count > 0 { + bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1) + ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]}) + } + + // Link the new states + atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1) + bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1) + + MatchState := NewBasicState() + + atn.addState(MatchState) + MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1) + bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1) +} + +func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState { + if state.GetRuleIndex() != idx { + return nil + } + + if _, ok := state.(*StarLoopEntryState); !ok { + return nil + } + + maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() + + if _, ok := maybeLoopEndState.(*LoopEndState); !ok { + return nil + } + + var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) + + if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok { + return state + } + + return nil +} + +// markPrecedenceDecisions analyzes the StarLoopEntryState states in the +// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to +// the correct value. +func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { + for _, state := range atn.states { + if _, ok := state.(*StarLoopEntryState); !ok { + continue + } + + // We analyze the [ATN] to determine if an ATN decision state is the + // decision for the closure block that determines whether a + // precedence rule should continue or complete. 
+ if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { + maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() + + if s3, ok := maybeLoopEndState.(*LoopEndState); ok { + var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) + + if s3.epsilonOnlyTransitions && ok2 { + state.(*StarLoopEntryState).precedenceRuleDecision = true + } + } + } + } +} + +func (a *ATNDeserializer) verifyATN(atn *ATN) { + if !a.options.VerifyATN() { + return + } + + // Verify assumptions + for _, state := range atn.states { + if state == nil { + continue + } + + a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "") + + switch s2 := state.(type) { + case *PlusBlockStartState: + a.checkCondition(s2.loopBackState != nil, "") + + case *StarLoopEntryState: + a.checkCondition(s2.loopBackState != nil, "") + a.checkCondition(len(s2.GetTransitions()) == 2, "") + + switch s2.transitions[0].getTarget().(type) { + case *StarBlockStartState: + _, ok := s2.transitions[1].getTarget().(*LoopEndState) + + a.checkCondition(ok, "") + a.checkCondition(!s2.nonGreedy, "") + + case *LoopEndState: + var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState) + + a.checkCondition(ok, "") + a.checkCondition(s2.nonGreedy, "") + + default: + panic("IllegalState") + } + + case *StarLoopbackState: + a.checkCondition(len(state.GetTransitions()) == 1, "") + + var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState) + + a.checkCondition(ok, "") + + case *LoopEndState: + a.checkCondition(s2.loopBackState != nil, "") + + case *RuleStartState: + a.checkCondition(s2.stopState != nil, "") + + case BlockStartState: + a.checkCondition(s2.getEndState() != nil, "") + + case *BlockEndState: + a.checkCondition(s2.startState != nil, "") + + case DecisionState: + a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "") + + default: + var _, ok = s2.(*RuleStopState) + + a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "") + } + } +} + +func (a *ATNDeserializer) checkCondition(condition bool, message string) { + if !condition { + if message == "" { + message = "IllegalState" + } + + panic(message) + } +} + +func (a *ATNDeserializer) readInt() int { + v := a.data[a.pos] + + a.pos++ + + return int(v) // data is 32 bits but int is at least that big +} + +func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { + target := atn.states[trg] + + switch typeIndex { + case TransitionEPSILON: + return NewEpsilonTransition(target, -1) + + case TransitionRANGE: + if arg3 != 0 { + return NewRangeTransition(target, TokenEOF, arg2) + } + + return NewRangeTransition(target, arg1, arg2) + + case TransitionRULE: + return NewRuleTransition(atn.states[arg1], arg2, arg3, target) + + case TransitionPREDICATE: + return NewPredicateTransition(target, arg1, arg2, arg3 != 0) + + case TransitionPRECEDENCE: + return NewPrecedencePredicateTransition(target, arg1) + + case TransitionATOM: + if arg3 != 0 { + return NewAtomTransition(target, TokenEOF) + } + + return NewAtomTransition(target, arg1) + + case TransitionACTION: + return NewActionTransition(target, arg1, arg2, arg3 != 0) + + case TransitionSET: + return NewSetTransition(target, sets[arg1]) + + case TransitionNOTSET: + return NewNotSetTransition(target, sets[arg1]) + + case TransitionWILDCARD: + return NewWildcardTransition(target) + } + + panic("The specified transition type is not valid.") +} + +func (a 
*ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState { + var s ATNState + + switch typeIndex { + case ATNStateInvalidType: + return nil + + case ATNStateBasic: + s = NewBasicState() + + case ATNStateRuleStart: + s = NewRuleStartState() + + case ATNStateBlockStart: + s = NewBasicBlockStartState() + + case ATNStatePlusBlockStart: + s = NewPlusBlockStartState() + + case ATNStateStarBlockStart: + s = NewStarBlockStartState() + + case ATNStateTokenStart: + s = NewTokensStartState() + + case ATNStateRuleStop: + s = NewRuleStopState() + + case ATNStateBlockEnd: + s = NewBlockEndState() + + case ATNStateStarLoopBack: + s = NewStarLoopbackState() + + case ATNStateStarLoopEntry: + s = NewStarLoopEntryState() + + case ATNStatePlusLoopBack: + s = NewPlusLoopbackState() + + case ATNStateLoopEnd: + s = NewLoopEndState() + + default: + panic(fmt.Sprintf("state type %d is invalid", typeIndex)) + } + + s.SetRuleIndex(ruleIndex) + + return s +} + +func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction { + switch typeIndex { + case LexerActionTypeChannel: + return NewLexerChannelAction(data1) + + case LexerActionTypeCustom: + return NewLexerCustomAction(data1, data2) + + case LexerActionTypeMode: + return NewLexerModeAction(data1) + + case LexerActionTypeMore: + return LexerMoreActionINSTANCE + + case LexerActionTypePopMode: + return LexerPopModeActionINSTANCE + + case LexerActionTypePushMode: + return NewLexerPushModeAction(data1) + + case LexerActionTypeSkip: + return LexerSkipActionINSTANCE + + case LexerActionTypeType: + return NewLexerTypeAction(data1) + + default: + panic(fmt.Sprintf("lexer action %d is invalid", typeIndex)) + } +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go new file mode 100644 index 0000000000..afe6c9f809 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go @@ -0,0 +1,41 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false)) + +type IATNSimulator interface { + SharedContextCache() *PredictionContextCache + ATN() *ATN + DecisionToDFA() []*DFA +} + +type BaseATNSimulator struct { + atn *ATN + sharedContextCache *PredictionContextCache + decisionToDFA []*DFA +} + +func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext { + if b.sharedContextCache == nil { + return context + } + + //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()") + visited := NewVisitRecord() + return getCachedBasePredictionContext(context, b.sharedContextCache, visited) +} + +func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { + return b.sharedContextCache +} + +func (b *BaseATNSimulator) ATN() *ATN { + return b.atn +} + +func (b *BaseATNSimulator) DecisionToDFA() []*DFA { + return b.decisionToDFA +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_state.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_state.go new file mode 100644 index 0000000000..2ae5807cdb --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_state.go @@ -0,0 +1,461 @@ +// Copyright (c) 2012-2022 The ANTLR Project. 
All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "os" + "strconv" +) + +// Constants for serialization. +const ( + ATNStateInvalidType = 0 + ATNStateBasic = 1 + ATNStateRuleStart = 2 + ATNStateBlockStart = 3 + ATNStatePlusBlockStart = 4 + ATNStateStarBlockStart = 5 + ATNStateTokenStart = 6 + ATNStateRuleStop = 7 + ATNStateBlockEnd = 8 + ATNStateStarLoopBack = 9 + ATNStateStarLoopEntry = 10 + ATNStatePlusLoopBack = 11 + ATNStateLoopEnd = 12 + + ATNStateInvalidStateNumber = -1 +) + +//goland:noinspection GoUnusedGlobalVariable +var ATNStateInitialNumTransitions = 4 + +type ATNState interface { + GetEpsilonOnlyTransitions() bool + + GetRuleIndex() int + SetRuleIndex(int) + + GetNextTokenWithinRule() *IntervalSet + SetNextTokenWithinRule(*IntervalSet) + + GetATN() *ATN + SetATN(*ATN) + + GetStateType() int + + GetStateNumber() int + SetStateNumber(int) + + GetTransitions() []Transition + SetTransitions([]Transition) + AddTransition(Transition, int) + + String() string + Hash() int + Equals(Collectable[ATNState]) bool +} + +type BaseATNState struct { + // NextTokenWithinRule caches lookahead during parsing. Not used during construction. + NextTokenWithinRule *IntervalSet + + // atn is the current ATN. + atn *ATN + + epsilonOnlyTransitions bool + + // ruleIndex tracks the Rule index because there are no Rule objects at runtime. + ruleIndex int + + stateNumber int + + stateType int + + // Track the transitions emanating from this ATN state. + transitions []Transition +} + +func NewATNState() *BaseATNState { + return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} +} + +func (as *BaseATNState) GetRuleIndex() int { + return as.ruleIndex +} + +func (as *BaseATNState) SetRuleIndex(v int) { + as.ruleIndex = v +} +func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { + return as.epsilonOnlyTransitions +} + +func (as *BaseATNState) GetATN() *ATN { + return as.atn +} + +func (as *BaseATNState) SetATN(atn *ATN) { + as.atn = atn +} + +func (as *BaseATNState) GetTransitions() []Transition { + return as.transitions +} + +func (as *BaseATNState) SetTransitions(t []Transition) { + as.transitions = t +} + +func (as *BaseATNState) GetStateType() int { + return as.stateType +} + +func (as *BaseATNState) GetStateNumber() int { + return as.stateNumber +} + +func (as *BaseATNState) SetStateNumber(stateNumber int) { + as.stateNumber = stateNumber +} + +func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { + return as.NextTokenWithinRule +} + +func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { + as.NextTokenWithinRule = v +} + +func (as *BaseATNState) Hash() int { + return as.stateNumber +} + +func (as *BaseATNState) String() string { + return strconv.Itoa(as.stateNumber) +} + +func (as *BaseATNState) Equals(other Collectable[ATNState]) bool { + if ot, ok := other.(ATNState); ok { + return as.stateNumber == ot.GetStateNumber() + } + + return false +} + +func (as *BaseATNState) isNonGreedyExitState() bool { + return false +} + +func (as *BaseATNState) AddTransition(trans Transition, index int) { + if len(as.transitions) == 0 { + as.epsilonOnlyTransitions = trans.getIsEpsilon() + } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { + _, _ = fmt.Fprintf(os.Stdin, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber) + as.epsilonOnlyTransitions = false + } + + // TODO: Check code for 
already present compared to the Java equivalent + //alreadyPresent := false + //for _, t := range as.transitions { + // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() { + // if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) { + // alreadyPresent = true + // break + // } + // } else if t.getIsEpsilon() && trans.getIsEpsilon() { + // alreadyPresent = true + // break + // } + //} + //if !alreadyPresent { + if index == -1 { + as.transitions = append(as.transitions, trans) + } else { + as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) + // TODO: as.transitions.splice(index, 1, trans) + } + //} else { + // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber) + //} +} + +type BasicState struct { + BaseATNState +} + +func NewBasicState() *BasicState { + return &BasicState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + } +} + +type DecisionState interface { + ATNState + + getDecision() int + setDecision(int) + + getNonGreedy() bool + setNonGreedy(bool) +} + +type BaseDecisionState struct { + BaseATNState + decision int + nonGreedy bool +} + +func NewBaseDecisionState() *BaseDecisionState { + return &BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + decision: -1, + } +} + +func (s *BaseDecisionState) getDecision() int { + return s.decision +} + +func (s *BaseDecisionState) setDecision(b int) { + s.decision = b +} + +func (s *BaseDecisionState) getNonGreedy() bool { + return s.nonGreedy +} + +func (s *BaseDecisionState) setNonGreedy(b bool) { + s.nonGreedy = b +} + +type BlockStartState interface { + DecisionState + + getEndState() *BlockEndState + setEndState(*BlockEndState) +} + +// BaseBlockStartState is the start of a regular (...) block. +type BaseBlockStartState struct { + BaseDecisionState + endState *BlockEndState +} + +func NewBlockStartState() *BaseBlockStartState { + return &BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + decision: -1, + }, + } +} + +func (s *BaseBlockStartState) getEndState() *BlockEndState { + return s.endState +} + +func (s *BaseBlockStartState) setEndState(b *BlockEndState) { + s.endState = b +} + +type BasicBlockStartState struct { + BaseBlockStartState +} + +func NewBasicBlockStartState() *BasicBlockStartState { + return &BasicBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBlockStart, + }, + }, + }, + } +} + +var _ BlockStartState = &BasicBlockStartState{} + +// BlockEndState is a terminal node of a simple (a|b|c) block. +type BlockEndState struct { + BaseATNState + startState ATNState +} + +func NewBlockEndState() *BlockEndState { + return &BlockEndState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBlockEnd, + }, + startState: nil, + } +} + +// RuleStopState is the last node in the ATN for a rule, unless that rule is the +// start symbol. In that case, there is one transition to EOF. Later, we might +// encode references to all calls to this rule to compute FOLLOW sets for error +// handling. 
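The epsilon bookkeeping in `AddTransition` above is easy to exercise directly; a small sketch, again reusing the earlier imports:

```go
src := antlr.NewBasicState()
dst := antlr.NewBasicState()

// The first edge added decides whether the state counts as epsilon-only.
src.AddTransition(antlr.NewEpsilonTransition(dst, -1), -1) // index -1 appends

fmt.Println(src.GetEpsilonOnlyTransitions()) // true
fmt.Println(len(src.GetTransitions()))       // 1
```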
+type RuleStopState struct { + BaseATNState +} + +func NewRuleStopState() *RuleStopState { + return &RuleStopState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateRuleStop, + }, + } +} + +type RuleStartState struct { + BaseATNState + stopState ATNState + isPrecedenceRule bool +} + +func NewRuleStartState() *RuleStartState { + return &RuleStartState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateRuleStart, + }, + } +} + +// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two +// transitions: one to the loop back to start of the block, and one to exit. +type PlusLoopbackState struct { + BaseDecisionState +} + +func NewPlusLoopbackState() *PlusLoopbackState { + return &PlusLoopbackState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStatePlusLoopBack, + }, + }, + } +} + +// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a +// decision state; we don't use it for code generation. Somebody might need it, +// it is included for completeness. In reality, PlusLoopbackState is the real +// decision-making node for A+. +type PlusBlockStartState struct { + BaseBlockStartState + loopBackState ATNState +} + +func NewPlusBlockStartState() *PlusBlockStartState { + return &PlusBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStatePlusBlockStart, + }, + }, + }, + } +} + +var _ BlockStartState = &PlusBlockStartState{} + +// StarBlockStartState is the block that begins a closure loop. +type StarBlockStartState struct { + BaseBlockStartState +} + +func NewStarBlockStartState() *StarBlockStartState { + return &StarBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarBlockStart, + }, + }, + }, + } +} + +var _ BlockStartState = &StarBlockStartState{} + +type StarLoopbackState struct { + BaseATNState +} + +func NewStarLoopbackState() *StarLoopbackState { + return &StarLoopbackState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarLoopBack, + }, + } +} + +type StarLoopEntryState struct { + BaseDecisionState + loopBackState ATNState + precedenceRuleDecision bool +} + +func NewStarLoopEntryState() *StarLoopEntryState { + // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. + return &StarLoopEntryState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarLoopEntry, + }, + }, + } +} + +// LoopEndState marks the end of a * or + loop. +type LoopEndState struct { + BaseATNState + loopBackState ATNState +} + +func NewLoopEndState() *LoopEndState { + return &LoopEndState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateLoopEnd, + }, + } +} + +// TokensStartState is the Tokens rule start state linking to each lexer rule start state. 
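One property of these state types that matters for the collections later in this diff: `Equals` and `Hash` come from `BaseATNState` and look only at the state number, so two states of different concrete types with the same number compare equal. A sketch:

```go
entry := antlr.NewStarLoopEntryState()
basic := antlr.NewBasicState()

entry.SetStateNumber(3)
basic.SetStateNumber(3)

fmt.Println(entry.Equals(basic)) // true: identity is the state number alone
fmt.Println(entry.Hash())        // 3
```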
+type TokensStartState struct { + BaseDecisionState +} + +func NewTokensStartState() *TokensStartState { + return &TokensStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateTokenStart, + }, + }, + } +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_type.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_type.go new file mode 100644 index 0000000000..3a515a145f --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/atn_type.go @@ -0,0 +1,11 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// Represent the type of recognizer an ATN applies to. +const ( + ATNTypeLexer = 0 + ATNTypeParser = 1 +) diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/char_stream.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/char_stream.go new file mode 100644 index 0000000000..bd8127b6b5 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/char_stream.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type CharStream interface { + IntStream + GetText(int, int) string + GetTextFromTokens(start, end Token) string + GetTextFromInterval(Interval) string +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go new file mode 100644 index 0000000000..1bb0314ea0 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go @@ -0,0 +1,56 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// TokenFactory creates CommonToken objects. +type TokenFactory interface { + Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token +} + +// CommonTokenFactory is the default TokenFactory implementation. +type CommonTokenFactory struct { + // copyText indicates whether CommonToken.setText should be called after + // constructing tokens to explicitly set the text. This is useful for cases + // where the input stream might not be able to provide arbitrary substrings of + // text from the input after the lexer creates a token (e.g. the + // implementation of CharStream.GetText in UnbufferedCharStream panics an + // UnsupportedOperationException). Explicitly setting the token text allows + // Token.GetText to be called at any time regardless of the input stream + // implementation. + // + // The default value is false to avoid the performance and memory overhead of + // copying text for every token unless explicitly requested. + copyText bool +} + +func NewCommonTokenFactory(copyText bool) *CommonTokenFactory { + return &CommonTokenFactory{copyText: copyText} +} + +// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not +// explicitly copy token text when constructing tokens. 
+var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
+
+func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
+	t := NewCommonToken(source, ttype, channel, start, stop)
+
+	t.line = line
+	t.column = column
+
+	if text != "" {
+		t.SetText(text)
+	} else if c.copyText && source.charStream != nil {
+		t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
+	}
+
+	return t
+}
+
+func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
+	t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
+	t.SetText(text)
+
+	return t
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
new file mode 100644
index 0000000000..b75da9df08
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
@@ -0,0 +1,450 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"strconv"
+)
+
+// CommonTokenStream is an implementation of TokenStream that loads tokens from
+// a TokenSource on-demand and places the tokens in a buffer to provide access
+// to any previous token by index. This token stream ignores the value of
+// Token.getChannel. If your parser requires the token stream to filter tokens to
+// only those on a particular channel, such as Token.DEFAULT_CHANNEL or
+// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
+type CommonTokenStream struct {
+	channel int
+
+	// fetchedEOF indicates whether the Token.EOF token has been fetched from
+	// tokenSource and added to tokens. This field improves performance for the
+	// following cases:
+	//
+	// consume: The lookahead check in consume to prevent consuming the EOF symbol is
+	// optimized by checking the values of fetchedEOF and p instead of calling LA.
+	//
+	// fetch: The check to prevent adding multiple EOF symbols into tokens is
+	// trivial with this field.
+	fetchedEOF bool
+
+	// index into [tokens] of the current token (next token to consume).
+	// tokens[p] should be LT(1). It is set to -1 when the stream is first
+	// constructed or when SetTokenSource is called, indicating that the first token
+	// has not yet been fetched from the token source. For additional information,
+	// see the documentation of [IntStream] for a description of initializing methods.
+	index int
+
+	// tokenSource is the [TokenSource] from which tokens for this stream are
+	// fetched.
+	tokenSource TokenSource
+
+	// tokens contains all tokens fetched from the token source. The list is considered a
+	// complete view of the input once fetchedEOF is set to true.
+	tokens []Token
+}
+
+// NewCommonTokenStream creates a new CommonTokenStream instance using the supplied lexer to produce
+// tokens and will pull tokens from the given lexer channel.
+func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
+	return &CommonTokenStream{
+		channel:     channel,
+		index:       -1,
+		tokenSource: lexer,
+		tokens:      make([]Token, 0),
+	}
+}
+
+// GetAllTokens returns all tokens currently pulled from the token source.
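As a usage sketch of the buffering described above; `NewMyLexer` stands in for a hypothetical ANTLR-generated lexer and is not part of this diff:

```go
package main

import (
	"fmt"

	antlr "github.com/antlr4-go/antlr/v4"
)

func main() {
	input := antlr.NewInputStream("a + b")
	lexer := NewMyLexer(input) // hypothetical generated lexer
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)

	tokens.Fill() // pull everything up to EOF into the buffer
	for _, t := range tokens.GetAllTokens() {
		fmt.Printf("%q\n", t.GetText())
	}
}
```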
+func (c *CommonTokenStream) GetAllTokens() []Token { + return c.tokens +} + +func (c *CommonTokenStream) Mark() int { + return 0 +} + +func (c *CommonTokenStream) Release(_ int) {} + +func (c *CommonTokenStream) Reset() { + c.fetchedEOF = false + c.tokens = make([]Token, 0) + c.Seek(0) +} + +func (c *CommonTokenStream) Seek(index int) { + c.lazyInit() + c.index = c.adjustSeekIndex(index) +} + +func (c *CommonTokenStream) Get(index int) Token { + c.lazyInit() + + return c.tokens[index] +} + +func (c *CommonTokenStream) Consume() { + SkipEOFCheck := false + + if c.index >= 0 { + if c.fetchedEOF { + // The last token in tokens is EOF. Skip the check if p indexes any fetched. + // token except the last. + SkipEOFCheck = c.index < len(c.tokens)-1 + } else { + // No EOF token in tokens. Skip the check if p indexes a fetched token. + SkipEOFCheck = c.index < len(c.tokens) + } + } else { + // Not yet initialized + SkipEOFCheck = false + } + + if !SkipEOFCheck && c.LA(1) == TokenEOF { + panic("cannot consume EOF") + } + + if c.Sync(c.index + 1) { + c.index = c.adjustSeekIndex(c.index + 1) + } +} + +// Sync makes sure index i in tokens has a token and returns true if a token is +// located at index i and otherwise false. +func (c *CommonTokenStream) Sync(i int) bool { + n := i - len(c.tokens) + 1 // How many more elements do we need? + + if n > 0 { + fetched := c.fetch(n) + return fetched >= n + } + + return true +} + +// fetch adds n elements to buffer and returns the actual number of elements +// added to the buffer. +func (c *CommonTokenStream) fetch(n int) int { + if c.fetchedEOF { + return 0 + } + + for i := 0; i < n; i++ { + t := c.tokenSource.NextToken() + + t.SetTokenIndex(len(c.tokens)) + c.tokens = append(c.tokens, t) + + if t.GetTokenType() == TokenEOF { + c.fetchedEOF = true + + return i + 1 + } + } + + return n +} + +// GetTokens gets all tokens from start to stop inclusive. +func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token { + if start < 0 || stop < 0 { + return nil + } + + c.lazyInit() + + subset := make([]Token, 0) + + if stop >= len(c.tokens) { + stop = len(c.tokens) - 1 + } + + for i := start; i < stop; i++ { + t := c.tokens[i] + + if t.GetTokenType() == TokenEOF { + break + } + + if types == nil || types.contains(t.GetTokenType()) { + subset = append(subset, t) + } + } + + return subset +} + +func (c *CommonTokenStream) LA(i int) int { + return c.LT(i).GetTokenType() +} + +func (c *CommonTokenStream) lazyInit() { + if c.index == -1 { + c.setup() + } +} + +func (c *CommonTokenStream) setup() { + c.Sync(0) + c.index = c.adjustSeekIndex(0) +} + +func (c *CommonTokenStream) GetTokenSource() TokenSource { + return c.tokenSource +} + +// SetTokenSource resets the c token stream by setting its token source. +func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { + c.tokenSource = tokenSource + c.tokens = make([]Token, 0) + c.index = -1 + c.fetchedEOF = false +} + +// NextTokenOnChannel returns the index of the next token on channel given a +// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are +// no tokens on channel between 'i' and [TokenEOF]. 
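The `Sync`/`fetch` machinery above supports the usual channel-aware lookahead idiom; continuing with `tokens` from the previous sketch:

```go
for tokens.LA(1) != antlr.TokenEOF {
	fmt.Println(tokens.LT(1).GetText()) // LT(1) is the next on-channel token
	tokens.Consume()
}
```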
+func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int { + c.Sync(i) + + if i >= len(c.tokens) { + return -1 + } + + token := c.tokens[i] + + for token.GetChannel() != c.channel { + if token.GetTokenType() == TokenEOF { + return -1 + } + + i++ + c.Sync(i) + token = c.tokens[i] + } + + return i +} + +// previousTokenOnChannel returns the index of the previous token on channel +// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if +// there are no tokens on channel between i and 0. +func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int { + for i >= 0 && c.tokens[i].GetChannel() != channel { + i-- + } + + return i +} + +// GetHiddenTokensToRight collects all tokens on a specified channel to the +// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL +// or EOF. If channel is -1, it finds any non-default channel token. +func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token { + c.lazyInit() + + if tokenIndex < 0 || tokenIndex >= len(c.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1)) + } + + nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) + from := tokenIndex + 1 + + // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token + var to int + + if nextOnChannel == -1 { + to = len(c.tokens) - 1 + } else { + to = nextOnChannel + } + + return c.filterForChannel(from, to, channel) +} + +// GetHiddenTokensToLeft collects all tokens on channel to the left of the +// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is +// -1, it finds any non default channel token. +func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token { + c.lazyInit() + + if tokenIndex < 0 || tokenIndex >= len(c.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." 
+ strconv.Itoa(len(c.tokens)-1)) + } + + prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel) + + if prevOnChannel == tokenIndex-1 { + return nil + } + + // If there are none on channel to the left and prevOnChannel == -1 then from = 0 + from := prevOnChannel + 1 + to := tokenIndex - 1 + + return c.filterForChannel(from, to, channel) +} + +func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token { + hidden := make([]Token, 0) + + for i := left; i < right+1; i++ { + t := c.tokens[i] + + if channel == -1 { + if t.GetChannel() != LexerDefaultTokenChannel { + hidden = append(hidden, t) + } + } else if t.GetChannel() == channel { + hidden = append(hidden, t) + } + } + + if len(hidden) == 0 { + return nil + } + + return hidden +} + +func (c *CommonTokenStream) GetSourceName() string { + return c.tokenSource.GetSourceName() +} + +func (c *CommonTokenStream) Size() int { + return len(c.tokens) +} + +func (c *CommonTokenStream) Index() int { + return c.index +} + +func (c *CommonTokenStream) GetAllText() string { + c.Fill() + return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1)) +} + +func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { + if start == nil || end == nil { + return "" + } + + return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex())) +} + +func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string { + return c.GetTextFromInterval(interval.GetSourceInterval()) +} + +func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string { + c.lazyInit() + c.Sync(interval.Stop) + + start := interval.Start + stop := interval.Stop + + if start < 0 || stop < 0 { + return "" + } + + if stop >= len(c.tokens) { + stop = len(c.tokens) - 1 + } + + s := "" + + for i := start; i < stop+1; i++ { + t := c.tokens[i] + + if t.GetTokenType() == TokenEOF { + break + } + + s += t.GetText() + } + + return s +} + +// Fill gets all tokens from the lexer until EOF. +func (c *CommonTokenStream) Fill() { + c.lazyInit() + + for c.fetch(1000) == 1000 { + continue + } +} + +func (c *CommonTokenStream) adjustSeekIndex(i int) int { + return c.NextTokenOnChannel(i, c.channel) +} + +func (c *CommonTokenStream) LB(k int) Token { + if k == 0 || c.index-k < 0 { + return nil + } + + i := c.index + n := 1 + + // Find k good tokens looking backward + for n <= k { + // Skip off-channel tokens + i = c.previousTokenOnChannel(i-1, c.channel) + n++ + } + + if i < 0 { + return nil + } + + return c.tokens[i] +} + +func (c *CommonTokenStream) LT(k int) Token { + c.lazyInit() + + if k == 0 { + return nil + } + + if k < 0 { + return c.LB(-k) + } + + i := c.index + n := 1 // We know tokens[n] is valid + + // Find k good tokens + for n < k { + // Skip off-channel tokens, but make sure to not look past EOF + if c.Sync(i + 1) { + i = c.NextTokenOnChannel(i+1, c.channel) + } + + n++ + } + + return c.tokens[i] +} + +// getNumberOfOnChannelTokens counts EOF once. 
+func (c *CommonTokenStream) getNumberOfOnChannelTokens() int { + var n int + + c.Fill() + + for i := 0; i < len(c.tokens); i++ { + t := c.tokens[i] + + if t.GetChannel() == c.channel { + n++ + } + + if t.GetTokenType() == TokenEOF { + break + } + } + + return n +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/comparators.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/comparators.go new file mode 100644 index 0000000000..7467e9b43d --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/comparators.go @@ -0,0 +1,150 @@ +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +// This file contains all the implementations of custom comparators used for generic collections when the +// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. Normally, we would +// put the comparators in the source file for the struct themselves, but given the organization of this code is +// sorta kinda based upon the Java code, I found it confusing trying to find out which comparator was where and used by +// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig +// collections requires three different comparators depending on what the collection is being used for. Collecting - pun intended - +// all the comparators here, makes it much easier to see which implementation of hash and equals is used by which collection. +// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations. + +// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of +// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some +// type safety and avoid having to implement this for every type that we want to perform comparison on. +// +// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which +// allows us to use it in any collection instance that does not require a special hash or equals implementation. +type ObjEqComparator[T Collectable[T]] struct{} + +var ( + aStateEqInst = &ObjEqComparator[ATNState]{} + aConfEqInst = &ObjEqComparator[*ATNConfig]{} + + // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache + aConfCompInst = &ATNConfigComparator[*ATNConfig]{} + atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{} + dfaStateEqInst = &ObjEqComparator[*DFAState]{} + semctxEqInst = &ObjEqComparator[SemanticContext]{} + atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{} + pContextEqInst = &ObjEqComparator[*PredictionContext]{} +) + +// Equals2 delegates to the Equals() method of type T +func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool { + return o1.Equals(o2) +} + +// Hash1 delegates to the Hash() method of type T +func (c *ObjEqComparator[T]) Hash1(o T) int { + + return o.Hash() +} + +type SemCComparator[T Collectable[T]] struct{} + +// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet +// and has a custom Equals() and Hash() implementation, because equality is not based on the +// standard Hash() and Equals() methods of the ATNConfig type. 
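+//
+// As an illustrative sketch only: an instance such as aConfCompInst is what a
+// JStore is parameterized with, so that lookups hash and compare configs by
+// (state, alt, semantic context). The collection tag and description below are
+// assumptions made for the example:
+//
+//	store := NewJStore[*ATNConfig, *ATNConfigComparator[*ATNConfig]](
+//		aConfCompInst, ATNConfigLookupCollection, "doc example")
+//	interned, present := store.Put(cfg) // cfg being some previously built *ATNConfig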
+type ATNConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for configLookup +func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetAlt() == o2.GetAlt() && + o1.GetSemanticContext().Equals(o2.GetSemanticContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup +func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int { + + hash := 7 + hash = 31*hash + o.GetState().GetStateNumber() + hash = 31*hash + o.GetAlt() + hash = 31*hash + o.GetSemanticContext().Hash() + return hash +} + +// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets +type ATNAltConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for configLookup +func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetContext().Equals(o2.GetContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup +func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int { + h := murmurInit(7) + h = murmurUpdate(h, o.GetState().GetStateNumber()) + h = murmurUpdate(h, o.GetContext().Hash()) + return murmurFinish(h, 2) +} + +// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet +// and has a custom Equals() and Hash() implementation, because equality is not based on the +// standard Hash() and Equals() methods of the ATNConfig type. +type BaseATNConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet +func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetAlt() == o2.GetAlt() && + o1.GetSemanticContext().Equals(o2.GetSemanticContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just +// delegates to the standard Hash() method of the ATNConfig type. 
+func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int { + return o.Hash() +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/configuration.go new file mode 100644 index 0000000000..c2b724514d --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/configuration.go @@ -0,0 +1,214 @@ +package antlr + +type runtimeConfiguration struct { + statsTraceStacks bool + lexerATNSimulatorDebug bool + lexerATNSimulatorDFADebug bool + parserATNSimulatorDebug bool + parserATNSimulatorTraceATNSim bool + parserATNSimulatorDFADebug bool + parserATNSimulatorRetryDebug bool + lRLoopEntryBranchOpt bool + memoryManager bool +} + +// Global runtime configuration +var runtimeConfig = runtimeConfiguration{ + lRLoopEntryBranchOpt: true, +} + +type runtimeOption func(*runtimeConfiguration) error + +// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options. +// It uses the functional options pattern for go. This is a package global function as it operates on the runtime +// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is +// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the +// only maintainer). However, it is possible that you might want to use this to set a global option concerning the +// memory allocation type used by the runtime such as sync.Pool or not. +// +// The options are applied in the order they are passed in, so the last option will override any previous options. +// +// For example, if you want to turn on the collection create point stack flag to true, you can do: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true)) +// +// If you want to turn it off, you can do: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false)) +func ConfigureRuntime(options ...runtimeOption) error { + for _, option := range options { + err := option(&runtimeConfig) + if err != nil { + return err + } + } + return nil +} + +// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of +// certain structs, such as collections, or the use point of certain methods such as Put(). +// Because this can be expensive, it is turned off by default. However, it +// can be useful to track down exactly where memory is being created and used. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false)) +func WithStatsTraceStacks(trace bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.statsTraceStacks = trace + return nil + } +} + +// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN] +// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. 
+// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false)) +func WithLexerATNSimulatorDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lexerATNSimulatorDebug = debug + return nil + } +} + +// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA] +// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false)) +func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lexerATNSimulatorDFADebug = debug + return nil + } +} + +// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN] +// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false)) +func WithParserATNSimulatorDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorDebug = debug + return nil + } +} + +// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator +// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false)) +func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorTraceATNSim = trace + return nil + } +} + +// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA] +// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false)) +func WithParserATNSimulatorDFADebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorDFADebug = debug + return nil + } +} + +// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA] +// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime. +// Only useful to the runtime maintainers. 
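+//
+// Options compose, so several flags can be set in one call; a small sketch
+// (the choice of flags here is arbitrary):
+//
+//	_ = ConfigureRuntime(
+//		WithParserATNSimulatorDebug(true),
+//		WithParserATNSimulatorRetryDebug(true),
+//	)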
+//
+// Use:
+//
+//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true))
+//
+// You can turn it off at any time using:
+//
+//	antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false))
+func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption {
+	return func(config *runtimeConfiguration) error {
+		config.parserATNSimulatorRetryDebug = debug
+		return nil
+	}
+}
+
+// WithLRLoopEntryBranchOpt sets the global flag indicating whether left recursive loop operations should be
+// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime.
+// Passing false turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator].
+//
+// Note that the default is to use this optimization.
+//
+// Use:
+//
+//	antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true))
+//
+// You can turn it off at any time using:
+//
+//	antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false))
+func WithLRLoopEntryBranchOpt(optimize bool) runtimeOption {
+	return func(config *runtimeConfiguration) error {
+		config.lRLoopEntryBranchOpt = optimize
+		return nil
+	}
+}
+
+// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful
+// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which
+// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute
+// for fixing your grammar by ridding yourself of extreme ambiguity. But if you are just trying to reuse an open-source
+// grammar, this may help make it more practical.
+//
+// Note that the default is to use normal Go memory allocation and not pool memory.
+//
+// Use:
+//
+//	antlr.ConfigureRuntime(antlr.WithMemoryManager(true))
+//
+// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other
+// and should remember to nil out any references to the parser or lexer when you are done with them.
+func WithMemoryManager(use bool) runtimeOption {
+	return func(config *runtimeConfiguration) error {
+		config.memoryManager = use
+		return nil
+	}
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa.go
new file mode 100644
index 0000000000..6b63eb1589
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can
+// reach and the transitions between them.
+type DFA struct {
+	// atnStartState is the ATN state in which this was created
+	atnStartState DecisionState
+
+	decision int
+
+	// states is all the DFA states. Use Map to get the old state back; Set can only
+	// indicate whether it is there. Go maps handle key hash collisions internally and are very
+	// good, but the DFAState is an object and can't be used directly as the key as it can in, say, Java
+	// and C#, whereby if the hashcode is the same for two objects, then Equals() is called against them
+	// to see if they really are the same object. Hence, we have our own map storage.
+ // + states *JStore[*DFAState, *ObjEqComparator[*DFAState]] + + numstates int + + s0 *DFAState + + // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa. + // True if the DFA is for a precedence decision and false otherwise. + precedenceDfa bool +} + +func NewDFA(atnStartState DecisionState, decision int) *DFA { + dfa := &DFA{ + atnStartState: atnStartState, + decision: decision, + states: nil, // Lazy initialize + } + if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision { + dfa.precedenceDfa = true + dfa.s0 = NewDFAState(-1, NewATNConfigSet(false)) + dfa.s0.isAcceptState = false + dfa.s0.requiresFullContext = false + } + return dfa +} + +// getPrecedenceStartState gets the start state for the current precedence and +// returns the start state corresponding to the specified precedence if a start +// state exists for the specified precedence and nil otherwise. d must be a +// precedence DFA. See also isPrecedenceDfa. +func (d *DFA) getPrecedenceStartState(precedence int) *DFAState { + if !d.getPrecedenceDfa() { + panic("only precedence DFAs may contain a precedence start state") + } + + // s0.edges is never nil for a precedence DFA + if precedence < 0 || precedence >= len(d.getS0().getEdges()) { + return nil + } + + return d.getS0().getIthEdge(precedence) +} + +// setPrecedenceStartState sets the start state for the current precedence. d +// must be a precedence DFA. See also isPrecedenceDfa. +func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) { + if !d.getPrecedenceDfa() { + panic("only precedence DFAs may contain a precedence start state") + } + + if precedence < 0 { + return + } + + // Synchronization on s0 here is ok. When the DFA is turned into a + // precedence DFA, s0 will be initialized once and not updated again. s0.edges + // is never nil for a precedence DFA. + s0 := d.getS0() + if precedence >= s0.numEdges() { + edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...) + s0.setEdges(edges) + d.setS0(s0) + } + + s0.setIthEdge(precedence, startState) +} + +func (d *DFA) getPrecedenceDfa() bool { + return d.precedenceDfa +} + +// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs +// from the current DFA configuration, then d.states is cleared, the initial +// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to +// store the start states for individual precedence values if precedenceDfa is +// true or nil otherwise, and d.precedenceDfa is updated. +func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { + if d.getPrecedenceDfa() != precedenceDfa { + d.states = nil // Lazy initialize + d.numstates = 0 + + if precedenceDfa { + precedenceState := NewDFAState(-1, NewATNConfigSet(false)) + precedenceState.setEdges(make([]*DFAState, 0)) + precedenceState.isAcceptState = false + precedenceState.requiresFullContext = false + d.setS0(precedenceState) + } else { + d.setS0(nil) + } + + d.precedenceDfa = precedenceDfa + } +} + +// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy +// instantiation of the states JMap. +func (d *DFA) Len() int { + if d.states == nil { + return 0 + } + return d.states.Len() +} + +// Get returns a state that matches s if it is present in the DFA state set. We defer to this +// function instead of accessing states directly so that we can implement lazy instantiation of the states JMap. 
+func (d *DFA) Get(s *DFAState) (*DFAState, bool) {
+	if d.states == nil {
+		return nil, false
+	}
+	return d.states.Get(s)
+}
+
+func (d *DFA) Put(s *DFAState) (*DFAState, bool) {
+	if d.states == nil {
+		d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put")
+	}
+	return d.states.Put(s)
+}
+
+func (d *DFA) getS0() *DFAState {
+	return d.s0
+}
+
+func (d *DFA) setS0(s *DFAState) {
+	d.s0 = s
+}
+
+// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil.
+func (d *DFA) sortedStates() []*DFAState {
+	if d.states == nil {
+		return []*DFAState{}
+	}
+	vs := d.states.SortedSlice(func(i, j *DFAState) bool {
+		return i.stateNumber < j.stateNumber
+	})
+
+	return vs
+}
+
+func (d *DFA) String(literalNames []string, symbolicNames []string) string {
+	if d.getS0() == nil {
+		return ""
+	}
+
+	return NewDFASerializer(d, literalNames, symbolicNames).String()
+}
+
+func (d *DFA) ToLexerString() string {
+	if d.getS0() == nil {
+		return ""
+	}
+
+	return NewLexerDFASerializer(d).String()
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
new file mode 100644
index 0000000000..0e11009899
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized
+// strings.
+type DFASerializer struct {
+	dfa           *DFA
+	literalNames  []string
+	symbolicNames []string
+}
+
+func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
+	if literalNames == nil {
+		literalNames = make([]string, 0)
+	}
+
+	if symbolicNames == nil {
+		symbolicNames = make([]string, 0)
+	}
+
+	return &DFASerializer{
+		dfa:           dfa,
+		literalNames:  literalNames,
+		symbolicNames: symbolicNames,
+	}
+}
+
+func (d *DFASerializer) String() string {
+	if d.dfa.getS0() == nil {
+		return ""
+	}
+
+	buf := ""
+	states := d.dfa.sortedStates()
+
+	for _, s := range states {
+		if s.edges != nil {
+			n := len(s.edges)
+
+			for j := 0; j < n; j++ {
+				t := s.edges[j]
+
+				if t != nil && t.stateNumber != 0x7FFFFFFF {
+					buf += d.GetStateString(s)
+					buf += "-"
+					buf += d.getEdgeLabel(j)
+					buf += "->"
+					buf += d.GetStateString(t)
+					buf += "\n"
+				}
+			}
+		}
+	}
+
+	if len(buf) == 0 {
+		return ""
+	}
+
+	return buf
+}
+
+func (d *DFASerializer) getEdgeLabel(i int) string {
+	if i == 0 {
+		return "EOF"
+	} else if d.literalNames != nil && i-1 < len(d.literalNames) {
+		return d.literalNames[i-1]
+	} else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
+		return d.symbolicNames[i-1]
+	}
+
+	return strconv.Itoa(i - 1)
+}
+
+func (d *DFASerializer) GetStateString(s *DFAState) string {
+	var a, b string
+
+	if s.isAcceptState {
+		a = ":"
+	}
+
+	if s.requiresFullContext {
+		b = "^"
+	}
+
+	baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b
+
+	if s.isAcceptState {
+		if s.predicates != nil {
+			return baseStateStr + "=>" + fmt.Sprint(s.predicates)
+		}
+
+		return baseStateStr + "=>" + fmt.Sprint(s.prediction)
+	}
+
+	return baseStateStr
+}
+
+type LexerDFASerializer struct {
+	*DFASerializer
+}
+
+func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
+	return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
+}
+
+func (l *LexerDFASerializer) getEdgeLabel(i int) string {
+	var sb strings.Builder
+	sb.Grow(6)
+	sb.WriteByte('\'')
+	sb.WriteRune(rune(i))
+	sb.WriteByte('\'')
+	return sb.String()
+}
+
+func (l *LexerDFASerializer) String() string {
+	if l.dfa.getS0() == nil {
+		return ""
+	}
+
+	buf := ""
+	states := l.dfa.sortedStates()
+
+	for i := 0; i < len(states); i++ {
+		s := states[i]
+
+		if s.edges != nil {
+			n := len(s.edges)
+
+			for j := 0; j < n; j++ {
+				t := s.edges[j]
+
+				if t != nil && t.stateNumber != 0x7FFFFFFF {
+					buf += l.GetStateString(s)
+					buf += "-"
+					buf += l.getEdgeLabel(j)
+					buf += "->"
+					buf += l.GetStateString(t)
+					buf += "\n"
+				}
+			}
+		}
+	}
+
+	if len(buf) == 0 {
+		return ""
+	}
+
+	return buf
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
new file mode 100644
index 0000000000..6541430745
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
@@ -0,0 +1,170 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"fmt"
+)
+
+// PredPrediction maps a predicate to a predicted alternative.
+type PredPrediction struct {
+	alt  int
+	pred SemanticContext
+}
+
+func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
+	return &PredPrediction{alt: alt, pred: pred}
+}
+
+func (p *PredPrediction) String() string {
+	return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
+}
+
+// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi,
+// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
+// states the ATN can be in after reading each input symbol. That is to say,
+// after reading input a1, a2,..an, the DFA is in a state that represents the
+// subset T of the states of the ATN that are reachable from the ATN's start
+// state along some path labeled a1a2..an."
+//
+// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of
+// states the [ATN] could be in. We need to track the alt predicted by each state
+// as well, however. More importantly, we need to maintain a stack of states,
+// tracking the closure operations as they jump from rule to rule, emulating
+// rule invocations (method calls). I have to add a stack to simulate the proper
+// lookahead sequences for the underlying LL grammar from which the ATN was
+// derived.
+//
+// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a
+// state (ala normal conversion) and a [RuleContext] describing the chain of rules
+// (if any) followed to arrive at that state.
+//
+// A [DFAState] may have multiple references to a particular state, but with
+// different [ATN] contexts (with same or different alts) meaning that state was
+// reached via a different set of rule invocations.
+type DFAState struct {
+	stateNumber int
+	configs     *ATNConfigSet
+
+	// edges elements point to the target of the symbol. Shift up by 1 so (-1)
+	// Token.EOF maps to the first element.
+	edges []*DFAState
+
+	isAcceptState bool
+
+	// prediction is the 'ttype' we match or alt we predict if the state is 'accept'.
+	// Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
+	// requiresFullContext.
+ prediction int + + lexerActionExecutor *LexerActionExecutor + + // requiresFullContext indicates it was created during an SLL prediction that + // discovered a conflict between the configurations in the state. Future + // ParserATNSimulator.execATN invocations immediately jump doing + // full context prediction if true. + requiresFullContext bool + + // predicates is the predicates associated with the ATN configurations of the + // DFA state during SLL parsing. When we have predicates, requiresFullContext + // is false, since full context prediction evaluates predicates on-the-fly. If + // d is + // not nil, then prediction is ATN.INVALID_ALT_NUMBER. + // + // We only use these for non-requiresFullContext but conflicting states. That + // means we know from the context (it's $ or we don't dip into outer context) + // that it's an ambiguity not a conflict. + // + // This list is computed by + // ParserATNSimulator.predicateDFAState. + predicates []*PredPrediction +} + +func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState { + if configs == nil { + configs = NewATNConfigSet(false) + } + + return &DFAState{configs: configs, stateNumber: stateNumber} +} + +// GetAltSet gets the set of all alts mentioned by all ATN configurations in d. +func (d *DFAState) GetAltSet() []int { + var alts []int + + if d.configs != nil { + for _, c := range d.configs.configs { + alts = append(alts, c.GetAlt()) + } + } + + if len(alts) == 0 { + return nil + } + + return alts +} + +func (d *DFAState) getEdges() []*DFAState { + return d.edges +} + +func (d *DFAState) numEdges() int { + return len(d.edges) +} + +func (d *DFAState) getIthEdge(i int) *DFAState { + return d.edges[i] +} + +func (d *DFAState) setEdges(newEdges []*DFAState) { + d.edges = newEdges +} + +func (d *DFAState) setIthEdge(i int, edge *DFAState) { + d.edges[i] = edge +} + +func (d *DFAState) setPrediction(v int) { + d.prediction = v +} + +func (d *DFAState) String() string { + var s string + if d.isAcceptState { + if d.predicates != nil { + s = "=>" + fmt.Sprint(d.predicates) + } else { + s = "=>" + fmt.Sprint(d.prediction) + } + } + + return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s) +} + +func (d *DFAState) Hash() int { + h := murmurInit(7) + h = murmurUpdate(h, d.configs.Hash()) + return murmurFinish(h, 1) +} + +// Equals returns whether d equals other. Two DFAStates are equal if their ATN +// configuration sets are the same. This method is used to see if a state +// already exists. +// +// Because the number of alternatives and number of ATN configurations are +// finite, there is a finite number of DFA states that can be processed. This is +// necessary to show that the algorithm terminates. +// +// Cannot test the DFA state numbers here because in +// ParserATNSimulator.addDFAState we need to know if any other state exists that +// has d exact set of ATN configurations. The stateNumber is irrelevant. +func (d *DFAState) Equals(o Collectable[*DFAState]) bool { + if d == o { + return true + } + + return d.configs.Equals(o.(*DFAState).configs) +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go new file mode 100644 index 0000000000..bd2cd8bc3a --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go @@ -0,0 +1,110 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" +) + +// +// This implementation of {@link ANTLRErrorListener} can be used to identify +// certain potential correctness and performance problems in grammars. "reports" +// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate +// message. +// +// + +type DiagnosticErrorListener struct { + *DefaultErrorListener + + exactOnly bool +} + +//goland:noinspection GoUnusedExportedFunction +func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { + + n := new(DiagnosticErrorListener) + + // whether all ambiguities or only exact ambiguities are Reported. + n.exactOnly = exactOnly + return n +} + +func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { + if d.exactOnly && !exact { + return + } + msg := "reportAmbiguity d=" + + d.getDecisionDescription(recognizer, dfa) + + ": ambigAlts=" + + d.getConflictingAlts(ambigAlts, configs).String() + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) { + + msg := "reportAttemptingFullContext d=" + + d.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) { + msg := "reportContextSensitivity d=" + + d.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string { + decision := dfa.decision + ruleIndex := dfa.atnStartState.GetRuleIndex() + + ruleNames := recognizer.GetRuleNames() + if ruleIndex < 0 || ruleIndex >= len(ruleNames) { + return strconv.Itoa(decision) + } + ruleName := ruleNames[ruleIndex] + if ruleName == "" { + return strconv.Itoa(decision) + } + return strconv.Itoa(decision) + " (" + ruleName + ")" +} + +// Computes the set of conflicting or ambiguous alternatives from a +// configuration set, if that information was not already provided by the +// parser. +// +// @param ReportedAlts The set of conflicting or ambiguous alternatives, as +// Reported by the parser. +// @param configs The conflicting or ambiguous configuration set. +// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise +// returns the set of alternatives represented in {@code configs}. 
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet { + if ReportedAlts != nil { + return ReportedAlts + } + result := NewBitSet() + for _, c := range set.configs { + result.add(c.GetAlt()) + } + + return result +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/error_listener.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/error_listener.go new file mode 100644 index 0000000000..21a0216434 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/error_listener.go @@ -0,0 +1,100 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "os" + "strconv" +) + +// Provides an empty default implementation of {@link ANTLRErrorListener}. The +// default implementation of each method does nothing, but can be overridden as +// necessary. + +type ErrorListener interface { + SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) + ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) + ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) + ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) +} + +type DefaultErrorListener struct { +} + +//goland:noinspection GoUnusedExportedFunction +func NewDefaultErrorListener() *DefaultErrorListener { + return new(DefaultErrorListener) +} + +func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) { +} + +func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) { +} + +type ConsoleErrorListener struct { + *DefaultErrorListener +} + +func NewConsoleErrorListener() *ConsoleErrorListener { + return new(ConsoleErrorListener) +} + +// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}. 
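+//
+// A brief usage sketch (p is assumed to be any generated parser, which embeds
+// the runtime's BaseRecognizer): the console listener is installed by default
+// and can be swapped out through the recognizer API:
+//
+//	p.RemoveErrorListeners()
+//	p.AddErrorListener(ConsoleErrorListenerINSTANCE)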
+var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
+
+// SyntaxError prints messages to System.err containing the
+// values of line, charPositionInLine, and msg using
+// the following format:
+//
+//	line <line>:<column> <msg>
+func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) {
+	_, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
+}
+
+type ProxyErrorListener struct {
+	*DefaultErrorListener
+	delegates []ErrorListener
+}
+
+func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
+	if delegates == nil {
+		panic("delegates is not provided")
+	}
+	l := new(ProxyErrorListener)
+	l.delegates = delegates
+	return l
+}
+
+func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+	for _, d := range p.delegates {
+		d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
+	}
+}
+
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+	for _, d := range p.delegates {
+		d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+	}
+}
+
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) {
+	for _, d := range p.delegates {
+		d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
+	}
+}
+
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) {
+	for _, d := range p.delegates {
+		d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
+	}
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
new file mode 100644
index 0000000000..9db2be1c74
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
@@ -0,0 +1,702 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+type ErrorStrategy interface {
+	reset(Parser)
+	RecoverInline(Parser) Token
+	Recover(Parser, RecognitionException)
+	Sync(Parser)
+	InErrorRecoveryMode(Parser) bool
+	ReportError(Parser, RecognitionException)
+	ReportMatch(Parser)
+}
+
+// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for
+// error reporting and recovery in ANTLR parsers.
+type DefaultErrorStrategy struct {
+	errorRecoveryMode bool
+	lastErrorIndex    int
+	lastErrorStates   *IntervalSet
+}
+
+var _ ErrorStrategy = &DefaultErrorStrategy{}
+
+func NewDefaultErrorStrategy() *DefaultErrorStrategy {
+
+	d := new(DefaultErrorStrategy)
+
+	// Indicates whether the error strategy is currently "recovering from an
+	// error". This is used to suppress Reporting multiple error messages while
+	// attempting to recover from a detected syntax error.
+	//
+	// See also: InErrorRecoveryMode
+	//
+	d.errorRecoveryMode = false
+
+	// The index into the input stream where the last error occurred.
+	// This is used to prevent infinite loops where an error is found
+	// but no token is consumed during recovery...another error is found,
+	// ad nauseam. This is a failsafe mechanism to guarantee that at least
+	// one token/tree node is consumed for two errors.
+	//
+	d.lastErrorIndex = -1
+	d.lastErrorStates = nil
+	return d
+}
+
+// reset resets the error handler state. The default implementation simply calls
+// endErrorCondition to ensure that the handler is not in error recovery mode.
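+//
+// Custom strategies usually embed [DefaultErrorStrategy] and override selected
+// methods only; a minimal sketch (the type name is invented for this example,
+// and it is assumed the generated parser exposes SetErrorHandler to install it):
+//
+//	type quietErrorStrategy struct{ *DefaultErrorStrategy }
+//
+//	func (s *quietErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
+//		// Suppress all reporting; recovery behavior is inherited unchanged.
+//	}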
+func (d *DefaultErrorStrategy) reset(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// This method is called to enter error recovery mode when a recognition +// exception is Reported. +func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) { + d.errorRecoveryMode = true +} + +func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool { + return d.errorRecoveryMode +} + +// This method is called to leave error recovery mode after recovering from +// a recognition exception. +func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) { + d.errorRecoveryMode = false + d.lastErrorStates = nil + d.lastErrorIndex = -1 +} + +// ReportMatch is the default implementation of error matching and simply calls endErrorCondition. +func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// ReportError is the default implementation of error reporting. +// It returns immediately if the handler is already +// in error recovery mode. Otherwise, it calls [beginErrorCondition] +// and dispatches the Reporting task based on the runtime type of e +// according to the following table. +// +// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative] +// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch] +// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate] +// All other types : Calls [NotifyErrorListeners] to Report the exception +func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { + // if we've already Reported an error and have not Matched a token + // yet successfully, don't Report any errors. + if d.InErrorRecoveryMode(recognizer) { + return // don't Report spurious errors + } + d.beginErrorCondition(recognizer) + + switch t := e.(type) { + default: + fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) + // fmt.Println(e.stack) + recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) + case *NoViableAltException: + d.ReportNoViableAlternative(recognizer, t) + case *InputMisMatchException: + d.ReportInputMisMatch(recognizer, t) + case *FailedPredicateException: + d.ReportFailedPredicate(recognizer, t) + } +} + +// Recover is the default recovery implementation. +// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set - +// loosely the set of tokens that can follow the current rule. +func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) { + + if d.lastErrorIndex == recognizer.GetInputStream().Index() && + d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { + // uh oh, another error at same token index and previously-Visited + // state in ATN must be a case where LT(1) is in the recovery + // token set so nothing got consumed. Consume a single token + // at least to prevent an infinite loop d is a failsafe. + recognizer.Consume() + } + d.lastErrorIndex = recognizer.GetInputStream().Index() + if d.lastErrorStates == nil { + d.lastErrorStates = NewIntervalSet() + } + d.lastErrorStates.addOne(recognizer.GetState()) + followSet := d.GetErrorRecoverySet(recognizer) + d.consumeUntil(recognizer, followSet) +} + +// Sync is the default implementation of error strategy synchronization. +// +// This Sync makes sure that the current lookahead symbol is consistent with what were expecting +// at this point in the [ATN]. 
+// You can call this anytime but ANTLR only
+// generates code to check before sub-rules/loops and each iteration.
+//
+// Implements [Jim Idle]'s magic Sync mechanism in closures and optional
+// sub-rules. E.g.:
+//
+//	a : Sync ( stuff Sync )*
+//	Sync : {consume to what can follow Sync}
+//
+// At the start of a sub-rule upon error, Sync performs single
+// token deletion, if possible. If it can't do that, it bails on the current
+// rule and uses the default error recovery, which consumes until the
+// reSynchronization set of the current rule.
+//
+// If the sub-rule is optional
+//
+//	(...)? or (...)*
+//
+// or is a block with an empty alternative, then the expected set includes what follows
+// the sub-rule.
+//
+// During loop iteration, it consumes until it sees a token that can start a
+// sub-rule or what follows the loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
+//
+// # Origins
+//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
+// A single mismatched token or missing token would force the parser to bail
+// out of the entire rules surrounding the loop. So, for rule:
+//
+//	classfunc : 'class' ID '{' member* '}'
+//
+// input with an extra token between members would force the parser to
+// consume until it found the next class definition rather than the next
+// member definition of the current class.
+//
+// This functionality cost a bit of effort because the parser has to
+// compare the token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off this
+// functionality by simply overriding this method as empty:
+//
+//	{ }
+//
+// [Jim Idle]: https://github.com/jimidle
+func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
+	// If already recovering, don't try to Sync
+	if d.InErrorRecoveryMode(recognizer) {
+		return
+	}
+
+	s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+	la := recognizer.GetTokenStream().LA(1)
+
+	// try the cheaper subset first; might get lucky. seems to shave a wee bit off
+	nextTokens := recognizer.GetATN().NextTokens(s, nil)
+	if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
+		return
+	}
+
+	switch s.GetStateType() {
+	case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
+		// Report error and recover if possible
+		if d.SingleTokenDeletion(recognizer) != nil {
+			return
+		}
+		recognizer.SetError(NewInputMisMatchException(recognizer))
+	case ATNStatePlusLoopBack, ATNStateStarLoopBack:
+		d.ReportUnwantedToken(recognizer)
+		expecting := NewIntervalSet()
+		expecting.addSet(recognizer.GetExpectedTokens())
+		whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer))
+		d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
+	default:
+		// do nothing if we can't identify the exact kind of ATN state
+	}
+}
+
+// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException].
+//
+// See also [ReportError]
+func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
+	tokens := recognizer.GetTokenStream()
+	var input string
+	if tokens != nil {
+		if e.startToken.GetTokenType() == TokenEOF {
+			input = "<EOF>"
+		} else {
+			input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
+		}
+	} else {
+		input = "<unknown input>"
+	}
+	msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
+	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException]
+//
+// See also: [ReportError]
+func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+	msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
+		" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException].
+//
+// See also: [ReportError]
+func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
+	ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
+	msg := "rule " + ruleName + " " + e.message
+	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportUnwantedToken is called to report a syntax error that requires the removal
+// of a token from the input stream. At the time this method is called, the
+// erroneous symbol is the current LT(1) symbol and has not yet been
+// removed from the input stream. When this method returns,
+// recognizer is in error recovery mode.
+//
+// This method is called when singleTokenDeletion identifies
+// single-token deletion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to
+// enter error recovery mode, followed by calling
+// [NotifyErrorListeners]
+func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
+	if d.InErrorRecoveryMode(recognizer) {
+		return
+	}
+	d.beginErrorCondition(recognizer)
+	t := recognizer.GetCurrentToken()
+	tokenName := d.GetTokenErrorDisplay(t)
+	expecting := d.GetExpectedTokens(recognizer)
+	msg := "extraneous input " + tokenName + " expecting " +
+		expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+	recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// ReportMissingToken is called to report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time this
+// method is called, the missing token has not yet been inserted. When this
+// method returns, recognizer is in error recovery mode.
+//
+// This method is called when singleTokenInsertion identifies
+// single-token insertion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode.
+// Otherwise, it calls beginErrorCondition to
+// enter error recovery mode, followed by calling
+// [NotifyErrorListeners]
+func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
+	if d.InErrorRecoveryMode(recognizer) {
+		return
+	}
+	d.beginErrorCondition(recognizer)
+	t := recognizer.GetCurrentToken()
+	expecting := d.GetExpectedTokens(recognizer)
+	msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
+		" at " + d.GetTokenErrorDisplay(t)
+	recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// The RecoverInline default implementation attempts to recover from the mismatched input
+// by using single token insertion and deletion as described below. If the
+// recovery attempt fails, this method panics with [InputMisMatchException].
+// TODO: Not sure that panic() is the right thing to do here - JI
+//
+// # EXTRA TOKEN (single token deletion)
+//
+// LA(1) is not what we are looking for. If LA(2) has the
+// right token, however, then assume LA(1) is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the LA(2) token) as the successful result of the Match operation.
+//
+// # This recovery strategy is implemented by singleTokenDeletion
+//
+// # MISSING TOKEN (single token insertion)
+//
+// If the current token - at LA(1) - is consistent with what could come
+// after the expected LA(1) token, then assume the token is missing
+// and use the parser's [TokenFactory] to create it on the fly. The
+// “insertion” is performed by returning the created token as the successful
+// result of the Match operation.
+//
+// This recovery strategy is implemented by [SingleTokenInsertion].
+//
+// # Example
+//
+// For example, input i=(3 is clearly missing the ')'. When
+// the parser returns from the nested call to expr, it will have
+// the call chain:
+//
+//	stat → expr → atom
+//
+// and it will be trying to Match the ')' at this point in the
+// derivation:
+//
+//	: ID '=' '(' INT ')' ('+' atom)* ';'
+//	                    ^
+//
+// The attempt to [Match] ')' will fail when it sees ';' and
+// call [RecoverInline]. To recover, it sees that LA(1)==';'
+// is in the set of tokens that can follow the ')' token reference
+// in rule atom. It can assume that you forgot the ')'.
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+	// SINGLE TOKEN DELETION
+	MatchedSymbol := d.SingleTokenDeletion(recognizer)
+	if MatchedSymbol != nil {
+		// we have deleted the extra token.
+		// now, move past ttype token as if all were ok
+		recognizer.Consume()
+		return MatchedSymbol
+	}
+	// SINGLE TOKEN INSERTION
+	if d.SingleTokenInsertion(recognizer) {
+		return d.GetMissingSymbol(recognizer)
+	}
+	// even that didn't work; report the mismatch
+	recognizer.SetError(NewInputMisMatchException(recognizer))
+	return nil
+}
+
+// SingleTokenInsertion implements the single-token insertion inline error recovery
+// strategy. It is called by [RecoverInline] if the single-token
+// deletion strategy fails to recover from the mismatched input. If this
+// method returns true, recognizer will be in error recovery
+// mode.
+//
+// This method determines whether single-token insertion is viable by
+// checking if the LA(1) input symbol could be successfully Matched
+// if it were instead the LA(2) symbol. If this method returns
+// true, the caller is responsible for creating and inserting a
+// token with the correct type to produce this behavior.
+// +// This func returns true if single-token insertion is a viable recovery +// strategy for the current mismatched input. +func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { + currentSymbolType := recognizer.GetTokenStream().LA(1) + // if current token is consistent with what could come after current + // ATN state, then we know we're missing a token error recovery + // is free to conjure up and insert the missing token + atn := recognizer.GetInterpreter().atn + currentState := atn.states[recognizer.GetState()] + next := currentState.GetTransitions()[0].getTarget() + expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) + if expectingAtLL2.contains(currentSymbolType) { + d.ReportMissingToken(recognizer) + return true + } + + return false +} + +// SingleTokenDeletion implements the single-token deletion inline error recovery +// strategy. It is called by [RecoverInline] to attempt to recover +// from mismatched input. If this method returns nil, the parser and error +// handler state will not have changed. If this method returns non-nil, +// recognizer will not be in error recovery mode since the +// returned token was a successful Match. +// +// If the single-token deletion is successful, this method calls +// [ReportUnwantedToken] to Report the error, followed by +// [Consume] to actually “delete” the extraneous token. Then, +// before returning, [ReportMatch] is called to signal a successful +// Match. +// +// The func returns the successfully Matched [Token] instance if single-token +// deletion successfully recovers from the mismatched input, otherwise nil. +func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { + NextTokenType := recognizer.GetTokenStream().LA(2) + expecting := d.GetExpectedTokens(recognizer) + if expecting.contains(NextTokenType) { + d.ReportUnwantedToken(recognizer) + // print("recoverFromMisMatchedToken deleting " \ + // + str(recognizer.GetTokenStream().LT(1)) \ + // + " since " + str(recognizer.GetTokenStream().LT(2)) \ + // + " is what we want", file=sys.stderr) + recognizer.Consume() // simply delete extra token + // we want to return the token we're actually Matching + MatchedSymbol := recognizer.GetCurrentToken() + d.ReportMatch(recognizer) // we know current token is correct + return MatchedSymbol + } + + return nil +} + +// GetMissingSymbol conjures up a missing token during error recovery. +// +// The recognizer attempts to recover from single missing +// symbols. But, actions might refer to that missing symbol. +// For example: +// +// x=ID {f($x)}. +// +// The action clearly assumes +// that there has been an identifier Matched previously and that +// $x points at that token. If that token is missing, but +// the next token in the stream is what we want we assume that +// this token is missing, and we keep going. Because we +// have to return some token to replace the missing token, +// we have to conjure one up. This method gives the user control +// over the tokens returned for missing tokens. Mostly, +// you will want to create something special for identifier +// tokens. For literals such as '{' and ',', the default +// action in the parser or tree parser works. It simply creates +// a [CommonToken] of the appropriate type. The text will be the token name. +// If you need to change which tokens must be created by the lexer, +// override this method to create the appropriate tokens. 
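+//
+// A minimal sketch of such an override (the type name and placeholder handling
+// are invented for this example), embedding the default strategy:
+//
+//	type idAwareStrategy struct{ *DefaultErrorStrategy }
+//
+//	func (s *idAwareStrategy) GetMissingSymbol(recognizer Parser) Token {
+//		t := s.DefaultErrorStrategy.GetMissingSymbol(recognizer)
+//		// Inspect t or the expected token type here and conjure a friendlier
+//		// placeholder token for identifiers if desired.
+//		return t
+//	}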
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+	currentSymbol := recognizer.GetCurrentToken()
+	expecting := d.GetExpectedTokens(recognizer)
+	expectedTokenType := expecting.first()
+	var tokenText string
+
+	if expectedTokenType == TokenEOF {
+		tokenText = "<missing EOF>"
+	} else {
+		ln := recognizer.GetLiteralNames()
+		if expectedTokenType > 0 && expectedTokenType < len(ln) {
+			tokenText = "<missing " + ln[expectedTokenType] + ">"
+		} else {
+			tokenText = "<missing undefined>" // TODO: matches the JS impl
+		}
+	}
+	current := currentSymbol
+	lookback := recognizer.GetTokenStream().LT(-1)
+	if current.GetTokenType() == TokenEOF && lookback != nil {
+		current = lookback
+	}
+
+	tf := recognizer.GetTokenFactory()
+
+	return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
+
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+	return recognizer.GetExpectedTokens()
+}
+
+// GetTokenErrorDisplay determines how a token should be displayed in an error message.
+// The default is to display just the text, but during development you might
+// want to have a lot of information spit out. Override this func in that case
+// to use t.String() (which, for [CommonToken], dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new type.
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+	if t == nil {
+		return "<no token>"
+	}
+	s := t.GetText()
+	if s == "" {
+		if t.GetTokenType() == TokenEOF {
+			s = "<EOF>"
+		} else {
+			s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+		}
+	}
+	return d.escapeWSAndQuote(s)
+}
+
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+	s = strings.Replace(s, "\t", "\\t", -1)
+	s = strings.Replace(s, "\n", "\\n", -1)
+	s = strings.Replace(s, "\r", "\\r", -1)
+	return "'" + s + "'"
+}
+
+// GetErrorRecoverySet computes the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack. This amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+//
+// This local follow set only includes tokens
+// from within the rule, i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// # Example
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r or any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token--you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider the grammar:
+//
+//	a : '[' b ']'
+//	  | '(' b ')'
+//	  ;
+//
+//	b : c '^' INT
+//	  ;
+//
+//	c : ID
+//	  | INT
+//	  ;
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack.
Here are the various +// context-sensitive follow sets: +// +// FOLLOW(b1_in_a) = FIRST(']') = ']' +// FOLLOW(b2_in_a) = FIRST(')') = ')' +// FOLLOW(c_in_b) = FIRST('^') = '^' +// +// Upon erroneous input “[]”, the call chain is +// +// a → b → c +// +// and, hence, the follow context stack is: +// +// Depth Follow set Start of rule execution +// 0 a (from main()) +// 1 ']' b +// 2 '^' c +// +// Notice that ')' is not included, because b would have to have +// been called from a different context in rule a for ')' to be +// included. +// +// For error recovery, we cannot consider FOLLOW(c) +// (context-sensitive or otherwise). We need the combined set of +// all context-sensitive FOLLOW sets - the set of all tokens that +// could follow any reference in the call chain. We need to +// reSync to one of those tokens. Note that FOLLOW(c)='^' and if +// we reSync'd to that token, we'd consume until EOF. We need to +// Sync to context-sensitive FOLLOWs for a, b, and c: +// +// {']','^'} +// +// In this case, for input "[]", LA(1) is ']' and in the set, so we would +// not consume anything. After printing an error, rule c would +// return normally. Rule b would not find the required '^' though. +// At this point, it gets a mismatched token error and panics an +// exception (since LA(1) is not in the viable following token +// set). The rule exception handler tries to recover, but finds +// the same recovery set and doesn't consume anything. Rule b +// exits normally returning to rule a. Now it finds the ']' (and +// with the successful Match exits errorRecovery mode). +// +// So, you can see that the parser walks up the call chain looking +// for the token that was a member of the recovery set. +// +// Errors are not generated in errorRecovery mode. +// +// ANTLR's error recovery mechanism is based upon original ideas: +// +// [Algorithms + Data Structures = Programs] by Niklaus Wirth and +// [A note on error recovery in recursive descent parsers]. +// +// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent +// Parsers] +// +// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead +// during parsing. 
Later, the runtime Sync was improved for loops/sub-rules see [Sync] docs +// +// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905 +// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE +// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip +func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet { + atn := recognizer.GetInterpreter().atn + ctx := recognizer.GetParserRuleContext() + recoverSet := NewIntervalSet() + for ctx != nil && ctx.GetInvokingState() >= 0 { + // compute what follows who invoked us + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) + recoverSet.addSet(follow) + ctx = ctx.GetParent().(ParserRuleContext) + } + recoverSet.removeOne(TokenEpsilon) + return recoverSet +} + +// Consume tokens until one Matches the given token set.// +func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { + ttype := recognizer.GetTokenStream().LA(1) + for ttype != TokenEOF && !set.contains(ttype) { + recognizer.Consume() + ttype = recognizer.GetTokenStream().LA(1) + } +} + +// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors +// by immediately canceling the parse operation with a +// [ParseCancellationException]. The implementation ensures that the +// [ParserRuleContext//exception] field is set for all parse tree nodes +// that were not completed prior to encountering the error. +// +// This error strategy is useful in the following scenarios. +// +// - Two-stage parsing: This error strategy allows the first +// stage of two-stage parsing to immediately terminate if an error is +// encountered, and immediately fall back to the second stage. In addition to +// avoiding wasted work by attempting to recover from errors here, the empty +// implementation of [BailErrorStrategy.Sync] improves the performance of +// the first stage. +// +// - Silent validation: When syntax errors are not being +// Reported or logged, and the parse result is simply ignored if errors occur, +// the [BailErrorStrategy] avoids wasting work on recovering from errors +// when the result will be ignored either way. +// +// myparser.SetErrorHandler(NewBailErrorStrategy()) +// +// See also: [Parser.SetErrorHandler(ANTLRErrorStrategy)] +type BailErrorStrategy struct { + *DefaultErrorStrategy +} + +var _ ErrorStrategy = &BailErrorStrategy{} + +//goland:noinspection GoUnusedExportedFunction +func NewBailErrorStrategy() *BailErrorStrategy { + + b := new(BailErrorStrategy) + + b.DefaultErrorStrategy = NewDefaultErrorStrategy() + + return b +} + +// Recover Instead of recovering from exception e, re-panic it wrapped +// in a [ParseCancellationException] so it is not caught by the +// rule func catches. Use Exception.GetCause() to get the +// original [RecognitionException]. 
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { + context := recognizer.GetParserRuleContext() + for context != nil { + context.SetException(e) + if parent, ok := context.GetParent().(ParserRuleContext); ok { + context = parent + } else { + context = nil + } + } + recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly +} + +// RecoverInline makes sure we don't attempt to recover inline if the parser +// successfully recovers, it won't panic an exception. +func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { + b.Recover(recognizer, NewInputMisMatchException(recognizer)) + + return nil +} + +// Sync makes sure we don't attempt to recover from problems in sub-rules. +func (b *BailErrorStrategy) Sync(_ Parser) { +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/errors.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/errors.go new file mode 100644 index 0000000000..8f0f2f601f --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/errors.go @@ -0,0 +1,259 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just +// 3 kinds of errors: prediction errors, failed predicate errors, and +// mismatched input errors. In each case, the parser knows where it is +// in the input, where it is in the ATN, the rule invocation stack, +// and what kind of problem occurred. + +type RecognitionException interface { + GetOffendingToken() Token + GetMessage() string + GetInputStream() IntStream +} + +type BaseRecognitionException struct { + message string + recognizer Recognizer + offendingToken Token + offendingState int + ctx RuleContext + input IntStream +} + +func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { + + // todo + // Error.call(this) + // + // if (!!Error.captureStackTrace) { + // Error.captureStackTrace(this, RecognitionException) + // } else { + // stack := NewError().stack + // } + // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int + + t := new(BaseRecognitionException) + + t.message = message + t.recognizer = recognizer + t.input = input + t.ctx = ctx + + // The current Token when an error occurred. Since not all streams + // support accessing symbols by index, we have to track the {@link Token} + // instance itself. + // + t.offendingToken = nil + + // Get the ATN state number the parser was in at the time the error + // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the + // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match. + // + t.offendingState = -1 + if t.recognizer != nil { + t.offendingState = t.recognizer.GetState() + } + + return t +} + +func (b *BaseRecognitionException) GetMessage() string { + return b.message +} + +func (b *BaseRecognitionException) GetOffendingToken() Token { + return b.offendingToken +} + +func (b *BaseRecognitionException) GetInputStream() IntStream { + return b.input +} + +//

If the state number is not known, this method returns -1.

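+// An editorial sketch (not part of the upstream file) of how client code
+// might format a report using only the RecognitionException interface
+// declared above:
+//
+//	func describe(e RecognitionException) string {
+//		msg := e.GetMessage()
+//		if t := e.GetOffendingToken(); t != nil {
+//			msg += " at '" + t.GetText() + "'"
+//		}
+//		return msg
+//	}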
+ +// getExpectedTokens gets the set of input symbols which could potentially follow the +// previously Matched symbol at the time this exception was raised. +// +// If the set of expected tokens is not known and could not be computed, +// this method returns nil. +// +// The func returns the set of token types that could potentially follow the current +// state in the {ATN}, or nil if the information is not available. + +func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { + if b.recognizer != nil { + return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) + } + + return nil +} + +func (b *BaseRecognitionException) String() string { + return b.message +} + +type LexerNoViableAltException struct { + *BaseRecognitionException + + startIndex int + deadEndConfigs *ATNConfigSet +} + +func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException { + + l := new(LexerNoViableAltException) + + l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) + + l.startIndex = startIndex + l.deadEndConfigs = deadEndConfigs + + return l +} + +func (l *LexerNoViableAltException) String() string { + symbol := "" + if l.startIndex >= 0 && l.startIndex < l.input.Size() { + symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) + } + return "LexerNoViableAltException" + symbol +} + +type NoViableAltException struct { + *BaseRecognitionException + + startToken Token + offendingToken Token + ctx ParserRuleContext + deadEndConfigs *ATNConfigSet +} + +// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths +// to take based upon the remaining input. It tracks the starting token +// of the offending input and also knows where the parser was +// in the various paths when the error. +// +// Reported by [ReportNoViableAlternative] +func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { + + if ctx == nil { + ctx = recognizer.GetParserRuleContext() + } + + if offendingToken == nil { + offendingToken = recognizer.GetCurrentToken() + } + + if startToken == nil { + startToken = recognizer.GetCurrentToken() + } + + if input == nil { + input = recognizer.GetInputStream().(TokenStream) + } + + n := new(NoViableAltException) + n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) + + // Which configurations did we try at input.Index() that couldn't Match + // input.LT(1) + n.deadEndConfigs = deadEndConfigs + + // The token object at the start index the input stream might + // not be buffering tokens so get a reference to it. + // + // At the time the error occurred, of course the stream needs to keep a + // buffer of all the tokens, but later we might not have access to those. + n.startToken = startToken + n.offendingToken = offendingToken + + return n +} + +type InputMisMatchException struct { + *BaseRecognitionException +} + +// NewInputMisMatchException creates an exception that signifies any kind of mismatched input exceptions such as +// when the current input does not Match the expected token. 
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { + + i := new(InputMisMatchException) + i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + i.offendingToken = recognizer.GetCurrentToken() + + return i + +} + +// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates +// occurs when normally parsing the alternative just like Matching a token. +// Disambiguating predicate evaluation occurs when we test a predicate during +// prediction. +type FailedPredicateException struct { + *BaseRecognitionException + + ruleIndex int + predicateIndex int + predicate string +} + +//goland:noinspection GoUnusedExportedFunction +func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { + + f := new(FailedPredicateException) + + f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + trans := s.GetTransitions()[0] + if trans2, ok := trans.(*PredicateTransition); ok { + f.ruleIndex = trans2.ruleIndex + f.predicateIndex = trans2.predIndex + } else { + f.ruleIndex = 0 + f.predicateIndex = 0 + } + f.predicate = predicate + f.offendingToken = recognizer.GetCurrentToken() + + return f +} + +func (f *FailedPredicateException) formatMessage(predicate, message string) string { + if message != "" { + return message + } + + return "failed predicate: {" + predicate + "}?" +} + +type ParseCancellationException struct { +} + +func (p ParseCancellationException) GetOffendingToken() Token { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetMessage() string { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetInputStream() IntStream { + //TODO implement me + panic("implement me") +} + +func NewParseCancellationException() *ParseCancellationException { + // Error.call(this) + // Error.captureStackTrace(this, ParseCancellationException) + return new(ParseCancellationException) +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/file_stream.go new file mode 100644 index 0000000000..5f65f809be --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/file_stream.go @@ -0,0 +1,67 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bufio" + "os" +) + +// This is an InputStream that is loaded from a file all at once +// when you construct the object. 
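+//
+// For example (illustrative only; NewMyLexer stands in for a hypothetical
+// generated lexer constructor):
+//
+//	fs, err := NewFileStream("input.txt")
+//	if err != nil {
+//		// handle the error
+//	}
+//	lexer := NewMyLexer(fs)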
+ +type FileStream struct { + InputStream + filename string +} + +//goland:noinspection GoUnusedExportedFunction +func NewFileStream(fileName string) (*FileStream, error) { + + f, err := os.Open(fileName) + if err != nil { + return nil, err + } + + defer func(f *os.File) { + errF := f.Close() + if errF != nil { + } + }(f) + + reader := bufio.NewReader(f) + fInfo, err := f.Stat() + if err != nil { + return nil, err + } + + fs := &FileStream{ + InputStream: InputStream{ + index: 0, + name: fileName, + }, + filename: fileName, + } + + // Pre-build the buffer and read runes efficiently + // + fs.data = make([]rune, 0, fInfo.Size()) + for { + r, _, err := reader.ReadRune() + if err != nil { + break + } + fs.data = append(fs.data, r) + } + fs.size = len(fs.data) // Size in runes + + // All done. + // + return fs, nil +} + +func (f *FileStream) GetSourceName() string { + return f.filename +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/input_stream.go new file mode 100644 index 0000000000..b737fe85fb --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/input_stream.go @@ -0,0 +1,157 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bufio" + "io" +) + +type InputStream struct { + name string + index int + data []rune + size int +} + +// NewIoStream creates a new input stream from the given io.Reader reader. +// Note that the reader is read completely into memory and so it must actually +// have a stopping point - you cannot pass in a reader on an open-ended source such +// as a socket for instance. +func NewIoStream(reader io.Reader) *InputStream { + + rReader := bufio.NewReader(reader) + + is := &InputStream{ + name: "", + index: 0, + } + + // Pre-build the buffer and read runes reasonably efficiently given that + // we don't exactly know how big the input is. 
+ // + is.data = make([]rune, 0, 512) + for { + r, _, err := rReader.ReadRune() + if err != nil { + break + } + is.data = append(is.data, r) + } + is.size = len(is.data) // number of runes + return is +} + +// NewInputStream creates a new input stream from the given string +func NewInputStream(data string) *InputStream { + + is := &InputStream{ + name: "", + index: 0, + data: []rune(data), // This is actually the most efficient way + } + is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too + return is +} + +func (is *InputStream) reset() { + is.index = 0 +} + +// Consume moves the input pointer to the next character in the input stream +func (is *InputStream) Consume() { + if is.index >= is.size { + // assert is.LA(1) == TokenEOF + panic("cannot consume EOF") + } + is.index++ +} + +// LA returns the character at the given offset from the start of the input stream +func (is *InputStream) LA(offset int) int { + + if offset == 0 { + return 0 // nil + } + if offset < 0 { + offset++ // e.g., translate LA(-1) to use offset=0 + } + pos := is.index + offset - 1 + + if pos < 0 || pos >= is.size { // invalid + return TokenEOF + } + + return int(is.data[pos]) +} + +// LT returns the character at the given offset from the start of the input stream +func (is *InputStream) LT(offset int) int { + return is.LA(offset) +} + +// Index returns the current offset in to the input stream +func (is *InputStream) Index() int { + return is.index +} + +// Size returns the total number of characters in the input stream +func (is *InputStream) Size() int { + return is.size +} + +// Mark does nothing here as we have entire buffer +func (is *InputStream) Mark() int { + return -1 +} + +// Release does nothing here as we have entire buffer +func (is *InputStream) Release(_ int) { +} + +// Seek the input point to the provided index offset +func (is *InputStream) Seek(index int) { + if index <= is.index { + is.index = index // just jump don't update stream state (line,...) + return + } + // seek forward + is.index = intMin(index, is.size) +} + +// GetText returns the text from the input stream from the start to the stop index +func (is *InputStream) GetText(start int, stop int) string { + if stop >= is.size { + stop = is.size - 1 + } + if start >= is.size { + return "" + } + + return string(is.data[start : stop+1]) +} + +// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last +// character of the stop token +func (is *InputStream) GetTextFromTokens(start, stop Token) string { + if start != nil && stop != nil { + return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) + } + + return "" +} + +func (is *InputStream) GetTextFromInterval(i Interval) string { + return is.GetText(i.Start, i.Stop) +} + +func (*InputStream) GetSourceName() string { + return "" +} + +// String returns the entire input stream as a string +func (is *InputStream) String() string { + return string(is.data) +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/int_stream.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/int_stream.go new file mode 100644 index 0000000000..4778878bd0 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/int_stream.go @@ -0,0 +1,16 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +type IntStream interface { + Consume() + LA(int) int + Mark() int + Release(marker int) + Index() int + Seek(index int) + Size() int + GetSourceName() string +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/interval_set.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/interval_set.go new file mode 100644 index 0000000000..cc5066067a --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/interval_set.go @@ -0,0 +1,330 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" + "strings" +) + +type Interval struct { + Start int + Stop int +} + +// NewInterval creates a new interval with the given start and stop values. +func NewInterval(start, stop int) Interval { + return Interval{ + Start: start, + Stop: stop, + } +} + +// Contains returns true if the given item is contained within the interval. +func (i Interval) Contains(item int) bool { + return item >= i.Start && item < i.Stop +} + +// String generates a string representation of the interval. +func (i Interval) String() string { + if i.Start == i.Stop-1 { + return strconv.Itoa(i.Start) + } + + return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) +} + +// Length returns the length of the interval. +func (i Interval) Length() int { + return i.Stop - i.Start +} + +// IntervalSet represents a collection of [Intervals], which may be read-only. +type IntervalSet struct { + intervals []Interval + readOnly bool +} + +// NewIntervalSet creates a new empty, writable, interval set. +func NewIntervalSet() *IntervalSet { + + i := new(IntervalSet) + + i.intervals = nil + i.readOnly = false + + return i +} + +func (i *IntervalSet) Equals(other *IntervalSet) bool { + if len(i.intervals) != len(other.intervals) { + return false + } + + for k, v := range i.intervals { + if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop { + return false + } + } + + return true +} + +func (i *IntervalSet) first() int { + if len(i.intervals) == 0 { + return TokenInvalidType + } + + return i.intervals[0].Start +} + +func (i *IntervalSet) addOne(v int) { + i.addInterval(NewInterval(v, v+1)) +} + +func (i *IntervalSet) addRange(l, h int) { + i.addInterval(NewInterval(l, h+1)) +} + +func (i *IntervalSet) addInterval(v Interval) { + if i.intervals == nil { + i.intervals = make([]Interval, 0) + i.intervals = append(i.intervals, v) + } else { + // find insert pos + for k, interval := range i.intervals { + // distinct range -> insert + if v.Stop < interval.Start { + i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...) + return + } else if v.Stop == interval.Start { + i.intervals[k].Start = v.Start + return + } else if v.Start <= interval.Stop { + i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop)) + + // if not applying to end, merge potential overlaps + if k < len(i.intervals)-1 { + l := i.intervals[k] + r := i.intervals[k+1] + // if r contained in l + if l.Stop >= r.Stop { + i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) + } else if l.Stop >= r.Start { // partial overlap + i.intervals[k] = NewInterval(l.Start, r.Stop) + i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) 
+ } + } + return + } + } + // greater than any exiting + i.intervals = append(i.intervals, v) + } +} + +func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { + if other.intervals != nil { + for k := 0; k < len(other.intervals); k++ { + i2 := other.intervals[k] + i.addInterval(NewInterval(i2.Start, i2.Stop)) + } + } + return i +} + +func (i *IntervalSet) complement(start int, stop int) *IntervalSet { + result := NewIntervalSet() + result.addInterval(NewInterval(start, stop+1)) + for j := 0; j < len(i.intervals); j++ { + result.removeRange(i.intervals[j]) + } + return result +} + +func (i *IntervalSet) contains(item int) bool { + if i.intervals == nil { + return false + } + for k := 0; k < len(i.intervals); k++ { + if i.intervals[k].Contains(item) { + return true + } + } + return false +} + +func (i *IntervalSet) length() int { + iLen := 0 + + for _, v := range i.intervals { + iLen += v.Length() + } + + return iLen +} + +func (i *IntervalSet) removeRange(v Interval) { + if v.Start == v.Stop-1 { + i.removeOne(v.Start) + } else if i.intervals != nil { + k := 0 + for n := 0; n < len(i.intervals); n++ { + ni := i.intervals[k] + // intervals are ordered + if v.Stop <= ni.Start { + return + } else if v.Start > ni.Start && v.Stop < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + x := NewInterval(v.Stop, ni.Stop) + // i.intervals.splice(k, 0, x) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) + return + } else if v.Start <= ni.Start && v.Stop >= ni.Stop { + // i.intervals.splice(k, 1) + i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) + k = k - 1 // need another pass + } else if v.Start < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + } else if v.Stop < ni.Stop { + i.intervals[k] = NewInterval(v.Stop, ni.Stop) + } + k++ + } + } +} + +func (i *IntervalSet) removeOne(v int) { + if i.intervals != nil { + for k := 0; k < len(i.intervals); k++ { + ki := i.intervals[k] + // intervals i ordered + if v < ki.Start { + return + } else if v == ki.Start && v == ki.Stop-1 { + // i.intervals.splice(k, 1) + i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) + return + } else if v == ki.Start { + i.intervals[k] = NewInterval(ki.Start+1, ki.Stop) + return + } else if v == ki.Stop-1 { + i.intervals[k] = NewInterval(ki.Start, ki.Stop-1) + return + } else if v < ki.Stop-1 { + x := NewInterval(ki.Start, v) + ki.Start = v + 1 + // i.intervals.splice(k, 0, x) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) 
+				return
+			}
+		}
+	}
+}
+
+func (i *IntervalSet) String() string {
+	return i.StringVerbose(nil, nil, false)
+}
+
+func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
+
+	if i.intervals == nil {
+		return "{}"
+	} else if literalNames != nil || symbolicNames != nil {
+		return i.toTokenString(literalNames, symbolicNames)
+	} else if elemsAreChar {
+		return i.toCharString()
+	}
+
+	return i.toIndexString()
+}
+
+func (i *IntervalSet) GetIntervals() []Interval {
+	return i.intervals
+}
+
+func (i *IntervalSet) toCharString() string {
+	names := make([]string, 0, len(i.intervals))
+
+	var sb strings.Builder
+
+	for j := 0; j < len(i.intervals); j++ {
+		v := i.intervals[j]
+		if v.Stop == v.Start+1 {
+			if v.Start == TokenEOF {
+				names = append(names, "<EOF>")
+			} else {
+				sb.WriteByte('\'')
+				sb.WriteRune(rune(v.Start))
+				sb.WriteByte('\'')
+				names = append(names, sb.String())
+				sb.Reset()
+			}
+		} else {
+			sb.WriteByte('\'')
+			sb.WriteRune(rune(v.Start))
+			sb.WriteString("'..'")
+			sb.WriteRune(rune(v.Stop - 1))
+			sb.WriteByte('\'')
+			names = append(names, sb.String())
+			sb.Reset()
+		}
+	}
+	if len(names) > 1 {
+		return "{" + strings.Join(names, ", ") + "}"
+	}
+
+	return names[0]
+}
+
+func (i *IntervalSet) toIndexString() string {
+
+	names := make([]string, 0)
+	for j := 0; j < len(i.intervals); j++ {
+		v := i.intervals[j]
+		if v.Stop == v.Start+1 {
+			if v.Start == TokenEOF {
+				names = append(names, "<EOF>")
+			} else {
+				names = append(names, strconv.Itoa(v.Start))
+			}
+		} else {
+			names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
+		}
+	}
+	if len(names) > 1 {
+		return "{" + strings.Join(names, ", ") + "}"
+	}
+
+	return names[0]
+}
+
+func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
+	names := make([]string, 0)
+	for _, v := range i.intervals {
+		for j := v.Start; j < v.Stop; j++ {
+			names = append(names, i.elementName(literalNames, symbolicNames, j))
+		}
+	}
+	if len(names) > 1 {
+		return "{" + strings.Join(names, ", ") + "}"
+	}
+
+	return names[0]
+}
+
+func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
+	if a == TokenEOF {
+		return "<EOF>"
+	} else if a == TokenEpsilon {
+		return "<EPSILON>"
+	} else {
+		if a < len(literalNames) && literalNames[a] != "" {
+			return literalNames[a]
+		}
+
+		return symbolicNames[a]
+	}
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
new file mode 100644
index 0000000000..ceccd96d25
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
@@ -0,0 +1,685 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+import (
+	"container/list"
+	"runtime/debug"
+	"sort"
+	"sync"
+)
+
+// Collectable is an interface that a struct should implement if it is to be
+// usable as a key in these collections.
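+//
+// For example (editorial sketch, not part of the upstream file):
+//
+//	type pair struct{ a, b int }
+//
+//	func (p *pair) Hash() int { return p.a*31 + p.b }
+//
+//	func (p *pair) Equals(other Collectable[*pair]) bool {
+//		o, ok := other.(*pair)
+//		return ok && *p == *o
+//	}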
+type Collectable[T any] interface { + Hash() int + Equals(other Collectable[T]) bool +} + +type Comparator[T any] interface { + Hash1(o T) int + Equals2(T, T) bool +} + +type CollectionSource int +type CollectionDescriptor struct { + SybolicName string + Description string +} + +const ( + UnknownCollection CollectionSource = iota + ATNConfigLookupCollection + ATNStateCollection + DFAStateCollection + ATNConfigCollection + PredictionContextCollection + SemanticContextCollection + ClosureBusyCollection + PredictionVisitedCollection + MergeCacheCollection + PredictionContextCacheCollection + AltSetCollection + ReachSetCollection +) + +var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{ + UnknownCollection: { + SybolicName: "UnknownCollection", + Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.", + }, + ATNConfigCollection: { + SybolicName: "ATNConfigCollection", + Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." + + "For instance, it is used to store the results of the closure() operation in the ATN.", + }, + ATNConfigLookupCollection: { + SybolicName: "ATNConfigLookupCollection", + Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." + + "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.", + }, + ATNStateCollection: { + SybolicName: "ATNStateCollection", + Description: "ATNState collection. This is used to store the states of the ATN.", + }, + DFAStateCollection: { + SybolicName: "DFAStateCollection", + Description: "DFAState collection. This is used to store the states of the DFA.", + }, + PredictionContextCollection: { + SybolicName: "PredictionContextCollection", + Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.", + }, + SemanticContextCollection: { + SybolicName: "SemanticContextCollection", + Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.", + }, + ClosureBusyCollection: { + SybolicName: "ClosureBusyCollection", + Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." + + "It stores ATNConfigs that are currently being processed in the closure() operation.", + }, + PredictionVisitedCollection: { + SybolicName: "PredictionVisitedCollection", + Description: "A map that records whether we have visited a particular context when searching through cached entries.", + }, + MergeCacheCollection: { + SybolicName: "MergeCacheCollection", + Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.", + }, + PredictionContextCacheCollection: { + SybolicName: "PredictionContextCacheCollection", + Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.", + }, + AltSetCollection: { + SybolicName: "AltSetCollection", + Description: "Used to eliminate duplicate alternatives in an ATN config set.", + }, + ReachSetCollection: { + SybolicName: "ReachSetCollection", + Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.", + }, +} + +// JStore implements a container that allows the use of a struct to calculate the key +// for a collection of values akin to map. 
This is not meant to be a full-blown HashMap but just +// serve the needs of the ANTLR Go runtime. +// +// For ease of porting the logic of the runtime from the master target (Java), this collection +// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() +// function as the key. The values are stored in a standard go map which internally is a form of hashmap +// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with +// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't +// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and +// we understand the requirements, then this is fine - this is not a general purpose collection. +type JStore[T any, C Comparator[T]] struct { + store map[int][]T + len int + comparator Comparator[T] + stats *JStatRec +} + +func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] { + + if comparator == nil { + panic("comparator cannot be nil") + } + + s := &JStore[T, C]{ + store: make(map[int][]T, 1), + comparator: comparator, + } + if collectStats { + s.stats = &JStatRec{ + Source: cType, + Description: desc, + } + + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + s.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(s.stats) + } + return s +} + +// Put will store given value in the collection. Note that the key for storage is generated from +// the value itself - this is specifically because that is what ANTLR needs - this would not be useful +// as any kind of general collection. +// +// If the key has a hash conflict, then the value will be added to the slice of values associated with the +// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is +// tested by calling the equals() method on the key. +// +// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true +// +// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. +func (s *JStore[T, C]) Put(value T) (v T, exists bool) { + + if collectStats { + s.stats.Puts++ + } + kh := s.comparator.Hash1(value) + + var hClash bool + for _, v1 := range s.store[kh] { + hClash = true + if s.comparator.Equals2(value, v1) { + if collectStats { + s.stats.PutHits++ + s.stats.PutHashConflicts++ + } + return v1, true + } + if collectStats { + s.stats.PutMisses++ + } + } + if collectStats && hClash { + s.stats.PutHashConflicts++ + } + s.store[kh] = append(s.store[kh], value) + + if collectStats { + if len(s.store[kh]) > s.stats.MaxSlotSize { + s.stats.MaxSlotSize = len(s.store[kh]) + } + } + s.len++ + if collectStats { + s.stats.CurSize = s.len + if s.len > s.stats.MaxSize { + s.stats.MaxSize = s.len + } + } + return value, false +} + +// Get will return the value associated with the key - the type of the key is the same type as the value +// which would not generally be useful, but this is a specific thing for ANTLR where the key is +// generated using the object we are going to store. 
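+//
+// An editorial usage sketch (cfg is an assumed *ATNConfig value, and
+// aConfEqInst an assumed Comparator[*ATNConfig] instance):
+//
+//	store := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigLookupCollection, "sketch")
+//	v, present := store.Put(cfg) // v is the previously stored equal value when present is true
+//	got, ok := store.Get(cfg)    // ok reports whether an equal value was found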
+func (s *JStore[T, C]) Get(key T) (T, bool) { + if collectStats { + s.stats.Gets++ + } + kh := s.comparator.Hash1(key) + var hClash bool + for _, v := range s.store[kh] { + hClash = true + if s.comparator.Equals2(key, v) { + if collectStats { + s.stats.GetHits++ + s.stats.GetHashConflicts++ + } + return v, true + } + if collectStats { + s.stats.GetMisses++ + } + } + if collectStats { + if hClash { + s.stats.GetHashConflicts++ + } + s.stats.GetNoEnt++ + } + return key, false +} + +// Contains returns true if the given key is present in the store +func (s *JStore[T, C]) Contains(key T) bool { + _, present := s.Get(key) + return present +} + +func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { + vs := make([]T, 0, len(s.store)) + for _, v := range s.store { + vs = append(vs, v...) + } + sort.Slice(vs, func(i, j int) bool { + return less(vs[i], vs[j]) + }) + + return vs +} + +func (s *JStore[T, C]) Each(f func(T) bool) { + for _, e := range s.store { + for _, v := range e { + f(v) + } + } +} + +func (s *JStore[T, C]) Len() int { + return s.len +} + +func (s *JStore[T, C]) Values() []T { + vs := make([]T, 0, len(s.store)) + for _, e := range s.store { + vs = append(vs, e...) + } + return vs +} + +type entry[K, V any] struct { + key K + val V +} + +type JMap[K, V any, C Comparator[K]] struct { + store map[int][]*entry[K, V] + len int + comparator Comparator[K] + stats *JStatRec +} + +func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] { + m := &JMap[K, V, C]{ + store: make(map[int][]*entry[K, V], 1), + comparator: comparator, + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) { + if collectStats { + m.stats.Puts++ + } + kh := m.comparator.Hash1(key) + + var hClash bool + for _, e := range m.store[kh] { + hClash = true + if m.comparator.Equals2(e.key, key) { + if collectStats { + m.stats.PutHits++ + m.stats.PutHashConflicts++ + } + return e.val, true + } + if collectStats { + m.stats.PutMisses++ + } + } + if collectStats { + if hClash { + m.stats.PutHashConflicts++ + } + } + m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) + if collectStats { + if len(m.store[kh]) > m.stats.MaxSlotSize { + m.stats.MaxSlotSize = len(m.store[kh]) + } + } + m.len++ + if collectStats { + m.stats.CurSize = m.len + if m.len > m.stats.MaxSize { + m.stats.MaxSize = m.len + } + } + return val, false +} + +func (m *JMap[K, V, C]) Values() []V { + vs := make([]V, 0, len(m.store)) + for _, e := range m.store { + for _, v := range e { + vs = append(vs, v.val) + } + } + return vs +} + +func (m *JMap[K, V, C]) Get(key K) (V, bool) { + if collectStats { + m.stats.Gets++ + } + var none V + kh := m.comparator.Hash1(key) + var hClash bool + for _, e := range m.store[kh] { + hClash = true + if m.comparator.Equals2(e.key, key) { + if collectStats { + m.stats.GetHits++ + m.stats.GetHashConflicts++ + } + return e.val, true + } + if collectStats { + m.stats.GetMisses++ + } + } + if collectStats { + if hClash { + m.stats.GetHashConflicts++ + } + m.stats.GetNoEnt++ + } + return none, false +} + +func (m *JMap[K, V, C]) Len() int { + return m.len +} + +func (m *JMap[K, V, C]) Delete(key K) { + kh := m.comparator.Hash1(key) + for i, e := range m.store[kh] { + if 
m.comparator.Equals2(e.key, key) { + m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) + m.len-- + return + } + } +} + +func (m *JMap[K, V, C]) Clear() { + m.store = make(map[int][]*entry[K, V]) +} + +type JPCMap struct { + store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]] + size int + stats *JStatRec +} + +func NewJPCMap(cType CollectionSource, desc string) *JPCMap { + m := &JPCMap{ + store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc), + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Gets++ + } + // Do we have a map stored by k1? + // + m2, present := pcm.store.Get(k1) + if present { + if collectStats { + pcm.stats.GetHits++ + } + // We found a map of values corresponding to k1, so now we need to look up k2 in that map + // + return m2.Get(k2) + } + if collectStats { + pcm.stats.GetMisses++ + } + return nil, false +} + +func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) { + + if collectStats { + pcm.stats.Puts++ + } + // First does a map already exist for k1? + // + if m2, present := pcm.store.Get(k1); present { + if collectStats { + pcm.stats.PutHits++ + } + _, present = m2.Put(k2, v) + if !present { + pcm.size++ + if collectStats { + pcm.stats.CurSize = pcm.size + if pcm.size > pcm.stats.MaxSize { + pcm.stats.MaxSize = pcm.size + } + } + } + } else { + // No map found for k1, so we create it, add in our value, then store is + // + if collectStats { + pcm.stats.PutMisses++ + m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry") + } else { + m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry") + } + + m2.Put(k2, v) + pcm.store.Put(k1, m2) + pcm.size++ + } +} + +type JPCMap2 struct { + store map[int][]JPCEntry + size int + stats *JStatRec +} + +type JPCEntry struct { + k1, k2, v *PredictionContext +} + +func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 { + m := &JPCMap2{ + store: make(map[int][]JPCEntry, 1000), + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func dHash(k1, k2 *PredictionContext) int { + return k1.cachedHash*31 + k2.cachedHash +} + +func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Gets++ + } + + h := dHash(k1, k2) + var hClash bool + for _, e := range pcm.store[h] { + hClash = true + if e.k1.Equals(k1) && e.k2.Equals(k2) { + if collectStats { + pcm.stats.GetHits++ + pcm.stats.GetHashConflicts++ + } + return e.v, true + } + if collectStats { + pcm.stats.GetMisses++ + } + } + if collectStats { + if hClash { + pcm.stats.GetHashConflicts++ + } + pcm.stats.GetNoEnt++ + 
} + return nil, false +} + +func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Puts++ + } + h := dHash(k1, k2) + var hClash bool + for _, e := range pcm.store[h] { + hClash = true + if e.k1.Equals(k1) && e.k2.Equals(k2) { + if collectStats { + pcm.stats.PutHits++ + pcm.stats.PutHashConflicts++ + } + return e.v, true + } + if collectStats { + pcm.stats.PutMisses++ + } + } + if collectStats { + if hClash { + pcm.stats.PutHashConflicts++ + } + } + pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v}) + pcm.size++ + if collectStats { + pcm.stats.CurSize = pcm.size + if pcm.size > pcm.stats.MaxSize { + pcm.stats.MaxSize = pcm.size + } + } + return nil, false +} + +type VisitEntry struct { + k *PredictionContext + v *PredictionContext +} +type VisitRecord struct { + store map[*PredictionContext]*PredictionContext + len int + stats *JStatRec +} + +type VisitList struct { + cache *list.List + lock sync.RWMutex +} + +var visitListPool = VisitList{ + cache: list.New(), + lock: sync.RWMutex{}, +} + +// NewVisitRecord returns a new VisitRecord instance from the pool if available. +// Note that this "map" uses a pointer as a key because we are emulating the behavior of +// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal, +// which means is the key the same reference to an object rather than is it .equals() to another +// object. +func NewVisitRecord() *VisitRecord { + visitListPool.lock.Lock() + el := visitListPool.cache.Front() + defer visitListPool.lock.Unlock() + var vr *VisitRecord + if el == nil { + vr = &VisitRecord{ + store: make(map[*PredictionContext]*PredictionContext), + } + if collectStats { + vr.stats = &JStatRec{ + Source: PredictionContextCacheCollection, + Description: "VisitRecord", + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + vr.stats.CreateStack = debug.Stack() + } + } + } else { + vr = el.Value.(*VisitRecord) + visitListPool.cache.Remove(el) + vr.store = make(map[*PredictionContext]*PredictionContext) + } + if collectStats { + Statistics.AddJStatRec(vr.stats) + } + return vr +} + +func (vr *VisitRecord) Release() { + vr.len = 0 + vr.store = nil + if collectStats { + vr.stats.MaxSize = 0 + vr.stats.CurSize = 0 + vr.stats.Gets = 0 + vr.stats.GetHits = 0 + vr.stats.GetMisses = 0 + vr.stats.GetHashConflicts = 0 + vr.stats.GetNoEnt = 0 + vr.stats.Puts = 0 + vr.stats.PutHits = 0 + vr.stats.PutMisses = 0 + vr.stats.PutHashConflicts = 0 + vr.stats.MaxSlotSize = 0 + } + visitListPool.lock.Lock() + visitListPool.cache.PushBack(vr) + visitListPool.lock.Unlock() +} + +func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) { + if collectStats { + vr.stats.Gets++ + } + v := vr.store[k] + if v != nil { + if collectStats { + vr.stats.GetHits++ + } + return v, true + } + if collectStats { + vr.stats.GetNoEnt++ + } + return nil, false +} + +func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) { + if collectStats { + vr.stats.Puts++ + } + vr.store[k] = v + vr.len++ + if collectStats { + vr.stats.CurSize = vr.len + if vr.len > vr.stats.MaxSize { + vr.stats.MaxSize = vr.len + } + } + return v, false +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer.go new file mode 100644 index 0000000000..3c7896a918 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer.go @@ -0,0 +1,426 @@ +// 
Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// A lexer is recognizer that draws input symbols from a character stream. +// lexer grammars result in a subclass of this object. A Lexer object +// uses simplified Match() and error recovery mechanisms in the interest +// of speed. +/// + +type Lexer interface { + TokenSource + Recognizer + + Emit() Token + + SetChannel(int) + PushMode(int) + PopMode() int + SetType(int) + SetMode(int) +} + +type BaseLexer struct { + *BaseRecognizer + + Interpreter ILexerATNSimulator + TokenStartCharIndex int + TokenStartLine int + TokenStartColumn int + ActionType int + Virt Lexer // The most derived lexer implementation. Allows virtual method calls. + + input CharStream + factory TokenFactory + tokenFactorySourcePair *TokenSourceCharStreamPair + token Token + hitEOF bool + channel int + thetype int + modeStack IntStack + mode int + text string +} + +func NewBaseLexer(input CharStream) *BaseLexer { + + lexer := new(BaseLexer) + + lexer.BaseRecognizer = NewBaseRecognizer() + + lexer.input = input + lexer.factory = CommonTokenFactoryDEFAULT + lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input} + + lexer.Virt = lexer + + lexer.Interpreter = nil // child classes must populate it + + // The goal of all lexer rules/methods is to create a token object. + // l is an instance variable as multiple rules may collaborate to + // create a single token. NextToken will return l object after + // Matching lexer rule(s). If you subclass to allow multiple token + // emissions, then set l to the last token to be Matched or + // something non nil so that the auto token emit mechanism will not + // emit another token. + lexer.token = nil + + // What character index in the stream did the current token start at? + // Needed, for example, to get the text for current token. Set at + // the start of NextToken. + lexer.TokenStartCharIndex = -1 + + // The line on which the first character of the token resides/// + lexer.TokenStartLine = -1 + + // The character position of first character within the line/// + lexer.TokenStartColumn = -1 + + // Once we see EOF on char stream, next token will be EOF. + // If you have DONE : EOF then you see DONE EOF. + lexer.hitEOF = false + + // The channel number for the current token/// + lexer.channel = TokenDefaultChannel + + // The token type for the current token/// + lexer.thetype = TokenInvalidType + + lexer.modeStack = make([]int, 0) + lexer.mode = LexerDefaultMode + + // You can set the text for the current token to override what is in + // the input char buffer. Use setText() or can set l instance var. 
+ // / + lexer.text = "" + + return lexer +} + +const ( + LexerDefaultMode = 0 + LexerMore = -2 + LexerSkip = -3 +) + +//goland:noinspection GoUnusedConst +const ( + LexerDefaultTokenChannel = TokenDefaultChannel + LexerHidden = TokenHiddenChannel + LexerMinCharValue = 0x0000 + LexerMaxCharValue = 0x10FFFF +) + +func (b *BaseLexer) Reset() { + // wack Lexer state variables + if b.input != nil { + b.input.Seek(0) // rewind the input + } + b.token = nil + b.thetype = TokenInvalidType + b.channel = TokenDefaultChannel + b.TokenStartCharIndex = -1 + b.TokenStartColumn = -1 + b.TokenStartLine = -1 + b.text = "" + + b.hitEOF = false + b.mode = LexerDefaultMode + b.modeStack = make([]int, 0) + + b.Interpreter.reset() +} + +func (b *BaseLexer) GetInterpreter() ILexerATNSimulator { + return b.Interpreter +} + +func (b *BaseLexer) GetInputStream() CharStream { + return b.input +} + +func (b *BaseLexer) GetSourceName() string { + return b.GrammarFileName +} + +func (b *BaseLexer) SetChannel(v int) { + b.channel = v +} + +func (b *BaseLexer) GetTokenFactory() TokenFactory { + return b.factory +} + +func (b *BaseLexer) setTokenFactory(f TokenFactory) { + b.factory = f +} + +func (b *BaseLexer) safeMatch() (ret int) { + defer func() { + if e := recover(); e != nil { + if re, ok := e.(RecognitionException); ok { + b.notifyListeners(re) // Report error + b.Recover(re) + ret = LexerSkip // default + } + } + }() + + return b.Interpreter.Match(b.input, b.mode) +} + +// NextToken returns a token from the lexer input source i.e., Match a token on the source char stream. +func (b *BaseLexer) NextToken() Token { + if b.input == nil { + panic("NextToken requires a non-nil input stream.") + } + + tokenStartMarker := b.input.Mark() + + // previously in finally block + defer func() { + // make sure we release marker after Match or + // unbuffered char stream will keep buffering + b.input.Release(tokenStartMarker) + }() + + for { + if b.hitEOF { + b.EmitEOF() + return b.token + } + b.token = nil + b.channel = TokenDefaultChannel + b.TokenStartCharIndex = b.input.Index() + b.TokenStartColumn = b.Interpreter.GetCharPositionInLine() + b.TokenStartLine = b.Interpreter.GetLine() + b.text = "" + continueOuter := false + for { + b.thetype = TokenInvalidType + + ttype := b.safeMatch() + + if b.input.LA(1) == TokenEOF { + b.hitEOF = true + } + if b.thetype == TokenInvalidType { + b.thetype = ttype + } + if b.thetype == LexerSkip { + continueOuter = true + break + } + if b.thetype != LexerMore { + break + } + } + + if continueOuter { + continue + } + if b.token == nil { + b.Virt.Emit() + } + return b.token + } +} + +// Skip instructs the lexer to Skip creating a token for current lexer rule +// and look for another token. [NextToken] knows to keep looking when +// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that +// if token==nil at end of any token rule, it creates one for you +// and emits it. +func (b *BaseLexer) Skip() { + b.thetype = LexerSkip +} + +func (b *BaseLexer) More() { + b.thetype = LexerMore +} + +// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode +// will be in force. +func (b *BaseLexer) SetMode(m int) { + b.mode = m +} + +// PushMode saves the current lexer mode so that it can be restored later. See [PopMode], then sets the +// current lexer mode to the supplied mode m. 
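+// For example (illustrative): a grammar that lexes string literals in a
+// dedicated mode might call PushMode(stringMode) on the opening quote and
+// PopMode() on the closing quote, restoring whichever mode was active
+// before; stringMode here is a hypothetical mode constant.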
+func (b *BaseLexer) PushMode(m int) { + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("pushMode " + strconv.Itoa(m)) + } + b.modeStack.Push(b.mode) + b.mode = m +} + +// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to +// return to. +func (b *BaseLexer) PopMode() int { + if len(b.modeStack) == 0 { + panic("Empty Stack") + } + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) + } + i, _ := b.modeStack.Pop() + b.mode = i + return b.mode +} + +func (b *BaseLexer) inputStream() CharStream { + return b.input +} + +// SetInputStream resets the lexer input stream and associated lexer state. +func (b *BaseLexer) SetInputStream(input CharStream) { + b.input = nil + b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} + b.Reset() + b.input = input + b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} +} + +func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { + return b.tokenFactorySourcePair +} + +// EmitToken by default does not support multiple emits per [NextToken] invocation +// for efficiency reasons. Subclass and override this func, [NextToken], +// and [GetToken] (to push tokens into a list and pull from that list +// rather than a single variable as this implementation does). +func (b *BaseLexer) EmitToken(token Token) { + b.token = token +} + +// Emit is the standard method called to automatically emit a token at the +// outermost lexical rule. The token object should point into the +// char buffer start..stop. If there is a text override in 'text', +// use that to set the token's text. Override this method to emit +// custom [Token] objects or provide a new factory. +// / +func (b *BaseLexer) Emit() Token { + t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) + b.EmitToken(t) + return t +} + +// EmitEOF emits an EOF token. By default, this is the last token emitted +func (b *BaseLexer) EmitEOF() Token { + cpos := b.GetCharPositionInLine() + lpos := b.GetLine() + eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos) + b.EmitToken(eof) + return eof +} + +// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned. +func (b *BaseLexer) GetCharPositionInLine() int { + return b.Interpreter.GetCharPositionInLine() +} + +func (b *BaseLexer) GetLine() int { + return b.Interpreter.GetLine() +} + +func (b *BaseLexer) GetType() int { + return b.thetype +} + +func (b *BaseLexer) SetType(t int) { + b.thetype = t +} + +// GetCharIndex returns the index of the current character of lookahead +func (b *BaseLexer) GetCharIndex() int { + return b.input.Index() +} + +// GetText returns the text Matched so far for the current token or any text override. +func (b *BaseLexer) GetText() string { + if b.text != "" { + return b.text + } + + return b.Interpreter.GetText(b.input) +} + +// SetText sets the complete text of this token; it wipes any previous changes to the text. +func (b *BaseLexer) SetText(text string) { + b.text = text +} + +// GetATN returns the ATN used by the lexer. +func (b *BaseLexer) GetATN() *ATN { + return b.Interpreter.ATN() +} + +// GetAllTokens returns a list of all [Token] objects in input char stream. 
+// Forces a load of all tokens that can be made from the input char stream. +// +// Does not include EOF token. +func (b *BaseLexer) GetAllTokens() []Token { + vl := b.Virt + tokens := make([]Token, 0) + t := vl.NextToken() + for t.GetTokenType() != TokenEOF { + tokens = append(tokens, t) + t = vl.NextToken() + } + return tokens +} + +func (b *BaseLexer) notifyListeners(e RecognitionException) { + start := b.TokenStartCharIndex + stop := b.input.Index() + text := b.input.GetTextFromInterval(NewInterval(start, stop)) + msg := "token recognition error at: '" + text + "'" + listener := b.GetErrorListenerDispatch() + listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e) +} + +func (b *BaseLexer) getErrorDisplayForChar(c rune) string { + if c == TokenEOF { + return "" + } else if c == '\n' { + return "\\n" + } else if c == '\t' { + return "\\t" + } else if c == '\r' { + return "\\r" + } else { + return string(c) + } +} + +func (b *BaseLexer) getCharErrorDisplay(c rune) string { + return "'" + b.getErrorDisplayForChar(c) + "'" +} + +// Recover can normally Match any char in its vocabulary after Matching +// a token, so here we do the easy thing and just kill a character and hope +// it all works out. You can instead use the rule invocation stack +// to do sophisticated error recovery if you are in a fragment rule. +// +// In general, lexers should not need to recover and should have rules that cover any eventuality, such as +// a character that makes no sense to the recognizer. +func (b *BaseLexer) Recover(re RecognitionException) { + if b.input.LA(1) != TokenEOF { + if _, ok := re.(*LexerNoViableAltException); ok { + // Skip a char and try again + b.Interpreter.Consume(b.input) + } else { + // TODO: Do we lose character or line position information? + b.input.Consume() + } + } +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go new file mode 100644 index 0000000000..eaa7393e06 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go @@ -0,0 +1,452 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "strconv" + +const ( + // LexerActionTypeChannel represents a [LexerChannelAction] action. + LexerActionTypeChannel = 0 + + // LexerActionTypeCustom represents a [LexerCustomAction] action. + LexerActionTypeCustom = 1 + + // LexerActionTypeMode represents a [LexerModeAction] action. + LexerActionTypeMode = 2 + + // LexerActionTypeMore represents a [LexerMoreAction] action. + LexerActionTypeMore = 3 + + // LexerActionTypePopMode represents a [LexerPopModeAction] action. + LexerActionTypePopMode = 4 + + // LexerActionTypePushMode represents a [LexerPushModeAction] action. + LexerActionTypePushMode = 5 + + // LexerActionTypeSkip represents a [LexerSkipAction] action. + LexerActionTypeSkip = 6 + + // LexerActionTypeType represents a [LexerTypeAction] action. 
+ LexerActionTypeType = 7 +) + +type LexerAction interface { + getActionType() int + getIsPositionDependent() bool + execute(lexer Lexer) + Hash() int + Equals(other LexerAction) bool +} + +type BaseLexerAction struct { + actionType int + isPositionDependent bool +} + +func NewBaseLexerAction(action int) *BaseLexerAction { + la := new(BaseLexerAction) + + la.actionType = action + la.isPositionDependent = false + + return la +} + +func (b *BaseLexerAction) execute(_ Lexer) { + panic("Not implemented") +} + +func (b *BaseLexerAction) getActionType() int { + return b.actionType +} + +func (b *BaseLexerAction) getIsPositionDependent() bool { + return b.isPositionDependent +} + +func (b *BaseLexerAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, b.actionType) + return murmurFinish(h, 1) +} + +func (b *BaseLexerAction) Equals(other LexerAction) bool { + return b.actionType == other.getActionType() +} + +// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip]. +// +// The Skip command does not have any parameters, so this action is +// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE]. +type LexerSkipAction struct { + *BaseLexerAction +} + +func NewLexerSkipAction() *LexerSkipAction { + la := new(LexerSkipAction) + la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip) + return la +} + +// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action. +var LexerSkipActionINSTANCE = NewLexerSkipAction() + +func (l *LexerSkipAction) execute(lexer Lexer) { + lexer.Skip() +} + +// String returns a string representation of the current [LexerSkipAction]. +func (l *LexerSkipAction) String() string { + return "skip" +} + +func (b *LexerSkipAction) Equals(other LexerAction) bool { + return other.getActionType() == LexerActionTypeSkip +} + +// Implements the {@code type} lexer action by calling {@link Lexer//setType} +// +// with the assigned type. +type LexerTypeAction struct { + *BaseLexerAction + + thetype int +} + +func NewLexerTypeAction(thetype int) *LexerTypeAction { + l := new(LexerTypeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType) + l.thetype = thetype + return l +} + +func (l *LexerTypeAction) execute(lexer Lexer) { + lexer.SetType(l.thetype) +} + +func (l *LexerTypeAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.thetype) + return murmurFinish(h, 2) +} + +func (l *LexerTypeAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerTypeAction); !ok { + return false + } else { + return l.thetype == other.(*LexerTypeAction).thetype + } +} + +func (l *LexerTypeAction) String() string { + return "actionType(" + strconv.Itoa(l.thetype) + ")" +} + +// LexerPushModeAction implements the pushMode lexer action by calling +// [Lexer.pushMode] with the assigned mode. +type LexerPushModeAction struct { + *BaseLexerAction + mode int +} + +func NewLexerPushModeAction(mode int) *LexerPushModeAction { + + l := new(LexerPushModeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode) + + l.mode = mode + return l +} + +//
+// This action is implemented by calling {@link Lexer//pushMode} with the
+// value provided by {@link //getMode}.
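+//
+// A minimal sketch of the effect on the mode stack (lex stands for any value
+// implementing Lexer; mode 1 is a hypothetical grammar mode):
+//
+//    NewLexerPushModeAction(1).execute(lex) // saves the current mode, then switches to mode 1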
+func (l *LexerPushModeAction) execute(lexer Lexer) { + lexer.PushMode(l.mode) +} + +func (l *LexerPushModeAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.mode) + return murmurFinish(h, 2) +} + +func (l *LexerPushModeAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerPushModeAction); !ok { + return false + } else { + return l.mode == other.(*LexerPushModeAction).mode + } +} + +func (l *LexerPushModeAction) String() string { + return "pushMode(" + strconv.Itoa(l.mode) + ")" +} + +// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode]. +// +// The popMode command does not have any parameters, so this action is +// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE] +type LexerPopModeAction struct { + *BaseLexerAction +} + +func NewLexerPopModeAction() *LexerPopModeAction { + + l := new(LexerPopModeAction) + + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode) + + return l +} + +var LexerPopModeActionINSTANCE = NewLexerPopModeAction() + +//
+// This action is implemented by calling {@link Lexer//popMode}.
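+//
+// Sketch (lex is any Lexer; a matching pushMode must have run earlier,
+// otherwise PopMode panics on the empty mode stack):
+//
+//    LexerPopModeActionINSTANCE.execute(lex) // restores the most recently pushed mode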
+func (l *LexerPopModeAction) execute(lexer Lexer) { + lexer.PopMode() +} + +func (l *LexerPopModeAction) String() string { + return "popMode" +} + +// Implements the {@code more} lexer action by calling {@link Lexer//more}. +// +//
+// The {@code more} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
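+//
+// Sketch (lex is any Lexer): executing the shared instance tells the lexer to
+// keep the text Matched so far and continue it into the next rule's token:
+//
+//    LexerMoreActionINSTANCE.execute(lex) // lex.More(): token type becomes LexerMore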
+ +type LexerMoreAction struct { + *BaseLexerAction +} + +func NewLexerMoreAction() *LexerMoreAction { + l := new(LexerMoreAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore) + + return l +} + +var LexerMoreActionINSTANCE = NewLexerMoreAction() + +//
+// This action is implemented by calling {@link Lexer//more}.
+func (l *LexerMoreAction) execute(lexer Lexer) { + lexer.More() +} + +func (l *LexerMoreAction) String() string { + return "more" +} + +// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with +// the assigned mode. +type LexerModeAction struct { + *BaseLexerAction + mode int +} + +func NewLexerModeAction(mode int) *LexerModeAction { + l := new(LexerModeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode) + l.mode = mode + return l +} + +//
+// This action is implemented by calling {@link Lexer//mode} with the
+// value provided by {@link //getMode}.
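+//
+// Sketch (lex is any Lexer; mode 2 is a hypothetical grammar mode). Unlike
+// pushMode, this switches modes without saving the current one:
+//
+//    NewLexerModeAction(2).execute(lex) // lex.SetMode(2)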
+func (l *LexerModeAction) execute(lexer Lexer) { + lexer.SetMode(l.mode) +} + +func (l *LexerModeAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.mode) + return murmurFinish(h, 2) +} + +func (l *LexerModeAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerModeAction); !ok { + return false + } else { + return l.mode == other.(*LexerModeAction).mode + } +} + +func (l *LexerModeAction) String() string { + return "mode(" + strconv.Itoa(l.mode) + ")" +} + +// Executes a custom lexer action by calling {@link Recognizer//action} with the +// rule and action indexes assigned to the custom action. The implementation of +// a custom action is added to the generated code for the lexer in an override +// of {@link Recognizer//action} when the grammar is compiled. +// +//
+// This class may represent embedded actions created with the {...}
+// syntax in ANTLR 4, as well as actions created for lexer commands where the
+// command argument could not be evaluated when the grammar was compiled.
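+//
+// Sketch (lex is any Lexer; both indexes are placeholders that the code
+// generator would normally assign):
+//
+//    a := NewLexerCustomAction(0, 0) // rule index 0, action index 0
+//    a.execute(lex)                  // forwards to lex.Action(nil, 0, 0)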
+ +// Constructs a custom lexer action with the specified rule and action +// indexes. +// +// @param ruleIndex The rule index to use for calls to +// {@link Recognizer//action}. +// @param actionIndex The action index to use for calls to +// {@link Recognizer//action}. + +type LexerCustomAction struct { + *BaseLexerAction + ruleIndex, actionIndex int +} + +func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction { + l := new(LexerCustomAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom) + l.ruleIndex = ruleIndex + l.actionIndex = actionIndex + l.isPositionDependent = true + return l +} + +//
+// Custom actions are implemented by calling {@link Lexer//action} with the
+// appropriate rule and action indexes.
+func (l *LexerCustomAction) execute(lexer Lexer) { + lexer.Action(nil, l.ruleIndex, l.actionIndex) +} + +func (l *LexerCustomAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.ruleIndex) + h = murmurUpdate(h, l.actionIndex) + return murmurFinish(h, 3) +} + +func (l *LexerCustomAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerCustomAction); !ok { + return false + } else { + return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && + l.actionIndex == other.(*LexerCustomAction).actionIndex + } +} + +// LexerChannelAction implements the channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. +type LexerChannelAction struct { + *BaseLexerAction + channel int +} + +// NewLexerChannelAction creates a channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. +func NewLexerChannelAction(channel int) *LexerChannelAction { + l := new(LexerChannelAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) + l.channel = channel + return l +} + +//
+// This action is implemented by calling {@link Lexer//setChannel} with the
+// value provided by {@link //getChannel}.
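+//
+// Sketch (lex is any Lexer): routing a token to the hidden channel, as a
+// grammar's '-> channel(HIDDEN)' lexer command would:
+//
+//    NewLexerChannelAction(TokenHiddenChannel).execute(lex) // lex.SetChannel(TokenHiddenChannel)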
+func (l *LexerChannelAction) execute(lexer Lexer) { + lexer.SetChannel(l.channel) +} + +func (l *LexerChannelAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.channel) + return murmurFinish(h, 2) +} + +func (l *LexerChannelAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerChannelAction); !ok { + return false + } else { + return l.channel == other.(*LexerChannelAction).channel + } +} + +func (l *LexerChannelAction) String() string { + return "channel(" + strconv.Itoa(l.channel) + ")" +} + +// This implementation of {@link LexerAction} is used for tracking input offsets +// for position-dependent actions within a {@link LexerActionExecutor}. +// +//
+// This action is not serialized as part of the ATN, and is only required for
+// position-dependent lexer actions which appear at a location other than the
+// end of a rule. For more information about DFA optimizations employed for
+// lexer actions, see {@link LexerActionExecutor//append} and
+// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
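+//
+// Sketch (inner is any position-dependent LexerAction; the offset 3 is
+// illustrative): the wrapper replays inner at tokenStartIndex+3 instead of
+// at the end of the token:
+//
+//    wrapped := NewLexerIndexedCustomAction(3, inner)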
+ +type LexerIndexedCustomAction struct { + *BaseLexerAction + offset int + lexerAction LexerAction + isPositionDependent bool +} + +// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset +// with a [LexerAction]. +// +// Note: This class is only required for lexer actions for which +// [LexerAction.isPositionDependent] returns true. +// +// The offset points into the input [CharStream], relative to +// the token start index, at which the specified lexerAction should be +// executed. +func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { + + l := new(LexerIndexedCustomAction) + l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType()) + + l.offset = offset + l.lexerAction = lexerAction + l.isPositionDependent = true + + return l +} + +//
+// This method calls {@link //execute} on the result of {@link //getAction}
+// using the provided {@code lexer}.
+func (l *LexerIndexedCustomAction) execute(lexer Lexer) { + // assume the input stream position was properly set by the calling code + l.lexerAction.execute(lexer) +} + +func (l *LexerIndexedCustomAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.offset) + h = murmurUpdate(h, l.lexerAction.Hash()) + return murmurFinish(h, 2) +} + +func (l *LexerIndexedCustomAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerIndexedCustomAction); !ok { + return false + } else { + return l.offset == other.(*LexerIndexedCustomAction).offset && + l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction) + } +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go new file mode 100644 index 0000000000..dfc28c32b3 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go @@ -0,0 +1,173 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "golang.org/x/exp/slices" + +// Represents an executor for a sequence of lexer actions which traversed during +// the Matching operation of a lexer rule (token). +// +//
+// The executor tracks position information for position-dependent lexer actions
+// efficiently, ensuring that actions appearing only at the end of the rule do
+// not cause bloating of the {@link DFA} created for the lexer.
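+//
+// Sketch of building an executor one action at a time, as getEpsilonTarget in
+// lexer_atn_simulator.go does when crossing action transitions (both calls
+// are defined in this file):
+//
+//    var ex *LexerActionExecutor
+//    ex = LexerActionExecutorappend(ex, LexerSkipActionINSTANCE)                  // one action
+//    ex = LexerActionExecutorappend(ex, NewLexerChannelAction(TokenHiddenChannel)) // two, run in order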
+ +type LexerActionExecutor struct { + lexerActions []LexerAction + cachedHash int +} + +func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { + + if lexerActions == nil { + lexerActions = make([]LexerAction, 0) + } + + l := new(LexerActionExecutor) + + l.lexerActions = lexerActions + + // Caches the result of {@link //hashCode} since the hash code is an element + // of the performance-critical {@link ATNConfig//hashCode} operation. + l.cachedHash = murmurInit(0) + for _, a := range lexerActions { + l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) + } + l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions)) + + return l +} + +// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for +// the input [LexerActionExecutor] followed by a specified +// [LexerAction]. +// TODO: This does not match the Java code +func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { + if lexerActionExecutor == nil { + return NewLexerActionExecutor([]LexerAction{lexerAction}) + } + + return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) +} + +// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset +// for position-dependent lexer actions. +// +// Normally, when the executor encounters lexer actions where +// [LexerAction.isPositionDependent] returns true, it calls +// [IntStream.Seek] on the input [CharStream] to set the input +// position to the end of the current token. This behavior provides +// for efficient [DFA] representation of lexer actions which appear at the end +// of a lexer rule, even when the lexer rule Matches a variable number of +// characters. +// +// Prior to traversing a Match transition in the [ATN], the current offset +// from the token start index is assigned to all position-dependent lexer +// actions which have not already been assigned a fixed offset. By storing +// the offsets relative to the token start index, the [DFA] representation of +// lexer actions which appear in the middle of tokens remains efficient due +// to sharing among tokens of the same Length, regardless of their absolute +// position in the input stream. +// +// If the current executor already has offsets assigned to all +// position-dependent lexer actions, the method returns this instance. +// +// The offset is assigned to all position-dependent +// lexer actions which do not already have offsets assigned. +// +// The func returns a [LexerActionExecutor] that stores input stream offsets +// for all position-dependent lexer actions. +func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { + var updatedLexerActions []LexerAction + for i := 0; i < len(l.lexerActions); i++ { + _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) + if l.lexerActions[i].getIsPositionDependent() && !ok { + if updatedLexerActions == nil { + updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions)) + updatedLexerActions = append(updatedLexerActions, l.lexerActions...) + } + updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) + } + } + if updatedLexerActions == nil { + return l + } + + return NewLexerActionExecutor(updatedLexerActions) +} + +// Execute the actions encapsulated by l executor within the context of a +// particular {@link Lexer}. +// +//
+// This method calls {@link IntStream//seek} to set the position of the
+// {@code input} {@link CharStream} prior to calling
+// {@link LexerAction//execute} on a position-dependent action. Before the
+// method returns, the input position will be restored to the same position
+// it was in when the method was invoked.
+// +// @param lexer The lexer instance. +// @param input The input stream which is the source for the current token. +// When l method is called, the current {@link IntStream//index} for +// {@code input} should be the start of the following token, i.e. 1 +// character past the end of the current token. +// @param startIndex The token start index. This value may be passed to +// {@link IntStream//seek} to set the {@code input} position to the beginning +// of the token. +// / +func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { + requiresSeek := false + stopIndex := input.Index() + + defer func() { + if requiresSeek { + input.Seek(stopIndex) + } + }() + + for i := 0; i < len(l.lexerActions); i++ { + lexerAction := l.lexerActions[i] + if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { + offset := la.offset + input.Seek(startIndex + offset) + lexerAction = la.lexerAction + requiresSeek = (startIndex + offset) != stopIndex + } else if lexerAction.getIsPositionDependent() { + input.Seek(stopIndex) + requiresSeek = false + } + lexerAction.execute(lexer) + } +} + +func (l *LexerActionExecutor) Hash() int { + if l == nil { + // TODO: Why is this here? l should not be nil + return 61 + } + + // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode + return l.cachedHash +} + +func (l *LexerActionExecutor) Equals(other interface{}) bool { + if l == other { + return true + } + othert, ok := other.(*LexerActionExecutor) + if !ok { + return false + } + if othert == nil { + return false + } + if l.cachedHash != othert.cachedHash { + return false + } + if len(l.lexerActions) != len(othert.lexerActions) { + return false + } + return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { + return i.Equals(j) + }) +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go new file mode 100644 index 0000000000..fe938b0259 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go @@ -0,0 +1,677 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +//goland:noinspection GoUnusedGlobalVariable +var ( + LexerATNSimulatorMinDFAEdge = 0 + LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN + + LexerATNSimulatorMatchCalls = 0 +) + +type ILexerATNSimulator interface { + IATNSimulator + + reset() + Match(input CharStream, mode int) int + GetCharPositionInLine() int + GetLine() int + GetText(input CharStream) string + Consume(input CharStream) +} + +type LexerATNSimulator struct { + BaseATNSimulator + + recog Lexer + predictionMode int + mergeCache *JPCMap2 + startIndex int + Line int + CharPositionInLine int + mode int + prevAccept *SimState + MatchCalls int +} + +func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { + l := &LexerATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } + + l.decisionToDFA = decisionToDFA + l.recog = recog + + // The current token's starting index into the character stream. 
+ // Shared across DFA to ATN simulation in case the ATN fails and the + // DFA did not have a previous accept state. In l case, we use the + // ATN-generated exception object. + l.startIndex = -1 + + // line number 1..n within the input + l.Line = 1 + + // The index of the character relative to the beginning of the line + // 0..n-1 + l.CharPositionInLine = 0 + + l.mode = LexerDefaultMode + + // Used during DFA/ATN exec to record the most recent accept configuration + // info + l.prevAccept = NewSimState() + + return l +} + +func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) { + l.CharPositionInLine = simulator.CharPositionInLine + l.Line = simulator.Line + l.mode = simulator.mode + l.startIndex = simulator.startIndex +} + +func (l *LexerATNSimulator) Match(input CharStream, mode int) int { + l.MatchCalls++ + l.mode = mode + mark := input.Mark() + + defer func() { + input.Release(mark) + }() + + l.startIndex = input.Index() + l.prevAccept.reset() + + dfa := l.decisionToDFA[mode] + + var s0 *DFAState + l.atn.stateMu.RLock() + s0 = dfa.getS0() + l.atn.stateMu.RUnlock() + + if s0 == nil { + return l.MatchATN(input) + } + + return l.execATN(input, s0) +} + +func (l *LexerATNSimulator) reset() { + l.prevAccept.reset() + l.startIndex = -1 + l.Line = 1 + l.CharPositionInLine = 0 + l.mode = LexerDefaultMode +} + +func (l *LexerATNSimulator) MatchATN(input CharStream) int { + startState := l.atn.modeToStartState[l.mode] + + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) + } + oldMode := l.mode + s0Closure := l.computeStartState(input, startState) + suppressEdge := s0Closure.hasSemanticContext + s0Closure.hasSemanticContext = false + + next := l.addDFAState(s0Closure, suppressEdge) + + predict := l.execATN(input, next) + + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) + } + return predict +} + +func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { + + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("start state closure=" + ds0.configs.String()) + } + if ds0.isAcceptState { + // allow zero-Length tokens + l.captureSimState(l.prevAccept, input, ds0) + } + t := input.LA(1) + s := ds0 // s is current/from DFA state + + for { // while more work + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("execATN loop starting closure: " + s.configs.String()) + } + + // As we move src->trg, src->trg, we keep track of the previous trg to + // avoid looking up the DFA state again, which is expensive. + // If the previous target was already part of the DFA, we might + // be able to avoid doing a reach operation upon t. If s!=nil, + // it means that semantic predicates didn't prevent us from + // creating a DFA state. Once we know s!=nil, we check to see if + // the DFA state has an edge already for t. If so, we can just reuse + // it's configuration set there's no point in re-computing it. + // This is kind of like doing DFA simulation within the ATN + // simulation because DFA simulation is really just a way to avoid + // computing reach/closure sets. Technically, once we know that + // we have a previously added DFA state, we could jump over to + // the DFA simulator. But, that would mean popping back and forth + // a lot and making things more complicated algorithmically. + // This optimization makes a lot of sense for loops within DFA. 
+ // A character will take us back to an existing DFA state + // that already has lots of edges out of it. e.g., .* in comments. + target := l.getExistingTargetState(s, t) + if target == nil { + target = l.computeTargetState(input, s, t) + // print("Computed:" + str(target)) + } + if target == ATNSimulatorError { + break + } + // If l is a consumable input element, make sure to consume before + // capturing the accept state so the input index, line, and char + // position accurately reflect the state of the interpreter at the + // end of the token. + if t != TokenEOF { + l.Consume(input) + } + if target.isAcceptState { + l.captureSimState(l.prevAccept, input, target) + if t == TokenEOF { + break + } + } + t = input.LA(1) + s = target // flip current DFA target becomes new src/from state + } + + return l.failOrAccept(l.prevAccept, input, s.configs, t) +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// l method returns {@code nil}. +// +// @param s The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for l edge is not +// already cached +func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState { + if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge { + return nil + } + + l.atn.edgeMu.RLock() + defer l.atn.edgeMu.RUnlock() + if s.getEdges() == nil { + return nil + } + target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge) + if runtimeConfig.lexerATNSimulatorDebug && target != nil { + fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) + } + return target +} + +// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the +// computed state and corresponding edge to the [DFA]. +// +// The func returns the computed target [DFA] state for the given input symbol t. +// If this does not lead to a valid [DFA] state, this method +// returns ATNSimulatorError. +func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { + reach := NewOrderedATNConfigSet() + + // if we don't find an existing DFA state + // Fill reach starting from closure, following t transitions + l.getReachableConfigSet(input, s.configs, reach, t) + + if len(reach.configs) == 0 { // we got nowhere on t from s + if !reach.hasSemanticContext { + // we got nowhere on t, don't panic out l knowledge it'd + // cause a fail-over from DFA later. 
+ l.addDFAEdge(s, t, ATNSimulatorError, nil) + } + // stop when we can't Match any more char + return ATNSimulatorError + } + // Add an edge from s to target DFA found/created for reach + return l.addDFAEdge(s, t, nil, reach) +} + +func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int { + if l.prevAccept.dfaState != nil { + lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor + l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) + return prevAccept.dfaState.prediction + } + + // if no accept and EOF is first char, return EOF + if t == TokenEOF && input.Index() == l.startIndex { + return TokenEOF + } + + panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) +} + +// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations +// we can reach upon input t. +// +// Parameter reach is a return parameter. +func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) { + // l is used to Skip processing for configs which have a lower priority + // than a runtimeConfig that already reached an accept state for the same rule + SkipAlt := ATNInvalidAltNumber + + for _, cfg := range closure.configs { + currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt + if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision { + continue + } + + if runtimeConfig.lexerATNSimulatorDebug { + + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) + } + + for _, trans := range cfg.GetState().GetTransitions() { + target := l.getReachableTarget(trans, t) + if target != nil { + lexerActionExecutor := cfg.lexerActionExecutor + if lexerActionExecutor != nil { + lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) + } + treatEOFAsEpsilon := t == TokenEOF + config := NewLexerATNConfig3(cfg, target, lexerActionExecutor) + if l.closure(input, config, reach, + currentAltReachedAcceptState, true, treatEOFAsEpsilon) { + // any remaining configs for l alt have a lower priority + // than the one that just reached an accept state. + SkipAlt = cfg.GetAlt() + } + } + } + } +} + +func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Printf("ACTION %v\n", lexerActionExecutor) + } + // seek to after last char in token + input.Seek(index) + l.Line = line + l.CharPositionInLine = charPos + if lexerActionExecutor != nil && l.recog != nil { + lexerActionExecutor.execute(l.recog, input, startIndex) + } +} + +func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState { + if trans.Matches(t, 0, LexerMaxCharValue) { + return trans.getTarget() + } + + return nil +} + +func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet { + configs := NewOrderedATNConfigSet() + for i := 0; i < len(p.GetTransitions()); i++ { + target := p.GetTransitions()[i].getTarget() + cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY) + l.closure(input, cfg, configs, false, false, false) + } + + return configs +} + +// closure since the alternatives within any lexer decision are ordered by +// preference, this method stops pursuing the closure as soon as an accept +// state is reached. 
After the first accept state is reached by depth-first +// search from runtimeConfig, all other (potentially reachable) states for +// this rule would have a lower priority. +// +// The func returns true if an accept state is reached. +func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet, + currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { + + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") + } + + _, ok := config.state.(*RuleStopState) + if ok { + + if runtimeConfig.lexerATNSimulatorDebug { + if l.recog != nil { + fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) + } else { + fmt.Printf("closure at rule stop %s\n", config) + } + } + + if config.context == nil || config.context.hasEmptyPath() { + if config.context == nil || config.context.isEmpty() { + configs.Add(config, nil) + return true + } + + configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil) + currentAltReachedAcceptState = true + } + if config.context != nil && !config.context.isEmpty() { + for i := 0; i < config.context.length(); i++ { + if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState { + newContext := config.context.GetParent(i) // "pop" return state + returnState := l.atn.states[config.context.getReturnState(i)] + cfg := NewLexerATNConfig2(config, returnState, newContext) + currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) + } + } + } + return currentAltReachedAcceptState + } + // optimization + if !config.state.GetEpsilonOnlyTransitions() { + if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision { + configs.Add(config, nil) + } + } + for j := 0; j < len(config.state.GetTransitions()); j++ { + trans := config.state.GetTransitions()[j] + cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon) + if cfg != nil { + currentAltReachedAcceptState = l.closure(input, cfg, configs, + currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) + } + } + return currentAltReachedAcceptState +} + +// side-effect: can alter configs.hasSemanticContext +func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition, + configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig { + + var cfg *ATNConfig + + if trans.getSerializationType() == TransitionRULE { + + rt := trans.(*RuleTransition) + newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber()) + cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext) + + } else if trans.getSerializationType() == TransitionPRECEDENCE { + panic("Precedence predicates are not supported in lexers.") + } else if trans.getSerializationType() == TransitionPREDICATE { + // Track traversing semantic predicates. If we traverse, + // we cannot add a DFA state for l "reach" computation + // because the DFA would not test the predicate again in the + // future. Rather than creating collections of semantic predicates + // like v3 and testing them on prediction, v4 will test them on the + // fly all the time using the ATN not the DFA. This is slower but + // semantically it's not used that often. One of the key elements to + // l predicate mechanism is not adding DFA states that see + // predicates immediately afterwards in the ATN. For example, + + // a : ID {p1}? 
| ID {p2}? + + // should create the start state for rule 'a' (to save start state + // competition), but should not create target of ID state. The + // collection of ATN states the following ID references includes + // states reached by traversing predicates. Since l is when we + // test them, we cannot cash the DFA state target of ID. + + pt := trans.(*PredicateTransition) + + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) + } + configs.hasSemanticContext = true + if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } else if trans.getSerializationType() == TransitionACTION { + if config.context == nil || config.context.hasEmptyPath() { + // execute actions anywhere in the start rule for a token. + // + // TODO: if the entry rule is invoked recursively, some + // actions may be executed during the recursive call. The + // problem can appear when hasEmptyPath() is true but + // isEmpty() is false. In this case, the config needs to be + // split into two contexts - one with just the empty path + // and another with everything but the empty path. + // Unfortunately, the current algorithm does not allow + // getEpsilonTarget to return two configurations, so + // additional modifications are needed before we can support + // the split operation. + lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex]) + cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor) + } else { + // ignore actions in referenced rules + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } else if trans.getSerializationType() == TransitionEPSILON { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } else if trans.getSerializationType() == TransitionATOM || + trans.getSerializationType() == TransitionRANGE || + trans.getSerializationType() == TransitionSET { + if treatEOFAsEpsilon { + if trans.Matches(TokenEOF, 0, LexerMaxCharValue) { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } + } + return cfg +} + +// evaluatePredicate eEvaluates a predicate specified in the lexer. +// +// If speculative is true, this method was called before +// [consume] for the Matched character. This method should call +// [consume] before evaluating the predicate to ensure position +// sensitive values, including [GetText], [GetLine], +// and [GetColumn], properly reflect the current +// lexer state. This method should restore input and the simulator +// to the original state before returning, i.e. undo the actions made by the +// call to [Consume]. +// +// The func returns true if the specified predicate evaluates to true. 
+func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { + // assume true if no recognizer was provided + if l.recog == nil { + return true + } + if !speculative { + return l.recog.Sempred(nil, ruleIndex, predIndex) + } + savedcolumn := l.CharPositionInLine + savedLine := l.Line + index := input.Index() + marker := input.Mark() + + defer func() { + l.CharPositionInLine = savedcolumn + l.Line = savedLine + input.Seek(index) + input.Release(marker) + }() + + l.Consume(input) + return l.recog.Sempred(nil, ruleIndex, predIndex) +} + +func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) { + settings.index = input.Index() + settings.line = l.Line + settings.column = l.CharPositionInLine + settings.dfaState = dfaState +} + +func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState { + if to == nil && cfgs != nil { + // leading to l call, ATNConfigSet.hasSemanticContext is used as a + // marker indicating dynamic predicate evaluation makes l edge + // dependent on the specific input sequence, so the static edge in the + // DFA should be omitted. The target DFAState is still created since + // execATN has the ability to reSynchronize with the DFA state cache + // following the predicate evaluation step. + // + // TJP notes: next time through the DFA, we see a pred again and eval. + // If that gets us to a previously created (but dangling) DFA + // state, we can continue in pure DFA mode from there. + // + suppressEdge := cfgs.hasSemanticContext + cfgs.hasSemanticContext = false + to = l.addDFAState(cfgs, true) + + if suppressEdge { + return to + } + } + // add the edge + if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge { + // Only track edges within the DFA bounds + return to + } + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) + } + l.atn.edgeMu.Lock() + defer l.atn.edgeMu.Unlock() + if from.getEdges() == nil { + // make room for tokens 1..n and -1 masquerading as index 0 + from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1)) + } + from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect + + return to +} + +// Add a NewDFA state if there isn't one with l set of +// configurations already. This method also detects the first +// configuration containing an ATN rule stop state. Later, when +// traversing the DFA, we will know which rule to accept. +func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState { + + proposed := NewDFAState(-1, configs) + var firstConfigWithRuleStopState *ATNConfig + + for _, cfg := range configs.configs { + _, ok := cfg.GetState().(*RuleStopState) + + if ok { + firstConfigWithRuleStopState = cfg + break + } + } + if firstConfigWithRuleStopState != nil { + proposed.isAcceptState = true + proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor + proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) + } + dfa := l.decisionToDFA[l.mode] + + l.atn.stateMu.Lock() + defer l.atn.stateMu.Unlock() + existing, present := dfa.Get(proposed) + if present { + + // This state was already present, so just return it. 
+ // + proposed = existing + } else { + + // We need to add the new state + // + proposed.stateNumber = dfa.Len() + configs.readOnly = true + configs.configLookup = nil // Not needed now + proposed.configs = configs + dfa.Put(proposed) + } + if !suppressEdge { + dfa.setS0(proposed) + } + return proposed +} + +func (l *LexerATNSimulator) getDFA(mode int) *DFA { + return l.decisionToDFA[mode] +} + +// GetText returns the text [Match]ed so far for the current token. +func (l *LexerATNSimulator) GetText(input CharStream) string { + // index is first lookahead char, don't include. + return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) +} + +func (l *LexerATNSimulator) Consume(input CharStream) { + curChar := input.LA(1) + if curChar == int('\n') { + l.Line++ + l.CharPositionInLine = 0 + } else { + l.CharPositionInLine++ + } + input.Consume() +} + +func (l *LexerATNSimulator) GetCharPositionInLine() int { + return l.CharPositionInLine +} + +func (l *LexerATNSimulator) GetLine() int { + return l.Line +} + +func (l *LexerATNSimulator) GetTokenName(tt int) string { + if tt == -1 { + return "EOF" + } + + var sb strings.Builder + sb.Grow(6) + sb.WriteByte('\'') + sb.WriteRune(rune(tt)) + sb.WriteByte('\'') + + return sb.String() +} + +func resetSimState(sim *SimState) { + sim.index = -1 + sim.line = 0 + sim.column = -1 + sim.dfaState = nil +} + +type SimState struct { + index int + line int + column int + dfaState *DFAState +} + +func NewSimState() *SimState { + s := new(SimState) + resetSimState(s) + return s +} + +func (s *SimState) reset() { + resetSimState(s) +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go new file mode 100644 index 0000000000..4955ac876f --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go @@ -0,0 +1,218 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type LL1Analyzer struct { + atn *ATN +} + +func NewLL1Analyzer(atn *ATN) *LL1Analyzer { + la := new(LL1Analyzer) + la.atn = atn + return la +} + +const ( + // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit + // a predicate during analysis if + // + // seeThruPreds==false + LL1AnalyzerHitPred = TokenInvalidType +) + +// * +// Calculates the SLL(1) expected lookahead set for each outgoing transition +// of an {@link ATNState}. The returned array has one element for each +// outgoing transition in {@code s}. If the closure from transition +// i leads to a semantic predicate before Matching a symbol, the +// element at index i of the result will be {@code nil}. +// +// @param s the ATN state +// @return the expected symbols for each outgoing transition of {@code s}. 
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { + if s == nil { + return nil + } + count := len(s.GetTransitions()) + look := make([]*IntervalSet, count) + for alt := 0; alt < count; alt++ { + + look[alt] = NewIntervalSet() + lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy") + la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false) + + // Wipe out lookahead for la alternative if we found nothing, + // or we had a predicate when we !seeThruPreds + if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { + look[alt] = nil + } + } + return look +} + +// Look computes the set of tokens that can follow s in the [ATN] in the +// specified ctx. +// +// If ctx is nil and the end of the rule containing +// s is reached, [EPSILON] is added to the result set. +// +// If ctx is not nil and the end of the outermost rule is +// reached, [EOF] is added to the result set. +// +// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a +// [BlockEndState] to detect epsilon paths through a closure. +// +// Parameter ctx is the complete parser context, or nil if the context +// should be ignored +// +// The func returns the set of tokens that can follow s in the [ATN] in the +// specified ctx. +func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { + r := NewIntervalSet() + var lookContext *PredictionContext + if ctx != nil { + lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) + } + la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"), + NewBitSet(), true, true) + return r +} + +//* +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//
+// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
+// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
+// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
+// {@code true} and {@code stopState} or the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
+// +// @param s the ATN state. +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx The outer context, or {@code nil} if the outer context should +// not be used. +// @param look The result lookahead set. +// @param lookBusy A set used for preventing epsilon closures in the ATN +// from causing a stack overflow. Outside code should pass +// {@code NewSet} for la argument. +// @param calledRuleStack A set used for preventing left recursion in the +// ATN from causing a stack overflow. Outside code should pass +// {@code NewBitSet()} for la argument. +// @param seeThruPreds {@code true} to true semantic predicates as +// implicitly {@code true} and "see through them", otherwise {@code false} +// to treat semantic predicates as opaque and add {@link //HitPred} to the +// result if one is encountered. +// @param addEOF Add {@link Token//EOF} to the result if the end of the +// outermost context is reached. This parameter has no effect if {@code ctx} +// is {@code nil}. + +func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], + calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { + + returnState := la.atn.states[ctx.getReturnState(i)] + la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} + +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { + + c := NewATNConfig6(s, 0, ctx) + + if lookBusy.Contains(c) { + return + } + + _, present := lookBusy.Put(c) + if present { + return + + } + if s == stopState { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + } + + _, ok := s.(*RuleStopState) + + if ok { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + + if ctx.pcType != PredictionContextEmpty { + removed := calledRuleStack.contains(s.GetRuleIndex()) + defer func() { + if removed { + calledRuleStack.add(s.GetRuleIndex()) + } + }() + calledRuleStack.remove(s.GetRuleIndex()) + // run thru all possible stack tops in ctx + for i := 0; i < ctx.length(); i++ { + returnState := la.atn.states[ctx.getReturnState(i)] + la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) + } + return + } + } + + n := len(s.GetTransitions()) + + for i := 0; i < n; i++ { + t := s.GetTransitions()[i] + + if t1, ok := t.(*RuleTransition); ok { + if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { + continue + } + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) + } else if t2, ok := t.(AbstractPredicateTransition); ok { + if seeThruPreds { + la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else { + look.addOne(LL1AnalyzerHitPred) + } + } else if t.getIsEpsilon() { + la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else if _, ok := t.(*WildcardTransition); ok { + look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) + } else { + set := t.getLabel() + if set != nil { + if _, ok := 
t.(*NotSetTransition); ok { + set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) + } + look.addSet(set) + } + } + } +} + +func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], + calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + + defer func() { + calledRuleStack.remove(t1.getTarget().GetRuleIndex()) + }() + + calledRuleStack.add(t1.getTarget().GetRuleIndex()) + la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go new file mode 100644 index 0000000000..923c7b52c4 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go @@ -0,0 +1,47 @@ +//go:build !antlr.stats + +package antlr + +// This file is compiled when the build configuration antlr.stats is not enabled. +// which then allows the compiler to optimize out all the code that is not used. +const collectStats = false + +// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled. +type goRunStats struct { +} + +var Statistics = &goRunStats{} + +func (s *goRunStats) AddJStatRec(_ *JStatRec) { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) CollectionAnomalies() { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) Reset() { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) Report(dir string, prefix string) error { + // Do nothing - compiler will optimize this out (hopefully) + return nil +} + +func (s *goRunStats) Analyze() { + // Do nothing - compiler will optimize this out (hopefully) +} + +type statsOption func(*goRunStats) error + +func (s *goRunStats) Configure(options ...statsOption) error { + // Do nothing - compiler will optimize this out (hopefully) + return nil +} + +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + return nil + } +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser.go new file mode 100644 index 0000000000..fb57ac15db --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser.go @@ -0,0 +1,700 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" + "strconv" +) + +type Parser interface { + Recognizer + + GetInterpreter() *ParserATNSimulator + + GetTokenStream() TokenStream + GetTokenFactory() TokenFactory + GetParserRuleContext() ParserRuleContext + SetParserRuleContext(ParserRuleContext) + Consume() Token + GetParseListeners() []ParseTreeListener + + GetErrorHandler() ErrorStrategy + SetErrorHandler(ErrorStrategy) + GetInputStream() IntStream + GetCurrentToken() Token + GetExpectedTokens() *IntervalSet + NotifyErrorListeners(string, Token, RecognitionException) + IsExpectedToken(int) bool + GetPrecedence() int + GetRuleInvocationStack(ParserRuleContext) []string +} + +type BaseParser struct { + *BaseRecognizer + + Interpreter *ParserATNSimulator + BuildParseTrees bool + + input TokenStream + errHandler ErrorStrategy + precedenceStack IntStack + ctx ParserRuleContext + + tracer *TraceListener + parseListeners []ParseTreeListener + _SyntaxErrors int +} + +// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error +// recovery stuff. +// +//goland:noinspection GoUnusedExportedFunction +func NewBaseParser(input TokenStream) *BaseParser { + + p := new(BaseParser) + + p.BaseRecognizer = NewBaseRecognizer() + + // The input stream. + p.input = nil + + // The error handling strategy for the parser. The default value is a new + // instance of {@link DefaultErrorStrategy}. + p.errHandler = NewDefaultErrorStrategy() + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + + // The ParserRuleContext object for the currently executing rule. + // p.is always non-nil during the parsing process. + p.ctx = nil + + // Specifies whether the parser should construct a parse tree during + // the parsing process. The default value is {@code true}. + p.BuildParseTrees = true + + // When setTrace(true) is called, a reference to the + // TraceListener is stored here, so it can be easily removed in a + // later call to setTrace(false). The listener itself is + // implemented as a parser listener so p.field is not directly used by + // other parser methods. + p.tracer = nil + + // The list of ParseTreeListener listeners registered to receive + // events during the parse. + p.parseListeners = nil + + // The number of syntax errors Reported during parsing. p.value is + // incremented each time NotifyErrorListeners is called. + p._SyntaxErrors = 0 + p.SetInputStream(input) + + return p +} + +// This field maps from the serialized ATN string to the deserialized [ATN] with +// bypass alternatives. +// +// [ATNDeserializationOptions.isGenerateRuleBypassTransitions] +// +//goland:noinspection GoUnusedGlobalVariable +var bypassAltsAtnCache = make(map[string]int) + +// reset the parser's state// +func (p *BaseParser) reset() { + if p.input != nil { + p.input.Seek(0) + } + p.errHandler.reset(p) + p.ctx = nil + p._SyntaxErrors = 0 + p.SetTrace(nil) + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + if p.Interpreter != nil { + p.Interpreter.reset() + } +} + +func (p *BaseParser) GetErrorHandler() ErrorStrategy { + return p.errHandler +} + +func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { + p.errHandler = e +} + +// Match current input symbol against {@code ttype}. If the symbol type +// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are +// called to complete the Match process. +// +//
If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// +// @param ttype the token type to Match +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// {@code ttype} and the error strategy could not recover from the +// mismatched symbol + +func (p *BaseParser) Match(ttype int) Token { + + t := p.GetCurrentToken() + + if t.GetTokenType() == ttype { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.HasError() { + return nil + } + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + + // we must have conjured up a new token during single token + // insertion if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + + return t +} + +// Match current input symbol as a wildcard. If the symbol type Matches +// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} +// and {@link //consume} are called to complete the Match process. +// +//
If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// a wildcard and the error strategy could not recover from the mismatched +// symbol + +func (p *BaseParser) MatchWildcard() Token { + t := p.GetCurrentToken() + if t.GetTokenType() > 0 { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a new token during single token + // insertion if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + return t +} + +func (p *BaseParser) GetParserRuleContext() ParserRuleContext { + return p.ctx +} + +func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { + p.ctx = v +} + +func (p *BaseParser) GetParseListeners() []ParseTreeListener { + if p.parseListeners == nil { + return make([]ParseTreeListener, 0) + } + return p.parseListeners +} + +// AddParseListener registers listener to receive events during the parsing process. +// +// To support output-preserving grammar transformations (including but not +// limited to left-recursion removal, automated left-factoring, and +// optimized code generation), calls to listener methods during the parse +// may differ substantially from calls made by +// [ParseTreeWalker.DEFAULT] used after the parse is complete. In +// particular, rule entry and exit events may occur in a different order +// during the parse than after the parser. In addition, calls to certain +// rule entry methods may be omitted. +// +// With the following specific exceptions, calls to listener events are +// deterministic, i.e. for identical input the calls to listener +// methods will be the same. +// +// - Alterations to the grammar used to generate code may change the +// behavior of the listener calls. +// - Alterations to the command line options passed to ANTLR 4 when +// generating the parser may change the behavior of the listener calls. +// - Changing the version of the ANTLR Tool used to generate the parser +// may change the behavior of the listener calls. +func (p *BaseParser) AddParseListener(listener ParseTreeListener) { + if listener == nil { + panic("listener") + } + if p.parseListeners == nil { + p.parseListeners = make([]ParseTreeListener, 0) + } + p.parseListeners = append(p.parseListeners, listener) +} + +// RemoveParseListener removes listener from the list of parse listeners. +// +// If listener is nil or has not been added as a parse +// listener, this func does nothing. +func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { + + if p.parseListeners != nil { + + idx := -1 + for i, v := range p.parseListeners { + if v == listener { + idx = i + break + } + } + + if idx == -1 { + return + } + + // remove the listener from the slice + p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) + + if len(p.parseListeners) == 0 { + p.parseListeners = nil + } + } +} + +// Remove all parse listeners. +func (p *BaseParser) removeParseListeners() { + p.parseListeners = nil +} + +// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event. +func (p *BaseParser) TriggerEnterRuleEvent() { + if p.parseListeners != nil { + ctx := p.ctx + for _, listener := range p.parseListeners { + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) + } + } +} + +// TriggerExitRuleEvent notifies any parse listeners of an exit rule event. 
+func (p *BaseParser) TriggerExitRuleEvent() { + if p.parseListeners != nil { + // reverse order walk of listeners + ctx := p.ctx + l := len(p.parseListeners) - 1 + + for i := range p.parseListeners { + listener := p.parseListeners[l-i] + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) + } + } +} + +func (p *BaseParser) GetInterpreter() *ParserATNSimulator { + return p.Interpreter +} + +func (p *BaseParser) GetATN() *ATN { + return p.Interpreter.atn +} + +func (p *BaseParser) GetTokenFactory() TokenFactory { + return p.input.GetTokenSource().GetTokenFactory() +} + +// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens. +func (p *BaseParser) setTokenFactory(factory TokenFactory) { + p.input.GetTokenSource().setTokenFactory(factory) +} + +// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it +// lazily. +func (p *BaseParser) GetATNWithBypassAlts() { + + // TODO - Implement this? + panic("Not implemented!") + + // serializedAtn := p.getSerializedATN() + // if (serializedAtn == nil) { + // panic("The current parser does not support an ATN with bypass alternatives.") + // } + // result := p.bypassAltsAtnCache[serializedAtn] + // if (result == nil) { + // deserializationOptions := NewATNDeserializationOptions(nil) + // deserializationOptions.generateRuleBypassTransitions = true + // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) + // p.bypassAltsAtnCache[serializedAtn] = result + // } + // return result +} + +// The preferred method of getting a tree pattern. For example, here's a +// sample use: +// +//
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+//
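The compileParseTreePattern func that follows panics as unimplemented in this Go runtime, so the usage above is Java-flavored documentation only; the working hook for observing a parse is the listener machinery behind AddParseListener. A minimal sketch against the ParseTreeListener interface of this package (the ruleCounter type and demo package are illustrative, not part of the runtime):

package demo

import "github.com/antlr4-go/antlr/v4"

// ruleCounter tallies the enter/exit events that BaseParser fires through
// TriggerEnterRuleEvent and TriggerExitRuleEvent while the parse runs.
type ruleCounter struct {
	enters, exits int
}

func (r *ruleCounter) VisitTerminal(node antlr.TerminalNode)      {}
func (r *ruleCounter) VisitErrorNode(node antlr.ErrorNode)        {}
func (r *ruleCounter) EnterEveryRule(ctx antlr.ParserRuleContext) { r.enters++ }
func (r *ruleCounter) ExitEveryRule(ctx antlr.ParserRuleContext)  { r.exits++ }

A generated parser embeds *BaseParser, so p.AddParseListener(&ruleCounter{}) attaches the listener before the entry rule is invoked; per the AddParseListener documentation above, on-the-fly events may differ from a post-parse ParseTreeWalker traversal.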
+ +//goland:noinspection GoUnusedParameter +func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { + + panic("NewParseTreePatternMatcher not implemented!") + // + // if (lexer == nil) { + // if (p.GetTokenStream() != nil) { + // tokenSource := p.GetTokenStream().GetTokenSource() + // if _, ok := tokenSource.(ILexer); ok { + // lexer = tokenSource + // } + // } + // } + // if (lexer == nil) { + // panic("Parser can't discover a lexer to use") + // } + + // m := NewParseTreePatternMatcher(lexer, p) + // return m.compile(pattern, patternRuleIndex) +} + +func (p *BaseParser) GetInputStream() IntStream { + return p.GetTokenStream() +} + +func (p *BaseParser) SetInputStream(input TokenStream) { + p.SetTokenStream(input) +} + +func (p *BaseParser) GetTokenStream() TokenStream { + return p.input +} + +// SetTokenStream installs input as the token stream and resets the parser. +func (p *BaseParser) SetTokenStream(input TokenStream) { + p.input = nil + p.reset() + p.input = input +} + +// GetCurrentToken returns the current token at LT(1). +// +// [Match] needs to return the current input symbol, which gets put +// into the label for the associated token ref e.g., x=ID. +func (p *BaseParser) GetCurrentToken() Token { + return p.input.LT(1) +} + +func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { + if offendingToken == nil { + offendingToken = p.GetCurrentToken() + } + p._SyntaxErrors++ + line := offendingToken.GetLine() + column := offendingToken.GetColumn() + listener := p.GetErrorListenerDispatch() + listener.SyntaxError(p, offendingToken, line, column, msg, err) +} + +func (p *BaseParser) Consume() Token { + o := p.GetCurrentToken() + if o.GetTokenType() != TokenEOF { + p.GetInputStream().Consume() + } + hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 + if p.BuildParseTrees || hasListener { + if p.errHandler.InErrorRecoveryMode(p) { + node := p.ctx.AddErrorNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitErrorNode(node) + } + } + + } else { + node := p.ctx.AddTokenNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitTerminal(node) + } + } + } + // node.invokingState = p.state + } + + return o +} + +func (p *BaseParser) addContextToParseTree() { + // add current context to parent if we have a parent + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) + } +} + +func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) { + p.SetState(state) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.BuildParseTrees { + p.addContextToParseTree() + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() + } +} + +func (p *BaseParser) ExitRule() { + p.ctx.SetStop(p.input.LT(-1)) + // trigger event on ctx, before it reverts to parent + if p.parseListeners != nil { + p.TriggerExitRuleEvent() + } + p.SetState(p.ctx.GetInvokingState()) + if p.ctx.GetParent() != nil { + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } else { + p.ctx = nil + } +} + +func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { + localctx.SetAltNumber(altNum) + // if we have a new localctx, make sure we replace existing ctx + // that is previous child of parse tree + if p.BuildParseTrees && p.ctx != localctx { + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() + p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) + } + } + p.ctx = localctx 
+} + +// Get the precedence level for the top-most precedence rule. +// +// @return The precedence level for the top-most precedence rule, or -1 if +// the parser context is not nested within a precedence rule. + +func (p *BaseParser) GetPrecedence() int { + if len(p.precedenceStack) == 0 { + return -1 + } + + return p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) { + p.SetState(state) + p.precedenceStack.Push(precedence) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +// +// Like {@link //EnterRule} but for recursive rules. + +func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) { + previous := p.ctx + previous.SetParent(localctx) + previous.SetInvokingState(state) + previous.SetStop(p.input.LT(-1)) + + p.ctx = localctx + p.ctx.SetStart(previous.GetStart()) + if p.BuildParseTrees { + p.ctx.AddChild(previous) + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { + _, _ = p.precedenceStack.Pop() + p.ctx.SetStop(p.input.LT(-1)) + retCtx := p.ctx // save current ctx (return value) + // unroll so ctx is as it was before call to recursive method + if p.parseListeners != nil { + for p.ctx != parentCtx { + p.TriggerExitRuleEvent() + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } + } else { + p.ctx = parentCtx + } + // hook into tree + retCtx.SetParent(parentCtx) + if p.BuildParseTrees && parentCtx != nil { + // add return ctx into invoking rule's tree + parentCtx.AddChild(retCtx) + } +} + +func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { + ctx := p.ctx + for ctx != nil { + if ctx.GetRuleIndex() == ruleIndex { + return ctx + } + ctx = ctx.GetParent().(ParserRuleContext) + } + return nil +} + +func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool { + return precedence >= p.precedenceStack[len(p.precedenceStack)-1] +} + +//goland:noinspection GoUnusedParameter +func (p *BaseParser) inContext(context ParserRuleContext) bool { + // TODO: useful in parser? + return false +} + +// IsExpectedToken checks whether symbol can follow the current state in the +// {ATN}. The behavior of p.method is equivalent to the following, but is +// implemented such that the complete context-sensitive follow set does not +// need to be explicitly constructed. 
+// +// return getExpectedTokens().contains(symbol) +func (p *BaseParser) IsExpectedToken(symbol int) bool { + atn := p.Interpreter.atn + ctx := p.ctx + s := atn.states[p.state] + following := atn.NextTokens(s, nil) + if following.contains(symbol) { + return true + } + if !following.contains(TokenEpsilon) { + return false + } + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + following = atn.NextTokens(rt.(*RuleTransition).followState, nil) + if following.contains(symbol) { + return true + } + ctx = ctx.GetParent().(ParserRuleContext) + } + if following.contains(TokenEpsilon) && symbol == TokenEOF { + return true + } + + return false +} + +// GetExpectedTokens and returns the set of input symbols which could follow the current parser +// state and context, as given by [GetState] and [GetContext], +// respectively. +func (p *BaseParser) GetExpectedTokens() *IntervalSet { + return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) +} + +func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { + atn := p.Interpreter.atn + s := atn.states[p.state] + return atn.NextTokens(s, nil) +} + +// GetRuleIndex get a rule's index (i.e., RULE_ruleName field) or -1 if not found. +func (p *BaseParser) GetRuleIndex(ruleName string) int { + var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] + if ok { + return ruleIndex + } + + return -1 +} + +// GetRuleInvocationStack returns a list of the rule names in your parser instance +// leading up to a call to the current rule. You could override if +// you want more details such as the file/line info of where +// in the ATN a rule is invoked. +func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { + if c == nil { + c = p.ctx + } + stack := make([]string, 0) + for c != nil { + // compute what follows who invoked us + ruleIndex := c.GetRuleIndex() + if ruleIndex < 0 { + stack = append(stack, "n/a") + } else { + stack = append(stack, p.GetRuleNames()[ruleIndex]) + } + + vp := c.GetParent() + + if vp == nil { + break + } + + c = vp.(ParserRuleContext) + } + return stack +} + +// GetDFAStrings returns a list of all DFA states used for debugging purposes +func (p *BaseParser) GetDFAStrings() string { + return fmt.Sprint(p.Interpreter.decisionToDFA) +} + +// DumpDFA prints the whole of the DFA for debugging +func (p *BaseParser) DumpDFA() { + seenOne := false + for _, dfa := range p.Interpreter.decisionToDFA { + if dfa.Len() > 0 { + if seenOne { + fmt.Println() + } + fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") + fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) + seenOne = true + } + } +} + +func (p *BaseParser) GetSourceName() string { + return p.GrammarFileName +} + +// SetTrace installs a trace listener for the parse. +// +// During a parse it is sometimes useful to listen in on the rule entry and exit +// events as well as token Matches. This is for quick and dirty debugging. 
+func (p *BaseParser) SetTrace(trace *TraceListener) { + if trace == nil { + p.RemoveParseListener(p.tracer) + p.tracer = nil + } else { + if p.tracer != nil { + p.RemoveParseListener(p.tracer) + } + p.tracer = NewTraceListener(p) + p.AddParseListener(p.tracer) + } +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go new file mode 100644 index 0000000000..ae2869692a --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go @@ -0,0 +1,1668 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +var () + +// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over +// a standard JStore so that we can use Lazy instantiation of the JStore, mostly +// to avoid polluting the stats module with a ton of JStore instances with nothing in them. +type ClosureBusy struct { + bMap *JStore[*ATNConfig, Comparator[*ATNConfig]] + desc string +} + +// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules +func NewClosureBusy(desc string) *ClosureBusy { + return &ClosureBusy{ + desc: desc, + } +} + +func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) { + if c.bMap == nil { + c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc) + } + return c.bMap.Put(config) +} + +type ParserATNSimulator struct { + BaseATNSimulator + + parser Parser + predictionMode int + input TokenStream + startIndex int + dfa *DFA + mergeCache *JPCMap + outerContext ParserRuleContext +} + +//goland:noinspection GoUnusedExportedFunction +func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { + + p := &ParserATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } + + p.parser = parser + p.decisionToDFA = decisionToDFA + // SLL, LL, or LL + exact ambig detection?// + p.predictionMode = PredictionModeLL + // LAME globals to avoid parameters!!!!! I need these down deep in predTransition + p.input = nil + p.startIndex = 0 + p.outerContext = nil + p.dfa = nil + // Each prediction operation uses a cache for merge of prediction contexts. + // Don't keep around as it wastes huge amounts of memory. [JPCMap] + // isn't Synchronized, but we're ok since two threads shouldn't reuse same + // parser/atn-simulator object because it can only handle one input at a time. + // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid + // the merge if we ever see a and b again. Note that (b,a) -> c should + // also be examined during cache lookup. 
+ // + p.mergeCache = nil + + return p +} + +func (p *ParserATNSimulator) GetPredictionMode() int { + return p.predictionMode +} + +func (p *ParserATNSimulator) SetPredictionMode(v int) { + p.predictionMode = v +} + +func (p *ParserATNSimulator) reset() { +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + + strconv.Itoa(input.LT(1).GetColumn())) + } + p.input = input + p.startIndex = input.Index() + p.outerContext = outerContext + + dfa := p.decisionToDFA[decision] + p.dfa = dfa + m := input.Mark() + index := input.Index() + + defer func() { + p.dfa = nil + p.mergeCache = nil // whack cache after each prediction + // Do not attempt to run a GC now that we're done with the cache as makes the + // GC overhead terrible for badly formed grammars and has little effect on well formed + // grammars. + // I have made some extra effort to try and reduce memory pressure by reusing allocations when + // possible. However, it can only have a limited effect. The real solution is to encourage grammar + // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect + // what is happening at runtime, along with using the error listener to report ambiguities. + + input.Seek(index) + input.Release(m) + }() + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + var s0 *DFAState + p.atn.stateMu.RLock() + if dfa.getPrecedenceDfa() { + p.atn.edgeMu.RLock() + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. + s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) + p.atn.edgeMu.RUnlock() + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.getS0() + } + p.atn.stateMu.RUnlock() + + if s0 == nil { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) + } + fullCtx := false + s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx) + + p.atn.stateMu.Lock() + if dfa.getPrecedenceDfa() { + // If p is a precedence DFA, we use applyPrecedenceFilter + // to convert the computed start state to a precedence start + // state. We then use DFA.setPrecedenceStartState to set the + // appropriate start state for the precedence level rather + // than simply setting DFA.s0. 
+ // + dfa.s0.configs = s0Closure + s0Closure = p.applyPrecedenceFilter(s0Closure) + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + p.atn.edgeMu.Lock() + dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) + p.atn.edgeMu.Unlock() + } else { + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + dfa.setS0(s0) + } + p.atn.stateMu.Unlock() + } + + alt, re := p.execATN(dfa, s0, input, index, outerContext) + parser.SetError(re) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) + } + return alt + +} + +// execATN performs ATN simulation to compute a predicted alternative based +// upon the remaining input, but also updates the DFA cache to avoid +// having to traverse the ATN again for the same input sequence. +// +// There are some key conditions we're looking for after computing a new +// set of ATN configs (proposed DFA state): +// +// - If the set is empty, there is no viable alternative for current symbol +// - Does the state uniquely predict an alternative? +// - Does the state have a conflict that would prevent us from +// putting it on the work list? +// +// We also have some key operations to do: +// +// - Add an edge from previous DFA state to potentially NewDFA state, D, +// - Upon current symbol but only if adding to work list, which means in all +// cases except no viable alternative (and possibly non-greedy decisions?) +// - Collecting predicates and adding semantic context to DFA accept states +// - adding rule context to context-sensitive DFA accept states +// - Consuming an input symbol +// - Reporting a conflict +// - Reporting an ambiguity +// - Reporting a context sensitivity +// - Reporting insufficient predicates +// +// Cover these cases: +// +// - dead end +// - single alt +// - single alt + predicates +// - conflict +// - conflict + predicates +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { + + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + + ", DFA state " + s0.String() + + ", LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) + } + + previousD := s0 + + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("s0 = " + s0.String()) + } + t := input.LA(1) + for { // for more work + D := p.getExistingTargetState(previousD, t) + if D == nil { + D = p.computeTargetState(dfa, previousD, t) + } + if D == ATNSimulatorError { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. 
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) + if alt != ATNInvalidAltNumber { + return alt, nil + } + p.parser.SetError(e) + return ATNInvalidAltNumber, e + } + if D.requiresFullContext && p.predictionMode != PredictionModeSLL { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + conflictingAlts := D.configs.conflictingAlts + if D.predicates != nil { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("DFA state has preds in DFA sim LL fail-over") + } + conflictIndex := input.Index() + if conflictIndex != startIndex { + input.Seek(startIndex) + } + conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) + if conflictingAlts.length() == 1 { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("Full LL avoided") + } + return conflictingAlts.minValue(), nil + } + if conflictIndex != startIndex { + // restore the index so Reporting the fallback to full + // context occurs with the index at the correct spot + input.Seek(conflictIndex) + } + } + if runtimeConfig.parserATNSimulatorDFADebug { + fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) + } + fullCtx := true + s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) + p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) + alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) + return alt, re + } + if D.isAcceptState { + if D.predicates == nil { + return D.prediction, nil + } + stopIndex := input.Index() + input.Seek(startIndex) + alts := p.evalSemanticContext(D.predicates, outerContext, true) + + switch alts.length() { + case 0: + return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex) + case 1: + return alts.minValue(), nil + default: + // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported. + p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) + return alts.minValue(), nil + } + } + previousD = D + + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// p method returns {@code nil}. +// +// @param previousD The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for p edge is not +// already cached + +func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { + if t+1 < 0 { + return nil + } + + p.atn.edgeMu.RLock() + defer p.atn.edgeMu.RUnlock() + edges := previousD.getEdges() + if edges == nil || t+1 >= len(edges) { + return nil + } + return previousD.getIthEdge(t + 1) +} + +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. +// +// @param dfa The DFA +// @param previousD The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. If {@code t} does not lead to a valid DFA state, p method +// returns {@link //ERROR}. 
+// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { + reach := p.computeReachSet(previousD.configs, t, false) + + if reach == nil { + p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) + return ATNSimulatorError + } + // create new target state we'll add to DFA after it's complete + D := NewDFAState(-1, reach) + + predictedAlt := p.getUniqueAlt(reach) + + if runtimeConfig.parserATNSimulatorDebug { + altSubSets := PredictionModegetConflictingAltSubsets(reach) + fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + + ", previous=" + previousD.configs.String() + + ", configs=" + reach.String() + + ", predict=" + strconv.Itoa(predictedAlt) + + ", allSubsetsConflict=" + + fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + + ", conflictingAlts=" + p.getConflictingAlts(reach).String()) + } + if predictedAlt != ATNInvalidAltNumber { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true + D.configs.uniqueAlt = predictedAlt + D.setPrediction(predictedAlt) + } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.conflictingAlts = p.getConflictingAlts(reach) + D.requiresFullContext = true + // in SLL-only mode, we will stop at p state and return the minimum alt + D.isAcceptState = true + D.setPrediction(D.configs.conflictingAlts.minValue()) + } + if D.isAcceptState && D.configs.hasSemanticContext { + p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) + if D.predicates != nil { + D.setPrediction(ATNInvalidAltNumber) + } + } + // all adds to dfa are done after we've created full D state + D = p.addDFAEdge(dfa, previousD, t, D) + return D +} + +func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { + // We need to test all predicates, even in DFA states that + // uniquely predict alternative. + nalts := len(decisionState.GetTransitions()) + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) + altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) + if altToPred != nil { + dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) + dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. If neither + // alt has preds, resolve to min alt + dfaState.setPrediction(altsToCollectPredsFrom.minValue()) + } +} + +// comes back with reach.uniqueAlt set to a valid alt +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { + + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("execATNWithFullContext " + s0.String()) + } + + fullCtx := true + foundExactAmbig := false + var reach *ATNConfigSet + previous := s0 + input.Seek(startIndex) + t := input.LA(1) + predictedAlt := -1 + + for { // for more work + reach = p.computeReachSet(previous, t, fullCtx) + if reach == nil { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. 
Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) + if alt != ATNInvalidAltNumber { + return alt, nil + } + return alt, p.noViableAlt(input, outerContext, previous, startIndex) + } + altSubSets := PredictionModegetConflictingAltSubsets(reach) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + + strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + + fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) + } + reach.uniqueAlt = p.getUniqueAlt(reach) + // unique prediction? + if reach.uniqueAlt != ATNInvalidAltNumber { + predictedAlt = reach.uniqueAlt + break + } + if p.predictionMode != PredictionModeLLExactAmbigDetection { + predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) + if predictedAlt != ATNInvalidAltNumber { + break + } + } else { + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { + foundExactAmbig = true + predictedAlt = PredictionModegetSingleViableAlt(altSubSets) + break + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. + } + previous = reach + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. + if reach.uniqueAlt != ATNInvalidAltNumber { + p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) + return predictedAlt, nil + } + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. + + // + // In non-exact ambiguity detection mode, we might actually be able to + // detect an exact ambiguity, but I'm not going to spend the cycles + // needed to check. We only emit ambiguity warnings in exact ambiguity + // mode. + // + // For example, we might know that we have conflicting configurations. + // But, that does not mean that there is no way forward without a + // conflict. It's possible to have non-conflicting alt subsets as in: + // + // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + // + // from + // + // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + // + // In p case, (17,1,[5 $]) indicates there is some next sequence that + // would resolve p without conflict to alternative 1. Any other viable + // next sequence, however, is associated with a conflict. We stop + // looking for input because no amount of further lookahead will alter + // the fact that we should predict alternative 1. We just can't say for + // sure that there is an ambiguity without looking further. 
+ + p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) + + return predictedAlt, nil +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet { + if p.mergeCache == nil { + p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()") + } + intermediate := NewATNConfigSet(fullCtx) + + // Configurations already in a rule stop state indicate reaching the end + // of the decision rule (local context) or end of the start rule (full + // context). Once reached, these configurations are never updated by a + // closure operation, so they are handled separately for the performance + // advantage of having a smaller intermediate set when calling closure. + // + // For full-context reach operations, separate handling is required to + // ensure that the alternative Matching the longest overall sequence is + // chosen when multiple such configurations can Match the input. + + var skippedStopStates []*ATNConfig + + // First figure out where we can reach on input t + for _, c := range closure.configs { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) + } + + if _, ok := c.GetState().(*RuleStopState); ok { + if fullCtx || t == TokenEOF { + skippedStopStates = append(skippedStopStates, c) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("added " + c.String() + " to SkippedStopStates") + } + } + continue + } + + for _, trans := range c.GetState().GetTransitions() { + target := p.getReachableTarget(trans, t) + if target != nil { + cfg := NewATNConfig4(c, target) + intermediate.Add(cfg, p.mergeCache) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("added " + cfg.String() + " to intermediate") + } + } + } + } + + // Now figure out where the reach operation can take us... + var reach *ATNConfigSet + + // This block optimizes the reach operation for intermediate sets which + // trivially indicate a termination state for the overall + // AdaptivePredict operation. + // + // The conditions assume that intermediate + // contains all configurations relevant to the reach set, but p + // condition is not true when one or more configurations have been + // withheld in SkippedStopStates, or when the current symbol is EOF. + // + if skippedStopStates == nil && t != TokenEOF { + if len(intermediate.configs) == 1 { + // Don't pursue the closure if there is just one state. + // It can only have one alternative just add to result + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } + } + // If the reach set could not be trivially determined, perform a closure + // operation on the intermediate set to compute its initial value. 
+ // + if reach == nil { + reach = NewATNConfigSet(fullCtx) + closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy") + treatEOFAsEpsilon := t == TokenEOF + amount := len(intermediate.configs) + for k := 0; k < amount; k++ { + p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) + } + } + if t == TokenEOF { + // After consuming EOF no additional input is possible, so we are + // only interested in configurations which reached the end of the + // decision rule (local context) or end of the start rule (full + // context). Update reach to contain only these configurations. This + // handles both explicit EOF transitions in the grammar and implicit + // EOF transitions following the end of the decision or start rule. + // + // When reach==intermediate, no closure operation was performed. In + // p case, removeAllConfigsNotInRuleStopState needs to check for + // reachable rule stop states as well as configurations already in + // a rule stop state. + // + // This is handled before the configurations in SkippedStopStates, + // because any configurations potentially added from that list are + // already guaranteed to meet this condition whether it's + // required. + // + reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate)) + } + // If SkippedStopStates!=nil, then it contains at least one + // configuration. For full-context reach operations, these + // configurations reached the end of the start rule, in which case we + // only add them back to reach if no configuration during the current + // closure operation reached such a state. This ensures AdaptivePredict + // chooses an alternative Matching the longest overall sequence when + // multiple alternatives are viable. + // + if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { + for l := 0; l < len(skippedStopStates); l++ { + reach.Add(skippedStopStates[l], p.mergeCache) + } + } + + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) + } + + if len(reach.configs) == 0 { + return nil + } + + return reach +} + +// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from +// configs which are in a [RuleStopState]. If all +// configurations in configs are already in a rule stop state, this +// method simply returns configs. +// +// When lookToEndOfRule is true, this method uses +// [ATN].[NextTokens] for each configuration in configs which is +// not already in a rule stop state to see if a rule stop state is reachable +// from the configuration via epsilon-only transitions. +// +// When lookToEndOfRule is true, this method checks for rule stop states +// reachable by epsilon-only transitions from each configuration in +// configs. 
+// +// The func returns configs if all configurations in configs are in a +// rule stop state, otherwise it returns a new configuration set containing only +// the configurations from configs which are in a rule stop state +func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet { + if PredictionModeallConfigsInRuleStopStates(configs) { + return configs + } + result := NewATNConfigSet(configs.fullCtx) + for _, config := range configs.configs { + if _, ok := config.GetState().(*RuleStopState); ok { + result.Add(config, p.mergeCache) + continue + } + if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { + NextTokens := p.atn.NextTokens(config.GetState(), nil) + if NextTokens.contains(TokenEpsilon) { + endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] + result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache) + } + } + } + return result +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet { + // always at least the implicit call to start rule + initialContext := predictionContextFromRuleContext(p.atn, ctx) + configs := NewATNConfigSet(fullCtx) + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("computeStartState from ATN state " + a.String() + + " initialContext=" + initialContext.String()) + } + + for i := 0; i < len(a.GetTransitions()); i++ { + target := a.GetTransitions()[i].getTarget() + c := NewATNConfig6(target, i+1, initialContext) + closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy") + p.closure(c, configs, closureBusy, true, fullCtx, false) + } + return configs +} + +// applyPrecedenceFilter transforms the start state computed by +// [computeStartState] to the special start state used by a +// precedence [DFA] for a particular precedence value. The transformation +// process applies the following changes to the start state's configuration +// set. +// +// 1. Evaluate the precedence predicates for each configuration using +// [SemanticContext].evalPrecedence. +// 2. Remove all configurations which predict an alternative greater than +// 1, for which another configuration that predicts alternative 1 is in the +// same ATN state with the same prediction context. +// +// Transformation 2 is valid for the following reasons: +// +// - The closure block cannot contain any epsilon transitions which bypass +// the body of the closure, so all states reachable via alternative 1 are +// part of the precedence alternatives of the transformed left-recursive +// rule. +// - The "primary" portion of a left recursive rule cannot contain an +// epsilon transition, so the only way an alternative other than 1 can exist +// in a state that is also reachable via alternative 1 is by nesting calls +// to the left-recursive rule, with the outer calls not being at the +// preferred precedence level. +// +// The prediction context must be considered by this filter to address +// situations like the following: +// +// grammar TA +// prog: statement* EOF +// statement: letterA | statement letterA 'b' +// letterA: 'a' +// +// In the above grammar, the [ATN] state immediately before the token +// reference 'a' in letterA is reachable from the left edge +// of both the primary and closure blocks of the left-recursive rule +// statement. 
The prediction context associated with each of these +// configurations distinguishes between them, and prevents the alternative +// which stepped out to prog, and then back in to statement +// from being eliminated by the filter. +// +// The func returns the transformed configuration set representing the start state +// for a precedence [DFA] at a particular precedence level (determined by +// calling [Parser].getPrecedence). +func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet { + + statesFromAlt1 := make(map[int]*PredictionContext) + configSet := NewATNConfigSet(configs.fullCtx) + + for _, config := range configs.configs { + // handle alt 1 first + if config.GetAlt() != 1 { + continue + } + updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) + if updatedContext == nil { + // the configuration was eliminated + continue + } + statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() + if updatedContext != config.GetSemanticContext() { + configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache) + } else { + configSet.Add(config, p.mergeCache) + } + } + for _, config := range configs.configs { + + if config.GetAlt() == 1 { + // already handled + continue + } + // In the future, p elimination step could be updated to also + // filter the prediction context for alternatives predicting alt>1 + // (basically a graph subtraction algorithm). + if !config.getPrecedenceFilterSuppressed() { + context := statesFromAlt1[config.GetState().GetStateNumber()] + if context != nil && context.Equals(config.GetContext()) { + // eliminated + continue + } + } + configSet.Add(config, p.mergeCache) + } + return configSet +} + +func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { + if trans.Matches(ttype, 0, p.atn.maxTokenType) { + return trans.getTarget() + } + + return nil +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext { + + altToPred := make([]SemanticContext, nalts+1) + for _, c := range configs.configs { + if ambigAlts.contains(c.GetAlt()) { + altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) + } + } + nPredAlts := 0 + for i := 1; i <= nalts; i++ { + pred := altToPred[i] + if pred == nil { + altToPred[i] = SemanticContextNone + } else if pred != SemanticContextNone { + nPredAlts++ + } + } + // unambiguous alts are nil in altToPred + if nPredAlts == 0 { + altToPred = nil + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) + } + return altToPred +} + +func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { + pairs := make([]*PredPrediction, 0) + containsPredicate := false + for i := 1; i < len(altToPred); i++ { + pred := altToPred[i] + // un-predicated is indicated by SemanticContextNONE + if ambigAlts != nil && ambigAlts.contains(i) { + pairs = append(pairs, NewPredPrediction(pred, i)) + } + if pred != SemanticContextNone { + containsPredicate = true + } + } + if !containsPredicate { + return nil + } + return pairs +} + +// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by +// choosing an alternative rather than panic a NoViableAltException in particular prediction scenarios where the +// Error state was reached during [ATN] 
simulation. +// +// The default implementation of this method uses the following +// algorithm to identify an [ATN] configuration which successfully parsed the +// decision entry rule. Choosing such an alternative ensures that the +// [ParserRuleContext] returned by the calling rule will be complete +// and valid, and the syntax error will be Reported later at a more +// localized location. +// +// - If a syntactically valid path or paths reach the end of the decision rule, and +// they are semantically valid if predicated, return the min associated alt. +// - Else, if a semantically invalid but syntactically valid path exist +// or paths exist, return the minimum associated alt. +// - Otherwise, return [ATNInvalidAltNumber]. +// +// In some scenarios, the algorithm described above could predict an +// alternative which will result in a [FailedPredicateException] in +// the parser. Specifically, this could occur if the only configuration +// capable of successfully parsing to the end of the decision rule is +// blocked by a semantic predicate. By choosing this alternative within +// [AdaptivePredict] instead of panic a [NoViableAltException], the resulting +// [FailedPredicateException] in the parser will identify the specific +// predicate which is preventing the parser from successfully parsing the +// decision rule, which helps developers identify and correct logic errors +// in semantic predicates. +// +// pass in the configs holding ATN configurations which were valid immediately before +// the ERROR state was reached, outerContext as the initial parser context from the paper +// or the parser stack at the instant before prediction commences. +// +// Teh func returns the value to return from [AdaptivePredict], or +// [ATNInvalidAltNumber] if a suitable alternative was not +// identified and [AdaptivePredict] should report an error instead. +func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int { + cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) + semValidConfigs := cfgs[0] + semInvalidConfigs := cfgs[1] + alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) + if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists + return alt + } + // Is there a syntactically valid path with a failed pred? + if len(semInvalidConfigs.configs) > 0 { + alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) + if alt != ATNInvalidAltNumber { // syntactically viable path exists + return alt + } + } + return ATNInvalidAltNumber +} + +func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int { + alts := NewIntervalSet() + + for _, c := range configs.configs { + _, ok := c.GetState().(*RuleStopState) + + if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { + alts.addOne(c.GetAlt()) + } + } + if alts.length() == 0 { + return ATNInvalidAltNumber + } + + return alts.first() +} + +// Walk the list of configurations and split them according to +// those that have preds evaluating to true/false. If no pred, assume +// true pred and include in succeeded set. Returns Pair of sets. +// +// Create a NewSet so as not to alter the incoming parameter. +// +// Assumption: the input stream has been restored to the starting point +// prediction, which is where predicates need to evaluate. 
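The split described above is a plain partition of the configuration list: configs carrying a predicate land in the succeeded or failed set according to its evaluation, and un-predicated configs count as succeeded. The same shape in isolation, as a generic sketch (partition and the demo package are illustrative only, not part of the runtime):

package demo

// partition splits items by a predicate, mirroring the succeeded/failed
// split that splitAccordingToSemanticValidity performs over ATN configs.
func partition[T any](items []T, keep func(T) bool) (succeeded, failed []T) {
	for _, it := range items {
		if keep(it) {
			succeeded = append(succeeded, it)
		} else {
			failed = append(failed, it)
		}
	}
	return succeeded, failed
}

For example, partition([]int{1, 2, 3, 4}, func(n int) bool { return n%2 == 0 }) yields ([2 4], [1 3]).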
+ +type ATNConfigSetPair struct { + item0, item1 *ATNConfigSet +} + +func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet { + succeeded := NewATNConfigSet(configs.fullCtx) + failed := NewATNConfigSet(configs.fullCtx) + + for _, c := range configs.configs { + if c.GetSemanticContext() != SemanticContextNone { + predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) + if predicateEvaluationResult { + succeeded.Add(c, nil) + } else { + failed.Add(c, nil) + } + } else { + succeeded.Add(c, nil) + } + } + return []*ATNConfigSet{succeeded, failed} +} + +// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the +// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an +// un-predicated runtimeConfig which behaves as "always true." If !complete +// then we stop at the first predicate that evaluates to true. This +// includes pairs with nil predicates. +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { + predictions := NewBitSet() + for i := 0; i < len(predPredictions); i++ { + pair := predPredictions[i] + if pair.pred == SemanticContextNone { + predictions.add(pair.alt) + if !complete { + break + } + continue + } + + predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { + fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) + } + if predicateEvaluationResult { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { + fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) + } + predictions.add(pair.alt) + if !complete { + break + } + } + } + return predictions +} + +func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { + initialDepth := 0 + p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEOFAsEpsilon) +} + +func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("closure(" + config.String() + ")") + } + + var stack []*ATNConfig + visited := make(map[*ATNConfig]bool) + + stack = append(stack, config) + + for len(stack) > 0 { + currConfig := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + if _, ok := visited[currConfig]; ok { + continue + } + visited[currConfig] = true + + if _, ok := currConfig.GetState().(*RuleStopState); ok { + // We hit rule end. 
If we have context info, use it + // run thru all possible stack tops in ctx + if !currConfig.GetContext().isEmpty() { + for i := 0; i < currConfig.GetContext().length(); i++ { + if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY) + configs.Add(nb, p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) + } + p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[currConfig.GetContext().getReturnState(i)] + newContext := currConfig.GetContext().GetParent(i) // "pop" return state + + c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. + c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext()) + + stack = append(stack, c) + } + continue + } else if fullCtx { + // reached end of start rule + configs.Add(currConfig, p.mergeCache) + continue + } else { + // else if we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) + } + } + } + + p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("closure(" + config.String() + ")") + } + + if _, ok := config.GetState().(*RuleStopState); ok { + // We hit rule end. If we have context info, use it + // run thru all possible stack tops in ctx + if !config.GetContext().isEmpty() { + for i := 0; i < config.GetContext().length(); i++ { + if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY) + configs.Add(nb, p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[config.GetContext().getReturnState(i)] + newContext := config.GetContext().GetParent(i) // "pop" return state + + c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. 
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) + p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) + } + return + } else if fullCtx { + // reached end of start rule + configs.Add(config, p.mergeCache) + return + } else { + // else if we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + } + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) +} + +// Do the actual work of walking epsilon edges +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + state := config.GetState() + // optimization + if !state.GetEpsilonOnlyTransitions() { + configs.Add(config, p.mergeCache) + // make sure to not return here, because EOF transitions can act as + // both epsilon transitions and non-epsilon transitions. + } + for i := 0; i < len(state.GetTransitions()); i++ { + if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) { + continue + } + + t := state.GetTransitions()[i] + _, ok := t.(*ActionTransition) + continueCollecting := collectPredicates && !ok + c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) + if c != nil { + newDepth := depth + + if _, ok := config.GetState().(*RuleStopState); ok { + // target fell off end of rule mark resulting c as having dipped into outer context + // We can't get here if incoming config was rule stop and we had context + // track how far we dip into outer context. Might + // come in handy and we avoid evaluating context dependent + // preds if this is > 0. + + if p.dfa != nil && p.dfa.getPrecedenceDfa() { + if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { + c.setPrecedenceFilterSuppressed(true) + } + } + + c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) + + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for right-recursive rules + continue + } + + configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method + newDepth-- + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("dips into outer ctx: " + c.String()) + } + } else { + + if !t.getIsEpsilon() { + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for EOF* and EOF+ + continue + } + } + if _, ok := t.(*RuleTransition); ok { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if newDepth >= 0 { + newDepth++ + } + } + } + p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) + } + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool { + if !runtimeConfig.lRLoopEntryBranchOpt { + return false + } + + _p := config.GetState() + + // First check to see if we are in StarLoopEntryState generated during + // left-recursion elimination. For efficiency, also check if + // the context has an empty stack case. 
If so, it would mean + // global FOLLOW so we can't perform optimization + if _p.GetStateType() != ATNStateStarLoopEntry { + return false + } + startLoop, ok := _p.(*StarLoopEntryState) + if !ok { + return false + } + if !startLoop.precedenceRuleDecision || + config.GetContext().isEmpty() || + config.GetContext().hasEmptyPath() { + return false + } + + // Require all return states to return back to the same rule + // that p is in. + numCtxs := config.GetContext().length() + for i := 0; i < numCtxs; i++ { + returnState := p.atn.states[config.GetContext().getReturnState(i)] + if returnState.GetRuleIndex() != _p.GetRuleIndex() { + return false + } + } + x := _p.GetTransitions()[0].getTarget() + decisionStartState := x.(BlockStartState) + blockEndStateNum := decisionStartState.getEndState().stateNumber + blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState) + + // Verify that the top of each stack context leads to loop entry/exit + // state through epsilon edges and w/o leaving rule. + + for i := 0; i < numCtxs; i++ { // for each stack context + returnStateNumber := config.GetContext().getReturnState(i) + returnState := p.atn.states[returnStateNumber] + + // all states must have single outgoing epsilon edge + if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() { + return false + } + + // Look for prefix op case like 'not expr', (' type ')' expr + returnStateTarget := returnState.GetTransitions()[0].getTarget() + if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p { + continue + } + + // Look for 'expr op expr' or case where expr's return state is block end + // of (...)* internal block; the block end points to loop back + // which points to p but we don't need to check that + if returnState == blockEndState { + continue + } + + // Look for ternary expr ? expr : expr. 
+		// The return state points at block end,
+		// which points at loop entry state
+		if returnStateTarget == blockEndState {
+			continue
+		}
+
+		// Look for complex prefix 'between expr and expr' case where 2nd expr's
+		// return state points at block end state of (...)* internal block
+		if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
+			len(returnStateTarget.GetTransitions()) == 1 &&
+			returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
+			returnStateTarget.GetTransitions()[0].getTarget() == _p {
+			continue
+		}
+
+		// anything else ain't conforming
+		return false
+	}
+
+	return true
+}
+
+func (p *ParserATNSimulator) getRuleName(index int) string {
+	if p.parser != nil && index >= 0 {
+		return p.parser.GetRuleNames()[index]
+	}
+	var sb strings.Builder
+	sb.Grow(32)
+
+	sb.WriteString("<rule-")
+	sb.WriteString(strconv.FormatInt(int64(index), 10))
+	sb.WriteString(">")
+	return sb.String()
+}
+
+func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig {
+
+	switch t.getSerializationType() {
+	case TransitionRULE:
+		return p.ruleTransition(config, t.(*RuleTransition))
+	case TransitionPRECEDENCE:
+		return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
+	case TransitionPREDICATE:
+		return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
+	case TransitionACTION:
+		return p.actionTransition(config, t.(*ActionTransition))
+	case TransitionEPSILON:
+		return NewATNConfig4(config, t.getTarget())
+	case TransitionATOM, TransitionRANGE, TransitionSET:
+		// EOF transitions act like epsilon transitions after the first EOF
+		// transition is traversed
+		if treatEOFAsEpsilon {
+			if t.Matches(TokenEOF, 0, 1) {
+				return NewATNConfig4(config, t.getTarget())
+			}
+		}
+		return nil
+	default:
+		return nil
+	}
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig {
+	if runtimeConfig.parserATNSimulatorDebug {
+		fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
+	}
+	return NewATNConfig4(config, t.getTarget())
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig,
+	pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+	if runtimeConfig.parserATNSimulatorDebug {
+		fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
+			strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
+		if p.parser != nil {
+			fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+		}
+	}
+	var c *ATNConfig
+	if collectPredicates && inContext {
+		if fullCtx {
+			// In full context mode, we can evaluate predicates on-the-fly
+			// during closure, which dramatically reduces the size of
+			// the config sets. It also obviates the need to test predicates
+			// later during conflict resolution.
+ currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewATNConfig4(config, pt.getTarget()) + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("runtimeConfig from pred transition=" + c.String()) + } + return c +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig { + + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + + ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) + if p.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *ATNConfig + if collectPredicates && (!pt.isCtxDependent || inContext) { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. + currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewATNConfig4(config, pt.getTarget()) + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("config from pred transition=" + c.String()) + } + return c +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) + } + returnState := t.followState + newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) + return NewATNConfig1(config, t.getTarget(), newContext) +} + +func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet { + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModeGetAlts(altsets) +} + +// getConflictingAltsOrUniqueAlt Sam pointed out a problem with the previous definition, v3, of +// ambiguous states. If we have another state associated with conflicting +// alternatives, we should keep going. For example, the following grammar +// +// s : (ID | ID ID?) ; +// +// When the [ATN] simulation reaches the state before ;, it has a [DFA] +// state that looks like: +// +// [12|1|[], 6|2|[], 12|2|[]]. +// +// Naturally +// +// 12|1|[] and 12|2|[] +// +// conflict, but we cannot stop processing this node +// because alternative to has another way to continue, via +// +// [6|2|[]]. 
+//
+// The key is that we have a single state that has configs associated only
+// with a single alternative, 2, and crucially the state transitions
+// among the configurations are all non-epsilon transitions. That means
+// we don't consider any conflicts that include alternative 2. So, we
+// ignore the conflict between alts 1 and 2. We ignore a set of
+// conflicting alts when there is an intersection with an alternative
+// associated with a single alt state in the state config-list map.
+//
+// It's also the case that we might have two conflicting configurations but
+// also a 3rd non-conflicting configuration for a different alternative:
+//
+//	[1|1|[], 1|2|[], 8|3|[]].
+//
+// This can come about from grammar:
+//
+//	a : A | A | A B
+//
+// After matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, so we do not
+// stop working on this state.
+//
+// In the previous example, we're concerned
+// with states associated with the conflicting alternatives. Here alt
+// 3 is not associated with the conflicting configs, but since we can continue
+// looking for input reasonably, I don't declare the state done. We
+// ignore a set of conflicting alts when we have an alternative
+// that we still need to pursue.
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet {
+	var conflictingAlts *BitSet
+	if configs.uniqueAlt != ATNInvalidAltNumber {
+		conflictingAlts = NewBitSet()
+		conflictingAlts.add(configs.uniqueAlt)
+	} else {
+		conflictingAlts = configs.conflictingAlts
+	}
+	return conflictingAlts
+}
+
+func (p *ParserATNSimulator) GetTokenName(t int) string {
+	if t == TokenEOF {
+		return "EOF"
+	}
+
+	if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
+		return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+	}
+
+	if p.parser != nil && p.parser.GetSymbolicNames() != nil && t < len(p.parser.GetSymbolicNames()) {
+		return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
+	}
+
+	return strconv.Itoa(t)
+}
+
+func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
+	return p.GetTokenName(input.LA(1))
+}
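The unique-alternative test described above is simple enough to illustrate in isolation. Below is a minimal, self-contained sketch of the same check that `getUniqueAlt` (further down) performs over a configuration set; the `cfg` struct and `invalidAlt` sentinel are illustrative stand-ins, not the runtime's types:

```go
package main

import "fmt"

const invalidAlt = 0 // stand-in for ATNInvalidAltNumber

// cfg is a toy stand-in for an ATN configuration: just the predicted alternative.
type cfg struct{ alt int }

// uniqueAlt returns the single alternative shared by every configuration,
// or invalidAlt as soon as two configurations disagree.
func uniqueAlt(configs []cfg) int {
	alt := invalidAlt
	for _, c := range configs {
		if alt == invalidAlt {
			alt = c.alt // first alternative seen
		} else if c.alt != alt {
			return invalidAlt // two alts conflict: no unique prediction
		}
	}
	return alt
}

func main() {
	fmt.Println(uniqueAlt([]cfg{{1}, {1}, {1}})) // 1: prediction is decided
	fmt.Println(uniqueAlt([]cfg{{1}, {2}}))      // 0: conflicting, keep looking
}
```

A decided prediction (a unique alt) lets the simulator stop consuming lookahead; a conflict without any single-alt state is what triggers the full-context fallback discussed earlier.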
+// Used for debugging in [AdaptivePredict] around [execATN], but I cut
+// it out for clarity now that the algorithm works well. We can leave this
+// "dead" code for a bit.
+func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) {
+
+	panic("Not implemented")
+
+	// fmt.Println("dead end configs: ")
+	// var decs = nvae.deadEndConfigs
+	//
+	// for i := 0; i < len(decs); i++ {
+	//	c := decs[i]
+	//	var trans = "no edges"
+	//	if len(c.state.GetTransitions()) > 0 {
+	//		var t = c.state.GetTransitions()[0]
+	//		if t2, ok := t.(*AtomTransition); ok {
+	//			trans = "Atom " + p.GetTokenName(t2.label)
+	//		} else if t3, ok := t.(SetTransition); ok {
+	//			_, ok := t.(*NotSetTransition)
+	//
+	//			var s string
+	//			if ok {
+	//				s = "~"
+	//			}
+	//
+	//			trans = s + "Set " + t3.set
+	//		}
+	//	}
+	//	fmt.Errorf(c.String(p.parser, true) + ":" + trans)
+	// }
+}
+
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException {
+	return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
+}
+
+func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
+	alt := ATNInvalidAltNumber
+	for _, c := range configs.configs {
+		if alt == ATNInvalidAltNumber {
+			alt = c.GetAlt() // found first alt
+		} else if c.GetAlt() != alt {
+			return ATNInvalidAltNumber
+		}
+	}
+	return alt
+}
+
+// Add an edge to the DFA, if possible. This method calls
+// {@link //addDFAState} to ensure the {@code to} state is present in the
+// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
+// range of edges that can be represented in the DFA tables, this method
+// returns without adding the edge to the DFA.
+//
+// If {@code to} is {@code nil}, this method returns {@code nil}.
+// Otherwise, this method returns the {@link DFAState} returned by calling
+// {@link //addDFAState} for the {@code to} state.
+//
+// @param dfa The DFA
+// @param from The source state for the edge
+// @param t The input symbol
+// @param to The target state for the edge
+//
+// @return If {@code to} is {@code nil}, this method returns {@code nil},
+// otherwise this method returns the result of calling {@link //addDFAState}
+// on {@code to}
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
+	if runtimeConfig.parserATNSimulatorDebug {
+		fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
+	}
+	if to == nil {
+		return nil
+	}
+	p.atn.stateMu.Lock()
+	to = p.addDFAState(dfa, to) // used existing if possible not incoming
+	p.atn.stateMu.Unlock()
+	if from == nil || t < -1 || t > p.atn.maxTokenType {
+		return to
+	}
+	p.atn.edgeMu.Lock()
+	if from.getEdges() == nil {
+		from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
+	}
+	from.setIthEdge(t+1, to) // connect
+	p.atn.edgeMu.Unlock()
+
+	if runtimeConfig.parserATNSimulatorDebug {
+		var names []string
+		if p.parser != nil {
+			names = p.parser.GetLiteralNames()
+		}
+
+		fmt.Println("DFA=\n" + dfa.String(names, nil))
+	}
+	return to
+}
+
+// addDFAState adds state D to the [DFA] if it is not already present, and returns
+// the actual instance stored in the [DFA]. If a state equivalent to D
+// is already in the [DFA], the existing state is returned. Otherwise, this
+// method returns D after adding it to the [DFA].
+//
+// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and
+// does not change the DFA.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
+	if d == ATNSimulatorError {
+		return d
+	}
+
+	existing, present := dfa.Get(d)
+	if present {
+		if runtimeConfig.parserATNSimulatorTraceATNSim {
+			fmt.Print("addDFAState " + d.String() + " exists")
+		}
+		return existing
+	}
+
+	// The state will be added if not already there or we will be given back the existing state struct
+	// if it is present.
+	//
+	d.stateNumber = dfa.Len()
+	if !d.configs.readOnly {
+		d.configs.OptimizeConfigs(&p.BaseATNSimulator)
+		d.configs.readOnly = true
+		d.configs.configLookup = nil
+	}
+	dfa.Put(d)
+
+	if runtimeConfig.parserATNSimulatorTraceATNSim {
+		fmt.Println("addDFAState new " + d.String())
+	}
+
+	return d
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) {
+	if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+		interval := NewInterval(startIndex, stopIndex+1)
+		fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+			", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+	}
+	if p.parser != nil {
+		p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
+	}
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) {
+	if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+		interval := NewInterval(startIndex, stopIndex+1)
+		fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+			", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+	}
+	if p.parser != nil {
+		p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
+	}
+}
+
+// ReportAmbiguity reports an ambiguity in the parse, which shows that the parser will explore a different route.
+//
+// When parsing context-sensitively, we know it's an ambiguity rather than a conflict or error, but we can report it
+// to the developer so that they can see that this is happening and can take action if they want to.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int,
+	exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+	if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+		interval := NewInterval(startIndex, stopIndex+1)
+		fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
+			", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+	}
+	if p.parser != nil {
+		p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+	}
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
new file mode 100644
index 0000000000..c249bc1385
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
@@ -0,0 +1,421 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+ +package antlr + +import ( + "reflect" + "strconv" +) + +type ParserRuleContext interface { + RuleContext + + SetException(RecognitionException) + + AddTokenNode(token Token) *TerminalNodeImpl + AddErrorNode(badToken Token) *ErrorNodeImpl + + EnterRule(listener ParseTreeListener) + ExitRule(listener ParseTreeListener) + + SetStart(Token) + GetStart() Token + + SetStop(Token) + GetStop() Token + + AddChild(child RuleContext) RuleContext + RemoveLastChild() +} + +type BaseParserRuleContext struct { + parentCtx RuleContext + invokingState int + RuleIndex int + + start, stop Token + exception RecognitionException + children []Tree +} + +func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { + prc := new(BaseParserRuleContext) + InitBaseParserRuleContext(prc, parent, invokingStateNumber) + return prc +} + +func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) { + // What context invoked b rule? + prc.parentCtx = parent + + // What state invoked the rule associated with b context? + // The "return address" is the followState of invokingState + // If parent is nil, b should be -1. + if parent == nil { + prc.invokingState = -1 + } else { + prc.invokingState = invokingStateNumber + } + + prc.RuleIndex = -1 + // * If we are debugging or building a parse tree for a Visitor, + // we need to track all of the tokens and rule invocations associated + // with prc rule's context. This is empty for parsing w/o tree constr. + // operation because we don't the need to track the details about + // how we parse prc rule. + // / + prc.children = nil + prc.start = nil + prc.stop = nil + // The exception that forced prc rule to return. If the rule successfully + // completed, prc is {@code nil}. + prc.exception = nil +} + +func (prc *BaseParserRuleContext) SetException(e RecognitionException) { + prc.exception = e +} + +func (prc *BaseParserRuleContext) GetChildren() []Tree { + return prc.children +} + +func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) { + // from RuleContext + prc.parentCtx = ctx.parentCtx + prc.invokingState = ctx.invokingState + prc.children = nil + prc.start = ctx.start + prc.stop = ctx.stop +} + +func (prc *BaseParserRuleContext) GetText() string { + if prc.GetChildCount() == 0 { + return "" + } + + var s string + for _, child := range prc.children { + s += child.(ParseTree).GetText() + } + + return s +} + +// EnterRule is called when any rule is entered. +func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) { +} + +// ExitRule is called when any rule is exited. +func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) { +} + +// * Does not set parent link other add methods do that +func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { + if prc.children == nil { + prc.children = make([]Tree, 0) + } + if child == nil { + panic("Child may not be null") + } + prc.children = append(prc.children, child) + return child +} + +func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { + if prc.children == nil { + prc.children = make([]Tree, 0) + } + if child == nil { + panic("Child may not be null") + } + prc.children = append(prc.children, child) + return child +} + +// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as +// we entered a rule. If we have a label, we will need to remove +// the generic ruleContext object. 
+func (prc *BaseParserRuleContext) RemoveLastChild() { + if prc.children != nil && len(prc.children) > 0 { + prc.children = prc.children[0 : len(prc.children)-1] + } +} + +func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl { + + node := NewTerminalNodeImpl(token) + prc.addTerminalNodeChild(node) + node.parentCtx = prc + return node + +} + +func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { + node := NewErrorNodeImpl(badToken) + prc.addTerminalNodeChild(node) + node.parentCtx = prc + return node +} + +func (prc *BaseParserRuleContext) GetChild(i int) Tree { + if prc.children != nil && len(prc.children) >= i { + return prc.children[i] + } + + return nil +} + +func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext { + if childType == nil { + return prc.GetChild(i).(RuleContext) + } + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if reflect.TypeOf(child) == childType { + if i == 0 { + return child.(RuleContext) + } + + i-- + } + } + + return nil +} + +func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string { + return TreesStringTree(prc, ruleNames, recog) +} + +func (prc *BaseParserRuleContext) GetRuleContext() RuleContext { + return prc +} + +func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} { + return visitor.VisitChildren(prc) +} + +func (prc *BaseParserRuleContext) SetStart(t Token) { + prc.start = t +} + +func (prc *BaseParserRuleContext) GetStart() Token { + return prc.start +} + +func (prc *BaseParserRuleContext) SetStop(t Token) { + prc.stop = t +} + +func (prc *BaseParserRuleContext) GetStop() Token { + return prc.stop +} + +func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if c2, ok := child.(TerminalNode); ok { + if c2.GetSymbol().GetTokenType() == ttype { + if i == 0 { + return c2 + } + + i-- + } + } + } + return nil +} + +func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { + if prc.children == nil { + return make([]TerminalNode, 0) + } + + tokens := make([]TerminalNode, 0) + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if tchild, ok := child.(TerminalNode); ok { + if tchild.GetSymbol().GetTokenType() == ttype { + tokens = append(tokens, tchild) + } + } + } + + return tokens +} + +func (prc *BaseParserRuleContext) GetPayload() interface{} { + return prc +} + +func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext { + if prc.children == nil || i < 0 || i >= len(prc.children) { + return nil + } + + j := -1 // what element have we found with ctxType? 
+ for _, o := range prc.children { + + childType := reflect.TypeOf(o) + + if childType.Implements(ctxType) { + j++ + if j == i { + return o.(RuleContext) + } + } + } + return nil +} + +// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do +// check for convertibility + +func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext { + return prc.getChild(ctxType, i) +} + +func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext { + if prc.children == nil { + return make([]RuleContext, 0) + } + + contexts := make([]RuleContext, 0) + + for _, child := range prc.children { + childType := reflect.TypeOf(child) + + if childType.ConvertibleTo(ctxType) { + contexts = append(contexts, child.(RuleContext)) + } + } + return contexts +} + +func (prc *BaseParserRuleContext) GetChildCount() int { + if prc.children == nil { + return 0 + } + + return len(prc.children) +} + +func (prc *BaseParserRuleContext) GetSourceInterval() Interval { + if prc.start == nil || prc.stop == nil { + return TreeInvalidInterval + } + + return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex()) +} + +//need to manage circular dependencies, so export now + +// Print out a whole tree, not just a node, in LISP format +// (root child1 .. childN). Print just a node if b is a leaf. +// + +func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string { + + var p ParserRuleContext = prc + s := "[" + for p != nil && p != stop { + if ruleNames == nil { + if !p.IsEmpty() { + s += strconv.Itoa(p.GetInvokingState()) + } + } else { + ri := p.GetRuleIndex() + var ruleName string + if ri >= 0 && ri < len(ruleNames) { + ruleName = ruleNames[ri] + } else { + ruleName = strconv.Itoa(ri) + } + s += ruleName + } + if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) { + s += " " + } + pi := p.GetParent() + if pi != nil { + p = pi.(ParserRuleContext) + } else { + p = nil + } + } + s += "]" + return s +} + +func (prc *BaseParserRuleContext) SetParent(v Tree) { + if v == nil { + prc.parentCtx = nil + } else { + prc.parentCtx = v.(RuleContext) + } +} + +func (prc *BaseParserRuleContext) GetInvokingState() int { + return prc.invokingState +} + +func (prc *BaseParserRuleContext) SetInvokingState(t int) { + prc.invokingState = t +} + +func (prc *BaseParserRuleContext) GetRuleIndex() int { + return prc.RuleIndex +} + +func (prc *BaseParserRuleContext) GetAltNumber() int { + return ATNInvalidAltNumber +} + +func (prc *BaseParserRuleContext) SetAltNumber(_ int) {} + +// IsEmpty returns true if the context of b is empty. +// +// A context is empty if there is no invoking state, meaning nobody calls +// current context. +func (prc *BaseParserRuleContext) IsEmpty() bool { + return prc.invokingState == -1 +} + +// GetParent returns the combined text of all child nodes. This method only considers +// tokens which have been added to the parse tree. +// +// Since tokens on hidden channels (e.g. whitespace or comments) are not +// added to the parse trees, they will not appear in the output of this +// method. 
+func (prc *BaseParserRuleContext) GetParent() Tree {
+	return prc.parentCtx
+}
+
+var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
+
+type InterpreterRuleContext interface {
+	ParserRuleContext
+}
+
+type BaseInterpreterRuleContext struct {
+	*BaseParserRuleContext
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
+
+	prc := new(BaseInterpreterRuleContext)
+
+	prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)
+
+	prc.RuleIndex = ruleIndex
+
+	return prc
+}
diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
new file mode 100644
index 0000000000..c1b80cc1f0
--- /dev/null
+++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
@@ -0,0 +1,727 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"fmt"
+	"golang.org/x/exp/slices"
+	"strconv"
+)
+
+var _emptyPredictionContextHash int
+
+func init() {
+	_emptyPredictionContextHash = murmurInit(1)
+	_emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
+}
+
+func calculateEmptyHash() int {
+	return _emptyPredictionContextHash
+}
+
+const (
+	// BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode, $
+	// doesn't mean wildcard:
+	//
+	//	$ + x = [$,x]
+	//
+	// Here,
+	//
+	//	$ = EmptyReturnState
+	BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here
+//
+//goland:noinspection GoUnusedGlobalVariable
+var (
+	BasePredictionContextglobalNodeCount = 1
+	BasePredictionContextid             = BasePredictionContextglobalNodeCount
+)
+
+const (
+	PredictionContextEmpty = iota
+	PredictionContextSingleton
+	PredictionContextArray
+)
+
+// PredictionContext is a Go-idiomatic implementation of PredictionContext that does not try to
+// emulate inheritance from Java, and can be used without an interface definition. An interface
+// is not required because no user code will ever need to implement this interface.
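The doc comment above describes a tagged-union design: one struct covers the empty, singleton, and array shapes, selected by a `pcType` discriminator, instead of an interface hierarchy emulating Java inheritance. A minimal standalone sketch of the same pattern follows; the `node`/`kind*` names are illustrative, not the runtime's:

```go
package main

import "fmt"

// Discriminator values, mirroring the pcType approach used by PredictionContext.
const (
	kindEmpty = iota
	kindSingleton
	kindArray
)

// node is a single struct covering all three shapes, selected by kind.
type node struct {
	kind         int
	returnState  int   // used by kindSingleton
	returnStates []int // used by kindArray
}

// length dispatches on the discriminator, as the runtime's length() does.
func (n *node) length() int {
	if n.kind == kindArray {
		return len(n.returnStates)
	}
	return 1
}

func main() {
	s := &node{kind: kindSingleton, returnState: 7}
	a := &node{kind: kindArray, returnStates: []int{7, 9}}
	fmt.Println(s.length(), a.length()) // 1 2
}
```

The payoff of this layout is that every context is the same concrete type, so hot paths (hashing, merging, cache lookups) avoid interface dispatch and type assertions.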
+type PredictionContext struct { + cachedHash int + pcType int + parentCtx *PredictionContext + returnState int + parents []*PredictionContext + returnStates []int +} + +func NewEmptyPredictionContext() *PredictionContext { + nep := &PredictionContext{} + nep.cachedHash = calculateEmptyHash() + nep.pcType = PredictionContextEmpty + nep.returnState = BasePredictionContextEmptyReturnState + return nep +} + +func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext { + pc := &PredictionContext{} + pc.pcType = PredictionContextSingleton + pc.returnState = returnState + pc.parentCtx = parent + if parent != nil { + pc.cachedHash = calculateHash(parent, returnState) + } else { + pc.cachedHash = calculateEmptyHash() + } + return pc +} + +func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. + hash := murmurInit(1) + for _, parent := range parents { + hash = murmurUpdate(hash, parent.Hash()) + } + for _, returnState := range returnStates { + hash = murmurUpdate(hash, returnState) + } + hash = murmurFinish(hash, len(parents)<<1) + + nec := &PredictionContext{} + nec.cachedHash = hash + nec.pcType = PredictionContextArray + nec.parents = parents + nec.returnStates = returnStates + return nec +} + +func (p *PredictionContext) Hash() int { + return p.cachedHash +} + +func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool { + switch p.pcType { + case PredictionContextEmpty: + otherP := other.(*PredictionContext) + return other == nil || otherP == nil || otherP.isEmpty() + case PredictionContextSingleton: + return p.SingletonEquals(other) + case PredictionContextArray: + return p.ArrayEquals(other) + } + return false +} + +func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool { + if o == nil { + return false + } + other := o.(*PredictionContext) + if other == nil || other.pcType != PredictionContextArray { + return false + } + if p.cachedHash != other.Hash() { + return false // can't be same if hash is different + } + + // Must compare the actual array elements and not just the array address + // + return slices.Equal(p.returnStates, other.returnStates) && + slices.EqualFunc(p.parents, other.parents, func(x, y *PredictionContext) bool { + return x.Equals(y) + }) +} + +func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool { + if other == nil { + return false + } + otherP := other.(*PredictionContext) + if otherP == nil { + return false + } + + if p.cachedHash != otherP.Hash() { + return false // Can't be same if hash is different + } + + if p.returnState != otherP.getReturnState(0) { + return false + } + + // Both parents must be nil if one is + if p.parentCtx == nil { + return otherP.parentCtx == nil + } + + return p.parentCtx.Equals(otherP.parentCtx) +} + +func (p *PredictionContext) GetParent(i int) *PredictionContext { + switch p.pcType { + case PredictionContextEmpty: + 
return nil + case PredictionContextSingleton: + return p.parentCtx + case PredictionContextArray: + return p.parents[i] + } + return nil +} + +func (p *PredictionContext) getReturnState(i int) int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates[i] + default: + return p.returnState + } +} + +func (p *PredictionContext) GetReturnStates() []int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates + default: + return []int{p.returnState} + } +} + +func (p *PredictionContext) length() int { + switch p.pcType { + case PredictionContextArray: + return len(p.returnStates) + default: + return 1 + } +} + +func (p *PredictionContext) hasEmptyPath() bool { + switch p.pcType { + case PredictionContextSingleton: + return p.returnState == BasePredictionContextEmptyReturnState + } + return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState +} + +func (p *PredictionContext) String() string { + switch p.pcType { + case PredictionContextEmpty: + return "$" + case PredictionContextSingleton: + var up string + + if p.parentCtx == nil { + up = "" + } else { + up = p.parentCtx.String() + } + + if len(up) == 0 { + if p.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(p.returnState) + } + + return strconv.Itoa(p.returnState) + " " + up + case PredictionContextArray: + if p.isEmpty() { + return "[]" + } + + s := "[" + for i := 0; i < len(p.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if p.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(p.returnStates[i]) + if !p.parents[i].isEmpty() { + s = s + " " + p.parents[i].String() + } else { + s = s + "nil" + } + } + return s + "]" + + default: + return "unknown" + } +} + +func (p *PredictionContext) isEmpty() bool { + switch p.pcType { + case PredictionContextEmpty: + return true + case PredictionContextArray: + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return p.returnStates[0] == BasePredictionContextEmptyReturnState + default: + return false + } +} + +func (p *PredictionContext) Type() int { + return p.pcType +} + +func calculateHash(parent *PredictionContext, returnState int) int { + h := murmurInit(1) + h = murmurUpdate(h, parent.Hash()) + h = murmurUpdate(h, returnState) + return murmurFinish(h, 2) +} + +// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. +// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// / +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + // if we are in RuleContext of start rule, s, then BasePredictionContext + // is EMPTY. Nobody called us. 
(if we are empty, return empty) + if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { + return BasePredictionContextEMPTY + } + // If we have a parent, convert it to a BasePredictionContext graph + parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) + state := a.states[outerContext.GetInvokingState()] + transition := state.GetTransitions()[0] + + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) +} + +func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + + // Share same graph if both same + // + if a == b || a.Equals(b) { + return a + } + + if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton { + return mergeSingletons(a, b, rootIsWildcard, mergeCache) + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as wildcard + if rootIsWildcard { + if a.isEmpty() { + return a + } + if b.isEmpty() { + return b + } + } + + // Convert either Singleton or Empty to arrays, so that we can merge them + // + ara := convertToArray(a) + arb := convertToArray(b) + return mergeArrays(ara, arb, rootIsWildcard, mergeCache) +} + +func convertToArray(pc *PredictionContext) *PredictionContext { + switch pc.Type() { + case PredictionContextEmpty: + return NewArrayPredictionContext([]*PredictionContext{}, []int{}) + case PredictionContextSingleton: + return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)}) + default: + // Already an array + } + return pc +} + +// mergeSingletons merges two Singleton [PredictionContext] instances. +// +// Stack tops equal, parents merge is same return left graph. +//
+// Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both elements in the root point to the same (original)
+// parent.
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// @param mergeCache +// / +func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + if mergeCache != nil { + previous, present := mergeCache.Get(a, b) + if present { + return previous + } + previous, present = mergeCache.Get(b, a) + if present { + return previous + } + } + + rootMerge := mergeRoot(a, b, rootIsWildcard) + if rootMerge != nil { + if mergeCache != nil { + mergeCache.Put(a, b, rootMerge) + } + return rootMerge + } + if a.returnState == b.returnState { + parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) + // if parent is same as existing a or b parent or reduced to a parent, + // return it + if parent.Equals(a.parentCtx) { + return a // ax + bx = ax, if a=b + } + if parent.Equals(b.parentCtx) { + return b // ax + bx = bx, if a=b + } + // else: ax + ay = a'[x,y] + // merge parents x and y, giving array node with x,y then remainders + // of those graphs. dup a, a' points at merged array. + // New joined parent so create a new singleton pointing to it, a' + spc := SingletonBasePredictionContextCreate(parent, a.returnState) + if mergeCache != nil { + mergeCache.Put(a, b, spc) + } + return spc + } + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + var singleParent *PredictionContext + if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax + + // bx = + // [a,b]x + singleParent = a.parentCtx + } + if singleParent != nil { // parents are same + // sort payloads and use same parent + payloads := []int{a.returnState, b.returnState} + if a.returnState > b.returnState { + payloads[0] = b.returnState + payloads[1] = a.returnState + } + parents := []*PredictionContext{singleParent, singleParent} + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.Put(a, b, apc) + } + return apc + } + // parents differ and can't merge them. Just pack together + // into array can't merge. + // ax + by = [ax,by] + payloads := []int{a.returnState, b.returnState} + parents := []*PredictionContext{a.parentCtx, b.parentCtx} + if a.returnState > b.returnState { // sort by payload + payloads[0] = b.returnState + payloads[1] = a.returnState + parents = []*PredictionContext{b.parentCtx, a.parentCtx} + } + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.Put(a, b, apc) + } + return apc +} + +// Handle case where at least one of {@code a} or {@code b} is +// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used +// to represent {@link //EMPTY}. +// +//
+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY} return left graph.
+//
+// Special case of last merge if local context.
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+// Must keep all contexts {@link //EMPTY} in array is a special value (and
+// nil parent).
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// / +func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext { + if rootIsWildcard { + if a.pcType == PredictionContextEmpty { + return BasePredictionContextEMPTY // // + b =// + } + if b.pcType == PredictionContextEmpty { + return BasePredictionContextEMPTY // a +// =// + } + } else { + if a.isEmpty() && b.isEmpty() { + return BasePredictionContextEMPTY // $ + $ = $ + } else if a.isEmpty() { // $ + x = [$,x] + payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []*PredictionContext{b.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present) + payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []*PredictionContext{a.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } + } + return nil +} + +// Merge two {@link ArrayBasePredictionContext} instances. +// +//
+// Different tops, different parents.
+//
+// Shared top, same parents.
+//
+// Shared top, different parents.
+//
+// Shared top, all shared parents.
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+// +//goland:noinspection GoBoolExpressions +func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + if mergeCache != nil { + previous, present := mergeCache.Get(a, b) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous + } + previous, present = mergeCache.Get(b, a) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous + } + } + // merge sorted payloads a + b => M + i := 0 // walks a + j := 0 // walks b + k := 0 // walks target M array + + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) + mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates)) + // walk and merge to yield mergedParents, mergedReturnStates + for i < len(a.returnStates) && j < len(b.returnStates) { + aParent := a.parents[i] + bParent := b.parents[j] + if a.returnStates[i] == b.returnStates[j] { + // same payload (stack tops are equal), must yield merged singleton + payload := a.returnStates[i] + // $+$ = $ + bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil + axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax + // -> + // ax + if bothDollars || axAX { + mergedParents[k] = aParent // choose left + mergedReturnStates[k] = payload + } else { // ax+ay -> a'[x,y] + mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) + mergedParents[k] = mergedParent + mergedReturnStates[k] = payload + } + i++ // hop over left one as usual + j++ // but also Skip one in right side since we merge + } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M + mergedParents[k] = aParent + mergedReturnStates[k] = a.returnStates[i] + i++ + } else { // b > a, copy b[j] to M + mergedParents[k] = bParent + mergedReturnStates[k] = b.returnStates[j] + j++ + } + k++ + } + // copy over any payloads remaining in either array + if i < len(a.returnStates) { + for p := i; p < len(a.returnStates); p++ { + mergedParents[k] = a.parents[p] + mergedReturnStates[k] = a.returnStates[p] + k++ + } + } else { + for p := j; p < len(b.returnStates); p++ { + mergedParents[k] = b.parents[p] + mergedReturnStates[k] = b.returnStates[p] + k++ + } + } + // trim merged if we combined a few that had same stack tops + if k < len(mergedParents) { // write index < last position trim + if k == 1 { // for just one merged element, return singleton top + pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) + if mergeCache != nil { + mergeCache.Put(a, b, pc) + } + return pc + } + mergedParents = mergedParents[0:k] + mergedReturnStates = mergedReturnStates[0:k] + } + + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) + + // if we created same array as a or b, return that instead + // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation + if M.Equals(a) { + if mergeCache != nil { + mergeCache.Put(a, b, a) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a") + } + return a + } + if M.Equals(b) { + if mergeCache != nil { + mergeCache.Put(a, b, b) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b") + } + 
return b + } + combineCommonParents(&mergedParents) + + if mergeCache != nil { + mergeCache.Put(a, b, M) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String()) + } + return M +} + +// Make pass over all M parents and merge any Equals() ones. +// Note that we pass a pointer to the slice as we want to modify it in place. +// +//goland:noinspection GoUnusedFunction +func combineCommonParents(parents *[]*PredictionContext) { + uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext") + + for p := 0; p < len(*parents); p++ { + parent := (*parents)[p] + _, _ = uniqueParents.Put(parent) + } + for q := 0; q < len(*parents); q++ { + pc, _ := uniqueParents.Get((*parents)[q]) + (*parents)[q] = pc + } +} + +func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext { + if context.isEmpty() { + return context + } + existing, present := visited.Get(context) + if present { + return existing + } + + existing, present = contextCache.Get(context) + if present { + visited.Put(context, existing) + return existing + } + changed := false + parents := make([]*PredictionContext, context.length()) + for i := 0; i < len(parents); i++ { + parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) + if changed || !parent.Equals(context.GetParent(i)) { + if !changed { + parents = make([]*PredictionContext, context.length()) + for j := 0; j < context.length(); j++ { + parents[j] = context.GetParent(j) + } + changed = true + } + parents[i] = parent + } + } + if !changed { + contextCache.add(context) + visited.Put(context, context) + return context + } + var updated *PredictionContext + if len(parents) == 0 { + updated = BasePredictionContextEMPTY + } else if len(parents) == 1 { + updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) + } else { + updated = NewArrayPredictionContext(parents, context.GetReturnStates()) + } + contextCache.add(updated) + visited.Put(updated, updated) + visited.Put(context, updated) + + return updated +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go new file mode 100644 index 0000000000..25dfb11e8f --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go @@ -0,0 +1,48 @@ +package antlr + +var BasePredictionContextEMPTY = &PredictionContext{ + cachedHash: calculateEmptyHash(), + pcType: PredictionContextEmpty, + returnState: BasePredictionContextEmptyReturnState, +} + +// PredictionContextCache is Used to cache [PredictionContext] objects. It is used for the shared +// context cash associated with contexts in DFA states. This cache +// can be used for both lexers and parsers. +type PredictionContextCache struct { + cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]] +} + +func NewPredictionContextCache() *PredictionContextCache { + return &PredictionContextCache{ + cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"), + } +} + +// Add a context to the cache and return it. 
If the context already exists, +// return that one instead and do not add a new context to the cache. +// Protect shared cache from unsafe thread access. +func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext { + if ctx.isEmpty() { + return BasePredictionContextEMPTY + } + + // Put will return the existing entry if it is present (note this is done via Equals, not whether it is + // the same pointer), otherwise it will add the new entry and return that. + // + existing, present := p.cache.Get(ctx) + if present { + return existing + } + p.cache.Put(ctx, ctx) + return ctx +} + +func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) { + pc, exists := p.cache.Get(ctx) + return pc, exists +} + +func (p *PredictionContextCache) length() int { + return p.cache.Len() +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go new file mode 100644 index 0000000000..3f85a6a520 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go @@ -0,0 +1,536 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// This enumeration defines the prediction modes available in ANTLR 4 along with +// utility methods for analyzing configuration sets for conflicts and/or +// ambiguities. + +const ( + // PredictionModeSLL represents the SLL(*) prediction mode. + // This prediction mode ignores the current + // parser context when making predictions. This is the fastest prediction + // mode, and provides correct results for many grammars. This prediction + // mode is more powerful than the prediction mode provided by ANTLR 3, but + // may result in syntax errors for grammar and input combinations which are + // not SLL. + // + // When using this prediction mode, the parser will either return a correct + // parse tree (i.e. the same parse tree that would be returned with the + // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a + // syntax error is encountered when using the SLL prediction mode, + // it may be due to either an actual syntax error in the input or indicate + // that the particular combination of grammar and input requires the more + // powerful LL prediction abilities to complete successfully. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeSLL = 0 + + // PredictionModeLL represents the LL(*) prediction mode. + // This prediction mode allows the current parser + // context to be used for resolving SLL conflicts that occur during + // prediction. This is the fastest prediction mode that guarantees correct + // parse results for all combinations of grammars with syntactically correct + // inputs. + // + // When using this prediction mode, the parser will make correct decisions + // for all syntactically-correct grammar and input combinations. However, in + // cases where the grammar is truly ambiguous this prediction mode might not + // report a precise answer for exactly which alternatives are + // ambiguous. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. 
+ // + PredictionModeLL = 1 + + // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode + // with exact ambiguity detection. + // + // In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode, + // this prediction mode instructs the prediction algorithm to determine the + // complete and exact set of ambiguous alternatives for every ambiguous + // decision encountered while parsing. + // + // This prediction mode may be used for diagnosing ambiguities during + // grammar development. Due to the performance overhead of calculating sets + // of ambiguous alternatives, this prediction mode should be avoided when + // the exact results are not necessary. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeLLExactAmbigDetection = 2 +) + +// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition. +// +// This method computes the SLL prediction termination condition for both of +// the following cases: +// +// - The usual SLL+LL fallback upon SLL conflict +// - Pure SLL without LL fallback +// +// # Combined SLL+LL Parsing +// +// When LL-fallback is enabled upon SLL conflict, correct predictions are +// ensured regardless of how the termination condition is computed by this +// method. Due to the substantially higher cost of LL prediction, the +// prediction should only fall back to LL when the additional lookahead +// cannot lead to a unique SLL prediction. +// +// Assuming combined SLL+LL parsing, an SLL configuration set with only +// conflicting subsets should fall back to full LL, even if the +// configuration sets don't resolve to the same alternative, e.g. +// +// {1,2} and {3,4} +// +// If there is at least one non-conflicting +// configuration, SLL could continue with the hopes that more lookahead will +// resolve via one of those non-conflicting configurations. +// +// Here's the prediction termination rule them: SLL (for SLL+LL parsing) +// stops when it sees only conflicting configuration subsets. In contrast, +// full LL keeps going when there is uncertainty. +// +// # Heuristic +// +// As a heuristic, we stop prediction when we see any conflicting subset +// unless we see a state that only has one alternative associated with it. +// The single-alt-state thing lets prediction continue upon rules like +// (otherwise, it would admit defeat too soon): +// +// [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ; +// +// When the [ATN] simulation reaches the state before ';', it has a +// [DFA] state that looks like: +// +// [12|1|[], 6|2|[], 12|2|[]] +// +// Naturally +// +// 12|1|[] and 12|2|[] +// +// conflict, but we cannot stop processing this node because alternative to has another way to continue, +// via +// +// [6|2|[]] +// +// It also let's us continue for this rule: +// +// [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ; +// +// After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state immediately before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue, and so we do not stop +// working on this state. In the previous example, we're concerned with +// states associated with the conflicting alternatives. Here alt 3 is not +// associated with the conflicting configs, but since we can continue +// looking for input reasonably, don't declare the state done. 
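+//
+// A minimal sketch of selecting a prediction mode from application code,
+// assuming generated NewMyLexer/NewMyParser constructors (hypothetical names):
+//
+//	lexer := NewMyLexer(antlr.NewInputStream(input))
+//	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
+//	parser := NewMyParser(tokens)
+//	parser.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
+//	// on a syntax error, a caller can reset and retry with antlr.PredictionModeLL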
+// +// # Pure SLL Parsing +// +// To handle pure SLL parsing, all we have to do is make sure that we +// combine stack contexts for configurations that differ only by semantic +// predicate. From there, we can do the usual SLL termination heuristic. +// +// # Predicates in SLL+LL Parsing +// +// SLL decisions don't evaluate predicates until after they reach [DFA] stop +// states because they need to create the [DFA] cache that works in all +// semantic situations. In contrast, full LL evaluates predicates collected +// during start state computation, so it can ignore predicates thereafter. +// This means that SLL termination detection can totally ignore semantic +// predicates. +// +// Implementation-wise, [ATNConfigSet] combines stack contexts but not +// semantic predicate contexts, so we might see two configurations like the +// following: +// +// (s, 1, x, {}), (s, 1, x', {p}) +// +// Before testing these configurations against others, we have to merge +// x and x' (without modifying the existing configurations). +// For example, we test (x+x')==x” when looking for conflicts in +// the following configurations: +// +// (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {}) +// +// If the configuration set has predicates (as indicated by +// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of +// the configurations to strip out all the predicates so that a standard +// [ATNConfigSet] will merge everything ignoring predicates. +func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool { + + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to Match additional input, so we terminate prediction. + // + if PredictionModeallConfigsInRuleStopStates(configs) { + return true + } + + // pure SLL mode parsing + if mode == PredictionModeSLL { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL costs more time + // since we'll often fail over anyway. + if configs.hasSemanticContext { + // dup configs, tossing out semantic predicates + dup := NewATNConfigSet(false) + for _, c := range configs.configs { + + // NewATNConfig({semanticContext:}, c) + c = NewATNConfig2(c, SemanticContextNone) + dup.Add(c, nil) + } + configs = dup + } + // now we have combined contexts for configs with dissimilar predicates + } + // pure SLL or combined SLL+LL mode parsing + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) +} + +// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a +// [RuleStopState]. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// The func returns true if any configuration in the supplied configs is in a [RuleStopState] +func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool { + for _, c := range configs.configs { + if _, ok := c.GetState().(*RuleStopState); ok { + return true + } + } + return false +} + +// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a +// [RuleStopState]. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). 
+// +// the func returns true if all configurations in configs are in a +// [RuleStopState] +func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool { + + for _, c := range configs.configs { + if _, ok := c.GetState().(*RuleStopState); !ok { + return false + } + } + return true +} + +// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination. +// +// Can we stop looking ahead during [ATN] simulation or is there some +// uncertainty as to which alternative we will ultimately pick, after +// consuming more input? Even if there are partial conflicts, we might know +// that everything is going to resolve to the same minimum alternative. That +// means we can stop since no more lookahead will change that fact. On the +// other hand, there might be multiple conflicts that resolve to different +// minimums. That means we need more look ahead to decide which of those +// alternatives we should predict. +// +// The basic idea is to split the set of configurations 'C', into +// conflicting subsets (s, _, ctx, _) and singleton subsets with +// non-conflicting configurations. Two configurations conflict if they have +// identical [ATNConfig].state and [ATNConfig].context values +// but a different [ATNConfig].alt value, e.g. +// +// (s, i, ctx, _) +// +// and +// +// (s, j, ctx, _) ; for i != j +// +// Reduce these configuration subsets to the set of possible alternatives. +// You can compute the alternative subsets in one pass as follows: +// +// A_s,ctx = {i | (s, i, ctx, _)} +// +// for each configuration in C holding s and ctx fixed. +// +// Or in pseudo-code: +// +// for each configuration c in C: +// map[c] U = c.ATNConfig.alt alt // map hash/equals uses s and x, not alt and not pred +// +// The values in map are the set of +// +// A_s,ctx +// +// sets. +// +// If +// +// |A_s,ctx| = 1 +// +// then there is no conflict associated with s and ctx. +// +// Reduce the subsets to singletons by choosing a minimum of each subset. If +// the union of these alternative subsets is a singleton, then no amount of +// further lookahead will help us. We will always pick that alternative. If, +// however, there is more than one alternative, then we are uncertain which +// alternative to predict and must continue looking for resolution. We may +// or may not discover an ambiguity in the future, even if there are no +// conflicting subsets this round. +// +// The biggest sin is to terminate early because it means we've made a +// decision but were uncertain as to the eventual outcome. We haven't used +// enough lookahead. On the other hand, announcing a conflict too late is no +// big deal; you will still have the conflict. It's just inefficient. It +// might even look until the end of file. +// +// No special consideration for semantic predicates is required because +// predicates are evaluated on-the-fly for full LL prediction, ensuring that +// no configuration contains a semantic context during the termination +// check. +// +// # Conflicting Configs +// +// Two configurations: +// +// (s, i, x) and (s, j, x') +// +// conflict when i != j but x = x'. Because we merge all +// (s, i, _) configurations together, that means that there are at +// most n configurations associated with state s for +// n possible alternatives in the decision. The merged stacks +// complicate the comparison of configuration contexts x and x'. +// +// Sam checks to see if one is a subset of the other by calling +// merge and checking to see if the merged result is either x or x'. 
+// If the x associated with lowest alternative i +// is the superset, then i is the only possible prediction since the +// others resolve to min(i) as well. However, if x is +// associated with j > i then at least one stack configuration for +// j is not in conflict with alternative i. The algorithm +// should keep going, looking for more lookahead due to the uncertainty. +// +// For simplicity, I'm doing an equality check between x and +// x', which lets the algorithm continue to consume lookahead longer +// than necessary. The reason I like the equality is of course the +// simplicity but also because that is the test you need to detect the +// alternatives that are actually in conflict. +// +// # Continue/Stop Rule +// +// Continue if the union of resolved alternative sets from non-conflicting and +// conflicting alternative subsets has more than one alternative. We are +// uncertain about which alternative to predict. +// +// The complete set of alternatives, +// +// [i for (_, i, _)] +// +// tells us which alternatives are still in the running for the amount of input we've +// consumed at this point. The conflicting sets let us to strip away +// configurations that won't lead to more states because we resolve +// conflicts to the configuration with a minimum alternate for the +// conflicting set. +// +// Cases +// +// - no conflicts and more than 1 alternative in set => continue +// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set +// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue +// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s”, 1, z) yields non-conflicting set +// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1 +// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets +// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2} +// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets +// {1} ∪ {2} = {1,2} => continue +// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets +// {1} ∪ {2} = {1,2} => continue +// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets +// {1} ∪ {3} = {1,3} => continue +// +// # Exact Ambiguity Detection +// +// If all states report the same conflicting set of alternatives, then we +// know we have the exact ambiguity set: +// +// |A_i| > 1 +// +// and +// +// A_i = A_j ; for all i, j +// +// In other words, we continue examining lookahead until all A_i +// have more than one alternative and all A_i are the same. If +// +// A={{1,2}, {1,3}} +// +// then regular LL prediction would terminate because the resolved set is {1}. +// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and +// two or one and three so we keep going. We can only stop prediction when +// we need exact ambiguity detection when the sets look like: +// +// A={{1,2}} +// +// or +// +// {{1,2},{1,2}}, etc... +func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { + return PredictionModegetSingleViableAlt(altsets) +} + +// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more +// than one alternative. 
+// +// The func returns true if every [BitSet] in altsets has +// [BitSet].cardinality cardinality > 1 +func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { + return !PredictionModehasNonConflictingAltSet(altsets) +} + +// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains +// exactly one alternative. +// +// The func returns true if altsets contains at least one [BitSet] with +// [BitSet].cardinality cardinality 1 +func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() == 1 { + return true + } + } + return false +} + +// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains +// more than one alternative. +// +// The func returns true if altsets contains a [BitSet] with +// [BitSet].cardinality cardinality > 1, otherwise false +func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() > 1 { + return true + } + } + return false +} + +// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent. +// +// The func returns true if every member of altsets is equal to the others. +func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { + var first *BitSet + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if first == nil { + first = alts + } else if alts != first { + return false + } + } + + return true +} + +// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in +// altsets. If no such alternative exists, this method returns +// [ATNInvalidAltNumber]. +// +// @param altsets a collection of alternative subsets +func PredictionModegetUniqueAlt(altsets []*BitSet) int { + all := PredictionModeGetAlts(altsets) + if all.length() == 1 { + return all.minValue() + } + + return ATNInvalidAltNumber +} + +// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of +// alternative subsets. This method returns the union of each [BitSet] +// in altsets, being the set of represented alternatives in altsets. +func PredictionModeGetAlts(altsets []*BitSet) *BitSet { + all := NewBitSet() + for _, alts := range altsets { + all.or(alts) + } + return all +} + +// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. +// +// for each configuration c in configs: +// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred +func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet { + configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()") + + for _, c := range configs.configs { + + alts, ok := configToAlts.Get(c) + if !ok { + alts = NewBitSet() + configToAlts.Put(c, alts) + } + alts.add(c.GetAlt()) + } + + return configToAlts.Values() +} + +// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. 
+// +// for each configuration c in configs: +// map[c.ATNConfig.state] U= c.ATNConfig.alt} +func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict { + m := NewAltDict() + + for _, c := range configs.configs { + alts := m.Get(c.GetState().String()) + if alts == nil { + alts = NewBitSet() + m.put(c.GetState().String(), alts) + } + alts.(*BitSet).add(c.GetAlt()) + } + return m +} + +func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool { + values := PredictionModeGetStateToAltMap(configs).values() + for i := 0; i < len(values); i++ { + if values[i].(*BitSet).length() == 1 { + return true + } + } + return false +} + +// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets +// if there is one. +// +// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet +func PredictionModegetSingleViableAlt(altsets []*BitSet) int { + result := ATNInvalidAltNumber + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + minAlt := alts.minValue() + if result == ATNInvalidAltNumber { + result = minAlt + } else if result != minAlt { // more than 1 viable alt + return ATNInvalidAltNumber + } + } + return result +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/recognizer.go new file mode 100644 index 0000000000..2e0b504fb3 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/recognizer.go @@ -0,0 +1,241 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" + "strings" + + "strconv" +) + +type Recognizer interface { + GetLiteralNames() []string + GetSymbolicNames() []string + GetRuleNames() []string + + Sempred(RuleContext, int, int) bool + Precpred(RuleContext, int) bool + + GetState() int + SetState(int) + Action(RuleContext, int, int) + AddErrorListener(ErrorListener) + RemoveErrorListeners() + GetATN() *ATN + GetErrorListenerDispatch() ErrorListener + HasError() bool + GetError() RecognitionException + SetError(RecognitionException) +} + +type BaseRecognizer struct { + listeners []ErrorListener + state int + + RuleNames []string + LiteralNames []string + SymbolicNames []string + GrammarFileName string + SynErr RecognitionException +} + +func NewBaseRecognizer() *BaseRecognizer { + rec := new(BaseRecognizer) + rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} + rec.state = -1 + return rec +} + +//goland:noinspection GoUnusedGlobalVariable +var tokenTypeMapCache = make(map[string]int) + +//goland:noinspection GoUnusedGlobalVariable +var ruleIndexMapCache = make(map[string]int) + +func (b *BaseRecognizer) checkVersion(toolVersion string) { + runtimeVersion := "4.12.0" + if runtimeVersion != toolVersion { + fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) + } +} + +func (b *BaseRecognizer) SetError(err RecognitionException) { + b.SynErr = err +} + +func (b *BaseRecognizer) HasError() bool { + return b.SynErr != nil +} + +func (b *BaseRecognizer) GetError() RecognitionException { + return b.SynErr +} + +func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) { + panic("action not implemented on Recognizer!") +} + +func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { + b.listeners = append(b.listeners, listener) +} + +func (b *BaseRecognizer) RemoveErrorListeners() { + b.listeners = make([]ErrorListener, 0) +} + +func (b *BaseRecognizer) GetRuleNames() []string { + return b.RuleNames +} + +func (b *BaseRecognizer) GetTokenNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetSymbolicNames() []string { + return b.SymbolicNames +} + +func (b *BaseRecognizer) GetLiteralNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetState() int { + return b.state +} + +func (b *BaseRecognizer) SetState(v int) { + b.state = v +} + +//func (b *Recognizer) GetTokenTypeMap() { +// var tokenNames = b.GetTokenNames() +// if (tokenNames==nil) { +// panic("The current recognizer does not provide a list of token names.") +// } +// var result = tokenTypeMapCache[tokenNames] +// if(result==nil) { +// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) +// result.EOF = TokenEOF +// tokenTypeMapCache[tokenNames] = result +// } +// return result +//} + +// GetRuleIndexMap Get a map from rule names to rule indexes. +// +// Used for XPath and tree pattern compilation. +// +// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed. 
+func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { + + panic("Method not defined!") + // var ruleNames = b.GetRuleNames() + // if (ruleNames==nil) { + // panic("The current recognizer does not provide a list of rule names.") + // } + // + // var result = ruleIndexMapCache[ruleNames] + // if(result==nil) { + // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) + // ruleIndexMapCache[ruleNames] = result + // } + // return result +} + +// GetTokenType get the token type based upon its name +func (b *BaseRecognizer) GetTokenType(_ string) int { + panic("Method not defined!") + // var ttype = b.GetTokenTypeMap()[tokenName] + // if (ttype !=nil) { + // return ttype + // } else { + // return TokenInvalidType + // } +} + +//func (b *Recognizer) GetTokenTypeMap() map[string]int { +// Vocabulary vocabulary = getVocabulary() +// +// Synchronized (tokenTypeMapCache) { +// Map result = tokenTypeMapCache.Get(vocabulary) +// if (result == null) { +// result = new HashMap() +// for (int i = 0; i < GetATN().maxTokenType; i++) { +// String literalName = vocabulary.getLiteralName(i) +// if (literalName != null) { +// result.put(literalName, i) +// } +// +// String symbolicName = vocabulary.GetSymbolicName(i) +// if (symbolicName != null) { +// result.put(symbolicName, i) +// } +// } +// +// result.put("EOF", Token.EOF) +// result = Collections.unmodifiableMap(result) +// tokenTypeMapCache.put(vocabulary, result) +// } +// +// return result +// } +//} + +// GetErrorHeader returns the error header, normally line/character position information. +// +// Can be overridden in sub structs embedding BaseRecognizer. +func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { + line := e.GetOffendingToken().GetLine() + column := e.GetOffendingToken().GetColumn() + return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) +} + +// GetTokenErrorDisplay shows how a token should be displayed in an error message. +// +// The default is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. +// +// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific +// implementations of [ANTLRErrorStrategy] may provide a similar +// feature when necessary. 
For example, see [DefaultErrorStrategy].GetTokenErrorDisplay() +func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { + if t == nil { + return "" + } + s := t.GetText() + if s == "" { + if t.GetTokenType() == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.GetTokenType()) + ">" + } + } + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + + return "'" + s + "'" +} + +func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { + return NewProxyErrorListener(b.listeners) +} + +// Sempred embedding structs need to override this if there are sempreds or actions +// that the ATN interpreter needs to execute +func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool { + return true +} + +// Precpred embedding structs need to override this if there are preceding predicates +// that the ATN interpreter needs to execute +func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool { + return true +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/rule_context.go new file mode 100644 index 0000000000..f2ad04793e --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/rule_context.go @@ -0,0 +1,40 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// RuleContext is a record of a single rule invocation. It knows +// which context invoked it, if any. If there is no parent context, then +// naturally the invoking state is not valid. The parent link +// provides a chain upwards from the current rule invocation to the root +// of the invocation tree, forming a stack. +// +// We actually carry no information about the rule associated with this context (except +// when parsing). We keep only the state number of the invoking state from +// the [ATN] submachine that invoked this. Contrast this with the s +// pointer inside [ParserRuleContext] that tracks the current state +// being "executed" for the current rule. +// +// The parent contexts are useful for computing lookahead sets and +// getting error information. +// +// These objects are used during parsing and prediction. +// For the special case of parsers, we use the struct +// [ParserRuleContext], which embeds a RuleContext. +// +// @see ParserRuleContext +type RuleContext interface { + RuleNode + + GetInvokingState() int + SetInvokingState(int) + + GetRuleIndex() int + IsEmpty() bool + + GetAltNumber() int + SetAltNumber(altNumber int) + + String([]string, RuleContext) string +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go new file mode 100644 index 0000000000..68cb9061eb --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go @@ -0,0 +1,464 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// SemanticContext is a tree structure used to record the semantic context in which +// +// an ATN configuration is valid. It's either a single predicate, +// a conjunction p1 && p2, or a sum of products p1 || p2. 
+// +// I have scoped the AND, OR, and Predicate subclasses of +// [SemanticContext] within the scope of this outer ``class'' +type SemanticContext interface { + Equals(other Collectable[SemanticContext]) bool + Hash() int + + evaluate(parser Recognizer, outerContext RuleContext) bool + evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext + + String() string +} + +func SemanticContextandContext(a, b SemanticContext) SemanticContext { + if a == nil || a == SemanticContextNone { + return b + } + if b == nil || b == SemanticContextNone { + return a + } + result := NewAND(a, b) + if len(result.opnds) == 1 { + return result.opnds[0] + } + + return result +} + +func SemanticContextorContext(a, b SemanticContext) SemanticContext { + if a == nil { + return b + } + if b == nil { + return a + } + if a == SemanticContextNone || b == SemanticContextNone { + return SemanticContextNone + } + result := NewOR(a, b) + if len(result.opnds) == 1 { + return result.opnds[0] + } + + return result +} + +type Predicate struct { + ruleIndex int + predIndex int + isCtxDependent bool +} + +func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { + p := new(Predicate) + + p.ruleIndex = ruleIndex + p.predIndex = predIndex + p.isCtxDependent = isCtxDependent // e.g., $i ref in pred + return p +} + +//The default {@link SemanticContext}, which is semantically equivalent to +//a predicate of the form {@code {true}?}. + +var SemanticContextNone = NewPredicate(-1, -1, false) + +func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext { + return p +} + +func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool { + + var localctx RuleContext + + if p.isCtxDependent { + localctx = outerContext + } + + return parser.Sempred(localctx, p.ruleIndex, p.predIndex) +} + +func (p *Predicate) Equals(other Collectable[SemanticContext]) bool { + if p == other { + return true + } else if _, ok := other.(*Predicate); !ok { + return false + } else { + return p.ruleIndex == other.(*Predicate).ruleIndex && + p.predIndex == other.(*Predicate).predIndex && + p.isCtxDependent == other.(*Predicate).isCtxDependent + } +} + +func (p *Predicate) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, p.ruleIndex) + h = murmurUpdate(h, p.predIndex) + if p.isCtxDependent { + h = murmurUpdate(h, 1) + } else { + h = murmurUpdate(h, 0) + } + return murmurFinish(h, 3) +} + +func (p *Predicate) String() string { + return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?" 
+} + +type PrecedencePredicate struct { + precedence int +} + +func NewPrecedencePredicate(precedence int) *PrecedencePredicate { + + p := new(PrecedencePredicate) + p.precedence = precedence + + return p +} + +func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool { + return parser.Precpred(outerContext, p.precedence) +} + +func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + if parser.Precpred(outerContext, p.precedence) { + return SemanticContextNone + } + + return nil +} + +func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int { + return p.precedence - other.precedence +} + +func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool { + + var op *PrecedencePredicate + var ok bool + if op, ok = other.(*PrecedencePredicate); !ok { + return false + } + + if p == op { + return true + } + + return p.precedence == other.(*PrecedencePredicate).precedence +} + +func (p *PrecedencePredicate) Hash() int { + h := uint32(1) + h = 31*h + uint32(p.precedence) + return int(h) +} + +func (p *PrecedencePredicate) String() string { + return "{" + strconv.Itoa(p.precedence) + ">=prec}?" +} + +func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate { + result := make([]*PrecedencePredicate, 0) + + set.Each(func(v SemanticContext) bool { + if c2, ok := v.(*PrecedencePredicate); ok { + result = append(result, c2) + } + return true + }) + + return result +} + +// A semantic context which is true whenever none of the contained contexts +// is false.` + +type AND struct { + opnds []SemanticContext +} + +func NewAND(a, b SemanticContext) *AND { + + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands") + if aa, ok := a.(*AND); ok { + for _, o := range aa.opnds { + operands.Put(o) + } + } else { + operands.Put(a) + } + + if ba, ok := b.(*AND); ok { + for _, o := range ba.opnds { + operands.Put(o) + } + } else { + operands.Put(b) + } + precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) + if len(precedencePredicates) > 0 { + // interested in the transition with the lowest precedence + var reduced *PrecedencePredicate + + for _, p := range precedencePredicates { + if reduced == nil || p.precedence < reduced.precedence { + reduced = p + } + } + + operands.Put(reduced) + } + + vs := operands.Values() + opnds := make([]SemanticContext, len(vs)) + copy(opnds, vs) + + and := new(AND) + and.opnds = opnds + + return and +} + +func (a *AND) Equals(other Collectable[SemanticContext]) bool { + if a == other { + return true + } + if _, ok := other.(*AND); !ok { + return false + } else { + for i, v := range other.(*AND).opnds { + if !a.opnds[i].Equals(v) { + return false + } + } + return true + } +} + +// {@inheritDoc} +// +//

+// The evaluation of predicates by an AND context is short-circuiting, but +// unordered.

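+// A minimal in-package sketch, assuming two predicate values pA and pB
+// (hypothetical):
+//
+//	and := SemanticContextandContext(pA, pB) // pA && pB
+//	ok := and.evaluate(parser, outerCtx)     // false as soon as any operand evaluates false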
+func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(a.opnds); i++ { + if !a.opnds[i].evaluate(parser, outerContext) { + return false + } + } + return true +} + +func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + differs := false + operands := make([]SemanticContext, 0) + + for i := 0; i < len(a.opnds); i++ { + context := a.opnds[i] + evaluated := context.evalPrecedence(parser, outerContext) + differs = differs || (evaluated != context) + if evaluated == nil { + // The AND context is false if any element is false + return nil + } else if evaluated != SemanticContextNone { + // Reduce the result by Skipping true elements + operands = append(operands, evaluated) + } + } + if !differs { + return a + } + + if len(operands) == 0 { + // all elements were true, so the AND context is true + return SemanticContextNone + } + + var result SemanticContext + + for _, o := range operands { + if result == nil { + result = o + } else { + result = SemanticContextandContext(result, o) + } + } + + return result +} + +func (a *AND) Hash() int { + h := murmurInit(37) // Init with a value different from OR + for _, op := range a.opnds { + h = murmurUpdate(h, op.Hash()) + } + return murmurFinish(h, len(a.opnds)) +} + +func (o *OR) Hash() int { + h := murmurInit(41) // Init with o value different from AND + for _, op := range o.opnds { + h = murmurUpdate(h, op.Hash()) + } + return murmurFinish(h, len(o.opnds)) +} + +func (a *AND) String() string { + s := "" + + for _, o := range a.opnds { + s += "&& " + fmt.Sprint(o) + } + + if len(s) > 3 { + return s[0:3] + } + + return s +} + +// +// A semantic context which is true whenever at least one of the contained +// contexts is true. +// + +type OR struct { + opnds []SemanticContext +} + +func NewOR(a, b SemanticContext) *OR { + + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands") + if aa, ok := a.(*OR); ok { + for _, o := range aa.opnds { + operands.Put(o) + } + } else { + operands.Put(a) + } + + if ba, ok := b.(*OR); ok { + for _, o := range ba.opnds { + operands.Put(o) + } + } else { + operands.Put(b) + } + precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) + if len(precedencePredicates) > 0 { + // interested in the transition with the lowest precedence + var reduced *PrecedencePredicate + + for _, p := range precedencePredicates { + if reduced == nil || p.precedence > reduced.precedence { + reduced = p + } + } + + operands.Put(reduced) + } + + vs := operands.Values() + + opnds := make([]SemanticContext, len(vs)) + copy(opnds, vs) + + o := new(OR) + o.opnds = opnds + + return o +} + +func (o *OR) Equals(other Collectable[SemanticContext]) bool { + if o == other { + return true + } else if _, ok := other.(*OR); !ok { + return false + } else { + for i, v := range other.(*OR).opnds { + if !o.opnds[i].Equals(v) { + return false + } + } + return true + } +} + +//

+// The evaluation of predicates by an OR context is short-circuiting, but +// unordered.

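+// A minimal in-package sketch, assuming two predicate values pA and pB
+// (hypothetical):
+//
+//	or := SemanticContextorContext(pA, pB) // pA || pB
+//	ok := or.evaluate(parser, outerCtx)    // true as soon as any operand evaluates true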
+func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(o.opnds); i++ { + if o.opnds[i].evaluate(parser, outerContext) { + return true + } + } + return false +} + +func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + differs := false + operands := make([]SemanticContext, 0) + for i := 0; i < len(o.opnds); i++ { + context := o.opnds[i] + evaluated := context.evalPrecedence(parser, outerContext) + differs = differs || (evaluated != context) + if evaluated == SemanticContextNone { + // The OR context is true if any element is true + return SemanticContextNone + } else if evaluated != nil { + // Reduce the result by Skipping false elements + operands = append(operands, evaluated) + } + } + if !differs { + return o + } + if len(operands) == 0 { + // all elements were false, so the OR context is false + return nil + } + var result SemanticContext + + for _, o := range operands { + if result == nil { + result = o + } else { + result = SemanticContextorContext(result, o) + } + } + + return result +} + +func (o *OR) String() string { + s := "" + + for _, o := range o.opnds { + s += "|| " + fmt.Sprint(o) + } + + if len(s) > 3 { + return s[0:3] + } + + return s +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/statistics.go new file mode 100644 index 0000000000..70c0673a0f --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/statistics.go @@ -0,0 +1,281 @@ +//go:build antlr.stats + +package antlr + +import ( + "fmt" + "log" + "os" + "path/filepath" + "sort" + "strconv" + "sync" +) + +// This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default +// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag. +// + +// Tells various components to collect statistics - because it is only true when this file is included, it will +// allow the compiler to completely eliminate all the code that is only used when collecting statistics. +const collectStats = true + +// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run. +// It is exported so that it can be used by others to look for things that are not already looked for in the +// runtime statistics. +type goRunStats struct { + + // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created + // during a run. It is exported so that it can be used by others to look for things that are not already looked for + // within this package. + // + jStats []*JStatRec + jStatsLock sync.RWMutex + topN int + topNByMax []*JStatRec + topNByUsed []*JStatRec + unusedCollections map[CollectionSource]int + counts map[CollectionSource]int +} + +const ( + collectionsFile = "collections" +) + +var ( + Statistics = &goRunStats{ + topN: 10, + } +) + +type statsOption func(*goRunStats) error + +// Configure allows the statistics system to be configured as the user wants and override the defaults +func (s *goRunStats) Configure(options ...statsOption) error { + for _, option := range options { + err := option(s) + if err != nil { + return err + } + } + return nil +} + +// WithTopN sets the number of things to list in the report when we are concerned with the top N things. 
+// +// For example, if you want to see the top 20 collections by size, you can do: +// +// antlr.Statistics.Configure(antlr.WithTopN(20)) +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + s.topN = topN + return nil + } +} + +// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user. +// +// The function gathers and analyzes a number of statistics about any particular run of +// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only +// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be +// especially useful in tracking down bugs or performance problems when an ANTLR user could +// supply the output from this package, but cannot supply the grammar file(s) they are using, even +// privately to the maintainers. +// +// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user +// must call this function their selves to analyze the statistics. This is because none of the infrastructure is +// extant unless the calling program is built with the antlr.stats tag like so: +// +// go build -tags antlr.stats . +// +// When a program is built with the antlr.stats tag, the Statistics object is created and available outside +// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the +// [Statistics.Report] function to report the statistics. +// +// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send to them to +// me [Jim Idle] directly at jimi@idle.ws +// +// [Jim Idle]: https:://github.com/jim-idle +func (s *goRunStats) Analyze() { + + // Look for anything that looks strange and record it in our local maps etc for the report to present it + // + s.CollectionAnomalies() + s.TopNCollections() +} + +// TopNCollections looks through all the statistical records and gathers the top ten collections by size. +func (s *goRunStats) TopNCollections() { + + // Let's sort the stat records by MaxSize + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].MaxSize > s.jStats[j].MaxSize + }) + + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByMax = append(s.topNByMax, s.jStats[i]) + } + + // Sort by the number of times used + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts + }) + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByUsed = append(s.topNByUsed, s.jStats[i]) + } +} + +// Report dumps a markdown formatted report of all the statistics collected during a run to the given dir output +// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be +// given a type name such as `anomalies` and a time stamp such as `2021-09-01T12:34:56` and a .md suffix. 
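+//
+// A minimal usage sketch, assuming the binary was built with the antlr.stats
+// build tag and that the output directory already exists:
+//
+//	antlr.Statistics.Analyze()
+//	if err := antlr.Statistics.Report("/tmp/stats", "mygrammar"); err != nil {
+//		log.Fatal(err)
+//	}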
+func (s *goRunStats) Report(dir string, prefix string) error { + + isDir, err := isDirectory(dir) + switch { + case err != nil: + return err + case !isDir: + return fmt.Errorf("output directory `%s` is not a directory", dir) + } + s.reportCollections(dir, prefix) + + // Clean out any old data in case the user forgets + // + s.Reset() + return nil +} + +func (s *goRunStats) Reset() { + s.jStats = nil + s.topNByUsed = nil + s.topNByMax = nil +} + +func (s *goRunStats) reportCollections(dir, prefix string) { + cname := filepath.Join(dir, ".asciidoctor") + // If the file doesn't exist, create it, or append to the file + f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + _, _ = f.WriteString(`// .asciidoctorconfig +++++ + +++++`) + _ = f.Close() + + fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc") + // If the file doesn't exist, create it, or append to the file + f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + defer func(f *os.File) { + err := f.Close() + if err != nil { + log.Fatal(err) + } + }(f) + _, _ = f.WriteString("= Collections for " + prefix + "\n\n") + + _, _ = f.WriteString("== Summary\n") + + if s.unusedCollections != nil { + _, _ = f.WriteString("=== Unused Collections\n") + _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n") + _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n") + _, _ = f.WriteString(" consider removing it. If you are using a collection that is used, but not very often,\n") + _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n") + _, _ = f.WriteString(" actually needed.\n\n") + + _, _ = f.WriteString("\n.Unused collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + + for k, v := range s.unusedCollections { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + f.WriteString("|===\n\n") + } + + _, _ = f.WriteString("\n.Summary of Collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + for k, v := range s.counts { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n") + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n") + for _, c := range s.topNByMax { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + 
"\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n") + for _, c := range s.topNByUsed { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") +} + +// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when build runtimeConfig antlr.stats is enabled. +func (s *goRunStats) AddJStatRec(rec *JStatRec) { + s.jStatsLock.Lock() + defer s.jStatsLock.Unlock() + s.jStats = append(s.jStats, rec) +} + +// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found. +func (s *goRunStats) CollectionAnomalies() { + s.jStatsLock.RLock() + defer s.jStatsLock.RUnlock() + s.counts = make(map[CollectionSource]int, len(s.jStats)) + for _, c := range s.jStats { + + // Accumlate raw counts + // + s.counts[c.Source]++ + + // Look for allocated but unused collections and count them + if c.MaxSize == 0 && c.Puts == 0 { + if s.unusedCollections == nil { + s.unusedCollections = make(map[CollectionSource]int) + } + s.unusedCollections[c.Source]++ + } + if c.MaxSize > 6000 { + fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar") + } + } + +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/stats_data.go new file mode 100644 index 0000000000..4d9eb94e5f --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/stats_data.go @@ -0,0 +1,23 @@ +package antlr + +// A JStatRec is a record of a particular use of a [JStore], [JMap] or JPCMap] collection. Typically, it will be +// used to look for unused collections that wre allocated anyway, problems with hash bucket clashes, and anomalies +// such as huge numbers of Gets with no entries found GetNoEnt. You can refer to the CollectionAnomalies() function +// for ideas on what can be gleaned from these statistics about collections. +type JStatRec struct { + Source CollectionSource + MaxSize int + CurSize int + Gets int + GetHits int + GetMisses int + GetHashConflicts int + GetNoEnt int + Puts int + PutHits int + PutMisses int + PutHashConflicts int + MaxSlotSize int + Description string + CreateStack []byte +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token.go new file mode 100644 index 0000000000..9670efb829 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token.go @@ -0,0 +1,213 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" + "strings" +) + +type TokenSourceCharStreamPair struct { + tokenSource TokenSource + charStream CharStream +} + +// A token has properties: text, type, line, character position in the line +// (so we can ignore tabs), token channel, index, and source from which +// we obtained this token. 
+ +type Token interface { + GetSource() *TokenSourceCharStreamPair + GetTokenType() int + GetChannel() int + GetStart() int + GetStop() int + GetLine() int + GetColumn() int + + GetText() string + SetText(s string) + + GetTokenIndex() int + SetTokenIndex(v int) + + GetTokenSource() TokenSource + GetInputStream() CharStream + + String() string +} + +type BaseToken struct { + source *TokenSourceCharStreamPair + tokenType int // token type of the token + channel int // The parser ignores everything not on DEFAULT_CHANNEL + start int // optional return -1 if not implemented. + stop int // optional return -1 if not implemented. + tokenIndex int // from 0..n-1 of the token object in the input stream + line int // line=1..n of the 1st character + column int // beginning of the line at which it occurs, 0..n-1 + text string // text of the token. + readOnly bool +} + +const ( + TokenInvalidType = 0 + + // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state + // and did not follow it despite needing to. + TokenEpsilon = -2 + + TokenMinUserTokenType = 1 + + TokenEOF = -1 + + // TokenDefaultChannel is the default channel upon which tokens are sent to the parser. + // + // All tokens go to the parser (unless [Skip] is called in the lexer rule) + // on a particular "channel". The parser tunes to a particular channel + // so that whitespace etc... can go to the parser on a "hidden" channel. + TokenDefaultChannel = 0 + + // TokenHiddenChannel defines the normal hidden channel - the parser wil not see tokens that are not on [TokenDefaultChannel]. + // + // Anything on a different channel than TokenDefaultChannel is not parsed by parser. + TokenHiddenChannel = 1 +) + +func (b *BaseToken) GetChannel() int { + return b.channel +} + +func (b *BaseToken) GetStart() int { + return b.start +} + +func (b *BaseToken) GetStop() int { + return b.stop +} + +func (b *BaseToken) GetLine() int { + return b.line +} + +func (b *BaseToken) GetColumn() int { + return b.column +} + +func (b *BaseToken) GetTokenType() int { + return b.tokenType +} + +func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { + return b.source +} + +func (b *BaseToken) GetTokenIndex() int { + return b.tokenIndex +} + +func (b *BaseToken) SetTokenIndex(v int) { + b.tokenIndex = v +} + +func (b *BaseToken) GetTokenSource() TokenSource { + return b.source.tokenSource +} + +func (b *BaseToken) GetInputStream() CharStream { + return b.source.charStream +} + +type CommonToken struct { + BaseToken +} + +func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { + + t := &CommonToken{ + BaseToken: BaseToken{ + source: source, + tokenType: tokenType, + channel: channel, + start: start, + stop: stop, + tokenIndex: -1, + }, + } + + if t.source.tokenSource != nil { + t.line = source.tokenSource.GetLine() + t.column = source.tokenSource.GetCharPositionInLine() + } else { + t.column = -1 + } + return t +} + +// An empty {@link Pair} which is used as the default value of +// {@link //source} for tokens that do not have a source. + +//CommonToken.EMPTY_SOURCE = [ nil, nil ] + +// Constructs a New{@link CommonToken} as a copy of another {@link Token}. +// +//

+// If oldToken is also a CommonToken instance, the newly +// constructed token will share a reference to the text field and +// the source pair. Otherwise, text will +// be assigned the result of calling GetText, and source +// will be constructed from the result of Token.GetTokenSource and +// Token.GetInputStream.

+// +// @param oldToken The token to copy. +func (c *CommonToken) clone() *CommonToken { + t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) + t.tokenIndex = c.GetTokenIndex() + t.line = c.GetLine() + t.column = c.GetColumn() + t.text = c.GetText() + return t +} + +func (c *CommonToken) GetText() string { + if c.text != "" { + return c.text + } + input := c.GetInputStream() + if input == nil { + return "" + } + n := input.Size() + if c.start < n && c.stop < n { + return input.GetTextFromInterval(NewInterval(c.start, c.stop)) + } + return "" +} + +func (c *CommonToken) SetText(text string) { + c.text = text +} + +func (c *CommonToken) String() string { + txt := c.GetText() + if txt != "" { + txt = strings.Replace(txt, "\n", "\\n", -1) + txt = strings.Replace(txt, "\r", "\\r", -1) + txt = strings.Replace(txt, "\t", "\\t", -1) + } else { + txt = "" + } + + var ch string + if c.channel > 0 { + ch = ",channel=" + strconv.Itoa(c.channel) + } else { + ch = "" + } + + return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" + + txt + "',<" + strconv.Itoa(c.tokenType) + ">" + + ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]" +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token_source.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token_source.go new file mode 100644 index 0000000000..a3f36eaa67 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token_source.go @@ -0,0 +1,17 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type TokenSource interface { + NextToken() Token + Skip() + More() + GetLine() int + GetCharPositionInLine() int + GetInputStream() CharStream + GetSourceName() string + setTokenFactory(factory TokenFactory) + GetTokenFactory() TokenFactory +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token_stream.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token_stream.go new file mode 100644 index 0000000000..bf4ff6633e --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/token_stream.go @@ -0,0 +1,21 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type TokenStream interface { + IntStream + + LT(k int) Token + Reset() + + Get(index int) Token + GetTokenSource() TokenSource + SetTokenSource(TokenSource) + + GetAllText() string + GetTextFromInterval(Interval) string + GetTextFromRuleContext(RuleContext) string + GetTextFromTokens(Token, Token) string +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go new file mode 100644 index 0000000000..ccf59b465c --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go @@ -0,0 +1,662 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bytes" + "fmt" +) + +// +// Useful for rewriting out a buffered input token stream after doing some +// augmentation or other manipulations on it. + +//

+// You can insert stuff, replace, and delete chunks. Note that the operations +// are done lazily--only if you convert the buffer to a {@link String} with +// {@link TokenStream#getText()}. This is very efficient because you are not +// moving data around all the time. As the buffer of tokens is converted to +// strings, the {@link #getText()} method(s) scan the input token stream and +// check to see if there is an operation at the current index. If so, the +// operation is done and then normal {@link String} rendering continues on the +// buffer. This is like having multiple Turing machine instruction streams +// (programs) operating on a single input tape. :)

+//

+ +// This rewriter makes no modifications to the token stream. It does not ask the +// stream to fill itself up nor does it advance the input cursor. The token +// stream {@link TokenStream#index()} will return the same value before and +// after any {@link #getText()} call.

+ +//

+// The rewriter only works on tokens that you have in the buffer and ignores the +// current input cursor. If you are buffering tokens on-demand, calling +// {@link #getText()} halfway through the input will only do rewrites for those +// tokens in the first half of the file.

+ +//

+// Since the operations are done lazily at {@link #getText}-time, operations do +// not screw up the token index values. That is, an insert operation at token +// index {@code i} does not change the index values for tokens +// {@code i}+1..n-1.

+ +//

+// Because operations never actually alter the buffer, you may always get the +// original token stream back without undoing anything. Since the instructions +// are queued up, you can easily simulate transactions and roll back any changes +// if there is an error just by removing instructions. For example,

+ +//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+// 
+ +//

+// Then in the rules, you can execute (assuming rewriter is visible):

+ +//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");}
+// rewriter.insertAfter(u, "text after u");}
+// System.out.println(rewriter.getText());
+// 
+ +//

+// You can also have multiple "instruction streams" and get multiple rewrites +// from a single pass over the input. Just name the instruction streams and use +// that name again when printing the buffer. This could be useful for generating +// a C file and also its header file--all from the same buffer:

+ +//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+// 
+ +//

+// If you don't use named rewrite streams, a "default" stream is used as the +// first example shows.

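Taken together, the entry points in this file queue instructions rather than mutate the stream. A minimal end-to-end sketch of the intended usage from client code, assuming an ANTLR-generated lexer (`NewMyLexer` below is a hypothetical placeholder, not part of this diff; the rewriter calls are the ones defined in the vendored file, and the token indices are illustrative):

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// Buffer all tokens up front; the rewriter only sees what is in the buffer.
	input := antlr.NewInputStream("int x = 1;")
	lexer := NewMyLexer(input) // hypothetical generated lexer, for illustration only
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	tokens.Fill()

	// Instructions are queued, not applied: the token stream is never mutated.
	rewriter := antlr.NewTokenStreamRewriter(tokens)
	rewriter.InsertBeforeDefault(0, "// generated\n")
	rewriter.ReplaceDefaultPos(2, "y") // replace the token at index 2
	rewriter.DeleteDefaultPos(4)       // a delete is a replace with empty text

	// All queued instructions execute lazily here, against the default program.
	fmt.Println(rewriter.GetTextDefault())
}
```

Named programs (the first argument to InsertBefore, Replace, and GetText) let several independent rewrites share one pass over the same buffer, as the comment above describes.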
+ +const ( + DefaultProgramName = "default" + ProgramInitSize = 100 + MinTokenIndex = 0 +) + +// Define the rewrite operation hierarchy + +type RewriteOperation interface { + + // Execute the rewrite operation by possibly adding to the buffer. + // Return the index of the next token to operate on. + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream + SetInstructionIndex(val int) + SetIndex(int) + SetText(string) + SetOpName(string) + SetTokens(TokenStream) +} + +type BaseRewriteOperation struct { + //Current index of rewrites list + instructionIndex int + //Token buffer index + index int + //Substitution text + text string + //Actual operation name + opName string + //Pointer to token steam + tokens TokenStream +} + +func (op *BaseRewriteOperation) GetInstructionIndex() int { + return op.instructionIndex +} + +func (op *BaseRewriteOperation) GetIndex() int { + return op.index +} + +func (op *BaseRewriteOperation) GetText() string { + return op.text +} + +func (op *BaseRewriteOperation) GetOpName() string { + return op.opName +} + +func (op *BaseRewriteOperation) GetTokens() TokenStream { + return op.tokens +} + +func (op *BaseRewriteOperation) SetInstructionIndex(val int) { + op.instructionIndex = val +} + +func (op *BaseRewriteOperation) SetIndex(val int) { + op.index = val +} + +func (op *BaseRewriteOperation) SetText(val string) { + op.text = val +} + +func (op *BaseRewriteOperation) SetOpName(val string) { + op.opName = val +} + +func (op *BaseRewriteOperation) SetTokens(val TokenStream) { + op.tokens = val +} + +func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int { + return op.index +} + +func (op *BaseRewriteOperation) String() string { + return fmt.Sprintf("<%s@%d:\"%s\">", + op.opName, + op.tokens.Get(op.GetIndex()), + op.text, + ) + +} + +type InsertBeforeOp struct { + BaseRewriteOperation +} + +func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { + return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index, + text: text, + opName: "InsertBeforeOp", + tokens: stream, + }} +} + +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertBeforeOp) String() string { + return op.BaseRewriteOperation.String() +} + +// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions +// first and then the "insert before" instructions at same index. Implementation +// of "insert after" is "insert before index+1". +type InsertAfterOp struct { + BaseRewriteOperation +} + +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { + return &InsertAfterOp{ + BaseRewriteOperation: BaseRewriteOperation{ + index: index + 1, + text: text, + tokens: stream, + }, + } +} + +func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertAfterOp) String() string { + return op.BaseRewriteOperation.String() +} + +// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp +// instructions. 
+type ReplaceOp struct {
+	BaseRewriteOperation
+	LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
+	return &ReplaceOp{
+		BaseRewriteOperation: BaseRewriteOperation{
+			index:  from,
+			text:   text,
+			opName: "ReplaceOp",
+			tokens: stream,
+		},
+		LastIndex: to,
+	}
+}
+
+func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
+	if op.text != "" {
+		buffer.WriteString(op.text)
+	}
+	return op.LastIndex + 1
+}
+
+func (op *ReplaceOp) String() string {
+	if op.text == "" {
+		return fmt.Sprintf("<DeleteOp@%v..%v>",
+			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+	}
+	return fmt.Sprintf("<ReplaceOp@%v..%v:\"%v\">",
+		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+type TokenStreamRewriter struct {
+	//Our source stream
+	tokens TokenStream
+	// You may have multiple, named streams of rewrite operations.
+	// I'm calling these things "programs."
+	// Maps String (name) → rewrite (List)
+	programs                map[string][]RewriteOperation
+	lastRewriteTokenIndexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
+	return &TokenStreamRewriter{
+		tokens: tokens,
+		programs: map[string][]RewriteOperation{
+			DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize),
+		},
+		lastRewriteTokenIndexes: map[string]int{},
+	}
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
+	return tsr.tokens
+}
+
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) {
+	is, ok := tsr.programs[programName]
+	if ok {
+		tsr.programs[programName] = is[MinTokenIndex:instructionIndex]
+	}
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) {
+	tsr.Rollback(DefaultProgramName, instructionIndex)
+}
+
+// DeleteProgram resets the program so that no instructions exist.
+func (tsr *TokenStreamRewriter) DeleteProgram(programName string) {
+	tsr.Rollback(programName, MinTokenIndex) //TODO: double test on that cause lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
+	tsr.DeleteProgram(DefaultProgramName)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) {
+	// to insert after, just insert before next index (even if past end)
+	var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+	rewrites := tsr.GetProgram(programName)
+	op.SetInstructionIndex(len(rewrites))
+	tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
+	tsr.InsertAfter(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) {
+	tsr.InsertAfter(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) {
+	var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+	rewrites := tsr.GetProgram(programName)
+	op.SetInstructionIndex(len(rewrites))
+	tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
+	tsr.InsertBefore(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) {
+	tsr.InsertBefore(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text
string) { + if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() { + panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", + from, to, tsr.tokens.Size())) + } + var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) + rewrites := tsr.GetProgram(programName) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(programName, op) +} + +func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) { + tsr.Replace(DefaultProgramName, from, to, text) +} + +func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) { + tsr.ReplaceDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) { + tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) { + tsr.ReplaceToken(DefaultProgramName, from, to, text) +} + +func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) { + tsr.ReplaceTokenDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) { + tsr.Replace(programName, from, to, "") +} + +func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) { + tsr.Delete(DefaultProgramName, from, to) +} + +func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) { + tsr.DeleteDefault(index, index) +} + +func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) { + tsr.ReplaceToken(programName, from, to, "") +} + +func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) { + tsr.DeleteToken(DefaultProgramName, from, to) +} + +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int { + i, ok := tsr.lastRewriteTokenIndexes[programName] + if !ok { + return -1 + } + return i +} + +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int { + return tsr.GetLastRewriteTokenIndex(DefaultProgramName) +} + +func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) { + tsr.lastRewriteTokenIndexes[programName] = i +} + +func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation { + is := make([]RewriteOperation, 0, ProgramInitSize) + tsr.programs[name] = is + return is +} + +func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) { + is := tsr.GetProgram(name) + is = append(is, op) + tsr.programs[name] = is +} + +func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation { + is, ok := tsr.programs[name] + if !ok { + is = tsr.InitializeProgram(name) + } + return is +} + +// GetTextDefault returns the text from the original tokens altered per the +// instructions given to this rewriter. +func (tsr *TokenStreamRewriter) GetTextDefault() string { + return tsr.GetText( + DefaultProgramName, + NewInterval(0, tsr.tokens.Size()-1)) +} + +// GetText returns the text from the original tokens altered per the +// instructions given to this rewriter. 
+func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string { + rewrites := tsr.programs[programName] + start := interval.Start + stop := interval.Stop + // ensure start/end are in range + stop = min(stop, tsr.tokens.Size()-1) + start = max(start, 0) + if len(rewrites) == 0 { + return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute + } + buf := bytes.Buffer{} + // First, optimize instruction stream + indexToOp := reduceToSingleOperationPerIndex(rewrites) + // Walk buffer, executing instructions and emitting tokens + for i := start; i <= stop && i < tsr.tokens.Size(); { + op := indexToOp[i] + delete(indexToOp, i) // remove so any left have index size-1 + t := tsr.tokens.Get(i) + if op == nil { + // no operation at that index, just dump token + if t.GetTokenType() != TokenEOF { + buf.WriteString(t.GetText()) + } + i++ // move to next token + } else { + i = op.Execute(&buf) // execute operation and skip + } + } + // include stuff after end if it's last index in buffer + // So, if they did an insertAfter(lastValidIndex, "foo"), include + // foo if end==lastValidIndex. + if stop == tsr.tokens.Size()-1 { + // Scan any remaining operations after last token + // should be included (they will be inserts). + for _, op := range indexToOp { + if op.GetIndex() >= tsr.tokens.Size()-1 { + buf.WriteString(op.GetText()) + } + } + } + return buf.String() +} + +// reduceToSingleOperationPerIndex combines operations and report invalid operations (like +// overlapping replaces that are not completed nested). Inserts to +// same index need to be combined etc... +// +// Here are the cases: +// +// I.i.u I.j.v leave alone, non-overlapping +// I.i.u I.i.v combine: Iivu +// +// R.i-j.u R.x-y.v | i-j in x-y delete first R +// R.i-j.u R.i-j.v delete first R +// R.i-j.u R.x-y.v | x-y in i-j ERROR +// R.i-j.u R.x-y.v | boundaries overlap ERROR +// +// Delete special case of replace (text==null): +// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) +// +// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before +// we're not deleting i) +// I.i.u R.x-y.v | i not in (x+1)-y leave alone, non-overlapping +// R.x-y.v I.i.u | i in x-y ERROR +// R.x-y.v I.x.u R.x-y.uv (combine, delete I) +// R.x-y.v I.i.u | i not in x-y leave alone, non-overlapping +// +// I.i.u = insert u before op @ index i +// R.x-y.u = replace x-y indexed tokens with u +// +// First we need to examine replaces. For any replace op: +// +// 1. wipe out any insertions before op within that range. +// 2. Drop any replace op before that is contained completely within +// that range. +// 3. Throw exception upon boundary overlap with any previous replace. +// +// Then we can deal with inserts: +// +// 1. for any inserts to same index, combine even if not adjacent. +// 2. for any prior replace with same left boundary, combine this +// insert with replace and delete this 'replace'. +// 3. throw exception if index in same range as previous replace +// +// Don't actually delete; make op null in list. Easier to walk list. +// Later we can throw as we add to index → op map. +// +// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the +// inserted stuff would be before the 'replace' range. But, if you +// add tokens in front of a method body '{' and then delete the method +// body, I think the stuff before the '{' you added should disappear too. +// +// The func returns a map from token index to operation. 
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation { + // WALK REPLACES + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + rop, ok := op.(*ReplaceOp) + if !ok { + continue + } + // Wipe prior inserts within range + for j := 0; j < i && j < len(rewrites); j++ { + if iop, ok := rewrites[j].(*InsertBeforeOp); ok { + if iop.index == rop.index { + // E.g., insert before 2, delete 2..2; update replace + // text to include insert before, kill insert + rewrites[iop.instructionIndex] = nil + if rop.text != "" { + rop.text = iop.text + rop.text + } else { + rop.text = iop.text + } + } else if iop.index > rop.index && iop.index <= rop.LastIndex { + // delete insert as it's a no-op. + rewrites[iop.instructionIndex] = nil + } + } + } + // Drop any prior replaces contained within + for j := 0; j < i && j < len(rewrites); j++ { + if prevop, ok := rewrites[j].(*ReplaceOp); ok { + if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex { + // delete replace as it's a no-op. + rewrites[prevop.instructionIndex] = nil + continue + } + // throw exception unless disjoint or identical + disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex + // Delete special case of replace (text==null): + // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) + if prevop.text == "" && rop.text == "" && !disjoint { + rewrites[prevop.instructionIndex] = nil + rop.index = min(prevop.index, rop.index) + rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) + } else if !disjoint { + panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) + } + } + } + } + // WALK INSERTS + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + //hack to replicate inheritance in composition + _, iok := rewrites[i].(*InsertBeforeOp) + _, aok := rewrites[i].(*InsertAfterOp) + if !iok && !aok { + continue + } + iop := rewrites[i] + // combine current insert with prior if any at same index + // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic + for j := 0; j < i && j < len(rewrites); j++ { + if nextIop, ok := rewrites[j].(*InsertAfterOp); ok { + if nextIop.index == iop.GetIndex() { + iop.SetText(nextIop.text + iop.GetText()) + rewrites[j] = nil + } + } + if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok { + if prevIop.index == iop.GetIndex() { + iop.SetText(iop.GetText() + prevIop.text) + rewrites[prevIop.instructionIndex] = nil + } + } + } + // look for replaces where iop.index is in range; error + for j := 0; j < i && j < len(rewrites); j++ { + if rop, ok := rewrites[j].(*ReplaceOp); ok { + if iop.GetIndex() == rop.index { + rop.text = iop.GetText() + rop.text + rewrites[i] = nil + continue + } + if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex { + panic("insert op " + iop.String() + " within boundaries of previous " + rop.String()) + } + } + } + } + m := map[int]RewriteOperation{} + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + if _, ok := m[op.GetIndex()]; ok { + panic("should only be one op per index") + } + m[op.GetIndex()] = op + } + return m +} + +/* + Quick fixing Go lack of overloads +*/ + +func max(a, b int) int { + if a > b { + return a + } else { + return b + } +} +func min(a, b int) int { + if a < b { + return a + } else { + return b + } +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go 
b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go new file mode 100644 index 0000000000..7b663bf849 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go @@ -0,0 +1,32 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "fmt" + +type TraceListener struct { + parser *BaseParser +} + +func NewTraceListener(parser *BaseParser) *TraceListener { + tl := new(TraceListener) + tl.parser = parser + return tl +} + +func (t *TraceListener) VisitErrorNode(_ ErrorNode) { +} + +func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) { + fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText()) +} + +func (t *TraceListener) VisitTerminal(node TerminalNode) { + fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()]) +} + +func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) { + fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText()) +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/transition.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/transition.go new file mode 100644 index 0000000000..313b0fc127 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/transition.go @@ -0,0 +1,439 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +// atom, set, epsilon, action, predicate, rule transitions. +// +//

+// This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct the ATN,
+// we can fix these transitions as specific classes. The DFA transitions, on
+// the other hand, need to update their labels as edges are added to
+// the states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.

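A transition, then, is just a labeled (or epsilon) edge with a fixed target state. A minimal sketch of how the Matches predicates defined below behave, assuming the runtime's exported NewBasicState constructor from atn_state.go (the symbol values and vocabulary bounds are illustrative):

```go
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// A throwaway target state; every transition must point at one.
	target := antlr.NewBasicState()

	// An atom transition matches exactly one symbol.
	atom := antlr.NewAtomTransition(target, 'a')
	fmt.Println(atom.Matches('a', 0, 0x10FFFF)) // true
	fmt.Println(atom.Matches('b', 0, 0x10FFFF)) // false

	// A range transition matches a contiguous interval of symbols.
	rng := antlr.NewRangeTransition(target, '0', '9')
	fmt.Println(rng.Matches('5', 0, 0x10FFFF)) // true

	// Epsilon transitions never match a symbol; the ATN simulator
	// traverses them for free while computing closures.
	eps := antlr.NewEpsilonTransition(target, -1)
	fmt.Println(eps.Matches('a', 0, 0x10FFFF)) // false
}
```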
+ +type Transition interface { + getTarget() ATNState + setTarget(ATNState) + getIsEpsilon() bool + getLabel() *IntervalSet + getSerializationType() int + Matches(int, int, int) bool +} + +type BaseTransition struct { + target ATNState + isEpsilon bool + label int + intervalSet *IntervalSet + serializationType int +} + +func NewBaseTransition(target ATNState) *BaseTransition { + + if target == nil { + panic("target cannot be nil.") + } + + t := new(BaseTransition) + + t.target = target + // Are we epsilon, action, sempred? + t.isEpsilon = false + t.intervalSet = nil + + return t +} + +func (t *BaseTransition) getTarget() ATNState { + return t.target +} + +func (t *BaseTransition) setTarget(s ATNState) { + t.target = s +} + +func (t *BaseTransition) getIsEpsilon() bool { + return t.isEpsilon +} + +func (t *BaseTransition) getLabel() *IntervalSet { + return t.intervalSet +} + +func (t *BaseTransition) getSerializationType() int { + return t.serializationType +} + +func (t *BaseTransition) Matches(_, _, _ int) bool { + panic("Not implemented") +} + +const ( + TransitionEPSILON = 1 + TransitionRANGE = 2 + TransitionRULE = 3 + TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? + TransitionATOM = 5 + TransitionACTION = 6 + TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 + TransitionNOTSET = 8 + TransitionWILDCARD = 9 + TransitionPRECEDENCE = 10 +) + +//goland:noinspection GoUnusedGlobalVariable +var TransitionserializationNames = []string{ + "INVALID", + "EPSILON", + "RANGE", + "RULE", + "PREDICATE", + "ATOM", + "ACTION", + "SET", + "NOT_SET", + "WILDCARD", + "PRECEDENCE", +} + +//var TransitionserializationTypes struct { +// EpsilonTransition int +// RangeTransition int +// RuleTransition int +// PredicateTransition int +// AtomTransition int +// ActionTransition int +// SetTransition int +// NotSetTransition int +// WildcardTransition int +// PrecedencePredicateTransition int +//}{ +// TransitionEPSILON, +// TransitionRANGE, +// TransitionRULE, +// TransitionPREDICATE, +// TransitionATOM, +// TransitionACTION, +// TransitionSET, +// TransitionNOTSET, +// TransitionWILDCARD, +// TransitionPRECEDENCE +//} + +// AtomTransition +// TODO: make all transitions sets? 
no, should remove set edges +type AtomTransition struct { + BaseTransition +} + +func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { + t := &AtomTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionATOM, + label: intervalSet, + isEpsilon: false, + }, + } + t.intervalSet = t.makeLabel() + + return t +} + +func (t *AtomTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addOne(t.label) + return s +} + +func (t *AtomTransition) Matches(symbol, _, _ int) bool { + return t.label == symbol +} + +func (t *AtomTransition) String() string { + return strconv.Itoa(t.label) +} + +type RuleTransition struct { + BaseTransition + followState ATNState + ruleIndex, precedence int +} + +func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { + return &RuleTransition{ + BaseTransition: BaseTransition{ + target: ruleStart, + isEpsilon: true, + serializationType: TransitionRULE, + }, + ruleIndex: ruleIndex, + precedence: precedence, + followState: followState, + } +} + +func (t *RuleTransition) Matches(_, _, _ int) bool { + return false +} + +type EpsilonTransition struct { + BaseTransition + outermostPrecedenceReturn int +} + +func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { + return &EpsilonTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionEPSILON, + isEpsilon: true, + }, + outermostPrecedenceReturn: outermostPrecedenceReturn, + } +} + +func (t *EpsilonTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *EpsilonTransition) String() string { + return "epsilon" +} + +type RangeTransition struct { + BaseTransition + start, stop int +} + +func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { + t := &RangeTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionRANGE, + isEpsilon: false, + }, + start: start, + stop: stop, + } + t.intervalSet = t.makeLabel() + return t +} + +func (t *RangeTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addRange(t.start, t.stop) + return s +} + +func (t *RangeTransition) Matches(symbol, _, _ int) bool { + return symbol >= t.start && symbol <= t.stop +} + +func (t *RangeTransition) String() string { + var sb strings.Builder + sb.WriteByte('\'') + sb.WriteRune(rune(t.start)) + sb.WriteString("'..'") + sb.WriteRune(rune(t.stop)) + sb.WriteByte('\'') + return sb.String() +} + +type AbstractPredicateTransition interface { + Transition + IAbstractPredicateTransitionFoo() +} + +type BaseAbstractPredicateTransition struct { + BaseTransition +} + +func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { + return &BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + }, + } +} + +func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} + +type PredicateTransition struct { + BaseAbstractPredicateTransition + isCtxDependent bool + ruleIndex, predIndex int +} + +func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { + return &PredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPREDICATE, + isEpsilon: true, + }, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + predIndex: predIndex, + } +} + +func (t 
*PredicateTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *PredicateTransition) getPredicate() *Predicate { + return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) +} + +func (t *PredicateTransition) String() string { + return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) +} + +type ActionTransition struct { + BaseTransition + isCtxDependent bool + ruleIndex, actionIndex, predIndex int +} + +func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { + return &ActionTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionACTION, + isEpsilon: true, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + actionIndex: actionIndex, + } +} + +func (t *ActionTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *ActionTransition) String() string { + return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) +} + +type SetTransition struct { + BaseTransition +} + +func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { + t := &SetTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionSET, + }, + } + + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + return t +} + +func (t *SetTransition) Matches(symbol, _, _ int) bool { + return t.intervalSet.contains(symbol) +} + +func (t *SetTransition) String() string { + return t.intervalSet.String() +} + +type NotSetTransition struct { + SetTransition +} + +func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { + t := &NotSetTransition{ + SetTransition: SetTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionNOTSET, + }, + }, + } + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + + return t +} + +func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) +} + +func (t *NotSetTransition) String() string { + return "~" + t.intervalSet.String() +} + +type WildcardTransition struct { + BaseTransition +} + +func NewWildcardTransition(target ATNState) *WildcardTransition { + return &WildcardTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionWILDCARD, + }, + } +} + +func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol +} + +func (t *WildcardTransition) String() string { + return "." 
+} + +type PrecedencePredicateTransition struct { + BaseAbstractPredicateTransition + precedence int +} + +func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { + return &PrecedencePredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPRECEDENCE, + isEpsilon: true, + }, + }, + precedence: precedence, + } +} + +func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { + return NewPrecedencePredicate(t.precedence) +} + +func (t *PrecedencePredicateTransition) String() string { + return fmt.Sprint(t.precedence) + " >= _p" +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/tree.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/tree.go new file mode 100644 index 0000000000..c288420fb2 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/tree.go @@ -0,0 +1,304 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The basic notion of a tree has a parent, a payload, and a list of children. +// It is the most abstract interface for all the trees used by ANTLR. +/// + +var TreeInvalidInterval = NewInterval(-1, -2) + +type Tree interface { + GetParent() Tree + SetParent(Tree) + GetPayload() interface{} + GetChild(i int) Tree + GetChildCount() int + GetChildren() []Tree +} + +type SyntaxTree interface { + Tree + GetSourceInterval() Interval +} + +type ParseTree interface { + SyntaxTree + Accept(Visitor ParseTreeVisitor) interface{} + GetText() string + ToStringTree([]string, Recognizer) string +} + +type RuleNode interface { + ParseTree + GetRuleContext() RuleContext +} + +type TerminalNode interface { + ParseTree + GetSymbol() Token +} + +type ErrorNode interface { + TerminalNode + + errorNode() +} + +type ParseTreeVisitor interface { + Visit(tree ParseTree) interface{} + VisitChildren(node RuleNode) interface{} + VisitTerminal(node TerminalNode) interface{} + VisitErrorNode(node ErrorNode) interface{} +} + +type BaseParseTreeVisitor struct{} + +var _ ParseTreeVisitor = &BaseParseTreeVisitor{} + +func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } +func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil } + +// TODO: Implement this? 
+//func (this ParseTreeVisitor) Visit(ctx) {
+//	if (Utils.isArray(ctx)) {
+//		self := this
+//		return ctx.map(function(child) { return VisitAtom(self, child)})
+//	} else {
+//		return VisitAtom(this, ctx)
+//	}
+//}
+//
+//func VisitAtom(Visitor, ctx) {
+//	if (ctx.parser == nil) { //is terminal
+//		return
+//	}
+//
+//	name := ctx.parser.ruleNames[ctx.ruleIndex]
+//	funcName := "Visit" + Utils.titleCase(name)
+//
+//	return Visitor[funcName](ctx)
+//}
+
+type ParseTreeListener interface {
+	VisitTerminal(node TerminalNode)
+	VisitErrorNode(node ErrorNode)
+	EnterEveryRule(ctx ParserRuleContext)
+	ExitEveryRule(ctx ParserRuleContext)
+}
+
+type BaseParseTreeListener struct{}
+
+var _ ParseTreeListener = &BaseParseTreeListener{}
+
+func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode)       {}
+func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode)         {}
+func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext)  {}
+
+type TerminalNodeImpl struct {
+	parentCtx RuleContext
+	symbol    Token
+}
+
+var _ TerminalNode = &TerminalNodeImpl{}
+
+func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
+	tn := new(TerminalNodeImpl)
+
+	tn.parentCtx = nil
+	tn.symbol = symbol
+
+	return tn
+}
+
+func (t *TerminalNodeImpl) GetChild(_ int) Tree {
+	return nil
+}
+
+func (t *TerminalNodeImpl) GetChildren() []Tree {
+	return nil
+}
+
+func (t *TerminalNodeImpl) SetChildren(_ []Tree) {
+	panic("Cannot set children on terminal node")
+}
+
+func (t *TerminalNodeImpl) GetSymbol() Token {
+	return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetParent() Tree {
+	return t.parentCtx
+}
+
+func (t *TerminalNodeImpl) SetParent(tree Tree) {
+	t.parentCtx = tree.(RuleContext)
+}
+
+func (t *TerminalNodeImpl) GetPayload() interface{} {
+	return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetSourceInterval() Interval {
+	if t.symbol == nil {
+		return TreeInvalidInterval
+	}
+	tokenIndex := t.symbol.GetTokenIndex()
+	return NewInterval(tokenIndex, tokenIndex)
+}
+
+func (t *TerminalNodeImpl) GetChildCount() int {
+	return 0
+}
+
+func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+	return v.VisitTerminal(t)
+}
+
+func (t *TerminalNodeImpl) GetText() string {
+	return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) String() string {
+	if t.symbol.GetTokenType() == TokenEOF {
+		return "<EOF>"
+	}
+
+	return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string {
+	return t.String()
+}
+
+// Represents a token that was consumed during reSynchronization
+// rather than during a valid Match operation. For example,
+// we will create this kind of a node during single token insertion
+// and deletion as well as during "consume until error recovery set"
+// upon no viable alternative exceptions.
+
+type ErrorNodeImpl struct {
+	*TerminalNodeImpl
+}
+
+var _ ErrorNode = &ErrorNodeImpl{}
+
+func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
+	en := new(ErrorNodeImpl)
+	en.TerminalNodeImpl = NewTerminalNodeImpl(token)
+	return en
+}
+
+func (e *ErrorNodeImpl) errorNode() {}
+
+func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+	return v.VisitErrorNode(e)
+}
+
+type ParseTreeWalker struct {
+}
+
+func NewParseTreeWalker() *ParseTreeWalker {
+	return new(ParseTreeWalker)
+}
+
+// Walk performs a walk on the given parse tree starting at the root and going down recursively
+// with depth-first search.
On each node, [EnterRule] is called before +// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up. +func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + switch tt := t.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + p.EnterRule(listener, t.(RuleNode)) + for i := 0; i < t.GetChildCount(); i++ { + child := t.GetChild(i) + p.Walk(listener, child) + } + p.ExitRule(listener, t.(RuleNode)) + } +} + +// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule] +// then by triggering the event specific to the given parse tree node +func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) +} + +// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node +// then by triggering the generic event [ParseTreeListener].ExitEveryRule +func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) +} + +//goland:noinspection GoUnusedGlobalVariable +var ParseTreeWalkerDefault = NewParseTreeWalker() + +type IterativeParseTreeWalker struct { + *ParseTreeWalker +} + +//goland:noinspection GoUnusedExportedFunction +func NewIterativeParseTreeWalker() *IterativeParseTreeWalker { + return new(IterativeParseTreeWalker) +} + +func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + var stack []Tree + var indexStack []int + currentNode := t + currentIndex := 0 + + for currentNode != nil { + // pre-order visit + switch tt := currentNode.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + i.EnterRule(listener, currentNode.(RuleNode)) + } + // Move down to first child, if exists + if currentNode.GetChildCount() > 0 { + stack = append(stack, currentNode) + indexStack = append(indexStack, currentIndex) + currentIndex = 0 + currentNode = currentNode.GetChild(0) + continue + } + + for { + // post-order visit + if ruleNode, ok := currentNode.(RuleNode); ok { + i.ExitRule(listener, ruleNode) + } + // No parent, so no siblings + if len(stack) == 0 { + currentNode = nil + currentIndex = 0 + break + } + // Move to next sibling if possible + currentIndex++ + if stack[len(stack)-1].GetChildCount() > currentIndex { + currentNode = stack[len(stack)-1].GetChild(currentIndex) + break + } + // No next, sibling, so move up + currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1] + currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1] + } + } +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/trees.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/trees.go new file mode 100644 index 0000000000..f44c05d811 --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/trees.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "fmt" + +/** A set of utility routines useful for all kinds of ANTLR trees. */ + +// TreesStringTree prints out a whole tree in LISP form. 
[getNodeText] is used on the +// node payloads to get the text for the nodes. Detects parse trees and extracts data appropriately. +func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { + + if recog != nil { + ruleNames = recog.GetRuleNames() + } + + s := TreesGetNodeText(tree, ruleNames, nil) + + s = EscapeWhitespace(s, false) + c := tree.GetChildCount() + if c == 0 { + return s + } + res := "(" + s + " " + if c > 0 { + s = TreesStringTree(tree.GetChild(0), ruleNames, nil) + res += s + } + for i := 1; i < c; i++ { + s = TreesStringTree(tree.GetChild(i), ruleNames, nil) + res += " " + s + } + res += ")" + return res +} + +func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { + if recog != nil { + ruleNames = recog.GetRuleNames() + } + + if ruleNames != nil { + switch t2 := t.(type) { + case RuleNode: + t3 := t2.GetRuleContext() + altNumber := t3.GetAltNumber() + + if altNumber != ATNInvalidAltNumber { + return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber) + } + return ruleNames[t3.GetRuleIndex()] + case ErrorNode: + return fmt.Sprint(t2) + case TerminalNode: + if t2.GetSymbol() != nil { + return t2.GetSymbol().GetText() + } + } + } + + // no recognition for rule names + payload := t.GetPayload() + if p2, ok := payload.(Token); ok { + return p2.GetText() + } + + return fmt.Sprint(t.GetPayload()) +} + +// TreesGetChildren returns am ordered list of all children of this node +// +//goland:noinspection GoUnusedExportedFunction +func TreesGetChildren(t Tree) []Tree { + list := make([]Tree, 0) + for i := 0; i < t.GetChildCount(); i++ { + list = append(list, t.GetChild(i)) + } + return list +} + +// TreesgetAncestors returns a list of all ancestors of this node. The first node of list is the root +// and the last node is the parent of this node. +// +//goland:noinspection GoUnusedExportedFunction +func TreesgetAncestors(t Tree) []Tree { + ancestors := make([]Tree, 0) + t = t.GetParent() + for t != nil { + f := []Tree{t} + ancestors = append(f, ancestors...) + t = t.GetParent() + } + return ancestors +} + +//goland:noinspection GoUnusedExportedFunction +func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { + return TreesfindAllNodes(t, ttype, true) +} + +//goland:noinspection GoUnusedExportedFunction +func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { + return TreesfindAllNodes(t, ruleIndex, false) +} + +func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree { + nodes := make([]ParseTree, 0) + treesFindAllNodes(t, index, findTokens, &nodes) + return nodes +} + +func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) { + // check this node (the root) first + + t2, ok := t.(TerminalNode) + t3, ok2 := t.(ParserRuleContext) + + if findTokens && ok { + if t2.GetSymbol().GetTokenType() == index { + *nodes = append(*nodes, t2) + } + } else if !findTokens && ok2 { + if t3.GetRuleIndex() == index { + *nodes = append(*nodes, t3) + } + } + // check children + for i := 0; i < t.GetChildCount(); i++ { + treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes) + } +} + +//goland:noinspection GoUnusedExportedFunction +func TreesDescendants(t ParseTree) []ParseTree { + nodes := []ParseTree{t} + for i := 0; i < t.GetChildCount(); i++ { + nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...) 
+ } + return nodes +} diff --git a/hack/tools/vendor/github.com/antlr4-go/antlr/v4/utils.go b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/utils.go new file mode 100644 index 0000000000..733d7df9dc --- /dev/null +++ b/hack/tools/vendor/github.com/antlr4-go/antlr/v4/utils.go @@ -0,0 +1,328 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bytes" + "errors" + "fmt" + "math/bits" + "os" + "strconv" + "strings" + "syscall" +) + +func intMin(a, b int) int { + if a < b { + return a + } + return b +} + +func intMax(a, b int) int { + if a > b { + return a + } + return b +} + +// A simple integer stack + +type IntStack []int + +var ErrEmptyStack = errors.New("stack is empty") + +func (s *IntStack) Pop() (int, error) { + l := len(*s) - 1 + if l < 0 { + return 0, ErrEmptyStack + } + v := (*s)[l] + *s = (*s)[0:l] + return v, nil +} + +func (s *IntStack) Push(e int) { + *s = append(*s, e) +} + +const bitsPerWord = 64 + +func indexForBit(bit int) int { + return bit / bitsPerWord +} + +//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction +func wordForBit(data []uint64, bit int) uint64 { + idx := indexForBit(bit) + if idx >= len(data) { + return 0 + } + return data[idx] +} + +func maskForBit(bit int) uint64 { + return uint64(1) << (bit % bitsPerWord) +} + +func wordsNeeded(bit int) int { + return indexForBit(bit) + 1 +} + +type BitSet struct { + data []uint64 +} + +// NewBitSet creates a new bitwise set +// TODO: See if we can replace with the standard library's BitSet +func NewBitSet() *BitSet { + return &BitSet{} +} + +func (b *BitSet) add(value int) { + idx := indexForBit(value) + if idx >= len(b.data) { + size := wordsNeeded(value) + data := make([]uint64, size) + copy(data, b.data) + b.data = data + } + b.data[idx] |= maskForBit(value) +} + +func (b *BitSet) clear(index int) { + idx := indexForBit(index) + if idx >= len(b.data) { + return + } + b.data[idx] &= ^maskForBit(index) +} + +func (b *BitSet) or(set *BitSet) { + // Get min size necessary to represent the bits in both sets. + bLen := b.minLen() + setLen := set.minLen() + maxLen := intMax(bLen, setLen) + if maxLen > len(b.data) { + // Increase the size of len(b.data) to represent the bits in both sets. + data := make([]uint64, maxLen) + copy(data, b.data) + b.data = data + } + // len(b.data) is at least setLen. + for i := 0; i < setLen; i++ { + b.data[i] |= set.data[i] + } +} + +func (b *BitSet) remove(value int) { + b.clear(value) +} + +func (b *BitSet) contains(value int) bool { + idx := indexForBit(value) + if idx >= len(b.data) { + return false + } + return (b.data[idx] & maskForBit(value)) != 0 +} + +func (b *BitSet) minValue() int { + for i, v := range b.data { + if v == 0 { + continue + } + return i*bitsPerWord + bits.TrailingZeros64(v) + } + return 2147483647 +} + +func (b *BitSet) equals(other interface{}) bool { + otherBitSet, ok := other.(*BitSet) + if !ok { + return false + } + + if b == otherBitSet { + return true + } + + // We only compare set bits, so we cannot rely on the two slices having the same size. Its + // possible for two BitSets to have different slice lengths but the same set bits. So we only + // compare the relevant words and ignore the trailing zeros. 
+ bLen := b.minLen() + otherLen := otherBitSet.minLen() + + if bLen != otherLen { + return false + } + + for i := 0; i < bLen; i++ { + if b.data[i] != otherBitSet.data[i] { + return false + } + } + + return true +} + +func (b *BitSet) minLen() int { + for i := len(b.data); i > 0; i-- { + if b.data[i-1] != 0 { + return i + } + } + return 0 +} + +func (b *BitSet) length() int { + cnt := 0 + for _, val := range b.data { + cnt += bits.OnesCount64(val) + } + return cnt +} + +func (b *BitSet) String() string { + vals := make([]string, 0, b.length()) + + for i, v := range b.data { + for v != 0 { + n := bits.TrailingZeros64(v) + vals = append(vals, strconv.Itoa(i*bitsPerWord+n)) + v &= ^(uint64(1) << n) + } + } + + return "{" + strings.Join(vals, ", ") + "}" +} + +type AltDict struct { + data map[string]interface{} +} + +func NewAltDict() *AltDict { + d := new(AltDict) + d.data = make(map[string]interface{}) + return d +} + +func (a *AltDict) Get(key string) interface{} { + key = "k-" + key + return a.data[key] +} + +func (a *AltDict) put(key string, value interface{}) { + key = "k-" + key + a.data[key] = value +} + +func (a *AltDict) values() []interface{} { + vs := make([]interface{}, len(a.data)) + i := 0 + for _, v := range a.data { + vs[i] = v + i++ + } + return vs +} + +func EscapeWhitespace(s string, escapeSpaces bool) string { + + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + if escapeSpaces { + s = strings.Replace(s, " ", "\u00B7", -1) + } + return s +} + +//goland:noinspection GoUnusedExportedFunction +func TerminalNodeToStringArray(sa []TerminalNode) []string { + st := make([]string, len(sa)) + + for i, s := range sa { + st[i] = fmt.Sprintf("%v", s) + } + + return st +} + +//goland:noinspection GoUnusedExportedFunction +func PrintArrayJavaStyle(sa []string) string { + var buffer bytes.Buffer + + buffer.WriteString("[") + + for i, s := range sa { + buffer.WriteString(s) + if i != len(sa)-1 { + buffer.WriteString(", ") + } + } + + buffer.WriteString("]") + + return buffer.String() +} + +// murmur hash +func murmurInit(seed int) int { + return seed +} + +func murmurUpdate(h int, value int) int { + const c1 uint32 = 0xCC9E2D51 + const c2 uint32 = 0x1B873593 + const r1 uint32 = 15 + const r2 uint32 = 13 + const m uint32 = 5 + const n uint32 = 0xE6546B64 + + k := uint32(value) + k *= c1 + k = (k << r1) | (k >> (32 - r1)) + k *= c2 + + hash := uint32(h) ^ k + hash = (hash << r2) | (hash >> (32 - r2)) + hash = hash*m + n + return int(hash) +} + +func murmurFinish(h int, numberOfWords int) int { + var hash = uint32(h) + hash ^= uint32(numberOfWords) << 2 + hash ^= hash >> 16 + hash *= 0x85ebca6b + hash ^= hash >> 13 + hash *= 0xc2b2ae35 + hash ^= hash >> 16 + + return int(hash) +} + +func isDirectory(dir string) (bool, error) { + fileInfo, err := os.Stat(dir) + if err != nil { + switch { + case errors.Is(err, syscall.ENOENT): + // The given directory does not exist, so we will try to create it + // + err = os.MkdirAll(dir, 0755) + if err != nil { + return false, err + } + + return true, nil + case err != nil: + return false, err + default: + } + } + return fileInfo.IsDir(), err +} diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/.travis.yml b/hack/tools/vendor/github.com/asaskevich/govalidator/.travis.yml new file mode 100644 index 0000000000..e29f8eef5e --- /dev/null +++ b/hack/tools/vendor/github.com/asaskevich/govalidator/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.1 + - 1.2 + - 1.3 + - 
1.4 + - 1.5 + - 1.6 + - tip + +notifications: + email: + - bwatas@gmail.com diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/hack/tools/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md new file mode 100644 index 0000000000..f0f7e3a8ad --- /dev/null +++ b/hack/tools/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md @@ -0,0 +1,63 @@ +#### Support +If you do have a contribution to the package, feel free to create a Pull Request or an Issue. + +#### What to contribute +If you don't know what to do, there are some features and functions that need to be done + +- [ ] Refactor code +- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check +- [ ] Create actual list of contributors and projects that currently using this package +- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) +- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) +- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new +- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc +- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) +- [ ] Implement fuzzing testing +- [ ] Implement some struct/map/array utilities +- [ ] Implement map/array validation +- [ ] Implement benchmarking +- [ ] Implement batch of examples +- [ ] Look at forks for new features and fixes + +#### Advice +Feel free to create what you want, but keep in mind when you implement new features: +- Code must be clear and readable, names of variables/constants clearly describes what they are doing +- Public functions must be documented and described in source file and added to README.md to the list of available functions +- There are must be unit-tests for any new functions and improvements + +## Financial contributions + +We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator). +Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed. + + +## Credits + + +### Contributors + +Thank you to all the people who have already contributed to govalidator! + + + +### Backers + +Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)] + + + + +### Sponsors + +Thank you to all our sponsors! 
(please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) + + + + + + + + + + + \ No newline at end of file diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/LICENSE b/hack/tools/vendor/github.com/asaskevich/govalidator/LICENSE new file mode 100644 index 0000000000..2f9a31fadf --- /dev/null +++ b/hack/tools/vendor/github.com/asaskevich/govalidator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Alex Saskevich + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/README.md b/hack/tools/vendor/github.com/asaskevich/govalidator/README.md new file mode 100644 index 0000000000..40f9a87811 --- /dev/null +++ b/hack/tools/vendor/github.com/asaskevich/govalidator/README.md @@ -0,0 +1,507 @@ +govalidator +=========== +[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) [![Coverage Status](https://img.shields.io/coveralls/asaskevich/govalidator.svg)](https://coveralls.io/r/asaskevich/govalidator?branch=master) [![wercker status](https://app.wercker.com/status/1ec990b09ea86c910d5f08b0e02c6043/s "wercker status")](https://app.wercker.com/project/bykey/1ec990b09ea86c910d5f08b0e02c6043) +[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) + +A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). 
+
+#### Installation
+Make sure that Go is installed on your computer.
+Type the following command in your terminal:
+
+	go get github.com/asaskevich/govalidator
+
+or you can get a specific release of the package via `gopkg.in`:
+
+	go get gopkg.in/asaskevich/govalidator.v4
+
+After that, the package is ready to use.
+
+
+#### Import package in your project
+Add the following line to your `*.go` file:
+```go
+import "github.com/asaskevich/govalidator"
+```
+If you don't want to write out the long `govalidator` package name, you can alias the import:
+```go
+import (
+	valid "github.com/asaskevich/govalidator"
+)
+```
+
+#### Activate behavior to require all fields to have a validation tag by default
+`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
+
+`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but packages that need to distinguish between the `nil` and `zero value` states can enable it. If disabled, both `nil` and `zero` values cause validation errors.
+
+```go
+import "github.com/asaskevich/govalidator"
+
+func init() {
+	govalidator.SetFieldsRequiredByDefault(true)
+}
+```
+
+Here's some code to explain it:
+```go
+// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+type exampleStruct struct {
+	Name  string ``
+	Email string `valid:"email"`
+}
+
+// this, however, will only fail when Email is empty or an invalid email address:
+type exampleStruct2 struct {
+	Name  string `valid:"-"`
+	Email string `valid:"email"`
+}
+
+// lastly, this will only fail when Email is an invalid email address but not when it's empty:
+type exampleStruct3 struct {
+	Name  string `valid:"-"`
+	Email string `valid:"email,optional"`
+}
+```
+
+#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
+##### Custom validator function signature
+A context was added as the second parameter; for structs, this is the object being validated – this makes dependent validation possible.
+```go
+import "github.com/asaskevich/govalidator"
+
+// old signature
+func(i interface{}) bool
+
+// new signature
+func(i interface{}, o interface{}) bool
+```
+
+##### Adding a custom validator
+This was changed to prevent data races when accessing custom validators.
+```go
+import "github.com/asaskevich/govalidator"
+
+// before
+govalidator.CustomTypeTagMap["customByteArrayValidator"] = CustomTypeValidator(func(i interface{}, o interface{}) bool {
+	// ...
+})
+
+// after
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, o interface{}) bool {
+	// ...
+})) +``` + +#### List of functions: +```go +func Abs(value float64) float64 +func BlackList(str, chars string) string +func ByteLength(str string, params ...string) bool +func CamelCaseToUnderscore(str string) string +func Contains(str, substring string) bool +func Count(array []interface{}, iterator ConditionIterator) int +func Each(array []interface{}, iterator Iterator) +func ErrorByField(e error, field string) string +func ErrorsByField(e error) map[string]string +func Filter(array []interface{}, iterator ConditionIterator) []interface{} +func Find(array []interface{}, iterator ConditionIterator) interface{} +func GetLine(s string, index int) (string, error) +func GetLines(s string) []string +func InRange(value, left, right float64) bool +func IsASCII(str string) bool +func IsAlpha(str string) bool +func IsAlphanumeric(str string) bool +func IsBase64(str string) bool +func IsByteLength(str string, min, max int) bool +func IsCIDR(str string) bool +func IsCreditCard(str string) bool +func IsDNSName(str string) bool +func IsDataURI(str string) bool +func IsDialString(str string) bool +func IsDivisibleBy(str, num string) bool +func IsEmail(str string) bool +func IsFilePath(str string) (bool, int) +func IsFloat(str string) bool +func IsFullWidth(str string) bool +func IsHalfWidth(str string) bool +func IsHexadecimal(str string) bool +func IsHexcolor(str string) bool +func IsHost(str string) bool +func IsIP(str string) bool +func IsIPv4(str string) bool +func IsIPv6(str string) bool +func IsISBN(str string, version int) bool +func IsISBN10(str string) bool +func IsISBN13(str string) bool +func IsISO3166Alpha2(str string) bool +func IsISO3166Alpha3(str string) bool +func IsISO693Alpha2(str string) bool +func IsISO693Alpha3b(str string) bool +func IsISO4217(str string) bool +func IsIn(str string, params ...string) bool +func IsInt(str string) bool +func IsJSON(str string) bool +func IsLatitude(str string) bool +func IsLongitude(str string) bool +func IsLowerCase(str string) bool +func IsMAC(str string) bool +func IsMongoID(str string) bool +func IsMultibyte(str string) bool +func IsNatural(value float64) bool +func IsNegative(value float64) bool +func IsNonNegative(value float64) bool +func IsNonPositive(value float64) bool +func IsNull(str string) bool +func IsNumeric(str string) bool +func IsPort(str string) bool +func IsPositive(value float64) bool +func IsPrintableASCII(str string) bool +func IsRFC3339(str string) bool +func IsRFC3339WithoutZone(str string) bool +func IsRGBcolor(str string) bool +func IsRequestURI(rawurl string) bool +func IsRequestURL(rawurl string) bool +func IsSSN(str string) bool +func IsSemver(str string) bool +func IsTime(str string, format string) bool +func IsURL(str string) bool +func IsUTFDigit(str string) bool +func IsUTFLetter(str string) bool +func IsUTFLetterNumeric(str string) bool +func IsUTFNumeric(str string) bool +func IsUUID(str string) bool +func IsUUIDv3(str string) bool +func IsUUIDv4(str string) bool +func IsUUIDv5(str string) bool +func IsUpperCase(str string) bool +func IsVariableWidth(str string) bool +func IsWhole(value float64) bool +func LeftTrim(str, chars string) string +func Map(array []interface{}, iterator ResultIterator) []interface{} +func Matches(str, pattern string) bool +func NormalizeEmail(str string) (string, error) +func PadBoth(str string, padStr string, padLen int) string +func PadLeft(str string, padStr string, padLen int) string +func PadRight(str string, padStr string, padLen int) string +func Range(str string, params 
...string) bool
+func RemoveTags(s string) string
+func ReplacePattern(str, pattern, replace string) string
+func Reverse(s string) string
+func RightTrim(str, chars string) string
+func RuneLength(str string, params ...string) bool
+func SafeFileName(str string) string
+func SetFieldsRequiredByDefault(value bool)
+func Sign(value float64) float64
+func StringLength(str string, params ...string) bool
+func StringMatches(s string, params ...string) bool
+func StripLow(str string, keepNewLines bool) string
+func ToBoolean(str string) (bool, error)
+func ToFloat(str string) (float64, error)
+func ToInt(value interface{}) (int64, error)
+func ToJSON(obj interface{}) (string, error)
+func ToString(obj interface{}) string
+func Trim(str, chars string) string
+func Truncate(str string, length int, ending string) string
+func UnderscoreToCamelCase(s string) string
+func ValidateStruct(s interface{}) (bool, error)
+func WhiteList(str, chars string) string
+type ConditionIterator
+type CustomTypeValidator
+type Error
+func (e Error) Error() string
+type Errors
+func (es Errors) Error() string
+func (es Errors) Errors() []error
+type ISO3166Entry
+type Iterator
+type ParamValidator
+type ResultIterator
+type UnsupportedTypeError
+func (e *UnsupportedTypeError) Error() string
+type Validator
+```
+
+#### Examples
+###### IsURL
+```go
+println(govalidator.IsURL(`http://user@pass:domain.com/path/page`))
+```
+###### ToString
+```go
+type User struct {
+	FirstName string
+	LastName  string
+}
+
+str := govalidator.ToString(&User{"John", "Juan"})
+println(str)
+```
+###### Each, Map, Filter, Count for slices
+Each iterates over the slice/array and calls Iterator for every item.
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.Iterator = func(value interface{}, index int) {
+	println(value.(int))
+}
+govalidator.Each(data, fn)
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5}
+var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
+	return value.(int) * 3
+}
+_ = govalidator.Map(data, fn) // result = []interface{}{3, 6, 9, 12, 15}
+```
+```go
+data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
+	return value.(int)%2 == 0
+}
+_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
+_ = govalidator.Count(data, fn) // result = 5
+```
+###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)
+If you want to validate structs, you can use the `valid` tag on any field in your structure. All validators applied to a field in one tag are separated by commas. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this:
+```go
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+	return str == "duck"
+})
+```
+For completely custom validators (interface-based), see below.
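
Before the full validator list, here is a small sketch of how a failed `ValidateStruct` call can be inspected per field with `ErrorsByField` (both functions appear in the list above); the `Signup` struct and its field values are illustrative assumptions, not part of the library:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

// Signup is a hypothetical struct; only the `valid` tags matter here.
type Signup struct {
	Email string `valid:"email"`
	Site  string `valid:"url,optional"`
}

func main() {
	ok, err := govalidator.ValidateStruct(Signup{Email: "not-an-email"})
	if !ok {
		// ErrorsByField flattens the returned error into a field -> message map.
		for field, msg := range govalidator.ErrorsByField(err) {
			fmt.Println(field, "=>", msg)
		}
	}
}
```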
+
+Here is a list of available validators for struct fields (validator - function used):
+```go
+"email": IsEmail,
+"url": IsURL,
+"dialstring": IsDialString,
+"requrl": IsRequestURL,
+"requri": IsRequestURI,
+"alpha": IsAlpha,
+"utfletter": IsUTFLetter,
+"alphanum": IsAlphanumeric,
+"utfletternum": IsUTFLetterNumeric,
+"numeric": IsNumeric,
+"utfnumeric": IsUTFNumeric,
+"utfdigit": IsUTFDigit,
+"hexadecimal": IsHexadecimal,
+"hexcolor": IsHexcolor,
+"rgbcolor": IsRGBcolor,
+"lowercase": IsLowerCase,
+"uppercase": IsUpperCase,
+"int": IsInt,
+"float": IsFloat,
+"null": IsNull,
+"uuid": IsUUID,
+"uuidv3": IsUUIDv3,
+"uuidv4": IsUUIDv4,
+"uuidv5": IsUUIDv5,
+"creditcard": IsCreditCard,
+"isbn10": IsISBN10,
+"isbn13": IsISBN13,
+"json": IsJSON,
+"multibyte": IsMultibyte,
+"ascii": IsASCII,
+"printableascii": IsPrintableASCII,
+"fullwidth": IsFullWidth,
+"halfwidth": IsHalfWidth,
+"variablewidth": IsVariableWidth,
+"base64": IsBase64,
+"datauri": IsDataURI,
+"ip": IsIP,
+"port": IsPort,
+"ipv4": IsIPv4,
+"ipv6": IsIPv6,
+"dns": IsDNSName,
+"host": IsHost,
+"mac": IsMAC,
+"latitude": IsLatitude,
+"longitude": IsLongitude,
+"ssn": IsSSN,
+"semver": IsSemver,
+"rfc3339": IsRFC3339,
+"rfc3339WithoutZone": IsRFC3339WithoutZone,
+"ISO3166Alpha2": IsISO3166Alpha2,
+"ISO3166Alpha3": IsISO3166Alpha3,
+```
+Validators with parameters:
+
+```go
+"range(min|max)": Range,
+"length(min|max)": ByteLength,
+"runelength(min|max)": RuneLength,
+"stringlength(min|max)": StringLength,
+"matches(pattern)": StringMatches,
+"in(string1|string2|...|stringN)": IsIn,
+"rsapub(keylength)": IsRsaPub,
+```
+
+And here is a small example of usage:
+```go
+type Post struct {
+	Title    string `valid:"alphanum,required"`
+	Message  string `valid:"duck,ascii"`
+	Message2 string `valid:"animal(dog)"`
+	AuthorIP string `valid:"ipv4"`
+	Date     string `valid:"-"`
+}
+post := &Post{
+	Title:    "MyExamplePost",
+	Message:  "duck",
+	Message2: "dog",
+	AuthorIP: "123.234.54.3",
+}
+
+// Add your own struct validation tags
+govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
+	return str == "duck"
+})
+
+// Add your own struct validation tags with parameter
+govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
+	species := params[0]
+	return str == species
+})
+govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")
+
+result, err := govalidator.ValidateStruct(post)
+if err != nil {
+	println("error: " + err.Error())
+}
+println(result)
+```
+###### WhiteList
+```go
+// Remove all characters from the string except those between "a" and "z"
+println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
+```
+
+###### Custom validation functions
+Custom validation using your own domain-specific validators is also available - here's an example of how to use it:
+```go
+import "github.com/asaskevich/govalidator"
+
+type CustomByteArray [6]byte // custom types are supported and can be validated
+
+type StructWithCustomByteArray struct {
+	ID              CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
+	Email           string          `valid:"email"`
+	CustomMinLength int             `valid:"-"`
+}
+
+govalidator.CustomTypeTagMap.Set("customByteArrayValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
+	switch v := context.(type) { // you can type switch on the context interface being validated
+	case
StructWithCustomByteArray:
+		// you can check and validate against some other field in the context,
+		// return early or not validate against the context at all – your choice
+	case SomeOtherType:
+		// ...
+	default:
+		// expecting some other type? Throw/panic here or continue
+	}
+
+	switch v := i.(type) { // type switch on the struct field being validated
+	case CustomByteArray:
+		for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
+			if e != 0 {
+				return true
+			}
+		}
+	}
+	return false
+}))
+govalidator.CustomTypeTagMap.Set("customMinLengthValidator", CustomTypeValidator(func(i interface{}, context interface{}) bool {
+	switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
+	case StructWithCustomByteArray:
+		return len(v.ID) >= v.CustomMinLength
+	}
+	return false
+}))
+```
+
+###### Custom error messages
+Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it:
+```go
+type Ticket struct {
+	Id        int64  `json:"id"`
+	FirstName string `json:"firstname" valid:"required~First name is blank"`
+}
+```
+
+#### Notes
+Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
+Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).
+
+#### Support
+If you have a contribution to the package, feel free to create a Pull Request or an Issue.
+
+#### What to contribute
+If you don't know where to start, here are some features and tasks that still need to be done:
+
+- [ ] Refactor code
+- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
+- [ ] Create an up-to-date list of contributors and projects that currently use this package
+- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
+- [ ] Update the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
+- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
+- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC`, etc.
+- [ ] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
+- [ ] Implement fuzz testing
+- [ ] Implement some struct/map/array utilities
+- [ ] Implement map/array validation
+- [ ] Implement benchmarking
+- [ ] Implement a batch of examples
+- [ ] Look at forks for new features and fixes
+
+#### Advice
+Feel free to create what you want, but keep the following in mind when you implement new features:
+- Code must be clear and readable, and the names of variables/constants must clearly describe what they hold
+- Public functions must be documented in the source file and added to the list of available functions in README.md
+- There must be unit tests for any new functions and improvements
+
+## Credits
+### Contributors
+
+This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
+
+#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
+* [Daniel Lohse](https://github.com/annismckenzie)
+* [Attila Oláh](https://github.com/attilaolah)
+* [Daniel Korner](https://github.com/Dadie)
+* [Steven Wilkin](https://github.com/stevenwilkin)
+* [Deiwin Sarjas](https://github.com/deiwin)
+* [Noah Shibley](https://github.com/slugmobile)
+* [Nathan Davies](https://github.com/nathj07)
+* [Matt Sanford](https://github.com/mzsanford)
+* [Simon ccl1115](https://github.com/ccl1115)
+
+
+### Backers
+
+Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
+
+
+### Sponsors
+
+Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]
+
+
+## License
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
\ No newline at end of file
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/arrays.go b/hack/tools/vendor/github.com/asaskevich/govalidator/arrays.go
new file mode 100644
index 0000000000..5bace2654d
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/arrays.go
@@ -0,0 +1,58 @@
+package govalidator
+
+// Iterator is the function that accepts an element of a slice/array and its index
+type Iterator func(interface{}, int)
+
+// ResultIterator is the function that accepts an element of a slice/array and its index and returns any result
+type ResultIterator func(interface{}, int) interface{}
+
+// ConditionIterator is the function that accepts an element of a slice/array and its index and returns a boolean
+type ConditionIterator func(interface{}, int) bool
+
+// Each iterates over the slice and applies Iterator to every item
+func Each(array []interface{}, iterator Iterator) {
+	for index, data := range array {
+		iterator(data, index)
+	}
+}
+
+// Map iterates over the slice and applies ResultIterator to every item. Returns a new slice as a result.
+func Map(array []interface{}, iterator ResultIterator) []interface{} {
+	var result = make([]interface{}, len(array))
+	for index, data := range array {
+		result[index] = iterator(data, index)
+	}
+	return result
+}
+
+// Find iterates over the slice and applies ConditionIterator to every item. Returns the first item that meets ConditionIterator, or nil otherwise.
+func Find(array []interface{}, iterator ConditionIterator) interface{} {
+	for index, data := range array {
+		if iterator(data, index) {
+			return data
+		}
+	}
+	return nil
+}
+
+// Filter iterates over the slice and applies ConditionIterator to every item. Returns a new slice.
+func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
+	var result = make([]interface{}, 0)
+	for index, data := range array {
+		if iterator(data, index) {
+			result = append(result, data)
+		}
+	}
+	return result
+}
+
+// Count iterates over the slice and applies ConditionIterator to every item. Returns the count of items that meet ConditionIterator.
+func Count(array []interface{}, iterator ConditionIterator) int {
+	count := 0
+	for index, data := range array {
+		if iterator(data, index) {
+			count = count + 1
+		}
+	}
+	return count
+}
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/converter.go b/hack/tools/vendor/github.com/asaskevich/govalidator/converter.go
new file mode 100644
index 0000000000..cf1e5d569b
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/converter.go
@@ -0,0 +1,64 @@
+package govalidator
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// ToString converts the input to a string.
+func ToString(obj interface{}) string {
+	res := fmt.Sprintf("%v", obj)
+	return string(res)
+}
+
+// ToJSON converts the input to a valid JSON string
+func ToJSON(obj interface{}) (string, error) {
+	res, err := json.Marshal(obj)
+	if err != nil {
+		res = []byte("")
+	}
+	return string(res), err
+}
+
+// ToFloat converts the input string to a float, or 0.0 if the input is not a float.
+func ToFloat(str string) (float64, error) {
+	res, err := strconv.ParseFloat(str, 64)
+	if err != nil {
+		res = 0.0
+	}
+	return res, err
+}
+
+// ToInt converts the input string or any integer type to an int64, or 0 if the input is not an integer.
+func ToInt(value interface{}) (res int64, err error) {
+	val := reflect.ValueOf(value)
+
+	switch value.(type) {
+	case int, int8, int16, int32, int64:
+		res = val.Int()
+	case uint, uint8, uint16, uint32, uint64:
+		res = int64(val.Uint())
+	case string:
+		if IsInt(val.String()) {
+			res, err = strconv.ParseInt(val.String(), 0, 64)
+			if err != nil {
+				res = 0
+			}
+		} else {
+			err = fmt.Errorf("ToInt: invalid numeric value %v", value)
+			res = 0
+		}
+	default:
+		err = fmt.Errorf("ToInt: unknown numeric type %T", value)
+		res = 0
+	}
+
+	return
+}
+
+// ToBoolean converts the input string to a boolean.
+func ToBoolean(str string) (bool, error) {
+	return strconv.ParseBool(str)
+}
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/error.go b/hack/tools/vendor/github.com/asaskevich/govalidator/error.go
new file mode 100644
index 0000000000..655b750cb8
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/error.go
@@ -0,0 +1,43 @@
+package govalidator
+
+import "strings"
+
+// Errors is an array of multiple errors and conforms to the error interface.
+type Errors []error
+
+// Errors returns itself.
+func (es Errors) Errors() []error {
+	return es
+}
+
+func (es Errors) Error() string {
+	var errs []string
+	for _, e := range es {
+		errs = append(errs, e.Error())
+	}
+	return strings.Join(errs, ";")
+}
+
+// Error encapsulates a name, an error and whether there's a custom error message or not.
+type Error struct {
+	Name                     string
+	Err                      error
+	CustomErrorMessageExists bool
+
+	// Validator indicates the name of the validator that failed
+	Validator string
+	Path      []string
+}
+
+func (e Error) Error() string {
+	if e.CustomErrorMessageExists {
+		return e.Err.Error()
+	}
+
+	errName := e.Name
+	if len(e.Path) > 0 {
+		errName = strings.Join(append(e.Path, e.Name), ".")
+	}
+
+	return errName + ": " + e.Err.Error()
+}
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/numerics.go b/hack/tools/vendor/github.com/asaskevich/govalidator/numerics.go
new file mode 100644
index 0000000000..7e6c652e14
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/numerics.go
@@ -0,0 +1,97 @@
+package govalidator
+
+import (
+	"math"
+	"reflect"
+)
+
+// Abs returns the absolute value of the number
+func Abs(value float64) float64 {
+	return math.Abs(value)
+}
+
+// Sign returns the signum of the number: 1 if value > 0, -1 if value < 0, 0 otherwise
+func Sign(value float64) float64 {
+	if value > 0 {
+		return 1
+	} else if value < 0 {
+		return -1
+	} else {
+		return 0
+	}
+}
+
+// IsNegative returns true if value < 0
+func IsNegative(value float64) bool {
+	return value < 0
+}
+
+// IsPositive returns true if value > 0
+func IsPositive(value float64) bool {
+	return value > 0
+}
+
+// IsNonNegative returns true if value >= 0
+func IsNonNegative(value float64) bool {
+	return value >= 0
+}
+
+// IsNonPositive returns true if value <= 0
+func IsNonPositive(value float64) bool {
+	return value <= 0
+}
+
+// InRangeInt returns true if value lies between left and right border
+func InRangeInt(value, left, right interface{}) bool {
+	value64, _ := ToInt(value)
+	left64, _ := ToInt(left)
+	right64, _ := ToInt(right)
+	if left64 > right64 {
+		left64, right64 = right64, left64
+	}
+	return value64 >= left64 && value64 <= right64
+}
+
+// InRangeFloat32 returns true if value lies between left and right border
+func InRangeFloat32(value, left, right float32) bool {
+	if left > right {
+		left, right = right, left
+	}
+	return value >= left && value <= right
+}
+
+// InRangeFloat64 returns true if value lies between left and right border
+func InRangeFloat64(value, left, right float64) bool {
+	if left > right {
+		left, right = right, left
+	}
+	return value >= left && value <= right
+}
+
+// InRange returns true if value lies between left and right border; generic helper for int, float32 or float64 (all three arguments must be of the same type)
+func InRange(value interface{}, left interface{}, right interface{}) bool {
+
+	reflectValue := reflect.TypeOf(value).Kind()
+	reflectLeft := reflect.TypeOf(left).Kind()
+	reflectRight := reflect.TypeOf(right).Kind()
+
+	if reflectValue == reflect.Int && reflectLeft == reflect.Int && reflectRight == reflect.Int {
+		return InRangeInt(value.(int), left.(int), right.(int))
+	} else if reflectValue == reflect.Float32 && reflectLeft == reflect.Float32 && reflectRight == reflect.Float32 {
+		return InRangeFloat32(value.(float32), left.(float32), right.(float32))
+	} else if reflectValue == reflect.Float64 && reflectLeft == reflect.Float64 && reflectRight == reflect.Float64 {
+		return InRangeFloat64(value.(float64), left.(float64), right.(float64))
+	} else {
+		return false
+	}
+}
+
+// IsWhole returns true if value is a whole number
+func IsWhole(value float64) bool {
+	return math.Remainder(value, 1) == 0
+}
+
+// IsNatural returns true if value is a natural number (positive and whole)
+func IsNatural(value float64) bool {
+	return IsWhole(value) && IsPositive(value)
+}
diff --git
a/hack/tools/vendor/github.com/asaskevich/govalidator/patterns.go b/hack/tools/vendor/github.com/asaskevich/govalidator/patterns.go new file mode 100644 index 0000000000..61a05d438e --- /dev/null +++ b/hack/tools/vendor/github.com/asaskevich/govalidator/patterns.go @@ -0,0 +1,101 @@ +package govalidator + +import "regexp" + +// Basic regular expressions for validating strings +const ( + Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$" + ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" + ISBN13 string = "^(?:[0-9]{13})$" + UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + Alpha string = "^[a-zA-Z]+$" + Alphanumeric string = "^[a-zA-Z0-9]+$" + Numeric string = "^[0-9]+$" + Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" + Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" + Hexadecimal string = "^[0-9a-fA-F]+$" + Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" + ASCII string = "^[\x00-\x7F]+$" + Multibyte string = "[^\x00-\x7F]" + FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" + Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + PrintableASCII string = "^[\x20-\x7E]+$" + DataURI string = "^data:.+\\/(.+);base64$" + Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` + IP string = 
`(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` + URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` + URLUsername string = `(\S+(:\S*)?@)` + URLPath string = `((\/|\?|#)[^\s]*)` + URLPort string = `(:(\d{1,5}))` + URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))` + URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` + URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` + SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` + WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` + UnixPath string = `^(/[^/\x00]*)+/?$` + Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" + tagName string = "valid" + hasLowerCase string = ".*[[:lower:]]" + hasUpperCase string = ".*[[:upper:]]" + hasWhitespace string = ".*[[:space:]]" + hasWhitespaceOnly string = "^[[:space:]]+$" +) + +// Used by IsFilePath func +const ( + // Unknown is unresolved OS type + Unknown = iota + // Win is Windows type + Win + // Unix is *nix OS types + Unix +) + +var ( + userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") + hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") + userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") + rxEmail = regexp.MustCompile(Email) + rxCreditCard = regexp.MustCompile(CreditCard) + rxISBN10 = regexp.MustCompile(ISBN10) + rxISBN13 = regexp.MustCompile(ISBN13) + rxUUID3 = regexp.MustCompile(UUID3) + rxUUID4 = regexp.MustCompile(UUID4) + rxUUID5 = regexp.MustCompile(UUID5) + rxUUID = regexp.MustCompile(UUID) + rxAlpha = regexp.MustCompile(Alpha) + rxAlphanumeric = regexp.MustCompile(Alphanumeric) + rxNumeric = regexp.MustCompile(Numeric) + rxInt = regexp.MustCompile(Int) + rxFloat = regexp.MustCompile(Float) + rxHexadecimal = regexp.MustCompile(Hexadecimal) + rxHexcolor = regexp.MustCompile(Hexcolor) + rxRGBcolor = regexp.MustCompile(RGBcolor) + rxASCII = regexp.MustCompile(ASCII) + rxPrintableASCII = regexp.MustCompile(PrintableASCII) + rxMultibyte = regexp.MustCompile(Multibyte) + rxFullWidth = regexp.MustCompile(FullWidth) + rxHalfWidth = regexp.MustCompile(HalfWidth) + rxBase64 = regexp.MustCompile(Base64) + rxDataURI = regexp.MustCompile(DataURI) + rxLatitude = regexp.MustCompile(Latitude) + rxLongitude = regexp.MustCompile(Longitude) + rxDNSName = regexp.MustCompile(DNSName) + rxURL = regexp.MustCompile(URL) + rxSSN = regexp.MustCompile(SSN) + rxWinPath = regexp.MustCompile(WinPath) + rxUnixPath = 
regexp.MustCompile(UnixPath)
+	rxSemver            = regexp.MustCompile(Semver)
+	rxHasLowerCase      = regexp.MustCompile(hasLowerCase)
+	rxHasUpperCase      = regexp.MustCompile(hasUpperCase)
+	rxHasWhitespace     = regexp.MustCompile(hasWhitespace)
+	rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
+)
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/types.go b/hack/tools/vendor/github.com/asaskevich/govalidator/types.go
new file mode 100644
index 0000000000..4f7e9274ad
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/types.go
@@ -0,0 +1,636 @@
+package govalidator
+
+import (
+	"reflect"
+	"regexp"
+	"sort"
+	"sync"
+)
+
+// Validator is a wrapper for a validator function that returns bool and accepts string.
+type Validator func(str string) bool
+
+// CustomTypeValidator is a wrapper for validator functions that return bool and accept any type.
+// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
+type CustomTypeValidator func(i interface{}, o interface{}) bool
+
+// ParamValidator is a wrapper for validator functions that accept additional parameters.
+type ParamValidator func(str string, params ...string) bool
+type tagOptionsMap map[string]tagOption
+
+func (t tagOptionsMap) orderedKeys() []string {
+	var keys []string
+	for k := range t {
+		keys = append(keys, k)
+	}
+
+	sort.Slice(keys, func(a, b int) bool {
+		return t[keys[a]].order < t[keys[b]].order
+	})
+
+	return keys
+}
+
+type tagOption struct {
+	name               string
+	customErrorMessage string
+	order              int
+}
+
+// UnsupportedTypeError is a wrapper for reflect.Type
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+// ParamTagMap is a map of functions that accept additional parameters
+var ParamTagMap = map[string]ParamValidator{
+	"length":       ByteLength,
+	"range":        Range,
+	"runelength":   RuneLength,
+	"stringlength": StringLength,
+	"matches":      StringMatches,
+	"in":           isInRaw,
+	"rsapub":       IsRsaPub,
+}
+
+// ParamTagRegexMap maps param tags to their respective regexes.
+var ParamTagRegexMap = map[string]*regexp.Regexp{
+	"range":        regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
+	"length":       regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
+	"runelength":   regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
+	"stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
+	"in":           regexp.MustCompile(`^in\((.*)\)`),
+	"matches":      regexp.MustCompile(`^matches\((.+)\)$`),
+	"rsapub":       regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
+}
+
+type customTypeTagMap struct {
+	validators map[string]CustomTypeValidator
+
+	sync.RWMutex
+}
+
+func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
+	tm.RLock()
+	defer tm.RUnlock()
+	v, ok := tm.validators[name]
+	return v, ok
+}
+
+func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
+	tm.Lock()
+	defer tm.Unlock()
+	tm.validators[name] = ctv
+}
+
+// CustomTypeTagMap is a map of functions that can be used as tags for the ValidateStruct function.
+// Use this to validate compound or custom types that need to be handled as a whole, e.g.
+// `type UUID [16]byte` (this would be handled as an array of bytes).
+var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
+
+// TagMap is a map of functions that can be used as tags for the ValidateStruct function.
+var TagMap = map[string]Validator{ + "email": IsEmail, + "url": IsURL, + "dialstring": IsDialString, + "requrl": IsRequestURL, + "requri": IsRequestURI, + "alpha": IsAlpha, + "utfletter": IsUTFLetter, + "alphanum": IsAlphanumeric, + "utfletternum": IsUTFLetterNumeric, + "numeric": IsNumeric, + "utfnumeric": IsUTFNumeric, + "utfdigit": IsUTFDigit, + "hexadecimal": IsHexadecimal, + "hexcolor": IsHexcolor, + "rgbcolor": IsRGBcolor, + "lowercase": IsLowerCase, + "uppercase": IsUpperCase, + "int": IsInt, + "float": IsFloat, + "null": IsNull, + "uuid": IsUUID, + "uuidv3": IsUUIDv3, + "uuidv4": IsUUIDv4, + "uuidv5": IsUUIDv5, + "creditcard": IsCreditCard, + "isbn10": IsISBN10, + "isbn13": IsISBN13, + "json": IsJSON, + "multibyte": IsMultibyte, + "ascii": IsASCII, + "printableascii": IsPrintableASCII, + "fullwidth": IsFullWidth, + "halfwidth": IsHalfWidth, + "variablewidth": IsVariableWidth, + "base64": IsBase64, + "datauri": IsDataURI, + "ip": IsIP, + "port": IsPort, + "ipv4": IsIPv4, + "ipv6": IsIPv6, + "dns": IsDNSName, + "host": IsHost, + "mac": IsMAC, + "latitude": IsLatitude, + "longitude": IsLongitude, + "ssn": IsSSN, + "semver": IsSemver, + "rfc3339": IsRFC3339, + "rfc3339WithoutZone": IsRFC3339WithoutZone, + "ISO3166Alpha2": IsISO3166Alpha2, + "ISO3166Alpha3": IsISO3166Alpha3, + "ISO4217": IsISO4217, +} + +// ISO3166Entry stores country codes +type ISO3166Entry struct { + EnglishShortName string + FrenchShortName string + Alpha2Code string + Alpha3Code string + Numeric string +} + +//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" +var ISO3166List = []ISO3166Entry{ + {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, + {"Albania", "Albanie (l')", "AL", "ALB", "008"}, + {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, + {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, + {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, + {"Andorra", "Andorre (l')", "AD", "AND", "020"}, + {"Angola", "Angola (l')", "AO", "AGO", "024"}, + {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, + {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, + {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, + {"Australia", "Australie (l')", "AU", "AUS", "036"}, + {"Austria", "Autriche (l')", "AT", "AUT", "040"}, + {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, + {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, + {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, + {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, + {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, + {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, + {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, + {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, + {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, + {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, + {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, + {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, + {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, + {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, + {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, + {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, + {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"}, + {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, + {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"}, + {"Myanmar", "Myanmar (le)", "MM", "MMR", 
"104"}, + {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, + {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, + {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, + {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, + {"Canada", "Canada (le)", "CA", "CAN", "124"}, + {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, + {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, + {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, + {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, + {"Chad", "Tchad (le)", "TD", "TCD", "148"}, + {"Chile", "Chili (le)", "CL", "CHL", "152"}, + {"China", "Chine (la)", "CN", "CHN", "156"}, + {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, + {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, + {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, + {"Colombia", "Colombie (la)", "CO", "COL", "170"}, + {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, + {"Mayotte", "Mayotte", "YT", "MYT", "175"}, + {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, + {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, + {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, + {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, + {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, + {"Cuba", "Cuba", "CU", "CUB", "192"}, + {"Cyprus", "Chypre", "CY", "CYP", "196"}, + {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, + {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, + {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, + {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, + {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, + {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, + {"El Salvador", "El Salvador", "SV", "SLV", "222"}, + {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, + {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, + {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, + {"Estonia", "Estonie (l')", "EE", "EST", "233"}, + {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, + {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, + {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, + {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, + {"Finland", "Finlande (la)", "FI", "FIN", "246"}, + {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, + {"France", "France (la)", "FR", "FRA", "250"}, + {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, + {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, + {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, + {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, + {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, + {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, + {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, + {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, + {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, + {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, + {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, + {"Kiribati", "Kiribati", "KI", "KIR", "296"}, + {"Greece", "Grèce (la)", "GR", "GRC", "300"}, + {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, + {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, + {"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"}, + 
{"Guam", "Guam", "GU", "GUM", "316"}, + {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, + {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, + {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, + {"Haiti", "Haïti", "HT", "HTI", "332"}, + {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, + {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, + {"Honduras", "Honduras (le)", "HN", "HND", "340"}, + {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, + {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, + {"Iceland", "Islande (l')", "IS", "ISL", "352"}, + {"India", "Inde (l')", "IN", "IND", "356"}, + {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, + {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, + {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, + {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, + {"Israel", "Israël", "IL", "ISR", "376"}, + {"Italy", "Italie (l')", "IT", "ITA", "380"}, + {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, + {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, + {"Japan", "Japon (le)", "JP", "JPN", "392"}, + {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, + {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, + {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, + {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, + {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, + {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, + {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, + {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, + {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, + {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, + {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, + {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, + {"Libya", "Libye (la)", "LY", "LBY", "434"}, + {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, + {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, + {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, + {"Macao", "Macao", "MO", "MAC", "446"}, + {"Madagascar", "Madagascar", "MG", "MDG", "450"}, + {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, + {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, + {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, + {"Mali", "Mali (le)", "ML", "MLI", "466"}, + {"Malta", "Malte", "MT", "MLT", "470"}, + {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, + {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, + {"Mauritius", "Maurice", "MU", "MUS", "480"}, + {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, + {"Monaco", "Monaco", "MC", "MCO", "492"}, + {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, + {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, + {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, + {"Montserrat", "Montserrat", "MS", "MSR", "500"}, + {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, + {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, + {"Oman", "Oman", "OM", "OMN", "512"}, + {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, + {"Nauru", "Nauru", "NR", "NRU", "520"}, + {"Nepal", "Népal (le)", "NP", "NPL", "524"}, + {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, + {"Curaçao", "Curaçao", "CW", "CUW", "531"}, + {"Aruba", "Aruba", "AW", "ABW", "533"}, + {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, + {"Bonaire, Sint Eustatius and Saba", "Bonaire, 
Saint-Eustache et Saba", "BQ", "BES", "535"}, + {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, + {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, + {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, + {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, + {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, + {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, + {"Niue", "Niue", "NU", "NIU", "570"}, + {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, + {"Norway", "Norvège (la)", "NO", "NOR", "578"}, + {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, + {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, + {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, + {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, + {"Palau", "Palaos (les)", "PW", "PLW", "585"}, + {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, + {"Panama", "Panama (le)", "PA", "PAN", "591"}, + {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, + {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, + {"Peru", "Pérou (le)", "PE", "PER", "604"}, + {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, + {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, + {"Poland", "Pologne (la)", "PL", "POL", "616"}, + {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, + {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, + {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, + {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, + {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, + {"Réunion", "Réunion (La)", "RE", "REU", "638"}, + {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, + {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, + {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, + {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, + {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, + {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, + {"Anguilla", "Anguilla", "AI", "AIA", "660"}, + {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, + {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, + {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, + {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, + {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, + {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, + {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, + {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, + {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, + {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, + {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, + {"Singapore", "Singapour", "SG", "SGP", "702"}, + {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, + {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, + {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, + {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, + {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, + {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, + {"Spain", "Espagne (l')", "ES", "ESP", "724"}, + {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, + {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, + {"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"}, 
+ {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, + {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, + {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, + {"Sweden", "Suède (la)", "SE", "SWE", "752"}, + {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, + {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, + {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, + {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, + {"Togo", "Togo (le)", "TG", "TGO", "768"}, + {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, + {"Tonga", "Tonga (les)", "TO", "TON", "776"}, + {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, + {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, + {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, + {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, + {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, + {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, + {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, + {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, + {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, + {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, + {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, + {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, + {"Guernsey", "Guernesey", "GG", "GGY", "831"}, + {"Jersey", "Jersey", "JE", "JEY", "832"}, + {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, + {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, + {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, + {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, + {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, + {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, + {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, + {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, + {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, + {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, + {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, + {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, +} + +// ISO4217List is the list of ISO currency codes +var ISO4217List = []string{ + "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", + "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", + "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", + "DJF", "DKK", "DOP", "DZD", + "EGP", "ERN", "ETB", "EUR", + "FJD", "FKP", + "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", + "HKD", "HNL", "HRK", "HTG", "HUF", + "IDR", "ILS", "INR", "IQD", "IRR", "ISK", + "JMD", "JOD", "JPY", + "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", + "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", + "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", + "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", + "OMR", + "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", + "QAR", + "RON", "RSD", "RUB", "RWF", + "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "SVC", "SYP", "SZL", + "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", + "UAH", "UGX", 
"USD", "USN", "UYI", "UYU", "UZS", + "VEF", "VND", "VUV", + "WST", + "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", + "YER", + "ZAR", "ZMW", "ZWL", +} + +// ISO693Entry stores ISO language codes +type ISO693Entry struct { + Alpha3bCode string + Alpha2Code string + English string +} + +//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json +var ISO693List = []ISO693Entry{ + {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, + {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, + {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, + {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, + {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, + {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, + {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, + {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, + {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, + {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, + {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, + {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, + {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, + {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, + {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, + {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, + {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, + {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, + {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, + {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, + {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, + {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, + {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, + {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, + {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, + {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, + {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, + {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, + {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, + {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, + {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, + {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, + {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, + {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, + {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, + {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, + {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, + {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, + {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, + {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, + {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, + {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, + {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, + {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, + {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, + {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, + {Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"}, + {Alpha3bCode: "fry", Alpha2Code: 
"fy", English: "Western Frisian"}, + {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, + {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, + {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, + {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, + {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, + {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, + {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, + {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, + {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, + {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, + {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, + {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, + {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, + {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, + {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, + {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, + {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, + {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, + {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, + {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, + {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, + {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, + {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, + {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, + {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, + {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, + {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, + {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, + {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, + {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, + {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, + {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, + {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, + {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, + {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, + {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, + {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, + {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, + {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, + {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, + {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, + {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, + {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, + {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, + {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, + {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, + {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, + {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, + {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, + {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, + {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, + {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, + {Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, + {Alpha3bCode: "mac", 
Alpha2Code: "mk", English: "Macedonian"}, + {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, + {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, + {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, + {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, + {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, + {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, + {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, + {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, + {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, + {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, + {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, + {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, + {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, + {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, + {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, + {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, + {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, + {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, + {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, + {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, + {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, + {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, + {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, + {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, + {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, + {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, + {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, + {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, + {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, + {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, + {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, + {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, + {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, + {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, + {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, + {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, + {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, + {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, + {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, + {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, + {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, + {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, + {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, + {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, + {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, + {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, + {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, + {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, + {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, + {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, + {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, + {Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"}, + {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, + 
{Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, + {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, + {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, + {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, + {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, + {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, + {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, + {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, + {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, + {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, + {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, + {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, + {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, + {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, + {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, + {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, + {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, + {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, + {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, + {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, + {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, + {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, + {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, + {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, + {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, + {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, + {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, + {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, + {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, +} diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/utils.go b/hack/tools/vendor/github.com/asaskevich/govalidator/utils.go new file mode 100644 index 0000000000..a0b706a743 --- /dev/null +++ b/hack/tools/vendor/github.com/asaskevich/govalidator/utils.go @@ -0,0 +1,270 @@ +package govalidator + +import ( + "errors" + "fmt" + "html" + "math" + "path" + "regexp" + "strings" + "unicode" + "unicode/utf8" +) + +// Contains check if the string contains the substring. +func Contains(str, substring string) bool { + return strings.Contains(str, substring) +} + +// Matches check if string matches the pattern (pattern is regular expression) +// In case of error return false +func Matches(str, pattern string) bool { + match, _ := regexp.MatchString(pattern, str) + return match +} + +// LeftTrim trim characters from the left-side of the input. +// If second argument is empty, it's will be remove leading spaces. +func LeftTrim(str, chars string) string { + if chars == "" { + return strings.TrimLeftFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("^[" + chars + "]+") + return r.ReplaceAllString(str, "") +} + +// RightTrim trim characters from the right-side of the input. +// If second argument is empty, it's will be remove spaces. +func RightTrim(str, chars string) string { + if chars == "" { + return strings.TrimRightFunc(str, unicode.IsSpace) + } + r, _ := regexp.Compile("[" + chars + "]+$") + return r.ReplaceAllString(str, "") +} + +// Trim trim characters from both sides of the input. +// If second argument is empty, it's will be remove spaces. +func Trim(str, chars string) string { + return LeftTrim(RightTrim(str, chars), chars) +} + +// WhiteList remove characters that do not appear in the whitelist. 
+// WhiteList removes characters that do not appear in the whitelist.
+func WhiteList(str, chars string) string {
+	pattern := "[^" + chars + "]+"
+	r, _ := regexp.Compile(pattern)
+	return r.ReplaceAllString(str, "")
+}
+
+// BlackList removes characters that appear in the blacklist.
+func BlackList(str, chars string) string {
+	pattern := "[" + chars + "]+"
+	r, _ := regexp.Compile(pattern)
+	return r.ReplaceAllString(str, "")
+}
+
+// StripLow removes characters with a numerical value < 32 or equal to 127, mostly control characters.
+// If keepNewLines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
+func StripLow(str string, keepNewLines bool) string {
+	chars := ""
+	if keepNewLines {
+		chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
+	} else {
+		chars = "\x00-\x1F\x7F"
+	}
+	return BlackList(str, chars)
+}
+
+// ReplacePattern replaces a regular expression pattern in the string.
+func ReplacePattern(str, pattern, replace string) string {
+	r, _ := regexp.Compile(pattern)
+	return r.ReplaceAllString(str, replace)
+}
+
+// Escape replaces <, >, & and " with HTML entities.
+var Escape = html.EscapeString
+
+func addSegment(inrune, segment []rune) []rune {
+	if len(segment) == 0 {
+		return inrune
+	}
+	if len(inrune) != 0 {
+		inrune = append(inrune, '_')
+	}
+	inrune = append(inrune, segment...)
+	return inrune
+}
+
+// UnderscoreToCamelCase converts from underscore separated form to camel case form.
+// Ex.: my_func => MyFunc
+func UnderscoreToCamelCase(s string) string {
+	return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
+}
+
+// CamelCaseToUnderscore converts from camel case form to underscore separated form.
+// Ex.: MyFunc => my_func
+func CamelCaseToUnderscore(str string) string {
+	var output []rune
+	var segment []rune
+	for _, r := range str {
+
+		// do not treat numbers as separate segments
+		if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
+			output = addSegment(output, segment)
+			segment = nil
+		}
+		segment = append(segment, unicode.ToLower(r))
+	}
+	output = addSegment(output, segment)
+	return string(output)
+}
+
+// Reverse returns the reversed string.
+func Reverse(s string) string {
+	r := []rune(s)
+	for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
+		r[i], r[j] = r[j], r[i]
+	}
+	return string(r)
+}
+
+// GetLines splits the string by "\n" and returns an array of lines.
+func GetLines(s string) []string {
+	return strings.Split(s, "\n")
+}
+
+// GetLine returns the specified line of a multiline string.
+func GetLine(s string, index int) (string, error) {
+	lines := GetLines(s)
+	if index < 0 || index >= len(lines) {
+		return "", errors.New("line index out of bounds")
+	}
+	return lines[index], nil
+}
+
+// RemoveTags removes all tags from an HTML string.
+func RemoveTags(s string) string {
+	return ReplacePattern(s, "<[^>]*>", "")
+}
+
+// SafeFileName returns a safe string that can be used in file names.
+func SafeFileName(str string) string {
+	name := strings.ToLower(str)
+	name = path.Clean(path.Base(name))
+	name = strings.Trim(name, " ")
+	separators, err := regexp.Compile(`[ &_=+:]`)
+	if err == nil {
+		name = separators.ReplaceAllString(name, "-")
+	}
+	legal, err := regexp.Compile(`[^[:alnum:]-.]`)
+	if err == nil {
+		name = legal.ReplaceAllString(name, "")
+	}
+	for strings.Contains(name, "--") {
+		name = strings.Replace(name, "--", "-", -1)
+	}
+	return name
+}
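Likewise, a small sketch of the case-conversion and file-name sanitizers above (illustrative; the SafeFileName output is derived from the regex chain shown):

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.CamelCaseToUnderscore("MyFunc"))    // "my_func"
	fmt.Println(govalidator.UnderscoreToCamelCase("my_func"))   // "MyFunc"
	fmt.Println(govalidator.SafeFileName("My Report (v2).PDF")) // "my-report-v2.pdf"
}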
+// NormalizeEmail canonicalizes an email address.
+// The local part and the hostname are both lowercased; hostnames are case-insensitive, and
+// the local part is treated as case-insensitive as well for hosts that are known to behave that way (currently only GMail).
+// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and
+// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are
+// normalized to @gmail.com.
+func NormalizeEmail(str string) (string, error) {
+	if !IsEmail(str) {
+		return "", fmt.Errorf("%s is not an email", str)
+	}
+	parts := strings.Split(str, "@")
+	parts[0] = strings.ToLower(parts[0])
+	parts[1] = strings.ToLower(parts[1])
+	if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
+		parts[1] = "gmail.com"
+		parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
+	}
+	return strings.Join(parts, "@"), nil
+}
+
+// Truncate truncates a string to the closest length without breaking words, appending the ending string.
+func Truncate(str string, length int, ending string) string {
+	var aftstr, befstr string
+	if len(str) > length {
+		words := strings.Fields(str)
+		before, present := 0, 0
+		for i := range words {
+			befstr = aftstr
+			before = present
+			aftstr = aftstr + words[i] + " "
+			present = len(aftstr)
+			if present > length && i != 0 {
+				if (length - before) < (present - length) {
+					return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
+				}
+				return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
+			}
+		}
+	}
+
+	return str
+}
+
+// PadLeft pads the left side of the string if its size is less than the indicated pad length.
+func PadLeft(str string, padStr string, padLen int) string {
+	return buildPadStr(str, padStr, padLen, true, false)
+}
+
+// PadRight pads the right side of the string if its size is less than the indicated pad length.
+func PadRight(str string, padStr string, padLen int) string {
+	return buildPadStr(str, padStr, padLen, false, true)
+}
+
+// PadBoth pads both sides of the string if its size is less than the indicated pad length.
+func PadBoth(str string, padStr string, padLen int) string {
+	return buildPadStr(str, padStr, padLen, true, true)
+}
+
+// buildPadStr pads the string on the left, the right or both sides; note that the padding
+// string can be unicode and more than one character.
+func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
+
+	// When the padded length is less than the current string size
+	if padLen < utf8.RuneCountInString(str) {
+		return str
+	}
+
+	padLen -= utf8.RuneCountInString(str)
+
+	targetLen := padLen
+
+	targetLenLeft := targetLen
+	targetLenRight := targetLen
+	if padLeft && padRight {
+		targetLenLeft = padLen / 2
+		targetLenRight = padLen - targetLenLeft
+	}
+
+	strToRepeatLen := utf8.RuneCountInString(padStr)
+
+	repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
+	repeatedString := strings.Repeat(padStr, repeatTimes)
+
+	leftSide := ""
+	if padLeft {
+		leftSide = repeatedString[0:targetLenLeft]
+	}
+
+	rightSide := ""
+	if padRight {
+		rightSide = repeatedString[0:targetLenRight]
+	}
+
+	return leftSide + str + rightSide
+}
+
+// TruncatingErrorf removes extra args from fmt.Errorf if they are not formatted in the str object.
+func TruncatingErrorf(str string, args ...interface{}) error {
+	n := strings.Count(str, "%s")
+	return fmt.Errorf(str, args[:n]...)
+}
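To round off utils.go, a sketch of the padding, truncation and email-normalization helpers (illustrative snippet, not part of the vendored file; outputs follow from the code above):

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.PadLeft("d", "ab", 5))  // "ababd"
	fmt.Println(govalidator.PadRight("d", "ab", 5)) // "dabab"
	fmt.Println(govalidator.Truncate("The quick brown fox jumps", 10, "...")) // "The quick..."

	// Mirrors the doc comment: dots and +tags removed, googlemail.com folded into gmail.com.
	normalized, _ := govalidator.NormalizeEmail("some.one+tag@googlemail.com")
	fmt.Println(normalized) // "someone@gmail.com"
}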
diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/validator.go b/hack/tools/vendor/github.com/asaskevich/govalidator/validator.go
new file mode 100644
index 0000000000..b18bbcb4c9
--- /dev/null
+++ b/hack/tools/vendor/github.com/asaskevich/govalidator/validator.go
@@ -0,0 +1,1278 @@
+// Package govalidator is a package of validators and sanitizers for strings, structs and collections.
+package govalidator
+
+import (
+	"bytes"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/url"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+	"unicode/utf8"
+)
+
+var (
+	fieldsRequiredByDefault bool
+	nilPtrAllowedByRequired = false
+	notNumberRegexp         = regexp.MustCompile("[^0-9]+")
+	whiteSpacesAndMinus     = regexp.MustCompile(`[\s-]+`)
+	paramsRegexp            = regexp.MustCompile(`\(.*\)$`)
+)
+
+const maxURLRuneCount = 2083
+const minURLRuneCount = 3
+const RF3339WithoutZone = "2006-01-02T15:04:05"
+
+// SetFieldsRequiredByDefault causes validation to fail when struct fields
+// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`).
+// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
+//     type exampleStruct struct {
+//         Name  string ``
+//         Email string `valid:"email"`
+//     }
+// This, however, will only fail when Email is empty or an invalid email address:
+//     type exampleStruct2 struct {
+//         Name  string `valid:"-"`
+//         Email string `valid:"email"`
+//     }
+// Lastly, this will only fail when Email is an invalid email address but not when it's empty:
+//     type exampleStruct2 struct {
+//         Name  string `valid:"-"`
+//         Email string `valid:"email,optional"`
+//     }
+func SetFieldsRequiredByDefault(value bool) {
+	fieldsRequiredByDefault = value
+}
+
+// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required.
+// The validation will still reject ptr fields in their zero value state. Example with this enabled:
+//     type exampleStruct struct {
+//         Name *string `valid:"required"`
+//     }
+// With `Name` set to "", this will be considered invalid input and will cause a validation error.
+// With `Name` set to nil, this will be considered valid by validation.
+// By default this is disabled.
+func SetNilPtrAllowedByRequired(value bool) {
+	nilPtrAllowedByRequired = value
+}
+
+// IsEmail checks if the string is an email.
+func IsEmail(str string) bool {
+	// TODO uppercase letters are not supported
+	return rxEmail.MatchString(str)
+}
+
+// IsExistingEmail checks if the string is an email of an existing domain.
+func IsExistingEmail(email string) bool {
+
+	if len(email) < 6 || len(email) > 254 {
+		return false
+	}
+	at := strings.LastIndex(email, "@")
+	if at <= 0 || at > len(email)-3 {
+		return false
+	}
+	user := email[:at]
+	host := email[at+1:]
+	if len(user) > 64 {
+		return false
+	}
+	if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) {
+		return false
+	}
+	switch host {
+	case "localhost", "example.com":
+		return true
+	}
+	if _, err := net.LookupMX(host); err != nil {
+		if _, err := net.LookupIP(host); err != nil {
+			return false
+		}
+	}
+
+	return true
+}
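A short sketch of the email checks above (illustrative; note that IsExistingEmail may perform DNS lookups for hosts other than the special-cased localhost and example.com):

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsEmail("user@example.com"))         // true
	fmt.Println(govalidator.IsEmail("not-an-email"))             // false
	fmt.Println(govalidator.IsExistingEmail("user@example.com")) // true: example.com is special-cased, no DNS lookup
}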
+// IsURL checks if the string is a URL.
+func IsURL(str string) bool {
+	if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") {
+		return false
+	}
+	strTemp := str
+	if strings.Contains(str, ":") && !strings.Contains(str, "://") {
+		// support no indicated urlscheme but with colon for port number
+		// http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString
+		strTemp = "http://" + str
+	}
+	u, err := url.Parse(strTemp)
+	if err != nil {
+		return false
+	}
+	if strings.HasPrefix(u.Host, ".") {
+		return false
+	}
+	if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) {
+		return false
+	}
+	return rxURL.MatchString(str)
+}
+
+// IsRequestURL checks if the string rawurl, assuming
+// it was received in an HTTP request, is a valid
+// URL conforming to RFC 3986.
+func IsRequestURL(rawurl string) bool {
+	url, err := url.ParseRequestURI(rawurl)
+	if err != nil {
+		return false // Couldn't even parse the rawurl
+	}
+	if len(url.Scheme) == 0 {
+		return false // No Scheme found
+	}
+	return true
+}
+
+// IsRequestURI checks if the string rawurl, assuming
+// it was received in an HTTP request, is an
+// absolute URI or an absolute path.
+func IsRequestURI(rawurl string) bool {
+	_, err := url.ParseRequestURI(rawurl)
+	return err == nil
+}
+
+// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid.
+func IsAlpha(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxAlpha.MatchString(str)
+}
+
+// IsUTFLetter checks if the string contains only unicode letter characters.
+// Similar to IsAlpha but for all languages. Empty string is valid.
+func IsUTFLetter(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+
+	for _, c := range str {
+		if !unicode.IsLetter(c) {
+			return false
+		}
+	}
+	return true
+
+}
+
+// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid.
+func IsAlphanumeric(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxAlphanumeric.MatchString(str)
+}
+
+// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid.
+func IsUTFLetterNumeric(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	for _, c := range str {
+		if !unicode.IsLetter(c) && !unicode.IsNumber(c) { // letters && numbers are ok
+			return false
+		}
+	}
+	return true
+
+}
+
+// IsNumeric checks if the string contains only numbers. Empty string is valid.
+func IsNumeric(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxNumeric.MatchString(str)
+}
+
+// IsUTFNumeric checks if the string contains only unicode numbers of any kind.
+// Numbers can be 0-9 but also Fractions ¾, Roman Ⅸ and Hangzhou 〩. Empty string is valid.
+func IsUTFNumeric(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	if strings.IndexAny(str, "+-") > 0 {
+		return false
+	}
+	if len(str) > 1 {
+		str = strings.TrimPrefix(str, "-")
+		str = strings.TrimPrefix(str, "+")
+	}
+	for _, c := range str {
+		if !unicode.IsNumber(c) { // numbers && minus sign are ok
+			return false
+		}
+	}
+	return true
+
+}
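A quick sketch of the URL and numeric checks above (illustrative; the Roman-numeral case comes straight from the IsUTFNumeric doc comment):

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsURL("https://example.com/docs")) // true
	fmt.Println(govalidator.IsURL(".example.com"))             // false: leading dot is rejected
	fmt.Println(govalidator.IsNumeric("12345"))                // true
	fmt.Println(govalidator.IsUTFNumeric("Ⅸ"))                 // true: any unicode number rune is accepted
}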
+// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid.
+func IsUTFDigit(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	if strings.IndexAny(str, "+-") > 0 {
+		return false
+	}
+	if len(str) > 1 {
+		str = strings.TrimPrefix(str, "-")
+		str = strings.TrimPrefix(str, "+")
+	}
+	for _, c := range str {
+		if !unicode.IsDigit(c) { // digits && minus sign are ok
+			return false
+		}
+	}
+	return true
+
+}
+
+// IsHexadecimal checks if the string is a hexadecimal number.
+func IsHexadecimal(str string) bool {
+	return rxHexadecimal.MatchString(str)
+}
+
+// IsHexcolor checks if the string is a hexadecimal color.
+func IsHexcolor(str string) bool {
+	return rxHexcolor.MatchString(str)
+}
+
+// IsRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB).
+func IsRGBcolor(str string) bool {
+	return rxRGBcolor.MatchString(str)
+}
+
+// IsLowerCase checks if the string is lowercase. Empty string is valid.
+func IsLowerCase(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return str == strings.ToLower(str)
+}
+
+// IsUpperCase checks if the string is uppercase. Empty string is valid.
+func IsUpperCase(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return str == strings.ToUpper(str)
+}
+
+// HasLowerCase checks if the string contains at least 1 lowercase character. Empty string is valid.
+func HasLowerCase(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxHasLowerCase.MatchString(str)
+}
+
+// HasUpperCase checks if the string contains at least 1 uppercase character. Empty string is valid.
+func HasUpperCase(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxHasUpperCase.MatchString(str)
+}
+
+// IsInt checks if the string is an integer. Empty string is valid.
+func IsInt(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxInt.MatchString(str)
+}
+
+// IsFloat checks if the string is a float.
+func IsFloat(str string) bool {
+	return str != "" && rxFloat.MatchString(str)
+}
+
+// IsDivisibleBy checks if the string is a number that's divisible by another.
+// If the second argument is not a valid integer or is zero, it returns false.
+// Otherwise, if the first argument is not a valid integer or is zero, it returns true (an invalid string converts to zero).
+func IsDivisibleBy(str, num string) bool {
+	f, _ := ToFloat(str)
+	p := int64(f)
+	q, _ := ToInt(num)
+	if q == 0 {
+		return false
+	}
+	return (p == 0) || (p%q == 0)
+}
+
+// IsNull checks if the string is null.
+func IsNull(str string) bool {
+	return len(str) == 0
+}
+
+// HasWhitespaceOnly checks if the string contains only whitespace.
+func HasWhitespaceOnly(str string) bool {
+	return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str)
+}
+
+// HasWhitespace checks if the string contains any whitespace.
+func HasWhitespace(str string) bool {
+	return len(str) > 0 && rxHasWhitespace.MatchString(str)
+}
+
+// IsByteLength checks if the string's length (in bytes) falls in a range.
+func IsByteLength(str string, min, max int) bool {
+	return len(str) >= min && len(str) <= max
+}
+
+// IsUUIDv3 checks if the string is a UUID version 3.
+func IsUUIDv3(str string) bool {
+	return rxUUID3.MatchString(str)
+}
+
+// IsUUIDv4 checks if the string is a UUID version 4.
+func IsUUIDv4(str string) bool {
+	return rxUUID4.MatchString(str)
+}
+
+// IsUUIDv5 checks if the string is a UUID version 5.
+func IsUUIDv5(str string) bool {
+	return rxUUID5.MatchString(str)
+}
+
+// IsUUID checks if the string is a UUID (version 3, 4 or 5).
+func IsUUID(str string) bool {
+	return rxUUID.MatchString(str)
+}
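A small sketch of the UUID checks above (illustrative; the sample UUID has version nibble 4, so only the v4 and generic matchers should accept it):

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	const id = "57b73598-8764-4ad0-a76a-679bb6640eb1"
	fmt.Println(govalidator.IsUUID(id))   // true
	fmt.Println(govalidator.IsUUIDv4(id)) // true
	fmt.Println(govalidator.IsUUIDv3(id)) // false: version digit is 4, not 3
}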
+// IsCreditCard checks if the string is a credit card number.
+func IsCreditCard(str string) bool {
+	sanitized := notNumberRegexp.ReplaceAllString(str, "")
+	if !rxCreditCard.MatchString(sanitized) {
+		return false
+	}
+	var sum int64
+	var digit string
+	var tmpNum int64
+	var shouldDouble bool
+	for i := len(sanitized) - 1; i >= 0; i-- {
+		digit = sanitized[i:(i + 1)]
+		tmpNum, _ = ToInt(digit)
+		if shouldDouble {
+			tmpNum *= 2
+			if tmpNum >= 10 {
+				sum += ((tmpNum % 10) + 1)
+			} else {
+				sum += tmpNum
+			}
+		} else {
+			sum += tmpNum
+		}
+		shouldDouble = !shouldDouble
+	}
+
+	return sum%10 == 0
+}
+
+// IsISBN10 checks if the string is an ISBN version 10.
+func IsISBN10(str string) bool {
+	return IsISBN(str, 10)
+}
+
+// IsISBN13 checks if the string is an ISBN version 13.
+func IsISBN13(str string) bool {
+	return IsISBN(str, 13)
+}
+
+// IsISBN checks if the string is an ISBN (version 10 or 13).
+// If the version value is not equal to 10 or 13, it will check both variants.
+func IsISBN(str string, version int) bool {
+	sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "")
+	var checksum int32
+	var i int32
+	if version == 10 {
+		if !rxISBN10.MatchString(sanitized) {
+			return false
+		}
+		for i = 0; i < 9; i++ {
+			checksum += (i + 1) * int32(sanitized[i]-'0')
+		}
+		if sanitized[9] == 'X' {
+			checksum += 10 * 10
+		} else {
+			checksum += 10 * int32(sanitized[9]-'0')
+		}
+		if checksum%11 == 0 {
+			return true
+		}
+		return false
+	} else if version == 13 {
+		if !rxISBN13.MatchString(sanitized) {
+			return false
+		}
+		factor := []int32{1, 3}
+		for i = 0; i < 12; i++ {
+			checksum += factor[i%2] * int32(sanitized[i]-'0')
+		}
+		return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0
+	}
+	return IsISBN(str, 10) || IsISBN(str, 13)
+}
+
+// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal).
+func IsJSON(str string) bool {
+	var js json.RawMessage
+	return json.Unmarshal([]byte(str), &js) == nil
+}
+
+// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid.
+func IsMultibyte(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxMultibyte.MatchString(str)
+}
+
+// IsASCII checks if the string contains ASCII chars only. Empty string is valid.
+func IsASCII(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxASCII.MatchString(str)
+}
+
+// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid.
+func IsPrintableASCII(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxPrintableASCII.MatchString(str)
+}
+
+// IsFullWidth checks if the string contains any full-width chars. Empty string is valid.
+func IsFullWidth(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxFullWidth.MatchString(str)
+}
+
+// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid.
+func IsHalfWidth(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxHalfWidth.MatchString(str)
+}
+
+// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid.
+func IsVariableWidth(str string) bool {
+	if IsNull(str) {
+		return true
+	}
+	return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str)
+}
+
+// IsBase64 checks if a string is base64 encoded.
+func IsBase64(str string) bool {
+	return rxBase64.MatchString(str)
+}
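A sketch of the checksum-style validators above (illustrative; the ISBN and card numbers are well-known test values that satisfy the ISBN and Luhn checksums computed by the code):

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsISBN13("978-3-16-148410-0"))     // true: weighted 1/3 checksum is 0 mod 10
	fmt.Println(govalidator.IsISBN10("3-8362-2119-5"))         // true: positional checksum is 0 mod 11
	fmt.Println(govalidator.IsCreditCard("4111111111111111")) // true: Luhn checksum holds
	fmt.Println(govalidator.IsJSON(`{"valid": true}`))         // true
	fmt.Println(govalidator.IsBase64("aGVsbG8="))              // true
}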
+// IsFilePath checks if a string is a Win or Unix file path and returns its type.
+func IsFilePath(str string) (bool, int) {
+	if rxWinPath.MatchString(str) {
+		// check windows path limit, see:
+		// http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath
+		if len(str[3:]) > 32767 {
+			return false, Win
+		}
+		return true, Win
+	} else if rxUnixPath.MatchString(str) {
+		return true, Unix
+	}
+	return false, Unknown
+}
+
+// IsDataURI checks if a string is a base64-encoded data URI, such as an image.
+func IsDataURI(str string) bool {
+	dataURI := strings.Split(str, ",")
+	if !rxDataURI.MatchString(dataURI[0]) {
+		return false
+	}
+	return IsBase64(dataURI[1])
+}
+
+// IsISO3166Alpha2 checks if a string is a valid two-letter country code.
+func IsISO3166Alpha2(str string) bool {
+	for _, entry := range ISO3166List {
+		if str == entry.Alpha2Code {
+			return true
+		}
+	}
+	return false
+}
+
+// IsISO3166Alpha3 checks if a string is a valid three-letter country code.
+func IsISO3166Alpha3(str string) bool {
+	for _, entry := range ISO3166List {
+		if str == entry.Alpha3Code {
+			return true
+		}
+	}
+	return false
+}
+
+// IsISO693Alpha2 checks if a string is a valid two-letter language code.
+func IsISO693Alpha2(str string) bool {
+	for _, entry := range ISO693List {
+		if str == entry.Alpha2Code {
+			return true
+		}
+	}
+	return false
+}
+
+// IsISO693Alpha3b checks if a string is a valid three-letter language code.
+func IsISO693Alpha3b(str string) bool {
+	for _, entry := range ISO693List {
+		if str == entry.Alpha3bCode {
+			return true
+		}
+	}
+	return false
+}
+
+// IsDNSName will validate the given string as a DNS name.
+func IsDNSName(str string) bool {
+	if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
+		// constraints already violated
+		return false
+	}
+	return !IsIP(str) && rxDNSName.MatchString(str)
+}
+
+// IsHash checks if a string is a hash of type algorithm.
+// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b']
+func IsHash(str string, algorithm string) bool {
+	len := "0"
+	algo := strings.ToLower(algorithm)
+
+	if algo == "crc32" || algo == "crc32b" {
+		len = "8"
+	} else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" {
+		len = "32"
+	} else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" {
+		len = "40"
+	} else if algo == "tiger192" {
+		len = "48"
+	} else if algo == "sha256" {
+		len = "64"
+	} else if algo == "sha384" {
+		len = "96"
+	} else if algo == "sha512" {
+		len = "128"
+	} else {
+		return false
+	}
+
+	return Matches(str, "^[a-f0-9]{"+len+"}$")
+}
+
+// IsDialString validates the given string for usage with the various Dial() functions.
+func IsDialString(str string) bool {
+
+	if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) {
+		return true
+	}
+
+	return false
+}
+
+// IsIP checks if a string is either IP version 4 or 6.
+func IsIP(str string) bool {
+	return net.ParseIP(str) != nil
+}
+
+// IsPort checks if a string represents a valid port.
+func IsPort(str string) bool {
+	if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 {
+		return true
+	}
+	return false
+}
+
+// IsIPv4 checks if the string is an IP version 4.
+func IsIPv4(str string) bool {
+	ip := net.ParseIP(str)
+	return ip != nil && strings.Contains(str, ".")
+}
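A sketch of the host and port validators above (illustrative values only):

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsIP("192.168.0.1"))                  // true
	fmt.Println(govalidator.IsIPv4("192.168.0.1"))                // true
	fmt.Println(govalidator.IsDNSName("api.example.com"))         // true
	fmt.Println(govalidator.IsPort("6443"))                       // true
	fmt.Println(govalidator.IsDialString("api.example.com:6443")) // true: host + port both validate
}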
+// IsIPv6 checks if the string is an IP version 6.
+func IsIPv6(str string) bool {
+	ip := net.ParseIP(str)
+	return ip != nil && strings.Contains(str, ":")
+}
+
+// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6).
+func IsCIDR(str string) bool {
+	_, _, err := net.ParseCIDR(str)
+	return err == nil
+}
+
+// IsMAC checks if a string is a valid MAC address.
+// Possible MAC formats:
+// 01:23:45:67:89:ab
+// 01:23:45:67:89:ab:cd:ef
+// 01-23-45-67-89-ab
+// 01-23-45-67-89-ab-cd-ef
+// 0123.4567.89ab
+// 0123.4567.89ab.cdef
+func IsMAC(str string) bool {
+	_, err := net.ParseMAC(str)
+	return err == nil
+}
+
+// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name.
+func IsHost(str string) bool {
+	return IsIP(str) || IsDNSName(str)
+}
+
+// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId.
+func IsMongoID(str string) bool {
+	return rxHexadecimal.MatchString(str) && (len(str) == 24)
+}
+
+// IsLatitude checks if a string is a valid latitude.
+func IsLatitude(str string) bool {
+	return rxLatitude.MatchString(str)
+}
+
+// IsLongitude checks if a string is a valid longitude.
+func IsLongitude(str string) bool {
+	return rxLongitude.MatchString(str)
+}
+
+// IsRsaPublicKey checks if a string is a valid RSA public key with the provided length.
+func IsRsaPublicKey(str string, keylen int) bool {
+	bb := bytes.NewBufferString(str)
+	pemBytes, err := ioutil.ReadAll(bb)
+	if err != nil {
+		return false
+	}
+	block, _ := pem.Decode(pemBytes)
+	if block != nil && block.Type != "PUBLIC KEY" {
+		return false
+	}
+	var der []byte
+
+	if block != nil {
+		der = block.Bytes
+	} else {
+		der, err = base64.StdEncoding.DecodeString(str)
+		if err != nil {
+			return false
+		}
+	}
+
+	key, err := x509.ParsePKIXPublicKey(der)
+	if err != nil {
+		return false
+	}
+	pubkey, ok := key.(*rsa.PublicKey)
+	if !ok {
+		return false
+	}
+	bitlen := len(pubkey.N.Bytes()) * 8
+	return bitlen == int(keylen)
+}
+
+func toJSONName(tag string) string {
+	if tag == "" {
+		return ""
+	}
+
+	// JSON name always comes first. If there's no options then split[0] is
+	// JSON name, if JSON name is not set, then split[0] is an empty string.
+	split := strings.SplitN(tag, ",", 2)
+
+	name := split[0]
+
+	// However it is possible that the field is skipped when
+	// (de-)serializing from/to JSON, in which case assume that there is no
+	// tag name to use
+	if name == "-" {
+		return ""
+	}
+	return name
+}
+
+func PrependPathToErrors(err error, path string) error {
+	switch err2 := err.(type) {
+	case Error:
+		err2.Path = append([]string{path}, err2.Path...)
+		return err2
+	case Errors:
+		errors := err2.Errors()
+		for i, err3 := range errors {
+			errors[i] = PrependPathToErrors(err3, path)
+		}
+		return err2
+	}
+	fmt.Println(err)
+	return err
+}
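Before the implementation, a sketch of how ValidateStruct below is typically driven by struct tags (the alphanum, url and required tags come from the package's standard tag map, which is defined elsewhere in the package; the error text shown is illustrative):

package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type server struct {
	Name string `valid:"alphanum,required"`
	URL  string `valid:"url,required"`
}

func main() {
	ok, err := govalidator.ValidateStruct(server{Name: "api01", URL: "https://example.com"})
	fmt.Println(ok, err) // true <nil>

	ok, err = govalidator.ValidateStruct(server{Name: "api 01", URL: "not a url"})
	fmt.Println(ok) // false
	// Per-field messages can be extracted from the returned error:
	fmt.Println(govalidator.ErrorsByField(err)["URL"]) // e.g. "not a url does not validate as url"
}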
+// ValidateStruct uses tags for fields.
+// The result will be equal to `false` if there are any errors.
+func ValidateStruct(s interface{}) (bool, error) {
+	if s == nil {
+		return true, nil
+	}
+	result := true
+	var err error
+	val := reflect.ValueOf(s)
+	if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	// we only accept structs
+	if val.Kind() != reflect.Struct {
+		return false, fmt.Errorf("function only accepts structs; got %s", val.Kind())
+	}
+	var errs Errors
+	for i := 0; i < val.NumField(); i++ {
+		valueField := val.Field(i)
+		typeField := val.Type().Field(i)
+		if typeField.PkgPath != "" {
+			continue // Private field
+		}
+		structResult := true
+		if valueField.Kind() == reflect.Interface {
+			valueField = valueField.Elem()
+		}
+		if (valueField.Kind() == reflect.Struct ||
+			(valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) &&
+			typeField.Tag.Get(tagName) != "-" {
+			var err error
+			structResult, err = ValidateStruct(valueField.Interface())
+			if err != nil {
+				err = PrependPathToErrors(err, typeField.Name)
+				errs = append(errs, err)
+			}
+		}
+		resultField, err2 := typeCheck(valueField, typeField, val, nil)
+		if err2 != nil {
+
+			// Replace structure name with JSON name if there is a tag on the variable
+			jsonTag := toJSONName(typeField.Tag.Get("json"))
+			if jsonTag != "" {
+				switch jsonError := err2.(type) {
+				case Error:
+					jsonError.Name = jsonTag
+					err2 = jsonError
+				case Errors:
+					for i2, err3 := range jsonError {
+						switch customErr := err3.(type) {
+						case Error:
+							customErr.Name = jsonTag
+							jsonError[i2] = customErr
+						}
+					}
+
+					err2 = jsonError
+				}
+			}
+
+			errs = append(errs, err2)
+		}
+		result = result && resultField && structResult
+	}
+	if len(errs) > 0 {
+		err = errs
+	}
+	return result, err
+}
+
+// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""}
+func parseTagIntoMap(tag string) tagOptionsMap {
+	optionsMap := make(tagOptionsMap)
+	options := strings.Split(tag, ",")
+
+	for i, option := range options {
+		option = strings.TrimSpace(option)
+
+		validationOptions := strings.Split(option, "~")
+		if !isValidTag(validationOptions[0]) {
+			continue
+		}
+		if len(validationOptions) == 2 {
+			optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i}
+		} else {
+			optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i}
+		}
+	}
+	return optionsMap
+}
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// IsSSN will validate the given string as a U.S.
Social Security Number +func IsSSN(str string) bool { + if str == "" || len(str) != 11 { + return false + } + return rxSSN.MatchString(str) +} + +// IsSemver check if string is valid semantic version +func IsSemver(str string) bool { + return rxSemver.MatchString(str) +} + +// IsTime check if string is valid according to given format +func IsTime(str string, format string) bool { + _, err := time.Parse(format, str) + return err == nil +} + +// IsRFC3339 check if string is valid timestamp value according to RFC3339 +func IsRFC3339(str string) bool { + return IsTime(str, time.RFC3339) +} + +// IsRFC3339WithoutZone check if string is valid timestamp value according to RFC3339 which excludes the timezone. +func IsRFC3339WithoutZone(str string) bool { + return IsTime(str, RF3339WithoutZone) +} + +// IsISO4217 check if string is valid ISO currency code +func IsISO4217(str string) bool { + for _, currency := range ISO4217List { + if str == currency { + return true + } + } + + return false +} + +// ByteLength check string's length +func ByteLength(str string, params ...string) bool { + if len(params) == 2 { + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return len(str) >= int(min) && len(str) <= int(max) + } + + return false +} + +// RuneLength check string's length +// Alias for StringLength +func RuneLength(str string, params ...string) bool { + return StringLength(str, params...) +} + +// IsRsaPub check whether string is valid RSA key +// Alias for IsRsaPublicKey +func IsRsaPub(str string, params ...string) bool { + if len(params) == 1 { + len, _ := ToInt(params[0]) + return IsRsaPublicKey(str, int(len)) + } + + return false +} + +// StringMatches checks if a string matches a given pattern. +func StringMatches(s string, params ...string) bool { + if len(params) == 1 { + pattern := params[0] + return Matches(s, pattern) + } + return false +} + +// StringLength check string's length (including multi byte strings) +func StringLength(str string, params ...string) bool { + + if len(params) == 2 { + strLength := utf8.RuneCountInString(str) + min, _ := ToInt(params[0]) + max, _ := ToInt(params[1]) + return strLength >= int(min) && strLength <= int(max) + } + + return false +} + +// Range check string's length +func Range(str string, params ...string) bool { + if len(params) == 2 { + value, _ := ToFloat(str) + min, _ := ToFloat(params[0]) + max, _ := ToFloat(params[1]) + return InRange(value, min, max) + } + + return false +} + +func isInRaw(str string, params ...string) bool { + if len(params) == 1 { + rawParams := params[0] + + parsedParams := strings.Split(rawParams, "|") + + return IsIn(str, parsedParams...) 
+ } + + return false +} + +// IsIn check if string str is a member of the set of strings params +func IsIn(str string, params ...string) bool { + for _, param := range params { + if str == param { + return true + } + } + + return false +} + +func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { + if nilPtrAllowedByRequired { + k := v.Kind() + if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { + return true, nil + } + } + + if requiredOption, isRequired := options["required"]; isRequired { + if len(requiredOption.customErrorMessage) > 0 { + return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} + } + return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} + } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { + return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} + } + // not required and empty is valid + return true, nil +} + +func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { + if !v.IsValid() { + return false, nil + } + + tag := t.Tag.Get(tagName) + + // Check if the field should be ignored + switch tag { + case "": + if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { + if !fieldsRequiredByDefault { + return true, nil + } + return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} + } + case "-": + return true, nil + } + + isRootType := false + if options == nil { + isRootType = true + options = parseTagIntoMap(tag) + } + + if isEmptyValue(v) { + // an empty value is not validated, check only required + isValid, resultErr = checkRequired(v, t, options) + for key := range options { + delete(options, key) + } + return isValid, resultErr + } + + var customTypeErrors Errors + optionsOrder := options.orderedKeys() + for _, validatorName := range optionsOrder { + validatorStruct := options[validatorName] + if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { + delete(options, validatorName) + + if result := validatefunc(v.Interface(), o.Interface()); !result { + if len(validatorStruct.customErrorMessage) > 0 { + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) + continue + } + customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) + } + } + } + + if len(customTypeErrors.Errors()) > 0 { + return false, customTypeErrors + } + + if isRootType { + // Ensure that we've checked the value by all specified validators before report that the value is valid + defer func() { + delete(options, "optional") + delete(options, "required") + + if isValid && resultErr == nil && len(options) != 0 { + optionsOrder := options.orderedKeys() + for _, validator := range optionsOrder { + isValid = false + resultErr = Error{t.Name, fmt.Errorf( + "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} + return + } + } + }() + } + + switch v.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, 
reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, + reflect.String: + // for each tag option check the map of validator functions + for _, validatorSpec := range optionsOrder { + validatorStruct := options[validatorSpec] + var negate bool + validator := validatorSpec + customMsgExists := len(validatorStruct.customErrorMessage) > 0 + + // Check whether the tag looks like '!something' or 'something' + if validator[0] == '!' { + validator = validator[1:] + negate = true + } + + // Check for param validators + for key, value := range ParamTagRegexMap { + ps := value.FindStringSubmatch(validator) + if len(ps) == 0 { + continue + } + + validatefunc, ok := ParamTagMap[key] + if !ok { + continue + } + + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + // type not yet supported, fail + return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} + } + } + + if validatefunc, ok := TagMap[validator]; ok { + delete(options, validatorSpec) + + switch v.Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + field := fmt.Sprint(v) // make value into string, then validate with regex + if result := validatefunc(field); !result && !negate || result && negate { + if customMsgExists { + return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + if negate { + return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} + } + default: + //Not Yet Supported Types (Fail here!) 
+					err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v)
+					return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}}
+				}
+			}
+		}
+		return true, nil
+	case reflect.Map:
+		if v.Type().Key().Kind() != reflect.String {
+			return false, &UnsupportedTypeError{v.Type()}
+		}
+		var sv stringValues
+		sv = v.MapKeys()
+		sort.Sort(sv)
+		result := true
+		for i, k := range sv {
+			var resultItem bool
+			var err error
+			if v.MapIndex(k).Kind() != reflect.Struct {
+				resultItem, err = typeCheck(v.MapIndex(k), t, o, options)
+				if err != nil {
+					return false, err
+				}
+			} else {
+				resultItem, err = ValidateStruct(v.MapIndex(k).Interface())
+				if err != nil {
+					err = PrependPathToErrors(err, t.Name+"."+sv[i].Interface().(string))
+					return false, err
+				}
+			}
+			result = result && resultItem
+		}
+		return result, nil
+	case reflect.Slice, reflect.Array:
+		result := true
+		for i := 0; i < v.Len(); i++ {
+			var resultItem bool
+			var err error
+			if v.Index(i).Kind() != reflect.Struct {
+				resultItem, err = typeCheck(v.Index(i), t, o, options)
+				if err != nil {
+					return false, err
+				}
+			} else {
+				resultItem, err = ValidateStruct(v.Index(i).Interface())
+				if err != nil {
+					err = PrependPathToErrors(err, t.Name+"."+strconv.Itoa(i))
+					return false, err
+				}
+			}
+			result = result && resultItem
+		}
+		return result, nil
+	case reflect.Interface:
+		// If the value is an interface then encode its element
+		if v.IsNil() {
+			return true, nil
+		}
+		return ValidateStruct(v.Interface())
+	case reflect.Ptr:
+		// If the value is a pointer then check its element
+		if v.IsNil() {
+			return true, nil
+		}
+		return typeCheck(v.Elem(), t, o, options)
+	case reflect.Struct:
+		return ValidateStruct(v.Interface())
+	default:
+		return false, &UnsupportedTypeError{v.Type()}
+	}
+}
+
+func stripParams(validatorString string) string {
+	return paramsRegexp.ReplaceAllString(validatorString, "")
+}
+
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.String, reflect.Array:
+		return v.Len() == 0
+	case reflect.Map, reflect.Slice:
+		return v.Len() == 0 || v.IsNil()
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+
+	return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface())
+}
+
+// ErrorByField returns the error for the specified field of the struct
+// validated by ValidateStruct, or an empty string if there are no errors
+// or this field doesn't exist or doesn't have any errors.
+func ErrorByField(e error, field string) string {
+	if e == nil {
+		return ""
+	}
+	return ErrorsByField(e)[field]
+}
+
+// ErrorsByField returns a map of errors of the struct validated
+// by ValidateStruct, or an empty map if there are no errors.
+func ErrorsByField(e error) map[string]string { + m := make(map[string]string) + if e == nil { + return m + } + // prototype for ValidateStruct + + switch e.(type) { + case Error: + m[e.(Error).Name] = e.(Error).Err.Error() + case Errors: + for _, item := range e.(Errors).Errors() { + n := ErrorsByField(item) + for k, v := range n { + m[k] = v + } + } + } + + return m +} + +// Error returns string equivalent for reflect.Type +func (e *UnsupportedTypeError) Error() string { + return "validator: unsupported type: " + e.Type.String() +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } +func (sv stringValues) get(i int) string { return sv[i].String() } diff --git a/hack/tools/vendor/github.com/asaskevich/govalidator/wercker.yml b/hack/tools/vendor/github.com/asaskevich/govalidator/wercker.yml new file mode 100644 index 0000000000..cac7a5fcf0 --- /dev/null +++ b/hack/tools/vendor/github.com/asaskevich/govalidator/wercker.yml @@ -0,0 +1,15 @@ +box: golang +build: + steps: + - setup-go-workspace + + - script: + name: go get + code: | + go version + go get -t ./... + + - script: + name: go test + code: | + go test -race ./... diff --git a/hack/tools/vendor/github.com/blang/semver/v4/LICENSE b/hack/tools/vendor/github.com/blang/semver/v4/LICENSE new file mode 100644 index 0000000000..5ba5c86fcb --- /dev/null +++ b/hack/tools/vendor/github.com/blang/semver/v4/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/hack/tools/vendor/github.com/blang/semver/v4/json.go b/hack/tools/vendor/github.com/blang/semver/v4/json.go new file mode 100644 index 0000000000..a74bf7c449 --- /dev/null +++ b/hack/tools/vendor/github.com/blang/semver/v4/json.go @@ -0,0 +1,23 @@ +package semver + +import ( + "encoding/json" +) + +// MarshalJSON implements the encoding/json.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. 
+func (v *Version) UnmarshalJSON(data []byte) (err error) {
+	var versionString string
+
+	if err = json.Unmarshal(data, &versionString); err != nil {
+		return
+	}
+
+	*v, err = Parse(versionString)
+
+	return
+}
diff --git a/hack/tools/vendor/github.com/blang/semver/v4/range.go b/hack/tools/vendor/github.com/blang/semver/v4/range.go
new file mode 100644
index 0000000000..95f7139b97
--- /dev/null
+++ b/hack/tools/vendor/github.com/blang/semver/v4/range.go
@@ -0,0 +1,416 @@
+package semver
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+type wildcardType int
+
+const (
+	noneWildcard  wildcardType = iota
+	majorWildcard wildcardType = 1
+	minorWildcard wildcardType = 2
+	patchWildcard wildcardType = 3
+)
+
+func wildcardTypefromInt(i int) wildcardType {
+	switch i {
+	case 1:
+		return majorWildcard
+	case 2:
+		return minorWildcard
+	case 3:
+		return patchWildcard
+	default:
+		return noneWildcard
+	}
+}
+
+type comparator func(Version, Version) bool
+
+var (
+	compEQ comparator = func(v1 Version, v2 Version) bool {
+		return v1.Compare(v2) == 0
+	}
+	compNE = func(v1 Version, v2 Version) bool {
+		return v1.Compare(v2) != 0
+	}
+	compGT = func(v1 Version, v2 Version) bool {
+		return v1.Compare(v2) == 1
+	}
+	compGE = func(v1 Version, v2 Version) bool {
+		return v1.Compare(v2) >= 0
+	}
+	compLT = func(v1 Version, v2 Version) bool {
+		return v1.Compare(v2) == -1
+	}
+	compLE = func(v1 Version, v2 Version) bool {
+		return v1.Compare(v2) <= 0
+	}
+)
+
+type versionRange struct {
+	v Version
+	c comparator
+}
+
+// rangeFunc creates a Range from the given versionRange.
+func (vr *versionRange) rangeFunc() Range {
+	return Range(func(v Version) bool {
+		return vr.c(v, vr.v)
+	})
+}
+
+// Range represents a range of versions.
+// A Range can be used to check if a Version satisfies it:
+//
+//     range, err := semver.ParseRange(">1.0.0 <2.0.0")
+//     range(semver.MustParse("1.1.1")) // returns true
+type Range func(Version) bool
+
+// OR combines the existing Range with another Range using logical OR.
+func (rf Range) OR(f Range) Range {
+	return Range(func(v Version) bool {
+		return rf(v) || f(v)
+	})
+}
+
+// AND combines the existing Range with another Range using logical AND.
+func (rf Range) AND(f Range) Range {
+	return Range(func(v Version) bool {
+		return rf(v) && f(v)
+	})
+}
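A sketch of how the Range combinators above and ParseRange below compose (version numbers are illustrative):

package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	// AND binds tighter than OR, so this reads: (>=4.14.0 AND <4.16.0) OR >=4.17.0.
	r, err := semver.ParseRange(">=4.14.0 <4.16.0 || >=4.17.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(r(semver.MustParse("4.15.3"))) // true
	fmt.Println(r(semver.MustParse("4.16.1"))) // false
	fmt.Println(r(semver.MustParse("4.17.0"))) // true
}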
+// +// Ranges can be combined by both AND and OR +// +// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` +func ParseRange(s string) (Range, error) { + parts := splitAndTrim(s) + orParts, err := splitORParts(parts) + if err != nil { + return nil, err + } + expandedParts, err := expandWildcardVersion(orParts) + if err != nil { + return nil, err + } + var orFn Range + for _, p := range expandedParts { + var andFn Range + for _, ap := range p { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + vr, err := buildVersionRange(opStr, vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) + } + rf := vr.rangeFunc() + + // Set function + if andFn == nil { + andFn = rf + } else { // Combine with existing function + andFn = andFn.AND(rf) + } + } + if orFn == nil { + orFn = andFn + } else { + orFn = orFn.OR(andFn) + } + + } + return orFn, nil +} + +// splitORParts splits the already cleaned parts by '||'. +// Checks for invalid positions of the operator and returns an +// error if found. +func splitORParts(parts []string) ([][]string, error) { + var ORparts [][]string + last := 0 + for i, p := range parts { + if p == "||" { + if i == 0 { + return nil, fmt.Errorf("First element in range is '||'") + } + ORparts = append(ORparts, parts[last:i]) + last = i + 1 + } + } + if last == len(parts) { + return nil, fmt.Errorf("Last element in range is '||'") + } + ORparts = append(ORparts, parts[last:]) + return ORparts, nil +} + +// buildVersionRange takes a slice of 2: operator and version +// and builds a versionRange, otherwise an error. +func buildVersionRange(opStr, vStr string) (*versionRange, error) { + c := parseComparator(opStr) + if c == nil { + return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) + } + v, err := Parse(vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) + } + + return &versionRange{ + v: v, + c: c, + }, nil + +} + +// inArray checks if a byte is contained in an array of bytes +func inArray(s byte, list []byte) bool { + for _, el := range list { + if el == s { + return true + } + } + return false +} + +// splitAndTrim splits a range string by spaces and cleans whitespaces +func splitAndTrim(s string) (result []string) { + last := 0 + var lastChar byte + excludeFromSplit := []byte{'>', '<', '='} + for i := 0; i < len(s); i++ { + if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { + if last < i-1 { + result = append(result, s[last:i]) + } + last = i + 1 + } else if s[i] != ' ' { + lastChar = s[i] + } + } + if last < len(s)-1 { + result = append(result, s[last:]) + } + + for i, v := range result { + result[i] = strings.Replace(v, " ", "", -1) + } + + // parts := strings.Split(s, " ") + // for _, x := range parts { + // if s := strings.TrimSpace(x); len(s) != 0 { + // result = append(result, s) + // } + // } + return +} + +// splitComparatorVersion splits the comparator from the version. +// Input must be free of leading or trailing spaces. 
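Before the remaining helpers, a sketch of the public entry point defined above: ParseRange compiles a range expression into a predicate over versions (illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	// AND binds tighter than OR: "any 1.x.x, or anything from 3.0.0 on".
	r, err := semver.ParseRange(">=1.0.0 <2.0.0 || >=3.0.0")
	if err != nil {
		panic(err)
	}

	fmt.Println(r(semver.MustParse("1.4.2"))) // true
	fmt.Println(r(semver.MustParse("2.1.0"))) // false
	fmt.Println(r(semver.MustParse("3.0.1"))) // true

	// Wildcards are expanded per the rules documented on
	// expandWildcardVersion below: "1.2.x" behaves as ">=1.2.0 <1.3.0".
	fmt.Println(semver.MustParseRange("1.2.x")(semver.MustParse("1.2.7"))) // true
}
```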
+func splitComparatorVersion(s string) (string, string, error) {
+	i := strings.IndexFunc(s, unicode.IsDigit)
+	if i == -1 {
+		return "", "", fmt.Errorf("Could not get version from string: %q", s)
+	}
+	return strings.TrimSpace(s[0:i]), s[i:], nil
+}
+
+// getWildcardType will return the type of wildcard that the
+// passed version contains
+func getWildcardType(vStr string) wildcardType {
+	parts := strings.Split(vStr, ".")
+	nparts := len(parts)
+	wildcard := parts[nparts-1]
+
+	possibleWildcardType := wildcardTypefromInt(nparts)
+	if wildcard == "x" {
+		return possibleWildcardType
+	}
+
+	return noneWildcard
+}
+
+// createVersionFromWildcard will convert a wildcard version
+// into a regular version, replacing 'x's with '0's, handling
+// special cases like '1.x.x' and '1.x'
+func createVersionFromWildcard(vStr string) string {
+	// handle 1.x.x
+	vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
+	vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
+	parts := strings.Split(vStr2, ".")
+
+	// handle 1.x
+	if len(parts) == 2 {
+		return vStr2 + ".0"
+	}
+
+	return vStr2
+}
+
+// incrementMajorVersion will increment the major version
+// of the passed version
+func incrementMajorVersion(vStr string) (string, error) {
+	parts := strings.Split(vStr, ".")
+	i, err := strconv.Atoi(parts[0])
+	if err != nil {
+		return "", err
+	}
+	parts[0] = strconv.Itoa(i + 1)
+
+	return strings.Join(parts, "."), nil
+}
+
+// incrementMinorVersion will increment the minor version
+// of the passed version
+func incrementMinorVersion(vStr string) (string, error) {
+	parts := strings.Split(vStr, ".")
+	i, err := strconv.Atoi(parts[1])
+	if err != nil {
+		return "", err
+	}
+	parts[1] = strconv.Itoa(i + 1)
+
+	return strings.Join(parts, "."), nil
+}
+
+// expandWildcardVersion will expand wildcards inside versions
+// following these rules:
+//
+// * when dealing with patch wildcards:
+// >= 1.2.x will become >= 1.2.0
+// <= 1.2.x will become < 1.3.0
+// > 1.2.x will become >= 1.3.0
+// < 1.2.x will become < 1.2.0
+// != 1.2.x will become < 1.2.0 >= 1.3.0
+//
+// * when dealing with minor wildcards:
+// >= 1.x will become >= 1.0.0
+// <= 1.x will become < 2.0.0
+// > 1.x will become >= 2.0.0
+// < 1.x will become < 1.0.0
+// != 1.x will become < 1.0.0 >= 2.0.0
+//
+// * when dealing with wildcards without
+// version operator:
+// 1.2.x will become >= 1.2.0 < 1.3.0
+// 1.x will become >= 1.0.0 < 2.0.0
+func expandWildcardVersion(parts [][]string) ([][]string, error) {
+	var expandedParts [][]string
+	for _, p := range parts {
+		var newParts []string
+		for _, ap := range p {
+			if strings.Contains(ap, "x") {
+				opStr, vStr, err := splitComparatorVersion(ap)
+				if err != nil {
+					return nil, err
+				}
+
+				versionWildcardType := getWildcardType(vStr)
+				flatVersion := createVersionFromWildcard(vStr)
+
+				var resultOperator string
+				var shouldIncrementVersion bool
+				switch opStr {
+				case ">":
+					resultOperator = ">="
+					shouldIncrementVersion = true
+				case ">=":
+					resultOperator = ">="
+				case "<":
+					resultOperator = "<"
+				case "<=":
+					resultOperator = "<"
+					shouldIncrementVersion = true
+				case "", "=", "==":
+					newParts = append(newParts, ">="+flatVersion)
+					resultOperator = "<"
+					shouldIncrementVersion = true
+				case "!=", "!":
+					newParts = append(newParts, "<"+flatVersion)
+					resultOperator = ">="
+					shouldIncrementVersion = true
+				}
+
+				var resultVersion string
+				if shouldIncrementVersion {
+					switch versionWildcardType {
+					case patchWildcard:
+						resultVersion, _ = incrementMinorVersion(flatVersion)
+					case minorWildcard:
+
resultVersion, _ = incrementMajorVersion(flatVersion) + } + } else { + resultVersion = flatVersion + } + + ap = resultOperator + resultVersion + } + newParts = append(newParts, ap) + } + expandedParts = append(expandedParts, newParts) + } + + return expandedParts, nil +} + +func parseComparator(s string) comparator { + switch s { + case "==": + fallthrough + case "": + fallthrough + case "=": + return compEQ + case ">": + return compGT + case ">=": + return compGE + case "<": + return compLT + case "<=": + return compLE + case "!": + fallthrough + case "!=": + return compNE + } + + return nil +} + +// MustParseRange is like ParseRange but panics if the range cannot be parsed. +func MustParseRange(s string) Range { + r, err := ParseRange(s) + if err != nil { + panic(`semver: ParseRange(` + s + `): ` + err.Error()) + } + return r +} diff --git a/hack/tools/vendor/github.com/blang/semver/v4/semver.go b/hack/tools/vendor/github.com/blang/semver/v4/semver.go new file mode 100644 index 0000000000..307de610f9 --- /dev/null +++ b/hack/tools/vendor/github.com/blang/semver/v4/semver.go @@ -0,0 +1,476 @@ +package semver + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + numbers string = "0123456789" + alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + alphanum = alphas + numbers +) + +// SpecVersion is the latest fully supported spec version of semver +var SpecVersion = Version{ + Major: 2, + Minor: 0, + Patch: 0, +} + +// Version represents a semver compatible version +type Version struct { + Major uint64 + Minor uint64 + Patch uint64 + Pre []PRVersion + Build []string //No Precedence +} + +// Version to string +func (v Version) String() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + + if len(v.Pre) > 0 { + b = append(b, '-') + b = append(b, v.Pre[0].String()...) + + for _, pre := range v.Pre[1:] { + b = append(b, '.') + b = append(b, pre.String()...) + } + } + + if len(v.Build) > 0 { + b = append(b, '+') + b = append(b, v.Build[0]...) + + for _, build := range v.Build[1:] { + b = append(b, '.') + b = append(b, build...) + } + } + + return string(b) +} + +// FinalizeVersion discards prerelease and build number and only returns +// major, minor and patch number. +func (v Version) FinalizeVersion() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + return string(b) +} + +// Equals checks if v is equal to o. +func (v Version) Equals(o Version) bool { + return (v.Compare(o) == 0) +} + +// EQ checks if v is equal to o. +func (v Version) EQ(o Version) bool { + return (v.Compare(o) == 0) +} + +// NE checks if v is not equal to o. +func (v Version) NE(o Version) bool { + return (v.Compare(o) != 0) +} + +// GT checks if v is greater than o. +func (v Version) GT(o Version) bool { + return (v.Compare(o) == 1) +} + +// GTE checks if v is greater than or equal to o. +func (v Version) GTE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// GE checks if v is greater than or equal to o. +func (v Version) GE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// LT checks if v is less than o. +func (v Version) LT(o Version) bool { + return (v.Compare(o) == -1) +} + +// LTE checks if v is less than or equal to o. 
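The comparison helpers in this stretch (EQ/NE/GT/GTE/GE/LT/LTE/LE) all delegate to Compare, defined just below; a usage sketch (illustrative, not part of the vendored file):

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	a := semver.MustParse("1.2.3")
	b := semver.MustParse("1.2.4-alpha")

	fmt.Println(a.LT(b))      // true: the patch number wins before prerelease is considered
	fmt.Println(a.Compare(b)) // -1
	fmt.Println(b.GTE(a))     // true
}
```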
+func (v Version) LTE(o Version) bool {
+	return (v.Compare(o) <= 0)
+}
+
+// LE checks if v is less than or equal to o.
+func (v Version) LE(o Version) bool {
+	return (v.Compare(o) <= 0)
+}
+
+// Compare compares Versions v to o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v Version) Compare(o Version) int {
+	if v.Major != o.Major {
+		if v.Major > o.Major {
+			return 1
+		}
+		return -1
+	}
+	if v.Minor != o.Minor {
+		if v.Minor > o.Minor {
+			return 1
+		}
+		return -1
+	}
+	if v.Patch != o.Patch {
+		if v.Patch > o.Patch {
+			return 1
+		}
+		return -1
+	}
+
+	// Quick comparison if a version has no prerelease versions
+	if len(v.Pre) == 0 && len(o.Pre) == 0 {
+		return 0
+	} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
+		return 1
+	} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
+		return -1
+	}
+
+	i := 0
+	for ; i < len(v.Pre) && i < len(o.Pre); i++ {
+		if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
+			continue
+		} else if comp == 1 {
+			return 1
+		} else {
+			return -1
+		}
+	}
+
+	// If all prerelease identifiers compared equal so far, the version with
+	// additional identifiers is the greater one
+	if i == len(v.Pre) && i == len(o.Pre) {
+		return 0
+	} else if i == len(v.Pre) && i < len(o.Pre) {
+		return -1
+	} else {
+		return 1
+	}
+
+}
+
+// IncrementPatch increments the patch version
+func (v *Version) IncrementPatch() error {
+	v.Patch++
+	return nil
+}
+
+// IncrementMinor increments the minor version
+func (v *Version) IncrementMinor() error {
+	v.Minor++
+	v.Patch = 0
+	return nil
+}
+
+// IncrementMajor increments the major version
+func (v *Version) IncrementMajor() error {
+	v.Major++
+	v.Minor = 0
+	v.Patch = 0
+	return nil
+}
+
+// Validate validates v and returns an error in case of an invalid version
+func (v Version) Validate() error {
+	// Major, Minor, Patch already validated using uint64
+
+	for _, pre := range v.Pre {
+		if !pre.IsNum { //Numeric prerelease versions already uint64
+			if len(pre.VersionStr) == 0 {
+				return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
+			}
+			if !containsOnly(pre.VersionStr, alphanum) {
+				return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
+			}
+		}
+	}
+
+	for _, build := range v.Build {
+		if len(build) == 0 {
+			return fmt.Errorf("Build meta data can not be empty %q", build)
+		}
+		if !containsOnly(build, alphanum) {
+			return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
+		}
+	}
+
+	return nil
+}
+
+// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
+func New(s string) (*Version, error) {
+	v, err := Parse(s)
+	vp := &v
+	return vp, err
+}
+
+// Make is an alias for Parse, parses version string and returns a validated Version or error
+func Make(s string) (Version, error) {
+	return Parse(s)
+}
+
+// ParseTolerant allows for certain version specifications that do not strictly adhere to semver
+// specs to be parsed by this library. It does so by normalizing versions before passing them to
+// Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions
+// with only major and minor components specified, and removes leading 0s.
+func ParseTolerant(s string) (Version, error) {
+	s = strings.TrimSpace(s)
+	s = strings.TrimPrefix(s, "v")
+
+	// Split into major.minor.(patch+pr+meta)
+	parts := strings.SplitN(s, ".", 3)
+	// Remove leading zeros.
+ for i, p := range parts { + if len(p) > 1 { + p = strings.TrimLeft(p, "0") + if len(p) == 0 || !strings.ContainsAny(p[0:1], "0123456789") { + p = "0" + p + } + parts[i] = p + } + } + // Fill up shortened versions. + if len(parts) < 3 { + if strings.ContainsAny(parts[len(parts)-1], "+-") { + return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") + } + for len(parts) < 3 { + parts = append(parts, "0") + } + } + s = strings.Join(parts, ".") + + return Parse(s) +} + +// Parse parses version string and returns a validated Version or error +func Parse(s string) (Version, error) { + if len(s) == 0 { + return Version{}, errors.New("Version string empty") + } + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) != 3 { + return Version{}, errors.New("No Major.Minor.Patch elements found") + } + + // Major + if !containsOnly(parts[0], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) + } + if hasLeadingZeroes(parts[0]) { + return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) + } + major, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return Version{}, err + } + + // Minor + if !containsOnly(parts[1], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) + } + if hasLeadingZeroes(parts[1]) { + return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) + } + minor, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return Version{}, err + } + + v := Version{} + v.Major = major + v.Minor = minor + + var build, prerelease []string + patchStr := parts[2] + + if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { + build = strings.Split(patchStr[buildIndex+1:], ".") + patchStr = patchStr[:buildIndex] + } + + if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { + prerelease = strings.Split(patchStr[preIndex+1:], ".") + patchStr = patchStr[:preIndex] + } + + if !containsOnly(patchStr, numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) + } + if hasLeadingZeroes(patchStr) { + return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) + } + patch, err := strconv.ParseUint(patchStr, 10, 64) + if err != nil { + return Version{}, err + } + + v.Patch = patch + + // Prerelease + for _, prstr := range prerelease { + parsedPR, err := NewPRVersion(prstr) + if err != nil { + return Version{}, err + } + v.Pre = append(v.Pre, parsedPR) + } + + // Build meta data + for _, str := range build { + if len(str) == 0 { + return Version{}, errors.New("Build meta data is empty") + } + if !containsOnly(str, alphanum) { + return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) + } + v.Build = append(v.Build, str) + } + + return v, nil +} + +// MustParse is like Parse but panics if the version cannot be parsed. 
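Parse is strict; ParseTolerant, added above, normalizes common loose inputs before delegating to it. A sketch contrasting the two (illustrative, not part of the vendored file):

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	_, err := semver.Parse("v1.2") // strict: rejected, no Major.Minor.Patch
	fmt.Println(err != nil)        // true

	v, _ := semver.ParseTolerant("v1.2") // "v" stripped, missing patch filled in
	fmt.Println(v)                       // 1.2.0
}
```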
+func MustParse(s string) Version { + v, err := Parse(s) + if err != nil { + panic(`semver: Parse(` + s + `): ` + err.Error()) + } + return v +} + +// PRVersion represents a PreRelease Version +type PRVersion struct { + VersionStr string + VersionNum uint64 + IsNum bool +} + +// NewPRVersion creates a new valid prerelease version +func NewPRVersion(s string) (PRVersion, error) { + if len(s) == 0 { + return PRVersion{}, errors.New("Prerelease is empty") + } + v := PRVersion{} + if containsOnly(s, numbers) { + if hasLeadingZeroes(s) { + return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) + } + num, err := strconv.ParseUint(s, 10, 64) + + // Might never be hit, but just in case + if err != nil { + return PRVersion{}, err + } + v.VersionNum = num + v.IsNum = true + } else if containsOnly(s, alphanum) { + v.VersionStr = s + v.IsNum = false + } else { + return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) + } + return v, nil +} + +// IsNumeric checks if prerelease-version is numeric +func (v PRVersion) IsNumeric() bool { + return v.IsNum +} + +// Compare compares two PreRelease Versions v and o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v PRVersion) Compare(o PRVersion) int { + if v.IsNum && !o.IsNum { + return -1 + } else if !v.IsNum && o.IsNum { + return 1 + } else if v.IsNum && o.IsNum { + if v.VersionNum == o.VersionNum { + return 0 + } else if v.VersionNum > o.VersionNum { + return 1 + } else { + return -1 + } + } else { // both are Alphas + if v.VersionStr == o.VersionStr { + return 0 + } else if v.VersionStr > o.VersionStr { + return 1 + } else { + return -1 + } + } +} + +// PreRelease version to string +func (v PRVersion) String() string { + if v.IsNum { + return strconv.FormatUint(v.VersionNum, 10) + } + return v.VersionStr +} + +func containsOnly(s string, set string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(set, r) + }) == -1 +} + +func hasLeadingZeroes(s string) bool { + return len(s) > 1 && s[0] == '0' +} + +// NewBuildVersion creates a new valid build version +func NewBuildVersion(s string) (string, error) { + if len(s) == 0 { + return "", errors.New("Buildversion is empty") + } + if !containsOnly(s, alphanum) { + return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) + } + return s, nil +} + +// FinalizeVersion returns the major, minor and patch number only and discards +// prerelease and build number. +func FinalizeVersion(s string) (string, error) { + v, err := Parse(s) + if err != nil { + return "", err + } + v.Pre = nil + v.Build = nil + + finalVer := v.String() + return finalVer, nil +} diff --git a/hack/tools/vendor/github.com/blang/semver/v4/sort.go b/hack/tools/vendor/github.com/blang/semver/v4/sort.go new file mode 100644 index 0000000000..e18f880826 --- /dev/null +++ b/hack/tools/vendor/github.com/blang/semver/v4/sort.go @@ -0,0 +1,28 @@ +package semver + +import ( + "sort" +) + +// Versions represents multiple versions. 
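The Versions type defined next plugs into sort.Interface using the precedence rules implemented above; a sketch (illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	vs := []semver.Version{
		semver.MustParse("1.10.0"),
		semver.MustParse("1.2.0"),
		semver.MustParse("1.2.0-beta"),
	}
	semver.Sort(vs)
	fmt.Println(vs) // [1.2.0-beta 1.2.0 1.10.0]: numeric order, prerelease first
}
```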
+type Versions []Version + +// Len returns length of version collection +func (s Versions) Len() int { + return len(s) +} + +// Swap swaps two versions inside the collection by its indices +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less checks if version at index i is less than version at index j +func (s Versions) Less(i, j int) bool { + return s[i].LT(s[j]) +} + +// Sort sorts a slice of versions +func Sort(versions []Version) { + sort.Sort(Versions(versions)) +} diff --git a/hack/tools/vendor/github.com/blang/semver/v4/sql.go b/hack/tools/vendor/github.com/blang/semver/v4/sql.go new file mode 100644 index 0000000000..db958134f3 --- /dev/null +++ b/hack/tools/vendor/github.com/blang/semver/v4/sql.go @@ -0,0 +1,30 @@ +package semver + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements the database/sql.Scanner interface. +func (v *Version) Scan(src interface{}) (err error) { + var str string + switch src := src.(type) { + case string: + str = src + case []byte: + str = string(src) + default: + return fmt.Errorf("version.Scan: cannot convert %T to string", src) + } + + if t, err := Parse(str); err == nil { + *v = t + } + + return +} + +// Value implements the database/sql/driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/hack/tools/vendor/github.com/bombsimon/wsl/v4/.golangci.yml b/hack/tools/vendor/github.com/bombsimon/wsl/v4/.golangci.yml index bde0ae54e4..bc79b83961 100644 --- a/hack/tools/vendor/github.com/bombsimon/wsl/v4/.golangci.yml +++ b/hack/tools/vendor/github.com/bombsimon/wsl/v4/.golangci.yml @@ -39,36 +39,28 @@ linters: enable-all: true disable: - cyclop - - deadcode - depguard - dupl - dupword - - exhaustivestruct - exhaustruct + - exportloopref - forbidigo - funlen - gci - gocognit - gocyclo - godox - - golint - - gomnd - - ifshort - - interfacer + - mnd - lll - maintidx - - maligned - nakedret - nestif - nlreturn - - nosnakecase - paralleltest - prealloc - rowserrcheck - - scopelint - - structcheck - testpackage - - varcheck + - tparallel - varnamelen - wastedassign @@ -76,5 +68,4 @@ issues: exclude-use-default: true max-issues-per-linter: 0 max-same-issues: 0 - # vim: set sw=2 ts=2 et: diff --git a/hack/tools/vendor/github.com/bombsimon/wsl/v4/analyzer.go b/hack/tools/vendor/github.com/bombsimon/wsl/v4/analyzer.go index 46d5019a78..e51df89c6c 100644 --- a/hack/tools/vendor/github.com/bombsimon/wsl/v4/analyzer.go +++ b/hack/tools/vendor/github.com/bombsimon/wsl/v4/analyzer.go @@ -3,6 +3,7 @@ package wsl import ( "flag" "go/ast" + "go/token" "strings" "golang.org/x/tools/go/analysis" @@ -78,12 +79,17 @@ func (wa *wslAnalyzer) flags() flag.FlagSet { func (wa *wslAnalyzer) run(pass *analysis.Pass) (interface{}, error) { for _, file := range pass.Files { - if !wa.config.IncludeGenerated && ast.IsGenerated(file) { + filename := getFilename(pass.Fset, file) + if !strings.HasSuffix(filename, ".go") { continue } - filename := pass.Fset.PositionFor(file.Pos(), false).Filename - if !strings.HasSuffix(filename, ".go") { + // if the file is related to cgo the filename of the unadjusted position is a not a '.go' file. + fn := pass.Fset.PositionFor(file.Pos(), false).Filename + + // The file is skipped if the "unadjusted" file is a Go file, and it's a generated file (ex: "_test.go" file). + // The other non-Go files are skipped by the first 'if' with the adjusted position. 
+ if !wa.config.IncludeGenerated && ast.IsGenerated(file) && strings.HasSuffix(fn, ".go") { continue } @@ -127,7 +133,7 @@ type multiStringValue struct { // Set implements the flag.Value interface and will overwrite the pointer to the // slice with a new pointer after splitting the flag by comma. func (m *multiStringValue) Set(value string) error { - s := []string{} + var s []string for _, v := range strings.Split(value, ",") { s = append(s, strings.TrimSpace(v)) @@ -146,3 +152,12 @@ func (m *multiStringValue) String() string { return strings.Join(*m.slicePtr, ", ") } + +func getFilename(fset *token.FileSet, file *ast.File) string { + filename := fset.PositionFor(file.Pos(), true).Filename + if !strings.HasSuffix(filename, ".go") { + return fset.PositionFor(file.Pos(), false).Filename + } + + return filename +} diff --git a/hack/tools/vendor/github.com/bombsimon/wsl/v4/wsl.go b/hack/tools/vendor/github.com/bombsimon/wsl/v4/wsl.go index 76f4abf617..44c7abe219 100644 --- a/hack/tools/vendor/github.com/bombsimon/wsl/v4/wsl.go +++ b/hack/tools/vendor/github.com/bombsimon/wsl/v4/wsl.go @@ -353,7 +353,7 @@ func (p *processor) parseBlockStatements(statements []ast.Stmt) { return false } - for j := 0; j < n; j++ { + for j := range n { s1 := statements[i+j] s2 := statements[i+j+1] @@ -1113,8 +1113,8 @@ func (p *processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, ne return } - blockStartLine = p.fileSet.PositionFor(blockStartPos, false).Line - blockEndLine = p.fileSet.PositionFor(blockEndPos, false).Line + blockStartLine = p.fileSet.Position(blockStartPos).Line + blockEndLine = p.fileSet.Position(blockEndPos).Line // No whitespace possible if LBrace and RBrace is on the same line. if blockStartLine == blockEndLine { @@ -1362,14 +1362,14 @@ func isExampleFunc(ident *ast.Ident) bool { } func (p *processor) nodeStart(node ast.Node) int { - return p.fileSet.PositionFor(node.Pos(), false).Line + return p.fileSet.Position(node.Pos()).Line } func (p *processor) nodeEnd(node ast.Node) int { - line := p.fileSet.PositionFor(node.End(), false).Line + line := p.fileSet.Position(node.End()).Line if isEmptyLabeledStmt(node) { - return p.fileSet.PositionFor(node.Pos(), false).Line + return p.fileSet.Position(node.Pos()).Line } return line @@ -1408,7 +1408,7 @@ func (p *processor) addErrorRange(reportAt, start, end token.Pos, reason string) } func (p *processor) addWarning(w string, pos token.Pos, t interface{}) { - position := p.fileSet.PositionFor(pos, false) + position := p.fileSet.Position(pos) p.warnings = append(p.warnings, fmt.Sprintf("%s:%d: %s (%T)", position.Filename, position.Line, w, t), diff --git a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go index f68170fb31..ebf2a0dbea 100644 --- a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go +++ b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/analyzer.go @@ -8,12 +8,12 @@ import ( "strings" "sync" - "github.com/butuzov/ireturn/analyzer/internal/config" - "github.com/butuzov/ireturn/analyzer/internal/types" - "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" + + "github.com/butuzov/ireturn/analyzer/internal/config" + "github.com/butuzov/ireturn/analyzer/internal/types" ) const name string = "ireturn" // linter name @@ -23,11 +23,11 @@ type validator interface { } type analyzer struct { - once sync.Once - mu sync.RWMutex - handler validator - err error - 
diabledNolint bool + once sync.Once + mu sync.RWMutex + handler validator + err error + disabledNolint bool found []analysis.Diagnostic } @@ -63,7 +63,7 @@ func (a *analyzer) run(pass *analysis.Pass) (interface{}, error) { } // 003. Is it allowed to be checked? - if !a.diabledNolint && hasDisallowDirective(f.Doc) { + if !a.disabledNolint && hasDisallowDirective(f.Doc) { return } @@ -115,7 +115,7 @@ func (a *analyzer) readConfiguration(fs *flag.FlagSet) { // First: checking nonolint directive val := fs.Lookup("nonolint") if val != nil { - a.diabledNolint = fs.Lookup("nonolint").Value.String() == "true" + a.disabledNolint = fs.Lookup("nonolint").Value.String() == "true" } // Second: validators implementation next @@ -128,7 +128,7 @@ func (a *analyzer) readConfiguration(fs *flag.FlagSet) { } func NewAnalyzer() *analysis.Analyzer { - a := analyzer{} //nolint: exhaustivestruct + a := analyzer{} return &analysis.Analyzer{ Name: name, @@ -196,7 +196,7 @@ func filterInterfaces(p *analysis.Pass, ft *ast.FuncType, di map[string]struct{} typeParams := val.String() prefix, suffix := "interface{", "}" - if strings.HasPrefix(typeParams, prefix) { // nolint: gosimple + if strings.HasPrefix(typeParams, prefix) { //nolint:gosimple typeParams = typeParams[len(prefix):] } if strings.HasSuffix(typeParams, suffix) { diff --git a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go index 6a294ca35f..da101c7862 100644 --- a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go +++ b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/allow.go @@ -2,7 +2,7 @@ package config import "github.com/butuzov/ireturn/analyzer/internal/types" -// allowConfig specifies a list of interfaces (keywords, patters and regular expressions) +// allowConfig specifies a list of interfaces (keywords, patterns and regular expressions) // that are allowed by ireturn as valid to return, any non listed interface are rejected. type allowConfig struct { *defaultConfig diff --git a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go index 6aa04e52e8..d6914af862 100644 --- a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go +++ b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/new.go @@ -10,7 +10,6 @@ import ( var ErrCollisionOfInterests = errors.New("can't have both `-accept` and `-reject` specified at same time") -// nolint: exhaustivestruct func DefaultValidatorConfig() *allowConfig { return allowAll([]string{ types.NameEmpty, // "empty": empty interfaces (interface{}) diff --git a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go index bef6913bb8..b2cde910ce 100644 --- a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go +++ b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/config/reject.go @@ -2,7 +2,7 @@ package config import "github.com/butuzov/ireturn/analyzer/internal/types" -// rejectConfig specifies a list of interfaces (keywords, patters and regular expressions) +// rejectConfig specifies a list of interfaces (keywords, patterns and regular expressions) // that are rejected by ireturn as valid to return, any non listed interface are allowed. 
type rejectConfig struct { *defaultConfig diff --git a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go index 5e576374d5..0f4286515f 100644 --- a/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go +++ b/hack/tools/vendor/github.com/butuzov/ireturn/analyzer/internal/types/iface.go @@ -47,7 +47,7 @@ func (i IFace) HashString() string { } func (i IFace) ExportDiagnostic() analysis.Diagnostic { - return analysis.Diagnostic{ //nolint: exhaustivestruct + return analysis.Diagnostic{ Pos: i.Pos, Message: i.String(), } diff --git a/hack/tools/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md b/hack/tools/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md index 3dcc01e960..da30c8e00f 100644 --- a/hack/tools/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md +++ b/hack/tools/vendor/github.com/butuzov/mirror/MIRROR_FUNCS.md @@ -1,201 +1,55 @@ - -func (*bufio.Writer) Write([]byte) (int, error) -func (*bufio.Writer) WriteString(string) (int, error) - - -func (*bufio.Writer) WriteRune(rune) (int, error) -func (*bufio.Writer) WriteString(string) (int, error) - - -func (*bytes.Buffer) Write([]byte) (int, error) -func (*bytes.Buffer) WriteString(string) (int, error) - - -func (*bytes.Buffer) WriteRune(rune) (int, error) -func (*bytes.Buffer) WriteString(string) (int, error) - - -func bytes.Compare([]byte, []byte) int -func strings.Compare(string, string) int - - -func bytes.Contains([]byte, []byte) bool -func strings.Contains(string, string) bool - - -func bytes.ContainsAny([]byte, string) bool -func strings.ContainsAny(string, string) bool - - -func bytes.ContainsRune([]byte, byte) bool -func strings.ContainsRune(string, byte) bool - - -func bytes.Count([]byte, []byte) int -func strings.Count(string, string) int - - -func bytes.EqualFold([]byte, []byte) bool -func strings.EqualFold(string, string) bool - - -func bytes.HasPrefix([]byte, []byte) bool -func strings.HasPrefix(string, string) bool - - -func bytes.HasSuffix([]byte, []byte) bool -func strings.HasSuffix(string, string) bool - - -func bytes.Index([]byte, []byte) int -func strings.Index(string, string) int - - -func bytes.IndexAny([]byte, string) int -func strings.IndexAny(string, string) int - - -func bytes.IndexByte([]byte, byte) int -func strings.IndexByte(string, byte) int - - -func bytes.IndexFunc([]byte, func(rune) bool) int -func strings.IndexFunc(string, func(rune) bool) int - - -func bytes.IndexRune([]byte, rune) int -func strings.IndexRune(string, rune) int - - -func bytes.LastIndex([]byte, []byte) int -func strings.LastIndex(string, string) int - - -func bytes.LastIndexAny([]byte, string) int -func strings.LastIndexAny(string, string) int - - -func bytes.LastIndexByte([]byte, byte) int -func strings.LastIndexByte(string, byte) int - - -func bytes.LastIndexFunc([]byte, func(rune) bool) int -func strings.LastIndexFunc(string, func(rune) bool) int - - -func bytes.NewBuffer([]byte) *bytes.Buffer -func bytes.NewBufferString(string) *bytes.Buffer - - -func (*httptest.ResponseRecorder) Write([]byte) (int, error) -func (*httptest.ResponseRecorder) WriteString(string) (int, error) - - -func (*maphash.Hash) Write([]byte) (int, error) -func (*maphash.Hash) WriteString(string) (int, error) - - -func (*os.File) Write([]byte) (int, error) -func (*os.File) WriteString(string) (int, error) - - -func regexp.Match(string, []byte) (bool, error) -func regexp.MatchString(string, string) (bool, error) - - -func (*regexp.Regexp) 
FindAllIndex([]byte, int) [][]int -func (*regexp.Regexp) FindAllStringIndex(string, int) [][]int - - -func (*regexp.Regexp) FindAllSubmatchIndex([]byte, int) [][]int -func (*regexp.Regexp) FindAllStringSubmatchIndex(string, int) [][]int - - -func (*regexp.Regexp) FindIndex([]byte) []int -func (*regexp.Regexp) FindStringIndex(string) []int - - -func (*regexp.Regexp) FindSubmatchIndex([]byte) []int -func (*regexp.Regexp) FindStringSubmatchIndex(string) []int - - -func (*regexp.Regexp) Match([]byte) bool -func (*regexp.Regexp) MatchString(string) bool - - -func (*strings.Builder) Write([]byte) (int, error) -func (*strings.Builder) WriteString(string) (int, error) - - -func (*strings.Builder) WriteRune(rune) (int, error) -func (*strings.Builder) WriteString(string) (int, error) - - -func strings.Compare(string) int -func bytes.Compare([]byte) int - - -func strings.Contains(string) bool -func bytes.Contains([]byte) bool - - -func strings.ContainsAny(string) bool -func bytes.ContainsAny([]byte) bool - - -func strings.ContainsRune(string) bool -func bytes.ContainsRune([]byte) bool - - -func strings.EqualFold(string) bool -func bytes.EqualFold([]byte) bool - - -func strings.HasPrefix(string) bool -func bytes.HasPrefix([]byte) bool - - -func strings.HasSuffix(string) bool -func bytes.HasSuffix([]byte) bool - - -func strings.Index(string) int -func bytes.Index([]byte) int - - -func strings.IndexFunc(string, func(r rune) bool) int -func bytes.IndexFunc([]byte, func(r rune) bool) int - - -func strings.LastIndex(string) int -func bytes.LastIndex([]byte) int - - -func strings.LastIndexAny(string) int -func bytes.LastIndexAny([]byte) int - - -func strings.LastIndexFunc(string, func(r rune) bool) int -func bytes.LastIndexFunc([]byte, func(r rune) bool) int - - -func utf8.DecodeLastRune([]byte) (rune, int) -func utf8.DecodeLastRuneInString(string) (rune, int) - - -func utf8.DecodeRune([]byte) (rune, int) -func utf8.DecodeRuneInString(string) (rune, int) - - -func utf8.FullRune([]byte) bool -func utf8.FullRuneInString(string) bool - - -func utf8.RuneCount([]byte) int -func utf8.RuneCountInString(string) int - - -func utf8.Valid([]byte) bool -func utf8.ValidString(string) bool - + +| Function | Mirror | +|----------|--------| +| `func (*bufio.Writer) Write([]byte) (int, error)` | `func (*bufio.Writer) WriteString(string) (int, error)` | +| `func (*bufio.Writer) WriteRune(rune) (int, error)` | `func (*bufio.Writer) WriteString(string) (int, error)` | +| `func (*bytes.Buffer) Write([]byte) (int, error)` | `func (*bytes.Buffer) WriteString(string) (int, error)` | +| `func (*bytes.Buffer) WriteRune(rune) (int, error)` | `func (*bytes.Buffer) WriteString(string) (int, error)` | +| `func bytes.Compare([]byte, []byte) int` | `func strings.Compare(string, string) int` | +| `func bytes.Contains([]byte, []byte) bool` | `func strings.Contains(string, string) bool` | +| `func bytes.ContainsAny([]byte, string) bool` | `func strings.ContainsAny(string, string) bool` | +| `func bytes.ContainsRune([]byte, byte) bool` | `func strings.ContainsRune(string, byte) bool` | +| `func bytes.Count([]byte, []byte) int` | `func strings.Count(string, string) int` | +| `func bytes.EqualFold([]byte, []byte) bool` | `func strings.EqualFold(string, string) bool` | +| `func bytes.HasPrefix([]byte, []byte) bool` | `func strings.HasPrefix(string, string) bool` | +| `func bytes.HasSuffix([]byte, []byte) bool` | `func strings.HasSuffix(string, string) bool` | +| `func bytes.Index([]byte, []byte) int` | `func strings.Index(string, string) int` | 
+| `func bytes.IndexAny([]byte, string) int` | `func strings.IndexAny(string, string) int` | +| `func bytes.IndexByte([]byte, byte) int` | `func strings.IndexByte(string, byte) int` | +| `func bytes.IndexFunc([]byte, func(rune) bool) int` | `func strings.IndexFunc(string, func(rune) bool) int` | +| `func bytes.IndexRune([]byte, rune) int` | `func strings.IndexRune(string, rune) int` | +| `func bytes.LastIndex([]byte, []byte) int` | `func strings.LastIndex(string, string) int` | +| `func bytes.LastIndexAny([]byte, string) int` | `func strings.LastIndexAny(string, string) int` | +| `func bytes.LastIndexByte([]byte, byte) int` | `func strings.LastIndexByte(string, byte) int` | +| `func bytes.LastIndexFunc([]byte, func(rune) bool) int` | `func strings.LastIndexFunc(string, func(rune) bool) int` | +| `func bytes.NewBuffer([]byte) *bytes.Buffer` | `func bytes.NewBufferString(string) *bytes.Buffer` | +| `func (*httptest.ResponseRecorder) Write([]byte) (int, error)` | `func (*httptest.ResponseRecorder) WriteString(string) (int, error)` | +| `func maphash.Bytes([]byte) uint64` | `func maphash.String(string) uint64` | +| `func (*maphash.Hash) Write([]byte) (int, error)` | `func (*maphash.Hash) WriteString(string) (int, error)` | +| `func (*os.File) Write([]byte) (int, error)` | `func (*os.File) WriteString(string) (int, error)` | +| `func regexp.Match(string, []byte) (bool, error)` | `func regexp.MatchString(string, string) (bool, error)` | +| `func (*regexp.Regexp) FindAllIndex([]byte, int) [][]int` | `func (*regexp.Regexp) FindAllStringIndex(string, int) [][]int` | +| `func (*regexp.Regexp) FindAllSubmatchIndex([]byte, int) [][]int` | `func (*regexp.Regexp) FindAllStringSubmatchIndex(string, int) [][]int` | +| `func (*regexp.Regexp) FindIndex([]byte) []int` | `func (*regexp.Regexp) FindStringIndex(string) []int` | +| `func (*regexp.Regexp) FindSubmatchIndex([]byte) []int` | `func (*regexp.Regexp) FindStringSubmatchIndex(string) []int` | +| `func (*regexp.Regexp) Match([]byte) bool` | `func (*regexp.Regexp) MatchString(string) bool` | +| `func (*strings.Builder) Write([]byte) (int, error)` | `func (*strings.Builder) WriteString(string) (int, error)` | +| `func (*strings.Builder) WriteRune(rune) (int, error)` | `func (*strings.Builder) WriteString(string) (int, error)` | +| `func strings.Compare(string) int` | `func bytes.Compare([]byte) int` | +| `func strings.Contains(string) bool` | `func bytes.Contains([]byte) bool` | +| `func strings.ContainsAny(string) bool` | `func bytes.ContainsAny([]byte) bool` | +| `func strings.ContainsRune(string) bool` | `func bytes.ContainsRune([]byte) bool` | +| `func strings.EqualFold(string) bool` | `func bytes.EqualFold([]byte) bool` | +| `func strings.HasPrefix(string) bool` | `func bytes.HasPrefix([]byte) bool` | +| `func strings.HasSuffix(string) bool` | `func bytes.HasSuffix([]byte) bool` | +| `func strings.Index(string) int` | `func bytes.Index([]byte) int` | +| `func strings.IndexFunc(string, func(r rune) bool) int` | `func bytes.IndexFunc([]byte, func(r rune) bool) int` | +| `func strings.LastIndex(string) int` | `func bytes.LastIndex([]byte) int` | +| `func strings.LastIndexAny(string) int` | `func bytes.LastIndexAny([]byte) int` | +| `func strings.LastIndexFunc(string, func(r rune) bool) int` | `func bytes.LastIndexFunc([]byte, func(r rune) bool) int` | +| `func utf8.DecodeLastRune([]byte) (rune, int)` | `func utf8.DecodeLastRuneInString(string) (rune, int)` | +| `func utf8.DecodeRune([]byte) (rune, int)` | `func utf8.DecodeRuneInString(string) (rune, 
int)` | +| `func utf8.FullRune([]byte) bool` | `func utf8.FullRuneInString(string) bool` | +| `func utf8.RuneCount([]byte) int` | `func utf8.RuneCountInString(string) int` | +| `func utf8.Valid([]byte) bool` | `func utf8.ValidString(string) bool` | diff --git a/hack/tools/vendor/github.com/butuzov/mirror/Makefile b/hack/tools/vendor/github.com/butuzov/mirror/Makefile index ac267208fb..dab6f160ae 100644 --- a/hack/tools/vendor/github.com/butuzov/mirror/Makefile +++ b/hack/tools/vendor/github.com/butuzov/mirror/Makefile @@ -10,7 +10,8 @@ endef # Generate Artifacts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ generate: ## Generate Assets - $(MAKE) + $(MAKE) generate-tests + $(MAKE) generate-mirror-table generate-tests: ## Generates Assets at testdata go run ./cmd/internal/tests/ "$(PWD)/testdata" @@ -52,7 +53,7 @@ tests-summary: bin/tparse lints: ## Run golangci-lint lints: bin/golangci-lint lints: - golangci-lint run --no-config ./... --skip-dirs "^(cmd|testdata)" + golangci-lint run --no-config ./... --exclude-dirs "^(cmd|testdata)" cover: ## Run Coverage @@ -71,8 +72,8 @@ bin/tparse: INSTALL_URL=github.com/mfridman/tparse@v0.13.2 bin/tparse: $(call install_go_bin, tparse, $(INSTALL_URL)) -bin/golangci-lint: ## Installs golangci-lint@v1.55.2 (if not exists) -bin/golangci-lint: INSTALL_URL=github.com/golangci/golangci-lint@v1.55.2 +bin/golangci-lint: ## Installs golangci-lint@v1.62.0 (if not exists) +bin/golangci-lint: INSTALL_URL=github.com/golangci/golangci-lint@v1.62.0 bin/golangci-lint: $(call install_go_bin, golangci-lint, $(INSTALL_URL)) @@ -99,7 +100,7 @@ help: dep-gawk @ echo "" -# Helper Mehtods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +# Helper Methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ dep-gawk: @ if [ -z "$(shell command -v gawk)" ]; then \ if [ -x /usr/local/bin/brew ]; then $(MAKE) _brew_gawk_install; exit 0; fi; \ @@ -111,21 +112,21 @@ dep-gawk: fi _brew_gawk_install: - @ echo "Instaling gawk using brew... " + @ echo "Installing gawk using brew... " @ brew install gawk --quiet @ echo "done" _ubuntu_gawk_install: - @ echo "Instaling gawk using apt-get... " + @ echo "Installing gawk using apt-get... " @ apt-get -q install gawk -y @ echo "done" _alpine_gawk_install: - @ echo "Instaling gawk using yum... " + @ echo "Installing gawk using yum... " @ apk add --update --no-cache gawk @ echo "done" _centos_gawk_install: - @ echo "Instaling gawk using yum... " + @ echo "Installing gawk using yum... 
" @ yum install -q -y gawk; @ echo "done" diff --git a/hack/tools/vendor/github.com/butuzov/mirror/analyzer.go b/hack/tools/vendor/github.com/butuzov/mirror/analyzer.go index 13ded46c6d..b15019ce1f 100644 --- a/hack/tools/vendor/github.com/butuzov/mirror/analyzer.go +++ b/hack/tools/vendor/github.com/butuzov/mirror/analyzer.go @@ -44,9 +44,9 @@ func Run(pass *analysis.Pass, withTests bool) []*checker.Violation { BytesFunctions, BytesBufferMethods, RegexpFunctions, RegexpRegexpMethods, StringFunctions, StringsBuilderMethods, + MaphashMethods, MaphashFunctions, BufioMethods, HTTPTestMethods, - OsFileMethods, MaphashMethods, - UTF8Functions, + OsFileMethods, UTF8Functions, ) check.Type = checker.WrapType(pass.TypesInfo) diff --git a/hack/tools/vendor/github.com/butuzov/mirror/checkers_maphash.go b/hack/tools/vendor/github.com/butuzov/mirror/checkers_maphash.go index 0aa43ff7bb..345a64123e 100644 --- a/hack/tools/vendor/github.com/butuzov/mirror/checkers_maphash.go +++ b/hack/tools/vendor/github.com/butuzov/mirror/checkers_maphash.go @@ -2,35 +2,66 @@ package mirror import "github.com/butuzov/mirror/internal/checker" -var MaphashMethods = []checker.Violation{ - { // (*hash/maphash).Write - Targets: checker.Bytes, - Type: checker.Method, - Package: "hash/maphash", - Struct: "Hash", - Caller: "Write", - Args: []int{0}, - AltCaller: "WriteString", +var ( + MaphashFunctions = []checker.Violation{ + { // maphash.Bytes + Targets: checker.Bytes, + Type: checker.Function, + Package: "hash/maphash", + Caller: "Bytes", + Args: []int{1}, + AltCaller: "String", - Generate: &checker.Generate{ - PreCondition: `h := maphash.Hash{}`, - Pattern: `Write($0)`, - Returns: []string{"int", "error"}, + Generate: &checker.Generate{ + Pattern: `Bytes(maphash.MakeSeed(), $0)`, + Returns: []string{"uint64"}, + }, }, - }, - { // (*hash/maphash).WriteString - Targets: checker.Strings, - Type: checker.Method, - Package: "hash/maphash", - Struct: "Hash", - Caller: "WriteString", - Args: []int{0}, - AltCaller: "Write", + { // maphash.String + Targets: checker.Strings, + Type: checker.Function, + Package: "hash/maphash", + Caller: "String", + Args: []int{1}, + AltCaller: "Bytes", - Generate: &checker.Generate{ - PreCondition: `h := maphash.Hash{}`, - Pattern: `WriteString($0)`, - Returns: []string{"int", "error"}, + Generate: &checker.Generate{ + Pattern: `String(maphash.MakeSeed(), $0)`, + Returns: []string{"uint64"}, + }, }, - }, -} + } + + MaphashMethods = []checker.Violation{ + { // (*hash/maphash).Write + Targets: checker.Bytes, + Type: checker.Method, + Package: "hash/maphash", + Struct: "Hash", + Caller: "Write", + Args: []int{0}, + AltCaller: "WriteString", + + Generate: &checker.Generate{ + PreCondition: `h := maphash.Hash{}`, + Pattern: `Write($0)`, + Returns: []string{"int", "error"}, + }, + }, + { // (*hash/maphash).WriteString + Targets: checker.Strings, + Type: checker.Method, + Package: "hash/maphash", + Struct: "Hash", + Caller: "WriteString", + Args: []int{0}, + AltCaller: "Write", + + Generate: &checker.Generate{ + PreCondition: `h := maphash.Hash{}`, + Pattern: `WriteString($0)`, + Returns: []string{"int", "error"}, + }, + }, + } +) diff --git a/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/checker.go b/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/checker.go index c1a9416314..fb9ba41729 100644 --- a/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/checker.go +++ b/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/checker.go @@ -9,12 +9,12 @@ import ( 
"strings" ) -// Checker will perform standart check on package and its methods. +// Checker will perform standard check on package and its methods. type Checker struct { Violations []Violation // List of available violations Packages map[string][]int // Storing indexes of Violations per pkg/kg.Struct Type func(ast.Expr) string // Type Checker closure. - Print func(ast.Node) []byte // String representation of the expresion. + Print func(ast.Node) []byte // String representation of the expression. } func New(violations ...[]Violation) Checker { @@ -76,7 +76,7 @@ func (c *Checker) Handle(v *Violation, ce *ast.CallExpr) (map[int]ast.Expr, bool continue } - // is it convertsion call + // is it conversion call if !c.callConverts(call) { continue } diff --git a/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/violation.go b/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/violation.go index 3d8acf1415..c2c1492086 100644 --- a/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/violation.go +++ b/hack/tools/vendor/github.com/butuzov/mirror/internal/checker/violation.go @@ -28,7 +28,7 @@ const ( UntypedRune string = "untyped rune" ) -// Violation describs what message we going to give to a particular code violation +// Violation describes what message we going to give to a particular code violation type Violation struct { Type ViolationType // Args []int // Indexes of the arguments needs to be checked @@ -143,7 +143,7 @@ func (v *Violation) Diagnostic(fSet *token.FileSet) analysis.Diagnostic { v.AltPackage = v.Package } - // Hooray! we dont need to change package and redo imports. + // Hooray! we don't need to change package and redo imports. if v.Type == Function && v.AltPackage == v.Package && noNl { diagnostic.SuggestedFixes = []analysis.SuggestedFix{{ Message: "Fix Issue With", @@ -166,7 +166,7 @@ type GolangIssue struct { Original string } -// Issue intended to be used only within `golangci-lint`, bu you can use use it +// Issue intended to be used only within `golangci-lint`, but you can use it // alongside Diagnostic if you wish. func (v *Violation) Issue(fSet *token.FileSet) GolangIssue { issue := GolangIssue{ diff --git a/hack/tools/vendor/github.com/butuzov/mirror/readme.md b/hack/tools/vendor/github.com/butuzov/mirror/readme.md index f830ea72ea..f5cfa47a68 100644 --- a/hack/tools/vendor/github.com/butuzov/mirror/readme.md +++ b/hack/tools/vendor/github.com/butuzov/mirror/readme.md @@ -2,6 +2,13 @@ `mirror` suggests use of alternative functions/methods in order to gain performance boosts by avoiding unnecessary `[]byte/string` conversion calls. See [MIRROR_FUNCS.md](MIRROR_FUNCS.md) list of mirror functions you can use in go's stdlib. +--- + +[![United 24](https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/banner-personal-page.svg)](https://u24.gov.ua/) +[![Help Oleg Butuzov](https://raw.githubusercontent.com/butuzov/butuzov/main/personal.svg)](https://github.com/butuzov) + +--- + ## Linter Use Cases ### `github.com/argoproj/argo-cd` @@ -86,13 +93,13 @@ util/cert/cert.go:82:10: avoid allocations with (*regexp.Regexp).MatchString (mi - flag `--tests` (e.g. `--tests=false`) - flag `--skip-files` (e.g. 
`--skip-files="_test.go"`) - - yaml confguration `run.skip-files`: + - yaml configuration `run.skip-files`: ```yaml run: skip-files: - '(.+)_test\.go' ``` - - yaml confguration `issues.exclude-rules`: + - yaml configuration `issues.exclude-rules`: ```yaml issues: exclude-rules: @@ -106,7 +113,7 @@ util/cert/cert.go:82:10: avoid allocations with (*regexp.Regexp).MatchString (mi ```shell # Update Assets (testdata/(strings|bytes|os|utf8|maphash|regexp|bufio).go) -(task|make) generated +(task|make) generate # Run Tests (task|make) tests # Lint Code diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/.gitignore b/hack/tools/vendor/github.com/cenkalti/backoff/v4/.gitignore new file mode 100644 index 0000000000..50d95c548b --- /dev/null +++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IDEs +.idea/ diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/LICENSE b/hack/tools/vendor/github.com/cenkalti/backoff/v4/LICENSE new file mode 100644 index 0000000000..89b8179965 --- /dev/null +++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/README.md b/hack/tools/vendor/github.com/cenkalti/backoff/v4/README.md new file mode 100644 index 0000000000..9433004a28 --- /dev/null +++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/README.md @@ -0,0 +1,30 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. + +Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. + +## Contributing + +* I would like to keep this library as small as possible. 
+* Please don't send a PR without opening an issue and discussing it first.
+* If the proposed change is not a common use case, I will probably not accept it.
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
+[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/backoff.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/backoff.go
new file mode 100644
index 0000000000..3676ee405d
--- /dev/null
+++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/backoff.go
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use Retry function for retrying operations that may fail.
+// If Retry does not meet your needs,
+// copy/paste the function into your project and modify as you wish.
+//
+// There is also Ticker type similar to time.Ticker.
+// You can use it if you need to work with channels.
+//
+// See Examples section below for usage examples.
package backoff
+
+import "time"
+
+// BackOff is a backoff policy for retrying an operation.
+type BackOff interface {
+	// NextBackOff returns the duration to wait before retrying the operation,
+	// or backoff.Stop to indicate that no more retries should be made.
+	//
+	// Example usage:
+	//
+	// 	duration := backoff.NextBackOff();
+	// 	if (duration == backoff.Stop) {
+	// 		// Do not retry operation.
+	// 	} else {
+	// 		// Sleep for duration and retry operation.
+	// 	}
+	//
+	NextBackOff() time.Duration
+
+	// Reset to initial state.
+	Reset()
+}
+
+// Stop indicates that no more retries should be made for use in NextBackOff().
+const Stop time.Duration = -1
+
+// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting, indefinitely.
+type ZeroBackOff struct{}
+
+func (b *ZeroBackOff) Reset() {}
+
+func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
+
+// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
+// NextBackOff(), meaning that the operation should never be retried.
+type StopBackOff struct{}
+
+func (b *StopBackOff) Reset() {}
+
+func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
+
+// ConstantBackOff is a backoff policy that always returns the same backoff delay.
+// This is in contrast to an exponential backoff policy,
+// which returns a delay that grows longer as you call NextBackOff() over and over again.
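The BackOff contract above is the package's whole surface; a sketch of driving a fixed-delay policy (the ConstantBackOff constructor is completed just below; tryOnce is a hypothetical operation):

```go
package main

import (
	"errors"
	"time"

	"github.com/cenkalti/backoff/v4"
)

// tryOnce is a hypothetical stand-in for the operation being retried.
func tryOnce() error { return errors.New("not ready yet") }

func main() {
	var p backoff.BackOff = backoff.NewConstantBackOff(500 * time.Millisecond)

	for attempts := 0; attempts < 5 && tryOnce() != nil; attempts++ {
		d := p.NextBackOff()
		if d == backoff.Stop {
			break // the policy gave up
		}
		time.Sleep(d)
	}
}
```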
+type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/context.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/context.go new file mode 100644 index 0000000000..48482330eb --- /dev/null +++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/context.go @@ -0,0 +1,62 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { // nolint: golint + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func getContext(b BackOff) context.Context { + if cb, ok := b.(BackOffContext); ok { + return cb.Context() + } + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + return b.BackOff.NextBackOff() + } +} diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/exponential.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/exponential.go new file mode 100644 index 0000000000..aac99f196a --- /dev/null +++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -0,0 +1,216 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). 
+ +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff returns Stop. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Stop time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options. +type ExponentialBackOffOpts func(*ExponentialBackOff) + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Stop: Stop, + Clock: SystemClock, + } + for _, fn := range opts { + fn(b) + } + b.Reset() + return b +} + +// WithInitialInterval sets the initial interval between retries. +func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.InitialInterval = duration + } +} + +// WithRandomizationFactor sets the randomization factor to add jitter to intervals. +func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.RandomizationFactor = randomizationFactor + } +} + +// WithMultiplier sets the multiplier for increasing the interval after each retry. +func WithMultiplier(multiplier float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Multiplier = multiplier + } +} + +// WithMaxInterval sets the maximum interval between retries. +func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxInterval = duration + } +} + +// WithMaxElapsedTime sets the maximum total time for retries. +func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxElapsedTime = duration + } +} + +// WithRetryStopDuration sets the duration after which retries should stop. +func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Stop = duration + } +} + +// WithClockProvider sets the clock used to measure time. 
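Taken together, the functional option helpers above compose like this; a sketch with arbitrary values, shown only to illustrate how the With* constructors override the defaults:

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Build a policy with non-default settings via the option helpers
	// added in this version of the package.
	b := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(200*time.Millisecond),
		backoff.WithMultiplier(2.0),
		backoff.WithMaxInterval(5*time.Second),
		backoff.WithMaxElapsedTime(30*time.Second),
	)
	fmt.Println(b.InitialInterval, b.Multiplier, b.MaxInterval, b.MaxElapsedTime)
}
```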
+func WithClockProvider(clock Clock) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Clock = clock + } +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + elapsed := b.GetElapsedTime() + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { + return b.Stop + } + return next +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/retry.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/retry.go new file mode 100644 index 0000000000..b9c0c51cd7 --- /dev/null +++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -0,0 +1,146 @@ +package backoff + +import ( + "errors" + "time" +) + +// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). +// The operation will be retried using a backoff policy if it returns an error. +type OperationWithData[T any] func() (T, error) + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. 
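The interval growth described in exponential.go can be checked with jitter disabled; per getRandomValueFromInterval, a RandomizationFactor of 0 makes NextBackOff deterministic. A small sketch (the iteration count is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// With RandomizationFactor == 0 the randomized interval equals the
	// current interval, so the output is a pure geometric series capped
	// at MaxInterval: 500ms, 750ms, 1.125s, ... (default Multiplier 1.5).
	b := backoff.NewExponentialBackOff(
		backoff.WithRandomizationFactor(0),
		backoff.WithMaxElapsedTime(0), // 0 means never stop based on elapsed time
	)
	for i := 0; i < 6; i++ {
		fmt.Println(b.NextBackOff())
	}
}
```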
+type Operation func() error + +func (o Operation) withEmptyData() OperationWithData[struct{}] { + return func() (struct{}, error) { + return struct{}{}, o() + } +} + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) +} + +// RetryWithData is like Retry but returns data in the response too. +func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { + return RetryNotifyWithData(o, b, nil) +} + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) +} + +// RetryNotifyWithData is like RetryNotify but returns data in the response too. +func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { + return doRetryNotify(operation, b, notify, nil) +} + +// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer +// for each failed attempt before sleep. +// A default timer that uses system timer is used when nil is passed. +func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { + _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) + return err +} + +// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. +func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + return doRetryNotify(operation, b, notify, t) +} + +func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + var ( + err error + next time.Duration + res T + ) + if t == nil { + t = &defaultTimer{} + } + + defer func() { + t.Stop() + }() + + ctx := getContext(b) + + b.Reset() + for { + res, err = operation() + if err == nil { + return res, nil + } + + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + if cerr := ctx.Err(); cerr != nil { + return res, cerr + } + + return res, err + } + + if notify != nil { + notify(err, next) + } + + t.Start(next) + + select { + case <-ctx.Done(): + return res, ctx.Err() + case <-t.C(): + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +func (e *PermanentError) Unwrap() error { + return e.Err +} + +func (e *PermanentError) Is(target error) bool { + _, ok := target.(*PermanentError) + return ok +} + +// Permanent wraps the given err in a *PermanentError. 
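A sketch of how the pieces of retry.go compose in practice: a data-returning operation, a permanent error for failures that retrying cannot fix, a notify callback, and a policy bounded by both a context and a retry cap. The URL, limits, and timeout below are assumptions for illustration:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func fetch(ctx context.Context, url string) (*http.Response, error) {
	op := func() (*http.Response, error) {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
		if err != nil {
			// A malformed request cannot be fixed by retrying: mark it permanent.
			return nil, backoff.Permanent(err)
		}
		return http.DefaultClient.Do(req) // transport errors are retried
	}
	// Stop on context cancellation or after at most 5 retries.
	policy := backoff.WithMaxRetries(backoff.WithContext(backoff.NewExponentialBackOff(), ctx), 5)
	notify := func(err error, wait time.Duration) {
		fmt.Printf("attempt failed (%v); retrying in %s\n", err, wait)
	}
	return backoff.RetryNotifyWithData(op, policy, notify)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	resp, err := fetch(ctx, "https://example.com/")
	if err != nil {
		fmt.Println("gave up:", err)
		return
	}
	resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}
```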
+func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/ticker.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/ticker.go new file mode 100644 index 0000000000..df9d68bce5 --- /dev/null +++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -0,0 +1,97 @@ +package backoff + +import ( + "context" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + ctx context.Context + timer Timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. +// A default timer that uses system timer is used when nil is passed. +func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.ctx.Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/timer.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/timer.go new file mode 100644 index 0000000000..8120d0213c --- /dev/null +++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. 
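The channel-based alternative from ticker.go above, sketched under the assumption of an operation that succeeds on its third try; each receive on C is a signal to attempt the operation again:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func operation(attempt int) error {
	if attempt < 3 {
		return errors.New("not ready yet")
	}
	return nil
}

func main() {
	ticker := backoff.NewTicker(backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 3))
	defer ticker.Stop()

	attempt := 0
	for range ticker.C { // guaranteed to tick at least once
		attempt++
		if err := operation(attempt); err != nil {
			fmt.Println("attempt", attempt, "failed:", err)
			continue
		}
		fmt.Println("succeeded on attempt", attempt)
		return
	}
	// C was closed: the policy returned Stop before the operation succeeded.
	fmt.Println("gave up")
}
```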
+func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/hack/tools/vendor/github.com/cenkalti/backoff/v4/tries.go b/hack/tools/vendor/github.com/cenkalti/backoff/v4/tries.go new file mode 100644 index 0000000000..28d58ca37c --- /dev/null +++ b/hack/tools/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -0,0 +1,38 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries == 0 { + return Stop + } + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/hack/tools/vendor/github.com/ckaznocha/intrange/.golangci.yml b/hack/tools/vendor/github.com/ckaznocha/intrange/.golangci.yml index 2ad830d1b2..b240f85ce9 100644 --- a/hack/tools/vendor/github.com/ckaznocha/intrange/.golangci.yml +++ b/hack/tools/vendor/github.com/ckaznocha/intrange/.golangci.yml @@ -1,6 +1,9 @@ linters-settings: gci: - local-prefixes: github.com/ckaznocha/intrange + sections: + - standard + - default + - localmodule gocritic: enabled-tags: - diagnostic @@ -10,10 +13,7 @@ linters-settings: - style goimports: local-prefixes: github.com/ckaznocha/intrange - golint: - min-confidence: 0 govet: - check-shadowing: true enable: - asmdecl - assign @@ -24,6 +24,7 @@ linters-settings: - cgocall - composite - copylock + - copyloopvar - deepequalerrors - errorsas - fieldalignment @@ -57,18 +58,16 @@ linters: - dupl - errcheck - errorlint - - exportloopref - gci - gochecknoinits - goconst - gocritic - godot - godox - - goerr113 + - err113 - gofmt - gofumpt - goimports - - gomnd - goprintffuncname - gosec - gosimple @@ -94,6 +93,6 @@ linters: - wastedassign - whitespace - wsl -run: - skip-dirs: +issues: + exclude-dirs: - testdata/ diff --git a/hack/tools/vendor/github.com/ckaznocha/intrange/intrange.go b/hack/tools/vendor/github.com/ckaznocha/intrange/intrange.go index 44a15091e7..229c847d5a 100644 --- a/hack/tools/vendor/github.com/ckaznocha/intrange/intrange.go +++ b/hack/tools/vendor/github.com/ckaznocha/intrange/intrange.go @@ -79,6 +79,8 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { return } + initAssign := init.Tok == token.ASSIGN + if len(init.Lhs) != 1 || len(init.Rhs) != 1 { return } @@ -97,16 +99,13 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { return } - var nExpr ast.Expr + var ( + operand ast.Expr + hasEquivalentOperator bool + ) switch cond.Op { - case token.LSS: // ;i < n; - if isBenchmark(cond.Y) { - return - } - - nExpr = findNExpr(cond.Y) - + case token.LSS, token.LEQ: // ;i < n; || ;i <= n; x, ok := cond.X.(*ast.Ident) if !ok { return @@ -115,13 +114,10 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { if x.Name != initIdent.Name { return } - case token.GTR: // ;n > i; - if isBenchmark(cond.X) { - return - } - - nExpr = findNExpr(cond.X) + hasEquivalentOperator = cond.Op == token.LEQ + operand = cond.Y + case token.GTR, token.GEQ: // ;n > i; || ;n >= i; y, ok := cond.Y.(*ast.Ident) if !ok { return @@ -130,6 
+126,9 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { if y.Name != initIdent.Name { return } + + hasEquivalentOperator = cond.Op == token.GEQ + operand = cond.X default: return } @@ -228,7 +227,7 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { bc := &bodyChecker{ initIdent: initIdent, - nExpr: nExpr, + nExpr: findNExpr(operand), } ast.Inspect(forStmt.Body, bc.check) @@ -237,9 +236,50 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { return } + if initAssign { + pass.Report(analysis.Diagnostic{ + Pos: forStmt.Pos(), + Message: msg + "\nBecause the key is not part of the loop's scope, take care to consider side effects.", + }) + + return + } + + operandIsNumberLit := isNumberLit(operand) + + if hasEquivalentOperator && !operandIsNumberLit { + return + } + + rangeX := operandToString( + pass, + initIdent, + operand, + hasEquivalentOperator && operandIsNumberLit, + ) + + var replacement string + if bc.accessed { + replacement = fmt.Sprintf("%s := range %s", initIdent.Name, rangeX) + } else { + replacement = fmt.Sprintf("range %s", rangeX) + } + pass.Report(analysis.Diagnostic{ Pos: forStmt.Pos(), Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: fmt.Sprintf("Replace loop with `%s`", replacement), + TextEdits: []analysis.TextEdit{ + { + Pos: forStmt.Init.Pos(), + End: forStmt.Post.End(), + NewText: []byte(replacement), + }, + }, + }, + }, }) } @@ -363,26 +403,45 @@ func findNExpr(expr ast.Expr) ast.Expr { } } -func isBenchmark(expr ast.Expr) bool { - selectorExpr, ok := expr.(*ast.SelectorExpr) - if !ok { - return false - } +func recursiveOperandToString( + expr ast.Expr, + incrementInt bool, +) string { + switch e := expr.(type) { + case *ast.CallExpr: + args := "" - if selectorExpr.Sel.Name != "N" { - return false - } + for i, v := range e.Args { + if i > 0 { + args += ", " + } - ident, ok := selectorExpr.X.(*ast.Ident) - if !ok { - return false - } + args += recursiveOperandToString(v, incrementInt && len(e.Args) == 1) + } - if ident.Name == "b" { - return true - } + return recursiveOperandToString(e.Fun, false) + "(" + args + ")" + case *ast.BasicLit: + if incrementInt && e.Kind == token.INT { + v, err := strconv.Atoi(e.Value) + if err == nil { + return strconv.Itoa(v + 1) + } + + return e.Value + } - return false + return e.Value + case *ast.Ident: + return e.Name + case *ast.SelectorExpr: + return recursiveOperandToString(e.X, false) + "." 
+ recursiveOperandToString(e.Sel, false) + case *ast.IndexExpr: + return recursiveOperandToString(e.X, false) + "[" + recursiveOperandToString(e.Index, false) + "]" + case *ast.BinaryExpr: + return recursiveOperandToString(e.X, false) + " " + e.Op.String() + " " + recursiveOperandToString(e.Y, false) + default: + return "" + } } func identEqual(a, b ast.Expr) bool { @@ -428,6 +487,7 @@ type bodyChecker struct { initIdent *ast.Ident nExpr ast.Expr modified bool + accessed bool } func (b *bodyChecker) check(n ast.Node) bool { @@ -446,11 +506,55 @@ func (b *bodyChecker) check(n ast.Node) bool { return false } + case *ast.Ident: + if identEqual(stmt, b.initIdent) { + b.accessed = true + } } return true } +func isNumberLit(exp ast.Expr) bool { + switch lit := exp.(type) { + case *ast.BasicLit: + if lit.Kind == token.INT { + return true + } + + return false + case *ast.CallExpr: + switch fun := lit.Fun.(type) { + case *ast.Ident: + switch fun.Name { + case + "int", + "int8", + "int16", + "int32", + "int64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64": + default: + return false + } + default: + return false + } + + if len(lit.Args) != 1 { + return false + } + + return isNumberLit(lit.Args[0]) + default: + return false + } +} + func compareNumberLit(exp ast.Expr, val int) bool { switch lit := exp.(type) { case *ast.BasicLit: @@ -497,3 +601,27 @@ func compareNumberLit(exp ast.Expr, val int) bool { return false } } + +func operandToString( + pass *analysis.Pass, + i *ast.Ident, + operand ast.Expr, + increment bool, +) string { + s := recursiveOperandToString(operand, increment) + t := pass.TypesInfo.TypeOf(i) + + if t == types.Typ[types.Int] { + if len(s) > 5 && s[:4] == "int(" && s[len(s)-1] == ')' { + s = s[4 : len(s)-1] + } + + return s + } + + if len(s) > 2 && s[len(s)-1] == ')' { + return s + } + + return t.String() + "(" + s + ")" +} diff --git a/hack/tools/vendor/github.com/curioswitch/go-reassign/.golangci.yml b/hack/tools/vendor/github.com/curioswitch/go-reassign/.golangci.yml index e3bf79ae72..fdf0bb2f22 100644 --- a/hack/tools/vendor/github.com/curioswitch/go-reassign/.golangci.yml +++ b/hack/tools/vendor/github.com/curioswitch/go-reassign/.golangci.yml @@ -5,14 +5,12 @@ linters: - bodyclose - decorder - durationcheck + - err113 - errchkjson - errname - errorlint - - execinquery - exhaustive - - exportloopref - gocritic - - goerr113 - gofmt - goimports - goprintffuncname @@ -20,7 +18,6 @@ linters: - importas - misspell - nolintlint - - nosnakecase - prealloc - predeclared - promlinter diff --git a/hack/tools/vendor/github.com/curioswitch/go-reassign/README.md b/hack/tools/vendor/github.com/curioswitch/go-reassign/README.md index ac9c131df2..190756f928 100644 --- a/hack/tools/vendor/github.com/curioswitch/go-reassign/README.md +++ b/hack/tools/vendor/github.com/curioswitch/go-reassign/README.md @@ -47,7 +47,8 @@ Package variable reassignment is generally confusing, though, and we recommend a The `pattern` flag can be set to a regular expression to define what variables cannot be reassigned, and `.*` is recommended if it works with your code. -## Limitations +## Development -If a variable shadows the name of an import, an assignment of a field in the variable will trigger the linter. Shadowing -can be confusing, so it's recommended to rename the variable. +[mage](https://magefile.org/) is used for development. Run `go run mage.go -l` to see available targets. + +For example, to run checks before sending a PR, run `go run mage.go check`. 
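For a concrete sense of what reassign reports, a minimal illustrative snippet; with the default pattern the analyzer flags writes to well-known package variables from outside their defining package, using the "%s variable %s in other package %s" format seen in the analyzer source below:

```go
package main

import "io"

func main() {
	// Flagged roughly as: reassigning variable EOF in other package io.
	// Comparisons such as errors.Is(err, io.EOF) silently break once EOF
	// is reassigned, which is what the linter guards against.
	io.EOF = nil
}
```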
diff --git a/hack/tools/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go b/hack/tools/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go index e1b47d5b95..c2a29c5299 100644 --- a/hack/tools/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go +++ b/hack/tools/vendor/github.com/curioswitch/go-reassign/internal/analyzer/analyzer.go @@ -48,23 +48,35 @@ func run(pass *analysis.Pass) (interface{}, error) { func reportImported(pass *analysis.Pass, expr ast.Expr, checkRE *regexp.Regexp, prefix string) { switch x := expr.(type) { case *ast.SelectorExpr: - if !checkRE.MatchString(x.Sel.Name) { - return - } - selectIdent, ok := x.X.(*ast.Ident) if !ok { return } + var pkgPath string if selectObj, ok := pass.TypesInfo.Uses[selectIdent]; ok { - if pkg, ok := selectObj.(*types.PkgName); !ok || pkg.Imported() == pass.Pkg { + pkg, ok := selectObj.(*types.PkgName) + if !ok || pkg.Imported() == pass.Pkg { return } + pkgPath = pkg.Imported().Path() } - pass.Reportf(expr.Pos(), "%s variable %s in other package %s", prefix, x.Sel.Name, selectIdent.Name) + matches := false + if checkRE.MatchString(x.Sel.Name) { + matches = true + } + if !matches { + // Expression may include a package name, so check that too. Support was added later so we check + // just name and qualified name separately for compatibility. + if checkRE.MatchString(pkgPath + "." + x.Sel.Name) { + matches = true + } + } + if matches { + pass.Reportf(expr.Pos(), "%s variable %s in other package %s", prefix, x.Sel.Name, selectIdent.Name) + } case *ast.Ident: use, ok := pass.TypesInfo.Uses[x].(*types.Var) if !ok { diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/.gitignore b/hack/tools/vendor/github.com/felixge/httpsnoop/.gitignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/hack/tools/vendor/github.com/felixge/httpsnoop/LICENSE.txt new file mode 100644 index 0000000000..e028b46a9b --- /dev/null +++ b/hack/tools/vendor/github.com/felixge/httpsnoop/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. 
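Recapping the intrange change a few files up: the analyzer now attaches SuggestedFixes that rewrite counted loops into Go 1.22 range-over-int form. An illustrative before/after sketch (not vendored code; requires Go 1.22+):

```go
package main

import "fmt"

func main() {
	// Before: the classic counted loop that intrange reports. The new
	// code also matches the `i <= n` / `n >= i` forms when n is an
	// integer literal, incrementing the bound by one in the rewrite.
	for i := 0; i < 5; i++ {
		fmt.Println(i)
	}

	// After: the replacement the analyzer suggests.
	for i := range 5 {
		fmt.Println(i)
	}

	// When the body never accesses the index (bodyChecker.accessed is
	// false), the suggestion drops the variable entirely.
	for range 5 {
		fmt.Println("tick")
	}
}
```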
diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/Makefile b/hack/tools/vendor/github.com/felixge/httpsnoop/Makefile
new file mode 100644
index 0000000000..4e12afdd90
--- /dev/null
+++ b/hack/tools/vendor/github.com/felixge/httpsnoop/Makefile
@@ -0,0 +1,10 @@
+.PHONY: ci generate clean
+
+ci: clean generate
+	go test -race -v ./...
+
+generate:
+	go generate .
+
+clean:
+	rm -rf *_generated*.go
diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/README.md b/hack/tools/vendor/github.com/felixge/httpsnoop/README.md
new file mode 100644
index 0000000000..cf6b42f3d7
--- /dev/null
+++ b/hack/tools/vendor/github.com/felixge/httpsnoop/README.md
@@ -0,0 +1,95 @@
+# httpsnoop
+
+Package httpsnoop provides an easy way to capture http related metrics (i.e.
+response time, bytes written, and http status code) from your application's
+http.Handlers.
+
+Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
+which is also exposed for users interested in a more low-level API.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/felixge/httpsnoop.svg)](https://pkg.go.dev/github.com/felixge/httpsnoop)
+[![Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml/badge.svg)](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml)
+
+## Usage Example
+
+```go
+// myH is your app's http handler, perhaps a http.ServeMux or similar.
+var myH http.Handler
+// wrappedH wraps myH in order to log every request.
+wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	m := httpsnoop.CaptureMetrics(myH, w, r)
+	log.Printf(
+		"%s %s (code=%d dt=%s written=%d)",
+		r.Method,
+		r.URL,
+		m.Code,
+		m.Duration,
+		m.Written,
+	)
+})
+http.ListenAndServe(":8080", wrappedH)
+```
+
+## Why this package exists
+
+Instrumenting an application's http.Handler is surprisingly difficult.
+
+However if you google for e.g. "capture ResponseWriter status code" you'll find
+lots of advice and code examples suggesting it is a fairly trivial
+undertaking. Unfortunately everything I've seen so far has a high chance of
+breaking your application.
+
+The main problem is that a `http.ResponseWriter` often implements additional
+interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
+`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
+in your own struct that also implements the `http.ResponseWriter` interface
+will hide the additional interfaces mentioned above. This has a high chance of
+introducing subtle bugs into any non-trivial application.
+
+Another approach I've seen people take is to return a struct that implements
+all of the interfaces above. However, that's also problematic, because it's
+difficult to fake some of these interfaces' behaviors when the underlying
+`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
+because an application may choose to operate differently, merely because it
+detects the presence of these additional interfaces.
+
+This package solves this problem by checking which additional interfaces a
+`http.ResponseWriter` implements, returning a wrapped version implementing the
+exact same set of interfaces.
+
+Additionally this package properly handles edge cases such as `WriteHeader` not
+being called, or called more than once, as well as concurrent calls to
+`http.ResponseWriter` methods, and even calls happening after the wrapped
+`ServeHTTP` has already returned.
+
+Unfortunately this package is not perfect either. It's possible that it is
+still missing some interfaces provided by the Go core (let me know if you find
+one), and it won't work for applications adding their own interfaces into the
+mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying
+`http.ResponseWriter` and type-assert the result to its other interfaces.
+
+However, hopefully the explanation above has sufficiently scared you away from
+rolling your own solution to this problem. httpsnoop may still break your
+application, but at least it tries to avoid it as much as possible.
+
+Anyway, the real problem here is that smuggling additional interfaces inside
+`http.ResponseWriter` is a problematic design choice, but it probably goes as
+deep as the Go language specification itself. But that's okay, I still prefer
+Go over the alternatives ;).
+
+## Performance
+
+```
+BenchmarkBaseline-8            20000    94912 ns/op
+BenchmarkCaptureMetrics-8      20000    95461 ns/op
+```
+
+As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
+overhead of ~500 ns per http request on my machine. However, the margin of
+error appears to be larger than that, therefore it should be reasonable to
+assume that the overhead introduced by `CaptureMetrics` is absolutely
+negligible.
+
+## License
+
+MIT
diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/hack/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go
new file mode 100644
index 0000000000..bec7b71b39
--- /dev/null
+++ b/hack/tools/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -0,0 +1,86 @@
+package httpsnoop
+
+import (
+	"io"
+	"net/http"
+	"time"
+)
+
+// Metrics holds metrics captured from CaptureMetrics.
+type Metrics struct {
+	// Code is the first http response code passed to the WriteHeader func of
+	// the ResponseWriter. If no such call is made, a default code of 200 is
+	// assumed instead.
+	Code int
+	// Duration is the time it took to execute the handler.
+	Duration time.Duration
+	// Written is the number of bytes successfully written by the Write or
+	// ReadFrom function of the ResponseWriter. ResponseWriters may also write
+	// data to their underlying connection directly (e.g. headers), but those
+	// are not tracked. Therefore the number of Written bytes will usually match
+	// the size of the response body.
+	Written int64
+}
+
+// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
+// returns the metrics it captured from it.
+func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
+	return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
+		hnd.ServeHTTP(ww, r)
+	})
+}
+
+// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
+// resulting metrics. This is very similar to CaptureMetrics (which is just
+// sugar on top of this func), but is a more usable interface if your
+// application doesn't use the Go http.Handler interface.
+func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
+	m := Metrics{Code: http.StatusOK}
+	m.CaptureMetrics(w, fn)
+	return m
+}
+
+// CaptureMetrics wraps w and calls fn with the wrapped w and updates
+// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn,
+// but allows one to customize the starting Metrics object.
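A short sketch of the lower-level CaptureMetricsFn entry point against an httptest recorder; the handler and values are illustrative assumptions:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/felixge/httpsnoop"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusTeapot)
		w.Write([]byte("short and stout"))
	})

	// CaptureMetricsFn hands you the wrapped writer instead of taking an
	// http.Handler, which suits frameworks with their own handler types.
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/", nil)
	m := httpsnoop.CaptureMetricsFn(rec, func(ww http.ResponseWriter) {
		h.ServeHTTP(ww, req)
	})
	fmt.Printf("code=%d written=%d dt=%s\n", m.Code, m.Written, m.Duration)
}
```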
+func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) { + var ( + start = time.Now() + headerWritten bool + hooks = Hooks{ + WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { + return func(code int) { + next(code) + + if !(code >= 100 && code <= 199) && !headerWritten { + m.Code = code + headerWritten = true + } + } + }, + + Write: func(next WriteFunc) WriteFunc { + return func(p []byte) (int, error) { + n, err := next(p) + + m.Written += int64(n) + headerWritten = true + return n, err + } + }, + + ReadFrom: func(next ReadFromFunc) ReadFromFunc { + return func(src io.Reader) (int64, error) { + n, err := next(src) + + headerWritten = true + m.Written += n + return n, err + } + }, + } + ) + + fn(Wrap(w, hooks)) + m.Duration += time.Since(start) +} diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/docs.go b/hack/tools/vendor/github.com/felixge/httpsnoop/docs.go new file mode 100644 index 0000000000..203c35b3c6 --- /dev/null +++ b/hack/tools/vendor/github.com/felixge/httpsnoop/docs.go @@ -0,0 +1,10 @@ +// Package httpsnoop provides an easy way to capture http related metrics (i.e. +// response time, bytes written, and http status code) from your application's +// http.Handlers. +// +// Doing this requires non-trivial wrapping of the http.ResponseWriter +// interface, which is also exposed for users interested in a more low-level +// API. +package httpsnoop + +//go:generate go run codegen/main.go diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/hack/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go new file mode 100644 index 0000000000..101cedde67 --- /dev/null +++ b/hack/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -0,0 +1,436 @@ +// +build go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// PushFunc is part of the http.Pusher interface. +type PushFunc func(target string, opts *http.PushOptions) error + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc + Push func(PushFunc) PushFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. 
Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// - http.Pusher +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. +func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + _, i4 := w.(http.Pusher) + switch { + // combination 1/32 + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/32 + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Pusher + }{rw, rw, rw} + // combination 3/32 + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 4/32 + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw} + // combination 5/32 + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 6/32 + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + http.Pusher + }{rw, rw, rw, rw} + // combination 7/32 + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 8/32 + case !i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 9/32 + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 10/32 + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw} + // combination 11/32 + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 12/32 + case !i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 13/32 + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 14/32 + case !i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 15/32 + case !i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 16/32 + case !i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 17/32 + case i0 && !i1 && !i2 && !i3 && !i4: + 
return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 18/32 + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Pusher + }{rw, rw, rw, rw} + // combination 19/32 + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 20/32 + case i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 21/32 + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 22/32 + case i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 23/32 + case i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 24/32 + case i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 25/32 + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 26/32 + case i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 27/32 + case i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 28/32 + case i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 29/32 + case i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 30/32 + case i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 31/32 + case i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + // combination 32/32 + case i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if 
w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +func (w *rw) Push(target string, opts *http.PushOptions) error { + f := w.w.(http.Pusher).Push + if w.h.Push != nil { + f = w.h.Push(f) + } + return f(target, opts) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/hack/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/hack/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go new file mode 100644 index 0000000000..e0951df152 --- /dev/null +++ b/hack/tools/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -0,0 +1,278 @@ +// +build !go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
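A sketch of a custom hook built on Wrap and Unwrap, counting bytes as they pass through the Write path; the httptest setup and byte totals are illustrative:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/felixge/httpsnoop"
)

func main() {
	var bytesWritten int64
	hooks := httpsnoop.Hooks{
		Write: func(next httpsnoop.WriteFunc) httpsnoop.WriteFunc {
			return func(p []byte) (int, error) {
				n, err := next(p)
				bytesWritten += int64(n) // count what actually reached the writer
				return n, err
			}
		},
	}

	rec := httptest.NewRecorder()
	w := httpsnoop.Wrap(rec, hooks)
	fmt.Fprint(w, "hello, ")
	fmt.Fprint(w, "world")

	// Unwrap recovers the original writer from any number of wrap layers.
	_ = httpsnoop.Unwrap(w)
	fmt.Println("bytes written:", bytesWritten) // 12
}
```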
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + switch { + // combination 1/16 + case !i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/16 + case !i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 3/16 + case !i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 4/16 + case !i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 5/16 + case !i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 6/16 + case !i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 7/16 + case !i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 8/16 + case !i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 9/16 + case i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 10/16 + case i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 11/16 + case i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 12/16 + case i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 13/16 + case i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 14/16 + case i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 15/16 + case i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 16/16 + case i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := 
w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/hack/tools/vendor/github.com/go-logr/logr/funcr/funcr.go b/hack/tools/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 0000000000..30568e768d --- /dev/null +++ b/hack/tools/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,914 @@ +/* +Copyright 2021 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// # Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// # Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. 
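Basic funcr usage, sketched with assumed option values; output goes through whatever write function the caller supplies:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr turns an arbitrary "write one line" function into a logr.Logger.
	log := funcr.New(
		func(prefix, args string) { fmt.Println(prefix, args) },
		funcr.Options{LogCaller: funcr.All, LogTimestamp: true, Verbosity: 1},
	)
	log = log.WithName("demo").WithValues("component", "example")
	log.Info("starting", "port", 8080)
	log.V(1).Info("verbose detail", "state", "ok") // emitted because Verbosity >= 1
	log.Error(nil, "something failed", "retryable", true)
}
```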
+type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // LogInfoLevel tells funcr what key to use to log the info level. + // If not specified, the info level will be logged as "level". + // If this is set to "", the info level will not be logged at all. + LogInfoLevel *string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []any) []any + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []any) []any + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []any) []any + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. 
+ All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. +type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...any) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...any) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...any) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. +func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. +const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + if opts.LogInfoLevel == nil { + opts.LogInfoLevel = new(string) + *opts.LogInfoLevel = "level" + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: &opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + outputFormat outputFormat + prefix string + values []any + valuesStr string + depth int + opts *Options + groupName string // for slog groups + groups []groupDef +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// groupDef represents a saved group. The values may be empty, but we don't +// know if we need to render the group until the final record is rendered. +type groupDef struct { + name string + values string +} + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []any + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []any) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. 
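As a quick orientation, a minimal sketch of how the constructors and Options above are typically wired together; the write functions and option values are arbitrary stand-ins for illustration, not part of the vendored code:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// Key=value output: prefix carries accumulated names, args the record.
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{LogTimestamp: true, Verbosity: 1})
	log.V(1).Info("hello", "count", 3) // written: level 1 <= Verbosity
	log.V(2).Info("ignored")           // discarded: level 2 > Verbosity

	// Strict JSON output: the whole record arrives as a single string.
	jlog := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
	jlog.Error(nil, "boom", "key", "value")
}
```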
+ buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + if f.outputFormat == outputJSON { + buf.WriteByte('{') // for the whole record + } + + // Render builtins + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + + // Turn the inner-most group into a string + argsStr := func() string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, true) // escape user-provided keys + + return buf.String() + }() + + // Render the stack of groups from the inside out. + bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr) + for i := len(f.groups) - 1; i >= 0; i-- { + grp := &f.groups[i] + if grp.values == "" && bodyStr == "" { + // no contents, so we must elide the whole group + continue + } + bodyStr = f.renderGroup(grp.name, grp.values, bodyStr) + } + + if bodyStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(bodyStr) + } + + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole record + } + + return buf.String() +} + +// renderGroup returns a string representation of the named group with rendered +// values and args. If the name is empty, this will return the values and args, +// joined. If the name is not empty, this will return a single key-value pair, +// where the value is a grouping of the values and args. If the values and +// args are both empty, this will return an empty string, even if the name was +// specified. +func (f Formatter) renderGroup(name string, values string, args string) string { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + needClosingBrace := false + if name != "" && (values != "" || args != "") { + buf.WriteString(f.quoted(name, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') + needClosingBrace = true + } + + continuing := false + if values != "" { + buf.WriteString(values) + continuing = true + } + + if args != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(args) + } + + if needClosingBrace { + buf.WriteByte('}') + } + + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If escapeKeys is +// true, the keys are assumed to have non-JSON-compatible characters in them +// and must be evaluated for escapes. +// +// This function returns a potentially modified version of kvList, which +// ensures that there is a value for every key (adding a value if needed) and +// that each key is a string (substituting a key if needed). +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any { + // This logic overlaps with sanitize() but saves one type-cast per key, + // which can be measurable. + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + copied := false + for i := 0; i < len(kvList); i += 2 { + k, ok := kvList[i].(string) + if !ok { + if !copied { + newList := make([]any, len(kvList)) + copy(newList, kvList) + kvList = newList + copied = true + } + k = f.nonStringKey(kvList[i]) + kvList[i] = k + } + v := kvList[i+1] + + if i > 0 { + if f.outputFormat == outputJSON { + buf.WriteByte(f.comma()) + } else { + // In theory the format could be something we don't understand. In + // practice, we control it, so it won't be. 
+			buf.WriteByte(' ')
+		}
+	}
+
+		buf.WriteString(f.quoted(k, escapeKeys))
+		buf.WriteByte(f.colon())
+		buf.WriteString(f.pretty(v))
+	}
+	return kvList
+}
+
+func (f Formatter) quoted(str string, escape bool) string {
+	if escape {
+		return prettyString(str)
+	}
+	// this is faster
+	return `"` + str + `"`
+}
+
+func (f Formatter) comma() byte {
+	if f.outputFormat == outputJSON {
+		return ','
+	}
+	return ' '
+}
+
+func (f Formatter) colon() byte {
+	if f.outputFormat == outputJSON {
+		return ':'
+	}
+	return '='
+}
+
+func (f Formatter) pretty(value any) string {
+	return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+	flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
+	if depth > f.opts.MaxLogDepth {
+		return `"<max-log-depth-exceeded>"`
+	}
+
+	// Handle types that take full control of logging.
+	if v, ok := value.(logr.Marshaler); ok {
+		// Replace the value with what the type wants to get logged.
+		// That then gets handled below via reflection.
+		value = invokeMarshaler(v)
+	}
+
+	// Handle types that want to format themselves.
+	switch v := value.(type) {
+	case fmt.Stringer:
+		value = invokeStringer(v)
+	case error:
+		value = invokeError(v)
+	}
+
+	// Handling the most common types without reflect is a small perf win.
+	switch v := value.(type) {
+	case bool:
+		return strconv.FormatBool(v)
+	case string:
+		return prettyString(v)
+	case int:
+		return strconv.FormatInt(int64(v), 10)
+	case int8:
+		return strconv.FormatInt(int64(v), 10)
+	case int16:
+		return strconv.FormatInt(int64(v), 10)
+	case int32:
+		return strconv.FormatInt(int64(v), 10)
+	case int64:
+		return strconv.FormatInt(int64(v), 10)
+	case uint:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint8:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint16:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint32:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint64:
+		return strconv.FormatUint(v, 10)
+	case uintptr:
+		return strconv.FormatUint(uint64(v), 10)
+	case float32:
+		return strconv.FormatFloat(float64(v), 'f', -1, 32)
+	case float64:
+		return strconv.FormatFloat(v, 'f', -1, 64)
+	case complex64:
+		return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+	case complex128:
+		return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+	case PseudoStruct:
+		buf := bytes.NewBuffer(make([]byte, 0, 1024))
+		v = f.sanitize(v)
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('{')
+		}
+		for i := 0; i < len(v); i += 2 {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			k, _ := v[i].(string) // sanitize() above means no need to check success
+			// arbitrary keys might need escaping
+			buf.WriteString(prettyString(k))
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+	t := reflect.TypeOf(value)
+	if t == nil {
+		return "null"
+	}
+	v := reflect.ValueOf(value)
+	switch t.Kind() {
+	case reflect.Bool:
+		return strconv.FormatBool(v.Bool())
+	case reflect.String:
+		return prettyString(v.String())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(int64(v.Int()), 10)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(uint64(v.Uint()), 10)
+	case reflect.Float32:
+		return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+	case reflect.Float64:
+		return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+	case reflect.Complex64:
+		return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+	case reflect.Complex128:
+		return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+	case reflect.Struct:
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('{')
+		}
+		printComma := false // testing i>0 is not enough because of JSON omitted fields
+		for i := 0; i < t.NumField(); i++ {
+			fld := t.Field(i)
+			if fld.PkgPath != "" {
+				// reflect says this field is only defined for non-exported fields.
+				continue
+			}
+			if !v.Field(i).CanInterface() {
+				// reflect isn't clear exactly what this means, but we can't use it.
+				continue
+			}
+			name := ""
+			omitempty := false
+			if tag, found := fld.Tag.Lookup("json"); found {
+				if tag == "-" {
+					continue
+				}
+				if comma := strings.Index(tag, ","); comma != -1 {
+					if n := tag[:comma]; n != "" {
+						name = n
+					}
+					rest := tag[comma:]
+					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+						omitempty = true
+					}
+				} else {
+					name = tag
+				}
+			}
+			if omitempty && isEmpty(v.Field(i)) {
+				continue
+			}
+			if printComma {
+				buf.WriteByte(f.comma())
+			}
+			printComma = true // if we got here, we are rendering a field
+			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+				continue
+			}
+			if name == "" {
+				name = fld.Name
+			}
+			// field names can't contain characters which need escaping
+			buf.WriteString(f.quoted(name, false))
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	case reflect.Slice, reflect.Array:
+		// If this is outputing as JSON make sure this isn't really a json.RawMessage.
+		// If so just emit "as-is" and don't pretty it as that will just print
+		// it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
+		if f.outputFormat == outputJSON {
+			if rm, ok := value.(json.RawMessage); ok {
+				// If it's empty make sure we emit an empty value as the array style would below.
+				if len(rm) > 0 {
+					buf.Write(rm)
+				} else {
+					buf.WriteString("null")
+				}
+				return buf.String()
+			}
+		}
+		buf.WriteByte('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			e := v.Index(i)
+			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+		}
+		buf.WriteByte(']')
+		return buf.String()
+	case reflect.Map:
+		buf.WriteByte('{')
+		// This does not sort the map keys, for best perf.
+		it := v.MapRange()
+		i := 0
+		for it.Next() {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			// If a map key supports TextMarshaler, use it.
+			keystr := ""
+			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+				txt, err := m.MarshalText()
+				if err != nil {
+					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+				} else {
+					keystr = string(txt)
+				}
+				keystr = prettyString(keystr)
+			} else {
+				// prettyWithFlags will produce already-escaped values
+				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+				if t.Key().Kind() != reflect.String {
+					// JSON only does string keys. Unlike Go's standard JSON, we'll
+					// convert just about anything to a string.
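+					// e.g. an int map key 42 becomes the quoted JSON key "42".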
+					keystr = prettyString(keystr)
+				}
+			}
+			buf.WriteString(keystr)
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+			i++
+		}
+		buf.WriteByte('}')
+		return buf.String()
+	case reflect.Ptr, reflect.Interface:
+		if v.IsNil() {
+			return "null"
+		}
+		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+	}
+	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+	// Avoid escaping (which does allocations) if we can.
+	if needsEscape(s) {
+		return strconv.Quote(s)
+	}
+	b := bytes.NewBuffer(make([]byte, 0, 1024))
+	b.WriteByte('"')
+	b.WriteString(s)
+	b.WriteByte('"')
+	return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+	for _, r := range s {
+		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+func isEmpty(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret any) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return s.String()
+}
+
+func invokeError(e error) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+	// File is the basename of the file for this call site.
+	File string `json:"file"`
+	// Line is the line number in the file for this call site.
+	Line int `json:"line"`
+	// Func is the function name for this call site, or empty if
+	// Options.LogCallerFunc is not enabled.
+	Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+	// +1 for this frame, +1 for Info/Error.
+	pc, file, line, ok := runtime.Caller(f.depth + 2)
+	if !ok {
+		return Caller{"<unknown>", 0, ""}
+	}
+	fn := ""
+	if f.opts.LogCallerFunc {
+		if fp := runtime.FuncForPC(pc); fp != nil {
+			fn = fp.Name()
+		}
+	}
+
+	return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v any) string {
+	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v any) string { + const snipLen = 16 + + snip := f.pretty(v) + if len(snip) > snipLen { + snip = snip[:snipLen] + } + return snip +} + +// sanitize ensures that a list of key-value pairs has a value for every key +// (adding a value if needed) and that each key is a string (substituting a key +// if needed). +func (f Formatter) sanitize(kvList []any) []any { + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + _, ok := kvList[i].(string) + if !ok { + kvList[i] = f.nonStringKey(kvList[i]) + } + } + return kvList +} + +// startGroup opens a new group scope (basically a sub-struct), which locks all +// the current saved values and starts them anew. This is needed to satisfy +// slog. +func (f *Formatter) startGroup(name string) { + // Unnamed groups are just inlined. + if name == "" { + return + } + + n := len(f.groups) + f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr}) + + // Start collecting new values. + f.groupName = name + f.valuesStr = "" + f.values = nil +} + +// Init configures this Formatter from runtime info, such as the call depth +// imposed by logr itself. +// Note that this receiver is a pointer, so depth can be saved. +func (f *Formatter) Init(info logr.RuntimeInfo) { + f.depth += info.CallDepth +} + +// Enabled checks whether an info message at the given level should be logged. +func (f Formatter) Enabled(level int) bool { + return level <= f.opts.Verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo renders an Info log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Info { + args = append(args, "caller", f.caller()) + } + if key := *f.opts.LogInfoLevel; key != "" { + args = append(args, key, level) + } + args = append(args, "msg", msg) + return prefix, f.render(args, kvList) +} + +// FormatError renders an Error log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) { + args := make([]any, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr any + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + return prefix, f.render(args, kvList) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. 
Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. +func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. +func (f *Formatter) AddValues(kvList []any) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) + + vals := f.values + if hook := f.opts.RenderValuesHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + + // Pre-render values, so we don't have to do it on each Info/Error call. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + f.flatten(buf, vals, true) // escape user-provided keys + f.valuesStr = buf.String() +} + +// AddCallDepth increases the number of stack-frames to skip when attributing +// the log line to a file and line. +func (f *Formatter) AddCallDepth(depth int) { + f.depth += depth +} diff --git a/hack/tools/vendor/github.com/go-logr/logr/funcr/slogsink.go b/hack/tools/vendor/github.com/go-logr/logr/funcr/slogsink.go new file mode 100644 index 0000000000..7bd84761e2 --- /dev/null +++ b/hack/tools/vendor/github.com/go-logr/logr/funcr/slogsink.go @@ -0,0 +1,105 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package funcr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +var _ logr.SlogSink = &fnlogger{} + +const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink + +func (l fnlogger) Handle(_ context.Context, record slog.Record) error { + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = attrToKVs(attr, kvList) + return true + }) + + if record.Level >= slog.LevelError { + l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...) + } + return nil +} + +func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = attrToKVs(attr, kvList) + } + l.AddValues(kvList) + return &l +} + +func (l fnlogger) WithGroup(name string) logr.SlogSink { + l.startGroup(name) + return &l +} + +// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups +// and other details of slog. +func attrToKVs(attr slog.Attr, kvList []any) []any { + attrVal := attr.Value.Resolve() + if attrVal.Kind() == slog.KindGroup { + groupVal := attrVal.Group() + grpKVs := make([]any, 0, 2*len(groupVal)) + for _, attr := range groupVal { + grpKVs = attrToKVs(attr, grpKVs) + } + if attr.Key == "" { + // slog says we have to inline these + kvList = append(kvList, grpKVs...) 
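+			// e.g. slog.Group("", slog.Int("a", 1)) contributes "a", 1 to kvList
+			// directly, exactly as if the attr had been logged at the top level.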
+ } else { + kvList = append(kvList, attr.Key, PseudoStruct(grpKVs)) + } + } else if attr.Key != "" { + kvList = append(kvList, attr.Key, attrVal.Any()) + } + + return kvList +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a LogSink and that API did not historically document whether +// levels could be negative or what that meant. +// +// Some example usage: +// +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(logr.ToSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l fnlogger) levelFromSlog(level slog.Level) int { + result := -level + if result < 0 { + result = 0 // because LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/hack/tools/vendor/github.com/go-logr/stdr/LICENSE b/hack/tools/vendor/github.com/go-logr/stdr/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/hack/tools/vendor/github.com/go-logr/stdr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/hack/tools/vendor/github.com/go-logr/stdr/README.md b/hack/tools/vendor/github.com/go-logr/stdr/README.md new file mode 100644 index 0000000000..5158667890 --- /dev/null +++ b/hack/tools/vendor/github.com/go-logr/stdr/README.md @@ -0,0 +1,6 @@ +# Minimal Go logging using logr and Go's standard library + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) + +This package implements the [logr interface](https://github.com/go-logr/logr) +in terms of Go's standard log package(https://pkg.go.dev/log). diff --git a/hack/tools/vendor/github.com/go-logr/stdr/stdr.go b/hack/tools/vendor/github.com/go-logr/stdr/stdr.go new file mode 100644 index 0000000000..93a8aab51b --- /dev/null +++ b/hack/tools/vendor/github.com/go-logr/stdr/stdr.go @@ -0,0 +1,170 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package stdr implements github.com/go-logr/logr.Logger in terms of +// Go's standard log package. +package stdr + +import ( + "log" + "os" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/funcr" +) + +// The global verbosity level. See SetVerbosity(). +var globalVerbosity int + +// SetVerbosity sets the global level against which all info logs will be +// compared. If this is greater than or equal to the "V" of the logger, the +// message will be logged. A higher value here means more logs will be written. +// The previous verbosity value is returned. This is not concurrent-safe - +// callers must be sure to call it from only one goroutine. +func SetVerbosity(v int) int { + old := globalVerbosity + globalVerbosity = v + return old +} + +// New returns a logr.Logger which is implemented by Go's standard log package, +// or something like it. If std is nil, this will use a default logger +// instead. +// +// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +func New(std StdLogger) logr.Logger { + return NewWithOptions(std, Options{}) +} + +// NewWithOptions returns a logr.Logger which is implemented by Go's standard +// log package, or something like it. See New for details. +func NewWithOptions(std StdLogger, opts Options) logr.Logger { + if std == nil { + // Go's log.Default() is only available in 1.16 and higher. + std = log.New(os.Stderr, "", log.LstdFlags) + } + + if opts.Depth < 0 { + opts.Depth = 0 + } + + fopts := funcr.Options{ + LogCaller: funcr.MessageClass(opts.LogCaller), + } + + sl := &logger{ + Formatter: funcr.NewFormatter(fopts), + std: std, + } + + // For skipping our own logger.Info/Error. + sl.Formatter.AddCallDepth(1 + opts.Depth) + + return logr.New(sl) +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // Depth biases the assumed number of call frames to the "true" caller. + // This is useful when the calling code calls a function which then calls + // stdr (e.g. a logging shim to another API). Values less than zero will + // be treated as zero. + Depth int + + // LogCaller tells stdr to add a "caller" key to some or all log lines. + // Go's log package has options to log this natively, too. + LogCaller MessageClass + + // TODO: add an option to log the date/time +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// StdLogger is the subset of the Go stdlib log.Logger API that is needed for +// this adapter. +type StdLogger interface { + // Output is the same as log.Output and log.Logger.Output. 
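+	//
+	// A standard *log.Logger satisfies this interface, e.g.:
+	//	stdr.New(log.New(os.Stderr, "", log.LstdFlags)).Info("msg", "key", "value")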
+ Output(calldepth int, logline string) error +} + +type logger struct { + funcr.Formatter + std StdLogger +} + +var _ logr.LogSink = &logger{} +var _ logr.CallDepthLogSink = &logger{} + +func (l logger) Enabled(level int) bool { + return globalVerbosity >= level +} + +func (l logger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l logger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l logger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +// Underlier exposes access to the underlying logging implementation. Since +// callers only have a logr.Logger, they have to know which implementation is +// in use, so this interface is less of an abstraction and more of way to test +// type conversion. +type Underlier interface { + GetUnderlying() StdLogger +} + +// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger +// is itself an interface, the result may or may not be a Go log.Logger. +func (l logger) GetUnderlying() StdLogger { + return l.std +} diff --git a/hack/tools/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/hack/tools/vendor/github.com/go-openapi/jsonpointer/.golangci.yml new file mode 100644 index 0000000000..22f8d21cca --- /dev/null +++ b/hack/tools/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -0,0 +1,61 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 45 + maligned: + suggest-new: true + dupl: + threshold: 200 + goconst: + min-len: 2 + min-occurrences: 3 + +linters: + enable-all: true + disable: + - maligned + - unparam + - lll + - gochecknoinits + - gochecknoglobals + - funlen + - godox + - gocognit + - whitespace + - wsl + - wrapcheck + - testpackage + - nlreturn + - gomnd + - exhaustivestruct + - goerr113 + - errorlint + - nestif + - godot + - gofumpt + - paralleltest + - tparallel + - thelper + - ifshort + - exhaustruct + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam + - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint + - nosnakecase diff --git a/hack/tools/vendor/github.com/go-openapi/jsonpointer/README.md b/hack/tools/vendor/github.com/go-openapi/jsonpointer/README.md index 813788aff1..0108f1d572 100644 --- a/hack/tools/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/hack/tools/vendor/github.com/go-openapi/jsonpointer/README.md @@ -1,6 +1,10 @@ -# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# gojsonpointer [![Build 
Status](https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) + +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg)](https://pkg.go.dev/github.com/go-openapi/jsonpointer) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonpointer)](https://goreportcard.com/report/github.com/go-openapi/jsonpointer) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) An implementation of JSON Pointer - Go language ## Status diff --git a/hack/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go b/hack/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go index de60dc7dd2..d970c7cf44 100644 --- a/hack/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/hack/tools/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -110,19 +110,39 @@ func SetForToken(document any, decodedToken string, value any) (any, error) { return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) } +func isNil(input any) bool { + if input == nil { + return true + } + + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) { rValue := reflect.Indirect(reflect.ValueOf(node)) kind := rValue.Kind() + if isNil(node) { + return nil, kind, fmt.Errorf("nil value has not field %q", decodedToken) + } - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) + switch typed := node.(type) { + case JSONPointable: + r, err := typed.JSONLookup(decodedToken) if err != nil { return nil, kind, err } return r, kind, nil + case *any: // case of a pointer to interface, that is not resolved by reflect.Indirect + return getSingleImpl(*typed, decodedToken, nameProvider) } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -170,7 +190,7 @@ func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameP return node.(JSONSetable).JSONSet(decodedToken, data) } - switch rValue.Kind() { + switch rValue.Kind() { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -231,8 +251,7 @@ func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.K if err != nil { return nil, knd, err } - node, kind = r, knd - + node = r } rValue := reflect.ValueOf(node) @@ -245,7 +264,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != 
reflect.Slice && knd != reflect.Array { - return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + return errors.New("only structs, pointers, maps and slices are supported for setting values") } if nameProvider == nil { @@ -284,7 +303,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { continue } - switch kind { + switch kind { //nolint:exhaustive case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { @@ -405,11 +424,11 @@ func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { case json.Delim: switch tk { case '{': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return 0, err } case '[': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return 0, err } } @@ -435,20 +454,21 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { if err != nil { return 0, err } - switch tk := tk.(type) { - case json.Delim: - switch tk { + + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { case '{': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return 0, err } case '[': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return 0, err } } } } + if !dec.More() { return 0, fmt.Errorf("token reference %q not found", decodedToken) } @@ -456,27 +476,27 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { } // drainSingle drains a single level of object or array. -// The decoder has to guarantee the begining delim (i.e. '{' or '[') has been consumed. +// The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed. 
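For reference, a small sketch of the exported surface this hunk touches (New, Get, and the Escape helper changed below); the document and pointer string here are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	doc := map[string]interface{}{
		"a/b": map[string]interface{}{"items": []interface{}{"x", "y"}},
	}

	// "/" inside a reference token must be escaped as "~1" (and "~" as "~0").
	ptr, err := jsonpointer.New("/" + jsonpointer.Escape("a/b") + "/items/1")
	if err != nil {
		panic(err)
	}

	val, kind, err := ptr.Get(doc)
	fmt.Println(val, kind, err) // y string <nil>
}
```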
func drainSingle(dec *json.Decoder) error { for dec.More() { tk, err := dec.Token() if err != nil { return err } - switch tk := tk.(type) { - case json.Delim: - switch tk { + if delim, isDelim := tk.(json.Delim); isDelim { + switch delim { case '{': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return err } case '[': - if err := drainSingle(dec); err != nil { + if err = drainSingle(dec); err != nil { return err } } } } + // Consumes the ending delim if _, err := dec.Token(); err != nil { return err @@ -498,14 +518,14 @@ const ( // Unescape unescapes a json pointer reference token string to the original representation func Unescape(token string) string { - step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) - step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) + step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) + step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) return step2 } // Escape escapes a pointer reference token string func Escape(token string) string { - step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) - step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) + step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) + step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) return step2 } diff --git a/hack/tools/vendor/github.com/go-openapi/swag/.gitignore b/hack/tools/vendor/github.com/go-openapi/swag/.gitignore index d69b53accc..c4b1b64f04 100644 --- a/hack/tools/vendor/github.com/go-openapi/swag/.gitignore +++ b/hack/tools/vendor/github.com/go-openapi/swag/.gitignore @@ -2,3 +2,4 @@ secrets.yml vendor Godeps .idea +*.out diff --git a/hack/tools/vendor/github.com/go-openapi/swag/.golangci.yml b/hack/tools/vendor/github.com/go-openapi/swag/.golangci.yml index bf503e4000..80e2be0042 100644 --- a/hack/tools/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/hack/tools/vendor/github.com/go-openapi/swag/.golangci.yml @@ -4,14 +4,14 @@ linters-settings: golint: min-confidence: 0 gocyclo: - min-complexity: 25 + min-complexity: 45 maligned: suggest-new: true dupl: - threshold: 100 + threshold: 200 goconst: min-len: 3 - min-occurrences: 2 + min-occurrences: 3 linters: enable-all: true @@ -20,35 +20,41 @@ linters: - lll - gochecknoinits - gochecknoglobals - - nlreturn - - testpackage + - funlen + - godox + - gocognit + - whitespace + - wsl - wrapcheck + - testpackage + - nlreturn - gomnd - - exhaustive - exhaustivestruct - goerr113 - - wsl - - whitespace - - gofumpt - - godot + - errorlint - nestif - - godox - - funlen - - gci - - gocognit + - godot + - gofumpt - paralleltest + - tparallel - thelper - ifshort - - gomoddirectives - - cyclop - - forcetypeassert - - ireturn - - tagliatelle - - varnamelen - - goimports - - tenv - - golint - exhaustruct - - nilnil + - varnamelen + - gci + - depguard + - errchkjson + - inamedparam - nonamedreturns + - musttag + - ireturn + - forcetypeassert + - cyclop + # deprecated linters + - deadcode + - interfacer + - scopelint + - varcheck + - structcheck + - golint - nosnakecase diff --git a/hack/tools/vendor/github.com/go-openapi/swag/BENCHMARK.md b/hack/tools/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 0000000000..e7f28ed6b7 --- /dev/null +++ b/hack/tools/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: 
github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/hack/tools/vendor/github.com/go-openapi/swag/README.md b/hack/tools/vendor/github.com/go-openapi/swag/README.md index 217f6fa505..a729222998 100644 --- a/hack/tools/vendor/github.com/go-openapi/swag/README.md +++ b/hack/tools/vendor/github.com/go-openapi/swag/README.md @@ -1,7 +1,8 @@ -# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) +# Swag [![Build Status](https://github.com/go-openapi/swag/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/swag/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) +[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) +[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/swag.svg)](https://pkg.go.dev/github.com/go-openapi/swag) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) Contains a bunch of helper functions for go-openapi and go-swagger projects. @@ -18,4 +19,5 @@ You may also use it standalone for your projects. 
This repo has only few dependencies outside of the standard library: -* YAML utilities depend on gopkg.in/yaml.v2 +* YAML utilities depend on `gopkg.in/yaml.v3` +* `github.com/mailru/easyjson v0.7.7` diff --git a/hack/tools/vendor/github.com/go-openapi/swag/initialism_index.go b/hack/tools/vendor/github.com/go-openapi/swag/initialism_index.go new file mode 100644 index 0000000000..20a359bb60 --- /dev/null +++ b/hack/tools/vendor/github.com/go-openapi/swag/initialism_index.go @@ -0,0 +1,202 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import ( + "sort" + "strings" + "sync" +) + +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. + commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc int + for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort 
again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + +// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. +// Since go1.9, this may be implemented with sync.Map. +type indexOfInitialisms struct { + sortMutex *sync.Mutex + index *sync.Map +} + +func newIndexOfInitialisms() *indexOfInitialisms { + return &indexOfInitialisms{ + sortMutex: new(sync.Mutex), + index: new(sync.Map), + } +} + +func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + for k, v := range initial { + m.index.Store(k, v) + } + return m +} + +func (m *indexOfInitialisms) isInitialism(key string) bool { + _, ok := m.index.Load(key) + return ok +} + +func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { + m.index.Store(key, true) + return m +} + +func (m *indexOfInitialisms) sorted() (result []string) { + m.sortMutex.Lock() + defer m.sortMutex.Unlock() + m.index.Range(func(key, _ interface{}) bool { + k := key.(string) + result = append(result, k) + return true + }) + sort.Sort(sort.Reverse(byInitialism(result))) + return +} + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/hack/tools/vendor/github.com/go-openapi/swag/loading.go b/hack/tools/vendor/github.com/go-openapi/swag/loading.go index 00038c3773..783442fddf 100644 --- a/hack/tools/vendor/github.com/go-openapi/swag/loading.go +++ b/hack/tools/vendor/github.com/go-openapi/swag/loading.go @@ -21,6 +21,7 @@ import ( "net/http" "net/url" "os" + "path" "path/filepath" "runtime" "strings" @@ -40,43 +41,97 @@ var LoadHTTPBasicAuthPassword = "" var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) +func LoadFromFileOrHTTP(pth string) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(pth) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout -func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) +func LoadFromFileOrHTTPWithTimeout(pth string, timeout time.Duration) ([]byte, error) { + return LoadStrategy(pth, os.ReadFile, loadHTTPBytes(timeout))(pth) } -// LoadStrategy returns a loader function for a given path or uri -func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { - if strings.HasPrefix(path, "http") { +// LoadStrategy returns a loader function for a given path or URI. +// +// The load strategy returns the remote load for any path starting with `http`. +// So this works for any URI with a scheme `http` or `https`. +// +// The fallback strategy is to call the local loader. 
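For orientation, a sketch of how the reworked loader is driven (the spec path and the trivial remote fetcher below are illustrative, not part of this patch):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"

	"github.com/go-openapi/swag"
)

func main() {
	// The remote loader is only selected for paths starting with "http".
	remote := func(u string) ([]byte, error) {
		resp, err := http.Get(u)
		if err != nil {
			return nil, err
		}
		defer resp.Body.Close()
		return io.ReadAll(resp.Body)
	}

	// For a plain relative path, LoadStrategy returns the local branch,
	// which unescapes the path and normalizes slashes before os.ReadFile.
	load := swag.LoadStrategy("./spec.yaml", os.ReadFile, remote)
	b, err := load("./spec.yaml")
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Printf("read %d bytes\n", len(b))
}
```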
+// +// The local loader takes a local file system path (absolute or relative) as argument, +// or alternatively a `file://...` URI, **without host** (see also below for windows). +// +// There are a few liberalities, initially intended to be tolerant regarding the URI syntax, +// especially on windows. +// +// Before the local loader is called, the given path is transformed: +// - percent-encoded characters are unescaped +// - simple paths (e.g. `./folder/file`) are passed as-is +// - on windows, occurrences of `/` are replaced by `\`, so providing a relative path such a `folder/file` works too. +// +// For paths provided as URIs with the "file" scheme, please note that: +// - `file://` is simply stripped. +// This means that the host part of the URI is not parsed at all. +// For example, `file:///folder/file" becomes "/folder/file`, +// but `file://localhost/folder/file` becomes `localhost/folder/file` on unix systems. +// Similarly, `file://./folder/file` yields `./folder/file`. +// - on windows, `file://...` can take a host so as to specify an UNC share location. +// +// Reminder about windows-specifics: +// - `file://host/folder/file` becomes an UNC path like `\\host\folder\file` (no port specification is supported) +// - `file:///c:/folder/file` becomes `C:\folder\file` +// - `file://c:/folder/file` is tolerated (without leading `/`) and becomes `c:\folder\file` +func LoadStrategy(pth string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { + if strings.HasPrefix(pth, "http") { return remote } - return func(pth string) ([]byte, error) { - upth, err := pathUnescape(pth) + + return func(p string) ([]byte, error) { + upth, err := url.PathUnescape(p) if err != nil { return nil, err } - if strings.HasPrefix(pth, `file://`) { - if runtime.GOOS == "windows" { - // support for canonical file URIs on windows. - // Zero tolerance here for dodgy URIs. - u, _ := url.Parse(upth) - if u.Host != "" { - // assume UNC name (volume share) - // file://host/share/folder\... ==> \\host\share\path\folder - // NOTE: UNC port not yet supported - upth = strings.Join([]string{`\`, u.Host, u.Path}, `\`) - } else { - // file:///c:/folder/... ==> just remove the leading slash - upth = strings.TrimPrefix(upth, `file:///`) - } - } else { - upth = strings.TrimPrefix(upth, `file://`) + if !strings.HasPrefix(p, `file://`) { + // regular file path provided: just normalize slashes + return local(filepath.FromSlash(upth)) + } + + if runtime.GOOS != "windows" { + // crude processing: this leaves full URIs with a host with a (mostly) unexpected result + upth = strings.TrimPrefix(upth, `file://`) + + return local(filepath.FromSlash(upth)) + } + + // windows-only pre-processing of file://... URIs + + // support for canonical file URIs on windows. + u, err := url.Parse(filepath.ToSlash(upth)) + if err != nil { + return nil, err + } + + if u.Host != "" { + // assume UNC name (volume share) + // NOTE: UNC port not yet supported + + // when the "host" segment is a drive letter: + // file://C:/folder/... => C:\folder + upth = path.Clean(strings.Join([]string{u.Host, u.Path}, `/`)) + if !strings.HasSuffix(u.Host, ":") && u.Host[0] != '.' { + // tolerance: if we have a leading dot, this can't be a host + // file://host/share/folder\... 
==> \\host\share\path\folder + upth = "//" + upth + } + } else { + // no host, let's figure out if this is a drive letter + upth = strings.TrimPrefix(upth, `file://`) + first, _, _ := strings.Cut(strings.TrimPrefix(u.Path, "/"), "/") + if strings.HasSuffix(first, ":") { + // drive letter in the first segment: + // file:///c:/folder/... ==> strip the leading slash + upth = strings.TrimPrefix(upth, `/`) } } diff --git a/hack/tools/vendor/github.com/go-openapi/swag/name_lexem.go b/hack/tools/vendor/github.com/go-openapi/swag/name_lexem.go index aa7f6a9bb8..8bb64ac32f 100644 --- a/hack/tools/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/hack/tools/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,74 +14,80 @@ package swag -import "unicode" +import ( + "unicode" + "unicode/utf8" +) type ( - nameLexem interface { - GetUnsafeGoName() string - GetOriginal() string - IsInitialism() bool - } + lexemKind uint8 - initialismNameLexem struct { + nameLexem struct { original string matchedInitialism string + kind lexemKind } +) - casualNameLexem struct { - original string - } +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName ) -func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { - return &initialismNameLexem{ +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) *casualNameLexem { - return &casualNameLexem{ +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, original: original, } } -func (l *initialismNameLexem) GetUnsafeGoName() string { - return l.matchedInitialism -} +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) -func (l *casualNameLexem) GetUnsafeGoName() string { - var first rune - var rest string for i, orig := range l.original { if i == 0 { first = orig continue } + if i > 0 { rest = l.original[i:] break } } + if len(l.original) > 1 { - return string(unicode.ToUpper(first)) + lower(rest) + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() } return l.original } -func (l *initialismNameLexem) GetOriginal() string { +func (l nameLexem) GetOriginal() string { return l.original } -func (l *casualNameLexem) GetOriginal() string { - return l.original -} - -func (l *initialismNameLexem) IsInitialism() bool { - return true -} - -func (l *casualNameLexem) IsInitialism() bool { - return false +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName } diff --git a/hack/tools/vendor/github.com/go-openapi/swag/post_go19.go b/hack/tools/vendor/github.com/go-openapi/swag/post_go19.go deleted file mode 100644 index 7c7da9c088..0000000000 --- a/hack/tools/vendor/github.com/go-openapi/swag/post_go19.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.9 -// +build go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Since go1.9, this may be implemented with sync.Map. -type indexOfInitialisms struct { - sortMutex *sync.Mutex - index *sync.Map -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - sortMutex: new(sync.Mutex), - index: new(sync.Map), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - for k, v := range initial { - m.index.Store(k, v) - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - _, ok := m.index.Load(key) - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.index.Store(key, true) - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - m.index.Range(func(key, value interface{}) bool { - k := key.(string) - result = append(result, k) - return true - }) - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/hack/tools/vendor/github.com/go-openapi/swag/pre_go19.go b/hack/tools/vendor/github.com/go-openapi/swag/pre_go19.go deleted file mode 100644 index 0565db377b..0000000000 --- a/hack/tools/vendor/github.com/go-openapi/swag/pre_go19.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.9 -// +build !go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Before go1.9, this may be implemented with a mutex on the map. 
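Both deleted build-tagged variants implemented the same index; the copy retained in initialism_index.go (earlier in this patch) keeps the sync.Map flavor. A self-contained sketch of the store/load/range pattern it relies on, with stand-in names:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var index sync.Map // string -> bool, as in indexOfInitialisms

	// add / isInitialism boil down to Store and Load.
	index.Store("HTTP", true)
	_, ok := index.Load("HTTP")
	fmt.Println(ok) // true

	// sorted() collects keys via Range before sorting them.
	index.Range(func(key, _ any) bool {
		fmt.Println(key.(string))
		return true // continue iteration
	})
}
```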
-type indexOfInitialisms struct { - getMutex *sync.Mutex - index map[string]bool -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - getMutex: new(sync.Mutex), - index: make(map[string]bool, 50), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k, v := range initial { - m.index[k] = v - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - m.getMutex.Lock() - defer m.getMutex.Unlock() - _, ok := m.index[key] - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - m.index[key] = true - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k := range m.index { - result = append(result, k) - } - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/hack/tools/vendor/github.com/go-openapi/swag/split.go b/hack/tools/vendor/github.com/go-openapi/swag/split.go index a1825fb7dc..274727a866 100644 --- a/hack/tools/vendor/github.com/go-openapi/swag/split.go +++ b/hack/tools/vendor/github.com/go-openapi/swag/split.go @@ -15,124 +15,269 @@ package swag import ( + "bytes" + "sync" "unicode" + "unicode/utf8" ) -var nameReplaceTable = map[rune]string{ - '@': "At ", - '&': "And ", - '|': "Pipe ", - '$': "Dollar ", - '!': "Bang ", - '-': "", - '_': "", -} - type ( splitter struct { - postSplitInitialismCheck bool initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + postSplitInitialismCheck bool + } + + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch +) + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. + + matchesPool struct { + *sync.Pool } - splitterOption func(*splitter) *splitter + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } ) -// split calls the splitter; splitter provides more control and post options +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. +func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. 
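The new nameReplaceTable switch (just above) replaces the old package-level map without changing behavior. A stand-alone sketch of the substitution it performs; the helper is a local copy, since the real function is unexported:

```go
package main

import (
	"fmt"
	"strings"
)

// replaceSpecial mirrors part of the nameReplaceTable switch above.
func replaceSpecial(r rune) (string, bool) {
	switch r {
	case '@':
		return "At ", true
	case '&':
		return "And ", true
	case '-', '_':
		return "", true // separators are dropped
	default:
		return "", false
	}
}

func main() {
	var b strings.Builder
	for _, r := range "user@main-db" {
		if rep, ok := replaceSpecial(r); ok {
			b.WriteString(rep)
			continue
		}
		b.WriteRune(r)
	}
	fmt.Println(b.String()) // userAt maindb
}
```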
+// +// Use newSplitter for more control and options func split(str string) []string { - lexems := newSplitter().split(str) - result := make([]string, 0, len(lexems)) + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) - for _, lexem := range lexems { + for _, lexem := range *lexems { result = append(result, lexem.GetOriginal()) } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) return result } -func (s *splitter) split(str string) []nameLexem { - return s.toNameLexems(str) -} - -func newSplitter(options ...splitterOption) *splitter { - splitter := &splitter{ +func newSplitter(options ...splitterOption) splitter { + s := splitter{ postSplitInitialismCheck: false, initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - splitter = option(splitter) + option(&s) } - return splitter + return s } // withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) *splitter { +func withPostSplitInitialismCheck(s *splitter) { s.postSplitInitialismCheck = true +} + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + return s } -type ( - initialismMatch struct { - start, end int - body []rune - complete bool +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) } - initialismMatches []*initialismMatch -) -func (s *splitter) toNameLexems(name string) []nameLexem { + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { - matches := make(initialismMatches, 0) +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches *initialismMatches for currentRunePosition, currentRune := range nameRunes { - newMatches := make(initialismMatches, 0, len(matches)) + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). 
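The Borrow/Redeem helpers above are thin wrappers over sync.Pool: get, reset, use, put back. A minimal self-contained version of the same recycling pattern, independent of this package's unexported types:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() any { return new(bytes.Buffer) },
}

func render(name string) string {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset() // recycled buffers keep capacity but must be cleared
	defer bufPool.Put(buf)

	buf.WriteString("hello, ")
	buf.WriteString(name)
	return buf.String()
}

func main() {
	fmt.Println(render("world")) // hello, world
}
```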
+ newMatches := poolOfMatches.BorrowMatches() // check current initialism matches - for _, match := range matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - newMatches = append(newMatches, match) - continue - } + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue + } - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if !s.initialismRuneEqual(currentMatchRune, currentRune) { - continue - } + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } } + + match.complete = true + match.end = currentRunePosition } - match.complete = true - match.end = currentRunePosition + *newMatches = append(*newMatches, match) } - - newMatches = append(newMatches, match) } // check for new initialism matches - for _, initialism := range s.initialisms { - initialismRunes := []rune(initialism) - if s.initialismRuneEqual(initialismRunes[0], currentRune) { - newMatches = append(newMatches, &initialismMatch{ + for i := range s.initialisms { + initialismRunes := s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { } } + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } matches = newMatches } + // up to the caller to redeem this last slice return matches } -func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { - nameLexems := make([]nameLexem, 0) +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() - var lastAcceptedMatch *initialismMatch - for _, match := range matches { + var lastAcceptedMatch initialismMatch + for _, match := range *matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch == nil; firstMatch { - nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) 
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - nameLexems = append(nameLexems, s.breakCasualString(middle)...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch == nil { - return s.breakCasualString(nameRunes) - } - - if lastAcceptedMatch.end+1 != len(nameRunes) { + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { rest := nameRunes[lastAcceptedMatch.end+1:] - nameLexems = append(nameLexems, s.breakCasualString(rest)...) + s.appendBrokenDownCasualString(nameLexems, rest) } - return nameLexems -} + poolOfMatches.RedeemMatches(matches) -func (s *splitter) initialismRuneEqual(a, b rune) bool { - return a == b + return nameLexems } -func (s *splitter) breakInitialism(original string) nameLexem { +func (s splitter) breakInitialism(original string) nameLexem { return newInitialismNameLexem(original, original) } -func (s *splitter) breakCasualString(str []rune) []nameLexem { - segments := make([]nameLexem, 0) - currentSegment := "" +func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { + currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused + defer func() { + poolOfBuffers.RedeemBuffer(currentSegment) + }() addCasualNameLexem := func(original string) { - segments = append(segments, newCasualNameLexem(original)) + *segments = append(*segments, newCasualNameLexem(original)) } addInitialismNameLexem := func(original, match string) { - segments = append(segments, newInitialismNameLexem(original, match)) + *segments = append(*segments, newInitialismNameLexem(original, match)) } - addNameLexem := func(original string) { - if s.postSplitInitialismCheck { - for _, initialism := range s.initialisms { - if upper(initialism) == upper(original) { - addInitialismNameLexem(original, initialism) + var addNameLexem func(string) + if s.postSplitInitialismCheck { + addNameLexem = func(original string) { + for i := range s.initialisms { + if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { + addInitialismNameLexem(original, s.initialisms[i]) + return } } - } - addCasualNameLexem(original) + addCasualNameLexem(original) + } + } else { + addNameLexem = addCasualNameLexem } - for _, rn := range string(str) { - if replace, found := nameReplaceTable[rn]; found { - if currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + for _, rn := range str { + if replace, found := nameReplaceTable(rn); found { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } if replace != "" { @@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem { } if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { - if currentSegment != "" { - 
addNameLexem(currentSegment) - currentSegment = "" + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } continue } if unicode.IsUpper(rn) { - if currentSegment != "" { - addNameLexem(currentSegment) + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } - currentSegment = "" + currentSegment.Reset() } - currentSegment += string(rn) + currentSegment.WriteRune(rn) + } + + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. +func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && + !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } - if currentSegment != "" { - addNameLexem(currentSegment) + i += size } - return segments + return true } diff --git a/hack/tools/vendor/github.com/go-openapi/swag/string_bytes.go b/hack/tools/vendor/github.com/go-openapi/swag/string_bytes.go new file mode 100644 index 0000000000..90745d5ca9 --- /dev/null +++ b/hack/tools/vendor/github.com/go-openapi/swag/string_bytes.go @@ -0,0 +1,8 @@ +package swag + +import "unsafe" + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. +func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} diff --git a/hack/tools/vendor/github.com/go-openapi/swag/util.go b/hack/tools/vendor/github.com/go-openapi/swag/util.go index d971fbe34b..5051401c49 100644 --- a/hack/tools/vendor/github.com/go-openapi/swag/util.go +++ b/hack/tools/vendor/github.com/go-openapi/swag/util.go @@ -18,76 +18,25 @@ import ( "reflect" "strings" "unicode" + "unicode/utf8" ) -// commonInitialisms are common acronyms that are kept as whole uppercased words. -var commonInitialisms *indexOfInitialisms - -// initialisms is a slice of sorted initialisms -var initialisms []string - -var isInitialism func(string) bool - // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // +// The prefix function is assumed to return a string that starts with an upper case letter. +// // e.g. 
to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - var configuredInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, +func prefixFunc(name, in string) string { + if GoNamePrefixFunc == nil { + return "X" + in } - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - - // a test function - isInitialism = commonInitialisms.isInitialism + return GoNamePrefixFunc(name) + in } const ( @@ -156,25 +105,9 @@ func SplitByFormat(data, format string) []string { return result } -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} - // Removes leading whitespaces func trim(str string) string { - return strings.Trim(str, " ") + return strings.TrimSpace(str) } // Shortcut to strings.ToUpper() @@ -188,15 +121,20 @@ func lower(str string) string { } // Camelize an uppercased word -func Camelize(word string) (camelized string) { +func Camelize(word string) string { + camelized := poolOfBuffers.BorrowBuffer(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + for pos, ru := range []rune(word) { if pos > 0 { - camelized += string(unicode.ToLower(ru)) + camelized.WriteRune(unicode.ToLower(ru)) } else { - camelized += string(unicode.ToUpper(ru)) + camelized.WriteRune(unicode.ToUpper(ru)) } } - return + return camelized.String() } // ToFileName lowercases and underscores a go type name @@ -224,33 +162,40 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) - out := make([]string, 0, len(in)) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + out := make([]string, 0, len(*in)) - for _, w := range in { + for _, w := range *in { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { - out = append(out, w.GetOriginal()) + out = append(out, trim(w.GetOriginal())) } } + poolOfLexems.RedeemLexems(in) return strings.Join(out, " ") } // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(in)) - 
for _, w := range in { - original := w.GetOriginal() + out := make([]string, 0, len(*in)) + for _, w := range *in { + original := trim(w.GetOriginal()) if !w.IsInitialism() { out = append(out, Camelize(original)) } else { out = append(out, original) } } + poolOfLexems.RedeemLexems(in) + return strings.Join(out, " ") } @@ -264,7 +209,7 @@ func ToJSONName(name string) string { out = append(out, lower(w)) continue } - out = append(out, Camelize(w)) + out = append(out, Camelize(trim(w))) } return strings.Join(out, "") } @@ -283,35 +228,70 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - lexems := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + lexems := s.split(name) + poolOfSplitters.RedeemSplitter(s) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems + + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() + + // check if not starting with a letter, upper case + firstPart := lexemes[0].GetUnsafeGoName() + if lexemes[0].IsInitialism() { + firstPart = upper(firstPart) + } + + if c := firstPart[0]; c < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= c && c <= 'Z': + result.WriteString(firstPart) + case 'a' <= c && c <= 'z': + result.WriteByte(c - 'a' + 'A') + result.WriteString(firstPart[1:]) + default: + result.WriteString(prefixFunc(name, firstPart)) + // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: + // assume this is always the case + } + } else { + // unicode + firstRune, _ := utf8.DecodeRuneInString(firstPart) + switch { + case !unicode.IsLetter(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + case !unicode.IsUpper(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + /* + result.WriteRune(unicode.ToUpper(firstRune)) + result.WriteString(firstPart[offset:]) + */ + default: + result.WriteString(firstPart) + } + } - result := "" - for _, lexem := range lexems { + for _, lexem := range lexemes[1:] { goName := lexem.GetUnsafeGoName() // to support old behavior if lexem.IsInitialism() { goName = upper(goName) } - result += goName + result.WriteString(goName) } - if len(result) > 0 { - // Only prefix with X when the first character isn't an ascii letter - first := []rune(result)[0] - if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { - if GoNamePrefixFunc == nil { - return "X" + result - } - result = GoNamePrefixFunc(name) + result - } - first = []rune(result)[0] - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) - } - } - - return result + return result.String() } // ContainsStrings searches a slice of strings for a case-sensitive match @@ -343,7 +323,7 @@ type zeroable interface { func IsZero(data interface{}) bool { v := reflect.ValueOf(data) // check for nil data - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: if v.IsNil() { return true @@ -356,7 +336,7 @@ func IsZero(data interface{}) bool { } // continue with slightly more complex reflection - switch v.Kind() { + switch v.Kind() { //nolint:exhaustive case reflect.String: return v.Len() == 0 case reflect.Bool: @@ -376,16 +356,6 @@ func IsZero(data 
interface{}) bool { } } -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() -} - // CommandLineOptionsGroup represents a group of user-defined command line options type CommandLineOptionsGroup struct { ShortDescription string diff --git a/hack/tools/vendor/github.com/go-openapi/swag/yaml.go b/hack/tools/vendor/github.com/go-openapi/swag/yaml.go index f09ee609f3..f59e025932 100644 --- a/hack/tools/vendor/github.com/go-openapi/swag/yaml.go +++ b/hack/tools/vendor/github.com/go-openapi/swag/yaml.go @@ -16,8 +16,11 @@ package swag import ( "encoding/json" + "errors" "fmt" "path/filepath" + "reflect" + "sort" "strconv" "github.com/mailru/easyjson/jlexer" @@ -48,7 +51,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) { return nil, err } if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, fmt.Errorf("only YAML documents that are objects are supported") + return nil, errors.New("only YAML documents that are objects are supported") } return &document, nil } @@ -147,7 +150,7 @@ func yamlScalar(node *yaml.Node) (interface{}, error) { case yamlTimestamp: return node.Value, nil case yamlNull: - return nil, nil + return nil, nil //nolint:nilnil default: return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) } @@ -245,7 +248,27 @@ func (s JSONMapSlice) MarshalYAML() (interface{}, error) { return yaml.Marshal(&n) } +func isNil(input interface{}) bool { + if input == nil { + return true + } + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} + func json2yaml(item interface{}) (*yaml.Node, error) { + if isNil(item) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Value: "null", + }, nil + } + switch val := item.(type) { case JSONMapSlice: var n yaml.Node @@ -265,7 +288,14 @@ func json2yaml(item interface{}) (*yaml.Node, error) { case map[string]interface{}: var n yaml.Node n.Kind = yaml.MappingNode - for k, v := range val { + keys := make([]string, 0, len(val)) + for k := range val { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + v := val[k] childNode, err := json2yaml(v) if err != nil { return nil, err @@ -318,8 +348,9 @@ func json2yaml(item interface{}) (*yaml.Node, error) { Tag: yamlBoolScalar, Value: strconv.FormatBool(val), }, nil + default: + return nil, fmt.Errorf("unhandled type: %T", val) } - return nil, nil } // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice diff --git a/hack/tools/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go b/hack/tools/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go index 4245e5ad72..365a1d0477 100644 --- a/hack/tools/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go +++ b/hack/tools/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go @@ -14,7 +14,10 @@ import ( ) var ( - reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`) + reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`) + reXMLComments = regexp.MustCompile(`(?s)(<!--)(.*?)(-->)`) + reSpaces = regexp.MustCompile(`(?s)>\s+<`) + reNewlines = regexp.MustCompile(`\r*\n`) // NL is the newline string used in XML output.
NL = "\n" ) @@ -33,20 +36,19 @@ func FormatXML(xmls, prefix, indent string, nestedTagsInComments ...bool) string if len(nestedTagsInComments) > 0 { nestedTagsInComment = nestedTagsInComments[0] } - reXmlComments := regexp.MustCompile(`(?s)(<!--)(.*?)(-->)`) - src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><") + src := reSpaces.ReplaceAllString(xmls, "><") if nestedTagsInComment { - src = reXmlComments.ReplaceAllStringFunc(src, func(m string) string { - parts := reXmlComments.FindStringSubmatch(m) - p2 := regexp.MustCompile(`\r*\n`).ReplaceAllString(parts[2], " ") + src = reXMLComments.ReplaceAllStringFunc(src, func(m string) string { + parts := reXMLComments.FindStringSubmatch(m) + p2 := reNewlines.ReplaceAllString(parts[2], " ") return parts[1] + html.EscapeString(p2) + parts[3] }) } rf := replaceTag(prefix, indent) r := prefix + reg.ReplaceAllStringFunc(src, rf) if nestedTagsInComment { - r = reXmlComments.ReplaceAllStringFunc(r, func(m string) string { - parts := reXmlComments.FindStringSubmatch(m) + r = reXMLComments.ReplaceAllStringFunc(r, func(m string) string { + parts := reXMLComments.FindStringSubmatch(m) return parts[1] + html.UnescapeString(parts[2]) + parts[3] }) } diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/.gitignore b/hack/tools/vendor/github.com/goccy/go-yaml/.gitignore new file mode 100644 index 0000000000..e660fd93d3 --- /dev/null +++ b/hack/tools/vendor/github.com/goccy/go-yaml/.gitignore @@ -0,0 +1 @@ +bin/ diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/.golangci.yml b/hack/tools/vendor/github.com/goccy/go-yaml/.golangci.yml new file mode 100644 index 0000000000..ec8d7313da --- /dev/null +++ b/hack/tools/vendor/github.com/goccy/go-yaml/.golangci.yml @@ -0,0 +1,42 @@ +run: + timeout: 5m + +linters-settings: + errcheck: + check-type-assertions: true + gci: + sections: + - "standard" + - "default" + - "prefix(github.com/goccy/go-yaml)" + - "blank" + - "dot" + gofmt: + simplify: true + govet: + disable: + - tests + misspell: + locale: US + staticcheck: + checks: ["all", "-ST1000", "-ST1005"] + +linters: + disable-all: true + enable: + - errcheck + - gci + - gofmt + - gosimple + - govet + - ineffassign + - misspell + - staticcheck + - typecheck + - unused + +issues: + exclude-rules: + - path: _test\.go + linters: + - staticcheck diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/Makefile b/hack/tools/vendor/github.com/goccy/go-yaml/Makefile index 1b1d92397c..02a38dd210 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/Makefile +++ b/hack/tools/vendor/github.com/goccy/go-yaml/Makefile @@ -17,3 +17,31 @@ cover-html: cover .PHONY: ycat/build ycat/build: go build -o ycat ./cmd/ycat + +.PHONY: lint +lint: golangci-lint ## Run golangci-lint + @$(GOLANGCI_LINT) run + +.PHONY: fmt +fmt: golangci-lint ## Ensure consistent code style + @go mod tidy + @go fmt ./... + @$(GOLANGCI_LINT) run --fix + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries +GOLANGCI_LINT ?= $(LOCALBIN)/golangci-lint + +## Tool Versions +GOLANGCI_VERSION := 1.61.0 + +.PHONY: golangci-lint +.PHONY: $(GOLANGCI_LINT) +golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
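The xmlfmt hunk above applies a standard Go optimization: regexp.MustCompile calls move from inside FormatXML to package scope, so each pattern compiles once instead of on every call. A minimal sketch of the same idea, using one of the hoisted patterns:

```go
package main

import (
	"fmt"
	"regexp"
)

// Compiled once at package initialization, not per call.
var reSpaces = regexp.MustCompile(`(?s)>\s+<`)

func collapse(xml string) string {
	return reSpaces.ReplaceAllString(xml, "><")
}

func main() {
	fmt.Println(collapse("<a> <b/> </a>")) // <a><b/></a>
}
```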
+$(GOLANGCI_LINT): $(LOCALBIN) + @test -s $(LOCALBIN)/golangci-lint && $(LOCALBIN)/golangci-lint version --format short | grep -q $(GOLANGCI_VERSION) || \ + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(LOCALBIN) v$(GOLANGCI_VERSION) diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/ast/ast.go b/hack/tools/vendor/github.com/goccy/go-yaml/ast/ast.go index b4d5ec4180..eb27588101 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/ast/ast.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/ast/ast.go @@ -1,6 +1,7 @@ package ast import ( + "errors" "fmt" "io" "math" @@ -8,13 +9,12 @@ import ( "strings" "github.com/goccy/go-yaml/token" - "golang.org/x/xerrors" ) var ( - ErrInvalidTokenType = xerrors.New("invalid token type") - ErrInvalidAnchorName = xerrors.New("invalid anchor name") - ErrInvalidAliasName = xerrors.New("invalid alias name") + ErrInvalidTokenType = errors.New("invalid token type") + ErrInvalidAnchorName = errors.New("invalid anchor name") + ErrInvalidAliasName = errors.New("invalid alias name") ) // NodeType type identifier of node @@ -1491,7 +1491,7 @@ type SequenceNode struct { // Replace replace value node. func (n *SequenceNode) Replace(idx int, value Node) error { if len(n.Values) <= idx { - return xerrors.Errorf( + return fmt.Errorf( "invalid index for sequence: sequence length is %d, but specified %d index", len(n.Values), idx, ) @@ -2092,10 +2092,10 @@ func Merge(dst Node, src Node) error { err := &ErrInvalidMergeType{dst: dst, src: src} switch dst.Type() { case DocumentType: - node := dst.(*DocumentNode) + node, _ := dst.(*DocumentNode) return Merge(node.Body, src) case MappingType: - node := dst.(*MappingNode) + node, _ := dst.(*MappingNode) target, ok := src.(*MappingNode) if !ok { return err @@ -2103,7 +2103,7 @@ func Merge(dst Node, src Node) error { node.Merge(target) return nil case SequenceType: - node := dst.(*SequenceNode) + node, _ := dst.(*SequenceNode) target, ok := src.(*SequenceNode) if !ok { return err diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/decode.go b/hack/tools/vendor/github.com/goccy/go-yaml/decode.go index d3dbabcbc9..95f7217a83 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/decode.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/decode.go @@ -7,7 +7,6 @@ import ( "encoding/base64" "fmt" "io" - "io/ioutil" "math" "os" "path/filepath" @@ -20,7 +19,6 @@ import ( "github.com/goccy/go-yaml/internal/errors" "github.com/goccy/go-yaml/parser" "github.com/goccy/go-yaml/token" - "golang.org/x/xerrors" ) // Decoder reads and decodes YAML values from an input stream. 
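The import changes above (dropping golang.org/x/xerrors and io/ioutil) lean on the stdlib errors and fmt packages, which have supported wrapping and inspection since Go 1.13. A small sketch of the replacements used throughout this patch; typeError here is a stand-in for the package's own error type:

```go
package main

import (
	"errors"
	"fmt"
)

// typeError is a stand-in for the package's errors.TypeError.
type typeError struct{ field string }

func (e *typeError) Error() string { return "type mismatch on " + e.field }

func main() {
	base := &typeError{field: "replicas"}

	// fmt.Errorf with %w replaces xerrors.Errorf for wrapping.
	wrapped := fmt.Errorf("decode document: %w", base)

	// errors.As replaces xerrors.As one-for-one.
	var te *typeError
	if errors.As(wrapped, &te) {
		fmt.Println("found:", te.field) // found: replicas
	}
}
```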
@@ -100,7 +98,7 @@ func (d *Decoder) castToFloat(v interface{}) interface{} { func (d *Decoder) mergeValueNode(value ast.Node) ast.Node { if value.Type() == ast.AliasType { - aliasNode := value.(*ast.AliasNode) + aliasNode, _ := value.(*ast.AliasNode) aliasName := aliasNode.Value.GetToken().Value return d.anchorNodeMap[aliasName] } @@ -360,7 +358,7 @@ func (d *Decoder) resolveAlias(node ast.Node) (ast.Node, error) { if err != nil { return nil, err } - n.Values[idx] = value.(*ast.MappingValueNode) + n.Values[idx], _ = value.(*ast.MappingValueNode) } case *ast.TagNode: value, err := d.resolveAlias(n.Value) @@ -389,7 +387,7 @@ func (d *Decoder) resolveAlias(node ast.Node) (ast.Node, error) { if err != nil { return nil, err } - n.Key = key.(ast.MapKeyNode) + n.Key, _ = key.(ast.MapKeyNode) value, err := d.resolveAlias(n.Value) if err != nil { return nil, err @@ -408,7 +406,7 @@ func (d *Decoder) resolveAlias(node ast.Node) (ast.Node, error) { aliasName := n.Value.GetToken().Value node := d.anchorNodeMap[aliasName] if node == nil { - return nil, xerrors.Errorf("cannot find anchor by alias name %s", aliasName) + return nil, fmt.Errorf("cannot find anchor by alias name %s", aliasName) } return d.resolveAlias(node) } @@ -430,7 +428,7 @@ func (d *Decoder) getMapNode(node ast.Node) (ast.MapNode, error) { aliasName := alias.Value.GetToken().Value node := d.anchorNodeMap[aliasName] if node == nil { - return nil, xerrors.Errorf("cannot find anchor by alias name %s", aliasName) + return nil, fmt.Errorf("cannot find anchor by alias name %s", aliasName) } mapNode, ok := node.(ast.MapNode) if ok { @@ -461,7 +459,7 @@ func (d *Decoder) getArrayNode(node ast.Node) (ast.ArrayNode, error) { aliasName := alias.Value.GetToken().Value node := d.anchorNodeMap[aliasName] if node == nil { - return nil, xerrors.Errorf("cannot find anchor by alias name %s", aliasName) + return nil, fmt.Errorf("cannot find anchor by alias name %s", aliasName) } arrayNode, ok := node.(ast.ArrayNode) if ok { @@ -476,18 +474,24 @@ func (d *Decoder) getArrayNode(node ast.Node) (ast.ArrayNode, error) { return arrayNode, nil } -func (d *Decoder) fileToNode(f *ast.File) ast.Node { - for _, doc := range f.Docs { - if v := d.nodeToValue(doc.Body); v != nil { - return doc.Body - } - } - return nil -} - func (d *Decoder) convertValue(v reflect.Value, typ reflect.Type, src ast.Node) (reflect.Value, error) { if typ.Kind() != reflect.String { if !v.Type().ConvertibleTo(typ) { + + // Special case for "strings -> floats" aka scientific notation + // If the destination type is a float and the source type is a string, check if we can + // use strconv.ParseFloat to convert the string to a float. 
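The comment above describes a narrow fallback: a YAML scalar such as "1e3" reaches the decoder as a string, and it is accepted for a float destination only when strconv can parse it. A stand-alone sketch of that check (the helper name is ours, not part of the patch):

```go
package main

import (
	"fmt"
	"strconv"
)

// parseScientific accepts a string only if it parses as a float,
// mirroring the guard in the code that follows.
func parseScientific(s string) (float64, bool) {
	f, err := strconv.ParseFloat(s, 64)
	return f, err == nil
}

func main() {
	fmt.Println(parseScientific("1e3"))    // 1000 true
	fmt.Println(parseScientific("2.5e-2")) // 0.025 true
	fmt.Println(parseScientific("oops"))   // 0 false
}
```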
+ if (typ.Kind() == reflect.Float32 || typ.Kind() == reflect.Float64) && + v.Type().Kind() == reflect.String { + if f, err := strconv.ParseFloat(v.String(), 64); err == nil { + if typ.Kind() == reflect.Float32 { + return reflect.ValueOf(float32(f)), nil + } else if typ.Kind() == reflect.Float64 { + return reflect.ValueOf(f), nil + } + // else, fall through to the error below + } + } return reflect.Zero(typ), errTypeMismatch(typ, v.Type(), src.GetToken()) } return v.Convert(typ), nil @@ -509,19 +513,6 @@ func (d *Decoder) convertValue(v reflect.Value, typ reflect.Type, src ast.Node) return v.Convert(typ), nil } -type overflowError struct { - dstType reflect.Type - srcNum string -} - -func (e *overflowError) Error() string { - return fmt.Sprintf("cannot unmarshal %s into Go value of type %s ( overflow )", e.srcNum, e.dstType) -} - -func errOverflow(dstType reflect.Type, num string) *overflowError { - return &overflowError{dstType: dstType, srcNum: num} -} - func errTypeMismatch(dstType, srcType reflect.Type, token *token.Token) *errors.TypeError { return &errors.TypeError{DstType: dstType, SrcType: srcType, Token: token} } @@ -560,7 +551,7 @@ func (d *Decoder) deleteStructKeys(structType reflect.Type, unknownFields map[st } structFieldMap, err := structFieldMap(structType) if err != nil { - return errors.Wrapf(err, "failed to create struct field map") + return err } for j := 0; j < structType.NumField(); j++ { @@ -575,7 +566,7 @@ func (d *Decoder) deleteStructKeys(structType reflect.Type, unknownFields map[st } if structField.IsInline { - d.deleteStructKeys(field.Type, unknownFields) + _ = d.deleteStructKeys(field.Type, unknownFields) } else { delete(unknownFields, structField.RenderName) } @@ -698,10 +689,10 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr if unmarshaler, exists := d.unmarshalerFromCustomUnmarshalerMap(ptrValue.Type()); exists { b, err := d.unmarshalableDocument(src) if err != nil { - return errors.Wrapf(err, "failed to UnmarshalYAML") + return err } if err := unmarshaler(ptrValue.Interface(), b); err != nil { - return errors.Wrapf(err, "failed to UnmarshalYAML") + return err } return nil } @@ -710,10 +701,10 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr if unmarshaler, ok := iface.(BytesUnmarshalerContext); ok { b, err := d.unmarshalableDocument(src) if err != nil { - return errors.Wrapf(err, "failed to UnmarshalYAML") + return err } if err := unmarshaler.UnmarshalYAML(ctx, b); err != nil { - return errors.Wrapf(err, "failed to UnmarshalYAML") + return err } return nil } @@ -721,10 +712,10 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr if unmarshaler, ok := iface.(BytesUnmarshaler); ok { b, err := d.unmarshalableDocument(src) if err != nil { - return errors.Wrapf(err, "failed to UnmarshalYAML") + return err } if err := unmarshaler.UnmarshalYAML(b); err != nil { - return errors.Wrapf(err, "failed to UnmarshalYAML") + return err } return nil } @@ -733,14 +724,14 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr if err := unmarshaler.UnmarshalYAML(ctx, func(v interface{}) error { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Ptr { - return errors.ErrDecodeRequiredPointerType + return ErrDecodeRequiredPointerType } if err := d.decodeValue(ctx, rv.Elem(), src); err != nil { - return errors.Wrapf(err, "failed to decode value") + return err } return nil }); err != nil { - return errors.Wrapf(err, "failed to UnmarshalYAML") + 
return err } return nil } @@ -749,14 +740,14 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr if err := unmarshaler.UnmarshalYAML(func(v interface{}) error { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Ptr { - return errors.ErrDecodeRequiredPointerType + return ErrDecodeRequiredPointerType } if err := d.decodeValue(ctx, rv.Elem(), src); err != nil { - return errors.Wrapf(err, "failed to decode value") + return err } return nil }); err != nil { - return errors.Wrapf(err, "failed to UnmarshalYAML") + return err } return nil } @@ -772,11 +763,11 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr if unmarshaler, isText := iface.(encoding.TextUnmarshaler); isText { b, ok, err := d.unmarshalableText(src) if err != nil { - return errors.Wrapf(err, "failed to UnmarshalText") + return err } if ok { if err := unmarshaler.UnmarshalText(b); err != nil { - return errors.Wrapf(err, "failed to UnmarshalText") + return err } return nil } @@ -786,21 +777,21 @@ func (d *Decoder) decodeByUnmarshaler(ctx context.Context, dst reflect.Value, sr if unmarshaler, ok := iface.(jsonUnmarshaler); ok { b, err := d.unmarshalableDocument(src) if err != nil { - return errors.Wrapf(err, "failed to UnmarshalJSON") + return err } jsonBytes, err := YAMLToJSON(b) if err != nil { - return errors.Wrapf(err, "failed to convert yaml to json") + return err } jsonBytes = bytes.TrimRight(jsonBytes, "\n") if err := unmarshaler.UnmarshalJSON(jsonBytes); err != nil { - return errors.Wrapf(err, "failed to UnmarshalJSON") + return err } return nil } } - return xerrors.Errorf("does not implemented Unmarshaler") + return fmt.Errorf("does not implemented Unmarshaler") } var ( @@ -816,7 +807,7 @@ func (d *Decoder) decodeValue(ctx context.Context, dst reflect.Value, src ast.No } if d.canDecodeByUnmarshaler(dst) { if err := d.decodeByUnmarshaler(ctx, dst, src); err != nil { - return errors.Wrapf(err, "failed to decode by unmarshaler") + return err } return nil } @@ -833,7 +824,7 @@ func (d *Decoder) decodeValue(ctx context.Context, dst reflect.Value, src ast.No } v := d.createDecodableValue(dst.Type()) if err := d.decodeValue(ctx, v, src); err != nil { - return errors.Wrapf(err, "failed to decode ptr value") + return err } dst.Set(d.castToAssignableValue(v, dst.Type())) case reflect.Interface: @@ -877,10 +868,19 @@ func (d *Decoder) decodeValue(ctx context.Context, dst reflect.Value, src ast.No dst.SetInt(int64(vv)) return nil } + case string: // handle scientific notation + if i, err := strconv.ParseFloat(vv, 64); err == nil { + if 0 <= i && i <= math.MaxUint64 && !dst.OverflowInt(int64(i)) { + dst.SetInt(int64(i)) + return nil + } + } else { // couldn't be parsed as float + return errTypeMismatch(valueType, reflect.TypeOf(v), src.GetToken()) + } default: return errTypeMismatch(valueType, reflect.TypeOf(v), src.GetToken()) } - return errOverflow(valueType, fmt.Sprint(v)) + return errors.ErrOverflow(valueType, fmt.Sprint(v), src.GetToken()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: v := d.nodeToValue(src) switch vv := v.(type) { @@ -899,16 +899,26 @@ func (d *Decoder) decodeValue(ctx context.Context, dst reflect.Value, src ast.No dst.SetUint(uint64(vv)) return nil } + case string: // handle scientific notation + if i, err := strconv.ParseFloat(vv, 64); err == nil { + if 0 <= i && i <= math.MaxUint64 && !dst.OverflowUint(uint64(i)) { + dst.SetUint(uint64(i)) + return nil + } + } else { // couldn't be parsed as float + return 
errTypeMismatch(valueType, reflect.TypeOf(v), src.GetToken()) + } + default: return errTypeMismatch(valueType, reflect.TypeOf(v), src.GetToken()) } - return errOverflow(valueType, fmt.Sprint(v)) + return errors.ErrOverflow(valueType, fmt.Sprint(v), src.GetToken()) } v := reflect.ValueOf(d.nodeToValue(src)) if v.IsValid() { convertedValue, err := d.convertValue(v, dst.Type(), src) if err != nil { - return errors.Wrapf(err, "failed to convert value") + return err } dst.Set(convertedValue) } @@ -955,18 +965,22 @@ func (d *Decoder) createDecodedNewValue( return newValue, nil } } + var newValue reflect.Value if node.Type() == ast.NullType { - return reflect.Zero(typ), nil + newValue = reflect.New(typ).Elem() + } else { + newValue = d.createDecodableValue(typ) } - newValue := d.createDecodableValue(typ) for defaultVal.Kind() == reflect.Ptr { defaultVal = defaultVal.Elem() } if defaultVal.IsValid() && defaultVal.Type().AssignableTo(newValue.Type()) { newValue.Set(defaultVal) } - if err := d.decodeValue(ctx, newValue, node); err != nil { - return newValue, errors.Wrapf(err, "failed to decode value") + if node.Type() != ast.NullType { + if err := d.decodeValue(ctx, newValue, node); err != nil { + return newValue, err + } } return newValue, nil } @@ -974,7 +988,7 @@ func (d *Decoder) createDecodedNewValue( func (d *Decoder) keyToNodeMap(node ast.Node, ignoreMergeKey bool, getKeyOrValueNode func(*ast.MapNodeIter) ast.Node) (map[string]ast.Node, error) { mapNode, err := d.getMapNode(node) if err != nil { - return nil, errors.Wrapf(err, "failed to get map node") + return nil, err } keyMap := map[string]struct{}{} keyToNodeMap := map[string]ast.Node{} @@ -990,21 +1004,21 @@ func (d *Decoder) keyToNodeMap(node ast.Node, ignoreMergeKey bool, getKeyOrValue } mergeMap, err := d.keyToNodeMap(mapIter.Value(), ignoreMergeKey, getKeyOrValueNode) if err != nil { - return nil, errors.Wrapf(err, "failed to get keyToNodeMap by MergeKey node") + return nil, err } for k, v := range mergeMap { if err := d.validateDuplicateKey(keyMap, k, v); err != nil { - return nil, errors.Wrapf(err, "invalid struct key") + return nil, err } keyToNodeMap[k] = v } } else { key, ok := d.nodeToValue(keyNode).(string) if !ok { - return nil, errors.Wrapf(err, "failed to decode map key") + return nil, err } if err := d.validateDuplicateKey(keyMap, key, keyNode); err != nil { - return nil, errors.Wrapf(err, "invalid struct key") + return nil, err } keyToNodeMap[key] = getKeyOrValueNode(mapIter) } @@ -1015,7 +1029,7 @@ func (d *Decoder) keyToNodeMap(node ast.Node, ignoreMergeKey bool, getKeyOrValue func (d *Decoder) keyToKeyNodeMap(node ast.Node, ignoreMergeKey bool) (map[string]ast.Node, error) { m, err := d.keyToNodeMap(node, ignoreMergeKey, func(nodeMap *ast.MapNodeIter) ast.Node { return nodeMap.Key() }) if err != nil { - return nil, errors.Wrapf(err, "failed to get keyToNodeMap") + return nil, err } return m, nil } @@ -1023,7 +1037,7 @@ func (d *Decoder) keyToKeyNodeMap(node ast.Node, ignoreMergeKey bool) (map[strin func (d *Decoder) keyToValueNodeMap(node ast.Node, ignoreMergeKey bool) (map[string]ast.Node, error) { m, err := d.keyToNodeMap(node, ignoreMergeKey, func(nodeMap *ast.MapNodeIter) ast.Node { return nodeMap.Value() }) if err != nil { - return nil, errors.Wrapf(err, "failed to get keyToNodeMap") + return nil, err } return m, nil } @@ -1035,7 +1049,7 @@ func (d *Decoder) setDefaultValueIfConflicted(v reflect.Value, fieldMap StructFi } embeddedStructFieldMap, err := structFieldMap(typ) if err != nil { - return errors.Wrapf(err, 
"failed to get struct field map by embedded type") + return err } for i := 0; i < typ.NumField(); i++ { field := typ.Field(i) @@ -1090,7 +1104,7 @@ func (d *Decoder) castToTime(src ast.Node) (time.Time, error) { func (d *Decoder) decodeTime(ctx context.Context, dst reflect.Value, src ast.Node) error { t, err := d.castToTime(src) if err != nil { - return errors.Wrapf(err, "failed to convert to time") + return err } dst.Set(reflect.ValueOf(t)) return nil @@ -1110,7 +1124,7 @@ func (d *Decoder) castToDuration(src ast.Node) (time.Duration, error) { } t, err := time.ParseDuration(s) if err != nil { - return 0, errors.Wrapf(err, "failed to parse duration") + return 0, err } return t, nil } @@ -1118,7 +1132,7 @@ func (d *Decoder) castToDuration(src ast.Node) (time.Duration, error) { func (d *Decoder) decodeDuration(ctx context.Context, dst reflect.Value, src ast.Node) error { t, err := d.castToDuration(src) if err != nil { - return errors.Wrapf(err, "failed to convert to duration") + return err } dst.Set(reflect.ValueOf(t)) return nil @@ -1162,18 +1176,18 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N } structFieldMap, err := structFieldMap(structType) if err != nil { - return errors.Wrapf(err, "failed to create struct field map") + return err } ignoreMergeKey := structFieldMap.hasMergeProperty() keyToNodeMap, err := d.keyToValueNodeMap(src, ignoreMergeKey) if err != nil { - return errors.Wrapf(err, "failed to get keyToValueNodeMap") + return err } var unknownFields map[string]ast.Node if d.disallowUnknownField { unknownFields, err = d.keyToKeyNodeMap(src, ignoreMergeKey) if err != nil { - return errors.Wrapf(err, "failed to get keyToKeyNodeMap") + return err } } @@ -1198,7 +1212,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N continue } if !fieldValue.CanSet() { - return xerrors.Errorf("cannot set embedded type as unexported field %s.%s", field.PkgPath, field.Name) + return fmt.Errorf("cannot set embedded type as unexported field %s.%s", field.PkgPath, field.Name) } if fieldValue.Type().Kind() == reflect.Ptr && src.Type() == ast.NullType { // set nil value to pointer @@ -1213,7 +1227,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N newFieldValue, err := d.createDecodedNewValue(ctx, fieldValue.Type(), fieldValue, mapNode) if d.disallowUnknownField { if err := d.deleteStructKeys(fieldValue.Type(), unknownFields); err != nil { - return errors.Wrapf(err, "cannot delete struct keys") + return err } } @@ -1222,7 +1236,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N continue } var te *errors.TypeError - if xerrors.As(err, &te) { + if errors.As(err, &te) { if te.StructFieldName != nil { fieldName := fmt.Sprintf("%s.%s", structType.Name(), *te.StructFieldName) te.StructFieldName = &fieldName @@ -1237,7 +1251,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N } continue } - d.setDefaultValueIfConflicted(newFieldValue, structFieldMap) + _ = d.setDefaultValueIfConflicted(newFieldValue, structFieldMap) fieldValue.Set(d.castToAssignableValue(newFieldValue, fieldValue.Type())) continue } @@ -1258,7 +1272,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N continue } var te *errors.TypeError - if xerrors.As(err, &te) { + if errors.As(err, &te) { fieldName := fmt.Sprintf("%s.%s", structType.Name(), field.Name) te.StructFieldName = &fieldName foundErr = te @@ -1270,7 +1284,7 @@ func (d *Decoder) 
decodeStruct(ctx context.Context, dst reflect.Value, src ast.N fieldValue.Set(d.castToAssignableValue(newFieldValue, fieldValue.Type())) } if foundErr != nil { - return errors.Wrapf(foundErr, "failed to decode value") + return foundErr } // Ignore unknown fields when parsing an inline struct (recognized by a nil token). @@ -1315,7 +1329,7 @@ func (d *Decoder) decodeStruct(ctx context.Context, dst reflect.Value, src ast.N func (d *Decoder) decodeArray(ctx context.Context, dst reflect.Value, src ast.Node) error { arrayNode, err := d.getArrayNode(src) if err != nil { - return errors.Wrapf(err, "failed to get array node") + return err } if arrayNode == nil { return nil @@ -1347,7 +1361,7 @@ func (d *Decoder) decodeArray(ctx context.Context, dst reflect.Value, src ast.No } dst.Set(arrayValue) if foundErr != nil { - return errors.Wrapf(foundErr, "failed to decode value") + return foundErr } return nil } @@ -1355,7 +1369,7 @@ func (d *Decoder) decodeArray(ctx context.Context, dst reflect.Value, src ast.No func (d *Decoder) decodeSlice(ctx context.Context, dst reflect.Value, src ast.Node) error { arrayNode, err := d.getArrayNode(src) if err != nil { - return errors.Wrapf(err, "failed to get array node") + return err } if arrayNode == nil { return nil @@ -1384,7 +1398,7 @@ func (d *Decoder) decodeSlice(ctx context.Context, dst reflect.Value, src ast.No } dst.Set(sliceValue) if foundErr != nil { - return errors.Wrapf(foundErr, "failed to decode value") + return foundErr } return nil } @@ -1392,7 +1406,7 @@ func (d *Decoder) decodeSlice(ctx context.Context, dst reflect.Value, src ast.No func (d *Decoder) decodeMapItem(ctx context.Context, dst *MapItem, src ast.Node) error { mapNode, err := d.getMapNode(src) if err != nil { - return errors.Wrapf(err, "failed to get map node") + return err } if mapNode == nil { return nil @@ -1405,7 +1419,7 @@ func (d *Decoder) decodeMapItem(ctx context.Context, dst *MapItem, src ast.Node) value := mapIter.Value() if key.Type() == ast.MergeKeyType { if err := d.decodeMapItem(ctx, dst, value); err != nil { - return errors.Wrapf(err, "failed to decode map with merge key") + return err } return nil } @@ -1433,7 +1447,7 @@ func (d *Decoder) validateDuplicateKey(keyMap map[string]struct{}, key interface func (d *Decoder) decodeMapSlice(ctx context.Context, dst *MapSlice, src ast.Node) error { mapNode, err := d.getMapNode(src) if err != nil { - return errors.Wrapf(err, "failed to get map node") + return err } if mapNode == nil { return nil @@ -1447,11 +1461,11 @@ func (d *Decoder) decodeMapSlice(ctx context.Context, dst *MapSlice, src ast.Nod if key.Type() == ast.MergeKeyType { var m MapSlice if err := d.decodeMapSlice(ctx, &m, value); err != nil { - return errors.Wrapf(err, "failed to decode map with merge key") + return err } for _, v := range m { if err := d.validateDuplicateKey(keyMap, v.Key, value); err != nil { - return errors.Wrapf(err, "invalid map key") + return err } mapSlice = append(mapSlice, v) } @@ -1459,7 +1473,7 @@ func (d *Decoder) decodeMapSlice(ctx context.Context, dst *MapSlice, src ast.Nod } k := d.nodeToValue(key) if err := d.validateDuplicateKey(keyMap, k, key); err != nil { - return errors.Wrapf(err, "invalid map key") + return err } mapSlice = append(mapSlice, MapItem{ Key: k, @@ -1473,7 +1487,7 @@ func (d *Decoder) decodeMapSlice(ctx context.Context, dst *MapSlice, src ast.Nod func (d *Decoder) decodeMap(ctx context.Context, dst reflect.Value, src ast.Node) error { mapNode, err := d.getMapNode(src) if err != nil { - return errors.Wrapf(err, "failed 
to get map node") + return err } if mapNode == nil { return nil @@ -1490,24 +1504,33 @@ func (d *Decoder) decodeMap(ctx context.Context, dst reflect.Value, src ast.Node value := mapIter.Value() if key.Type() == ast.MergeKeyType { if err := d.decodeMap(ctx, dst, value); err != nil { - return errors.Wrapf(err, "failed to decode map with merge key") + return err } iter := dst.MapRange() for iter.Next() { if err := d.validateDuplicateKey(keyMap, iter.Key(), value); err != nil { - return errors.Wrapf(err, "invalid map key") + return err } mapValue.SetMapIndex(iter.Key(), iter.Value()) } continue } - k := reflect.ValueOf(d.nodeToValue(key)) - if k.IsValid() && k.Type().ConvertibleTo(keyType) { - k = k.Convert(keyType) + + k := d.createDecodableValue(keyType) + if d.canDecodeByUnmarshaler(k) { + if err := d.decodeByUnmarshaler(ctx, k, key); err != nil { + return err + } + } else { + k = reflect.ValueOf(d.nodeToValue(key)) + if k.IsValid() && k.Type().ConvertibleTo(keyType) { + k = k.Convert(keyType) + } } + if k.IsValid() { if err := d.validateDuplicateKey(keyMap, k.Interface(), key); err != nil { - return errors.Wrapf(err, "invalid map key") + return err } } if valueType.Kind() == reflect.Ptr && value.Type() == ast.NullType { @@ -1530,7 +1553,7 @@ func (d *Decoder) decodeMap(ctx context.Context, dst reflect.Value, src ast.Node } dst.Set(mapValue) if foundErr != nil { - return errors.Wrapf(foundErr, "failed to decode value") + return foundErr } return nil } @@ -1538,7 +1561,7 @@ func (d *Decoder) decodeMap(ctx context.Context, dst reflect.Value, src ast.Node func (d *Decoder) fileToReader(file string) (io.Reader, error) { reader, err := os.Open(file) if err != nil { - return nil, errors.Wrapf(err, "failed to open file") + return nil, err } return reader, nil } @@ -1558,7 +1581,7 @@ func (d *Decoder) readersUnderDir(dir string) ([]io.Reader, error) { pattern := fmt.Sprintf("%s/*", dir) matches, err := filepath.Glob(pattern) if err != nil { - return nil, errors.Wrapf(err, "failed to get files by %s", pattern) + return nil, err } readers := []io.Reader{} for _, match := range matches { @@ -1567,7 +1590,7 @@ func (d *Decoder) readersUnderDir(dir string) ([]io.Reader, error) { } reader, err := d.fileToReader(match) if err != nil { - return nil, errors.Wrapf(err, "failed to get reader") + return nil, err } readers = append(readers, reader) } @@ -1576,18 +1599,18 @@ func (d *Decoder) readersUnderDir(dir string) ([]io.Reader, error) { func (d *Decoder) readersUnderDirRecursive(dir string) ([]io.Reader, error) { readers := []io.Reader{} - if err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err := filepath.Walk(dir, func(path string, info os.FileInfo, _ error) error { if !d.isYAMLFile(path) { return nil } - reader, err := d.fileToReader(path) - if err != nil { - return errors.Wrapf(err, "failed to get reader") + reader, readerErr := d.fileToReader(path) + if readerErr != nil { + return readerErr } readers = append(readers, reader) return nil }); err != nil { - return nil, errors.Wrapf(err, "interrupt walk in %s", dir) + return nil, err } return readers, nil } @@ -1595,13 +1618,13 @@ func (d *Decoder) readersUnderDirRecursive(dir string) ([]io.Reader, error) { func (d *Decoder) resolveReference() error { for _, opt := range d.opts { if err := opt(d); err != nil { - return errors.Wrapf(err, "failed to exec option") + return err } } for _, file := range d.referenceFiles { reader, err := d.fileToReader(file) if err != nil { - return errors.Wrapf(err, "failed to get reader") 
+ return err } d.referenceReaders = append(d.referenceReaders, reader) } @@ -1609,26 +1632,26 @@ func (d *Decoder) resolveReference() error { if !d.isRecursiveDir { readers, err := d.readersUnderDir(dir) if err != nil { - return errors.Wrapf(err, "failed to get readers from under the %s", dir) + return err } d.referenceReaders = append(d.referenceReaders, readers...) } else { readers, err := d.readersUnderDirRecursive(dir) if err != nil { - return errors.Wrapf(err, "failed to get readers from under the %s", dir) + return err } d.referenceReaders = append(d.referenceReaders, readers...) } } for _, reader := range d.referenceReaders { - bytes, err := ioutil.ReadAll(reader) + bytes, err := io.ReadAll(reader) if err != nil { - return errors.Wrapf(err, "failed to read buffer") + return err } // assign new anchor definition to anchorMap if _, err := d.parse(bytes); err != nil { - return errors.Wrapf(err, "failed to decode") + return err } } d.isResolvedReference = true @@ -1642,7 +1665,7 @@ func (d *Decoder) parse(bytes []byte) (*ast.File, error) { } f, err := parser.ParseBytes(bytes, parseMode) if err != nil { - return nil, errors.Wrapf(err, "failed to parse yaml") + return nil, err } normalizedFile := &ast.File{} for _, doc := range f.Docs { @@ -1661,16 +1684,16 @@ func (d *Decoder) isInitialized() bool { func (d *Decoder) decodeInit() error { if !d.isResolvedReference { if err := d.resolveReference(); err != nil { - return errors.Wrapf(err, "failed to resolve reference") + return err } } var buf bytes.Buffer if _, err := io.Copy(&buf, d.reader); err != nil { - return errors.Wrapf(err, "failed to copy from reader") + return err } file, err := d.parse(buf.Bytes()) if err != nil { - return errors.Wrapf(err, "failed to decode") + return err } d.parsedFile = file return nil @@ -1685,7 +1708,7 @@ func (d *Decoder) decode(ctx context.Context, v reflect.Value) error { return nil } if err := d.decodeValue(ctx, v.Elem(), body); err != nil { - return errors.Wrapf(err, "failed to decode value") + return err } d.streamIndex++ return nil @@ -1705,25 +1728,25 @@ func (d *Decoder) Decode(v interface{}) error { func (d *Decoder) DecodeContext(ctx context.Context, v interface{}) error { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Ptr { - return errors.ErrDecodeRequiredPointerType + return ErrDecodeRequiredPointerType } if d.isInitialized() { if err := d.decode(ctx, rv); err != nil { if err == io.EOF { return err } - return errors.Wrapf(err, "failed to decode") + return err } return nil } if err := d.decodeInit(); err != nil { - return errors.Wrapf(err, "failed to decodeInit") + return err } if err := d.decode(ctx, rv); err != nil { if err == io.EOF { return err } - return errors.Wrapf(err, "failed to decode") + return err } return nil } @@ -1737,17 +1760,17 @@ func (d *Decoder) DecodeFromNode(node ast.Node, v interface{}) error { func (d *Decoder) DecodeFromNodeContext(ctx context.Context, node ast.Node, v interface{}) error { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Ptr { - return errors.ErrDecodeRequiredPointerType + return ErrDecodeRequiredPointerType } if !d.isInitialized() { if err := d.decodeInit(); err != nil { - return errors.Wrapf(err, "failed to decodInit") + return err } } // resolve references to the anchor on the same file d.nodeToValue(node) if err := d.decodeValue(ctx, rv.Elem(), node); err != nil { - return errors.Wrapf(err, "failed to decode value") + return err } return nil } diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/encode.go 
b/hack/tools/vendor/github.com/goccy/go-yaml/encode.go index 3b9b298144..97c1fa5b7a 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/encode.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/encode.go @@ -17,7 +17,6 @@ import ( "github.com/goccy/go-yaml/parser" "github.com/goccy/go-yaml/printer" "github.com/goccy/go-yaml/token" - "golang.org/x/xerrors" ) const ( @@ -84,19 +83,19 @@ func (e *Encoder) Encode(v interface{}) error { func (e *Encoder) EncodeContext(ctx context.Context, v interface{}) error { node, err := e.EncodeToNodeContext(ctx, v) if err != nil { - return errors.Wrapf(err, "failed to encode to node") + return err } if err := e.setCommentByCommentMap(node); err != nil { - return errors.Wrapf(err, "failed to set comment by comment map") + return err } if !e.written { e.written = true } else { // write document separator - e.writer.Write([]byte("---\n")) + _, _ = e.writer.Write([]byte("---\n")) } var p printer.Printer - e.writer.Write(p.PrintNode(node)) + _, _ = e.writer.Write(p.PrintNode(node)) return nil } @@ -109,12 +108,12 @@ func (e *Encoder) EncodeToNode(v interface{}) (ast.Node, error) { func (e *Encoder) EncodeToNodeContext(ctx context.Context, v interface{}) (ast.Node, error) { for _, opt := range e.opts { if err := opt(e); err != nil { - return nil, errors.Wrapf(err, "failed to run option for encoder") + return nil, err } } node, err := e.encodeValue(ctx, reflect.ValueOf(v), 1) if err != nil { - return nil, errors.Wrapf(err, "failed to encode value") + return nil, err } return node, nil } @@ -126,7 +125,7 @@ func (e *Encoder) setCommentByCommentMap(node ast.Node) error { for path, comments := range e.commentMap { n, err := path.FilterNode(node) if err != nil { - return errors.Wrapf(err, "failed to filter node") + return err } if n == nil { continue @@ -140,15 +139,15 @@ func (e *Encoder) setCommentByCommentMap(node ast.Node) error { switch comment.Position { case CommentHeadPosition: if err := e.setHeadComment(node, n, commentGroup); err != nil { - return errors.Wrapf(err, "failed to set head comment") + return err } case CommentLinePosition: if err := e.setLineComment(node, n, commentGroup); err != nil { - return errors.Wrapf(err, "failed to set line comment") + return err } case CommentFootPosition: if err := e.setFootComment(node, n, commentGroup); err != nil { - return errors.Wrapf(err, "failed to set foot comment") + return err } default: return ErrUnknownCommentPositionType @@ -166,11 +165,11 @@ func (e *Encoder) setHeadComment(node ast.Node, filtered ast.Node, comment *ast. switch p := parent.(type) { case *ast.MappingValueNode: if err := p.SetComment(comment); err != nil { - return errors.Wrapf(err, "failed to set comment") + return err } case *ast.MappingNode: if err := p.SetComment(comment); err != nil { - return errors.Wrapf(err, "failed to set comment") + return err } case *ast.SequenceNode: if len(p.ValueHeadComments) == 0 { @@ -196,11 +195,11 @@ func (e *Encoder) setLineComment(node ast.Node, filtered ast.Node, comment *ast. // Line comment cannot be set for mapping value node. 
// It should probably be set for the parent map node if err := e.setLineCommentToParentMapNode(node, filtered, comment); err != nil { - return errors.Wrapf(err, "failed to set line comment to parent node") + return err } default: if err := filtered.SetComment(comment); err != nil { - return errors.Wrapf(err, "failed to set comment") + return err } } return nil @@ -214,11 +213,11 @@ func (e *Encoder) setLineCommentToParentMapNode(node ast.Node, filtered ast.Node switch p := parent.(type) { case *ast.MappingValueNode: if err := p.Key.SetComment(comment); err != nil { - return errors.Wrapf(err, "failed to set comment") + return err } case *ast.MappingNode: if err := p.SetComment(comment); err != nil { - return errors.Wrapf(err, "failed to set comment") + return err } default: return ErrUnsupportedLinePositionType(parent) @@ -247,7 +246,7 @@ func (e *Encoder) setFootComment(node ast.Node, filtered ast.Node, comment *ast. func (e *Encoder) encodeDocument(doc []byte) (ast.Node, error) { f, err := parser.ParseBytes(doc, 0) if err != nil { - return nil, errors.Wrapf(err, "failed to parse yaml") + return nil, err } for _, docNode := range f.Docs { if docNode.Body != nil { @@ -336,11 +335,11 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column if marshaler, exists := e.marshalerFromCustomMarshalerMap(v.Type()); exists { doc, err := marshaler(iface) if err != nil { - return nil, errors.Wrapf(err, "failed to MarshalYAML") + return nil, err } node, err := e.encodeDocument(doc) if err != nil { - return nil, errors.Wrapf(err, "failed to encode document") + return nil, err } return node, nil } @@ -348,11 +347,11 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column if marshaler, ok := iface.(BytesMarshalerContext); ok { doc, err := marshaler.MarshalYAML(ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to MarshalYAML") + return nil, err } node, err := e.encodeDocument(doc) if err != nil { - return nil, errors.Wrapf(err, "failed to encode document") + return nil, err } return node, nil } @@ -360,11 +359,11 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column if marshaler, ok := iface.(BytesMarshaler); ok { doc, err := marshaler.MarshalYAML() if err != nil { - return nil, errors.Wrapf(err, "failed to MarshalYAML") + return nil, err } node, err := e.encodeDocument(doc) if err != nil { - return nil, errors.Wrapf(err, "failed to encode document") + return nil, err } return node, nil } @@ -372,7 +371,7 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column if marshaler, ok := iface.(InterfaceMarshalerContext); ok { marshalV, err := marshaler.MarshalYAML(ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to MarshalYAML") + return nil, err } return e.encodeValue(ctx, reflect.ValueOf(marshalV), column) } @@ -380,7 +379,7 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column if marshaler, ok := iface.(InterfaceMarshaler); ok { marshalV, err := marshaler.MarshalYAML() if err != nil { - return nil, errors.Wrapf(err, "failed to MarshalYAML") + return nil, err } return e.encodeValue(ctx, reflect.ValueOf(marshalV), column) } @@ -396,11 +395,11 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column if marshaler, ok := iface.(encoding.TextMarshaler); ok { doc, err := marshaler.MarshalText() if err != nil { - return nil, errors.Wrapf(err, "failed to MarshalText") + return nil, err } node, err := e.encodeDocument(doc) if err 
!= nil { - return nil, errors.Wrapf(err, "failed to encode document") + return nil, err } return node, nil } @@ -409,21 +408,21 @@ func (e *Encoder) encodeByMarshaler(ctx context.Context, v reflect.Value, column if marshaler, ok := iface.(jsonMarshaler); ok { jsonBytes, err := marshaler.MarshalJSON() if err != nil { - return nil, errors.Wrapf(err, "failed to MarshalJSON") + return nil, err } doc, err := JSONToYAML(jsonBytes) if err != nil { - return nil, errors.Wrapf(err, "failed to convert json to yaml") + return nil, err } node, err := e.encodeDocument(doc) if err != nil { - return nil, errors.Wrapf(err, "failed to encode document") + return nil, err } return node, nil } } - return nil, xerrors.Errorf("does not implemented Marshaler") + return nil, errors.New("does not implemented Marshaler") } func (e *Encoder) encodeValue(ctx context.Context, v reflect.Value, column int) (ast.Node, error) { @@ -433,7 +432,7 @@ func (e *Encoder) encodeValue(ctx context.Context, v reflect.Value, column int) if e.canEncodeByMarshaler(v) { node, err := e.encodeByMarshaler(ctx, v, column) if err != nil { - return nil, errors.Wrapf(err, "failed to encode by marshaler") + return nil, err } return node, nil } @@ -481,7 +480,7 @@ func (e *Encoder) encodeValue(ctx context.Context, v reflect.Value, column int) case reflect.Map: return e.encodeMap(ctx, v, column), nil default: - return nil, xerrors.Errorf("unknown value type %s", v.Type().String()) + return nil, fmt.Errorf("unknown value type %s", v.Type().String()) } } @@ -570,7 +569,7 @@ func (e *Encoder) encodeSlice(ctx context.Context, value reflect.Value) (*ast.Se for i := 0; i < value.Len(); i++ { node, err := e.encodeValue(ctx, value.Index(i), column) if err != nil { - return nil, errors.Wrapf(err, "failed to encode value for slice") + return nil, err } sequence.Values = append(sequence.Values, node) } @@ -589,7 +588,7 @@ func (e *Encoder) encodeArray(ctx context.Context, value reflect.Value) (*ast.Se for i := 0; i < value.Len(); i++ { node, err := e.encodeValue(ctx, value.Index(i), column) if err != nil { - return nil, errors.Wrapf(err, "failed to encode value for array") + return nil, err } sequence.Values = append(sequence.Values, node) } @@ -604,7 +603,7 @@ func (e *Encoder) encodeMapItem(ctx context.Context, item MapItem, column int) ( v := reflect.ValueOf(item.Value) value, err := e.encodeValue(ctx, v, column) if err != nil { - return nil, errors.Wrapf(err, "failed to encode MapItem") + return nil, err } if e.isMapNode(value) { value.AddColumn(e.indent) @@ -621,7 +620,7 @@ func (e *Encoder) encodeMapSlice(ctx context.Context, value MapSlice, column int for _, item := range value { value, err := e.encodeMapItem(ctx, item, column) if err != nil { - return nil, errors.Wrapf(err, "failed to encode MapItem for MapSlice") + return nil, err } node.Values = append(node.Values, value) } @@ -730,7 +729,7 @@ func (e *Encoder) encodeAnchor(anchorName string, value ast.Node, fieldValue ref anchorNode.Value = value if e.anchorCallback != nil { if err := e.anchorCallback(anchorNode, fieldValue.Interface()); err != nil { - return nil, errors.Wrapf(err, "failed to marshal anchor") + return nil, err } if snode, ok := anchorNode.Name.(*ast.StringNode); ok { anchorName = snode.Value @@ -747,7 +746,7 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column structType := value.Type() structFieldMap, err := structFieldMap(structType) if err != nil { - return nil, errors.Wrapf(err, "failed to get struct field map") + return nil, err } 
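For reference, `encodeByMarshaler` above probes a chain of interfaces in order (custom marshaler map, `BytesMarshaler`, `InterfaceMarshaler`, `encoding.TextMarshaler`, then `json.Marshaler`). A small example of the `InterfaceMarshaler` hook, the branch most user types hit; `Endpoint` is a made-up type:

```go
package main

import (
	"fmt"
	"net"

	"github.com/goccy/go-yaml"
)

// Endpoint is hypothetical; MarshalYAML returns a plain value that the
// encoder then encodes normally via the InterfaceMarshaler branch above.
type Endpoint struct {
	Host string
	Port int
}

func (e Endpoint) MarshalYAML() (interface{}, error) {
	return net.JoinHostPort(e.Host, fmt.Sprint(e.Port)), nil
}

func main() {
	out, err := yaml.Marshal(map[string]Endpoint{"api": {Host: "localhost", Port: 8443}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // expected: api: localhost:8443
}
```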
hasInlineAnchorField := false var inlineAnchorValue reflect.Value @@ -770,7 +769,7 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column } value, err := ve.encodeValue(ctx, fieldValue, column) if err != nil { - return nil, errors.Wrapf(err, "failed to encode value") + return nil, err } if e.isMapNode(value) { value.AddColumn(e.indent) @@ -780,19 +779,19 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column case structField.AnchorName != "": anchorNode, err := e.encodeAnchor(structField.AnchorName, value, fieldValue, column) if err != nil { - return nil, errors.Wrapf(err, "failed to encode anchor") + return nil, err } value = anchorNode case structField.IsAutoAlias: if fieldValue.Kind() != reflect.Ptr { - return nil, xerrors.Errorf( + return nil, fmt.Errorf( "%s in struct is not pointer type. but required automatically alias detection", structField.FieldName, ) } anchorName := e.anchorPtrToNameMap[fieldValue.Pointer()] if anchorName == "" { - return nil, xerrors.Errorf( + return nil, errors.New( "cannot find anchor name from pointer address for automatically alias detection", ) } @@ -827,7 +826,7 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column if _, ok := value.(*ast.NullNode); ok { continue } - return nil, xerrors.Errorf("inline value is must be map or struct type") + return nil, errors.New("inline value is must be map or struct type") } mapIter := mapNode.MapRange() for mapIter.Next() { @@ -846,7 +845,7 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column case structField.IsAutoAnchor: anchorNode, err := e.encodeAnchor(structField.RenderName, value, fieldValue, column) if err != nil { - return nil, errors.Wrapf(err, "failed to encode anchor") + return nil, err } value = anchorNode } @@ -860,7 +859,7 @@ func (e *Encoder) encodeStruct(ctx context.Context, value reflect.Value, column anchorNode.Value = node if e.anchorCallback != nil { if err := e.anchorCallback(anchorNode, value.Addr().Interface()); err != nil { - return nil, errors.Wrapf(err, "failed to marshal anchor") + return nil, err } if snode, ok := anchorNode.Name.(*ast.StringNode); ok { anchorName = snode.Value diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/error.go b/hack/tools/vendor/github.com/goccy/go-yaml/error.go index 163dcc5588..36e4f06ede 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/error.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/error.go @@ -1,62 +1,65 @@ package yaml import ( + "errors" + "fmt" + "github.com/goccy/go-yaml/ast" - "golang.org/x/xerrors" ) var ( - ErrInvalidQuery = xerrors.New("invalid query") - ErrInvalidPath = xerrors.New("invalid path instance") - ErrInvalidPathString = xerrors.New("invalid path string") - ErrNotFoundNode = xerrors.New("node not found") - ErrUnknownCommentPositionType = xerrors.New("unknown comment position type") - ErrInvalidCommentMapValue = xerrors.New("invalid comment map value. it must be not nil value") + ErrInvalidQuery = errors.New("invalid query") + ErrInvalidPath = errors.New("invalid path instance") + ErrInvalidPathString = errors.New("invalid path string") + ErrNotFoundNode = errors.New("node not found") + ErrUnknownCommentPositionType = errors.New("unknown comment position type") + ErrInvalidCommentMapValue = errors.New("invalid comment map value. 
it must be not nil value") + ErrDecodeRequiredPointerType = errors.New("required pointer type value") ) func ErrUnsupportedHeadPositionType(node ast.Node) error { - return xerrors.Errorf("unsupported comment head position for %s", node.Type()) + return fmt.Errorf("unsupported comment head position for %s", node.Type()) } func ErrUnsupportedLinePositionType(node ast.Node) error { - return xerrors.Errorf("unsupported comment line position for %s", node.Type()) + return fmt.Errorf("unsupported comment line position for %s", node.Type()) } func ErrUnsupportedFootPositionType(node ast.Node) error { - return xerrors.Errorf("unsupported comment foot position for %s", node.Type()) + return fmt.Errorf("unsupported comment foot position for %s", node.Type()) } // IsInvalidQueryError whether err is ErrInvalidQuery or not. func IsInvalidQueryError(err error) bool { - return xerrors.Is(err, ErrInvalidQuery) + return errors.Is(err, ErrInvalidQuery) } // IsInvalidPathError whether err is ErrInvalidPath or not. func IsInvalidPathError(err error) bool { - return xerrors.Is(err, ErrInvalidPath) + return errors.Is(err, ErrInvalidPath) } // IsInvalidPathStringError whether err is ErrInvalidPathString or not. func IsInvalidPathStringError(err error) bool { - return xerrors.Is(err, ErrInvalidPathString) + return errors.Is(err, ErrInvalidPathString) } // IsNotFoundNodeError whether err is ErrNotFoundNode or not. func IsNotFoundNodeError(err error) bool { - return xerrors.Is(err, ErrNotFoundNode) + return errors.Is(err, ErrNotFoundNode) } // IsInvalidTokenTypeError whether err is ast.ErrInvalidTokenType or not. func IsInvalidTokenTypeError(err error) bool { - return xerrors.Is(err, ast.ErrInvalidTokenType) + return errors.Is(err, ast.ErrInvalidTokenType) } // IsInvalidAnchorNameError whether err is ast.ErrInvalidAnchorName or not. func IsInvalidAnchorNameError(err error) bool { - return xerrors.Is(err, ast.ErrInvalidAnchorName) + return errors.Is(err, ast.ErrInvalidAnchorName) } // IsInvalidAliasNameError whether err is ast.ErrInvalidAliasName or not. func IsInvalidAliasNameError(err error) bool { - return xerrors.Is(err, ast.ErrInvalidAliasName) + return errors.Is(err, ast.ErrInvalidAliasName) } diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/internal/errors/error.go b/hack/tools/vendor/github.com/goccy/go-yaml/internal/errors/error.go index 7f1ea9af7e..5d3ca4b3dd 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/internal/errors/error.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/internal/errors/error.go @@ -2,12 +2,12 @@ package errors import ( "bytes" + "errors" "fmt" "reflect" "github.com/goccy/go-yaml/printer" "github.com/goccy/go-yaml/token" - "golang.org/x/xerrors" ) const ( @@ -15,167 +15,79 @@ const ( defaultIncludeSource = true ) -var ( - ErrDecodeRequiredPointerType = xerrors.New("required pointer type value") -) - -// Wrapf wrap error for stack trace -func Wrapf(err error, msg string, args ...interface{}) error { - return &wrapError{ - baseError: &baseError{}, - err: xerrors.Errorf(msg, args...), - nextErr: err, - frame: xerrors.Caller(1), - } -} - // ErrSyntax create syntax error instance with message and token func ErrSyntax(msg string, tk *token.Token) *syntaxError { return &syntaxError{ - baseError: &baseError{}, - msg: msg, - token: tk, - frame: xerrors.Caller(1), + msg: msg, + token: tk, } } -type baseError struct { - state fmt.State - verb rune +// ErrOverflow creates an overflow error instance with message and a token. 
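With xerrors dropped, the sentinels in error.go become plain `errors.New` values and `ErrDecodeRequiredPointerType` moves into the public `yaml` package, so the standard library's `errors.Is`/`errors.As` work against them directly. A quick check, assuming the vendored version exports the sentinel as the hunk shows:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/goccy/go-yaml"
)

func main() {
	// Passing a non-pointer destination should surface the sentinel.
	var dst struct{}
	err := yaml.Unmarshal([]byte("a: 1"), dst)
	fmt.Println(errors.Is(err, yaml.ErrDecodeRequiredPointerType)) // expected: true
}
```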
+func ErrOverflow(dstType reflect.Type, num string, tk *token.Token) *overflowError { + return &overflowError{dstType: dstType, srcNum: num, token: tk} } -func (e *baseError) Error() string { - return "" -} - -func (e *baseError) chainStateAndVerb(err error) { - wrapErr, ok := err.(*wrapError) - if ok { - wrapErr.state = e.state - wrapErr.verb = e.verb - } - syntaxErr, ok := err.(*syntaxError) - if ok { - syntaxErr.state = e.state - syntaxErr.verb = e.verb - } -} - -type wrapError struct { - *baseError - err error - nextErr error - frame xerrors.Frame +type Printer interface { + // Print appends args to the message output. + Print(args ...any) } type FormatErrorPrinter struct { - xerrors.Printer + Printer Colored bool InclSource bool } -func (e *wrapError) As(target interface{}) bool { - err := e.nextErr - for { - if wrapErr, ok := err.(*wrapError); ok { - err = wrapErr.nextErr - continue - } - break - } - return xerrors.As(err, target) -} - -func (e *wrapError) Unwrap() error { - return e.nextErr -} - -func (e *wrapError) PrettyPrint(p xerrors.Printer, colored, inclSource bool) error { - return e.FormatError(&FormatErrorPrinter{Printer: p, Colored: colored, InclSource: inclSource}) -} - -func (e *wrapError) FormatError(p xerrors.Printer) error { - if _, ok := p.(*FormatErrorPrinter); !ok { - p = &FormatErrorPrinter{ - Printer: p, - Colored: defaultColorize, - InclSource: defaultIncludeSource, - } - } - if e.verb == 'v' && e.state.Flag('+') { - // print stack trace for debugging - p.Print(e.err, "\n") - e.frame.Format(p) - e.chainStateAndVerb(e.nextErr) - return e.nextErr - } - err := e.nextErr - for { - if wrapErr, ok := err.(*wrapError); ok { - err = wrapErr.nextErr - continue - } - break - } - e.chainStateAndVerb(err) - if fmtErr, ok := err.(xerrors.Formatter); ok { - fmtErr.FormatError(p) - } else { - p.Print(err) - } - return nil -} +var ( + As = errors.As + Is = errors.Is + New = errors.New +) -type wrapState struct { - org fmt.State +type overflowError struct { + dstType reflect.Type + srcNum string + token *token.Token } -func (s *wrapState) Write(b []byte) (n int, err error) { - return s.org.Write(b) +func (e *overflowError) Error() string { + return fmt.Sprintf("cannot unmarshal %s into Go value of type %s ( overflow )", e.srcNum, e.dstType) } -func (s *wrapState) Width() (wid int, ok bool) { - return s.org.Width() +func (e *overflowError) PrettyPrint(p Printer, colored, inclSource bool) error { + return e.FormatError(&FormatErrorPrinter{Printer: p, Colored: colored, InclSource: inclSource}) } -func (s *wrapState) Precision() (prec int, ok bool) { - return s.org.Precision() -} +func (e *overflowError) FormatError(p Printer) error { + var pp printer.Printer -func (s *wrapState) Flag(c int) bool { - // set true to 'printDetail' forced because when p.Detail() is false, xerrors.Printer no output any text - if c == '#' { - // ignore '#' keyword because xerrors.FormatError doesn't set true to printDetail. 
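The new `ErrOverflow` constructor above also threads the source token through, so the decoder's overflow errors can be pretty-printed with position and source context. A sketch; the annotated output is our expectation, not a verified transcript:

```go
package main

import (
	"fmt"

	"github.com/goccy/go-yaml"
)

func main() {
	var v struct {
		N int8 `yaml:"n"`
	}
	err := yaml.Unmarshal([]byte("n: 300"), &v)
	// Error() carries just the message, e.g.
	// "cannot unmarshal 300 into Go value of type int8 ( overflow )".
	fmt.Println(err)
	// FormatError should prepend the [line:column] position and print the
	// offending source line now that the token is attached to the error.
	fmt.Println(yaml.FormatError(err, false, true))
}
```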
- // ( see https://github.com/golang/xerrors/blob/master/adaptor.go#L39-L43 ) - return false + var colored, inclSource bool + if fep, ok := p.(*FormatErrorPrinter); ok { + colored = fep.Colored + inclSource = fep.InclSource } - return true -} -func (e *wrapError) Format(state fmt.State, verb rune) { - e.state = state - e.verb = verb - xerrors.FormatError(e, &wrapState{org: state}, verb) -} + pos := fmt.Sprintf("[%d:%d] ", e.token.Position.Line, e.token.Position.Column) + msg := pp.PrintErrorMessage(fmt.Sprintf("%s%s", pos, e.Error()), colored) + if inclSource { + msg += "\n" + pp.PrintErrorToken(e.token, colored) + } + p.Print(msg) -func (e *wrapError) Error() string { - var buf bytes.Buffer - e.PrettyPrint(&Sink{&buf}, defaultColorize, defaultIncludeSource) - return buf.String() + return nil } type syntaxError struct { - *baseError msg string token *token.Token - frame xerrors.Frame } -func (e *syntaxError) PrettyPrint(p xerrors.Printer, colored, inclSource bool) error { +func (e *syntaxError) PrettyPrint(p Printer, colored, inclSource bool) error { return e.FormatError(&FormatErrorPrinter{Printer: p, Colored: colored, InclSource: inclSource}) } -func (e *syntaxError) FormatError(p xerrors.Printer) error { +func (e *syntaxError) FormatError(p Printer) error { var pp printer.Printer var colored, inclSource bool @@ -190,18 +102,13 @@ func (e *syntaxError) FormatError(p xerrors.Printer) error { msg += "\n" + pp.PrintErrorToken(e.token, colored) } p.Print(msg) - - if e.verb == 'v' && e.state.Flag('+') { - // %+v - // print stack trace for debugging - e.frame.Format(p) - } return nil } type PrettyPrinter interface { - PrettyPrint(xerrors.Printer, bool, bool) error + PrettyPrint(Printer, bool, bool) error } + type Sink struct{ *bytes.Buffer } func (es *Sink) Print(args ...interface{}) { @@ -236,11 +143,11 @@ func (e *TypeError) Error() string { return fmt.Sprintf("cannot unmarshal %s into Go value of type %s", e.SrcType, e.DstType) } -func (e *TypeError) PrettyPrint(p xerrors.Printer, colored, inclSource bool) error { +func (e *TypeError) PrettyPrint(p Printer, colored, inclSource bool) error { return e.FormatError(&FormatErrorPrinter{Printer: p, Colored: colored, InclSource: inclSource}) } -func (e *TypeError) FormatError(p xerrors.Printer) error { +func (e *TypeError) FormatError(p Printer) error { var pp printer.Printer var colored, inclSource bool diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/parser/context.go b/hack/tools/vendor/github.com/goccy/go-yaml/parser/context.go index 42cc4f8fac..c4752f9f4b 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/parser/context.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/parser/context.go @@ -110,13 +110,6 @@ func (c *context) nextToken() *token.Token { return c.tokens[c.idx+1] } -func (c *context) afterNextToken() *token.Token { - if c.idx+2 >= c.size { - return nil - } - return c.tokens[c.idx+2] -} - func (c *context) nextNotCommentToken() *token.Token { for i := c.idx + 1; i < c.size; i++ { tk := c.tokens[i] diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/parser/parser.go b/hack/tools/vendor/github.com/goccy/go-yaml/parser/parser.go index 13ada50f9d..82e066f681 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/parser/parser.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/parser/parser.go @@ -2,14 +2,13 @@ package parser import ( "fmt" - "io/ioutil" + "os" "strings" "github.com/goccy/go-yaml/ast" "github.com/goccy/go-yaml/internal/errors" "github.com/goccy/go-yaml/lexer" "github.com/goccy/go-yaml/token" - 
"golang.org/x/xerrors" ) type parser struct{} @@ -31,7 +30,7 @@ func (p *parser) parseMapping(ctx *context) (*ast.MappingNode, error) { value, err := p.parseMappingValue(ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to parse mapping value in mapping node") + return nil, err } mvnode, ok := value.(*ast.MappingValueNode) if !ok { @@ -59,7 +58,7 @@ func (p *parser) parseSequence(ctx *context) (*ast.SequenceNode, error) { value, err := p.parseToken(ctx.withIndex(uint(len(node.Values))), tk) if err != nil { - return nil, errors.Wrapf(err, "failed to parse sequence value in flow sequence node") + return nil, err } node.Values = append(node.Values, value) ctx.progress(1) @@ -100,7 +99,7 @@ func (p *parser) parseTag(ctx *context) (*ast.TagNode, error) { value, err = p.parseToken(ctx, ctx.currentToken()) } if err != nil { - return nil, errors.Wrapf(err, "failed to parse tag value") + return nil, err } node.Value = value return node, nil @@ -141,7 +140,7 @@ func (p *parser) createNullToken(base *token.Token) *token.Token { func (p *parser) parseMapValue(ctx *context, key ast.MapKeyNode, colonToken *token.Token) (ast.Node, error) { node, err := p.createMapValueNode(ctx, key, colonToken) if err != nil { - return nil, errors.Wrapf(err, "failed to create map value node") + return nil, err } if node != nil && node.GetPath() == "" { node.SetPath(ctx.path) @@ -175,7 +174,7 @@ func (p *parser) createMapValueNode(ctx *context, key ast.MapKeyNode, colonToken nullNode := ast.Null(nullToken) if comment != nil { - nullNode.SetComment(comment) + _ = nullNode.SetComment(comment) } else { // If there is a comment, it is already bound to the key node, // so remove the comment from the key to bind it to the null value. @@ -184,7 +183,7 @@ func (p *parser) createMapValueNode(ctx *context, key ast.MapKeyNode, colonToken if err := key.SetComment(nil); err != nil { return nil, err } - nullNode.SetComment(keyComment) + _ = nullNode.SetComment(keyComment) } } return nullNode, nil @@ -199,17 +198,17 @@ func (p *parser) createMapValueNode(ctx *context, key ast.MapKeyNode, colonToken ctx.insertToken(ctx.idx, nullToken) nullNode := ast.Null(nullToken) if comment != nil { - nullNode.SetComment(comment) + _ = nullNode.SetComment(comment) } return nullNode, nil } value, err := p.parseToken(ctx, ctx.currentToken()) if err != nil { - return nil, errors.Wrapf(err, "failed to parse mapping 'value' node") + return nil, err } if comment != nil { - value.SetComment(comment) + _ = value.SetComment(comment) } return value, nil } @@ -233,12 +232,12 @@ func (p *parser) validateMapValue(ctx *context, key, value ast.Node) error { func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) { key, err := p.parseMapKey(ctx) if err != nil { - return nil, errors.Wrapf(err, "failed to parse map key") + return nil, err } keyText := key.GetToken().Value key.SetPath(ctx.withChild(keyText).path) if err := p.validateMapKey(key.GetToken()); err != nil { - return nil, errors.Wrapf(err, "validate mapping key error") + return nil, err } ctx.progress(1) // progress to mapping value token tk := ctx.currentToken() // get mapping value token @@ -247,7 +246,7 @@ func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) { } ctx.progress(1) // progress to value token if err := p.setSameLineCommentIfExists(ctx.withChild(keyText), key); err != nil { - return nil, errors.Wrapf(err, "failed to set same line comment to node") + return nil, err } if key.GetComment() != nil { // if current token is comment, GetComment() is not nil. 
@@ -257,10 +256,10 @@ func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) { value, err := p.parseMapValue(ctx.withChild(keyText), key, tk) if err != nil { - return nil, errors.Wrapf(err, "failed to parse map value") + return nil, err } if err := p.validateMapValue(ctx, key, value); err != nil { - return nil, errors.Wrapf(err, "failed to validate map value") + return nil, err } mvnode := ast.MappingValue(tk, key, value) @@ -275,16 +274,16 @@ func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) { ctx.progressIgnoreComment(1) value, err := p.parseToken(ctx, ctx.currentToken()) if err != nil { - return nil, errors.Wrapf(err, "failed to parse mapping node") + return nil, err } switch value.Type() { case ast.MappingType: - c := value.(*ast.MappingNode) + c, _ := value.(*ast.MappingNode) comment := c.GetComment() for idx, v := range c.Values { if idx == 0 && comment != nil { if err := v.SetComment(comment); err != nil { - return nil, errors.Wrapf(err, "failed to set comment token to node") + return nil, err } } node.Values = append(node.Values, v) @@ -292,7 +291,7 @@ func (p *parser) parseMappingValue(ctx *context) (ast.Node, error) { case ast.MappingValueType: node.Values = append(node.Values, value.(*ast.MappingValueNode)) default: - return nil, xerrors.Errorf("failed to parse mapping value node node is %s", value.Type()) + return nil, fmt.Errorf("failed to parse mapping value node node is %s", value.Type()) } ntk = ctx.nextNotCommentToken() antk = ctx.afterNextNotCommentToken() @@ -340,7 +339,7 @@ func (p *parser) parseSequenceEntry(ctx *context) (*ast.SequenceNode, error) { } value, err := p.parseToken(ctx.withIndex(uint(len(sequenceNode.Values))), ctx.currentToken()) if err != nil { - return nil, errors.Wrapf(err, "failed to parse sequence") + return nil, err } if comment != nil { comment.SetPath(ctx.withIndex(uint(len(sequenceNode.Values))).path) @@ -382,7 +381,7 @@ func (p *parser) parseAnchor(ctx *context) (*ast.AnchorNode, error) { ctx.progress(1) // skip anchor token name, err := p.parseToken(ctx, ctx.currentToken()) if err != nil { - return nil, errors.Wrapf(err, "failed to parser anchor name node") + return nil, err } anchor.Name = name ntk = ctx.nextToken() @@ -392,7 +391,7 @@ func (p *parser) parseAnchor(ctx *context) (*ast.AnchorNode, error) { ctx.progress(1) value, err := p.parseToken(ctx, ctx.currentToken()) if err != nil { - return nil, errors.Wrapf(err, "failed to parser anchor name node") + return nil, err } anchor.Value = value return anchor, nil @@ -409,7 +408,7 @@ func (p *parser) parseAlias(ctx *context) (*ast.AliasNode, error) { ctx.progress(1) // skip alias token name, err := p.parseToken(ctx, ctx.currentToken()) if err != nil { - return nil, errors.Wrapf(err, "failed to parser alias name node") + return nil, err } alias.Value = name return alias, nil @@ -448,7 +447,7 @@ func (p *parser) parseScalarValueWithComment(ctx *context, tk *token.Token) (ast if p.isSameLineComment(ctx.nextToken(), node) { ctx.progress(1) if err := p.setSameLineCommentIfExists(ctx, node); err != nil { - return nil, errors.Wrapf(err, "failed to set same line comment to node") + return nil, err } } return node, nil @@ -483,7 +482,7 @@ func (p *parser) parseDirective(ctx *context) (*ast.DirectiveNode, error) { ctx.progress(1) // skip directive token value, err := p.parseToken(ctx, ctx.currentToken()) if err != nil { - return nil, errors.Wrapf(err, "failed to parse directive value") + return nil, err } node.Value = value ctx.progress(1) @@ -509,13 +508,13 @@ func (p 
*parser) parseLiteral(ctx *context) (*ast.LiteralNode, error) { comment = p.parseCommentOnly(ctx) comment.SetPath(ctx.path) if err := node.SetComment(comment); err != nil { - return nil, errors.Wrapf(err, "failed to set comment to literal") + return nil, err } tk = ctx.currentToken() } value, err := p.parseToken(ctx, tk) if err != nil { - return nil, errors.Wrapf(err, "failed to parse literal/folded value") + return nil, err } snode, ok := value.(*ast.StringNode) if !ok { @@ -543,7 +542,7 @@ func (p *parser) setSameLineCommentIfExists(ctx *context, node ast.Node) error { comment := ast.CommentGroup([]*token.Token{tk}) comment.SetPath(ctx.path) if err := node.SetComment(comment); err != nil { - return errors.Wrapf(err, "failed to set comment token to ast.Node") + return err } return nil } @@ -553,7 +552,7 @@ func (p *parser) parseDocument(ctx *context) (*ast.DocumentNode, error) { ctx.progress(1) // skip document header token body, err := p.parseToken(ctx, ctx.currentToken()) if err != nil { - return nil, errors.Wrapf(err, "failed to parse document body") + return nil, err } node := ast.Document(startTk, body) if ntk := ctx.nextToken(); ntk != nil && ntk.Type == token.DocumentEndType { @@ -603,14 +602,14 @@ func (p *parser) parseComment(ctx *context) (ast.Node, error) { group := p.parseCommentOnly(ctx) node, err := p.parseToken(ctx, ctx.currentToken()) if err != nil { - return nil, errors.Wrapf(err, "failed to parse node after comment") + return nil, err } if node == nil { return group, nil } group.SetPath(node.GetPath()) if err := node.SetComment(group); err != nil { - return nil, errors.Wrapf(err, "failed to set comment token to node") + return nil, err } return node, nil } @@ -622,7 +621,7 @@ func (p *parser) parseMappingKey(ctx *context) (*ast.MappingKeyNode, error) { ctx.progress(1) // skip mapping key token value, err := p.parseToken(ctx.withChild(keyTk.Value), ctx.currentToken()) if err != nil { - return nil, errors.Wrapf(err, "failed to parse map key") + return nil, err } node.Value = value return node, nil @@ -631,7 +630,7 @@ func (p *parser) parseMappingKey(ctx *context) (*ast.MappingKeyNode, error) { func (p *parser) parseToken(ctx *context, tk *token.Token) (ast.Node, error) { node, err := p.createNodeFromToken(ctx, tk) if err != nil { - return nil, errors.Wrapf(err, "failed to create node from token") + return nil, err } if node != nil && node.GetPath() == "" { node.SetPath(ctx.path) @@ -649,7 +648,7 @@ func (p *parser) createNodeFromToken(ctx *context, tk *token.Token) (ast.Node, e } node, err := p.parseScalarValueWithComment(ctx, tk) if err != nil { - return nil, errors.Wrapf(err, "failed to parse scalar value") + return nil, err } if node != nil { return node, nil @@ -687,7 +686,7 @@ func (p *parser) parse(tokens token.Tokens, mode Mode) (*ast.File, error) { for ctx.next() { node, err := p.parseToken(ctx, ctx.currentToken()) if err != nil { - return nil, errors.Wrapf(err, "failed to parse") + return nil, err } ctx.progressIgnoreComment(1) if node == nil { @@ -713,7 +712,7 @@ func ParseBytes(bytes []byte, mode Mode) (*ast.File, error) { tokens := lexer.Tokenize(string(bytes)) f, err := Parse(tokens, mode) if err != nil { - return nil, errors.Wrapf(err, "failed to parse") + return nil, err } return f, nil } @@ -723,20 +722,20 @@ func Parse(tokens token.Tokens, mode Mode) (*ast.File, error) { var p parser f, err := p.parse(tokens, mode) if err != nil { - return nil, errors.Wrapf(err, "failed to parse") + return nil, err } return f, nil } // Parse parse from filename, and 
returns ast.File func ParseFile(filename string, mode Mode) (*ast.File, error) { - file, err := ioutil.ReadFile(filename) + file, err := os.ReadFile(filename) if err != nil { - return nil, errors.Wrapf(err, "failed to read file: %s", filename) + return nil, err } f, err := ParseBytes(file, mode) if err != nil { - return nil, errors.Wrapf(err, "failed to parse") + return nil, err } f.Name = filename return f, nil diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/path.go b/hack/tools/vendor/github.com/goccy/go-yaml/path.go index 72554bd8be..fcb56815e7 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/path.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/path.go @@ -8,7 +8,6 @@ import ( "strings" "github.com/goccy/go-yaml/ast" - "github.com/goccy/go-yaml/internal/errors" "github.com/goccy/go-yaml/parser" "github.com/goccy/go-yaml/printer" ) @@ -39,7 +38,7 @@ func PathString(s string) (*Path, error) { case '.': b, buf, c, err := parsePathDot(builder, buf, cursor) if err != nil { - return nil, errors.Wrapf(err, "failed to parse path of dot") + return nil, err } length = len(buf) builder = b @@ -47,13 +46,13 @@ func PathString(s string) (*Path, error) { case '[': b, buf, c, err := parsePathIndex(builder, buf, cursor) if err != nil { - return nil, errors.Wrapf(err, "failed to parse path of index") + return nil, err } length = len(buf) builder = b cursor = c default: - return nil, errors.Wrapf(ErrInvalidPathString, "invalid path at %d", cursor) + return nil, fmt.Errorf("invalid path at %d: %w", cursor, ErrInvalidPathString) } } return builder.Build(), nil @@ -67,18 +66,18 @@ func parsePathRecursive(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, [ c := buf[cursor] switch c { case '$': - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '$' after '..' character") + return nil, nil, 0, fmt.Errorf("specified '$' after '..' character: %w", ErrInvalidPathString) case '*': - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '*' after '..' character") + return nil, nil, 0, fmt.Errorf("specified '*' after '..' character: %w", ErrInvalidPathString) case '.', '[': goto end case ']': - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified ']' after '..' character") + return nil, nil, 0, fmt.Errorf("specified ']' after '..' character: %w", ErrInvalidPathString) } } end: if start == cursor { - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "not found recursive selector") + return nil, nil, 0, fmt.Errorf("not found recursive selector: %w", ErrInvalidPathString) } return b.Recursive(string(buf[start:cursor])), buf, cursor, nil } @@ -88,7 +87,7 @@ func parsePathDot(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []rune, if cursor+1 < length && buf[cursor+1] == '.' { b, buf, c, err := parsePathRecursive(b, buf, cursor) if err != nil { - return nil, nil, 0, errors.Wrapf(err, "failed to parse path of recursive") + return nil, nil, 0, err } return b, buf, c, nil } @@ -103,18 +102,18 @@ func parsePathDot(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []rune, c := buf[cursor] switch c { case '$': - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '$' after '.' character") + return nil, nil, 0, fmt.Errorf("specified '$' after '.' character: %w", ErrInvalidPathString) case '*': - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '*' after '.' character") + return nil, nil, 0, fmt.Errorf("specified '*' after '.' 
character: %w", ErrInvalidPathString) case '.', '[': goto end case ']': - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified ']' after '.' character") + return nil, nil, 0, fmt.Errorf("specified ']' after '.' character: %w", ErrInvalidPathString) } } end: if start == cursor { - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "cloud not find by empty key") + return nil, nil, 0, fmt.Errorf("cloud not find by empty key: %w", ErrInvalidPathString) } return b.child(string(buf[start:cursor])), buf, cursor, nil } @@ -136,21 +135,21 @@ func parseQuotedKey(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []run } end: if !foundEndDelim { - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "could not find end delimiter for key") + return nil, nil, 0, fmt.Errorf("could not find end delimiter for key: %w", ErrInvalidPathString) } if start == cursor { - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "could not find by empty key") + return nil, nil, 0, fmt.Errorf("could not find by empty key: %w", ErrInvalidPathString) } selector := buf[start:cursor] cursor++ if cursor < length { switch buf[cursor] { case '$': - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '$' after '.' character") + return nil, nil, 0, fmt.Errorf("specified '$' after '.' character: %w", ErrInvalidPathString) case '*': - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified '*' after '.' character") + return nil, nil, 0, fmt.Errorf("specified '*' after '.' character: %w", ErrInvalidPathString) case ']': - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "specified ']' after '.' character") + return nil, nil, 0, fmt.Errorf("specified ']' after '.' character: %w", ErrInvalidPathString) } } return b.child(string(selector)), buf, cursor, nil @@ -160,7 +159,7 @@ func parsePathIndex(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []run length := len(buf) cursor++ // skip '[' character if length <= cursor { - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "unexpected end of YAML Path") + return nil, nil, 0, fmt.Errorf("unexpected end of YAML Path: %w", ErrInvalidPathString) } c := buf[cursor] switch c { @@ -176,7 +175,7 @@ func parsePathIndex(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []run break } if buf[cursor] != ']' { - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "invalid character %s at %d", string(buf[cursor]), cursor) + return nil, nil, 0, fmt.Errorf("invalid character %s at %d: %w", string(buf[cursor]), cursor, ErrInvalidPathString) } numOrAll := string(buf[start:cursor]) if numOrAll == "*" { @@ -184,11 +183,11 @@ func parsePathIndex(b *PathBuilder, buf []rune, cursor int) (*PathBuilder, []run } num, err := strconv.ParseInt(numOrAll, 10, 64) if err != nil { - return nil, nil, 0, errors.Wrapf(err, "failed to parse number") + return nil, nil, 0, err } return b.Index(uint(num)), buf, cursor + 1, nil } - return nil, nil, 0, errors.Wrapf(ErrInvalidPathString, "invalid character %s at %d", c, cursor) + return nil, nil, 0, fmt.Errorf("invalid character %q at %d: %w", c, cursor, ErrInvalidPathString) } // Path represent YAMLPath ( like a JSONPath ). 
@@ -205,10 +204,10 @@ func (p *Path) String() string { func (p *Path) Read(r io.Reader, v interface{}) error { node, err := p.ReadNode(r) if err != nil { - return errors.Wrapf(err, "failed to read node") + return err } if err := Unmarshal([]byte(node.String()), v); err != nil { - return errors.Wrapf(err, "failed to unmarshal") + return err } return nil } @@ -220,15 +219,15 @@ func (p *Path) ReadNode(r io.Reader) (ast.Node, error) { } var buf bytes.Buffer if _, err := io.Copy(&buf, r); err != nil { - return nil, errors.Wrapf(err, "failed to copy from reader") + return nil, err } f, err := parser.ParseBytes(buf.Bytes(), 0) if err != nil { - return nil, errors.Wrapf(err, "failed to parse yaml") + return nil, err } node, err := p.FilterFile(f) if err != nil { - return nil, errors.Wrapf(err, "failed to filter from ast.File") + return nil, err } return node, nil } @@ -237,10 +236,10 @@ func (p *Path) ReadNode(r io.Reader) (ast.Node, error) { func (p *Path) Filter(target, v interface{}) error { b, err := Marshal(target) if err != nil { - return errors.Wrapf(err, "failed to marshal target value") + return err } if err := p.Read(bytes.NewBuffer(b), v); err != nil { - return errors.Wrapf(err, "failed to read") + return err } return nil } @@ -250,20 +249,20 @@ func (p *Path) FilterFile(f *ast.File) (ast.Node, error) { for _, doc := range f.Docs { node, err := p.FilterNode(doc.Body) if err != nil { - return nil, errors.Wrapf(err, "failed to filter node by path ( %s )", p.node) + return nil, err } if node != nil { return node, nil } } - return nil, errors.Wrapf(ErrNotFoundNode, "failed to find path ( %s )", p.node) + return nil, fmt.Errorf("failed to find path ( %s ): %w", p.node, ErrNotFoundNode) } // FilterNode filter from node by YAMLPath. func (p *Path) FilterNode(node ast.Node) (ast.Node, error) { n, err := p.node.filter(node) if err != nil { - return nil, errors.Wrapf(err, "failed to filter node by path ( %s )", p.node) + return nil, err } return n, nil } @@ -272,14 +271,14 @@ func (p *Path) FilterNode(node ast.Node) (ast.Node, error) { func (p *Path) MergeFromReader(dst *ast.File, src io.Reader) error { var buf bytes.Buffer if _, err := io.Copy(&buf, src); err != nil { - return errors.Wrapf(err, "failed to copy from reader") + return err } file, err := parser.ParseBytes(buf.Bytes(), 0) if err != nil { - return errors.Wrapf(err, "failed to parse") + return err } if err := p.MergeFromFile(dst, file); err != nil { - return errors.Wrapf(err, "failed to merge file") + return err } return nil } @@ -288,11 +287,11 @@ func (p *Path) MergeFromReader(dst *ast.File, src io.Reader) error { func (p *Path) MergeFromFile(dst *ast.File, src *ast.File) error { base, err := p.FilterFile(dst) if err != nil { - return errors.Wrapf(err, "failed to filter file") + return err } for _, doc := range src.Docs { if err := ast.Merge(base, doc); err != nil { - return errors.Wrapf(err, "failed to merge") + return err } } return nil @@ -302,10 +301,10 @@ func (p *Path) MergeFromFile(dst *ast.File, src *ast.File) error { func (p *Path) MergeFromNode(dst *ast.File, src ast.Node) error { base, err := p.FilterFile(dst) if err != nil { - return errors.Wrapf(err, "failed to filter file") + return err } if err := ast.Merge(base, src); err != nil { - return errors.Wrapf(err, "failed to merge") + return err } return nil } @@ -314,14 +313,14 @@ func (p *Path) MergeFromNode(dst *ast.File, src ast.Node) error { func (p *Path) ReplaceWithReader(dst *ast.File, src io.Reader) error { var buf bytes.Buffer if _, err := io.Copy(&buf, src); err 
!= nil { - return errors.Wrapf(err, "failed to copy from reader") + return err } file, err := parser.ParseBytes(buf.Bytes(), 0) if err != nil { - return errors.Wrapf(err, "failed to parse") + return err } if err := p.ReplaceWithFile(dst, file); err != nil { - return errors.Wrapf(err, "failed to replace file") + return err } return nil } @@ -330,7 +329,7 @@ func (p *Path) ReplaceWithReader(dst *ast.File, src io.Reader) error { func (p *Path) ReplaceWithFile(dst *ast.File, src *ast.File) error { for _, doc := range src.Docs { if err := p.ReplaceWithNode(dst, doc); err != nil { - return errors.Wrapf(err, "failed to replace file by path ( %s )", p.node) + return err } } return nil @@ -343,7 +342,7 @@ func (p *Path) ReplaceWithNode(dst *ast.File, node ast.Node) error { node = node.(*ast.DocumentNode).Body } if err := p.node.replace(doc.Body, node); err != nil { - return errors.Wrapf(err, "failed to replace node by path ( %s )", p.node) + return err } } return nil @@ -468,11 +467,11 @@ func (n *rootNode) String() string { func (n *rootNode) filter(node ast.Node) (ast.Node, error) { if n.child == nil { - return nil, nil + return node, nil } filtered, err := n.child.filter(node) if err != nil { - return nil, errors.Wrapf(err, "failed to filter") + return nil, err } return filtered, nil } @@ -482,7 +481,7 @@ func (n *rootNode) replace(node ast.Node, target ast.Node) error { return nil } if err := n.child.replace(node, target); err != nil { - return errors.Wrapf(err, "failed to replace") + return err } return nil } @@ -514,7 +513,7 @@ func (n *selectorNode) filter(node ast.Node) (ast.Node, error) { var err error key, err = strconv.Unquote(key) if err != nil { - return nil, errors.Wrapf(err, "failed to unquote") + return nil, err } case '\'': if len(key) > 1 && key[len(key)-1] == '\'' { @@ -528,13 +527,13 @@ func (n *selectorNode) filter(node ast.Node) (ast.Node, error) { } filtered, err := n.child.filter(value.Value) if err != nil { - return nil, errors.Wrapf(err, "failed to filter") + return nil, err } return filtered, nil } } case ast.MappingValueType: - value := node.(*ast.MappingValueNode) + value, _ := node.(*ast.MappingValueNode) key := value.Key.GetToken().Value if key == selector { if n.child == nil { @@ -542,12 +541,12 @@ func (n *selectorNode) filter(node ast.Node) (ast.Node, error) { } filtered, err := n.child.filter(value.Value) if err != nil { - return nil, errors.Wrapf(err, "failed to filter") + return nil, err } return filtered, nil } default: - return nil, errors.Wrapf(ErrInvalidQuery, "expected node type is map or map value. but got %s", node.Type()) + return nil, fmt.Errorf("expected node type is map or map value. 
but got %s: %w", node.Type(), ErrInvalidQuery) } return nil, nil } @@ -559,11 +558,11 @@ func (n *selectorNode) replaceMapValue(value *ast.MappingValueNode, target ast.N } if n.child == nil { if err := value.Replace(target); err != nil { - return errors.Wrapf(err, "failed to replace") + return err } } else { if err := n.child.replace(value.Value, target); err != nil { - return errors.Wrapf(err, "failed to replace") + return err } } return nil @@ -574,16 +573,16 @@ func (n *selectorNode) replace(node ast.Node, target ast.Node) error { case ast.MappingType: for _, value := range node.(*ast.MappingNode).Values { if err := n.replaceMapValue(value, target); err != nil { - return errors.Wrapf(err, "failed to replace map value") + return err } } case ast.MappingValueType: - value := node.(*ast.MappingValueNode) + value, _ := node.(*ast.MappingValueNode) if err := n.replaceMapValue(value, target); err != nil { - return errors.Wrapf(err, "failed to replace map value") + return err } default: - return errors.Wrapf(ErrInvalidQuery, "expected node type is map or map value. but got %s", node.Type()) + return fmt.Errorf("expected node type is map or map value. but got %s: %w", node.Type(), ErrInvalidQuery) } return nil } @@ -612,11 +611,11 @@ func newIndexNode(selector uint) *indexNode { func (n *indexNode) filter(node ast.Node) (ast.Node, error) { if node.Type() != ast.SequenceType { - return nil, errors.Wrapf(ErrInvalidQuery, "expected sequence type node. but got %s", node.Type()) + return nil, fmt.Errorf("expected sequence type node. but got %s: %w", node.Type(), ErrInvalidQuery) } - sequence := node.(*ast.SequenceNode) + sequence, _ := node.(*ast.SequenceNode) if n.selector >= uint(len(sequence.Values)) { - return nil, errors.Wrapf(ErrInvalidQuery, "expected index is %d. but got sequences has %d items", n.selector, sequence.Values) + return nil, fmt.Errorf("expected index is %d. but got sequences has %d items: %w", n.selector, sequence.Values, ErrInvalidQuery) } value := sequence.Values[n.selector] if n.child == nil { @@ -624,27 +623,27 @@ func (n *indexNode) filter(node ast.Node) (ast.Node, error) { } filtered, err := n.child.filter(value) if err != nil { - return nil, errors.Wrapf(err, "failed to filter") + return nil, err } return filtered, nil } func (n *indexNode) replace(node ast.Node, target ast.Node) error { if node.Type() != ast.SequenceType { - return errors.Wrapf(ErrInvalidQuery, "expected sequence type node. but got %s", node.Type()) + return fmt.Errorf("expected sequence type node. but got %s: %w", node.Type(), ErrInvalidQuery) } - sequence := node.(*ast.SequenceNode) + sequence, _ := node.(*ast.SequenceNode) if n.selector >= uint(len(sequence.Values)) { - return errors.Wrapf(ErrInvalidQuery, "expected index is %d. but got sequences has %d items", n.selector, sequence.Values) + return fmt.Errorf("expected index is %d. but got sequences has %d items: %w", n.selector, sequence.Values, ErrInvalidQuery) } if n.child == nil { if err := sequence.Replace(int(n.selector), target); err != nil { - return errors.Wrapf(err, "failed to replace") + return err } return nil } if err := n.child.replace(sequence.Values[n.selector], target); err != nil { - return errors.Wrapf(err, "failed to replace") + return err } return nil } @@ -677,9 +676,9 @@ func (n *indexAllNode) String() string { func (n *indexAllNode) filter(node ast.Node) (ast.Node, error) { if node.Type() != ast.SequenceType { - return nil, errors.Wrapf(ErrInvalidQuery, "expected sequence type node. 
but got %s", node.Type()) + return nil, fmt.Errorf("expected sequence type node. but got %s: %w", node.Type(), ErrInvalidQuery) } - sequence := node.(*ast.SequenceNode) + sequence, _ := node.(*ast.SequenceNode) if n.child == nil { return sequence, nil } @@ -688,7 +687,7 @@ func (n *indexAllNode) filter(node ast.Node) (ast.Node, error) { for _, value := range sequence.Values { filtered, err := n.child.filter(value) if err != nil { - return nil, errors.Wrapf(err, "failed to filter") + return nil, err } out.Values = append(out.Values, filtered) } @@ -697,20 +696,20 @@ func (n *indexAllNode) filter(node ast.Node) (ast.Node, error) { func (n *indexAllNode) replace(node ast.Node, target ast.Node) error { if node.Type() != ast.SequenceType { - return errors.Wrapf(ErrInvalidQuery, "expected sequence type node. but got %s", node.Type()) + return fmt.Errorf("expected sequence type node. but got %s: %w", node.Type(), ErrInvalidQuery) } - sequence := node.(*ast.SequenceNode) + sequence, _ := node.(*ast.SequenceNode) if n.child == nil { for idx := range sequence.Values { if err := sequence.Replace(idx, target); err != nil { - return errors.Wrapf(err, "failed to replace") + return err } } return nil } for _, value := range sequence.Values { if err := n.child.replace(value, target); err != nil { - return errors.Wrapf(err, "failed to replace") + return err } } return nil @@ -743,7 +742,7 @@ func (n *recursiveNode) filterNode(node ast.Node) (*ast.SequenceNode, error) { for _, value := range typedNode.Values { seq, err := n.filterNode(value) if err != nil { - return nil, errors.Wrapf(err, "failed to filter") + return nil, err } sequence.Values = append(sequence.Values, seq.Values...) } @@ -754,14 +753,14 @@ func (n *recursiveNode) filterNode(node ast.Node) (*ast.SequenceNode, error) { } seq, err := n.filterNode(typedNode.Value) if err != nil { - return nil, errors.Wrapf(err, "failed to filter") + return nil, err } sequence.Values = append(sequence.Values, seq.Values...) case *ast.SequenceNode: for _, value := range typedNode.Values { seq, err := n.filterNode(value) if err != nil { - return nil, errors.Wrapf(err, "failed to filter") + return nil, err } sequence.Values = append(sequence.Values, seq.Values...) 
} @@ -772,7 +771,7 @@ func (n *recursiveNode) filterNode(node ast.Node) (*ast.SequenceNode, error) { func (n *recursiveNode) filter(node ast.Node) (ast.Node, error) { sequence, err := n.filterNode(node) if err != nil { - return nil, errors.Wrapf(err, "failed to filter") + return nil, err } sequence.Start = node.GetToken() return sequence, nil @@ -783,23 +782,23 @@ func (n *recursiveNode) replaceNode(node ast.Node, target ast.Node) error { case *ast.MappingNode: for _, value := range typedNode.Values { if err := n.replaceNode(value, target); err != nil { - return errors.Wrapf(err, "failed to replace") + return err } } case *ast.MappingValueNode: key := typedNode.Key.GetToken().Value if n.selector == key { if err := typedNode.Replace(target); err != nil { - return errors.Wrapf(err, "failed to replace") + return err } } if err := n.replaceNode(typedNode.Value, target); err != nil { - return errors.Wrapf(err, "failed to replace") + return err } case *ast.SequenceNode: for _, value := range typedNode.Values { if err := n.replaceNode(value, target); err != nil { - return errors.Wrapf(err, "failed to replace") + return err } } } @@ -808,7 +807,7 @@ func (n *recursiveNode) replaceNode(node ast.Node, target ast.Node) error { func (n *recursiveNode) replace(node ast.Node, target ast.Node) error { if err := n.replaceNode(node, target); err != nil { - return errors.Wrapf(err, "failed to replace") + return err } return nil } diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/printer/printer.go b/hack/tools/vendor/github.com/goccy/go-yaml/printer/printer.go index d5e25dc919..c71481295e 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/printer/printer.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/printer/printer.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/fatih/color" + "github.com/goccy/go-yaml/ast" "github.com/goccy/go-yaml/token" ) @@ -29,6 +30,7 @@ type Printer struct { Bool PrintFunc String PrintFunc Number PrintFunc + Comment PrintFunc } func defaultLineNumberFormat(num int) string { @@ -82,6 +84,11 @@ func (p *Printer) property(tk *token.Token) *Property { return p.Number() } return prop + case token.CommentType: + if p.Comment != nil { + return p.Comment() + } + return prop default: } return prop diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/scanner/context.go b/hack/tools/vendor/github.com/goccy/go-yaml/scanner/context.go index 3aaec56189..5f92a6ef5a 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/scanner/context.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/scanner/context.go @@ -42,7 +42,7 @@ func createContext() *Context { } func newContext(src []rune) *Context { - ctx := ctxPool.Get().(*Context) + ctx, _ := ctxPool.Get().(*Context) ctx.reset(src) return ctx } diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/scanner/scanner.go b/hack/tools/vendor/github.com/goccy/go-yaml/scanner/scanner.go index b0eac48d24..a559555a99 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/scanner/scanner.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/scanner/scanner.go @@ -1,11 +1,11 @@ package scanner import ( + "errors" "io" "strings" "github.com/goccy/go-yaml/token" - "golang.org/x/xerrors" ) // IndentState state for indent @@ -25,18 +25,24 @@ const ( // Scanner holds the scanner's internal state while processing a given text. // It can be allocated as part of another data structure but must be initialized via Init before use. 
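
The context.go hunk above swaps a bare type assertion on sync.Pool.Get for the comma-ok form. A small sketch of the pattern with simplified names; discarding the ok flag is safe here because the pool's New function only ever produces *Context values:

package main

import (
	"fmt"
	"sync"
)

type Context struct{ src []rune }

var ctxPool = sync.Pool{
	New: func() any { return &Context{} },
}

func newContext(src []rune) *Context {
	// Comma-ok assertion: removes the panic path flagged by linters, and is
	// safe because the pool only ever holds *Context values.
	ctx, _ := ctxPool.Get().(*Context)
	ctx.src = src
	return ctx
}

func main() {
	ctx := newContext([]rune("a: b"))
	fmt.Println(string(ctx.src))
	ctxPool.Put(ctx) // return for reuse
}
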
type Scanner struct { - source []rune - sourcePos int - sourceSize int - line int - column int - offset int - prevIndentLevel int - prevIndentNum int - prevIndentColumn int - docStartColumn int + source []rune + sourcePos int + sourceSize int + // line number. This number starts from 1. + line int + // column number. This number starts from 1. + column int + // offset represents the offset from the beginning of the source. + offset int + // lastDelimColumn is the last column needed to compare indent is retained. + lastDelimColumn int + // indentNum indicates the number of spaces used for indentation. + indentNum int + // prevLineIndentNum indicates the number of spaces used for indentation at previous line. + prevLineIndentNum int + // indentLevel indicates the level of indent depth. This value does not match the column value. indentLevel int - indentNum int + docStartColumn int isFirstCharAtLine bool isAnchor bool startedFlowSequenceNum int @@ -90,6 +96,7 @@ func (s *Scanner) progressColumn(ctx *Context, num int) { } func (s *Scanner) progressLine(ctx *Context) { + s.prevLineIndentNum = s.indentNum s.column = 1 s.line++ s.offset++ @@ -99,19 +106,6 @@ func (s *Scanner) progressLine(ctx *Context) { ctx.progress(1) } -func (s *Scanner) isNeededKeepPreviousIndentNum(ctx *Context, c rune) bool { - if !s.isChangedToIndentStateUp() { - return false - } - if ctx.isDocument() { - return true - } - if c == '-' && ctx.existsBuffer() { - return true - } - return false -} - func (s *Scanner) isNewLineChar(c rune) bool { if c == '\n' { return true @@ -140,42 +134,38 @@ func (s *Scanner) newLineCount(src []rune) int { return cnt } -func (s *Scanner) updateIndentState(ctx *Context) { - indentNumBasedIndentState := s.indentState - if s.prevIndentNum < s.indentNum { - s.indentLevel = s.prevIndentLevel + 1 - indentNumBasedIndentState = IndentStateUp - } else if s.prevIndentNum == s.indentNum { - s.indentLevel = s.prevIndentLevel - indentNumBasedIndentState = IndentStateEqual - } else { - indentNumBasedIndentState = IndentStateDown - if s.prevIndentLevel > 0 { - s.indentLevel = s.prevIndentLevel - 1 +func (s *Scanner) updateIndentLevel() { + if s.prevLineIndentNum < s.indentNum { + s.indentLevel++ + } else if s.prevLineIndentNum > s.indentNum { + if s.indentLevel > 0 { + s.indentLevel-- } } +} - if s.prevIndentColumn > 0 { - if s.prevIndentColumn < s.column { +func (s *Scanner) updateIndentState(ctx *Context) { + if s.lastDelimColumn > 0 { + if s.lastDelimColumn < s.column { s.indentState = IndentStateUp - } else if s.prevIndentColumn != s.column || indentNumBasedIndentState != IndentStateEqual { - // The following case ( current position is 'd' ), some variables becomes like here - // - prevIndentColumn: 1 of 'a' - // - indentNumBasedIndentState: IndentStateDown because d's indentNum(1) is less than c's indentNum(3). - // Therefore, s.prevIndentColumn(1) == s.column(1) is true, but we want to treat this as IndentStateDown. - // So, we look also current indentState value by the above prevIndentNum based logic, and determins finally indentState. - // --- - // a: - // b - // c - // d: e - // ^ - s.indentState = IndentStateDown } else { - s.indentState = IndentStateEqual + // If lastDelimColumn and s.column are the same, + // treat as Down state since it is the same column as delimiter. 
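
The rewritten indent tracking above separates two concerns: updateIndentLevel adjusts depth from the previous line's indent count, and the state classification falls back to a pure function of the indent difference when no delimiter column is recorded. A simplified sketch of that classification:

package main

import "fmt"

type IndentState int

const (
	IndentStateEqual IndentState = iota
	IndentStateUp
	IndentStateDown
)

// stateFromIndentDiff mirrors indentStateFromIndentNumDifference in spirit:
// the state is derived purely from the previous and current indent counts.
func stateFromIndentDiff(prevLineIndentNum, indentNum int) IndentState {
	switch {
	case prevLineIndentNum < indentNum:
		return IndentStateUp
	case prevLineIndentNum == indentNum:
		return IndentStateEqual
	default:
		return IndentStateDown
	}
}

func main() {
	fmt.Println(stateFromIndentDiff(2, 4)) // 1 (Up)
	fmt.Println(stateFromIndentDiff(4, 4)) // 0 (Equal)
	fmt.Println(stateFromIndentDiff(4, 2)) // 2 (Down)
}
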
+ s.indentState = IndentStateDown } } else { - s.indentState = indentNumBasedIndentState + s.indentState = s.indentStateFromIndentNumDifference() + } +} + +func (s *Scanner) indentStateFromIndentNumDifference() IndentState { + switch { + case s.prevLineIndentNum < s.indentNum: + return IndentStateUp + case s.prevLineIndentNum == s.indentNum: + return IndentStateEqual + default: + return IndentStateDown } } @@ -191,16 +181,9 @@ func (s *Scanner) updateIndent(ctx *Context, c rune) { s.indentState = IndentStateKeep return } + s.updateIndentLevel() s.updateIndentState(ctx) s.isFirstCharAtLine = false - if s.isNeededKeepPreviousIndentNum(ctx, c) { - return - } - if s.indentState != IndentStateUp { - s.prevIndentColumn = 0 - } - s.prevIndentNum = s.indentNum - s.prevIndentLevel = s.indentLevel } func (s *Scanner) isChangedToIndentStateDown() bool { @@ -211,10 +194,6 @@ func (s *Scanner) isChangedToIndentStateUp() bool { return s.indentState == IndentStateUp } -func (s *Scanner) isChangedToIndentStateEqual() bool { - return s.indentState == IndentStateEqual -} - func (s *Scanner) addBufferedTokenIfExists(ctx *Context) { ctx.addToken(s.bufferedToken(ctx)) } @@ -316,100 +295,93 @@ func (s *Scanner) scanDoubleQuote(ctx *Context) (tk *token.Token, pos int) { continue } else if c == '\\' { isFirstLineChar = false - if idx+1 < size { - nextChar := src[idx+1] - switch nextChar { - case 'b': - ctx.addOriginBuf(nextChar) - value = append(value, '\b') - idx++ - continue - case 'e': - ctx.addOriginBuf(nextChar) - value = append(value, '\x1B') - idx++ - continue - case 'f': - ctx.addOriginBuf(nextChar) - value = append(value, '\f') - idx++ - continue - case 'n': - ctx.addOriginBuf(nextChar) - value = append(value, '\n') - idx++ - continue - case 'r': - ctx.addOriginBuf(nextChar) - value = append(value, '\r') - idx++ - continue - case 'v': - ctx.addOriginBuf(nextChar) - value = append(value, '\v') - idx++ - continue - case 'L': // LS (#x2028) - ctx.addOriginBuf(nextChar) - value = append(value, []rune{'\xE2', '\x80', '\xA8'}...) - idx++ - continue - case 'N': // NEL (#x85) - ctx.addOriginBuf(nextChar) - value = append(value, []rune{'\xC2', '\x85'}...) - idx++ - continue - case 'P': // PS (#x2029) - ctx.addOriginBuf(nextChar) - value = append(value, []rune{'\xE2', '\x80', '\xA9'}...) - idx++ - continue - case '_': // #xA0 - ctx.addOriginBuf(nextChar) - value = append(value, []rune{'\xC2', '\xA0'}...) 
- idx++ - continue - case '"': - ctx.addOriginBuf(nextChar) - value = append(value, nextChar) - idx++ - continue - case 'x': - if idx+3 >= size { - // TODO: need to return error - //err = xerrors.New("invalid escape character \\x") - return - } - codeNum := hexRunesToInt(src[idx+2 : idx+4]) - value = append(value, rune(codeNum)) - idx += 3 - continue - case 'u': - if idx+5 >= size { - // TODO: need to return error - //err = xerrors.New("invalid escape character \\u") - return - } - codeNum := hexRunesToInt(src[idx+2 : idx+6]) - value = append(value, rune(codeNum)) - idx += 5 - continue - case 'U': - if idx+9 >= size { - // TODO: need to return error - //err = xerrors.New("invalid escape character \\U") - return - } - codeNum := hexRunesToInt(src[idx+2 : idx+10]) - value = append(value, rune(codeNum)) - idx += 9 - continue - case '\\': - ctx.addOriginBuf(nextChar) - idx++ + if idx+1 >= size { + value = append(value, c) + continue + } + nextChar := src[idx+1] + progress := 0 + switch nextChar { + case 'b': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, '\b') + case 'e': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, '\x1B') + case 'f': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, '\f') + case 'n': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, '\n') + case 'r': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, '\r') + case 'v': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, '\v') + case 'L': // LS (#x2028) + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, []rune{'\xE2', '\x80', '\xA8'}...) + case 'N': // NEL (#x85) + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, []rune{'\xC2', '\x85'}...) + case 'P': // PS (#x2029) + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, []rune{'\xE2', '\x80', '\xA9'}...) + case '_': // #xA0 + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, []rune{'\xC2', '\xA0'}...) 
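
The escape handling in scanDoubleQuote is being restructured around a single progress counter: each escape case records how many extra runes it consumed, and the index and column advance once after the switch instead of inside every case. A reduced sketch of the same shape, covering only a couple of escapes:

package main

import "fmt"

// decodeEscape assumes src[idx] == '\\' and reports the decoded runes plus
// how many runes beyond the backslash were consumed.
func decodeEscape(src []rune, idx int) (out []rune, progress int) {
	if idx+1 >= len(src) {
		return []rune{src[idx]}, 0 // lone backslash at end of input is kept as-is
	}
	switch next := src[idx+1]; next {
	case 'n':
		return []rune{'\n'}, 1
	case 't':
		return []rune{'\t'}, 1
	case '"', '\\':
		return []rune{next}, 1
	default:
		return []rune{src[idx]}, 0 // unknown escape: keep the backslash literally
	}
}

func main() {
	src := []rune(`a\nb`) // raw string: backslash and 'n' are two separate runes
	out, progress := decodeEscape(src, 1)
	fmt.Printf("%q consumed %d extra rune(s)\n", string(out), progress)
	// idx += progress; the column advances once, as in the rewritten scanner
}
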
+ case '"': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, nextChar) + case 'x': + progress = 3 + if idx+progress >= size { + // TODO: need to return error + //err = errors.New("invalid escape character \\x") + return + } + codeNum := hexRunesToInt(src[idx+2 : idx+progress+1]) + value = append(value, rune(codeNum)) + case 'u': + progress = 5 + if idx+progress >= size { + // TODO: need to return error + //err = errors.New("invalid escape character \\u") + return + } + codeNum := hexRunesToInt(src[idx+2 : idx+progress+1]) + value = append(value, rune(codeNum)) + case 'U': + progress = 9 + if idx+progress >= size { + // TODO: need to return error + //err = errors.New("invalid escape character \\U") + return } + codeNum := hexRunesToInt(src[idx+2 : idx+progress+1]) + value = append(value, rune(codeNum)) + case '\\': + progress = 1 + ctx.addOriginBuf(nextChar) + value = append(value, c) + default: + value = append(value, c) } - value = append(value, c) + idx += progress + s.progressColumn(ctx, progress) continue } else if c != '"' { value = append(value, c) @@ -476,7 +448,6 @@ func (s *Scanner) scanComment(ctx *Context) (tk *token.Token, pos int) { ctx.addOriginBuf('#') ctx.progress(1) // skip '#' character for idx, c := range ctx.src[ctx.idx:] { - pos = idx + 1 ctx.addOriginBuf(c) switch c { case '\n', '\r': @@ -502,7 +473,7 @@ func trimCommentFromLiteralOpt(text string) (string, error) { return text, nil } if idx == 0 { - return "", xerrors.New("invalid literal header") + return "", errors.New("invalid literal header") } return text[:idx-1], nil } @@ -538,7 +509,225 @@ func (s *Scanner) scanLiteral(ctx *Context, c rune) { } } -func (s *Scanner) scanLiteralHeader(ctx *Context) (pos int, err error) { +func (s *Scanner) scanNewLine(ctx *Context, c rune) { + if len(ctx.buf) > 0 && s.savedPos == nil { + s.savedPos = s.pos() + s.savedPos.Column -= len(ctx.bufferedSrc()) + } + + // if the following case, origin buffer has unnecessary two spaces. + // So, `removeRightSpaceFromOriginBuf` remove them, also fix column number too. + // --- + // a:[space][space] + // b: c + removedNum := ctx.removeRightSpaceFromBuf() + if removedNum > 0 { + s.column -= removedNum + s.offset -= removedNum + if s.savedPos != nil { + s.savedPos.Column -= removedNum + } + } + + // There is no problem that we ignore CR which followed by LF and normalize it to LF, because of following YAML1.2 spec. + // > Line breaks inside scalar content must be normalized by the YAML processor. Each such line break must be parsed into a single line feed character. + // > Outside scalar content, YAML allows any line break to be used to terminate lines. 
+ // > -- https://yaml.org/spec/1.2/spec.html + if c == '\r' && ctx.nextChar() == '\n' { + ctx.addOriginBuf('\r') + ctx.progress(1) + c = '\n' + } + + if ctx.isEOS() { + s.addBufferedTokenIfExists(ctx) + } else if s.isAnchor { + s.addBufferedTokenIfExists(ctx) + } + ctx.addBuf(' ') + ctx.addOriginBuf(c) + ctx.isSingleLine = false + s.progressLine(ctx) +} + +func (s *Scanner) scanFlowMapStart(ctx *Context) bool { + if ctx.existsBuffer() { + return false + } + + ctx.addOriginBuf('{') + ctx.addToken(token.MappingStart(string(ctx.obuf), s.pos())) + s.startedFlowMapNum++ + s.progressColumn(ctx, 1) + return true +} + +func (s *Scanner) scanFlowMapEnd(ctx *Context) bool { + if s.startedFlowMapNum <= 0 { + return false + } + + s.addBufferedTokenIfExists(ctx) + ctx.addOriginBuf('}') + ctx.addToken(token.MappingEnd(string(ctx.obuf), s.pos())) + s.startedFlowMapNum-- + s.progressColumn(ctx, 1) + return true +} + +func (s *Scanner) scanFlowArrayStart(ctx *Context) bool { + if ctx.existsBuffer() { + return false + } + + ctx.addOriginBuf('[') + ctx.addToken(token.SequenceStart(string(ctx.obuf), s.pos())) + s.startedFlowSequenceNum++ + s.progressColumn(ctx, 1) + return true +} + +func (s *Scanner) scanFlowArrayEnd(ctx *Context) bool { + if s.startedFlowSequenceNum <= 0 { + return false + } + + s.addBufferedTokenIfExists(ctx) + ctx.addOriginBuf(']') + ctx.addToken(token.SequenceEnd(string(ctx.obuf), s.pos())) + s.startedFlowSequenceNum-- + s.progressColumn(ctx, 1) + return true +} + +func (s *Scanner) scanFlowEntry(ctx *Context, c rune) bool { + if s.startedFlowSequenceNum <= 0 && s.startedFlowMapNum <= 0 { + return false + } + + s.addBufferedTokenIfExists(ctx) + ctx.addOriginBuf(c) + ctx.addToken(token.CollectEntry(string(ctx.obuf), s.pos())) + s.progressColumn(ctx, 1) + return true +} + +func (s *Scanner) scanMapDelim(ctx *Context) bool { + nc := ctx.nextChar() + if s.startedFlowMapNum <= 0 && nc != ' ' && !s.isNewLineChar(nc) && !ctx.isNextEOS() { + return false + } + + // mapping value + tk := s.bufferedToken(ctx) + if tk != nil { + s.lastDelimColumn = tk.Position.Column + ctx.addToken(tk) + } else if tk := ctx.lastToken(); tk != nil { + // If the map key is quote, the buffer does not exist because it has already been cut into tokens. + // Therefore, we need to check the last token. 
+ if tk.Indicator == token.QuotedScalarIndicator { + s.lastDelimColumn = tk.Position.Column + } + } + ctx.addToken(token.MappingValue(s.pos())) + s.progressColumn(ctx, 1) + return true +} + +func (s *Scanner) scanDocumentStart(ctx *Context) bool { + if s.indentNum != 0 { + return false + } + if s.column != 1 { + return false + } + if ctx.repeatNum('-') != 3 { + return false + } + + s.addBufferedTokenIfExists(ctx) + ctx.addToken(token.DocumentHeader(string(ctx.obuf)+"---", s.pos())) + s.progressColumn(ctx, 3) + return true +} + +func (s *Scanner) scanDocumentEnd(ctx *Context) bool { + if s.indentNum != 0 { + return false + } + if s.column != 1 { + return false + } + if ctx.repeatNum('.') != 3 { + return false + } + + ctx.addToken(token.DocumentEnd(string(ctx.obuf)+"...", s.pos())) + s.progressColumn(ctx, 3) + return true +} + +func (s *Scanner) scanMergeKey(ctx *Context) bool { + if !s.isMergeKey(ctx) { + return false + } + + s.lastDelimColumn = s.column + ctx.addToken(token.MergeKey(string(ctx.obuf)+"<<", s.pos())) + s.progressColumn(ctx, 1) + return true +} + +func (s *Scanner) scanRawFoldedChar(ctx *Context) bool { + if !ctx.existsBuffer() { + return false + } + if !s.isChangedToIndentStateUp() { + return false + } + + ctx.isRawFolded = true + ctx.addBuf('-') + ctx.addOriginBuf('-') + s.progressColumn(ctx, 1) + return true +} + +func (s *Scanner) scanSequence(ctx *Context) bool { + if ctx.existsBuffer() { + return false + } + + nc := ctx.nextChar() + if nc != ' ' && !s.isNewLineChar(nc) { + return false + } + + s.addBufferedTokenIfExists(ctx) + ctx.addOriginBuf('-') + tk := token.SequenceEntry(string(ctx.obuf), s.pos()) + s.lastDelimColumn = tk.Position.Column + ctx.addToken(tk) + s.progressColumn(ctx, 1) + return true +} + +func (s *Scanner) scanLiteralHeader(ctx *Context) (bool, error) { + if ctx.existsBuffer() { + return false, nil + } + + progress, err := s.scanLiteralHeaderOption(ctx) + if err != nil { + return false, err + } + s.progressColumn(ctx, progress) + s.progressLine(ctx) + return true, nil +} + +func (s *Scanner) scanLiteralHeaderOption(ctx *Context) (pos int, err error) { header := ctx.currentChar() ctx.addOriginBuf(header) ctx.progress(1) // skip '|' or '>' character @@ -594,116 +783,117 @@ func (s *Scanner) scanLiteralHeader(ctx *Context) (pos int, err error) { ctx.literalOpt = opt return } - break } } - err = xerrors.New("invalid literal header") + err = errors.New("invalid literal header") return } -func (s *Scanner) scanNewLine(ctx *Context, c rune) { - if len(ctx.buf) > 0 && s.savedPos == nil { - s.savedPos = s.pos() - s.savedPos.Column -= len(ctx.bufferedSrc()) +func (s *Scanner) scanMapKey(ctx *Context) bool { + if ctx.existsBuffer() { + return false } - // if the following case, origin buffer has unnecessary two spaces. - // So, `removeRightSpaceFromOriginBuf` remove them, also fix column number too. 
- // --- - // a:[space][space] - // b: c - removedNum := ctx.removeRightSpaceFromBuf() - if removedNum > 0 { - s.column -= removedNum - s.offset -= removedNum - if s.savedPos != nil { - s.savedPos.Column -= removedNum - } + nc := ctx.nextChar() + if nc != ' ' { + return false } - if ctx.isEOS() { - s.addBufferedTokenIfExists(ctx) - } else if s.isAnchor { - s.addBufferedTokenIfExists(ctx) + ctx.addToken(token.MappingKey(s.pos())) + s.progressColumn(ctx, 1) + return true +} + +func (s *Scanner) scanDirective(ctx *Context) bool { + if ctx.existsBuffer() { + return false } - ctx.addBuf(' ') - ctx.addOriginBuf(c) - ctx.isSingleLine = false - s.progressLine(ctx) + if s.indentNum != 0 { + return false + } + + ctx.addToken(token.Directive(string(ctx.obuf)+"%", s.pos())) + s.progressColumn(ctx, 1) + return true +} + +func (s *Scanner) scanAnchor(ctx *Context) bool { + if ctx.existsBuffer() { + return false + } + + s.addBufferedTokenIfExists(ctx) + ctx.addOriginBuf('&') + ctx.addToken(token.Anchor(string(ctx.obuf), s.pos())) + s.progressColumn(ctx, 1) + s.isAnchor = true + return true +} + +func (s *Scanner) scanAlias(ctx *Context) bool { + if ctx.existsBuffer() { + return false + } + + s.addBufferedTokenIfExists(ctx) + ctx.addOriginBuf('*') + ctx.addToken(token.Alias(string(ctx.obuf), s.pos())) + s.progressColumn(ctx, 1) + return true } func (s *Scanner) scan(ctx *Context) (pos int) { for ctx.next() { pos = ctx.nextPos() c := ctx.currentChar() + + // First, change the IndentState. + // If the target character is the first character in a line, IndentState is Up/Down/Equal state. + // The second and subsequent letters are Keep. s.updateIndent(ctx, c) + + // If IndentState is down, tokens are split, so the buffer accumulated until that point needs to be cutted as a token. 
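
The scan-loop refactor visible in these hunks follows one convention: every scanX helper returns true only when it fully consumed the current character, so the main loop collapses to a sequence of short checks. A toy version of that dispatch shape, with invented helpers covering just the flow-map braces:

package main

import "fmt"

type scanner struct{ flowMapDepth int }

func (s *scanner) scanFlowMapStart(c rune) bool {
	if c != '{' {
		return false
	}
	s.flowMapDepth++
	return true // consumed: the caller moves to the next character
}

func (s *scanner) scanFlowMapEnd(c rune) bool {
	if c != '}' || s.flowMapDepth <= 0 {
		return false // not ours: fall through to other handlers
	}
	s.flowMapDepth--
	return true
}

func (s *scanner) scan(src string) {
	for _, c := range src {
		switch {
		case s.scanFlowMapStart(c):
		case s.scanFlowMapEnd(c):
		default:
			fmt.Printf("plain char %q\n", c)
		}
	}
}

func main() {
	(&scanner{}).scan("{a}") // plain char 'a'
}
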
+ if s.isChangedToIndentStateDown() { + s.addBufferedTokenIfExists(ctx) + } if ctx.isDocument() { - if s.isChangedToIndentStateEqual() || - s.isChangedToIndentStateDown() { - s.addBufferedTokenIfExists(ctx) + if s.isChangedToIndentStateDown() { s.breakLiteral(ctx) } else { s.scanLiteral(ctx, c) continue } - } else if s.isChangedToIndentStateDown() { - s.addBufferedTokenIfExists(ctx) - } else if s.isChangedToIndentStateEqual() { - // if first character is new line character, buffer expect to raw folded literal - if len(ctx.obuf) > 0 && s.newLineCount(ctx.obuf) <= 1 { - // doesn't raw folded literal - s.addBufferedTokenIfExists(ctx) - } } switch c { case '{': - if !ctx.existsBuffer() { - ctx.addOriginBuf(c) - ctx.addToken(token.MappingStart(string(ctx.obuf), s.pos())) - s.startedFlowMapNum++ - s.progressColumn(ctx, 1) + if s.scanFlowMapStart(ctx) { return } case '}': - if !ctx.existsBuffer() || s.startedFlowMapNum > 0 { - ctx.addToken(s.bufferedToken(ctx)) - ctx.addOriginBuf(c) - ctx.addToken(token.MappingEnd(string(ctx.obuf), s.pos())) - s.startedFlowMapNum-- - s.progressColumn(ctx, 1) + if s.scanFlowMapEnd(ctx) { return } case '.': - if s.indentNum == 0 && s.column == 1 && ctx.repeatNum('.') == 3 { - ctx.addToken(token.DocumentEnd(string(ctx.obuf)+"...", s.pos())) - s.progressColumn(ctx, 3) + if s.scanDocumentEnd(ctx) { pos += 2 return } case '<': - if s.isMergeKey(ctx) { - s.prevIndentColumn = s.column - ctx.addToken(token.MergeKey(string(ctx.obuf)+"<<", s.pos())) - s.progressColumn(ctx, 1) + if s.scanMergeKey(ctx) { pos++ return } case '-': - if s.indentNum == 0 && s.column == 1 && ctx.repeatNum('-') == 3 { - s.addBufferedTokenIfExists(ctx) - ctx.addToken(token.DocumentHeader(string(ctx.obuf)+"---", s.pos())) - s.progressColumn(ctx, 3) + if s.scanDocumentStart(ctx) { pos += 2 return } - if ctx.existsBuffer() && s.isChangedToIndentStateUp() { - // raw folded - ctx.isRawFolded = true - ctx.addBuf(c) - ctx.addOriginBuf(c) - s.progressColumn(ctx, 1) + if s.scanRawFoldedChar(ctx) { continue } + if s.scanSequence(ctx) { + return + } if ctx.existsBuffer() { // '-' is literal ctx.addBuf(c) @@ -711,69 +901,29 @@ func (s *Scanner) scan(ctx *Context) (pos int) { s.progressColumn(ctx, 1) continue } - nc := ctx.nextChar() - if nc == ' ' || s.isNewLineChar(nc) { - s.addBufferedTokenIfExists(ctx) - ctx.addOriginBuf(c) - tk := token.SequenceEntry(string(ctx.obuf), s.pos()) - s.prevIndentColumn = tk.Position.Column - ctx.addToken(tk) - s.progressColumn(ctx, 1) - return - } case '[': - if !ctx.existsBuffer() { - ctx.addOriginBuf(c) - ctx.addToken(token.SequenceStart(string(ctx.obuf), s.pos())) - s.startedFlowSequenceNum++ - s.progressColumn(ctx, 1) + if s.scanFlowArrayStart(ctx) { return } case ']': - if !ctx.existsBuffer() || s.startedFlowSequenceNum > 0 { - s.addBufferedTokenIfExists(ctx) - ctx.addOriginBuf(c) - ctx.addToken(token.SequenceEnd(string(ctx.obuf), s.pos())) - s.startedFlowSequenceNum-- - s.progressColumn(ctx, 1) + if s.scanFlowArrayEnd(ctx) { return } case ',': - if s.startedFlowSequenceNum > 0 || s.startedFlowMapNum > 0 { - s.addBufferedTokenIfExists(ctx) - ctx.addOriginBuf(c) - ctx.addToken(token.CollectEntry(string(ctx.obuf), s.pos())) - s.progressColumn(ctx, 1) + if s.scanFlowEntry(ctx, c) { return } case ':': - nc := ctx.nextChar() - if s.startedFlowMapNum > 0 || nc == ' ' || s.isNewLineChar(nc) || ctx.isNextEOS() { - // mapping value - tk := s.bufferedToken(ctx) - if tk != nil { - s.prevIndentColumn = tk.Position.Column - ctx.addToken(tk) - } else if tk := ctx.lastToken(); tk != nil { 
- // If the map key is quote, the buffer does not exist because it has already been cut into tokens. - // Therefore, we need to check the last token. - if tk.Indicator == token.QuotedScalarIndicator { - s.prevIndentColumn = tk.Position.Column - } - } - ctx.addToken(token.MappingValue(s.pos())) - s.progressColumn(ctx, 1) + if s.scanMapDelim(ctx) { return } case '|', '>': - if !ctx.existsBuffer() { - progress, err := s.scanLiteralHeader(ctx) - if err != nil { - // TODO: returns syntax error object - return - } - s.progressColumn(ctx, progress) - s.progressLine(ctx) + scanned, err := s.scanLiteralHeader(ctx) + if err != nil { + // TODO: returns syntax error object + return + } + if scanned { continue } case '!': @@ -788,33 +938,19 @@ func (s *Scanner) scan(ctx *Context) (pos int) { return } case '%': - if !ctx.existsBuffer() && s.indentNum == 0 { - ctx.addToken(token.Directive(string(ctx.obuf)+"%", s.pos())) - s.progressColumn(ctx, 1) + if s.scanDirective(ctx) { return } case '?': - nc := ctx.nextChar() - if !ctx.existsBuffer() && nc == ' ' { - ctx.addToken(token.MappingKey(s.pos())) - s.progressColumn(ctx, 1) + if s.scanMapKey(ctx) { return } case '&': - if !ctx.existsBuffer() { - s.addBufferedTokenIfExists(ctx) - ctx.addOriginBuf(c) - ctx.addToken(token.Anchor(string(ctx.obuf), s.pos())) - s.progressColumn(ctx, 1) - s.isAnchor = true + if s.scanAnchor(ctx) { return } case '*': - if !ctx.existsBuffer() { - s.addBufferedTokenIfExists(ctx) - ctx.addOriginBuf(c) - ctx.addToken(token.Alias(string(ctx.obuf), s.pos())) - s.progressColumn(ctx, 1) + if s.scanAlias(ctx) { return } case '#': @@ -840,15 +976,6 @@ func (s *Scanner) scan(ctx *Context) (pos int) { return } case '\r', '\n': - // There is no problem that we ignore CR which followed by LF and normalize it to LF, because of following YAML1.2 spec. - // > Line breaks inside scalar content must be normalized by the YAML processor. Each such line break must be parsed into a single line feed character. - // > Outside scalar content, YAML allows any line break to be used to terminate lines. 
- // > -- https://yaml.org/spec/1.2/spec.html - if c == '\r' && ctx.nextChar() == '\n' { - ctx.addOriginBuf('\r') - ctx.progress(1) - c = '\n' - } s.scanNewLine(ctx, c) continue case ' ': @@ -885,9 +1012,8 @@ func (s *Scanner) Init(text string) { s.line = 1 s.column = 1 s.offset = 1 - s.prevIndentLevel = 0 - s.prevIndentNum = 0 - s.prevIndentColumn = 0 + s.prevLineIndentNum = 0 + s.lastDelimColumn = 0 s.indentLevel = 0 s.indentNum = 0 s.isFirstCharAtLine = true diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/struct.go b/hack/tools/vendor/github.com/goccy/go-yaml/struct.go index a3da8ddd3c..dae4d650d9 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/struct.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/struct.go @@ -1,10 +1,9 @@ package yaml import ( + "fmt" "reflect" "strings" - - "golang.org/x/xerrors" ) const ( @@ -84,11 +83,7 @@ func isIgnoredStructField(field reflect.StructField) bool { // private field return true } - tag := getTag(field) - if tag == "-" { - return true - } - return false + return getTag(field) == "-" } type StructFieldMap map[string]*StructField @@ -121,7 +116,7 @@ func structFieldMap(structType reflect.Type) (StructFieldMap, error) { } structField := structField(field) if _, exists := renderNameMap[structField.RenderName]; exists { - return nil, xerrors.Errorf("duplicated struct field name %s", structField.RenderName) + return nil, fmt.Errorf("duplicated struct field name %s", structField.RenderName) } structFieldMap[structField.FieldName] = structField renderNameMap[structField.RenderName] = struct{}{} diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/token/token.go b/hack/tools/vendor/github.com/goccy/go-yaml/token/token.go index c86caab24a..bc8b531cbf 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/token/token.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/token/token.go @@ -339,9 +339,12 @@ func reservedKeywordToken(typ Type, value, org string, pos *Position) *Token { func init() { for _, keyword := range reservedNullKeywords { - reservedKeywordMap[keyword] = func(value, org string, pos *Position) *Token { + f := func(value, org string, pos *Position) *Token { return reservedKeywordToken(NullType, value, org, pos) } + + reservedKeywordMap[keyword] = f + reservedEncKeywordMap[keyword] = f } for _, keyword := range reservedBoolKeywords { f := func(value, org string, pos *Position) *Token { @@ -623,7 +626,7 @@ func IsNeedQuoted(value string) bool { } first := value[0] switch first { - case '*', '&', '[', '{', '}', ']', ',', '!', '|', '>', '%', '\'', '"', '@', ' ': + case '*', '&', '[', '{', '}', ']', ',', '!', '|', '>', '%', '\'', '"', '@', ' ', '`': return true } last := value[len(value)-1] diff --git a/hack/tools/vendor/github.com/goccy/go-yaml/yaml.go b/hack/tools/vendor/github.com/goccy/go-yaml/yaml.go index 25b1056fe3..a90560e192 100644 --- a/hack/tools/vendor/github.com/goccy/go-yaml/yaml.go +++ b/hack/tools/vendor/github.com/goccy/go-yaml/yaml.go @@ -9,7 +9,6 @@ import ( "github.com/goccy/go-yaml/ast" "github.com/goccy/go-yaml/internal/errors" - "golang.org/x/xerrors" ) // BytesMarshaler interface may be implemented by types to customize their @@ -80,11 +79,11 @@ func (s MapSlice) ToMap() map[interface{}]interface{} { // of the generated document will reflect the structure of the value itself. // Maps and pointers (to struct, string, int, etc) are accepted as the in value. 
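
The token.go init() change above registers the same factory closure for null keywords in both keyword maps rather than only the decoding one. A stripped-down sketch of that registration pattern, with stand-in map names:

package main

import "fmt"

type tokenFactory func(value string) string

var (
	decodeKeywords = map[string]tokenFactory{} // stand-in for reservedKeywordMap
	encodeKeywords = map[string]tokenFactory{} // stand-in for reservedEncKeywordMap
)

func main() {
	for _, keyword := range []string{"null", "Null", "NULL", "~"} {
		f := func(value string) string { return "NullToken(" + value + ")" }
		decodeKeywords[keyword] = f
		encodeKeywords[keyword] = f // the fix: encoding now knows the null keywords too
	}
	fmt.Println(decodeKeywords["~"]("~"))
	fmt.Println(encodeKeywords["NULL"]("NULL"))
}
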
// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the +// Struct fields are only marshaled if they are exported (have an upper case +// first letter), and are marshaled using the field name lowercased as the // default key. Custom keys may be defined via the "yaml" name in the field // tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. +// following comma-separated options are used to tweak the marshaling process. // Conflicting names result in a runtime error. // // The field tag format accepted is: @@ -138,7 +137,7 @@ func MarshalWithOptions(v interface{}, opts ...EncodeOption) ([]byte, error) { func MarshalContext(ctx context.Context, v interface{}, opts ...EncodeOption) ([]byte, error) { var buf bytes.Buffer if err := NewEncoder(&buf, opts...).EncodeContext(ctx, v); err != nil { - return nil, errors.Wrapf(err, "failed to marshal") + return nil, err } return buf.Bytes(), nil } @@ -148,7 +147,7 @@ func ValueToNode(v interface{}, opts ...EncodeOption) (ast.Node, error) { var buf bytes.Buffer node, err := NewEncoder(&buf, opts...).EncodeToNode(v) if err != nil { - return nil, errors.Wrapf(err, "failed to convert value to node") + return nil, err } return node, nil } @@ -161,7 +160,7 @@ func ValueToNode(v interface{}, opts ...EncodeOption) (ast.Node, error) { // lowercased as the default key. Custom keys may be defined via the // "yaml" name in the field tag: the content preceding the first comma // is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). +// used to tweak the marshaling process (see Marshal). // Conflicting names result in a runtime error. // // For example: @@ -192,7 +191,7 @@ func UnmarshalContext(ctx context.Context, data []byte, v interface{}, opts ...D if err == io.EOF { return nil } - return errors.Wrapf(err, "failed to unmarshal") + return err } return nil } @@ -201,7 +200,7 @@ func UnmarshalContext(ctx context.Context, data []byte, v interface{}, opts ...D func NodeToValue(node ast.Node, v interface{}, opts ...DecodeOption) error { var buf bytes.Buffer if err := NewDecoder(&buf, opts...).DecodeFromNode(node, v); err != nil { - return errors.Wrapf(err, "failed to convert node to value") + return err } return nil } @@ -214,9 +213,9 @@ func NodeToValue(node ast.Node, v interface{}, opts ...DecodeOption) error { // contain snippets of the YAML source that was used. 
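
As a usage sketch of the tag behavior the Marshal docs above describe (exported fields only, lowercased names by default, keys overridden by the yaml tag), assuming the vendored goccy/go-yaml API:

package main

import (
	"fmt"

	"github.com/goccy/go-yaml"
)

type Config struct {
	Name    string `yaml:"name"`
	Retries int    `yaml:"retries,omitempty"` // zero value: omitted from output
	hidden  string // unexported: never marshaled
}

func main() {
	out, err := yaml.Marshal(Config{Name: "demo"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // name: demo
}
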
func FormatError(e error, colored, inclSource bool) string { var pp errors.PrettyPrinter - if xerrors.As(e, &pp) { + if errors.As(e, &pp) { var buf bytes.Buffer - pp.PrettyPrint(&errors.Sink{&buf}, colored, inclSource) + pp.PrettyPrint(&errors.Sink{Buffer: &buf}, colored, inclSource) return buf.String() } @@ -227,11 +226,11 @@ func FormatError(e error, colored, inclSource bool) string { func YAMLToJSON(bytes []byte) ([]byte, error) { var v interface{} if err := UnmarshalWithOptions(bytes, &v, UseOrderedMap()); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal") + return nil, err } out, err := MarshalWithOptions(v, JSON()) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal with json option") + return nil, err } return out, nil } @@ -240,11 +239,11 @@ func YAMLToJSON(bytes []byte) ([]byte, error) { func JSONToYAML(bytes []byte) ([]byte, error) { var v interface{} if err := UnmarshalWithOptions(bytes, &v, UseOrderedMap()); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal from json bytes") + return nil, err } out, err := Marshal(v) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal") + return nil, err } return out, nil } diff --git a/hack/tools/vendor/github.com/golangci/gofmt/gofmt/golangci.go b/hack/tools/vendor/github.com/golangci/gofmt/gofmt/golangci.go index a69611e1d3..459e872199 100644 --- a/hack/tools/vendor/github.com/golangci/gofmt/gofmt/golangci.go +++ b/hack/tools/vendor/github.com/golangci/gofmt/gofmt/golangci.go @@ -14,6 +14,11 @@ import ( "github.com/golangci/gofmt/gofmt/internal/diff" ) +type Options struct { + NeedSimplify bool + RewriteRules []RewriteRule +} + var parserModeMu sync.RWMutex type RewriteRule struct { @@ -73,6 +78,32 @@ func RunRewrite(filename string, needSimplify bool, rewriteRules []RewriteRule) return diff.Diff(oldName, src, newName, res), nil } +func Source(filename string, src []byte, opts Options) ([]byte, error) { + fset := token.NewFileSet() + + parserModeMu.Lock() + initParserMode() + parserModeMu.Unlock() + + file, sourceAdj, indentAdj, err := parse(fset, filename, src, false) + if err != nil { + return nil, err + } + + file, err = rewriteFileContent(fset, file, opts.RewriteRules) + if err != nil { + return nil, err + } + + ast.SortImports(fset, file) + + if opts.NeedSimplify { + simplify(file) + } + + return format(fset, file, sourceAdj, indentAdj, src, printer.Config{Mode: printerMode, Tabwidth: tabWidth}) +} + func rewriteFileContent(fset *token.FileSet, file *ast.File, rewriteRules []RewriteRule) (*ast.File, error) { for _, rewriteRule := range rewriteRules { pattern, err := parseExpression(rewriteRule.Pattern, "pattern") diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go b/hack/tools/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go index 413e071d65..bf235bf17f 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/cmd/golangci-lint/main.go @@ -1,6 +1,7 @@ package main import ( + "cmp" "fmt" "os" "runtime/debug" @@ -63,17 +64,9 @@ func createBuildInfo() commands.BuildInfo { } } - if revision == "" { - revision = "unknown" - } - - if modified == "" { - modified = "?" 
- } - - if info.Date == "" { - info.Date = "(unknown)" - } + revision = cmp.Or(revision, "unknown") + modified = cmp.Or(modified, "?") + info.Date = cmp.Or(info.Date, "(unknown)") info.Commit = fmt.Sprintf("(%s, modified: %s, mod sum: %q)", revision, modified, buildInfo.Main.Sum) diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache.go index 7bf4f1d660..85899ebc92 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache.go @@ -293,7 +293,7 @@ func GetBytes(c Cache, id ActionID) ([]byte, Entry, error) { } data, err := robustio.ReadFile(c.OutputFile(entry.OutputID)) if err != nil { - return nil, entry, err + return nil, entry, &entryNotFoundError{Err: err} } if sha256.Sum256(data) != entry.OutputID { return nil, entry, &entryNotFoundError{Err: errors.New("bad checksum")} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/LICENSE b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/LICENSE new file mode 100644 index 0000000000..2a7cf70da6 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/readme.md b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/readme.md new file mode 100644 index 0000000000..4d221d4ca5 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/readme.md @@ -0,0 +1,8 @@ +# analysisflags + +Extracted from `/go/analysis/internal/analysisflags` (related to `checker`). +This is just a copy of the code without any changes. 
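
cmp.Or (added in Go 1.22) returns the first of its arguments that is not the zero value, which is what lets the build-info fallbacks above collapse from three if blocks into three lines. A quick illustration:

package main

import (
	"cmp"
	"fmt"
)

func main() {
	revision := ""
	fmt.Println(cmp.Or(revision, "unknown")) // unknown: empty string is the zero value
	revision = "abc123"
	fmt.Println(cmp.Or(revision, "unknown")) // abc123: the first non-zero argument wins
}
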
+ +## History + +- sync with https://github.com/golang/tools/blob/v0.28.0 diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/url.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/url.go new file mode 100644 index 0000000000..26a917a991 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisflags/url.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysisflags + +import ( + "fmt" + "net/url" + + "golang.org/x/tools/go/analysis" +) + +// ResolveURL resolves the URL field for a Diagnostic from an Analyzer +// and returns the URL. See Diagnostic.URL for details. +func ResolveURL(a *analysis.Analyzer, d analysis.Diagnostic) (string, error) { + if d.URL == "" && d.Category == "" && a.URL == "" { + return "", nil // do nothing + } + raw := d.URL + if d.URL == "" && d.Category != "" { + raw = "#" + d.Category + } + u, err := url.Parse(raw) + if err != nil { + return "", fmt.Errorf("invalid Diagnostic.URL %q: %s", raw, err) + } + base, err := url.Parse(a.URL) + if err != nil { + return "", fmt.Errorf("invalid Analyzer.URL %q: %s", a.URL, err) + } + return base.ResolveReference(u).String(), nil +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/analysis.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/analysis.go new file mode 100644 index 0000000000..bb12600dac --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/analysis.go @@ -0,0 +1,48 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analysisinternal provides gopls' internal analyses with a +// number of helper functions that operate on typed syntax trees. +package analysisinternal + +import ( + "fmt" + "os" + + "golang.org/x/tools/go/analysis" +) + +// MakeReadFile returns a simple implementation of the Pass.ReadFile function. +func MakeReadFile(pass *analysis.Pass) func(filename string) ([]byte, error) { + return func(filename string) ([]byte, error) { + if err := CheckReadable(pass, filename); err != nil { + return nil, err + } + return os.ReadFile(filename) + } +} + +// CheckReadable enforces the access policy defined by the ReadFile field of [analysis.Pass]. +func CheckReadable(pass *analysis.Pass, filename string) error { + if slicesContains(pass.OtherFiles, filename) || + slicesContains(pass.IgnoredFiles, filename) { + return nil + } + for _, f := range pass.Files { + if pass.Fset.File(f.FileStart).Name() == filename { + return nil + } + } + return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename) +} + +// TODO(adonovan): use go1.21 slices.Contains. 
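
ResolveURL above leans on net/url reference resolution: a fragment-only or relative diagnostic URL is resolved against the analyzer's base URL. A quick illustration of that stdlib behavior, using made-up URLs:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	base, _ := url.Parse("https://example.org/analyzers/printf") // hypothetical Analyzer.URL
	frag, _ := url.Parse("#format")                              // "#" + Diagnostic.Category
	fmt.Println(base.ResolveReference(frag))
	// https://example.org/analyzers/printf#format
}
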
+func slicesContains[S ~[]E, E comparable](slice S, x E) bool { + for _, elem := range slice { + if elem == x { + return true + } + } + return false +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/readme.md b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/readme.md new file mode 100644 index 0000000000..f301cdbebb --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/analysisinternal/readme.md @@ -0,0 +1,8 @@ +# analysisinternal + +Extracted from `/internal/analysisinternal/` (related to `checker`). +This is just a copy of the code without any changes. + +## History + +- sync with https://github.com/golang/tools/blob/v0.28.0 diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/diff.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/diff.go new file mode 100644 index 0000000000..a13547b7a7 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/diff.go @@ -0,0 +1,176 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package diff computes differences between text files or strings. +package diff + +import ( + "fmt" + "sort" + "strings" +) + +// An Edit describes the replacement of a portion of a text file. +type Edit struct { + Start, End int // byte offsets of the region to replace + New string // the replacement +} + +func (e Edit) String() string { + return fmt.Sprintf("{Start:%d,End:%d,New:%q}", e.Start, e.End, e.New) +} + +// Apply applies a sequence of edits to the src buffer and returns the +// result. Edits are applied in order of start offset; edits with the +// same start offset are applied in they order they were provided. +// +// Apply returns an error if any edit is out of bounds, +// or if any pair of edits is overlapping. +func Apply(src string, edits []Edit) (string, error) { + edits, size, err := validate(src, edits) + if err != nil { + return "", err + } + + // Apply edits. + out := make([]byte, 0, size) + lastEnd := 0 + for _, edit := range edits { + if lastEnd < edit.Start { + out = append(out, src[lastEnd:edit.Start]...) + } + out = append(out, edit.New...) + lastEnd = edit.End + } + out = append(out, src[lastEnd:]...) + + if len(out) != size { + panic("wrong size") + } + + return string(out), nil +} + +// ApplyBytes is like Apply, but it accepts a byte slice. +// The result is always a new array. +func ApplyBytes(src []byte, edits []Edit) ([]byte, error) { + res, err := Apply(string(src), edits) + return []byte(res), err +} + +// validate checks that edits are consistent with src, +// and returns the size of the patched output. +// It may return a different slice. +func validate(src string, edits []Edit) ([]Edit, int, error) { + if !sort.IsSorted(editsSort(edits)) { + edits = append([]Edit(nil), edits...) + SortEdits(edits) + } + + // Check validity of edits and compute final size. 
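
Apply in the new diff package walks sorted, non-overlapping byte-offset edits and splices the replacements into the source. A compressed sketch of that core loop, with the validation step omitted:

package main

import "fmt"

type Edit struct {
	Start, End int    // byte offsets of the region to replace
	New        string // the replacement
}

// apply assumes edits are already sorted by Start and non-overlapping,
// mirroring what the vendored validate step guarantees.
func apply(src string, edits []Edit) string {
	out := make([]byte, 0, len(src))
	lastEnd := 0
	for _, e := range edits {
		out = append(out, src[lastEnd:e.Start]...) // untouched prefix
		out = append(out, e.New...)                // replacement
		lastEnd = e.End
	}
	return string(append(out, src[lastEnd:]...)) // untouched suffix
}

func main() {
	fmt.Println(apply("hello world", []Edit{{Start: 0, End: 5, New: "goodbye"}}))
	// goodbye world
}
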
+ size := len(src) + lastEnd := 0 + for _, edit := range edits { + if !(0 <= edit.Start && edit.Start <= edit.End && edit.End <= len(src)) { + return nil, 0, fmt.Errorf("diff has out-of-bounds edits") + } + if edit.Start < lastEnd { + return nil, 0, fmt.Errorf("diff has overlapping edits") + } + size += len(edit.New) + edit.Start - edit.End + lastEnd = edit.End + } + + return edits, size, nil +} + +// SortEdits orders a slice of Edits by (start, end) offset. +// This ordering puts insertions (end = start) before deletions +// (end > start) at the same point, but uses a stable sort to preserve +// the order of multiple insertions at the same point. +// (Apply detects multiple deletions at the same point as an error.) +func SortEdits(edits []Edit) { + sort.Stable(editsSort(edits)) +} + +type editsSort []Edit + +func (a editsSort) Len() int { return len(a) } +func (a editsSort) Less(i, j int) bool { + if cmp := a[i].Start - a[j].Start; cmp != 0 { + return cmp < 0 + } + return a[i].End < a[j].End +} +func (a editsSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// lineEdits expands and merges a sequence of edits so that each +// resulting edit replaces one or more complete lines. +// See ApplyEdits for preconditions. +func lineEdits(src string, edits []Edit) ([]Edit, error) { + edits, _, err := validate(src, edits) + if err != nil { + return nil, err + } + + // Do all deletions begin and end at the start of a line, + // and all insertions end with a newline? + // (This is merely a fast path.) + for _, edit := range edits { + if edit.Start >= len(src) || // insertion at EOF + edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start + edit.End > 0 && src[edit.End-1] != '\n' || // not at line start + edit.New != "" && edit.New[len(edit.New)-1] != '\n' { // partial insert + goto expand // slow path + } + } + return edits, nil // aligned + +expand: + if len(edits) == 0 { + return edits, nil // no edits (unreachable due to fast path) + } + expanded := make([]Edit, 0, len(edits)) // a guess + prev := edits[0] + // TODO(adonovan): opt: start from the first misaligned edit. + // TODO(adonovan): opt: avoid quadratic cost of string += string. + for _, edit := range edits[1:] { + between := src[prev.End:edit.Start] + if !strings.Contains(between, "\n") { + // overlapping lines: combine with previous edit. + prev.New += between + edit.New + prev.End = edit.End + } else { + // non-overlapping lines: flush previous edit. + expanded = append(expanded, expandEdit(prev, src)) + prev = edit + } + } + return append(expanded, expandEdit(prev, src)), nil // flush final edit +} + +// expandEdit returns edit expanded to complete whole lines. +func expandEdit(edit Edit, src string) Edit { + // Expand start left to start of line. + // (delta is the zero-based column number of start.) + start := edit.Start + if delta := start - 1 - strings.LastIndex(src[:start], "\n"); delta > 0 { + edit.Start -= delta + edit.New = src[start-delta:start] + edit.New + } + + // Expand end right to end of line. 
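
The (Start, End) ordering plus stable sort that SortEdits documents above matters for insertions: two zero-width edits at the same offset must keep their given order. A quick demonstration using sort.SliceStable in place of the package's editsSort type:

package main

import (
	"fmt"
	"sort"
)

type Edit struct {
	Start, End int
	New        string
}

func main() {
	edits := []Edit{
		{Start: 3, End: 3, New: "B"},
		{Start: 0, End: 1, New: "x"},
		{Start: 3, End: 3, New: "C"}, // same point as "B": relative order must survive
	}
	sort.SliceStable(edits, func(i, j int) bool {
		if edits[i].Start != edits[j].Start {
			return edits[i].Start < edits[j].Start
		}
		return edits[i].End < edits[j].End
	})
	fmt.Println(edits) // [{0 1 x} {3 3 B} {3 3 C}]
}
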
+ end := edit.End + if end > 0 && src[end-1] != '\n' || + edit.New != "" && edit.New[len(edit.New)-1] != '\n' { + if nl := strings.IndexByte(src[end:], '\n'); nl < 0 { + edit.End = len(src) // extend to EOF + } else { + edit.End = end + nl + 1 // extend beyond \n + } + } + edit.New += src[end:edit.End] + + return edit +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/common.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/common.go new file mode 100644 index 0000000000..c3e82dd268 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/common.go @@ -0,0 +1,179 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "log" + "sort" +) + +// lcs is a longest common sequence +type lcs []diag + +// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i l[j].Len + }) + return l +} + +// validate that the elements of the lcs do not overlap +// (can only happen when the two-sided algorithm ends early) +// expects the lcs to be sorted +func (l lcs) valid() bool { + for i := 1; i < len(l); i++ { + if l[i-1].X+l[i-1].Len > l[i].X { + return false + } + if l[i-1].Y+l[i-1].Len > l[i].Y { + return false + } + } + return true +} + +// repair overlapping lcs +// only called if two-sided stops early +func (l lcs) fix() lcs { + // from the set of diagonals in l, find a maximal non-conflicting set + // this problem may be NP-complete, but we use a greedy heuristic, + // which is quadratic, but with a better data structure, could be D log D. + // indepedent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs + // which has to have monotone x and y + if len(l) == 0 { + return nil + } + sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len }) + tmp := make(lcs, 0, len(l)) + tmp = append(tmp, l[0]) + for i := 1; i < len(l); i++ { + var dir direction + nxt := l[i] + for _, in := range tmp { + if dir, nxt = overlap(in, nxt); dir == empty || dir == bad { + break + } + } + if nxt.Len > 0 && dir != bad { + tmp = append(tmp, nxt) + } + } + tmp.sort() + if false && !tmp.valid() { // debug checking + log.Fatalf("here %d", len(tmp)) + } + return tmp +} + +type direction int + +const ( + empty direction = iota // diag is empty (so not in lcs) + leftdown // proposed acceptably to the left and below + rightup // proposed diag is acceptably to the right and above + bad // proposed diag is inconsistent with the lcs so far +) + +// overlap trims the proposed diag prop so it doesn't overlap with +// the existing diag that has already been added to the lcs. 
+func overlap(exist, prop diag) (direction, diag) {
+ if prop.X <= exist.X && exist.X < prop.X+prop.Len {
+ // remove the end of prop where it overlaps with the X end of exist
+ delta := prop.X + prop.Len - exist.X
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ }
+ if exist.X <= prop.X && prop.X < exist.X+exist.Len {
+ // remove the beginning of prop where it overlaps with exist
+ delta := exist.X + exist.Len - prop.X
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ prop.X += delta
+ prop.Y += delta
+ }
+ if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len {
+ // remove the end of prop that overlaps (in Y) with exist
+ delta := prop.Y + prop.Len - exist.Y
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ }
+ if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len {
+ // remove the beginning of prop that overlaps with exist
+ delta := exist.Y + exist.Len - prop.Y
+ prop.Len -= delta
+ if prop.Len <= 0 {
+ return empty, prop
+ }
+ prop.X += delta // no test reaches this code
+ prop.Y += delta
+ }
+ if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y {
+ return leftdown, prop
+ }
+ if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y {
+ return rightup, prop
+ }
+ // prop can't be in an lcs that contains exist
+ return bad, prop
+}
+
+// manipulating Diag and lcs
+
+// prepend a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs
+// or to its first Diag. prepend is only called to extend diagonals
+// in the backward direction.
+func (lcs lcs) prepend(x, y int) lcs {
+ if len(lcs) > 0 {
+ d := &lcs[0]
+ if int(d.X) == x+1 && int(d.Y) == y+1 {
+ // extend the diagonal down and to the left
+ d.X, d.Y = int(x), int(y)
+ d.Len++
+ return lcs
+ }
+ }
+
+ r := diag{X: int(x), Y: int(y), Len: 1}
+ lcs = append([]diag{r}, lcs...)
+ return lcs
+}
+
+// append appends a diagonal, or extends the existing one,
+// by adding the edge (x,y)-(x+1,y+1). append is only called
+// to extend diagonals in the forward direction.
+func (lcs lcs) append(x, y int) lcs {
+ if len(lcs) > 0 {
+ last := &lcs[len(lcs)-1]
+ // Expand last element if adjoining.
+ if last.X+last.Len == x && last.Y+last.Len == y {
+ last.Len++
+ return lcs
+ }
+ }
+
+ return append(lcs, diag{X: x, Y: y, Len: 1})
+}
+
+// enforce constraint on d, k
+func ok(d, k int) bool {
+ return d >= 0 && -d <= k && k <= d
+}
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/doc.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/doc.go
new file mode 100644
index 0000000000..9029dd20b3
--- /dev/null
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/doc.go
@@ -0,0 +1,156 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package lcs contains code to find longest-common-subsequences
+// (and diffs)
+package lcs
+
+/*
+Compute longest-common-subsequences of two slices A, B using
+algorithms from Myers' paper. A longest-common-subsequence
+(LCS from now on) of A and B is a maximal set of lexically increasing
+pairs of subscripts (x,y) with A[x]==B[y]. There may be many LCS, but
+they all have the same length. An LCS determines a sequence of edits
+that changes A into B.
+
+The key concept is the edit graph of A and B.
+If A has length N and B has length M, then the edit graph has
+vertices v[i][j] for 0 <= i <= N, 0 <= j <= M.
There is a
+horizontal edge from v[i][j] to v[i+1][j] whenever both are in
+the graph, and a vertical edge from v[i][j] to v[i][j+1] similarly.
+When A[i] == B[j] there is a diagonal edge from v[i][j] to v[i+1][j+1].
+
+A path in the graph between (0,0) and (N,M) determines a sequence
+of edits converting A into B: each horizontal edge corresponds to removing
+an element of A, and each vertical edge corresponds to inserting an
+element of B.
+
+A vertex (x,y) is on (forward) diagonal k if x-y=k. A path in the graph
+is of length D if it has D non-diagonal edges. The algorithms generate
+forward paths (in which at least one of x,y increases at each edge),
+or backward paths (in which at least one of x,y decreases at each edge),
+or a combination. (Note that the orientation is the traditional mathematical one,
+with the origin in the lower-left corner.)
+
+Here is the edit graph for A:"aabbaa", B:"aacaba". (I know the diagonals look weird.)
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ b | | | ___/‾‾‾ | ___/‾‾‾ | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ c | | | | | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a a b b a a
+
+
+The algorithm labels a vertex (x,y) with D,k if it is on diagonal k and at
+the end of a maximal path of length D. (Because x-y=k it suffices to remember
+only the x coordinate of the vertex.)
+
+The forward algorithm: Find the longest diagonal starting at (0,0) and
+label its end with D=0,k=0. From that vertex take a vertical step and
+then follow the longest diagonal (up and to the right), and label that vertex
+with D=1,k=-1. From the D=0,k=0 point take a horizontal step and then follow
+the longest diagonal (up and to the right) and label that vertex
+D=1,k=1. In the same way, having labelled all the D vertices,
+from a vertex labelled D,k find two vertices
+tentatively labelled D+1,k-1 and D+1,k+1. There may be two on the same
+diagonal, in which case take the one with the larger x.
+
+Eventually the path gets to (N,M), and the diagonals on it are the LCS.
+
+Here is the edit graph with the ends of D-paths labelled. (So, for instance,
+0/2,2 indicates that x=2,y=2 is labelled with 0, as it should be, since the first
+step is to go up the longest diagonal from (0,0).)
+A:"aabbaa", B:"aacaba" + ⊙ ------- ⊙ ------- ⊙ -------(3/3,6)------- ⊙ -------(3/5,6)-------(4/6,6) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ -------(2/3,5)------- ⊙ ------- ⊙ ------- ⊙ + b | | | ___/‾‾‾ | ___/‾‾‾ | | | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ -------(3/5,4)------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ -------(1/2,3)-------(2/3,3)------- ⊙ ------- ⊙ ------- ⊙ + c | | | | | | | + ⊙ ------- ⊙ -------(0/2,2)-------(1/3,2)-------(2/4,2)-------(3/5,2)-------(4/6,2) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a a b b a a + +The 4-path is reconstructed starting at (4/6,6), horizontal to (3/5,6), diagonal to (3,4), vertical +to (2/3,3), horizontal to (1/2,3), vertical to (0/2,2), and diagonal to (0,0). As expected, +there are 4 non-diagonal steps, and the diagonals form an LCS. + +There is a symmetric backward algorithm, which gives (backwards labels are prefixed with a colon): +A:"aabbaa", B:"aacaba" + ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ --------(:0/5,5)-------- ⊙ + b | | | ____/‾‾‾ | ____/‾‾‾ | | | + ⊙ -------- ⊙ -------- ⊙ --------(:1/3,4)-------- ⊙ -------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,3)--------(:2/1,3)-------- ⊙ --------(:2/3,3)--------(:1/4,3)-------- ⊙ -------- ⊙ + c | | | | | | | + ⊙ -------- ⊙ -------- ⊙ --------(:3/3,2)--------(:2/4,2)-------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,1)-------- ⊙ -------- ⊙ -------- ⊙ --------(:3/4,1)-------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:4/0,0)-------- ⊙ -------- ⊙ -------- ⊙ --------(:4/4,0)-------- ⊙ -------- ⊙ + a a b b a a + +Neither of these is ideal for use in an editor, where it is undesirable to send very long diffs to the +front end. It's tricky to decide exactly what 'very long diffs' means, as "replace A by B" is very short. +We want to control how big D can be, by stopping when it gets too large. The forward algorithm then +privileges common prefixes, and the backward algorithm privileges common suffixes. Either is an undesirable +asymmetry. + +Fortunately there is a two-sided algorithm, implied by results in Myers' paper. Here's what the labels in +the edit graph look like. 
+A:"aabbaa", B:"aacaba" + ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- ⊙ --------- ⊙ --------- (2/3,5) --------- ⊙ --------- (:0/5,5)--------- ⊙ + b | | | ____/‾‾‾‾ | ____/‾‾‾‾ | | | + ⊙ --------- ⊙ --------- ⊙ --------- (:1/3,4)--------- ⊙ --------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- (:2/1,3)--------- (1/2,3) ---------(2:2/3,3)--------- (:1/4,3)--------- ⊙ --------- ⊙ + c | | | | | | | + ⊙ --------- ⊙ --------- (0/2,2) --------- (1/3,2) ---------(2:2/4,2)--------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ + a a b b a a + +The algorithm stopped when it saw the backwards 2-path ending at (1,3) and the forwards 2-path ending at (3,5). The criterion +is a backwards path ending at (u,v) and a forward path ending at (x,y), where u <= x and the two points are on the same +diagonal. (Here the edgegraph has a diagonal, but the criterion is x-y=u-v.) Myers proves there is a forward +2-path from (0,0) to (1,3), and that together with the backwards 2-path ending at (1,3) gives the expected 4-path. +Unfortunately the forward path has to be constructed by another run of the forward algorithm; it can't be found from the +computed labels. That is the worst case. Had the code noticed (x,y)=(u,v)=(3,3) the whole path could be reconstructed +from the edgegraph. The implementation looks for a number of special cases to try to avoid computing an extra forward path. + +If the two-sided algorithm has stop early (because D has become too large) it will have found a forward LCS and a +backwards LCS. Ideally these go with disjoint prefixes and suffixes of A and B, but disjointness may fail and the two +computed LCS may conflict. (An easy example is where A is a suffix of B, and shares a short prefix. The backwards LCS +is all of A, and the forward LCS is a prefix of A.) The algorithm combines the two +to form a best-effort LCS. In the worst case the forward partial LCS may have to +be recomputed. +*/ + +/* Eugene Myers paper is titled +"An O(ND) Difference Algorithm and Its Variations" +and can be found at +http://www.xmailserver.org/diff2.pdf + +(There is a generic implementation of the algorithm the repository with git hash +b9ad7e4ade3a686d608e44475390ad428e60e7fc) +*/ diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/git.sh b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/git.sh new file mode 100644 index 0000000000..b25ba4aac7 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/git.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# +# Copyright 2022 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. +# +# Creates a zip file containing all numbered versions +# of the commit history of a large source file, for use +# as input data for the tests of the diff algorithm. +# +# Run script from root of the x/tools repo. + +set -eu + +# WARNING: This script will install the latest version of $file +# The largest real source file in the x/tools repo. 
+# file=internal/golang/completion/completion.go +# file=internal/golang/diagnostics.go +file=internal/protocol/tsprotocol.go + +tmp=$(mktemp -d) +git log $file | + awk '/^commit / {print $2}' | + nl -ba -nrz | + while read n hash; do + git checkout --quiet $hash $file + cp -f $file $tmp/$n + done +(cd $tmp && zip -q - *) > testdata.zip +rm -fr $tmp +git restore --staged $file +git restore $file +echo "Created testdata.zip" diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/labels.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/labels.go new file mode 100644 index 0000000000..504913d1da --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/labels.go @@ -0,0 +1,55 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "fmt" +) + +// For each D, vec[D] has length D+1, +// and the label for (D, k) is stored in vec[D][(D+k)/2]. +type label struct { + vec [][]int +} + +// Temporary checking DO NOT COMMIT true TO PRODUCTION CODE +const debug = false + +// debugging. check that the (d,k) pair is valid +// (that is, -d<=k<=d and d+k even) +func checkDK(D, k int) { + if k >= -D && k <= D && (D+k)%2 == 0 { + return + } + panic(fmt.Sprintf("out of range, d=%d,k=%d", D, k)) +} + +func (t *label) set(D, k, x int) { + if debug { + checkDK(D, k) + } + for len(t.vec) <= D { + t.vec = append(t.vec, nil) + } + if t.vec[D] == nil { + t.vec[D] = make([]int, D+1) + } + t.vec[D][(D+k)/2] = x // known that D+k is even +} + +func (t *label) get(d, k int) int { + if debug { + checkDK(d, k) + } + return int(t.vec[d][(d+k)/2]) +} + +func newtriang(limit int) label { + if limit < 100 { + // Preallocate if limit is not large. + return label{vec: make([][]int, limit)} + } + return label{} +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/old.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/old.go new file mode 100644 index 0000000000..4353da15ba --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/old.go @@ -0,0 +1,480 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// TODO(adonovan): remove unclear references to "old" in this package. + +import ( + "fmt" +) + +// A Diff is a replacement of a portion of A by a portion of B. +type Diff struct { + Start, End int // offsets of portion to delete in A + ReplStart, ReplEnd int // offset of replacement text in B +} + +// DiffStrings returns the differences between two strings. +// It does not respect rune boundaries. +func DiffStrings(a, b string) []Diff { return diff(stringSeqs{a, b}) } + +// DiffBytes returns the differences between two byte sequences. +// It does not respect rune boundaries. +func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) } + +// DiffRunes returns the differences between two rune sequences. +func DiffRunes(a, b []rune) []Diff { return diff(runesSeqs{a, b}) } + +func diff(seqs sequences) []Diff { + // A limit on how deeply the LCS algorithm should search. The value is just a guess. 
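+ // It bounds D, the number of non-diagonal steps in a path through the
+ // edit graph; compute receives half of it, presumably because the
+ // two-sided search advances from both ends at once.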
+ const maxDiffs = 100 + diff, _ := compute(seqs, twosided, maxDiffs/2) + return diff +} + +// compute computes the list of differences between two sequences, +// along with the LCS. It is exercised directly by tests. +// The algorithm is one of {forward, backward, twosided}. +func compute(seqs sequences, algo func(*editGraph) lcs, limit int) ([]Diff, lcs) { + if limit <= 0 { + limit = 1 << 25 // effectively infinity + } + alen, blen := seqs.lengths() + g := &editGraph{ + seqs: seqs, + vf: newtriang(limit), + vb: newtriang(limit), + limit: limit, + ux: alen, + uy: blen, + delta: alen - blen, + } + lcs := algo(g) + diffs := lcs.toDiffs(alen, blen) + return diffs, lcs +} + +// editGraph carries the information for computing the lcs of two sequences. +type editGraph struct { + seqs sequences + vf, vb label // forward and backward labels + + limit int // maximal value of D + // the bounding rectangle of the current edit graph + lx, ly, ux, uy int + delta int // common subexpression: (ux-lx)-(uy-ly) +} + +// toDiffs converts an LCS to a list of edits. +func (lcs lcs) toDiffs(alen, blen int) []Diff { + var diffs []Diff + var pa, pb int // offsets in a, b + for _, l := range lcs { + if pa < l.X || pb < l.Y { + diffs = append(diffs, Diff{pa, l.X, pb, l.Y}) + } + pa = l.X + l.Len + pb = l.Y + l.Len + } + if pa < alen || pb < blen { + diffs = append(diffs, Diff{pa, alen, pb, blen}) + } + return diffs +} + +// --- FORWARD --- + +// fdone decides if the forward path has reached the upper right +// corner of the rectangle. If so, it also returns the computed lcs. +func (e *editGraph) fdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vf.get(D, k) + y := x - k + if x == e.ux && y == e.uy { + return true, e.forwardlcs(D, k) + } + return false, nil +} + +// run the forward algorithm, until success or up to the limit on D. 
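+// The label vf(D, k) stores the largest x (rectangle-relative; y is
+// implicit as x-k) reachable by a path with D non-diagonal edges that
+// ends on diagonal k.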
+func forward(e *editGraph) lcs { + e.setForward(0, 0, e.lx) + if ok, ans := e.fdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + if ok, ans := e.fdone(D+1, -(D + 1)); ok { + return ans + } + e.setForward(D+1, D+1, e.getForward(D, D)+1) + if ok, ans := e.fdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + if ok, ans := e.fdone(D+1, k); ok { + return ans + } + } + } + // D is too large + // find the D path with maximal x+y inside the rectangle and + // use that to compute the found part of the lcs + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + return e.forwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking from the farthest point reached +func (e *editGraph) forwardlcs(D, k int) lcs { + var ans lcs + for x := e.getForward(D, k); x != 0 || x-k != 0; { + if ok(D-1, k-1) && x-1 == e.getForward(D-1, k-1) { + // if (x-1,y) is labelled D-1, x--,D--,k--,continue + D, k, x = D-1, k-1, x-1 + continue + } else if ok(D-1, k+1) && x == e.getForward(D-1, k+1) { + // if (x,y-1) is labelled D-1, x, D--,k++, continue + D, k = D-1, k+1 + continue + } + // if (x-1,y-1)--(x,y) is a diagonal, prepend,x--,y--, continue + y := x - k + ans = ans.prepend(x+e.lx-1, y+e.ly-1) + x-- + } + return ans +} + +// start at (x,y), go up the diagonal as far as possible, +// and label the result with d +func (e *editGraph) lookForward(k, relx int) int { + rely := relx - k + x, y := relx+e.lx, rely+e.ly + if x < e.ux && y < e.uy { + x += e.seqs.commonPrefixLen(x, e.ux, y, e.uy) + } + return x +} + +func (e *editGraph) setForward(d, k, relx int) { + x := e.lookForward(k, relx) + e.vf.set(d, k, x-e.lx) +} + +func (e *editGraph) getForward(d, k int) int { + x := e.vf.get(d, k) + return x +} + +// --- BACKWARD --- + +// bdone decides if the backward path has reached the lower left corner +func (e *editGraph) bdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vb.get(D, k) + y := x - (k + e.delta) + if x == 0 && y == 0 { + return true, e.backwardlcs(D, k) + } + return false, nil +} + +// run the backward algorithm, until success or up to the limit on D. 
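+// Symmetrically, vb(D, k) stores the smallest x reached by a backward
+// D-path on diagonal k, where y = x - (k + e.delta) because backward
+// diagonals are numbered relative to the upper-right corner.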
+func backward(e *editGraph) lcs { + e.setBackward(0, 0, e.ux) + if ok, ans := e.bdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + if ok, ans := e.bdone(D+1, -(D + 1)); ok { + return ans + } + e.setBackward(D+1, D+1, e.getBackward(D, D)) + if ok, ans := e.bdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + if ok, ans := e.bdone(D+1, k); ok { + return ans + } + } + } + + // D is too large + // find the D path with minimal x+y inside the rectangle and + // use that to compute the part of the lcs found + kmax := -e.limit - 1 + diagmin := 1 << 25 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no paths when limit=%d?", e.limit)) + } + return e.backwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking +func (e *editGraph) backwardlcs(D, k int) lcs { + var ans lcs + for x := e.getBackward(D, k); x != e.ux || x-(k+e.delta) != e.uy; { + if ok(D-1, k-1) && x == e.getBackward(D-1, k-1) { + // D--, k--, x unchanged + D, k = D-1, k-1 + continue + } else if ok(D-1, k+1) && x+1 == e.getBackward(D-1, k+1) { + // D--, k++, x++ + D, k, x = D-1, k+1, x+1 + continue + } + y := x - (k + e.delta) + ans = ans.append(x+e.lx, y+e.ly) + x++ + } + return ans +} + +// start at (x,y), go down the diagonal as far as possible, +func (e *editGraph) lookBackward(k, relx int) int { + rely := relx - (k + e.delta) // forward k = k + e.delta + x, y := relx+e.lx, rely+e.ly + if x > 0 && y > 0 { + x -= e.seqs.commonSuffixLen(0, x, 0, y) + } + return x +} + +// convert to rectangle, and label the result with d +func (e *editGraph) setBackward(d, k, relx int) { + x := e.lookBackward(k, relx) + e.vb.set(d, k, x-e.lx) +} + +func (e *editGraph) getBackward(d, k int) int { + x := e.vb.get(d, k) + return x +} + +// -- TWOSIDED --- + +func twosided(e *editGraph) lcs { + // The termination condition could be improved, as either the forward + // or backward pass could succeed before Myers' Lemma applies. + // Aside from questions of efficiency (is the extra testing cost-effective) + // this is more likely to matter when e.limit is reached. 
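+ // The passes below alternate: after each backward sweep (checked on
+ // loop entry) and each forward sweep, twoDone tests Myers' overlap
+ // condition on the diagonals the two frontiers share.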
+ e.setForward(0, 0, e.lx) + e.setBackward(0, 0, e.ux) + + // from D to D+1 + for D := 0; D < e.limit; D++ { + // just finished a backwards pass, so check + if got, ok := e.twoDone(D, D); ok { + return e.twolcs(D, D, got) + } + // do a forwards pass (D to D+1) + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + e.setForward(D+1, D+1, e.getForward(D, D)+1) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + } + // just did a forward pass, so check + if got, ok := e.twoDone(D+1, D); ok { + return e.twolcs(D+1, D, got) + } + // do a backward pass, D to D+1 + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + e.setBackward(D+1, D+1, e.getBackward(D, D)) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + } + } + + // D too large. combine a forward and backward partial lcs + // first, a forward one + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no forward paths when limit=%d?", e.limit)) + } + lcs := e.forwardlcs(e.limit, kmax) + // now a backward one + // find the D path with minimal x+y inside the rectangle and + // use that to compute the lcs + diagmin := 1 << 25 // infinity + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no backward paths when limit=%d?", e.limit)) + } + lcs = append(lcs, e.backwardlcs(e.limit, kmax)...) + // These may overlap (e.forwardlcs and e.backwardlcs return sorted lcs) + ans := lcs.fix() + return ans +} + +// Does Myers' Lemma apply? +func (e *editGraph) twoDone(df, db int) (int, bool) { + if (df+db+e.delta)%2 != 0 { + return 0, false // diagonals cannot overlap + } + kmin := -db + e.delta + if -df > kmin { + kmin = -df + } + kmax := db + e.delta + if df < kmax { + kmax = df + } + for k := kmin; k <= kmax; k += 2 { + x := e.vf.get(df, k) + u := e.vb.get(db, k-e.delta) + if u <= x { + // is it worth looking at all the other k? + for l := k; l <= kmax; l += 2 { + x := e.vf.get(df, l) + y := x - l + u := e.vb.get(db, l-e.delta) + v := u - l + if x == u || u == 0 || v == 0 || y == e.uy || x == e.ux { + return l, true + } + } + return k, true + } + } + return 0, false +} + +func (e *editGraph) twolcs(df, db, kf int) lcs { + // db==df || db+1==df + x := e.vf.get(df, kf) + y := x - kf + kb := kf - e.delta + u := e.vb.get(db, kb) + v := u - kf + + // Myers proved there is a df-path from (0,0) to (u,v) + // and a db-path from (x,y) to (N,M). + // In the first case the overall path is the forward path + // to (u,v) followed by the backward path to (N,M). + // In the second case the path is the backward path to (x,y) + // followed by the forward path to (x,y) from (0,0). + + // Look for some special cases to avoid computing either of these paths. 
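+ // (The quoted string pairs below appear to be sample inputs that
+ // exercise each case.)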
+ if x == u { + // "babaab" "cccaba" + // already patched together + lcs := e.forwardlcs(df, kf) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // is (u-1,v) or (u,v-1) labelled df-1? + // if so, that forward df-1-path plus a horizontal or vertical edge + // is the df-path to (u,v), then plus the db-path to (N,M) + if u > 0 && ok(df-1, u-1-v) && e.vf.get(df-1, u-1-v) == u-1 { + // "aabbab" "cbcabc" + lcs := e.forwardlcs(df-1, u-1-v) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + if v > 0 && ok(df-1, (u-(v-1))) && e.vf.get(df-1, u-(v-1)) == u { + // "abaabb" "bcacab" + lcs := e.forwardlcs(df-1, u-(v-1)) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // The path can't possibly contribute to the lcs because it + // is all horizontal or vertical edges + if u == 0 || v == 0 || x == e.ux || y == e.uy { + // "abaabb" "abaaaa" + if u == 0 || v == 0 { + return e.backwardlcs(db, kb) + } + return e.forwardlcs(df, kf) + } + + // is (x+1,y) or (x,y+1) labelled db-1? + if x+1 <= e.ux && ok(db-1, x+1-y-e.delta) && e.vb.get(db-1, x+1-y-e.delta) == x+1 { + // "bababb" "baaabb" + lcs := e.backwardlcs(db-1, kb+1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + if y+1 <= e.uy && ok(db-1, x-(y+1)-e.delta) && e.vb.get(db-1, x-(y+1)-e.delta) == x { + // "abbbaa" "cabacc" + lcs := e.backwardlcs(db-1, kb-1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + + // need to compute another path + // "aabbaa" "aacaba" + lcs := e.backwardlcs(db, kb) + oldx, oldy := e.ux, e.uy + e.ux = u + e.uy = v + lcs = append(lcs, forward(e)...) + e.ux, e.uy = oldx, oldy + return lcs.sort() +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/sequence.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/sequence.go new file mode 100644 index 0000000000..2d72d26304 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/lcs/sequence.go @@ -0,0 +1,113 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// This file defines the abstract sequence over which the LCS algorithm operates. + +// sequences abstracts a pair of sequences, A and B. +type sequences interface { + lengths() (int, int) // len(A), len(B) + commonPrefixLen(ai, aj, bi, bj int) int // len(commonPrefix(A[ai:aj], B[bi:bj])) + commonSuffixLen(ai, aj, bi, bj int) int // len(commonSuffix(A[ai:aj], B[bi:bj])) +} + +type stringSeqs struct{ a, b string } + +func (s stringSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s stringSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenString(s.a[ai:aj], s.b[bi:bj]) +} +func (s stringSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenString(s.a[ai:aj], s.b[bi:bj]) +} + +// The explicit capacity in s[i:j:j] leads to more efficient code. 
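+// (A full slice expression s[i:j:j] caps the result's capacity at j-i,
+// so the slices handed to the helpers below carry no spare capacity
+// beyond index j.)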
+ +type bytesSeqs struct{ a, b []byte } + +func (s bytesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s bytesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s bytesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +type runesSeqs struct{ a, b []rune } + +func (s runesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s runesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s runesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +// TODO(adonovan): optimize these functions using ideas from: +// - https://go.dev/cl/408116 common.go +// - https://go.dev/cl/421435 xor_generic.go + +// TODO(adonovan): factor using generics when available, +// but measure performance impact. + +// commonPrefixLen* returns the length of the common prefix of a[ai:aj] and b[bi:bj]. +func commonPrefixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} + +// commonSuffixLen* returns the length of the common suffix of a[ai:aj] and b[bi:bj]. +func commonSuffixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} + +func min(x, y int) int { + if x < y { + return x + } else { + return y + } +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/ndiff.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/ndiff.go new file mode 100644 index 0000000000..f7aa2b79f6 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/ndiff.go @@ -0,0 +1,99 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "bytes" + "unicode/utf8" + + "github.com/golangci/golangci-lint/internal/x/tools/diff/lcs" +) + +// Strings computes the differences between two strings. +// The resulting edits respect rune boundaries. +func Strings(before, after string) []Edit { + if before == after { + return nil // common case + } + + if isASCII(before) && isASCII(after) { + // TODO(adonovan): opt: specialize diffASCII for strings. + return diffASCII([]byte(before), []byte(after)) + } + return diffRunes([]rune(before), []rune(after)) +} + +// Bytes computes the differences between two byte slices. +// The resulting edits respect rune boundaries. 
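+// Like Strings, it returns nil when the inputs are equal.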
+func Bytes(before, after []byte) []Edit { + if bytes.Equal(before, after) { + return nil // common case + } + + if isASCII(before) && isASCII(after) { + return diffASCII(before, after) + } + return diffRunes(runes(before), runes(after)) +} + +func diffASCII(before, after []byte) []Edit { + diffs := lcs.DiffBytes(before, after) + + // Convert from LCS diffs. + res := make([]Edit, len(diffs)) + for i, d := range diffs { + res[i] = Edit{d.Start, d.End, string(after[d.ReplStart:d.ReplEnd])} + } + return res +} + +func diffRunes(before, after []rune) []Edit { + diffs := lcs.DiffRunes(before, after) + + // The diffs returned by the lcs package use indexes + // into whatever slice was passed in. + // Convert rune offsets to byte offsets. + res := make([]Edit, len(diffs)) + lastEnd := 0 + utf8Len := 0 + for i, d := range diffs { + utf8Len += runesLen(before[lastEnd:d.Start]) // text between edits + start := utf8Len + utf8Len += runesLen(before[d.Start:d.End]) // text deleted by this edit + res[i] = Edit{start, utf8Len, string(after[d.ReplStart:d.ReplEnd])} + lastEnd = d.End + } + return res +} + +// runes is like []rune(string(bytes)) without the duplicate allocation. +func runes(bytes []byte) []rune { + n := utf8.RuneCount(bytes) + runes := make([]rune, n) + for i := 0; i < n; i++ { + r, sz := utf8.DecodeRune(bytes) + bytes = bytes[sz:] + runes[i] = r + } + return runes +} + +// runesLen returns the length in bytes of the UTF-8 encoding of runes. +func runesLen(runes []rune) (len int) { + for _, r := range runes { + len += utf8.RuneLen(r) + } + return len +} + +// isASCII reports whether s contains only ASCII. +func isASCII[S string | []byte](s S) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/readme.md b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/readme.md new file mode 100644 index 0000000000..4b97984989 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/readme.md @@ -0,0 +1,8 @@ +# diff + +Extracted from `/internal/diff/` (related to `fixer`). +This is just a copy of the code without any changes. + +## History + +- sync with https://github.com/golang/tools/blob/v0.28.0 diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/unified.go b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/unified.go new file mode 100644 index 0000000000..cfbda61020 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/internal/x/tools/diff/unified.go @@ -0,0 +1,251 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "fmt" + "log" + "strings" +) + +// DefaultContextLines is the number of unchanged lines of surrounding +// context displayed by Unified. Use ToUnified to specify a different value. +const DefaultContextLines = 3 + +// Unified returns a unified diff of the old and new strings. +// The old and new labels are the names of the old and new files. +// If the strings are equal, it returns the empty string. +func Unified(oldLabel, newLabel, old, new string) string { + edits := Strings(old, new) + unified, err := ToUnified(oldLabel, newLabel, old, edits, DefaultContextLines) + if err != nil { + // Can't happen: edits are consistent. 
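+ // (Strings derives ordered, in-bounds, non-overlapping edits from
+ // old itself, so ToUnified's validation cannot reject them.)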
+ log.Fatalf("internal error in diff.Unified: %v", err) + } + return unified +} + +// ToUnified applies the edits to content and returns a unified diff, +// with contextLines lines of (unchanged) context around each diff hunk. +// The old and new labels are the names of the content and result files. +// It returns an error if the edits are inconsistent; see ApplyEdits. +func ToUnified(oldLabel, newLabel, content string, edits []Edit, contextLines int) (string, error) { + u, err := toUnified(oldLabel, newLabel, content, edits, contextLines) + if err != nil { + return "", err + } + return u.String(), nil +} + +// unified represents a set of edits as a unified diff. +type unified struct { + // from is the name of the original file. + from string + // to is the name of the modified file. + to string + // hunks is the set of edit hunks needed to transform the file content. + hunks []*hunk +} + +// Hunk represents a contiguous set of line edits to apply. +type hunk struct { + // The line in the original source where the hunk starts. + fromLine int + // The line in the original source where the hunk finishes. + toLine int + // The set of line based edits to apply. + lines []line +} + +// Line represents a single line operation to apply as part of a Hunk. +type line struct { + // kind is the type of line this represents, deletion, insertion or copy. + kind opKind + // content is the content of this line. + // For deletion it is the line being removed, for all others it is the line + // to put in the output. + content string +} + +// opKind is used to denote the type of operation a line represents. +type opKind int + +const ( + // opDelete is the operation kind for a line that is present in the input + // but not in the output. + opDelete opKind = iota + // opInsert is the operation kind for a line that is new in the output. + opInsert + // opEqual is the operation kind for a line that is the same in the input and + // output, often used to provide context around edited lines. + opEqual +) + +// String returns a human readable representation of an OpKind. It is not +// intended for machine processing. +func (k opKind) String() string { + switch k { + case opDelete: + return "delete" + case opInsert: + return "insert" + case opEqual: + return "equal" + default: + panic("unknown operation kind") + } +} + +// toUnified takes a file contents and a sequence of edits, and calculates +// a unified diff that represents those edits. +func toUnified(fromName, toName string, content string, edits []Edit, contextLines int) (unified, error) { + gap := contextLines * 2 + u := unified{ + from: fromName, + to: toName, + } + if len(edits) == 0 { + return u, nil + } + var err error + edits, err = lineEdits(content, edits) // expand to whole lines + if err != nil { + return u, err + } + lines := splitLines(content) + var h *hunk + last := 0 + toLine := 0 + for _, edit := range edits { + // Compute the zero-based line numbers of the edit start and end. + // TODO(adonovan): opt: compute incrementally, avoid O(n^2). 
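+ // (strings.Count rescans content from the beginning for every edit;
+ // since edits arrive sorted, a running offset and line counter would
+ // make this pass linear.)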
+ start := strings.Count(content[:edit.Start], "\n") + end := strings.Count(content[:edit.End], "\n") + if edit.End == len(content) && len(content) > 0 && content[len(content)-1] != '\n' { + end++ // EOF counts as an implicit newline + } + + switch { + case h != nil && start == last: + //direct extension + case h != nil && start <= last+gap: + //within range of previous lines, add the joiners + addEqualLines(h, lines, last, start) + default: + //need to start a new hunk + if h != nil { + // add the edge to the previous hunk + addEqualLines(h, lines, last, last+contextLines) + u.hunks = append(u.hunks, h) + } + toLine += start - last + h = &hunk{ + fromLine: start + 1, + toLine: toLine + 1, + } + // add the edge to the new hunk + delta := addEqualLines(h, lines, start-contextLines, start) + h.fromLine -= delta + h.toLine -= delta + } + last = start + for i := start; i < end; i++ { + h.lines = append(h.lines, line{kind: opDelete, content: lines[i]}) + last++ + } + if edit.New != "" { + for _, content := range splitLines(edit.New) { + h.lines = append(h.lines, line{kind: opInsert, content: content}) + toLine++ + } + } + } + if h != nil { + // add the edge to the final hunk + addEqualLines(h, lines, last, last+contextLines) + u.hunks = append(u.hunks, h) + } + return u, nil +} + +func splitLines(text string) []string { + lines := strings.SplitAfter(text, "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} + +func addEqualLines(h *hunk, lines []string, start, end int) int { + delta := 0 + for i := start; i < end; i++ { + if i < 0 { + continue + } + if i >= len(lines) { + return delta + } + h.lines = append(h.lines, line{kind: opEqual, content: lines[i]}) + delta++ + } + return delta +} + +// String converts a unified diff to the standard textual form for that diff. +// The output of this function can be passed to tools like patch. +func (u unified) String() string { + if len(u.hunks) == 0 { + return "" + } + b := new(strings.Builder) + fmt.Fprintf(b, "--- %s\n", u.from) + fmt.Fprintf(b, "+++ %s\n", u.to) + for _, hunk := range u.hunks { + fromCount, toCount := 0, 0 + for _, l := range hunk.lines { + switch l.kind { + case opDelete: + fromCount++ + case opInsert: + toCount++ + default: + fromCount++ + toCount++ + } + } + fmt.Fprint(b, "@@") + if fromCount > 1 { + fmt.Fprintf(b, " -%d,%d", hunk.fromLine, fromCount) + } else if hunk.fromLine == 1 && fromCount == 0 { + // Match odd GNU diff -u behavior adding to empty file. + fmt.Fprintf(b, " -0,0") + } else { + fmt.Fprintf(b, " -%d", hunk.fromLine) + } + if toCount > 1 { + fmt.Fprintf(b, " +%d,%d", hunk.toLine, toCount) + } else if hunk.toLine == 1 && toCount == 0 { + // Match odd GNU diff -u behavior adding to empty file. 
+ fmt.Fprintf(b, " +0,0") + } else { + fmt.Fprintf(b, " +%d", hunk.toLine) + } + fmt.Fprint(b, " @@\n") + for _, l := range hunk.lines { + switch l.kind { + case opDelete: + fmt.Fprintf(b, "-%s", l.content) + case opInsert: + fmt.Fprintf(b, "+%s", l.content) + default: + fmt.Fprintf(b, " %s", l.content) + } + if !strings.HasSuffix(l.content, "\n") { + fmt.Fprintf(b, "\n\\ No newline at end of file\n") + } + } + } + return b.String() +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go index 89017e9bfc..5a26c75aed 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go @@ -1,18 +1,20 @@ package commands import ( + "context" + "encoding/json" "errors" "fmt" "net/http" "os" "path/filepath" + "strconv" "strings" "time" hcversion "github.com/hashicorp/go-version" "github.com/pelletier/go-toml/v2" - "github.com/santhosh-tekuri/jsonschema/v5" - "github.com/santhosh-tekuri/jsonschema/v5/httploader" + "github.com/santhosh-tekuri/jsonschema/v6" "github.com/spf13/cobra" "github.com/spf13/pflag" "gopkg.in/yaml.v3" @@ -43,9 +45,7 @@ func (c *configCommand) executeVerify(cmd *cobra.Command, _ []string) error { return fmt.Errorf("[%s] validate: %w", usedConfigFile, err) } - detail := v.DetailedOutput() - - printValidationDetail(cmd, &detail) + printValidationDetail(cmd, v.DetailedOutput()) return errors.New("the configuration contains invalid elements") } @@ -100,10 +100,12 @@ func createSchemaURL(flags *pflag.FlagSet, buildInfo BuildInfo) (string, error) } func validateConfiguration(schemaPath, targetFile string) error { - httploader.Client = &http.Client{Timeout: 2 * time.Second} - compiler := jsonschema.NewCompiler() - compiler.Draft = jsonschema.Draft7 + compiler.UseLoader(jsonschema.SchemeURLLoader{ + "file": jsonschema.FileLoader{}, + "https": newJSONSchemaHTTPLoader(), + }) + compiler.DefaultDraft(jsonschema.Draft7) schema, err := compiler.Compile(schemaPath) if err != nil { @@ -133,10 +135,13 @@ func validateConfiguration(schemaPath, targetFile string) error { return schema.Validate(m) } -func printValidationDetail(cmd *cobra.Command, detail *jsonschema.Detailed) { - if detail.Error != "" { +func printValidationDetail(cmd *cobra.Command, detail *jsonschema.OutputUnit) { + if detail.Error != nil { + data, _ := json.Marshal(detail.Error) + details, _ := strconv.Unquote(string(data)) + cmd.PrintErrf("jsonschema: %q does not validate with %q: %s\n", - strings.ReplaceAll(strings.TrimPrefix(detail.InstanceLocation, "/"), "/", "."), detail.KeywordLocation, detail.Error) + strings.ReplaceAll(strings.TrimPrefix(detail.InstanceLocation, "/"), "/", "."), detail.KeywordLocation, details) } for _, d := range detail.Errors { @@ -177,3 +182,33 @@ func decodeTomlFile(filename string) (any, error) { return m, nil } + +type jsonschemaHTTPLoader struct { + *http.Client +} + +func newJSONSchemaHTTPLoader() *jsonschemaHTTPLoader { + return &jsonschemaHTTPLoader{Client: &http.Client{ + Timeout: 2 * time.Second, + }} +} + +func (l jsonschemaHTTPLoader) Load(url string) (any, error) { + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, http.NoBody) + if err != nil { + return nil, err + } + + resp, err := l.Do(req) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, 
fmt.Errorf("%s returned status code %d", url, resp.StatusCode) + } + + return jsonschema.UnmarshalJSON(resp.Body) +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go index 608f6b9de5..9f17018b2f 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/flagsets.go @@ -28,11 +28,11 @@ func setupLintersFlagSet(v *viper.Viper, fs *pflag.FlagSet) { color.GreenString("Enable only fast linters from enabled linters set (first run won't be fast)")) internal.AddHackedStringSliceP(fs, "presets", "p", - color.GreenString(fmt.Sprintf("Enable presets (%s) of linters.\n"+ - "Run 'golangci-lint help linters' to see them.\n"+ + formatList("Enable presets of linters:", lintersdb.AllPresets(), + "Run 'golangci-lint help linters' to see them.", "This option implies option --disable-all", - strings.Join(lintersdb.AllPresets(), "|"), - ))) + ), + ) fs.StringSlice("enable-only", nil, color.GreenString("Override linters configuration section to only run the specific linter(s)")) // Flags only. @@ -49,14 +49,13 @@ func setupRunFlagSet(v *viper.Viper, fs *pflag.FlagSet) { internal.AddFlagAndBind(v, fs, fs.String, "go", "run.go", "", color.GreenString("Targeted Go version")) internal.AddHackedStringSlice(fs, "build-tags", color.GreenString("Build tags")) - internal.AddFlagAndBind(v, fs, fs.Duration, "timeout", "run.timeout", defaultTimeout, color.GreenString("Timeout for total work")) + internal.AddFlagAndBind(v, fs, fs.Duration, "timeout", "run.timeout", defaultTimeout, + color.GreenString("Timeout for total work. If <= 0, the timeout is disabled")) internal.AddFlagAndBind(v, fs, fs.Bool, "tests", "run.tests", true, color.GreenString("Analyze tests (*_test.go)")) internal.AddDeprecatedHackedStringSlice(fs, "skip-files", color.GreenString("Regexps of files to skip")) internal.AddDeprecatedHackedStringSlice(fs, "skip-dirs", color.GreenString("Regexps of directories to skip")) - internal.AddDeprecatedFlagAndBind(v, fs, fs.Bool, "skip-dirs-use-default", "run.skip-dirs-use-default", true, - getDefaultDirectoryExcludeHelp()) const allowParallelDesc = "Allow multiple parallel golangci-lint instances running.\n" + "If false (default) - golangci-lint acquires file lock on start." 
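The reworded `--timeout` help above pairs with the `run.go` hunk further down, which only applies `context.WithTimeout` when the configured timeout is positive. A minimal sketch of that pattern, with `withOptionalTimeout` as a hypothetical helper name:

```go
package main

import (
	"context"
	"time"
)

// withOptionalTimeout bounds ctx by d when d > 0; a non-positive d
// disables the deadline, matching the new flag description.
func withOptionalTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
	if d > 0 {
		return context.WithTimeout(ctx, d)
	}
	return ctx, func() {} // no-op cancel keeps the call site uniform
}
```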
@@ -69,13 +68,11 @@ func setupRunFlagSet(v *viper.Viper, fs *pflag.FlagSet) { func setupOutputFlagSet(v *viper.Viper, fs *pflag.FlagSet) { internal.AddFlagAndBind(v, fs, fs.String, "out-format", "output.formats", config.OutFormatColoredLineNumber, - color.GreenString(fmt.Sprintf("Formats of output: %s", strings.Join(config.AllOutputFormats, "|")))) + formatList("Formats of output:", config.AllOutputFormats)) internal.AddFlagAndBind(v, fs, fs.Bool, "print-issued-lines", "output.print-issued-lines", true, color.GreenString("Print lines of code with issue")) internal.AddFlagAndBind(v, fs, fs.Bool, "print-linter-name", "output.print-linter-name", true, color.GreenString("Print linter name in issue line")) - internal.AddFlagAndBind(v, fs, fs.Bool, "uniq-by-line", "output.uniq-by-line", true, - color.GreenString("Make issues output unique by line")) internal.AddFlagAndBind(v, fs, fs.Bool, "sort-results", "output.sort-results", false, color.GreenString("Sort linter results")) internal.AddFlagAndBind(v, fs, fs.StringSlice, "sort-order", "output.sort-order", nil, @@ -97,11 +94,13 @@ func setupIssuesFlagSet(v *viper.Viper, fs *pflag.FlagSet) { color.GreenString("Maximum issues count per one linter. Set to 0 to disable")) internal.AddFlagAndBind(v, fs, fs.Int, "max-same-issues", "issues.max-same-issues", 3, color.GreenString("Maximum count of issues with the same text. Set to 0 to disable")) + internal.AddFlagAndBind(v, fs, fs.Bool, "uniq-by-line", "issues.uniq-by-line", true, + color.GreenString("Make issues output unique by line")) internal.AddHackedStringSlice(fs, "exclude-files", color.GreenString("Regexps of files to exclude")) internal.AddHackedStringSlice(fs, "exclude-dirs", color.GreenString("Regexps of directories to exclude")) internal.AddFlagAndBind(v, fs, fs.Bool, "exclude-dirs-use-default", "issues.exclude-dirs-use-default", true, - getDefaultDirectoryExcludeHelp()) + formatList("Use or not use default excluded directories:", processors.StdExcludeDirRegexps)) internal.AddFlagAndBind(v, fs, fs.String, "exclude-generated", "issues.exclude-generated", processors.AutogeneratedModeLax, color.GreenString("Mode of the generated files analysis")) @@ -123,6 +122,23 @@ func setupIssuesFlagSet(v *viper.Viper, fs *pflag.FlagSet) { color.GreenString("Fix found issues (if it's supported by the linter)")) } +func formatList(head string, items []string, foot ...string) string { + parts := []string{color.GreenString(head)} + for _, p := range items { + parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(p))) + } + + for _, s := range foot { + parts = append(parts, color.GreenString(s)) + } + + if len(foot) == 0 { + parts = append(parts, "") + } + + return strings.Join(parts, "\n") +} + func getDefaultIssueExcludeHelp() string { parts := []string{color.GreenString("Use or not use default excludes:")} @@ -135,12 +151,3 @@ func getDefaultIssueExcludeHelp() string { return strings.Join(parts, "\n") } - -func getDefaultDirectoryExcludeHelp() string { - parts := []string{color.GreenString("Use or not use default excluded directories:")} - for _, dir := range processors.StdExcludeDirRegexps { - parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(dir))) - } - parts = append(parts, "") - return strings.Join(parts, "\n") -} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go index 094e5d1905..02a586f4c1 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go 
+++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/help.go @@ -1,10 +1,13 @@ package commands import ( + "encoding/json" "fmt" "slices" "sort" "strings" + "unicode" + "unicode/utf8" "github.com/fatih/color" "github.com/spf13/cobra" @@ -15,9 +18,27 @@ import ( "github.com/golangci/golangci-lint/pkg/logutils" ) +type linterHelp struct { + Name string `json:"name"` + Desc string `json:"description"` + Fast bool `json:"fast"` + AutoFix bool `json:"autoFix"` + Presets []string `json:"presets"` + EnabledByDefault bool `json:"enabledByDefault"` + Deprecated bool `json:"deprecated"` + Since string `json:"since"` + OriginalURL string `json:"originalURL,omitempty"` +} + +type helpOptions struct { + JSON bool +} + type helpCommand struct { cmd *cobra.Command + opts helpOptions + dbManager *lintersdb.Manager log logutils.Log @@ -35,16 +56,21 @@ func newHelpCommand(logger logutils.Log) *helpCommand { }, } - helpCmd.AddCommand( - &cobra.Command{ - Use: "linters", - Short: "Help about linters", - Args: cobra.NoArgs, - ValidArgsFunction: cobra.NoFileCompletions, - Run: c.execute, - PreRunE: c.preRunE, - }, - ) + lintersCmd := &cobra.Command{ + Use: "linters", + Short: "Help about linters", + Args: cobra.NoArgs, + ValidArgsFunction: cobra.NoFileCompletions, + RunE: c.execute, + PreRunE: c.preRunE, + } + + helpCmd.AddCommand(lintersCmd) + + fs := lintersCmd.Flags() + fs.SortFlags = false // sort them as they are defined here + + fs.BoolVar(&c.opts.JSON, "json", false, color.GreenString("Display as JSON")) c.cmd = helpCmd @@ -64,7 +90,41 @@ func (c *helpCommand) preRunE(_ *cobra.Command, _ []string) error { return nil } -func (c *helpCommand) execute(_ *cobra.Command, _ []string) { +func (c *helpCommand) execute(_ *cobra.Command, _ []string) error { + if c.opts.JSON { + return c.printJSON() + } + + c.print() + + return nil +} + +func (c *helpCommand) printJSON() error { + var linters []linterHelp + + for _, lc := range c.dbManager.GetAllSupportedLinterConfigs() { + if lc.Internal { + continue + } + + linters = append(linters, linterHelp{ + Name: lc.Name(), + Desc: formatDescription(lc.Linter.Desc()), + Fast: !lc.IsSlowLinter(), + AutoFix: lc.CanAutoFix, + Presets: lc.InPresets, + EnabledByDefault: lc.EnabledByDefault, + Deprecated: lc.IsDeprecated(), + Since: lc.Since, + OriginalURL: lc.OriginalURL, + }) + } + + return json.NewEncoder(c.cmd.OutOrStdout()).Encode(linters) +} + +func (c *helpCommand) print() { var enabledLCs, disabledLCs []*linter.Config for _, lc := range c.dbManager.GetAllSupportedLinterConfigs() { if lc.Internal { @@ -124,19 +184,46 @@ func printLinters(lcs []*linter.Config) { }) for _, lc := range lcs { - // If the linter description spans multiple lines, truncate everything following the first newline - linterDescription := lc.Linter.Desc() - firstNewline := strings.IndexRune(linterDescription, '\n') - if firstNewline > 0 { - linterDescription = linterDescription[:firstNewline] - } + desc := formatDescription(lc.Linter.Desc()) deprecatedMark := "" if lc.IsDeprecated() { deprecatedMark = " [" + color.RedString("deprecated") + "]" } - _, _ = fmt.Fprintf(logutils.StdOut, "%s%s: %s [fast: %t, auto-fix: %t]\n", - color.YellowString(lc.Name()), deprecatedMark, linterDescription, !lc.IsSlowLinter(), lc.CanAutoFix) + var capabilities []string + if !lc.IsSlowLinter() { + capabilities = append(capabilities, color.BlueString("fast")) + } + if lc.CanAutoFix { + capabilities = append(capabilities, color.GreenString("auto-fix")) + } + + var capability string + if capabilities != nil 
{ + capability = " [" + strings.Join(capabilities, ", ") + "]" + } + + _, _ = fmt.Fprintf(logutils.StdOut, "%s%s: %s%s\n", + color.YellowString(lc.Name()), deprecatedMark, desc, capability) + } +} + +func formatDescription(desc string) string { + // If the linter description spans multiple lines, truncate everything following the first newline + endFirstLine := strings.IndexRune(desc, '\n') + if endFirstLine > 0 { + desc = desc[:endFirstLine] } + + rawDesc := []rune(desc) + + r, _ := utf8.DecodeRuneInString(desc) + rawDesc[0] = unicode.ToUpper(r) + + if rawDesc[len(rawDesc)-1] != '.' { + rawDesc = append(rawDesc, '.') + } + + return string(rawDesc) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go index ff7c5e467b..d9aa7578cd 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go @@ -238,14 +238,21 @@ func (c *runCommand) execute(_ *cobra.Command, args []string) { needTrackResources := logutils.IsVerbose() || c.opts.PrintResourcesUsage trackResourcesEndCh := make(chan struct{}) - defer func() { // XXX: this defer must be before ctx.cancel defer - if needTrackResources { // wait until resource tracking finished to print properly + + // Note: this defer must be before ctx.cancel defer + defer func() { + // wait until resource tracking finished to print properly + if needTrackResources { <-trackResourcesEndCh } }() - ctx, cancel := context.WithTimeout(context.Background(), c.cfg.Run.Timeout) - defer cancel() + ctx := context.Background() + if c.cfg.Run.Timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, c.cfg.Run.Timeout) + defer cancel() + } if needTrackResources { go watchResources(ctx, trackResourcesEndCh, c.log, c.debugf) diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/config.go index b863b329f9..579eddf594 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/config.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/config.go @@ -1,11 +1,16 @@ package config import ( + "cmp" + "fmt" "os" + "path/filepath" + "slices" "strings" hcversion "github.com/hashicorp/go-version" - "github.com/ldez/gomoddirectives" + "github.com/ldez/grignotin/gomod" + "golang.org/x/mod/modfile" ) // Config encapsulates the config data specified in the golangci-lint YAML config file. @@ -80,12 +85,7 @@ func detectGoVersion() string { return goVersion } - v := os.Getenv("GOVERSION") - if v != "" { - return v - } - - return "1.17" + return cmp.Or(os.Getenv("GOVERSION"), "1.17") } // detectGoVersionFromGoMod tries to get Go version from go.mod. @@ -93,8 +93,16 @@ func detectGoVersion() string { // else it returns `go` version if present, // else it returns empty. 
func detectGoVersionFromGoMod() string { - file, _ := gomoddirectives.GetModuleFile() - if file == nil { + modPath, err := gomod.GetGoModPath() + if err != nil { + modPath = detectGoModFallback() + if modPath == "" { + return "" + } + } + + file, err := parseGoMod(modPath) + if err != nil { return "" } @@ -110,3 +118,41 @@ func detectGoVersionFromGoMod() string { return "" } + +func parseGoMod(goMod string) (*modfile.File, error) { + raw, err := os.ReadFile(filepath.Clean(goMod)) + if err != nil { + return nil, fmt.Errorf("reading go.mod file: %w", err) + } + + return modfile.Parse("go.mod", raw, nil) +} + +func detectGoModFallback() string { + info, err := gomod.GetModuleInfo() + if err != nil { + return "" + } + + wd, err := os.Getwd() + if err != nil { + return "" + } + + slices.SortFunc(info, func(a, b gomod.ModInfo) int { + return cmp.Compare(len(b.Path), len(a.Path)) + }) + + goMod := info[0] + for _, m := range info { + if !strings.HasPrefix(wd, m.Dir) { + continue + } + + goMod = m + + break + } + + return goMod.GoMod +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go index 2ee9364aaa..081b87624d 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/issues.go @@ -117,8 +117,9 @@ type Issues struct { UseDefaultExcludeDirs bool `mapstructure:"exclude-dirs-use-default"` - MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"` - MaxSameIssues int `mapstructure:"max-same-issues"` + MaxIssuesPerLinter int `mapstructure:"max-issues-per-linter"` + MaxSameIssues int `mapstructure:"max-same-issues"` + UniqByLine bool `mapstructure:"uniq-by-line"` DiffFromRevision string `mapstructure:"new-from-rev"` DiffPatchFilePath string `mapstructure:"new-from-patch"` @@ -127,7 +128,7 @@ type Issues struct { NeedFix bool `mapstructure:"fix"` - ExcludeGeneratedStrict bool `mapstructure:"exclude-generated-strict"` // Deprecated: use ExcludeGenerated instead. + ExcludeGeneratedStrict *bool `mapstructure:"exclude-generated-strict"` // Deprecated: use ExcludeGenerated instead. 
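+ // The bool-to-*bool change here (and on the other deprecated options in this
+ // diff) lets the loader distinguish "unset" from "explicitly false":
+ // mapstructure leaves the pointer nil when the key is absent, so deprecation
+ // warnings fire only for options the user actually set. Minimal sketch with
+ // a hypothetical option name, mirroring handleDeprecation:
+ //
+ //	if cfg.SomeDeprecatedOpt != nil { // present in the config file
+ //		log.Warnf("`some-deprecated-opt` is deprecated")
+ //		cfg.ReplacementOpt = *cfg.SomeDeprecatedOpt
+ //	}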
} func (i *Issues) Validate() error { diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go index b182d1e0f1..8e6c184ca4 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go @@ -105,6 +105,7 @@ var defaultLintersSettings = LintersSettings{ Kitlog: true, Klog: true, Logr: true, + Slog: true, Zap: true, RequireStringKey: false, NoPrintfLike: false, @@ -168,7 +169,6 @@ var defaultLintersSettings = LintersSettings{ Unused: UnusedSettings{ FieldWritesAreUses: true, PostStatementsAreReads: false, - ExportedIsUsed: true, ExportedFieldsAreUsed: true, ParametersAreUsed: true, LocalVariablesAreUsed: true, @@ -178,6 +178,15 @@ var defaultLintersSettings = LintersSettings{ HTTPMethod: true, HTTPStatusCode: true, }, + UseTesting: UseTestingSettings{ + ContextBackground: true, + ContextTodo: true, + OSChdir: true, + OSMkdirTemp: true, + OSSetenv: false, + OSTempDir: false, + OSCreateTemp: true, + }, Varnamelen: VarnamelenSettings{ MaxDistance: 5, MinNameLength: 3, @@ -261,6 +270,7 @@ type LintersSettings struct { Promlinter PromlinterSettings ProtoGetter ProtoGetterSettings Reassign ReassignSettings + Recvcheck RecvcheckSettings Revive ReviveSettings RowsErrCheck RowsErrCheckSettings SlogLint SlogLintSettings @@ -277,6 +287,7 @@ type LintersSettings struct { Unparam UnparamSettings Unused UnusedSettings UseStdlibVars UseStdlibVarsSettings + UseTesting UseTestingSettings Varnamelen VarnamelenSettings Whitespace WhitespaceSettings Wrapcheck WrapcheckSettings @@ -318,8 +329,10 @@ type BiDiChkSettings struct { } type CopyLoopVarSettings struct { - IgnoreAlias bool `mapstructure:"ignore-alias"` // Deprecated: use CheckAlias - CheckAlias bool `mapstructure:"check-alias"` + CheckAlias bool `mapstructure:"check-alias"` + + // Deprecated: use CheckAlias + IgnoreAlias *bool `mapstructure:"ignore-alias"` } type Cyclop struct { @@ -466,10 +479,12 @@ type FunlenSettings struct { } type GciSettings struct { - Sections []string `mapstructure:"sections"` - SkipGenerated bool `mapstructure:"skip-generated"` - CustomOrder bool `mapstructure:"custom-order"` - NoLexOrder bool `mapstructure:"no-lex-order"` + Sections []string `mapstructure:"sections"` + NoInlineComments bool `mapstructure:"no-inline-comments"` + NoPrefixComments bool `mapstructure:"no-prefix-comments"` + SkipGenerated bool `mapstructure:"skip-generated"` + CustomOrder bool `mapstructure:"custom-order"` + NoLexOrder bool `mapstructure:"no-lex-order"` // Deprecated: use Sections instead. 
LocalPrefixes string `mapstructure:"local-prefixes"` @@ -492,6 +507,7 @@ type GinkgoLinterSettings struct { type GoChecksumTypeSettings struct { DefaultSignifiesExhaustive bool `mapstructure:"default-signifies-exhaustive"` + IncludeSharedInterfaces bool `mapstructure:"include-shared-interfaces"` } type GocognitSettings struct { @@ -534,7 +550,7 @@ type GodotSettings struct { Period bool `mapstructure:"period"` // Deprecated: use Scope instead - CheckAll bool `mapstructure:"check-all"` + CheckAll *bool `mapstructure:"check-all"` } type GodoxSettings struct { @@ -574,6 +590,11 @@ type GoModDirectivesSettings struct { ReplaceLocal bool `mapstructure:"replace-local"` ExcludeForbidden bool `mapstructure:"exclude-forbidden"` RetractAllowNoExplanation bool `mapstructure:"retract-allow-no-explanation"` + ToolchainForbidden bool `mapstructure:"toolchain-forbidden"` + ToolchainPattern string `mapstructure:"toolchain-pattern"` + ToolForbidden bool `mapstructure:"tool-forbidden"` + GoDebugForbidden bool `mapstructure:"go-debug-forbidden"` + GoVersionPattern string `mapstructure:"go-version-pattern"` } type GoModGuardSettings struct { @@ -622,7 +643,7 @@ type GovetSettings struct { Settings map[string]map[string]any // Deprecated: the linter should be enabled inside Enable. - CheckShadowing bool `mapstructure:"check-shadowing"` + CheckShadowing *bool `mapstructure:"check-shadowing"` } func (cfg *GovetSettings) Validate() error { @@ -687,6 +708,7 @@ type LoggerCheckSettings struct { Kitlog bool `mapstructure:"kitlog"` Klog bool `mapstructure:"klog"` Logr bool `mapstructure:"logr"` + Slog bool `mapstructure:"slog"` Zap bool `mapstructure:"zap"` RequireStringKey bool `mapstructure:"require-string-key"` NoPrintfLike bool `mapstructure:"no-printf-like"` @@ -798,6 +820,11 @@ type ReassignSettings struct { Patterns []string `mapstructure:"patterns"` } +type RecvcheckSettings struct { + DisableBuiltin bool `mapstructure:"disable-builtin"` + Exclusions []string `mapstructure:"exclusions"` +} + type ReviveSettings struct { Go string `mapstructure:"-"` MaxOpenFiles int `mapstructure:"max-open-files"` @@ -837,7 +864,7 @@ type SlogLintSettings struct { ArgsOnSepLines bool `mapstructure:"args-on-sep-lines"` // Deprecated: use Context instead. 
- ContextOnly bool `mapstructure:"context-only"` + ContextOnly *bool `mapstructure:"context-only"` } type SpancheckSettings struct { @@ -868,10 +895,31 @@ type TagAlignSettings struct { } type TagliatelleSettings struct { - Case struct { - Rules map[string]string - UseFieldName bool `mapstructure:"use-field-name"` - } + Case TagliatelleCase +} + +type TagliatelleCase struct { + TagliatelleBase `mapstructure:",squash"` + Overrides []TagliatelleOverrides +} + +type TagliatelleOverrides struct { + TagliatelleBase `mapstructure:",squash"` + Package string `mapstructure:"pkg"` + Ignore bool `mapstructure:"ignore"` +} + +type TagliatelleBase struct { + Rules map[string]string `mapstructure:"rules"` + ExtendedRules map[string]TagliatelleExtendedRule `mapstructure:"extended-rules"` + UseFieldName bool `mapstructure:"use-field-name"` + IgnoredFields []string `mapstructure:"ignored-fields"` +} + +type TagliatelleExtendedRule struct { + Case string + ExtraInitialisms bool + InitialismOverrides map[string]bool } type TestifylintSettings struct { @@ -936,11 +984,24 @@ type UseStdlibVarsSettings struct { TimeLayout bool `mapstructure:"time-layout"` CryptoHash bool `mapstructure:"crypto-hash"` DefaultRPCPath bool `mapstructure:"default-rpc-path"` - OSDevNull bool `mapstructure:"os-dev-null"` // Deprecated SQLIsolationLevel bool `mapstructure:"sql-isolation-level"` TLSSignatureScheme bool `mapstructure:"tls-signature-scheme"` ConstantKind bool `mapstructure:"constant-kind"` - SyslogPriority bool `mapstructure:"syslog-priority"` // Deprecated + + // Deprecated + OSDevNull *bool `mapstructure:"os-dev-null"` + // Deprecated + SyslogPriority *bool `mapstructure:"syslog-priority"` +} + +type UseTestingSettings struct { + ContextBackground bool `mapstructure:"context-background"` + ContextTodo bool `mapstructure:"context-todo"` + OSChdir bool `mapstructure:"os-chdir"` + OSMkdirTemp bool `mapstructure:"os-mkdir-temp"` + OSSetenv bool `mapstructure:"os-setenv"` + OSTempDir bool `mapstructure:"os-temp-dir"` + OSCreateTemp bool `mapstructure:"os-create-temp"` } type UnconvertSettings struct { @@ -956,11 +1017,13 @@ type UnparamSettings struct { type UnusedSettings struct { FieldWritesAreUses bool `mapstructure:"field-writes-are-uses"` PostStatementsAreReads bool `mapstructure:"post-statements-are-reads"` - ExportedIsUsed bool `mapstructure:"exported-is-used"` // Deprecated ExportedFieldsAreUsed bool `mapstructure:"exported-fields-are-used"` ParametersAreUsed bool `mapstructure:"parameters-are-used"` LocalVariablesAreUsed bool `mapstructure:"local-variables-are-used"` GeneratedIsUsed bool `mapstructure:"generated-is-used"` + + // Deprecated + ExportedIsUsed *bool `mapstructure:"exported-is-used"` } type VarnamelenSettings struct { @@ -982,6 +1045,7 @@ type WhitespaceSettings struct { } type WrapcheckSettings struct { + ExtraIgnoreSigs []string `mapstructure:"extra-ignore-sigs"` // TODO(ldez): v2 the options must be renamed to use hyphen. 
IgnoreSigs []string `mapstructure:"ignoreSigs"` IgnoreSigRegexps []string `mapstructure:"ignoreSigRegexps"` diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go index efeed3ca4d..56f57d9d5d 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go @@ -1,6 +1,7 @@ package config import ( + "cmp" "errors" "fmt" "os" @@ -292,9 +293,7 @@ func (l *Loader) handleGoVersion() { l.cfg.LintersSettings.ParallelTest.Go = l.cfg.Run.Go - if l.cfg.LintersSettings.Gofumpt.LangVersion == "" { - l.cfg.LintersSettings.Gofumpt.LangVersion = l.cfg.Run.Go - } + l.cfg.LintersSettings.Gofumpt.LangVersion = cmp.Or(l.cfg.LintersSettings.Gofumpt.LangVersion, l.cfg.Run.Go) trimmedGoVersion := goutil.TrimGoVersion(l.cfg.Run.Go) @@ -322,19 +321,23 @@ func (l *Loader) handleDeprecation() error { l.cfg.Issues.ExcludeDirs = l.cfg.Run.SkipDirs } - // The 2 options are true by default. // Deprecated since v1.57.0 - if !l.cfg.Run.UseDefaultSkipDirs { + if l.cfg.Run.UseDefaultSkipDirs != nil { l.log.Warnf("The configuration option `run.skip-dirs-use-default` is deprecated, please use `issues.exclude-dirs-use-default`.") + l.cfg.Issues.UseDefaultExcludeDirs = *l.cfg.Run.UseDefaultSkipDirs } - l.cfg.Issues.UseDefaultExcludeDirs = l.cfg.Run.UseDefaultSkipDirs && l.cfg.Issues.UseDefaultExcludeDirs - // The 2 options are false by default. // Deprecated since v1.57.0 - if l.cfg.Run.ShowStats { + if l.cfg.Run.ShowStats != nil { l.log.Warnf("The configuration option `run.show-stats` is deprecated, please use `output.show-stats`") + l.cfg.Output.ShowStats = *l.cfg.Run.ShowStats + } + + // Deprecated since v1.63.0 + if l.cfg.Output.UniqByLine != nil { + l.log.Warnf("The configuration option `output.uniq-by-line` is deprecated, please use `issues.uniq-by-line`") + l.cfg.Issues.UniqByLine = *l.cfg.Output.UniqByLine } - l.cfg.Output.ShowStats = l.cfg.Run.ShowStats || l.cfg.Output.ShowStats // Deprecated since v1.57.0 if l.cfg.Output.Format != "" { @@ -357,9 +360,11 @@ func (l *Loader) handleDeprecation() error { } // Deprecated since v1.59.0 - if l.cfg.Issues.ExcludeGeneratedStrict { + if l.cfg.Issues.ExcludeGeneratedStrict != nil { l.log.Warnf("The configuration option `issues.exclude-generated-strict` is deprecated, please use `issues.exclude-generated`") - l.cfg.Issues.ExcludeGenerated = "strict" // Don't use the constants to avoid cyclic dependencies. + if !*l.cfg.Issues.ExcludeGeneratedStrict { + l.cfg.Issues.ExcludeGenerated = "strict" // Don't use the constants to avoid cyclic dependencies. + } } l.handleLinterOptionDeprecations() @@ -367,16 +372,15 @@ func (l *Loader) handleDeprecation() error { return nil } -//nolint:gocyclo // the complexity cannot be reduced. func (l *Loader) handleLinterOptionDeprecations() { // Deprecated since v1.57.0, // but it was unofficially deprecated since v1.19 (2019) (https://github.com/golangci/golangci-lint/pull/697). - if l.cfg.LintersSettings.Govet.CheckShadowing { + if l.cfg.LintersSettings.Govet.CheckShadowing != nil { l.log.Warnf("The configuration option `linters.govet.check-shadowing` is deprecated. 
" + "Please enable `shadow` instead, if you are not using `enable-all`.") } - if l.cfg.LintersSettings.CopyLoopVar.IgnoreAlias { + if l.cfg.LintersSettings.CopyLoopVar.IgnoreAlias != nil { l.log.Warnf("The configuration option `linters.copyloopvar.ignore-alias` is deprecated and ignored," + "please use `linters.copyloopvar.check-alias`.") } @@ -398,7 +402,7 @@ func (l *Loader) handleLinterOptionDeprecations() { } // Deprecated since v1.33.0. - if l.cfg.LintersSettings.Godot.CheckAll { + if l.cfg.LintersSettings.Godot.CheckAll != nil { l.log.Warnf("The configuration option `linters.godot.check-all` is deprecated, please use `linters.godot.scope: all`.") } @@ -423,25 +427,23 @@ func (l *Loader) handleLinterOptionDeprecations() { } // Deprecated since v1.60.0 - if !l.cfg.LintersSettings.Unused.ExportedIsUsed { + if l.cfg.LintersSettings.Unused.ExportedIsUsed != nil { l.log.Warnf("The configuration option `linters.unused.exported-is-used` is deprecated.") } // Deprecated since v1.58.0 - if l.cfg.LintersSettings.SlogLint.ContextOnly { + if l.cfg.LintersSettings.SlogLint.ContextOnly != nil { l.log.Warnf("The configuration option `linters.sloglint.context-only` is deprecated, please use `linters.sloglint.context`.") - if l.cfg.LintersSettings.SlogLint.Context == "" { - l.cfg.LintersSettings.SlogLint.Context = "all" - } + l.cfg.LintersSettings.SlogLint.Context = cmp.Or(l.cfg.LintersSettings.SlogLint.Context, "all") } // Deprecated since v1.51.0 - if l.cfg.LintersSettings.UseStdlibVars.OSDevNull { + if l.cfg.LintersSettings.UseStdlibVars.OSDevNull != nil { l.log.Warnf("The configuration option `linters.usestdlibvars.os-dev-null` is deprecated.") } // Deprecated since v1.51.0 - if l.cfg.LintersSettings.UseStdlibVars.SyslogPriority { + if l.cfg.LintersSettings.UseStdlibVars.SyslogPriority != nil { l.log.Warnf("The configuration option `linters.usestdlibvars.syslog-priority` is deprecated.") } } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/output.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/output.go index 6a26d5773e..aaa5183ec4 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/output.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/output.go @@ -43,7 +43,6 @@ type Output struct { Formats OutputFormats `mapstructure:"formats"` PrintIssuedLine bool `mapstructure:"print-issued-lines"` PrintLinterName bool `mapstructure:"print-linter-name"` - UniqByLine bool `mapstructure:"uniq-by-line"` SortResults bool `mapstructure:"sort-results"` SortOrder []string `mapstructure:"sort-order"` PathPrefix string `mapstructure:"path-prefix"` @@ -51,6 +50,9 @@ type Output struct { // Deprecated: use Formats instead. Format string `mapstructure:"format"` + + // Deprecated: use [Issues.UniqByLine] instead. + UniqByLine *bool `mapstructure:"uniq-by-line"` } func (o *Output) Validate() error { diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/run.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/run.go index 2f6523c0b9..784e8c2fad 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/run.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/config/run.go @@ -29,10 +29,10 @@ type Run struct { // Deprecated: use Issues.ExcludeDirs instead. SkipDirs []string `mapstructure:"skip-dirs"` // Deprecated: use Issues.UseDefaultExcludeDirs instead. 
- UseDefaultSkipDirs bool `mapstructure:"skip-dirs-use-default"` + UseDefaultSkipDirs *bool `mapstructure:"skip-dirs-use-default"` // Deprecated: use Output.ShowStats instead. - ShowStats bool `mapstructure:"show-stats"` + ShowStats *bool `mapstructure:"show-stats"` } func (r *Run) Validate() error { diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go index 80bb9c5b44..91d12e5cd3 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils.go @@ -61,7 +61,7 @@ func EvalSymlinks(path string) (string, error) { } var er evalSymlinkRes - er.path, er.err = filepath.EvalSymlinks(path) + er.path, er.err = evalSymlinks(path) evalSymlinkCache.Store(path, er) return er.path, er.err diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_unix.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_unix.go new file mode 100644 index 0000000000..68e762cf4b --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_unix.go @@ -0,0 +1,9 @@ +//go:build !windows + +package fsutils + +import "path/filepath" + +func evalSymlinks(path string) (string, error) { + return filepath.EvalSymlinks(path) +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_windows.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_windows.go new file mode 100644 index 0000000000..19efb1cfc2 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/fsutils/fsutils_windows.go @@ -0,0 +1,39 @@ +//go:build windows + +package fsutils + +import ( + "errors" + "os" + "path/filepath" + "syscall" +) + +// This is a workaround for the behavior of [filepath.EvalSymlinks], +// which fails with [syscall.ENOTDIR] if the specified path contains a junction on Windows. +// Junctions can occur, for example, when a volume is mounted as a subdirectory inside another drive. +// This can usually happen when using the Dev Drives feature and replacing existing directories. +// See: https://github.com/golang/go/issues/40180 +// +// Since [syscall.ENOTDIR] is only returned when calling [filepath.EvalSymlinks] on Windows +// if part of the presented path is a junction and nothing before was a symlink, +// we simply treat this as NOT symlink, +// because a symlink over the junction makes no sense at all. +func evalSymlinks(path string) (string, error) { + resolved, err := filepath.EvalSymlinks(path) + if err == nil { + return resolved, nil + } + + if !errors.Is(err, syscall.ENOTDIR) { + return "", err + } + + _, err = os.Stat(path) + if err != nil { + return "", err + } + + // If exists, we make the path absolute, to be sure... 
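+ // Illustrative walk-through (hypothetical path, not from this change): for
+ // D:\mount\junction\pkg, filepath.EvalSymlinks fails with syscall.ENOTDIR,
+ // os.Stat confirms the path exists, and the fallthrough below returns the
+ // absolute path, treating the junction like a plain directory.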
+ return filepath.Abs(path) +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go index 15d8dd2b33..854e7d15f0 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/issue.go @@ -26,7 +26,7 @@ type EncodingIssue struct { Severity string Pos token.Position LineRange *result.Range - Replacement *result.Replacement + SuggestedFixes []analysis.SuggestedFix ExpectNoLint bool ExpectedNoLintLinter string } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/position.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/position.go new file mode 100644 index 0000000000..28441b341a --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/position.go @@ -0,0 +1,50 @@ +package goanalysis + +import ( + "go/ast" + "go/token" + "path/filepath" + + "golang.org/x/tools/go/analysis" +) + +func GetGoFilePosition(pass *analysis.Pass, f *ast.File) (token.Position, bool) { + position := GetFilePositionFor(pass.Fset, f.Pos()) + + if filepath.Ext(position.Filename) == ".go" { + return position, true + } + + return position, false +} + +func GetFilePositionFor(fset *token.FileSet, p token.Pos) token.Position { + pos := fset.PositionFor(p, true) + + ext := filepath.Ext(pos.Filename) + if ext != ".go" { + // position has been adjusted to a non-go file, revert to original file + return fset.PositionFor(p, false) + } + + return pos +} + +func EndOfLinePos(f *token.File, line int) token.Pos { + var end token.Pos + + if line >= f.LineCount() { + // missing newline at the end of the file + end = f.Pos(f.Size()) + } else { + end = f.LineStart(line+1) - token.Pos(1) + } + + return end +} + +// AdjustPos is a hack to get the right line to display. +// It should not be used outside some specific cases. +func AdjustPos(line, nonAdjLine, adjLine int) int { + return line + nonAdjLine - adjLine +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go index ac03c71ecc..3a8652486c 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go @@ -42,6 +42,7 @@ type Diagnostic struct { Analyzer *analysis.Analyzer Position token.Position Pkg *packages.Package + File *token.File } type runner struct { @@ -121,9 +122,9 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package, } act = actAlloc.alloc() - act.a = a - act.pkg = pkg - act.r = r + act.Analyzer = a + act.Package = pkg + act.runner = r act.isInitialPkg = initialPkgs[pkg] act.needAnalyzeSource = initialPkgs[pkg] act.analysisDoneCh = make(chan struct{}) @@ -132,11 +133,11 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package, if len(a.FactTypes) > 0 { depsCount += len(pkg.Imports) } - act.deps = make([]*action, 0, depsCount) + act.Deps = make([]*action, 0, depsCount) // Add a dependency on each required analyzers. 
for _, req := range a.Requires { - act.deps = append(act.deps, r.makeAction(req, pkg, initialPkgs, actions, actAlloc)) + act.Deps = append(act.Deps, r.makeAction(req, pkg, initialPkgs, actions, actAlloc)) } r.buildActionFactDeps(act, a, pkg, initialPkgs, actions, actAlloc) @@ -162,7 +163,7 @@ func (r *runner) buildActionFactDeps(act *action, a *analysis.Analyzer, pkg *pac sort.Strings(paths) // for determinism for _, path := range paths { dep := r.makeAction(a, pkg.Imports[path], initialPkgs, actions, actAlloc) - act.deps = append(act.deps, dep) + act.Deps = append(act.Deps, dep) } // Need to register fact types for pkgcache proper gob encoding. @@ -203,7 +204,7 @@ func (r *runner) prepareAnalysis(pkgs []*packages.Package, for _, a := range analyzers { for _, pkg := range pkgs { root := r.makeAction(a, pkg, initialPkgs, actions, actAlloc) - root.isroot = true + root.IsRoot = true roots = append(roots, root) } } @@ -220,7 +221,7 @@ func (r *runner) analyze(pkgs []*packages.Package, analyzers []*analysis.Analyze actionPerPkg := map[*packages.Package][]*action{} for _, act := range actions { - actionPerPkg[act.pkg] = append(actionPerPkg[act.pkg], act) + actionPerPkg[act.Package] = append(actionPerPkg[act.Package], act) } // Fill Imports field. @@ -250,7 +251,7 @@ func (r *runner) analyze(pkgs []*packages.Package, analyzers []*analysis.Analyze } } for _, act := range actions { - dfs(act.pkg) + dfs(act.Package) } // Limit memory and IO usage. @@ -282,7 +283,7 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err for _, act := range actions { if !extracted[act] { extracted[act] = true - visitAll(act.deps) + visitAll(act.Deps) extract(act) } } @@ -299,31 +300,34 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err seen := make(map[key]bool) extract = func(act *action) { - if act.err != nil { - if pe, ok := act.err.(*errorutil.PanicError); ok { + if act.Err != nil { + if pe, ok := act.Err.(*errorutil.PanicError); ok { panic(pe) } - retErrors = append(retErrors, fmt.Errorf("%s: %w", act.a.Name, act.err)) + retErrors = append(retErrors, fmt.Errorf("%s: %w", act.Analyzer.Name, act.Err)) return } - if act.isroot { - for _, diag := range act.diagnostics { + if act.IsRoot { + for _, diag := range act.Diagnostics { // We don't display a.Name/f.Category // as most users don't care. 
- posn := act.pkg.Fset.Position(diag.Pos) - k := key{posn, act.a, diag.Message} + position := GetFilePositionFor(act.Package.Fset, diag.Pos) + file := act.Package.Fset.File(diag.Pos) + + k := key{Position: position, Analyzer: act.Analyzer, message: diag.Message} if seen[k] { continue // duplicate } seen[k] = true retDiag := Diagnostic{ + File: file, Diagnostic: diag, - Analyzer: act.a, - Position: posn, - Pkg: act.pkg, + Analyzer: act.Analyzer, + Position: position, + Pkg: act.Package, } retDiags = append(retDiags, retDiag) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go index 152cab1814..2e1c414228 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go @@ -29,8 +29,8 @@ func (actAlloc *actionAllocator) alloc() *action { } func (act *action) waitUntilDependingAnalyzersWorked() { - for _, dep := range act.deps { - if dep.pkg == act.pkg { + for _, dep := range act.Deps { + if dep.Package == act.Package { <-dep.analysisDoneCh } } @@ -39,26 +39,26 @@ func (act *action) waitUntilDependingAnalyzersWorked() { func (act *action) analyzeSafe() { defer func() { if p := recover(); p != nil { - if !act.isroot { + if !act.IsRoot { // This line allows to display "hidden" panic with analyzers like buildssa. // Some linters are dependent of sub-analyzers but when a sub-analyzer fails the linter is not aware of that, // this results to another panic (ex: "interface conversion: interface {} is nil, not *buildssa.SSA"). - act.r.log.Errorf("%s: panic during analysis: %v, %s", act.a.Name, p, string(debug.Stack())) + act.runner.log.Errorf("%s: panic during analysis: %v, %s", act.Analyzer.Name, p, string(debug.Stack())) } - act.err = errorutil.NewPanicError(fmt.Sprintf("%s: package %q (isInitialPkg: %t, needAnalyzeSource: %t): %s", - act.a.Name, act.pkg.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack()) + act.Err = errorutil.NewPanicError(fmt.Sprintf("%s: package %q (isInitialPkg: %t, needAnalyzeSource: %t): %s", + act.Analyzer.Name, act.Package.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack()) } }() - act.r.sw.TrackStage(act.a.Name, act.analyze) + act.runner.sw.TrackStage(act.Analyzer.Name, act.analyze) } func (act *action) markDepsForAnalyzingSource() { // Horizontal deps (analyzer.Requires) must be loaded from source and analyzed before analyzing // this action. - for _, dep := range act.deps { - if dep.pkg == act.pkg { + for _, dep := range act.Deps { + if dep.Package == act.Package { // Analyze source only for horizontal dependencies, e.g. from "buildssa". 
dep.needAnalyzeSource = true // can't be set in parallel } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go index fbc2f82fa9..e06ea2979c 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go @@ -26,7 +26,7 @@ func (act *action) loadCachedFacts() bool { return true // load cached facts only for non-initial packages } - if len(act.a.FactTypes) == 0 { + if len(act.Analyzer.FactTypes) == 0 { return true // no need to load facts } @@ -38,7 +38,7 @@ func (act *action) loadCachedFacts() bool { } func (act *action) persistFactsToCache() error { - analyzer := act.a + analyzer := act.Analyzer if len(analyzer.FactTypes) == 0 { return nil } @@ -46,7 +46,7 @@ func (act *action) persistFactsToCache() error { // Merge new facts into the package and persist them. var facts []Fact for key, fact := range act.packageFacts { - if key.pkg != act.pkg.Types { + if key.pkg != act.Package.Types { // The fact is from inherited facts from another package continue } @@ -57,7 +57,7 @@ func (act *action) persistFactsToCache() error { } for key, fact := range act.objectFacts { obj := key.obj - if obj.Pkg() != act.pkg.Types { + if obj.Pkg() != act.Package.Types { // The fact is from inherited facts from another package continue } @@ -74,33 +74,33 @@ func (act *action) persistFactsToCache() error { }) } - factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name) + factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.Package.Name, act.Analyzer.Name) - return act.r.pkgCache.Put(act.pkg, cache.HashModeNeedAllDeps, factCacheKey(analyzer), facts) + return act.runner.pkgCache.Put(act.Package, cache.HashModeNeedAllDeps, factCacheKey(analyzer), facts) } func (act *action) loadPersistedFacts() bool { var facts []Fact - err := act.r.pkgCache.Get(act.pkg, cache.HashModeNeedAllDeps, factCacheKey(act.a), &facts) + err := act.runner.pkgCache.Get(act.Package, cache.HashModeNeedAllDeps, factCacheKey(act.Analyzer), &facts) if err != nil { if !errors.Is(err, cache.ErrMissing) && !errors.Is(err, io.EOF) { - act.r.log.Warnf("Failed to get persisted facts: %s", err) + act.runner.log.Warnf("Failed to get persisted facts: %s", err) } - factsCacheDebugf("No cached facts for package %q and analyzer %s", act.pkg.Name, act.a.Name) + factsCacheDebugf("No cached facts for package %q and analyzer %s", act.Package.Name, act.Analyzer.Name) return false } - factsCacheDebugf("Loaded %d cached facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name) + factsCacheDebugf("Loaded %d cached facts for package %q and analyzer %s", len(facts), act.Package.Name, act.Analyzer.Name) for _, f := range facts { if f.Path == "" { // this is a package fact - key := packageFactKey{act.pkg.Types, act.factType(f.Fact)} + key := packageFactKey{act.Package.Types, act.factType(f.Fact)} act.packageFacts[key] = f.Fact continue } - obj, err := objectpath.Object(act.pkg.Types, objectpath.Path(f.Path)) + obj, err := objectpath.Object(act.Package.Types, objectpath.Path(f.Path)) if err != nil { // Be lenient about these errors. 
For example, when // analyzing io/ioutil from source, we may get a fact diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_checker.go similarity index 52% rename from hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go rename to hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_checker.go index d868f8f5da..376a37f039 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_checker.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -// Partial copy of https://github.com/golang/tools/blob/dba5486c2a1d03519930812112b23ed2c45c04fc/go/analysis/internal/checker/checker.go +// Altered copy of https://github.com/golang/tools/blob/v0.28.0/go/analysis/internal/checker/checker.go package goanalysis @@ -18,6 +18,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" + "github.com/golangci/golangci-lint/internal/x/tools/analysisflags" + "github.com/golangci/golangci-lint/internal/x/tools/analysisinternal" "github.com/golangci/golangci-lint/pkg/goanalysis/pkgerrors" ) @@ -27,19 +29,21 @@ import ( // package (as different analyzers are applied, either in sequence or // parallel), and across packages (as dependencies are analyzed). type action struct { - a *analysis.Analyzer - pkg *packages.Package + Analyzer *analysis.Analyzer + Package *packages.Package + IsRoot bool // whether this is a root node of the graph + Deps []*action + Result any // computed result of Analyzer.run, if any (and if IsRoot) + Err error // error result of Analyzer.run + Diagnostics []analysis.Diagnostic + Duration time.Duration // execution time of this step + pass *analysis.Pass - isroot bool - deps []*action objectFacts map[objectFactKey]analysis.Fact packageFacts map[packageFactKey]analysis.Fact - result any - diagnostics []analysis.Diagnostic - err error // NOTE(ldez) custom fields. - r *runner + runner *runner analysisDoneCh chan struct{} loadCachedFactsDone bool loadCachedFactsOk bool @@ -61,7 +65,7 @@ type packageFactKey struct { // NOTE(ldez) no alteration. func (act *action) String() string { - return fmt.Sprintf("%s@%s", act.a, act.pkg) + return fmt.Sprintf("%s@%s", act.Analyzer, act.Package) } // NOTE(ldez) altered version of `func (act *action) execOnce()`. @@ -72,20 +76,26 @@ func (act *action) analyze() { return } - defer func(now time.Time) { - analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.r.prefix, act.a.Name, act.pkg.Name, time.Since(now)) - }(time.Now()) + // Record time spent in this node but not its dependencies. + // In parallel mode, due to GC/scheduler contention, the + // time is 5x higher than in sequential mode, even with a + // semaphore limiting the number of threads here. + // So use -debug=tp. + t0 := time.Now() + defer func() { + act.Duration = time.Since(t0) + analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.runner.prefix, act.Analyzer.Name, act.Package.Name, time.Since(t0)) + }() // Report an error if any dependency failures. 
var depErrors error - for _, dep := range act.deps { - if dep.err != nil { - depErrors = errors.Join(depErrors, errors.Unwrap(dep.err)) + for _, dep := range act.Deps { + if dep.Err != nil { + depErrors = errors.Join(depErrors, errors.Unwrap(dep.Err)) } } - if depErrors != nil { - act.err = fmt.Errorf("failed prerequisites: %w", depErrors) + act.Err = fmt.Errorf("failed prerequisites: %w", depErrors) return } @@ -94,15 +104,14 @@ func (act *action) analyze() { inputs := make(map[*analysis.Analyzer]any) act.objectFacts = make(map[objectFactKey]analysis.Fact) act.packageFacts = make(map[packageFactKey]analysis.Fact) - startedAt := time.Now() - - for _, dep := range act.deps { - if dep.pkg == act.pkg { + for _, dep := range act.Deps { + if dep.Package == act.Package { // Same package, different analysis (horizontal edge): // in-memory outputs of prerequisite analyzers // become inputs to this analysis pass. - inputs[dep.a] = dep.result - } else if dep.a == act.a { // (always true) + inputs[dep.Analyzer] = dep.Result + + } else if dep.Analyzer == act.Analyzer { // (always true) // Same analysis, different package (vertical edge): // serialized facts produced by prerequisite analysis // become available to this analysis pass. @@ -110,10 +119,20 @@ func (act *action) analyze() { } } - factsDebugf("%s: Inherited facts in %s", act, time.Since(startedAt)) + // NOTE(ldez) this is not compatible with our implementation. + // Quick (nonexhaustive) check that the correct go/packages mode bits were used. + // (If there were errors, all bets are off.) + // if pkg := act.Package; pkg.Errors == nil { + // if pkg.Name == "" || pkg.PkgPath == "" || pkg.Types == nil || pkg.Fset == nil || pkg.TypesSizes == nil { + // panic(fmt.Sprintf("packages must be loaded with packages.LoadSyntax mode: Name: %v, PkgPath: %v, Types: %v, Fset: %v, TypesSizes: %v", + // pkg.Name == "", pkg.PkgPath == "", pkg.Types == nil, pkg.Fset == nil, pkg.TypesSizes == nil)) + // } + // } + + factsDebugf("%s: Inherited facts in %s", act, time.Since(t0)) module := &analysis.Module{} // possibly empty (non nil) in go/analysis drivers. - if mod := act.pkg.Module; mod != nil { + if mod := act.Package.Module; mod != nil { module.Path = mod.Path module.Version = mod.Version module.GoVersion = mod.GoVersion @@ -121,79 +140,104 @@ func (act *action) analyze() { // Run the analysis. 
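+ // The result handling below also mirrors the upstream checker: once
+ // Analyzer.Run returns, the result's dynamic type is compared via reflect
+ // against Analyzer.ResultType, so an analyzer declared with, for example,
+ //
+ //	ResultType: reflect.TypeOf((*buildssa.SSA)(nil))
+ //
+ // must return a *buildssa.SSA or the run is reported as an internal error.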
pass := &analysis.Pass{ - Analyzer: act.a, - Fset: act.pkg.Fset, - Files: act.pkg.Syntax, - OtherFiles: act.pkg.OtherFiles, - IgnoredFiles: act.pkg.IgnoredFiles, - Pkg: act.pkg.Types, - TypesInfo: act.pkg.TypesInfo, - TypesSizes: act.pkg.TypesSizes, - TypeErrors: act.pkg.TypeErrors, + Analyzer: act.Analyzer, + Fset: act.Package.Fset, + Files: act.Package.Syntax, + OtherFiles: act.Package.OtherFiles, + IgnoredFiles: act.Package.IgnoredFiles, + Pkg: act.Package.Types, + TypesInfo: act.Package.TypesInfo, + TypesSizes: act.Package.TypesSizes, + TypeErrors: act.Package.TypeErrors, Module: module, ResultOf: inputs, - Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, - ImportObjectFact: act.importObjectFact, + Report: func(d analysis.Diagnostic) { act.Diagnostics = append(act.Diagnostics, d) }, + ImportObjectFact: act.ObjectFact, ExportObjectFact: act.exportObjectFact, - ImportPackageFact: act.importPackageFact, + ImportPackageFact: act.PackageFact, ExportPackageFact: act.exportPackageFact, - AllObjectFacts: act.allObjectFacts, - AllPackageFacts: act.allPackageFacts, + AllObjectFacts: act.AllObjectFacts, + AllPackageFacts: act.AllPackageFacts, } - + pass.ReadFile = analysisinternal.MakeReadFile(pass) act.pass = pass - act.r.passToPkgGuard.Lock() - act.r.passToPkg[pass] = act.pkg - act.r.passToPkgGuard.Unlock() - if act.pkg.IllTyped { + act.runner.passToPkgGuard.Lock() + act.runner.passToPkg[pass] = act.Package + act.runner.passToPkgGuard.Unlock() + + act.Result, act.Err = func() (any, error) { + // NOTE(golangci-lint): // It looks like there should be !pass.Analyzer.RunDespiteErrors - // but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here, + // but govet's cgocall crashes on it. + // Govet itself contains !pass.Analyzer.RunDespiteErrors condition here, // but it exits before it if packages.Load have failed. - act.err = fmt.Errorf("analysis skipped: %w", &pkgerrors.IllTypedError{Pkg: act.pkg}) - } else { - startedAt = time.Now() + if act.Package.IllTyped { + return nil, fmt.Errorf("analysis skipped: %w", &pkgerrors.IllTypedError{Pkg: act.Package}) + } + + t1 := time.Now() - act.result, act.err = pass.Analyzer.Run(pass) + result, err := pass.Analyzer.Run(pass) + if err != nil { + return nil, err + } - analyzedIn := time.Since(startedAt) - if analyzedIn > time.Millisecond*10 { + analyzedIn := time.Since(t1) + if analyzedIn > 10*time.Millisecond { debugf("%s: run analyzer in %s", act, analyzedIn) } - } - // disallow calls after Run + // correct result type? + if got, want := reflect.TypeOf(result), pass.Analyzer.ResultType; got != want { + return nil, fmt.Errorf( + "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v", + pass.Pkg.Path(), pass.Analyzer, got, want) + } + + // resolve diagnostic URLs + for i := range act.Diagnostics { + url, err := analysisflags.ResolveURL(act.Analyzer, act.Diagnostics[i]) + if err != nil { + return nil, err + } + act.Diagnostics[i].URL = url + } + return result, nil + }() + + // Help detect (disallowed) calls after Run. pass.ExportObjectFact = nil pass.ExportPackageFact = nil err := act.persistFactsToCache() if err != nil { - act.r.log.Warnf("Failed to persist facts to cache: %s", err) + act.runner.log.Warnf("Failed to persist facts to cache: %s", err) } } -// NOTE(ldez) altered: logger; serialize. +// NOTE(ldez) altered: logger; sanityCheck. // inheritFacts populates act.facts with // those it obtains from its dependency, dep. 
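+ // For context, a minimal sketch of the analyzer-side go/analysis API these
+ // fact hooks back (assumed usage, not part of this change):
+ //
+ //	type isPure struct{} // facts must be pointer types and gob-encodable
+ //	func (*isPure) AFact() {}
+ //
+ //	// in the analyzer: FactTypes: []analysis.Fact{(*isPure)(nil)}
+ //	var fact isPure
+ //	if pass.ImportObjectFact(obj, &fact) {
+ //		// obj was marked pure by a pass on a dependency package
+ //	}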
func inheritFacts(act, dep *action) { - const serialize = false + const sanityCheck = false for key, fact := range dep.objectFacts { // Filter out facts related to objects // that are irrelevant downstream // (equivalently: not in the compiler export data). - if !exportedFrom(key.obj, dep.pkg.Types) { + if !exportedFrom(key.obj, dep.Package.Types) { factsInheritDebugf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact) continue } // Optionally serialize/deserialize fact // to verify that it works across address spaces. - if serialize { + if sanityCheck { encodedFact, err := codeFact(fact) if err != nil { - act.r.log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err) + act.runner.log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err) } fact = encodedFact } @@ -207,14 +251,26 @@ func inheritFacts(act, dep *action) { // TODO: filter out facts that belong to // packages not mentioned in the export data // to prevent side channels. + // + // The Pass.All{Object,Package}Facts accessors expose too much: + // all facts, of all types, for all dependencies in the action + // graph. Not only does the representation grow quadratically, + // but it violates the separate compilation paradigm, allowing + // analysis implementations to communicate with indirect + // dependencies that are not mentioned in the export data. + // + // It's not clear how to fix this short of a rather expensive + // filtering step after each action that enumerates all the + // objects that would appear in export data, and deletes + // facts associated with objects not in this set. // Optionally serialize/deserialize fact // to verify that it works across address spaces // and is deterministic. - if serialize { + if sanityCheck { encodedFact, err := codeFact(fact) if err != nil { - act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + act.runner.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) } fact = encodedFact } @@ -225,7 +281,7 @@ func inheritFacts(act, dep *action) { } } -// NOTE(ldez) no alteration. +// NOTE(ldez) altered: `new` is renamed to `newFact`. // codeFact encodes then decodes a fact, // just to exercise that logic. func codeFact(fact analysis.Fact) (analysis.Fact, error) { @@ -259,7 +315,7 @@ func codeFact(fact analysis.Fact) (analysis.Fact, error) { // This includes not just the exported members of pkg, but also unexported // constants, types, fields, and methods, perhaps belonging to other packages, // that find there way into the API. -// This is an over-approximation of the more accurate approach used by +// This is an overapproximation of the more accurate approach used by // gc export data, which walks the type graph, but it's much simpler. // // TODO(adonovan): do more accurate filtering by walking the type graph. @@ -282,11 +338,14 @@ func exportedFrom(obj types.Object, pkg *types.Package) bool { return false // Nil, Builtin, Label, or PkgName } -// NOTE(ldez) altered: logger; `act.factType` -// importObjectFact implements Pass.ImportObjectFact. -// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, -// importObjectFact copies the fact value to *ptr. -func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool { +// NOTE(ldez) altered: logger; `act.factType`. +// ObjectFact retrieves a fact associated with obj, +// and returns true if one was found. 
+// Given a value ptr of type *T, where *T satisfies Fact, +// ObjectFact copies the value to *ptr. +// +// See documentation at ImportObjectFact field of [analysis.Pass]. +func (act *action) ObjectFact(obj types.Object, ptr analysis.Fact) bool { if obj == nil { panic("nil object") } @@ -298,12 +357,16 @@ func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool { return false } -// NOTE(ldez) altered: removes code related to `act.pass.ExportPackageFact`; logger; `act.factType`. +// NOTE(ldez) altered: logger; `act.factType`. // exportObjectFact implements Pass.ExportObjectFact. func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { - if obj.Pkg() != act.pkg.Types { - act.r.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", - act.a, act.pkg, obj, fact) + if act.pass.ExportObjectFact == nil { + act.runner.log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact) + } + + if obj.Pkg() != act.Package.Types { + act.runner.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", + act.Analyzer, act.Package, obj, fact) } key := objectFactKey{obj, act.factType(fact)} @@ -312,12 +375,16 @@ func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { objstr := types.ObjectString(obj, (*types.Package).Name) factsExportDebugf("%s: object %s has fact %s\n", - act.pkg.Fset.Position(obj.Pos()), objstr, fact) + act.Package.Fset.Position(obj.Pos()), objstr, fact) } } // NOTE(ldez) no alteration. -func (act *action) allObjectFacts() []analysis.ObjectFact { +// AllObjectFacts returns a new slice containing all object facts of +// the analysis's FactTypes in unspecified order. +// +// See documentation at AllObjectFacts field of [analysis.Pass]. +func (act *action) AllObjectFacts() []analysis.ObjectFact { facts := make([]analysis.ObjectFact, 0, len(act.objectFacts)) for k := range act.objectFacts { facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]}) @@ -325,11 +392,12 @@ func (act *action) allObjectFacts() []analysis.ObjectFact { return facts } -// NOTE(ldez) altered: `act.factType` -// importPackageFact implements Pass.ImportPackageFact. -// Given a non-nil pointer ptr of type *T, where *T satisfies Fact, -// fact copies the fact value to *ptr. -func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool { +// NOTE(ldez) altered: `act.factType`. +// PackageFact retrieves a fact associated with package pkg, +// which must be this package or one of its dependencies. +// +// See documentation at ImportObjectFact field of [analysis.Pass]. +func (act *action) PackageFact(pkg *types.Package, ptr analysis.Fact) bool { if pkg == nil { panic("nil package") } @@ -341,30 +409,38 @@ func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool return false } -// NOTE(ldez) altered: removes code related to `act.pass.ExportPackageFact`; logger; `act.factType`. +// NOTE(ldez) altered: logger; `act.factType`. // exportPackageFact implements Pass.ExportPackageFact. 
func (act *action) exportPackageFact(fact analysis.Fact) { + if act.pass.ExportPackageFact == nil { + act.runner.log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact) + } + key := packageFactKey{act.pass.Pkg, act.factType(fact)} act.packageFacts[key] = fact // clobber any existing entry factsDebugf("%s: package %s has fact %s\n", - act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact) + act.Package.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact) } // NOTE(ldez) altered: add receiver to handle logs. func (act *action) factType(fact analysis.Fact) reflect.Type { t := reflect.TypeOf(fact) if t.Kind() != reflect.Ptr { - act.r.log.Fatalf("invalid Fact type: got %T, want pointer", fact) + act.runner.log.Fatalf("invalid Fact type: got %T, want pointer", fact) } return t } // NOTE(ldez) no alteration. -func (act *action) allPackageFacts() []analysis.PackageFact { +// AllPackageFacts returns a new slice containing all package +// facts of the analysis's FactTypes in unspecified order. +// +// See documentation at AllPackageFacts field of [analysis.Pass]. +func (act *action) AllPackageFacts() []analysis.PackageFact { facts := make([]analysis.PackageFact, 0, len(act.packageFacts)) - for k := range act.packageFacts { - facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]}) + for k, fact := range act.packageFacts { + facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: fact}) } return facts } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go index 44d6769586..fca4b8c3ad 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go @@ -67,7 +67,7 @@ func (lp *loadingPackage) analyze(loadMode LoadMode, loadSem chan struct{}) { // Unblock depending on actions and propagate error. 
for _, act := range lp.actions { close(act.analysisDoneCh) - act.err = werr + act.Err = werr } return } @@ -125,13 +125,14 @@ func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error { pkg.IllTyped = true pkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Instances: make(map[*ast.Ident]types.Instance), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + Types: make(map[ast.Expr]types.TypeAndValue), + Instances: make(map[*ast.Ident]types.Instance), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + Scopes: make(map[ast.Node]*types.Scope), + FileVersions: make(map[*ast.File]string), } importer := func(path string) (*types.Package, error) { @@ -363,12 +364,12 @@ func (lp *loadingPackage) decUse(canClearTypes bool) { pass.ImportPackageFact = nil pass.ExportPackageFact = nil act.pass = nil - act.deps = nil - if act.result != nil { + act.Deps = nil + if act.Result != nil { if isMemoryDebug { - debugf("%s: decUse: nilling act result of size %d bytes", act, sizeOfValueTreeBytes(act.result)) + debugf("%s: decUse: nilling act result of size %d bytes", act, sizeOfValueTreeBytes(act.Result)) } - act.result = nil + act.Result = nil } } @@ -399,7 +400,7 @@ func (lp *loadingPackage) decUse(canClearTypes bool) { for _, act := range lp.actions { if !lp.isInitial { - act.pkg = nil + act.Package = nil } act.packageFacts = nil act.objectFacts = nil diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go index a9aee03a2b..3a9a35dec1 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go @@ -2,6 +2,8 @@ package goanalysis import ( "fmt" + "go/token" + "strings" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" @@ -81,6 +83,7 @@ func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Iss func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) string) []result.Issue { var issues []result.Issue + for i := range diags { diag := &diags[i] linterName := linterNameBuilder(diag) @@ -92,11 +95,43 @@ func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) st text = fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message) } + var suggestedFixes []analysis.SuggestedFix + + for _, sf := range diag.SuggestedFixes { + // Skip suggested fixes on cgo files. + // The related error is: "diff has out-of-bounds edits" + // This is a temporary workaround. + if !strings.HasSuffix(diag.File.Name(), ".go") { + continue + } + + nsf := analysis.SuggestedFix{Message: sf.Message} + + for _, edit := range sf.TextEdits { + end := edit.End + + if !end.IsValid() { + end = edit.Pos + } + + // To be applied the positions need to be "adjusted" based on the file. + // This is the difference between the "displayed" positions and "effective" positions. 
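+ // Illustrative reading (assumed, based on the comment above): edit.Pos and
+ // edit.End are positions within the shared token.FileSet, while the fixer
+ // needs positions relative to the file itself; diag.File.Offset converts
+ // each Pos to a byte offset within the file, which is then carried below as
+ // a token.Pos addressing the bytes the fix must edit.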
+ nsf.TextEdits = append(nsf.TextEdits, analysis.TextEdit{ + Pos: token.Pos(diag.File.Offset(edit.Pos)), + End: token.Pos(diag.File.Offset(end)), + NewText: edit.NewText, + }) + } + + suggestedFixes = append(suggestedFixes, nsf) + } + issues = append(issues, result.Issue{ - FromLinter: linterName, - Text: text, - Pos: diag.Position, - Pkg: diag.Pkg, + FromLinter: linterName, + Text: text, + Pos: diag.Position, + Pkg: diag.Pkg, + SuggestedFixes: suggestedFixes, }) if len(diag.Related) > 0 { diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go index 8c244688b4..4366155b02 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go @@ -48,7 +48,7 @@ func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages. Severity: i.Severity, Pos: i.Pos, LineRange: i.LineRange, - Replacement: i.Replacement, + SuggestedFixes: i.SuggestedFixes, ExpectNoLint: i.ExpectNoLint, ExpectedNoLintLinter: i.ExpectedNoLintLinter, }) @@ -123,7 +123,7 @@ func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context, Severity: issue.Severity, Pos: issue.Pos, LineRange: issue.LineRange, - Replacement: issue.Replacement, + SuggestedFixes: issue.SuggestedFixes, Pkg: pkg, ExpectNoLint: issue.ExpectNoLint, ExpectedNoLintLinter: issue.ExpectedNoLintLinter, diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/formatters.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/formatters.go new file mode 100644 index 0000000000..c8953ad3bf --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/formatters.go @@ -0,0 +1,6 @@ +package goformatters + +type Formatter interface { + Name() string + Format(filename string, src []byte) ([]byte, error) +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/gci.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/gci.go new file mode 100644 index 0000000000..bae39f3cee --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gci/gci.go @@ -0,0 +1,48 @@ +package gci + +import ( + gcicfg "github.com/daixiang0/gci/pkg/config" + "github.com/daixiang0/gci/pkg/gci" + "github.com/ldez/grignotin/gomod" + + "github.com/golangci/golangci-lint/pkg/config" +) + +const Name = "gci" + +type Formatter struct { + config *gcicfg.Config +} + +func New(cfg config.GciSettings) (*Formatter, error) { + modPath, err := gomod.GetModulePath() + if err != nil { + return nil, err + } + + parsedCfg, err := gcicfg.YamlConfig{ + Cfg: gcicfg.BoolConfig{ + NoInlineComments: cfg.NoInlineComments, + NoPrefixComments: cfg.NoPrefixComments, + SkipGenerated: cfg.SkipGenerated, + CustomOrder: cfg.CustomOrder, + NoLexOrder: cfg.NoLexOrder, + }, + SectionStrings: cfg.Sections, + ModPath: modPath, + }.Parse() + if err != nil { + return nil, err + } + + return &Formatter{config: parsedCfg}, nil +} + +func (*Formatter) Name() string { + return Name +} + +func (f *Formatter) Format(filename string, src []byte) ([]byte, error) { + _, formatted, err := gci.LoadFormat(src, filename, *f.config) + return formatted, err +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofmt/gofmt.go 
b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofmt/gofmt.go new file mode 100644 index 0000000000..fe2e355590 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofmt/gofmt.go @@ -0,0 +1,35 @@ +package gofmt + +import ( + "github.com/golangci/gofmt/gofmt" + + "github.com/golangci/golangci-lint/pkg/config" +) + +const Name = "gofmt" + +type Formatter struct { + options gofmt.Options +} + +func New(cfg config.GoFmtSettings) *Formatter { + var rewriteRules []gofmt.RewriteRule + for _, rule := range cfg.RewriteRules { + rewriteRules = append(rewriteRules, gofmt.RewriteRule(rule)) + } + + return &Formatter{ + options: gofmt.Options{ + NeedSimplify: cfg.Simplify, + RewriteRules: rewriteRules, + }, + } +} + +func (*Formatter) Name() string { + return Name +} + +func (f *Formatter) Format(filename string, src []byte) ([]byte, error) { + return gofmt.Source(filename, src, f.options) +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofumpt/gofumpt.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofumpt/gofumpt.go new file mode 100644 index 0000000000..f4605e2333 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/gofumpt/gofumpt.go @@ -0,0 +1,43 @@ +package gofumpt + +import ( + "strings" + + gofumpt "mvdan.cc/gofumpt/format" + + "github.com/golangci/golangci-lint/pkg/config" +) + +const Name = "gofumpt" + +type Formatter struct { + options gofumpt.Options +} + +func New(cfg config.GofumptSettings, goVersion string) *Formatter { + return &Formatter{ + options: gofumpt.Options{ + LangVersion: getLangVersion(goVersion), + ModulePath: cfg.ModulePath, + ExtraRules: cfg.ExtraRules, + }, + } +} + +func (*Formatter) Name() string { + return Name +} + +func (f *Formatter) Format(_ string, src []byte) ([]byte, error) { + return gofumpt.Source(src, f.options) +} + +// modified copy of pkg/golinters/gofumpt/gofumpt.go +func getLangVersion(v string) string { + if v == "" { + // TODO: defaults to "1.15", in the future (v2) must be removed. + return "go1.15" + } + + return "go" + strings.TrimPrefix(v, "go") +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/goimports/goimports.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/goimports/goimports.go new file mode 100644 index 0000000000..add9eb3a61 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/goimports/goimports.go @@ -0,0 +1,22 @@ +package goimports + +import ( + "golang.org/x/tools/imports" +) + +const Name = "goimports" + +type Formatter struct{} + +func New() *Formatter { + return &Formatter{} +} + +func (*Formatter) Name() string { + return Name +} + +func (*Formatter) Format(filename string, src []byte) ([]byte, error) { + // The `imports.LocalPrefix` (`settings.LocalPrefixes`) is a global var. 
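+ // With a nil *imports.Options, imports.Process falls back to its defaults
+ // (Comments, TabIndent, and TabWidth=8 — assumed from the x/tools default),
+ // so the call below is roughly shorthand for:
+ //
+ //	imports.Process(filename, src, &imports.Options{
+ //		Comments: true, TabIndent: true, TabWidth: 8,
+ //	})
+ //
+ // Grouping of local imports is driven only by the package-level
+ // imports.LocalPrefix variable mentioned above.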
+ return imports.Process(filename, src, nil) +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/meta_formatter.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/meta_formatter.go new file mode 100644 index 0000000000..e07b3aad80 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/goformatters/meta_formatter.go @@ -0,0 +1,74 @@ +package goformatters + +import ( + "bytes" + "fmt" + "go/format" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/goformatters/gci" + "github.com/golangci/golangci-lint/pkg/goformatters/gofmt" + "github.com/golangci/golangci-lint/pkg/goformatters/gofumpt" + "github.com/golangci/golangci-lint/pkg/goformatters/goimports" + "github.com/golangci/golangci-lint/pkg/lint/linter" + "github.com/golangci/golangci-lint/pkg/logutils" +) + +type MetaFormatter struct { + log logutils.Log + formatters []Formatter +} + +func NewMetaFormatter(log logutils.Log, cfg *config.Config, enabledLinters map[string]*linter.Config) (*MetaFormatter, error) { + m := &MetaFormatter{log: log} + + if _, ok := enabledLinters[gofmt.Name]; ok { + m.formatters = append(m.formatters, gofmt.New(cfg.LintersSettings.Gofmt)) + } + + if _, ok := enabledLinters[gofumpt.Name]; ok { + m.formatters = append(m.formatters, gofumpt.New(cfg.LintersSettings.Gofumpt, cfg.Run.Go)) + } + + if _, ok := enabledLinters[goimports.Name]; ok { + m.formatters = append(m.formatters, goimports.New()) + } + + // gci is a last because the only goal of gci is to handle imports. + if _, ok := enabledLinters[gci.Name]; ok { + formatter, err := gci.New(cfg.LintersSettings.Gci) + if err != nil { + return nil, fmt.Errorf("gci: creating formatter: %w", err) + } + + m.formatters = append(m.formatters, formatter) + } + + return m, nil +} + +func (m *MetaFormatter) Format(filename string, src []byte) []byte { + if len(m.formatters) == 0 { + data, err := format.Source(src) + if err != nil { + m.log.Warnf("(fmt) formatting file %s: %v", filename, err) + return src + } + + return data + } + + data := bytes.Clone(src) + + for _, formatter := range m.formatters { + formatted, err := formatter.Format(filename, data) + if err != nil { + m.log.Warnf("(%s) formatting file %s: %v", formatter.Name(), filename, err) + continue + } + + data = formatted + } + + return data +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go index 653a2d5142..ccc58fee40 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/asasalint/asasalint.go @@ -9,12 +9,12 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/internal" ) -func New(setting *config.AsasalintSettings) *goanalysis.Linter { +func New(settings *config.AsasalintSettings) *goanalysis.Linter { cfg := asasalint.LinterSetting{} - if setting != nil { - cfg.Exclude = setting.Exclude - cfg.NoBuiltinExclusions = !setting.UseBuiltinExclusions - cfg.IgnoreTest = setting.IgnoreTest + if settings != nil { + cfg.Exclude = settings.Exclude + cfg.NoBuiltinExclusions = !settings.UseBuiltinExclusions + cfg.IgnoreTest = settings.IgnoreTest } a, err := asasalint.NewAnalyzer(cfg) diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go 
b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go index 4ced901e8f..c6315965c4 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bidichk/bidichk.go @@ -10,42 +10,42 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(cfg *config.BiDiChkSettings) *goanalysis.Linter { +func New(settings *config.BiDiChkSettings) *goanalysis.Linter { a := bidichk.NewAnalyzer() - cfgMap := map[string]map[string]any{} - if cfg != nil { + cfg := map[string]map[string]any{} + if settings != nil { var opts []string - if cfg.LeftToRightEmbedding { + if settings.LeftToRightEmbedding { opts = append(opts, "LEFT-TO-RIGHT-EMBEDDING") } - if cfg.RightToLeftEmbedding { + if settings.RightToLeftEmbedding { opts = append(opts, "RIGHT-TO-LEFT-EMBEDDING") } - if cfg.PopDirectionalFormatting { + if settings.PopDirectionalFormatting { opts = append(opts, "POP-DIRECTIONAL-FORMATTING") } - if cfg.LeftToRightOverride { + if settings.LeftToRightOverride { opts = append(opts, "LEFT-TO-RIGHT-OVERRIDE") } - if cfg.RightToLeftOverride { + if settings.RightToLeftOverride { opts = append(opts, "RIGHT-TO-LEFT-OVERRIDE") } - if cfg.LeftToRightIsolate { + if settings.LeftToRightIsolate { opts = append(opts, "LEFT-TO-RIGHT-ISOLATE") } - if cfg.RightToLeftIsolate { + if settings.RightToLeftIsolate { opts = append(opts, "RIGHT-TO-LEFT-ISOLATE") } - if cfg.FirstStrongIsolate { + if settings.FirstStrongIsolate { opts = append(opts, "FIRST-STRONG-ISOLATE") } - if cfg.PopDirectionalIsolate { + if settings.PopDirectionalIsolate { opts = append(opts, "POP-DIRECTIONAL-ISOLATE") } - cfgMap[a.Name] = map[string]any{ + cfg[a.Name] = map[string]any{ "disallowed-runes": strings.Join(opts, ","), } } @@ -54,6 +54,6 @@ func New(cfg *config.BiDiChkSettings) *goanalysis.Linter { a.Name, "Checks for dangerous unicode character sequences", []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go index f39814edc5..c520e88db3 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/bodyclose/bodyclose.go @@ -12,7 +12,7 @@ func New() *goanalysis.Linter { return goanalysis.NewLinter( a.Name, - "checks whether HTTP response body is closed successfully", + a.Doc, []*analysis.Analyzer{a}, nil, ).WithLoadMode(goanalysis.LoadModeTypesInfo) diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go index 13baba5a69..772b5601ca 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go @@ -33,5 +33,5 @@ func New(settings *config.Cyclop) *goanalysis.Linter { a.Doc, []*analysis.Analyzer{a}, cfg, - ).WithLoadMode(goanalysis.LoadModeTypesInfo) + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go index 49108f4f1f..afa8152fac 100644 --- 
a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled/dogsled.go @@ -1,41 +1,26 @@ package dogsled import ( - "fmt" "go/ast" - "go/token" - "sync" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "dogsled" func New(settings *config.DogsledSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues := runDogsled(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - - return nil, nil + return run(pass, settings.MaxBlankIdentifiers) }, + Requires: []*analysis.Analyzer{inspect.Analyzer}, } return goanalysis.NewLinter( @@ -43,68 +28,51 @@ func New(settings *config.DogsledSettings) *goanalysis.Linter { "Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runDogsled(pass *analysis.Pass, settings *config.DogsledSettings) []goanalysis.Issue { - var reports []goanalysis.Issue - for _, f := range pass.Files { - v := &returnsVisitor{ - maxBlanks: settings.MaxBlankIdentifiers, - f: pass.Fset, - } - - ast.Walk(v, f) - - for i := range v.issues { - reports = append(reports, goanalysis.NewIssue(&v.issues[i], pass)) - } - } - - return reports -} - -type returnsVisitor struct { - f *token.FileSet - maxBlanks int - issues []result.Issue -} - -func (v *returnsVisitor) Visit(node ast.Node) ast.Visitor { - funcDecl, ok := node.(*ast.FuncDecl) +func run(pass *analysis.Pass, maxBlanks int) (any, error) { + insp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) if !ok { - return v + return nil, nil } - if funcDecl.Body == nil { - return v + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), } - for _, expr := range funcDecl.Body.List { - assgnStmt, ok := expr.(*ast.AssignStmt) + insp.Preorder(nodeFilter, func(node ast.Node) { + funcDecl, ok := node.(*ast.FuncDecl) if !ok { - continue + return + } + + if funcDecl.Body == nil { + return } - numBlank := 0 - for _, left := range assgnStmt.Lhs { - ident, ok := left.(*ast.Ident) + for _, expr := range funcDecl.Body.List { + assgnStmt, ok := expr.(*ast.AssignStmt) if !ok { continue } - if ident.Name == "_" { - numBlank++ + + numBlank := 0 + for _, left := range assgnStmt.Lhs { + ident, ok := left.(*ast.Ident) + if !ok { + continue + } + if ident.Name == "_" { + numBlank++ + } } - } - if numBlank > v.maxBlanks { - v.issues = append(v.issues, result.Issue{ - FromLinter: linterName, - Text: fmt.Sprintf("declaration has %v blank identifiers", numBlank), - Pos: v.f.Position(assgnStmt.Pos()), - }) + if numBlank > maxBlanks { + pass.Reportf(assgnStmt.Pos(), "declaration has %v blank identifiers", numBlank) + } } - } - return v + }) + + return nil, nil } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go 
b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go index 7abcb4c4f4..d2bb3d8d84 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go @@ -54,9 +54,7 @@ func New(settings *config.DuplSettings) *goanalysis.Linter { } func runDupl(pass *analysis.Pass, settings *config.DuplSettings) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - issues, err := duplAPI.Run(fileNames, settings.Threshold) + issues, err := duplAPI.Run(internal.GetGoFileNames(pass), settings.Threshold) if err != nil { return nil, err } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go index bba4fc9e19..a2bcc34d40 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupword/dupword.go @@ -10,14 +10,14 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(setting *config.DupWordSettings) *goanalysis.Linter { +func New(settings *config.DupWordSettings) *goanalysis.Linter { a := dupword.NewAnalyzer() - cfgMap := map[string]map[string]any{} - if setting != nil { - cfgMap[a.Name] = map[string]any{ - "keyword": strings.Join(setting.Keywords, ","), - "ignore": strings.Join(setting.Ignore, ","), + cfg := map[string]map[string]any{} + if settings != nil { + cfg[a.Name] = map[string]any{ + "keyword": strings.Join(settings.Keywords, ","), + "ignore": strings.Join(settings.Ignore, ","), } } @@ -25,6 +25,6 @@ func New(setting *config.DupWordSettings) *goanalysis.Linter { a.Name, "checks for duplicate words in the source code", []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go index 9a8a2aa876..67a1b2ca8d 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck/errcheck.go @@ -2,6 +2,7 @@ package errcheck import ( "bufio" + "cmp" "fmt" "os" "os/user" @@ -90,10 +91,7 @@ func runErrCheck(lintCtx *linter.Context, pass *analysis.Pass, checker *errcheck text := "Error return value is not checked" if err.FuncName != "" { - code := err.SelectorName - if err.SelectorName == "" { - code = err.FuncName - } + code := cmp.Or(err.SelectorName, err.FuncName) text = fmt.Sprintf("Error return value of %s is not checked", internal.FormatCode(code, lintCtx.Cfg)) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go index 8389a750c6..506113d6d5 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errchkjson/errchkjson.go @@ -8,17 +8,17 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(cfg *config.ErrChkJSONSettings) *goanalysis.Linter { +func New(settings *config.ErrChkJSONSettings) *goanalysis.Linter { a := errchkjson.NewAnalyzer() - cfgMap := map[string]map[string]any{} - cfgMap[a.Name] 
= map[string]any{ + cfg := map[string]map[string]any{} + cfg[a.Name] = map[string]any{ "omit-safe": true, } - if cfg != nil { - cfgMap[a.Name] = map[string]any{ - "omit-safe": !cfg.CheckErrorFreeEncoding, - "report-no-exported": cfg.ReportNoExported, + if settings != nil { + cfg[a.Name] = map[string]any{ + "omit-safe": !settings.CheckErrorFreeEncoding, + "report-no-exported": settings.ReportNoExported, } } @@ -26,6 +26,6 @@ func New(cfg *config.ErrChkJSONSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go index 86db8552d0..14851adc28 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/errorlint/errorlint.go @@ -8,16 +8,16 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(cfg *config.ErrorLintSettings) *goanalysis.Linter { +func New(settings *config.ErrorLintSettings) *goanalysis.Linter { var opts []errorlint.Option - if cfg != nil { - ae := toAllowPairs(cfg.AllowedErrors) + if settings != nil { + ae := toAllowPairs(settings.AllowedErrors) if len(ae) > 0 { opts = append(opts, errorlint.WithAllowedErrors(ae)) } - aew := toAllowPairs(cfg.AllowedErrorsWildcard) + aew := toAllowPairs(settings.AllowedErrorsWildcard) if len(aew) > 0 { opts = append(opts, errorlint.WithAllowedWildcard(aew)) } @@ -25,14 +25,14 @@ func New(cfg *config.ErrorLintSettings) *goanalysis.Linter { a := errorlint.NewAnalyzer(opts...) - cfgMap := map[string]map[string]any{} + cfg := map[string]map[string]any{} - if cfg != nil { - cfgMap[a.Name] = map[string]any{ - "errorf": cfg.Errorf, - "errorf-multi": cfg.ErrorfMulti, - "asserts": cfg.Asserts, - "comparison": cfg.Comparison, + if settings != nil { + cfg[a.Name] = map[string]any{ + "errorf": settings.Errorf, + "errorf-multi": settings.ErrorfMulti, + "asserts": settings.Asserts, + "comparison": settings.Comparison, } } @@ -41,7 +41,7 @@ func New(cfg *config.ErrorLintSettings) *goanalysis.Linter { "errorlint is a linter for that can be used to find code "+ "that will cause problems with the error wrapping scheme introduced in Go 1.13.", []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/exptostd/exptostd.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/exptostd/exptostd.go new file mode 100644 index 0000000000..2de8ea98c2 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/exptostd/exptostd.go @@ -0,0 +1,19 @@ +package exptostd + +import ( + "github.com/ldez/exptostd" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/goanalysis" +) + +func New() *goanalysis.Linter { + a := exptostd.NewAnalyzer() + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go index 3572b60c23..3b410359d0 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go +++ 
b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/forbidigo/forbidigo.go @@ -2,40 +2,27 @@ package forbidigo import ( "fmt" - "sync" "github.com/ashanbrown/forbidigo/forbidigo" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "forbidigo" func New(settings *config.ForbidigoSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runForbidigo(pass, settings) + err := runForbidigo(pass, settings) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() return nil, nil }, } @@ -48,12 +35,10 @@ func New(settings *config.ForbidigoSettings) *goanalysis.Linter { "Forbids identifiers", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } -func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) ([]goanalysis.Issue, error) { +func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) error { options := []forbidigo.Option{ forbidigo.OptionExcludeGodocExamples(settings.ExcludeGodocExamples), // disable "//permit" directives so only "//nolint" directives matters within golangci-lint @@ -66,38 +51,39 @@ func runForbidigo(pass *analysis.Pass, settings *config.ForbidigoSettings) ([]go for _, pattern := range settings.Forbid { buffer, err := pattern.MarshalString() if err != nil { - return nil, err + return err } + patterns = append(patterns, string(buffer)) } forbid, err := forbidigo.NewLinter(patterns, options...) 
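// Each entry in settings.Forbid is either a bare regexp or the richer
// struct form; both are marshalled back to plain strings above. Two
// hypothetical configuration examples:
//
//   fmt\.Print.*
//   {p: ^spew\.Dump$, msg: do not commit debug dumps}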
if err != nil { - return nil, fmt.Errorf("failed to create linter %q: %w", linterName, err) + return fmt.Errorf("failed to create linter %q: %w", linterName, err) } - var issues []goanalysis.Issue for _, file := range pass.Files { runConfig := forbidigo.RunConfig{ Fset: pass.Fset, DebugLog: logutils.Debug(logutils.DebugKeyForbidigo), } - if settings != nil && settings.AnalyzeTypes { + + if settings.AnalyzeTypes { runConfig.TypesInfo = pass.TypesInfo } + hints, err := forbid.RunWithConfig(runConfig, file) if err != nil { - return nil, fmt.Errorf("forbidigo linter failed on file %q: %w", file.Name.String(), err) + return fmt.Errorf("forbidigo linter failed on file %q: %w", file.Name.String(), err) } for _, hint := range hints { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: hint.Position(), - Text: hint.Details(), - FromLinter: linterName, - }, pass)) + pass.Report(analysis.Diagnostic{ + Pos: hint.Pos(), + Message: hint.Details(), + }) } } - return issues, nil + return nil } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go index e43339394d..bdadcece46 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen/funlen.go @@ -1,75 +1,33 @@ package funlen import ( - "go/token" - "strings" - "sync" - "github.com/ultraware/funlen" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) -const linterName = "funlen" +type Config struct { + lineLimit int + stmtLimit int + ignoreComments bool +} func New(settings *config.FunlenSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (any, error) { - issues := runFunlen(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - - return nil, nil - }, + cfg := Config{} + if settings != nil { + cfg.lineLimit = settings.Lines + cfg.stmtLimit = settings.Statements + cfg.ignoreComments = !settings.IgnoreComments } + a := funlen.NewAnalyzer(cfg.lineLimit, cfg.stmtLimit, cfg.ignoreComments) + return goanalysis.NewLinter( - linterName, - "Tool for detection of long functions", - []*analysis.Analyzer{analyzer}, + a.Name, + a.Doc, + []*analysis.Analyzer{a}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runFunlen(pass *analysis.Pass, settings *config.FunlenSettings) []goanalysis.Issue { - var lintIssues []funlen.Message - for _, file := range pass.Files { - fileIssues := funlen.Run(file, pass.Fset, settings.Lines, settings.Statements, settings.IgnoreComments) - lintIssues = append(lintIssues, fileIssues...) 
- } - - if len(lintIssues) == 0 { - return nil - } - - issues := make([]goanalysis.Issue, len(lintIssues)) - for k, i := range lintIssues { - issues[k] = goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.Pos.Filename, - Line: i.Pos.Line, - }, - Text: strings.TrimRight(i.Message, "\n"), - FromLinter: linterName, - }, pass) - } - - return issues + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go index a9afb6c893..841ee81b0d 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gci/gci.go @@ -1,20 +1,15 @@ package gci import ( + "bytes" "fmt" - "sort" - "strings" - "sync" + "io" + "os" gcicfg "github.com/daixiang0/gci/pkg/config" "github.com/daixiang0/gci/pkg/gci" - "github.com/daixiang0/gci/pkg/io" "github.com/daixiang0/gci/pkg/log" - "github.com/daixiang0/gci/pkg/section" - "github.com/golangci/modinfo" - "github.com/hexops/gotextdiff" - "github.com/hexops/gotextdiff/myers" - "github.com/hexops/gotextdiff/span" + "github.com/shazow/go-diff/difflib" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" @@ -25,226 +20,97 @@ import ( const linterName = "gci" +type differ interface { + Diff(out io.Writer, a io.ReadSeeker, b io.ReadSeeker) error +} + func New(settings *config.GciSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue + log.InitLogger() + _ = log.L().Sync() - analyzer := &analysis.Analyzer{ + diff := difflib.New() + + a := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: goanalysis.DummyRun, - Requires: []*analysis.Analyzer{ - modinfo.Analyzer, - }, } - var cfg *gcicfg.Config - if settings != nil { - rawCfg := gcicfg.YamlConfig{ - Cfg: gcicfg.BoolConfig{ - SkipGenerated: settings.SkipGenerated, - CustomOrder: settings.CustomOrder, - NoLexOrder: settings.NoLexOrder, - }, - SectionStrings: settings.Sections, - } - - if settings.LocalPrefixes != "" { - prefix := []string{"standard", "default", fmt.Sprintf("prefix(%s)", settings.LocalPrefixes)} - rawCfg.SectionStrings = prefix - } - - var err error - cfg, err = YamlConfig{origin: rawCfg}.Parse() - if err != nil { - internal.LinterLogger.Fatalf("gci: configuration parsing: %v", err) - } - } - - var lock sync.Mutex - return goanalysis.NewLinter( linterName, - "Gci controls Go package import order and makes it always deterministic.", - []*analysis.Analyzer{analyzer}, + "Checks if code and import statements are formatted, it makes import statements always deterministic.", + []*analysis.Analyzer{a}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { - analyzer.Run = func(pass *analysis.Pass) (any, error) { - var err error - cfg.Sections, err = hackSectionList(pass, cfg) + a.Run = func(pass *analysis.Pass) (any, error) { + err := run(lintCtx, pass, settings, diff) if err != nil { return nil, err } - issues, err := runGci(pass, lintCtx, cfg, &lock) - if err != nil { - return nil, err - } - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
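+ // Sketch of the rewritten flow: each file is formatted in memory with
+ // gci.LoadFormat and, when the output differs from the input, a unified
+ // diff is built and converted into diagnostics with suggested fixes:
+ //
+ //   _, formatted, err := gci.LoadFormat(src, filename, *parsedCfg)
+ //   // input != formatted => diff -> internal.ExtractDiagnosticFromPatch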
- mu.Unlock() - return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } -func runGci(pass *analysis.Pass, lintCtx *linter.Context, cfg *gcicfg.Config, lock *sync.Mutex) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) +func run(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GciSettings, diff differ) error { + cfg := gcicfg.YamlConfig{ + Cfg: gcicfg.BoolConfig{ + NoInlineComments: settings.NoInlineComments, + NoPrefixComments: settings.NoPrefixComments, + SkipGenerated: settings.SkipGenerated, + CustomOrder: settings.CustomOrder, + NoLexOrder: settings.NoLexOrder, + }, + SectionStrings: settings.Sections, + ModPath: pass.Module.Path, + } - var diffs []string - err := diffFormattedFilesToArray(fileNames, *cfg, &diffs, lock) - if err != nil { - return nil, err + if settings.LocalPrefixes != "" { + cfg.SectionStrings = []string{ + "standard", + "default", + fmt.Sprintf("prefix(%s)", settings.LocalPrefixes), + } } - var issues []goanalysis.Issue + parsedCfg, err := cfg.Parse() + if err != nil { + return err + } - for _, diff := range diffs { - if diff == "" { + for _, file := range pass.Files { + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { continue } - is, err := internal.ExtractIssuesFromPatch(diff, lintCtx, linterName, getIssuedTextGci) + input, err := os.ReadFile(position.Filename) if err != nil { - return nil, fmt.Errorf("can't extract issues from gci diff output %s: %w", diff, err) - } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + return fmt.Errorf("unable to open file %s: %w", position.Filename, err) } - } - - return issues, nil -} - -func getIssuedTextGci(settings *config.LintersSettings) string { - text := "File is not `gci`-ed" - hasOptions := settings.Gci.SkipGenerated || len(settings.Gci.Sections) > 0 - if !hasOptions { - return text - } - - text += " with" - - if settings.Gci.SkipGenerated { - text += " --skip-generated" - } - - if len(settings.Gci.Sections) > 0 { - for _, sect := range settings.Gci.Sections { - text += " -s " + sect + _, output, err := gci.LoadFormat(input, position.Filename, *parsedCfg) + if err != nil { + return fmt.Errorf("error while running gci: %w", err) } - } - if settings.Gci.CustomOrder { - text += " --custom-order" - } + if !bytes.Equal(input, output) { + out := bytes.NewBufferString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", position.Filename)) - return text -} - -func hackSectionList(pass *analysis.Pass, cfg *gcicfg.Config) (section.SectionList, error) { - var sections section.SectionList - - for _, sect := range cfg.Sections { - // local module hack - if v, ok := sect.(*section.LocalModule); ok { - info, err := modinfo.FindModuleFromPass(pass) + err := diff.Diff(out, bytes.NewReader(input), bytes.NewReader(output)) if err != nil { - return nil, err - } - - if info.Path == "" { - continue + return fmt.Errorf("error while running gci: %w", err) } - v.Path = info.Path - } - - sections = append(sections, sect) - } - - return sections, nil -} - -// diffFormattedFilesToArray is a copy of gci.DiffFormattedFilesToArray without io.StdInGenerator. -// gci.DiffFormattedFilesToArray uses gci.processStdInAndGoFilesInPaths that uses io.StdInGenerator but stdin is not active on CI. 
-// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L63-L75 -// https://github.com/daixiang0/gci/blob/6f5cb16718ba07f0342a58de9b830ec5a6d58790/pkg/gci/gci.go#L80 -func diffFormattedFilesToArray(paths []string, cfg gcicfg.Config, diffs *[]string, lock *sync.Mutex) error { - log.InitLogger() - defer func() { _ = log.L().Sync() }() - - return gci.ProcessFiles(io.GoFilesInPathsGenerator(paths, true), cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error { - fileURI := span.URIFromPath(filePath) - edits := myers.ComputeEdits(fileURI, string(unmodifiedFile), string(formattedFile)) - unifiedEdits := gotextdiff.ToUnified(filePath, filePath, string(unmodifiedFile), edits) - lock.Lock() - *diffs = append(*diffs, fmt.Sprint(unifiedEdits)) - lock.Unlock() - return nil - }) -} - -// Code below this comment is borrowed and modified from gci. -// https://github.com/daixiang0/gci/blob/v0.13.5/pkg/config/config.go - -var defaultOrder = map[string]int{ - section.StandardType: 0, - section.DefaultType: 1, - section.CustomType: 2, - section.BlankType: 3, - section.DotType: 4, - section.AliasType: 5, - section.LocalModuleType: 6, -} - -type YamlConfig struct { - origin gcicfg.YamlConfig -} - -//nolint:gocritic // code borrowed from gci and modified to fix LocalModule section behavior. -func (g YamlConfig) Parse() (*gcicfg.Config, error) { - var err error - - sections, err := section.Parse(g.origin.SectionStrings) - if err != nil { - return nil, err - } - - if sections == nil { - sections = section.DefaultSections() - } - - // if default order sorted sections - if !g.origin.Cfg.CustomOrder { - sort.Slice(sections, func(i, j int) bool { - sectionI, sectionJ := sections[i].Type(), sections[j].Type() + diff := out.String() - if g.origin.Cfg.NoLexOrder || strings.Compare(sectionI, sectionJ) != 0 { - return defaultOrder[sectionI] < defaultOrder[sectionJ] + err = internal.ExtractDiagnosticFromPatch(pass, file, diff, lintCtx) + if err != nil { + return fmt.Errorf("can't extract issues from gci diff output %q: %w", diff, err) } - - return strings.Compare(sections[i].String(), sections[j].String()) < 0 - }) - } - - sectionSeparators, err := section.Parse(g.origin.SectionSeparatorStrings) - if err != nil { - return nil, err - } - if sectionSeparators == nil { - sectionSeparators = section.DefaultSectionSeparators() + } } - return &gcicfg.Config{BoolConfig: g.origin.Cfg, Sections: sections, SectionSeparators: sectionSeparators}, nil + return nil } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go index 9873c9ba49..6826b77b6b 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go @@ -14,18 +14,18 @@ func New(settings *config.GinkgoLinterSettings) *goanalysis.Linter { if settings != nil { cfg = &types.Config{ - SuppressLen: types.Boolean(settings.SuppressLenAssertion), - SuppressNil: types.Boolean(settings.SuppressNilAssertion), - SuppressErr: types.Boolean(settings.SuppressErrAssertion), - SuppressCompare: types.Boolean(settings.SuppressCompareAssertion), - SuppressAsync: types.Boolean(settings.SuppressAsyncAssertion), - ForbidFocus: types.Boolean(settings.ForbidFocusContainer), - SuppressTypeCompare: types.Boolean(settings.SuppressTypeCompareWarning), - 
AllowHaveLen0: types.Boolean(settings.AllowHaveLenZero), - ForceExpectTo: types.Boolean(settings.ForceExpectTo), - ValidateAsyncIntervals: types.Boolean(settings.ValidateAsyncIntervals), - ForbidSpecPollution: types.Boolean(settings.ForbidSpecPollution), - ForceSucceedForFuncs: types.Boolean(settings.ForceSucceedForFuncs), + SuppressLen: settings.SuppressLenAssertion, + SuppressNil: settings.SuppressNilAssertion, + SuppressErr: settings.SuppressErrAssertion, + SuppressCompare: settings.SuppressCompareAssertion, + SuppressAsync: settings.SuppressAsyncAssertion, + ForbidFocus: settings.ForbidFocusContainer, + SuppressTypeCompare: settings.SuppressTypeCompareWarning, + AllowHaveLen0: settings.AllowHaveLenZero, + ForceExpectTo: settings.ForceExpectTo, + ValidateAsyncIntervals: settings.ValidateAsyncIntervals, + ForbidSpecPollution: settings.ForbidSpecPollution, + ForceSucceedForFuncs: settings.ForceSucceedForFuncs, } } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go index 1345eb8c29..510a06c91d 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits/gochecknoinits.go @@ -1,46 +1,24 @@ package gochecknoinits import ( - "fmt" "go/ast" - "go/token" - "sync" "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "gochecknoinits" func New() *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ - Name: linterName, - Doc: goanalysis.TheOnlyanalyzerDoc, - Run: func(pass *analysis.Pass) (any, error) { - var res []goanalysis.Issue - for _, file := range pass.Files { - fileIssues := checkFileForInits(file, pass.Fset) - for i := range fileIssues { - res = append(res, goanalysis.NewIssue(&fileIssues[i], pass)) - } - } - if len(res) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, res...) 
- mu.Unlock() - - return nil, nil - }, + Name: linterName, + Doc: goanalysis.TheOnlyanalyzerDoc, + Run: run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, } return goanalysis.NewLinter( @@ -48,28 +26,30 @@ func New() *goanalysis.Linter { "Checks that no init functions are present in Go code", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func checkFileForInits(f *ast.File, fset *token.FileSet) []result.Issue { - var res []result.Issue - for _, decl := range f.Decls { +func run(pass *analysis.Pass) (any, error) { + insp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if !ok { + return nil, nil + } + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + } + + insp.Preorder(nodeFilter, func(decl ast.Node) { funcDecl, ok := decl.(*ast.FuncDecl) if !ok { - continue + return } fnName := funcDecl.Name.Name if fnName == "init" && funcDecl.Recv.NumFields() == 0 { - res = append(res, result.Issue{ - Pos: fset.Position(funcDecl.Pos()), - Text: fmt.Sprintf("don't use %s function", internal.FormatCode(fnName, nil)), - FromLinter: linterName, - }) + pass.Reportf(funcDecl.Pos(), "don't use %s function", internal.FormatCode(fnName, nil)) } - } + }) - return res + return nil, nil } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go index 7aab0efebb..cbc5873126 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go @@ -61,9 +61,13 @@ func runGoCheckSumType(pass *analysis.Pass, settings *config.GoChecksumTypeSetti TypesInfo: pass.TypesInfo, } + cfg := gochecksumtype.Config{ + DefaultSignifiesExhaustive: settings.DefaultSignifiesExhaustive, + IncludeSharedInterfaces: settings.IncludeSharedInterfaces, + } + var unknownError error - errors := gochecksumtype.Run([]*packages.Package{pkg}, - gochecksumtype.Config{DefaultSignifiesExhaustive: settings.DefaultSignifiesExhaustive}) + errors := gochecksumtype.Run([]*packages.Package{pkg}, cfg) for _, err := range errors { err, ok := err.(gochecksumtype.Error) if !ok { diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go index 194ea35355..087ddc1df0 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go @@ -5,7 +5,6 @@ import ( "fmt" "go/ast" "go/types" - "path/filepath" "reflect" "runtime" "slices" @@ -23,7 +22,6 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "gocritic" @@ -34,9 +32,6 @@ var ( ) func New(settings *config.GoCriticSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - wrapper := &goCriticWrapper{ sizes: types.SizesFor("gc", runtime.GOARCH), } @@ -45,19 +40,11 @@ func New(settings *config.GoCriticSettings) *goanalysis.Linter { Name: linterName, Doc: 
goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := wrapper.run(pass) + err := wrapper.run(pass) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - return nil, nil }, } @@ -75,9 +62,6 @@ Dynamic rules are written declaratively with AST patterns, filters, report messa wrapper.init(context.Log, settings) }). - WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }). WithLoadMode(goanalysis.LoadModeTypesInfo) } @@ -111,9 +95,9 @@ func (w *goCriticWrapper) init(logger logutils.Log, settings *config.GoCriticSet w.settingsWrapper = settingsWrapper } -func (w *goCriticWrapper) run(pass *analysis.Pass) ([]goanalysis.Issue, error) { +func (w *goCriticWrapper) run(pass *analysis.Pass) error { if w.settingsWrapper == nil { - return nil, errors.New("the settings wrapper is nil") + return errors.New("the settings wrapper is nil") } linterCtx := gocriticlinter.NewContext(pass.Fset, w.sizes) @@ -122,19 +106,14 @@ func (w *goCriticWrapper) run(pass *analysis.Pass) ([]goanalysis.Issue, error) { enabledCheckers, err := w.buildEnabledCheckers(linterCtx) if err != nil { - return nil, err + return err } linterCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg) - pkgIssues := runOnPackage(linterCtx, enabledCheckers, pass.Files) + runOnPackage(pass, enabledCheckers, pass.Files) - issues := make([]goanalysis.Issue, 0, len(pkgIssues)) - for i := range pkgIssues { - issues = append(issues, goanalysis.NewIssue(&pkgIssues[i], pass)) - } - - return issues, nil + return nil } func (w *goCriticWrapper) buildEnabledCheckers(linterCtx *gocriticlinter.Context) ([]*gocriticlinter.Checker, error) { @@ -214,47 +193,36 @@ func (w *goCriticWrapper) normalizeCheckerParamsValue(p any) any { } } -func runOnPackage(linterCtx *gocriticlinter.Context, checks []*gocriticlinter.Checker, files []*ast.File) []result.Issue { - var res []result.Issue +func runOnPackage(pass *analysis.Pass, checks []*gocriticlinter.Checker, files []*ast.File) { for _, f := range files { - filename := filepath.Base(linterCtx.FileSet.Position(f.Pos()).Filename) - linterCtx.SetFileInfo(filename, f) - - issues := runOnFile(linterCtx, f, checks) - res = append(res, issues...) + runOnFile(pass, f, checks) } - return res } -func runOnFile(linterCtx *gocriticlinter.Context, f *ast.File, checks []*gocriticlinter.Checker) []result.Issue { - var res []result.Issue - +func runOnFile(pass *analysis.Pass, f *ast.File, checks []*gocriticlinter.Checker) { for _, c := range checks { // All checkers are expected to use *lint.Context // as read-only structure, so no copying is required. 
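// Each warning is mapped onto the go/analysis model below: the checker
// name becomes the diagnostic Category and, when the checker supplies a
// quick fix, the [Suggestion.From, Suggestion.To) range becomes a single
// TextEdit inside one SuggestedFix.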
for _, warn := range c.Check(f) { - pos := linterCtx.FileSet.Position(warn.Pos) - issue := result.Issue{ - Pos: pos, - Text: fmt.Sprintf("%s: %s", c.Info.Name, warn.Text), - FromLinter: linterName, + diag := analysis.Diagnostic{ + Pos: warn.Pos, + Category: c.Info.Name, + Message: fmt.Sprintf("%s: %s", c.Info.Name, warn.Text), } if warn.HasQuickFix() { - issue.Replacement = &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: pos.Column - 1, - Length: int(warn.Suggestion.To - warn.Suggestion.From), - NewString: string(warn.Suggestion.Replacement), - }, - } + diag.SuggestedFixes = []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: warn.Suggestion.From, + End: warn.Suggestion.To, + NewText: warn.Suggestion.Replacement, + }}, + }} } - res = append(res, issue) + pass.Report(diag) } } - - return res } type goCriticChecks[T any] map[string]T diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go index fc51b5bb8c..3194b3d3ac 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godot/godot.go @@ -1,23 +1,18 @@ package godot import ( - "sync" + "cmp" "github.com/tetafro/godot" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "godot" func New(settings *config.GodotSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - var dotSettings godot.Settings if settings != nil { @@ -29,32 +24,22 @@ func New(settings *config.GodotSettings) *goanalysis.Linter { } // Convert deprecated setting - if settings.CheckAll { + if settings.CheckAll != nil && *settings.CheckAll { dotSettings.Scope = godot.AllScope } } - if dotSettings.Scope == "" { - dotSettings.Scope = godot.DeclScope - } + dotSettings.Scope = cmp.Or(dotSettings.Scope, godot.DeclScope) analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runGodot(pass, dotSettings) + err := runGodot(pass, dotSettings) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - return nil, nil }, } @@ -64,38 +49,40 @@ func New(settings *config.GodotSettings) *goanalysis.Linter { "Check if comments end in a period", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runGodot(pass *analysis.Pass, settings godot.Settings) ([]goanalysis.Issue, error) { - var lintIssues []godot.Issue +func runGodot(pass *analysis.Pass, settings godot.Settings) error { for _, file := range pass.Files { iss, err := godot.Run(file, pass.Fset, settings) if err != nil { - return nil, err + return err } - lintIssues = append(lintIssues, iss...) 
- } - - if len(lintIssues) == 0 { - return nil, nil - } - issues := make([]goanalysis.Issue, len(lintIssues)) - for k, i := range lintIssues { - issue := result.Issue{ - Pos: i.Pos, - Text: i.Message, - FromLinter: linterName, - Replacement: &result.Replacement{ - NewLines: []string{i.Replacement}, - }, + if len(iss) == 0 { + continue } - issues[k] = goanalysis.NewIssue(&issue, pass) + f := pass.Fset.File(file.Pos()) + + for _, i := range iss { + start := f.Pos(i.Pos.Offset) + end := goanalysis.EndOfLinePos(f, i.Pos.Line) + + pass.Report(analysis.Diagnostic{ + Pos: start, + End: end, + Message: i.Message, + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: start, + End: end, + NewText: []byte(i.Replacement), + }}, + }}, + }) + } } - return issues, nil + return nil } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go index d8de026baf..181e0a73ab 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox/godox.go @@ -3,36 +3,22 @@ package godox import ( "go/token" "strings" - "sync" "github.com/matoous/godox" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "godox" func New(settings *config.GodoxSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues := runGodox(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() + runGodox(pass, settings) return nil, nil }, @@ -43,33 +29,30 @@ func New(settings *config.GodoxSettings) *goanalysis.Linter { "Tool for detection of FIXME, TODO and other comment keywords", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runGodox(pass *analysis.Pass, settings *config.GodoxSettings) []goanalysis.Issue { - var messages []godox.Message +func runGodox(pass *analysis.Pass, settings *config.GodoxSettings) { for _, file := range pass.Files { - messages = append(messages, godox.Run(file, pass.Fset, settings.Keywords...)...) - } - - if len(messages) == 0 { - return nil + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + continue + } + + messages := godox.Run(file, pass.Fset, settings.Keywords...) 
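+ // The report position is rebased onto the pass fileset: LineStart yields
+ // the start of the physical line (AdjustPos appears to reconcile raw and
+ // //line-directive-adjusted line numbers) and the message column is then
+ // added as a byte offset, roughly pos = LineStart(line) + Pos(column).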
+ if len(messages) == 0 { + continue + } + + nonAdjPosition := pass.Fset.PositionFor(file.Pos(), false) + + ft := pass.Fset.File(file.Pos()) + + for _, i := range messages { + pass.Report(analysis.Diagnostic{ + Pos: ft.LineStart(goanalysis.AdjustPos(i.Pos.Line, nonAdjPosition.Line, position.Line)) + token.Pos(i.Pos.Column), + Message: strings.TrimRight(i.Message, "\n"), + }) + } } - - issues := make([]goanalysis.Issue, len(messages)) - - for k, i := range messages { - issues[k] = goanalysis.NewIssue(&result.Issue{ - Pos: token.Position{ - Filename: i.Pos.Filename, - Line: i.Pos.Line, - }, - Text: strings.TrimRight(i.Message, "\n"), - FromLinter: linterName, - }, pass) - } - - return issues } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go index 289ceab8ae..b6531d5314 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt/gofmt.go @@ -2,7 +2,6 @@ package gofmt import ( "fmt" - "sync" gofmtAPI "github.com/golangci/gofmt/gofmt" "golang.org/x/tools/go/analysis" @@ -16,9 +15,6 @@ import ( const linterName = "gofmt" func New(settings *config.GoFmtSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, @@ -27,72 +23,46 @@ func New(settings *config.GoFmtSettings) *goanalysis.Linter { return goanalysis.NewLinter( linterName, - "Gofmt checks whether code was gofmt-ed. By default "+ - "this tool runs with -s option to check for code simplification", + "Checks if the code is formatted according to 'gofmt' command.", []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { analyzer.Run = func(pass *analysis.Pass) (any, error) { - issues, err := runGofmt(lintCtx, pass, settings) + err := runGofmt(lintCtx, pass, settings) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } -func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoFmtSettings) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - +func runGofmt(lintCtx *linter.Context, pass *analysis.Pass, settings *config.GoFmtSettings) error { var rewriteRules []gofmtAPI.RewriteRule for _, rule := range settings.RewriteRules { rewriteRules = append(rewriteRules, gofmtAPI.RewriteRule(rule)) } - var issues []goanalysis.Issue + for _, file := range pass.Files { + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + continue + } - for _, f := range fileNames { - diff, err := gofmtAPI.RunRewrite(f, settings.Simplify, rewriteRules) + diff, err := gofmtAPI.RunRewrite(position.Filename, settings.Simplify, rewriteRules) if err != nil { // TODO: skip - return nil, err + return err } if diff == nil { continue } - is, err := internal.ExtractIssuesFromPatch(string(diff), lintCtx, linterName, getIssuedTextGoFmt) + err = internal.ExtractDiagnosticFromPatch(pass, file, string(diff), lintCtx) if err != nil { - return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err) + return fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err) } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } - } - - return issues, nil -} - -func getIssuedTextGoFmt(settings *config.LintersSettings) string { - text := "File is not `gofmt`-ed" - if settings.Gofmt.Simplify { - text += " with `-s`" - } - for _, rule := range settings.Gofmt.RewriteRules { - text += fmt.Sprintf(" `-r '%s -> %s'`", rule.Pattern, rule.Replacement) } - return text + return nil } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go index 3bb7df12e2..7a11a9074d 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofumpt/gofumpt.go @@ -6,7 +6,6 @@ import ( "io" "os" "strings" - "sync" "github.com/shazow/go-diff/difflib" "golang.org/x/tools/go/analysis" @@ -25,9 +24,6 @@ type differ interface { } func New(settings *config.GofumptSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - diff := difflib.New() var options format.Options @@ -48,68 +44,56 @@ func New(settings *config.GofumptSettings) *goanalysis.Linter { return goanalysis.NewLinter( linterName, - "Gofumpt checks whether code was gofumpt-ed.", + "Checks if code and import statements are formatted, with additional rules.", []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { analyzer.Run = func(pass *analysis.Pass) (any, error) { - issues, err := runGofumpt(lintCtx, pass, diff, options) + err := runGofumpt(lintCtx, pass, diff, options) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } -func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, options format.Options) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - var issues []goanalysis.Issue +func runGofumpt(lintCtx *linter.Context, pass *analysis.Pass, diff differ, options format.Options) error { + for _, file := range pass.Files { + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + continue + } - for _, f := range fileNames { - input, err := os.ReadFile(f) + input, err := os.ReadFile(position.Filename) if err != nil { - return nil, fmt.Errorf("unable to open file %s: %w", f, err) + return fmt.Errorf("unable to open file %s: %w", position.Filename, err) } output, err := format.Source(input, options) if err != nil { - return nil, fmt.Errorf("error while running gofumpt: %w", err) + return fmt.Errorf("error while running gofumpt: %w", err) } if !bytes.Equal(input, output) { - out := bytes.NewBufferString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", f)) + out := bytes.NewBufferString(fmt.Sprintf("--- %[1]s\n+++ %[1]s\n", position.Filename)) err := diff.Diff(out, bytes.NewReader(input), bytes.NewReader(output)) if err != nil { - return nil, fmt.Errorf("error while running gofumpt: %w", err) + return fmt.Errorf("error while running gofumpt: %w", err) } diff := out.String() - is, err := internal.ExtractIssuesFromPatch(diff, lintCtx, linterName, getIssuedTextGoFumpt) - if err != nil { - return nil, fmt.Errorf("can't extract issues from gofumpt diff output %q: %w", diff, err) - } - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + err = internal.ExtractDiagnosticFromPatch(pass, file, diff, lintCtx) + if err != nil { + return fmt.Errorf("can't extract issues from gofumpt diff output %q: %w", diff, err) } } } - return issues, nil + return nil } func getLangVersion(settings *config.GofumptSettings) string { @@ -120,13 +104,3 @@ func getLangVersion(settings *config.GofumptSettings) string { return "go" + strings.TrimPrefix(settings.LangVersion, "go") } - -func getIssuedTextGoFumpt(settings *config.LintersSettings) string { - text := "File is not `gofumpt`-ed" - - if settings.Gofumpt.ExtraRules { - text += " with `-extra`" - } - - return text -} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go index c6b1aae6be..5043381143 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go @@ -2,23 +2,18 @@ package goheader import ( "go/token" - "sync" + "strings" goheader "github.com/denis-tingaikin/go-header" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "goheader" func New(settings *config.GoHeaderSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - conf := &goheader.Configuration{} if settings != nil { conf = &goheader.Configuration{ @@ -32,19 +27,11 @@ func New(settings *config.GoHeaderSettings) *goanalysis.Linter { Name: linterName, Doc: 
goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runGoHeader(pass, conf) + err := runGoHeader(pass, conf) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - return nil, nil }, } @@ -54,62 +41,91 @@ func New(settings *config.GoHeaderSettings) *goanalysis.Linter { "Checks if file header matches to pattern", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) ([]goanalysis.Issue, error) { +func runGoHeader(pass *analysis.Pass, conf *goheader.Configuration) error { if conf.TemplatePath == "" && conf.Template == "" { // User did not pass template, so then do not run go-header linter - return nil, nil + return nil } template, err := conf.GetTemplate() if err != nil { - return nil, err + return err } values, err := conf.GetValues() if err != nil { - return nil, err + return err } a := goheader.New(goheader.WithTemplate(template), goheader.WithValues(values)) - var issues []goanalysis.Issue for _, file := range pass.Files { - path := pass.Fset.Position(file.Pos()).Filename + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + continue + } + + issue := a.Analyze(&goheader.Target{File: file, Path: position.Filename}) + if issue == nil { + continue + } - i := a.Analyze(&goheader.Target{File: file, Path: path}) + f := pass.Fset.File(file.Pos()) - if i == nil { + commentLine := 1 + var offset int + + // Inspired by https://github.com/denis-tingaikin/go-header/blob/4c75a6a2332f025705325d6c71fff4616aedf48f/analyzer.go#L85-L92 + if len(file.Comments) > 0 && file.Comments[0].Pos() < file.Package { + if !strings.HasPrefix(file.Comments[0].List[0].Text, "/*") { + // When the comment is "//" there is a one character offset. + offset = 1 + } + commentLine = goanalysis.GetFilePositionFor(pass.Fset, file.Comments[0].Pos()).Line + } + + // Skip issues related to build directives. + // https://github.com/denis-tingaikin/go-header/issues/18 + if issue.Location().Position-offset < 0 { continue } - issue := result.Issue{ - Pos: token.Position{ - Line: i.Location().Line + 1, - Column: i.Location().Position, - Filename: path, - }, - Text: i.Message(), - FromLinter: linterName, + diag := analysis.Diagnostic{ + Pos: f.LineStart(issue.Location().Line+1) + token.Pos(issue.Location().Position-offset), // The position of the first divergence. + Message: issue.Message(), } - if fix := i.Fix(); fix != nil { - issue.LineRange = &result.Range{ - From: issue.Line(), - To: issue.Line() + len(fix.Actual) - 1, + if fix := issue.Fix(); fix != nil { + current := len(fix.Actual) + for _, s := range fix.Actual { + current += len(s) } - issue.Replacement = &result.Replacement{ - NeedOnlyDelete: len(fix.Expected) == 0, - NewLines: fix.Expected, + + start := f.LineStart(commentLine) + + end := start + token.Pos(current) + + header := strings.Join(fix.Expected, "\n") + "\n" + + // Adds an extra line between the package and the header. 
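+ // For example (hypothetical fix output), the suggested replacement is the
+ // expected header plus the separating blank line:
+ //
+ //   // Copyright 2025 The Example Authors.
+ //
+ //   package foo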
+ if end == file.Package { + header += "\n" } + + diag.SuggestedFixes = []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: start, + End: end, + NewText: []byte(header), + }}, + }} } - issues = append(issues, goanalysis.NewIssue(&issue, pass)) + pass.Report(diag) } - return issues, nil + return nil } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go index de965d5c85..6ddc9a75b1 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports/goimports.go @@ -2,7 +2,6 @@ package goimports import ( "fmt" - "sync" goimportsAPI "github.com/golangci/gofmt/goimports" "golang.org/x/tools/go/analysis" @@ -17,9 +16,6 @@ import ( const linterName = "goimports" func New(settings *config.GoImportsSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, @@ -28,67 +24,43 @@ func New(settings *config.GoImportsSettings) *goanalysis.Linter { return goanalysis.NewLinter( linterName, - "Check import statements are formatted according to the 'goimport' command. "+ - "Reformat imports in autofix mode.", + "Checks if the code and import statements are formatted according to the 'goimports' command.", []*analysis.Analyzer{analyzer}, nil, ).WithContextSetter(func(lintCtx *linter.Context) { imports.LocalPrefix = settings.LocalPrefixes analyzer.Run = func(pass *analysis.Pass) (any, error) { - issues, err := runGoImports(lintCtx, pass) + err := runGoImports(lintCtx, pass) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } -func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - var issues []goanalysis.Issue +func runGoImports(lintCtx *linter.Context, pass *analysis.Pass) error { + for _, file := range pass.Files { + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + continue + } - for _, f := range fileNames { - diff, err := goimportsAPI.Run(f) + diff, err := goimportsAPI.Run(position.Filename) if err != nil { // TODO: skip - return nil, err + return err } if diff == nil { continue } - is, err := internal.ExtractIssuesFromPatch(string(diff), lintCtx, linterName, getIssuedTextGoImports) + err = internal.ExtractDiagnosticFromPatch(pass, file, string(diff), lintCtx) if err != nil { - return nil, fmt.Errorf("can't extract issues from gofmt diff output %q: %w", string(diff), err) + return fmt.Errorf("can't extract issues from goimports diff output %q: %w", string(diff), err) } - - for i := range is { - issues = append(issues, goanalysis.NewIssue(&is[i], pass)) - } - } - - return issues, nil -} - -func getIssuedTextGoImports(settings *config.LintersSettings) string { - text := "File is not `goimports`-ed" - - if settings.Goimports.LocalPrefixes != "" { - text += " with -local " + settings.Goimports.LocalPrefixes } - return text + return nil } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go index 9cde7e26c6..f8f47ba2b4 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomoddirectives/gomoddirectives.go @@ -1,6 +1,7 @@ package gomoddirectives import ( + "regexp" "sync" "github.com/ldez/gomoddirectives" @@ -8,6 +9,7 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/golinters/internal" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/result" ) @@ -24,6 +26,27 @@ func New(settings *config.GoModDirectivesSettings) *goanalysis.Linter { opts.ReplaceAllowList = settings.ReplaceAllowList opts.RetractAllowNoExplanation = settings.RetractAllowNoExplanation opts.ExcludeForbidden = settings.ExcludeForbidden + opts.ToolchainForbidden = settings.ToolchainForbidden + opts.ToolForbidden = settings.ToolForbidden + opts.GoDebugForbidden = settings.GoDebugForbidden + + if settings.ToolchainPattern != "" { + exp, err := regexp.Compile(settings.ToolchainPattern) + if err != nil { + internal.LinterLogger.Fatalf("%s: invalid toolchain pattern: %v", linterName, err) + } else { + opts.ToolchainPattern = exp + } + } + + if settings.GoVersionPattern != "" { + exp, err := regexp.Compile(settings.GoVersionPattern) + if err != nil { + internal.LinterLogger.Fatalf("%s: invalid Go version pattern: %v", linterName, err) + } else { + opts.GoVersionPattern = exp + } + } } analyzer := &analysis.Analyzer{ @@ -40,7 +63,7 @@ func New(settings *config.GoModDirectivesSettings) *goanalysis.Linter { ).WithContextSetter(func(lintCtx *linter.Context) { analyzer.Run = func(pass *analysis.Pass) (any, error) { once.Do(func() { - results, err := 
gomoddirectives.Analyze(opts) + results, err := gomoddirectives.AnalyzePass(pass, opts) if err != nil { lintCtx.Log.Warnf("running %s failed: %s: "+ "if you are not using go modules it is suggested to disable this linter", linterName, err) diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go index 8f1036b0f1..8bddebc162 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gomodguard/gomodguard.go @@ -73,7 +73,7 @@ func New(settings *config.GoModGuardSettings) *goanalysis.Linter { } analyzer.Run = func(pass *analysis.Pass) (any, error) { - gomodguardIssues := processor.ProcessFiles(internal.GetFileNames(pass)) + gomodguardIssues := processor.ProcessFiles(internal.GetGoFileNames(pass)) mu.Lock() defer mu.Unlock() diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go index a5367399b8..6b46beaccf 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec/gosec.go @@ -184,7 +184,15 @@ func convertGosecGlobals(globalOptionFromConfig any, conf gosec.Config) { } for k, v := range globalOptionMap { - conf.SetGlobal(gosec.GlobalOption(k), fmt.Sprintf("%v", v)) + option := gosec.GlobalOption(k) + + // Set nosec global option only if the value is true + // https://github.com/securego/gosec/blob/v2.21.4/analyzer.go#L572 + if option == gosec.Nosec && v == false { + continue + } + + conf.SetGlobal(option, fmt.Sprintf("%v", v)) } } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go index 4f6fb80358..bf9b19f129 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan/gosmopolitan.go @@ -10,16 +10,16 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(s *config.GosmopolitanSettings) *goanalysis.Linter { +func New(settings *config.GosmopolitanSettings) *goanalysis.Linter { a := gosmopolitan.NewAnalyzer() - cfgMap := map[string]map[string]any{} - if s != nil { - cfgMap[a.Name] = map[string]any{ - "allowtimelocal": s.AllowTimeLocal, - "escapehatches": strings.Join(s.EscapeHatches, ","), - "lookattests": !s.IgnoreTests, - "watchforscripts": strings.Join(s.WatchForScripts, ","), + cfg := map[string]map[string]any{} + if settings != nil { + cfg[a.Name] = map[string]any{ + "allowtimelocal": settings.AllowTimeLocal, + "escapehatches": strings.Join(settings.EscapeHatches, ","), + "lookattests": !settings.IgnoreTests, + "watchforscripts": strings.Join(settings.WatchForScripts, ","), } } @@ -27,6 +27,6 @@ func New(s *config.GosmopolitanSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go index eb63a5d334..73bf63af73 100644 --- 
a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/govet/govet.go @@ -40,6 +40,7 @@ import ( "golang.org/x/tools/go/analysis/passes/slog" "golang.org/x/tools/go/analysis/passes/sortslice" "golang.org/x/tools/go/analysis/passes/stdmethods" + "golang.org/x/tools/go/analysis/passes/stdversion" "golang.org/x/tools/go/analysis/passes/stringintconv" "golang.org/x/tools/go/analysis/passes/structtag" "golang.org/x/tools/go/analysis/passes/testinggoroutine" @@ -50,6 +51,7 @@ import ( "golang.org/x/tools/go/analysis/passes/unsafeptr" "golang.org/x/tools/go/analysis/passes/unusedresult" "golang.org/x/tools/go/analysis/passes/unusedwrite" + "golang.org/x/tools/go/analysis/passes/waitgroup" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" @@ -89,6 +91,7 @@ var ( slog.Analyzer, sortslice.Analyzer, stdmethods.Analyzer, + stdversion.Analyzer, stringintconv.Analyzer, structtag.Analyzer, testinggoroutine.Analyzer, @@ -99,9 +102,10 @@ var ( unsafeptr.Analyzer, unusedresult.Analyzer, unusedwrite.Analyzer, + waitgroup.Analyzer, } - // https://github.com/golang/go/blob/b56645a87b28840a180d64077877cb46570b4176/src/cmd/vet/main.go#L49-L81 + // https://github.com/golang/go/blob/go1.23.0/src/cmd/vet/main.go#L55-L87 defaultAnalyzers = []*analysis.Analyzer{ appends.Analyzer, asmdecl.Analyzer, @@ -126,6 +130,7 @@ var ( sigchanyzer.Analyzer, slog.Analyzer, stdmethods.Analyzer, + stdversion.Analyzer, stringintconv.Analyzer, structtag.Analyzer, testinggoroutine.Analyzer, @@ -185,7 +190,7 @@ func isAnalyzerEnabled(name string, cfg *config.GovetSettings, defaultAnalyzers } // Keeping for backward compatibility. - if cfg.CheckShadowing && name == shadow.Analyzer.Name { + if cfg.CheckShadowing != nil && *cfg.CheckShadowing && name == shadow.Analyzer.Name { return true } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go index aa6ce1cebb..e0a3f794a7 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/grouper/grouper.go @@ -11,9 +11,9 @@ import ( func New(settings *config.GrouperSettings) *goanalysis.Linter { a := grouper.New() - linterCfg := map[string]map[string]any{} + cfg := map[string]map[string]any{} if settings != nil { - linterCfg[a.Name] = map[string]any{ + cfg[a.Name] = map[string]any{ "const-require-single-const": settings.ConstRequireSingleConst, "const-require-grouping": settings.ConstRequireGrouping, "import-require-single-import": settings.ImportRequireSingleImport, @@ -29,6 +29,6 @@ func New(settings *config.GrouperSettings) *goanalysis.Linter { a.Name, "Analyze expression groups.", []*analysis.Analyzer{a}, - linterCfg, + cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go index 45117c9a48..b7c6c35aea 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/importas/importas.go @@ -51,8 +51,11 @@ func New(settings *config.ImportAsSettings) *goanalysis.Linter { uniqPackages[a.Pkg] = a } - // skip the duplication check 
when the alias is a regular expression replacement pattern (ie. contains `$`). - if v, ok := uniqAliases[a.Alias]; ok && !strings.Contains(a.Alias, "$") { + // Skips the duplication check when: + // - the alias is empty. + // - the alias is a regular expression replacement pattern (ie. contains `$`). + v, ok := uniqAliases[a.Alias] + if ok && a.Alias != "" && !strings.Contains(a.Alias, "$") { lintCtx.Log.Errorf("invalid configuration, multiple packages with the same alias: alias=%s packages=[%s,%s]", a.Alias, a.Pkg, v.Pkg) } else { uniqAliases[a.Alias] = a diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go index f919c5b2a8..8e5e1f0e73 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/diff.go @@ -3,20 +3,22 @@ package internal import ( "bytes" "fmt" + "go/ast" "go/token" + "slices" "strings" diffpkg "github.com/sourcegraph/go-diff/diff" + "golang.org/x/tools/go/analysis" - "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/logutils" - "github.com/golangci/golangci-lint/pkg/result" ) type Change struct { - LineRange result.Range - Replacement result.Replacement + From, To int + NewLines []string } type diffLineType string @@ -27,8 +29,6 @@ const ( diffLineDeleted diffLineType = "deleted" ) -type fmtTextFormatter func(settings *config.LintersSettings) string - type diffLine struct { originalNumber int // 1-based original line number typ diffLineType @@ -44,58 +44,48 @@ type hunkChangesParser struct { log logutils.Log - lines []diffLine - - ret []Change + changes []Change } -func (p *hunkChangesParser) parseDiffLines(h *diffpkg.Hunk) { - lines := bytes.Split(h.Body, []byte{'\n'}) - currentOriginalLineNumber := int(h.OrigStartLine) - var ret []diffLine +func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change { + lines := parseDiffLines(h) - for i, line := range lines { - dl := diffLine{ - originalNumber: currentOriginalLineNumber, + for i := 0; i < len(lines); { + line := lines[i] + + if line.typ == diffLineOriginal { + p.handleOriginalLine(lines, line, &i) + continue } - lineStr := string(line) + var deletedLines []diffLine + for ; i < len(lines) && lines[i].typ == diffLineDeleted; i++ { + deletedLines = append(deletedLines, lines[i]) + } - if strings.HasPrefix(lineStr, "-") { - dl.typ = diffLineDeleted - dl.data = strings.TrimPrefix(lineStr, "-") - currentOriginalLineNumber++ - } else if strings.HasPrefix(lineStr, "+") { - dl.typ = diffLineAdded - dl.data = strings.TrimPrefix(lineStr, "+") - } else { - if i == len(lines)-1 && lineStr == "" { - // handle last \n: don't add an empty original line - break - } + var addedLines []string + for ; i < len(lines) && lines[i].typ == diffLineAdded; i++ { + addedLines = append(addedLines, lines[i].data) + } - dl.typ = diffLineOriginal - dl.data = strings.TrimPrefix(lineStr, " ") - currentOriginalLineNumber++ + if len(deletedLines) != 0 { + p.handleDeletedLines(deletedLines, addedLines) + continue } - ret = append(ret, dl) + // no deletions, only additions + p.handleAddedOnlyLines(addedLines) } - // if > 0, then the original file had a 'No newline at end of file' mark - if h.OrigNoNewlineAt > 0 { - dl := diffLine{ - originalNumber: currentOriginalLineNumber + 1, 
- typ: diffLineAdded, - data: "", - } - ret = append(ret, dl) + if len(p.replacementLinesToPrepend) != 0 { + p.log.Infof("The diff contains only additions: no original or deleted lines: %#v", lines) + return nil } - p.lines = ret + return p.changes } -func (p *hunkChangesParser) handleOriginalLine(line diffLine, i *int) { +func (p *hunkChangesParser) handleOriginalLine(lines []diffLine, line diffLine, i *int) { if len(p.replacementLinesToPrepend) == 0 { p.lastOriginalLine = &line *i++ @@ -109,51 +99,39 @@ func (p *hunkChangesParser) handleOriginalLine(line diffLine, i *int) { *i++ var followingAddedLines []string - for ; *i < len(p.lines) && p.lines[*i].typ == diffLineAdded; *i++ { - followingAddedLines = append(followingAddedLines, p.lines[*i].data) + for ; *i < len(lines) && lines[*i].typ == diffLineAdded; *i++ { + followingAddedLines = append(followingAddedLines, lines[*i].data) + } + + change := Change{ + From: line.originalNumber, + To: line.originalNumber, + NewLines: slices.Concat(p.replacementLinesToPrepend, []string{line.data}, followingAddedLines), } + p.changes = append(p.changes, change) - p.ret = append(p.ret, Change{ - LineRange: result.Range{ - From: line.originalNumber, - To: line.originalNumber, - }, - Replacement: result.Replacement{ - NewLines: append(p.replacementLinesToPrepend, append([]string{line.data}, followingAddedLines...)...), - }, - }) p.replacementLinesToPrepend = nil p.lastOriginalLine = &line } func (p *hunkChangesParser) handleDeletedLines(deletedLines []diffLine, addedLines []string) { change := Change{ - LineRange: result.Range{ - From: deletedLines[0].originalNumber, - To: deletedLines[len(deletedLines)-1].originalNumber, - }, + From: deletedLines[0].originalNumber, + To: deletedLines[len(deletedLines)-1].originalNumber, } - if len(addedLines) != 0 { - change.Replacement.NewLines = append([]string{}, p.replacementLinesToPrepend...) - change.Replacement.NewLines = append(change.Replacement.NewLines, addedLines...) - if len(p.replacementLinesToPrepend) != 0 { - p.replacementLinesToPrepend = nil - } - - p.ret = append(p.ret, change) - return - } + switch { + case len(addedLines) != 0: + change.NewLines = slices.Concat(p.replacementLinesToPrepend, addedLines) + p.replacementLinesToPrepend = nil - // delete-only change with possible prepending - if len(p.replacementLinesToPrepend) != 0 { - change.Replacement.NewLines = p.replacementLinesToPrepend + case len(p.replacementLinesToPrepend) != 0: + // delete-only change with possible prepending + change.NewLines = slices.Clone(p.replacementLinesToPrepend) p.replacementLinesToPrepend = nil - } else { - change.Replacement.NeedOnlyDelete = true } - p.ret = append(p.ret, change) + p.changes = append(p.changes, change) } func (p *hunkChangesParser) handleAddedOnlyLines(addedLines []string) { @@ -166,70 +144,92 @@ func (p *hunkChangesParser) handleAddedOnlyLines(addedLines []string) { // 2. ... 
p.replacementLinesToPrepend = addedLines + return } // add-only change merged into the last original line with possible prepending - p.ret = append(p.ret, Change{ - LineRange: result.Range{ - From: p.lastOriginalLine.originalNumber, - To: p.lastOriginalLine.originalNumber, - }, - Replacement: result.Replacement{ - NewLines: append(p.replacementLinesToPrepend, append([]string{p.lastOriginalLine.data}, addedLines...)...), - }, - }) + change := Change{ + From: p.lastOriginalLine.originalNumber, + To: p.lastOriginalLine.originalNumber, + NewLines: slices.Concat(p.replacementLinesToPrepend, []string{p.lastOriginalLine.data}, addedLines), + } + + p.changes = append(p.changes, change) + p.replacementLinesToPrepend = nil } -func (p *hunkChangesParser) parse(h *diffpkg.Hunk) []Change { - p.parseDiffLines(h) +func parseDiffLines(h *diffpkg.Hunk) []diffLine { + lines := bytes.Split(h.Body, []byte{'\n'}) - for i := 0; i < len(p.lines); { - line := p.lines[i] - if line.typ == diffLineOriginal { - p.handleOriginalLine(line, &i) - continue - } + currentOriginalLineNumber := int(h.OrigStartLine) - var deletedLines []diffLine - for ; i < len(p.lines) && p.lines[i].typ == diffLineDeleted; i++ { - deletedLines = append(deletedLines, p.lines[i]) + var diffLines []diffLine + + for i, line := range lines { + dl := diffLine{ + originalNumber: currentOriginalLineNumber, } - var addedLines []string - for ; i < len(p.lines) && p.lines[i].typ == diffLineAdded; i++ { - addedLines = append(addedLines, p.lines[i].data) + if i == len(lines)-1 && len(line) == 0 { + // handle last \n: don't add an empty original line + break } - if len(deletedLines) != 0 { - p.handleDeletedLines(deletedLines, addedLines) - continue + lineStr := string(line) + + switch { + case strings.HasPrefix(lineStr, "-"): + dl.typ = diffLineDeleted + dl.data = strings.TrimPrefix(lineStr, "-") + currentOriginalLineNumber++ + + case strings.HasPrefix(lineStr, "+"): + dl.typ = diffLineAdded + dl.data = strings.TrimPrefix(lineStr, "+") + + default: + dl.typ = diffLineOriginal + dl.data = strings.TrimPrefix(lineStr, " ") + currentOriginalLineNumber++ } - // no deletions, only additions - p.handleAddedOnlyLines(addedLines) + diffLines = append(diffLines, dl) } - if len(p.replacementLinesToPrepend) != 0 { - p.log.Infof("The diff contains only additions: no original or deleted lines: %#v", p.lines) - return nil + // if > 0, then the original file had a 'No newline at end of file' mark + if h.OrigNoNewlineAt > 0 { + dl := diffLine{ + originalNumber: currentOriginalLineNumber + 1, + typ: diffLineAdded, + data: "", + } + diffLines = append(diffLines, dl) } - return p.ret + return diffLines } -func ExtractIssuesFromPatch(patch string, lintCtx *linter.Context, linterName string, formatter fmtTextFormatter) ([]result.Issue, error) { +func ExtractDiagnosticFromPatch( + pass *analysis.Pass, + file *ast.File, + patch string, + lintCtx *linter.Context, +) error { diffs, err := diffpkg.ParseMultiFileDiff([]byte(patch)) if err != nil { - return nil, fmt.Errorf("can't parse patch: %w", err) + return fmt.Errorf("can't parse patch: %w", err) } if len(diffs) == 0 { - return nil, fmt.Errorf("got no diffs from patch parser: %v", patch) + return fmt.Errorf("got no diffs from patch parser: %v", patch) } - var issues []result.Issue + ft := pass.Fset.File(file.Pos()) + + adjLine := pass.Fset.PositionFor(file.Pos(), false).Line - pass.Fset.PositionFor(file.Pos(), true).Line + for _, d := range diffs { if len(d.Hunks) == 0 { lintCtx.Log.Warnf("Got no hunks in diff %+v", d) @@ 
-242,23 +242,34 @@ func ExtractIssuesFromPatch(patch string, lintCtx *linter.Context, linterName st changes := p.parse(hunk) for _, change := range changes { - i := result.Issue{ - FromLinter: linterName, - Pos: token.Position{ - Filename: d.NewName, - Line: change.LineRange.From, - }, - Text: formatter(lintCtx.Settings()), - Replacement: &change.Replacement, - } - if change.LineRange.From != change.LineRange.To { - i.LineRange = &change.LineRange - } - - issues = append(issues, i) + pass.Report(toDiagnostic(ft, change, adjLine)) } } } - return issues, nil + return nil +} + +func toDiagnostic(ft *token.File, change Change, adjLine int) analysis.Diagnostic { + from := change.From + adjLine + if from > ft.LineCount() { + from = ft.LineCount() + } + + start := ft.LineStart(from) + + end := goanalysis.EndOfLinePos(ft, change.To+adjLine) + + return analysis.Diagnostic{ + Pos: start, + End: end, + Message: "File is not properly formatted", + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: start, + End: end, + NewText: []byte(strings.Join(change.NewLines, "\n")), + }}, + }}, + } } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go index 80b194dd26..7525f2f2c5 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/util.go @@ -2,12 +2,12 @@ package internal import ( "fmt" - "path/filepath" "strings" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/goanalysis" ) func FormatCode(code string, _ *config.Config) string { @@ -18,16 +18,17 @@ func FormatCode(code string, _ *config.Config) string { return fmt.Sprintf("`%s`", code) } -func GetFileNames(pass *analysis.Pass) []string { - var fileNames []string +func GetGoFileNames(pass *analysis.Pass) []string { + var filenames []string + for _, f := range pass.Files { - fileName := pass.Fset.PositionFor(f.Pos(), true).Filename - ext := filepath.Ext(fileName) - if ext != "" && ext != ".go" { - // position has been adjusted to a non-go file, revert to original file - fileName = pass.Fset.PositionFor(f.Pos(), false).Filename + position, b := goanalysis.GetGoFilePosition(pass, f) + if !b { + continue } - fileNames = append(fileNames, fileName) + + filenames = append(filenames, position.Filename) } - return fileNames + + return filenames } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go index 67f89eecbd..bad3b0c4e2 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll/lll.go @@ -4,19 +4,15 @@ import ( "bufio" "errors" "fmt" - "go/token" + "go/ast" "os" "strings" - "sync" "unicode/utf8" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "lll" @@ -24,26 +20,15 @@ const linterName = "lll" const goCommentDirectivePrefix = "//go:" func New(settings *config.LllSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues 
[]goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runLll(pass, settings) + err := runLll(pass, settings) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - return nil, nil }, } @@ -53,40 +38,39 @@ func New(settings *config.LllSettings) *goanalysis.Linter { "Reports long lines", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runLll(pass *analysis.Pass, settings *config.LllSettings) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - +func runLll(pass *analysis.Pass, settings *config.LllSettings) error { spaces := strings.Repeat(" ", settings.TabWidth) - var issues []goanalysis.Issue - for _, f := range fileNames { - lintIssues, err := getLLLIssuesForFile(f, settings.LineLength, spaces) + for _, file := range pass.Files { + err := getLLLIssuesForFile(pass, file, settings.LineLength, spaces) if err != nil { - return nil, err - } - - for i := range lintIssues { - issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) + return err } } - return issues, nil + return nil } -func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]result.Issue, error) { - var res []result.Issue +func getLLLIssuesForFile(pass *analysis.Pass, file *ast.File, maxLineLen int, tabSpaces string) error { + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + return nil + } + + nonAdjPosition := pass.Fset.PositionFor(file.Pos(), false) - f, err := os.Open(filename) + f, err := os.Open(position.Filename) if err != nil { - return nil, fmt.Errorf("can't open file %s: %w", filename, err) + return fmt.Errorf("can't open file %s: %w", position.Filename, err) } + defer f.Close() + ft := pass.Fset.File(file.Pos()) + lineNumber := 0 multiImportEnabled := false @@ -116,42 +100,34 @@ func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]r lineLen := utf8.RuneCountInString(line) if lineLen > maxLineLen { - res = append(res, result.Issue{ - Pos: token.Position{ - Filename: filename, - Line: lineNumber, - }, - Text: fmt.Sprintf("the line is %d characters long, which exceeds the maximum of %d characters.", lineLen, maxLineLen), - FromLinter: linterName, + pass.Report(analysis.Diagnostic{ + Pos: ft.LineStart(goanalysis.AdjustPos(lineNumber, nonAdjPosition.Line, position.Line)), + Message: fmt.Sprintf("The line is %d characters long, which exceeds the maximum of %d characters.", + lineLen, maxLineLen), }) } } if err := scanner.Err(); err != nil { + // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize + // In the case where the specified maxLineLen is smaller than bufio.MaxScanTokenSize + // we can return this line as a long line instead of returning an error. + // The reason for this change is that this case might happen with autogenerated files + // The go-bindata tool for instance might generate a file with a very long line. + // In this case, as it's an auto generated file, the warning returned by lll will + // be ignored. 
+ // But if we return a linter error here, and this error happens for an autogenerated + // file the error will be discarded (fine), but all the subsequent errors for lll will + // be discarded for other files, and we'll miss legit error. if errors.Is(err, bufio.ErrTooLong) && maxLineLen < bufio.MaxScanTokenSize { - // scanner.Scan() might fail if the line is longer than bufio.MaxScanTokenSize - // In the case where the specified maxLineLen is smaller than bufio.MaxScanTokenSize - // we can return this line as a long line instead of returning an error. - // The reason for this change is that this case might happen with autogenerated files - // The go-bindata tool for instance might generate a file with a very long line. - // In this case, as it's an auto generated file, the warning returned by lll will - // be ignored. - // But if we return a linter error here, and this error happens for an autogenerated - // file the error will be discarded (fine), but all the subsequent errors for lll will - // be discarded for other files, and we'll miss legit error. - res = append(res, result.Issue{ - Pos: token.Position{ - Filename: filename, - Line: lineNumber, - Column: 1, - }, - Text: fmt.Sprintf("line is more than %d characters", bufio.MaxScanTokenSize), - FromLinter: linterName, + pass.Report(analysis.Diagnostic{ + Pos: ft.LineStart(goanalysis.AdjustPos(lineNumber, nonAdjPosition.Line, position.Line)), + Message: fmt.Sprintf("line is more than %d characters", bufio.MaxScanTokenSize), }) } else { - return nil, fmt.Errorf("can't scan file %s: %w", filename, err) + return fmt.Errorf("can't scan file %s: %w", position.Filename, err) } } - return res, nil + return nil } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go index 077e8a512f..84c8d73635 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/loggercheck/loggercheck.go @@ -22,6 +22,9 @@ func New(settings *config.LoggerCheckSettings) *goanalysis.Linter { if !settings.Logr { disable = append(disable, "logr") } + if !settings.Slog { + disable = append(disable, "slog") + } if !settings.Zap { disable = append(disable, "zap") } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go index 08f12369e6..799c51c874 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/maintidx/maintidx.go @@ -8,16 +8,16 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(cfg *config.MaintIdxSettings) *goanalysis.Linter { +func New(settings *config.MaintIdxSettings) *goanalysis.Linter { analyzer := maintidx.Analyzer - cfgMap := map[string]map[string]any{ + cfg := map[string]map[string]any{ analyzer.Name: {"under": 20}, } - if cfg != nil { - cfgMap[analyzer.Name] = map[string]any{ - "under": cfg.Under, + if settings != nil { + cfg[analyzer.Name] = map[string]any{ + "under": settings.Under, } } @@ -25,6 +25,6 @@ func New(cfg *config.MaintIdxSettings) *goanalysis.Linter { analyzer.Name, analyzer.Doc, []*analysis.Analyzer{analyzer}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git 
a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go index ae4bf21842..b5ab4515e5 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/makezero/makezero.go @@ -2,40 +2,26 @@ package makezero import ( "fmt" - "sync" "github.com/ashanbrown/makezero/makezero" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "makezero" func New(settings *config.MakezeroSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runMakeZero(pass, settings) + err := runMakeZero(pass, settings) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) - mu.Unlock() - return nil, nil }, } @@ -45,30 +31,25 @@ func New(settings *config.MakezeroSettings) *goanalysis.Linter { "Finds slice declarations with non-zero initial length", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } -func runMakeZero(pass *analysis.Pass, settings *config.MakezeroSettings) ([]goanalysis.Issue, error) { +func runMakeZero(pass *analysis.Pass, settings *config.MakezeroSettings) error { zero := makezero.NewLinter(settings.Always) - var issues []goanalysis.Issue - for _, file := range pass.Files { hints, err := zero.Run(pass.Fset, pass.TypesInfo, file) if err != nil { - return nil, fmt.Errorf("makezero linter failed on file %q: %w", file.Name.String(), err) + return fmt.Errorf("makezero linter failed on file %q: %w", file.Name.String(), err) } for _, hint := range hints { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: hint.Position(), - Text: hint.Details(), - FromLinter: linterName, - }, pass)) + pass.Report(analysis.Diagnostic{ + Pos: hint.Pos(), + Message: hint.Details(), + }) } } - return issues, nil + return nil } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go index 34b880b529..e15dfa3a5a 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/mirror/mirror.go @@ -1,70 +1,30 @@ package mirror import ( - "sync" - "github.com/butuzov/mirror" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) func New() *goanalysis.Linter { - var ( - mu sync.Mutex - issues []goanalysis.Issue - ) - a := mirror.NewAnalyzer() - a.Run = func(pass *analysis.Pass) (any, error) { - // mirror only lints test files if the `--with-tests` flag is passed, - // so we pass the `with-tests` flag as true to the analyzer before running it. 
- // This can be turned off by using the regular golangci-lint flags such as `--tests` or `--skip-files` - // or can be disabled per linter via exclude rules. - // (see https://github.com/golangci/golangci-lint/issues/2527#issuecomment-1023707262) - violations := mirror.Run(pass, true) - - if len(violations) == 0 { - return nil, nil - } - - for index := range violations { - i := violations[index].Issue(pass.Fset) - issue := result.Issue{ - FromLinter: a.Name, - Text: i.Message, - Pos: i.Start, - } - - if i.InlineFix != "" { - issue.Replacement = &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: i.Start.Column - 1, - Length: len(i.Original), - NewString: i.InlineFix, - }, - } - } - - mu.Lock() - issues = append(issues, goanalysis.NewIssue(&issue, pass)) - mu.Unlock() - } - - return nil, nil + // mirror only lints test files if the `--with-tests` flag is passed, + // so we pass the `with-tests` flag as true to the analyzer before running it. + // This can be turned off by using the regular golangci-lint flags such as `--tests` or `--skip-files` + // or can be disabled per linter via exclude rules. + // (see https://github.com/golangci/golangci-lint/issues/2527#issuecomment-1023707262) + linterCfg := map[string]map[string]any{ + a.Name: { + "with-tests": true, + }, } - analyzer := goanalysis.NewLinter( + return goanalysis.NewLinter( a.Name, a.Doc, []*analysis.Analyzer{a}, - nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return issues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) - - return analyzer + linterCfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go index 44409cec9d..3ace5fddb9 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell/misspell.go @@ -2,9 +2,9 @@ package misspell import ( "fmt" + "go/ast" "go/token" "strings" - "sync" "unicode" "github.com/golangci/misspell" @@ -12,17 +12,12 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/golinters/internal" "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "misspell" func New(settings *config.MisspellSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, @@ -42,42 +37,25 @@ func New(settings *config.MisspellSettings) *goanalysis.Linter { return nil, ruleErr } - issues, err := runMisspell(lintCtx, pass, replacer, settings.Mode) + err := runMisspell(lintCtx, pass, replacer, settings.Mode) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } -func runMisspell(lintCtx *linter.Context, pass *analysis.Pass, replacer *misspell.Replacer, mode string) ([]goanalysis.Issue, error) { - fileNames := internal.GetFileNames(pass) - - var issues []goanalysis.Issue - for _, filename := range fileNames { - lintIssues, err := runMisspellOnFile(lintCtx, filename, replacer, mode) +func runMisspell(lintCtx *linter.Context, pass *analysis.Pass, replacer *misspell.Replacer, mode string) error { + for _, file := range pass.Files { + err := runMisspellOnFile(lintCtx, pass, file, replacer, mode) if err != nil { - return nil, err - } - - for i := range lintIssues { - issues = append(issues, goanalysis.NewIssue(&lintIssues[i], pass)) + return err } } - return issues, nil + return nil } func createMisspellReplacer(settings *config.MisspellSettings) (*misspell.Replacer, error) { @@ -112,10 +90,15 @@ func createMisspellReplacer(settings *config.MisspellSettings) (*misspell.Replac return replacer, nil } -func runMisspellOnFile(lintCtx *linter.Context, filename string, replacer *misspell.Replacer, mode string) ([]result.Issue, error) { - fileContent, err := lintCtx.FileCache.GetFileBytes(filename) +func runMisspellOnFile(lintCtx *linter.Context, pass *analysis.Pass, file *ast.File, replacer *misspell.Replacer, mode string) error { + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + return nil + } + + fileContent, err := lintCtx.FileCache.GetFileBytes(position.Filename) if err != nil { - return nil, fmt.Errorf("can't get file %s contents: %w", filename, err) + return fmt.Errorf("can't get file %s contents: %w", position.Filename, err) } // `r.ReplaceGo` doesn't find issues inside strings: it searches only inside comments. 
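// The hunk below completes the misspell migration: instead of building
// result.Issue values carrying an InlineFix, each corrected word is reported
// directly on the pass as an analysis.Diagnostic with a SuggestedFix.
// What follows is a minimal sketch of the position arithmetic that reporting
// relies on; the helper name reportMisspelling is hypothetical, not part of
// the vendored code, but the LineStart-based math mirrors the hunk below.

package sketch

import (
	"fmt"
	"go/token"

	"golang.org/x/tools/go/analysis"
)

// reportMisspelling maps misspell's 1-based line and 0-based byte column
// onto token.Pos values within the file f, then reports the misspelled
// span together with a one-word replacement fix.
func reportMisspelling(pass *analysis.Pass, f *token.File, line, column int, original, corrected string) {
	start := f.LineStart(line) + token.Pos(column)
	end := start + token.Pos(len(original))

	pass.Report(analysis.Diagnostic{
		Pos:     start,
		End:     end,
		Message: fmt.Sprintf("`%s` is a misspelling of `%s`", original, corrected),
		SuggestedFixes: []analysis.SuggestedFix{{
			TextEdits: []analysis.TextEdit{{
				Pos:     start,
				End:     end,
				NewText: []byte(corrected),
			}},
		}},
	})
}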
@@ -129,36 +112,31 @@ func runMisspellOnFile(lintCtx *linter.Context, filename string, replacer *missp replace = replacer.Replace } - _, diffs := replace(string(fileContent)) + f := pass.Fset.File(file.Pos()) - var res []result.Issue + _, diffs := replace(string(fileContent)) for _, diff := range diffs { text := fmt.Sprintf("`%s` is a misspelling of `%s`", diff.Original, diff.Corrected) - pos := token.Position{ - Filename: filename, - Line: diff.Line, - Column: diff.Column + 1, - } - - replacement := &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: diff.Column, - Length: len(diff.Original), - NewString: diff.Corrected, - }, - } - - res = append(res, result.Issue{ - Pos: pos, - Text: text, - FromLinter: linterName, - Replacement: replacement, + start := f.LineStart(diff.Line) + token.Pos(diff.Column) + end := f.LineStart(diff.Line) + token.Pos(diff.Column+len(diff.Original)) + + pass.Report(analysis.Diagnostic{ + Pos: start, + End: end, + Message: text, + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: start, + End: end, + NewText: []byte(diff.Corrected), + }}, + }}, }) } - return res, nil + return nil } func appendExtraWords(replacer *misspell.Replacer, extraWords []config.MisspellExtraWords) error { diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go index 30047abfc2..a4e9ceff28 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/musttag/musttag.go @@ -8,11 +8,11 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(setting *config.MustTagSettings) *goanalysis.Linter { +func New(settings *config.MustTagSettings) *goanalysis.Linter { var funcs []musttag.Func - if setting != nil { - for _, fn := range setting.Functions { + if settings != nil { + for _, fn := range settings.Functions { funcs = append(funcs, musttag.Func{ Name: fn.Name, Tag: fn.Tag, diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go index 43be973b0a..b72538fd16 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nestif/nestif.go @@ -1,37 +1,21 @@ package nestif import ( - "sort" - "sync" - "github.com/nakabonne/nestif" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "nestif" func New(settings *config.NestifSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues := runNestIf(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() + runNestIf(pass, settings) return nil, nil }, @@ -42,37 +26,34 @@ func New(settings *config.NestifSettings) *goanalysis.Linter { "Reports deeply nested if statements", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) []goanalysis.Issue { +func runNestIf(pass *analysis.Pass, settings *config.NestifSettings) { checker := &nestif.Checker{ MinComplexity: settings.MinComplexity, } - var lintIssues []nestif.Issue - for _, f := range pass.Files { - lintIssues = append(lintIssues, checker.Check(f, pass.Fset)...) - } + for _, file := range pass.Files { + position, isGoFile := goanalysis.GetGoFilePosition(pass, file) + if !isGoFile { + continue + } - if len(lintIssues) == 0 { - return nil - } + issues := checker.Check(file, pass.Fset) + if len(issues) == 0 { + continue + } - sort.SliceStable(lintIssues, func(i, j int) bool { - return lintIssues[i].Complexity > lintIssues[j].Complexity - }) + nonAdjPosition := pass.Fset.PositionFor(file.Pos(), false) - issues := make([]goanalysis.Issue, 0, len(lintIssues)) - for _, i := range lintIssues { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: i.Pos, - Text: i.Message, - FromLinter: linterName, - }, pass)) - } + f := pass.Fset.File(file.Pos()) - return issues + for _, issue := range issues { + pass.Report(analysis.Diagnostic{ + Pos: f.LineStart(goanalysis.AdjustPos(issue.Pos.Line, nonAdjPosition.Line, position.Line)), + Message: issue.Message, + }) + } + } } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnesserr/nilnesserr.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnesserr/nilnesserr.go new file mode 100644 index 0000000000..8349377b7b --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnesserr/nilnesserr.go @@ -0,0 +1,23 @@ +package nilnesserr + +import ( + "github.com/alingse/nilnesserr" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/goanalysis" + "github.com/golangci/golangci-lint/pkg/golinters/internal" +) + +func New() *goanalysis.Linter { + a, err := nilnesserr.NewAnalyzer(nilnesserr.LinterSetting{}) + if err != nil { + internal.LinterLogger.Fatalf("nilnesserr: create analyzer: %v", err) + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + nil, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go index d8d677d999..ed25dec71f 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go @@ -11,13 +11,13 @@ import ( func New(settings *config.NilNilSettings) *goanalysis.Linter { a := analyzer.New() - cfgMap := make(map[string]map[string]any) + cfg := make(map[string]map[string]any) if settings != nil { - cfgMap[a.Name] = map[string]any{ + cfg[a.Name] = map[string]any{ "detect-opposite": settings.DetectOpposite, } if len(settings.CheckedTypes) != 0 { - cfgMap[a.Name]["checked-types"] = settings.CheckedTypes + cfg[a.Name]["checked-types"] = settings.CheckedTypes } } @@ -25,7 +25,7 @@ func New(settings *config.NilNilSettings) 
*goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, - cfgMap, + cfg, ). WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/issues.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/issues.go new file mode 100644 index 0000000000..5e9ba4117c --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/issues.go @@ -0,0 +1,41 @@ +package internal + +import ( + "fmt" + "strings" + "unicode" +) + +func formatExtraLeadingSpace(fullDirective string) string { + return fmt.Sprintf("directive `%s` should not have more than one leading space", fullDirective) +} + +func formatNotMachine(fullDirective string) string { + expected := fullDirective[:2] + strings.TrimLeftFunc(fullDirective[2:], unicode.IsSpace) + return fmt.Sprintf("directive `%s` should be written without leading space as `%s`", + fullDirective, expected) +} + +func formatNotSpecific(fullDirective, directiveWithOptionalLeadingSpace string) string { + return fmt.Sprintf("directive `%s` should mention specific linter such as `%s:my-linter`", + fullDirective, directiveWithOptionalLeadingSpace) +} + +func formatParseError(fullDirective, directiveWithOptionalLeadingSpace string) string { + return fmt.Sprintf("directive `%s` should match `%s[:] [// ]`", + fullDirective, + directiveWithOptionalLeadingSpace) +} + +func formatNoExplanation(fullDirective, fullDirectiveWithoutExplanation string) string { + return fmt.Sprintf("directive `%s` should provide explanation such as `%s // this is why`", + fullDirective, fullDirectiveWithoutExplanation) +} + +func formatUnusedCandidate(fullDirective, expectedLinter string) string { + details := fmt.Sprintf("directive `%s` is unused", fullDirective) + if expectedLinter != "" { + details += fmt.Sprintf(" for linter %q", expectedLinter) + } + return details +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go index 08dd743783..21cd20124f 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal/nolintlint.go @@ -2,123 +2,17 @@ package internal import ( - "fmt" - "go/ast" "go/token" "regexp" "strings" - "unicode" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/result" ) -type BaseIssue struct { - fullDirective string - directiveWithOptionalLeadingSpace string - position token.Position - replacement *result.Replacement -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (b BaseIssue) Position() token.Position { - return b.position -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (b BaseIssue) Replacement() *result.Replacement { - return b.replacement -} - -type ExtraLeadingSpace struct { - BaseIssue -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i ExtraLeadingSpace) Details() string { - return fmt.Sprintf("directive `%s` should not have more than one leading space", i.fullDirective) -} - -func (i ExtraLeadingSpace) String() string { return toString(i) } - -type NotMachine struct { - BaseIssue -} - -//nolint:gocritic // TODO(ldez) must be change in the future. 
-func (i NotMachine) Details() string { - expected := i.fullDirective[:2] + strings.TrimLeftFunc(i.fullDirective[2:], unicode.IsSpace) - return fmt.Sprintf("directive `%s` should be written without leading space as `%s`", - i.fullDirective, expected) -} - -func (i NotMachine) String() string { return toString(i) } - -type NotSpecific struct { - BaseIssue -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i NotSpecific) Details() string { - return fmt.Sprintf("directive `%s` should mention specific linter such as `%s:my-linter`", - i.fullDirective, i.directiveWithOptionalLeadingSpace) -} - -func (i NotSpecific) String() string { return toString(i) } - -type ParseError struct { - BaseIssue -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i ParseError) Details() string { - return fmt.Sprintf("directive `%s` should match `%s[:] [// ]`", - i.fullDirective, - i.directiveWithOptionalLeadingSpace) -} - -func (i ParseError) String() string { return toString(i) } - -type NoExplanation struct { - BaseIssue - fullDirectiveWithoutExplanation string -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i NoExplanation) Details() string { - return fmt.Sprintf("directive `%s` should provide explanation such as `%s // this is why`", - i.fullDirective, i.fullDirectiveWithoutExplanation) -} - -func (i NoExplanation) String() string { return toString(i) } - -type UnusedCandidate struct { - BaseIssue - ExpectedLinter string -} - -//nolint:gocritic // TODO(ldez) must be change in the future. -func (i UnusedCandidate) Details() string { - details := fmt.Sprintf("directive `%s` is unused", i.fullDirective) - if i.ExpectedLinter != "" { - details += fmt.Sprintf(" for linter %q", i.ExpectedLinter) - } - return details -} - -func (i UnusedCandidate) String() string { return toString(i) } - -func toString(issue Issue) string { - return fmt.Sprintf("%s at %s", issue.Details(), issue.Position()) -} - -type Issue interface { - Details() string - Position() token.Position - String() string - Replacement() *result.Replacement -} - -type Needs uint +const LinterName = "nolintlint" const ( NeedsMachineOnly Needs = 1 << iota @@ -128,6 +22,10 @@ const ( NeedsAll = NeedsMachineOnly | NeedsSpecific | NeedsExplanation ) +type Needs uint + +const commentMark = "//" + var commentPattern = regexp.MustCompile(`^//\s*(nolint)(:\s*[\w-]+\s*(?:,\s*[\w-]+\s*)*)?\b`) // matches a complete nolint directive @@ -157,15 +55,10 @@ var ( ) //nolint:funlen,gocyclo // the function is going to be refactored in the future -func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { - var issues []Issue - - for _, node := range nodes { - file, ok := node.(*ast.File) - if !ok { - continue - } +func (l Linter) Run(pass *analysis.Pass) ([]goanalysis.Issue, error) { + var issues []goanalysis.Issue + for _, file := range pass.Files { for _, c := range file.Comments { for _, comment := range c.List { if !commentPattern.MatchString(comment.Text) { @@ -180,47 +73,58 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { leadingSpace = leadingSpaceMatches[1] } - directiveWithOptionalLeadingSpace := "//" + directiveWithOptionalLeadingSpace := commentMark if leadingSpace != "" { directiveWithOptionalLeadingSpace += " " } - split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], "//") + split := strings.Split(strings.SplitN(comment.Text, ":", 2)[0], commentMark) directiveWithOptionalLeadingSpace += strings.TrimSpace(split[1]) - pos := 
fset.Position(comment.Pos()) - end := fset.Position(comment.End()) - - base := BaseIssue{ - fullDirective: comment.Text, - directiveWithOptionalLeadingSpace: directiveWithOptionalLeadingSpace, - position: pos, - } + pos := pass.Fset.Position(comment.Pos()) + end := pass.Fset.Position(comment.End()) // check for, report and eliminate leading spaces, so we can check for other issues if leadingSpace != "" { - removeWhitespace := &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: pos.Column + 1, - Length: len(leadingSpace), - NewString: "", - }, - } + removeWhitespace := []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: token.Pos(pos.Offset), + End: token.Pos(pos.Offset + len(commentMark) + len(leadingSpace)), + NewText: []byte(commentMark), + }}, + }} + if (l.needs & NeedsMachineOnly) != 0 { - issue := NotMachine{BaseIssue: base} - issue.BaseIssue.replacement = removeWhitespace - issues = append(issues, issue) + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatNotMachine(comment.Text), + Pos: pos, + SuggestedFixes: removeWhitespace, + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) } else if len(leadingSpace) > 1 { - issue := ExtraLeadingSpace{BaseIssue: base} - issue.BaseIssue.replacement = removeWhitespace - issue.BaseIssue.replacement.Inline.NewString = " " // assume a single space was intended - issues = append(issues, issue) + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatExtraLeadingSpace(comment.Text), + Pos: pos, + SuggestedFixes: removeWhitespace, + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) } } fullMatches := fullDirectivePattern.FindStringSubmatch(comment.Text) if len(fullMatches) == 0 { - issues = append(issues, ParseError{BaseIssue: base}) + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatParseError(comment.Text, directiveWithOptionalLeadingSpace), + Pos: pos, + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) + continue } @@ -230,7 +134,7 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { if lintersText != "" && !strings.HasPrefix(lintersText, "all") { lls := strings.Split(lintersText, ",") linters = make([]string, 0, len(lls)) - rangeStart := (pos.Column - 1) + len("//") + len(leadingSpace) + len("nolint:") + rangeStart := (pos.Column - 1) + len(commentMark) + len(leadingSpace) + len("nolint:") for i, ll := range lls { rangeEnd := rangeStart + len(ll) if i < len(lls)-1 { @@ -246,46 +150,59 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { if (l.needs & NeedsSpecific) != 0 { if len(linters) == 0 { - issues = append(issues, NotSpecific{BaseIssue: base}) + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatNotSpecific(comment.Text, directiveWithOptionalLeadingSpace), + Pos: pos, + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) } } // when detecting unused directives, we send all the directives through and filter them out in the nolint processor if (l.needs & NeedsUnused) != 0 { - removeNolintCompletely := &result.Replacement{} + removeNolintCompletely := []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: token.Pos(pos.Offset), + End: token.Pos(end.Offset), + NewText: nil, + }}, + }} - startCol := pos.Column - 1 - - if startCol == 0 { - // if the directive starts from a new line, remove the line - removeNolintCompletely.NeedOnlyDelete = true - } else { - removeNolintCompletely.Inline = &result.InlineFix{ - StartCol: startCol, - Length: end.Column 
- pos.Column, - NewString: "", + if len(linters) == 0 { + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatUnusedCandidate(comment.Text, ""), + Pos: pos, + ExpectNoLint: true, + SuggestedFixes: removeNolintCompletely, } - } - if len(linters) == 0 { - issue := UnusedCandidate{BaseIssue: base} - issue.replacement = removeNolintCompletely - issues = append(issues, issue) + issues = append(issues, goanalysis.NewIssue(issue, pass)) } else { for _, linter := range linters { - issue := UnusedCandidate{BaseIssue: base, ExpectedLinter: linter} - // only offer replacement if there is a single linter + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatUnusedCandidate(comment.Text, linter), + Pos: pos, + ExpectNoLint: true, + ExpectedNoLintLinter: linter, + } + + // only offer SuggestedFix if there is a single linter // because of issues around commas and the possibility of all // linters being removed if len(linters) == 1 { - issue.replacement = removeNolintCompletely + issue.SuggestedFixes = removeNolintCompletely } - issues = append(issues, issue) + + issues = append(issues, goanalysis.NewIssue(issue, pass)) } } } - if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == "//") { + if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == commentMark) { needsExplanation := len(linters) == 0 // if no linters are mentioned, we must have explanation // otherwise, check if we are excluding all the mentioned linters for _, ll := range linters { @@ -297,10 +214,14 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) { if needsExplanation { fullDirectiveWithoutExplanation := trailingBlankExplanation.ReplaceAllString(comment.Text, "") - issues = append(issues, NoExplanation{ - BaseIssue: base, - fullDirectiveWithoutExplanation: fullDirectiveWithoutExplanation, - }) + + issue := &result.Issue{ + FromLinter: LinterName, + Text: formatNoExplanation(comment.Text, fullDirectiveWithoutExplanation), + Pos: pos, + } + + issues = append(issues, goanalysis.NewIssue(issue, pass)) } } } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go index 9f04454a5a..e1c878628d 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/nolintlint/nolintlint.go @@ -2,31 +2,47 @@ package nolintlint import ( "fmt" - "go/ast" "sync" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/golinters/internal" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal" + nolintlint "github.com/golangci/golangci-lint/pkg/golinters/nolintlint/internal" "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) -const LinterName = "nolintlint" +const LinterName = nolintlint.LinterName func New(settings *config.NoLintLintSettings) *goanalysis.Linter { var mu sync.Mutex var resIssues []goanalysis.Issue + var needs nolintlint.Needs + if settings.RequireExplanation { + needs |= nolintlint.NeedsExplanation + } + if settings.RequireSpecific { + needs |= nolintlint.NeedsSpecific + } + if !settings.AllowUnused { + needs |= nolintlint.NeedsUnused + } + + lnt, err := nolintlint.NewLinter(needs, 
settings.AllowNoExplanation) + if err != nil { + internal.LinterLogger.Fatalf("%s: create analyzer: %v", nolintlint.LinterName, err) + } + analyzer := &analysis.Analyzer{ - Name: LinterName, + Name: nolintlint.LinterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runNoLintLint(pass, settings) + issues, err := lnt.Run(pass) if err != nil { - return nil, err + return nil, fmt.Errorf("linter failed to run: %w", err) } if len(issues) == 0 { @@ -42,7 +58,7 @@ func New(settings *config.NoLintLintSettings) *goanalysis.Linter { } return goanalysis.NewLinter( - LinterName, + nolintlint.LinterName, "Reports ill-formed or insufficient nolint directives", []*analysis.Analyzer{analyzer}, nil, @@ -50,55 +66,3 @@ func New(settings *config.NoLintLintSettings) *goanalysis.Linter { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } - -func runNoLintLint(pass *analysis.Pass, settings *config.NoLintLintSettings) ([]goanalysis.Issue, error) { - var needs internal.Needs - if settings.RequireExplanation { - needs |= internal.NeedsExplanation - } - if settings.RequireSpecific { - needs |= internal.NeedsSpecific - } - if !settings.AllowUnused { - needs |= internal.NeedsUnused - } - - lnt, err := internal.NewLinter(needs, settings.AllowNoExplanation) - if err != nil { - return nil, err - } - - nodes := make([]ast.Node, 0, len(pass.Files)) - for _, n := range pass.Files { - nodes = append(nodes, n) - } - - lintIssues, err := lnt.Run(pass.Fset, nodes...) - if err != nil { - return nil, fmt.Errorf("linter failed to run: %w", err) - } - - var issues []goanalysis.Issue - - for _, i := range lintIssues { - expectNoLint := false - var expectedNolintLinter string - if ii, ok := i.(internal.UnusedCandidate); ok { - expectedNolintLinter = ii.ExpectedLinter - expectNoLint = true - } - - issue := &result.Issue{ - FromLinter: LinterName, - Text: i.Details(), - Pos: i.Position(), - ExpectNoLint: expectNoLint, - ExpectedNoLintLinter: expectedNolintLinter, - Replacement: i.Replacement(), - } - - issues = append(issues, goanalysis.NewIssue(issue, pass)) - } - - return issues, nil -} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go index ce7ff9d59c..17e86c98ee 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc/prealloc.go @@ -2,7 +2,6 @@ package prealloc import ( "fmt" - "sync" "github.com/alexkohler/prealloc/pkg" "golang.org/x/tools/go/analysis" @@ -10,29 +9,16 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/golinters/internal" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "prealloc" func New(settings *config.PreallocSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (any, error) { - issues := runPreAlloc(pass, settings) - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() + runPreAlloc(pass, settings) return nil, nil }, @@ -43,23 +29,16 @@ func New(settings *config.PreallocSettings) *goanalysis.Linter { "Finds slice declarations that could potentially be pre-allocated", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } -func runPreAlloc(pass *analysis.Pass, settings *config.PreallocSettings) []goanalysis.Issue { - var issues []goanalysis.Issue - +func runPreAlloc(pass *analysis.Pass, settings *config.PreallocSettings) { hints := pkg.Check(pass.Files, settings.Simple, settings.RangeLoops, settings.ForLoops) for _, hint := range hints { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: pass.Fset.Position(hint.Pos), - Text: fmt.Sprintf("Consider pre-allocating %s", internal.FormatCode(hint.DeclaredSliceName, nil)), - FromLinter: linterName, - }, pass)) + pass.Report(analysis.Diagnostic{ + Pos: hint.Pos, + Message: fmt.Sprintf("Consider pre-allocating %s", internal.FormatCode(hint.DeclaredSliceName, nil)), + }) } - - return issues } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go index 302ce67b88..6c65f86bcf 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/protogetter/protogetter.go @@ -1,21 +1,14 @@ package protogetter import ( - "sync" - "github.com/ghostiam/protogetter" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) func New(settings *config.ProtoGetterSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - var cfg protogetter.Config if settings != nil { cfg = protogetter.Config{ @@ -25,50 +18,15 @@ func New(settings *config.ProtoGetterSettings) *goanalysis.Linter { ReplaceFirstArgInAppend: settings.ReplaceFirstArgInAppend, } } - cfg.Mode = protogetter.GolangciLintMode - - a := protogetter.NewAnalyzer(&cfg) - a.Run = func(pass *analysis.Pass) (any, error) { - pgIssues, err := protogetter.Run(pass, &cfg) - if err != nil { - return nil, err - } - - issues := make([]goanalysis.Issue, len(pgIssues)) - for i, issue := range pgIssues { - report := &result.Issue{ - FromLinter: a.Name, - Pos: issue.Pos, - Text: issue.Message, - Replacement: &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: issue.InlineFix.StartCol, - Length: issue.InlineFix.Length, - NewString: issue.InlineFix.NewString, - }, - }, - } - issues[i] = goanalysis.NewIssue(report, pass) - } - - if len(issues) == 0 { - return nil, nil - } + cfg.Mode = protogetter.StandaloneMode - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - - return nil, nil - } + a := protogetter.NewAnalyzer(&cfg) return goanalysis.NewLinter( a.Name, a.Doc, []*analysis.Analyzer{a}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeTypesInfo) + ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go index 8b030f15de..3af4885b40 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go @@ -4,11 +4,19 @@ import ( "github.com/raeperd/recvcheck" "golang.org/x/tools/go/analysis" + "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New() *goanalysis.Linter { - a := recvcheck.Analyzer +func New(settings *config.RecvcheckSettings) *goanalysis.Linter { + var cfg recvcheck.Settings + + if settings != nil { + cfg.DisableBuiltin = settings.DisableBuiltin + cfg.Exclusions = settings.Exclusions + } + + a := recvcheck.NewAnalyzer(cfg) return goanalysis.NewLinter( a.Name, diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go index 056a258e0a..ec621ccfba 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go @@ -2,6 +2,7 @@ package revive import ( "bytes" + "cmp" "encoding/json" "fmt" "go/token" @@ -114,7 +115,7 @@ func newWrapper(settings *config.ReviveSettings) (*wrapper, error) { } func (w *wrapper) run(lintCtx *linter.Context, pass *analysis.Pass) ([]goanalysis.Issue, error) { - packages := [][]string{internal.GetFileNames(pass)} + packages := [][]string{internal.GetGoFileNames(pass)} failures, err := w.revive.Lint(packages, w.lintingRules, *w.conf) if err != nil { @@ -164,7 +165,7 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue { lineRangeTo = object.Position.Start.Line } - return goanalysis.NewIssue(&result.Issue{ + issue := &result.Issue{ Severity: string(object.Severity), Text: fmt.Sprintf("%s: %s", object.RuleName, object.Failure.Failure), Pos: token.Position{ @@ -178,7 +179,24 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue { To: lineRangeTo, }, FromLinter: linterName, - }, pass) + } + + if object.ReplacementLine != "" { + f := pass.Fset.File(token.Pos(object.Position.Start.Offset)) + + // Skip cgo files because the positions are wrong. + if object.GetFilename() == f.Name() { + issue.SuggestedFixes = []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: f.LineStart(object.Position.Start.Line), + End: goanalysis.EndOfLinePos(f, object.Position.End.Line), + NewText: []byte(object.ReplacementLine), + }}, + }} + } + } + + return goanalysis.NewIssue(issue, pass) } // This function mimics the GetConfig function of revive. @@ -379,12 +397,8 @@ const defaultConfidence = 0.8 func normalizeConfig(cfg *lint.Config) { // NOTE(ldez): this custom section for golangci-lint should be kept. 
// --- - if cfg.Confidence == 0 { - cfg.Confidence = defaultConfidence - } - if cfg.Severity == "" { - cfg.Severity = lint.SeverityWarning - } + cfg.Confidence = cmp.Or(cfg.Confidence, defaultConfidence) + cfg.Severity = cmp.Or(cfg.Severity, lint.SeverityWarning) // --- if len(cfg.Rules) == 0 { diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go index f438c51b5c..7c8a0c8b02 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagalign/tagalign.go @@ -1,22 +1,15 @@ package tagalign import ( - "sync" - "github.com/4meepo/tagalign" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) func New(settings *config.TagAlignSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - - options := []tagalign.Option{tagalign.WithMode(tagalign.GolangciLintMode)} + var options []tagalign.Option if settings != nil { options = append(options, tagalign.WithAlign(settings.Align)) @@ -32,44 +25,11 @@ func New(settings *config.TagAlignSettings) *goanalysis.Linter { } analyzer := tagalign.NewAnalyzer(options...) - analyzer.Run = func(pass *analysis.Pass) (any, error) { - taIssues := tagalign.Run(pass, options...) - - issues := make([]goanalysis.Issue, len(taIssues)) - for i, issue := range taIssues { - report := &result.Issue{ - FromLinter: analyzer.Name, - Pos: issue.Pos, - Text: issue.Message, - Replacement: &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: issue.InlineFix.StartCol, - Length: issue.InlineFix.Length, - NewString: issue.InlineFix.NewString, - }, - }, - } - - issues[i] = goanalysis.NewIssue(report, pass) - } - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - - return nil, nil - } return goanalysis.NewLinter( analyzer.Name, analyzer.Doc, []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go index d1674c3e9e..08215c3a53 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/tagliatelle/tagliatelle.go @@ -10,10 +10,12 @@ import ( func New(settings *config.TagliatelleSettings) *goanalysis.Linter { cfg := tagliatelle.Config{ - Rules: map[string]string{ - "json": "camel", - "yaml": "camel", - "header": "header", + Base: tagliatelle.Base{ + Rules: map[string]string{ + "json": "camel", + "yaml": "camel", + "header": "header", + }, }, } @@ -21,7 +23,23 @@ func New(settings *config.TagliatelleSettings) *goanalysis.Linter { for k, v := range settings.Case.Rules { cfg.Rules[k] = v } + + cfg.ExtendedRules = toExtendedRules(settings.Case.ExtendedRules) cfg.UseFieldName = settings.Case.UseFieldName + cfg.IgnoredFields = settings.Case.IgnoredFields + + for _, override := range settings.Case.Overrides { + cfg.Overrides = append(cfg.Overrides, tagliatelle.Overrides{ + Base: tagliatelle.Base{ + Rules: override.Rules, + ExtendedRules: toExtendedRules(override.ExtendedRules), + UseFieldName: override.UseFieldName, + IgnoredFields: override.IgnoredFields, + Ignore: override.Ignore, + }, + Package: override.Package, + }) + } } a := tagliatelle.New(cfg) @@ -31,5 +49,19 @@ func New(settings *config.TagliatelleSettings) *goanalysis.Linter { a.Doc, []*analysis.Analyzer{a}, nil, - ).WithLoadMode(goanalysis.LoadModeSyntax) + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} + +func toExtendedRules(src map[string]config.TagliatelleExtendedRule) map[string]tagliatelle.ExtendedRule { + result := make(map[string]tagliatelle.ExtendedRule, len(src)) + + for k, v := range src { + result[k] = tagliatelle.ExtendedRule{ + Case: v.Case, + ExtraInitialisms: v.ExtraInitialisms, + InitialismOverrides: v.InitialismOverrides, + } + } + + return result } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go index 632152712b..f617da5536 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/testpackage/testpackage.go @@ -10,19 +10,19 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(cfg *config.TestpackageSettings) *goanalysis.Linter { +func New(settings *config.TestpackageSettings) *goanalysis.Linter { a := testpackage.NewAnalyzer() - var settings map[string]map[string]any - if cfg != nil { - settings = map[string]map[string]any{ + var cfg map[string]map[string]any + if settings != nil { + cfg = map[string]map[string]any{ a.Name: { - testpackage.SkipRegexpFlagName: cfg.SkipRegexp, - testpackage.AllowPackagesFlagName: strings.Join(cfg.AllowPackages, ","), + testpackage.SkipRegexpFlagName: settings.SkipRegexp, + testpackage.AllowPackagesFlagName: strings.Join(settings.AllowPackages, ","), }, } } - return 
goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, settings). + return goanalysis.NewLinter(a.Name, a.Doc, []*analysis.Analyzer{a}, cfg). WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go index cc6ea755c9..102610a69a 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/thelper/thelper.go @@ -12,7 +12,7 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/internal" ) -func New(cfg *config.ThelperSettings) *goanalysis.Linter { +func New(settings *config.ThelperSettings) *goanalysis.Linter { a := analyzer.NewAnalyzer() opts := map[string]struct{}{ @@ -33,11 +33,11 @@ func New(cfg *config.ThelperSettings) *goanalysis.Linter { "tb_first": {}, } - if cfg != nil { - applyTHelperOptions(cfg.Test, "t_", opts) - applyTHelperOptions(cfg.Fuzz, "f_", opts) - applyTHelperOptions(cfg.Benchmark, "b_", opts) - applyTHelperOptions(cfg.TB, "tb_", opts) + if settings != nil { + applyTHelperOptions(settings.Test, "t_", opts) + applyTHelperOptions(settings.Fuzz, "f_", opts) + applyTHelperOptions(settings.Benchmark, "b_", opts) + applyTHelperOptions(settings.TB, "tb_", opts) } if len(opts) == 0 { @@ -46,7 +46,7 @@ func New(cfg *config.ThelperSettings) *goanalysis.Linter { args := maps.Keys(opts) - cfgMap := map[string]map[string]any{ + cfg := map[string]map[string]any{ a.Name: { "checks": strings.Join(args, ","), }, @@ -56,7 +56,7 @@ func New(cfg *config.ThelperSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go index 0fe1847366..04c9a223e5 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam/unparam.go @@ -1,8 +1,6 @@ package unparam import ( - "sync" - "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" "golang.org/x/tools/go/packages" @@ -11,33 +9,21 @@ import ( "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const linterName = "unparam" func New(settings *config.UnparamSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - analyzer := &analysis.Analyzer{ Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Requires: []*analysis.Analyzer{buildssa.Analyzer}, Run: func(pass *analysis.Pass) (any, error) { - issues, err := runUnparam(pass, settings) + err := runUnparam(pass, settings) if err != nil { return nil, err } - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - return nil, nil }, } @@ -51,12 +37,10 @@ func New(settings *config.UnparamSettings) *goanalysis.Linter { if settings.Algo != "cha" { lintCtx.Log.Warnf("`linters-settings.unparam.algo` isn't supported by the newest `unparam`") } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } -func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) ([]goanalysis.Issue, error) { +func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) error { ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) ssaPkg := ssa.Pkg @@ -74,17 +58,15 @@ func runUnparam(pass *analysis.Pass, settings *config.UnparamSettings) ([]goanal unparamIssues, err := c.Check() if err != nil { - return nil, err + return err } - var issues []goanalysis.Issue for _, i := range unparamIssues { - issues = append(issues, goanalysis.NewIssue(&result.Issue{ - Pos: pass.Fset.Position(i.Pos()), - Text: i.Message(), - FromLinter: linterName, - }, pass)) + pass.Report(analysis.Diagnostic{ + Pos: i.Pos(), + Message: i.Message(), + }) } - return issues, nil + return nil } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go index 050e47f24c..00f7d9742a 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars/usestdlibvars.go @@ -8,24 +8,24 @@ import ( "github.com/golangci/golangci-lint/pkg/goanalysis" ) -func New(cfg *config.UseStdlibVarsSettings) *goanalysis.Linter { +func New(settings *config.UseStdlibVarsSettings) *goanalysis.Linter { a := analyzer.New() - cfgMap := make(map[string]map[string]any) - if cfg != nil { - cfgMap[a.Name] = map[string]any{ - analyzer.ConstantKindFlag: cfg.ConstantKind, - analyzer.CryptoHashFlag: cfg.CryptoHash, - analyzer.HTTPMethodFlag: cfg.HTTPMethod, - analyzer.HTTPStatusCodeFlag: cfg.HTTPStatusCode, - analyzer.OSDevNullFlag: cfg.OSDevNull, - analyzer.RPCDefaultPathFlag: cfg.DefaultRPCPath, - analyzer.SQLIsolationLevelFlag: cfg.SQLIsolationLevel, - analyzer.SyslogPriorityFlag: cfg.SyslogPriority, - analyzer.TimeLayoutFlag: cfg.TimeLayout, - analyzer.TimeMonthFlag: cfg.TimeMonth, - analyzer.TimeWeekdayFlag: cfg.TimeWeekday, - analyzer.TLSSignatureSchemeFlag: cfg.TLSSignatureScheme, + cfg := make(map[string]map[string]any) + if settings != nil { + cfg[a.Name] = map[string]any{ + analyzer.ConstantKindFlag: settings.ConstantKind, + analyzer.CryptoHashFlag: settings.CryptoHash, + analyzer.HTTPMethodFlag: settings.HTTPMethod, + analyzer.HTTPStatusCodeFlag: settings.HTTPStatusCode, + analyzer.OSDevNullFlag: settings.OSDevNull != nil && *settings.OSDevNull, + analyzer.RPCDefaultPathFlag: settings.DefaultRPCPath, + analyzer.SQLIsolationLevelFlag: settings.SQLIsolationLevel, + analyzer.SyslogPriorityFlag: settings.SyslogPriority != nil && *settings.SyslogPriority, + analyzer.TimeLayoutFlag: settings.TimeLayout, + analyzer.TimeMonthFlag: settings.TimeMonth, + analyzer.TimeWeekdayFlag: settings.TimeWeekday, + analyzer.TLSSignatureSchemeFlag: settings.TLSSignatureScheme, } } @@ -33,6 +33,6 @@ func New(cfg *config.UseStdlibVarsSettings) *goanalysis.Linter { a.Name, a.Doc, []*analysis.Analyzer{a}, - cfgMap, + cfg, ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git 
a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usetesting/usetesting.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usetesting/usetesting.go new file mode 100644 index 0000000000..a21742fbd6 --- /dev/null +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/usetesting/usetesting.go @@ -0,0 +1,33 @@ +package usetesting + +import ( + "github.com/ldez/usetesting" + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/config" + "github.com/golangci/golangci-lint/pkg/goanalysis" +) + +func New(settings *config.UseTestingSettings) *goanalysis.Linter { + a := usetesting.NewAnalyzer() + + cfg := make(map[string]map[string]any) + if settings != nil { + cfg[a.Name] = map[string]any{ + "contextbackground": settings.ContextBackground, + "contexttodo": settings.ContextTodo, + "oschdir": settings.OSChdir, + "osmkdirtemp": settings.OSMkdirTemp, + "ossetenv": settings.OSSetenv, + "ostempdir": settings.OSTempDir, + "oscreatetemp": settings.OSCreateTemp, + } + } + + return goanalysis.NewLinter( + a.Name, + a.Doc, + []*analysis.Analyzer{a}, + cfg, + ).WithLoadMode(goanalysis.LoadModeTypesInfo) +} diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go index 721bfada1c..d45969efce 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace/whitespace.go @@ -1,28 +1,17 @@ package whitespace import ( - "fmt" - "sync" - "github.com/ultraware/whitespace" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/goanalysis" - "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) -const linterName = "whitespace" - func New(settings *config.WhitespaceSettings) *goanalysis.Linter { - var mu sync.Mutex - var resIssues []goanalysis.Issue - var wsSettings whitespace.Settings if settings != nil { wsSettings = whitespace.Settings{ - Mode: whitespace.RunningModeGolangCI, MultiIf: settings.MultiIf, MultiFunc: settings.MultiFunc, } @@ -35,68 +24,5 @@ func New(settings *config.WhitespaceSettings) *goanalysis.Linter { a.Doc, []*analysis.Analyzer{a}, nil, - ).WithContextSetter(func(_ *linter.Context) { - a.Run = func(pass *analysis.Pass) (any, error) { - issues, err := runWhitespace(pass, wsSettings) - if err != nil { - return nil, err - } - - if len(issues) == 0 { - return nil, nil - } - - mu.Lock() - resIssues = append(resIssues, issues...) 
- mu.Unlock() - - return nil, nil - } - }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { - return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) -} - -func runWhitespace(pass *analysis.Pass, wsSettings whitespace.Settings) ([]goanalysis.Issue, error) { - lintIssues := whitespace.Run(pass, &wsSettings) - - issues := make([]goanalysis.Issue, len(lintIssues)) - for i, issue := range lintIssues { - report := &result.Issue{ - FromLinter: linterName, - Pos: pass.Fset.PositionFor(issue.Diagnostic, false), - Text: issue.Message, - } - - switch issue.MessageType { - case whitespace.MessageTypeRemove: - if len(issue.LineNumbers) == 0 { - continue - } - - report.LineRange = &result.Range{ - From: issue.LineNumbers[0], - To: issue.LineNumbers[len(issue.LineNumbers)-1], - } - - report.Replacement = &result.Replacement{NeedOnlyDelete: true} - - case whitespace.MessageTypeAdd: - report.Pos = pass.Fset.PositionFor(issue.FixStart, false) - report.Replacement = &result.Replacement{ - Inline: &result.InlineFix{ - StartCol: 0, - Length: 1, - NewString: "\n\t", - }, - } - - default: - return nil, fmt.Errorf("unknown message type: %v", issue.MessageType) - } - - issues[i] = goanalysis.NewIssue(report, pass) - } - - return issues, nil + ).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go index 96ec2eeae0..b2f5ec7420 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/wrapcheck/wrapcheck.go @@ -11,6 +11,8 @@ import ( func New(settings *config.WrapcheckSettings) *goanalysis.Linter { cfg := wrapcheck.NewDefaultConfig() if settings != nil { + cfg.ExtraIgnoreSigs = settings.ExtraIgnoreSigs + if len(settings.IgnoreSigs) != 0 { cfg.IgnoreSigs = settings.IgnoreSigs } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go index 6d6d4b17e7..1e438a0f03 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go @@ -164,12 +164,20 @@ func (lc *Config) WithNoopFallback(cfg *config.Config, cond func(cfg *config.Con } func IsGoLowerThanGo122() func(cfg *config.Config) error { + return isGoLowerThanGo("1.22") +} + +func IsGoLowerThanGo124() func(cfg *config.Config) error { + return isGoLowerThanGo("1.24") +} + +func isGoLowerThanGo(v string) func(cfg *config.Config) error { return func(cfg *config.Config) error { - if cfg == nil || config.IsGoGreaterThanOrEqual(cfg.Run.Go, "1.22") { + if cfg == nil || config.IsGoGreaterThanOrEqual(cfg.Run.Go, v) { return nil } - return fmt.Errorf("this linter is disabled because the Go version (%s) of your project is lower than Go 1.22", cfg.Run.Go) + return fmt.Errorf("this linter is disabled because the Go version (%s) of your project is lower than Go %s", cfg.Run.Go, v) } } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go index d2a2dc3d07..4338aa88cc 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go +++ 
b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go @@ -26,6 +26,7 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/exhaustive" "github.com/golangci/golangci-lint/pkg/golinters/exhaustruct" "github.com/golangci/golangci-lint/pkg/golinters/exportloopref" + "github.com/golangci/golangci-lint/pkg/golinters/exptostd" "github.com/golangci/golangci-lint/pkg/golinters/fatcontext" "github.com/golangci/golangci-lint/pkg/golinters/forbidigo" "github.com/golangci/golangci-lint/pkg/golinters/forcetypeassert" @@ -72,6 +73,7 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/nakedret" "github.com/golangci/golangci-lint/pkg/golinters/nestif" "github.com/golangci/golangci-lint/pkg/golinters/nilerr" + "github.com/golangci/golangci-lint/pkg/golinters/nilnesserr" "github.com/golangci/golangci-lint/pkg/golinters/nilnil" "github.com/golangci/golangci-lint/pkg/golinters/nlreturn" "github.com/golangci/golangci-lint/pkg/golinters/noctx" @@ -105,6 +107,7 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/unparam" "github.com/golangci/golangci-lint/pkg/golinters/unused" "github.com/golangci/golangci-lint/pkg/golinters/usestdlibvars" + "github.com/golangci/golangci-lint/pkg/golinters/usetesting" "github.com/golangci/golangci-lint/pkg/golinters/varnamelen" "github.com/golangci/golangci-lint/pkg/golinters/wastedassign" "github.com/golangci/golangci-lint/pkg/golinters/whitespace" @@ -160,7 +163,8 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.58.0"). WithPresets(linter.PresetStyle). WithLoadForGoAnalysis(). - WithURL("https://github.com/lasiar/canonicalHeader"), + WithAutoFix(). + WithURL("https://github.com/lasiar/canonicalheader"), linter.NewConfig(containedctx.New()). WithSince("v1.44.0"). @@ -182,18 +186,16 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(cyclop.New(&cfg.LintersSettings.Cyclop)). WithSince("v1.37.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetComplexity). WithURL("https://github.com/bkielbasa/cyclop"), linter.NewConfig(decorder.New(&cfg.LintersSettings.Decorder)). WithSince("v1.44.0"). - WithPresets(linter.PresetFormatting, linter.PresetStyle). + WithPresets(linter.PresetStyle). WithURL("https://gitlab.com/bosi/decorder"), linter.NewConfig(linter.NewNoopDeprecated("deadcode", cfg, linter.DeprecationError)). WithSince("v1.0.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetUnused). WithURL("https://github.com/remyoudompheng/go-misc/tree/master/deadcode"). DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), @@ -216,6 +218,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(dupword.New(&cfg.LintersSettings.DupWord)). WithSince("v1.50.0"). WithPresets(linter.PresetComment). + WithAutoFix(). WithURL("https://github.com/Abirdcfly/dupword"), linter.NewConfig(durationcheck.New()). @@ -247,12 +250,12 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.32.0"). WithPresets(linter.PresetBugs, linter.PresetError). WithLoadForGoAnalysis(). + WithAutoFix(). WithURL("https://github.com/polyfloyd/go-errorlint"), linter.NewConfig(linter.NewNoopDeprecated("execinquery", cfg, linter.DeprecationError)). WithSince("v1.46.0"). WithPresets(linter.PresetSQL). - WithLoadForGoAnalysis(). WithURL("https://github.com/1uf3/execinquery"). 
DeprecatedError("The repository of the linter has been archived by the owner.", "v1.58.0", ""), @@ -265,7 +268,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("exhaustivestruct", cfg, linter.DeprecationError)). WithSince("v1.32.0"). WithPresets(linter.PresetStyle, linter.PresetTest). - WithLoadForGoAnalysis(). WithURL("https://github.com/mbilski/exhaustivestruct"). DeprecatedError("The repository of the linter has been deprecated by the owner.", "v1.46.0", "exhaustruct"), @@ -282,6 +284,13 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithURL("https://github.com/kyoh86/exportloopref"). DeprecatedWarning("Since Go1.22 (loopvar) this linter is no longer relevant.", "v1.60.2", "copyloopvar"), + linter.NewConfig(exptostd.New()). + WithSince("v1.63.0"). + WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). + WithAutoFix(). + WithURL("https://github.com/ldez/exptostd"), + linter.NewConfig(forbidigo.New(&cfg.LintersSettings.Forbidigo)). WithSince("v1.34.0"). WithPresets(linter.PresetStyle). @@ -301,6 +310,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.58.0"). WithPresets(linter.PresetPerformance). WithLoadForGoAnalysis(). + WithAutoFix(). WithURL("https://github.com/Crocmagnon/fatcontext"), linter.NewConfig(funlen.New(&cfg.LintersSettings.Funlen)). @@ -318,6 +328,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.51.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/nunnatsa/ginkgolinter"), linter.NewConfig(gocheckcompilerdirectives.New()). @@ -379,6 +390,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithPresets(linter.PresetStyle, linter.PresetError). WithLoadForGoAnalysis(). WithAlternativeNames("goerr113"). + WithAutoFix(). WithURL("https://github.com/Djarvur/go-err113"), linter.NewConfig(gofmt.New(&cfg.LintersSettings.Gofmt)). @@ -407,7 +419,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("golint", cfg, linter.DeprecationError)). WithSince("v1.0.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). WithURL("https://github.com/golang/lint"). DeprecatedError("The repository of the linter has been archived by the owner.", "v1.41.0", "revive"), @@ -451,6 +462,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). WithAlternativeNames(megacheckName). + WithAutoFix(). WithURL("https://github.com/dominikh/go-tools/tree/master/simple"), linter.NewConfig(gosmopolitan.New(&cfg.LintersSettings.Gosmopolitan)). @@ -464,6 +476,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.0.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetBugs, linter.PresetMetaLinter). + WithAutoFix(). WithAlternativeNames("vet", "vetshadow"). WithURL("https://pkg.go.dev/cmd/vet"), @@ -482,12 +495,14 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.62.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/uudashr/iface"), linter.NewConfig(importas.New(&cfg.LintersSettings.ImportAs)). WithSince("v1.38.0"). WithPresets(linter.PresetStyle). WithLoadForGoAnalysis(). + WithAutoFix(). 
WithURL("https://github.com/julz/importas"), linter.NewConfig(inamedparam.New(&cfg.LintersSettings.Inamedparam)). @@ -508,7 +523,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("interfacer", cfg, linter.DeprecationError)). WithSince("v1.0.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). WithURL("https://github.com/mvdan/interfacer"). DeprecatedError("The repository of the linter has been archived by the owner.", "v1.38.0", ""), @@ -517,6 +531,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.57.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/ckaznocha/intrange"). WithNoopFallback(cfg, linter.IsGoLowerThanGo122()), @@ -550,7 +565,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(linter.NewNoopDeprecated("maligned", cfg, linter.DeprecationError)). WithSince("v1.0.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetPerformance). WithURL("https://github.com/mdempsky/maligned"). DeprecatedError("The repository of the linter has been archived by the owner.", "v1.38.0", "govet 'fieldalignment'"), @@ -577,6 +591,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(nakedret.New(&cfg.LintersSettings.Nakedret)). WithSince("v1.19.0"). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/alexkohler/nakedret"), linter.NewConfig(nestif.New(&cfg.LintersSettings.Nestif)). @@ -590,6 +605,12 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithPresets(linter.PresetBugs). WithURL("https://github.com/gostaticanalysis/nilerr"), + linter.NewConfig(nilnesserr.New()). + WithSince("v1.63.0"). + WithLoadForGoAnalysis(). + WithPresets(linter.PresetBugs). + WithURL("https://github.com/alingse/nilnesserr"), + linter.NewConfig(nilnil.New(&cfg.LintersSettings.NilNil)). WithSince("v1.43.0"). WithPresets(linter.PresetStyle). @@ -599,6 +620,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(nlreturn.New(&cfg.LintersSettings.Nlreturn)). WithSince("v1.30.0"). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/ssgreg/nlreturn"), linter.NewConfig(noctx.New()). @@ -634,6 +656,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.55.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetPerformance). + WithAutoFix(). WithURL("https://github.com/catenacyber/perfsprint"), linter.NewConfig(prealloc.New(&cfg.LintersSettings.Prealloc)). @@ -664,7 +687,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithURL("https://github.com/curioswitch/go-reassign"), - linter.NewConfig(recvcheck.New()). + linter.NewConfig(recvcheck.New(&cfg.LintersSettings.Recvcheck)). WithSince("v1.62.0"). WithPresets(linter.PresetBugs). WithLoadForGoAnalysis(). @@ -674,6 +697,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.37.0"). WithPresets(linter.PresetStyle, linter.PresetMetaLinter). ConsiderSlow(). + WithAutoFix(). WithURL("https://github.com/mgechev/revive"), linter.NewConfig(rowserrcheck.New(&cfg.LintersSettings.RowsErrCheck)). @@ -685,7 +709,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(sloglint.New(&cfg.LintersSettings.SlogLint)). WithSince("v1.55.0"). 
WithLoadForGoAnalysis(). - WithPresets(linter.PresetStyle, linter.PresetFormatting). + WithPresets(linter.PresetStyle). WithURL("https://github.com/go-simpler/sloglint"), linter.NewConfig(linter.NewNoopDeprecated("scopelint", cfg, linter.DeprecationError)). @@ -712,11 +736,11 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithLoadForGoAnalysis(). WithPresets(linter.PresetBugs, linter.PresetMetaLinter). WithAlternativeNames(megacheckName). + WithAutoFix(). WithURL("https://staticcheck.dev/"), linter.NewConfig(linter.NewNoopDeprecated("structcheck", cfg, linter.DeprecationError)). WithSince("v1.0.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetUnused). WithURL("https://github.com/opennota/check"). DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), @@ -725,17 +749,19 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.20.0"). WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/dominikh/go-tools/tree/master/stylecheck"), linter.NewConfig(tagalign.New(&cfg.LintersSettings.TagAlign)). WithSince("v1.53.0"). - WithPresets(linter.PresetStyle, linter.PresetFormatting). + WithPresets(linter.PresetStyle). WithAutoFix(). WithURL("https://github.com/4meepo/tagalign"), linter.NewConfig(tagliatelle.New(&cfg.LintersSettings.Tagliatelle)). WithSince("v1.40.0"). WithPresets(linter.PresetStyle). + WithLoadForGoAnalysis(). WithURL("https://github.com/ldez/tagliatelle"), linter.NewConfig(tenv.New(&cfg.LintersSettings.Tenv)). @@ -753,6 +779,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithSince("v1.55.0"). WithPresets(linter.PresetTest, linter.PresetBugs). WithLoadForGoAnalysis(). + WithAutoFix(). WithURL("https://github.com/Antonboom/testifylint"), linter.NewConfig(testpackage.New(&cfg.LintersSettings.Testpackage)). @@ -802,11 +829,18 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(usestdlibvars.New(&cfg.LintersSettings.UseStdlibVars)). WithSince("v1.48.0"). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/sashamelentyev/usestdlibvars"), + linter.NewConfig(usetesting.New(&cfg.LintersSettings.UseTesting)). + WithSince("v1.63.0"). + WithPresets(linter.PresetTest). + WithLoadForGoAnalysis(). + WithAutoFix(). + WithURL("https://github.com/ldez/usetesting"), + linter.NewConfig(linter.NewNoopDeprecated("varcheck", cfg, linter.DeprecationError)). WithSince("v1.0.0"). - WithLoadForGoAnalysis(). WithPresets(linter.PresetUnused). WithURL("https://github.com/opennota/check"). DeprecatedError("The owner seems to have abandoned the linter.", "v1.49.0", "unused"), @@ -838,6 +872,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { linter.NewConfig(wsl.New(&cfg.LintersSettings.WSL)). WithSince("v1.20.0"). WithPresets(linter.PresetStyle). + WithAutoFix(). WithURL("https://github.com/bombsimon/wsl"), linter.NewConfig(zerologlint.New()). 
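
The vendored updates above share one migration pattern from this golangci-lint bump: linter wrappers (prealloc, unparam, protogetter, tagalign, whitespace) drop their mutex-guarded issue slices and WithIssuesReporter callbacks and instead report diagnostics directly on the analysis pass, while builder_linter.go marks many linters WithAutoFix() because fixes now flow through go/analysis. A minimal sketch of the new reporting style, assuming a hypothetical `finding` type — the real linters each use their own upstream result types:

package example

import (
	"fmt"
	"go/token"

	"golang.org/x/tools/go/analysis"
)

// finding is a stand-in for an upstream linter's result type
// (e.g. prealloc hints or unparam issues).
type finding struct {
	pos token.Pos
	msg string
}

// run reports each finding straight on the pass: no shared slice,
// no sync.Mutex, and no WithIssuesReporter callback are needed,
// because the goanalysis runner collects diagnostics itself.
func run(pass *analysis.Pass, findings []finding) (any, error) {
	for _, f := range findings {
		pass.Report(analysis.Diagnostic{
			Pos:     f.pos,
			Message: fmt.Sprintf("consider addressing: %s", f.msg),
		})
	}
	return nil, nil
}
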
diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go index 2c47c7166e..157fde715f 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go @@ -10,6 +10,7 @@ import ( "github.com/golangci/golangci-lint/internal/errorutil" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/goformatters" "github.com/golangci/golangci-lint/pkg/goutil" "github.com/golangci/golangci-lint/pkg/lint/linter" "github.com/golangci/golangci-lint/pkg/lint/lintersdb" @@ -60,6 +61,11 @@ func NewRunner(log logutils.Log, cfg *config.Config, args []string, goenv *gouti return nil, fmt.Errorf("failed to get enabled linters: %w", err) } + metaFormatter, err := goformatters.NewMetaFormatter(log, cfg, enabledLinters) + if err != nil { + return nil, fmt.Errorf("failed to create meta-formatter: %w", err) + } + return &Runner{ Processors: []processors.Processor{ processors.NewCgo(goenv), @@ -94,7 +100,7 @@ func NewRunner(log logutils.Log, cfg *config.Config, args []string, goenv *gouti processors.NewSeverity(log.Child(logutils.DebugKeySeverityRules), files, &cfg.Severity), // The fixer still needs to see paths for the issues that are relative to the current directory. - processors.NewFixer(cfg, log, fileCache), + processors.NewFixer(cfg, log, fileCache, metaFormatter), // Now we can modify the issues for output. processors.NewPathPrefixer(cfg.Output.PathPrefix), diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go index b65339682c..7c5932368f 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go @@ -12,7 +12,7 @@ const defaultCodeClimateSeverity = "critical" // CodeClimateIssue is a subset of the Code Climate spec. // https://github.com/codeclimate/platform/blob/master/spec/analyzers/SPEC.md#data-types // It is just enough to support GitLab CI Code Quality. 
-// https://docs.gitlab.com/ee/ci/testing/code_quality.html#implement-a-custom-tool +// https://docs.gitlab.com/ee/ci/testing/code_quality.html#code-quality-report-format type CodeClimateIssue struct { Description string `json:"description"` CheckName string `json:"check_name"` diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go index 1d1c9f7d32..83c4959114 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/printers/teamcity.go @@ -4,7 +4,6 @@ import ( "fmt" "io" "strings" - "unicode/utf8" "github.com/golangci/golangci-lint/pkg/result" ) @@ -112,11 +111,9 @@ func (i InspectionInstance) Print(w io.Writer, replacer *strings.Replacer) (int, } func cutVal(s string, limit int) string { - var size, count int - for i := 0; i < limit && count < len(s); i++ { - _, size = utf8.DecodeRuneInString(s[count:]) - count += size + runes := []rune(s) + if len(runes) > limit { + return string(runes[:limit]) } - - return s[:count] + return s } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go index 32246a6df4..e338963fa3 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go @@ -5,6 +5,7 @@ import ( "fmt" "go/token" + "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" ) @@ -12,18 +13,6 @@ type Range struct { From, To int } -type Replacement struct { - NeedOnlyDelete bool // need to delete all lines of the issue without replacement with new lines - NewLines []string // if NeedDelete is false it's the replacement lines - Inline *InlineFix -} - -type InlineFix struct { - StartCol int // zero-based - Length int // length of chunk to be replaced - NewString string -} - type Issue struct { FromLinter string Text string @@ -33,19 +22,19 @@ type Issue struct { // Source lines of a code with the issue to show SourceLines []string - // If we know how to fix the issue we can provide replacement lines - Replacement *Replacement - // Pkg is needed for proper caching of linting results Pkg *packages.Package `json:"-"` - LineRange *Range `json:",omitempty"` - Pos token.Position + LineRange *Range `json:",omitempty"` + // HunkPos is used only when golangci-lint is run over a diff HunkPos int `json:",omitempty"` + // If we know how to fix the issue we can provide replacement lines + SuggestedFixes []analysis.SuggestedFix `json:",omitempty"` + // If we are expecting a nolint (because this is from nolintlint), record the expected linter ExpectNoLint bool ExpectedNoLintLinter string diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go index 764af5a923..b82c7f2071 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go @@ -1,16 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// +// This file is inspired by go/analysis/internal/checker/checker.go + package processors import ( - "bytes" + "errors" "fmt" "os" - "path/filepath" - "sort" - "strings" + "slices" + + "golang.org/x/exp/maps" - "github.com/golangci/golangci-lint/internal/go/robustio" + "github.com/golangci/golangci-lint/internal/x/tools/diff" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" + "github.com/golangci/golangci-lint/pkg/goformatters" + "github.com/golangci/golangci-lint/pkg/goformatters/gci" + "github.com/golangci/golangci-lint/pkg/goformatters/gofmt" + "github.com/golangci/golangci-lint/pkg/goformatters/gofumpt" + "github.com/golangci/golangci-lint/pkg/goformatters/goimports" "github.com/golangci/golangci-lint/pkg/logutils" "github.com/golangci/golangci-lint/pkg/result" "github.com/golangci/golangci-lint/pkg/timeutils" @@ -18,19 +29,23 @@ import ( var _ Processor = (*Fixer)(nil) +const filePerm = 0644 + type Fixer struct { cfg *config.Config log logutils.Log fileCache *fsutils.FileCache sw *timeutils.Stopwatch + formatter *goformatters.MetaFormatter } -func NewFixer(cfg *config.Config, log logutils.Log, fileCache *fsutils.FileCache) *Fixer { +func NewFixer(cfg *config.Config, log logutils.Log, fileCache *fsutils.FileCache, formatter *goformatters.MetaFormatter) *Fixer { return &Fixer{ cfg: cfg, log: log, fileCache: fileCache, sw: timeutils.NewStopwatch("fixer", log), + formatter: formatter, } } @@ -43,218 +58,246 @@ func (p Fixer) Process(issues []result.Issue) ([]result.Issue, error) { return issues, nil } - outIssues := make([]result.Issue, 0, len(issues)) - issuesToFixPerFile := map[string][]result.Issue{} - for i := range issues { - issue := &issues[i] - if issue.Replacement == nil { - outIssues = append(outIssues, *issue) - continue - } + p.log.Infof("Applying suggested fixes") - issuesToFixPerFile[issue.FilePath()] = append(issuesToFixPerFile[issue.FilePath()], *issue) - } - - for file, issuesToFix := range issuesToFixPerFile { - err := p.sw.TrackStageErr("all", func() error { - return p.fixIssuesInFile(file, issuesToFix) - }) - if err != nil { - p.log.Errorf("Failed to fix issues in file %s: %s", file, err) - - // show issues only if can't fix them - outIssues = append(outIssues, issuesToFix...) - } + notFixableIssues, err := timeutils.TrackStage(p.sw, "all", func() ([]result.Issue, error) { + return p.process(issues) + }) + if err != nil { + p.log.Warnf("Failed to fix issues: %v", err) } p.printStat() - return outIssues, nil + return notFixableIssues, nil } -func (Fixer) Finish() {} +//nolint:funlen,gocyclo // This function should not be split. 
+func (p Fixer) process(issues []result.Issue) ([]result.Issue, error) { + // filenames / linters / edits + editsByLinter := make(map[string]map[string][]diff.Edit) -func (p Fixer) fixIssuesInFile(filePath string, issues []result.Issue) error { - // TODO: don't read the whole file into memory: read line by line; - // can't just use bufio.scanner: it has a line length limit - origFileData, err := p.fileCache.GetFileBytes(filePath) - if err != nil { - return fmt.Errorf("failed to get file bytes for %s: %w", filePath, err) - } + formatters := []string{gofumpt.Name, goimports.Name, gofmt.Name, gci.Name} - origFileLines := bytes.Split(origFileData, []byte("\n")) + var notFixableIssues []result.Issue - tmpFileName := filepath.Join(filepath.Dir(filePath), fmt.Sprintf(".%s.golangci_fix", filepath.Base(filePath))) + toBeFormattedFiles := make(map[string]struct{}) - tmpOutFile, err := os.Create(tmpFileName) - if err != nil { - return fmt.Errorf("failed to make file %s: %w", tmpFileName, err) - } - - // merge multiple issues per line into one issue - issuesPerLine := map[int][]result.Issue{} for i := range issues { - issue := &issues[i] - issuesPerLine[issue.Line()] = append(issuesPerLine[issue.Line()], *issue) - } + issue := issues[i] - issues = issues[:0] // reuse the same memory - for line, lineIssues := range issuesPerLine { - if mergedIssue := p.mergeLineIssues(line, lineIssues, origFileLines); mergedIssue != nil { - issues = append(issues, *mergedIssue) + if slices.Contains(formatters, issue.FromLinter) { + toBeFormattedFiles[issue.FilePath()] = struct{}{} + continue } - } - issues = p.findNotIntersectingIssues(issues) + if issue.SuggestedFixes == nil || skipNoTextEdit(&issue) { + notFixableIssues = append(notFixableIssues, issue) + continue + } - if err = p.writeFixedFile(origFileLines, issues, tmpOutFile); err != nil { - tmpOutFile.Close() - _ = robustio.RemoveAll(tmpOutFile.Name()) - return err + for _, sf := range issue.SuggestedFixes { + for _, edit := range sf.TextEdits { + start, end := edit.Pos, edit.End + if start > end { + return nil, fmt.Errorf("%q suggests invalid fix: pos (%v) > end (%v)", + issue.FromLinter, edit.Pos, edit.End) + } + + edit := diff.Edit{ + Start: int(start), + End: int(end), + New: string(edit.NewText), + } + + if _, ok := editsByLinter[issue.FilePath()]; !ok { + editsByLinter[issue.FilePath()] = make(map[string][]diff.Edit) + } + + editsByLinter[issue.FilePath()][issue.FromLinter] = append(editsByLinter[issue.FilePath()][issue.FromLinter], edit) + } + } } - tmpOutFile.Close() + // Validate and group the edits to each actual file. + editsByPath := make(map[string][]diff.Edit) + for path, linterToEdits := range editsByLinter { + excludedLinters := make(map[string]struct{}) - if err = robustio.Rename(tmpOutFile.Name(), filePath); err != nil { - _ = robustio.RemoveAll(tmpOutFile.Name()) - return fmt.Errorf("failed to rename %s -> %s: %w", tmpOutFile.Name(), filePath, err) - } + linters := maps.Keys(linterToEdits) - return nil -} + // Does any linter create conflicting edits? + for _, linter := range linters { + edits := linterToEdits[linter] + if _, invalid := validateEdits(edits); invalid > 0 { + name, x, y := linter, edits[invalid-1], edits[invalid] + excludedLinters[name] = struct{}{} -func (p Fixer) mergeLineIssues(lineNum int, lineIssues []result.Issue, origFileLines [][]byte) *result.Issue { - origLine := origFileLines[lineNum-1] // lineNum is 1-based + err := diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y}) + // TODO(ldez) TUI? 
+ p.log.Warnf("Changes related to %q are skipped for the file %q: %v", + name, path, err) + } + } - if len(lineIssues) == 1 && lineIssues[0].Replacement.Inline == nil { - return &lineIssues[0] - } + // Does any pair of different linters create edits that conflict? + for j := range linters { + for k := range linters[:j] { + x, y := linters[j], linters[k] + if x > y { + x, y = y, x + } - // check issues first - for ind := range lineIssues { - li := &lineIssues[ind] + _, foundX := excludedLinters[x] + _, foundY := excludedLinters[y] + if foundX || foundY { + continue + } - if li.LineRange != nil { - p.log.Infof("Line %d has multiple issues but at least one of them is ranged: %#v", lineNum, lineIssues) - return &lineIssues[0] - } + xedits, yedits := linterToEdits[x], linterToEdits[y] - inline := li.Replacement.Inline + combined := slices.Concat(xedits, yedits) - if inline == nil || len(li.Replacement.NewLines) != 0 || li.Replacement.NeedOnlyDelete { - p.log.Infof("Line %d has multiple issues but at least one of them isn't inline: %#v", lineNum, lineIssues) - return li + if _, invalid := validateEdits(combined); invalid > 0 { + excludedLinters[x] = struct{}{} + p.log.Warnf("Changes related to %q are skipped for the file %q due to conflicts with %q.", x, path, y) + } + } } - if inline.StartCol < 0 || inline.Length <= 0 || inline.StartCol+inline.Length > len(origLine) { - p.log.Warnf("Line %d (%q) has invalid inline fix: %#v, %#v", lineNum, origLine, li, inline) - return nil + var edits []diff.Edit + for linter := range linterToEdits { + if _, found := excludedLinters[linter]; !found { + edits = append(edits, linterToEdits[linter]...) + } } - } - return p.applyInlineFixes(lineIssues, origLine, lineNum) -} + editsByPath[path], _ = validateEdits(edits) // remove duplicates. already validated. + } -func (p Fixer) applyInlineFixes(lineIssues []result.Issue, origLine []byte, lineNum int) *result.Issue { - sort.Slice(lineIssues, func(i, j int) bool { - return lineIssues[i].Replacement.Inline.StartCol < lineIssues[j].Replacement.Inline.StartCol - }) + var editError error - var newLineBuf bytes.Buffer - newLineBuf.Grow(len(origLine)) + var formattedFiles []string - //nolint:misspell // misspelling is intentional - // example: origLine="it's becouse of them", StartCol=5, Length=7, NewString="because" + // Now we've got a set of valid edits for each file. Apply them. + for path, edits := range editsByPath { + contents, err := p.fileCache.GetFileBytes(path) + if err != nil { + editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err)) + continue + } - curOrigLinePos := 0 - for i := range lineIssues { - fix := lineIssues[i].Replacement.Inline - if fix.StartCol < curOrigLinePos { - p.log.Warnf("Line %d has multiple intersecting issues: %#v", lineNum, lineIssues) - return nil + out, err := diff.ApplyBytes(contents, edits) + if err != nil { + editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err)) + continue } - if curOrigLinePos != fix.StartCol { - newLineBuf.Write(origLine[curOrigLinePos:fix.StartCol]) + // Try to format the file. 
+ out = p.formatter.Format(path, out) + + if err := os.WriteFile(path, out, filePerm); err != nil { + editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err)) + continue } - newLineBuf.WriteString(fix.NewString) - curOrigLinePos = fix.StartCol + fix.Length - } - if curOrigLinePos != len(origLine) { - newLineBuf.Write(origLine[curOrigLinePos:]) + + formattedFiles = append(formattedFiles, path) } - mergedIssue := lineIssues[0] // use text from the first issue (it's not really used) - mergedIssue.Replacement = &result.Replacement{ - NewLines: []string{newLineBuf.String()}, + for path := range toBeFormattedFiles { + // Skips files already formatted by the previous fix step. + if !slices.Contains(formattedFiles, path) { + content, err := p.fileCache.GetFileBytes(path) + if err != nil { + p.log.Warnf("Error reading file %s: %v", path, err) + continue + } + + out := p.formatter.Format(path, content) + + if err := os.WriteFile(path, out, filePerm); err != nil { + editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err)) + continue + } + } } - return &mergedIssue + + return notFixableIssues, editError } -func (p Fixer) findNotIntersectingIssues(issues []result.Issue) []result.Issue { - sort.SliceStable(issues, func(i, j int) bool { - a, b := issues[i], issues[j] - return a.Line() < b.Line() - }) +func (Fixer) Finish() {} - var ret []result.Issue - var currentEnd int - for i := range issues { - issue := &issues[i] - rng := issue.GetLineRange() - if rng.From <= currentEnd { - p.log.Infof("Skip issue %#v: intersects with end %d", issue, currentEnd) - continue // skip intersecting issue +func (p Fixer) printStat() { + p.sw.PrintStages() +} + +func skipNoTextEdit(issue *result.Issue) bool { + var onlyMessage int + for _, sf := range issue.SuggestedFixes { + if len(sf.TextEdits) == 0 { + onlyMessage++ } - p.log.Infof("Fix issue %#v with range %v", issue, issue.GetLineRange()) - ret = append(ret, *issue) - currentEnd = rng.To } - return ret + return len(issue.SuggestedFixes) == onlyMessage } -func (p Fixer) writeFixedFile(origFileLines [][]byte, issues []result.Issue, tmpOutFile *os.File) error { - // issues aren't intersecting +// validateEdits returns a list of edits that is sorted and +// contains no duplicate edits. Returns the index of some +// overlapping adjacent edits if there is one and <0 if the +// edits are valid. +// +//nolint:gocritic // Copy of go/analysis/internal/checker/checker.go +func validateEdits(edits []diff.Edit) ([]diff.Edit, int) { + if len(edits) == 0 { + return nil, -1 + } - nextIssueIndex := 0 - for i := 0; i < len(origFileLines); i++ { - var outLine string - var nextIssue *result.Issue - if nextIssueIndex != len(issues) { - nextIssue = &issues[nextIssueIndex] - } + equivalent := func(x, y diff.Edit) bool { + return x.Start == y.Start && x.End == y.End && x.New == y.New + } - origFileLineNumber := i + 1 - if nextIssue == nil || origFileLineNumber != nextIssue.GetLineRange().From { - outLine = string(origFileLines[i]) - } else { - nextIssueIndex++ - rng := nextIssue.GetLineRange() - if rng.From > rng.To { - // Maybe better decision is to skip such issues, re-evaluate if regressed. 
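[editor's note] The write-back loop above accumulates per-file failures with `errors.Join` (Go 1.20+) and continues, so one unwritable file does not block fixes to the rest. A small sketch of that accumulate-and-continue pattern; the paths are deliberately unwritable so both iterations fail:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	var editError error

	for _, path := range []string{"/no/such/dir/a.go", "/no/such/dir/b.go"} {
		if err := os.WriteFile(path, []byte("fixed"), 0o644); err != nil {
			// Collect the error and keep going instead of returning early.
			editError = errors.Join(editError, fmt.Errorf("%s: %w", path, err))
			continue
		}
	}

	fmt.Println(editError) // both failures, one per line
}
```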
- p.log.Warnf("[fixer]: issue line range is probably invalid, fix can be incorrect (from=%d, to=%d, linter=%s)", - rng.From, rng.To, nextIssue.FromLinter, - ) - } - i += rng.To - rng.From - if nextIssue.Replacement.NeedOnlyDelete { - continue + diff.SortEdits(edits) + + unique := []diff.Edit{edits[0]} + + invalid := -1 + + for i := 1; i < len(edits); i++ { + prev, cur := edits[i-1], edits[i] + // We skip over equivalent edits without considering them + // an error. This handles identical edits coming from the + // multiple ways of loading a package into a + // *go/packages.Packages for testing, e.g. packages "p" and "p [p.test]". + if !equivalent(prev, cur) { + unique = append(unique, cur) + if prev.End > cur.Start { + invalid = i } - outLine = strings.Join(nextIssue.Replacement.NewLines, "\n") } + } + return unique, invalid +} - if i < len(origFileLines)-1 { - outLine += "\n" - } - if _, err := tmpOutFile.WriteString(outLine); err != nil { - return fmt.Errorf("failed to write output line: %w", err) - } +// diff3Conflict returns an error describing two conflicting sets of +// edits on a file at path. +// Copy of go/analysis/internal/checker/checker.go +func diff3Conflict(path, xlabel, ylabel string, xedits, yedits []diff.Edit) error { + contents, err := os.ReadFile(path) + if err != nil { + return err } + oldlabel, old := "base", string(contents) - return nil -} + xdiff, err := diff.ToUnified(oldlabel, xlabel, old, xedits, diff.DefaultContextLines) + if err != nil { + return err + } + ydiff, err := diff.ToUnified(oldlabel, ylabel, old, yedits, diff.DefaultContextLines) + if err != nil { + return err + } -func (p Fixer) printStat() { - p.sw.PrintStages() + return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s", + xlabel, ylabel, path, xdiff, ydiff) } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go index 0680c3f296..690cdf3f8a 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_from_linter.go @@ -34,7 +34,7 @@ func (p *MaxFromLinter) Process(issues []result.Issue) ([]result.Issue, error) { } return filterIssuesUnsafe(issues, func(issue *result.Issue) bool { - if issue.Replacement != nil && p.cfg.Issues.NeedFix { + if issue.SuggestedFixes != nil && p.cfg.Issues.NeedFix { // we need to fix all issues at once => we need to return all of them return true } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go index 1647cace09..0d1c286282 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/max_same_issues.go @@ -36,7 +36,7 @@ func (p *MaxSameIssues) Process(issues []result.Issue) ([]result.Issue, error) { } return filterIssuesUnsafe(issues, func(issue *result.Issue) bool { - if issue.Replacement != nil && p.cfg.Issues.NeedFix { + if issue.SuggestedFixes != nil && p.cfg.Issues.NeedFix { // we need to fix all issues at once => we need to return all of them return true } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go 
b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go index 93a26586d6..fcd65326f9 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/severity.go @@ -1,6 +1,7 @@ package processors import ( + "cmp" "regexp" "github.com/golangci/golangci-lint/pkg/config" @@ -67,10 +68,7 @@ func (p *Severity) transform(issue *result.Issue) *result.Issue { return issue } - issue.Severity = rule.severity - if issue.Severity == "" { - issue.Severity = p.defaultSeverity - } + issue.Severity = cmp.Or(rule.severity, p.defaultSeverity) return issue } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go index 4da73c72ea..7eebea6315 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/sort_results.go @@ -1,10 +1,9 @@ package processors import ( - "errors" + "cmp" "fmt" "slices" - "sort" "strings" "github.com/golangci/golangci-lint/pkg/config" @@ -22,24 +21,32 @@ const ( orderNameSeverity = "severity" ) +const ( + less = iota - 1 + equal + greater +) + var _ Processor = (*SortResults)(nil) +type issueComparator func(a, b *result.Issue) int + type SortResults struct { - cmps map[string]*comparator + cmps map[string][]issueComparator cfg *config.Output } func NewSortResults(cfg *config.Config) *SortResults { return &SortResults{ - cmps: map[string]*comparator{ + cmps: map[string][]issueComparator{ // For sorting we are comparing (in next order): // file names, line numbers, position, and finally - giving up. - orderNameFile: byFileName().SetNext(byLine().SetNext(byColumn())), + orderNameFile: {byFileName, byLine, byColumn}, // For sorting we are comparing: linter name - orderNameLinter: byLinter(), + orderNameLinter: {byLinter}, // For sorting we are comparing: severity - orderNameSeverity: bySeverity(), + orderNameSeverity: {bySeverity}, }, cfg: &cfg.Output, } @@ -57,23 +64,21 @@ func (p SortResults) Process(issues []result.Issue) ([]result.Issue, error) { p.cfg.SortOrder = []string{orderNameFile} } - var cmps []*comparator + var cmps []issueComparator + for _, name := range p.cfg.SortOrder { c, ok := p.cmps[name] if !ok { return nil, fmt.Errorf("unsupported sort-order name %q", name) } - cmps = append(cmps, c) + cmps = append(cmps, c...) } - cmp, err := mergeComparators(cmps) - if err != nil { - return nil, err - } + comp := mergeComparators(cmps...) - sort.Slice(issues, func(i, j int) bool { - return cmp.Compare(&issues[i], &issues[j]) == less + slices.SortFunc(issues, func(a, b result.Issue) int { + return comp(&a, &b) }) return issues, nil @@ -81,147 +86,32 @@ func (p SortResults) Process(issues []result.Issue) ([]result.Issue, error) { func (SortResults) Finish() {} -type compareResult int - -const ( - less compareResult = iota - 1 - equal - greater - none -) - -func (c compareResult) isNeutral() bool { - // return true if compare result is incomparable or equal. - return c == none || c == equal -} - -func (c compareResult) String() string { - switch c { - case less: - return "less" - case equal: - return "equal" - case greater: - return "greater" - default: - return "none" - } -} - -// comparator describes how to implement compare for two "issues". 
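[editor's note] The `severity.go` hunk above is a behavior-preserving simplification: `cmp.Or` (Go 1.22+) returns the first of its arguments that is not the zero value, so an empty rule severity falls back to the default exactly as the removed if-statement did. A quick illustration:

```go
package main

import (
	"cmp"
	"fmt"
)

func main() {
	defaultSeverity := "medium"

	fmt.Println(cmp.Or("", defaultSeverity))     // "medium": empty string is the zero value
	fmt.Println(cmp.Or("high", defaultSeverity)) // "high": first non-zero value wins
}
```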
-type comparator struct { - name string - compare func(a, b *result.Issue) compareResult - next *comparator -} - -func (cmp *comparator) Next() *comparator { return cmp.next } - -func (cmp *comparator) SetNext(c *comparator) *comparator { - cmp.next = c - return cmp +func byFileName(a, b *result.Issue) int { + return strings.Compare(a.FilePath(), b.FilePath()) } -func (cmp *comparator) String() string { - s := cmp.name - if cmp.Next() != nil { - s += " > " + cmp.Next().String() - } - - return s +func byLine(a, b *result.Issue) int { + return numericCompare(a.Line(), b.Line()) } -func (cmp *comparator) Compare(a, b *result.Issue) compareResult { - res := cmp.compare(a, b) - if !res.isNeutral() { - return res - } - - if next := cmp.Next(); next != nil { - return next.Compare(a, b) - } - - return res +func byColumn(a, b *result.Issue) int { + return numericCompare(a.Column(), b.Column()) } -func byFileName() *comparator { - return &comparator{ - name: "byFileName", - compare: func(a, b *result.Issue) compareResult { - return compareResult(strings.Compare(a.FilePath(), b.FilePath())) - }, - } +func byLinter(a, b *result.Issue) int { + return strings.Compare(a.FromLinter, b.FromLinter) } -func byLine() *comparator { - return &comparator{ - name: "byLine", - compare: func(a, b *result.Issue) compareResult { - return numericCompare(a.Line(), b.Line()) - }, - } +func bySeverity(a, b *result.Issue) int { + return severityCompare(a.Severity, b.Severity) } -func byColumn() *comparator { - return &comparator{ - name: "byColumn", - compare: func(a, b *result.Issue) compareResult { - return numericCompare(a.Column(), b.Column()) - }, - } -} - -func byLinter() *comparator { - return &comparator{ - name: "byLinter", - compare: func(a, b *result.Issue) compareResult { - return compareResult(strings.Compare(a.FromLinter, b.FromLinter)) - }, - } -} - -func bySeverity() *comparator { - return &comparator{ - name: "bySeverity", - compare: func(a, b *result.Issue) compareResult { - return severityCompare(a.Severity, b.Severity) - }, - } -} - -func mergeComparators(cmps []*comparator) (*comparator, error) { - if len(cmps) == 0 { - return nil, errors.New("no comparator") - } - - for i := range len(cmps) - 1 { - findComparatorTip(cmps[i]).SetNext(cmps[i+1]) - } - - return cmps[0], nil -} - -func findComparatorTip(cmp *comparator) *comparator { - if cmp.Next() != nil { - return findComparatorTip(cmp.Next()) - } - - return cmp -} - -func severityCompare(a, b string) compareResult { +func severityCompare(a, b string) int { // The position inside the slice defines the importance (lower to higher).
classic := []string{"low", "medium", "high", "warning", "error"} if slices.Contains(classic, a) && slices.Contains(classic, b) { - switch { - case slices.Index(classic, a) > slices.Index(classic, b): - return greater - case slices.Index(classic, a) < slices.Index(classic, b): - return less - default: - return equal - } + return cmp.Compare(slices.Index(classic, a), slices.Index(classic, b)) } if slices.Contains(classic, a) { @@ -232,28 +122,27 @@ func severityCompare(a, b string) compareResult { return less } - return compareResult(strings.Compare(a, b)) + return strings.Compare(a, b) } -func numericCompare(a, b int) compareResult { - var ( - isValuesInvalid = a < 0 || b < 0 - isZeroValuesBoth = a == 0 && b == 0 - isEqual = a == b - isZeroValueInA = b > 0 && a == 0 - isZeroValueInB = a > 0 && b == 0 - ) - - switch { - case isZeroValuesBoth || isEqual: +func numericCompare(a, b int) int { + // Negative values and zeros are skipped (equal) because they are either invalid or "neutral" (default int value). if a <= 0 || b <= 0 { return equal - case isValuesInvalid || isZeroValueInA || isZeroValueInB: - return none - case a > b: - return greater - case a < b: - return less } - return equal + return cmp.Compare(a, b) +} + +func mergeComparators(comps ...issueComparator) issueComparator { + return func(a, b *result.Issue) int { + for _, comp := range comps { + i := comp(a, b) + if i != equal { + return i + } + } + + return equal + } } diff --git a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go index 115196d9a7..ec134f25f6 100644 --- a/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go +++ b/hack/tools/vendor/github.com/golangci/golangci-lint/pkg/result/processors/uniq_by_line.go @@ -26,7 +26,7 @@ func (*UniqByLine) Name() string { } func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) { - if !p.cfg.Output.UniqByLine { + if !p.cfg.Issues.UniqByLine { return issues, nil } @@ -36,7 +36,7 @@ func (p *UniqByLine) Process(issues []result.Issue) ([]result.Issue, error) { func (*UniqByLine) Finish() {} func (p *UniqByLine) shouldPassIssue(issue *result.Issue) bool { - if issue.Replacement != nil && p.cfg.Issues.NeedFix { + if issue.SuggestedFixes != nil && p.cfg.Issues.NeedFix { // if issue will be auto-fixed we shouldn't collapse issues: // e.g. one line can contain 2 misspellings, they will be in 2 issues and misspell should fix both of them.
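[editor's note] The `sort_results.go` rewrite above drops the linked-list `comparator` type in favor of plain functions returning an `int` (negative/zero/positive, as in `cmp.Compare`) that are chained until one is decisive and then fed to `slices.SortFunc`. A self-contained sketch of that composition with a simplified, hypothetical issue type:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
	"strings"
)

type issue struct {
	file string
	line int
}

type comparator func(a, b *issue) int

// merge chains comparators: the first non-equal result decides the order.
func merge(comps ...comparator) comparator {
	return func(a, b *issue) int {
		for _, c := range comps {
			if r := c(a, b); r != 0 {
				return r
			}
		}
		return 0
	}
}

func main() {
	byFile := func(a, b *issue) int { return strings.Compare(a.file, b.file) }
	byLine := func(a, b *issue) int { return cmp.Compare(a.line, b.line) }

	issues := []issue{{"b.go", 1}, {"a.go", 9}, {"a.go", 2}}
	comp := merge(byFile, byLine)
	slices.SortFunc(issues, func(a, b issue) int { return comp(&a, &b) })
	fmt.Println(issues) // [{a.go 2} {a.go 9} {b.go 1}]
}
```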
return true diff --git a/hack/tools/vendor/github.com/golangci/modinfo/.gitignore b/hack/tools/vendor/github.com/golangci/modinfo/.gitignore deleted file mode 100644 index 9f11b755a1..0000000000 --- a/hack/tools/vendor/github.com/golangci/modinfo/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.idea/ diff --git a/hack/tools/vendor/github.com/golangci/modinfo/.golangci.yml b/hack/tools/vendor/github.com/golangci/modinfo/.golangci.yml deleted file mode 100644 index 9698182f2a..0000000000 --- a/hack/tools/vendor/github.com/golangci/modinfo/.golangci.yml +++ /dev/null @@ -1,157 +0,0 @@ -run: - timeout: 7m - -linters-settings: - govet: - enable: - - shadow - gocyclo: - min-complexity: 12 - goconst: - min-len: 3 - min-occurrences: 3 - funlen: - lines: -1 - statements: 50 - misspell: - locale: US - depguard: - rules: - main: - deny: - - pkg: "github.com/instana/testify" - desc: not allowed - - pkg: "github.com/pkg/errors" - desc: Should be replaced by standard lib errors package - tagalign: - align: false - order: - - xml - - json - - yaml - - yml - - toml - - mapstructure - - url - godox: - keywords: - - FIXME - gocritic: - enabled-tags: - - diagnostic - - style - - performance - disabled-checks: - - paramTypeCombine # already handle by gofumpt.extra-rules - - whyNoLint # already handle by nonolint - - unnamedResult - - hugeParam - - sloppyReassign - - rangeValCopy - - octalLiteral - - ptrToRefParam - - appendAssign - - ruleguard - - httpNoBody - - exposedSyncMutex - revive: - rules: - - name: struct-tag - - name: blank-imports - - name: context-as-argument - - name: context-keys-type - - name: dot-imports - - name: error-return - - name: error-strings - - name: error-naming - - name: exported - disabled: true - - name: if-return - - name: increment-decrement - - name: var-naming - - name: var-declaration - - name: package-comments - disabled: true - - name: range - - name: receiver-naming - - name: time-naming - - name: unexported-return - - name: indent-error-flow - - name: errorf - - name: empty-block - - name: superfluous-else - - name: unused-parameter - disabled: true - - name: unreachable-code - - name: redefines-builtin-id - - tagliatelle: - case: - rules: - json: pascal - yaml: camel - xml: camel - header: header - mapstructure: camel - env: upperSnake - envconfig: upperSnake - -linters: - enable-all: true - disable: - - deadcode # deprecated - - exhaustivestruct # deprecated - - golint # deprecated - - ifshort # deprecated - - interfacer # deprecated - - maligned # deprecated - - nosnakecase # deprecated - - scopelint # deprecated - - structcheck # deprecated - - varcheck # deprecated - - cyclop # duplicate of gocyclo - - sqlclosecheck # not relevant (SQL) - - rowserrcheck # not relevant (SQL) - - execinquery # not relevant (SQL) - - lll - - gosec - - dupl # not relevant - - prealloc # too many false-positive - - bodyclose # too many false-positive - - gomnd - - testpackage # not relevant - - tparallel # not relevant - - paralleltest # not relevant - - nestif # too many false-positive - - wrapcheck - - goerr113 # not relevant - - nlreturn # not relevant - - wsl # not relevant - - exhaustive # not relevant - - exhaustruct # not relevant - - makezero # not relevant - - forbidigo - - varnamelen # not relevant - - nilnil # not relevant - - ireturn # not relevant - - contextcheck # too many false-positive - - tenv # we already have a test "framework" to handle env vars - - noctx - - errchkjson - - nonamedreturns - - gosmopolitan # not relevant - - gochecknoglobals - -issues: - exclude-use-default: 
false - max-issues-per-linter: 0 - max-same-issues: 0 - exclude: - - 'Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked' - - 'ST1000: at least one file in a package should have a package comment' - exclude-rules: - - path: (.+)_test.go - linters: - - funlen - - goconst - - maintidx diff --git a/hack/tools/vendor/github.com/golangci/modinfo/LICENSE b/hack/tools/vendor/github.com/golangci/modinfo/LICENSE deleted file mode 100644 index f288702d2f..0000000000 --- a/hack/tools/vendor/github.com/golangci/modinfo/LICENSE +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. 
If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. 
A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. 
- - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. diff --git a/hack/tools/vendor/github.com/golangci/modinfo/Makefile b/hack/tools/vendor/github.com/golangci/modinfo/Makefile deleted file mode 100644 index df91018f11..0000000000 --- a/hack/tools/vendor/github.com/golangci/modinfo/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -.PHONY: clean check test - -default: clean check test - -clean: - rm -rf dist/ cover.out - -test: clean - go test -v -cover ./... 
- -check: - golangci-lint run diff --git a/hack/tools/vendor/github.com/golangci/modinfo/module.go b/hack/tools/vendor/github.com/golangci/modinfo/module.go deleted file mode 100644 index ff0b21b9b8..0000000000 --- a/hack/tools/vendor/github.com/golangci/modinfo/module.go +++ /dev/null @@ -1,157 +0,0 @@ -package modinfo - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "os" - "os/exec" - "path/filepath" - "reflect" - "sort" - "strings" - "sync" - - "golang.org/x/mod/modfile" - "golang.org/x/tools/go/analysis" -) - -type ModInfo struct { - Path string `json:"Path"` - Dir string `json:"Dir"` - GoMod string `json:"GoMod"` - GoVersion string `json:"GoVersion"` - Main bool `json:"Main"` -} - -var ( - once sync.Once - information []ModInfo - errInfo error -) - -var Analyzer = &analysis.Analyzer{ - Name: "modinfo", - Doc: "Module information", - URL: "https://github.com/golangci/modinfo", - Run: runOnce, - ResultType: reflect.TypeOf([]ModInfo(nil)), -} - -func runOnce(pass *analysis.Pass) (any, error) { - _, ok := os.LookupEnv("MODINFO_DEBUG_DISABLE_ONCE") - if ok { - return GetModuleInfo(pass) - } - - once.Do(func() { - information, errInfo = GetModuleInfo(pass) - }) - - return information, errInfo -} - -// GetModuleInfo gets modules information. -// Always returns 1 element except for workspace (returns all the modules of the workspace). -// Based on `go list -m -json` behavior. -func GetModuleInfo(pass *analysis.Pass) ([]ModInfo, error) { - // https://github.com/golang/go/issues/44753#issuecomment-790089020 - cmd := exec.Command("go", "list", "-m", "-json") - for _, file := range pass.Files { - name := pass.Fset.File(file.Pos()).Name() - if filepath.Ext(name) != ".go" { - continue - } - - cmd.Dir = filepath.Dir(name) - break - } - - out, err := cmd.Output() - if err != nil { - return nil, fmt.Errorf("command go list: %w: %s", err, string(out)) - } - - var infos []ModInfo - - for dec := json.NewDecoder(bytes.NewBuffer(out)); dec.More(); { - var v ModInfo - if err := dec.Decode(&v); err != nil { - return nil, fmt.Errorf("unmarshaling error: %w: %s", err, string(out)) - } - - if v.GoMod == "" { - return nil, errors.New("working directory is not part of a module") - } - - if !v.Main || v.Dir == "" { - continue - } - - infos = append(infos, v) - } - - if len(infos) == 0 { - return nil, errors.New("go.mod file not found") - } - - sort.Slice(infos, func(i, j int) bool { - return len(infos[i].Path) > len(infos[j].Path) - }) - - return infos, nil -} - -// FindModuleFromPass finds the module related to the files of the pass. -func FindModuleFromPass(pass *analysis.Pass) (ModInfo, error) { - infos, ok := pass.ResultOf[Analyzer].([]ModInfo) - if !ok { - return ModInfo{}, errors.New("no modinfo analyzer result") - } - - var name string - for _, file := range pass.Files { - f := pass.Fset.File(file.Pos()).Name() - if filepath.Ext(f) != ".go" { - continue - } - - name = f - break - } - - // no Go file found in analysis pass - if name == "" { - name, _ = os.Getwd() - } - - for _, info := range infos { - if !strings.HasPrefix(name, info.Dir) { - continue - } - return info, nil - } - - return ModInfo{}, errors.New("module information not found") -} - -// ReadModuleFileFromPass read the `go.mod` file from the pass result. -func ReadModuleFileFromPass(pass *analysis.Pass) (*modfile.File, error) { - info, err := FindModuleFromPass(pass) - if err != nil { - return nil, err - } - - return ReadModuleFile(info) -} - -// ReadModuleFile read the `go.mod` file. 
-func ReadModuleFile(info ModInfo) (*modfile.File, error) { - raw, err := os.ReadFile(info.GoMod) - if err != nil { - return nil, fmt.Errorf("reading go.mod file: %w", err) - } - - return modfile.Parse("go.mod", raw, nil) -} diff --git a/hack/tools/vendor/github.com/golangci/modinfo/readme.md b/hack/tools/vendor/github.com/golangci/modinfo/readme.md deleted file mode 100644 index 2175de8eb4..0000000000 --- a/hack/tools/vendor/github.com/golangci/modinfo/readme.md +++ /dev/null @@ -1,73 +0,0 @@ -# modinfo - -This module contains: -- an analyzer that returns module information. -- methods to find and read `go.mod` file - -## Examples - -```go -package main - -import ( - "fmt" - - "github.com/golangci/modinfo" - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" -) - -var Analyzer = &analysis.Analyzer{ - Name: "example", - Doc: "Example", - Run: func(pass *analysis.Pass) (interface{}, error) { - file, err := modinfo.ReadModuleFileFromPass(pass) - if err != nil { - return nil, err - } - - fmt.Println("go.mod", file) - - // TODO - - return nil, nil - }, - Requires: []*analysis.Analyzer{ - inspect.Analyzer, - modinfo.Analyzer, - }, -} -``` - -```go -package main - -import ( - "fmt" - - "github.com/golangci/modinfo" - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" -) - -var Analyzer = &analysis.Analyzer{ - Name: "example", - Doc: "Example", - Run: func(pass *analysis.Pass) (interface{}, error) { - info, err := modinfo.FindModuleFromPass(pass) - if err != nil { - return nil, err - } - - fmt.Println("Module", info.Dir) - - // TODO - - return nil, nil - }, - Requires: []*analysis.Analyzer{ - inspect.Analyzer, - modinfo.Analyzer, - }, -} -``` diff --git a/hack/tools/vendor/github.com/google/cel-go/LICENSE b/hack/tools/vendor/github.com/google/cel-go/LICENSE new file mode 100644 index 0000000000..2493ed2eb4 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/LICENSE @@ -0,0 +1,233 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +=========================================================================== +The common/types/pb/equal.go modification of proto.Equal logic +=========================================================================== +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/cel/BUILD.bazel new file mode 100644 index 0000000000..81549fb4c5 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/cel/BUILD.bazel @@ -0,0 +1,91 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "cel.go", + "decls.go", + "env.go", + "folding.go", + "io.go", + "inlining.go", + "library.go", + "macro.go", + "optimizer.go", + "options.go", + "program.go", + "validator.go", + ], + importpath = "github.com/google/cel-go/cel", + visibility = ["//visibility:public"], + deps = [ + "//checker:go_default_library", + "//checker/decls:go_default_library", + "//common:go_default_library", + "//common/ast:go_default_library", + "//common/containers:go_default_library", + "//common/decls:go_default_library", + "//common/functions:go_default_library", + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/stdlib:go_default_library", + "//common/types:go_default_library", + "//common/types/pb:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "//interpreter:go_default_library", + "//parser:go_default_library", + "@dev_cel_expr//:expr", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//reflect/protodesc:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", + "@org_golang_google_protobuf//reflect/protoregistry:go_default_library", + "@org_golang_google_protobuf//types/descriptorpb:go_default_library", + "@org_golang_google_protobuf//types/dynamicpb:go_default_library", + "@org_golang_google_protobuf//types/known/anypb:go_default_library", + "@org_golang_google_protobuf//types/known/durationpb:go_default_library", + "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "cel_example_test.go", + "cel_test.go", + "decls_test.go", + "env_test.go", + "folding_test.go", + "io_test.go", + "inlining_test.go", + "optimizer_test.go", + "validator_test.go", + ], + data = [ + "//cel/testdata:gen_test_fds", + ], + embed = [ + ":go_default_library", + ], + deps = [ + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "//ext:go_default_library", + "//test:go_default_library", + 
"//test/proto2pb:go_default_library", + "//test/proto3pb:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//encoding/prototext:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/cel.go b/hack/tools/vendor/github.com/google/cel-go/cel/cel.go new file mode 100644 index 0000000000..eb5a9f4cc5 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/cel/cel.go @@ -0,0 +1,19 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cel defines the top-level interface for the Common Expression Language (CEL). +// +// CEL is a non-Turing complete expression language designed to parse, check, and evaluate +// expressions against user-defined environments. +package cel diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/decls.go b/hack/tools/vendor/github.com/google/cel-go/cel/decls.go new file mode 100644 index 0000000000..4188060210 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/cel/decls.go @@ -0,0 +1,370 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "fmt" + + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/decls" + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + celpb "cel.dev/expr" + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +// Kind indicates a CEL type's kind which is used to differentiate quickly between simple and complex types. +type Kind = types.Kind + +const ( + // DynKind represents a dynamic type. This kind only exists at type-check time. + DynKind Kind = types.DynKind + + // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time. + AnyKind = types.AnyKind + + // BoolKind represents a boolean type. + BoolKind = types.BoolKind + + // BytesKind represents a bytes type. + BytesKind = types.BytesKind + + // DoubleKind represents a double type. + DoubleKind = types.DoubleKind + + // DurationKind represents a CEL duration type. + DurationKind = types.DurationKind + + // IntKind represents an integer type. 
+	IntKind = types.IntKind
+
+	// ListKind represents a list type.
+	ListKind = types.ListKind
+
+	// MapKind represents a map type.
+	MapKind = types.MapKind
+
+	// NullTypeKind represents a null type.
+	NullTypeKind = types.NullTypeKind
+
+	// OpaqueKind represents an abstract type which has no accessible fields.
+	OpaqueKind = types.OpaqueKind
+
+	// StringKind represents a string type.
+	StringKind = types.StringKind
+
+	// StructKind represents a structured object with typed fields.
+	StructKind = types.StructKind
+
+	// TimestampKind represents a CEL time type.
+	TimestampKind = types.TimestampKind
+
+	// TypeKind represents the CEL type.
+	TypeKind = types.TypeKind
+
+	// TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible.
+	TypeParamKind = types.TypeParamKind
+
+	// UintKind represents a uint type.
+	UintKind = types.UintKind
+)
+
+var (
+	// AnyType represents the google.protobuf.Any type.
+	AnyType = types.AnyType
+	// BoolType represents the bool type.
+	BoolType = types.BoolType
+	// BytesType represents the bytes type.
+	BytesType = types.BytesType
+	// DoubleType represents the double type.
+	DoubleType = types.DoubleType
+	// DurationType represents the CEL duration type.
+	DurationType = types.DurationType
+	// DynType represents a dynamic CEL type whose type will be determined at runtime from context.
+	DynType = types.DynType
+	// IntType represents the int type.
+	IntType = types.IntType
+	// NullType represents the type of a null value.
+	NullType = types.NullType
+	// StringType represents the string type.
+	StringType = types.StringType
+	// TimestampType represents the time type.
+	TimestampType = types.TimestampType
+	// TypeType represents a CEL type.
+	TypeType = types.TypeType
+	// UintType represents a uint type.
+	UintType = types.UintType
+
+	// function references for instantiating new types.
+
+	// ListType creates an instance of a list type value with the provided element type.
+	ListType = types.NewListType
+	// MapType creates an instance of a map type value with the provided key and value types.
+	MapType = types.NewMapType
+	// NullableType creates an instance of a nullable type with the provided wrapped type.
+	//
+	// Note: only primitive types are supported as wrapped types.
+	NullableType = types.NewNullableType
+	// OptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
+	OptionalType = types.NewOptionalType
+	// OpaqueType creates an abstract parameterized type with a given name.
+	OpaqueType = types.NewOpaqueType
+	// ObjectType creates a type reference to an externally defined type, e.g. a protobuf message type.
+	ObjectType = types.NewObjectType
+	// TypeParamType creates a parameterized type instance.
+	TypeParamType = types.NewTypeParamType
+)
+
+// Type holds a reference to a runtime type with an optional type-checked set of type parameters.
+type Type = types.Type
+
+// Constant creates an instance of an identifier declaration with a variable name, type, and value.
+func Constant(name string, t *Type, v ref.Val) EnvOption {
+	return func(e *Env) (*Env, error) {
+		e.variables = append(e.variables, decls.NewConstant(name, t, v))
+		return e, nil
+	}
+}
+
+// Variable creates an instance of a variable declaration with a variable name and type.
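+//
+// A minimal sketch of declaring variables and a constant while configuring an
+// environment; the names, types, and values below are illustrative only, and
+// types.Int comes from github.com/google/cel-go/common/types:
+//
+//	env, err := cel.NewEnv(
+//		cel.Variable("request.user", cel.StringType),
+//		cel.Variable("request.groups", cel.ListType(cel.StringType)),
+//		cel.Constant("max_groups", cel.IntType, types.Int(64)),
+//	)
+//	if err != nil {
+//		return err
+//	}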
+func Variable(name string, t *Type) EnvOption { + return func(e *Env) (*Env, error) { + e.variables = append(e.variables, decls.NewVariable(name, t)) + return e, nil + } +} + +// Function defines a function and overloads with optional singleton or per-overload bindings. +// +// Using Function is roughly equivalent to calling Declarations() to declare the function signatures +// and Functions() to define the function bindings, if they have been defined. Specifying the +// same function name more than once will result in the aggregation of the function overloads. If any +// signatures conflict between the existing and new function definition an error will be raised. +// However, if the signatures are identical and the overload ids are the same, the redefinition will +// be considered a no-op. +// +// One key difference with using Function() is that each FunctionDecl provided will handle dynamic +// dispatch based on the type-signatures of the overloads provided which means overload resolution at +// runtime is handled out of the box rather than via a custom binding for overload resolution via +// Functions(): +// +// - Overloads are searched in the order they are declared +// - Dynamic dispatch for lists and maps is limited by inspection of the list and map contents +// +// at runtime. Empty lists and maps will result in a 'default dispatch' +// +// - In the event that a default dispatch occurs, the first overload provided is the one invoked +// +// If you intend to use overloads which differentiate based on the key or element type of a list or +// map, consider using a generic function instead: e.g. func(list(T)) or func(map(K, V)) as this +// will allow your implementation to determine how best to handle dispatch and the default behavior +// for empty lists and maps whose contents cannot be inspected. +// +// For functions which use parameterized opaque types (abstract types), consider using a singleton +// function which is capable of inspecting the contents of the type and resolving the appropriate +// overload as CEL can only make inferences by type-name regarding such types. +func Function(name string, opts ...FunctionOpt) EnvOption { + return func(e *Env) (*Env, error) { + fn, err := decls.NewFunction(name, opts...) + if err != nil { + return nil, err + } + if existing, found := e.functions[fn.Name()]; found { + fn, err = existing.Merge(fn) + if err != nil { + return nil, err + } + } + e.functions[fn.Name()] = fn + return e, nil + } +} + +// FunctionOpt defines a functional option for configuring a function declaration. +type FunctionOpt = decls.FunctionOpt + +// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt { + return decls.SingletonUnaryBinding(fn, traits...) +} + +// SingletonBinaryImpl creates a singleton function definition to be used with all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +// +// Deprecated: use SingletonBinaryBinding +func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt { + return decls.SingletonBinaryBinding(fn, traits...) 
+} + +// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt { + return decls.SingletonBinaryBinding(fn, traits...) +} + +// SingletonFunctionImpl creates a singleton function definition to be used with all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +// +// Deprecated: use SingletonFunctionBinding +func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt { + return decls.SingletonFunctionBinding(fn, traits...) +} + +// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt { + return decls.SingletonFunctionBinding(fn, traits...) +} + +// DisableDeclaration disables the function signatures, effectively removing them from the type-check +// environment while preserving the runtime bindings. +func DisableDeclaration(value bool) FunctionOpt { + return decls.DisableDeclaration(value) +} + +// Overload defines a new global overload with an overload id, argument types, and result type. Through the +// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to +// be non-strict. +// +// Note: function bindings should be commonly configured with Overload instances whereas operand traits and +// strict-ness should be rare occurrences. +func Overload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt { + return decls.Overload(overloadID, args, resultType, opts...) +} + +// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types, +// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding, +// an operand trait, and to be non-strict. +// +// Note: function bindings should be commonly configured with Overload instances whereas operand traits and +// strict-ness should be rare occurrences. +func MemberOverload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt { + return decls.MemberOverload(overloadID, args, resultType, opts...) +} + +// OverloadOpt is a functional option for configuring a function overload. +type OverloadOpt = decls.OverloadOpt + +// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime +// type-guard which ensures runtime type agreement between the overload signature and runtime argument types. +func UnaryBinding(binding functions.UnaryOp) OverloadOpt { + return decls.UnaryBinding(binding) +} + +// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime +// type-guard which ensures runtime type agreement between the overload signature and runtime argument types. 
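+//
+// A minimal sketch of a custom function wired up through a binary binding; the
+// function name, overload id, and behavior here are illustrative only:
+//
+//	cel.Function("shake_hands",
+//		cel.Overload("shake_hands_string_string",
+//			[]*cel.Type{cel.StringType, cel.StringType},
+//			cel.StringType,
+//			cel.BinaryBinding(func(lhs, rhs ref.Val) ref.Val {
+//				return types.String(fmt.Sprintf("%v and %v are shaking hands", lhs, rhs))
+//			}),
+//		),
+//	)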
+func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
+	return decls.BinaryBinding(binding)
+}
+
+// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
+	return decls.FunctionBinding(binding)
+}
+
+// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
+//
+// Note: do not use this option unless absolutely necessary as it should be an uncommon feature.
+func OverloadIsNonStrict() OverloadOpt {
+	return decls.OverloadIsNonStrict()
+}
+
+// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be
+// successfully invoked.
+func OverloadOperandTrait(trait int) OverloadOpt {
+	return decls.OverloadOperandTrait(trait)
+}
+
+// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation.
+func TypeToExprType(t *Type) (*exprpb.Type, error) {
+	return types.TypeToExprType(t)
+}
+
+// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation.
+func ExprTypeToType(t *exprpb.Type) (*Type, error) {
+	return types.ExprTypeToType(t)
+}
+
+// ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native declaration, either a Variable or Function.
+func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) {
+	return AlphaProtoAsDeclaration(d)
+}
+
+// AlphaProtoAsDeclaration converts a v1alpha1.Decl value describing a variable or function into an EnvOption.
+func AlphaProtoAsDeclaration(d *exprpb.Decl) (EnvOption, error) {
+	canonical := &celpb.Decl{}
+	if err := convertProto(d, canonical); err != nil {
+		return nil, err
+	}
+	return ProtoAsDeclaration(canonical)
+}
+
+// ProtoAsDeclaration converts a canonical celpb.Decl value describing a variable or function into an EnvOption.
+func ProtoAsDeclaration(d *celpb.Decl) (EnvOption, error) {
+	switch d.GetDeclKind().(type) {
+	case *celpb.Decl_Function:
+		overloads := d.GetFunction().GetOverloads()
+		opts := make([]FunctionOpt, len(overloads))
+		for i, o := range overloads {
+			args := make([]*Type, len(o.GetParams()))
+			for j, p := range o.GetParams() {
+				a, err := types.ProtoAsType(p)
+				if err != nil {
+					return nil, err
+				}
+				args[j] = a
+			}
+			res, err := types.ProtoAsType(o.GetResultType())
+			if err != nil {
+				return nil, err
+			}
+			if o.IsInstanceFunction {
+				opts[i] = decls.MemberOverload(o.GetOverloadId(), args, res)
+			} else {
+				opts[i] = decls.Overload(o.GetOverloadId(), args, res)
+			}
+		}
+		return Function(d.GetName(), opts...), nil
+	case *celpb.Decl_Ident:
+		t, err := types.ProtoAsType(d.GetIdent().GetType())
+		if err != nil {
+			return nil, err
+		}
+		if d.GetIdent().GetValue() == nil {
+			return Variable(d.GetName(), t), nil
+		}
+		val, err := ast.ProtoConstantAsVal(d.GetIdent().GetValue())
+		if err != nil {
+			return nil, err
+		}
+		return Constant(d.GetName(), t, val), nil
+	default:
+		return nil, fmt.Errorf("unsupported decl: %v", d)
+	}
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/env.go b/hack/tools/vendor/github.com/google/cel-go/cel/env.go
new file mode 100644
index 0000000000..ab736b7769
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/env.go
@@ -0,0 +1,894 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+	"errors"
+	"sync"
+
+	"github.com/google/cel-go/checker"
+	chkdecls "github.com/google/cel-go/checker/decls"
+	"github.com/google/cel-go/common"
+	celast "github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/containers"
+	"github.com/google/cel-go/common/decls"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/interpreter"
+	"github.com/google/cel-go/parser"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Source interface representing a user-provided expression.
+type Source = common.Source
+
+// Ast representing the checked or unchecked expression, its source, and related metadata such as
+// source position information.
+type Ast struct {
+	source Source
+	impl   *celast.AST
+}
+
+// NativeRep converts the AST to a Go-native representation.
+func (ast *Ast) NativeRep() *celast.AST {
+	if ast == nil {
+		return nil
+	}
+	return ast.impl
+}
+
+// Expr returns the proto serializable instance of the parsed/checked expression.
+//
+// Deprecated: prefer cel.AstToCheckedExpr() or cel.AstToParsedExpr() and call GetExpr() on
+// the result instead.
+func (ast *Ast) Expr() *exprpb.Expr {
+	if ast == nil {
+		return nil
+	}
+	pbExpr, _ := celast.ExprToProto(ast.NativeRep().Expr())
+	return pbExpr
+}
+
+// IsChecked returns whether the Ast value has been successfully type-checked.
+func (ast *Ast) IsChecked() bool { + return ast.NativeRep().IsChecked() +} + +// SourceInfo returns character offset and newline position information about expression elements. +func (ast *Ast) SourceInfo() *exprpb.SourceInfo { + if ast == nil { + return nil + } + pbInfo, _ := celast.SourceInfoToProto(ast.NativeRep().SourceInfo()) + return pbInfo +} + +// ResultType returns the output type of the expression if the Ast has been type-checked, else +// returns chkdecls.Dyn as the parse step cannot infer the type. +// +// Deprecated: use OutputType +func (ast *Ast) ResultType() *exprpb.Type { + out := ast.OutputType() + t, err := TypeToExprType(out) + if err != nil { + return chkdecls.Dyn + } + return t +} + +// OutputType returns the output type of the expression if the Ast has been type-checked, else +// returns cel.DynType as the parse step cannot infer types. +func (ast *Ast) OutputType() *Type { + if ast == nil { + return types.ErrorType + } + return ast.NativeRep().GetType(ast.NativeRep().Expr().ID()) +} + +// Source returns a view of the input used to create the Ast. This source may be complete or +// constructed from the SourceInfo. +func (ast *Ast) Source() Source { + if ast == nil { + return nil + } + return ast.source +} + +// FormatType converts a type message into a string representation. +// +// Deprecated: prefer FormatCELType +func FormatType(t *exprpb.Type) string { + return checker.FormatCheckedType(t) +} + +// FormatCELType formats a cel.Type value to a string representation. +// +// The type formatting is identical to FormatType. +func FormatCELType(t *Type) string { + return checker.FormatCELType(t) +} + +// Env encapsulates the context necessary to perform parsing, type checking, or generation of +// evaluable programs for different expressions. +type Env struct { + Container *containers.Container + variables []*decls.VariableDecl + functions map[string]*decls.FunctionDecl + macros []parser.Macro + adapter types.Adapter + provider types.Provider + features map[int]bool + appliedFeatures map[int]bool + libraries map[string]bool + validators []ASTValidator + costOptions []checker.CostOption + + // Internal parser representation + prsr *parser.Parser + prsrOpts []parser.Option + + // Internal checker representation + chkMutex sync.Mutex + chk *checker.Env + chkErr error + chkOnce sync.Once + chkOpts []checker.Option + + // Program options tied to the environment + progOpts []ProgramOption +} + +// NewEnv creates a program environment configured with the standard library of CEL functions and +// macros. The Env value returned can parse and check any CEL program which builds upon the core +// features documented in the CEL specification. +// +// See the EnvOption helper functions for the options that can be used to configure the +// environment. +func NewEnv(opts ...EnvOption) (*Env, error) { + // Extend the statically configured standard environment, disabling eager validation to ensure + // the cost of setup for the environment is still just as cheap as it is in v0.11.x and earlier + // releases. The user provided options can easily re-enable the eager validation as they are + // processed after this default option. + stdOpts := append([]EnvOption{EagerlyValidateDeclarations(false)}, opts...) + env, err := getStdEnv() + if err != nil { + return nil, err + } + return env.Extend(stdOpts...) +} + +// NewCustomEnv creates a custom program environment which is not automatically configured with the +// standard library of functions and macros documented in the CEL spec. 
+//
+// The purpose for using a custom environment might be for subsetting the standard library produced
+// by the cel.StdLib() function. Subsetting CEL is a core aspect of its design that allows users to
+// limit the compute and memory impact of a CEL program by controlling the functions and macros
+// that may appear in a given expression.
+//
+// See the EnvOption helper functions for the options that can be used to configure the
+// environment.
+func NewCustomEnv(opts ...EnvOption) (*Env, error) {
+	registry, err := types.NewRegistry()
+	if err != nil {
+		return nil, err
+	}
+	return (&Env{
+		variables:       []*decls.VariableDecl{},
+		functions:       map[string]*decls.FunctionDecl{},
+		macros:          []parser.Macro{},
+		Container:       containers.DefaultContainer,
+		adapter:         registry,
+		provider:        registry,
+		features:        map[int]bool{},
+		appliedFeatures: map[int]bool{},
+		libraries:       map[string]bool{},
+		validators:      []ASTValidator{},
+		progOpts:        []ProgramOption{},
+		costOptions:     []checker.CostOption{},
+	}).configure(opts)
+}
+
+// Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues.
+// If any `ASTValidators` are configured on the environment, they will be applied after a valid
+// type-check result. If any issues are detected, the validators will provide them on the
+// output Issues object.
+//
+// Either checking or validation has failed if the returned Issues value and its Issues.Err()
+// value are non-nil. Issues should be inspected if they are non-nil, but may not represent a
+// fatal error.
+//
+// It is possible to have both non-nil Ast and Issues values returned from this call; however,
+// the mere presence of an Ast does not imply that it is valid for use.
+func (e *Env) Check(ast *Ast) (*Ast, *Issues) {
+	// Construct the internal checker env, erroring if there is an issue adding the declarations.
+	chk, err := e.initChecker()
+	if err != nil {
+		errs := common.NewErrors(ast.Source())
+		errs.ReportError(common.NoLocation, err.Error())
+		return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
+	}
+
+	checked, errs := checker.Check(ast.NativeRep(), ast.Source(), chk)
+	if len(errs.GetErrors()) > 0 {
+		return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
+	}
+	// Manually create the Ast to ensure that the Ast source information (which may be more
+	// detailed than the information provided by Check), is returned to the caller.
+	ast = &Ast{
+		source: ast.Source(),
+		impl:   checked}
+
+	// Avoid creating a validator config if it's not needed.
+	if len(e.validators) == 0 {
+		return ast, nil
+	}
+
+	// Generate a validator configuration from the set of configured validators.
+	vConfig := newValidatorConfig()
+	for _, v := range e.validators {
+		if cv, ok := v.(ASTValidatorConfigurer); ok {
+			cv.Configure(vConfig)
+		}
+	}
+	// Apply additional validators on the type-checked result.
+	iss := NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
+	for _, v := range e.validators {
+		v.Validate(e, vConfig, checked, iss)
+	}
+	if iss.Err() != nil {
+		return nil, iss
+	}
+	return ast, nil
+}
+
+// Compile combines the Parse and Check phases of CEL program compilation to produce an Ast and
+// associated issues.
+//
+// If an error is encountered during parsing, the Compile step will not continue with the Check
+// phase. If non-error issues are encountered during Parse, they may be combined with any issues
+// discovered during Check.
+//
+// Note, for parse-only uses of CEL use Parse.
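+//
+// A minimal compile-and-evaluate sketch; the expression and variable bindings
+// below are illustrative only:
+//
+//	ast, iss := env.Compile(`name.startsWith("/groups/" + group)`)
+//	if iss.Err() != nil {
+//		return iss.Err()
+//	}
+//	prg, err := env.Program(ast)
+//	if err != nil {
+//		return err
+//	}
+//	out, _, err := prg.Eval(map[string]any{
+//		"name":  "/groups/acme.co/documents/secret-stuff",
+//		"group": "acme.co",
+//	})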
+func (e *Env) Compile(txt string) (*Ast, *Issues) {
+	return e.CompileSource(common.NewTextSource(txt))
+}
+
+// CompileSource combines the Parse and Check phases of CEL program compilation to produce an Ast and
+// associated issues.
+//
+// If an error is encountered during parsing, the CompileSource step will not continue with the
+// Check phase. If non-error issues are encountered during Parse, they may be combined with any
+// issues discovered during Check.
+//
+// Note, for parse-only uses of CEL use Parse.
+func (e *Env) CompileSource(src Source) (*Ast, *Issues) {
+	ast, iss := e.ParseSource(src)
+	if iss.Err() != nil {
+		return nil, iss
+	}
+	checked, iss2 := e.Check(ast)
+	if iss2.Err() != nil {
+		return nil, iss2
+	}
+	return checked, iss2
+}
+
+// Extend the current environment with additional options to produce a new Env.
+//
+// Note, the extended Env value should not share memory with the original. It is possible, however,
+// that CustomTypeAdapter or CustomTypeProvider options could provide values which are mutable.
+// To ensure separation of state between extended environments either make sure the TypeAdapter and
+// TypeProvider are immutable, or that their underlying implementations are based on the
+// ref.TypeRegistry which provides a Copy method which will be invoked by this method.
+func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
+	chk, chkErr := e.getCheckerOrError()
+	if chkErr != nil {
+		return nil, chkErr
+	}
+
+	prsrOptsCopy := make([]parser.Option, len(e.prsrOpts))
+	copy(prsrOptsCopy, e.prsrOpts)
+
+	// The type-checker is configured with Declarations. The declarations may either be provided
+	// as options which have not yet been validated, or may come from a previous checker instance
+	// whose types have already been validated.
+	chkOptsCopy := make([]checker.Option, len(e.chkOpts))
+	copy(chkOptsCopy, e.chkOpts)
+
+	// Copy the declarations if needed.
+	if chk != nil {
+		// If the type-checker has already been instantiated, then the e.declarations have been
+		// validated within the chk instance.
+		chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk))
+	}
+	varsCopy := make([]*decls.VariableDecl, len(e.variables))
+	copy(varsCopy, e.variables)
+
+	// Copy macros and program options
+	macsCopy := make([]parser.Macro, len(e.macros))
+	progOptsCopy := make([]ProgramOption, len(e.progOpts))
+	copy(macsCopy, e.macros)
+	copy(progOptsCopy, e.progOpts)
+
+	// Copy the adapter / provider if they appear to be mutable.
+	adapter := e.adapter
+	provider := e.provider
+	adapterReg, isAdapterReg := e.adapter.(*types.Registry)
+	providerReg, isProviderReg := e.provider.(*types.Registry)
+	// In most cases the provider and adapter will be a ref.TypeRegistry;
+	// however, in the rare cases where they are not, they are assumed to
+	// be immutable. Since it is possible to set the TypeProvider separately
+	// from the TypeAdapter, the possible configurations which could use a
+	// TypeRegistry as the base implementation are captured below.
+	if isAdapterReg && isProviderReg {
+		reg := providerReg.Copy()
+		provider = reg
+		// If the adapter and provider are the same object, set the adapter
+		// to the same ref.TypeRegistry as the provider.
+		if adapterReg == providerReg {
+			adapter = reg
+		} else {
+			// Otherwise, make a copy of the adapter.
+			adapter = adapterReg.Copy()
+		}
+	} else if isProviderReg {
+		provider = providerReg.Copy()
+	} else if isAdapterReg {
+		adapter = adapterReg.Copy()
+	}
+
+	featuresCopy := make(map[int]bool, len(e.features))
+	for k, v := range e.features {
+		featuresCopy[k] = v
+	}
+	appliedFeaturesCopy := make(map[int]bool, len(e.appliedFeatures))
+	for k, v := range e.appliedFeatures {
+		appliedFeaturesCopy[k] = v
+	}
+	funcsCopy := make(map[string]*decls.FunctionDecl, len(e.functions))
+	for k, v := range e.functions {
+		funcsCopy[k] = v
+	}
+	libsCopy := make(map[string]bool, len(e.libraries))
+	for k, v := range e.libraries {
+		libsCopy[k] = v
+	}
+	validatorsCopy := make([]ASTValidator, len(e.validators))
+	copy(validatorsCopy, e.validators)
+	costOptsCopy := make([]checker.CostOption, len(e.costOptions))
+	copy(costOptsCopy, e.costOptions)
+
+	ext := &Env{
+		Container:       e.Container,
+		variables:       varsCopy,
+		functions:       funcsCopy,
+		macros:          macsCopy,
+		progOpts:        progOptsCopy,
+		adapter:         adapter,
+		features:        featuresCopy,
+		appliedFeatures: appliedFeaturesCopy,
+		libraries:       libsCopy,
+		validators:      validatorsCopy,
+		provider:        provider,
+		chkOpts:         chkOptsCopy,
+		prsrOpts:        prsrOptsCopy,
+		costOptions:     costOptsCopy,
+	}
+	return ext.configure(opts)
+}
+
+// HasFeature checks whether the environment enables the given feature
+// flag, as enumerated in options.go.
+func (e *Env) HasFeature(flag int) bool {
+	enabled, has := e.features[flag]
+	return has && enabled
+}
+
+// HasLibrary returns whether a specific SingletonLibrary has been configured in the environment.
+func (e *Env) HasLibrary(libName string) bool {
+	configured, exists := e.libraries[libName]
+	return exists && configured
+}
+
+// Libraries returns a list of SingletonLibrary that have been configured in the environment.
+func (e *Env) Libraries() []string {
+	libraries := make([]string, 0, len(e.libraries))
+	for libName := range e.libraries {
+		libraries = append(libraries, libName)
+	}
+	return libraries
+}
+
+// HasFunction returns whether a specific function has been configured in the environment.
+func (e *Env) HasFunction(functionName string) bool {
+	_, ok := e.functions[functionName]
+	return ok
+}
+
+// Functions returns a map of Functions, keyed by function name, that have been configured in the environment.
+func (e *Env) Functions() map[string]*decls.FunctionDecl {
+	return e.functions
+}
+
+// HasValidator returns whether a specific ASTValidator has been configured in the environment.
+func (e *Env) HasValidator(name string) bool {
+	for _, v := range e.validators {
+		if v.Name() == name {
+			return true
+		}
+	}
+	return false
+}
+
+// Parse parses the input expression value `txt` to an Ast and/or a set of Issues.
+//
+// This form of Parse creates a Source value for the input `txt` and forwards to the
+// ParseSource method.
+func (e *Env) Parse(txt string) (*Ast, *Issues) {
+	src := common.NewTextSource(txt)
+	return e.ParseSource(src)
+}
+
+// ParseSource parses the input source to an Ast and/or set of Issues.
+//
+// Parsing has failed if the returned Issues value and its Issues.Err() value are non-nil.
+// Issues should be inspected if they are non-nil, but may not represent a fatal error.
+//
+// It is possible to have both non-nil Ast and Issues values returned from this call; however,
+// the mere presence of an Ast does not imply that it is valid for use.
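+//
+// A parse-then-check sketch for callers that want to defer type-checking; the
+// expression below is illustrative only:
+//
+//	ast, iss := env.ParseSource(common.NewTextSource(`a + b`))
+//	if iss.Err() != nil {
+//		return iss.Err()
+//	}
+//	checked, iss := env.Check(ast)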
+func (e *Env) ParseSource(src Source) (*Ast, *Issues) {
+	parsed, errs := e.prsr.Parse(src)
+	if len(errs.GetErrors()) > 0 {
+		return nil, &Issues{errs: errs}
+	}
+	return &Ast{source: src, impl: parsed}, nil
+}
+
+// Program generates an evaluable instance of the Ast within the environment (Env).
+func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) {
+	return e.PlanProgram(ast.NativeRep(), opts...)
+}
+
+// PlanProgram generates an evaluable instance of the AST in the go-native representation within
+// the environment (Env).
+func (e *Env) PlanProgram(a *celast.AST, opts ...ProgramOption) (Program, error) {
+	optSet := e.progOpts
+	if len(opts) != 0 {
+		mergedOpts := []ProgramOption{}
+		mergedOpts = append(mergedOpts, e.progOpts...)
+		mergedOpts = append(mergedOpts, opts...)
+		optSet = mergedOpts
+	}
+	return newProgram(e, a, optSet)
+}
+
+// CELTypeAdapter returns the `types.Adapter` configured for the environment.
+func (e *Env) CELTypeAdapter() types.Adapter {
+	return e.adapter
+}
+
+// CELTypeProvider returns the `types.Provider` configured for the environment.
+func (e *Env) CELTypeProvider() types.Provider {
+	return e.provider
+}
+
+// TypeAdapter returns the `ref.TypeAdapter` configured for the environment.
+//
+// Deprecated: use CELTypeAdapter()
+func (e *Env) TypeAdapter() ref.TypeAdapter {
+	return e.adapter
+}
+
+// TypeProvider returns the `ref.TypeProvider` configured for the environment.
+//
+// Deprecated: use CELTypeProvider()
+func (e *Env) TypeProvider() ref.TypeProvider {
+	if legacyProvider, ok := e.provider.(ref.TypeProvider); ok {
+		return legacyProvider
+	}
+	return &interopLegacyTypeProvider{Provider: e.provider}
+}
+
+// UnknownVars returns an interpreter.PartialActivation which marks all variables declared in the
+// Env as unknown AttributePattern values.
+//
+// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation unless the
+// PartialAttributes option is provided as a ProgramOption.
+func (e *Env) UnknownVars() interpreter.PartialActivation {
+	act := interpreter.EmptyActivation()
+	part, _ := PartialVars(act, e.computeUnknownVars(act)...)
+	return part
+}
+
+// PartialVars returns an interpreter.PartialActivation where all variables not in the input variable
+// set, but which have been configured in the environment, are marked as unknown.
+//
+// The `vars` value may either be an interpreter.Activation or any valid input to the
+// interpreter.NewActivation call.
+//
+// Note, this is equivalent to calling cel.PartialVars and manually configuring the set of unknown
+// variables. For more advanced use cases of partial state where portions of an object graph, rather
+// than top-level variables, are missing, the PartialVars() method may be a more suitable choice.
+//
+// Note, the PartialVars will behave the same as an interpreter.EmptyActivation unless the
+// PartialAttributes option is provided as a ProgramOption.
+func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
+	act, err := interpreter.NewActivation(vars)
+	if err != nil {
+		return nil, err
+	}
+	return PartialVars(act, e.computeUnknownVars(act)...)
+}
+
+// ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the
+// attribute references which are unknown.
+//
+// Residual expressions are beneficial in a few scenarios:
+//
+// - Optimizing constant expression evaluations away.
+// - Indexing and pruning expressions based on known input arguments.
+// - Surfacing additional requirements that are needed in order to complete an evaluation.
+// - Sharing the evaluation of an expression across multiple machines/nodes.
+//
+// For example, if an expression targets a 'resource' and 'request' attribute and the possible
+// values for the resource are known, a PartialActivation could mark the 'request' as an unknown
+// interpreter.AttributePattern and the resulting ResidualAst would be reduced to only the parts
+// of the expression that reference the 'request'.
+//
+// Note, the expression ids within the residual AST generated through this method have no
+// correlation to the expression ids of the original AST.
+//
+// See the PartialVars helper for how to construct a PartialActivation.
+//
+// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
+// Ast format and then Program again.
+func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
+	pruned := interpreter.PruneAst(a.impl.Expr(), a.impl.SourceInfo().MacroCalls(), details.State())
+	newAST := &Ast{source: a.Source(), impl: pruned}
+	expr, err := AstToString(newAST)
+	if err != nil {
+		return nil, err
+	}
+	parsed, iss := e.Parse(expr)
+	if iss != nil && iss.Err() != nil {
+		return nil, iss.Err()
+	}
+	if !a.IsChecked() {
+		return parsed, nil
+	}
+	checked, iss := e.Check(parsed)
+	if iss != nil && iss.Err() != nil {
+		return nil, iss.Err()
+	}
+	return checked, nil
+}
+
+// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
+// extension functions provided by estimator.
+func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) {
+	extendedOpts := make([]checker.CostOption, 0, len(e.costOptions))
+	extendedOpts = append(extendedOpts, opts...)
+	extendedOpts = append(extendedOpts, e.costOptions...)
+	return checker.Cost(ast.impl, estimator, extendedOpts...)
+}
+
+// configure applies a series of EnvOptions to the current environment.
+func (e *Env) configure(opts []EnvOption) (*Env, error) {
+	// Customize the environment using the provided EnvOption values. If an error is
+	// generated at any step, it will be returned as a nil Env with a non-nil error.
+	var err error
+	for _, opt := range opts {
+		e, err = opt(e)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// If the default UTC timezone fix has been enabled, make sure the library is configured
+	e, err = e.maybeApplyFeature(featureDefaultUTCTimeZone, Lib(timeUTCLibrary{}))
+	if err != nil {
+		return nil, err
+	}
+
+	// Configure the parser.
+	prsrOpts := []parser.Option{}
+	prsrOpts = append(prsrOpts, e.prsrOpts...)
+	prsrOpts = append(prsrOpts, parser.Macros(e.macros...))
+
+	if e.HasFeature(featureEnableMacroCallTracking) {
+		prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true))
+	}
+	if e.HasFeature(featureVariadicLogicalASTs) {
+		prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true))
+	}
+	e.prsr, err = parser.NewParser(prsrOpts...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Ensure that the checker init happens eagerly rather than lazily.
+	if e.HasFeature(featureEagerlyValidateDeclarations) {
+		_, err := e.initChecker()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return e, nil
+}
+
+func (e *Env) initChecker() (*checker.Env, error) {
+	e.chkOnce.Do(func() {
+		chkOpts := []checker.Option{}
+		chkOpts = append(chkOpts, e.chkOpts...)
+		chkOpts = append(chkOpts,
+			checker.CrossTypeNumericComparisons(
+				e.HasFeature(featureCrossTypeNumericComparisons)))
+
+		ce, err := checker.NewEnv(e.Container, e.provider, chkOpts...)
+		if err != nil {
+			e.setCheckerOrError(nil, err)
+			return
+		}
+		// Add the statically configured declarations.
+		err = ce.AddIdents(e.variables...)
+		if err != nil {
+			e.setCheckerOrError(nil, err)
+			return
+		}
+		// Add the function declarations which are derived from the FunctionDecl instances.
+		for _, fn := range e.functions {
+			if fn.IsDeclarationDisabled() {
+				continue
+			}
+			err = ce.AddFunctions(fn)
+			if err != nil {
+				e.setCheckerOrError(nil, err)
+				return
+			}
+		}
+		// Add function declarations here separately.
+		e.setCheckerOrError(ce, nil)
+	})
+	return e.getCheckerOrError()
+}
+
+// setCheckerOrError sets the checker.Env or error state in a concurrency-safe manner
+func (e *Env) setCheckerOrError(chk *checker.Env, chkErr error) {
+	e.chkMutex.Lock()
+	e.chk = chk
+	e.chkErr = chkErr
+	e.chkMutex.Unlock()
+}
+
+// getCheckerOrError gets the checker.Env or error state in a concurrency-safe manner
+func (e *Env) getCheckerOrError() (*checker.Env, error) {
+	e.chkMutex.Lock()
+	defer e.chkMutex.Unlock()
+	return e.chk, e.chkErr
+}
+
+// maybeApplyFeature determines whether the feature-guarded option is enabled, and if so applies
+// the feature if it has not already been enabled.
+func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) {
+	if !e.HasFeature(feature) {
+		return e, nil
+	}
+	_, applied := e.appliedFeatures[feature]
+	if applied {
+		return e, nil
+	}
+	e, err := option(e)
+	if err != nil {
+		return nil, err
+	}
+	// record that the feature has been applied since it will generate declarations
+	// and functions which will be propagated on Extend() calls and which should only
+	// be registered once.
+	e.appliedFeatures[feature] = true
+	return e, nil
+}
+
+// computeUnknownVars determines a set of missing variables based on the input activation and the
+// environment's configured declaration set.
+func (e *Env) computeUnknownVars(vars interpreter.Activation) []*interpreter.AttributePattern {
+	var unknownPatterns []*interpreter.AttributePattern
+	for _, v := range e.variables {
+		varName := v.Name()
+		if _, found := vars.ResolveName(varName); found {
+			continue
+		}
+		unknownPatterns = append(unknownPatterns, interpreter.NewAttributePattern(varName))
+	}
+	return unknownPatterns
+}
+
+// Error type which references an expression id, a location within source, and a message.
+type Error = common.Error
+
+// Issues defines methods for inspecting the error details of parse and check calls.
+//
+// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct.
+type Issues struct {
+	errs *common.Errors
+	info *celast.SourceInfo
+}
+
+// NewIssues returns an Issues struct from a common.Errors object.
+func NewIssues(errs *common.Errors) *Issues {
+	return NewIssuesWithSourceInfo(errs, nil)
+}
+
+// NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metadata
+// which can be used with the `ReportErrorAtID` method for additional error reports within the context
+// information that's inferred from an expression id.
+func NewIssuesWithSourceInfo(errs *common.Errors, info *celast.SourceInfo) *Issues {
+	return &Issues{
+		errs: errs,
+		info: info,
+	}
+}
+
+// Err returns an error value if the issues list contains one or more errors.
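+//
+// A minimal sketch of the intended usage (illustrative; `env` and `expr` are
+// assumed to exist in the caller's scope):
+//
+//	ast, iss := env.Compile(expr)
+//	if iss.Err() != nil {
+//		// surface iss.Err() to the caller rather than using the Ast
+//	}
+//	_ = ast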
+func (i *Issues) Err() error {
+	if i == nil {
+		return nil
+	}
+	if len(i.Errors()) > 0 {
+		return errors.New(i.String())
+	}
+	return nil
+}
+
+// Errors returns the collection of errors encountered in more granular detail.
+func (i *Issues) Errors() []*Error {
+	if i == nil {
+		return []*Error{}
+	}
+	return i.errs.GetErrors()
+}
+
+// Append collects the issues from another Issues struct into a new Issues object.
+func (i *Issues) Append(other *Issues) *Issues {
+	if i == nil {
+		return other
+	}
+	if other == nil || i == other {
+		return i
+	}
+	return NewIssuesWithSourceInfo(i.errs.Append(other.errs.GetErrors()), i.info)
+}
+
+// String converts the issues to a suitable display string.
+func (i *Issues) String() string {
+	if i == nil {
+		return ""
+	}
+	return i.errs.ToDisplayString()
+}
+
+// ReportErrorAtID reports an error message with an optional set of formatting arguments.
+//
+// The source metadata for the expression at `id`, if present, is attached to the error report.
+// To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo.
+func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) {
+	i.errs.ReportErrorAtID(id, i.info.GetStartLocation(id), message, args...)
+}
+
+// getStdEnv lazily initializes the CEL standard environment.
+func getStdEnv() (*Env, error) {
+	stdEnvInit.Do(func() {
+		stdEnv, stdEnvErr = NewCustomEnv(StdLib(), EagerlyValidateDeclarations(true))
+	})
+	return stdEnv, stdEnvErr
+}
+
+// interopCELTypeProvider layers support for the types.Provider interface on top of a ref.TypeProvider.
+type interopCELTypeProvider struct {
+	ref.TypeProvider
+}
+
+// FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists.
+//
+// This method proxies to the underlying ref.TypeProvider's FindType method and converts protobuf type
+// into a native type representation. If the conversion fails, the type is listed as not found.
+func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
+	if et, found := p.FindType(typeName); found {
+		t, err := types.ExprTypeToType(et)
+		if err != nil {
+			return nil, false
+		}
+		return t, true
+	}
+	return nil, false
+}
+
+// FindStructFieldNames returns an empty set of field names for the interop provider.
+//
+// To inspect the field names, migrate to a `types.Provider` implementation.
+func (p *interopCELTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) {
+	return []string{}, false
+}
+
+// FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field
+// name, if one exists.
+//
+// This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts protobuf type
+// into a native type representation. If the conversion fails, the type is listed as not found.
+func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) {
+	if ft, found := p.FindFieldType(structType, fieldName); found {
+		t, err := types.ExprTypeToType(ft.Type)
+		if err != nil {
+			return nil, false
+		}
+		return &types.FieldType{
+			Type:    t,
+			IsSet:   ft.IsSet,
+			GetFrom: ft.GetFrom,
+		}, true
+	}
+	return nil, false
+}
+
+// interopLegacyTypeProvider layers support for the ref.TypeProvider interface on top of a types.Provider.
+type interopLegacyTypeProvider struct {
+	types.Provider
+}
+
+// FindType returns the protobuf Type representation for the input type name if one exists.
+//
+// This method proxies to the underlying types.Provider FindStructType method and converts the types.Type
+// value to a protobuf Type representation.
+//
+// Failure to convert the type will result in the type not being found.
+func (p *interopLegacyTypeProvider) FindType(typeName string) (*exprpb.Type, bool) {
+	if t, found := p.FindStructType(typeName); found {
+		et, err := types.TypeToExprType(t)
+		if err != nil {
+			return nil, false
+		}
+		return et, true
+	}
+	return nil, false
+}
+
+// FindFieldType returns the protobuf-based FieldType representation for the input type name and field,
+// if one exists.
+//
+// This call proxies to the types.Provider FindStructFieldType method and converts the types.FieldType
+// value to a protobuf-based ref.FieldType representation if found.
+//
+// Failure to convert the FieldType will result in the field not being found.
+func (p *interopLegacyTypeProvider) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
+	if cft, found := p.FindStructFieldType(structType, fieldName); found {
+		et, err := types.TypeToExprType(cft.Type)
+		if err != nil {
+			return nil, false
+		}
+		return &ref.FieldType{
+			Type:    et,
+			IsSet:   cft.IsSet,
+			GetFrom: cft.GetFrom,
+		}, true
+	}
+	return nil, false
+}
+
+var (
+	stdEnvInit sync.Once
+	stdEnv     *Env
+	stdEnvErr  error
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/folding.go b/hack/tools/vendor/github.com/google/cel-go/cel/folding.go
new file mode 100644
index 0000000000..d7060896d3
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/folding.go
@@ -0,0 +1,559 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/operators"
+	"github.com/google/cel-go/common/overloads"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+// ConstantFoldingOption defines a functional option for configuring constant folding.
+type ConstantFoldingOption func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error)
+
+// MaxConstantFoldIterations limits the number of times literals may be folded during optimization.
+//
+// Defaults to 100 if not set.
+func MaxConstantFoldIterations(limit int) ConstantFoldingOption {
+	return func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error) {
+		opt.maxFoldIterations = limit
+		return opt, nil
+	}
+}
+
+// NewConstantFoldingOptimizer creates an optimizer which inlines constant scalar and aggregate
+// literal values within function calls and select statements with their evaluated result.
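+//
+// Hedged example of wiring the optimizer into a static optimization pass
+// (illustrative; assumes a checked *Ast named `checked`, an *Env named `env`,
+// and the StaticOptimizer defined elsewhere in this package):
+//
+//	folder, err := NewConstantFoldingOptimizer()
+//	if err != nil {
+//		// handle configuration error
+//	}
+//	optimized, iss := NewStaticOptimizer(folder).Optimize(env, checked)
+//	if iss != nil && iss.Err() != nil {
+//		// handle optimization issues
+//	}
+//	_ = optimized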
+func NewConstantFoldingOptimizer(opts ...ConstantFoldingOption) (ASTOptimizer, error) {
+	folder := &constantFoldingOptimizer{
+		maxFoldIterations: defaultMaxConstantFoldIterations,
+	}
+	var err error
+	for _, o := range opts {
+		folder, err = o(folder)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return folder, nil
+}
+
+type constantFoldingOptimizer struct {
+	maxFoldIterations int
+}
+
+// Optimize queries the expression graph for scalar and aggregate literal expressions within call and
+// select statements and then evaluates them and replaces the call site with the literal result.
+//
+// Note: only values which can be represented as literals in CEL syntax are supported.
+func (opt *constantFoldingOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+	root := ast.NavigateAST(a)
+
+	// Walk the list of foldable expressions and continue to fold until there are no more folds left.
+	// All of the fold candidates returned by the constantExprMatcher should succeed unless there's
+	// a logic bug with the selection of expressions.
+	foldableExprs := ast.MatchDescendants(root, constantExprMatcher)
+	foldCount := 0
+	for len(foldableExprs) != 0 && foldCount < opt.maxFoldIterations {
+		for _, fold := range foldableExprs {
+			// If the expression could be folded because it's a non-strict call, and the
+			// branches are pruned, continue to the next fold.
+			if fold.Kind() == ast.CallKind && maybePruneBranches(ctx, fold) {
+				continue
+			}
+			// Otherwise, assume all context is needed to evaluate the expression.
+			err := tryFold(ctx, a, fold)
+			if err != nil {
+				ctx.ReportErrorAtID(fold.ID(), "constant-folding evaluation failed: %v", err.Error())
+				return a
+			}
+		}
+		foldCount++
+		foldableExprs = ast.MatchDescendants(root, constantExprMatcher)
+	}
+	// Once all of the constants have been folded, try to run through the remaining comprehensions
+	// one last time. In this case, there's no guarantee they'll run, so we only update the
+	// target comprehension node with the literal value if the evaluation succeeds.
+	for _, compre := range ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) {
+		tryFold(ctx, a, compre)
+	}
+
+	// If the output is a list, map, or struct which contains optional entries, then prune it
+	// to make sure that the optionals, if resolved, do not surface in the output literal.
+	pruneOptionalElements(ctx, root)
+
+	// Ensure that all intermediate values in the folded expression can be represented as valid
+	// CEL literals within the AST structure. Use `PostOrderVisit` rather than `MatchDescendants`
+	// to avoid extra allocations during this final pass through the AST.
+	ast.PostOrderVisit(root, ast.NewExprVisitor(func(e ast.Expr) {
+		if e.Kind() != ast.LiteralKind {
+			return
+		}
+		val := e.AsLiteral()
+		adapted, err := adaptLiteral(ctx, val)
+		if err != nil {
+			ctx.ReportErrorAtID(root.ID(), "constant-folding evaluation failed: %v", err.Error())
+			return
+		}
+		ctx.UpdateExpr(e, adapted)
+	}))
+
+	return a
+}
+
+// tryFold attempts to evaluate a sub-expression to a literal.
+//
+// If the evaluation succeeds, the input expr value will be modified to become a literal, otherwise
+// the method will return an error.
+func tryFold(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) error {
+	// Assume all context is needed to evaluate the expression.
+ subAST := &Ast{ + impl: ast.NewCheckedAST(ast.NewAST(expr, a.SourceInfo()), a.TypeMap(), a.ReferenceMap()), + } + prg, err := ctx.Program(subAST) + if err != nil { + return err + } + out, _, err := prg.Eval(NoVars()) + if err != nil { + return err + } + // Update the fold expression to be a literal. + ctx.UpdateExpr(expr, ctx.NewLiteral(out)) + return nil +} + +// maybePruneBranches inspects the non-strict call expression to determine whether +// a branch can be removed. Evaluation will naturally prune logical and / or calls, +// but conditional will not be pruned cleanly, so this is one small area where the +// constant folding step reimplements a portion of the evaluator. +func maybePruneBranches(ctx *OptimizerContext, expr ast.NavigableExpr) bool { + call := expr.AsCall() + args := call.Args() + switch call.FunctionName() { + case operators.LogicalAnd, operators.LogicalOr: + return maybeShortcircuitLogic(ctx, call.FunctionName(), args, expr) + case operators.Conditional: + cond := args[0] + truthy := args[1] + falsy := args[2] + if cond.Kind() != ast.LiteralKind { + return false + } + if cond.AsLiteral() == types.True { + ctx.UpdateExpr(expr, truthy) + } else { + ctx.UpdateExpr(expr, falsy) + } + return true + case operators.In: + haystack := args[1] + if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 { + ctx.UpdateExpr(expr, ctx.NewLiteral(types.False)) + return true + } + needle := args[0] + if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind { + needleValue := needle.AsLiteral() + list := haystack.AsList() + for _, e := range list.Elements() { + if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True { + ctx.UpdateExpr(expr, ctx.NewLiteral(types.True)) + return true + } + } + } + } + return false +} + +func maybeShortcircuitLogic(ctx *OptimizerContext, function string, args []ast.Expr, expr ast.NavigableExpr) bool { + shortcircuit := types.False + skip := types.True + if function == operators.LogicalOr { + shortcircuit = types.True + skip = types.False + } + newArgs := []ast.Expr{} + for _, arg := range args { + if arg.Kind() != ast.LiteralKind { + newArgs = append(newArgs, arg) + continue + } + if arg.AsLiteral() == skip { + continue + } + if arg.AsLiteral() == shortcircuit { + ctx.UpdateExpr(expr, arg) + return true + } + } + if len(newArgs) == 0 { + newArgs = append(newArgs, args[0]) + ctx.UpdateExpr(expr, newArgs[0]) + return true + } + if len(newArgs) == 1 { + ctx.UpdateExpr(expr, newArgs[0]) + return true + } + ctx.UpdateExpr(expr, ctx.NewCall(function, newArgs...)) + return true +} + +// pruneOptionalElements works from the bottom up to resolve optional elements within +// aggregate literals. +// +// Note, many aggregate literals will be resolved as arguments to functions or select +// statements, so this method exists to handle the case where the literal could not be +// fully resolved or exists outside of a call, select, or comprehension context. 
+func pruneOptionalElements(ctx *OptimizerContext, root ast.NavigableExpr) { + aggregateLiterals := ast.MatchDescendants(root, aggregateLiteralMatcher) + for _, lit := range aggregateLiterals { + switch lit.Kind() { + case ast.ListKind: + pruneOptionalListElements(ctx, lit) + case ast.MapKind: + pruneOptionalMapEntries(ctx, lit) + case ast.StructKind: + pruneOptionalStructFields(ctx, lit) + } + } +} + +func pruneOptionalListElements(ctx *OptimizerContext, e ast.Expr) { + l := e.AsList() + elems := l.Elements() + optIndices := l.OptionalIndices() + if len(optIndices) == 0 { + return + } + updatedElems := []ast.Expr{} + updatedIndices := []int32{} + newOptIndex := -1 + for _, e := range elems { + newOptIndex++ + if !l.IsOptional(int32(newOptIndex)) { + updatedElems = append(updatedElems, e) + continue + } + if e.Kind() != ast.LiteralKind { + updatedElems = append(updatedElems, e) + updatedIndices = append(updatedIndices, int32(newOptIndex)) + continue + } + optElemVal, ok := e.AsLiteral().(*types.Optional) + if !ok { + updatedElems = append(updatedElems, e) + updatedIndices = append(updatedIndices, int32(newOptIndex)) + continue + } + if !optElemVal.HasValue() { + newOptIndex-- // Skipping causes the list to get smaller. + continue + } + ctx.UpdateExpr(e, ctx.NewLiteral(optElemVal.GetValue())) + updatedElems = append(updatedElems, e) + } + ctx.UpdateExpr(e, ctx.NewList(updatedElems, updatedIndices)) +} + +func pruneOptionalMapEntries(ctx *OptimizerContext, e ast.Expr) { + m := e.AsMap() + entries := m.Entries() + updatedEntries := []ast.EntryExpr{} + modified := false + for _, e := range entries { + entry := e.AsMapEntry() + key := entry.Key() + val := entry.Value() + // If the entry is not optional, or the value-side of the optional hasn't + // been resolved to a literal, then preserve the entry as-is. + if !entry.IsOptional() || val.Kind() != ast.LiteralKind { + updatedEntries = append(updatedEntries, e) + continue + } + optElemVal, ok := val.AsLiteral().(*types.Optional) + if !ok { + updatedEntries = append(updatedEntries, e) + continue + } + // When the key is not a literal, but the value is, then it needs to be + // restored to an optional value. 
+ if key.Kind() != ast.LiteralKind { + undoOptVal, err := adaptLiteral(ctx, optElemVal) + if err != nil { + ctx.ReportErrorAtID(val.ID(), "invalid map value literal %v: %v", optElemVal, err) + } + ctx.UpdateExpr(val, undoOptVal) + updatedEntries = append(updatedEntries, e) + continue + } + modified = true + if !optElemVal.HasValue() { + continue + } + ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue())) + updatedEntry := ctx.NewMapEntry(key, val, false) + updatedEntries = append(updatedEntries, updatedEntry) + } + if modified { + ctx.UpdateExpr(e, ctx.NewMap(updatedEntries)) + } +} + +func pruneOptionalStructFields(ctx *OptimizerContext, e ast.Expr) { + s := e.AsStruct() + fields := s.Fields() + updatedFields := []ast.EntryExpr{} + modified := false + for _, f := range fields { + field := f.AsStructField() + val := field.Value() + if !field.IsOptional() || val.Kind() != ast.LiteralKind { + updatedFields = append(updatedFields, f) + continue + } + optElemVal, ok := val.AsLiteral().(*types.Optional) + if !ok { + updatedFields = append(updatedFields, f) + continue + } + modified = true + if !optElemVal.HasValue() { + continue + } + ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue())) + updatedField := ctx.NewStructField(field.Name(), val, false) + updatedFields = append(updatedFields, updatedField) + } + if modified { + ctx.UpdateExpr(e, ctx.NewStruct(s.TypeName(), updatedFields)) + } +} + +// adaptLiteral converts a runtime CEL value to its equivalent literal expression. +// +// For strongly typed values, the type-provider will be used to reconstruct the fields +// which are present in the literal and their equivalent initialization values. +func adaptLiteral(ctx *OptimizerContext, val ref.Val) (ast.Expr, error) { + switch t := val.Type().(type) { + case *types.Type: + switch t { + case types.BoolType, types.BytesType, types.DoubleType, types.IntType, + types.NullType, types.StringType, types.UintType: + return ctx.NewLiteral(val), nil + case types.DurationType: + return ctx.NewCall( + overloads.TypeConvertDuration, + ctx.NewLiteral(val.ConvertToType(types.StringType)), + ), nil + case types.TimestampType: + return ctx.NewCall( + overloads.TypeConvertTimestamp, + ctx.NewLiteral(val.ConvertToType(types.StringType)), + ), nil + case types.OptionalType: + opt := val.(*types.Optional) + if !opt.HasValue() { + return ctx.NewCall("optional.none"), nil + } + target, err := adaptLiteral(ctx, opt.GetValue()) + if err != nil { + return nil, err + } + return ctx.NewCall("optional.of", target), nil + case types.TypeType: + return ctx.NewIdent(val.(*types.Type).TypeName()), nil + case types.ListType: + l, ok := val.(traits.Lister) + if !ok { + return nil, fmt.Errorf("failed to adapt %v to literal", val) + } + elems := make([]ast.Expr, l.Size().(types.Int)) + idx := 0 + it := l.Iterator() + for it.HasNext() == types.True { + elemVal := it.Next() + elemExpr, err := adaptLiteral(ctx, elemVal) + if err != nil { + return nil, err + } + elems[idx] = elemExpr + idx++ + } + return ctx.NewList(elems, []int32{}), nil + case types.MapType: + m, ok := val.(traits.Mapper) + if !ok { + return nil, fmt.Errorf("failed to adapt %v to literal", val) + } + entries := make([]ast.EntryExpr, m.Size().(types.Int)) + idx := 0 + it := m.Iterator() + for it.HasNext() == types.True { + keyVal := it.Next() + keyExpr, err := adaptLiteral(ctx, keyVal) + if err != nil { + return nil, err + } + valVal := m.Get(keyVal) + valExpr, err := adaptLiteral(ctx, valVal) + if err != nil { + return nil, err + } + entries[idx] = 
ctx.NewMapEntry(keyExpr, valExpr, false)
+				idx++
+			}
+			return ctx.NewMap(entries), nil
+		default:
+			provider := ctx.CELTypeProvider()
+			fields, found := provider.FindStructFieldNames(t.TypeName())
+			if !found {
+				return nil, fmt.Errorf("failed to adapt %v to literal", val)
+			}
+			tester := val.(traits.FieldTester)
+			indexer := val.(traits.Indexer)
+			fieldInits := []ast.EntryExpr{}
+			for _, f := range fields {
+				field := types.String(f)
+				if tester.IsSet(field) != types.True {
+					continue
+				}
+				fieldVal := indexer.Get(field)
+				fieldExpr, err := adaptLiteral(ctx, fieldVal)
+				if err != nil {
+					return nil, err
+				}
+				fieldInits = append(fieldInits, ctx.NewStructField(f, fieldExpr, false))
+			}
+			return ctx.NewStruct(t.TypeName(), fieldInits), nil
+		}
+	}
+	return nil, fmt.Errorf("failed to adapt %v to literal", val)
+}
+
+// constantExprMatcher matches calls, select statements, and comprehensions whose arguments
+// are all constant scalar or aggregate literal values.
+//
+// Only comprehensions which are not nested are included as possible constant folds, and only
+// if all variables referenced in the comprehension stack are iteration or accumulation variables.
+func constantExprMatcher(e ast.NavigableExpr) bool {
+	switch e.Kind() {
+	case ast.CallKind:
+		return constantCallMatcher(e)
+	case ast.SelectKind:
+		sel := e.AsSelect() // guaranteed to be a navigable value
+		return constantMatcher(sel.Operand().(ast.NavigableExpr))
+	case ast.ComprehensionKind:
+		if isNestedComprehension(e) {
+			return false
+		}
+		vars := map[string]bool{}
+		constantExprs := true
+		visitor := ast.NewExprVisitor(func(e ast.Expr) {
+			if e.Kind() == ast.ComprehensionKind {
+				nested := e.AsComprehension()
+				vars[nested.AccuVar()] = true
+				vars[nested.IterVar()] = true
+			}
+			if e.Kind() == ast.IdentKind && !vars[e.AsIdent()] {
+				constantExprs = false
+			}
+		})
+		ast.PreOrderVisit(e, visitor)
+		return constantExprs
+	default:
+		return false
+	}
+}
+
+// constantCallMatcher identifies strict and non-strict calls which can be folded.
+func constantCallMatcher(e ast.NavigableExpr) bool { + call := e.AsCall() + children := e.Children() + fnName := call.FunctionName() + if fnName == operators.LogicalAnd { + for _, child := range children { + if child.Kind() == ast.LiteralKind { + return true + } + } + } + if fnName == operators.LogicalOr { + for _, child := range children { + if child.Kind() == ast.LiteralKind { + return true + } + } + } + if fnName == operators.Conditional { + cond := children[0] + if cond.Kind() == ast.LiteralKind && cond.AsLiteral().Type() == types.BoolType { + return true + } + } + if fnName == operators.In { + haystack := children[1] + if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 { + return true + } + needle := children[0] + if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind { + needleValue := needle.AsLiteral() + list := haystack.AsList() + for _, e := range list.Elements() { + if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True { + return true + } + } + } + } + // convert all other calls with constant arguments + for _, child := range children { + if !constantMatcher(child) { + return false + } + } + return true +} + +func isNestedComprehension(e ast.NavigableExpr) bool { + parent, found := e.Parent() + for found { + if parent.Kind() == ast.ComprehensionKind { + return true + } + parent, found = parent.Parent() + } + return false +} + +func aggregateLiteralMatcher(e ast.NavigableExpr) bool { + return e.Kind() == ast.ListKind || e.Kind() == ast.MapKind || e.Kind() == ast.StructKind +} + +var ( + constantMatcher = ast.ConstantValueMatcher() +) + +const ( + defaultMaxConstantFoldIterations = 100 +) diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/inlining.go b/hack/tools/vendor/github.com/google/cel-go/cel/inlining.go new file mode 100644 index 0000000000..78d5bea65b --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/cel/inlining.go @@ -0,0 +1,228 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/traits" +) + +// InlineVariable holds a variable name to be matched and an AST representing +// the expression graph which should be used to replace it. +type InlineVariable struct { + name string + alias string + def *ast.AST +} + +// Name returns the qualified variable or field selection to replace. +func (v *InlineVariable) Name() string { + return v.name +} + +// Alias returns the alias to use when performing cel.bind() calls during inlining. +func (v *InlineVariable) Alias() string { + return v.alias +} + +// Expr returns the inlined expression value. +func (v *InlineVariable) Expr() ast.Expr { + return v.def.Expr() +} + +// Type indicates the inlined expression type. 
+func (v *InlineVariable) Type() *Type {
+	return v.def.GetType(v.def.Expr().ID())
+}
+
+// NewInlineVariable declares a variable name to be replaced by a checked expression.
+func NewInlineVariable(name string, definition *Ast) *InlineVariable {
+	return NewInlineVariableWithAlias(name, name, definition)
+}
+
+// NewInlineVariableWithAlias declares a variable name to be replaced by a checked expression.
+// If the variable occurs more than once, the provided alias will be used to replace the expressions
+// where the variable name occurs.
+func NewInlineVariableWithAlias(name, alias string, definition *Ast) *InlineVariable {
+	return &InlineVariable{name: name, alias: alias, def: definition.impl}
+}
+
+// NewInliningOptimizer creates an optimizer which replaces variables with expression definitions.
+//
+// If a variable occurs one time, the variable is replaced by the inline definition. If the
+// variable occurs more than once, the variable occurrences are replaced by a cel.bind() call.
+func NewInliningOptimizer(inlineVars ...*InlineVariable) ASTOptimizer {
+	return &inliningOptimizer{variables: inlineVars}
+}
+
+type inliningOptimizer struct {
+	variables []*InlineVariable
+}
+
+func (opt *inliningOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+	root := ast.NavigateAST(a)
+	for _, inlineVar := range opt.variables {
+		matches := ast.MatchDescendants(root, opt.matchVariable(inlineVar.Name()))
+		// Skip cases where the variable isn't in the expression graph.
+		if len(matches) == 0 {
+			continue
+		}
+
+		// For a single match, do a direct replacement of the expression sub-graph.
+		if len(matches) == 1 || !isBindable(matches, inlineVar.Expr(), inlineVar.Type()) {
+			for _, match := range matches {
+				// Copy the inlined AST expr and source info.
+				copyExpr := ctx.CopyASTAndMetadata(inlineVar.def)
+				opt.inlineExpr(ctx, match, copyExpr, inlineVar.Type())
+			}
+			continue
+		}
+
+		// For multiple matches, find the least common ancestor (lca) and insert the
+		// variable as a cel.bind() macro.
+		var lca ast.NavigableExpr = root
+		lcaAncestorCount := 0
+		ancestors := map[int64]int{}
+		for _, match := range matches {
+			// Update the identifier matches with the provided alias.
+			parent, found := match, true
+			for found {
+				ancestorCount, hasAncestor := ancestors[parent.ID()]
+				if !hasAncestor {
+					ancestors[parent.ID()] = 1
+					parent, found = parent.Parent()
+					continue
+				}
+				if lcaAncestorCount < ancestorCount || (lcaAncestorCount == ancestorCount && lca.Depth() < parent.Depth()) {
+					lca = parent
+					lcaAncestorCount = ancestorCount
+				}
+				ancestors[parent.ID()] = ancestorCount + 1
+				parent, found = parent.Parent()
+			}
+			aliasExpr := ctx.NewIdent(inlineVar.Alias())
+			opt.inlineExpr(ctx, match, aliasExpr, inlineVar.Type())
+		}
+
+		// Copy the inlined AST expr and source info.
+		copyExpr := ctx.CopyASTAndMetadata(inlineVar.def)
+		// Update the least common ancestor by inserting a cel.bind() call to the alias.
+		inlined, bindMacro := ctx.NewBindMacro(lca.ID(), inlineVar.Alias(), copyExpr, lca)
+		opt.inlineExpr(ctx, lca, inlined, inlineVar.Type())
+		ctx.SetMacroCall(lca.ID(), bindMacro)
+	}
+	return a
+}
+
+// inlineExpr replaces the current expression with the inlined one, unless the location of the inlining
+// happens within a presence test, e.g. has(a.b.c) -> inline alpha for a.b.c in which case an attempt is
+// made to determine whether the inlined value can be presence or existence tested.
+func (opt *inliningOptimizer) inlineExpr(ctx *OptimizerContext, prev ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) { + switch prev.Kind() { + case ast.SelectKind: + sel := prev.AsSelect() + if !sel.IsTestOnly() { + ctx.UpdateExpr(prev, inlined) + return + } + opt.rewritePresenceExpr(ctx, prev, inlined, inlinedType) + default: + ctx.UpdateExpr(prev, inlined) + } +} + +// rewritePresenceExpr converts the inlined expression, when it occurs within a has() macro, to type-safe +// expression appropriate for the inlined type, if possible. +// +// If the rewrite is not possible an error is reported at the inline expression site. +func (opt *inliningOptimizer) rewritePresenceExpr(ctx *OptimizerContext, prev, inlined ast.Expr, inlinedType *Type) { + // If the input inlined expression is not a select expression it won't work with the has() + // macro. Attempt to rewrite the presence test in terms of the typed input, otherwise error. + if inlined.Kind() == ast.SelectKind { + presenceTest, hasMacro := ctx.NewHasMacro(prev.ID(), inlined) + ctx.UpdateExpr(prev, presenceTest) + ctx.SetMacroCall(prev.ID(), hasMacro) + return + } + + ctx.ClearMacroCall(prev.ID()) + if inlinedType.IsAssignableType(NullType) { + ctx.UpdateExpr(prev, + ctx.NewCall(operators.NotEquals, + inlined, + ctx.NewLiteral(types.NullValue), + )) + return + } + if inlinedType.HasTrait(traits.SizerType) { + ctx.UpdateExpr(prev, + ctx.NewCall(operators.NotEquals, + ctx.NewMemberCall(overloads.Size, inlined), + ctx.NewLiteral(types.IntZero), + )) + return + } + ctx.ReportErrorAtID(prev.ID(), "unable to inline expression type %v into presence test", inlinedType) +} + +// isBindable indicates whether the inlined type can be used within a cel.bind() if the expression +// being replaced occurs within a presence test. Value types with a size() method or field selection +// support can be bound. +// +// In future iterations, support may also be added for indexer types which can be rewritten as an `in` +// expression; however, this would imply a rewrite of the inlined expression that may not be necessary +// in most cases. +func isBindable(matches []ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) bool { + if inlinedType.IsAssignableType(NullType) || + inlinedType.HasTrait(traits.SizerType) { + return true + } + for _, m := range matches { + if m.Kind() != ast.SelectKind { + continue + } + sel := m.AsSelect() + if sel.IsTestOnly() { + return false + } + } + return true +} + +// matchVariable matches simple identifiers, select expressions, and presence test expressions +// which match the (potentially) qualified variable name provided as input. +// +// Note, this function does not support inlining against select expressions which includes optional +// field selection. This may be a future refinement. +func (opt *inliningOptimizer) matchVariable(varName string) ast.ExprMatcher { + return func(e ast.NavigableExpr) bool { + if e.Kind() == ast.IdentKind && e.AsIdent() == varName { + return true + } + if e.Kind() == ast.SelectKind { + sel := e.AsSelect() + // While the `ToQualifiedName` call could take the select directly, this + // would skip presence tests from possible matches, which we would like + // to include. 
+ qualName, found := containers.ToQualifiedName(sel.Operand()) + return found && qualName+"."+sel.FieldName() == varName + } + return false + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/io.go b/hack/tools/vendor/github.com/google/cel-go/cel/io.go new file mode 100644 index 0000000000..7d08d1c813 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/cel/io.go @@ -0,0 +1,288 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "errors" + "fmt" + "reflect" + + "google.golang.org/protobuf/proto" + + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/parser" + + celpb "cel.dev/expr" + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +// CheckedExprToAst converts a checked expression proto message to an Ast. +func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast { + checked, _ := CheckedExprToAstWithSource(checkedExpr, nil) + return checked +} + +// CheckedExprToAstWithSource converts a checked expression proto message to an Ast, +// using the provided Source as the textual contents. +// +// In general the source is not necessary unless the AST has been modified between the +// `Parse` and `Check` calls as an `Ast` created from the `Parse` step will carry the source +// through future calls. +// +// Prefer CheckedExprToAst if loading expressions from storage. +func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) { + checked, err := ast.ToAST(checkedExpr) + if err != nil { + return nil, err + } + return &Ast{source: src, impl: checked}, nil +} + +// AstToCheckedExpr converts an Ast to an protobuf CheckedExpr value. +// +// If the Ast.IsChecked() returns false, this conversion method will return an error. +func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) { + if !a.IsChecked() { + return nil, fmt.Errorf("cannot convert unchecked ast") + } + return ast.ToProto(a.impl) +} + +// ParsedExprToAst converts a parsed expression proto message to an Ast. +func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast { + return ParsedExprToAstWithSource(parsedExpr, nil) +} + +// ParsedExprToAstWithSource converts a parsed expression proto message to an Ast, +// using the provided Source as the textual contents. +// +// In general you only need this if you need to recheck a previously checked +// expression, or if you need to separately check a subset of an expression. +// +// Prefer ParsedExprToAst if loading expressions from storage. 
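+//
+// Round-trip sketch (illustrative only; `env` and `expr` are assumed to exist
+// in the caller's scope):
+//
+//	parsed, iss := env.Parse(expr)
+//	if iss != nil && iss.Err() != nil {
+//		// handle parse issues
+//	}
+//	pbExpr, err := AstToParsedExpr(parsed)
+//	if err != nil {
+//		// handle conversion error
+//	}
+//	restored := ParsedExprToAstWithSource(pbExpr, parsed.Source())
+//	_ = restored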
+func ParsedExprToAstWithSource(parsedExpr *exprpb.ParsedExpr, src Source) *Ast { + info, _ := ast.ProtoToSourceInfo(parsedExpr.GetSourceInfo()) + if src == nil { + src = common.NewInfoSource(parsedExpr.GetSourceInfo()) + } + e, _ := ast.ProtoToExpr(parsedExpr.GetExpr()) + return &Ast{source: src, impl: ast.NewAST(e, info)} +} + +// AstToParsedExpr converts an Ast to an protobuf ParsedExpr value. +func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) { + return &exprpb.ParsedExpr{ + Expr: a.Expr(), + SourceInfo: a.SourceInfo(), + }, nil +} + +// AstToString converts an Ast back to a string if possible. +// +// Note, the conversion may not be an exact replica of the original expression, but will produce +// a string that is semantically equivalent and whose textual representation is stable. +func AstToString(a *Ast) (string, error) { + return parser.Unparse(a.impl.Expr(), a.impl.SourceInfo()) +} + +// RefValueToValue converts between ref.Val and api.expr.Value. +// The result Value is the serialized proto form. The ref.Val must not be error or unknown. +func RefValueToValue(res ref.Val) (*exprpb.Value, error) { + return ValueAsAlphaProto(res) +} + +func ValueAsAlphaProto(res ref.Val) (*exprpb.Value, error) { + canonical, err := ValueAsProto(res) + if err != nil { + return nil, err + } + alpha := &exprpb.Value{} + err = convertProto(canonical, alpha) + return alpha, err +} + +func ValueAsProto(res ref.Val) (*celpb.Value, error) { + switch res.Type() { + case types.BoolType: + return &celpb.Value{ + Kind: &celpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil + case types.BytesType: + return &celpb.Value{ + Kind: &celpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil + case types.DoubleType: + return &celpb.Value{ + Kind: &celpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil + case types.IntType: + return &celpb.Value{ + Kind: &celpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil + case types.ListType: + l := res.(traits.Lister) + sz := l.Size().(types.Int) + elts := make([]*celpb.Value, 0, int64(sz)) + for i := types.Int(0); i < sz; i++ { + v, err := ValueAsProto(l.Get(i)) + if err != nil { + return nil, err + } + elts = append(elts, v) + } + return &celpb.Value{ + Kind: &celpb.Value_ListValue{ + ListValue: &celpb.ListValue{Values: elts}}}, nil + case types.MapType: + mapper := res.(traits.Mapper) + sz := mapper.Size().(types.Int) + entries := make([]*celpb.MapValue_Entry, 0, int64(sz)) + for it := mapper.Iterator(); it.HasNext().(types.Bool); { + k := it.Next() + v := mapper.Get(k) + kv, err := ValueAsProto(k) + if err != nil { + return nil, err + } + vv, err := ValueAsProto(v) + if err != nil { + return nil, err + } + entries = append(entries, &celpb.MapValue_Entry{Key: kv, Value: vv}) + } + return &celpb.Value{ + Kind: &celpb.Value_MapValue{ + MapValue: &celpb.MapValue{Entries: entries}}}, nil + case types.NullType: + return &celpb.Value{ + Kind: &celpb.Value_NullValue{}}, nil + case types.StringType: + return &celpb.Value{ + Kind: &celpb.Value_StringValue{StringValue: res.Value().(string)}}, nil + case types.TypeType: + typeName := res.(ref.Type).TypeName() + return &celpb.Value{Kind: &celpb.Value_TypeValue{TypeValue: typeName}}, nil + case types.UintType: + return &celpb.Value{ + Kind: &celpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil + default: + any, err := res.ConvertToNative(anyPbType) + if err != nil { + return nil, err + } + return &celpb.Value{ + Kind: &celpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil + } +} + 
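+// Hedged sketch of converting an evaluation result into its canonical proto
+// form with ValueAsProto (illustrative; `prg` is a Program and `input` an
+// activation or variable map assumed to be created elsewhere):
+//
+//	out, _, err := prg.Eval(input)
+//	if err != nil {
+//		// handle evaluation error
+//	}
+//	pbVal, err := ValueAsProto(out)
+//	if err != nil {
+//		// handle conversion error
+//	}
+//	_ = pbVal
+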
+var ( + typeNameToTypeValue = map[string]ref.Val{ + "bool": types.BoolType, + "bytes": types.BytesType, + "double": types.DoubleType, + "null_type": types.NullType, + "int": types.IntType, + "list": types.ListType, + "map": types.MapType, + "string": types.StringType, + "type": types.TypeType, + "uint": types.UintType, + } + + anyPbType = reflect.TypeOf(&anypb.Any{}) +) + +// ValueToRefValue converts between exprpb.Value and ref.Val. +func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { + return AlphaProtoAsValue(adapter, v) +} + +func AlphaProtoAsValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { + canonical := &celpb.Value{} + if err := convertProto(v, canonical); err != nil { + return nil, err + } + return ProtoAsValue(adapter, canonical) +} + +func ProtoAsValue(adapter types.Adapter, v *celpb.Value) (ref.Val, error) { + switch v.Kind.(type) { + case *celpb.Value_NullValue: + return types.NullValue, nil + case *celpb.Value_BoolValue: + return types.Bool(v.GetBoolValue()), nil + case *celpb.Value_Int64Value: + return types.Int(v.GetInt64Value()), nil + case *celpb.Value_Uint64Value: + return types.Uint(v.GetUint64Value()), nil + case *celpb.Value_DoubleValue: + return types.Double(v.GetDoubleValue()), nil + case *celpb.Value_StringValue: + return types.String(v.GetStringValue()), nil + case *celpb.Value_BytesValue: + return types.Bytes(v.GetBytesValue()), nil + case *celpb.Value_ObjectValue: + any := v.GetObjectValue() + msg, err := anypb.UnmarshalNew(any, proto.UnmarshalOptions{DiscardUnknown: true}) + if err != nil { + return nil, err + } + return adapter.NativeToValue(msg), nil + case *celpb.Value_MapValue: + m := v.GetMapValue() + entries := make(map[ref.Val]ref.Val) + for _, entry := range m.Entries { + key, err := ProtoAsValue(adapter, entry.Key) + if err != nil { + return nil, err + } + pb, err := ProtoAsValue(adapter, entry.Value) + if err != nil { + return nil, err + } + entries[key] = pb + } + return adapter.NativeToValue(entries), nil + case *celpb.Value_ListValue: + l := v.GetListValue() + elts := make([]ref.Val, len(l.Values)) + for i, e := range l.Values { + rv, err := ProtoAsValue(adapter, e) + if err != nil { + return nil, err + } + elts[i] = rv + } + return adapter.NativeToValue(elts), nil + case *celpb.Value_TypeValue: + typeName := v.GetTypeValue() + tv, ok := typeNameToTypeValue[typeName] + if ok { + return tv, nil + } + return types.NewObjectTypeValue(typeName), nil + } + return nil, errors.New("unknown value") +} + +func convertProto(src, dst proto.Message) error { + pb, err := proto.Marshal(src) + if err != nil { + return err + } + err = proto.Unmarshal(pb, dst) + return err +} diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/library.go b/hack/tools/vendor/github.com/google/cel-go/cel/library.go new file mode 100644 index 0000000000..be59f1b028 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/cel/library.go @@ -0,0 +1,790 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "math" + "strconv" + "strings" + "time" + + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/stdlib" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/interpreter" + "github.com/google/cel-go/parser" +) + +const ( + optMapMacro = "optMap" + optFlatMapMacro = "optFlatMap" + hasValueFunc = "hasValue" + optionalNoneFunc = "optional.none" + optionalOfFunc = "optional.of" + optionalOfNonZeroValueFunc = "optional.ofNonZeroValue" + valueFunc = "value" + unusedIterVar = "#unused" +) + +// Library provides a collection of EnvOption and ProgramOption values used to configure a CEL +// environment for a particular use case or with a related set of functionality. +// +// Note, the ProgramOption values provided by a library are expected to be static and not vary +// between calls to Env.Program(). If there is a need for such dynamic configuration, prefer to +// configure these options outside the Library and within the Env.Program() call directly. +type Library interface { + // CompileOptions returns a collection of functional options for configuring the Parse / Check + // environment. + CompileOptions() []EnvOption + + // ProgramOptions returns a collection of functional options which should be included in every + // Program generated from the Env.Program() call. + ProgramOptions() []ProgramOption +} + +// SingletonLibrary refines the Library interface to ensure that libraries in this format are only +// configured once within the environment. +type SingletonLibrary interface { + Library + + // LibraryName provides a namespaced name which is used to check whether the library has already + // been configured in the environment. + LibraryName() string +} + +// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args, +// and to be linked to each other. +func Lib(l Library) EnvOption { + singleton, isSingleton := l.(SingletonLibrary) + return func(e *Env) (*Env, error) { + if isSingleton { + if e.HasLibrary(singleton.LibraryName()) { + return e, nil + } + e.libraries[singleton.LibraryName()] = true + } + var err error + for _, opt := range l.CompileOptions() { + e, err = opt(e) + if err != nil { + return nil, err + } + } + e.progOpts = append(e.progOpts, l.ProgramOptions()...) + return e, nil + } +} + +// StdLib returns an EnvOption for the standard library of CEL functions and macros. +func StdLib() EnvOption { + return Lib(stdLibrary{}) +} + +// stdLibrary implements the Library interface and provides functional options for the core CEL +// features documented in the specification. +type stdLibrary struct{} + +// LibraryName implements the SingletonLibrary interface method. +func (stdLibrary) LibraryName() string { + return "cel.lib.std" +} + +// CompileOptions returns options for the standard CEL function declarations and macros. 
+func (stdLibrary) CompileOptions() []EnvOption {
+	return []EnvOption{
+		func(e *Env) (*Env, error) {
+			var err error
+			for _, fn := range stdlib.Functions() {
+				existing, found := e.functions[fn.Name()]
+				if found {
+					fn, err = existing.Merge(fn)
+					if err != nil {
+						return nil, err
+					}
+				}
+				e.functions[fn.Name()] = fn
+			}
+			return e, nil
+		},
+		func(e *Env) (*Env, error) {
+			e.variables = append(e.variables, stdlib.Types()...)
+			return e, nil
+		},
+		Macros(StandardMacros...),
+	}
+}
+
+// ProgramOptions returns function implementations for the standard CEL functions.
+func (stdLibrary) ProgramOptions() []ProgramOption {
+	return []ProgramOption{}
+}
+
+// OptionalTypes enables support for optional syntax and types in CEL.
+//
+// The optional value type makes it possible to express whether variables have
+// been provided, whether a result has been computed, and in the future whether
+// an object field path, map key value, or list index has a value.
+//
+// # Syntax Changes
+//
+// OptionalTypes are unlike other CEL extensions because they modify the CEL
+// syntax itself, notably through the use of a `?` preceding a field name or
+// index value.
+//
+// ## Field Selection
+//
+// The optional syntax in field selection is denoted as `obj.?field`. In other
+// words, if a field is set, return `optional.of(obj.field)`, else
+// `optional.none()`. The optional field selection is viral in the sense that
+// after the first optional selection all subsequent selections or indices
+// are treated as optional, i.e. the following expressions are equivalent:
+//
+//	obj.?field.subfield
+//	obj.?field.?subfield
+//
+// ## Indexing
+//
+// Similar to field selection, the optional syntax can be used in index
+// expressions on maps and lists:
+//
+//	list[?0]
+//	map[?key]
+//
+// ## Optional Field Setting
+//
+// When creating map or message literals, if a field may be optionally set
+// based on its presence, then placing a `?` before the field name or key
+// will ensure the type on the right-hand side must be optional(T) where T
+// is the type of the field or key-value.
+//
+// The following returns a map with the key expression set only if the
+// subfield is present, otherwise an empty map is created:
+//
+//	{?key: obj.?field.subfield}
+//
+// ## Optional Element Setting
+//
+// When creating list literals, an element in the list may be optionally added
+// when the element expression is preceded by a `?`:
+//
+//	[a, ?b, ?c] // return a list with either [a], [a, b], [a, b, c], or [a, c]
+//
+// # Optional.Of
+//
+// Create an optional(T) value of a given value with type T.
+//
+//	optional.of(10)
+//
+// # Optional.OfNonZeroValue
+//
+// Create an optional(T) value of a given value with type T if it is not a
+// zero-value. A zero-value is the default empty value for any given CEL type,
+// including empty protobuf message types. If the value is empty, the result
+// of this call will be optional.none().
+//
+//	optional.ofNonZeroValue([1, 2, 3]) // optional(list(int))
+//	optional.ofNonZeroValue([]) // optional.none()
+//	optional.ofNonZeroValue(0)  // optional.none()
+//	optional.ofNonZeroValue("") // optional.none()
+//
+// # Optional.None
+//
+// Create an empty optional value.
+//
+// # HasValue
+//
+// Determine whether the optional contains a value.
+//
+//	optional.of(b'hello').hasValue() // true
+//	optional.ofNonZeroValue({}).hasValue() // false
+//
+// # Value
+//
+// Get the value contained by the optional. If the optional does not have a
+// value, the result will be a CEL error.
+//
+//	optional.of(b'hello').value() // b'hello'
+//	optional.ofNonZeroValue({}).value() // error
+//
+// # Or
+//
+// If the value on the left-hand side is optional.none(), the optional value
+// on the right hand side is returned. If the value on the left-hand side is
+// valued, then it is returned. This operation is short-circuiting and will
+// only evaluate as many links in the `or` chain as are needed to return a
+// non-empty optional value.
+//
+//	obj.?field.or(m[?key])
+//	l[?index].or(obj.?field.subfield).or(obj.?other)
+//
+// # OrValue
+//
+// Either return the value contained within the optional on the left-hand side
+// or return the alternative value on the right hand side.
+//
+//	m[?key].orValue("none")
+//
+// # OptMap
+//
+// Apply a transformation to the optional's underlying value if it is not empty
+// and return an optional typed result based on the transformation. The
+// transformation expression type must return a type T which is wrapped into
+// an optional.
+//
+//	msg.?elements.optMap(e, e.size()).orValue(0)
+//
+// # OptFlatMap
+//
+// Introduced in version: 1
+//
+// Apply a transformation to the optional's underlying value if it is not empty
+// and return the result. The transform expression must return an optional(T)
+// rather than type T. This can be useful when dealing with zero values and
+// conditionally generating an empty or non-empty result in ways which cannot
+// be expressed with `optMap`.
+//
+//	msg.?elements.optFlatMap(e, e[?0]) // return the first element if present.
+func OptionalTypes(opts ...OptionalTypesOption) EnvOption {
+	lib := &optionalLib{version: math.MaxUint32}
+	for _, opt := range opts {
+		lib = opt(lib)
+	}
+	return Lib(lib)
+}
+
+type optionalLib struct {
+	version uint32
+}
+
+// OptionalTypesOption is a functional interface for configuring the optional types library.
+type OptionalTypesOption func(*optionalLib) *optionalLib
+
+// OptionalTypesVersion configures the version of the optional type library.
+//
+// The version limits which functions are available. Only functions introduced
+// at or below the given version are included in the library. If this option
+// is not set, all functions are available.
+//
+// See the library documentation to determine which version a function was introduced in.
+// If the documentation does not state which version a function was introduced in, it can
+// be assumed to be introduced at version 0, when the library was first created.
+func OptionalTypesVersion(version uint32) OptionalTypesOption {
+	return func(lib *optionalLib) *optionalLib {
+		lib.version = version
+		return lib
+	}
+}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (lib *optionalLib) LibraryName() string {
+	return "cel.lib.optional"
+}
+
+// CompileOptions implements the Library interface method.
+func (lib *optionalLib) CompileOptions() []EnvOption {
+	paramTypeK := TypeParamType("K")
+	paramTypeV := TypeParamType("V")
+	optionalTypeV := OptionalType(paramTypeV)
+	listTypeV := ListType(paramTypeV)
+	mapTypeKV := MapType(paramTypeK, paramTypeV)
+
+	opts := []EnvOption{
+		// Enable the optional syntax in the parser.
+		enableOptionalSyntax(),
+
+		// Introduce the optional type.
+		Types(types.OptionalType),
+
+		// Configure the optMap and optFlatMap macros.
+		Macros(ReceiverMacro(optMapMacro, 2, optMap)),
+
+		// Global and member functions for working with optional values.
+		Function(optionalOfFunc,
+			Overload("optional_of", []*Type{paramTypeV}, optionalTypeV,
+				UnaryBinding(func(value ref.Val) ref.Val {
+					return types.OptionalOf(value)
+				}))),
+		Function(optionalOfNonZeroValueFunc,
+			Overload("optional_ofNonZeroValue", []*Type{paramTypeV}, optionalTypeV,
+				UnaryBinding(func(value ref.Val) ref.Val {
+					v, isZeroer := value.(traits.Zeroer)
+					if !isZeroer || !v.IsZeroValue() {
+						return types.OptionalOf(value)
+					}
+					return types.OptionalNone
+				}))),
+		Function(optionalNoneFunc,
+			Overload("optional_none", []*Type{}, optionalTypeV,
+				FunctionBinding(func(values ...ref.Val) ref.Val {
+					return types.OptionalNone
+				}))),
+		Function(valueFunc,
+			MemberOverload("optional_value", []*Type{optionalTypeV}, paramTypeV,
+				UnaryBinding(func(value ref.Val) ref.Val {
+					opt := value.(*types.Optional)
+					return opt.GetValue()
+				}))),
+		Function(hasValueFunc,
+			MemberOverload("optional_hasValue", []*Type{optionalTypeV}, BoolType,
+				UnaryBinding(func(value ref.Val) ref.Val {
+					opt := value.(*types.Optional)
+					return types.Bool(opt.HasValue())
+				}))),
+
+		// Implementations of 'or' and 'orValue' are special-cased to support short-circuiting in the
+		// evaluation chain.
+		Function("or",
+			MemberOverload("optional_or_optional", []*Type{optionalTypeV, optionalTypeV}, optionalTypeV)),
+		Function("orValue",
+			MemberOverload("optional_orValue_value", []*Type{optionalTypeV, paramTypeV}, paramTypeV)),
+
+		// OptSelect is handled specially by the type-checker, so the receiver's field type is used to determine the
+		// output type.
+		Function(operators.OptSelect,
+			Overload("select_optional_field", []*Type{DynType, StringType}, optionalTypeV)),
+
+		// OptIndex is handled mostly like any other indexing operation on a list or map, so the type-checker can use
+		// these signatures to determine type-agreement without any special handling.
+		Function(operators.OptIndex,
+			Overload("list_optindex_optional_int", []*Type{listTypeV, IntType}, optionalTypeV),
+			Overload("optional_list_optindex_optional_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+			Overload("map_optindex_optional_value", []*Type{mapTypeKV, paramTypeK}, optionalTypeV),
+			Overload("optional_map_optindex_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+
+		// Index overloads to accommodate using an optional value as the operand.
+		Function(operators.Index,
+			Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+			Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+	}
+	if lib.version >= 1 {
+		opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
+	}
+	return opts
+}
+
+// ProgramOptions implements the Library interface method.
+func (lib *optionalLib) ProgramOptions() []ProgramOption { + return []ProgramOption{ + CustomDecorator(decorateOptionalOr), + } +} + +func optMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) { + varIdent := args[0] + varName := "" + switch varIdent.Kind() { + case ast.IdentKind: + varName = varIdent.AsIdent() + default: + return nil, meh.NewError(varIdent.ID(), "optMap() variable name must be a simple identifier") + } + mapExpr := args[1] + return meh.NewCall( + operators.Conditional, + meh.NewMemberCall(hasValueFunc, target), + meh.NewCall(optionalOfFunc, + meh.NewComprehension( + meh.NewList(), + unusedIterVar, + varName, + meh.NewMemberCall(valueFunc, meh.Copy(target)), + meh.NewLiteral(types.False), + meh.NewIdent(varName), + mapExpr, + ), + ), + meh.NewCall(optionalNoneFunc), + ), nil +} + +func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) { + varIdent := args[0] + varName := "" + switch varIdent.Kind() { + case ast.IdentKind: + varName = varIdent.AsIdent() + default: + return nil, meh.NewError(varIdent.ID(), "optFlatMap() variable name must be a simple identifier") + } + mapExpr := args[1] + return meh.NewCall( + operators.Conditional, + meh.NewMemberCall(hasValueFunc, target), + meh.NewComprehension( + meh.NewList(), + unusedIterVar, + varName, + meh.NewMemberCall(valueFunc, meh.Copy(target)), + meh.NewLiteral(types.False), + meh.NewIdent(varName), + mapExpr, + ), + meh.NewCall(optionalNoneFunc), + ), nil +} + +func enableOptionalSyntax() EnvOption { + return func(e *Env) (*Env, error) { + e.prsrOpts = append(e.prsrOpts, parser.EnableOptionalSyntax(true)) + return e, nil + } +} + +// EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field +// selection is performed on a primitive type. +func EnableErrorOnBadPresenceTest(value bool) EnvOption { + return features(featureEnableErrorOnBadPresenceTest, value) +} + +func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) { + call, ok := i.(interpreter.InterpretableCall) + if !ok { + return i, nil + } + args := call.Args() + if len(args) != 2 { + return i, nil + } + switch call.Function() { + case "or": + if call.OverloadID() != "" && call.OverloadID() != "optional_or_optional" { + return i, nil + } + return &evalOptionalOr{ + id: call.ID(), + lhs: args[0], + rhs: args[1], + }, nil + case "orValue": + if call.OverloadID() != "" && call.OverloadID() != "optional_orValue_value" { + return i, nil + } + return &evalOptionalOrValue{ + id: call.ID(), + lhs: args[0], + rhs: args[1], + }, nil + default: + return i, nil + } +} + +// evalOptionalOr selects between two optional values, either the first if it has a value, or +// the second optional expression is evaluated and returned. +type evalOptionalOr struct { + id int64 + lhs interpreter.Interpretable + rhs interpreter.Interpretable +} + +// ID implements the Interpretable interface method. +func (opt *evalOptionalOr) ID() int64 { + return opt.id +} + +// Eval evaluates the left-hand side optional to determine whether it contains a value, else +// proceeds with the right-hand side evaluation. +func (opt *evalOptionalOr) Eval(ctx interpreter.Activation) ref.Val { + // short-circuit lhs. + optLHS := opt.lhs.Eval(ctx) + optVal, ok := optLHS.(*types.Optional) + if !ok { + return optLHS + } + if optVal.HasValue() { + return optVal + } + return opt.rhs.Eval(ctx) +} + +// evalOptionalOrValue selects between an optional or a concrete value. 
If the optional has a value, +// its value is returned, otherwise the alternative value expression is evaluated and returned. +type evalOptionalOrValue struct { + id int64 + lhs interpreter.Interpretable + rhs interpreter.Interpretable +} + +// ID implements the Interpretable interface method. +func (opt *evalOptionalOrValue) ID() int64 { + return opt.id +} + +// Eval evaluates the left-hand side optional to determine whether it contains a value, else +// proceeds with the right-hand side evaluation. +func (opt *evalOptionalOrValue) Eval(ctx interpreter.Activation) ref.Val { + // short-circuit lhs. + optLHS := opt.lhs.Eval(ctx) + optVal, ok := optLHS.(*types.Optional) + if !ok { + return optLHS + } + if optVal.HasValue() { + return optVal.GetValue() + } + return opt.rhs.Eval(ctx) +} + +type timeUTCLibrary struct{} + +func (timeUTCLibrary) CompileOptions() []EnvOption { + return timeOverloadDeclarations +} + +func (timeUTCLibrary) ProgramOptions() []ProgramOption { + return []ProgramOption{} +} + +// Declarations and functions which enable using UTC on time.Time inputs when the timezone is unspecified +// in the CEL expression. +var ( + utcTZ = types.String("UTC") + + timeOverloadDeclarations = []EnvOption{ + Function(overloads.TimeGetHours, + MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType, + UnaryBinding(types.DurationGetHours))), + Function(overloads.TimeGetMinutes, + MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType, + UnaryBinding(types.DurationGetMinutes))), + Function(overloads.TimeGetSeconds, + MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType, + UnaryBinding(types.DurationGetSeconds))), + Function(overloads.TimeGetMilliseconds, + MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType, + UnaryBinding(types.DurationGetMilliseconds))), + Function(overloads.TimeGetFullYear, + MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetFullYear(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToYearWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetFullYear), + ), + ), + Function(overloads.TimeGetMonth, + MemberOverload(overloads.TimestampToMonth, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetMonth(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToMonthWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetMonth), + ), + ), + Function(overloads.TimeGetDayOfYear, + MemberOverload(overloads.TimestampToDayOfYear, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetDayOfYear(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToDayOfYearWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(func(ts, tz ref.Val) ref.Val { + return timestampGetDayOfYear(ts, tz) + }), + ), + ), + Function(overloads.TimeGetDayOfMonth, + MemberOverload(overloads.TimestampToDayOfMonthZeroBased, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetDayOfMonthZeroBased(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetDayOfMonthZeroBased), + ), + ), + Function(overloads.TimeGetDate, + MemberOverload(overloads.TimestampToDayOfMonthOneBased, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) 
ref.Val { + return timestampGetDayOfMonthOneBased(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetDayOfMonthOneBased), + ), + ), + Function(overloads.TimeGetDayOfWeek, + MemberOverload(overloads.TimestampToDayOfWeek, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetDayOfWeek(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToDayOfWeekWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetDayOfWeek), + ), + ), + Function(overloads.TimeGetHours, + MemberOverload(overloads.TimestampToHours, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetHours(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToHoursWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetHours), + ), + ), + Function(overloads.TimeGetMinutes, + MemberOverload(overloads.TimestampToMinutes, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetMinutes(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToMinutesWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetMinutes), + ), + ), + Function(overloads.TimeGetSeconds, + MemberOverload(overloads.TimestampToSeconds, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetSeconds(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToSecondsWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetSeconds), + ), + ), + Function(overloads.TimeGetMilliseconds, + MemberOverload(overloads.TimestampToMilliseconds, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetMilliseconds(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToMillisecondsWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetMilliseconds), + ), + ), + } +) + +func timestampGetFullYear(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Year()) +} + +func timestampGetMonth(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + // CEL spec indicates that the month should be 0-based, but the Time value + // for Month() is 1-based. 
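+	// For example, a timestamp in January yields types.Int(0).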
+ return types.Int(t.Month() - 1) +} + +func timestampGetDayOfYear(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.YearDay() - 1) +} + +func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Day() - 1) +} + +func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Day()) +} + +func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Weekday()) +} + +func timestampGetHours(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Hour()) +} + +func timestampGetMinutes(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Minute()) +} + +func timestampGetSeconds(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Second()) +} + +func timestampGetMilliseconds(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Nanosecond() / 1000000) +} + +func inTimeZone(ts, tz ref.Val) (time.Time, error) { + t := ts.(types.Timestamp) + val := string(tz.(types.String)) + ind := strings.Index(val, ":") + if ind == -1 { + loc, err := time.LoadLocation(val) + if err != nil { + return time.Time{}, err + } + return t.In(loc), nil + } + + // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC + // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes. + hr, err := strconv.Atoi(string(val[0:ind])) + if err != nil { + return time.Time{}, err + } + min, err := strconv.Atoi(string(val[ind+1:])) + if err != nil { + return time.Time{}, err + } + var offset int + if string(val[0]) == "-" { + offset = hr*60 - min + } else { + offset = hr*60 + min + } + secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds()) + timezone := time.FixedZone("", secondsEastOfUTC) + return t.In(timezone), nil +} diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/macro.go b/hack/tools/vendor/github.com/google/cel-go/cel/macro.go new file mode 100644 index 0000000000..4db1fd57a9 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/cel/macro.go @@ -0,0 +1,576 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package cel
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/parser"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Macro describes a function signature to match and the MacroExpander to apply.
+//
+// Note: when a Macro should apply to multiple overloads (based on arg count) of a given function,
+// a Macro should be created per arg-count or as a var arg macro.
+type Macro = parser.Macro
+
+// MacroFactory defines an expansion function which converts a call and its arguments to a cel.Expr value.
+type MacroFactory = parser.MacroExpander
+
+// MacroExprFactory assists with the creation of Expr values in a manner which is consistent
+// with the internal semantics and id generation behaviors of the parser and checker libraries.
+type MacroExprFactory = parser.ExprHelper
+
+// MacroExpander converts a call and its associated arguments into a protobuf Expr representation.
+//
+// If the MacroExpander determines within the implementation that an expansion is not needed it may return
+// a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments
+// are not well-formed, the result of the expansion will be an error.
+//
+// The MacroExpander accepts as arguments a MacroExprHelper as well as the arguments used in the function call
+// and produces as output an Expr ast node.
+//
+// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil.
+type MacroExpander func(eh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error)
+
+// MacroExprHelper exposes helper methods for creating new expressions within a CEL abstract syntax tree.
+// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is
+// consistent with the source position and expression id generation code leveraged by both
+// the parser and type-checker.
+type MacroExprHelper interface {
+	// Copy the input expression with a brand new set of identifiers.
+	Copy(*exprpb.Expr) *exprpb.Expr
+
+	// LiteralBool creates an Expr value for a bool literal.
+	LiteralBool(value bool) *exprpb.Expr
+
+	// LiteralBytes creates an Expr value for a byte literal.
+	LiteralBytes(value []byte) *exprpb.Expr
+
+	// LiteralDouble creates an Expr value for a double literal.
+	LiteralDouble(value float64) *exprpb.Expr
+
+	// LiteralInt creates an Expr value for an int literal.
+	LiteralInt(value int64) *exprpb.Expr
+
+	// LiteralString creates an Expr value for a string literal.
+	LiteralString(value string) *exprpb.Expr
+
+	// LiteralUint creates an Expr value for a uint literal.
+	LiteralUint(value uint64) *exprpb.Expr
+
+	// NewList creates a CreateList instruction where the list is comprised of the optional set
+	// of elements provided as arguments.
+	NewList(elems ...*exprpb.Expr) *exprpb.Expr
+
+	// NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+	// optional set of key, value entries.
+	NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+	// NewMapEntry creates a Map Entry for the key, value pair.
+	NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
+
+	// NewObject creates a CreateStruct instruction for an object with a given type name and
+	// optional set of field initializers.
+	NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+	// NewObjectFieldInit creates a new Object field initializer from the field name and value.
+	NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
+
+	// Fold creates a fold comprehension instruction.
+	//
+	// - iterVar is the iteration variable name.
+	// - iterRange represents the expression that resolves to a list or map where the elements or
+	//   keys (respectively) will be iterated over.
+	// - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+	// - accuInit is the initial expression whose value will be set for the accuVar prior to
+	//   folding.
+	// - condition is the expression to test to determine whether to continue folding.
+	// - step is the expression to evaluate at the conclusion of a single fold iteration.
+	// - result is the computation to evaluate at the conclusion of the fold.
+	//
+	// The accuVar should not shadow variable names that you would like to reference within the
+	// environment in the step and condition expressions. Presently, the name __result__ is commonly
+	// used by built-in macros but this may change in the future.
+	Fold(iterVar string,
+		iterRange *exprpb.Expr,
+		accuVar string,
+		accuInit *exprpb.Expr,
+		condition *exprpb.Expr,
+		step *exprpb.Expr,
+		result *exprpb.Expr) *exprpb.Expr
+
+	// Ident creates an identifier Expr value.
+	Ident(name string) *exprpb.Expr
+
+	// AccuIdent returns an accumulator identifier for use with comprehension results.
+	AccuIdent() *exprpb.Expr
+
+	// GlobalCall creates a function call Expr value for a global (free) function.
+	GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr
+
+	// ReceiverCall creates a function call Expr value for a receiver-style function.
+	ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr
+
+	// PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+	PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr
+
+	// Select creates a field traversal Expr value.
+	Select(operand *exprpb.Expr, field string) *exprpb.Expr
+
+	// OffsetLocation returns the Location of the expression identifier.
+	OffsetLocation(exprID int64) common.Location
+
+	// NewError associates an error message with a given expression id.
+	NewError(exprID int64, message string) *Error
+}
+
+// GlobalMacro creates a Macro for a global function with the specified arg count.
+func GlobalMacro(function string, argCount int, factory MacroFactory) Macro {
+	return parser.NewGlobalMacro(function, argCount, factory)
+}
+
+// ReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+func ReceiverMacro(function string, argCount int, factory MacroFactory) Macro {
+	return parser.NewReceiverMacro(function, argCount, factory)
+}
+
+// GlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+func GlobalVarArgMacro(function string, factory MacroFactory) Macro {
+	return parser.NewGlobalVarArgMacro(function, factory)
+}
+
+// ReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+func ReceiverVarArgMacro(function string, factory MacroFactory) Macro {
+	return parser.NewReceiverVarArgMacro(function, factory)
+}
+
+// NewGlobalMacro creates a Macro for a global function with the specified arg count.
+//
+// Deprecated: use GlobalMacro
+func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
+	expand := adaptingExpander{expander}
+	return parser.NewGlobalMacro(function, argCount, expand.Expander)
+}
+
+// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+//
+// Deprecated: use ReceiverMacro
+func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
+	expand := adaptingExpander{expander}
+	return parser.NewReceiverMacro(function, argCount, expand.Expander)
+}
+
+// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+//
+// Deprecated: use GlobalVarArgMacro
+func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
+	expand := adaptingExpander{expander}
+	return parser.NewGlobalVarArgMacro(function, expand.Expander)
+}
+
+// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+//
+// Deprecated: use ReceiverVarArgMacro
+func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
+	expand := adaptingExpander{expander}
+	return parser.NewReceiverVarArgMacro(function, expand.Expander)
+}
+
+// HasMacroExpander expands the input call arguments into a presence test, e.g. has(<operand>.field)
+func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+	ph, err := toParserHelper(meh)
+	if err != nil {
+		return nil, err
+	}
+	arg, err := adaptToExpr(args[0])
+	if err != nil {
+		return nil, err
+	}
+	if arg.Kind() == ast.SelectKind {
+		s := arg.AsSelect()
+		return adaptToProto(ph.NewPresenceTest(s.Operand(), s.FieldName()))
+	}
+	return nil, ph.NewError(arg.ID(), "invalid argument to has() macro")
+}
+
+// ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the
+// elements in the range match the predicate expression:
+//
+//	<iterRange>.exists(<iterVar>, <predicate>)
+func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+	ph, err := toParserHelper(meh)
+	if err != nil {
+		return nil, err
+	}
+	out, err := parser.MakeExists(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+	if err != nil {
+		return nil, err
+	}
+	return adaptToProto(out)
+}
+
+// ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly
+// one of the elements in the range match the predicate expression:
+//
+//	<iterRange>.exists_one(<iterVar>, <predicate>)
+func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+	ph, err := toParserHelper(meh)
+	if err != nil {
+		return nil, err
+	}
+	out, err := parser.MakeExistsOne(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+	if err != nil {
+		return nil, err
+	}
+	return adaptToProto(out)
+}
+
+// MapMacroExpander expands the input call arguments into a comprehension that transforms each element in the
+// input to produce an output list.
+//
+// There are two call patterns supported by map:
+//
+//	<iterRange>.map(<iterVar>, <transform>)
+//	<iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
+// In the second form only iterVar values which return true when provided to the predicate expression
+// are transformed.
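+//
+// For example, both call patterns over a simple list (illustrative expansions):
+//
+//	[1, 2, 3].map(x, x * 2)        // [2, 4, 6]
+//	[1, 2, 3].map(x, x > 1, x * 2) // [4, 6]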
+func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+	ph, err := toParserHelper(meh)
+	if err != nil {
+		return nil, err
+	}
+	out, err := parser.MakeMap(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+	if err != nil {
+		return nil, err
+	}
+	return adaptToProto(out)
+}
+
+// FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains
+// only elements which match the provided predicate expression:
+//
+//	<iterRange>.filter(<iterVar>, <predicate>)
+func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+	ph, err := toParserHelper(meh)
+	if err != nil {
+		return nil, err
+	}
+	out, err := parser.MakeFilter(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+	if err != nil {
+		return nil, err
+	}
+	return adaptToProto(out)
+}
+
+var (
+	// Aliases to each macro in the CEL standard environment.
+	// Note: reassigning these macro variables may result in undefined behavior.
+
+	// HasMacro expands "has(m.f)" which tests the presence of a field, avoiding the need to
+	// specify the field as a string.
+	HasMacro = parser.HasMacro
+
+	// AllMacro expands "range.all(var, predicate)" into a comprehension which ensures that all
+	// elements in the range satisfy the predicate.
+	AllMacro = parser.AllMacro
+
+	// ExistsMacro expands "range.exists(var, predicate)" into a comprehension which ensures that
+	// some element in the range satisfies the predicate.
+	ExistsMacro = parser.ExistsMacro
+
+	// ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one
+	// element in range the predicate holds.
+	ExistsOneMacro = parser.ExistsOneMacro
+
+	// MapMacro expands "range.map(var, function)" into a comprehension which applies the function
+	// to each element in the range to produce a new list.
+	MapMacro = parser.MapMacro
+
+	// MapFilterMacro expands "range.map(var, predicate, function)" into a comprehension which
+	// first filters the elements in the range by the predicate, then applies the transform function
+	// to produce a new list.
+	MapFilterMacro = parser.MapFilterMacro
+
+	// FilterMacro expands "range.filter(var, predicate)" into a comprehension which filters
+	// elements in the range, producing a new list from the elements that satisfy the predicate.
+	FilterMacro = parser.FilterMacro
+
+	// StandardMacros provides an alias to all the CEL macros defined in the standard environment.
+	StandardMacros = []Macro{
+		HasMacro, AllMacro, ExistsMacro, ExistsOneMacro, MapMacro, MapFilterMacro, FilterMacro,
+	}
+
+	// NoMacros provides an alias to an empty list of macros.
+	NoMacros = []Macro{}
+)
+
+type adaptingExpander struct {
+	legacyExpander MacroExpander
+}
+
+func (adapt *adaptingExpander) Expander(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+	var legacyTarget *exprpb.Expr = nil
+	var err *Error = nil
+	if target != nil {
+		legacyTarget, err = adaptToProto(target)
+		if err != nil {
+			return nil, err
+		}
+	}
+	legacyArgs := make([]*exprpb.Expr, len(args))
+	for i, arg := range args {
+		legacyArgs[i], err = adaptToProto(arg)
+		if err != nil {
+			return nil, err
+		}
+	}
+	ah := &adaptingHelper{modernHelper: eh}
+	legacyExpr, err := adapt.legacyExpander(ah, legacyTarget, legacyArgs)
+	if err != nil {
+		return nil, err
+	}
+	ex, err := adaptToExpr(legacyExpr)
+	if err != nil {
+		return nil, err
+	}
+	return ex, nil
+}
+
+func wrapErr(id int64, message string, err error) *common.Error {
+	return &common.Error{
+		Location: common.NoLocation,
+		Message:  fmt.Sprintf("%s: %v", message, err),
+		ExprID:   id,
+	}
+}
+
+type adaptingHelper struct {
+	modernHelper parser.ExprHelper
+}
+
+// Copy the input expression with a brand new set of identifiers.
+func (ah *adaptingHelper) Copy(e *exprpb.Expr) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.Copy(mustAdaptToExpr(e)))
+}
+
+// LiteralBool creates an Expr value for a bool literal.
+func (ah *adaptingHelper) LiteralBool(value bool) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bool(value)))
+}
+
+// LiteralBytes creates an Expr value for a byte literal.
+func (ah *adaptingHelper) LiteralBytes(value []byte) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bytes(value)))
+}
+
+// LiteralDouble creates an Expr value for a double literal.
+func (ah *adaptingHelper) LiteralDouble(value float64) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Double(value)))
+}
+
+// LiteralInt creates an Expr value for an int literal.
+func (ah *adaptingHelper) LiteralInt(value int64) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Int(value)))
+}
+
+// LiteralString creates an Expr value for a string literal.
+func (ah *adaptingHelper) LiteralString(value string) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewLiteral(types.String(value)))
+}
+
+// LiteralUint creates an Expr value for a uint literal.
+func (ah *adaptingHelper) LiteralUint(value uint64) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Uint(value)))
+}
+
+// NewList creates a CreateList instruction where the list is comprised of the optional set
+// of elements provided as arguments.
+func (ah *adaptingHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewList(mustAdaptToExprs(elems)...))
+}
+
+// NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+// optional set of key, value entries.
+func (ah *adaptingHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+	adaptedEntries := make([]ast.EntryExpr, len(entries))
+	for i, e := range entries {
+		adaptedEntries[i] = mustAdaptToEntryExpr(e)
+	}
+	return mustAdaptToProto(ah.modernHelper.NewMap(adaptedEntries...))
+}
+
+// NewMapEntry creates a Map Entry for the key, value pair.
+func (ah *adaptingHelper) NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+	return mustAdaptToProtoEntry(
+		ah.modernHelper.NewMapEntry(mustAdaptToExpr(key), mustAdaptToExpr(val), optional))
+}
+
+// NewObject creates a CreateStruct instruction for an object with a given type name and
+// optional set of field initializers.
+func (ah *adaptingHelper) NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+	adaptedEntries := make([]ast.EntryExpr, len(fieldInits))
+	for i, e := range fieldInits {
+		adaptedEntries[i] = mustAdaptToEntryExpr(e)
+	}
+	return mustAdaptToProto(ah.modernHelper.NewStruct(typeName, adaptedEntries...))
+}
+
+// NewObjectFieldInit creates a new Object field initializer from the field name and value.
+func (ah *adaptingHelper) NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+	return mustAdaptToProtoEntry(
+		ah.modernHelper.NewStructField(field, mustAdaptToExpr(init), optional))
+}
+
+// Fold creates a fold comprehension instruction.
+//
+// - iterVar is the iteration variable name.
+// - iterRange represents the expression that resolves to a list or map where the elements or
+//   keys (respectively) will be iterated over.
+// - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+// - accuInit is the initial expression whose value will be set for the accuVar prior to
+//   folding.
+// - condition is the expression to test to determine whether to continue folding.
+// - step is the expression to evaluate at the conclusion of a single fold iteration.
+// - result is the computation to evaluate at the conclusion of the fold.
+//
+// The accuVar should not shadow variable names that you would like to reference within the
+// environment in the step and condition expressions. Presently, the name __result__ is commonly
+// used by built-in macros but this may change in the future.
+func (ah *adaptingHelper) Fold(iterVar string,
+	iterRange *exprpb.Expr,
+	accuVar string,
+	accuInit *exprpb.Expr,
+	condition *exprpb.Expr,
+	step *exprpb.Expr,
+	result *exprpb.Expr) *exprpb.Expr {
+	return mustAdaptToProto(
+		ah.modernHelper.NewComprehension(
+			mustAdaptToExpr(iterRange),
+			iterVar,
+			accuVar,
+			mustAdaptToExpr(accuInit),
+			mustAdaptToExpr(condition),
+			mustAdaptToExpr(step),
+			mustAdaptToExpr(result),
+		),
+	)
+}
+
+// Ident creates an identifier Expr value.
+func (ah *adaptingHelper) Ident(name string) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewIdent(name))
+}
+
+// AccuIdent returns an accumulator identifier for use with comprehension results.
+func (ah *adaptingHelper) AccuIdent() *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewAccuIdent())
+}
+
+// GlobalCall creates a function call Expr value for a global (free) function.
+func (ah *adaptingHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewCall(function, mustAdaptToExprs(args)...))
+}
+
+// ReceiverCall creates a function call Expr value for a receiver-style function.
+func (ah *adaptingHelper) ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
+	return mustAdaptToProto(
+		ah.modernHelper.NewMemberCall(function, mustAdaptToExpr(target), mustAdaptToExprs(args)...))
+}
+
+// PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
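+// For example, has(msg.field) is modelled as a select of 'field' on 'msg' with
+// the test-only flag set, producing a boolean presence result rather than the
+// field's value.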
+func (ah *adaptingHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr {
+	op := mustAdaptToExpr(operand)
+	return mustAdaptToProto(ah.modernHelper.NewPresenceTest(op, field))
+}
+
+// Select creates a field traversal Expr value.
+func (ah *adaptingHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr {
+	op := mustAdaptToExpr(operand)
+	return mustAdaptToProto(ah.modernHelper.NewSelect(op, field))
+}
+
+// OffsetLocation returns the Location of the expression identifier.
+func (ah *adaptingHelper) OffsetLocation(exprID int64) common.Location {
+	return ah.modernHelper.OffsetLocation(exprID)
+}
+
+// NewError associates an error message with a given expression id.
+func (ah *adaptingHelper) NewError(exprID int64, message string) *Error {
+	return ah.modernHelper.NewError(exprID, message)
+}
+
+func mustAdaptToExprs(exprs []*exprpb.Expr) []ast.Expr {
+	adapted := make([]ast.Expr, len(exprs))
+	for i, e := range exprs {
+		adapted[i] = mustAdaptToExpr(e)
+	}
+	return adapted
+}
+
+func mustAdaptToExpr(e *exprpb.Expr) ast.Expr {
+	out, _ := adaptToExpr(e)
+	return out
+}
+
+func adaptToExpr(e *exprpb.Expr) (ast.Expr, *Error) {
+	if e == nil {
+		return nil, nil
+	}
+	out, err := ast.ProtoToExpr(e)
+	if err != nil {
+		return nil, wrapErr(e.GetId(), "proto conversion failure", err)
+	}
+	return out, nil
+}
+
+func mustAdaptToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) ast.EntryExpr {
+	out, _ := ast.ProtoToEntryExpr(e)
+	return out
+}
+
+func mustAdaptToProto(e ast.Expr) *exprpb.Expr {
+	out, _ := adaptToProto(e)
+	return out
+}
+
+func adaptToProto(e ast.Expr) (*exprpb.Expr, *Error) {
+	if e == nil {
+		return nil, nil
+	}
+	out, err := ast.ExprToProto(e)
+	if err != nil {
+		return nil, wrapErr(e.ID(), "expr conversion failure", err)
+	}
+	return out, nil
+}
+
+func mustAdaptToProtoEntry(e ast.EntryExpr) *exprpb.Expr_CreateStruct_Entry {
+	out, _ := ast.EntryExprToProto(e)
+	return out
+}
+
+func toParserHelper(meh MacroExprHelper) (parser.ExprHelper, *Error) {
+	ah, ok := meh.(*adaptingHelper)
+	if !ok {
+		return nil, common.NewError(0,
+			fmt.Sprintf("unsupported macro helper: %v (%T)", meh, meh),
+			common.NoLocation)
+	}
+	return ah.modernHelper, nil
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/optimizer.go b/hack/tools/vendor/github.com/google/cel-go/cel/optimizer.go
new file mode 100644
index 0000000000..c149abb703
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/optimizer.go
@@ -0,0 +1,535 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+	"sort"
+
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// StaticOptimizer contains a sequence of ASTOptimizer instances which will be applied in order.
+//
+// The static optimizer normalizes expression ids, and type-checking is run between optimization
+// passes to ensure that the final optimized output is a valid expression with metadata consistent
+// with what would have been generated from a parsed and checked expression.
+//
+// Note: source position information is best-effort and likely wrong, but optimized expressions
+// should be suitable for calls to parser.Unparse.
+type StaticOptimizer struct {
+	optimizers []ASTOptimizer
+}
+
+// NewStaticOptimizer creates a StaticOptimizer with a sequence of ASTOptimizer's to be applied
+// to a checked expression.
+func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer {
+	return &StaticOptimizer{
+		optimizers: optimizers,
+	}
+}
+
+// Optimize applies a sequence of optimizations to an Ast within a given environment.
+//
+// If issues are encountered, the Issues.Err() return value will be non-nil.
+func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) {
+	// Make a copy of the AST to be optimized.
+	optimized := ast.Copy(a.impl)
+	ids := newIDGenerator(ast.MaxID(a.impl))
+
+	// Create the optimizer context, could be pooled in the future.
+	issues := NewIssues(common.NewErrors(a.Source()))
+	baseFac := ast.NewExprFactory()
+	exprFac := &optimizerExprFactory{
+		idGenerator: ids,
+		fac:         baseFac,
+		sourceInfo:  optimized.SourceInfo(),
+	}
+	ctx := &OptimizerContext{
+		optimizerExprFactory: exprFac,
+		Env:                  env,
+		Issues:               issues,
+	}
+
+	// Apply the optimizations sequentially.
+	for _, o := range opt.optimizers {
+		optimized = o.Optimize(ctx, optimized)
+		if issues.Err() != nil {
+			return nil, issues
+		}
+		// Normalize expression id metadata including coordination with macro call metadata.
+		freshIDGen := newIDGenerator(0)
+		info := optimized.SourceInfo()
+		expr := optimized.Expr()
+		normalizeIDs(freshIDGen.renumberStable, expr, info)
+		cleanupMacroRefs(expr, info)
+
+		// Recheck the updated expression for any possible type-agreement or validation errors.
+		parsed := &Ast{
+			source: a.Source(),
+			impl:   ast.NewAST(expr, info)}
+		checked, iss := ctx.Check(parsed)
+		if iss.Err() != nil {
+			return nil, iss
+		}
+		optimized = checked.impl
+	}
+
+	// Return the optimized result.
+	return &Ast{
+		source: a.Source(),
+		impl:   optimized,
+	}, nil
+}
+
+// normalizeIDs ensures that the metadata present with an AST is reset in a manner such
+// that the ids within the expression correspond to the ids within macros.
+func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInfo) {
+	optimized.RenumberIDs(idGen)
+	if len(info.MacroCalls()) == 0 {
+		return
+	}
+
+	// Sort the macro ids to make sure that the renumbering of macro-specific variables
+	// is stable across normalization calls.
+	sortedMacroIDs := []int64{}
+	for id := range info.MacroCalls() {
+		sortedMacroIDs = append(sortedMacroIDs, id)
+	}
+	sort.Slice(sortedMacroIDs, func(i, j int) bool { return sortedMacroIDs[i] < sortedMacroIDs[j] })
+
+	// First, update the macro call ids themselves.
+	callIDMap := map[int64]int64{}
+	for _, id := range sortedMacroIDs {
+		callIDMap[id] = idGen(id)
+	}
+	// Then update the macro call definitions which refer to these ids, but
+	// ensure that the updates don't collide and remove macro entries which haven't
+	// been visited / updated yet.
+	type macroUpdate struct {
+		id   int64
+		call ast.Expr
+	}
+	macroUpdates := []macroUpdate{}
+	for _, oldID := range sortedMacroIDs {
+		newID := callIDMap[oldID]
+		call, found := info.GetMacroCall(oldID)
+		if !found {
+			continue
+		}
+		call.RenumberIDs(idGen)
+		macroUpdates = append(macroUpdates, macroUpdate{id: newID, call: call})
+		info.ClearMacroCall(oldID)
+	}
+	for _, u := range macroUpdates {
+		info.SetMacroCall(u.id, u.call)
+	}
+}
+
+func cleanupMacroRefs(expr ast.Expr, info *ast.SourceInfo) {
+	if len(info.MacroCalls()) == 0 {
+		return
+	}
+
+	// Sanitize the macro call references once the optimized expression has been computed
+	// and the ids normalized between the expression and the macros.
+	exprRefMap := make(map[int64]struct{})
+	ast.PostOrderVisit(expr, ast.NewExprVisitor(func(e ast.Expr) {
+		if e.ID() == 0 {
+			return
+		}
+		exprRefMap[e.ID()] = struct{}{}
+	}))
+	// Update the macro call id references to ensure that macro pointers are
+	// updated consistently across macros.
+	for _, call := range info.MacroCalls() {
+		ast.PostOrderVisit(call, ast.NewExprVisitor(func(e ast.Expr) {
+			if e.ID() == 0 {
+				return
+			}
+			exprRefMap[e.ID()] = struct{}{}
+		}))
+	}
+	for id := range info.MacroCalls() {
+		if _, found := exprRefMap[id]; !found {
+			info.ClearMacroCall(id)
+		}
+	}
+}
+
+// newIDGenerator ensures that new ids are only created the first time they are encountered.
+func newIDGenerator(seed int64) *idGenerator {
+	return &idGenerator{
+		idMap: make(map[int64]int64),
+		seed:  seed,
+	}
+}
+
+type idGenerator struct {
+	idMap map[int64]int64
+	seed  int64
+}
+
+func (gen *idGenerator) nextID() int64 {
+	gen.seed++
+	return gen.seed
+}
+
+func (gen *idGenerator) renumberStable(id int64) int64 {
+	if id == 0 {
+		return 0
+	}
+	if newID, found := gen.idMap[id]; found {
+		return newID
+	}
+	nextID := gen.nextID()
+	gen.idMap[id] = nextID
+	return nextID
+}
+
+// OptimizerContext embeds Env and Issues instances to make it easy to type-check and evaluate
+// subexpressions and report any errors encountered along the way. The context also embeds the
+// optimizerExprFactory which can be used to generate new sub-expressions with expression ids
+// consistent with the expectations of a parsed expression.
+type OptimizerContext struct {
+	*Env
+	*optimizerExprFactory
+	*Issues
+}
+
+// ExtendEnv augments the context's environment with the additional options.
+func (opt *OptimizerContext) ExtendEnv(opts ...EnvOption) error {
+	e, err := opt.Env.Extend(opts...)
+	if err != nil {
+		return err
+	}
+	opt.Env = e
+	return nil
+}
+
+// ASTOptimizer applies an optimization over an AST and returns the optimized result.
+type ASTOptimizer interface {
+	// Optimize optimizes a type-checked AST within an Environment and accumulates any issues.
+	Optimize(*OptimizerContext, *ast.AST) *ast.AST
+}
+
+type optimizerExprFactory struct {
+	*idGenerator
+	fac        ast.ExprFactory
+	sourceInfo *ast.SourceInfo
+}
+
+// NewAST creates an AST from the current expression using the tracked source info which
+// is modified and managed by the OptimizerContext.
+func (opt *optimizerExprFactory) NewAST(expr ast.Expr) *ast.AST {
+	return ast.NewAST(expr, opt.sourceInfo)
+}
+
+// CopyAST creates a renumbered copy of `Expr` and `SourceInfo` values of the input AST, where the
+// renumbering uses the same scheme as the core optimizer logic ensuring there are no collisions
+// between copies.
+//
+// Use this method before attempting to merge the expression from AST into another.
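+//
+// A minimal usage sketch (hypothetical optimizer pass; the names target and
+// otherAST are assumed for illustration):
+//
+//	copyExpr, _ := ctx.CopyAST(otherAST)
+//	ctx.UpdateExpr(target, copyExpr)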
+func (opt *optimizerExprFactory) CopyAST(a *ast.AST) (ast.Expr, *ast.SourceInfo) { + idGen := newIDGenerator(opt.nextID()) + defer func() { opt.seed = idGen.nextID() }() + copyExpr := opt.fac.CopyExpr(a.Expr()) + copyInfo := ast.CopySourceInfo(a.SourceInfo()) + normalizeIDs(idGen.renumberStable, copyExpr, copyInfo) + return copyExpr, copyInfo +} + +// CopyASTAndMetadata copies the input AST and propagates the macro metadata into the AST being +// optimized. +func (opt *optimizerExprFactory) CopyASTAndMetadata(a *ast.AST) ast.Expr { + copyExpr, copyInfo := opt.CopyAST(a) + for macroID, call := range copyInfo.MacroCalls() { + opt.SetMacroCall(macroID, call) + } + return copyExpr +} + +// ClearMacroCall clears the macro at the given expression id. +func (opt *optimizerExprFactory) ClearMacroCall(id int64) { + opt.sourceInfo.ClearMacroCall(id) +} + +// SetMacroCall sets the macro call metadata for the given macro id within the tracked source info +// metadata. +func (opt *optimizerExprFactory) SetMacroCall(id int64, expr ast.Expr) { + opt.sourceInfo.SetMacroCall(id, expr) +} + +// MacroCalls returns the map of macro calls currently in the context. +func (opt *optimizerExprFactory) MacroCalls() map[int64]ast.Expr { + return opt.sourceInfo.MacroCalls() +} + +// NewBindMacro creates an AST expression representing the expanded bind() macro, and a macro expression +// representing the unexpanded call signature to be inserted into the source info macro call metadata. +func (opt *optimizerExprFactory) NewBindMacro(macroID int64, varName string, varInit, remaining ast.Expr) (astExpr, macroExpr ast.Expr) { + varID := opt.nextID() + remainingID := opt.nextID() + remaining = opt.fac.CopyExpr(remaining) + remaining.RenumberIDs(func(id int64) int64 { + if id == macroID { + return remainingID + } + return id + }) + if call, exists := opt.sourceInfo.GetMacroCall(macroID); exists { + opt.SetMacroCall(remainingID, opt.fac.CopyExpr(call)) + } + + astExpr = opt.fac.NewComprehension(macroID, + opt.fac.NewList(opt.nextID(), []ast.Expr{}, []int32{}), + "#unused", + varName, + opt.fac.CopyExpr(varInit), + opt.fac.NewLiteral(opt.nextID(), types.False), + opt.fac.NewIdent(varID, varName), + remaining) + + macroExpr = opt.fac.NewMemberCall(0, "bind", + opt.fac.NewIdent(opt.nextID(), "cel"), + opt.fac.NewIdent(varID, varName), + opt.fac.CopyExpr(varInit), + opt.fac.CopyExpr(remaining)) + opt.sanitizeMacro(macroID, macroExpr) + return +} + +// NewCall creates a global function call invocation expression. +// +// Example: +// +// countByField(list, fieldName) +// - function: countByField +// - args: [list, fieldName] +func (opt *optimizerExprFactory) NewCall(function string, args ...ast.Expr) ast.Expr { + return opt.fac.NewCall(opt.nextID(), function, args...) +} + +// NewMemberCall creates a member function call invocation expression where 'target' is the receiver of the call. +// +// Example: +// +// list.countByField(fieldName) +// - function: countByField +// - target: list +// - args: [fieldName] +func (opt *optimizerExprFactory) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr { + return opt.fac.NewMemberCall(opt.nextID(), function, target, args...) +} + +// NewIdent creates a new identifier expression. +// +// Examples: +// +// - simple_var_name +// - qualified.subpackage.var_name +func (opt *optimizerExprFactory) NewIdent(name string) ast.Expr { + return opt.fac.NewIdent(opt.nextID(), name) +} + +// NewLiteral creates a new literal expression value. 
+//
+// The range of valid values for a literal generated during optimization is different than for expressions
+// generated via parsing / type-checking, as the ref.Val may be _any_ CEL value so long as the value can
+// be converted back to a literal-like form.
+func (opt *optimizerExprFactory) NewLiteral(value ref.Val) ast.Expr {
+	return opt.fac.NewLiteral(opt.nextID(), value)
+}
+
+// NewList creates a list expression with a set of optional indices.
+//
+// Examples:
+//
+// [a, b]
+// - elems: [a, b]
+// - optIndices: []
+//
+// [a, ?b, ?c]
+// - elems: [a, b, c]
+// - optIndices: [1, 2]
+func (opt *optimizerExprFactory) NewList(elems []ast.Expr, optIndices []int32) ast.Expr {
+	return opt.fac.NewList(opt.nextID(), elems, optIndices)
+}
+
+// NewMap creates a map from a set of entry expressions which contain a key and value expression.
+func (opt *optimizerExprFactory) NewMap(entries []ast.EntryExpr) ast.Expr {
+	return opt.fac.NewMap(opt.nextID(), entries)
+}
+
+// NewMapEntry creates a map entry with a key and value expression and a flag to indicate whether the
+// entry is optional.
+//
+// Examples:
+//
+// {a: b}
+// - key: a
+// - value: b
+// - optional: false
+//
+// {?a: ?b}
+// - key: a
+// - value: b
+// - optional: true
+func (opt *optimizerExprFactory) NewMapEntry(key, value ast.Expr, isOptional bool) ast.EntryExpr {
+	return opt.fac.NewMapEntry(opt.nextID(), key, value, isOptional)
+}
+
+// NewHasMacro generates a test-only select expression to be included within an AST and an unexpanded
+// has() macro call signature to be inserted into the source info macro call metadata.
+func (opt *optimizerExprFactory) NewHasMacro(macroID int64, s ast.Expr) (astExpr, macroExpr ast.Expr) {
+	sel := s.AsSelect()
+	astExpr = opt.fac.NewPresenceTest(macroID, sel.Operand(), sel.FieldName())
+	macroExpr = opt.fac.NewCall(0, "has",
+		opt.NewSelect(opt.fac.CopyExpr(sel.Operand()), sel.FieldName()))
+	opt.sanitizeMacro(macroID, macroExpr)
+	return
+}
+
+// NewSelect creates a select expression where a field value is selected from an operand.
+//
+// Example:
+//
+// msg.field_name
+// - operand: msg
+// - field: field_name
+func (opt *optimizerExprFactory) NewSelect(operand ast.Expr, field string) ast.Expr {
+	return opt.fac.NewSelect(opt.nextID(), operand, field)
+}
+
+// NewStruct creates a new typed struct value with a set of field initializations.
+//
+// Example:
+//
+// pkg.TypeName{field: value}
+// - typeName: pkg.TypeName
+// - fields: [{field: value}]
+func (opt *optimizerExprFactory) NewStruct(typeName string, fields []ast.EntryExpr) ast.Expr {
+	return opt.fac.NewStruct(opt.nextID(), typeName, fields)
+}
+
+// NewStructField creates a struct field initialization.
+//
+// Examples:
+//
+// {count: 3u}
+// - field: count
+// - value: 3u
+// - optional: false
+//
+// {?count: x}
+// - field: count
+// - value: x
+// - optional: true
+func (opt *optimizerExprFactory) NewStructField(field string, value ast.Expr, isOptional bool) ast.EntryExpr {
+	return opt.fac.NewStructField(opt.nextID(), field, value, isOptional)
+}
+
+// UpdateExpr updates the target expression with the updated content while preserving macro metadata.
+//
+// There are four scenarios during the update to consider:
+//  1. target is not macro, updated is not macro
+//  2. target is macro, updated is not macro
+//  3. target is macro, updated is macro
+//  4. target is not macro, updated is macro
+//
+// When the target is a macro already, it may either be updated to a new macro function
+// body if the update is also a macro, or it may be removed altogether if the update is
+// not a macro.
+//
+// When the update is a macro, then the target references within other macros must be
+// updated to point to the new updated macro. Otherwise, other macros which pointed to
+// the target body must be replaced with copies of the updated expression body.
+func (opt *optimizerExprFactory) UpdateExpr(target, updated ast.Expr) {
+	// Update the expression
+	target.SetKindCase(updated)
+
+	// Early return if there are no macros present, as the source info reflects the
+	// macro set from the target and updated expressions.
+	if len(opt.sourceInfo.MacroCalls()) == 0 {
+		return
+	}
+	// Determine whether the target expression was a macro.
+	_, targetIsMacro := opt.sourceInfo.GetMacroCall(target.ID())
+
+	// Determine whether the updated expression was a macro.
+	updatedMacro, updatedIsMacro := opt.sourceInfo.GetMacroCall(updated.ID())
+
+	if updatedIsMacro {
+		// If the updated call was a macro, then updated id maps to target id,
+		// and the updated macro moves into the target id slot.
+		opt.sourceInfo.ClearMacroCall(updated.ID())
+		opt.sourceInfo.SetMacroCall(target.ID(), updatedMacro)
+	} else if targetIsMacro {
+		// Otherwise if the target expr was a macro, but is no longer, clear
+		// the macro reference.
+		opt.sourceInfo.ClearMacroCall(target.ID())
+	}
+
+	// Punch holes in the updated value where macro references exist.
+	macroExpr := opt.fac.CopyExpr(target)
+	macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) {
+		if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists {
+			e.SetKindCase(nil)
+		}
+	})
+	ast.PostOrderVisit(macroExpr, macroRefVisitor)
+
+	// Update any references to the expression within a macro
+	macroVisitor := ast.NewExprVisitor(func(call ast.Expr) {
+		// Update the target expression to point to the macro expression which
+		// will be empty if the updated expression was a macro.
+		if call.ID() == target.ID() {
+			call.SetKindCase(opt.fac.CopyExpr(macroExpr))
+		}
+		// Update the macro call expression if it refers to the updated expression
+		// id which has since been remapped to the target id.
+		if call.ID() == updated.ID() {
+			// Either ensure the expression is a macro reference or is populated with
+			// the relevant sub-expression if the updated expr was not a macro.
+			if updatedIsMacro {
+				call.SetKindCase(nil)
+			} else {
+				call.SetKindCase(opt.fac.CopyExpr(macroExpr))
+			}
+			// Since SetKindCase does not renumber the id, ensure the references to
+			// the old 'updated' id are mapped to the target id.
+ call.RenumberIDs(func(id int64) int64 { + if id == updated.ID() { + return target.ID() + } + return id + }) + } + }) + for _, call := range opt.sourceInfo.MacroCalls() { + ast.PostOrderVisit(call, macroVisitor) + } +} + +func (opt *optimizerExprFactory) sanitizeMacro(macroID int64, macroExpr ast.Expr) { + macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) { + if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists && e.ID() != macroID { + e.SetKindCase(nil) + } + }) + ast.PostOrderVisit(macroExpr, macroRefVisitor) +} diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/options.go b/hack/tools/vendor/github.com/google/cel-go/cel/options.go new file mode 100644 index 0000000000..69c6942634 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/cel/options.go @@ -0,0 +1,667 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "fmt" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/dynamicpb" + + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/pb" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/interpreter" + "github.com/google/cel-go/parser" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + descpb "google.golang.org/protobuf/types/descriptorpb" +) + +// These constants beginning with "Feature" enable optional behavior in +// the library. See the documentation for each constant to see its +// effects, compatibility restrictions, and standard conformance. +const ( + _ = iota + + // Enable the tracking of function call expressions replaced by macros. + featureEnableMacroCallTracking + + // Enable the use of cross-type numeric comparisons at the type-checker. + featureCrossTypeNumericComparisons + + // Enable eager validation of declarations to ensure that Env values created + // with `Extend` inherit a validated list of declarations from the parent Env. + featureEagerlyValidateDeclarations + + // Enable the use of the default UTC timezone when a timezone is not specified + // on a CEL timestamp operation. This fixes the scenario where the input time + // is not already in UTC. + featureDefaultUTCTimeZone + + // Enable the serialization of logical operator ASTs as variadic calls, thus + // compressing the logic graph to a single call when multiple like-operator + // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d]) + featureVariadicLogicalASTs + + // Enable error generation when a presence test or optional field selection is + // performed on a primitive type. + featureEnableErrorOnBadPresenceTest +) + +// EnvOption is a functional interface for configuring the environment. 
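+//
+// For example, a custom option (the name WithStrictMode is hypothetical, shown
+// only for illustration) simply returns the updated environment or an error:
+//
+//	func WithStrictMode() EnvOption {
+//		return func(e *Env) (*Env, error) {
+//			// adjust the environment here before returning it
+//			return e, nil
+//		}
+//	}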
+type EnvOption func(e *Env) (*Env, error)
+
+// ClearMacros clears all parser macros.
+//
+// Clearing macros will ensure CEL expressions can only contain linear evaluation paths, as
+// comprehensions such as `all` and `exists` are enabled only via macros.
+func ClearMacros() EnvOption {
+	return func(e *Env) (*Env, error) {
+		e.macros = NoMacros
+		return e, nil
+	}
+}
+
+// CustomTypeAdapter swaps the default types.Adapter implementation with a custom one.
+//
+// Note: This option must be specified before the Types and TypeDescs options when used together.
+func CustomTypeAdapter(adapter types.Adapter) EnvOption {
+	return func(e *Env) (*Env, error) {
+		e.adapter = adapter
+		return e, nil
+	}
+}
+
+// CustomTypeProvider replaces the types.Provider implementation with a custom one.
+//
+// The `provider` variable type may either be types.Provider or ref.TypeProvider (deprecated)
+//
+// Note: This option must be specified before the Types and TypeDescs options when used together.
+func CustomTypeProvider(provider any) EnvOption {
+	return func(e *Env) (*Env, error) {
+		var err error
+		e.provider, err = maybeInteropProvider(provider)
+		return e, err
+	}
+}
+
+// Declarations option extends the declaration set configured in the environment.
+//
+// Note: Declarations will by default be appended to the pre-existing declaration set configured
+// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a
+// purely custom set of declarations use NewCustomEnv.
+func Declarations(decls ...*exprpb.Decl) EnvOption {
+	declOpts := []EnvOption{}
+	var err error
+	var opt EnvOption
+	// Convert the declarations to `EnvOption` values ahead of time.
+	// Surface any errors in conversion when the options are applied.
+	for _, d := range decls {
+		opt, err = ExprDeclToDeclaration(d)
+		if err != nil {
+			break
+		}
+		declOpts = append(declOpts, opt)
+	}
+	return func(e *Env) (*Env, error) {
+		if err != nil {
+			return nil, err
+		}
+		for _, o := range declOpts {
+			e, err = o(e)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return e, nil
+	}
+}
+
+// EagerlyValidateDeclarations ensures that any collisions between configured declarations are caught
+// at the time of the `NewEnv` call.
+//
+// Eagerly validating declarations is also useful for bootstrapping a base `cel.Env` value.
+// Calls to base `Env.Extend()` will be significantly faster when declarations are eagerly validated,
+// as declarations will be collision-checked at most once and only incrementally by way of `Extend`.
+//
+// Disabled by default as not all environments are used for type-checking.
+func EagerlyValidateDeclarations(enabled bool) EnvOption {
+	return features(featureEagerlyValidateDeclarations, enabled)
+}
+
+// HomogeneousAggregateLiterals disables mixed type list and map literal values.
+//
+// Note, it is still possible to have heterogeneous aggregates when provided as variables to the
+// expression, as well as via conversion of well-known dynamic types, or with unchecked
+// expressions.
+func HomogeneousAggregateLiterals() EnvOption {
+	return ASTValidators(ValidateHomogeneousAggregateLiterals())
+}
+
+// variadicLogicalOperatorASTs flattens like-operator chained logical expressions into a single
+// variadic call with N-terms. This behavior is useful when serializing to a protocol buffer as
+// it will reduce the number of recursive calls needed to deserialize the AST later.
+//
+// For example, given the following expression the call graph will be rendered accordingly:
+//
+//	expression: a && b && c && (d || e)
+//	ast: call(_&&_, [a, b, c, call(_||_, [d, e])])
+func variadicLogicalOperatorASTs() EnvOption {
+	return features(featureVariadicLogicalASTs, true)
+}
+
+// Macros option extends the macro set configured in the environment.
+//
+// Note: This option must be specified after ClearMacros if used together.
+func Macros(macros ...Macro) EnvOption {
+	return func(e *Env) (*Env, error) {
+		e.macros = append(e.macros, macros...)
+		return e, nil
+	}
+}
+
+// Container sets the container for resolving variable names. Defaults to an empty container.
+//
+// If all references within an expression are relative to a protocol buffer package, then
+// specifying a container of `google.type` would make it possible to write expressions such as
+// `Expr{expression: 'a < b'}` instead of having to write `google.type.Expr{...}`.
+func Container(name string) EnvOption {
+	return func(e *Env) (*Env, error) {
+		cont, err := e.Container.Extend(containers.Name(name))
+		if err != nil {
+			return nil, err
+		}
+		e.Container = cont
+		return e, nil
+	}
+}
+
+// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
+//
+// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
+// Abbreviations can be useful when working with variables, functions, and especially types from
+// multiple namespaces:
+//
+//	// CEL object construction
+//	qual.pkg.version.ObjTypeName{
+//		field: alt.container.ver.FieldTypeName{value: ...}
+//	}
+//
+// Only one of the qualified names above may be used as the CEL container, so at least one of these
+// references must be a long qualified name within an otherwise short CEL program. Using the
+// following abbreviations, the program becomes much simpler:
+//
+//	// CEL Go option
+//	Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
+//	// Simplified Object construction
+//	ObjTypeName{field: FieldTypeName{value: ...}}
+//
+// There are a few rules for the qualified names and the simple abbreviations generated from them:
+// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
+// - The last element in the qualified name is the abbreviation.
+// - Abbreviations must not collide with each other.
+// - The abbreviation must not collide with unqualified names in use.
+//
+// Abbreviations are distinct from container-based references in the following important ways:
+// - Abbreviations must expand to a fully-qualified name.
+// - Expanded abbreviations do not participate in namespace resolution.
+// - Abbreviation expansion is done instead of the container search for a matching identifier.
+// - Containers follow C++ namespace resolution rules with searches from the most qualified name
+//   to the least qualified name.
+// - Container references within the CEL program may be relative, and are resolved to fully
+//   qualified names at either type-check time or program plan time, whichever comes first.
+//
+// If there is ever a case where an identifier could be in both the container and as an
+// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
+// preserved between compilations even as the container evolves.
+func Abbrevs(qualifiedNames ...string) EnvOption { + return func(e *Env) (*Env, error) { + cont, err := e.Container.Extend(containers.Abbrevs(qualifiedNames...)) + if err != nil { + return nil, err + } + e.Container = cont + return e, nil + } +} + +// customTypeRegistry is an internal-only interface containing the minimum methods required to support +// custom types. It is a subset of methods from ref.TypeRegistry. +type customTypeRegistry interface { + RegisterDescriptor(protoreflect.FileDescriptor) error + RegisterType(...ref.Type) error +} + +// Types adds one or more type declarations to the environment, allowing for construction of +// type-literals whose definitions are included in the common expression built-in set. +// +// The input types may either be instances of `proto.Message` or `ref.Type`. Any other type +// provided to this option will result in an error. +// +// Well-known protobuf types within the `google.protobuf.*` package are included in the standard +// environment by default. +// +// Note: This option must be specified after the CustomTypeProvider option when used together. +func Types(addTypes ...any) EnvOption { + return func(e *Env) (*Env, error) { + reg, isReg := e.provider.(customTypeRegistry) + if !isReg { + return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) + } + for _, t := range addTypes { + switch v := t.(type) { + case proto.Message: + fdMap := pb.CollectFileDescriptorSet(v) + for _, fd := range fdMap { + err := reg.RegisterDescriptor(fd) + if err != nil { + return nil, err + } + } + case ref.Type: + err := reg.RegisterType(v) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported type: %T", t) + } + } + return e, nil + } +} + +// TypeDescs adds type declarations from any protoreflect.FileDescriptor, protoregistry.Files, +// google.protobuf.FileDescriptorProto or google.protobuf.FileDescriptorSet provided. +// +// Note that messages instantiated from these descriptors will be *dynamicpb.Message values +// rather than the concrete message type. +// +// TypeDescs are hermetic to a single Env object, but may be copied to other Env values via +// extension or by re-using the same EnvOption with another NewEnv() call. +func TypeDescs(descs ...any) EnvOption { + return func(e *Env) (*Env, error) { + reg, isReg := e.provider.(customTypeRegistry) + if !isReg { + return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) + } + // Scan the input descriptors for FileDescriptorProto messages and accumulate them into a + // synthetic FileDescriptorSet as the FileDescriptorProto messages may refer to each other + // and will not resolve properly unless they are part of the same set. 
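+		// As a hedged usage sketch (the file name below is illustrative), a serialized
+		// descriptor set can be loaded and registered so that expressions may construct
+		// the message types it describes:
+		//
+		//	data, _ := os.ReadFile("descriptor_set.binpb")
+		//	fdSet := &descpb.FileDescriptorSet{}
+		//	_ = proto.Unmarshal(data, fdSet)
+		//	env, err := cel.NewEnv(cel.TypeDescs(fdSet))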
+		var fds *descpb.FileDescriptorSet
+		for _, d := range descs {
+			switch f := d.(type) {
+			case *descpb.FileDescriptorProto:
+				if fds == nil {
+					fds = &descpb.FileDescriptorSet{
+						File: []*descpb.FileDescriptorProto{},
+					}
+				}
+				fds.File = append(fds.File, f)
+			}
+		}
+		if fds != nil {
+			if err := registerFileSet(reg, fds); err != nil {
+				return nil, err
+			}
+		}
+		for _, d := range descs {
+			switch f := d.(type) {
+			case *protoregistry.Files:
+				if err := registerFiles(reg, f); err != nil {
+					return nil, err
+				}
+			case protoreflect.FileDescriptor:
+				if err := reg.RegisterDescriptor(f); err != nil {
+					return nil, err
+				}
+			case *descpb.FileDescriptorSet:
+				if err := registerFileSet(reg, f); err != nil {
+					return nil, err
+				}
+			case *descpb.FileDescriptorProto:
+				// skip, handled as a synthetic file descriptor set.
+			default:
+				return nil, fmt.Errorf("unsupported type descriptor: %T", d)
+			}
+		}
+		return e, nil
+	}
+}
+
+func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) error {
+	files, err := protodesc.NewFiles(fileSet)
+	if err != nil {
+		return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err)
+	}
+	return registerFiles(reg, files)
+}
+
+func registerFiles(reg customTypeRegistry, files *protoregistry.Files) error {
+	var err error
+	files.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
+		err = reg.RegisterDescriptor(fd)
+		return err == nil
+	})
+	return err
+}
+
+// ProgramOption is a functional interface for configuring evaluation bindings and behaviors.
+type ProgramOption func(p *prog) (*prog, error)
+
+// CustomDecorator appends an InterpretableDecorator to the program.
+//
+// InterpretableDecorators can be used to inspect, alter, or replace the Program plan.
+func CustomDecorator(dec interpreter.InterpretableDecorator) ProgramOption {
+	return func(p *prog) (*prog, error) {
+		p.decorators = append(p.decorators, dec)
+		return p, nil
+	}
+}
+
+// Functions adds function overloads that extend or override the set of CEL built-ins.
+//
+// Deprecated: use Function() instead to declare the function, its overload signatures,
+// and the overload implementations.
+func Functions(funcs ...*functions.Overload) ProgramOption {
+	return func(p *prog) (*prog, error) {
+		if err := p.dispatcher.Add(funcs...); err != nil {
+			return nil, err
+		}
+		return p, nil
+	}
+}
+
+// Globals sets the global variable values for a given program. These values may be shadowed by
+// variables with the same name provided to the Eval() call. If Globals is used in a Library with
+// a Lib EnvOption, vars may shadow variables provided by previously added libraries.
+//
+// The vars value may either be an `interpreter.Activation` instance or a `map[string]any`.
+func Globals(vars any) ProgramOption {
+	return func(p *prog) (*prog, error) {
+		defaultVars, err := interpreter.NewActivation(vars)
+		if err != nil {
+			return nil, err
+		}
+		if p.defaultVars != nil {
+			defaultVars = interpreter.NewHierarchicalActivation(p.defaultVars, defaultVars)
+		}
+		p.defaultVars = defaultVars
+		return p, nil
+	}
+}
+
+// OptimizeRegex provides a way to replace the InterpretableCall for regex functions. This can be used
+// to compile regex string constants at program creation time and report any errors and then use the
+// compiled regex for all regex function invocations.
+func OptimizeRegex(regexOptimizations ...*interpreter.RegexOptimization) ProgramOption {
+	return func(p *prog) (*prog, error) {
+		p.regexOptimizations = append(p.regexOptimizations, regexOptimizations...)
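+		// As a usage sketch, the library's built-in matches() optimization can be
+		// installed at program construction time:
+		//
+		//	prg, err := env.Program(ast, cel.OptimizeRegex(interpreter.MatchesRegexOptimization))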
+		return p, nil
+	}
+}
+
+// EvalOption indicates an evaluation option that may affect the evaluation behavior or information
+// in the output result.
+type EvalOption int
+
+const (
+	// OptTrackState will cause the runtime to return an immutable EvalState value in the Result.
+	OptTrackState EvalOption = 1 << iota
+
+	// OptExhaustiveEval causes the runtime to disable short-circuits and track state.
+	OptExhaustiveEval EvalOption = 1<<iota | OptTrackState
+
+	// OptOptimize precomputes functions and operators with constants as possible.
+	OptOptimize EvalOption = 1 << iota
+
+	// OptPartialEval enables the evaluation of a partial state where the input data may be known
+	// to be missing, either as top-level variables, or somewhere within a message.
+	OptPartialEval EvalOption = 1 << iota
+
+	// OptTrackCost enables the runtime cost calculation while validation and return cost within evalDetails
+	// cost calculation is available via func ActualCost()
+	OptTrackCost EvalOption = 1 << iota
+)
+
+	if p.interruptCheckFrequency > 0 {
+		decorators = append(decorators, interpreter.InterruptableEval())
+	}
+	// Enable constant folding first.
+	if p.evalOpts&OptOptimize == OptOptimize {
+		decorators = append(decorators, interpreter.Optimize())
+		p.regexOptimizations = append(p.regexOptimizations, interpreter.MatchesRegexOptimization)
+	}
+	// Enable regex compilation of constants immediately after folding constants.
+	if len(p.regexOptimizations) > 0 {
+		decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
+	}
+
+	// Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
+	if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
+		factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) {
+			costTracker.Estimator = p.callCostEstimator
+			costTracker.Limit = p.costLimit
+			for _, costOpt := range p.costOptions {
+				err := costOpt(costTracker)
+				if err != nil {
+					return nil, err
+				}
+			}
+			// Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This
+			// prevents the underlying memory from being shared between factory function calls causing
+			// undesired mutations.
+			decs := decorators[:len(decorators):len(decorators)]
+			var observers []interpreter.EvalObserver
+
+			if p.evalOpts&(OptExhaustiveEval|OptTrackState) != 0 {
+				// EvalStateObserver is required for OptExhaustiveEval.
+				observers = append(observers, interpreter.EvalStateObserver(state))
+			}
+			if p.evalOpts&OptTrackCost == OptTrackCost {
+				observers = append(observers, interpreter.CostObserver(costTracker))
+			}
+
+			// Enable exhaustive eval over a basic observer since it offers a superset of features.
+			if p.evalOpts&OptExhaustiveEval == OptExhaustiveEval {
+				decs = append(decs, interpreter.ExhaustiveEval(), interpreter.Observe(observers...))
+			} else if len(observers) > 0 {
+				decs = append(decs, interpreter.Observe(observers...))
+			}
+
+			return p.clone().initInterpretable(a, decs)
+		}
+		return newProgGen(factory)
+	}
+	return p.initInterpretable(a, decorators)
+}
+
+func (p *prog) initInterpretable(a *ast.AST, decs []interpreter.InterpretableDecorator) (*prog, error) {
+	// When the AST has been checked it contains metadata that can be used to speed up program execution.
+	interpretable, err := p.interpreter.NewInterpretable(a, decs...)
+	if err != nil {
+		return nil, err
+	}
+	p.interpretable = interpretable
+	return p, nil
+}
+
+// Eval implements the Program interface method.
+func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
+	// Configure error recovery for unexpected panics during evaluation. Note, the use of named
+	// return values makes it possible to modify the error response during the recovery
+	// function.
+	defer func() {
+		if r := recover(); r != nil {
+			switch t := r.(type) {
+			case interpreter.EvalCancelledError:
+				err = t
+			default:
+				err = fmt.Errorf("internal error: %v", r)
+			}
+		}
+	}()
+	// Build a hierarchical activation if there are default vars set.
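+	// As a sketch of the shadowing behavior (the variable names are illustrative),
+	// values supplied to Eval() take precedence over program-level globals:
+	//
+	//	prg, _ := env.Program(ast, cel.Globals(map[string]any{"role": "viewer"}))
+	//	out, _, _ := prg.Eval(map[string]any{"role": "admin"}) // "admin" shadows "viewer"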
+	var vars interpreter.Activation
+	switch v := input.(type) {
+	case interpreter.Activation:
+		vars = v
+	case map[string]any:
+		vars = activationPool.Setup(v)
+		defer activationPool.Put(vars)
+	default:
+		return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
+	}
+	if p.defaultVars != nil {
+		vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars)
+	}
+	v = p.interpretable.Eval(vars)
+	// The output of an internal Eval may have a value (`v`) that is a types.Err. This step
+	// translates the CEL value to a Go error response. This interface does not quite match the
+	// RPC signature which allows for multiple errors to be returned, but should be sufficient.
+	if types.IsError(v) {
+		err = v.(*types.Err)
+	}
+	return
+}
+
+// ContextEval implements the Program interface.
+func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
+	if ctx == nil {
+		return nil, nil, fmt.Errorf("context can not be nil")
+	}
+	// Configure the input, making sure to wrap Activation inputs in the special ctxActivation which
+	// exposes the #interrupted variable and manages rate-limited checks of the ctx.Done() state.
+	var vars interpreter.Activation
+	switch v := input.(type) {
+	case interpreter.Activation:
+		vars = ctxActivationPool.Setup(v, ctx.Done(), p.interruptCheckFrequency)
+		defer ctxActivationPool.Put(vars)
+	case map[string]any:
+		rawVars := activationPool.Setup(v)
+		defer activationPool.Put(rawVars)
+		vars = ctxActivationPool.Setup(rawVars, ctx.Done(), p.interruptCheckFrequency)
+		defer ctxActivationPool.Put(vars)
+	default:
+		return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input)
+	}
+	return p.Eval(vars)
+}
+
+// progFactory is a helper alias for marking a program creation factory function.
+type progFactory func(interpreter.EvalState, *interpreter.CostTracker) (Program, error)
+
+// progGen holds a reference to a progFactory instance and implements the Program interface.
+type progGen struct {
+	factory progFactory
+}
+
+// newProgGen tests the factory object by calling it once and returns a factory-based Program if
+// the test is successful.
+func newProgGen(factory progFactory) (Program, error) {
+	// Test the factory to make sure that configuration errors are spotted at config time.
+	tracker, err := interpreter.NewCostTracker(nil)
+	if err != nil {
+		return nil, err
+	}
+	_, err = factory(interpreter.NewEvalState(), tracker)
+	if err != nil {
+		return nil, err
+	}
+	return &progGen{factory: factory}, nil
+}
+
+// Eval implements the Program interface method.
+func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) {
+	// The factory based Eval() differs from the standard evaluation model in that it generates a
+	// new EvalState instance for each call to ensure that unique evaluations yield unique stateful
+	// results.
+	state := interpreter.NewEvalState()
+	costTracker, err := interpreter.NewCostTracker(nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	det := &EvalDetails{state: state, costTracker: costTracker}
+
+	// Generate a new instance of the interpretable using the factory configured during the call to
+	// newProgram(). It is incredibly unlikely that the factory call will generate an error given
+	// the factory test performed within the Program() call.
+	p, err := gen.factory(state, costTracker)
+	if err != nil {
+		return nil, det, err
+	}
+
+	// Evaluate the input, returning the result and the 'state' within EvalDetails.
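+	// As a usage sketch (assuming the EvalOptions program option defined in this
+	// package), callers opt into this factory-based path and read the tracked
+	// details after evaluation:
+	//
+	//	prg, _ := env.Program(ast, cel.EvalOptions(cel.OptTrackState|cel.OptTrackCost))
+	//	out, det, _ := prg.Eval(vars)
+	//	cost := det.ActualCost() // *uint64; nil when cost tracking is not enabled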
+	v, _, err := p.Eval(input)
+	if err != nil {
+		return v, det, err
+	}
+	return v, det, nil
+}
+
+// ContextEval implements the Program interface method.
+func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) {
+	if ctx == nil {
+		return nil, nil, fmt.Errorf("context can not be nil")
+	}
+	// The factory based Eval() differs from the standard evaluation model in that it generates a
+	// new EvalState instance for each call to ensure that unique evaluations yield unique stateful
+	// results.
+	state := interpreter.NewEvalState()
+	costTracker, err := interpreter.NewCostTracker(nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	det := &EvalDetails{state: state, costTracker: costTracker}
+
+	// Generate a new instance of the interpretable using the factory configured during the call to
+	// newProgram(). It is incredibly unlikely that the factory call will generate an error given
+	// the factory test performed within the Program() call.
+	p, err := gen.factory(state, costTracker)
+	if err != nil {
+		return nil, det, err
+	}
+
+	// Evaluate the input, returning the result and the 'state' within EvalDetails.
+	v, _, err := p.ContextEval(ctx, input)
+	if err != nil {
+		return v, det, err
+	}
+	return v, det, nil
+}
+
+type ctxEvalActivation struct {
+	parent                  interpreter.Activation
+	interrupt               <-chan struct{}
+	interruptCheckCount     uint
+	interruptCheckFrequency uint
+}
+
+// ResolveName implements the Activation interface method, but adds a special #interrupted variable
+// which is capable of testing whether a 'done' signal is provided from a context.Context channel.
+func (a *ctxEvalActivation) ResolveName(name string) (any, bool) {
+	if name == "#interrupted" {
+		a.interruptCheckCount++
+		if a.interruptCheckCount%a.interruptCheckFrequency == 0 {
+			select {
+			case <-a.interrupt:
+				return true, true
+			default:
+				return nil, false
+			}
+		}
+		return nil, false
+	}
+	return a.parent.ResolveName(name)
+}
+
+func (a *ctxEvalActivation) Parent() interpreter.Activation {
+	return a.parent
+}
+
+func newCtxEvalActivationPool() *ctxEvalActivationPool {
+	return &ctxEvalActivationPool{
+		Pool: sync.Pool{
+			New: func() any {
+				return &ctxEvalActivation{}
+			},
+		},
+	}
+}
+
+type ctxEvalActivationPool struct {
+	sync.Pool
+}
+
+// Setup initializes a pooled Activation with the ability to check for context.Context cancellation.
+func (p *ctxEvalActivationPool) Setup(vars interpreter.Activation, done <-chan struct{}, interruptCheckRate uint) *ctxEvalActivation {
+	a := p.Pool.Get().(*ctxEvalActivation)
+	a.parent = vars
+	a.interrupt = done
+	a.interruptCheckCount = 0
+	a.interruptCheckFrequency = interruptCheckRate
+	return a
+}
+
+type evalActivation struct {
+	vars     map[string]any
+	lazyVars map[string]any
+}
+
+// ResolveName looks up the value of the input variable name, if found.
+//
+// Lazy bindings may be supplied within the map-based input in either of the following forms:
+// - func() any
+// - func() ref.Val
+//
+// The lazy binding will only be invoked once per evaluation.
+//
+// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
+// the types.Adapter configured in the environment.
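+//
+// As a sketch (loadUserRecord is a hypothetical helper, not part of this package),
+// a lazy binding defers computing a variable until the expression first references it:
+//
+//	vars := map[string]any{
+//		"user": func() ref.Val { return loadUserRecord() },
+//	}
+//	out, _, err := prg.Eval(vars) // loadUserRecord runs at most once per Eval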
+func (a *evalActivation) ResolveName(name string) (any, bool) {
+	v, found := a.vars[name]
+	if !found {
+		return nil, false
+	}
+	switch obj := v.(type) {
+	case func() ref.Val:
+		if resolved, found := a.lazyVars[name]; found {
+			return resolved, true
+		}
+		lazy := obj()
+		a.lazyVars[name] = lazy
+		return lazy, true
+	case func() any:
+		if resolved, found := a.lazyVars[name]; found {
+			return resolved, true
+		}
+		lazy := obj()
+		a.lazyVars[name] = lazy
+		return lazy, true
+	default:
+		return obj, true
+	}
+}
+
+// Parent implements the interpreter.Activation interface
+func (a *evalActivation) Parent() interpreter.Activation {
+	return nil
+}
+
+func newEvalActivationPool() *evalActivationPool {
+	return &evalActivationPool{
+		Pool: sync.Pool{
+			New: func() any {
+				return &evalActivation{lazyVars: make(map[string]any)}
+			},
+		},
+	}
+}
+
+type evalActivationPool struct {
+	sync.Pool
+}
+
+// Setup initializes a pooled Activation object with the map input.
+func (p *evalActivationPool) Setup(vars map[string]any) *evalActivation {
+	a := p.Pool.Get().(*evalActivation)
+	a.vars = vars
+	return a
+}
+
+func (p *evalActivationPool) Put(value any) {
+	a := value.(*evalActivation)
+	for k := range a.lazyVars {
+		delete(a.lazyVars, k)
+	}
+	p.Pool.Put(a)
+}
+
+var (
+	// activationPool is an internally managed pool of Activation values that wrap map[string]any inputs
+	activationPool = newEvalActivationPool()
+
+	// ctxActivationPool is an internally managed pool of Activation values that expose a special #interrupted variable
+	ctxActivationPool = newCtxEvalActivationPool()
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/cel/validator.go b/hack/tools/vendor/github.com/google/cel-go/cel/validator.go
new file mode 100644
index 0000000000..b50c674520
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/cel/validator.go
@@ -0,0 +1,375 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/overloads"
+)
+
+const (
+	homogeneousValidatorName = "cel.lib.std.validate.types.homogeneous"
+
+	// HomogeneousAggregateLiteralExemptFunctions is the ValidatorConfig key used to configure
+	// the set of function names which are exempt from homogeneous type checks. The expected type
+	// is a string list of function names.
+	//
+	// As an example, the `.format([args])` call expects the input arguments list to be
+	// comprised of a variety of types which correspond to the types expected by the format control
+	// clauses; however, all other uses of a mixed element type list, would be unexpected.
+	HomogeneousAggregateLiteralExemptFunctions = homogeneousValidatorName + ".exempt"
+)
+
+// ASTValidators configures a set of ASTValidator instances into the target environment.
+//
+// Validators are applied in the order in which they are specified and are treated as singletons.
+// The same ASTValidator with a given name will not be applied more than once. +func ASTValidators(validators ...ASTValidator) EnvOption { + return func(e *Env) (*Env, error) { + for _, v := range validators { + if !e.HasValidator(v.Name()) { + e.validators = append(e.validators, v) + } + } + return e, nil + } +} + +// ASTValidator defines a singleton interface for validating a type-checked Ast against an environment. +// +// Note: the Issues argument is mutable in the sense that it is intended to collect errors which will be +// reported to the caller. +type ASTValidator interface { + // Name returns the name of the validator. Names must be unique. + Name() string + + // Validate validates a given Ast within an Environment and collects a set of potential issues. + // + // The ValidatorConfig is generated from the set of ASTValidatorConfigurer instances prior to + // the invocation of the Validate call. The expectation is that the validator configuration + // is created in sequence and immutable once provided to the Validate call. + // + // See individual validators for more information on their configuration keys and configuration + // properties. + Validate(*Env, ValidatorConfig, *ast.AST, *Issues) +} + +// ValidatorConfig provides an accessor method for querying validator configuration state. +type ValidatorConfig interface { + GetOrDefault(name string, value any) any +} + +// MutableValidatorConfig provides mutation methods for querying and updating validator configuration +// settings. +type MutableValidatorConfig interface { + ValidatorConfig + Set(name string, value any) error +} + +// ASTValidatorConfigurer indicates that this object, currently expected to be an ASTValidator, +// participates in validator configuration settings. +// +// This interface may be split from the expectation of being an ASTValidator instance in the future. +type ASTValidatorConfigurer interface { + Configure(MutableValidatorConfig) error +} + +// validatorConfig implements the ValidatorConfig and MutableValidatorConfig interfaces. +type validatorConfig struct { + data map[string]any +} + +// newValidatorConfig initializes the validator config with default values for core CEL validators. +func newValidatorConfig() *validatorConfig { + return &validatorConfig{ + data: map[string]any{ + HomogeneousAggregateLiteralExemptFunctions: []string{}, + }, + } +} + +// GetOrDefault returns the configured value for the name, if present, else the input default value. +// +// Note, the type-agreement between the input default and configured value is not checked on read. +func (config *validatorConfig) GetOrDefault(name string, value any) any { + v, found := config.data[name] + if !found { + return value + } + return v +} + +// Set configures a validator option with the given name and value. +// +// If the value had previously been set, the new value must have the same reflection type as the old one, +// or the call will error. +func (config *validatorConfig) Set(name string, value any) error { + v, found := config.data[name] + if found && reflect.TypeOf(v) != reflect.TypeOf(value) { + return fmt.Errorf("incompatible configuration type for %s, got %T, wanted %T", name, value, v) + } + config.data[name] = value + return nil +} + +// ExtendedValidations collects a set of common AST validations which reduce the likelihood of runtime errors. 
+//
+// - Validate duration and timestamp literals
+// - Ensure regex strings are valid
+// - Disable mixed type list and map literals
+func ExtendedValidations() EnvOption {
+	return ASTValidators(
+		ValidateDurationLiterals(),
+		ValidateTimestampLiterals(),
+		ValidateRegexLiterals(),
+		ValidateHomogeneousAggregateLiterals(),
+	)
+}
+
+// ValidateDurationLiterals ensures that duration literal arguments are valid immediately after type-check.
+func ValidateDurationLiterals() ASTValidator {
+	return newFormatValidator(overloads.TypeConvertDuration, 0, evalCall)
+}
+
+// ValidateTimestampLiterals ensures that timestamp literal arguments are valid immediately after type-check.
+func ValidateTimestampLiterals() ASTValidator {
+	return newFormatValidator(overloads.TypeConvertTimestamp, 0, evalCall)
+}
+
+// ValidateRegexLiterals ensures that regex patterns are validated after type-check.
+func ValidateRegexLiterals() ASTValidator {
+	return newFormatValidator(overloads.Matches, 0, compileRegex)
+}
+
+// ValidateHomogeneousAggregateLiterals checks that all list and map literal entries have the same types, i.e.
+// no mixed list element types or mixed map key or map value types.
+//
+// Note: the string format call relies on a mixed element type list for ease of use, so this check skips all
+// literals which occur within string format calls.
+func ValidateHomogeneousAggregateLiterals() ASTValidator {
+	return homogeneousAggregateLiteralValidator{}
+}
+
+// ValidateComprehensionNestingLimit ensures that comprehension nesting does not exceed the specified limit.
+//
+// This validator can be useful for preventing arbitrarily nested comprehensions which can take high polynomial
+// time to complete.
+//
+// Note, this limit does not apply to comprehensions with an empty iteration range, as these comprehensions have
+// no actual looping cost. The cel.bind() macro utilizes the comprehension structure to perform local variable
+// assignments and supplies an empty iteration range, so they won't count against the nesting limit either.
+func ValidateComprehensionNestingLimit(limit int) ASTValidator {
+	return nestingLimitValidator{limit: limit}
+}
+
+type argChecker func(env *Env, call, arg ast.Expr) error
+
+func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator {
+	return formatValidator{
+		funcName: funcName,
+		check:    check,
+		argNum:   argNum,
+	}
+}
+
+type formatValidator struct {
+	funcName string
+	argNum   int
+	check    argChecker
+}
+
+// Name returns the unique name of this function format validator.
+func (v formatValidator) Name() string {
+	return fmt.Sprintf("cel.lib.std.validate.functions.%s", v.funcName)
+}
+
+// Validate searches the AST for uses of a given function name with a constant argument and performs a check
+// on whether the argument is a valid literal value.
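+//
+// As a usage sketch, validators defined in this file are installed through the
+// environment options described above (the nesting limit value is illustrative):
+//
+//	env, err := cel.NewEnv(
+//		cel.ASTValidators(
+//			cel.ValidateDurationLiterals(),
+//			cel.ValidateRegexLiterals(),
+//			cel.ValidateComprehensionNestingLimit(2),
+//		),
+//	)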
+func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) { + root := ast.NavigateAST(a) + funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName)) + for _, call := range funcCalls { + callArgs := call.AsCall().Args() + if len(callArgs) <= v.argNum { + continue + } + litArg := callArgs[v.argNum] + if litArg.Kind() != ast.LiteralKind { + continue + } + if err := v.check(e, call, litArg); err != nil { + iss.ReportErrorAtID(litArg.ID(), "invalid %s argument", v.funcName) + } + } +} + +func evalCall(env *Env, call, arg ast.Expr) error { + ast := &Ast{impl: ast.NewAST(call, ast.NewSourceInfo(nil))} + prg, err := env.Program(ast) + if err != nil { + return err + } + _, _, err = prg.Eval(NoVars()) + return err +} + +func compileRegex(_ *Env, _, arg ast.Expr) error { + pattern := arg.AsLiteral().Value().(string) + _, err := regexp.Compile(pattern) + return err +} + +type homogeneousAggregateLiteralValidator struct{} + +// Name returns the unique name of the homogeneous type validator. +func (homogeneousAggregateLiteralValidator) Name() string { + return homogeneousValidatorName +} + +// Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types. +// +// This validator makes an exception for list and map literals which occur at any level of nesting within +// string format calls. +func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.AST, iss *Issues) { + var exemptedFunctions []string + exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string) + root := ast.NavigateAST(a) + listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind)) + for _, listExpr := range listExprs { + if inExemptFunction(listExpr, exemptedFunctions) { + continue + } + l := listExpr.AsList() + elements := l.Elements() + optIndices := l.OptionalIndices() + var elemType *Type + for i, e := range elements { + et := a.GetType(e.ID()) + if isOptionalIndex(i, optIndices) { + et = et.Parameters()[0] + } + if elemType == nil { + elemType = et + continue + } + if !elemType.IsEquivalentType(et) { + v.typeMismatch(iss, e.ID(), elemType, et) + break + } + } + } + mapExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.MapKind)) + for _, mapExpr := range mapExprs { + if inExemptFunction(mapExpr, exemptedFunctions) { + continue + } + m := mapExpr.AsMap() + entries := m.Entries() + var keyType, valType *Type + for _, e := range entries { + mapEntry := e.AsMapEntry() + key, val := mapEntry.Key(), mapEntry.Value() + kt, vt := a.GetType(key.ID()), a.GetType(val.ID()) + if mapEntry.IsOptional() { + vt = vt.Parameters()[0] + } + if keyType == nil && valType == nil { + keyType, valType = kt, vt + continue + } + if !keyType.IsEquivalentType(kt) { + v.typeMismatch(iss, key.ID(), keyType, kt) + } + if !valType.IsEquivalentType(vt) { + v.typeMismatch(iss, val.ID(), valType, vt) + } + } + } +} + +func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool { + parent, found := e.Parent() + for found { + if parent.Kind() == ast.CallKind { + fnName := parent.AsCall().FunctionName() + for _, exempt := range exemptFunctions { + if exempt == fnName { + return true + } + } + } + parent, found = parent.Parent() + } + return false +} + +func isOptionalIndex(i int, optIndices []int32) bool { + for _, optInd := range optIndices { + if i == int(optInd) { + return true + } + } + return false +} + +func (homogeneousAggregateLiteralValidator) typeMismatch(iss 
*Issues, id int64, expected, actual *Type) { + iss.ReportErrorAtID(id, "expected type '%s' but found '%s'", FormatCELType(expected), FormatCELType(actual)) +} + +type nestingLimitValidator struct { + limit int +} + +func (v nestingLimitValidator) Name() string { + return "cel.lib.std.validate.comprehension_nesting_limit" +} + +func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) { + root := ast.NavigateAST(a) + comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) + if len(comprehensions) <= v.limit { + return + } + for _, comp := range comprehensions { + count := 0 + e := comp + hasParent := true + for hasParent { + // When the expression is not a comprehension, continue to the next ancestor. + if e.Kind() != ast.ComprehensionKind { + e, hasParent = e.Parent() + continue + } + // When the comprehension has an empty range, continue to the next ancestor + // as this comprehension does not have any associated cost. + iterRange := e.AsComprehension().IterRange() + if iterRange.Kind() == ast.ListKind && iterRange.AsList().Size() == 0 { + e, hasParent = e.Parent() + continue + } + // Otherwise check the nesting limit. + count++ + if count > v.limit { + iss.ReportErrorAtID(comp.ID(), "comprehension exceeds nesting limit") + break + } + e, hasParent = e.Parent() + } + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/checker/BUILD.bazel new file mode 100644 index 0000000000..678b412a95 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/checker/BUILD.bazel @@ -0,0 +1,64 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "checker.go", + "cost.go", + "env.go", + "errors.go", + "format.go", + "mapping.go", + "options.go", + "printer.go", + "scopes.go", + "types.go", + ], + importpath = "github.com/google/cel-go/checker", + visibility = ["//visibility:public"], + deps = [ + "//checker/decls:go_default_library", + "//common:go_default_library", + "//common/ast:go_default_library", + "//common/containers:go_default_library", + "//common/debug:go_default_library", + "//common/decls:go_default_library", + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/stdlib:go_default_library", + "//common/types:go_default_library", + "//common/types/pb:go_default_library", + "//common/types/ref:go_default_library", + "//parser:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//types/known/emptypb:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "checker_test.go", + "cost_test.go", + "env_test.go", + "format_test.go", + ], + embed = [ + ":go_default_library", + ], + deps = [ + "//common/types:go_default_library", + "//parser:go_default_library", + "//test:go_default_library", + "//test/proto2pb:go_default_library", + "//test/proto3pb:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/checker.go b/hack/tools/vendor/github.com/google/cel-go/checker/checker.go new file mode 100644 index 0000000000..0603cfa302 --- /dev/null +++ 
b/hack/tools/vendor/github.com/google/cel-go/checker/checker.go
@@ -0,0 +1,711 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package checker defines functions to type-check a parsed expression
+// against a set of identifier and function declarations.
+package checker
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/containers"
+	"github.com/google/cel-go/common/decls"
+	"github.com/google/cel-go/common/operators"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+type checker struct {
+	*ast.AST
+	ast.ExprFactory
+	env                *Env
+	errors             *typeErrors
+	mappings           *mapping
+	freeTypeVarCounter int
+}
+
+// Check performs type checking, giving a typed AST.
+//
+// The input is a parsed AST and an env which encapsulates type binding of variables,
+// declarations of built-in functions, descriptions of protocol buffers, and a registry for
+// errors.
+//
+// Returns a type-checked AST, which might not be usable if there are errors in the error
+// registry.
+func Check(parsed *ast.AST, source common.Source, env *Env) (*ast.AST, *common.Errors) {
+	errs := common.NewErrors(source)
+	typeMap := make(map[int64]*types.Type)
+	refMap := make(map[int64]*ast.ReferenceInfo)
+	c := checker{
+		AST:                ast.NewCheckedAST(parsed, typeMap, refMap),
+		ExprFactory:        ast.NewExprFactory(),
+		env:                env,
+		errors:             &typeErrors{errs: errs},
+		mappings:           newMapping(),
+		freeTypeVarCounter: 0,
+	}
+	c.check(c.Expr())
+
+	// Walk over the final type map substituting any type parameters either by their bound value
+	// or by DYN.
+	for id, t := range c.TypeMap() {
+		c.SetType(id, substitute(c.mappings, t, true))
+	}
+	return c.AST, errs
+}
+
+func (c *checker) check(e ast.Expr) {
+	if e == nil {
+		return
+	}
+	switch e.Kind() {
+	case ast.LiteralKind:
+		literal := ref.Val(e.AsLiteral())
+		switch literal.Type() {
+		case types.BoolType, types.BytesType, types.DoubleType, types.IntType,
+			types.NullType, types.StringType, types.UintType:
+			c.setType(e, literal.Type().(*types.Type))
+		default:
+			c.errors.unexpectedASTType(e.ID(), c.location(e), "literal", literal.Type().TypeName())
+		}
+	case ast.IdentKind:
+		c.checkIdent(e)
+	case ast.SelectKind:
+		c.checkSelect(e)
+	case ast.CallKind:
+		c.checkCall(e)
+	case ast.ListKind:
+		c.checkCreateList(e)
+	case ast.MapKind:
+		c.checkCreateMap(e)
+	case ast.StructKind:
+		c.checkCreateStruct(e)
+	case ast.ComprehensionKind:
+		c.checkComprehension(e)
+	default:
+		c.errors.unexpectedASTType(e.ID(), c.location(e), "unspecified", reflect.TypeOf(e).Name())
+	}
+}
+
+func (c *checker) checkIdent(e ast.Expr) {
+	identName := e.AsIdent()
+	// Check to see if the identifier is declared.
+	if ident := c.env.LookupIdent(identName); ident != nil {
+		c.setType(e, ident.Type())
+		c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
+		// Overwrite the identifier with its fully qualified name.
+ e.SetKindCase(c.NewIdent(e.ID(), ident.Name())) + return + } + + c.setType(e, types.ErrorType) + c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), identName) +} + +func (c *checker) checkSelect(e ast.Expr) { + sel := e.AsSelect() + // Before traversing down the tree, try to interpret as qualified name. + qname, found := containers.ToQualifiedName(e) + if found { + ident := c.env.LookupIdent(qname) + if ident != nil { + // We don't check for a TestOnly expression here since the `found` result is + // always going to be false for TestOnly expressions. + + // Rewrite the node to be a variable reference to the resolved fully-qualified + // variable name. + c.setType(e, ident.Type()) + c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value())) + e.SetKindCase(c.NewIdent(e.ID(), ident.Name())) + return + } + } + + resultType := c.checkSelectField(e, sel.Operand(), sel.FieldName(), false) + if sel.IsTestOnly() { + resultType = types.BoolType + } + c.setType(e, substitute(c.mappings, resultType, false)) +} + +func (c *checker) checkOptSelect(e ast.Expr) { + // Collect metadata related to the opt select call packaged by the parser. + call := e.AsCall() + operand := call.Args()[0] + field := call.Args()[1] + fieldName, isString := maybeUnwrapString(field) + if !isString { + c.errors.notAnOptionalFieldSelection(field.ID(), c.location(field), field) + return + } + + // Perform type-checking using the field selection logic. + resultType := c.checkSelectField(e, operand, fieldName, true) + c.setType(e, substitute(c.mappings, resultType, false)) + c.setReference(e, ast.NewFunctionReference("select_optional_field")) +} + +func (c *checker) checkSelectField(e, operand ast.Expr, field string, optional bool) *types.Type { + // Interpret as field selection, first traversing down the operand. + c.check(operand) + operandType := substitute(c.mappings, c.getType(operand), false) + + // If the target type is 'optional', unwrap it for the sake of this check. + targetType, isOpt := maybeUnwrapOptional(operandType) + + // Assume error type by default as most types do not support field selection. + resultType := types.ErrorType + switch targetType.Kind() { + case types.MapKind: + // Maps yield their value type as the selection result type. + resultType = targetType.Parameters()[1] + case types.StructKind: + // Objects yield their field type declaration as the selection result type, but only if + // the field is defined. + messageType := targetType + if fieldType, found := c.lookupFieldType(e.ID(), messageType.TypeName(), field); found { + resultType = fieldType + } + case types.TypeParamKind: + // Set the operand type to DYN to prevent assignment to a potentially incorrect type + // at a later point in type-checking. The isAssignable call will update the type + // substitutions for the type param under the covers. + c.isAssignable(types.DynType, targetType) + // Also, set the result type to DYN. + resultType = types.DynType + default: + // Dynamic / error values are treated as DYN type. Errors are handled this way as well + // in order to allow forward progress on the check. + if !isDynOrError(targetType) { + c.errors.typeDoesNotSupportFieldSelection(e.ID(), c.location(e), targetType) + } + resultType = types.DynType + } + + // If the target type was optional coming in, then the result must be optional going out. 
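+	// As a sketch of how this rule surfaces to users (assuming an environment created
+	// with the cel.OptionalTypes() option; the variable name is illustrative):
+	//
+	//	env, _ := cel.NewEnv(cel.OptionalTypes(),
+	//		cel.Variable("m", cel.MapType(cel.StringType, cel.StringType)))
+	//	ast, _ := env.Compile("m.?key") // checked type: optional_type(string)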
+ if isOpt || optional { + return types.NewOptionalType(resultType) + } + return resultType +} + +func (c *checker) checkCall(e ast.Expr) { + // Note: similar logic exists within the `interpreter/planner.go`. If making changes here + // please consider the impact on planner.go and consolidate implementations or mirror code + // as appropriate. + call := e.AsCall() + fnName := call.FunctionName() + if fnName == operators.OptSelect { + c.checkOptSelect(e) + return + } + + args := call.Args() + // Traverse arguments. + for _, arg := range args { + c.check(arg) + } + + // Regular static call with simple name. + if !call.IsMemberFunction() { + // Check for the existence of the function. + fn := c.env.LookupFunction(fnName) + if fn == nil { + c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName) + c.setType(e, types.ErrorType) + return + } + // Overwrite the function name with its fully qualified resolved name. + e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...)) + // Check to see whether the overload resolves. + c.resolveOverloadOrError(e, fn, nil, args) + return + } + + // If a receiver 'target' is present, it may either be a receiver function, or a namespaced + // function, but not both. Given a.b.c() either a.b.c is a function or c is a function with + // target a.b. + // + // Check whether the target is a namespaced function name. + target := call.Target() + qualifiedPrefix, maybeQualified := containers.ToQualifiedName(target) + if maybeQualified { + maybeQualifiedName := qualifiedPrefix + "." + fnName + fn := c.env.LookupFunction(maybeQualifiedName) + if fn != nil { + // The function name is namespaced and so preserving the target operand would + // be an inaccurate representation of the desired evaluation behavior. + // Overwrite with fully-qualified resolved function name sans receiver target. + e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...)) + c.resolveOverloadOrError(e, fn, nil, args) + return + } + } + + // Regular instance call. + c.check(target) + fn := c.env.LookupFunction(fnName) + // Function found, attempt overload resolution. + if fn != nil { + c.resolveOverloadOrError(e, fn, target, args) + return + } + // Function name not declared, record error. + c.setType(e, types.ErrorType) + c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName) +} + +func (c *checker) resolveOverloadOrError( + e ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) { + // Attempt to resolve the overload. + resolution := c.resolveOverload(e, fn, target, args) + // No such overload, error noted in the resolveOverload call, type recorded here. + if resolution == nil { + c.setType(e, types.ErrorType) + return + } + // Overload found. + c.setType(e, resolution.Type) + c.setReference(e, resolution.Reference) +} + +func (c *checker) resolveOverload( + call ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) *overloadResolution { + + var argTypes []*types.Type + if target != nil { + argTypes = append(argTypes, c.getType(target)) + } + for _, arg := range args { + argTypes = append(argTypes, c.getType(arg)) + } + + var resultType *types.Type + var checkedRef *ast.ReferenceInfo + for _, overload := range fn.OverloadDecls() { + // Determine whether the overload is currently considered. + if c.env.isOverloadDisabled(overload.ID()) { + continue + } + + // Ensure the call style for the overload matches. 
+ if (target == nil && overload.IsMemberFunction()) || + (target != nil && !overload.IsMemberFunction()) { + // not a compatible call style. + continue + } + + // Alternative type-checking behavior when the logical operators are compacted into + // variadic AST representations. + if fn.Name() == operators.LogicalAnd || fn.Name() == operators.LogicalOr { + checkedRef = ast.NewFunctionReference(overload.ID()) + for i, argType := range argTypes { + if !c.isAssignable(argType, types.BoolType) { + c.errors.typeMismatch( + args[i].ID(), + c.locationByID(args[i].ID()), + types.BoolType, + argType) + resultType = types.ErrorType + } + } + if isError(resultType) { + return nil + } + return newResolution(checkedRef, types.BoolType) + } + + overloadType := newFunctionType(overload.ResultType(), overload.ArgTypes()...) + typeParams := overload.TypeParams() + if len(typeParams) != 0 { + // Instantiate overload's type with fresh type variables. + substitutions := newMapping() + for _, typePar := range typeParams { + substitutions.add(types.NewTypeParamType(typePar), c.newTypeVar()) + } + overloadType = substitute(substitutions, overloadType, false) + } + + candidateArgTypes := overloadType.Parameters()[1:] + if c.isAssignableList(argTypes, candidateArgTypes) { + if checkedRef == nil { + checkedRef = ast.NewFunctionReference(overload.ID()) + } else { + checkedRef.AddOverload(overload.ID()) + } + + // First matching overload, determines result type. + fnResultType := substitute(c.mappings, overloadType.Parameters()[0], false) + if resultType == nil { + resultType = fnResultType + } else if !isDyn(resultType) && !fnResultType.IsExactType(resultType) { + resultType = types.DynType + } + } + } + + if resultType == nil { + for i, argType := range argTypes { + argTypes[i] = substitute(c.mappings, argType, true) + } + c.errors.noMatchingOverload(call.ID(), c.location(call), fn.Name(), argTypes, target != nil) + return nil + } + + return newResolution(checkedRef, resultType) +} + +func (c *checker) checkCreateList(e ast.Expr) { + create := e.AsList() + var elemsType *types.Type + optionalIndices := create.OptionalIndices() + optionals := make(map[int32]bool, len(optionalIndices)) + for _, optInd := range optionalIndices { + optionals[optInd] = true + } + for i, e := range create.Elements() { + c.check(e) + elemType := c.getType(e) + if optionals[int32(i)] { + var isOptional bool + elemType, isOptional = maybeUnwrapOptional(elemType) + if !isOptional && !isDyn(elemType) { + c.errors.typeMismatch(e.ID(), c.location(e), types.NewOptionalType(elemType), elemType) + } + } + elemsType = c.joinTypes(e, elemsType, elemType) + } + if elemsType == nil { + // If the list is empty, assign free type var to elem type. 
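+		// For example, the empty literal "[]" is assigned the type list(_varN); any
+		// type parameter still unbound once checking completes is substituted with
+		// dyn, so the reported type becomes list(dyn).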
+		elemsType = c.newTypeVar()
+	}
+	c.setType(e, types.NewListType(elemsType))
+}
+
+func (c *checker) checkCreateMap(e ast.Expr) {
+	mapVal := e.AsMap()
+	var mapKeyType *types.Type
+	var mapValueType *types.Type
+	for _, e := range mapVal.Entries() {
+		entry := e.AsMapEntry()
+		key := entry.Key()
+		c.check(key)
+		mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key))
+
+		val := entry.Value()
+		c.check(val)
+		valType := c.getType(val)
+		if entry.IsOptional() {
+			var isOptional bool
+			valType, isOptional = maybeUnwrapOptional(valType)
+			if !isOptional && !isDyn(valType) {
+				c.errors.typeMismatch(val.ID(), c.location(val), types.NewOptionalType(valType), valType)
+			}
+		}
+		mapValueType = c.joinTypes(val, mapValueType, valType)
+	}
+	if mapKeyType == nil {
+		// If the map is empty, assign free type variables to the key and value types.
+		mapKeyType = c.newTypeVar()
+		mapValueType = c.newTypeVar()
+	}
+	c.setType(e, types.NewMapType(mapKeyType, mapValueType))
+}
+
+func (c *checker) checkCreateStruct(e ast.Expr) {
+	msgVal := e.AsStruct()
+	// Determine the type of the message.
+	resultType := types.ErrorType
+	ident := c.env.LookupIdent(msgVal.TypeName())
+	if ident == nil {
+		c.errors.undeclaredReference(
+			e.ID(), c.location(e), c.env.container.Name(), msgVal.TypeName())
+		c.setType(e, types.ErrorType)
+		return
+	}
+	// Ensure the type name is fully qualified in the AST.
+	typeName := ident.Name()
+	if msgVal.TypeName() != typeName {
+		e.SetKindCase(c.NewStruct(e.ID(), typeName, msgVal.Fields()))
+		msgVal = e.AsStruct()
+	}
+	c.setReference(e, ast.NewIdentReference(typeName, nil))
+	identKind := ident.Type().Kind()
+	if identKind != types.ErrorKind {
+		if identKind != types.TypeKind {
+			c.errors.notAType(e.ID(), c.location(e), ident.Type().DeclaredTypeName())
+		} else {
+			resultType = ident.Type().Parameters()[0]
+			// Backwards compatibility test between well-known types and message types
+			// In this context, the type is being instantiated by its protobuf name which
+			// is not ideal or recommended, but some users expect this to work.
+			if isWellKnownType(resultType) {
+				typeName = getWellKnownTypeName(resultType)
+			} else if resultType.Kind() == types.StructKind {
+				typeName = resultType.DeclaredTypeName()
+			} else {
+				c.errors.notAMessageType(e.ID(), c.location(e), resultType.DeclaredTypeName())
+				resultType = types.ErrorType
+			}
+		}
+	}
+	c.setType(e, resultType)
+
+	// Check the field initializers.
+	for _, f := range msgVal.Fields() {
+		field := f.AsStructField()
+		fieldName := field.Name()
+		value := field.Value()
+		c.check(value)
+
+		fieldType := types.ErrorType
+		ft, found := c.lookupFieldType(f.ID(), typeName, fieldName)
+		if found {
+			fieldType = ft
+		}
+
+		valType := c.getType(value)
+		if field.IsOptional() {
+			var isOptional bool
+			valType, isOptional = maybeUnwrapOptional(valType)
+			if !isOptional && !isDyn(valType) {
+				c.errors.typeMismatch(value.ID(), c.location(value), types.NewOptionalType(valType), valType)
+			}
+		}
+		if !c.isAssignable(fieldType, valType) {
+			c.errors.fieldTypeMismatch(f.ID(), c.locationByID(f.ID()), fieldName, fieldType, valType)
+		}
+	}
+}
+
+func (c *checker) checkComprehension(e ast.Expr) {
+	comp := e.AsComprehension()
+	c.check(comp.IterRange())
+	c.check(comp.AccuInit())
+	rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false)
+
+	// Create a scope for the comprehension since it has a local accumulation variable.
+	// This scope will contain the accumulation variable used to compute the result.
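+	// For example, in "[1, 2, 3].exists(i, i > 2)" the macro expansion produces a
+	// comprehension whose hidden accumulator (a bool initialized to false) is declared
+	// in this scope, while the iteration variable i (here type int, the list element
+	// type) is declared in the nested block scope created below.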
+ accuType := c.getType(comp.AccuInit()) + c.env = c.env.enterScope() + c.env.AddIdents(decls.NewVariable(comp.AccuVar(), accuType)) + + var varType, var2Type *types.Type + switch rangeType.Kind() { + case types.ListKind: + // varType represents the list element type for one-variable comprehensions. + varType = rangeType.Parameters()[0] + if comp.HasIterVar2() { + // varType represents the list index (int) for two-variable comprehensions, + // and var2Type represents the list element type. + var2Type = varType + varType = types.IntType + } + case types.MapKind: + // varType represents the map entry key for all comprehension types. + varType = rangeType.Parameters()[0] + if comp.HasIterVar2() { + // var2Type represents the map entry value for two-variable comprehensions. + var2Type = rangeType.Parameters()[1] + } + case types.DynKind, types.ErrorKind, types.TypeParamKind: + // Set the range type to DYN to prevent assignment to a potentially incorrect type + // at a later point in type-checking. The isAssignable call will update the type + // substitutions for the type param under the covers. + c.isAssignable(types.DynType, rangeType) + // Set the range iteration variable to type DYN as well. + varType = types.DynType + default: + c.errors.notAComprehensionRange(comp.IterRange().ID(), c.location(comp.IterRange()), rangeType) + varType = types.ErrorType + } + + // Create a block scope for the loop. + c.env = c.env.enterScope() + c.env.AddIdents(decls.NewVariable(comp.IterVar(), varType)) + if comp.HasIterVar2() { + c.env.AddIdents(decls.NewVariable(comp.IterVar2(), var2Type)) + } + // Check the variable references in the condition and step. + c.check(comp.LoopCondition()) + c.assertType(comp.LoopCondition(), types.BoolType) + c.check(comp.LoopStep()) + c.assertType(comp.LoopStep(), accuType) + // Exit the loop's block scope before checking the result. + c.env = c.env.exitScope() + c.check(comp.Result()) + // Exit the comprehension scope. + c.env = c.env.exitScope() + c.setType(e, substitute(c.mappings, c.getType(comp.Result()), false)) +} + +// Checks compatibility of joined types, and returns the most general common type. 
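+// For example, joining int with int yields int, while joining the element types of
+// [1, 'one'] either yields dyn (when dyn aggregate literal element typing is enabled)
+// or reports a type mismatch and yields the error type.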
+func (c *checker) joinTypes(e ast.Expr, previous, current *types.Type) *types.Type { + if previous == nil { + return current + } + if c.isAssignable(previous, current) { + return mostGeneral(previous, current) + } + if c.dynAggregateLiteralElementTypesEnabled() { + return types.DynType + } + c.errors.typeMismatch(e.ID(), c.location(e), previous, current) + return types.ErrorType +} + +func (c *checker) dynAggregateLiteralElementTypesEnabled() bool { + return c.env.aggLitElemType == dynElementType +} + +func (c *checker) newTypeVar() *types.Type { + id := c.freeTypeVarCounter + c.freeTypeVarCounter++ + return types.NewTypeParamType(fmt.Sprintf("_var%d", id)) +} + +func (c *checker) isAssignable(t1, t2 *types.Type) bool { + subs := isAssignable(c.mappings, t1, t2) + if subs != nil { + c.mappings = subs + return true + } + + return false +} + +func (c *checker) isAssignableList(l1, l2 []*types.Type) bool { + subs := isAssignableList(c.mappings, l1, l2) + if subs != nil { + c.mappings = subs + return true + } + + return false +} + +func maybeUnwrapString(e ast.Expr) (string, bool) { + switch e.Kind() { + case ast.LiteralKind: + literal := e.AsLiteral() + switch v := literal.(type) { + case types.String: + return string(v), true + } + } + return "", false +} + +func (c *checker) setType(e ast.Expr, t *types.Type) { + if old, found := c.TypeMap()[e.ID()]; found && !old.IsExactType(t) { + c.errors.incompatibleType(e.ID(), c.location(e), e, old, t) + return + } + c.SetType(e.ID(), t) +} + +func (c *checker) getType(e ast.Expr) *types.Type { + return c.TypeMap()[e.ID()] +} + +func (c *checker) setReference(e ast.Expr, r *ast.ReferenceInfo) { + if old, found := c.ReferenceMap()[e.ID()]; found && !old.Equals(r) { + c.errors.referenceRedefinition(e.ID(), c.location(e), e, old, r) + return + } + c.SetReference(e.ID(), r) +} + +func (c *checker) assertType(e ast.Expr, t *types.Type) { + if !c.isAssignable(t, c.getType(e)) { + c.errors.typeMismatch(e.ID(), c.location(e), t, c.getType(e)) + } +} + +type overloadResolution struct { + Type *types.Type + Reference *ast.ReferenceInfo +} + +func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution { + return &overloadResolution{ + Reference: r, + Type: t, + } +} + +func (c *checker) location(e ast.Expr) common.Location { + return c.locationByID(e.ID()) +} + +func (c *checker) locationByID(id int64) common.Location { + return c.SourceInfo().GetStartLocation(id) +} + +func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) { + if _, found := c.env.provider.FindStructType(structType); !found { + // This should not happen, anyway, report an error. 
+ c.errors.unexpectedFailedResolution(exprID, c.locationByID(exprID), structType) + return nil, false + } + + if ft, found := c.env.provider.FindStructFieldType(structType, fieldName); found { + return ft.Type, found + } + + c.errors.undefinedField(exprID, c.locationByID(exprID), fieldName) + return nil, false +} + +func isWellKnownType(t *types.Type) bool { + switch t.Kind() { + case types.AnyKind, types.TimestampKind, types.DurationKind, types.DynKind, types.NullTypeKind: + return true + case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind: + return t.IsAssignableType(types.NullType) + case types.ListKind: + return t.Parameters()[0] == types.DynType + case types.MapKind: + return t.Parameters()[0] == types.StringType && t.Parameters()[1] == types.DynType + } + return false +} + +func getWellKnownTypeName(t *types.Type) string { + if name, found := wellKnownTypes[t.Kind()]; found { + return name + } + return "" +} + +var ( + wellKnownTypes = map[types.Kind]string{ + types.AnyKind: "google.protobuf.Any", + types.BoolKind: "google.protobuf.BoolValue", + types.BytesKind: "google.protobuf.BytesValue", + types.DoubleKind: "google.protobuf.DoubleValue", + types.DurationKind: "google.protobuf.Duration", + types.DynKind: "google.protobuf.Value", + types.IntKind: "google.protobuf.Int64Value", + types.ListKind: "google.protobuf.ListValue", + types.NullTypeKind: "google.protobuf.NullValue", + types.MapKind: "google.protobuf.Struct", + types.StringKind: "google.protobuf.StringValue", + types.TimestampKind: "google.protobuf.Timestamp", + types.UintKind: "google.protobuf.UInt64Value", + } +) diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/cost.go b/hack/tools/vendor/github.com/google/cel-go/checker/cost.go new file mode 100644 index 0000000000..04244694d8 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/checker/cost.go @@ -0,0 +1,705 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checker + +import ( + "math" + + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/parser" +) + +// WARNING: Any changes to cost calculations in this file require a corresponding change in interpreter/runtimecost.go + +// CostEstimator estimates the sizes of variable length input data and the costs of functions. +type CostEstimator interface { + // EstimateSize returns a SizeEstimate for the given AstNode, or nil if + // the estimator has no estimate to provide. The size is equivalent to the result of the CEL `size()` function: + // length of strings and bytes, number of map entries or number of list items. + // EstimateSize is only called for AstNodes where + // CEL does not know the size; EstimateSize is not called for values defined inline in CEL where the size + // is already obvious to CEL. 
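+	// As an illustrative sketch (the variable name is hypothetical): an
+	// estimator that knows a string variable `request.path` never exceeds
+	// 1024 characters could return &SizeEstimate{Min: 0, Max: 1024} for that
+	// node and nil for every other node.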
+	EstimateSize(element AstNode) *SizeEstimate
+	// EstimateCallCost returns the estimated cost of an invocation, or nil if
+	// the estimator has no estimate to provide.
+	EstimateCallCost(function, overloadID string, target *AstNode, args []AstNode) *CallEstimate
+}
+
+// CallEstimate includes a CostEstimate for the call, and an optional estimate of the result object size.
+// The ResultSize should only be provided if the call results in a map, list, string or bytes.
+type CallEstimate struct {
+	CostEstimate
+	ResultSize *SizeEstimate
+}
+
+// AstNode represents an AST node for the purpose of cost estimations.
+type AstNode interface {
+	// Path returns a field path through the provided type declarations to the type of the AstNode, or nil if the AstNode does not
+	// represent a type directly reachable from the provided type declarations.
+	// The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'.
+	Path() []string
+	// Type returns the deduced type of the AstNode.
+	Type() *types.Type
+	// Expr returns the expression of the AstNode.
+	Expr() ast.Expr
+	// ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression.
+	// For constants and inline list and map declarations, the exact size is returned. For concatenated lists, strings
+	// and bytes, the size is derived from the size estimates of the operands. nil is returned if there is no
+	// computed size available.
+	ComputedSize() *SizeEstimate
+}
+
+type astNode struct {
+	path        []string
+	t           *types.Type
+	expr        ast.Expr
+	derivedSize *SizeEstimate
+}
+
+func (e astNode) Path() []string {
+	return e.path
+}
+
+func (e astNode) Type() *types.Type {
+	return e.t
+}
+
+func (e astNode) Expr() ast.Expr {
+	return e.expr
+}
+
+func (e astNode) ComputedSize() *SizeEstimate {
+	if e.derivedSize != nil {
+		return e.derivedSize
+	}
+	var v uint64
+	switch e.expr.Kind() {
+	case ast.LiteralKind:
+		switch ck := e.expr.AsLiteral().(type) {
+		case types.String:
+			// converting to runes here is an O(n) operation, but
+			// this is consistent with how size is computed at runtime,
+			// and how the language definition defines string size
+			v = uint64(len([]rune(ck)))
+		case types.Bytes:
+			v = uint64(len(ck))
+		case types.Bool, types.Double, types.Duration,
+			types.Int, types.Timestamp, types.Uint,
+			types.Null:
+			v = uint64(1)
+		default:
+			return nil
+		}
+	case ast.ListKind:
+		v = uint64(e.expr.AsList().Size())
+	case ast.MapKind:
+		v = uint64(e.expr.AsMap().Size())
+	default:
+		return nil
+	}
+
+	return &SizeEstimate{Min: v, Max: v}
+}
+
+// SizeEstimate represents an estimated size of a variable length string, bytes, map or list.
+type SizeEstimate struct {
+	Min, Max uint64
+}
+
+// Add adds to another SizeEstimate and returns the sum.
+// If the addition would result in a uint64 overflow, the result is math.MaxUint64.
+func (se SizeEstimate) Add(sizeEstimate SizeEstimate) SizeEstimate {
+	return SizeEstimate{
+		addUint64NoOverflow(se.Min, sizeEstimate.Min),
+		addUint64NoOverflow(se.Max, sizeEstimate.Max),
+	}
+}
+
+// Multiply multiplies by another SizeEstimate and returns the product.
+// If the multiplication would result in a uint64 overflow, the result is math.MaxUint64.
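+// For example (illustrative): SizeEstimate{Min: 2, Max: 3} multiplied by
+// SizeEstimate{Min: 4, Max: math.MaxUint64} yields {Min: 8, Max: math.MaxUint64},
+// since the overflowing bound saturates rather than wrapping.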
+func (se SizeEstimate) Multiply(sizeEstimate SizeEstimate) SizeEstimate {
+	return SizeEstimate{
+		multiplyUint64NoOverflow(se.Min, sizeEstimate.Min),
+		multiplyUint64NoOverflow(se.Max, sizeEstimate.Max),
+	}
+}
+
+// MultiplyByCostFactor multiplies a SizeEstimate by a cost factor and returns the CostEstimate with the
+// nearest integer of the result, rounded up.
+func (se SizeEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
+	return CostEstimate{
+		multiplyByCostFactor(se.Min, costPerUnit),
+		multiplyByCostFactor(se.Max, costPerUnit),
+	}
+}
+
+// MultiplyByCost multiplies by the cost and returns the product.
+// If the multiplication would result in a uint64 overflow, the result is math.MaxUint64.
+func (se SizeEstimate) MultiplyByCost(cost CostEstimate) CostEstimate {
+	return CostEstimate{
+		multiplyUint64NoOverflow(se.Min, cost.Min),
+		multiplyUint64NoOverflow(se.Max, cost.Max),
+	}
+}
+
+// Union returns a SizeEstimate that encompasses both input SizeEstimates.
+func (se SizeEstimate) Union(size SizeEstimate) SizeEstimate {
+	result := se
+	if size.Min < result.Min {
+		result.Min = size.Min
+	}
+	if size.Max > result.Max {
+		result.Max = size.Max
+	}
+	return result
+}
+
+// CostEstimate represents an estimated cost range and provides add and multiply operations
+// that do not overflow.
+type CostEstimate struct {
+	Min, Max uint64
+}
+
+// Add adds the costs and returns the sum.
+// If the addition would result in a uint64 overflow for the min or max, the value is set to math.MaxUint64.
+func (ce CostEstimate) Add(cost CostEstimate) CostEstimate {
+	return CostEstimate{
+		addUint64NoOverflow(ce.Min, cost.Min),
+		addUint64NoOverflow(ce.Max, cost.Max),
+	}
+}
+
+// Multiply multiplies by the cost and returns the product.
+// If the multiplication would result in a uint64 overflow, the result is math.MaxUint64.
+func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate {
+	return CostEstimate{
+		multiplyUint64NoOverflow(ce.Min, cost.Min),
+		multiplyUint64NoOverflow(ce.Max, cost.Max),
+	}
+}
+
+// MultiplyByCostFactor multiplies a CostEstimate by a cost factor and returns the CostEstimate with the
+// nearest integer of the result, rounded up.
+func (ce CostEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
+	return CostEstimate{
+		multiplyByCostFactor(ce.Min, costPerUnit),
+		multiplyByCostFactor(ce.Max, costPerUnit),
+	}
+}
+
+// Union returns a CostEstimate that encompasses both input CostEstimates.
+func (ce CostEstimate) Union(size CostEstimate) CostEstimate {
+	result := ce
+	if size.Min < result.Min {
+		result.Min = size.Min
+	}
+	if size.Max > result.Max {
+		result.Max = size.Max
+	}
+	return result
+}
+
+// addUint64NoOverflow adds non-negative ints. If the sum would exceed math.MaxUint64, math.MaxUint64
+// is returned.
+func addUint64NoOverflow(x, y uint64) uint64 {
+	if y > 0 && x > math.MaxUint64-y {
+		return math.MaxUint64
+	}
+	return x + y
+}
+
+// multiplyUint64NoOverflow multiplies non-negative ints. If the product would exceed math.MaxUint64, math.MaxUint64
+// is returned.
+func multiplyUint64NoOverflow(x, y uint64) uint64 {
+	if y != 0 && x > math.MaxUint64/y {
+		return math.MaxUint64
+	}
+	return x * y
+}
+
+// multiplyByCostFactor multiplies an integer by a cost factor float and returns the nearest integer value, rounded up.
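+// For example (illustrative): multiplyByCostFactor(10, 0.1) returns 1, and
+// multiplyByCostFactor(3, 0.5) returns 2, because math.Ceil rounds 1.5 up.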
+func multiplyByCostFactor(x uint64, y float64) uint64 {
+	xFloat := float64(x)
+	if xFloat > 0 && y > 0 && xFloat > math.MaxUint64/y {
+		return math.MaxUint64
+	}
+	ceil := math.Ceil(xFloat * y)
+	if ceil >= doubleTwoTo64 {
+		return math.MaxUint64
+	}
+	return uint64(ceil)
+}
+
+var (
+	selectAndIdentCost = CostEstimate{Min: common.SelectAndIdentCost, Max: common.SelectAndIdentCost}
+	constCost          = CostEstimate{Min: common.ConstCost, Max: common.ConstCost}
+
+	createListBaseCost    = CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
+	createMapBaseCost     = CostEstimate{Min: common.MapCreateBaseCost, Max: common.MapCreateBaseCost}
+	createMessageBaseCost = CostEstimate{Min: common.StructCreateBaseCost, Max: common.StructCreateBaseCost}
+)
+
+type coster struct {
+	// exprPath maps from Expr Id to field path.
+	exprPath map[int64][]string
+	// iterRanges tracks the iterRange of each iterVar.
+	iterRanges iterRangeScopes
+	// computedSizes tracks the computed sizes of call results.
+	computedSizes      map[int64]SizeEstimate
+	checkedAST         *ast.AST
+	estimator          CostEstimator
+	overloadEstimators map[string]FunctionEstimator
+	// presenceTestCost will be either zero or one, depending on whether has() macros count against cost computations.
+	presenceTestCost CostEstimate
+}
+
+// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names.
+type iterRangeScopes map[string][]int64
+
+func (vs iterRangeScopes) push(varName string, expr ast.Expr) {
+	vs[varName] = append(vs[varName], expr.ID())
+}
+
+func (vs iterRangeScopes) pop(varName string) {
+	varStack := vs[varName]
+	vs[varName] = varStack[:len(varStack)-1]
+}
+
+func (vs iterRangeScopes) peek(varName string) (int64, bool) {
+	varStack := vs[varName]
+	if len(varStack) > 0 {
+		return varStack[len(varStack)-1], true
+	}
+	return 0, false
+}
+
+// CostOption configures flags which affect cost computations.
+type CostOption func(*coster) error
+
+// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
+//
+// Defaults to a cost of one.
+func PresenceTestHasCost(hasCost bool) CostOption {
+	return func(c *coster) error {
+		if hasCost {
+			c.presenceTestCost = selectAndIdentCost
+			return nil
+		}
+		c.presenceTestCost = CostEstimate{Min: 0, Max: 0}
+		return nil
+	}
+}
+
+// FunctionEstimator provides a CallEstimate given the target and arguments for a specific function, overload pair.
+type FunctionEstimator func(estimator CostEstimator, target *AstNode, args []AstNode) *CallEstimate
+
+// OverloadCostEstimate binds a FunctionEstimator to a specific function overload ID.
+//
+// When an OverloadCostEstimate is provided, it will override the cost calculation of the CostEstimator provided to
+// the Cost() call.
+func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) CostOption {
+	return func(c *coster) error {
+		c.overloadEstimators[overloadID] = functionCoster
+		return nil
+	}
+}
+
+// Cost estimates the cost of the parsed and type checked CEL expression.
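+//
+// Illustrative usage (the AST, estimator, and budget names are hypothetical):
+//
+//	est, err := Cost(checkedAST, myEstimator, PresenceTestHasCost(false))
+//	if err == nil && est.Max > budget {
+//		// reject the expression as too expensive
+//	}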
+func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) { + c := &coster{ + checkedAST: checked, + estimator: estimator, + overloadEstimators: map[string]FunctionEstimator{}, + exprPath: map[int64][]string{}, + iterRanges: map[string][]int64{}, + computedSizes: map[int64]SizeEstimate{}, + presenceTestCost: CostEstimate{Min: 1, Max: 1}, + } + for _, opt := range opts { + err := opt(c) + if err != nil { + return CostEstimate{}, err + } + } + return c.cost(checked.Expr()), nil +} + +func (c *coster) cost(e ast.Expr) CostEstimate { + if e == nil { + return CostEstimate{} + } + var cost CostEstimate + switch e.Kind() { + case ast.LiteralKind: + cost = constCost + case ast.IdentKind: + cost = c.costIdent(e) + case ast.SelectKind: + cost = c.costSelect(e) + case ast.CallKind: + cost = c.costCall(e) + case ast.ListKind: + cost = c.costCreateList(e) + case ast.MapKind: + cost = c.costCreateMap(e) + case ast.StructKind: + cost = c.costCreateStruct(e) + case ast.ComprehensionKind: + cost = c.costComprehension(e) + default: + return CostEstimate{} + } + return cost +} + +func (c *coster) costIdent(e ast.Expr) CostEstimate { + identName := e.AsIdent() + // build and track the field path + if iterRange, ok := c.iterRanges.peek(identName); ok { + switch c.checkedAST.GetType(iterRange).Kind() { + case types.ListKind: + c.addPath(e, append(c.exprPath[iterRange], "@items")) + case types.MapKind: + c.addPath(e, append(c.exprPath[iterRange], "@keys")) + } + } else { + c.addPath(e, []string{identName}) + } + + return selectAndIdentCost +} + +func (c *coster) costSelect(e ast.Expr) CostEstimate { + sel := e.AsSelect() + var sum CostEstimate + if sel.IsTestOnly() { + // recurse, but do not add any cost + // this is equivalent to how evalTestOnly increments the runtime cost counter + // but does not add any additional cost for the qualifier, except here we do + // the reverse (ident adds cost) + sum = sum.Add(c.presenceTestCost) + sum = sum.Add(c.cost(sel.Operand())) + return sum + } + sum = sum.Add(c.cost(sel.Operand())) + targetType := c.getType(sel.Operand()) + switch targetType.Kind() { + case types.MapKind, types.StructKind, types.TypeParamKind: + sum = sum.Add(selectAndIdentCost) + } + + // build and track the field path + c.addPath(e, append(c.getPath(sel.Operand()), sel.FieldName())) + + return sum +} + +func (c *coster) costCall(e ast.Expr) CostEstimate { + call := e.AsCall() + args := call.Args() + + var sum CostEstimate + + argTypes := make([]AstNode, len(args)) + argCosts := make([]CostEstimate, len(args)) + for i, arg := range args { + argCosts[i] = c.cost(arg) + argTypes[i] = c.newAstNode(arg) + } + + overloadIDs := c.checkedAST.GetOverloadIDs(e.ID()) + if len(overloadIDs) == 0 { + return CostEstimate{} + } + var targetType AstNode + if call.IsMemberFunction() { + sum = sum.Add(c.cost(call.Target())) + targetType = c.newAstNode(call.Target()) + } + // Pick a cost estimate range that covers all the overload cost estimation ranges + fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0} + var resultSize *SizeEstimate + for _, overload := range overloadIDs { + overloadCost := c.functionCost(call.FunctionName(), overload, &targetType, argTypes, argCosts) + fnCost = fnCost.Union(overloadCost.CostEstimate) + if overloadCost.ResultSize != nil { + if resultSize == nil { + resultSize = overloadCost.ResultSize + } else { + size := resultSize.Union(*overloadCost.ResultSize) + resultSize = &size + } + } + // build and track the field path for index operations + 
switch overload {
+		case overloads.IndexList:
+			if len(args) > 0 {
+				c.addPath(e, append(c.getPath(args[0]), "@items"))
+			}
+		case overloads.IndexMap:
+			if len(args) > 0 {
+				c.addPath(e, append(c.getPath(args[0]), "@values"))
+			}
+		}
+	}
+	if resultSize != nil {
+		c.computedSizes[e.ID()] = *resultSize
+	}
+	return sum.Add(fnCost)
+}
+
+func (c *coster) costCreateList(e ast.Expr) CostEstimate {
+	create := e.AsList()
+	var sum CostEstimate
+	for _, e := range create.Elements() {
+		sum = sum.Add(c.cost(e))
+	}
+	return sum.Add(createListBaseCost)
+}
+
+func (c *coster) costCreateMap(e ast.Expr) CostEstimate {
+	mapVal := e.AsMap()
+	var sum CostEstimate
+	for _, ent := range mapVal.Entries() {
+		entry := ent.AsMapEntry()
+		sum = sum.Add(c.cost(entry.Key()))
+		sum = sum.Add(c.cost(entry.Value()))
+	}
+	return sum.Add(createMapBaseCost)
+}
+
+func (c *coster) costCreateStruct(e ast.Expr) CostEstimate {
+	msgVal := e.AsStruct()
+	var sum CostEstimate
+	for _, ent := range msgVal.Fields() {
+		field := ent.AsStructField()
+		sum = sum.Add(c.cost(field.Value()))
+	}
+	return sum.Add(createMessageBaseCost)
+}
+
+func (c *coster) costComprehension(e ast.Expr) CostEstimate {
+	comp := e.AsComprehension()
+	var sum CostEstimate
+	sum = sum.Add(c.cost(comp.IterRange()))
+	sum = sum.Add(c.cost(comp.AccuInit()))
+
+	// Track the iterRange of each IterVar for field path construction
+	c.iterRanges.push(comp.IterVar(), comp.IterRange())
+	loopCost := c.cost(comp.LoopCondition())
+	stepCost := c.cost(comp.LoopStep())
+	c.iterRanges.pop(comp.IterVar())
+	sum = sum.Add(c.cost(comp.Result()))
+	rangeCnt := c.sizeEstimate(c.newAstNode(comp.IterRange()))
+
+	c.computedSizes[e.ID()] = rangeCnt
+
+	rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost))
+	sum = sum.Add(rangeCost)
+
+	return sum
+}
+
+func (c *coster) sizeEstimate(t AstNode) SizeEstimate {
+	if l := t.ComputedSize(); l != nil {
+		return *l
+	}
+	if l := c.estimator.EstimateSize(t); l != nil {
+		return *l
+	}
+	// Return an estimate of 1 for types with a fixed, known size, since
+	// strings, bytes, and more complex objects can be of variable length.
+	if isScalar(t.Type()) {
+		// TODO: since the logic for size estimation is split between
+		// ComputedSize and isScalar, changing one will likely require changing
+		// the other, so they should be merged in the future if possible
+		return SizeEstimate{Min: 1, Max: 1}
+	}
+	return SizeEstimate{Min: 0, Max: math.MaxUint64}
+}
+
+func (c *coster) functionCost(function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate {
+	argCostSum := func() CostEstimate {
+		var sum CostEstimate
+		for _, a := range argCosts {
+			sum = sum.Add(a)
+		}
+		return sum
+	}
+	if len(c.overloadEstimators) != 0 {
+		if estimator, found := c.overloadEstimators[overloadID]; found {
+			if est := estimator(c.estimator, target, args); est != nil {
+				callEst := *est
+				return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
+			}
+		}
+	}
+	if est := c.estimator.EstimateCallCost(function, overloadID, target, args); est != nil {
+		callEst := *est
+		return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
+	}
+	switch overloadID {
+	// O(n) functions
+	case overloads.ExtFormatString:
+		if target != nil {
+			// ResultSize not calculated because we can't bound the max size.
+			return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+		}
+	case overloads.StringToBytes:
+		if len(args) == 1 {
+			sz := c.sizeEstimate(args[0])
+			// ResultSize max is when each char converts to 4 bytes.
+			return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min, Max: sz.Max * 4}}
+		}
+	case overloads.BytesToString:
+		if len(args) == 1 {
+			sz := c.sizeEstimate(args[0])
+			// ResultSize min is when 4 bytes convert to 1 char.
+			return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}}
+		}
+	case overloads.ExtQuoteString:
+		if len(args) == 1 {
+			sz := c.sizeEstimate(args[0])
+			// ResultSize max is when each char is escaped. 2 quote chars always added.
+			return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}}
+		}
+	case overloads.StartsWithString, overloads.EndsWithString:
+		if len(args) == 1 {
+			return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+		}
+	case overloads.InList:
+		// If a list is composed entirely of constant values this is O(1), but we don't account for that here.
+		// We just assume all list containment checks are O(n).
+		if len(args) == 2 {
+			return CallEstimate{CostEstimate: c.sizeEstimate(args[1]).MultiplyByCostFactor(1).Add(argCostSum())}
+		}
+	// O(nm) functions
+	case overloads.MatchesString:
+		// https://swtch.com/~rsc/regexp/regexp1.html applies to the RE2 implementation supported by CEL
+		if target != nil && len(args) == 1 {
+			// Add one to the string length for the purposes of cost calculation to prevent the product of
+			// string and regex sizes from being 0 in the case where the string is empty but the regex is
+			// still expensive.
+			strCost := c.sizeEstimate(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor)
+			// We don't know how many expressions are in the regex, just the string length (a huge
+			// improvement here would be to somehow get a count of the number of expressions in the regex or
+			// how many states are in the regex state machine and use that to measure regex cost).
+			// For now, we're making a guess that each expression in a regex is typically at least 4 chars
+			// in length.
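+			// Worked example (illustrative): for a target of size {Min: 0, Max: 100}
+			// and a 20-character regex, strCost is {Min: 1, Max: 11} (size plus one,
+			// scaled by the 0.1 traversal factor, rounded up) and regexCost is
+			// {Min: 5, Max: 5}, so the product below spans {Min: 5, Max: 55}.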
+ regexCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor) + return CallEstimate{CostEstimate: strCost.Multiply(regexCost).Add(argCostSum())} + } + case overloads.ContainsString: + if target != nil && len(args) == 1 { + strCost := c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor) + substrCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor) + return CallEstimate{CostEstimate: strCost.Multiply(substrCost).Add(argCostSum())} + } + case overloads.LogicalOr, overloads.LogicalAnd: + lhs := argCosts[0] + rhs := argCosts[1] + // min cost is min of LHS for short circuited && or || + argCost := CostEstimate{Min: lhs.Min, Max: lhs.Add(rhs).Max} + return CallEstimate{CostEstimate: argCost} + case overloads.Conditional: + size := c.sizeEstimate(args[1]).Union(c.sizeEstimate(args[2])) + conditionalCost := argCosts[0] + ifTrueCost := argCosts[1] + ifFalseCost := argCosts[2] + argCost := conditionalCost.Add(ifTrueCost.Union(ifFalseCost)) + return CallEstimate{CostEstimate: argCost, ResultSize: &size} + case overloads.AddString, overloads.AddBytes, overloads.AddList: + if len(args) == 2 { + lhsSize := c.sizeEstimate(args[0]) + rhsSize := c.sizeEstimate(args[1]) + resultSize := lhsSize.Add(rhsSize) + switch overloadID { + case overloads.AddList: + // list concatenation is O(1), but we handle it here to track size + return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum()), ResultSize: &resultSize} + default: + return CallEstimate{CostEstimate: resultSize.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &resultSize} + } + } + case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString, + overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes, + overloads.Equals, overloads.NotEquals: + lhsCost := c.sizeEstimate(args[0]) + rhsCost := c.sizeEstimate(args[1]) + min := uint64(0) + smallestMax := lhsCost.Max + if rhsCost.Max < smallestMax { + smallestMax = rhsCost.Max + } + if smallestMax > 0 { + min = 1 + } + // equality of 2 scalar values results in a cost of 1 + return CallEstimate{CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())} + } + // O(1) functions + // See CostTracker.costCall for more details about O(1) cost calculations + + // Benchmarks suggest that most of the other operations take +/- 50% of a base cost unit + // which on an Intel xeon 2.20GHz CPU is 50ns. + return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())} +} + +func (c *coster) getType(e ast.Expr) *types.Type { + return c.checkedAST.GetType(e.ID()) +} + +func (c *coster) getPath(e ast.Expr) []string { + return c.exprPath[e.ID()] +} + +func (c *coster) addPath(e ast.Expr, path []string) { + c.exprPath[e.ID()] = path +} + +func (c *coster) newAstNode(e ast.Expr) *astNode { + path := c.getPath(e) + if len(path) > 0 && path[0] == parser.AccumulatorName { + // only provide paths to root vars; omit accumulator vars + path = nil + } + var derivedSize *SizeEstimate + if size, ok := c.computedSizes[e.ID()]; ok { + derivedSize = &size + } + return &astNode{ + path: path, + t: c.getType(e), + expr: e, + derivedSize: derivedSize} +} + +// isScalar returns true if the given type is known to be of a constant size at +// compile time. 
isScalar will return false for strings (they are variable-width) +// in addition to protobuf.Any and protobuf.Value (their size is not knowable at compile time). +func isScalar(t *types.Type) bool { + switch t.Kind() { + case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind: + return true + } + return false +} + +var ( + doubleTwoTo64 = math.Ldexp(1.0, 64) +) diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel new file mode 100644 index 0000000000..a6b0be292c --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "decls.go", + ], + importpath = "github.com/google/cel-go/checker/decls", + deps = [ + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//types/known/emptypb:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/decls/decls.go b/hack/tools/vendor/github.com/google/cel-go/checker/decls/decls.go new file mode 100644 index 0000000000..c0e5de469f --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/checker/decls/decls.go @@ -0,0 +1,237 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package decls provides helpers for creating variable and function declarations. +package decls + +import ( + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +var ( + // Error type used to communicate issues during type-checking. + Error = &exprpb.Type{ + TypeKind: &exprpb.Type_Error{ + Error: &emptypb.Empty{}}} + + // Dyn is a top-type used to represent any value. + Dyn = &exprpb.Type{ + TypeKind: &exprpb.Type_Dyn{ + Dyn: &emptypb.Empty{}}} +) + +// Commonly used types. +var ( + Bool = NewPrimitiveType(exprpb.Type_BOOL) + Bytes = NewPrimitiveType(exprpb.Type_BYTES) + Double = NewPrimitiveType(exprpb.Type_DOUBLE) + Int = NewPrimitiveType(exprpb.Type_INT64) + Null = &exprpb.Type{ + TypeKind: &exprpb.Type_Null{ + Null: structpb.NullValue_NULL_VALUE}} + String = NewPrimitiveType(exprpb.Type_STRING) + Uint = NewPrimitiveType(exprpb.Type_UINT64) +) + +// Well-known types. +// TODO: Replace with an abstract type registry. +var ( + Any = NewWellKnownType(exprpb.Type_ANY) + Duration = NewWellKnownType(exprpb.Type_DURATION) + Timestamp = NewWellKnownType(exprpb.Type_TIMESTAMP) +) + +// NewAbstractType creates an abstract type declaration which references a proto +// message name and may also include type parameters. 
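+// For example (illustrative): NewAbstractType("optional_type", String) produces
+// the same type that NewOptionalType(String) returns below.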
+func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type {
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_AbstractType_{
+			AbstractType: &exprpb.Type_AbstractType{
+				Name:           name,
+				ParameterTypes: paramTypes}}}
+}
+
+// NewOptionalType constructs an abstract type indicating that the parameterized type
+// may be contained within the object.
+func NewOptionalType(paramType *exprpb.Type) *exprpb.Type {
+	return NewAbstractType("optional_type", paramType)
+}
+
+// NewFunctionType creates a function invocation contract, typically only used
+// by type-checking steps after overload resolution.
+func NewFunctionType(resultType *exprpb.Type,
+	argTypes ...*exprpb.Type) *exprpb.Type {
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_Function{
+			Function: &exprpb.Type_FunctionType{
+				ResultType: resultType,
+				ArgTypes:   argTypes}}}
+}
+
+// NewFunction creates a named function declaration with one or more overloads.
+func NewFunction(name string,
+	overloads ...*exprpb.Decl_FunctionDecl_Overload) *exprpb.Decl {
+	return &exprpb.Decl{
+		Name: name,
+		DeclKind: &exprpb.Decl_Function{
+			Function: &exprpb.Decl_FunctionDecl{
+				Overloads: overloads}}}
+}
+
+// NewIdent creates a named identifier declaration with an optional literal
+// value.
+//
+// Literal values are typically only associated with enum identifiers.
+//
+// Deprecated: Use NewVar or NewConst instead.
+func NewIdent(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
+	return &exprpb.Decl{
+		Name: name,
+		DeclKind: &exprpb.Decl_Ident{
+			Ident: &exprpb.Decl_IdentDecl{
+				Type:  t,
+				Value: v}}}
+}
+
+// NewConst creates a constant identifier with a CEL constant literal value.
+func NewConst(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
+	return NewIdent(name, t, v)
+}
+
+// NewVar creates a variable identifier.
+func NewVar(name string, t *exprpb.Type) *exprpb.Decl {
+	return NewIdent(name, t, nil)
+}
+
+// NewInstanceOverload creates an instance function overload contract.
+// The first element of argTypes is the instance.
+func NewInstanceOverload(id string, argTypes []*exprpb.Type,
+	resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
+	return &exprpb.Decl_FunctionDecl_Overload{
+		OverloadId:         id,
+		ResultType:         resultType,
+		Params:             argTypes,
+		IsInstanceFunction: true}
+}
+
+// NewListType generates a new list with elements of a certain type.
+func NewListType(elem *exprpb.Type) *exprpb.Type {
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_ListType_{
+			ListType: &exprpb.Type_ListType{
+				ElemType: elem}}}
+}
+
+// NewMapType generates a new map with typed keys and values.
+func NewMapType(key *exprpb.Type, value *exprpb.Type) *exprpb.Type {
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_MapType_{
+			MapType: &exprpb.Type_MapType{
+				KeyType:   key,
+				ValueType: value}}}
+}
+
+// NewObjectType creates an object type for a qualified type name.
+func NewObjectType(typeName string) *exprpb.Type {
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_MessageType{
+			MessageType: typeName}}
+}
+
+// NewOverload creates a function overload declaration which contains a unique
+// overload id as well as the expected argument and result types. Overloads
+// must be aggregated within a Function declaration.
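+//
+// For example (illustrative): NewOverload("size_string", []*exprpb.Type{String}, Int)
+// describes a global overload for `size(string) -> int`.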
+func NewOverload(id string, argTypes []*exprpb.Type, + resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload { + return &exprpb.Decl_FunctionDecl_Overload{ + OverloadId: id, + ResultType: resultType, + Params: argTypes, + IsInstanceFunction: false} +} + +// NewParameterizedInstanceOverload creates a parametric function instance overload type. +func NewParameterizedInstanceOverload(id string, + argTypes []*exprpb.Type, + resultType *exprpb.Type, + typeParams []string) *exprpb.Decl_FunctionDecl_Overload { + return &exprpb.Decl_FunctionDecl_Overload{ + OverloadId: id, + ResultType: resultType, + Params: argTypes, + TypeParams: typeParams, + IsInstanceFunction: true} +} + +// NewParameterizedOverload creates a parametric function overload type. +func NewParameterizedOverload(id string, + argTypes []*exprpb.Type, + resultType *exprpb.Type, + typeParams []string) *exprpb.Decl_FunctionDecl_Overload { + return &exprpb.Decl_FunctionDecl_Overload{ + OverloadId: id, + ResultType: resultType, + Params: argTypes, + TypeParams: typeParams, + IsInstanceFunction: false} +} + +// NewPrimitiveType creates a type for a primitive value. See the var declarations +// for Int, Uint, etc. +func NewPrimitiveType(primitive exprpb.Type_PrimitiveType) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_Primitive{ + Primitive: primitive}} +} + +// NewTypeType creates a new type designating a type. +func NewTypeType(nested *exprpb.Type) *exprpb.Type { + if nested == nil { + // must set the nested field for a valid oneof option + nested = &exprpb.Type{} + } + return &exprpb.Type{ + TypeKind: &exprpb.Type_Type{ + Type: nested}} +} + +// NewTypeParamType creates a type corresponding to a named, contextual parameter. +func NewTypeParamType(name string) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_TypeParam{ + TypeParam: name}} +} + +// NewWellKnownType creates a type corresponding to a protobuf well-known type +// value. +func NewWellKnownType(wellKnown exprpb.Type_WellKnownType) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_WellKnown{ + WellKnown: wellKnown}} +} + +// NewWrapperType creates a wrapped primitive type instance. Wrapped types +// are roughly equivalent to a nullable, or optionally valued type. +func NewWrapperType(wrapped *exprpb.Type) *exprpb.Type { + primitive := wrapped.GetPrimitive() + if primitive == exprpb.Type_PRIMITIVE_TYPE_UNSPECIFIED { + // TODO: return an error + panic("Wrapped type must be a primitive") + } + return &exprpb.Type{ + TypeKind: &exprpb.Type_Wrapper{ + Wrapper: primitive}} +} diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/env.go b/hack/tools/vendor/github.com/google/cel-go/checker/env.go new file mode 100644 index 0000000000..d5ac05014e --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/checker/env.go @@ -0,0 +1,284 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package checker + +import ( + "fmt" + "strings" + + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/decls" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/parser" +) + +type aggregateLiteralElementType int + +const ( + dynElementType aggregateLiteralElementType = iota + homogenousElementType aggregateLiteralElementType = 1 << iota +) + +var ( + crossTypeNumericComparisonOverloads = map[string]struct{}{ + // double <-> int | uint + overloads.LessDoubleInt64: {}, + overloads.LessDoubleUint64: {}, + overloads.LessEqualsDoubleInt64: {}, + overloads.LessEqualsDoubleUint64: {}, + overloads.GreaterDoubleInt64: {}, + overloads.GreaterDoubleUint64: {}, + overloads.GreaterEqualsDoubleInt64: {}, + overloads.GreaterEqualsDoubleUint64: {}, + // int <-> double | uint + overloads.LessInt64Double: {}, + overloads.LessInt64Uint64: {}, + overloads.LessEqualsInt64Double: {}, + overloads.LessEqualsInt64Uint64: {}, + overloads.GreaterInt64Double: {}, + overloads.GreaterInt64Uint64: {}, + overloads.GreaterEqualsInt64Double: {}, + overloads.GreaterEqualsInt64Uint64: {}, + // uint <-> double | int + overloads.LessUint64Double: {}, + overloads.LessUint64Int64: {}, + overloads.LessEqualsUint64Double: {}, + overloads.LessEqualsUint64Int64: {}, + overloads.GreaterUint64Double: {}, + overloads.GreaterUint64Int64: {}, + overloads.GreaterEqualsUint64Double: {}, + overloads.GreaterEqualsUint64Int64: {}, + } +) + +// Env is the environment for type checking. +// +// The Env is comprised of a container, type provider, declarations, and other related objects +// which can be used to assist with type-checking. +type Env struct { + container *containers.Container + provider types.Provider + declarations *Scopes + aggLitElemType aggregateLiteralElementType + filteredOverloadIDs map[string]struct{} +} + +// NewEnv returns a new *Env with the given parameters. +func NewEnv(container *containers.Container, provider types.Provider, opts ...Option) (*Env, error) { + declarations := newScopes() + declarations.Push() + + envOptions := &options{} + for _, opt := range opts { + if err := opt(envOptions); err != nil { + return nil, err + } + } + aggLitElemType := dynElementType + if envOptions.homogeneousAggregateLiterals { + aggLitElemType = homogenousElementType + } + filteredOverloadIDs := crossTypeNumericComparisonOverloads + if envOptions.crossTypeNumericComparisons { + filteredOverloadIDs = make(map[string]struct{}) + } + if envOptions.validatedDeclarations != nil { + declarations = envOptions.validatedDeclarations.Copy() + } + return &Env{ + container: container, + provider: provider, + declarations: declarations, + aggLitElemType: aggLitElemType, + filteredOverloadIDs: filteredOverloadIDs, + }, nil +} + +// AddIdents configures the checker with a list of variable declarations. +// +// If there are overlapping declarations, the method will error. +func (e *Env) AddIdents(declarations ...*decls.VariableDecl) error { + errMsgs := make([]errorMsg, 0) + for _, d := range declarations { + errMsgs = append(errMsgs, e.addIdent(d)) + } + return formatError(errMsgs) +} + +// AddFunctions configures the checker with a list of function declarations. +// +// If there are overlapping declarations, the method will error. +func (e *Env) AddFunctions(declarations ...*decls.FunctionDecl) error { + errMsgs := make([]errorMsg, 0) + for _, d := range declarations { + errMsgs = append(errMsgs, e.setFunction(d)...) 
+	}
+	return formatError(errMsgs)
+}
+
+// LookupIdent returns a decls.VariableDecl for the given name as an identifier in the Env.
+// Returns nil if no such identifier is found in the Env.
+func (e *Env) LookupIdent(name string) *decls.VariableDecl {
+	for _, candidate := range e.container.ResolveCandidateNames(name) {
+		if ident := e.declarations.FindIdent(candidate); ident != nil {
+			return ident
+		}
+
+		// Next try to import the name as a reference to a message type. If found,
+		// the declaration is added to the outermost (global) scope of the
+		// environment, so next time we can access it faster.
+		if t, found := e.provider.FindStructType(candidate); found {
+			decl := decls.NewVariable(candidate, t)
+			e.declarations.AddIdent(decl)
+			return decl
+		}
+
+		if i, found := e.provider.FindIdent(candidate); found {
+			if t, ok := i.(*types.Type); ok {
+				decl := decls.NewVariable(candidate, types.NewTypeTypeWithParam(t))
+				e.declarations.AddIdent(decl)
+				return decl
+			}
+		}
+
+		// Next try to import this as an enum value by splitting the name into a type prefix and
+		// the enum inside.
+		if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType {
+			decl := decls.NewConstant(candidate, types.IntType, enumValue)
+			e.declarations.AddIdent(decl)
+			return decl
+		}
+	}
+	return nil
+}
+
+// LookupFunction returns a decls.FunctionDecl for the given name as a function in the Env.
+// Returns nil if no such function is found in the Env.
+func (e *Env) LookupFunction(name string) *decls.FunctionDecl {
+	for _, candidate := range e.container.ResolveCandidateNames(name) {
+		if fn := e.declarations.FindFunction(candidate); fn != nil {
+			return fn
+		}
+	}
+	return nil
+}
+
+// setFunction adds the function decl to the Env.
+// Adds a function decl if one doesn't already exist, then adds all overloads from the decl.
+// If an overload overlaps with an existing overload, adds to the errors in the Env instead.
+func (e *Env) setFunction(fn *decls.FunctionDecl) []errorMsg {
+	errMsgs := make([]errorMsg, 0)
+	current := e.declarations.FindFunction(fn.Name())
+	if current != nil {
+		var err error
+		current, err = current.Merge(fn)
+		if err != nil {
+			return append(errMsgs, errorMsg(err.Error()))
+		}
+	} else {
+		current = fn
+	}
+	for _, overload := range current.OverloadDecls() {
+		for _, macro := range parser.AllMacros {
+			if macro.Function() == current.Name() &&
+				macro.IsReceiverStyle() == overload.IsMemberFunction() &&
+				macro.ArgCount() == len(overload.ArgTypes()) {
+				errMsgs = append(errMsgs, overlappingMacroError(current.Name(), macro.ArgCount()))
+			}
+		}
+		if len(errMsgs) > 0 {
+			return errMsgs
+		}
+	}
+	e.declarations.SetFunction(current)
+	return errMsgs
+}
+
+// addIdent adds the decl to the declarations in the Env.
+// Returns a non-empty errorMsg if the identifier is already declared in the scope.
+func (e *Env) addIdent(decl *decls.VariableDecl) errorMsg {
+	current := e.declarations.FindIdentInScope(decl.Name())
+	if current != nil {
+		if current.DeclarationIsEquivalent(decl) {
+			return ""
+		}
+		return overlappingIdentifierError(decl.Name())
+	}
+	e.declarations.AddIdent(decl)
+	return ""
+}
+
+// isOverloadDisabled returns whether the overloadID is disabled in the current environment.
+func (e *Env) isOverloadDisabled(overloadID string) bool {
+	_, found := e.filteredOverloadIDs[overloadID]
+	return found
+}
+
+// validatedDeclarations returns a reference to the validated variable and function declaration scope stack.
+// It must be copied before use.
+func (e *Env) validatedDeclarations() *Scopes { + return e.declarations +} + +// enterScope creates a new Env instance with a new innermost declaration scope. +func (e *Env) enterScope() *Env { + childDecls := e.declarations.Push() + return &Env{ + declarations: childDecls, + container: e.container, + provider: e.provider, + aggLitElemType: e.aggLitElemType, + } +} + +// exitScope creates a new Env instance with the nearest outer declaration scope. +func (e *Env) exitScope() *Env { + parentDecls := e.declarations.Pop() + return &Env{ + declarations: parentDecls, + container: e.container, + provider: e.provider, + aggLitElemType: e.aggLitElemType, + } +} + +// errorMsg is a type alias meant to represent error-based return values which +// may be accumulated into an error at a later point in execution. +type errorMsg string + +func overlappingIdentifierError(name string) errorMsg { + return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name)) +} + +func overlappingMacroError(name string, argCount int) errorMsg { + return errorMsg(fmt.Sprintf( + "overlapping macro for name '%s' with %d args", name, argCount)) +} + +func formatError(errMsgs []errorMsg) error { + errStrs := make([]string, 0) + if len(errMsgs) > 0 { + for i := 0; i < len(errMsgs); i++ { + if errMsgs[i] != "" { + errStrs = append(errStrs, string(errMsgs[i])) + } + } + } + if len(errStrs) > 0 { + return fmt.Errorf("%s", strings.Join(errStrs, "\n")) + } + return nil +} diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/errors.go b/hack/tools/vendor/github.com/google/cel-go/checker/errors.go new file mode 100644 index 0000000000..8b3bf0b8b6 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/checker/errors.go @@ -0,0 +1,88 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checker + +import ( + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" +) + +// typeErrors is a specialization of Errors. 
+type typeErrors struct { + errs *common.Errors +} + +func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string, field, value *types.Type) { + e.errs.ReportErrorAtID(id, l, "expected type of field '%s' is '%s' but provided type is '%s'", + name, FormatCELType(field), FormatCELType(value)) +} + +func (e *typeErrors) incompatibleType(id int64, l common.Location, ex ast.Expr, prev, next *types.Type) { + e.errs.ReportErrorAtID(id, l, + "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next) +} + +func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) { + signature := formatFunctionDeclType(nil, args, isInstance) + e.errs.ReportErrorAtID(id, l, "found no matching overload for '%s' applied to '%s'", name, signature) +} + +func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *types.Type) { + e.errs.ReportErrorAtID(id, l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)", + FormatCELType(t)) +} + +func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field ast.Expr) { + e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field) +} + +func (e *typeErrors) notAType(id int64, l common.Location, typeName string) { + e.errs.ReportErrorAtID(id, l, "'%s' is not a type", typeName) +} + +func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName string) { + e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName) +} + +func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex ast.Expr, prev, next *ast.ReferenceInfo) { + e.errs.ReportErrorAtID(id, l, + "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next) +} + +func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) { + e.errs.ReportErrorAtID(id, l, "type '%s' does not support field selection", FormatCELType(t)) +} + +func (e *typeErrors) typeMismatch(id int64, l common.Location, expected, actual *types.Type) { + e.errs.ReportErrorAtID(id, l, "expected type '%s' but found '%s'", + FormatCELType(expected), FormatCELType(actual)) +} + +func (e *typeErrors) undefinedField(id int64, l common.Location, field string) { + e.errs.ReportErrorAtID(id, l, "undefined field '%s'", field) +} + +func (e *typeErrors) undeclaredReference(id int64, l common.Location, container string, name string) { + e.errs.ReportErrorAtID(id, l, "undeclared reference to '%s' (in container '%s')", name, container) +} + +func (e *typeErrors) unexpectedFailedResolution(id int64, l common.Location, typeName string) { + e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName) +} + +func (e *typeErrors) unexpectedASTType(id int64, l common.Location, kind, typeName string) { + e.errs.ReportErrorAtID(id, l, "unexpected %s type: %v", kind, typeName) +} diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/format.go b/hack/tools/vendor/github.com/google/cel-go/checker/format.go new file mode 100644 index 0000000000..95842905e6 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/checker/format.go @@ -0,0 +1,216 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checker + +import ( + "fmt" + "strings" + + chkdecls "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/types" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +const ( + kindUnknown = iota + 1 + kindError + kindFunction + kindDyn + kindPrimitive + kindWellKnown + kindWrapper + kindNull + kindAbstract + kindType + kindList + kindMap + kindObject + kindTypeParam +) + +// FormatCheckedType converts a type message into a string representation. +func FormatCheckedType(t *exprpb.Type) string { + switch kindOf(t) { + case kindDyn: + return "dyn" + case kindFunction: + return formatFunctionExprType(t.GetFunction().GetResultType(), + t.GetFunction().GetArgTypes(), + false) + case kindList: + return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType())) + case kindObject: + return t.GetMessageType() + case kindMap: + return fmt.Sprintf("map(%s, %s)", + FormatCheckedType(t.GetMapType().GetKeyType()), + FormatCheckedType(t.GetMapType().GetValueType())) + case kindNull: + return "null" + case kindPrimitive: + switch t.GetPrimitive() { + case exprpb.Type_UINT64: + return "uint" + case exprpb.Type_INT64: + return "int" + } + return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ") + case kindType: + if t.GetType() == nil || t.GetType().GetTypeKind() == nil { + return "type" + } + return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType())) + case kindWellKnown: + switch t.GetWellKnown() { + case exprpb.Type_ANY: + return "any" + case exprpb.Type_DURATION: + return "duration" + case exprpb.Type_TIMESTAMP: + return "timestamp" + } + case kindWrapper: + return fmt.Sprintf("wrapper(%s)", + FormatCheckedType(chkdecls.NewPrimitiveType(t.GetWrapper()))) + case kindError: + return "!error!" + case kindTypeParam: + return t.GetTypeParam() + case kindAbstract: + at := t.GetAbstractType() + params := at.GetParameterTypes() + paramStrs := make([]string, len(params)) + for i, p := range params { + paramStrs[i] = FormatCheckedType(p) + } + return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", ")) + } + return t.String() +} + +type formatter func(any) string + +// FormatCELType formats a types.Type value to a string representation. +// +// The type formatting is identical to FormatCheckedType. +func FormatCELType(t any) string { + dt := t.(*types.Type) + switch dt.Kind() { + case types.AnyKind: + return "any" + case types.DurationKind: + return "duration" + case types.ErrorKind: + return "!error!" + case types.NullTypeKind: + return "null" + case types.TimestampKind: + return "timestamp" + case types.TypeParamKind: + return dt.TypeName() + case types.OpaqueKind: + if dt.TypeName() == "function" { + // There is no explicit function type in the new types representation, so information like + // whether the function is a member function is absent. 
+ return formatFunctionDeclType(dt.Parameters()[0], dt.Parameters()[1:], false) + } + case types.UnspecifiedKind: + return "" + } + if len(dt.Parameters()) == 0 { + return dt.DeclaredTypeName() + } + paramTypeNames := make([]string, 0, len(dt.Parameters())) + for _, p := range dt.Parameters() { + paramTypeNames = append(paramTypeNames, FormatCELType(p)) + } + return fmt.Sprintf("%s(%s)", dt.TypeName(), strings.Join(paramTypeNames, ", ")) +} + +func formatExprType(t any) string { + if t == nil { + return "" + } + return FormatCheckedType(t.(*exprpb.Type)) +} + +func formatFunctionExprType(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string { + return formatFunctionInternal[*exprpb.Type](resultType, argTypes, isInstance, formatExprType) +} + +func formatFunctionDeclType(resultType *types.Type, argTypes []*types.Type, isInstance bool) string { + return formatFunctionInternal[*types.Type](resultType, argTypes, isInstance, FormatCELType) +} + +func formatFunctionInternal[T any](resultType T, argTypes []T, isInstance bool, format formatter) string { + result := "" + if isInstance { + target := argTypes[0] + argTypes = argTypes[1:] + result += format(target) + result += "." + } + result += "(" + for i, arg := range argTypes { + if i > 0 { + result += ", " + } + result += format(arg) + } + result += ")" + rt := format(resultType) + if rt != "" { + result += " -> " + result += rt + } + return result +} + +// kindOf returns the kind of the type as defined in the checked.proto. +func kindOf(t *exprpb.Type) int { + if t == nil || t.TypeKind == nil { + return kindUnknown + } + switch t.GetTypeKind().(type) { + case *exprpb.Type_Error: + return kindError + case *exprpb.Type_Function: + return kindFunction + case *exprpb.Type_Dyn: + return kindDyn + case *exprpb.Type_Primitive: + return kindPrimitive + case *exprpb.Type_WellKnown: + return kindWellKnown + case *exprpb.Type_Wrapper: + return kindWrapper + case *exprpb.Type_Null: + return kindNull + case *exprpb.Type_Type: + return kindType + case *exprpb.Type_ListType_: + return kindList + case *exprpb.Type_MapType_: + return kindMap + case *exprpb.Type_MessageType: + return kindObject + case *exprpb.Type_TypeParam: + return kindTypeParam + case *exprpb.Type_AbstractType_: + return kindAbstract + } + return kindUnknown +} diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/mapping.go b/hack/tools/vendor/github.com/google/cel-go/checker/mapping.go new file mode 100644 index 0000000000..8163a908a5 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/checker/mapping.go @@ -0,0 +1,49 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package checker + +import ( + "github.com/google/cel-go/common/types" +) + +type mapping struct { + mapping map[string]*types.Type +} + +func newMapping() *mapping { + return &mapping{ + mapping: make(map[string]*types.Type), + } +} + +func (m *mapping) add(from, to *types.Type) { + m.mapping[FormatCELType(from)] = to +} + +func (m *mapping) find(from *types.Type) (*types.Type, bool) { + if r, found := m.mapping[FormatCELType(from)]; found { + return r, found + } + return nil, false +} + +func (m *mapping) copy() *mapping { + c := newMapping() + + for k, v := range m.mapping { + c.mapping[k] = v + } + return c +} diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/options.go b/hack/tools/vendor/github.com/google/cel-go/checker/options.go new file mode 100644 index 0000000000..0560c3813c --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/checker/options.go @@ -0,0 +1,42 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checker + +type options struct { + crossTypeNumericComparisons bool + homogeneousAggregateLiterals bool + validatedDeclarations *Scopes +} + +// Option is a functional option for configuring the type-checker. +type Option func(*options) error + +// CrossTypeNumericComparisons toggles type-checker support for numeric comparisons across types. +// See https://github.com/google/cel-spec/wiki/proposal-210 for more details. +func CrossTypeNumericComparisons(enabled bool) Option { + return func(opts *options) error { + opts.crossTypeNumericComparisons = enabled + return nil + } +} + +// ValidatedDeclarations provides a reference to validated declarations which will be copied +// into new checker instances. +func ValidatedDeclarations(env *Env) Option { + return func(opts *options) error { + opts.validatedDeclarations = env.validatedDeclarations() + return nil + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/printer.go b/hack/tools/vendor/github.com/google/cel-go/checker/printer.go new file mode 100644 index 0000000000..7a3984f02c --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/checker/printer.go @@ -0,0 +1,74 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
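+
+// printer.go renders an expression as an adorned debug string in which each
+// node is annotated with its checked type (prefixed '~') and any resolved
+// overloads (prefixed '^', separated by '|').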
+ +package checker + +import ( + "sort" + + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/debug" +) + +type semanticAdorner struct { + checked *ast.AST +} + +var _ debug.Adorner = &semanticAdorner{} + +func (a *semanticAdorner) GetMetadata(elem any) string { + result := "" + e, isExpr := elem.(ast.Expr) + if !isExpr { + return result + } + t := a.checked.TypeMap()[e.ID()] + if t != nil { + result += "~" + result += FormatCELType(t) + } + + switch e.Kind() { + case ast.IdentKind, + ast.CallKind, + ast.ListKind, + ast.StructKind, + ast.SelectKind: + if ref, found := a.checked.ReferenceMap()[e.ID()]; found { + if len(ref.OverloadIDs) == 0 { + result += "^" + ref.Name + } else { + sort.Strings(ref.OverloadIDs) + for i, overload := range ref.OverloadIDs { + if i == 0 { + result += "^" + } else { + result += "|" + } + result += overload + } + } + } + } + + return result +} + +// Print returns a string representation of the Expr message, +// annotated with types from the checked AST. The Expr must +// be a sub-expression embedded in the checked AST. +func Print(e ast.Expr, checked *ast.AST) string { + a := &semanticAdorner{checked: checked} + return debug.ToAdornedDebugString(e, a) +} diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/scopes.go b/hack/tools/vendor/github.com/google/cel-go/checker/scopes.go new file mode 100644 index 0000000000..8bb73ddb6a --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/checker/scopes.go @@ -0,0 +1,147 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checker + +import ( + "github.com/google/cel-go/common/decls" +) + +// Scopes represents nested Decl sets where the Scopes value contains a Group containing all +// identifiers in scope and an optional parent representing outer scopes. +// Each Group value is a mapping of names to Decls in the ident and function namespaces. +// Lookups are performed such that bindings in inner scopes shadow those in outer scopes. +type Scopes struct { + parent *Scopes + scopes *Group +} + +// newScopes creates a new, empty Scopes. +// Some operations can't be safely performed until a Group is added with Push. +func newScopes() *Scopes { + return &Scopes{ + scopes: newGroup(), + } +} + +// Copy creates a copy of the current Scopes values, including a copy of its parent if non-nil. +func (s *Scopes) Copy() *Scopes { + cpy := newScopes() + if s == nil { + return cpy + } + if s.parent != nil { + cpy.parent = s.parent.Copy() + } + cpy.scopes = s.scopes.copy() + return cpy +} + +// Push creates a new Scopes value which references the current Scope as its parent. +func (s *Scopes) Push() *Scopes { + return &Scopes{ + parent: s, + scopes: newGroup(), + } +} + +// Pop returns the parent Scopes value for the current scope, or the current scope if the parent +// is nil. +func (s *Scopes) Pop() *Scopes { + if s.parent != nil { + return s.parent + } + // TODO: Consider whether this should be an error / panic.
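+	// For now, popping the root scope is a no-op that returns the root itself.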
+ return s +} + +// AddIdent adds the ident Decl to the current scope. +// Note: If the name collides with an existing identifier in the scope, the Decl is overwritten. +func (s *Scopes) AddIdent(decl *decls.VariableDecl) { + s.scopes.idents[decl.Name()] = decl +} + +// FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be +// found. +// Note: The search is performed from innermost to outermost. +func (s *Scopes) FindIdent(name string) *decls.VariableDecl { + if ident, found := s.scopes.idents[name]; found { + return ident + } + if s.parent != nil { + return s.parent.FindIdent(name) + } + return nil +} + +// FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or +// nil if one does not exist. +// Note: The search is only performed on the current scope and does not search outer scopes. +func (s *Scopes) FindIdentInScope(name string) *decls.VariableDecl { + if ident, found := s.scopes.idents[name]; found { + return ident + } + return nil +} + +// SetFunction adds the function Decl to the current scope. +// Note: Any previous entry for a function in the current scope with the same name is overwritten. +func (s *Scopes) SetFunction(fn *decls.FunctionDecl) { + s.scopes.functions[fn.Name()] = fn +} + +// FindFunction finds the first function Decl with a matching name in Scopes. +// The search is performed from innermost to outermost. +// Returns nil if no such function exists in Scopes. +func (s *Scopes) FindFunction(name string) *decls.FunctionDecl { + if fn, found := s.scopes.functions[name]; found { + return fn + } + if s.parent != nil { + return s.parent.FindFunction(name) + } + return nil +} + +// Group is a set of Decls that is pushed on or popped off a Scopes as a unit. +// Contains separate namespaces for identifier and function Decls. +// (Should be named "Scope" perhaps?) +type Group struct { + idents map[string]*decls.VariableDecl + functions map[string]*decls.FunctionDecl +} + +// copy creates a new Group instance with a shallow copy of the variables and functions. +// If callers need to mutate the declaration definitions for a Function, they should copy-on-write. +func (g *Group) copy() *Group { + cpy := &Group{ + idents: make(map[string]*decls.VariableDecl, len(g.idents)), + functions: make(map[string]*decls.FunctionDecl, len(g.functions)), + } + for n, id := range g.idents { + cpy.idents[n] = id + } + for n, fn := range g.functions { + cpy.functions[n] = fn + } + return cpy +} + +// newGroup creates a new Group with empty maps for identifiers and functions. +func newGroup() *Group { + return &Group{ + idents: make(map[string]*decls.VariableDecl), + functions: make(map[string]*decls.FunctionDecl), + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/checker/types.go b/hack/tools/vendor/github.com/google/cel-go/checker/types.go new file mode 100644 index 0000000000..4c65b2737c --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/checker/types.go @@ -0,0 +1,314 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package checker + +import ( + "github.com/google/cel-go/common/types" +) + +// isDyn returns true if the input t is either type DYN or a well-known ANY message. +func isDyn(t *types.Type) bool { + // Note: object type values that are well-known and map to a DYN value in practice + // are sanitized prior to being added to the environment. + switch t.Kind() { + case types.DynKind, types.AnyKind: + return true + default: + return false + } +} + +// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message. +func isDynOrError(t *types.Type) bool { + return isError(t) || isDyn(t) +} + +func isError(t *types.Type) bool { + return t.Kind() == types.ErrorKind +} + +func isOptional(t *types.Type) bool { + if t.Kind() == types.OpaqueKind { + return t.TypeName() == "optional_type" + } + return false +} + +func maybeUnwrapOptional(t *types.Type) (*types.Type, bool) { + if isOptional(t) { + return t.Parameters()[0], true + } + return t, false +} + +// isEqualOrLessSpecific checks whether one type is equal or less specific than the other one. +// A type is less specific if it matches the other type using the DYN type. +func isEqualOrLessSpecific(t1, t2 *types.Type) bool { + kind1, kind2 := t1.Kind(), t2.Kind() + // The first type is less specific. + if isDyn(t1) || kind1 == types.TypeParamKind { + return true + } + // The first type is not less specific. + if isDyn(t2) || kind2 == types.TypeParamKind { + return false + } + // Types must be of the same kind to be equal. + if kind1 != kind2 { + return false + } + + // With limited exceptions for ANY and JSON values, the types must agree and be equivalent in + // order to return true. + switch kind1 { + case types.OpaqueKind: + if t1.TypeName() != t2.TypeName() || + len(t1.Parameters()) != len(t2.Parameters()) { + return false + } + for i, p1 := range t1.Parameters() { + if !isEqualOrLessSpecific(p1, t2.Parameters()[i]) { + return false + } + } + return true + case types.ListKind: + return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0]) + case types.MapKind: + return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0]) && + isEqualOrLessSpecific(t1.Parameters()[1], t2.Parameters()[1]) + case types.TypeKind: + return true + default: + return t1.IsExactType(t2) + } +} + +// internalIsAssignable returns true if t1 is assignable to t2. +func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool { + // Process type parameters. + kind1, kind2 := t1.Kind(), t2.Kind() + if kind2 == types.TypeParamKind { + // If t2 is a valid type substitution for t1, return true. + valid, t2HasSub := isValidTypeSubstitution(m, t1, t2) + if valid { + return true + } + // If t2 is not a valid type sub for t1, and already has a known substitution return false + // since it is not possible for t1 to be a substitution for t2. + if !valid && t2HasSub { + return false + } + // Otherwise, fall through to check whether t1 is a possible substitution for t2. + } + if kind1 == types.TypeParamKind { + // Return whether t1 is a valid substitution for t2. If not, do no additional checks as the + // possible type substitutions have been searched in both directions. + valid, _ := isValidTypeSubstitution(m, t2, t1) + return valid + } + + // Next check for wildcard types. + if isDynOrError(t1) || isDynOrError(t2) { + return true + } + // Preserve the nullness checks of the legacy type-checker.
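+	// Under the legacy rules, null is assignable to opaque, struct, any, duration,
+	// and timestamp types, as well as anything assignable from types.NullType
+	// (see internalIsAssignableNull and isLegacyNullable below).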
+ if kind1 == types.NullTypeKind { + return internalIsAssignableNull(t2) + } + if kind2 == types.NullTypeKind { + return internalIsAssignableNull(t1) + } + + // Test for when the types do not need to agree, but are more specific than dyn. + switch kind1 { + case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind, + types.AnyKind, types.DurationKind, types.TimestampKind, + types.StructKind: + // Test whether t2 is assignable from t1. The order of this check won't usually matter; + // however, there may be cases where type capabilities are expanded beyond what is supported + // in the current common/types package. For example, an interface designation for a group of + // Struct types. + return t2.IsAssignableType(t1) + case types.TypeKind: + return kind2 == types.TypeKind + case types.OpaqueKind, types.ListKind, types.MapKind: + return t1.Kind() == t2.Kind() && t1.TypeName() == t2.TypeName() && + internalIsAssignableList(m, t1.Parameters(), t2.Parameters()) + default: + return false + } +} + +// isValidTypeSubstitution returns whether t2 (or its type substitution) is a valid type +// substitution for t1, and whether t2 has a type substitution in mapping m. +// +// The type t2 is a valid substitution for t1 if any of the following statements is true: +// - t2 has a type substitution (t2sub) equal to t1 +// - t2 has a type substitution (t2sub) assignable to t1 +// - t2 does not occur within t1. +func isValidTypeSubstitution(m *mapping, t1, t2 *types.Type) (valid, hasSub bool) { + // Early return if t1 and t2 are the same instance. + kind1, kind2 := t1.Kind(), t2.Kind() + if kind1 == kind2 && t1.IsExactType(t2) { + return true, true + } + if t2Sub, found := m.find(t2); found { + // Early return if t1 and t2Sub are the same instance as otherwise the mapping + // might mark a type as being a substitution for itself. + if kind1 == t2Sub.Kind() && t1.IsExactType(t2Sub) { + return true, true + } + // If the types are compatible, pick the more general type and return true. + if internalIsAssignable(m, t1, t2Sub) { + t2New := mostGeneral(t1, t2Sub) + // only update the type reference map if the target type does not occur within it. + if notReferencedIn(m, t2, t2New) { + m.add(t2, t2New) + } + // acknowledge the type agreement, and that the substitution is already tracked. + return true, true + } + return false, true + } + if notReferencedIn(m, t2, t1) { + m.add(t2, t1) + return true, false + } + return false, false +} + +// internalIsAssignableList returns true if the element types at each index in the list are +// assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be +// assignable. +func internalIsAssignableList(m *mapping, l1, l2 []*types.Type) bool { + if len(l1) != len(l2) { + return false + } + for i, t1 := range l1 { + if !internalIsAssignable(m, t1, l2[i]) { + return false + } + } + return true +} + +// internalIsAssignableNull returns true if the type is nullable. +func internalIsAssignableNull(t *types.Type) bool { + return isLegacyNullable(t) || t.IsAssignableType(types.NullType) +} + +// isLegacyNullable preserves the null-ness compatibility of the original type-checker implementation. +func isLegacyNullable(t *types.Type) bool { + switch t.Kind() { + case types.OpaqueKind, types.StructKind, types.AnyKind, types.DurationKind, types.TimestampKind: + return true + } + return false +} + +// isAssignable returns an updated type substitution mapping if t1 is assignable to t2.
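+//
+// Illustrative example (not part of the upstream docs): with an empty mapping m
+// and a type parameter T (types.NewTypeParamType("T")),
+// isAssignable(m, types.IntType, T) returns a copy of m in which T is bound to
+// int, while isAssignable(m, types.IntType, types.StringType) returns nil.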
+func isAssignable(m *mapping, t1, t2 *types.Type) *mapping { + mCopy := m.copy() + if internalIsAssignable(mCopy, t1, t2) { + return mCopy + } + return nil +} + +// isAssignableList returns an updated type substitution mapping if l1 is assignable to l2. +func isAssignableList(m *mapping, l1, l2 []*types.Type) *mapping { + mCopy := m.copy() + if internalIsAssignableList(mCopy, l1, l2) { + return mCopy + } + return nil +} + +// mostGeneral returns the more general of two types which are known to unify. +func mostGeneral(t1, t2 *types.Type) *types.Type { + if isEqualOrLessSpecific(t1, t2) { + return t1 + } + return t2 +} + +// notReferencedIn checks whether the type doesn't appear directly or transitively within the other +// type. This is a standard requirement for type unification, commonly referred to as the "occurs +// check". +func notReferencedIn(m *mapping, t, withinType *types.Type) bool { + if t.IsExactType(withinType) { + return false + } + withinKind := withinType.Kind() + switch withinKind { + case types.TypeParamKind: + wtSub, found := m.find(withinType) + if !found { + return true + } + return notReferencedIn(m, t, wtSub) + case types.OpaqueKind, types.ListKind, types.MapKind, types.TypeKind: + for _, pt := range withinType.Parameters() { + if !notReferencedIn(m, t, pt) { + return false + } + } + return true + default: + return true + } +} + +// substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type +// parameters are replaced by DYN if typeParamToDyn is true. +func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type { + if tSub, found := m.find(t); found { + return substitute(m, tSub, typeParamToDyn) + } + kind := t.Kind() + if typeParamToDyn && kind == types.TypeParamKind { + return types.DynType + } + switch kind { + case types.OpaqueKind: + return types.NewOpaqueType(t.TypeName(), substituteParams(m, t.Parameters(), typeParamToDyn)...) + case types.ListKind: + return types.NewListType(substitute(m, t.Parameters()[0], typeParamToDyn)) + case types.MapKind: + return types.NewMapType(substitute(m, t.Parameters()[0], typeParamToDyn), + substitute(m, t.Parameters()[1], typeParamToDyn)) + case types.TypeKind: + if len(t.Parameters()) > 0 { + tParam := t.Parameters()[0] + return types.NewTypeTypeWithParam(substitute(m, tParam, typeParamToDyn)) + } + return t + default: + return t + } +} + +func substituteParams(m *mapping, typeParams []*types.Type, typeParamToDyn bool) []*types.Type { + subParams := make([]*types.Type, len(typeParams)) + for i, tp := range typeParams { + subParams[i] = substitute(m, tp, typeParamToDyn) + } + return subParams +} + +func newFunctionType(resultType *types.Type, argTypes ...*types.Type) *types.Type { + return types.NewOpaqueType("function", append([]*types.Type{resultType}, argTypes...)...) 
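+	// Note: the result type is stored first so that FormatCELType can recover it
+	// as Parameters()[0] when formatting function types.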
+} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/BUILD.bazel new file mode 100644 index 0000000000..eef7f281be --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/BUILD.bazel @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "cost.go", + "error.go", + "errors.go", + "location.go", + "source.go", + ], + importpath = "github.com/google/cel-go/common", + deps = [ + "//common/runes:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "errors_test.go", + "source_test.go", + ], + embed = [ + ":go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/ast/BUILD.bazel new file mode 100644 index 0000000000..9824f57a9f --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/ast/BUILD.bazel @@ -0,0 +1,57 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "ast.go", + "conversion.go", + "expr.go", + "factory.go", + "navigable.go", + ], + importpath = "github.com/google/cel-go/common/ast", + deps = [ + "//common:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "@dev_cel_expr//:expr", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "ast_test.go", + "conversion_test.go", + "expr_test.go", + "navigable_test.go", + ], + embed = [ + ":go_default_library", + ], + deps = [ + "//checker:go_default_library", + "//checker/decls:go_default_library", + "//common:go_default_library", + "//common/containers:go_default_library", + "//common/decls:go_default_library", + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/stdlib:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//parser:go_default_library", + "//test/proto3pb:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//encoding/prototext:go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/ast/ast.go b/hack/tools/vendor/github.com/google/cel-go/common/ast/ast.go new file mode 100644 index 0000000000..b807669d49 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/ast/ast.go @@ -0,0 +1,457 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ast declares data structures useful for parsed and checked abstract syntax trees. +package ast + +import ( + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// AST contains the parsed expression and source info along with CEL-native type and reference information. +type AST struct { + expr Expr + sourceInfo *SourceInfo + typeMap map[int64]*types.Type + refMap map[int64]*ReferenceInfo +} + +// Expr returns the root ast.Expr value in the AST. +func (a *AST) Expr() Expr { + if a == nil { + return nilExpr + } + return a.expr +} + +// SourceInfo returns the source metadata associated with the parse / type-check passes. +func (a *AST) SourceInfo() *SourceInfo { + if a == nil { + return nil + } + return a.sourceInfo +} + +// GetType returns the type for the expression at the given id, if one exists, else types.DynType. +func (a *AST) GetType(id int64) *types.Type { + if t, found := a.TypeMap()[id]; found { + return t + } + return types.DynType +} + +// SetType sets the type of the expression node at the given id. +func (a *AST) SetType(id int64, t *types.Type) { + if a == nil { + return + } + a.typeMap[id] = t +} + +// TypeMap returns the map of expression ids to type-checked types. +// +// If the AST is not type-checked, the map will be empty. +func (a *AST) TypeMap() map[int64]*types.Type { + if a == nil { + return map[int64]*types.Type{} + } + return a.typeMap +} + +// GetOverloadIDs returns the set of overload ids for a given expression id. +// +// If the expression id is not a function call, or the AST is not type-checked, the result will be empty. +func (a *AST) GetOverloadIDs(id int64) []string { + if ref, found := a.ReferenceMap()[id]; found { + return ref.OverloadIDs + } + return []string{} +} + +// ReferenceMap returns the map of expression id to identifier, constant, and function references. +func (a *AST) ReferenceMap() map[int64]*ReferenceInfo { + if a == nil { + return map[int64]*ReferenceInfo{} + } + return a.refMap +} + +// SetReference adds a reference to the checked AST reference map. +func (a *AST) SetReference(id int64, r *ReferenceInfo) { + if a == nil { + return + } + a.refMap[id] = r +} + +// IsChecked returns whether the AST is type-checked. +func (a *AST) IsChecked() bool { + return a != nil && len(a.TypeMap()) > 0 +} + +// NewAST creates a base AST instance with an ast.Expr and ast.SourceInfo value. +func NewAST(e Expr, sourceInfo *SourceInfo) *AST { + if e == nil { + e = nilExpr + } + return &AST{ + expr: e, + sourceInfo: sourceInfo, + typeMap: make(map[int64]*types.Type), + refMap: make(map[int64]*ReferenceInfo), + } +} + +// NewCheckedAST wraps a parsed AST and augments it with type and reference metadata. +func NewCheckedAST(parsed *AST, typeMap map[int64]*types.Type, refMap map[int64]*ReferenceInfo) *AST { + return &AST{ + expr: parsed.Expr(), + sourceInfo: parsed.SourceInfo(), + typeMap: typeMap, + refMap: refMap, + } +} + +// Copy creates a deep copy of the Expr and SourceInfo values in the input AST.
+// +// Copies of the Expr value are generated using an internal default ExprFactory. +func Copy(a *AST) *AST { + if a == nil { + return nil + } + e := defaultFactory.CopyExpr(a.expr) + if !a.IsChecked() { + return NewAST(e, CopySourceInfo(a.SourceInfo())) + } + typesCopy := make(map[int64]*types.Type, len(a.typeMap)) + for id, t := range a.typeMap { + typesCopy[id] = t + } + refsCopy := make(map[int64]*ReferenceInfo, len(a.refMap)) + for id, r := range a.refMap { + refsCopy[id] = r + } + return NewCheckedAST(NewAST(e, CopySourceInfo(a.SourceInfo())), typesCopy, refsCopy) +} + +// MaxID returns the upper-bound, non-inclusive, of ids present within the AST's Expr value. +func MaxID(a *AST) int64 { + visitor := &maxIDVisitor{maxID: 1} + PostOrderVisit(a.Expr(), visitor) + for id, call := range a.SourceInfo().MacroCalls() { + PostOrderVisit(call, visitor) + if id > visitor.maxID { + visitor.maxID = id + 1 + } + } + return visitor.maxID + 1 +} + +// NewSourceInfo creates a simple SourceInfo object from an input common.Source value. +func NewSourceInfo(src common.Source) *SourceInfo { + var lineOffsets []int32 + var desc string + baseLine := int32(0) + baseCol := int32(0) + if src != nil { + desc = src.Description() + lineOffsets = src.LineOffsets() + // Determine whether the source metadata should be computed relative + // to a base line and column value. This can be determined by requesting + // the location for offset 0 from the source object. + if loc, found := src.OffsetLocation(0); found { + baseLine = int32(loc.Line()) - 1 + baseCol = int32(loc.Column()) + } + } + return &SourceInfo{ + desc: desc, + lines: lineOffsets, + baseLine: baseLine, + baseCol: baseCol, + offsetRanges: make(map[int64]OffsetRange), + macroCalls: make(map[int64]Expr), + } +} + +// CopySourceInfo creates a deep copy of the MacroCalls within the input SourceInfo. +// +// Copies of macro Expr values are generated using an internal default ExprFactory. +func CopySourceInfo(info *SourceInfo) *SourceInfo { + if info == nil { + return nil + } + rangesCopy := make(map[int64]OffsetRange, len(info.offsetRanges)) + for id, off := range info.offsetRanges { + rangesCopy[id] = off + } + callsCopy := make(map[int64]Expr, len(info.macroCalls)) + for id, call := range info.macroCalls { + callsCopy[id] = defaultFactory.CopyExpr(call) + } + return &SourceInfo{ + syntax: info.syntax, + desc: info.desc, + lines: info.lines, + baseLine: info.baseLine, + baseCol: info.baseCol, + offsetRanges: rangesCopy, + macroCalls: callsCopy, + } +} + +// SourceInfo records basic information about the expression as a textual input and +// as a parsed expression value. +type SourceInfo struct { + syntax string + desc string + lines []int32 + baseLine int32 + baseCol int32 + offsetRanges map[int64]OffsetRange + macroCalls map[int64]Expr +} + +// SyntaxVersion returns the syntax version associated with the text expression. +func (s *SourceInfo) SyntaxVersion() string { + if s == nil { + return "" + } + return s.syntax +} + +// Description provides information about where the expression came from. +func (s *SourceInfo) Description() string { + if s == nil { + return "" + } + return s.desc +} + +// LineOffsets returns a list of the 0-based character offsets in the input text where newlines appear. 
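+// For example (illustrative, following the common.Source conventions), the
+// two-line source "a\nbc" yields offsets [2, 5]: one entry per line, each
+// giving the offset just past that line, i.e. the start of the next line when
+// one exists.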
+func (s *SourceInfo) LineOffsets() []int32 { + if s == nil { + return []int32{} + } + return s.lines +} + +// MacroCalls returns a map of expression id to ast.Expr value where the id represents the expression +// node where the macro was inserted into the AST, and the ast.Expr value represents the original call +// signature which was replaced. +func (s *SourceInfo) MacroCalls() map[int64]Expr { + if s == nil { + return map[int64]Expr{} + } + return s.macroCalls +} + +// GetMacroCall returns the original ast.Expr value for the given expression if it was generated via +// a macro replacement. +// +// Note, parsing options must be enabled to track macro calls before this method will return a value. +func (s *SourceInfo) GetMacroCall(id int64) (Expr, bool) { + e, found := s.MacroCalls()[id] + return e, found +} + +// SetMacroCall records a macro call at a specific location. +func (s *SourceInfo) SetMacroCall(id int64, e Expr) { + if s != nil { + s.macroCalls[id] = e + } +} + +// ClearMacroCall removes the macro call at the given expression id. +func (s *SourceInfo) ClearMacroCall(id int64) { + if s != nil { + delete(s.macroCalls, id) + } +} + +// OffsetRanges returns a map of expression id to OffsetRange values where the range indicates either: +// the start and end position in the input stream where the expression occurs, or the start position +// only. If the range only captures start position, the stop position of the range will be equal to +// the start. +func (s *SourceInfo) OffsetRanges() map[int64]OffsetRange { + if s == nil { + return map[int64]OffsetRange{} + } + return s.offsetRanges +} + +// GetOffsetRange retrieves an OffsetRange for the given expression id if one exists. +func (s *SourceInfo) GetOffsetRange(id int64) (OffsetRange, bool) { + if s == nil { + return OffsetRange{}, false + } + o, found := s.offsetRanges[id] + return o, found +} + +// SetOffsetRange sets the OffsetRange for the given expression id. +func (s *SourceInfo) SetOffsetRange(id int64, o OffsetRange) { + if s == nil { + return + } + s.offsetRanges[id] = o +} + +// ClearOffsetRange removes the OffsetRange for the given expression id. +func (s *SourceInfo) ClearOffsetRange(id int64) { + if s != nil { + delete(s.offsetRanges, id) + } +} + +// GetStartLocation calculates the human-readable 1-based line and 0-based column of the first character +// of the expression node at the id. +func (s *SourceInfo) GetStartLocation(id int64) common.Location { + if o, found := s.GetOffsetRange(id); found { + return s.GetLocationByOffset(o.Start) + } + return common.NoLocation +} + +// GetStopLocation calculates the human-readable 1-based line and 0-based column of the last character for +// the expression node at the given id. +// +// If the SourceInfo was generated from a serialized protobuf representation, the stop location will +// be identical to the start location for the expression. +func (s *SourceInfo) GetStopLocation(id int64) common.Location { + if o, found := s.GetOffsetRange(id); found { + return s.GetLocationByOffset(o.Stop) + } + return common.NoLocation +} + +// GetLocationByOffset returns the line and column information for a given character offset. 
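+// For example (illustrative): given line offsets [2, 5] for the source "a\nbc",
+// offset 3 resolves to line 2, column 1 (0-based), i.e. the 'c'.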
+func (s *SourceInfo) GetLocationByOffset(offset int32) common.Location { + line := 1 + col := int(offset) + for _, lineOffset := range s.LineOffsets() { + if lineOffset > offset { + break + } + line++ + col = int(offset - lineOffset) + } + return common.NewLocation(line, col) +} + +// ComputeOffset calculates the 0-based character offset from a 1-based line and 0-based column. +func (s *SourceInfo) ComputeOffset(line, col int32) int32 { + if s != nil { + line = s.baseLine + line + col = s.baseCol + col + } + if line == 1 { + return col + } + if line < 1 || line > int32(len(s.LineOffsets())) { + return -1 + } + offset := s.LineOffsets()[line-2] + return offset + col +} + +// OffsetRange captures the start and stop positions of a section of text in the input expression. +type OffsetRange struct { + Start int32 + Stop int32 +} + +// ReferenceInfo contains a CEL native representation of an identifier reference which may refer to +// either a qualified identifier name, a set of overload ids, or a constant value from an enum. +type ReferenceInfo struct { + Name string + OverloadIDs []string + Value ref.Val +} + +// NewIdentReference creates a ReferenceInfo instance for an identifier with an optional constant value. +func NewIdentReference(name string, value ref.Val) *ReferenceInfo { + return &ReferenceInfo{Name: name, Value: value} +} + +// NewFunctionReference creates a ReferenceInfo instance for a set of function overloads. +func NewFunctionReference(overloads ...string) *ReferenceInfo { + info := &ReferenceInfo{} + for _, id := range overloads { + info.AddOverload(id) + } + return info +} + +// AddOverload appends a function overload ID to the ReferenceInfo. +func (r *ReferenceInfo) AddOverload(overloadID string) { + for _, id := range r.OverloadIDs { + if id == overloadID { + return + } + } + r.OverloadIDs = append(r.OverloadIDs, overloadID) +} + +// Equals returns whether two references are identical to each other. +func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool { + if r.Name != other.Name { + return false + } + if len(r.OverloadIDs) != len(other.OverloadIDs) { + return false + } + if len(r.OverloadIDs) != 0 { + overloadMap := make(map[string]struct{}, len(r.OverloadIDs)) + for _, id := range r.OverloadIDs { + overloadMap[id] = struct{}{} + } + for _, id := range other.OverloadIDs { + _, found := overloadMap[id] + if !found { + return false + } + } + } + if r.Value == nil && other.Value == nil { + return true + } + if r.Value == nil && other.Value != nil || + r.Value != nil && other.Value == nil || + r.Value.Equal(other.Value) != types.True { + return false + } + return true +} + +type maxIDVisitor struct { + maxID int64 + *baseVisitor +} + +// VisitExpr updates the max identifier if the incoming expression id is greater than previously observed. +func (v *maxIDVisitor) VisitExpr(e Expr) { + if v.maxID < e.ID() { + v.maxID = e.ID() + } +} + +// VisitEntryExpr updates the max identifier if the incoming entry id is greater than previously observed. 
+func (v *maxIDVisitor) VisitEntryExpr(e EntryExpr) { + if v.maxID < e.ID() { + v.maxID = e.ID() + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/ast/conversion.go b/hack/tools/vendor/github.com/google/cel-go/common/ast/conversion.go new file mode 100644 index 0000000000..435d8f6547 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/ast/conversion.go @@ -0,0 +1,659 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "fmt" + + "google.golang.org/protobuf/proto" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + celpb "cel.dev/expr" + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +// ToProto converts an AST to a CheckedExpr protobuf. +func ToProto(ast *AST) (*exprpb.CheckedExpr, error) { + refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap())) + for id, ref := range ast.ReferenceMap() { + r, err := ReferenceInfoToProto(ref) + if err != nil { + return nil, err + } + refMap[id] = r + } + typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap())) + for id, typ := range ast.TypeMap() { + t, err := types.TypeToExprType(typ) + if err != nil { + return nil, err + } + typeMap[id] = t + } + e, err := ExprToProto(ast.Expr()) + if err != nil { + return nil, err + } + info, err := SourceInfoToProto(ast.SourceInfo()) + if err != nil { + return nil, err + } + return &exprpb.CheckedExpr{ + Expr: e, + SourceInfo: info, + ReferenceMap: refMap, + TypeMap: typeMap, + }, nil +} + +// ToAST converts a CheckedExpr protobuf to an AST instance. +func ToAST(checked *exprpb.CheckedExpr) (*AST, error) { + refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap())) + for id, ref := range checked.GetReferenceMap() { + r, err := ProtoToReferenceInfo(ref) + if err != nil { + return nil, err + } + refMap[id] = r + } + typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap())) + for id, typ := range checked.GetTypeMap() { + t, err := types.ExprTypeToType(typ) + if err != nil { + return nil, err + } + typeMap[id] = t + } + info, err := ProtoToSourceInfo(checked.GetSourceInfo()) + if err != nil { + return nil, err + } + root, err := ProtoToExpr(checked.GetExpr()) + if err != nil { + return nil, err + } + ast := NewCheckedAST(NewAST(root, info), typeMap, refMap) + return ast, nil +} + +// ProtoToExpr converts a protobuf Expr value to an ast.Expr value.
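+//
+// ProtoToExpr and ExprToProto are intended to round-trip: converting a
+// well-formed protobuf Expr to an ast.Expr and back should preserve ids and
+// structure.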
+func ProtoToExpr(e *exprpb.Expr) (Expr, error) { + factory := NewExprFactory() + return exprInternal(factory, e) +} + +// ProtoToEntryExpr converts a protobuf struct/map entry to an ast.EntryExpr +func ProtoToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) { + factory := NewExprFactory() + switch e.GetKeyKind().(type) { + case *exprpb.Expr_CreateStruct_Entry_FieldKey: + return exprStructField(factory, e.GetId(), e) + case *exprpb.Expr_CreateStruct_Entry_MapKey: + return exprMapEntry(factory, e.GetId(), e) + } + return nil, fmt.Errorf("unsupported expr entry kind: %v", e) +} + +func exprInternal(factory ExprFactory, e *exprpb.Expr) (Expr, error) { + id := e.GetId() + switch e.GetExprKind().(type) { + case *exprpb.Expr_CallExpr: + return exprCall(factory, id, e.GetCallExpr()) + case *exprpb.Expr_ComprehensionExpr: + return exprComprehension(factory, id, e.GetComprehensionExpr()) + case *exprpb.Expr_ConstExpr: + return exprLiteral(factory, id, e.GetConstExpr()) + case *exprpb.Expr_IdentExpr: + return exprIdent(factory, id, e.GetIdentExpr()) + case *exprpb.Expr_ListExpr: + return exprList(factory, id, e.GetListExpr()) + case *exprpb.Expr_SelectExpr: + return exprSelect(factory, id, e.GetSelectExpr()) + case *exprpb.Expr_StructExpr: + s := e.GetStructExpr() + if s.GetMessageName() != "" { + return exprStruct(factory, id, s) + } + return exprMap(factory, id, s) + } + return factory.NewUnspecifiedExpr(id), nil +} + +func exprCall(factory ExprFactory, id int64, call *exprpb.Expr_Call) (Expr, error) { + var err error + args := make([]Expr, len(call.GetArgs())) + for i, a := range call.GetArgs() { + args[i], err = exprInternal(factory, a) + if err != nil { + return nil, err + } + } + if call.GetTarget() == nil { + return factory.NewCall(id, call.GetFunction(), args...), nil + } + + target, err := exprInternal(factory, call.GetTarget()) + if err != nil { + return nil, err + } + return factory.NewMemberCall(id, call.GetFunction(), target, args...), nil +} + +func exprComprehension(factory ExprFactory, id int64, comp *exprpb.Expr_Comprehension) (Expr, error) { + iterRange, err := exprInternal(factory, comp.GetIterRange()) + if err != nil { + return nil, err + } + accuInit, err := exprInternal(factory, comp.GetAccuInit()) + if err != nil { + return nil, err + } + loopCond, err := exprInternal(factory, comp.GetLoopCondition()) + if err != nil { + return nil, err + } + loopStep, err := exprInternal(factory, comp.GetLoopStep()) + if err != nil { + return nil, err + } + result, err := exprInternal(factory, comp.GetResult()) + if err != nil { + return nil, err + } + return factory.NewComprehensionTwoVar(id, + iterRange, + comp.GetIterVar(), + comp.GetIterVar2(), + comp.GetAccuVar(), + accuInit, + loopCond, + loopStep, + result), nil +} + +func exprLiteral(factory ExprFactory, id int64, c *exprpb.Constant) (Expr, error) { + val, err := ConstantToVal(c) + if err != nil { + return nil, err + } + return factory.NewLiteral(id, val), nil +} + +func exprIdent(factory ExprFactory, id int64, i *exprpb.Expr_Ident) (Expr, error) { + return factory.NewIdent(id, i.GetName()), nil +} + +func exprList(factory ExprFactory, id int64, l *exprpb.Expr_CreateList) (Expr, error) { + elems := make([]Expr, len(l.GetElements())) + for i, e := range l.GetElements() { + elem, err := exprInternal(factory, e) + if err != nil { + return nil, err + } + elems[i] = elem + } + return factory.NewList(id, elems, l.GetOptionalIndices()), nil +} + +func exprMap(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, 
error) { + entries := make([]EntryExpr, len(s.GetEntries())) + var err error + for i, entry := range s.GetEntries() { + entries[i], err = exprMapEntry(factory, entry.GetId(), entry) + if err != nil { + return nil, err + } + } + return factory.NewMap(id, entries), nil +} + +func exprMapEntry(factory ExprFactory, id int64, e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) { + k, err := exprInternal(factory, e.GetMapKey()) + if err != nil { + return nil, err + } + v, err := exprInternal(factory, e.GetValue()) + if err != nil { + return nil, err + } + return factory.NewMapEntry(id, k, v, e.GetOptionalEntry()), nil +} + +func exprSelect(factory ExprFactory, id int64, s *exprpb.Expr_Select) (Expr, error) { + op, err := exprInternal(factory, s.GetOperand()) + if err != nil { + return nil, err + } + if s.GetTestOnly() { + return factory.NewPresenceTest(id, op, s.GetField()), nil + } + return factory.NewSelect(id, op, s.GetField()), nil +} + +func exprStruct(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) { + fields := make([]EntryExpr, len(s.GetEntries())) + var err error + for i, field := range s.GetEntries() { + fields[i], err = exprStructField(factory, field.GetId(), field) + if err != nil { + return nil, err + } + } + return factory.NewStruct(id, s.GetMessageName(), fields), nil +} + +func exprStructField(factory ExprFactory, id int64, f *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) { + v, err := exprInternal(factory, f.GetValue()) + if err != nil { + return nil, err + } + return factory.NewStructField(id, f.GetFieldKey(), v, f.GetOptionalEntry()), nil +} + +// ExprToProto serializes an ast.Expr value to a protobuf Expr representation. +func ExprToProto(e Expr) (*exprpb.Expr, error) { + if e == nil { + return &exprpb.Expr{}, nil + } + switch e.Kind() { + case CallKind: + return protoCall(e.ID(), e.AsCall()) + case ComprehensionKind: + return protoComprehension(e.ID(), e.AsComprehension()) + case IdentKind: + return protoIdent(e.ID(), e.AsIdent()) + case ListKind: + return protoList(e.ID(), e.AsList()) + case LiteralKind: + return protoLiteral(e.ID(), e.AsLiteral()) + case MapKind: + return protoMap(e.ID(), e.AsMap()) + case SelectKind: + return protoSelect(e.ID(), e.AsSelect()) + case StructKind: + return protoStruct(e.ID(), e.AsStruct()) + case UnspecifiedExprKind: + // Handle the case where a macro reference may be getting translated. + // A nested macro 'pointer' is a non-zero expression id with no kind set. 
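+		// Only the id is preserved, so macro-call metadata recorded in SourceInfo
+		// can still resolve the reference after serialization.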
+ if e.ID() != 0 { + return &exprpb.Expr{Id: e.ID()}, nil + } + return &exprpb.Expr{}, nil + } + return nil, fmt.Errorf("unsupported expr kind: %v", e) +} + +// EntryExprToProto converts an ast.EntryExpr to a protobuf CreateStruct entry +func EntryExprToProto(e EntryExpr) (*exprpb.Expr_CreateStruct_Entry, error) { + switch e.Kind() { + case MapEntryKind: + return protoMapEntry(e.ID(), e.AsMapEntry()) + case StructFieldKind: + return protoStructField(e.ID(), e.AsStructField()) + case UnspecifiedEntryExprKind: + return &exprpb.Expr_CreateStruct_Entry{}, nil + } + return nil, fmt.Errorf("unsupported expr entry kind: %v", e) +} + +func protoCall(id int64, call CallExpr) (*exprpb.Expr, error) { + var err error + var target *exprpb.Expr + if call.IsMemberFunction() { + target, err = ExprToProto(call.Target()) + if err != nil { + return nil, err + } + } + callArgs := call.Args() + args := make([]*exprpb.Expr, len(callArgs)) + for i, a := range callArgs { + args[i], err = ExprToProto(a) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_CallExpr{ + CallExpr: &exprpb.Expr_Call{ + Function: call.FunctionName(), + Target: target, + Args: args, + }, + }, + }, nil +} + +func protoComprehension(id int64, comp ComprehensionExpr) (*exprpb.Expr, error) { + iterRange, err := ExprToProto(comp.IterRange()) + if err != nil { + return nil, err + } + accuInit, err := ExprToProto(comp.AccuInit()) + if err != nil { + return nil, err + } + loopCond, err := ExprToProto(comp.LoopCondition()) + if err != nil { + return nil, err + } + loopStep, err := ExprToProto(comp.LoopStep()) + if err != nil { + return nil, err + } + result, err := ExprToProto(comp.Result()) + if err != nil { + return nil, err + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_ComprehensionExpr{ + ComprehensionExpr: &exprpb.Expr_Comprehension{ + IterVar: comp.IterVar(), + IterVar2: comp.IterVar2(), + IterRange: iterRange, + AccuVar: comp.AccuVar(), + AccuInit: accuInit, + LoopCondition: loopCond, + LoopStep: loopStep, + Result: result, + }, + }, + }, nil +} + +func protoIdent(id int64, name string) (*exprpb.Expr, error) { + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_IdentExpr{ + IdentExpr: &exprpb.Expr_Ident{ + Name: name, + }, + }, + }, nil +} + +func protoList(id int64, list ListExpr) (*exprpb.Expr, error) { + var err error + elems := make([]*exprpb.Expr, list.Size()) + for i, e := range list.Elements() { + elems[i], err = ExprToProto(e) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_ListExpr{ + ListExpr: &exprpb.Expr_CreateList{ + Elements: elems, + OptionalIndices: list.OptionalIndices(), + }, + }, + }, nil +} + +func protoLiteral(id int64, val ref.Val) (*exprpb.Expr, error) { + c, err := ValToConstant(val) + if err != nil { + return nil, err + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_ConstExpr{ + ConstExpr: c, + }, + }, nil +} + +func protoMap(id int64, m MapExpr) (*exprpb.Expr, error) { + entries := make([]*exprpb.Expr_CreateStruct_Entry, len(m.Entries())) + var err error + for i, e := range m.Entries() { + entries[i], err = EntryExprToProto(e) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_StructExpr{ + StructExpr: &exprpb.Expr_CreateStruct{ + Entries: entries, + }, + }, + }, nil +} + +func protoMapEntry(id int64, e MapEntry) (*exprpb.Expr_CreateStruct_Entry, error) { + k, err := ExprToProto(e.Key()) + if err != nil { + return nil, err + } + 
v, err := ExprToProto(e.Value()) + if err != nil { + return nil, err + } + return &exprpb.Expr_CreateStruct_Entry{ + Id: id, + KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{ + MapKey: k, + }, + Value: v, + OptionalEntry: e.IsOptional(), + }, nil +} + +func protoSelect(id int64, s SelectExpr) (*exprpb.Expr, error) { + op, err := ExprToProto(s.Operand()) + if err != nil { + return nil, err + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_SelectExpr{ + SelectExpr: &exprpb.Expr_Select{ + Operand: op, + Field: s.FieldName(), + TestOnly: s.IsTestOnly(), + }, + }, + }, nil +} + +func protoStruct(id int64, s StructExpr) (*exprpb.Expr, error) { + entries := make([]*exprpb.Expr_CreateStruct_Entry, len(s.Fields())) + var err error + for i, e := range s.Fields() { + entries[i], err = EntryExprToProto(e) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_StructExpr{ + StructExpr: &exprpb.Expr_CreateStruct{ + MessageName: s.TypeName(), + Entries: entries, + }, + }, + }, nil +} + +func protoStructField(id int64, f StructField) (*exprpb.Expr_CreateStruct_Entry, error) { + v, err := ExprToProto(f.Value()) + if err != nil { + return nil, err + } + return &exprpb.Expr_CreateStruct_Entry{ + Id: id, + KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{ + FieldKey: f.Name(), + }, + Value: v, + OptionalEntry: f.IsOptional(), + }, nil +} + +// SourceInfoToProto serializes an ast.SourceInfo value to a protobuf SourceInfo object. +func SourceInfoToProto(info *SourceInfo) (*exprpb.SourceInfo, error) { + if info == nil { + return &exprpb.SourceInfo{}, nil + } + sourceInfo := &exprpb.SourceInfo{ + SyntaxVersion: info.SyntaxVersion(), + Location: info.Description(), + LineOffsets: info.LineOffsets(), + Positions: make(map[int64]int32, len(info.OffsetRanges())), + MacroCalls: make(map[int64]*exprpb.Expr, len(info.MacroCalls())), + } + for id, offset := range info.OffsetRanges() { + sourceInfo.Positions[id] = offset.Start + } + for id, e := range info.MacroCalls() { + call, err := ExprToProto(e) + if err != nil { + return nil, err + } + sourceInfo.MacroCalls[id] = call + } + return sourceInfo, nil +} + +// ProtoToSourceInfo deserializes the protobuf into a native SourceInfo value. +func ProtoToSourceInfo(info *exprpb.SourceInfo) (*SourceInfo, error) { + sourceInfo := &SourceInfo{ + syntax: info.GetSyntaxVersion(), + desc: info.GetLocation(), + lines: info.GetLineOffsets(), + offsetRanges: make(map[int64]OffsetRange, len(info.GetPositions())), + macroCalls: make(map[int64]Expr, len(info.GetMacroCalls())), + } + for id, offset := range info.GetPositions() { + sourceInfo.SetOffsetRange(id, OffsetRange{Start: offset, Stop: offset}) + } + for id, e := range info.GetMacroCalls() { + call, err := ProtoToExpr(e) + if err != nil { + return nil, err + } + sourceInfo.SetMacroCall(id, call) + } + return sourceInfo, nil +} + +// ReferenceInfoToProto converts a ReferenceInfo instance to a protobuf Reference suitable for serialization. +func ReferenceInfoToProto(info *ReferenceInfo) (*exprpb.Reference, error) { + c, err := ValToConstant(info.Value) + if err != nil { + return nil, err + } + return &exprpb.Reference{ + Name: info.Name, + OverloadId: info.OverloadIDs, + Value: c, + }, nil +} + +// ProtoToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance. 
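+//
+// Any constant value attached to the reference is limited to the scalar kinds
+// handled by ConstantToVal.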
+func ProtoToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) { + v, err := ConstantToVal(ref.GetValue()) + if err != nil { + return nil, err + } + return &ReferenceInfo{ + Name: ref.GetName(), + OverloadIDs: ref.GetOverloadId(), + Value: v, + }, nil +} + +// ValToConstant converts a CEL-native ref.Val to a protobuf Constant. +// +// Only simple scalar types are supported by this method. +func ValToConstant(v ref.Val) (*exprpb.Constant, error) { + if v == nil { + return nil, nil + } + switch v.Type() { + case types.BoolType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil + case types.BytesType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil + case types.DoubleType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil + case types.IntType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil + case types.NullType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil + case types.StringType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil + case types.UintType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil + } + return nil, fmt.Errorf("unsupported constant kind: %v", v.Type()) +} + +// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val. +func ConstantToVal(c *exprpb.Constant) (ref.Val, error) { + return AlphaProtoConstantAsVal(c) +} + +// AlphaProtoConstantAsVal converts a v1alpha1.Constant protobuf to a CEL-native ref.Val. +func AlphaProtoConstantAsVal(c *exprpb.Constant) (ref.Val, error) { + if c == nil { + return nil, nil + } + canonical := &celpb.Constant{} + if err := convertProto(c, canonical); err != nil { + return nil, err + } + return ProtoConstantAsVal(canonical) +} + +// ProtoConstantAsVal converts a canonical celpb.Constant protobuf to a CEL-native ref.Val. +func ProtoConstantAsVal(c *celpb.Constant) (ref.Val, error) { + switch c.GetConstantKind().(type) { + case *celpb.Constant_BoolValue: + return types.Bool(c.GetBoolValue()), nil + case *celpb.Constant_BytesValue: + return types.Bytes(c.GetBytesValue()), nil + case *celpb.Constant_DoubleValue: + return types.Double(c.GetDoubleValue()), nil + case *celpb.Constant_Int64Value: + return types.Int(c.GetInt64Value()), nil + case *celpb.Constant_NullValue: + return types.NullValue, nil + case *celpb.Constant_StringValue: + return types.String(c.GetStringValue()), nil + case *celpb.Constant_Uint64Value: + return types.Uint(c.GetUint64Value()), nil + } + return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind()) +} + +func convertProto(src, dst proto.Message) error { + pb, err := proto.Marshal(src) + if err != nil { + return err + } + err = proto.Unmarshal(pb, dst) + return err +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/ast/expr.go b/hack/tools/vendor/github.com/google/cel-go/common/ast/expr.go new file mode 100644 index 0000000000..9f55cb3b9f --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/ast/expr.go @@ -0,0 +1,884 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// ExprKind represents the expression node kind. +type ExprKind int + +const ( + // UnspecifiedExprKind represents an unset expression with no specified properties. + UnspecifiedExprKind ExprKind = iota + + // CallKind represents a function call. + CallKind + + // ComprehensionKind represents a comprehension expression generated by a macro. + ComprehensionKind + + // IdentKind represents a simple variable, constant, or type identifier. + IdentKind + + // ListKind represents a list literal expression. + ListKind + + // LiteralKind represents a primitive scalar literal. + LiteralKind + + // MapKind represents a map literal expression. + MapKind + + // SelectKind represents a field selection expression. + SelectKind + + // StructKind represents a struct literal expression. + StructKind +) + +// Expr represents the base expression node in a CEL abstract syntax tree. +// +// Depending on the `Kind()` value, the Expr may be converted to a concrete expression type +// as indicated by the `As` methods. +type Expr interface { + // ID of the expression as it appears in the AST + ID() int64 + + // Kind of the expression node. See ExprKind for the valid enum values. + Kind() ExprKind + + // AsCall adapts the expr into a CallExpr + // + // The Kind() must be equal to a CallKind for the conversion to be well-defined. + AsCall() CallExpr + + // AsComprehension adapts the expr into a ComprehensionExpr. + // + // The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined. + AsComprehension() ComprehensionExpr + + // AsIdent adapts the expr into an identifier string. + // + // The Kind() must be equal to an IdentKind for the conversion to be well-defined. + AsIdent() string + + // AsLiteral adapts the expr into a constant ref.Val. + // + // The Kind() must be equal to a LiteralKind for the conversion to be well-defined. + AsLiteral() ref.Val + + // AsList adapts the expr into a ListExpr. + // + // The Kind() must be equal to a ListKind for the conversion to be well-defined. + AsList() ListExpr + + // AsMap adapts the expr into a MapExpr. + // + // The Kind() must be equal to a MapKind for the conversion to be well-defined. + AsMap() MapExpr + + // AsSelect adapts the expr into a SelectExpr. + // + // The Kind() must be equal to a SelectKind for the conversion to be well-defined. + AsSelect() SelectExpr + + // AsStruct adapts the expr into a StructExpr. + // + // The Kind() must be equal to a StructKind for the conversion to be well-defined. + AsStruct() StructExpr + + // RenumberIDs performs an in-place update of the expression and all of its descendants' numeric ids. + RenumberIDs(IDGenerator) + + // SetKindCase replaces the contents of the current expression with the contents of the other. + // + // SetKindCase takes ownership of any expression instances referenced within the input Expr. + // A shallow copy is made of the Expr value itself, but not a deep one. + // + // This method should only be used during AST rewrites using temporary Expr values.
+	SetKindCase(Expr)
+
+	// isExpr is a marker interface.
+	isExpr()
+}
+
+// EntryExprKind represents the possible EntryExpr kinds.
+type EntryExprKind int
+
+const (
+	// UnspecifiedEntryExprKind indicates that the entry expr is not set.
+	UnspecifiedEntryExprKind EntryExprKind = iota
+
+	// MapEntryKind indicates that the entry is a MapEntry type with key and value expressions.
+	MapEntryKind
+
+	// StructFieldKind indicates that the entry is a StructField with a field name and initializer
+	// expression.
+	StructFieldKind
+)
+
+// EntryExpr represents the base entry expression in a CEL map or struct literal.
+type EntryExpr interface {
+	// ID of the entry as it appears in the AST.
+	ID() int64
+
+	// Kind of the entry expression node. See EntryExprKind for valid enum values.
+	Kind() EntryExprKind
+
+	// AsMapEntry casts the EntryExpr to a MapEntry.
+	//
+	// The Kind() must be equal to MapEntryKind for the conversion to be well-defined.
+	AsMapEntry() MapEntry
+
+	// AsStructField casts the EntryExpr to a StructField.
+	//
+	// The Kind() must be equal to StructFieldKind for the conversion to be well-defined.
+	AsStructField() StructField
+
+	// RenumberIDs performs an in-place update of the expression and all of its descendants' numeric ids.
+	RenumberIDs(IDGenerator)
+
+	isEntryExpr()
+}
+
+// IDGenerator produces unique ids suitable for tagging expression nodes.
+type IDGenerator func(originalID int64) int64
+
+// CallExpr defines an interface for inspecting a function call and its arguments.
+type CallExpr interface {
+	// FunctionName returns the name of the function.
+	FunctionName() string
+
+	// IsMemberFunction returns whether the call has a non-nil target indicating it is a member function.
+	IsMemberFunction() bool
+
+	// Target returns the target of the expression if one is present.
+	Target() Expr
+
+	// Args returns the list of call arguments, excluding the target.
+	Args() []Expr
+
+	// marker interface method
+	isExpr()
+}
+
+// ListExpr defines an interface for inspecting a list literal expression.
+type ListExpr interface {
+	// Elements returns the list elements as navigable expressions.
+	Elements() []Expr
+
+	// OptionalIndices returns the list of optional indices in the list literal.
+	OptionalIndices() []int32
+
+	// IsOptional indicates whether the given element index is optional.
+	IsOptional(int32) bool
+
+	// Size returns the number of elements in the list.
+	Size() int
+
+	// marker interface method
+	isExpr()
+}
+
+// SelectExpr defines an interface for inspecting a select expression.
+type SelectExpr interface {
+	// Operand returns the selection operand expression.
+	Operand() Expr
+
+	// FieldName returns the field name being selected from the operand.
+	FieldName() string
+
+	// IsTestOnly indicates whether the select expression is a presence test generated by a macro.
+	IsTestOnly() bool
+
+	// marker interface method
+	isExpr()
+}
+
+// MapExpr defines an interface for inspecting a map expression.
+type MapExpr interface {
+	// Entries returns the map key value pairs as EntryExpr values.
+	Entries() []EntryExpr
+
+	// Size returns the number of entries in the map.
+	Size() int
+
+	// marker interface method
+	isExpr()
+}
+
+// MapEntry defines an interface for inspecting a map entry.
+type MapEntry interface {
+	// Key returns the map entry key expression.
+	Key() Expr
+
+	// Value returns the map entry value expression.
+	Value() Expr
+
+	// IsOptional returns whether the entry is optional.
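+	//
+	// Editor's sketch (not part of the vendored file): EntryExpr values are
+	// discriminated the same way as Expr values, via Kind() plus the matching
+	// As* conversion:
+	//
+	//	switch ent.Kind() {
+	//	case MapEntryKind:
+	//		_ = ent.AsMapEntry().Key()
+	//	case StructFieldKind:
+	//		_ = ent.AsStructField().Name()
+	//	}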
+	IsOptional() bool
+
+	// marker interface method
+	isEntryExpr()
+}
+
+// StructExpr defines an interface for inspecting a struct and its field initializers.
+type StructExpr interface {
+	// TypeName returns the struct type name.
+	TypeName() string
+
+	// Fields returns the set of field initializers in the struct expression as EntryExpr values.
+	Fields() []EntryExpr
+
+	// marker interface method
+	isExpr()
+}
+
+// StructField defines an interface for inspecting a struct field initialization.
+type StructField interface {
+	// Name returns the name of the field.
+	Name() string
+
+	// Value returns the field initialization expression.
+	Value() Expr
+
+	// IsOptional returns whether the field is optional.
+	IsOptional() bool
+
+	// marker interface method
+	isEntryExpr()
+}
+
+// ComprehensionExpr defines an interface for inspecting a comprehension expression.
+type ComprehensionExpr interface {
+	// IterRange returns the iteration range expression.
+	IterRange() Expr
+
+	// IterVar returns the iteration variable name.
+	//
+	// For one-variable comprehensions, the iter var refers to the element value
+	// when iterating over a list, or the map key when iterating over a map.
+	//
+	// For two-variable comprehensions, the iter var refers to the list index or the
+	// map key.
+	IterVar() string
+
+	// IterVar2 returns the second iteration variable name.
+	//
+	// When the value is non-empty, the comprehension is a two-variable comprehension.
+	IterVar2() string
+
+	// HasIterVar2 returns true if the second iteration variable is non-empty.
+	HasIterVar2() bool
+
+	// AccuVar returns the accumulation variable name.
+	AccuVar() string
+
+	// AccuInit returns the accumulation variable initialization expression.
+	AccuInit() Expr
+
+	// LoopCondition returns the loop condition expression.
+	LoopCondition() Expr
+
+	// LoopStep returns the loop step expression.
+	LoopStep() Expr
+
+	// Result returns the comprehension result expression.
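+	//
+	// Editor's sketch (not part of the vendored file): as an illustration of the
+	// fields above, the macro `[1, 2].all(x, x > 0)` lowers to a one-variable
+	// comprehension where, roughly:
+	//
+	//	IterRange:     the list [1, 2]
+	//	IterVar:       "x"           (IterVar2 empty; HasIterVar2() == false)
+	//	AccuVar:       "__result__"  (see NewAccuIdent in factory.go)
+	//	AccuInit:      true
+	//	LoopCondition: a short-circuit check on __result__
+	//	LoopStep:      __result__ && x > 0
+	//	Result:        __result__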
+ Result() Expr + + // marker interface method + isExpr() +} + +var _ Expr = &expr{} + +type expr struct { + id int64 + exprKindCase +} + +type exprKindCase interface { + Kind() ExprKind + + renumberIDs(IDGenerator) + + isExpr() +} + +func (e *expr) ID() int64 { + if e == nil { + return 0 + } + return e.id +} + +func (e *expr) Kind() ExprKind { + if e == nil || e.exprKindCase == nil { + return UnspecifiedExprKind + } + return e.exprKindCase.Kind() +} + +func (e *expr) AsCall() CallExpr { + if e.Kind() != CallKind { + return nilCall + } + return e.exprKindCase.(CallExpr) +} + +func (e *expr) AsComprehension() ComprehensionExpr { + if e.Kind() != ComprehensionKind { + return nilCompre + } + return e.exprKindCase.(ComprehensionExpr) +} + +func (e *expr) AsIdent() string { + if e.Kind() != IdentKind { + return "" + } + return string(e.exprKindCase.(baseIdentExpr)) +} + +func (e *expr) AsLiteral() ref.Val { + if e.Kind() != LiteralKind { + return nil + } + return e.exprKindCase.(*baseLiteral).Val +} + +func (e *expr) AsList() ListExpr { + if e.Kind() != ListKind { + return nilList + } + return e.exprKindCase.(ListExpr) +} + +func (e *expr) AsMap() MapExpr { + if e.Kind() != MapKind { + return nilMap + } + return e.exprKindCase.(MapExpr) +} + +func (e *expr) AsSelect() SelectExpr { + if e.Kind() != SelectKind { + return nilSel + } + return e.exprKindCase.(SelectExpr) +} + +func (e *expr) AsStruct() StructExpr { + if e.Kind() != StructKind { + return nilStruct + } + return e.exprKindCase.(StructExpr) +} + +func (e *expr) SetKindCase(other Expr) { + if e == nil { + return + } + if other == nil { + e.exprKindCase = nil + return + } + switch other.Kind() { + case CallKind: + c := other.AsCall() + e.exprKindCase = &baseCallExpr{ + function: c.FunctionName(), + target: c.Target(), + args: c.Args(), + isMember: c.IsMemberFunction(), + } + case ComprehensionKind: + c := other.AsComprehension() + e.exprKindCase = &baseComprehensionExpr{ + iterRange: c.IterRange(), + iterVar: c.IterVar(), + iterVar2: c.IterVar2(), + accuVar: c.AccuVar(), + accuInit: c.AccuInit(), + loopCond: c.LoopCondition(), + loopStep: c.LoopStep(), + result: c.Result(), + } + case IdentKind: + e.exprKindCase = baseIdentExpr(other.AsIdent()) + case ListKind: + l := other.AsList() + optIndexMap := make(map[int32]struct{}, len(l.OptionalIndices())) + for _, idx := range l.OptionalIndices() { + optIndexMap[idx] = struct{}{} + } + e.exprKindCase = &baseListExpr{ + elements: l.Elements(), + optIndices: l.OptionalIndices(), + optIndexMap: optIndexMap, + } + case LiteralKind: + e.exprKindCase = &baseLiteral{Val: other.AsLiteral()} + case MapKind: + e.exprKindCase = &baseMapExpr{ + entries: other.AsMap().Entries(), + } + case SelectKind: + s := other.AsSelect() + e.exprKindCase = &baseSelectExpr{ + operand: s.Operand(), + field: s.FieldName(), + testOnly: s.IsTestOnly(), + } + case StructKind: + s := other.AsStruct() + e.exprKindCase = &baseStructExpr{ + typeName: s.TypeName(), + fields: s.Fields(), + } + case UnspecifiedExprKind: + e.exprKindCase = nil + } +} + +func (e *expr) RenumberIDs(idGen IDGenerator) { + if e == nil { + return + } + e.id = idGen(e.id) + if e.exprKindCase != nil { + e.exprKindCase.renumberIDs(idGen) + } +} + +type baseCallExpr struct { + function string + target Expr + args []Expr + isMember bool +} + +func (*baseCallExpr) Kind() ExprKind { + return CallKind +} + +func (e *baseCallExpr) FunctionName() string { + if e == nil { + return "" + } + return e.function +} + +func (e *baseCallExpr) IsMemberFunction() bool { + if 
e == nil { + return false + } + return e.isMember +} + +func (e *baseCallExpr) Target() Expr { + if e == nil || !e.IsMemberFunction() { + return nilExpr + } + return e.target +} + +func (e *baseCallExpr) Args() []Expr { + if e == nil { + return []Expr{} + } + return e.args +} + +func (e *baseCallExpr) renumberIDs(idGen IDGenerator) { + if e.IsMemberFunction() { + e.Target().RenumberIDs(idGen) + } + for _, arg := range e.Args() { + arg.RenumberIDs(idGen) + } +} + +func (*baseCallExpr) isExpr() {} + +var _ ComprehensionExpr = &baseComprehensionExpr{} + +type baseComprehensionExpr struct { + iterRange Expr + iterVar string + iterVar2 string + accuVar string + accuInit Expr + loopCond Expr + loopStep Expr + result Expr +} + +func (*baseComprehensionExpr) Kind() ExprKind { + return ComprehensionKind +} + +func (e *baseComprehensionExpr) IterRange() Expr { + if e == nil { + return nilExpr + } + return e.iterRange +} + +func (e *baseComprehensionExpr) IterVar() string { + return e.iterVar +} + +func (e *baseComprehensionExpr) IterVar2() string { + return e.iterVar2 +} + +func (e *baseComprehensionExpr) HasIterVar2() bool { + return e.iterVar2 != "" +} + +func (e *baseComprehensionExpr) AccuVar() string { + return e.accuVar +} + +func (e *baseComprehensionExpr) AccuInit() Expr { + if e == nil { + return nilExpr + } + return e.accuInit +} + +func (e *baseComprehensionExpr) LoopCondition() Expr { + if e == nil { + return nilExpr + } + return e.loopCond +} + +func (e *baseComprehensionExpr) LoopStep() Expr { + if e == nil { + return nilExpr + } + return e.loopStep +} + +func (e *baseComprehensionExpr) Result() Expr { + if e == nil { + return nilExpr + } + return e.result +} + +func (e *baseComprehensionExpr) renumberIDs(idGen IDGenerator) { + e.IterRange().RenumberIDs(idGen) + e.AccuInit().RenumberIDs(idGen) + e.LoopCondition().RenumberIDs(idGen) + e.LoopStep().RenumberIDs(idGen) + e.Result().RenumberIDs(idGen) +} + +func (*baseComprehensionExpr) isExpr() {} + +var _ exprKindCase = baseIdentExpr("") + +type baseIdentExpr string + +func (baseIdentExpr) Kind() ExprKind { + return IdentKind +} + +func (e baseIdentExpr) renumberIDs(IDGenerator) {} + +func (baseIdentExpr) isExpr() {} + +var _ exprKindCase = &baseLiteral{} +var _ ref.Val = &baseLiteral{} + +type baseLiteral struct { + ref.Val +} + +func (*baseLiteral) Kind() ExprKind { + return LiteralKind +} + +func (l *baseLiteral) renumberIDs(IDGenerator) {} + +func (*baseLiteral) isExpr() {} + +var _ ListExpr = &baseListExpr{} + +type baseListExpr struct { + elements []Expr + optIndices []int32 + optIndexMap map[int32]struct{} +} + +func (*baseListExpr) Kind() ExprKind { + return ListKind +} + +func (e *baseListExpr) Elements() []Expr { + if e == nil { + return []Expr{} + } + return e.elements +} + +func (e *baseListExpr) IsOptional(index int32) bool { + _, found := e.optIndexMap[index] + return found +} + +func (e *baseListExpr) OptionalIndices() []int32 { + if e == nil { + return []int32{} + } + return e.optIndices +} + +func (e *baseListExpr) Size() int { + return len(e.Elements()) +} + +func (e *baseListExpr) renumberIDs(idGen IDGenerator) { + for _, elem := range e.Elements() { + elem.RenumberIDs(idGen) + } +} + +func (*baseListExpr) isExpr() {} + +type baseMapExpr struct { + entries []EntryExpr +} + +func (*baseMapExpr) Kind() ExprKind { + return MapKind +} + +func (e *baseMapExpr) Entries() []EntryExpr { + if e == nil { + return []EntryExpr{} + } + return e.entries +} + +func (e *baseMapExpr) Size() int { + return len(e.Entries()) +} + +func (e 
*baseMapExpr) renumberIDs(idGen IDGenerator) { + for _, entry := range e.Entries() { + entry.RenumberIDs(idGen) + } +} + +func (*baseMapExpr) isExpr() {} + +type baseSelectExpr struct { + operand Expr + field string + testOnly bool +} + +func (*baseSelectExpr) Kind() ExprKind { + return SelectKind +} + +func (e *baseSelectExpr) Operand() Expr { + if e == nil || e.operand == nil { + return nilExpr + } + return e.operand +} + +func (e *baseSelectExpr) FieldName() string { + if e == nil { + return "" + } + return e.field +} + +func (e *baseSelectExpr) IsTestOnly() bool { + if e == nil { + return false + } + return e.testOnly +} + +func (e *baseSelectExpr) renumberIDs(idGen IDGenerator) { + e.Operand().RenumberIDs(idGen) +} + +func (*baseSelectExpr) isExpr() {} + +type baseStructExpr struct { + typeName string + fields []EntryExpr +} + +func (*baseStructExpr) Kind() ExprKind { + return StructKind +} + +func (e *baseStructExpr) TypeName() string { + if e == nil { + return "" + } + return e.typeName +} + +func (e *baseStructExpr) Fields() []EntryExpr { + if e == nil { + return []EntryExpr{} + } + return e.fields +} + +func (e *baseStructExpr) renumberIDs(idGen IDGenerator) { + for _, f := range e.Fields() { + f.RenumberIDs(idGen) + } +} + +func (*baseStructExpr) isExpr() {} + +type entryExprKindCase interface { + Kind() EntryExprKind + + renumberIDs(IDGenerator) + + isEntryExpr() +} + +var _ EntryExpr = &entryExpr{} + +type entryExpr struct { + id int64 + entryExprKindCase +} + +func (e *entryExpr) ID() int64 { + return e.id +} + +func (e *entryExpr) AsMapEntry() MapEntry { + if e.Kind() != MapEntryKind { + return nilMapEntry + } + return e.entryExprKindCase.(MapEntry) +} + +func (e *entryExpr) AsStructField() StructField { + if e.Kind() != StructFieldKind { + return nilStructField + } + return e.entryExprKindCase.(StructField) +} + +func (e *entryExpr) RenumberIDs(idGen IDGenerator) { + e.id = idGen(e.id) + e.entryExprKindCase.renumberIDs(idGen) +} + +type baseMapEntry struct { + key Expr + value Expr + isOptional bool +} + +func (e *baseMapEntry) Kind() EntryExprKind { + return MapEntryKind +} + +func (e *baseMapEntry) Key() Expr { + if e == nil { + return nilExpr + } + return e.key +} + +func (e *baseMapEntry) Value() Expr { + if e == nil { + return nilExpr + } + return e.value +} + +func (e *baseMapEntry) IsOptional() bool { + if e == nil { + return false + } + return e.isOptional +} + +func (e *baseMapEntry) renumberIDs(idGen IDGenerator) { + e.Key().RenumberIDs(idGen) + e.Value().RenumberIDs(idGen) +} + +func (*baseMapEntry) isEntryExpr() {} + +type baseStructField struct { + field string + value Expr + isOptional bool +} + +func (f *baseStructField) Kind() EntryExprKind { + return StructFieldKind +} + +func (f *baseStructField) Name() string { + if f == nil { + return "" + } + return f.field +} + +func (f *baseStructField) Value() Expr { + if f == nil { + return nilExpr + } + return f.value +} + +func (f *baseStructField) IsOptional() bool { + if f == nil { + return false + } + return f.isOptional +} + +func (f *baseStructField) renumberIDs(idGen IDGenerator) { + f.Value().RenumberIDs(idGen) +} + +func (*baseStructField) isEntryExpr() {} + +var ( + nilExpr *expr = nil + nilCall *baseCallExpr = nil + nilCompre *baseComprehensionExpr = nil + nilList *baseListExpr = nil + nilMap *baseMapExpr = nil + nilMapEntry *baseMapEntry = nil + nilSel *baseSelectExpr = nil + nilStruct *baseStructExpr = nil + nilStructField *baseStructField = nil +) diff --git 
a/hack/tools/vendor/github.com/google/cel-go/common/ast/factory.go b/hack/tools/vendor/github.com/google/cel-go/common/ast/factory.go
new file mode 100644
index 0000000000..994806b793
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/ast/factory.go
@@ -0,0 +1,313 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import "github.com/google/cel-go/common/types/ref"
+
+// ExprFactory interface defines a set of methods necessary for building native expression values.
+type ExprFactory interface {
+	// CopyExpr creates a deep copy of the input Expr value.
+	CopyExpr(Expr) Expr
+
+	// CopyEntryExpr creates a deep copy of the input EntryExpr value.
+	CopyEntryExpr(EntryExpr) EntryExpr
+
+	// NewCall creates an Expr value representing a global function call.
+	NewCall(id int64, function string, args ...Expr) Expr
+
+	// NewComprehension creates an Expr value representing a one-variable comprehension over a value range.
+	NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr
+
+	// NewComprehensionTwoVar creates an Expr value representing a two-variable comprehension over a value range.
+	NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr
+
+	// NewMemberCall creates an Expr value representing a member function call.
+	NewMemberCall(id int64, function string, receiver Expr, args ...Expr) Expr
+
+	// NewIdent creates an Expr value representing an identifier.
+	NewIdent(id int64, name string) Expr
+
+	// NewAccuIdent creates an Expr value representing an accumulator identifier within a
+	// comprehension.
+	NewAccuIdent(id int64) Expr
+
+	// NewLiteral creates an Expr value representing a literal value, such as a string or integer.
+	NewLiteral(id int64, value ref.Val) Expr
+
+	// NewList creates an Expr value representing a list literal expression with optional indices.
+	//
+	// Optional indices will typically be empty unless the CEL optional types are enabled.
+	NewList(id int64, elems []Expr, optIndices []int32) Expr
+
+	// NewMap creates an Expr value representing a map literal expression.
+	NewMap(id int64, entries []EntryExpr) Expr
+
+	// NewMapEntry creates a MapEntry with a given key, value, and a flag indicating whether
+	// the key is optionally set.
+	NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr
+
+	// NewPresenceTest creates an Expr representing a field presence test on an operand expression.
+	NewPresenceTest(id int64, operand Expr, field string) Expr
+
+	// NewSelect creates an Expr representing a field selection on an operand expression.
+	NewSelect(id int64, operand Expr, field string) Expr
+
+	// NewStruct creates an Expr value representing a struct literal with a given type name and a
+	// set of field initializers.
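+	//
+	// Editor's sketch (not part of the vendored file): building the expression
+	// `a.b == "c"` by hand with the factory methods above, assuming the
+	// common/types package for the literal value. IDs are arbitrary but unique,
+	// and "_==_" is CEL's internal operator name for equality:
+	//
+	//	fac := NewExprFactory()
+	//	eq := fac.NewCall(4, "_==_",
+	//		fac.NewSelect(2, fac.NewIdent(1, "a"), "b"),
+	//		fac.NewLiteral(3, types.String("c")))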
+ NewStruct(id int64, typeName string, fields []EntryExpr) Expr + + // NewStructField creates a StructField with a given field name, value, and a flag indicating + // whether the field is optionally set. + NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr + + // NewUnspecifiedExpr creates an empty expression node. + NewUnspecifiedExpr(id int64) Expr + + isExprFactory() +} + +type baseExprFactory struct{} + +// NewExprFactory creates an ExprFactory instance. +func NewExprFactory() ExprFactory { + return &baseExprFactory{} +} + +func (fac *baseExprFactory) NewCall(id int64, function string, args ...Expr) Expr { + if len(args) == 0 { + args = []Expr{} + } + return fac.newExpr( + id, + &baseCallExpr{ + function: function, + target: nilExpr, + args: args, + isMember: false, + }) +} + +func (fac *baseExprFactory) NewMemberCall(id int64, function string, target Expr, args ...Expr) Expr { + if len(args) == 0 { + args = []Expr{} + } + return fac.newExpr( + id, + &baseCallExpr{ + function: function, + target: target, + args: args, + isMember: true, + }) +} + +func (fac *baseExprFactory) NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr { + // Set the iter_var2 to empty string to indicate the second variable is omitted + return fac.NewComprehensionTwoVar(id, iterRange, iterVar, "", accuVar, accuInit, loopCond, loopStep, result) +} + +func (fac *baseExprFactory) NewComprehensionTwoVar(id int64, iterRange Expr, iterVar, iterVar2, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr { + return fac.newExpr( + id, + &baseComprehensionExpr{ + iterRange: iterRange, + iterVar: iterVar, + iterVar2: iterVar2, + accuVar: accuVar, + accuInit: accuInit, + loopCond: loopCond, + loopStep: loopStep, + result: result, + }) +} + +func (fac *baseExprFactory) NewIdent(id int64, name string) Expr { + return fac.newExpr(id, baseIdentExpr(name)) +} + +func (fac *baseExprFactory) NewAccuIdent(id int64) Expr { + return fac.NewIdent(id, "__result__") +} + +func (fac *baseExprFactory) NewLiteral(id int64, value ref.Val) Expr { + return fac.newExpr(id, &baseLiteral{Val: value}) +} + +func (fac *baseExprFactory) NewList(id int64, elems []Expr, optIndices []int32) Expr { + optIndexMap := make(map[int32]struct{}, len(optIndices)) + for _, idx := range optIndices { + optIndexMap[idx] = struct{}{} + } + return fac.newExpr(id, + &baseListExpr{ + elements: elems, + optIndices: optIndices, + optIndexMap: optIndexMap, + }) +} + +func (fac *baseExprFactory) NewMap(id int64, entries []EntryExpr) Expr { + return fac.newExpr(id, &baseMapExpr{entries: entries}) +} + +func (fac *baseExprFactory) NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr { + return fac.newEntryExpr( + id, + &baseMapEntry{ + key: key, + value: value, + isOptional: isOptional, + }) +} + +func (fac *baseExprFactory) NewPresenceTest(id int64, operand Expr, field string) Expr { + return fac.newExpr( + id, + &baseSelectExpr{ + operand: operand, + field: field, + testOnly: true, + }) +} + +func (fac *baseExprFactory) NewSelect(id int64, operand Expr, field string) Expr { + return fac.newExpr( + id, + &baseSelectExpr{ + operand: operand, + field: field, + }) +} + +func (fac *baseExprFactory) NewStruct(id int64, typeName string, fields []EntryExpr) Expr { + return fac.newExpr( + id, + &baseStructExpr{ + typeName: typeName, + fields: fields, + }) +} + +func (fac *baseExprFactory) NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr { + 
return fac.newEntryExpr( + id, + &baseStructField{ + field: field, + value: value, + isOptional: isOptional, + }) +} + +func (fac *baseExprFactory) NewUnspecifiedExpr(id int64) Expr { + return fac.newExpr(id, nil) +} + +func (fac *baseExprFactory) CopyExpr(e Expr) Expr { + // unwrap navigable expressions to avoid unnecessary allocations during copying. + if nav, ok := e.(*navigableExprImpl); ok { + e = nav.Expr + } + switch e.Kind() { + case CallKind: + c := e.AsCall() + argsCopy := make([]Expr, len(c.Args())) + for i, arg := range c.Args() { + argsCopy[i] = fac.CopyExpr(arg) + } + if !c.IsMemberFunction() { + return fac.NewCall(e.ID(), c.FunctionName(), argsCopy...) + } + return fac.NewMemberCall(e.ID(), c.FunctionName(), fac.CopyExpr(c.Target()), argsCopy...) + case ComprehensionKind: + compre := e.AsComprehension() + return fac.NewComprehensionTwoVar(e.ID(), + fac.CopyExpr(compre.IterRange()), + compre.IterVar(), + compre.IterVar2(), + compre.AccuVar(), + fac.CopyExpr(compre.AccuInit()), + fac.CopyExpr(compre.LoopCondition()), + fac.CopyExpr(compre.LoopStep()), + fac.CopyExpr(compre.Result())) + case IdentKind: + return fac.NewIdent(e.ID(), e.AsIdent()) + case ListKind: + l := e.AsList() + elemsCopy := make([]Expr, l.Size()) + for i, elem := range l.Elements() { + elemsCopy[i] = fac.CopyExpr(elem) + } + return fac.NewList(e.ID(), elemsCopy, l.OptionalIndices()) + case LiteralKind: + return fac.NewLiteral(e.ID(), e.AsLiteral()) + case MapKind: + m := e.AsMap() + entriesCopy := make([]EntryExpr, m.Size()) + for i, entry := range m.Entries() { + entriesCopy[i] = fac.CopyEntryExpr(entry) + } + return fac.NewMap(e.ID(), entriesCopy) + case SelectKind: + s := e.AsSelect() + if s.IsTestOnly() { + return fac.NewPresenceTest(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName()) + } + return fac.NewSelect(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName()) + case StructKind: + s := e.AsStruct() + fieldsCopy := make([]EntryExpr, len(s.Fields())) + for i, field := range s.Fields() { + fieldsCopy[i] = fac.CopyEntryExpr(field) + } + return fac.NewStruct(e.ID(), s.TypeName(), fieldsCopy) + default: + return fac.NewUnspecifiedExpr(e.ID()) + } +} + +func (fac *baseExprFactory) CopyEntryExpr(e EntryExpr) EntryExpr { + switch e.Kind() { + case MapEntryKind: + entry := e.AsMapEntry() + return fac.NewMapEntry(e.ID(), + fac.CopyExpr(entry.Key()), fac.CopyExpr(entry.Value()), entry.IsOptional()) + case StructFieldKind: + field := e.AsStructField() + return fac.NewStructField(e.ID(), + field.Name(), fac.CopyExpr(field.Value()), field.IsOptional()) + default: + return fac.newEntryExpr(e.ID(), nil) + } +} + +func (*baseExprFactory) isExprFactory() {} + +func (fac *baseExprFactory) newExpr(id int64, e exprKindCase) Expr { + return &expr{ + id: id, + exprKindCase: e, + } +} + +func (fac *baseExprFactory) newEntryExpr(id int64, e entryExprKindCase) EntryExpr { + return &entryExpr{ + id: id, + entryExprKindCase: e, + } +} + +var ( + defaultFactory = &baseExprFactory{} +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/ast/navigable.go b/hack/tools/vendor/github.com/google/cel-go/common/ast/navigable.go new file mode 100644 index 0000000000..d7a90fb7c3 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/ast/navigable.go @@ -0,0 +1,660 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
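+// Editor's sketch (not part of the vendored file): CopyExpr in factory.go above
+// produces a deep copy that preserves ids, which pairs naturally with
+// RenumberIDs when splicing a copied subtree into another AST. `fac` is assumed
+// to be an ExprFactory and `e` an existing Expr:
+//
+//	cp := fac.CopyExpr(e)
+//	next := int64(100)
+//	cp.RenumberIDs(func(int64) int64 {
+//		next++
+//		return next
+//	})
+//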
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// NavigableExpr represents the base navigable expression value with methods to inspect the
+// parent and child expressions.
+type NavigableExpr interface {
+	Expr
+
+	// Type of the expression.
+	//
+	// If the expression is type-checked, the type check metadata is returned. If the expression
+	// has not been type-checked, the types.DynType value is returned.
+	Type() *types.Type
+
+	// Parent returns the parent expression node, if one exists.
+	Parent() (NavigableExpr, bool)
+
+	// Children returns a list of child expression nodes.
+	Children() []NavigableExpr
+
+	// Depth indicates the depth in the expression tree.
+	//
+	// The root expression has depth 0.
+	Depth() int
+}
+
+// NavigateAST converts an AST to a NavigableExpr.
+func NavigateAST(ast *AST) NavigableExpr {
+	return NavigateExpr(ast, ast.Expr())
+}
+
+// NavigateExpr creates a NavigableExpr whose type information is backed by the input AST.
+//
+// If the expression is already a NavigableExpr, the parent and depth information will be
+// propagated on the new NavigableExpr value; otherwise, the expr value will be treated
+// as though it is the root of the expression graph with a depth of 0.
+func NavigateExpr(ast *AST, expr Expr) NavigableExpr {
+	depth := 0
+	var parent NavigableExpr = nil
+	if nav, ok := expr.(NavigableExpr); ok {
+		depth = nav.Depth()
+		parent, _ = nav.Parent()
+	}
+	return newNavigableExpr(ast, parent, expr, depth)
+}
+
+// ExprMatcher takes a NavigableExpr in and indicates whether the value is a match.
+//
+// This function type should be used with the MatchDescendants and MatchSubset calls.
+type ExprMatcher func(NavigableExpr) bool
+
+// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr
+// is comprised of all constant values, such as a simple literal or even list and map literal.
+func ConstantValueMatcher() ExprMatcher {
+	return matchIsConstantValue
+}
+
+// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches
+// the specified `kind`.
+func KindMatcher(kind ExprKind) ExprMatcher {
+	return func(e NavigableExpr) bool {
+		return e.Kind() == kind
+	}
+}
+
+// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose
+// function name is equal to `funcName`.
+func FunctionMatcher(funcName string) ExprMatcher {
+	return func(e NavigableExpr) bool {
+		if e.Kind() != CallKind {
+			return false
+		}
+		return e.AsCall().FunctionName() == funcName
+	}
+}
+
+// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list.
+//
+// Such a result would work well with subsequent MatchSubset calls.
+func AllMatcher() ExprMatcher {
+	return func(NavigableExpr) bool {
+		return true
+	}
+}
+
+// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values
+// matching the input criteria in post-order (bottom up).
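+//
+// Editor's sketch (not part of the vendored file): combining the matchers above
+// with MatchDescendants to find every call to a particular function, assuming
+// `checkedAST` is an *AST produced by the checker:
+//
+//	root := NavigateAST(checkedAST)
+//	sizes := MatchDescendants(root, FunctionMatcher("size"))
+//	for _, m := range sizes {
+//		fmt.Println(m.ID(), m.AsCall().FunctionName())
+//	}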
+func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr {
+	matches := []NavigableExpr{}
+	navVisitor := &baseVisitor{
+		visitExpr: func(e Expr) {
+			nav := e.(NavigableExpr)
+			if matcher(nav) {
+				matches = append(matches, nav)
+			}
+		},
+	}
+	visit(expr, navVisitor, postOrder, 0, 0)
+	return matches
+}
+
+// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a
+// subset of NavigableExpr values which match.
+func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr {
+	matches := []NavigableExpr{}
+	navVisitor := &baseVisitor{
+		visitExpr: func(e Expr) {
+			nav := e.(NavigableExpr)
+			if matcher(nav) {
+				matches = append(matches, nav)
+			}
+		},
+	}
+	for _, expr := range exprs {
+		visit(expr, navVisitor, postOrder, 0, 1)
+	}
+	return matches
+}
+
+// Visitor defines an object for visiting Expr and EntryExpr nodes within an expression graph.
+type Visitor interface {
+	// VisitExpr visits the input expression.
+	VisitExpr(Expr)
+
+	// VisitEntryExpr visits the input entry expression, i.e. a struct field or map entry.
+	VisitEntryExpr(EntryExpr)
+}
+
+type baseVisitor struct {
+	visitExpr      func(Expr)
+	visitEntryExpr func(EntryExpr)
+}
+
+// VisitExpr visits the Expr if the internal expr visitor has been configured.
+func (v *baseVisitor) VisitExpr(e Expr) {
+	if v.visitExpr != nil {
+		v.visitExpr(e)
+	}
+}
+
+// VisitEntryExpr visits the entry if the internal expr entry visitor has been configured.
+func (v *baseVisitor) VisitEntryExpr(e EntryExpr) {
+	if v.visitEntryExpr != nil {
+		v.visitEntryExpr(e)
+	}
+}
+
+// NewExprVisitor creates a visitor which only visits expression nodes.
+func NewExprVisitor(v func(Expr)) Visitor {
+	return &baseVisitor{
+		visitExpr:      v,
+		visitEntryExpr: nil,
+	}
+}
+
+// PostOrderVisit walks the expression graph and calls the visitor in post-order (bottom-up).
+func PostOrderVisit(expr Expr, visitor Visitor) {
+	visit(expr, visitor, postOrder, 0, 0)
+}
+
+// PreOrderVisit walks the expression graph and calls the visitor in pre-order (top-down).
+func PreOrderVisit(expr Expr, visitor Visitor) {
+	visit(expr, visitor, preOrder, 0, 0)
+}
+
+type visitOrder int
+
+const (
+	preOrder = iota + 1
+	postOrder
+)
+
+// TODO: consider exposing a way to configure a limit for the max visit depth.
+// It's possible that we could want to configure this on the NewExprVisitor()
+// and through MatchDescendants() / MaxID().
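+// Editor's sketch (not part of the vendored file): the visitor plumbing above in
+// action, counting literal nodes in post-order without any matcher machinery,
+// assuming `e` is an existing Expr:
+//
+//	count := 0
+//	PostOrderVisit(e, NewExprVisitor(func(n Expr) {
+//		if n.Kind() == LiteralKind {
+//			count++
+//		}
+//	}))
+//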
+func visit(expr Expr, visitor Visitor, order visitOrder, depth, maxDepth int) { + if maxDepth > 0 && depth == maxDepth { + return + } + if order == preOrder { + visitor.VisitExpr(expr) + } + switch expr.Kind() { + case CallKind: + c := expr.AsCall() + if c.IsMemberFunction() { + visit(c.Target(), visitor, order, depth+1, maxDepth) + } + for _, arg := range c.Args() { + visit(arg, visitor, order, depth+1, maxDepth) + } + case ComprehensionKind: + c := expr.AsComprehension() + visit(c.IterRange(), visitor, order, depth+1, maxDepth) + visit(c.AccuInit(), visitor, order, depth+1, maxDepth) + visit(c.LoopCondition(), visitor, order, depth+1, maxDepth) + visit(c.LoopStep(), visitor, order, depth+1, maxDepth) + visit(c.Result(), visitor, order, depth+1, maxDepth) + case ListKind: + l := expr.AsList() + for _, elem := range l.Elements() { + visit(elem, visitor, order, depth+1, maxDepth) + } + case MapKind: + m := expr.AsMap() + for _, e := range m.Entries() { + if order == preOrder { + visitor.VisitEntryExpr(e) + } + entry := e.AsMapEntry() + visit(entry.Key(), visitor, order, depth+1, maxDepth) + visit(entry.Value(), visitor, order, depth+1, maxDepth) + if order == postOrder { + visitor.VisitEntryExpr(e) + } + } + case SelectKind: + visit(expr.AsSelect().Operand(), visitor, order, depth+1, maxDepth) + case StructKind: + s := expr.AsStruct() + for _, f := range s.Fields() { + visitor.VisitEntryExpr(f) + visit(f.AsStructField().Value(), visitor, order, depth+1, maxDepth) + } + } + if order == postOrder { + visitor.VisitExpr(expr) + } +} + +func matchIsConstantValue(e NavigableExpr) bool { + if e.Kind() == LiteralKind { + return true + } + if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind { + for _, child := range e.Children() { + if !matchIsConstantValue(child) { + return false + } + } + return true + } + return false +} + +func newNavigableExpr(ast *AST, parent NavigableExpr, expr Expr, depth int) NavigableExpr { + // Reduce navigable expression nesting by unwrapping the embedded Expr value. 
+ if nav, ok := expr.(*navigableExprImpl); ok { + expr = nav.Expr + } + nav := &navigableExprImpl{ + Expr: expr, + depth: depth, + ast: ast, + parent: parent, + createChildren: getChildFactory(expr), + } + return nav +} + +type navigableExprImpl struct { + Expr + depth int + ast *AST + parent NavigableExpr + createChildren childFactory +} + +func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) { + if nav.parent != nil { + return nav.parent, true + } + return nil, false +} + +func (nav *navigableExprImpl) ID() int64 { + return nav.Expr.ID() +} + +func (nav *navigableExprImpl) Kind() ExprKind { + return nav.Expr.Kind() +} + +func (nav *navigableExprImpl) Type() *types.Type { + return nav.ast.GetType(nav.ID()) +} + +func (nav *navigableExprImpl) Children() []NavigableExpr { + return nav.createChildren(nav) +} + +func (nav *navigableExprImpl) Depth() int { + return nav.depth +} + +func (nav *navigableExprImpl) AsCall() CallExpr { + return navigableCallImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsComprehension() ComprehensionExpr { + return navigableComprehensionImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsIdent() string { + return nav.Expr.AsIdent() +} + +func (nav *navigableExprImpl) AsList() ListExpr { + return navigableListImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsLiteral() ref.Val { + return nav.Expr.AsLiteral() +} + +func (nav *navigableExprImpl) AsMap() MapExpr { + return navigableMapImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsSelect() SelectExpr { + return navigableSelectImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsStruct() StructExpr { + return navigableStructImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) createChild(e Expr) NavigableExpr { + return newNavigableExpr(nav.ast, nav, e, nav.depth+1) +} + +func (nav *navigableExprImpl) isExpr() {} + +type navigableCallImpl struct { + *navigableExprImpl +} + +func (call navigableCallImpl) FunctionName() string { + return call.Expr.AsCall().FunctionName() +} + +func (call navigableCallImpl) IsMemberFunction() bool { + return call.Expr.AsCall().IsMemberFunction() +} + +func (call navigableCallImpl) Target() Expr { + t := call.Expr.AsCall().Target() + if t != nil { + return call.createChild(t) + } + return nil +} + +func (call navigableCallImpl) Args() []Expr { + args := call.Expr.AsCall().Args() + navArgs := make([]Expr, len(args)) + for i, a := range args { + navArgs[i] = call.createChild(a) + } + return navArgs +} + +type navigableComprehensionImpl struct { + *navigableExprImpl +} + +func (comp navigableComprehensionImpl) IterRange() Expr { + return comp.createChild(comp.Expr.AsComprehension().IterRange()) +} + +func (comp navigableComprehensionImpl) IterVar() string { + return comp.Expr.AsComprehension().IterVar() +} + +func (comp navigableComprehensionImpl) IterVar2() string { + return comp.Expr.AsComprehension().IterVar2() +} + +func (comp navigableComprehensionImpl) HasIterVar2() bool { + return comp.Expr.AsComprehension().HasIterVar2() +} + +func (comp navigableComprehensionImpl) AccuVar() string { + return comp.Expr.AsComprehension().AccuVar() +} + +func (comp navigableComprehensionImpl) AccuInit() Expr { + return comp.createChild(comp.Expr.AsComprehension().AccuInit()) +} + +func (comp navigableComprehensionImpl) LoopCondition() Expr { + return comp.createChild(comp.Expr.AsComprehension().LoopCondition()) +} + +func (comp navigableComprehensionImpl) LoopStep() Expr { + return 
comp.createChild(comp.Expr.AsComprehension().LoopStep()) +} + +func (comp navigableComprehensionImpl) Result() Expr { + return comp.createChild(comp.Expr.AsComprehension().Result()) +} + +type navigableListImpl struct { + *navigableExprImpl +} + +func (l navigableListImpl) Elements() []Expr { + pbElems := l.Expr.AsList().Elements() + elems := make([]Expr, len(pbElems)) + for i := 0; i < len(pbElems); i++ { + elems[i] = l.createChild(pbElems[i]) + } + return elems +} + +func (l navigableListImpl) IsOptional(index int32) bool { + return l.Expr.AsList().IsOptional(index) +} + +func (l navigableListImpl) OptionalIndices() []int32 { + return l.Expr.AsList().OptionalIndices() +} + +func (l navigableListImpl) Size() int { + return l.Expr.AsList().Size() +} + +type navigableMapImpl struct { + *navigableExprImpl +} + +func (m navigableMapImpl) Entries() []EntryExpr { + mapExpr := m.Expr.AsMap() + entries := make([]EntryExpr, len(mapExpr.Entries())) + for i, e := range mapExpr.Entries() { + entry := e.AsMapEntry() + entries[i] = &entryExpr{ + id: e.ID(), + entryExprKindCase: navigableEntryImpl{ + key: m.createChild(entry.Key()), + val: m.createChild(entry.Value()), + isOpt: entry.IsOptional(), + }, + } + } + return entries +} + +func (m navigableMapImpl) Size() int { + return m.Expr.AsMap().Size() +} + +type navigableEntryImpl struct { + key NavigableExpr + val NavigableExpr + isOpt bool +} + +func (e navigableEntryImpl) Kind() EntryExprKind { + return MapEntryKind +} + +func (e navigableEntryImpl) Key() Expr { + return e.key +} + +func (e navigableEntryImpl) Value() Expr { + return e.val +} + +func (e navigableEntryImpl) IsOptional() bool { + return e.isOpt +} + +func (e navigableEntryImpl) renumberIDs(IDGenerator) {} + +func (e navigableEntryImpl) isEntryExpr() {} + +type navigableSelectImpl struct { + *navigableExprImpl +} + +func (sel navigableSelectImpl) FieldName() string { + return sel.Expr.AsSelect().FieldName() +} + +func (sel navigableSelectImpl) IsTestOnly() bool { + return sel.Expr.AsSelect().IsTestOnly() +} + +func (sel navigableSelectImpl) Operand() Expr { + return sel.createChild(sel.Expr.AsSelect().Operand()) +} + +type navigableStructImpl struct { + *navigableExprImpl +} + +func (s navigableStructImpl) TypeName() string { + return s.Expr.AsStruct().TypeName() +} + +func (s navigableStructImpl) Fields() []EntryExpr { + fieldInits := s.Expr.AsStruct().Fields() + fields := make([]EntryExpr, len(fieldInits)) + for i, f := range fieldInits { + field := f.AsStructField() + fields[i] = &entryExpr{ + id: f.ID(), + entryExprKindCase: navigableFieldImpl{ + name: field.Name(), + val: s.createChild(field.Value()), + isOpt: field.IsOptional(), + }, + } + } + return fields +} + +type navigableFieldImpl struct { + name string + val NavigableExpr + isOpt bool +} + +func (f navigableFieldImpl) Kind() EntryExprKind { + return StructFieldKind +} + +func (f navigableFieldImpl) Name() string { + return f.name +} + +func (f navigableFieldImpl) Value() Expr { + return f.val +} + +func (f navigableFieldImpl) IsOptional() bool { + return f.isOpt +} + +func (f navigableFieldImpl) renumberIDs(IDGenerator) {} + +func (f navigableFieldImpl) isEntryExpr() {} + +func getChildFactory(expr Expr) childFactory { + if expr == nil { + return noopFactory + } + switch expr.Kind() { + case LiteralKind: + return noopFactory + case IdentKind: + return noopFactory + case SelectKind: + return selectFactory + case CallKind: + return callArgFactory + case ListKind: + return listElemFactory + case MapKind: + return 
mapEntryFactory + case StructKind: + return structEntryFactory + case ComprehensionKind: + return comprehensionFactory + default: + return noopFactory + } +} + +type childFactory func(*navigableExprImpl) []NavigableExpr + +func noopFactory(*navigableExprImpl) []NavigableExpr { + return nil +} + +func selectFactory(nav *navigableExprImpl) []NavigableExpr { + return []NavigableExpr{nav.createChild(nav.AsSelect().Operand())} +} + +func callArgFactory(nav *navigableExprImpl) []NavigableExpr { + call := nav.Expr.AsCall() + argCount := len(call.Args()) + if call.IsMemberFunction() { + argCount++ + } + navExprs := make([]NavigableExpr, argCount) + i := 0 + if call.IsMemberFunction() { + navExprs[i] = nav.createChild(call.Target()) + i++ + } + for _, arg := range call.Args() { + navExprs[i] = nav.createChild(arg) + i++ + } + return navExprs +} + +func listElemFactory(nav *navigableExprImpl) []NavigableExpr { + l := nav.Expr.AsList() + navExprs := make([]NavigableExpr, len(l.Elements())) + for i, e := range l.Elements() { + navExprs[i] = nav.createChild(e) + } + return navExprs +} + +func structEntryFactory(nav *navigableExprImpl) []NavigableExpr { + s := nav.Expr.AsStruct() + entries := make([]NavigableExpr, len(s.Fields())) + for i, e := range s.Fields() { + f := e.AsStructField() + entries[i] = nav.createChild(f.Value()) + } + return entries +} + +func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr { + m := nav.Expr.AsMap() + entries := make([]NavigableExpr, len(m.Entries())*2) + j := 0 + for _, e := range m.Entries() { + mapEntry := e.AsMapEntry() + entries[j] = nav.createChild(mapEntry.Key()) + entries[j+1] = nav.createChild(mapEntry.Value()) + j += 2 + } + return entries +} + +func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr { + compre := nav.Expr.AsComprehension() + return []NavigableExpr{ + nav.createChild(compre.IterRange()), + nav.createChild(compre.AccuInit()), + nav.createChild(compre.LoopCondition()), + nav.createChild(compre.LoopStep()), + nav.createChild(compre.Result()), + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/containers/BUILD.bazel new file mode 100644 index 0000000000..81197f0641 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/containers/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "container.go", + ], + importpath = "github.com/google/cel-go/common/containers", + deps = [ + "//common/ast:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "container_test.go", + ], + embed = [ + ":go_default_library", + ], + deps = [ + "//common/ast:go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/containers/container.go b/hack/tools/vendor/github.com/google/cel-go/common/containers/container.go new file mode 100644 index 0000000000..3097a3f785 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/containers/container.go @@ -0,0 +1,328 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package containers defines types and functions for resolving qualified names within a namespace
+// or type provided to CEL.
+package containers
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+
+	"github.com/google/cel-go/common/ast"
+)
+
+var (
+	// DefaultContainer has an empty container name.
+	DefaultContainer *Container = nil
+
+	// Empty map to search for aliases when needed.
+	noAliases = make(map[string]string)
+)
+
+// NewContainer creates a new Container with the fully-qualified name.
+func NewContainer(opts ...ContainerOption) (*Container, error) {
+	var c *Container
+	var err error
+	for _, opt := range opts {
+		c, err = opt(c)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return c, nil
+}
+
+// Container holds a reference to an optional qualified container name and set of aliases.
+//
+// The program container can be used to simplify variable, function, and type specification within
+// CEL programs and behaves more or less like a C++ namespace. See ResolveCandidateNames for more
+// details.
+type Container struct {
+	name    string
+	aliases map[string]string
+}
+
+// Extend creates a new Container with the existing settings and applies a series of
+// ContainerOptions to further configure the new container.
+func (c *Container) Extend(opts ...ContainerOption) (*Container, error) {
+	if c == nil {
+		return NewContainer(opts...)
+	}
+	// Copy the name and aliases of the existing container.
+	ext := &Container{name: c.Name()}
+	if len(c.aliasSet()) > 0 {
+		aliasSet := make(map[string]string, len(c.aliasSet()))
+		for k, v := range c.aliasSet() {
+			aliasSet[k] = v
+		}
+		ext.aliases = aliasSet
+	}
+	// Apply the new options to the container.
+	var err error
+	for _, opt := range opts {
+		ext, err = opt(ext)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ext, nil
+}
+
+// Name returns the fully-qualified name of the container.
+//
+// The name may conceptually be a namespace, package, or type.
+func (c *Container) Name() string {
+	if c == nil {
+		return ""
+	}
+	return c.name
+}
+
+// ResolveCandidateNames returns the candidate names of namespaced identifiers in C++ resolution
+// order.
+//
+// Names which shadow other names are returned first. If a name includes a leading dot ('.'),
+// the name is treated as an absolute identifier which cannot be shadowed.
+//
+// Given a container name a.b.c.M.N and a type name R.s, this will deliver in order:
+//
+//	a.b.c.M.N.R.s
+//	a.b.c.M.R.s
+//	a.b.c.R.s
+//	a.b.R.s
+//	a.R.s
+//	R.s
+//
+// If aliases or abbreviations are configured for the container, then alias names will take
+// precedence over containerized names.
+func (c *Container) ResolveCandidateNames(name string) []string {
+	if strings.HasPrefix(name, ".") {
+		qn := name[1:]
+		alias, isAlias := c.findAlias(qn)
+		if isAlias {
+			return []string{alias}
+		}
+		return []string{qn}
+	}
+	alias, isAlias := c.findAlias(name)
+	if isAlias {
+		return []string{alias}
+	}
+	if c.Name() == "" {
+		return []string{name}
+	}
+	nextCont := c.Name()
+	candidates := []string{nextCont + "." + name}
+	for i := strings.LastIndex(nextCont, "."); i >= 0; i = strings.LastIndex(nextCont, ".") {
+		nextCont = nextCont[:i]
+		candidates = append(candidates, nextCont+"."+name)
+	}
+	return append(candidates, name)
+}
+
+// aliasSet returns the alias to fully-qualified name mapping stored in the container.
+func (c *Container) aliasSet() map[string]string {
+	if c == nil || c.aliases == nil {
+		return noAliases
+	}
+	return c.aliases
+}
+
+// findAlias takes a name as input and returns an alias expansion if one exists.
+//
+// If the name is qualified, the first component of the qualified name is checked against known
+// aliases. Any alias that is found in a qualified name is expanded in the result:
+//
+//	alias: R -> my.alias.R
+//	name: R.S.T
+//	output: my.alias.R.S.T
+//
+// Note, the name must not have a leading dot.
+func (c *Container) findAlias(name string) (string, bool) {
+	// If an alias exists for the name, ensure it is searched last.
+	simple := name
+	qualifier := ""
+	dot := strings.Index(name, ".")
+	if dot >= 0 {
+		simple = name[0:dot]
+		qualifier = name[dot:]
+	}
+	alias, found := c.aliasSet()[simple]
+	if !found {
+		return "", false
+	}
+	return alias + qualifier, true
+}
+
+// ContainerOption specifies a functional configuration option for a Container.
+//
+// Note, ContainerOption implementations must be able to handle nil container inputs.
+type ContainerOption func(*Container) (*Container, error)
+
+// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
+//
+// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
+// Abbreviations can be useful when working with variables, functions, and especially types from
+// multiple namespaces:
+//
+//	// CEL object construction
+//	qual.pkg.version.ObjTypeName{
+//	    field: alt.container.ver.FieldTypeName{value: ...}
+//	}
+//
+// Only one of the qualified names above may be used as the CEL container, so at least one of these
+// references must be a long qualified name within an otherwise short CEL program. Using the
+// following abbreviations, the program becomes much simpler:
+//
+//	// CEL Go option
+//	Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
+//	// Simplified Object construction
+//	ObjTypeName{field: FieldTypeName{value: ...}}
+//
+// There are a few rules for the qualified names and the simple abbreviations generated from them:
+// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
+// - The last element in the qualified name is the abbreviation.
+// - Abbreviations must not collide with each other.
+// - The abbreviation must not collide with unqualified names in use.
+//
+// Abbreviations are distinct from container-based references in the following important ways:
+// - Abbreviations must expand to a fully-qualified name.
+// - Expanded abbreviations do not participate in namespace resolution.
+// - Abbreviation expansion is done instead of the container search for a matching identifier.
+// - Containers follow C++ namespace resolution rules with searches from the most qualified name
+//   to the least qualified name.
+// - Container references within the CEL program may be relative, and are resolved to fully
+//   qualified names at either type-check time or program plan time, whichever comes first.
+// +// If there is ever a case where an identifier could be in both the container and as an +// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is +// preserved between compilations even as the container evolves. +func Abbrevs(qualifiedNames ...string) ContainerOption { + return func(c *Container) (*Container, error) { + for _, qn := range qualifiedNames { + qn = strings.TrimSpace(qn) + for _, r := range qn { + if !isIdentifierChar(r) { + return nil, fmt.Errorf( + "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn) + } + } + ind := strings.LastIndex(qn, ".") + if ind <= 0 || ind >= len(qn)-1 { + return nil, fmt.Errorf( + "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn) + } + alias := qn[ind+1:] + var err error + c, err = aliasAs("abbreviation", qn, alias)(c) + if err != nil { + return nil, err + } + } + return c, nil + } +} + +// Alias associates a fully-qualified name with a user-defined alias. +// +// In general, Abbrevs is preferred to Alias since the names generated from the Abbrevs option +// are more easily traced back to source code. The Alias option is useful for propagating alias +// configuration from one Container instance to another, and may also be useful for remapping +// poorly chosen protobuf message / package names. +// +// Note: all of the rules that apply to Abbrevs also apply to Alias. +func Alias(qualifiedName, alias string) ContainerOption { + return aliasAs("alias", qualifiedName, alias) +} + +func aliasAs(kind, qualifiedName, alias string) ContainerOption { + return func(c *Container) (*Container, error) { + if len(alias) == 0 || strings.Contains(alias, ".") { + return nil, fmt.Errorf( + "%s must be non-empty and simple (not qualified): %s=%s", kind, kind, alias) + } + + if qualifiedName[0:1] == "." { + return nil, fmt.Errorf("qualified name must not begin with a leading '.': %s", + qualifiedName) + } + ind := strings.LastIndex(qualifiedName, ".") + if ind <= 0 || ind == len(qualifiedName)-1 { + return nil, fmt.Errorf("%s must refer to a valid qualified name: %s", + kind, qualifiedName) + } + aliasRef, found := c.aliasSet()[alias] + if found { + return nil, fmt.Errorf( + "%s collides with existing reference: name=%s, %s=%s, existing=%s", + kind, qualifiedName, kind, alias, aliasRef) + } + if strings.HasPrefix(c.Name(), alias+".") || c.Name() == alias { + return nil, fmt.Errorf( + "%s collides with container name: name=%s, %s=%s, container=%s", + kind, qualifiedName, kind, alias, c.Name()) + } + if c == nil { + c = &Container{} + } + if c.aliases == nil { + c.aliases = make(map[string]string) + } + c.aliases[alias] = qualifiedName + return c, nil + } +} + +func isIdentifierChar(r rune) bool { + return r <= unicode.MaxASCII && (r == '.' || r == '_' || unicode.IsLetter(r) || unicode.IsNumber(r)) +} + +// Name sets the fully-qualified name of the Container. +func Name(name string) ContainerOption { + return func(c *Container) (*Container, error) { + if len(name) > 0 && name[0:1] == "." { + return nil, fmt.Errorf("container name must not contain a leading '.': %s", name) + } + if c.Name() == name { + return c, nil + } + if c == nil { + return &Container{name: name}, nil + } + c.name = name + return c, nil + } +} + +// ToQualifiedName converts an expression AST into a qualified name if possible, with a boolean +// 'found' value that indicates if the conversion is successful. 
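+// Editor's sketch (not part of the vendored file): tying the pieces of this
+// package together. A container name plus an abbreviation changes how a
+// reference such as R.s is resolved:
+//
+//	c, _ := NewContainer(Name("a.b.c"), Abbrevs("x.y.R"))
+//	c.ResolveCandidateNames("R.s") // ["x.y.R.s"] - the abbreviation wins
+//	c, _ = NewContainer(Name("a.b.c"))
+//	c.ResolveCandidateNames("R.s") // ["a.b.c.R.s", "a.b.R.s", "a.R.s", "R.s"]
+//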
+func ToQualifiedName(e ast.Expr) (string, bool) {
+	switch e.Kind() {
+	case ast.IdentKind:
+		id := e.AsIdent()
+		return id, true
+	case ast.SelectKind:
+		sel := e.AsSelect()
+		// Test only expressions are not valid as qualified names.
+		if sel.IsTestOnly() {
+			return "", false
+		}
+		if qual, found := ToQualifiedName(sel.Operand()); found {
+			return qual + "." + sel.FieldName(), true
+		}
+	}
+	return "", false
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/cost.go b/hack/tools/vendor/github.com/google/cel-go/common/cost.go
new file mode 100644
index 0000000000..5e24bd0f47
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/cost.go
@@ -0,0 +1,40 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+const (
+	// SelectAndIdentCost is the cost of an operation that accesses an identifier or performs a select.
+	SelectAndIdentCost = 1
+
+	// ConstCost is the cost of an operation that accesses a constant.
+	ConstCost = 0
+
+	// ListCreateBaseCost is the base cost of any operation that creates a new list.
+	ListCreateBaseCost = 10
+
+	// MapCreateBaseCost is the base cost of any operation that creates a new map.
+	MapCreateBaseCost = 30
+
+	// StructCreateBaseCost is the base cost of any operation that creates a new struct.
+	StructCreateBaseCost = 40
+
+	// StringTraversalCostFactor is multiplied by the length of a string when computing the cost of
+	// traversing the entire string once.
+	StringTraversalCostFactor = 0.1
+
+	// RegexStringLengthCostFactor is multiplied by the length of a regex string pattern when computing
+	// the cost of applying the regex to a string of unit cost.
+	RegexStringLengthCostFactor = 0.25
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
new file mode 100644
index 0000000000..724ed34045
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/debug/BUILD.bazel
@@ -0,0 +1,20 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+    default_visibility = ["//visibility:public"],
+    licenses = ["notice"],  # Apache 2.0
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "debug.go",
+    ],
+    importpath = "github.com/google/cel-go/common/debug",
+    deps = [
+        "//common:go_default_library",
+        "//common/ast:go_default_library",
+        "//common/types:go_default_library",
+        "//common/types/ref:go_default_library",
+    ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/debug/debug.go b/hack/tools/vendor/github.com/google/cel-go/common/debug/debug.go
new file mode 100644
index 0000000000..25d2e3d71c
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/debug/debug.go
@@ -0,0 +1,314 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package debug provides tools to print a parsed expression graph and +// adorn each expression element with additional metadata. +package debug + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// Adorner returns debug metadata that will be tacked on to the string +// representation of an expression. +type Adorner interface { + // GetMetadata for the input context. + GetMetadata(ctx any) string +} + +// Writer manages writing expressions to an internal string. +type Writer interface { + fmt.Stringer + + // Buffer pushes an expression into an internal queue of expressions to + // write to a string. + Buffer(e ast.Expr) +} + +type emptyDebugAdorner struct { +} + +var emptyAdorner Adorner = &emptyDebugAdorner{} + +func (a *emptyDebugAdorner) GetMetadata(e any) string { + return "" +} + +// ToDebugString gives the unadorned string representation of the Expr. +func ToDebugString(e ast.Expr) string { + return ToAdornedDebugString(e, emptyAdorner) +} + +// ToAdornedDebugString gives the adorned string representation of the Expr. +func ToAdornedDebugString(e ast.Expr, adorner Adorner) string { + w := newDebugWriter(adorner) + w.Buffer(e) + return w.String() +} + +// debugWriter is used to print out pretty-printed debug strings. 
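Before the debugWriter internals, an editorial sketch (not vendored code) of a custom Adorner. It assumes an ast.Expr produced by the cel-go parser and tags every node with its parse id:

```go
package debugexample

import (
	"fmt"

	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/debug"
)

// idAdorner annotates each expression node with its id, e.g. '^#3'.
// GetMetadata may also receive entry expressions; those are left unadorned here.
type idAdorner struct{}

func (idAdorner) GetMetadata(ctx any) string {
	if e, ok := ctx.(ast.Expr); ok {
		return fmt.Sprintf("^#%d", e.ID())
	}
	return ""
}

// Render prints an adorned debug string for a parsed expression.
func Render(e ast.Expr) string {
	return debug.ToAdornedDebugString(e, idAdorner{})
}
```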
+type debugWriter struct { + adorner Adorner + buffer bytes.Buffer + indent int + lineStart bool +} + +func newDebugWriter(a Adorner) *debugWriter { + return &debugWriter{ + adorner: a, + indent: 0, + lineStart: true, + } +} + +func (w *debugWriter) Buffer(e ast.Expr) { + if e == nil { + return + } + switch e.Kind() { + case ast.LiteralKind: + w.append(formatLiteral(e.AsLiteral())) + case ast.IdentKind: + w.append(e.AsIdent()) + case ast.SelectKind: + w.appendSelect(e.AsSelect()) + case ast.CallKind: + w.appendCall(e.AsCall()) + case ast.ListKind: + w.appendList(e.AsList()) + case ast.MapKind: + w.appendMap(e.AsMap()) + case ast.StructKind: + w.appendStruct(e.AsStruct()) + case ast.ComprehensionKind: + w.appendComprehension(e.AsComprehension()) + } + w.adorn(e) +} + +func (w *debugWriter) appendSelect(sel ast.SelectExpr) { + w.Buffer(sel.Operand()) + w.append(".") + w.append(sel.FieldName()) + if sel.IsTestOnly() { + w.append("~test-only~") + } +} + +func (w *debugWriter) appendCall(call ast.CallExpr) { + if call.IsMemberFunction() { + w.Buffer(call.Target()) + w.append(".") + } + w.append(call.FunctionName()) + w.append("(") + if len(call.Args()) > 0 { + w.addIndent() + w.appendLine() + for i, arg := range call.Args() { + if i > 0 { + w.append(",") + w.appendLine() + } + w.Buffer(arg) + } + w.removeIndent() + w.appendLine() + } + w.append(")") +} + +func (w *debugWriter) appendList(list ast.ListExpr) { + w.append("[") + if len(list.Elements()) > 0 { + w.appendLine() + w.addIndent() + for i, elem := range list.Elements() { + if i > 0 { + w.append(",") + w.appendLine() + } + w.Buffer(elem) + } + w.removeIndent() + w.appendLine() + } + w.append("]") +} + +func (w *debugWriter) appendStruct(obj ast.StructExpr) { + w.append(obj.TypeName()) + w.append("{") + if len(obj.Fields()) > 0 { + w.appendLine() + w.addIndent() + for i, f := range obj.Fields() { + field := f.AsStructField() + if i > 0 { + w.append(",") + w.appendLine() + } + if field.IsOptional() { + w.append("?") + } + w.append(field.Name()) + w.append(":") + w.Buffer(field.Value()) + w.adorn(f) + } + w.removeIndent() + w.appendLine() + } + w.append("}") +} + +func (w *debugWriter) appendMap(m ast.MapExpr) { + w.append("{") + if m.Size() > 0 { + w.appendLine() + w.addIndent() + for i, e := range m.Entries() { + entry := e.AsMapEntry() + if i > 0 { + w.append(",") + w.appendLine() + } + if entry.IsOptional() { + w.append("?") + } + w.Buffer(entry.Key()) + w.append(":") + w.Buffer(entry.Value()) + w.adorn(e) + } + w.removeIndent() + w.appendLine() + } + w.append("}") +} + +func (w *debugWriter) appendComprehension(comprehension ast.ComprehensionExpr) { + w.append("__comprehension__(") + w.addIndent() + w.appendLine() + w.append("// Variable") + w.appendLine() + w.append(comprehension.IterVar()) + w.append(",") + w.appendLine() + if comprehension.HasIterVar2() { + w.append(comprehension.IterVar2()) + w.append(",") + w.appendLine() + } + w.append("// Target") + w.appendLine() + w.Buffer(comprehension.IterRange()) + w.append(",") + w.appendLine() + w.append("// Accumulator") + w.appendLine() + w.append(comprehension.AccuVar()) + w.append(",") + w.appendLine() + w.append("// Init") + w.appendLine() + w.Buffer(comprehension.AccuInit()) + w.append(",") + w.appendLine() + w.append("// LoopCondition") + w.appendLine() + w.Buffer(comprehension.LoopCondition()) + w.append(",") + w.appendLine() + w.append("// LoopStep") + w.appendLine() + w.Buffer(comprehension.LoopStep()) + w.append(",") + w.appendLine() + w.append("// Result") + w.appendLine() + 
w.Buffer(comprehension.Result()) + w.append(")") + w.removeIndent() +} + +func formatLiteral(c ref.Val) string { + switch v := c.(type) { + case types.Bool: + return fmt.Sprintf("%t", v) + case types.Bytes: + return fmt.Sprintf("b\"%s\"", string(v)) + case types.Double: + return fmt.Sprintf("%v", float64(v)) + case types.Int: + return fmt.Sprintf("%d", int64(v)) + case types.String: + return strconv.Quote(string(v)) + case types.Uint: + return fmt.Sprintf("%du", uint64(v)) + case types.Null: + return "null" + default: + panic("Unknown constant type") + } +} + +func (w *debugWriter) append(s string) { + w.doIndent() + w.buffer.WriteString(s) +} + +func (w *debugWriter) appendFormat(f string, args ...any) { + w.append(fmt.Sprintf(f, args...)) +} + +func (w *debugWriter) doIndent() { + if w.lineStart { + w.lineStart = false + w.buffer.WriteString(strings.Repeat(" ", w.indent)) + } +} + +func (w *debugWriter) adorn(e any) { + w.append(w.adorner.GetMetadata(e)) +} + +func (w *debugWriter) appendLine() { + w.buffer.WriteString("\n") + w.lineStart = true +} + +func (w *debugWriter) addIndent() { + w.indent++ +} + +func (w *debugWriter) removeIndent() { + w.indent-- + if w.indent < 0 { + panic("negative indent") + } +} + +func (w *debugWriter) String() string { + return w.buffer.String() +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/decls/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/decls/BUILD.bazel new file mode 100644 index 0000000000..17791dce6a --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/decls/BUILD.bazel @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "decls.go", + ], + importpath = "github.com/google/cel-go/common/decls", + deps = [ + "//checker/decls:go_default_library", + "//common/functions:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "decls_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//checker/decls:go_default_library", + "//common/overloads:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/decls/decls.go b/hack/tools/vendor/github.com/google/cel-go/common/decls/decls.go new file mode 100644 index 0000000000..f67808febe --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/decls/decls.go @@ -0,0 +1,846 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package decls contains function and variable declaration structs and helper methods. +package decls + +import ( + "fmt" + "strings" + + chkdecls "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +// NewFunction creates a new function declaration with a set of function options to configure overloads +// and function definitions (implementations). +// +// Functions are checked for name collisions and singleton redefinition. +func NewFunction(name string, opts ...FunctionOpt) (*FunctionDecl, error) { + fn := &FunctionDecl{ + name: name, + overloads: map[string]*OverloadDecl{}, + overloadOrdinals: []string{}, + } + var err error + for _, opt := range opts { + fn, err = opt(fn) + if err != nil { + return nil, err + } + } + if len(fn.overloads) == 0 { + return nil, fmt.Errorf("function %s must have at least one overload", name) + } + return fn, nil +} + +// FunctionDecl defines a function name, overload set, and optionally a singleton definition for all +// overload instances. +type FunctionDecl struct { + name string + + // overloads associated with the function name. + overloads map[string]*OverloadDecl + + // singleton implementation of the function for all overloads. + // + // If this option is set, an error will occur if any overloads specify a per-overload implementation + // or if another function with the same name attempts to redefine the singleton. + singleton *functions.Overload + + // disableTypeGuards is a performance optimization to disable detailed runtime type checks which could + // add overhead on common operations. Setting this option true leaves error checks and argument checks + // intact. + disableTypeGuards bool + + // state indicates that the binding should be provided as a declaration, as a runtime binding, or both. + state declarationState + + // overloadOrdinals indicates the order in which the overload was declared. + overloadOrdinals []string +} + +type declarationState int + +const ( + declarationStateUnset declarationState = iota + declarationDisabled + declarationEnabled +) + +// Name returns the function name in human-readable terms, e.g. 'contains' of 'math.least' +func (f *FunctionDecl) Name() string { + if f == nil { + return "" + } + return f.name +} + +// IsDeclarationDisabled indicates that the function implementation should be added to the dispatcher, but the +// declaration should not be exposed for use in expressions. +func (f *FunctionDecl) IsDeclarationDisabled() bool { + return f.state == declarationDisabled +} + +// Merge combines an existing function declaration with another. +// +// If a function is extended, by say adding new overloads to an existing function, then it is merged with the +// prior definition of the function at which point its overloads must not collide with pre-existing overloads +// and its bindings (singleton, or per-overload) must not conflict with previous definitions either. +func (f *FunctionDecl) Merge(other *FunctionDecl) (*FunctionDecl, error) { + if f == other { + return f, nil + } + if f.Name() != other.Name() { + return nil, fmt.Errorf("cannot merge unrelated functions. 
%s and %s", f.Name(), other.Name())
+	}
+	merged := &FunctionDecl{
+		name:             f.Name(),
+		overloads:        make(map[string]*OverloadDecl, len(f.overloads)),
+		singleton:        f.singleton,
+		overloadOrdinals: make([]string, len(f.overloads)),
+		// if one function is expecting type-guards and the other is not, then they
+		// must not be disabled.
+		disableTypeGuards: f.disableTypeGuards && other.disableTypeGuards,
+		// default to the current function's declaration state.
+		state: f.state,
+	}
+	// If the other state indicates that the declaration should be explicitly enabled or
+	// disabled, then update the merged state with the most recent value.
+	if other.state != declarationStateUnset {
+		merged.state = other.state
+	}
+	// baseline copy of the overloads and their ordinals
+	copy(merged.overloadOrdinals, f.overloadOrdinals)
+	for oID, o := range f.overloads {
+		merged.overloads[oID] = o
+	}
+	// overloads and their ordinals are added from the left
+	for _, oID := range other.overloadOrdinals {
+		o := other.overloads[oID]
+		err := merged.AddOverload(o)
+		if err != nil {
+			return nil, fmt.Errorf("function declaration merge failed: %v", err)
+		}
+	}
+	if other.singleton != nil {
+		if merged.singleton != nil && merged.singleton != other.singleton {
+			return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+		}
+		merged.singleton = other.singleton
+	}
+	return merged, nil
+}
+
+// AddOverload ensures that the new overload does not collide with an existing overload signature;
+// however, if the function signatures are identical, the implementation may be rewritten as it's
+// difficult to compare functions by object identity.
+func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error {
+	if f == nil {
+		return fmt.Errorf("nil function cannot add overload: %s", overload.ID())
+	}
+	for oID, o := range f.overloads {
+		if oID != overload.ID() && o.SignatureOverlaps(overload) {
+			return fmt.Errorf("overload signature collision in function %s: %s collides with %s", f.Name(), oID, overload.ID())
+		}
+		if oID == overload.ID() {
+			if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() {
+				// Allow redefinition of an overload implementation so long as the signatures match.
+				if overload.hasBinding() {
+					f.overloads[oID] = overload
+				}
+				return nil
+			}
+			return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID)
+		}
+	}
+	f.overloadOrdinals = append(f.overloadOrdinals, overload.ID())
+	f.overloads[overload.ID()] = overload
+	return nil
+}
+
+// OverloadDecls returns the overload declarations in the order in which they were declared.
+func (f *FunctionDecl) OverloadDecls() []*OverloadDecl {
+	if f == nil {
+		return []*OverloadDecl{}
+	}
+	overloads := make([]*OverloadDecl, 0, len(f.overloads))
+	for _, oID := range f.overloadOrdinals {
+		overloads = append(overloads, f.overloads[oID])
+	}
+	return overloads
+}
+
+// Bindings produces a set of function bindings, if any are defined.
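An editorial sketch (not vendored code) of Merge extending a function with a second overload; the types.NewListType helper from the common/types package is assumed:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/decls"
	"github.com/google/cel-go/common/types"
)

func main() {
	base, _ := decls.NewFunction("size",
		decls.Overload("size_string",
			[]*types.Type{types.StringType}, types.IntType))
	ext, _ := decls.NewFunction("size",
		decls.Overload("size_list",
			[]*types.Type{types.NewListType(types.DynType)}, types.IntType))

	// merged carries both overloads; a colliding signature would error instead.
	merged, err := base.Merge(ext)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(merged.OverloadDecls())) // 2
}
```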
+func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) { + if f == nil { + return []*functions.Overload{}, nil + } + overloads := []*functions.Overload{} + nonStrict := false + for _, oID := range f.overloadOrdinals { + o := f.overloads[oID] + if o.hasBinding() { + overload := &functions.Overload{ + Operator: o.ID(), + Unary: o.guardedUnaryOp(f.Name(), f.disableTypeGuards), + Binary: o.guardedBinaryOp(f.Name(), f.disableTypeGuards), + Function: o.guardedFunctionOp(f.Name(), f.disableTypeGuards), + OperandTrait: o.OperandTrait(), + NonStrict: o.IsNonStrict(), + } + overloads = append(overloads, overload) + nonStrict = nonStrict || o.IsNonStrict() + } + } + if f.singleton != nil { + if len(overloads) != 0 { + return nil, fmt.Errorf("singleton function incompatible with specialized overloads: %s", f.Name()) + } + overloads = []*functions.Overload{ + { + Operator: f.Name(), + Unary: f.singleton.Unary, + Binary: f.singleton.Binary, + Function: f.singleton.Function, + OperandTrait: f.singleton.OperandTrait, + }, + } + // fall-through to return single overload case. + } + if len(overloads) == 0 { + return overloads, nil + } + // Single overload. Replicate an entry for it using the function name as well. + if len(overloads) == 1 { + if overloads[0].Operator == f.Name() { + return overloads, nil + } + return append(overloads, &functions.Overload{ + Operator: f.Name(), + Unary: overloads[0].Unary, + Binary: overloads[0].Binary, + Function: overloads[0].Function, + NonStrict: overloads[0].NonStrict, + OperandTrait: overloads[0].OperandTrait, + }), nil + } + // All of the defined overloads are wrapped into a top-level function which + // performs dynamic dispatch to the proper overload based on the argument types. + bindings := append([]*functions.Overload{}, overloads...) + funcDispatch := func(args ...ref.Val) ref.Val { + for _, oID := range f.overloadOrdinals { + o := f.overloads[oID] + // During dynamic dispatch over multiple functions, signature agreement checks + // are preserved in order to assist with the function resolution step. + switch len(args) { + case 1: + if o.unaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { + return o.unaryOp(args[0]) + } + case 2: + if o.binaryOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { + return o.binaryOp(args[0], args[1]) + } + } + if o.functionOp != nil && o.matchesRuntimeSignature(f.disableTypeGuards, args...) { + return o.functionOp(args...) + } + // eventually this will fall through to the noSuchOverload below. + } + return MaybeNoSuchOverload(f.Name(), args...) + } + function := &functions.Overload{ + Operator: f.Name(), + Function: funcDispatch, + NonStrict: nonStrict, + } + return append(bindings, function), nil +} + +// MaybeNoSuchOverload determines whether to propagate an error if one is provided as an argument, or +// to return an unknown set, or to produce a new error for a missing function signature. +func MaybeNoSuchOverload(funcName string, args ...ref.Val) ref.Val { + argTypes := make([]string, len(args)) + var unk *types.Unknown = nil + for i, arg := range args { + if types.IsError(arg) { + return arg + } + if types.IsUnknown(arg) { + unk = types.MergeUnknowns(arg.(*types.Unknown), unk) + } + argTypes[i] = arg.Type().TypeName() + } + if unk != nil { + return unk + } + signature := strings.Join(argTypes, ", ") + return types.NewErr("no such overload: %s(%s)", funcName, signature) +} + +// FunctionOpt defines a functional option for mutating a function declaration. 
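To illustrate the single-overload behavior of Bindings just defined, a sketch (not vendored code): the overload's binding is replicated under the function name, so the loop below should print both 'double_int' and 'double':

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/decls"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
)

func main() {
	fn, _ := decls.NewFunction("double",
		decls.Overload("double_int",
			[]*types.Type{types.IntType}, types.IntType,
			decls.UnaryBinding(func(v ref.Val) ref.Val {
				i := v.(types.Int)
				return i + i
			})))

	bindings, err := fn.Bindings()
	if err != nil {
		panic(err)
	}
	for _, b := range bindings {
		fmt.Println(b.Operator)
	}
}
```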
+type FunctionOpt func(*FunctionDecl) (*FunctionDecl, error) + +// DisableTypeGuards disables automatically generated function invocation guards on direct overload calls. +// Type guards remain on during dynamic dispatch for parsed-only expressions. +func DisableTypeGuards(value bool) FunctionOpt { + return func(fn *FunctionDecl) (*FunctionDecl, error) { + fn.disableTypeGuards = value + return fn, nil + } +} + +// DisableDeclaration indicates that the function declaration should be disabled, but the runtime function +// binding should be provided. Marking a function as runtime-only is a safe way to manage deprecations +// of function declarations while still preserving the runtime behavior for previously compiled expressions. +func DisableDeclaration(value bool) FunctionOpt { + return func(fn *FunctionDecl) (*FunctionDecl, error) { + if value { + fn.state = declarationDisabled + } else { + fn.state = declarationEnabled + } + return fn, nil + } +} + +// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt { + trait := 0 + for _, t := range traits { + trait = trait | t + } + return func(f *FunctionDecl) (*FunctionDecl, error) { + if f.singleton != nil { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) + } + f.singleton = &functions.Overload{ + Operator: f.Name(), + Unary: fn, + OperandTrait: trait, + } + return f, nil + } +} + +// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt { + trait := 0 + for _, t := range traits { + trait = trait | t + } + return func(f *FunctionDecl) (*FunctionDecl, error) { + if f.singleton != nil { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) + } + f.singleton = &functions.Overload{ + Operator: f.Name(), + Binary: fn, + OperandTrait: trait, + } + return f, nil + } +} + +// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt { + trait := 0 + for _, t := range traits { + trait = trait | t + } + return func(f *FunctionDecl) (*FunctionDecl, error) { + if f.singleton != nil { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) + } + f.singleton = &functions.Overload{ + Operator: f.Name(), + Function: fn, + OperandTrait: trait, + } + return f, nil + } +} + +// Overload defines a new global overload with an overload id, argument types, and result type. Through the +// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to +// be non-strict. 
+//
+// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
+// strict-ness should be rare occurrences.
+func Overload(overloadID string,
+	args []*types.Type, resultType *types.Type,
+	opts ...OverloadOpt) FunctionOpt {
+	return newOverload(overloadID, false, args, resultType, opts...)
+}
+
+// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types,
+// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding,
+// an operand trait, and to be non-strict.
+//
+// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
+// strict-ness should be rare occurrences.
+func MemberOverload(overloadID string,
+	args []*types.Type, resultType *types.Type,
+	opts ...OverloadOpt) FunctionOpt {
+	return newOverload(overloadID, true, args, resultType, opts...)
+}
+
+func newOverload(overloadID string,
+	memberFunction bool, args []*types.Type, resultType *types.Type,
+	opts ...OverloadOpt) FunctionOpt {
+	return func(f *FunctionDecl) (*FunctionDecl, error) {
+		overload, err := newOverloadInternal(overloadID, memberFunction, args, resultType, opts...)
+		if err != nil {
+			return nil, err
+		}
+		err = f.AddOverload(overload)
+		if err != nil {
+			return nil, err
+		}
+		return f, nil
+	}
+}
+
+func newOverloadInternal(overloadID string,
+	memberFunction bool, args []*types.Type, resultType *types.Type,
+	opts ...OverloadOpt) (*OverloadDecl, error) {
+	overload := &OverloadDecl{
+		id:               overloadID,
+		argTypes:         args,
+		resultType:       resultType,
+		isMemberFunction: memberFunction,
+	}
+	var err error
+	for _, opt := range opts {
+		overload, err = opt(overload)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return overload, nil
+}
+
+// OverloadDecl contains the definition of a single overload id with a specific signature, and an optional
+// implementation.
+type OverloadDecl struct {
+	id               string
+	argTypes         []*types.Type
+	resultType       *types.Type
+	isMemberFunction bool
+	// nonStrict indicates that the function will accept error and unknown arguments as inputs.
+	nonStrict bool
+	// operandTrait indicates whether the member argument should have a specific type-trait.
+	//
+	// This is useful for creating overloads which operate on a type-interface rather than a concrete type.
+	operandTrait int
+
+	// Function implementation options. Optional, but encouraged.
+	// unaryOp is a function binding that takes a single argument.
+	unaryOp functions.UnaryOp
+	// binaryOp is a function binding that takes two arguments.
+	binaryOp functions.BinaryOp
+	// functionOp is a catch-all for zero-arity and three-plus arity functions.
+	functionOp functions.FunctionOp
+}
+
+// ID mirrors the overload signature and provides a unique id which may be referenced within the type-checker
+// and interpreter to optimize performance.
+//
+// The ID format is usually one of two styles:
+// global: <functionName>_<argType>_<argTypeN>
+// member: <memberType>_<functionName>_<argType>_<argTypeN>
+func (o *OverloadDecl) ID() string {
+	if o == nil {
+		return ""
+	}
+	return o.id
+}
+
+// ArgTypes contains the set of argument types expected by the overload.
+//
+// For member functions ArgTypes[0] represents the member operand type.
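An editorial sketch (not vendored code) of a member-style overload declaration using the MemberOverload and BinaryBinding options from this file; the overload id follows the member naming style documented above:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/cel-go/common/decls"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
)

func main() {
	// 'string_contains_string' follows <memberType>_<functionName>_<argType>.
	fn, err := decls.NewFunction("contains",
		decls.MemberOverload("string_contains_string",
			[]*types.Type{types.StringType, types.StringType}, types.BoolType,
			decls.BinaryBinding(func(str, sub ref.Val) ref.Val {
				return types.Bool(strings.Contains(
					string(str.(types.String)), string(sub.(types.String))))
			})))
	if err != nil {
		panic(err)
	}
	fmt.Println(fn.OverloadDecls()[0].IsMemberFunction()) // true
}
```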
+func (o *OverloadDecl) ArgTypes() []*types.Type {
+	if o == nil {
+		return emptyArgs
+	}
+	return o.argTypes
+}
+
+// IsMemberFunction indicates whether the overload is a member function.
+func (o *OverloadDecl) IsMemberFunction() bool {
+	if o == nil {
+		return false
+	}
+	return o.isMemberFunction
+}
+
+// IsNonStrict returns whether the overload accepts errors and unknown values as arguments.
+func (o *OverloadDecl) IsNonStrict() bool {
+	if o == nil {
+		return false
+	}
+	return o.nonStrict
+}
+
+// OperandTrait returns the trait mask of the first operand to the overload call, e.g.
+// `traits.Indexer`.
+func (o *OverloadDecl) OperandTrait() int {
+	if o == nil {
+		return 0
+	}
+	return o.operandTrait
+}
+
+// ResultType indicates the output type from calling the function.
+func (o *OverloadDecl) ResultType() *types.Type {
+	if o == nil {
+		// *types.Type is nil-safe
+		return nil
+	}
+	return o.resultType
+}
+
+// TypeParams returns the type parameter names associated with the overload.
+func (o *OverloadDecl) TypeParams() []string {
+	typeParams := map[string]struct{}{}
+	collectParamNames(typeParams, o.ResultType())
+	for _, arg := range o.ArgTypes() {
+		collectParamNames(typeParams, arg)
+	}
+	params := make([]string, 0, len(typeParams))
+	for param := range typeParams {
+		params = append(params, param)
+	}
+	return params
+}
+
+// SignatureEquals determines whether the incoming overload declaration signature is equal to the current signature.
+//
+// Operand trait and strict-ness are not considered as part of signature equality, but the result
+// types must be equivalent.
+func (o *OverloadDecl) SignatureEquals(other *OverloadDecl) bool {
+	if o == other {
+		return true
+	}
+	if o.ID() != other.ID() || o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) {
+		return false
+	}
+	for i, at := range o.ArgTypes() {
+		oat := other.ArgTypes()[i]
+		if !at.IsEquivalentType(oat) {
+			return false
+		}
+	}
+	return o.ResultType().IsEquivalentType(other.ResultType())
+}
+
+// SignatureOverlaps indicates whether two functions have non-equal, but overlapping function signatures.
+//
+// For example, list(dyn) collides with list(string) since the 'dyn' type can contain a 'string' type.
+func (o *OverloadDecl) SignatureOverlaps(other *OverloadDecl) bool {
+	if o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) {
+		return false
+	}
+	argsOverlap := true
+	for i, argType := range o.ArgTypes() {
+		otherArgType := other.ArgTypes()[i]
+		argsOverlap = argsOverlap &&
+			(argType.IsAssignableType(otherArgType) ||
+				otherArgType.IsAssignableType(argType))
+	}
+	return argsOverlap
+}
+
+// hasBinding indicates whether the overload already has a definition.
+func (o *OverloadDecl) hasBinding() bool {
+	return o != nil && (o.unaryOp != nil || o.binaryOp != nil || o.functionOp != nil)
+}
+
+// guardedUnaryOp creates an invocation guard around the provided unary operator, if one is defined.
+func (o *OverloadDecl) guardedUnaryOp(funcName string, disableTypeGuards bool) functions.UnaryOp {
+	if o.unaryOp == nil {
+		return nil
+	}
+	return func(arg ref.Val) ref.Val {
+		if !o.matchesRuntimeUnarySignature(disableTypeGuards, arg) {
+			return MaybeNoSuchOverload(funcName, arg)
+		}
+		return o.unaryOp(arg)
+	}
+}
+
+// guardedBinaryOp creates an invocation guard around the provided binary operator, if one is defined.
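A short sketch (not vendored code) replaying the per-argument assignability test that SignatureOverlaps applies, using the common/types helpers:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	dynList := types.NewListType(types.DynType)
	strList := types.NewListType(types.StringType)

	// Mirrors the check in SignatureOverlaps: list(dyn) can hold a
	// list(string), so the two argument types overlap.
	overlaps := dynList.IsAssignableType(strList) ||
		strList.IsAssignableType(dynList)
	fmt.Println(overlaps) // true
}
```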
+func (o *OverloadDecl) guardedBinaryOp(funcName string, disableTypeGuards bool) functions.BinaryOp {
+	if o.binaryOp == nil {
+		return nil
+	}
+	return func(arg1, arg2 ref.Val) ref.Val {
+		if !o.matchesRuntimeBinarySignature(disableTypeGuards, arg1, arg2) {
+			return MaybeNoSuchOverload(funcName, arg1, arg2)
+		}
+		return o.binaryOp(arg1, arg2)
+	}
+}
+
+// guardedFunctionOp creates an invocation guard around the provided variadic function binding, if one is provided.
+func (o *OverloadDecl) guardedFunctionOp(funcName string, disableTypeGuards bool) functions.FunctionOp {
+	if o.functionOp == nil {
+		return nil
+	}
+	return func(args ...ref.Val) ref.Val {
+		if !o.matchesRuntimeSignature(disableTypeGuards, args...) {
+			return MaybeNoSuchOverload(funcName, args...)
+		}
+		return o.functionOp(args...)
+	}
+}
+
+// matchesRuntimeUnarySignature indicates whether the argument type is runtime assignable to the overload's expected argument.
+func (o *OverloadDecl) matchesRuntimeUnarySignature(disableTypeGuards bool, arg ref.Val) bool {
+	return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg) &&
+		matchOperandTrait(o.OperandTrait(), arg)
+}
+
+// matchesRuntimeBinarySignature indicates whether the argument types are runtime assignable to the overload's expected arguments.
+func (o *OverloadDecl) matchesRuntimeBinarySignature(disableTypeGuards bool, arg1, arg2 ref.Val) bool {
+	return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg1) &&
+		matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[1], arg2) &&
+		matchOperandTrait(o.OperandTrait(), arg1)
+}
+
+// matchesRuntimeSignature indicates whether the argument types are runtime assignable to the overload's expected arguments.
+func (o *OverloadDecl) matchesRuntimeSignature(disableTypeGuards bool, args ...ref.Val) bool {
+	if len(args) != len(o.ArgTypes()) {
+		return false
+	}
+	if len(args) == 0 {
+		return true
+	}
+	for i, arg := range args {
+		if !matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[i], arg) {
+			return false
+		}
+	}
+	return matchOperandTrait(o.OperandTrait(), args[0])
+}
+
+func matchRuntimeArgType(nonStrict, disableTypeGuards bool, argType *types.Type, arg ref.Val) bool {
+	if nonStrict && (disableTypeGuards || types.IsUnknownOrError(arg)) {
+		return true
+	}
+	if types.IsUnknownOrError(arg) {
+		return false
+	}
+	return disableTypeGuards || argType.IsAssignableRuntimeType(arg)
+}
+
+func matchOperandTrait(trait int, arg ref.Val) bool {
+	return trait == 0 || arg.Type().HasTrait(trait) || types.IsUnknownOrError(arg)
+}
+
+// OverloadOpt is a functional option for configuring a function overload.
+type OverloadOpt func(*OverloadDecl) (*OverloadDecl, error)
+
+// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
+	return func(o *OverloadDecl) (*OverloadDecl, error) {
+		if o.hasBinding() {
+			return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+		}
+		if len(o.ArgTypes()) != 1 {
+			return nil, fmt.Errorf("unary function bound to non-unary overload: %s", o.ID())
+		}
+		o.unaryOp = binding
+		return o, nil
+	}
+}
+
+// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
+	return func(o *OverloadDecl) (*OverloadDecl, error) {
+		if o.hasBinding() {
+			return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+		}
+		if len(o.ArgTypes()) != 2 {
+			return nil, fmt.Errorf("binary function bound to non-binary overload: %s", o.ID())
+		}
+		o.binaryOp = binding
+		return o, nil
+	}
+}
+
+// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
+	return func(o *OverloadDecl) (*OverloadDecl, error) {
+		if o.hasBinding() {
+			return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+		}
+		o.functionOp = binding
+		return o, nil
+	}
+}
+
+// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
+//
+// Note: do not use this option unless absolutely necessary as it should be an uncommon feature.
+func OverloadIsNonStrict() OverloadOpt {
+	return func(o *OverloadDecl) (*OverloadDecl, error) {
+		o.nonStrict = true
+		return o, nil
+	}
+}
+
+// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be
+// successfully invoked.
+func OverloadOperandTrait(trait int) OverloadOpt {
+	return func(o *OverloadDecl) (*OverloadDecl, error) {
+		o.operandTrait = trait
+		return o, nil
+	}
+}
+
+// NewConstant creates a new constant declaration.
+func NewConstant(name string, t *types.Type, v ref.Val) *VariableDecl {
+	return &VariableDecl{name: name, varType: t, value: v}
+}
+
+// NewVariable creates a new variable declaration.
+func NewVariable(name string, t *types.Type) *VariableDecl {
+	return &VariableDecl{name: name, varType: t}
+}
+
+// VariableDecl defines a variable declaration which may optionally have a constant value.
+type VariableDecl struct {
+	name    string
+	varType *types.Type
+	value   ref.Val
+}
+
+// Name returns the fully-qualified variable name.
+func (v *VariableDecl) Name() string {
+	if v == nil {
+		return ""
+	}
+	return v.name
+}
+
+// Type returns the types.Type value associated with the variable.
+func (v *VariableDecl) Type() *types.Type {
+	if v == nil {
+		// types.Type is nil-safe
+		return nil
+	}
+	return v.varType
+}
+
+// Value returns the constant value associated with the declaration.
+func (v *VariableDecl) Value() ref.Val {
+	if v == nil {
+		return nil
+	}
+	return v.value
+}
+
+// DeclarationIsEquivalent returns true if one variable declaration has the same name and same type as the input.
+func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool {
+	if v == other {
+		return true
+	}
+	return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type())
+}
+
+// TypeVariable creates a new type identifier for use within a types.Provider.
+func TypeVariable(t *types.Type) *VariableDecl {
+	return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t))
+}
+
+// variableDeclToExprDecl converts a go-native variable declaration into a protobuf-typed variable declaration.
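An editorial sketch (not vendored code) of declaring a variable and a constant with the helpers above; types.NewMapType and the type singletons come from the common/types package:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/decls"
	"github.com/google/cel-go/common/types"
)

func main() {
	req := decls.NewVariable("request",
		types.NewMapType(types.StringType, types.DynType))
	pi := decls.NewConstant("pi", types.DoubleType, types.Double(3.14159))

	fmt.Println(req.Name(), req.Type()) // request and its map type
	fmt.Println(pi.Name(), pi.Value())  // pi 3.14159
}
```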
+func variableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { + varType, err := types.TypeToExprType(v.Type()) + if err != nil { + return nil, err + } + return chkdecls.NewVar(v.Name(), varType), nil +} + +// functionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. +func functionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { + overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads)) + for i, oID := range f.overloadOrdinals { + o := f.overloads[oID] + paramNames := map[string]struct{}{} + argTypes := make([]*exprpb.Type, len(o.ArgTypes())) + for j, a := range o.ArgTypes() { + collectParamNames(paramNames, a) + at, err := types.TypeToExprType(a) + if err != nil { + return nil, err + } + argTypes[j] = at + } + collectParamNames(paramNames, o.ResultType()) + resultType, err := types.TypeToExprType(o.ResultType()) + if err != nil { + return nil, err + } + if len(paramNames) == 0 { + if o.IsMemberFunction() { + overloads[i] = chkdecls.NewInstanceOverload(oID, argTypes, resultType) + } else { + overloads[i] = chkdecls.NewOverload(oID, argTypes, resultType) + } + } else { + params := []string{} + for pn := range paramNames { + params = append(params, pn) + } + if o.IsMemberFunction() { + overloads[i] = chkdecls.NewParameterizedInstanceOverload(oID, argTypes, resultType, params) + } else { + overloads[i] = chkdecls.NewParameterizedOverload(oID, argTypes, resultType, params) + } + } + } + return chkdecls.NewFunction(f.Name(), overloads...), nil +} + +func collectParamNames(paramNames map[string]struct{}, arg *types.Type) { + if arg.Kind() == types.TypeParamKind { + paramNames[arg.TypeName()] = struct{}{} + } + for _, param := range arg.Parameters() { + collectParamNames(paramNames, param) + } +} + +var ( + emptyArgs = []*types.Type{} +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/doc.go b/hack/tools/vendor/github.com/google/cel-go/common/doc.go new file mode 100644 index 0000000000..5362fdfe4b --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package common defines types and utilities common to expression parsing, +// checking, and interpretation +package common diff --git a/hack/tools/vendor/github.com/google/cel-go/common/error.go b/hack/tools/vendor/github.com/google/cel-go/common/error.go new file mode 100644 index 0000000000..0cf21345e6 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/error.go @@ -0,0 +1,74 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// NewError creates an error associated with an expression id with the given message at the given location. +func NewError(id int64, message string, location Location) *Error { + return &Error{Message: message, Location: location, ExprID: id} +} + +// Error type which references an expression id, a location within source, and a message. +type Error struct { + Location Location + Message string + ExprID int64 +} + +const ( + dot = "." + ind = "^" + wideDot = "\uff0e" + wideInd = "\uff3e" + + // maxSnippetLength is the largest number of characters which can be rendered in an error message snippet. + maxSnippetLength = 16384 +) + +// ToDisplayString decorates the error message with the source location. +func (e *Error) ToDisplayString(source Source) string { + var result = fmt.Sprintf("ERROR: %s:%d:%d: %s", + source.Description(), + e.Location.Line(), + e.Location.Column()+1, // add one to the 0-based column for display + e.Message) + if snippet, found := source.Snippet(e.Location.Line()); found && len(snippet) <= maxSnippetLength { + snippet := strings.Replace(snippet, "\t", " ", -1) + srcLine := "\n | " + snippet + var bytes = []byte(snippet) + var indLine = "\n | " + for i := 0; i < e.Location.Column() && len(bytes) > 0; i++ { + _, sz := utf8.DecodeRune(bytes) + bytes = bytes[sz:] + if sz > 1 { + indLine += wideDot + } else { + indLine += dot + } + } + if _, sz := utf8.DecodeRune(bytes); sz > 1 { + indLine += wideInd + } else { + indLine += ind + } + result += srcLine + indLine + } + return result +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/errors.go b/hack/tools/vendor/github.com/google/cel-go/common/errors.go new file mode 100644 index 0000000000..25adc73d8e --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/errors.go @@ -0,0 +1,103 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +import ( + "fmt" + "sort" + "strings" +) + +// Errors type which contains a list of errors observed during parsing. +type Errors struct { + errors []*Error + source Source + numErrors int + maxErrorsToReport int +} + +// NewErrors creates a new instance of the Errors type. +func NewErrors(source Source) *Errors { + return &Errors{ + errors: []*Error{}, + source: source, + maxErrorsToReport: 100, + } +} + +// ReportError records an error at a source location. +func (e *Errors) ReportError(l Location, format string, args ...any) { + e.ReportErrorAtID(0, l, format, args...) 
+} + +// ReportErrorAtID records an error at a source location and expression id. +func (e *Errors) ReportErrorAtID(id int64, l Location, format string, args ...any) { + e.numErrors++ + if e.numErrors > e.maxErrorsToReport { + return + } + err := &Error{ + ExprID: id, + Location: l, + Message: fmt.Sprintf(format, args...), + } + e.errors = append(e.errors, err) +} + +// GetErrors returns the list of observed errors. +func (e *Errors) GetErrors() []*Error { + return e.errors[:] +} + +// Append creates a new Errors object with the current and input errors. +func (e *Errors) Append(errs []*Error) *Errors { + return &Errors{ + errors: append(e.errors[:], errs...), + source: e.source, + numErrors: e.numErrors + len(errs), + maxErrorsToReport: e.maxErrorsToReport, + } +} + +// ToDisplayString returns the error set to a newline delimited string. +func (e *Errors) ToDisplayString() string { + errorsInString := e.maxErrorsToReport + if e.numErrors > e.maxErrorsToReport { + // add one more error to indicate the number of errors truncated. + errorsInString++ + } else { + // otherwise the error set will just contain the number of errors. + errorsInString = e.numErrors + } + + result := make([]string, errorsInString) + sort.SliceStable(e.errors, func(i, j int) bool { + ei := e.errors[i].Location + ej := e.errors[j].Location + return ei.Line() < ej.Line() || + (ei.Line() == ej.Line() && ei.Column() < ej.Column()) + }) + for i, err := range e.errors { + // This can happen during the append of two errors objects + if i >= e.maxErrorsToReport { + break + } + result[i] = err.ToDisplayString(e.source) + } + if e.numErrors > e.maxErrorsToReport { + result[e.maxErrorsToReport] = fmt.Sprintf("%d more errors were truncated", e.numErrors-e.maxErrorsToReport) + } + return strings.Join(result, "\n") +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/functions/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/functions/BUILD.bazel new file mode 100644 index 0000000000..3cc27d60ce --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/functions/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "functions.go", + ], + importpath = "github.com/google/cel-go/common/functions", + deps = [ + "//common/types/ref:go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/functions/functions.go b/hack/tools/vendor/github.com/google/cel-go/common/functions/functions.go new file mode 100644 index 0000000000..67f4a5944e --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/functions/functions.go @@ -0,0 +1,61 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
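An editorial sketch (not vendored code) of the Errors type from errors.go above. NewTextSource is assumed from common/source.go, and the rendered output shape shown in the comments is approximate:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common"
)

func main() {
	src := common.NewTextSource("a + b")
	errs := common.NewErrors(src)
	// Column 4 (0-based) points at 'b'; the display adds one for readability.
	errs.ReportError(common.NewLocation(1, 4), "undeclared reference to '%s'", "b")
	fmt.Println(errs.ToDisplayString())
	// Expected shape:
	// ERROR: <input>:1:5: undeclared reference to 'b'
	//  | a + b
	//  | ....^
}
```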
+
+// Package functions defines the standard builtin functions supported by the interpreter.
+package functions
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Overload defines a named overload of a function, indicating an operand trait
+// which must be present on the first argument to the overload as well as one
+// of either a unary, binary, or function implementation.
+//
+// The majority of operators within the expression language are unary or binary
+// and the specializations simplify the call contract for implementers of
+// types with operator overloads. Any added complexity is assumed to be handled
+// by the generic FunctionOp.
+type Overload struct {
+	// Operator name as written in an expression or defined within
+	// operators.go.
+	Operator string
+
+	// Operand trait used to dispatch the call. The zero-value indicates a
+	// global function overload or that one of the Unary / Binary / Function
+	// definitions should be used to execute the call.
+	OperandTrait int
+
+	// Unary defines the overload with a UnaryOp implementation. May be nil.
+	Unary UnaryOp
+
+	// Binary defines the overload with a BinaryOp implementation. May be nil.
+	Binary BinaryOp
+
+	// Function defines the overload with a FunctionOp implementation. May be
+	// nil.
+	Function FunctionOp
+
+	// NonStrict specifies whether the Overload will tolerate arguments that
+	// are types.Err or types.Unknown.
+	NonStrict bool
+}
+
+// UnaryOp is a function that takes a single value and produces an output.
+type UnaryOp func(value ref.Val) ref.Val
+
+// BinaryOp is a function that takes two values and produces an output.
+type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val
+
+// FunctionOp is a function which accepts zero or more arguments and produces
+// a value or error as a result.
+type FunctionOp func(values ...ref.Val) ref.Val
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/location.go b/hack/tools/vendor/github.com/google/cel-go/common/location.go
new file mode 100644
index 0000000000..ec3fa7cb50
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/location.go
@@ -0,0 +1,51 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+// Location interface to represent a location within Source.
+type Location interface {
+	Line() int   // 1-based line number within source.
+	Column() int // 0-based column number within source.
+}
+
+// SourceLocation helper type to manually construct a location.
+type SourceLocation struct {
+	line   int
+	column int
+}
+
+var (
+	// SourceLocation implements the Location interface.
+	_ Location = &SourceLocation{}
+	// NoLocation is a particular illegal location.
+	NoLocation = &SourceLocation{-1, -1}
+)
+
+// NewLocation creates a new location.
+func NewLocation(line, column int) Location {
+	return &SourceLocation{
+		line:   line,
+		column: column}
+}
+
+// Line returns the 1-based line of the location.
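A minimal sketch (not vendored code) of an Overload value using only the Unary field, as described by the struct above:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/functions"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
)

func main() {
	neg := &functions.Overload{
		Operator: "neg",
		// Only the Unary field is set; Binary and Function remain nil.
		Unary: func(v ref.Val) ref.Val {
			return -v.(types.Int)
		},
	}
	fmt.Println(neg.Unary(types.Int(42))) // -42
}
```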
+func (l *SourceLocation) Line() int {
+	return l.line
+}
+
+// Column returns the 0-based column number of the location.
+func (l *SourceLocation) Column() int {
+	return l.column
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/operators/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/operators/BUILD.bazel
new file mode 100644
index 0000000000..b5b67f0623
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/operators/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+    default_visibility = ["//visibility:public"],
+    licenses = ["notice"],  # Apache 2.0
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "operators.go",
+    ],
+    importpath = "github.com/google/cel-go/common/operators",
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/operators/operators.go b/hack/tools/vendor/github.com/google/cel-go/common/operators/operators.go
new file mode 100644
index 0000000000..f9b39bda3f
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/operators/operators.go
@@ -0,0 +1,157 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package operators defines the internal function names of operators.
+//
+// All operators in the expression language are modelled as function calls.
+package operators
+
+// String "names" for CEL operators.
+const (
+	// Symbolic operators.
+	Conditional   = "_?_:_"
+	LogicalAnd    = "_&&_"
+	LogicalOr     = "_||_"
+	LogicalNot    = "!_"
+	Equals        = "_==_"
+	NotEquals     = "_!=_"
+	Less          = "_<_"
+	LessEquals    = "_<=_"
+	Greater       = "_>_"
+	GreaterEquals = "_>=_"
+	Add           = "_+_"
+	Subtract      = "_-_"
+	Multiply      = "_*_"
+	Divide        = "_/_"
+	Modulo        = "_%_"
+	Negate        = "-_"
+	Index         = "_[_]"
+	OptIndex      = "_[?_]"
+	OptSelect     = "_?._"
+
+	// Macros, must have a valid identifier.
+	Has       = "has"
+	All       = "all"
+	Exists    = "exists"
+	ExistsOne = "exists_one"
+	Map       = "map"
+	Filter    = "filter"
+
+	// Named operators, must not be valid identifiers.
+	NotStrictlyFalse = "@not_strictly_false"
+	In               = "@in"
+
+	// Deprecated: named operators with valid identifiers.
+	OldNotStrictlyFalse = "__not_strictly_false__"
+	OldIn               = "_in_"
+)
+
+var (
+	operators = map[string]string{
+		"+":  Add,
+		"/":  Divide,
+		"==": Equals,
+		">":  Greater,
+		">=": GreaterEquals,
+		"in": In,
+		"<":  Less,
+		"<=": LessEquals,
+		"%":  Modulo,
+		"*":  Multiply,
+		"!=": NotEquals,
+		"-":  Subtract,
+	}
+	// operatorMap of the operator symbol which refers to a struct containing the display name,
+	// if applicable, the operator precedence, and the arity.
+	//
+	// If the symbol does not have a display name listed in the map, it is only because it requires
+	// special casing to render properly as text.
+ operatorMap = map[string]struct { + displayName string + precedence int + arity int + }{ + Conditional: {displayName: "", precedence: 8, arity: 3}, + LogicalOr: {displayName: "||", precedence: 7, arity: 2}, + LogicalAnd: {displayName: "&&", precedence: 6, arity: 2}, + Equals: {displayName: "==", precedence: 5, arity: 2}, + Greater: {displayName: ">", precedence: 5, arity: 2}, + GreaterEquals: {displayName: ">=", precedence: 5, arity: 2}, + In: {displayName: "in", precedence: 5, arity: 2}, + Less: {displayName: "<", precedence: 5, arity: 2}, + LessEquals: {displayName: "<=", precedence: 5, arity: 2}, + NotEquals: {displayName: "!=", precedence: 5, arity: 2}, + OldIn: {displayName: "in", precedence: 5, arity: 2}, + Add: {displayName: "+", precedence: 4, arity: 2}, + Subtract: {displayName: "-", precedence: 4, arity: 2}, + Divide: {displayName: "/", precedence: 3, arity: 2}, + Modulo: {displayName: "%", precedence: 3, arity: 2}, + Multiply: {displayName: "*", precedence: 3, arity: 2}, + LogicalNot: {displayName: "!", precedence: 2, arity: 1}, + Negate: {displayName: "-", precedence: 2, arity: 1}, + Index: {displayName: "", precedence: 1, arity: 2}, + OptIndex: {displayName: "", precedence: 1, arity: 2}, + OptSelect: {displayName: "", precedence: 1, arity: 2}, + } +) + +// Find the internal function name for an operator, if the input text is one. +func Find(text string) (string, bool) { + op, found := operators[text] + return op, found +} + +// FindReverse returns the unmangled, text representation of the operator. +func FindReverse(symbol string) (string, bool) { + op, found := operatorMap[symbol] + if !found { + return "", false + } + return op.displayName, true +} + +// FindReverseBinaryOperator returns the unmangled, text representation of a binary operator. +// +// If the symbol does refer to an operator, but the operator does not have a display name the +// result is false. +func FindReverseBinaryOperator(symbol string) (string, bool) { + op, found := operatorMap[symbol] + if !found || op.arity != 2 { + return "", false + } + if op.displayName == "" { + return "", false + } + return op.displayName, true +} + +// Precedence returns the operator precedence, where the higher the number indicates +// higher precedence operations. 
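An editorial sketch (not vendored code) exercising the lookup helpers in this file: Find, FindReverseBinaryOperator, and the Precedence and Arity helpers defined next. The expected results follow directly from the operatorMap above:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/operators"
)

func main() {
	sym, found := operators.Find("<=")
	fmt.Println(sym, found) // _<=_ true

	disp, _ := operators.FindReverseBinaryOperator(operators.LessEquals)
	fmt.Println(disp) // <=

	fmt.Println(operators.Precedence(operators.Add))    // 4
	fmt.Println(operators.Arity(operators.Conditional)) // 3
}
```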
+func Precedence(symbol string) int {
+	op, found := operatorMap[symbol]
+	if !found {
+		return 0
+	}
+	return op.precedence
+}
+
+// Arity returns the number of arguments the operator takes.
+// -1 is returned if an undefined symbol is provided.
+func Arity(symbol string) int {
+	op, found := operatorMap[symbol]
+	if !found {
+		return -1
+	}
+	return op.arity
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
new file mode 100644
index 0000000000..e46e2f4830
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+    default_visibility = ["//visibility:public"],
+    licenses = ["notice"],  # Apache 2.0
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "overloads.go",
+    ],
+    importpath = "github.com/google/cel-go/common/overloads",
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/overloads/overloads.go b/hack/tools/vendor/github.com/google/cel-go/common/overloads/overloads.go
new file mode 100644
index 0000000000..9d50f4367b
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/overloads/overloads.go
@@ -0,0 +1,327 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package overloads defines the internal overload identifiers for function and
+// operator overloads.
+package overloads + +// Boolean logic overloads +const ( + Conditional = "conditional" + LogicalAnd = "logical_and" + LogicalOr = "logical_or" + LogicalNot = "logical_not" + NotStrictlyFalse = "not_strictly_false" + Equals = "equals" + NotEquals = "not_equals" + LessBool = "less_bool" + LessInt64 = "less_int64" + LessInt64Double = "less_int64_double" + LessInt64Uint64 = "less_int64_uint64" + LessUint64 = "less_uint64" + LessUint64Double = "less_uint64_double" + LessUint64Int64 = "less_uint64_int64" + LessDouble = "less_double" + LessDoubleInt64 = "less_double_int64" + LessDoubleUint64 = "less_double_uint64" + LessString = "less_string" + LessBytes = "less_bytes" + LessTimestamp = "less_timestamp" + LessDuration = "less_duration" + LessEqualsBool = "less_equals_bool" + LessEqualsInt64 = "less_equals_int64" + LessEqualsInt64Double = "less_equals_int64_double" + LessEqualsInt64Uint64 = "less_equals_int64_uint64" + LessEqualsUint64 = "less_equals_uint64" + LessEqualsUint64Double = "less_equals_uint64_double" + LessEqualsUint64Int64 = "less_equals_uint64_int64" + LessEqualsDouble = "less_equals_double" + LessEqualsDoubleInt64 = "less_equals_double_int64" + LessEqualsDoubleUint64 = "less_equals_double_uint64" + LessEqualsString = "less_equals_string" + LessEqualsBytes = "less_equals_bytes" + LessEqualsTimestamp = "less_equals_timestamp" + LessEqualsDuration = "less_equals_duration" + GreaterBool = "greater_bool" + GreaterInt64 = "greater_int64" + GreaterInt64Double = "greater_int64_double" + GreaterInt64Uint64 = "greater_int64_uint64" + GreaterUint64 = "greater_uint64" + GreaterUint64Double = "greater_uint64_double" + GreaterUint64Int64 = "greater_uint64_int64" + GreaterDouble = "greater_double" + GreaterDoubleInt64 = "greater_double_int64" + GreaterDoubleUint64 = "greater_double_uint64" + GreaterString = "greater_string" + GreaterBytes = "greater_bytes" + GreaterTimestamp = "greater_timestamp" + GreaterDuration = "greater_duration" + GreaterEqualsBool = "greater_equals_bool" + GreaterEqualsInt64 = "greater_equals_int64" + GreaterEqualsInt64Double = "greater_equals_int64_double" + GreaterEqualsInt64Uint64 = "greater_equals_int64_uint64" + GreaterEqualsUint64 = "greater_equals_uint64" + GreaterEqualsUint64Double = "greater_equals_uint64_double" + GreaterEqualsUint64Int64 = "greater_equals_uint64_int64" + GreaterEqualsDouble = "greater_equals_double" + GreaterEqualsDoubleInt64 = "greater_equals_double_int64" + GreaterEqualsDoubleUint64 = "greater_equals_double_uint64" + GreaterEqualsString = "greater_equals_string" + GreaterEqualsBytes = "greater_equals_bytes" + GreaterEqualsTimestamp = "greater_equals_timestamp" + GreaterEqualsDuration = "greater_equals_duration" +) + +// Math overloads +const ( + AddInt64 = "add_int64" + AddUint64 = "add_uint64" + AddDouble = "add_double" + AddString = "add_string" + AddBytes = "add_bytes" + AddList = "add_list" + AddTimestampDuration = "add_timestamp_duration" + AddDurationTimestamp = "add_duration_timestamp" + AddDurationDuration = "add_duration_duration" + SubtractInt64 = "subtract_int64" + SubtractUint64 = "subtract_uint64" + SubtractDouble = "subtract_double" + SubtractTimestampTimestamp = "subtract_timestamp_timestamp" + SubtractTimestampDuration = "subtract_timestamp_duration" + SubtractDurationDuration = "subtract_duration_duration" + MultiplyInt64 = "multiply_int64" + MultiplyUint64 = "multiply_uint64" + MultiplyDouble = "multiply_double" + DivideInt64 = "divide_int64" + DivideUint64 = "divide_uint64" + DivideDouble = "divide_double" + ModuloInt64 = 
"modulo_int64" + ModuloUint64 = "modulo_uint64" + NegateInt64 = "negate_int64" + NegateDouble = "negate_double" +) + +// Index overloads +const ( + IndexList = "index_list" + IndexMap = "index_map" + IndexMessage = "index_message" // TODO: introduce concept of types.Message +) + +// In operators +const ( + DeprecatedIn = "in" + InList = "in_list" + InMap = "in_map" + InMessage = "in_message" // TODO: introduce concept of types.Message +) + +// Size overloads +const ( + Size = "size" + SizeString = "size_string" + SizeBytes = "size_bytes" + SizeList = "size_list" + SizeMap = "size_map" + SizeStringInst = "string_size" + SizeBytesInst = "bytes_size" + SizeListInst = "list_size" + SizeMapInst = "map_size" +) + +// String function names. +const ( + Contains = "contains" + EndsWith = "endsWith" + Matches = "matches" + StartsWith = "startsWith" +) + +// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations. +const ( + ExtQuoteString = "strings_quote" +) + +// String function overload names. +const ( + ContainsString = "contains_string" + EndsWithString = "ends_with_string" + MatchesString = "matches_string" + StartsWithString = "starts_with_string" +) + +// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations. +const ( + ExtFormatString = "string_format" +) + +// Time-based functions. +const ( + TimeGetFullYear = "getFullYear" + TimeGetMonth = "getMonth" + TimeGetDayOfYear = "getDayOfYear" + TimeGetDate = "getDate" + TimeGetDayOfMonth = "getDayOfMonth" + TimeGetDayOfWeek = "getDayOfWeek" + TimeGetHours = "getHours" + TimeGetMinutes = "getMinutes" + TimeGetSeconds = "getSeconds" + TimeGetMilliseconds = "getMilliseconds" +) + +// Timestamp overloads for time functions without timezones. +const ( + TimestampToYear = "timestamp_to_year" + TimestampToMonth = "timestamp_to_month" + TimestampToDayOfYear = "timestamp_to_day_of_year" + TimestampToDayOfMonthZeroBased = "timestamp_to_day_of_month" + TimestampToDayOfMonthOneBased = "timestamp_to_day_of_month_1_based" + TimestampToDayOfWeek = "timestamp_to_day_of_week" + TimestampToHours = "timestamp_to_hours" + TimestampToMinutes = "timestamp_to_minutes" + TimestampToSeconds = "timestamp_to_seconds" + TimestampToMilliseconds = "timestamp_to_milliseconds" +) + +// Timestamp overloads for time functions with timezones. +const ( + TimestampToYearWithTz = "timestamp_to_year_with_tz" + TimestampToMonthWithTz = "timestamp_to_month_with_tz" + TimestampToDayOfYearWithTz = "timestamp_to_day_of_year_with_tz" + TimestampToDayOfMonthZeroBasedWithTz = "timestamp_to_day_of_month_with_tz" + TimestampToDayOfMonthOneBasedWithTz = "timestamp_to_day_of_month_1_based_with_tz" + TimestampToDayOfWeekWithTz = "timestamp_to_day_of_week_with_tz" + TimestampToHoursWithTz = "timestamp_to_hours_with_tz" + TimestampToMinutesWithTz = "timestamp_to_minutes_with_tz" + TimestampToSecondsWithTz = "timestamp_to_seconds_tz" + TimestampToMillisecondsWithTz = "timestamp_to_milliseconds_with_tz" +) + +// Duration overloads for time functions. 
+const (
+	DurationToHours        = "duration_to_hours"
+	DurationToMinutes      = "duration_to_minutes"
+	DurationToSeconds      = "duration_to_seconds"
+	DurationToMilliseconds = "duration_to_milliseconds"
+)
+
+// Type conversion methods and overloads
+const (
+	TypeConvertInt       = "int"
+	TypeConvertUint      = "uint"
+	TypeConvertDouble    = "double"
+	TypeConvertBool      = "bool"
+	TypeConvertString    = "string"
+	TypeConvertBytes     = "bytes"
+	TypeConvertTimestamp = "timestamp"
+	TypeConvertDuration  = "duration"
+	TypeConvertType      = "type"
+	TypeConvertDyn       = "dyn"
+)
+
+// Int conversion functions.
+const (
+	IntToInt       = "int64_to_int64"
+	UintToInt      = "uint64_to_int64"
+	DoubleToInt    = "double_to_int64"
+	StringToInt    = "string_to_int64"
+	TimestampToInt = "timestamp_to_int64"
+	DurationToInt  = "duration_to_int64"
+)
+
+// Uint conversion functions.
+const (
+	UintToUint   = "uint64_to_uint64"
+	IntToUint    = "int64_to_uint64"
+	DoubleToUint = "double_to_uint64"
+	StringToUint = "string_to_uint64"
+)
+
+// Double conversion functions.
+const (
+	DoubleToDouble = "double_to_double"
+	IntToDouble    = "int64_to_double"
+	UintToDouble   = "uint64_to_double"
+	StringToDouble = "string_to_double"
+)
+
+// Bool conversion functions.
+const (
+	BoolToBool   = "bool_to_bool"
+	StringToBool = "string_to_bool"
+)
+
+// Bytes conversion functions.
+const (
+	BytesToBytes  = "bytes_to_bytes"
+	StringToBytes = "string_to_bytes"
+)
+
+// String conversion functions.
+const (
+	StringToString    = "string_to_string"
+	BoolToString      = "bool_to_string"
+	IntToString       = "int64_to_string"
+	UintToString      = "uint64_to_string"
+	DoubleToString    = "double_to_string"
+	BytesToString     = "bytes_to_string"
+	TimestampToString = "timestamp_to_string"
+	DurationToString  = "duration_to_string"
+)
+
+// Timestamp conversion functions
+const (
+	TimestampToTimestamp = "timestamp_to_timestamp"
+	StringToTimestamp    = "string_to_timestamp"
+	IntToTimestamp       = "int64_to_timestamp"
+)
+
+// Duration conversion functions
+const (
+	DurationToDuration = "duration_to_duration"
+	StringToDuration   = "string_to_duration"
+	IntToDuration      = "int64_to_duration"
+)
+
+// Convert to dyn
+const (
+	ToDyn = "to_dyn"
+)
+
+// Comprehensions helper methods, not directly accessible to a developer.
+const (
+	Iterator = "@iterator"
+	HasNext  = "@hasNext"
+	Next     = "@next"
+)
+
+// IsTypeConversionFunction returns whether the input function is a standard library type
+// conversion function.
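+// For example, IsTypeConversionFunction("int") returns true, while
+// IsTypeConversionFunction("size") returns false.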
+func IsTypeConversionFunction(function string) bool { + switch function { + case TypeConvertBool, + TypeConvertBytes, + TypeConvertDouble, + TypeConvertDuration, + TypeConvertDyn, + TypeConvertInt, + TypeConvertString, + TypeConvertTimestamp, + TypeConvertType, + TypeConvertUint: + return true + default: + return false + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/runes/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/runes/BUILD.bazel new file mode 100644 index 0000000000..bb30242cfa --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/runes/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "buffer.go", + ], + importpath = "github.com/google/cel-go/common/runes", +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "buffer_test.go", + ], + embed = [ + ":go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/runes/buffer.go b/hack/tools/vendor/github.com/google/cel-go/common/runes/buffer.go new file mode 100644 index 0000000000..021198224d --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/runes/buffer.go @@ -0,0 +1,242 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package runes provides interfaces and utilities for working with runes. +package runes + +import ( + "strings" + "unicode/utf8" +) + +// Buffer is an interface for accessing a contiguous array of code points. +type Buffer interface { + Get(i int) rune + Slice(i, j int) string + Len() int +} + +type emptyBuffer struct{} + +func (e *emptyBuffer) Get(i int) rune { + panic("slice index out of bounds") +} + +func (e *emptyBuffer) Slice(i, j int) string { + if i != 0 || i != j { + panic("slice index out of bounds") + } + return "" +} + +func (e *emptyBuffer) Len() int { + return 0 +} + +var _ Buffer = &emptyBuffer{} + +// asciiBuffer is an implementation for an array of code points that contain code points only from +// the ASCII character set. +type asciiBuffer struct { + arr []byte +} + +func (a *asciiBuffer) Get(i int) rune { + return rune(uint32(a.arr[i])) +} + +func (a *asciiBuffer) Slice(i, j int) string { + return string(a.arr[i:j]) +} + +func (a *asciiBuffer) Len() int { + return len(a.arr) +} + +var _ Buffer = &asciiBuffer{} + +// basicBuffer is an implementation for an array of code points that contain code points from both +// the Latin-1 character set and Basic Multilingual Plane. +type basicBuffer struct { + arr []uint16 +} + +func (b *basicBuffer) Get(i int) rune { + return rune(uint32(b.arr[i])) +} + +func (b *basicBuffer) Slice(i, j int) string { + var str strings.Builder + str.Grow((j - i) * 3) // Worst case encoding size for 0xffff is 3. 
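+	// Every element of a basicBuffer lies in the Basic Multilingual Plane, so
+	// the WriteRune calls below emit at most three UTF-8 bytes per code point.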
+	for ; i < j; i++ {
+		str.WriteRune(rune(uint32(b.arr[i])))
+	}
+	return str.String()
+}
+
+func (b *basicBuffer) Len() int {
+	return len(b.arr)
+}
+
+var _ Buffer = &basicBuffer{}
+
+// supplementalBuffer is an implementation for an array of code points that contain code points from
+// the Latin-1 character set, Basic Multilingual Plane, or the Supplemental Multilingual Plane.
+type supplementalBuffer struct {
+	arr []rune
+}
+
+func (s *supplementalBuffer) Get(i int) rune {
+	return rune(uint32(s.arr[i]))
+}
+
+func (s *supplementalBuffer) Slice(i, j int) string {
+	return string(s.arr[i:j])
+}
+
+func (s *supplementalBuffer) Len() int {
+	return len(s.arr)
+}
+
+var _ Buffer = &supplementalBuffer{}
+
+var nilBuffer = &emptyBuffer{}
+
+// NewBuffer returns an efficient implementation of Buffer for the given text based on the ranges of
+// the encoded code points contained within.
+//
+// Code points are represented as an array of byte, uint16, or rune. This approach ensures that
+// each index represents a code point by itself without needing to use an array of rune. At first
+// we assume all code points are less than or equal to '\u007f'. If this holds true, the
+// underlying storage is a byte array containing only ASCII characters. If we encounter a code
+// point above this range but less than or equal to '\uffff', we allocate a uint16 array, copy the
+// elements of the previous byte array to the uint16 array, and continue. If this holds true, the
+// underlying storage is a uint16 array containing only Unicode characters in the Basic Multilingual
+// Plane. If we encounter a code point above '\uffff', we allocate a rune array, copy the previous
+// elements of the byte or uint16 array, and continue. The underlying storage is a rune array
+// containing any Unicode character.
+func NewBuffer(data string) Buffer {
+	buf, _ := newBuffer(data, false)
+	return buf
+}
+
+// NewBufferAndLineOffsets returns an efficient implementation of Buffer for the given text based on
+// the ranges of the encoded code points contained within, as well as returning the line offsets.
+//
+// Code points are represented as an array of byte, uint16, or rune. This approach ensures that
+// each index represents a code point by itself without needing to use an array of rune. At first
+// we assume all code points are less than or equal to '\u007f'. If this holds true, the
+// underlying storage is a byte array containing only ASCII characters. If we encounter a code
+// point above this range but less than or equal to '\uffff', we allocate a uint16 array, copy the
+// elements of the previous byte array to the uint16 array, and continue. If this holds true, the
+// underlying storage is a uint16 array containing only Unicode characters in the Basic Multilingual
+// Plane. If we encounter a code point above '\uffff', we allocate a rune array, copy the previous
+// elements of the byte or uint16 array, and continue. The underlying storage is a rune array
+// containing any Unicode character.
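+//
+// Each '\n' in the input contributes an offset pointing just past the newline,
+// and a final entry is appended for EOF as though the source ended in a newline.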
+func NewBufferAndLineOffsets(data string) (Buffer, []int32) {
+	return newBuffer(data, true)
+}
+
+func newBuffer(data string, lines bool) (Buffer, []int32) {
+	if len(data) == 0 {
+		return nilBuffer, []int32{0}
+	}
+	var (
+		idx        = 0
+		off  int32 = 0
+		buf8       = make([]byte, 0, len(data))
+		buf16 []uint16
+		buf32 []rune
+		offs  []int32
+	)
+	for idx < len(data) {
+		r, s := utf8.DecodeRuneInString(data[idx:])
+		idx += s
+		if lines && r == '\n' {
+			offs = append(offs, off+1)
+		}
+		if r < utf8.RuneSelf {
+			buf8 = append(buf8, byte(r))
+			off++
+			continue
+		}
+		if r <= 0xffff {
+			buf16 = make([]uint16, len(buf8), len(data))
+			for i, v := range buf8 {
+				buf16[i] = uint16(v)
+			}
+			buf8 = nil
+			buf16 = append(buf16, uint16(r))
+			off++
+			goto copy16
+		}
+		buf32 = make([]rune, len(buf8), len(data))
+		for i, v := range buf8 {
+			buf32[i] = rune(uint32(v))
+		}
+		buf8 = nil
+		buf32 = append(buf32, r)
+		off++
+		goto copy32
+	}
+	if lines {
+		offs = append(offs, off+1)
+	}
+	return &asciiBuffer{
+		arr: buf8,
+	}, offs
+copy16:
+	for idx < len(data) {
+		r, s := utf8.DecodeRuneInString(data[idx:])
+		idx += s
+		if lines && r == '\n' {
+			offs = append(offs, off+1)
+		}
+		if r <= 0xffff {
+			buf16 = append(buf16, uint16(r))
+			off++
+			continue
+		}
+		buf32 = make([]rune, len(buf16), len(data))
+		for i, v := range buf16 {
+			buf32[i] = rune(uint32(v))
+		}
+		buf16 = nil
+		buf32 = append(buf32, r)
+		off++
+		goto copy32
+	}
+	if lines {
+		offs = append(offs, off+1)
+	}
+	return &basicBuffer{
+		arr: buf16,
+	}, offs
+copy32:
+	for idx < len(data) {
+		r, s := utf8.DecodeRuneInString(data[idx:])
+		idx += s
+		if lines && r == '\n' {
+			offs = append(offs, off+1)
+		}
+		buf32 = append(buf32, r)
+		off++
+	}
+	if lines {
+		offs = append(offs, off+1)
+	}
+	return &supplementalBuffer{
+		arr: buf32,
+	}, offs
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/source.go b/hack/tools/vendor/github.com/google/cel-go/common/source.go
new file mode 100644
index 0000000000..ec79cb5454
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/source.go
@@ -0,0 +1,173 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+	"github.com/google/cel-go/common/runes"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Source interface for filter source contents.
+type Source interface {
+	// Content returns the source content represented as a string.
+	// Example contents are a single file's contents, a textbox field,
+	// or a URL parameter.
+	Content() string
+
+	// Description gives a brief description of the source.
+	// Example descriptions are a file name or ui element.
+	Description() string
+
+	// LineOffsets gives the character offsets at which lines occur.
+	// The zero-th entry should refer to the break between the first
+	// and second line, or EOF if there is only one line of source.
+	LineOffsets() []int32
+
+	// LocationOffset translates a Location to an offset.
+ // Given the line and column of the Location returns the + // Location's character offset in the Source, and a bool + // indicating whether the Location was found. + LocationOffset(location Location) (int32, bool) + + // OffsetLocation translates a character offset to a Location, or + // false if the conversion was not feasible. + OffsetLocation(offset int32) (Location, bool) + + // NewLocation takes an input line and column and produces a Location. + // The default behavior is to treat the line and column as absolute, + // but concrete derivations may use this method to convert a relative + // line and column position into an absolute location. + NewLocation(line, col int) Location + + // Snippet returns a line of content and whether the line was found. + Snippet(line int) (string, bool) +} + +// The sourceImpl type implementation of the Source interface. +type sourceImpl struct { + runes.Buffer + description string + lineOffsets []int32 +} + +var _ runes.Buffer = &sourceImpl{} + +// TODO(jimlarson) "Character offsets" should index the code points +// within the UTF-8 encoded string. It currently indexes bytes. +// Can be accomplished by using rune[] instead of string for contents. + +// NewTextSource creates a new Source from the input text string. +func NewTextSource(text string) Source { + return NewStringSource(text, "") +} + +// NewStringSource creates a new Source from the given contents and description. +func NewStringSource(contents string, description string) Source { + // Compute line offsets up front as they are referred to frequently. + buf, offs := runes.NewBufferAndLineOffsets(contents) + return &sourceImpl{ + Buffer: buf, + description: description, + lineOffsets: offs, + } +} + +// NewInfoSource creates a new Source from a SourceInfo. +func NewInfoSource(info *exprpb.SourceInfo) Source { + return &sourceImpl{ + Buffer: runes.NewBuffer(""), + description: info.GetLocation(), + lineOffsets: info.GetLineOffsets(), + } +} + +// Content implements the Source interface method. +func (s *sourceImpl) Content() string { + return s.Slice(0, s.Len()) +} + +// Description implements the Source interface method. +func (s *sourceImpl) Description() string { + return s.description +} + +// LineOffsets implements the Source interface method. +func (s *sourceImpl) LineOffsets() []int32 { + return s.lineOffsets +} + +// LocationOffset implements the Source interface method. +func (s *sourceImpl) LocationOffset(location Location) (int32, bool) { + if lineOffset, found := s.findLineOffset(location.Line()); found { + return lineOffset + int32(location.Column()), true + } + return -1, false +} + +// NewLocation implements the Source interface method. +func (s *sourceImpl) NewLocation(line, col int) Location { + return NewLocation(line, col) +} + +// OffsetLocation implements the Source interface method. +func (s *sourceImpl) OffsetLocation(offset int32) (Location, bool) { + line, lineOffset := s.findLine(offset) + return NewLocation(int(line), int(offset-lineOffset)), true +} + +// Snippet implements the Source interface method. +func (s *sourceImpl) Snippet(line int) (string, bool) { + charStart, found := s.findLineOffset(line) + if !found || s.Len() == 0 { + return "", false + } + charEnd, found := s.findLineOffset(line + 1) + if found { + return s.Slice(int(charStart), int(charEnd-1)), true + } + return s.Slice(int(charStart), s.Len()), true +} + +// findLineOffset returns the offset where the (1-indexed) line begins, +// or false if line doesn't exist. 
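+// Line 1 always begins at offset 0; line n (for n > 1) begins at the offset
+// recorded in lineOffsets[n-2].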
+func (s *sourceImpl) findLineOffset(line int) (int32, bool) { + if line == 1 { + return 0, true + } + if line > 1 && line <= int(len(s.lineOffsets)) { + offset := s.lineOffsets[line-2] + return offset, true + } + return -1, false +} + +// findLine finds the line that contains the given character offset and +// returns the line number and offset of the beginning of that line. +// Note that the last line is treated as if it contains all offsets +// beyond the end of the actual source. +func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) { + var line int32 = 1 + for _, lineOffset := range s.lineOffsets { + if lineOffset > characterOffset { + break + } + line++ + } + if line == 1 { + return line, 0 + } + return line, s.lineOffsets[line-2] +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel new file mode 100644 index 0000000000..b55f452156 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "standard.go", + ], + importpath = "github.com/google/cel-go/common/stdlib", + deps = [ + "//common/decls:go_default_library", + "//common/functions:go_default_library", + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + ], +) \ No newline at end of file diff --git a/hack/tools/vendor/github.com/google/cel-go/common/stdlib/standard.go b/hack/tools/vendor/github.com/google/cel-go/common/stdlib/standard.go new file mode 100644 index 0000000000..1550c17863 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/stdlib/standard.go @@ -0,0 +1,620 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stdlib contains all of the standard library function declarations and definitions for CEL. 
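+//
+// The Functions and Types accessors defined below expose these declarations
+// so that callers can seed a CEL environment with the standard builtins.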
+package stdlib + +import ( + "github.com/google/cel-go/common/decls" + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +var ( + stdFunctions []*decls.FunctionDecl + stdTypes []*decls.VariableDecl +) + +func init() { + paramA := types.NewTypeParamType("A") + paramB := types.NewTypeParamType("B") + listOfA := types.NewListType(paramA) + mapOfAB := types.NewMapType(paramA, paramB) + + stdTypes = []*decls.VariableDecl{ + decls.TypeVariable(types.BoolType), + decls.TypeVariable(types.BytesType), + decls.TypeVariable(types.DoubleType), + decls.TypeVariable(types.DurationType), + decls.TypeVariable(types.IntType), + decls.TypeVariable(listOfA), + decls.TypeVariable(mapOfAB), + decls.TypeVariable(types.NullType), + decls.TypeVariable(types.StringType), + decls.TypeVariable(types.TimestampType), + decls.TypeVariable(types.TypeType), + decls.TypeVariable(types.UintType), + } + + stdFunctions = []*decls.FunctionDecl{ + // Logical operators. Special-cased within the interpreter. + // Note, the singleton binding prevents extensions from overriding the operator behavior. + function(operators.Conditional, + decls.Overload(overloads.Conditional, argTypes(types.BoolType, paramA, paramA), paramA, + decls.OverloadIsNonStrict()), + decls.SingletonFunctionBinding(noFunctionOverrides)), + function(operators.LogicalAnd, + decls.Overload(overloads.LogicalAnd, argTypes(types.BoolType, types.BoolType), types.BoolType, + decls.OverloadIsNonStrict()), + decls.SingletonBinaryBinding(noBinaryOverrides)), + function(operators.LogicalOr, + decls.Overload(overloads.LogicalOr, argTypes(types.BoolType, types.BoolType), types.BoolType, + decls.OverloadIsNonStrict()), + decls.SingletonBinaryBinding(noBinaryOverrides)), + function(operators.LogicalNot, + decls.Overload(overloads.LogicalNot, argTypes(types.BoolType), types.BoolType), + decls.SingletonUnaryBinding(func(val ref.Val) ref.Val { + b, ok := val.(types.Bool) + if !ok { + return types.MaybeNoSuchOverloadErr(val) + } + return b.Negate() + })), + + // Comprehension short-circuiting related function + function(operators.NotStrictlyFalse, + decls.Overload(overloads.NotStrictlyFalse, argTypes(types.BoolType), types.BoolType, + decls.OverloadIsNonStrict(), + decls.UnaryBinding(notStrictlyFalse))), + // Deprecated: __not_strictly_false__ + function(operators.OldNotStrictlyFalse, + decls.DisableDeclaration(true), // safe deprecation + decls.Overload(operators.OldNotStrictlyFalse, argTypes(types.BoolType), types.BoolType, + decls.OverloadIsNonStrict(), + decls.UnaryBinding(notStrictlyFalse))), + + // Equality / inequality. 
Special-cased in the interpreter + function(operators.Equals, + decls.Overload(overloads.Equals, argTypes(paramA, paramA), types.BoolType), + decls.SingletonBinaryBinding(noBinaryOverrides)), + function(operators.NotEquals, + decls.Overload(overloads.NotEquals, argTypes(paramA, paramA), types.BoolType), + decls.SingletonBinaryBinding(noBinaryOverrides)), + + // Mathematical operators + function(operators.Add, + decls.Overload(overloads.AddBytes, + argTypes(types.BytesType, types.BytesType), types.BytesType), + decls.Overload(overloads.AddDouble, + argTypes(types.DoubleType, types.DoubleType), types.DoubleType), + decls.Overload(overloads.AddDurationDuration, + argTypes(types.DurationType, types.DurationType), types.DurationType), + decls.Overload(overloads.AddDurationTimestamp, + argTypes(types.DurationType, types.TimestampType), types.TimestampType), + decls.Overload(overloads.AddTimestampDuration, + argTypes(types.TimestampType, types.DurationType), types.TimestampType), + decls.Overload(overloads.AddInt64, + argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.AddList, + argTypes(listOfA, listOfA), listOfA), + decls.Overload(overloads.AddString, + argTypes(types.StringType, types.StringType), types.StringType), + decls.Overload(overloads.AddUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Adder).Add(rhs) + }, traits.AdderType)), + function(operators.Divide, + decls.Overload(overloads.DivideDouble, + argTypes(types.DoubleType, types.DoubleType), types.DoubleType), + decls.Overload(overloads.DivideInt64, + argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.DivideUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Divider).Divide(rhs) + }, traits.DividerType)), + function(operators.Modulo, + decls.Overload(overloads.ModuloInt64, + argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.ModuloUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Modder).Modulo(rhs) + }, traits.ModderType)), + function(operators.Multiply, + decls.Overload(overloads.MultiplyDouble, + argTypes(types.DoubleType, types.DoubleType), types.DoubleType), + decls.Overload(overloads.MultiplyInt64, + argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.MultiplyUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Multiplier).Multiply(rhs) + }, traits.MultiplierType)), + function(operators.Negate, + decls.Overload(overloads.NegateDouble, argTypes(types.DoubleType), types.DoubleType), + decls.Overload(overloads.NegateInt64, argTypes(types.IntType), types.IntType), + decls.SingletonUnaryBinding(func(val ref.Val) ref.Val { + if types.IsBool(val) { + return types.MaybeNoSuchOverloadErr(val) + } + return val.(traits.Negater).Negate() + }, traits.NegatorType)), + function(operators.Subtract, + decls.Overload(overloads.SubtractDouble, + argTypes(types.DoubleType, types.DoubleType), types.DoubleType), + decls.Overload(overloads.SubtractDurationDuration, + argTypes(types.DurationType, types.DurationType), types.DurationType), + decls.Overload(overloads.SubtractInt64, + argTypes(types.IntType, types.IntType), 
types.IntType), + decls.Overload(overloads.SubtractTimestampDuration, + argTypes(types.TimestampType, types.DurationType), types.TimestampType), + decls.Overload(overloads.SubtractTimestampTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.DurationType), + decls.Overload(overloads.SubtractUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Subtractor).Subtract(rhs) + }, traits.SubtractorType)), + + // Relations operators + + function(operators.Less, + decls.Overload(overloads.LessBool, + argTypes(types.BoolType, types.BoolType), types.BoolType), + decls.Overload(overloads.LessInt64, + argTypes(types.IntType, types.IntType), types.BoolType), + decls.Overload(overloads.LessInt64Double, + argTypes(types.IntType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessInt64Uint64, + argTypes(types.IntType, types.UintType), types.BoolType), + decls.Overload(overloads.LessUint64, + argTypes(types.UintType, types.UintType), types.BoolType), + decls.Overload(overloads.LessUint64Double, + argTypes(types.UintType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessUint64Int64, + argTypes(types.UintType, types.IntType), types.BoolType), + decls.Overload(overloads.LessDouble, + argTypes(types.DoubleType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessDoubleInt64, + argTypes(types.DoubleType, types.IntType), types.BoolType), + decls.Overload(overloads.LessDoubleUint64, + argTypes(types.DoubleType, types.UintType), types.BoolType), + decls.Overload(overloads.LessString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.Overload(overloads.LessBytes, + argTypes(types.BytesType, types.BytesType), types.BoolType), + decls.Overload(overloads.LessTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.BoolType), + decls.Overload(overloads.LessDuration, + argTypes(types.DurationType, types.DurationType), types.BoolType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntNegOne { + return types.True + } + if cmp == types.IntOne || cmp == types.IntZero { + return types.False + } + return cmp + }, traits.ComparerType)), + + function(operators.LessEquals, + decls.Overload(overloads.LessEqualsBool, + argTypes(types.BoolType, types.BoolType), types.BoolType), + decls.Overload(overloads.LessEqualsInt64, + argTypes(types.IntType, types.IntType), types.BoolType), + decls.Overload(overloads.LessEqualsInt64Double, + argTypes(types.IntType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessEqualsInt64Uint64, + argTypes(types.IntType, types.UintType), types.BoolType), + decls.Overload(overloads.LessEqualsUint64, + argTypes(types.UintType, types.UintType), types.BoolType), + decls.Overload(overloads.LessEqualsUint64Double, + argTypes(types.UintType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessEqualsUint64Int64, + argTypes(types.UintType, types.IntType), types.BoolType), + decls.Overload(overloads.LessEqualsDouble, + argTypes(types.DoubleType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessEqualsDoubleInt64, + argTypes(types.DoubleType, types.IntType), types.BoolType), + decls.Overload(overloads.LessEqualsDoubleUint64, + argTypes(types.DoubleType, types.UintType), types.BoolType), + decls.Overload(overloads.LessEqualsString, + argTypes(types.StringType, types.StringType), 
types.BoolType), + decls.Overload(overloads.LessEqualsBytes, + argTypes(types.BytesType, types.BytesType), types.BoolType), + decls.Overload(overloads.LessEqualsTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.BoolType), + decls.Overload(overloads.LessEqualsDuration, + argTypes(types.DurationType, types.DurationType), types.BoolType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntNegOne || cmp == types.IntZero { + return types.True + } + if cmp == types.IntOne { + return types.False + } + return cmp + }, traits.ComparerType)), + + function(operators.Greater, + decls.Overload(overloads.GreaterBool, + argTypes(types.BoolType, types.BoolType), types.BoolType), + decls.Overload(overloads.GreaterInt64, + argTypes(types.IntType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterInt64Double, + argTypes(types.IntType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterInt64Uint64, + argTypes(types.IntType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterUint64, + argTypes(types.UintType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterUint64Double, + argTypes(types.UintType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterUint64Int64, + argTypes(types.UintType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterDouble, + argTypes(types.DoubleType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterDoubleInt64, + argTypes(types.DoubleType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterDoubleUint64, + argTypes(types.DoubleType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.Overload(overloads.GreaterBytes, + argTypes(types.BytesType, types.BytesType), types.BoolType), + decls.Overload(overloads.GreaterTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.BoolType), + decls.Overload(overloads.GreaterDuration, + argTypes(types.DurationType, types.DurationType), types.BoolType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntOne { + return types.True + } + if cmp == types.IntNegOne || cmp == types.IntZero { + return types.False + } + return cmp + }, traits.ComparerType)), + + function(operators.GreaterEquals, + decls.Overload(overloads.GreaterEqualsBool, + argTypes(types.BoolType, types.BoolType), types.BoolType), + decls.Overload(overloads.GreaterEqualsInt64, + argTypes(types.IntType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterEqualsInt64Double, + argTypes(types.IntType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterEqualsInt64Uint64, + argTypes(types.IntType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterEqualsUint64, + argTypes(types.UintType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterEqualsUint64Double, + argTypes(types.UintType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterEqualsUint64Int64, + argTypes(types.UintType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterEqualsDouble, + argTypes(types.DoubleType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterEqualsDoubleInt64, + argTypes(types.DoubleType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterEqualsDoubleUint64, 
+ argTypes(types.DoubleType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterEqualsString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.Overload(overloads.GreaterEqualsBytes, + argTypes(types.BytesType, types.BytesType), types.BoolType), + decls.Overload(overloads.GreaterEqualsTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.BoolType), + decls.Overload(overloads.GreaterEqualsDuration, + argTypes(types.DurationType, types.DurationType), types.BoolType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntOne || cmp == types.IntZero { + return types.True + } + if cmp == types.IntNegOne { + return types.False + } + return cmp + }, traits.ComparerType)), + + // Indexing + function(operators.Index, + decls.Overload(overloads.IndexList, argTypes(listOfA, types.IntType), paramA), + decls.Overload(overloads.IndexMap, argTypes(mapOfAB, paramA), paramB), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Indexer).Get(rhs) + }, traits.IndexerType)), + + // Collections operators + function(operators.In, + decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType), + decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType), + decls.SingletonBinaryBinding(inAggregate)), + function(operators.OldIn, + decls.DisableDeclaration(true), // safe deprecation + decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType), + decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType), + decls.SingletonBinaryBinding(inAggregate)), + function(overloads.DeprecatedIn, + decls.DisableDeclaration(true), // safe deprecation + decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType), + decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType), + decls.SingletonBinaryBinding(inAggregate)), + function(overloads.Size, + decls.Overload(overloads.SizeBytes, argTypes(types.BytesType), types.IntType), + decls.MemberOverload(overloads.SizeBytesInst, argTypes(types.BytesType), types.IntType), + decls.Overload(overloads.SizeList, argTypes(listOfA), types.IntType), + decls.MemberOverload(overloads.SizeListInst, argTypes(listOfA), types.IntType), + decls.Overload(overloads.SizeMap, argTypes(mapOfAB), types.IntType), + decls.MemberOverload(overloads.SizeMapInst, argTypes(mapOfAB), types.IntType), + decls.Overload(overloads.SizeString, argTypes(types.StringType), types.IntType), + decls.MemberOverload(overloads.SizeStringInst, argTypes(types.StringType), types.IntType), + decls.SingletonUnaryBinding(func(val ref.Val) ref.Val { + return val.(traits.Sizer).Size() + }, traits.SizerType)), + + // Type conversions + function(overloads.TypeConvertType, + decls.Overload(overloads.TypeConvertType, argTypes(paramA), types.NewTypeTypeWithParam(paramA)), + decls.SingletonUnaryBinding(convertToType(types.TypeType))), + + // Bool conversions + function(overloads.TypeConvertBool, + decls.Overload(overloads.BoolToBool, argTypes(types.BoolType), types.BoolType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.StringToBool, argTypes(types.StringType), types.BoolType, + decls.UnaryBinding(convertToType(types.BoolType)))), + + // Bytes conversions + function(overloads.TypeConvertBytes, + decls.Overload(overloads.BytesToBytes, argTypes(types.BytesType), types.BytesType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.StringToBytes, 
argTypes(types.StringType), types.BytesType, + decls.UnaryBinding(convertToType(types.BytesType)))), + + // Double conversions + function(overloads.TypeConvertDouble, + decls.Overload(overloads.DoubleToDouble, argTypes(types.DoubleType), types.DoubleType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.IntToDouble, argTypes(types.IntType), types.DoubleType, + decls.UnaryBinding(convertToType(types.DoubleType))), + decls.Overload(overloads.StringToDouble, argTypes(types.StringType), types.DoubleType, + decls.UnaryBinding(convertToType(types.DoubleType))), + decls.Overload(overloads.UintToDouble, argTypes(types.UintType), types.DoubleType, + decls.UnaryBinding(convertToType(types.DoubleType)))), + + // Duration conversions + function(overloads.TypeConvertDuration, + decls.Overload(overloads.DurationToDuration, argTypes(types.DurationType), types.DurationType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.IntToDuration, argTypes(types.IntType), types.DurationType, + decls.UnaryBinding(convertToType(types.DurationType))), + decls.Overload(overloads.StringToDuration, argTypes(types.StringType), types.DurationType, + decls.UnaryBinding(convertToType(types.DurationType)))), + + // Dyn conversions + function(overloads.TypeConvertDyn, + decls.Overload(overloads.ToDyn, argTypes(paramA), types.DynType), + decls.SingletonUnaryBinding(identity)), + + // Int conversions + function(overloads.TypeConvertInt, + decls.Overload(overloads.IntToInt, argTypes(types.IntType), types.IntType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.DoubleToInt, argTypes(types.DoubleType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + decls.Overload(overloads.DurationToInt, argTypes(types.DurationType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + decls.Overload(overloads.StringToInt, argTypes(types.StringType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + decls.Overload(overloads.TimestampToInt, argTypes(types.TimestampType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + decls.Overload(overloads.UintToInt, argTypes(types.UintType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + ), + + // String conversions + function(overloads.TypeConvertString, + decls.Overload(overloads.StringToString, argTypes(types.StringType), types.StringType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.BoolToString, argTypes(types.BoolType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.BytesToString, argTypes(types.BytesType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.DoubleToString, argTypes(types.DoubleType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.DurationToString, argTypes(types.DurationType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.IntToString, argTypes(types.IntType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.TimestampToString, argTypes(types.TimestampType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.UintToString, argTypes(types.UintType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType)))), + + // Timestamp conversions + function(overloads.TypeConvertTimestamp, + 
decls.Overload(overloads.TimestampToTimestamp, argTypes(types.TimestampType), types.TimestampType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.IntToTimestamp, argTypes(types.IntType), types.TimestampType, + decls.UnaryBinding(convertToType(types.TimestampType))), + decls.Overload(overloads.StringToTimestamp, argTypes(types.StringType), types.TimestampType, + decls.UnaryBinding(convertToType(types.TimestampType)))), + + // Uint conversions + function(overloads.TypeConvertUint, + decls.Overload(overloads.UintToUint, argTypes(types.UintType), types.UintType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.DoubleToUint, argTypes(types.DoubleType), types.UintType, + decls.UnaryBinding(convertToType(types.UintType))), + decls.Overload(overloads.IntToUint, argTypes(types.IntType), types.UintType, + decls.UnaryBinding(convertToType(types.UintType))), + decls.Overload(overloads.StringToUint, argTypes(types.StringType), types.UintType, + decls.UnaryBinding(convertToType(types.UintType)))), + + // String functions + function(overloads.Contains, + decls.MemberOverload(overloads.ContainsString, + argTypes(types.StringType, types.StringType), types.BoolType, + decls.BinaryBinding(types.StringContains)), + decls.DisableTypeGuards(true)), + function(overloads.EndsWith, + decls.MemberOverload(overloads.EndsWithString, + argTypes(types.StringType, types.StringType), types.BoolType, + decls.BinaryBinding(types.StringEndsWith)), + decls.DisableTypeGuards(true)), + function(overloads.StartsWith, + decls.MemberOverload(overloads.StartsWithString, + argTypes(types.StringType, types.StringType), types.BoolType, + decls.BinaryBinding(types.StringStartsWith)), + decls.DisableTypeGuards(true)), + function(overloads.Matches, + decls.Overload(overloads.Matches, argTypes(types.StringType, types.StringType), types.BoolType), + decls.MemberOverload(overloads.MatchesString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.SingletonBinaryBinding(func(str, pat ref.Val) ref.Val { + return str.(traits.Matcher).Match(pat) + }, traits.MatcherType)), + + // Timestamp / duration functions + function(overloads.TimeGetFullYear, + decls.MemberOverload(overloads.TimestampToYear, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToYearWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetMonth, + decls.MemberOverload(overloads.TimestampToMonth, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToMonthWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetDayOfYear, + decls.MemberOverload(overloads.TimestampToDayOfYear, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToDayOfYearWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetDayOfMonth, + decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBased, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetDate, + decls.MemberOverload(overloads.TimestampToDayOfMonthOneBased, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + 
function(overloads.TimeGetDayOfWeek, + decls.MemberOverload(overloads.TimestampToDayOfWeek, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToDayOfWeekWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetHours, + decls.MemberOverload(overloads.TimestampToHours, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToHoursWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType), + decls.MemberOverload(overloads.DurationToHours, + argTypes(types.DurationType), types.IntType)), + + function(overloads.TimeGetMinutes, + decls.MemberOverload(overloads.TimestampToMinutes, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToMinutesWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType), + decls.MemberOverload(overloads.DurationToMinutes, + argTypes(types.DurationType), types.IntType)), + + function(overloads.TimeGetSeconds, + decls.MemberOverload(overloads.TimestampToSeconds, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToSecondsWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType), + decls.MemberOverload(overloads.DurationToSeconds, + argTypes(types.DurationType), types.IntType)), + + function(overloads.TimeGetMilliseconds, + decls.MemberOverload(overloads.TimestampToMilliseconds, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToMillisecondsWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType), + decls.MemberOverload(overloads.DurationToMilliseconds, + argTypes(types.DurationType), types.IntType)), + } +} + +// Functions returns the set of standard library function declarations and definitions for CEL. +func Functions() []*decls.FunctionDecl { + return stdFunctions +} + +// Types returns the set of standard library types for CEL. +func Types() []*decls.VariableDecl { + return stdTypes +} + +func notStrictlyFalse(value ref.Val) ref.Val { + if types.IsBool(value) { + return value + } + return types.True +} + +func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val { + if rhs.Type().HasTrait(traits.ContainerType) { + return rhs.(traits.Container).Contains(lhs) + } + return types.ValOrErr(rhs, "no such overload") +} + +func function(name string, opts ...decls.FunctionOpt) *decls.FunctionDecl { + fn, err := decls.NewFunction(name, opts...) 
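+	// The standard declarations are assembled once at package init time, so a
+	// construction error is a programming error and is surfaced by panicking.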
+ if err != nil { + panic(err) + } + return fn +} + +func argTypes(args ...*types.Type) []*types.Type { + return args +} + +func noBinaryOverrides(rhs, lhs ref.Val) ref.Val { + return types.NoSuchOverloadErr() +} + +func noFunctionOverrides(args ...ref.Val) ref.Val { + return types.NoSuchOverloadErr() +} + +func identity(val ref.Val) ref.Val { + return val +} + +func convertToType(t ref.Type) functions.UnaryOp { + return func(val ref.Val) ref.Val { + return val.ConvertToType(t) + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/types/BUILD.bazel new file mode 100644 index 0000000000..8f010fae44 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/BUILD.bazel @@ -0,0 +1,92 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "any_value.go", + "bool.go", + "bytes.go", + "compare.go", + "double.go", + "duration.go", + "err.go", + "int.go", + "iterator.go", + "json_value.go", + "list.go", + "map.go", + "null.go", + "object.go", + "optional.go", + "overflow.go", + "provider.go", + "string.go", + "timestamp.go", + "types.go", + "uint.go", + "unknown.go", + "util.go", + ], + importpath = "github.com/google/cel-go/common/types", + deps = [ + "//checker/decls:go_default_library", + "//common/overloads:go_default_library", + "//common/types/pb:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "@com_github_stoewer_go_strcase//:go_default_library", + "@dev_cel_expr//:expr", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//encoding/protojson:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", + "@org_golang_google_protobuf//types/dynamicpb:go_default_library", + "@org_golang_google_protobuf//types/known/anypb:go_default_library", + "@org_golang_google_protobuf//types/known/durationpb:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", + "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "bool_test.go", + "bytes_test.go", + "double_test.go", + "duration_test.go", + "int_test.go", + "json_list_test.go", + "json_struct_test.go", + "list_test.go", + "map_test.go", + "null_test.go", + "object_test.go", + "optional_test.go", + "provider_test.go", + "string_test.go", + "timestamp_test.go", + "types_test.go", + "uint_test.go", + "unknown_test.go", + "util_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//common/types/ref:go_default_library", + "//test:go_default_library", + "//test/proto3pb:test_all_types_go_proto", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//encoding/protojson:go_default_library", + "@org_golang_google_protobuf//types/known/anypb:go_default_library", + "@org_golang_google_protobuf//types/known/durationpb:go_default_library", + "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", + ], +) diff --git 
a/hack/tools/vendor/github.com/google/cel-go/common/types/any_value.go b/hack/tools/vendor/github.com/google/cel-go/common/types/any_value.go new file mode 100644 index 0000000000..cda0f13acf --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/any_value.go @@ -0,0 +1,24 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + + anypb "google.golang.org/protobuf/types/known/anypb" +) + +// anyValueType constant representing the reflected type of google.protobuf.Any. +var anyValueType = reflect.TypeOf(&anypb.Any{}) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/bool.go b/hack/tools/vendor/github.com/google/cel-go/common/types/bool.go new file mode 100644 index 0000000000..565734f3ff --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/bool.go @@ -0,0 +1,141 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "strconv" + + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +// Bool type that implements ref.Val and supports comparison and negation. +type Bool bool + +var ( + // boolWrapperType golang reflected type for protobuf bool wrapper type. + boolWrapperType = reflect.TypeOf(&wrapperspb.BoolValue{}) +) + +// Boolean constants +const ( + False = Bool(false) + True = Bool(true) +) + +// Compare implements the traits.Comparer interface method. +func (b Bool) Compare(other ref.Val) ref.Val { + otherBool, ok := other.(Bool) + if !ok { + return ValOrErr(other, "no such overload") + } + if b == otherBool { + return IntZero + } + if !b && otherBool { + return IntNegOne + } + return IntOne +} + +// ConvertToNative implements the ref.Val interface method. +func (b Bool) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.Bool: + return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Primitives must be wrapped to a wrapperspb.BoolValue before being packed into an Any. + return anypb.New(wrapperspb.Bool(bool(b))) + case boolWrapperType: + // Convert the bool to a wrapperspb.BoolValue. 
+ return wrapperspb.Bool(bool(b)), nil + case jsonValueType: + // Return the bool as a new structpb.Value. + return structpb.NewBoolValue(bool(b)), nil + default: + if typeDesc.Elem().Kind() == reflect.Bool { + p := bool(b) + return &p, nil + } + } + case reflect.Interface: + bv := b.Value() + if reflect.TypeOf(bv).Implements(typeDesc) { + return bv, nil + } + if reflect.TypeOf(b).Implements(typeDesc) { + return b, nil + } + } + return nil, fmt.Errorf("type conversion error from bool to '%v'", typeDesc) +} + +// ConvertToType implements the ref.Val interface method. +func (b Bool) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case StringType: + return String(strconv.FormatBool(bool(b))) + case BoolType: + return b + case TypeType: + return BoolType + } + return NewErr("type conversion error from '%v' to '%v'", BoolType, typeVal) +} + +// Equal implements the ref.Val interface method. +func (b Bool) Equal(other ref.Val) ref.Val { + otherBool, ok := other.(Bool) + return Bool(ok && b == otherBool) +} + +// IsZeroValue returns true if the boolean value is false. +func (b Bool) IsZeroValue() bool { + return b == False +} + +// Negate implements the traits.Negater interface method. +func (b Bool) Negate() ref.Val { + return !b +} + +// Type implements the ref.Val interface method. +func (b Bool) Type() ref.Type { + return BoolType +} + +// Value implements the ref.Val interface method. +func (b Bool) Value() any { + return bool(b) +} + +// IsBool returns whether the input ref.Val or ref.Type is equal to BoolType. +func IsBool(elem ref.Val) bool { + switch v := elem.(type) { + case Bool: + return true + case ref.Val: + return v.Type() == BoolType + default: + return false + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/bytes.go b/hack/tools/vendor/github.com/google/cel-go/common/types/bytes.go new file mode 100644 index 0000000000..7e813e291b --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/bytes.go @@ -0,0 +1,140 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "bytes" + "encoding/base64" + "fmt" + "reflect" + "unicode/utf8" + + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +// Bytes type that implements ref.Val and supports add, compare, and size +// operations. +type Bytes []byte + +var ( + // byteWrapperType golang reflected type for protobuf bytes wrapper type. + byteWrapperType = reflect.TypeOf(&wrapperspb.BytesValue{}) +) + +// Add implements traits.Adder interface method by concatenating byte sequences. +func (b Bytes) Add(other ref.Val) ref.Val { + otherBytes, ok := other.(Bytes) + if !ok { + return ValOrErr(other, "no such overload") + } + return append(b, otherBytes...) +} + +// Compare implements traits.Comparer interface method by lexicographic ordering. 
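+// For example, Bytes("abc").Compare(Bytes("abd")) yields IntNegOne.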
+func (b Bytes) Compare(other ref.Val) ref.Val { + otherBytes, ok := other.(Bytes) + if !ok { + return ValOrErr(other, "no such overload") + } + return Int(bytes.Compare(b, otherBytes)) +} + +// ConvertToNative implements the ref.Val interface method. +func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.Array: + if len(b) != typeDesc.Len() { + return nil, fmt.Errorf("[%d]byte not assignable to [%d]byte array", len(b), typeDesc.Len()) + } + refArrPtr := reflect.New(reflect.ArrayOf(len(b), typeDesc.Elem())) + refArr := refArrPtr.Elem() + for i, byt := range b { + refArr.Index(i).Set(reflect.ValueOf(byt).Convert(typeDesc.Elem())) + } + return refArr.Interface(), nil + case reflect.Slice: + return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Primitives must be wrapped before being set on an Any field. + return anypb.New(wrapperspb.Bytes([]byte(b))) + case byteWrapperType: + // Convert the bytes to a wrapperspb.BytesValue. + return wrapperspb.Bytes([]byte(b)), nil + case jsonValueType: + // CEL follows the proto3 to JSON conversion by encoding bytes to a string via base64. + // The encoding below matches the golang 'encoding/json' behavior during marshaling, + // which uses base64.StdEncoding. + str := base64.StdEncoding.EncodeToString([]byte(b)) + return structpb.NewStringValue(str), nil + } + case reflect.Interface: + bv := b.Value() + if reflect.TypeOf(bv).Implements(typeDesc) { + return bv, nil + } + if reflect.TypeOf(b).Implements(typeDesc) { + return b, nil + } + } + return nil, fmt.Errorf("type conversion error from Bytes to '%v'", typeDesc) +} + +// ConvertToType implements the ref.Val interface method. +func (b Bytes) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case StringType: + if !utf8.Valid(b) { + return NewErr("invalid UTF-8 in bytes, cannot convert to string") + } + return String(b) + case BytesType: + return b + case TypeType: + return BytesType + } + return NewErr("type conversion error from '%s' to '%s'", BytesType, typeVal) +} + +// Equal implements the ref.Val interface method. +func (b Bytes) Equal(other ref.Val) ref.Val { + otherBytes, ok := other.(Bytes) + return Bool(ok && bytes.Equal(b, otherBytes)) +} + +// IsZeroValue returns true if the byte array is empty. +func (b Bytes) IsZeroValue() bool { + return len(b) == 0 +} + +// Size implements the traits.Sizer interface method. +func (b Bytes) Size() ref.Val { + return Int(len(b)) +} + +// Type implements the ref.Val interface method. +func (b Bytes) Type() ref.Type { + return BytesType +} + +// Value implements the ref.Val interface method. +func (b Bytes) Value() any { + return []byte(b) +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/compare.go b/hack/tools/vendor/github.com/google/cel-go/common/types/compare.go new file mode 100644 index 0000000000..e196826180 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/compare.go @@ -0,0 +1,97 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "math" + + "github.com/google/cel-go/common/types/ref" +) + +func compareDoubleInt(d Double, i Int) Int { + if d < math.MinInt64 { + return IntNegOne + } + if d > math.MaxInt64 { + return IntOne + } + return compareDouble(d, Double(i)) +} + +func compareIntDouble(i Int, d Double) Int { + return -compareDoubleInt(d, i) +} + +func compareDoubleUint(d Double, u Uint) Int { + if d < 0 { + return IntNegOne + } + if d > math.MaxUint64 { + return IntOne + } + return compareDouble(d, Double(u)) +} + +func compareUintDouble(u Uint, d Double) Int { + return -compareDoubleUint(d, u) +} + +func compareIntUint(i Int, u Uint) Int { + if i < 0 || u > math.MaxInt64 { + return IntNegOne + } + cmp := i - Int(u) + if cmp < 0 { + return IntNegOne + } + if cmp > 0 { + return IntOne + } + return IntZero +} + +func compareUintInt(u Uint, i Int) Int { + return -compareIntUint(i, u) +} + +func compareDouble(a, b Double) Int { + if a < b { + return IntNegOne + } + if a > b { + return IntOne + } + return IntZero +} + +func compareInt(a, b Int) ref.Val { + if a < b { + return IntNegOne + } + if a > b { + return IntOne + } + return IntZero +} + +func compareUint(a, b Uint) ref.Val { + if a < b { + return IntNegOne + } + if a > b { + return IntOne + } + return IntZero +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/doc.go b/hack/tools/vendor/github.com/google/cel-go/common/types/doc.go new file mode 100644 index 0000000000..5f641d7043 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package types contains the types, traits, and utilities common to all +// components of expression handling. +package types diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/double.go b/hack/tools/vendor/github.com/google/cel-go/common/types/double.go new file mode 100644 index 0000000000..027e789786 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/double.go @@ -0,0 +1,211 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
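The compare.go helpers above give CEL a single total order across int, uint, and double operands, clamping doubles that fall outside the target range instead of converting them. A minimal sketch of the observable behavior, assuming only the exported surface of github.com/google/cel-go/common/types (an illustration, not part of the vendored files):

package main

import (
	"fmt"
	"math"

	"github.com/google/cel-go/common/types"
)

func main() {
	// Mixed-type comparisons route through the helpers in compare.go.
	fmt.Println(types.Int(2).Compare(types.Double(2.5))) // -1
	// Doubles beyond the int64 range are clamped before conversion.
	fmt.Println(types.Double(1e300).Compare(types.Int(1))) // 1
	fmt.Println(types.Int(-1).Compare(types.Uint(0)))      // -1
	// NaN is unordered; comparing against it yields an error value.
	fmt.Println(types.IsError(types.Double(2).Compare(types.Double(math.NaN())))) // true
}
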
+ +package types + +import ( + "fmt" + "math" + "reflect" + + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +// Double type that implements ref.Val, comparison, and mathematical +// operations. +type Double float64 + +var ( + // doubleWrapperType reflected type for protobuf double wrapper type. + doubleWrapperType = reflect.TypeOf(&wrapperspb.DoubleValue{}) + + // floatWrapperType reflected type for protobuf float wrapper type. + floatWrapperType = reflect.TypeOf(&wrapperspb.FloatValue{}) +) + +// Add implements traits.Adder.Add. +func (d Double) Add(other ref.Val) ref.Val { + otherDouble, ok := other.(Double) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + return d + otherDouble +} + +// Compare implements traits.Comparer.Compare. +func (d Double) Compare(other ref.Val) ref.Val { + if math.IsNaN(float64(d)) { + return NewErr("NaN values cannot be ordered") + } + switch ov := other.(type) { + case Double: + if math.IsNaN(float64(ov)) { + return NewErr("NaN values cannot be ordered") + } + return compareDouble(d, ov) + case Int: + return compareDoubleInt(d, ov) + case Uint: + return compareDoubleUint(d, ov) + default: + return MaybeNoSuchOverloadErr(other) + } +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (d Double) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.Float32: + v := float32(d) + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Float64: + v := float64(d) + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Primitives must be wrapped before being set on an Any field. + return anypb.New(wrapperspb.Double(float64(d))) + case doubleWrapperType: + // Convert to a wrapperspb.DoubleValue + return wrapperspb.Double(float64(d)), nil + case floatWrapperType: + // Convert to a wrapperspb.FloatValue (with truncation). + return wrapperspb.Float(float32(d)), nil + case jsonValueType: + // Note, there are special cases for proto3 to json conversion that + // expect the floating point value to be converted to a NaN, + // Infinity, or -Infinity string values, but the jsonpb string + // marshaling of the protobuf.Value will handle this conversion. + return structpb.NewNumberValue(float64(d)), nil + } + switch typeDesc.Elem().Kind() { + case reflect.Float32: + v := float32(d) + p := reflect.New(typeDesc.Elem()) + p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) + return p.Interface(), nil + case reflect.Float64: + v := float64(d) + p := reflect.New(typeDesc.Elem()) + p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) + return p.Interface(), nil + } + case reflect.Interface: + dv := d.Value() + if reflect.TypeOf(dv).Implements(typeDesc) { + return dv, nil + } + if reflect.TypeOf(d).Implements(typeDesc) { + return d, nil + } + } + return nil, fmt.Errorf("type conversion error from Double to '%v'", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. 
+func (d Double) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case IntType: + i, err := doubleToInt64Checked(float64(d)) + if err != nil { + return WrapErr(err) + } + return Int(i) + case UintType: + i, err := doubleToUint64Checked(float64(d)) + if err != nil { + return WrapErr(err) + } + return Uint(i) + case DoubleType: + return d + case StringType: + return String(fmt.Sprintf("%g", float64(d))) + case TypeType: + return DoubleType + } + return NewErr("type conversion error from '%s' to '%s'", DoubleType, typeVal) +} + +// Divide implements traits.Divider.Divide. +func (d Double) Divide(other ref.Val) ref.Val { + otherDouble, ok := other.(Double) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + return d / otherDouble +} + +// Equal implements ref.Val.Equal. +func (d Double) Equal(other ref.Val) ref.Val { + if math.IsNaN(float64(d)) { + return False + } + switch ov := other.(type) { + case Double: + if math.IsNaN(float64(ov)) { + return False + } + return Bool(d == ov) + case Int: + return Bool(compareDoubleInt(d, ov) == 0) + case Uint: + return Bool(compareDoubleUint(d, ov) == 0) + default: + return False + } +} + +// IsZeroValue returns true if double value is 0.0 +func (d Double) IsZeroValue() bool { + return float64(d) == 0.0 +} + +// Multiply implements traits.Multiplier.Multiply. +func (d Double) Multiply(other ref.Val) ref.Val { + otherDouble, ok := other.(Double) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + return d * otherDouble +} + +// Negate implements traits.Negater.Negate. +func (d Double) Negate() ref.Val { + return -d +} + +// Subtract implements traits.Subtractor.Subtract. +func (d Double) Subtract(subtrahend ref.Val) ref.Val { + subtraDouble, ok := subtrahend.(Double) + if !ok { + return MaybeNoSuchOverloadErr(subtrahend) + } + return d - subtraDouble +} + +// Type implements ref.Val.Type. +func (d Double) Type() ref.Type { + return DoubleType +} + +// Value implements ref.Val.Value. +func (d Double) Value() any { + return float64(d) +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/duration.go b/hack/tools/vendor/github.com/google/cel-go/common/types/duration.go new file mode 100644 index 0000000000..596e56d6b0 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/duration.go @@ -0,0 +1,222 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "strconv" + "time" + + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + dpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +// Duration type that implements ref.Val and supports add, compare, negate, +// and subtract operators. This type is also a receiver which means it can +// participate in dispatch to receiver functions. 
+type Duration struct { + time.Duration +} + +func durationOf(d time.Duration) Duration { + return Duration{Duration: d} +} + +var ( + durationValueType = reflect.TypeOf(&dpb.Duration{}) + + durationZeroArgOverloads = map[string]func(ref.Val) ref.Val{ + overloads.TimeGetHours: DurationGetHours, + overloads.TimeGetMinutes: DurationGetMinutes, + overloads.TimeGetSeconds: DurationGetSeconds, + overloads.TimeGetMilliseconds: DurationGetMilliseconds, + } +) + +// Add implements traits.Adder.Add. +func (d Duration) Add(other ref.Val) ref.Val { + switch other.Type() { + case DurationType: + dur2 := other.(Duration) + val, err := addDurationChecked(d.Duration, dur2.Duration) + if err != nil { + return WrapErr(err) + } + return durationOf(val) + case TimestampType: + ts := other.(Timestamp).Time + val, err := addTimeDurationChecked(ts, d.Duration) + if err != nil { + return WrapErr(err) + } + return timestampOf(val) + } + return MaybeNoSuchOverloadErr(other) +} + +// Compare implements traits.Comparer.Compare. +func (d Duration) Compare(other ref.Val) ref.Val { + otherDur, ok := other.(Duration) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + d1 := d.Duration + d2 := otherDur.Duration + switch { + case d1 < d2: + return IntNegOne + case d1 > d2: + return IntOne + default: + return IntZero + } +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (d Duration) ConvertToNative(typeDesc reflect.Type) (any, error) { + // If the duration is already assignable to the desired type return it. + if reflect.TypeOf(d.Duration).AssignableTo(typeDesc) { + return d.Duration, nil + } + if reflect.TypeOf(d).AssignableTo(typeDesc) { + return d, nil + } + switch typeDesc { + case anyValueType: + // Pack the duration as a dpb.Duration into an Any value. + return anypb.New(dpb.New(d.Duration)) + case durationValueType: + // Unwrap the CEL value to its underlying proto value. + return dpb.New(d.Duration), nil + case jsonValueType: + // CEL follows the proto3 to JSON conversion. + // Note, using jsonpb would wrap the result in extra double quotes. + v := d.ConvertToType(StringType) + if IsError(v) { + return nil, v.(*Err) + } + return structpb.NewStringValue(string(v.(String))), nil + } + return nil, fmt.Errorf("type conversion error from 'Duration' to '%v'", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. +func (d Duration) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case StringType: + return String(strconv.FormatFloat(d.Seconds(), 'f', -1, 64) + "s") + case IntType: + return Int(d.Duration) + case DurationType: + return d + case TypeType: + return DurationType + } + return NewErr("type conversion error from '%s' to '%s'", DurationType, typeVal) +} + +// Equal implements ref.Val.Equal. +func (d Duration) Equal(other ref.Val) ref.Val { + otherDur, ok := other.(Duration) + return Bool(ok && d.Duration == otherDur.Duration) +} + +// IsZeroValue returns true if the duration value is zero +func (d Duration) IsZeroValue() bool { + return d.Duration == 0 +} + +// Negate implements traits.Negater.Negate. +func (d Duration) Negate() ref.Val { + val, err := negateDurationChecked(d.Duration) + if err != nil { + return WrapErr(err) + } + return durationOf(val) +} + +// Receive implements traits.Receiver.Receive. 
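For orientation, the durationZeroArgOverloads table above backs the getHours()/getMinutes()/getSeconds()/getMilliseconds() accessors available on durations in CEL expressions. A small usage sketch (illustrative only; types.Duration and the getters are exported by this file):

package main

import (
	"fmt"
	"time"

	"github.com/google/cel-go/common/types"
)

func main() {
	d := types.Duration{Duration: 90 * time.Minute}
	fmt.Println(types.DurationGetHours(d))   // 1 (truncated toward zero)
	fmt.Println(types.DurationGetMinutes(d)) // 90
	// Add is overflow-checked and returns a new Duration value.
	fmt.Println(d.Add(d)) // 3h0m0s
}
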
+func (d Duration) Receive(function string, overload string, args []ref.Val) ref.Val {
+ if len(args) == 0 {
+ if f, found := durationZeroArgOverloads[function]; found {
+ return f(d)
+ }
+ }
+ return NoSuchOverloadErr()
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (d Duration) Subtract(subtrahend ref.Val) ref.Val {
+ subtraDur, ok := subtrahend.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(subtrahend)
+ }
+ val, err := subtractDurationChecked(d.Duration, subtraDur.Duration)
+ if err != nil {
+ return WrapErr(err)
+ }
+ return durationOf(val)
+}
+
+// Type implements ref.Val.Type.
+func (d Duration) Type() ref.Type {
+ return DurationType
+}
+
+// Value implements ref.Val.Value.
+func (d Duration) Value() any {
+ return d.Duration
+}
+
+// DurationGetHours returns the duration in hours.
+func DurationGetHours(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Hours())
+}
+
+// DurationGetMinutes returns the duration in minutes.
+func DurationGetMinutes(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Minutes())
+}
+
+// DurationGetSeconds returns the duration in seconds.
+func DurationGetSeconds(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Seconds())
+}
+
+// DurationGetMilliseconds returns the duration in milliseconds.
+func DurationGetMilliseconds(val ref.Val) ref.Val {
+ dur, ok := val.(Duration)
+ if !ok {
+ return MaybeNoSuchOverloadErr(val)
+ }
+ return Int(dur.Milliseconds())
+} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/err.go b/hack/tools/vendor/github.com/google/cel-go/common/types/err.go new file mode 100644 index 0000000000..9c9d9e21ef --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/err.go @@ -0,0 +1,169 @@ +// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common/types/ref"
+)
+
+// Error interface which allows types.Err values to be treated as error values.
+type Error interface {
+ error
+ ref.Val
+}
+
+// Err type which extends the built-in Go error and implements ref.Val.
+type Err struct {
+ error
+ id int64
+}
+
+var (
+ // ErrType singleton.
+ ErrType = NewOpaqueType("error")
+
+ // errDivideByZero is an error indicating a division by zero of an integer value.
+ errDivideByZero = errors.New("division by zero")
+ // errModulusByZero is an error indicating a modulus by zero of an integer value.
+ errModulusByZero = errors.New("modulus by zero")
+ // errIntOverflow is an error representing integer overflow.
+ errIntOverflow = errors.New("integer overflow")
+ // errUintOverflow is an error representing unsigned integer overflow.
+ errUintOverflow = errors.New("unsigned integer overflow")
+ // errDurationOverflow is an error representing duration overflow.
+ errDurationOverflow = errors.New("duration overflow")
+ // errTimestampOverflow is an error representing timestamp overflow.
+ errTimestampOverflow = errors.New("timestamp overflow")
+ celErrTimestampOverflow = &Err{error: errTimestampOverflow}
+
+ // celErrNoSuchOverload indicates that the call arguments did not match a supported method signature.
+ celErrNoSuchOverload = NewErr("no such overload")
+)
+
+// NewErr creates a new Err described by the format string and args.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func NewErr(format string, args ...any) ref.Val {
+ return &Err{error: fmt.Errorf(format, args...)}
+}
+
+// NewErrWithNodeID creates a new Err described by the format string and args.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func NewErrWithNodeID(id int64, format string, args ...any) ref.Val {
+ return &Err{error: fmt.Errorf(format, args...), id: id}
+}
+
+// LabelErrNode returns val unaltered if it is not an Err or if the error has a non-zero
+// AST node ID already present. Otherwise the id is added to the error for
+// recovery with the Err.NodeID method.
+func LabelErrNode(id int64, val ref.Val) ref.Val {
+ if err, ok := val.(*Err); ok && err.id == 0 {
+ err.id = id
+ return err
+ }
+ return val
+}
+
+// NoSuchOverloadErr returns a new types.Err instance with a no such overload message.
+func NoSuchOverloadErr() ref.Val {
+ return celErrNoSuchOverload
+}
+
+// UnsupportedRefValConversionErr returns a types.NewErr instance with a no such conversion
+// message that indicates that the native value could not be converted to a CEL ref.Val.
+func UnsupportedRefValConversionErr(val any) ref.Val {
+ return NewErr("unsupported conversion to ref.Val: (%T)%v", val, val)
+}
+
+// MaybeNoSuchOverloadErr returns the input value if it is an error or unknown,
+// else a new no such overload error.
+func MaybeNoSuchOverloadErr(val ref.Val) ref.Val {
+ return ValOrErr(val, "no such overload")
+}
+
+// ValOrErr either returns the existing error or creates a new one.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func ValOrErr(val ref.Val, format string, args ...any) ref.Val {
+ if val == nil || !IsUnknownOrError(val) {
+ return NewErr(format, args...)
+ }
+ return val
+}
+
+// WrapErr wraps an existing Go error value into a CEL Err value.
+func WrapErr(err error) ref.Val {
+ return &Err{error: err}
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (e *Err) ConvertToNative(typeDesc reflect.Type) (any, error) {
+ return nil, e.error
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (e *Err) ConvertToType(typeVal ref.Type) ref.Val {
+ // Errors are not convertible to other representations.
+ return e
+}
+
+// Equal implements ref.Val.Equal.
+func (e *Err) Equal(other ref.Val) ref.Val {
+ // An error cannot be equal to any other value, so it returns itself.
+ return e
+}
+
+// String implements fmt.Stringer.
+func (e *Err) String() string {
+ return e.error.Error()
+}
+
+// Type implements ref.Val.Type.
+func (e *Err) Type() ref.Type {
+ return ErrType
+}
+
+// Value implements ref.Val.Value.
+func (e *Err) Value() any {
+ return e.error
+}
+
+// NodeID returns the AST node ID of the expression that returned the error.
+func (e *Err) NodeID() int64 {
+ return e.id
+}
+
+// Is implements errors.Is.
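A short sketch of how these error values behave from the caller's side (illustrative message text, not part of the vendored file): errors flow through evaluation as ordinary ref.Val values, and WrapErr keeps the underlying Go error reachable via the errors package.

package main

import (
	"errors"
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	v := types.NewErr("no such attribute '%s'", "spec.replicas") // hypothetical message
	fmt.Println(types.IsError(v)) // true
	fmt.Println(v)                // no such attribute 'spec.replicas'

	sentinel := errors.New("boom")
	wrapped := types.WrapErr(sentinel)
	// Err implements Is/Unwrap, so errors.Is sees through the CEL wrapper.
	fmt.Println(errors.Is(wrapped.(error), sentinel)) // true
}
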
+func (e *Err) Is(target error) bool { + return e.error.Error() == target.Error() +} + +// Unwrap implements errors.Unwrap. +func (e *Err) Unwrap() error { + return e.error +} + +// IsError returns whether the input element ref.Type or ref.Val is equal to +// the ErrType singleton. +func IsError(val ref.Val) bool { + switch val.(type) { + case *Err: + return true + default: + return false + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/int.go b/hack/tools/vendor/github.com/google/cel-go/common/types/int.go new file mode 100644 index 0000000000..0ae9507c39 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/int.go @@ -0,0 +1,303 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "math" + "reflect" + "strconv" + "time" + + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +// Int type that implements ref.Val as well as comparison and math operators. +type Int int64 + +// Int constants used for comparison results. +const ( + // IntZero is the zero-value for Int + IntZero = Int(0) + IntOne = Int(1) + IntNegOne = Int(-1) +) + +var ( + // int32WrapperType reflected type for protobuf int32 wrapper type. + int32WrapperType = reflect.TypeOf(&wrapperspb.Int32Value{}) + + // int64WrapperType reflected type for protobuf int64 wrapper type. + int64WrapperType = reflect.TypeOf(&wrapperspb.Int64Value{}) +) + +// Add implements traits.Adder.Add. +func (i Int) Add(other ref.Val) ref.Val { + otherInt, ok := other.(Int) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + val, err := addInt64Checked(int64(i), int64(otherInt)) + if err != nil { + return WrapErr(err) + } + return Int(val) +} + +// Compare implements traits.Comparer.Compare. +func (i Int) Compare(other ref.Val) ref.Val { + switch ov := other.(type) { + case Double: + if math.IsNaN(float64(ov)) { + return NewErr("NaN values cannot be ordered") + } + return compareIntDouble(i, ov) + case Int: + return compareInt(i, ov) + case Uint: + return compareIntUint(i, ov) + default: + return MaybeNoSuchOverloadErr(other) + } +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (i Int) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.Int, reflect.Int32: + // Enums are also mapped as int32 derivations. + // Note, the code doesn't convert to the enum value directly since this is not known, but + // the net effect with respect to proto-assignment is handled correctly by the reflection + // Convert method. 
+ v, err := int64ToInt32Checked(int64(i)) + if err != nil { + return nil, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Int8: + v, err := int64ToInt8Checked(int64(i)) + if err != nil { + return nil, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Int16: + v, err := int64ToInt16Checked(int64(i)) + if err != nil { + return nil, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Int64: + return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Primitives must be wrapped before being set on an Any field. + return anypb.New(wrapperspb.Int64(int64(i))) + case int32WrapperType: + // Convert the value to a wrapperspb.Int32Value, error on overflow. + v, err := int64ToInt32Checked(int64(i)) + if err != nil { + return nil, err + } + return wrapperspb.Int32(v), nil + case int64WrapperType: + // Convert the value to a wrapperspb.Int64Value. + return wrapperspb.Int64(int64(i)), nil + case jsonValueType: + // The proto-to-JSON conversion rules would convert all 64-bit integer values to JSON + // decimal strings. Because CEL ints might come from the automatic widening of 32-bit + // values in protos, the JSON type is chosen dynamically based on the value. + // + // - Integers -2^53-1 < n < 2^53-1 are encoded as JSON numbers. + // - Integers outside this range are encoded as JSON strings. + // + // The integer to float range represents the largest interval where such a conversion + // can round-trip accurately. Thus, conversions from a 32-bit source can expect a JSON + // number as with protobuf. Those consuming JSON from a 64-bit source must be able to + // handle either a JSON number or a JSON decimal string. To handle these cases safely + // the string values must be explicitly converted to int() within a CEL expression; + // however, it is best to simply stay within the JSON number range when building JSON + // objects in CEL. + if i.isJSONSafe() { + return structpb.NewNumberValue(float64(i)), nil + } + // Proto3 to JSON conversion requires string-formatted int64 values + // since the conversion to floating point would result in truncation. + return structpb.NewStringValue(strconv.FormatInt(int64(i), 10)), nil + } + switch typeDesc.Elem().Kind() { + case reflect.Int32: + // Convert the value to a wrapperspb.Int32Value, error on overflow. + v, err := int64ToInt32Checked(int64(i)) + if err != nil { + return nil, err + } + p := reflect.New(typeDesc.Elem()) + p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) + return p.Interface(), nil + case reflect.Int64: + v := int64(i) + p := reflect.New(typeDesc.Elem()) + p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) + return p.Interface(), nil + } + case reflect.Interface: + iv := i.Value() + if reflect.TypeOf(iv).Implements(typeDesc) { + return iv, nil + } + if reflect.TypeOf(i).Implements(typeDesc) { + return i, nil + } + } + return nil, fmt.Errorf("unsupported type conversion from 'int' to %v", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. 
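Before ConvertToType below, one illustrative note (an editor's sketch, not patch content): Int arithmetic is overflow-checked, and the int-to-timestamp conversion interprets the operand as Unix seconds within the representable range.

package main

import (
	"fmt"
	"math"

	"github.com/google/cel-go/common/types"
)

func main() {
	// Overflow yields an error value instead of wrapping around.
	sum := types.Int(math.MaxInt64).Add(types.Int(1))
	fmt.Println(types.IsError(sum)) // true ("integer overflow")

	// int -> timestamp treats the operand as Unix seconds.
	ts := types.Int(0).ConvertToType(types.TimestampType)
	fmt.Println(ts) // 1970-01-01 00:00:00 +0000 UTC
}
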
+func (i Int) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case IntType: + return i + case UintType: + u, err := int64ToUint64Checked(int64(i)) + if err != nil { + return WrapErr(err) + } + return Uint(u) + case DoubleType: + return Double(i) + case StringType: + return String(fmt.Sprintf("%d", int64(i))) + case TimestampType: + // The maximum positive value that can be passed to time.Unix is math.MaxInt64 minus the number + // of seconds between year 1 and year 1970. See comments on unixToInternal. + if int64(i) < minUnixTime || int64(i) > maxUnixTime { + return celErrTimestampOverflow + } + return timestampOf(time.Unix(int64(i), 0).UTC()) + case TypeType: + return IntType + } + return NewErr("type conversion error from '%s' to '%s'", IntType, typeVal) +} + +// Divide implements traits.Divider.Divide. +func (i Int) Divide(other ref.Val) ref.Val { + otherInt, ok := other.(Int) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + val, err := divideInt64Checked(int64(i), int64(otherInt)) + if err != nil { + return WrapErr(err) + } + return Int(val) +} + +// Equal implements ref.Val.Equal. +func (i Int) Equal(other ref.Val) ref.Val { + switch ov := other.(type) { + case Double: + if math.IsNaN(float64(ov)) { + return False + } + return Bool(compareIntDouble(i, ov) == 0) + case Int: + return Bool(i == ov) + case Uint: + return Bool(compareIntUint(i, ov) == 0) + default: + return False + } +} + +// IsZeroValue returns true if integer is equal to 0 +func (i Int) IsZeroValue() bool { + return i == IntZero +} + +// Modulo implements traits.Modder.Modulo. +func (i Int) Modulo(other ref.Val) ref.Val { + otherInt, ok := other.(Int) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + val, err := moduloInt64Checked(int64(i), int64(otherInt)) + if err != nil { + return WrapErr(err) + } + return Int(val) +} + +// Multiply implements traits.Multiplier.Multiply. +func (i Int) Multiply(other ref.Val) ref.Val { + otherInt, ok := other.(Int) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + val, err := multiplyInt64Checked(int64(i), int64(otherInt)) + if err != nil { + return WrapErr(err) + } + return Int(val) +} + +// Negate implements traits.Negater.Negate. +func (i Int) Negate() ref.Val { + val, err := negateInt64Checked(int64(i)) + if err != nil { + return WrapErr(err) + } + return Int(val) +} + +// Subtract implements traits.Subtractor.Subtract. +func (i Int) Subtract(subtrahend ref.Val) ref.Val { + subtraInt, ok := subtrahend.(Int) + if !ok { + return MaybeNoSuchOverloadErr(subtrahend) + } + val, err := subtractInt64Checked(int64(i), int64(subtraInt)) + if err != nil { + return WrapErr(err) + } + return Int(val) +} + +// Type implements ref.Val.Type. +func (i Int) Type() ref.Type { + return IntType +} + +// Value implements ref.Val.Value. +func (i Int) Value() any { + return int64(i) +} + +// isJSONSafe indicates whether the int is safely representable as a floating point value in JSON. +func (i Int) isJSONSafe() bool { + return i >= minIntJSON && i <= maxIntJSON +} + +const ( + // maxIntJSON is defined as the Number.MAX_SAFE_INTEGER value per EcmaScript 6. + maxIntJSON = 1<<53 - 1 + // minIntJSON is defined as the Number.MIN_SAFE_INTEGER value per EcmaScript 6. 
+ minIntJSON = -maxIntJSON +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/iterator.go b/hack/tools/vendor/github.com/google/cel-go/common/types/iterator.go new file mode 100644 index 0000000000..98e9147b6e --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/iterator.go @@ -0,0 +1,55 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +var ( + // IteratorType singleton. + IteratorType = NewObjectType("iterator", traits.IteratorType) +) + +// baseIterator is the basis for list, map, and object iterators. +// +// An iterator in and of itself should not be a valid value for comparison, but must implement the +// `ref.Val` methods in order to be well-supported within instruction arguments processed by the +// interpreter. +type baseIterator struct{} + +func (*baseIterator) ConvertToNative(typeDesc reflect.Type) (any, error) { + return nil, fmt.Errorf("type conversion on iterators not supported") +} + +func (*baseIterator) ConvertToType(typeVal ref.Type) ref.Val { + return NewErr("no such overload") +} + +func (*baseIterator) Equal(other ref.Val) ref.Val { + return NewErr("no such overload") +} + +func (*baseIterator) Type() ref.Type { + return IteratorType +} + +func (*baseIterator) Value() any { + return nil +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/json_value.go b/hack/tools/vendor/github.com/google/cel-go/common/types/json_value.go new file mode 100644 index 0000000000..13a4efe7ad --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/json_value.go @@ -0,0 +1,29 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + + structpb "google.golang.org/protobuf/types/known/structpb" +) + +// JSON type constants representing the reflected types of protobuf JSON values. 
+var (
+ jsonValueType = reflect.TypeOf(&structpb.Value{})
+ jsonListValueType = reflect.TypeOf(&structpb.ListValue{})
+ jsonStructType = reflect.TypeOf(&structpb.Struct{})
+ jsonNullType = reflect.TypeOf(structpb.NullValue_NULL_VALUE)
+) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/list.go b/hack/tools/vendor/github.com/google/cel-go/common/types/list.go new file mode 100644 index 0000000000..ca47d39fec --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/list.go @@ -0,0 +1,574 @@ +// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "github.com/google/cel-go/common/types/ref"
+ "github.com/google/cel-go/common/types/traits"
+
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// NewDynamicList returns a traits.Lister with heterogeneous elements.
+// value should be an array of "native" types, i.e. any type that
+// NativeToValue() can convert to a ref.Val.
+func NewDynamicList(adapter Adapter, value any) traits.Lister {
+ refValue := reflect.ValueOf(value)
+ return &baseList{
+ Adapter: adapter,
+ value: value,
+ size: refValue.Len(),
+ get: func(i int) any {
+ return refValue.Index(i).Interface()
+ },
+ }
+}
+
+// NewStringList returns a traits.Lister containing only strings.
+func NewStringList(adapter Adapter, elems []string) traits.Lister {
+ return &baseList{
+ Adapter: adapter,
+ value: elems,
+ size: len(elems),
+ get: func(i int) any { return elems[i] },
+ }
+}
+
+// NewRefValList returns a traits.Lister with ref.Val elements.
+//
+// This type specialization is used with list literals within CEL expressions.
+func NewRefValList(adapter Adapter, elems []ref.Val) traits.Lister {
+ return &baseList{
+ Adapter: adapter,
+ value: elems,
+ size: len(elems),
+ get: func(i int) any { return elems[i] },
+ }
+}
+
+// NewProtoList returns a traits.Lister based on a pb.List instance.
+func NewProtoList(adapter Adapter, list protoreflect.List) traits.Lister {
+ return &baseList{
+ Adapter: adapter,
+ value: list,
+ size: list.Len(),
+ get: func(i int) any { return list.Get(i).Interface() },
+ }
+}
+
+// NewJSONList returns a traits.Lister based on a structpb.ListValue instance.
+func NewJSONList(adapter Adapter, l *structpb.ListValue) traits.Lister {
+ vals := l.GetValues()
+ return &baseList{
+ Adapter: adapter,
+ value: l,
+ size: len(vals),
+ get: func(i int) any { return vals[i] },
+ }
+}
+
+// NewMutableList creates a new mutable list whose internal state can be modified.
+func NewMutableList(adapter Adapter) traits.MutableLister { + var mutableValues []ref.Val + l := &mutableList{ + baseList: &baseList{ + Adapter: adapter, + value: mutableValues, + size: 0, + }, + mutableValues: mutableValues, + } + l.get = func(i int) any { + return l.mutableValues[i] + } + return l +} + +// baseList points to a list containing elements of any type. +// The `value` is an array of native values, and refValue is its reflection object. +// The `Adapter` enables native type to CEL type conversions. +type baseList struct { + Adapter + value any + + // size indicates the number of elements within the list. + // Since objects are immutable the size of a list is static. + size int + + // get returns a value at the specified integer index. + // The index is guaranteed to be checked against the list index range. + get func(int) any +} + +// Add implements the traits.Adder interface method. +func (l *baseList) Add(other ref.Val) ref.Val { + otherList, ok := other.(traits.Lister) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + if l.Size() == IntZero { + return other + } + if otherList.Size() == IntZero { + return l + } + return &concatList{ + Adapter: l.Adapter, + prevList: l, + nextList: otherList} +} + +// Contains implements the traits.Container interface method. +func (l *baseList) Contains(elem ref.Val) ref.Val { + for i := 0; i < l.size; i++ { + val := l.NativeToValue(l.get(i)) + cmp := elem.Equal(val) + b, ok := cmp.(Bool) + if ok && b == True { + return True + } + } + return False +} + +// ConvertToNative implements the ref.Val interface method. +func (l *baseList) ConvertToNative(typeDesc reflect.Type) (any, error) { + // If the underlying list value is assignable to the reflected type return it. + if reflect.TypeOf(l.value).AssignableTo(typeDesc) { + return l.value, nil + } + // If the list wrapper is assignable to the desired type return it. + if reflect.TypeOf(l).AssignableTo(typeDesc) { + return l, nil + } + // Attempt to convert the list to a set of well known protobuf types. + switch typeDesc { + case anyValueType: + json, err := l.ConvertToNative(jsonListValueType) + if err != nil { + return nil, err + } + return anypb.New(json.(proto.Message)) + case jsonValueType, jsonListValueType: + jsonValues, err := + l.ConvertToNative(reflect.TypeOf([]*structpb.Value{})) + if err != nil { + return nil, err + } + jsonList := &structpb.ListValue{Values: jsonValues.([]*structpb.Value)} + if typeDesc == jsonListValueType { + return jsonList, nil + } + return structpb.NewListValue(jsonList), nil + } + // Non-list conversion. + if typeDesc.Kind() != reflect.Slice && typeDesc.Kind() != reflect.Array { + return nil, fmt.Errorf("type conversion error from list to '%v'", typeDesc) + } + + // List conversion. + // Allow the element ConvertToNative() function to determine whether conversion is possible. + otherElemType := typeDesc.Elem() + elemCount := l.size + var nativeList reflect.Value + if typeDesc.Kind() == reflect.Array { + nativeList = reflect.New(reflect.ArrayOf(elemCount, typeDesc)).Elem().Index(0) + } else { + nativeList = reflect.MakeSlice(typeDesc, elemCount, elemCount) + + } + for i := 0; i < elemCount; i++ { + elem := l.NativeToValue(l.get(i)) + nativeElemVal, err := elem.ConvertToNative(otherElemType) + if err != nil { + return nil, err + } + nativeList.Index(i).Set(reflect.ValueOf(nativeElemVal)) + } + return nativeList.Interface(), nil +} + +// ConvertToType implements the ref.Val interface method. 
+func (l *baseList) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case ListType: + return l + case TypeType: + return ListType + } + return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal) +} + +// Equal implements the ref.Val interface method. +func (l *baseList) Equal(other ref.Val) ref.Val { + otherList, ok := other.(traits.Lister) + if !ok { + return False + } + if l.Size() != otherList.Size() { + return False + } + for i := IntZero; i < l.Size().(Int); i++ { + thisElem := l.Get(i) + otherElem := otherList.Get(i) + elemEq := Equal(thisElem, otherElem) + if elemEq == False { + return False + } + } + return True +} + +// Get implements the traits.Indexer interface method. +func (l *baseList) Get(index ref.Val) ref.Val { + ind, err := IndexOrError(index) + if err != nil { + return ValOrErr(index, err.Error()) + } + if ind < 0 || ind >= l.size { + return NewErr("index '%d' out of range in list size '%d'", ind, l.Size()) + } + return l.NativeToValue(l.get(ind)) +} + +// IsZeroValue returns true if the list is empty. +func (l *baseList) IsZeroValue() bool { + return l.size == 0 +} + +// Fold calls the FoldEntry method for each (index, value) pair in the list. +func (l *baseList) Fold(f traits.Folder) { + for i := 0; i < l.size; i++ { + if !f.FoldEntry(i, l.get(i)) { + break + } + } +} + +// Iterator implements the traits.Iterable interface method. +func (l *baseList) Iterator() traits.Iterator { + return newListIterator(l) +} + +// Size implements the traits.Sizer interface method. +func (l *baseList) Size() ref.Val { + return Int(l.size) +} + +// Type implements the ref.Val interface method. +func (l *baseList) Type() ref.Type { + return ListType +} + +// Value implements the ref.Val interface method. +func (l *baseList) Value() any { + return l.value +} + +// String converts the list to a human readable string form. +func (l *baseList) String() string { + var sb strings.Builder + sb.WriteString("[") + for i := 0; i < l.size; i++ { + sb.WriteString(fmt.Sprintf("%v", l.get(i))) + if i != l.size-1 { + sb.WriteString(", ") + } + } + sb.WriteString("]") + return sb.String() +} + +// mutableList aggregates values into its internal storage. For use with internal CEL variables only. +type mutableList struct { + *baseList + mutableValues []ref.Val +} + +// Add copies elements from the other list into the internal storage of the mutable list. +// The ref.Val returned by Add is the receiver. +func (l *mutableList) Add(other ref.Val) ref.Val { + switch otherList := other.(type) { + case *mutableList: + l.mutableValues = append(l.mutableValues, otherList.mutableValues...) + l.size += len(otherList.mutableValues) + case traits.Lister: + for i := IntZero; i < otherList.Size().(Int); i++ { + l.size++ + l.mutableValues = append(l.mutableValues, otherList.Get(i)) + } + default: + return MaybeNoSuchOverloadErr(otherList) + } + return l +} + +// ToImmutableList returns an immutable list based on the internal storage of the mutable list. +func (l *mutableList) ToImmutableList() traits.Lister { + // The reference to internal state is guaranteed to be safe as this call is only performed + // when mutations have been completed. + return NewRefValList(l.Adapter, l.mutableValues) +} + +// concatList combines two list implementations together into a view. +// The `Adapter` enables native type to CEL type conversions. +type concatList struct { + Adapter + value any + prevList traits.Lister + nextList traits.Lister +} + +// Add implements the traits.Adder interface method. 
+func (l *concatList) Add(other ref.Val) ref.Val { + otherList, ok := other.(traits.Lister) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + if l.Size() == IntZero { + return other + } + if otherList.Size() == IntZero { + return l + } + return &concatList{ + Adapter: l.Adapter, + prevList: l, + nextList: otherList} +} + +// Contains implements the traits.Container interface method. +func (l *concatList) Contains(elem ref.Val) ref.Val { + // The concat list relies on the IsErrorOrUnknown checks against the input element to be + // performed by the `prevList` and/or `nextList`. + prev := l.prevList.Contains(elem) + // Short-circuit the return if the elem was found in the prev list. + if prev == True { + return prev + } + // Return if the elem was found in the next list. + next := l.nextList.Contains(elem) + if next == True { + return next + } + // Handle the case where an error or unknown was encountered before checking next. + if IsUnknownOrError(prev) { + return prev + } + // Otherwise, rely on the next value as the representative result. + return next +} + +// ConvertToNative implements the ref.Val interface method. +func (l *concatList) ConvertToNative(typeDesc reflect.Type) (any, error) { + combined := NewDynamicList(l.Adapter, l.Value().([]any)) + return combined.ConvertToNative(typeDesc) +} + +// ConvertToType implements the ref.Val interface method. +func (l *concatList) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case ListType: + return l + case TypeType: + return ListType + } + return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal) +} + +// Equal implements the ref.Val interface method. +func (l *concatList) Equal(other ref.Val) ref.Val { + otherList, ok := other.(traits.Lister) + if !ok { + return False + } + if l.Size() != otherList.Size() { + return False + } + var maybeErr ref.Val + for i := IntZero; i < l.Size().(Int); i++ { + thisElem := l.Get(i) + otherElem := otherList.Get(i) + elemEq := Equal(thisElem, otherElem) + if elemEq == False { + return False + } + if maybeErr == nil && IsUnknownOrError(elemEq) { + maybeErr = elemEq + } + } + if maybeErr != nil { + return maybeErr + } + return True +} + +// Get implements the traits.Indexer interface method. +func (l *concatList) Get(index ref.Val) ref.Val { + ind, err := IndexOrError(index) + if err != nil { + return ValOrErr(index, err.Error()) + } + i := Int(ind) + if i < l.prevList.Size().(Int) { + return l.prevList.Get(i) + } + offset := i - l.prevList.Size().(Int) + return l.nextList.Get(offset) +} + +// IsZeroValue returns true if the list is empty. +func (l *concatList) IsZeroValue() bool { + return l.Size().(Int) == 0 +} + +// Fold calls the FoldEntry method for each (index, value) pair in the list. +func (l *concatList) Fold(f traits.Folder) { + for i := Int(0); i < l.Size().(Int); i++ { + if !f.FoldEntry(i, l.Get(i)) { + break + } + } +} + +// Iterator implements the traits.Iterable interface method. +func (l *concatList) Iterator() traits.Iterator { + return newListIterator(l) +} + +// Size implements the traits.Sizer interface method. +func (l *concatList) Size() ref.Val { + return l.prevList.Size().(Int).Add(l.nextList.Size()) +} + +// String converts the concatenated list to a human-readable string. 
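As context for the concatList methods around here (a sketch, assuming types.DefaultTypeAdapter from this package's provider file, which is not part of this hunk): Add on two non-empty lists builds a lazy concatenation view rather than copying elements up front.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/traits"
)

func main() {
	l1 := types.NewStringList(types.DefaultTypeAdapter, []string{"a", "b"})
	l2 := types.NewStringList(types.DefaultTypeAdapter, []string{"c"})
	sum := l1.Add(l2).(traits.Lister) // lazy concatList view
	fmt.Println(sum)                             // [a, b, c]
	fmt.Println(sum.Size())                      // 3
	fmt.Println(sum.Contains(types.String("c"))) // true
}
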
+func (l *concatList) String() string { + var sb strings.Builder + sb.WriteString("[") + for i := Int(0); i < l.Size().(Int); i++ { + sb.WriteString(fmt.Sprintf("%v", l.Get(i))) + if i != l.Size().(Int)-1 { + sb.WriteString(", ") + } + } + sb.WriteString("]") + return sb.String() +} + +// Type implements the ref.Val interface method. +func (l *concatList) Type() ref.Type { + return ListType +} + +// Value implements the ref.Val interface method. +func (l *concatList) Value() any { + if l.value == nil { + merged := make([]any, l.Size().(Int)) + prevLen := l.prevList.Size().(Int) + for i := Int(0); i < prevLen; i++ { + merged[i] = l.prevList.Get(i).Value() + } + nextLen := l.nextList.Size().(Int) + for j := Int(0); j < nextLen; j++ { + merged[prevLen+j] = l.nextList.Get(j).Value() + } + l.value = merged + } + return l.value +} + +func newListIterator(listValue traits.Lister) traits.Iterator { + return &listIterator{ + listValue: listValue, + len: listValue.Size().(Int), + } +} + +type listIterator struct { + *baseIterator + listValue traits.Lister + cursor Int + len Int +} + +// HasNext implements the traits.Iterator interface method. +func (it *listIterator) HasNext() ref.Val { + return Bool(it.cursor < it.len) +} + +// Next implements the traits.Iterator interface method. +func (it *listIterator) Next() ref.Val { + if it.HasNext() == True { + index := it.cursor + it.cursor++ + return it.listValue.Get(index) + } + return nil +} + +// IndexOrError converts an input index value into either a lossless integer index or an error. +func IndexOrError(index ref.Val) (int, error) { + switch iv := index.(type) { + case Int: + return int(iv), nil + case Double: + if ik, ok := doubleToInt64Lossless(float64(iv)); ok { + return int(ik), nil + } + return -1, fmt.Errorf("unsupported index value %v in list", index) + case Uint: + if ik, ok := uint64ToInt64Lossless(uint64(iv)); ok { + return int(ik), nil + } + return -1, fmt.Errorf("unsupported index value %v in list", index) + default: + return -1, fmt.Errorf("unsupported index type '%s' in list", index.Type()) + } +} + +// ToFoldableList will create a Foldable version of a list suitable for key-value pair iteration. +// +// For values which are already Foldable, this call is a no-op. For all other values, the fold is +// driven via the Size() and Get() calls which means that the folding will function, but take a +// performance hit. +func ToFoldableList(l traits.Lister) traits.Foldable { + if f, ok := l.(traits.Foldable); ok { + return f + } + return interopFoldableList{Lister: l} +} + +type interopFoldableList struct { + traits.Lister +} + +// Fold implements the traits.Foldable interface method and performs an iteration over the +// range of elements of the list. +func (l interopFoldableList) Fold(f traits.Folder) { + sz := l.Size().(Int) + for i := Int(0); i < sz; i++ { + if !f.FoldEntry(i, l.Get(i)) { + break + } + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/map.go b/hack/tools/vendor/github.com/google/cel-go/common/types/map.go new file mode 100644 index 0000000000..cb6cce78b0 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/map.go @@ -0,0 +1,1002 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "strings" + + "github.com/stoewer/go-strcase" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/google/cel-go/common/types/pb" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs. +func NewDynamicMap(adapter Adapter, value any) traits.Mapper { + refValue := reflect.ValueOf(value) + return &baseMap{ + Adapter: adapter, + mapAccessor: newReflectMapAccessor(adapter, refValue), + value: value, + size: refValue.Len(), + } +} + +// NewJSONStruct creates a traits.Mapper implementation backed by a JSON struct that has been +// encoded in protocol buffer form. +// +// The `adapter` argument provides type adaptation capabilities from proto to CEL. +func NewJSONStruct(adapter Adapter, value *structpb.Struct) traits.Mapper { + fields := value.GetFields() + return &baseMap{ + Adapter: adapter, + mapAccessor: newJSONStructAccessor(adapter, fields), + value: value, + size: len(fields), + } +} + +// NewRefValMap returns a specialized traits.Mapper with CEL valued keys and values. +func NewRefValMap(adapter Adapter, value map[ref.Val]ref.Val) traits.Mapper { + return &baseMap{ + Adapter: adapter, + mapAccessor: newRefValMapAccessor(value), + value: value, + size: len(value), + } +} + +// NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values. +func NewStringInterfaceMap(adapter Adapter, value map[string]any) traits.Mapper { + return &baseMap{ + Adapter: adapter, + mapAccessor: newStringIfaceMapAccessor(adapter, value), + value: value, + size: len(value), + } +} + +// NewStringStringMap returns a specialized traits.Mapper with string keys and values. +func NewStringStringMap(adapter Adapter, value map[string]string) traits.Mapper { + return &baseMap{ + Adapter: adapter, + mapAccessor: newStringMapAccessor(value), + value: value, + size: len(value), + } +} + +// NewProtoMap returns a specialized traits.Mapper for handling protobuf map values. +func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper { + return &protoMap{ + Adapter: adapter, + value: value, + } +} + +// NewMutableMap constructs a mutable map from an adapter and a set of map values. +func NewMutableMap(adapter Adapter, mutableValues map[ref.Val]ref.Val) traits.MutableMapper { + mutableCopy := make(map[ref.Val]ref.Val, len(mutableValues)) + for k, v := range mutableValues { + mutableCopy[k] = v + } + m := &mutableMap{ + baseMap: &baseMap{ + Adapter: adapter, + mapAccessor: newRefValMapAccessor(mutableCopy), + value: mutableCopy, + size: len(mutableCopy), + }, + mutableValues: mutableCopy, + } + return m +} + +// mapAccessor is a private interface for finding values within a map and iterating over the keys. +// This interface implements portions of the API surface area required by the traits.Mapper +// interface. 
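For orientation on the constructors above (a sketch, again assuming types.DefaultTypeAdapter from elsewhere in the package, with illustrative data): lookups return CEL values, and a missing key surfaces as an error value rather than a zero value.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	m := types.NewStringStringMap(types.DefaultTypeAdapter, map[string]string{
		"region": "us-east-1", // illustrative entry
	})
	fmt.Println(m.Contains(types.String("region")))          // true
	fmt.Println(m.Get(types.String("region")))               // us-east-1
	fmt.Println(types.IsError(m.Get(types.String("zone")))) // true
}
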
+type mapAccessor interface {
+	// Find returns a value, if one exists, for the input key.
+	//
+	// If the key is not found the function returns (nil, false).
+	Find(ref.Val) (ref.Val, bool)
+
+	// Iterator returns an Iterator over the map key set.
+	Iterator() traits.Iterator
+
+	// Fold calls the FoldEntry method for each (key, value) pair in the map.
+	Fold(traits.Folder)
+}
+
+// baseMap is a reflection based map implementation designed to handle a variety of map-like types.
+//
+// Since CEL is side-effect free, the base map represents an immutable object.
+type baseMap struct {
+	// TypeAdapter used to convert keys and values accessed within the map.
+	Adapter
+
+	// mapAccessor interface implementation used to find and iterate over map keys.
+	mapAccessor
+
+	// value is the native Go value upon which the map type operates.
+	value any
+
+	// size is the number of entries in the map.
+	size int
+}
+
+// Contains implements the traits.Container interface method.
+func (m *baseMap) Contains(index ref.Val) ref.Val {
+	_, found := m.Find(index)
+	return Bool(found)
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (any, error) {
+	// If the map is already assignable to the desired type return it, e.g. interfaces and
+	// maps with the same key value types.
+	if reflect.TypeOf(m.value).AssignableTo(typeDesc) {
+		return m.value, nil
+	}
+	if reflect.TypeOf(m).AssignableTo(typeDesc) {
+		return m, nil
+	}
+	switch typeDesc {
+	case anyValueType:
+		json, err := m.ConvertToNative(jsonStructType)
+		if err != nil {
+			return nil, err
+		}
+		return anypb.New(json.(proto.Message))
+	case jsonValueType, jsonStructType:
+		jsonEntries, err :=
+			m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{}))
+		if err != nil {
+			return nil, err
+		}
+		jsonMap := &structpb.Struct{Fields: jsonEntries.(map[string]*structpb.Value)}
+		if typeDesc == jsonStructType {
+			return jsonMap, nil
+		}
+		return structpb.NewStructValue(jsonMap), nil
+	}
+
+	// Unwrap pointers, but track their use.
+	isPtr := false
+	if typeDesc.Kind() == reflect.Ptr {
+		tk := typeDesc
+		typeDesc = typeDesc.Elem()
+		if typeDesc.Kind() == reflect.Ptr {
+			return nil, fmt.Errorf("unsupported type conversion to '%v'", tk)
+		}
+		isPtr = true
+	}
+	switch typeDesc.Kind() {
+	// Map conversion.
+	case reflect.Map:
+		otherKey := typeDesc.Key()
+		otherElem := typeDesc.Elem()
+		nativeMap := reflect.MakeMapWithSize(typeDesc, m.size)
+		it := m.Iterator()
+		for it.HasNext() == True {
+			key := it.Next()
+			refKeyValue, err := key.ConvertToNative(otherKey)
+			if err != nil {
+				return nil, err
+			}
+			refElemValue, err := m.Get(key).ConvertToNative(otherElem)
+			if err != nil {
+				return nil, err
+			}
+			nativeMap.SetMapIndex(reflect.ValueOf(refKeyValue), reflect.ValueOf(refElemValue))
+		}
+		return nativeMap.Interface(), nil
+	case reflect.Struct:
+		nativeStructPtr := reflect.New(typeDesc)
+		nativeStruct := nativeStructPtr.Elem()
+		it := m.Iterator()
+		for it.HasNext() == True {
+			key := it.Next()
+			// Ensure the field name being referenced is exported.
+			// Only exported (public) field names can be set by reflection, where the name
+			// must be at least one character in length and start with an upper-case letter.
+			fieldName := key.ConvertToType(StringType)
+			if IsError(fieldName) {
+				return nil, fieldName.(*Err)
+			}
+			name := string(fieldName.(String))
+			name = strcase.UpperCamelCase(name)
+			fieldRef := nativeStruct.FieldByName(name)
+			if !fieldRef.IsValid() {
+				return nil, fmt.Errorf("type conversion error, no such field '%s' in type '%v'", name, typeDesc)
+			}
+			fieldValue, err := m.Get(key).ConvertToNative(fieldRef.Type())
+			if err != nil {
+				return nil, err
+			}
+			fieldRef.Set(reflect.ValueOf(fieldValue))
+		}
+		if isPtr {
+			return nativeStructPtr.Interface(), nil
+		}
+		return nativeStruct.Interface(), nil
+	}
+	return nil, fmt.Errorf("type conversion error from map to '%v'", typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (m *baseMap) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case MapType:
+		return m
+	case TypeType:
+		return MapType
+	}
+	return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (m *baseMap) Equal(other ref.Val) ref.Val {
+	otherMap, ok := other.(traits.Mapper)
+	if !ok {
+		return False
+	}
+	if m.Size() != otherMap.Size() {
+		return False
+	}
+	it := m.Iterator()
+	for it.HasNext() == True {
+		key := it.Next()
+		thisVal, _ := m.Find(key)
+		otherVal, found := otherMap.Find(key)
+		if !found {
+			return False
+		}
+		valEq := Equal(thisVal, otherVal)
+		if valEq == False {
+			return False
+		}
+	}
+	return True
+}
+
+// Get implements the traits.Indexer interface method.
+func (m *baseMap) Get(key ref.Val) ref.Val {
+	v, found := m.Find(key)
+	if !found {
+		return ValOrErr(v, "no such key: %v", key)
+	}
+	return v
+}
+
+// IsZeroValue returns true if the map is empty.
+func (m *baseMap) IsZeroValue() bool {
+	return m.size == 0
+}
+
+// Size implements the traits.Sizer interface method.
+func (m *baseMap) Size() ref.Val {
+	return Int(m.size)
+}
+
+// String converts the map into a human-readable string.
+func (m *baseMap) String() string {
+	var sb strings.Builder
+	sb.WriteString("{")
+	it := m.Iterator()
+	i := 0
+	for it.HasNext() == True {
+		k := it.Next()
+		v, _ := m.Find(k)
+		sb.WriteString(fmt.Sprintf("%v: %v", k, v))
+		if i != m.size-1 {
+			sb.WriteString(", ")
+		}
+		i++
+	}
+	sb.WriteString("}")
+	return sb.String()
+}
+
+// Type implements the ref.Val interface method.
+func (m *baseMap) Type() ref.Type {
+	return MapType
+}
+
+// Value implements the ref.Val interface method.
+func (m *baseMap) Value() any {
+	return m.value
+}
+
+// mutableMap holds onto a set of mutable values which are used for intermediate computations.
+type mutableMap struct {
+	*baseMap
+	mutableValues map[ref.Val]ref.Val
+}
+
+// Insert implements the traits.MutableMapper interface method, returning the map if the key
+// insertion succeeds, or an error value if the key already exists.
+func (m *mutableMap) Insert(k, v ref.Val) ref.Val {
+	if _, found := m.Find(k); found {
+		return NewErr("insert failed: key %v already exists", k)
+	}
+	m.mutableValues[k] = v
+	return m
+}
+
+// ToImmutableMap implements the traits.MutableMapper interface method, converting a mutable map
+// to an immutable map implementation.
+func (m *mutableMap) ToImmutableMap() traits.Mapper { + return NewRefValMap(m.Adapter, m.mutableValues) +} + +func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor { + return &jsonStructAccessor{ + Adapter: adapter, + st: st, + } +} + +type jsonStructAccessor struct { + Adapter + st map[string]*structpb.Value +} + +// Find searches the json struct field map for the input key value and returns (value, true) if +// found. +// +// If the key is not found the function returns (nil, false). +func (a *jsonStructAccessor) Find(key ref.Val) (ref.Val, bool) { + strKey, ok := key.(String) + if !ok { + return nil, false + } + keyVal, found := a.st[string(strKey)] + if !found { + return nil, false + } + return a.NativeToValue(keyVal), true +} + +// Iterator creates a new traits.Iterator from the set of JSON struct field names. +func (a *jsonStructAccessor) Iterator() traits.Iterator { + // Copy the keys to make their order stable. + mapKeys := make([]string, len(a.st)) + i := 0 + for k := range a.st { + mapKeys[i] = k + i++ + } + return &stringKeyIterator{ + mapKeys: mapKeys, + len: len(mapKeys), + } +} + +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (a *jsonStructAccessor) Fold(f traits.Folder) { + for k, v := range a.st { + if !f.FoldEntry(k, v) { + break + } + } +} + +func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor { + keyType := value.Type().Key() + return &reflectMapAccessor{ + Adapter: adapter, + refValue: value, + keyType: keyType, + } +} + +type reflectMapAccessor struct { + Adapter + refValue reflect.Value + keyType reflect.Type +} + +// Find converts the input key to a native Golang type and then uses reflection to find the key, +// returning (value, true) if present. +// +// If the key is not found the function returns (nil, false). +func (m *reflectMapAccessor) Find(key ref.Val) (ref.Val, bool) { + if m.refValue.Len() == 0 { + return nil, false + } + if keyVal, found := m.findInternal(key); found { + return keyVal, true + } + switch k := key.(type) { + // Double is not a valid proto map key type, so check for the key as an int or uint. + case Double: + if ik, ok := doubleToInt64Lossless(float64(k)); ok { + if keyVal, found := m.findInternal(Int(ik)); found { + return keyVal, true + } + } + if uk, ok := doubleToUint64Lossless(float64(k)); ok { + return m.findInternal(Uint(uk)) + } + // map keys of type double are not supported. + case Int: + if uk, ok := int64ToUint64Lossless(int64(k)); ok { + return m.findInternal(Uint(uk)) + } + case Uint: + if ik, ok := uint64ToInt64Lossless(uint64(k)); ok { + return m.findInternal(Int(ik)) + } + } + return nil, false +} + +// findInternal attempts to convert the incoming key to the map's internal native type +// and then returns the value, if found. +func (m *reflectMapAccessor) findInternal(key ref.Val) (ref.Val, bool) { + k, err := key.ConvertToNative(m.keyType) + if err != nil { + return nil, false + } + refKey := reflect.ValueOf(k) + val := m.refValue.MapIndex(refKey) + if val.IsValid() { + return m.NativeToValue(val.Interface()), true + } + return nil, false +} + +// Iterator creates a Golang reflection based traits.Iterator. +func (m *reflectMapAccessor) Iterator() traits.Iterator { + return &mapIterator{ + Adapter: m.Adapter, + mapKeys: m.refValue.MapRange(), + len: m.refValue.Len(), + } +} + +// Fold calls the FoldEntry method for each (key, value) pair in the map. 
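+//
+// Editor's sketch (not upstream documentation): a traits.Folder is any type whose
+// FoldEntry(key, val any) bool reports whether folding should continue, e.g.
+//
+//	type countFolder struct{ n int }
+//
+//	func (c *countFolder) FoldEntry(k, v any) bool { c.n++; return true }
+//
+// Returning false from FoldEntry stops the iteration, as the loop below shows.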
+func (m *reflectMapAccessor) Fold(f traits.Folder) { + mapRange := m.refValue.MapRange() + for mapRange.Next() { + if !f.FoldEntry(mapRange.Key().Interface(), mapRange.Value().Interface()) { + break + } + } +} + +func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor { + return &refValMapAccessor{mapVal: mapVal} +} + +type refValMapAccessor struct { + mapVal map[ref.Val]ref.Val +} + +// Find uses native map accesses to find the key, returning (value, true) if present. +// +// If the key is not found the function returns (nil, false). +func (a *refValMapAccessor) Find(key ref.Val) (ref.Val, bool) { + if len(a.mapVal) == 0 { + return nil, false + } + if keyVal, found := a.mapVal[key]; found { + return keyVal, true + } + switch k := key.(type) { + case Double: + if ik, ok := doubleToInt64Lossless(float64(k)); ok { + if keyVal, found := a.mapVal[Int(ik)]; found { + return keyVal, found + } + } + if uk, ok := doubleToUint64Lossless(float64(k)); ok { + keyVal, found := a.mapVal[Uint(uk)] + return keyVal, found + } + // map keys of type double are not supported. + case Int: + if uk, ok := int64ToUint64Lossless(int64(k)); ok { + keyVal, found := a.mapVal[Uint(uk)] + return keyVal, found + } + case Uint: + if ik, ok := uint64ToInt64Lossless(uint64(k)); ok { + keyVal, found := a.mapVal[Int(ik)] + return keyVal, found + } + } + return nil, false +} + +// Iterator produces a new traits.Iterator which iterates over the map keys via Golang reflection. +func (a *refValMapAccessor) Iterator() traits.Iterator { + return &mapIterator{ + Adapter: DefaultTypeAdapter, + mapKeys: reflect.ValueOf(a.mapVal).MapRange(), + len: len(a.mapVal), + } +} + +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (a *refValMapAccessor) Fold(f traits.Folder) { + for k, v := range a.mapVal { + if !f.FoldEntry(k, v) { + break + } + } +} + +func newStringMapAccessor(strMap map[string]string) mapAccessor { + return &stringMapAccessor{mapVal: strMap} +} + +type stringMapAccessor struct { + mapVal map[string]string +} + +// Find uses native map accesses to find the key, returning (value, true) if present. +// +// If the key is not found the function returns (nil, false). +func (a *stringMapAccessor) Find(key ref.Val) (ref.Val, bool) { + strKey, ok := key.(String) + if !ok { + return nil, false + } + keyVal, found := a.mapVal[string(strKey)] + if !found { + return nil, false + } + return String(keyVal), true +} + +// Iterator creates a new traits.Iterator from the string key set of the map. +func (a *stringMapAccessor) Iterator() traits.Iterator { + // Copy the keys to make their order stable. + mapKeys := make([]string, len(a.mapVal)) + i := 0 + for k := range a.mapVal { + mapKeys[i] = k + i++ + } + return &stringKeyIterator{ + mapKeys: mapKeys, + len: len(mapKeys), + } +} + +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (a *stringMapAccessor) Fold(f traits.Folder) { + for k, v := range a.mapVal { + if !f.FoldEntry(k, v) { + break + } + } +} + +func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor { + return &stringIfaceMapAccessor{ + Adapter: adapter, + mapVal: mapVal, + } +} + +type stringIfaceMapAccessor struct { + Adapter + mapVal map[string]any +} + +// Find uses native map accesses to find the key, returning (value, true) if present. +// +// If the key is not found the function returns (nil, false). 
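+//
+// For example (editor's note): a non-string key such as Int(1) returns (nil, false)
+// immediately, since only CEL String keys can match entries in this accessor.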
+func (a *stringIfaceMapAccessor) Find(key ref.Val) (ref.Val, bool) { + strKey, ok := key.(String) + if !ok { + return nil, false + } + keyVal, found := a.mapVal[string(strKey)] + if !found { + return nil, false + } + return a.NativeToValue(keyVal), true +} + +// Iterator creates a new traits.Iterator from the string key set of the map. +func (a *stringIfaceMapAccessor) Iterator() traits.Iterator { + // Copy the keys to make their order stable. + mapKeys := make([]string, len(a.mapVal)) + i := 0 + for k := range a.mapVal { + mapKeys[i] = k + i++ + } + return &stringKeyIterator{ + mapKeys: mapKeys, + len: len(mapKeys), + } +} + +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (a *stringIfaceMapAccessor) Fold(f traits.Folder) { + for k, v := range a.mapVal { + if !f.FoldEntry(k, v) { + break + } + } +} + +// protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to +// accessing protoreflect.Map values. +type protoMap struct { + Adapter + value *pb.Map +} + +// Contains returns whether the map contains the given key. +func (m *protoMap) Contains(key ref.Val) ref.Val { + _, found := m.Find(key) + return Bool(found) +} + +// ConvertToNative implements the ref.Val interface method. +// +// Note, assignment to Golang struct types is not yet supported. +func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (any, error) { + // If the map is already assignable to the desired type return it, e.g. interfaces and + // maps with the same key value types. + switch typeDesc { + case anyValueType: + json, err := m.ConvertToNative(jsonStructType) + if err != nil { + return nil, err + } + return anypb.New(json.(proto.Message)) + case jsonValueType, jsonStructType: + jsonEntries, err := + m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{})) + if err != nil { + return nil, err + } + jsonMap := &structpb.Struct{ + Fields: jsonEntries.(map[string]*structpb.Value)} + if typeDesc == jsonStructType { + return jsonMap, nil + } + return structpb.NewStructValue(jsonMap), nil + } + switch typeDesc.Kind() { + case reflect.Struct, reflect.Ptr: + if reflect.TypeOf(m.value).AssignableTo(typeDesc) { + return m.value, nil + } + if reflect.TypeOf(m).AssignableTo(typeDesc) { + return m, nil + } + } + if typeDesc.Kind() != reflect.Map { + return nil, fmt.Errorf("unsupported type conversion: %v to map", typeDesc) + } + + keyType := m.value.KeyType.ReflectType() + valType := m.value.ValueType.ReflectType() + otherKeyType := typeDesc.Key() + otherValType := typeDesc.Elem() + mapVal := reflect.MakeMapWithSize(typeDesc, m.value.Len()) + var err error + m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool { + ntvKey := key.Interface() + ntvVal := val.Interface() + switch pv := ntvVal.(type) { + case protoreflect.Message: + ntvVal = pv.Interface() + } + if keyType == otherKeyType && valType == otherValType { + mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal)) + return true + } + celKey := m.NativeToValue(ntvKey) + celVal := m.NativeToValue(ntvVal) + ntvKey, err = celKey.ConvertToNative(otherKeyType) + if err != nil { + // early terminate the range loop. + return false + } + ntvVal, err = celVal.ConvertToNative(otherValType) + if err != nil { + // early terminate the range loop. 
+			return false
+		}
+		mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal))
+		return true
+	})
+	if err != nil {
+		return nil, err
+	}
+	return mapVal.Interface(), nil
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (m *protoMap) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case MapType:
+		return m
+	case TypeType:
+		return MapType
+	}
+	return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+func (m *protoMap) Equal(other ref.Val) ref.Val {
+	otherMap, ok := other.(traits.Mapper)
+	if !ok {
+		return False
+	}
+	if m.value.Map.Len() != int(otherMap.Size().(Int)) {
+		return False
+	}
+	var retVal ref.Val = True
+	m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool {
+		keyVal := m.NativeToValue(key.Interface())
+		valVal := m.NativeToValue(val)
+		otherVal, found := otherMap.Find(keyVal)
+		if !found {
+			retVal = False
+			return false
+		}
+		valEq := Equal(valVal, otherVal)
+		if valEq != True {
+			retVal = valEq
+			return false
+		}
+		return true
+	})
+	return retVal
+}
+
+// Find searches the protoreflect.Map for the input key, returning (value, true) if present.
+//
+// If the key is not found the function returns (nil, false).
+func (m *protoMap) Find(key ref.Val) (ref.Val, bool) {
+	if keyVal, found := m.findInternal(key); found {
+		return keyVal, true
+	}
+	switch k := key.(type) {
+	// Double is not a valid proto map key type, so check for the key as an int or uint.
+	case Double:
+		if ik, ok := doubleToInt64Lossless(float64(k)); ok {
+			if keyVal, found := m.findInternal(Int(ik)); found {
+				return keyVal, true
+			}
+		}
+		if uk, ok := doubleToUint64Lossless(float64(k)); ok {
+			return m.findInternal(Uint(uk))
+		}
+	// map keys of type double are not supported.
+	case Int:
+		if uk, ok := int64ToUint64Lossless(int64(k)); ok {
+			return m.findInternal(Uint(uk))
+		}
+	case Uint:
+		if ik, ok := uint64ToInt64Lossless(uint64(k)); ok {
+			return m.findInternal(Int(ik))
+		}
+	}
+	return nil, false
+}
+
+// findInternal attempts to convert the incoming key to the map's internal native type
+// and then returns the value, if found.
+func (m *protoMap) findInternal(key ref.Val) (ref.Val, bool) {
+	// Convert the input key to the expected protobuf key type.
+	ntvKey, err := key.ConvertToNative(m.value.KeyType.ReflectType())
+	if err != nil {
+		return nil, false
+	}
+	// Use protoreflection to get the key value.
+	val := m.value.Get(protoreflect.ValueOf(ntvKey).MapKey())
+	if !val.IsValid() {
+		return nil, false
+	}
+	// Perform nominal type unwrapping from the input value.
+	switch v := val.Interface().(type) {
+	case protoreflect.List, protoreflect.Map:
+		// Maps do not support list or map values
+		return nil, false
+	default:
+		return m.NativeToValue(v), true
+	}
+}
+
+// Get implements the traits.Indexer interface method.
+func (m *protoMap) Get(key ref.Val) ref.Val {
+	v, found := m.Find(key)
+	if !found {
+		return ValOrErr(v, "no such key: %v", key)
+	}
+	return v
+}
+
+// IsZeroValue returns true if the map is empty.
+func (m *protoMap) IsZeroValue() bool {
+	return m.value.Len() == 0
+}
+
+// Iterator implements the traits.Iterable interface method.
+func (m *protoMap) Iterator() traits.Iterator {
+	// Copy the keys to make their order stable.
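+	// (Editor's note: protoreflect.Map.Range visits entries in an unspecified order, so
+	// snapshotting the keys here gives the iterator a stable view across calls.)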
+ mapKeys := make([]protoreflect.MapKey, 0, m.value.Len()) + m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + mapKeys = append(mapKeys, k) + return true + }) + return &protoMapIterator{ + Adapter: m.Adapter, + mapKeys: mapKeys, + len: m.value.Len(), + } +} + +// Fold calls the FoldEntry method for each (key, value) pair in the map. +func (m *protoMap) Fold(f traits.Folder) { + m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + return f.FoldEntry(k.Interface(), v.Interface()) + }) +} + +// Size returns the number of entries in the protoreflect.Map. +func (m *protoMap) Size() ref.Val { + return Int(m.value.Len()) +} + +// Type implements the ref.Val interface method. +func (m *protoMap) Type() ref.Type { + return MapType +} + +// Value implements the ref.Val interface method. +func (m *protoMap) Value() any { + return m.value +} + +type mapIterator struct { + *baseIterator + Adapter + mapKeys *reflect.MapIter + cursor int + len int +} + +// HasNext implements the traits.Iterator interface method. +func (it *mapIterator) HasNext() ref.Val { + return Bool(it.cursor < it.len) +} + +// Next implements the traits.Iterator interface method. +func (it *mapIterator) Next() ref.Val { + if it.HasNext() == True && it.mapKeys.Next() { + it.cursor++ + refKey := it.mapKeys.Key() + return it.NativeToValue(refKey.Interface()) + } + return nil +} + +type protoMapIterator struct { + *baseIterator + Adapter + mapKeys []protoreflect.MapKey + cursor int + len int +} + +// HasNext implements the traits.Iterator interface method. +func (it *protoMapIterator) HasNext() ref.Val { + return Bool(it.cursor < it.len) +} + +// Next implements the traits.Iterator interface method. +func (it *protoMapIterator) Next() ref.Val { + if it.HasNext() == True { + index := it.cursor + it.cursor++ + refKey := it.mapKeys[index] + return it.NativeToValue(refKey.Interface()) + } + return nil +} + +type stringKeyIterator struct { + *baseIterator + mapKeys []string + cursor int + len int +} + +// HasNext implements the traits.Iterator interface method. +func (it *stringKeyIterator) HasNext() ref.Val { + return Bool(it.cursor < it.len) +} + +// Next implements the traits.Iterator interface method. +func (it *stringKeyIterator) Next() ref.Val { + if it.HasNext() == True { + index := it.cursor + it.cursor++ + return String(it.mapKeys[index]) + } + return nil +} + +// ToFoldableMap will create a Foldable version of a map suitable for key-value pair iteration. +// +// For values which are already Foldable, this call is a no-op. For all other values, the fold +// is driven via the Iterator HasNext() and Next() calls as well as the map's Get() method +// which means that the folding will function, but take a performance hit. +func ToFoldableMap(m traits.Mapper) traits.Foldable { + if f, ok := m.(traits.Foldable); ok { + return f + } + return interopFoldableMap{Mapper: m} +} + +type interopFoldableMap struct { + traits.Mapper +} + +func (m interopFoldableMap) Fold(f traits.Folder) { + it := m.Iterator() + for it.HasNext() == True { + k := it.Next() + if !f.FoldEntry(k, m.Get(k)) { + break + } + } +} + +// InsertMapKeyValue inserts a key, value pair into the target map if the target map does not +// already contain the given key. +// +// If the map is mutable, it is modified in-place per the MutableMapper contract. +// If the map is not mutable, a copy containing the new key, value pair is made. 
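+//
+// Illustrative usage (editor's addition, assuming the constructors defined above):
+//
+//	m := NewRefValMap(DefaultTypeAdapter, map[ref.Val]ref.Val{String("a"): Int(1)})
+//	_ = InsertMapKeyValue(m, String("b"), Int(2)) // immutable input: returns a copy with both keys
+//	_ = InsertMapKeyValue(m, String("a"), Int(0)) // returns an error value: key already exists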
+func InsertMapKeyValue(m traits.Mapper, k, v ref.Val) ref.Val { + if mutable, ok := m.(traits.MutableMapper); ok { + return mutable.Insert(k, v) + } + + // Otherwise perform the slow version of the insertion which makes a copy of the incoming map. + if _, found := m.Find(k); !found { + size := m.Size().(Int) + copy := make(map[ref.Val]ref.Val, size+1) + copy[k] = v + it := m.Iterator() + for it.HasNext() == True { + nextK := it.Next() + nextV := m.Get(nextK) + copy[nextK] = nextV + } + return DefaultTypeAdapter.NativeToValue(copy) + } + return NewErr("insert failed: key %v already exists", k) +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/null.go b/hack/tools/vendor/github.com/google/cel-go/common/types/null.go new file mode 100644 index 0000000000..36514ff200 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/null.go @@ -0,0 +1,119 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/proto" + + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +// Null type implementation. +type Null structpb.NullValue + +var ( + // NullValue singleton. + NullValue = Null(structpb.NullValue_NULL_VALUE) + + // golang reflect type for Null values. + nullReflectType = reflect.TypeOf(NullValue) + + protoIfaceType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// ConvertToNative implements ref.Val.ConvertToNative. +func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.Int32: + switch typeDesc { + case jsonNullType: + return structpb.NullValue_NULL_VALUE, nil + case nullReflectType: + return n, nil + } + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Convert to a JSON-null before packing to an Any field since the enum value for JSON + // null cannot be packed directly. + pb, err := n.ConvertToNative(jsonValueType) + if err != nil { + return nil, err + } + return anypb.New(pb.(proto.Message)) + case jsonValueType: + return structpb.NewNullValue(), nil + case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType, + int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType, + uint64WrapperType, durationValueType, timestampValueType, protoIfaceType: + return nil, nil + case jsonListValueType, jsonStructType: + // skip handling + default: + if typeDesc.Implements(protoIfaceType) { + return nil, nil + } + } + case reflect.Interface: + nv := n.Value() + if reflect.TypeOf(nv).Implements(typeDesc) { + return nv, nil + } + if reflect.TypeOf(n).Implements(typeDesc) { + return n, nil + } + } + // If the type conversion isn't supported return an error. + return nil, fmt.Errorf("type conversion error from '%v' to '%v'", NullType, typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. 
+func (n Null) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case StringType: + return String("null") + case NullType: + return n + case TypeType: + return NullType + } + return NewErr("type conversion error from '%s' to '%s'", NullType, typeVal) +} + +// Equal implements ref.Val.Equal. +func (n Null) Equal(other ref.Val) ref.Val { + return Bool(NullType == other.Type()) +} + +// IsZeroValue returns true as null always represents an absent value. +func (n Null) IsZeroValue() bool { + return true +} + +// Type implements ref.Val.Type. +func (n Null) Type() ref.Type { + return NullType +} + +// Value implements ref.Val.Value. +func (n Null) Value() any { + return structpb.NullValue_NULL_VALUE +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/object.go b/hack/tools/vendor/github.com/google/cel-go/common/types/object.go new file mode 100644 index 0000000000..8ba0af9fbe --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/object.go @@ -0,0 +1,165 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + + "github.com/google/cel-go/common/types/pb" + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +type protoObj struct { + Adapter + value proto.Message + typeDesc *pb.TypeDescription + typeValue ref.Val +} + +// NewObject returns an object based on a proto.Message value which handles +// conversion between protobuf type values and expression type values. +// Objects support indexing and iteration. +// +// Note: the type value is pulled from the list of registered types within the +// type provider. If the proto type is not registered within the type provider, +// then this will result in an error within the type adapter / provider. +func NewObject(adapter Adapter, + typeDesc *pb.TypeDescription, + typeValue ref.Val, + value proto.Message) ref.Val { + return &protoObj{ + Adapter: adapter, + value: value, + typeDesc: typeDesc, + typeValue: typeValue} +} + +func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (any, error) { + srcPB := o.value + if reflect.TypeOf(srcPB).AssignableTo(typeDesc) { + return srcPB, nil + } + if reflect.TypeOf(o).AssignableTo(typeDesc) { + return o, nil + } + switch typeDesc { + case anyValueType: + _, isAny := srcPB.(*anypb.Any) + if isAny { + return srcPB, nil + } + return anypb.New(srcPB) + case jsonValueType: + // Marshal the proto to JSON first, and then rehydrate as protobuf.Value as there is no + // support for direct conversion from proto.Message to protobuf.Value. 
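+		// (Editor's note: the round-trip below relies on protojson producing canonical
+		// JSON for well-formed messages, which structpb.Value can always represent.)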
+ bytes, err := protojson.Marshal(srcPB) + if err != nil { + return nil, err + } + json := &structpb.Value{} + err = protojson.Unmarshal(bytes, json) + if err != nil { + return nil, err + } + return json, nil + default: + if typeDesc == o.typeDesc.ReflectType() { + return o.value, nil + } + if typeDesc.Kind() == reflect.Ptr { + val := reflect.New(typeDesc.Elem()).Interface() + dstPB, ok := val.(proto.Message) + if ok { + err := pb.Merge(dstPB, srcPB) + if err != nil { + return nil, fmt.Errorf("type conversion error: %v", err) + } + return dstPB, nil + } + } + } + return nil, fmt.Errorf("type conversion error from '%T' to '%v'", o.value, typeDesc) +} + +func (o *protoObj) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + default: + if o.Type().TypeName() == typeVal.TypeName() { + return o + } + case TypeType: + return o.typeValue + } + return NewErr("type conversion error from '%s' to '%s'", o.typeDesc.Name(), typeVal) +} + +func (o *protoObj) Equal(other ref.Val) ref.Val { + otherPB, ok := other.Value().(proto.Message) + return Bool(ok && pb.Equal(o.value, otherPB)) +} + +// IsSet tests whether a field which is defined is set to a non-default value. +func (o *protoObj) IsSet(field ref.Val) ref.Val { + protoFieldName, ok := field.(String) + if !ok { + return MaybeNoSuchOverloadErr(field) + } + protoFieldStr := string(protoFieldName) + fd, found := o.typeDesc.FieldByName(protoFieldStr) + if !found { + return NewErr("no such field '%s'", field) + } + if fd.IsSet(o.value) { + return True + } + return False +} + +// IsZeroValue returns true if the protobuf object is empty. +func (o *protoObj) IsZeroValue() bool { + return proto.Equal(o.value, o.typeDesc.Zero()) +} + +func (o *protoObj) Get(index ref.Val) ref.Val { + protoFieldName, ok := index.(String) + if !ok { + return MaybeNoSuchOverloadErr(index) + } + protoFieldStr := string(protoFieldName) + fd, found := o.typeDesc.FieldByName(protoFieldStr) + if !found { + return NewErr("no such field '%s'", index) + } + fv, err := fd.GetFrom(o.value) + if err != nil { + return NewErr(err.Error()) + } + return o.NativeToValue(fv) +} + +func (o *protoObj) Type() ref.Type { + return o.typeValue.(ref.Type) +} + +func (o *protoObj) Value() any { + return o.value +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/optional.go b/hack/tools/vendor/github.com/google/cel-go/common/types/optional.go new file mode 100644 index 0000000000..97845a740c --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/optional.go @@ -0,0 +1,108 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "errors" + "fmt" + "reflect" + + "github.com/google/cel-go/common/types/ref" +) + +var ( + // OptionalType indicates the runtime type of an optional value. + OptionalType = NewOpaqueType("optional_type") + + // OptionalNone is a sentinel value which is used to indicate an empty optional value. 
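+	//
+	// Editor's note (illustrative): OptionalOf(Int(1)).GetValue() yields Int(1), while
+	// OptionalNone.GetValue() yields the error value "optional.none() dereference".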
+ OptionalNone = &Optional{} +) + +// OptionalOf returns an optional value which wraps a concrete CEL value. +func OptionalOf(value ref.Val) *Optional { + return &Optional{value: value} +} + +// Optional value which points to a value if non-empty. +type Optional struct { + value ref.Val +} + +// HasValue returns true if the optional has a value. +func (o *Optional) HasValue() bool { + return o.value != nil +} + +// GetValue returns the wrapped value contained in the optional. +func (o *Optional) GetValue() ref.Val { + if !o.HasValue() { + return NewErr("optional.none() dereference") + } + return o.value +} + +// ConvertToNative implements the ref.Val interface method. +func (o *Optional) ConvertToNative(typeDesc reflect.Type) (any, error) { + if !o.HasValue() { + return nil, errors.New("optional.none() dereference") + } + return o.value.ConvertToNative(typeDesc) +} + +// ConvertToType implements the ref.Val interface method. +func (o *Optional) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case OptionalType: + return o + case TypeType: + return OptionalType + } + return NewErr("type conversion error from '%s' to '%s'", OptionalType, typeVal) +} + +// Equal determines whether the values contained by two optional values are equal. +func (o *Optional) Equal(other ref.Val) ref.Val { + otherOpt, isOpt := other.(*Optional) + if !isOpt { + return False + } + if !o.HasValue() { + return Bool(!otherOpt.HasValue()) + } + if !otherOpt.HasValue() { + return False + } + return o.value.Equal(otherOpt.value) +} + +func (o *Optional) String() string { + if o.HasValue() { + return fmt.Sprintf("optional(%v)", o.GetValue()) + } + return "optional.none()" +} + +// Type implements the ref.Val interface method. +func (o *Optional) Type() ref.Type { + return OptionalType +} + +// Value returns the underlying 'Value()' of the wrapped value, if present. +func (o *Optional) Value() any { + if o.value == nil { + return nil + } + return o.value.Value() +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/overflow.go b/hack/tools/vendor/github.com/google/cel-go/common/types/overflow.go new file mode 100644 index 0000000000..dcb66ef596 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/overflow.go @@ -0,0 +1,429 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "math" + "time" +) + +var ( + doubleTwoTo64 = math.Ldexp(1.0, 64) +) + +// addInt64Checked performs addition with overflow detection of two int64 values. +// +// If the operation fails the error return value will be non-nil. +func addInt64Checked(x, y int64) (int64, error) { + if (y > 0 && x > math.MaxInt64-y) || (y < 0 && x < math.MinInt64-y) { + return 0, errIntOverflow + } + return x + y, nil +} + +// subtractInt64Checked performs subtraction with overflow detection of two int64 values. +// +// If the operation fails the error return value will be non-nil. 
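+//
+// For instance (editor's sketch), subtractInt64Checked(math.MinInt64, 1) returns
+// (0, errIntOverflow), where the unchecked expression would silently wrap to math.MaxInt64.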
+func subtractInt64Checked(x, y int64) (int64, error) {
+	if (y < 0 && x > math.MaxInt64+y) || (y > 0 && x < math.MinInt64+y) {
+		return 0, errIntOverflow
+	}
+	return x - y, nil
+}
+
+// negateInt64Checked performs negation with overflow detection of an int64.
+//
+// If the operation fails the error return value will be non-nil.
+func negateInt64Checked(x int64) (int64, error) {
+	// In twos complement, negating MinInt64 would result in a value of MaxInt64+1.
+	if x == math.MinInt64 {
+		return 0, errIntOverflow
+	}
+	return -x, nil
+}
+
+// multiplyInt64Checked performs multiplication with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func multiplyInt64Checked(x, y int64) (int64, error) {
+	// Detecting multiplication overflow is more complicated than the others. The first two detect
+	// attempting to negate MinInt64, which would result in MaxInt64+1. The other four detect normal
+	// overflow conditions.
+	if (x == -1 && y == math.MinInt64) || (y == -1 && x == math.MinInt64) ||
+		// x is positive, y is positive
+		(x > 0 && y > 0 && x > math.MaxInt64/y) ||
+		// x is positive, y is negative
+		(x > 0 && y < 0 && y < math.MinInt64/x) ||
+		// x is negative, y is positive
+		(x < 0 && y > 0 && x < math.MinInt64/y) ||
+		// x is negative, y is negative
+		(x < 0 && y < 0 && y < math.MaxInt64/x) {
+		return 0, errIntOverflow
+	}
+	return x * y, nil
+}
+
+// divideInt64Checked performs division with overflow detection of two int64 values,
+// as well as a division by zero check.
+//
+// If the operation fails the error return value will be non-nil.
+func divideInt64Checked(x, y int64) (int64, error) {
+	// Division by zero.
+	if y == 0 {
+		return 0, errDivideByZero
+	}
+	// In twos complement, negating MinInt64 would result in a value of MaxInt64+1.
+	if x == math.MinInt64 && y == -1 {
+		return 0, errIntOverflow
+	}
+	return x / y, nil
+}
+
+// moduloInt64Checked performs modulo with overflow detection of two int64 values
+// as well as a modulus by zero check.
+//
+// If the operation fails the error return value will be non-nil.
+func moduloInt64Checked(x, y int64) (int64, error) {
+	// Modulus by zero.
+	if y == 0 {
+		return 0, errModulusByZero
+	}
+	// In twos complement, negating MinInt64 would result in a value of MaxInt64+1.
+	if x == math.MinInt64 && y == -1 {
+		return 0, errIntOverflow
+	}
+	return x % y, nil
+}
+
+// addUint64Checked performs addition with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addUint64Checked(x, y uint64) (uint64, error) {
+	if y > 0 && x > math.MaxUint64-y {
+		return 0, errUintOverflow
+	}
+	return x + y, nil
+}
+
+// subtractUint64Checked performs subtraction with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractUint64Checked(x, y uint64) (uint64, error) {
+	if y > x {
+		return 0, errUintOverflow
+	}
+	return x - y, nil
+}
+
+// multiplyUint64Checked performs multiplication with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func multiplyUint64Checked(x, y uint64) (uint64, error) {
+	if y != 0 && x > math.MaxUint64/y {
+		return 0, errUintOverflow
+	}
+	return x * y, nil
+}
+
+// divideUint64Checked performs division with a test for division by zero.
+//
+// If the operation fails the error return value will be non-nil.
+func divideUint64Checked(x, y uint64) (uint64, error) {
+	if y == 0 {
+		return 0, errDivideByZero
+	}
+	return x / y, nil
+}
+
+// moduloUint64Checked performs modulo with a test for modulus by zero.
+//
+// If the operation fails the error return value will be non-nil.
+func moduloUint64Checked(x, y uint64) (uint64, error) {
+	if y == 0 {
+		return 0, errModulusByZero
+	}
+	return x % y, nil
+}
+
+// addDurationChecked performs addition with overflow detection of two time.Durations.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addDurationChecked(x, y time.Duration) (time.Duration, error) {
+	val, err := addInt64Checked(int64(x), int64(y))
+	if err != nil {
+		return time.Duration(0), err
+	}
+	return time.Duration(val), nil
+}
+
+// subtractDurationChecked performs subtraction with overflow detection of two time.Durations.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractDurationChecked(x, y time.Duration) (time.Duration, error) {
+	val, err := subtractInt64Checked(int64(x), int64(y))
+	if err != nil {
+		return time.Duration(0), err
+	}
+	return time.Duration(val), nil
+}
+
+// negateDurationChecked performs negation with overflow detection of a time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func negateDurationChecked(x time.Duration) (time.Duration, error) {
+	val, err := negateInt64Checked(int64(x))
+	if err != nil {
+		return time.Duration(0), err
+	}
+	return time.Duration(val), nil
+}
+
+// addTimeDurationChecked performs addition with overflow detection of a time.Time and time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
+	// This is tricky. A time is represented as (int64, int32) where the first is seconds and second
+	// is nanoseconds. A duration is int64 representing nanoseconds. We cannot normalize time to int64
+	// as it could potentially overflow. The only way to proceed is to break time and duration into
+	// second and nanosecond components.
+
+	// First we break time into its components by truncating and subtracting.
+	sec1 := x.Truncate(time.Second).Unix()                // Truncate to seconds.
+	nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+	// Second we break duration into its components by dividing and modulo.
+	sec2 := int64(y) / int64(time.Second)  // Truncate to seconds.
+	nsec2 := int64(y) % int64(time.Second) // Get remainder.
+
+	// Add seconds first, detecting any overflow.
+	sec, err := addInt64Checked(sec1, sec2)
+	if err != nil {
+		return time.Time{}, err
+	}
+	// Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
+	nsec := nsec1 + nsec2
+
+	// We need to normalize nanoseconds to be positive and carry extra nanoseconds to seconds.
+	// Adapted from time.Unix(int64, int64).
+	if nsec < 0 || nsec >= int64(time.Second) {
+		// Add seconds.
+		sec, err = addInt64Checked(sec, nsec/int64(time.Second))
+		if err != nil {
+			return time.Time{}, err
+		}
+
+		nsec -= (nsec / int64(time.Second)) * int64(time.Second)
+		if nsec < 0 {
+			// Subtract an extra second
+			sec, err = addInt64Checked(sec, -1)
+			if err != nil {
+				return time.Time{}, err
+			}
+			nsec += int64(time.Second)
+		}
+	}
+
+	// Check if the number of seconds from Unix epoch is within our acceptable range.
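+	// (Editor's note: minUnixTime and maxUnixTime are defined elsewhere in this package and,
+	// if this vendored copy matches upstream cel-go, bound timestamps to the RFC 3339 range
+	// 0001-01-01T00:00:00Z through 9999-12-31T23:59:59Z.)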
+	if sec < minUnixTime || sec > maxUnixTime {
+		return time.Time{}, errTimestampOverflow
+	}
+
+	// Return resulting time and propagate time zone.
+	return time.Unix(sec, nsec).In(x.Location()), nil
+}
+
+// subtractTimeChecked performs subtraction with overflow detection of two time.Time.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractTimeChecked(x, y time.Time) (time.Duration, error) {
+	// Similar to addTimeDurationChecked() above.
+
+	// First we break time into its components by truncating and subtracting.
+	sec1 := x.Truncate(time.Second).Unix()                // Truncate to seconds.
+	nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+	// Second we break duration into its components by truncating and subtracting.
+	sec2 := y.Truncate(time.Second).Unix()                // Truncate to seconds.
+	nsec2 := y.Sub(y.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+	// Subtract seconds first, detecting any overflow.
+	sec, err := subtractInt64Checked(sec1, sec2)
+	if err != nil {
+		return time.Duration(0), err
+	}
+
+	// Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
+	nsec := nsec1 - nsec2
+
+	// Scale seconds to nanoseconds detecting overflow.
+	tsec, err := multiplyInt64Checked(sec, int64(time.Second))
+	if err != nil {
+		return time.Duration(0), err
+	}
+
+	// Lastly we need to add the two nanoseconds together.
+	val, err := addInt64Checked(tsec, nsec)
+	if err != nil {
+		return time.Duration(0), err
+	}
+
+	return time.Duration(val), nil
+}
+
+// subtractTimeDurationChecked performs subtraction with overflow detection of a time.Time and
+// time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
+	// The easiest way to implement this is to negate y and add them.
+	// x - y = x + -y
+	val, err := negateDurationChecked(y)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return addTimeDurationChecked(x, val)
+}
+
+// doubleToInt64Checked converts a double to an int64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func doubleToInt64Checked(v float64) (int64, error) {
+	if math.IsInf(v, 0) || math.IsNaN(v) || v <= float64(math.MinInt64) || v >= float64(math.MaxInt64) {
+		return 0, errIntOverflow
+	}
+	return int64(v), nil
+}
+
+// doubleToUint64Checked converts a double to a uint64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func doubleToUint64Checked(v float64) (uint64, error) {
+	if math.IsInf(v, 0) || math.IsNaN(v) || v < 0 || v >= doubleTwoTo64 {
+		return 0, errUintOverflow
+	}
+	return uint64(v), nil
+}
+
+// int64ToUint64Checked converts an int64 to a uint64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToUint64Checked(v int64) (uint64, error) {
+	if v < 0 {
+		return 0, errUintOverflow
+	}
+	return uint64(v), nil
+}
+
+// int64ToInt8Checked converts an int64 to an int8 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func int64ToInt8Checked(v int64) (int8, error) {
+	if v < math.MinInt8 || v > math.MaxInt8 {
+		return 0, errIntOverflow
+	}
+	return int8(v), nil
+}
+
+// int64ToInt16Checked converts an int64 to an int16 value.
+// +// If the conversion fails due to overflow the error return value will be non-nil. +func int64ToInt16Checked(v int64) (int16, error) { + if v < math.MinInt16 || v > math.MaxInt16 { + return 0, errIntOverflow + } + return int16(v), nil +} + +// int64ToInt32Checked converts an int64 to an int32 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func int64ToInt32Checked(v int64) (int32, error) { + if v < math.MinInt32 || v > math.MaxInt32 { + return 0, errIntOverflow + } + return int32(v), nil +} + +// uint64ToUint8Checked converts a uint64 to a uint8 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func uint64ToUint8Checked(v uint64) (uint8, error) { + if v > math.MaxUint8 { + return 0, errUintOverflow + } + return uint8(v), nil +} + +// uint64ToUint16Checked converts a uint64 to a uint16 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func uint64ToUint16Checked(v uint64) (uint16, error) { + if v > math.MaxUint16 { + return 0, errUintOverflow + } + return uint16(v), nil +} + +// uint64ToUint32Checked converts a uint64 to a uint32 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func uint64ToUint32Checked(v uint64) (uint32, error) { + if v > math.MaxUint32 { + return 0, errUintOverflow + } + return uint32(v), nil +} + +// uint64ToInt64Checked converts a uint64 to an int64 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func uint64ToInt64Checked(v uint64) (int64, error) { + if v > math.MaxInt64 { + return 0, errIntOverflow + } + return int64(v), nil +} + +func doubleToUint64Lossless(v float64) (uint64, bool) { + u, err := doubleToUint64Checked(v) + if err != nil { + return 0, false + } + if float64(u) != v { + return 0, false + } + return u, true +} + +func doubleToInt64Lossless(v float64) (int64, bool) { + i, err := doubleToInt64Checked(v) + if err != nil { + return 0, false + } + if float64(i) != v { + return 0, false + } + return i, true +} + +func int64ToUint64Lossless(v int64) (uint64, bool) { + u, err := int64ToUint64Checked(v) + return u, err == nil +} + +func uint64ToInt64Lossless(v uint64) (int64, bool) { + i, err := uint64ToInt64Checked(v) + return i, err == nil +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel new file mode 100644 index 0000000000..e2b9d37b56 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel @@ -0,0 +1,53 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "checked.go", + "enum.go", + "equal.go", + "file.go", + "pb.go", + "type.go", + ], + importpath = "github.com/google/cel-go/common/types/pb", + deps = [ + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//encoding/protowire:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", + "@org_golang_google_protobuf//reflect/protoregistry:go_default_library", + "@org_golang_google_protobuf//types/dynamicpb:go_default_library", + "@org_golang_google_protobuf//types/known/anypb:go_default_library", + 
"@org_golang_google_protobuf//types/known/durationpb:go_default_library", + "@org_golang_google_protobuf//types/known/emptypb:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", + "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "equal_test.go", + "file_test.go", + "pb_test.go", + "type_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//checker/decls:go_default_library", + "//test/proto2pb:test_all_types_go_proto", + "//test/proto3pb:test_all_types_go_proto", + "@org_golang_google_protobuf//reflect/protodesc:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", + "@org_golang_google_protobuf//types/descriptorpb:go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/checked.go b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/checked.go new file mode 100644 index 0000000000..312a6a072f --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/checked.go @@ -0,0 +1,93 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pb + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +var ( + // CheckedPrimitives map from proto field descriptor type to expr.Type. + CheckedPrimitives = map[protoreflect.Kind]*exprpb.Type{ + protoreflect.BoolKind: checkedBool, + protoreflect.BytesKind: checkedBytes, + protoreflect.DoubleKind: checkedDouble, + protoreflect.FloatKind: checkedDouble, + protoreflect.Int32Kind: checkedInt, + protoreflect.Int64Kind: checkedInt, + protoreflect.Sint32Kind: checkedInt, + protoreflect.Sint64Kind: checkedInt, + protoreflect.Uint32Kind: checkedUint, + protoreflect.Uint64Kind: checkedUint, + protoreflect.Fixed32Kind: checkedUint, + protoreflect.Fixed64Kind: checkedUint, + protoreflect.Sfixed32Kind: checkedInt, + protoreflect.Sfixed64Kind: checkedInt, + protoreflect.StringKind: checkedString} + + // CheckedWellKnowns map from qualified proto type name to expr.Type for + // well-known proto types. + CheckedWellKnowns = map[string]*exprpb.Type{ + // Wrapper types. 
+ "google.protobuf.BoolValue": checkedWrap(checkedBool), + "google.protobuf.BytesValue": checkedWrap(checkedBytes), + "google.protobuf.DoubleValue": checkedWrap(checkedDouble), + "google.protobuf.FloatValue": checkedWrap(checkedDouble), + "google.protobuf.Int64Value": checkedWrap(checkedInt), + "google.protobuf.Int32Value": checkedWrap(checkedInt), + "google.protobuf.UInt64Value": checkedWrap(checkedUint), + "google.protobuf.UInt32Value": checkedWrap(checkedUint), + "google.protobuf.StringValue": checkedWrap(checkedString), + // Well-known types. + "google.protobuf.Any": checkedAny, + "google.protobuf.Duration": checkedDuration, + "google.protobuf.Timestamp": checkedTimestamp, + // Json types. + "google.protobuf.ListValue": checkedListDyn, + "google.protobuf.NullValue": checkedNull, + "google.protobuf.Struct": checkedMapStringDyn, + "google.protobuf.Value": checkedDyn, + } + + // common types + checkedDyn = &exprpb.Type{TypeKind: &exprpb.Type_Dyn{Dyn: &emptypb.Empty{}}} + // Wrapper and primitive types. + checkedBool = checkedPrimitive(exprpb.Type_BOOL) + checkedBytes = checkedPrimitive(exprpb.Type_BYTES) + checkedDouble = checkedPrimitive(exprpb.Type_DOUBLE) + checkedInt = checkedPrimitive(exprpb.Type_INT64) + checkedString = checkedPrimitive(exprpb.Type_STRING) + checkedUint = checkedPrimitive(exprpb.Type_UINT64) + // Well-known type equivalents. + checkedAny = checkedWellKnown(exprpb.Type_ANY) + checkedDuration = checkedWellKnown(exprpb.Type_DURATION) + checkedTimestamp = checkedWellKnown(exprpb.Type_TIMESTAMP) + // Json-based type equivalents. + checkedNull = &exprpb.Type{ + TypeKind: &exprpb.Type_Null{ + Null: structpb.NullValue_NULL_VALUE}} + checkedListDyn = &exprpb.Type{ + TypeKind: &exprpb.Type_ListType_{ + ListType: &exprpb.Type_ListType{ElemType: checkedDyn}}} + checkedMapStringDyn = &exprpb.Type{ + TypeKind: &exprpb.Type_MapType_{ + MapType: &exprpb.Type_MapType{ + KeyType: checkedString, + ValueType: checkedDyn}}} +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/enum.go b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/enum.go new file mode 100644 index 0000000000..09a1546308 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/enum.go @@ -0,0 +1,44 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pb + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// newEnumValueDescription produces an enum value description with the fully qualified enum value +// name and the enum value descriptor. +func newEnumValueDescription(name string, desc protoreflect.EnumValueDescriptor) *EnumValueDescription { + return &EnumValueDescription{ + enumValueName: name, + desc: desc, + } +} + +// EnumValueDescription maps a fully-qualified enum value name to its numeric value. +type EnumValueDescription struct { + enumValueName string + desc protoreflect.EnumValueDescriptor +} + +// Name returns the fully-qualified identifier name for the enum value. 
+func (ed *EnumValueDescription) Name() string {
+	return ed.enumValueName
+}
+
+// Value returns the (numeric) value of the enum.
+func (ed *EnumValueDescription) Value() int32 {
+	return int32(ed.desc.Number())
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/equal.go b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/equal.go
new file mode 100644
index 0000000000..76893d85ea
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/equal.go
@@ -0,0 +1,206 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+	"bytes"
+	"reflect"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+
+	anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+// Equal returns whether two proto.Message instances are equal using the following criteria:
+//
+// - Messages must share the same instance of the type descriptor
+// - Known set fields are compared using semantic equality
+// - Bytes are compared using bytes.Equal
+// - Scalar values are compared with operator ==
+// - List and map types are equal if they have the same length and all elements are equal
+// - Messages are equal if they share the same descriptor and all set fields are equal
+// - Unknown fields are compared using byte equality
+// - NaN values are not equal to each other
+// - google.protobuf.Any values are unpacked before comparison
+// - If the type descriptor for a protobuf.Any cannot be found, byte equality is used rather than
+//   semantic equality.
+//
+// This method of proto equality mirrors the behavior of the C++ protobuf MessageDifferencer
+// whereas the golang proto.Equal implementation mirrors the Java protobuf equals() method's
+// behavior, which needed to treat NaN values as equal due to Java semantics.
+func Equal(x, y proto.Message) bool {
+	if x == nil || y == nil {
+		return x == nil && y == nil
+	}
+	xRef := x.ProtoReflect()
+	yRef := y.ProtoReflect()
+	return equalMessage(xRef, yRef)
+}
+
+func equalMessage(mx, my protoreflect.Message) bool {
+	// Note, the original proto.Equal upon which this implementation is based does not specifically handle the
+	// case when both messages are invalid. It is assumed that the descriptors will be equal and that byte-wise
+	// comparison will be used, though the semantics of validity are neither clear, nor promised within the
+	// proto.Equal implementation.
+	if mx.IsValid() != my.IsValid() || mx.Descriptor() != my.Descriptor() {
+		return false
+	}
+
+	// This is an innovation on the default proto.Equal where protobuf.Any values are unpacked before comparison
+	// as otherwise the Any values are compared by bytes rather than structurally.
+	if isAny(mx) && isAny(my) {
+		ax := mx.Interface().(*anypb.Any)
+		ay := my.Interface().(*anypb.Any)
+		// If the values are not the same type url, return false.
+ if ax.GetTypeUrl() != ay.GetTypeUrl() { + return false + } + // If the values are byte equal, then return true. + if bytes.Equal(ax.GetValue(), ay.GetValue()) { + return true + } + // Otherwise fall through to the semantic comparison of the any values. + x, err := ax.UnmarshalNew() + if err != nil { + return false + } + y, err := ay.UnmarshalNew() + if err != nil { + return false + } + // Recursively compare the unwrapped messages to ensure nested Any values are unwrapped accordingly. + return equalMessage(x.ProtoReflect(), y.ProtoReflect()) + } + + // Walk the set fields to determine field-wise equality + nx := 0 + equal := true + mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { + nx++ + equal = my.Has(fd) && equalField(fd, vx, my.Get(fd)) + return equal + }) + if !equal { + return false + } + // Establish the count of set fields on message y + ny := 0 + my.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + ny++ + return true + }) + // If the number of set fields is not equal return false. + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { + switch { + case fd.IsMap(): + return equalMap(fd, x.Map(), y.Map()) + case fd.IsList(): + return equalList(fd, x.List(), y.List()) + default: + return equalValue(fd, x, y) + } +} + +func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) + return equal + }) + return equal +} + +func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(fd, x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { + switch fd.Kind() { + case protoreflect.BoolKind: + return x.Bool() == y.Bool() + case protoreflect.EnumKind: + return x.Enum() == y.Enum() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, + protoreflect.Int64Kind, protoreflect.Sint64Kind, + protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: + return x.Int() == y.Int() + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: + return x.Uint() == y.Uint() + case protoreflect.FloatKind, protoreflect.DoubleKind: + return x.Float() == y.Float() + case protoreflect.StringKind: + return x.String() == y.String() + case protoreflect.BytesKind: + return bytes.Equal(x.Bytes(), y.Bytes()) + case protoreflect.MessageKind, protoreflect.GroupKind: + return equalMessage(x.Message(), y.Message()) + default: + return x.Interface() == y.Interface() + } +} + +func equalUnknown(x, y protoreflect.RawFields) bool { + lenX := len(x) + lenY := len(y) + if lenX != lenY { + return false + } + if lenX == 0 { + return true + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) 
+ y = y[n:] + } + return reflect.DeepEqual(mx, my) +} + +func isAny(m protoreflect.Message) bool { + return string(m.Descriptor().FullName()) == "google.protobuf.Any" +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/file.go b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/file.go new file mode 100644 index 0000000000..e323afb1df --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/file.go @@ -0,0 +1,202 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pb + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" + + dynamicpb "google.golang.org/protobuf/types/dynamicpb" +) + +// newFileDescription returns a FileDescription instance with a complete listing of all the message +// types and enum values, as well as a map of extensions declared within any scope in the file. +func newFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) (*FileDescription, extensionMap) { + metadata := collectFileMetadata(fileDesc) + enums := make(map[string]*EnumValueDescription) + for name, enumVal := range metadata.enumValues { + enums[name] = newEnumValueDescription(name, enumVal) + } + types := make(map[string]*TypeDescription) + for name, msgType := range metadata.msgTypes { + types[name] = newTypeDescription(name, msgType, pbdb.extensions) + } + fileExtMap := make(extensionMap) + for typeName, extensions := range metadata.msgExtensionMap { + messageExtMap, found := fileExtMap[typeName] + if !found { + messageExtMap = make(map[string]*FieldDescription) + } + for _, ext := range extensions { + extDesc := dynamicpb.NewExtensionType(ext).TypeDescriptor() + messageExtMap[string(ext.FullName())] = newFieldDescription(extDesc) + } + fileExtMap[typeName] = messageExtMap + } + return &FileDescription{ + name: fileDesc.Path(), + types: types, + enums: enums, + }, fileExtMap +} + +// FileDescription holds a map of all types and enum values declared within a proto file. +type FileDescription struct { + name string + types map[string]*TypeDescription + enums map[string]*EnumValueDescription +} + +// Copy creates a copy of the FileDescription with updated Db references within its types. +func (fd *FileDescription) Copy(pbdb *Db) *FileDescription { + typesCopy := make(map[string]*TypeDescription, len(fd.types)) + for k, v := range fd.types { + typesCopy[k] = v.Copy(pbdb) + } + return &FileDescription{ + name: fd.name, + types: typesCopy, + enums: fd.enums, + } +} + +// GetName returns the fully qualified file path for the file. +func (fd *FileDescription) GetName() string { + return fd.name +} + +// GetEnumDescription returns an EnumDescription for a qualified enum value +// name declared within the .proto file. +func (fd *FileDescription) GetEnumDescription(enumName string) (*EnumValueDescription, bool) { + ed, found := fd.enums[sanitizeProtoName(enumName)] + return ed, found +} + +// GetEnumNames returns the string names of all enum values in the file. 
+func (fd *FileDescription) GetEnumNames() []string { + enumNames := make([]string, len(fd.enums)) + i := 0 + for _, e := range fd.enums { + enumNames[i] = e.Name() + i++ + } + return enumNames +} + +// GetTypeDescription returns a TypeDescription for a qualified protobuf message type name +// declared within the .proto file. +func (fd *FileDescription) GetTypeDescription(typeName string) (*TypeDescription, bool) { + td, found := fd.types[sanitizeProtoName(typeName)] + return td, found +} + +// GetTypeNames returns the list of all type names contained within the file. +func (fd *FileDescription) GetTypeNames() []string { + typeNames := make([]string, len(fd.types)) + i := 0 + for _, t := range fd.types { + typeNames[i] = t.Name() + i++ + } + return typeNames +} + +// sanitizeProtoName strips the leading '.' from the proto message name. +func sanitizeProtoName(name string) string { + if name != "" && name[0] == '.' { + return name[1:] + } + return name +} + +// fileMetadata is a flattened view of message types and enum values within a file descriptor. +type fileMetadata struct { + // msgTypes maps from fully-qualified message name to descriptor. + msgTypes map[string]protoreflect.MessageDescriptor + // enumValues maps from fully-qualified enum value to enum value descriptor. + enumValues map[string]protoreflect.EnumValueDescriptor + // msgExtensionMap maps from the protobuf message name being extended to a set of extensions + // for the type. + msgExtensionMap map[string][]protoreflect.ExtensionDescriptor + + // TODO: support enum type definitions for use in future type-check enhancements. +} + +// collectFileMetadata traverses the proto file object graph to collect message types and enum +// values and index them by their fully qualified names. +func collectFileMetadata(fileDesc protoreflect.FileDescriptor) *fileMetadata { + msgTypes := make(map[string]protoreflect.MessageDescriptor) + enumValues := make(map[string]protoreflect.EnumValueDescriptor) + msgExtensionMap := make(map[string][]protoreflect.ExtensionDescriptor) + collectMsgTypes(fileDesc.Messages(), msgTypes, enumValues, msgExtensionMap) + collectEnumValues(fileDesc.Enums(), enumValues) + collectExtensions(fileDesc.Extensions(), msgExtensionMap) + return &fileMetadata{ + msgTypes: msgTypes, + enumValues: enumValues, + msgExtensionMap: msgExtensionMap, + } +} + +// collectMsgTypes recursively collects messages, nested messages, and nested enums into a map of +// fully qualified protobuf names to descriptors. +func collectMsgTypes(msgTypes protoreflect.MessageDescriptors, + msgTypeMap map[string]protoreflect.MessageDescriptor, + enumValueMap map[string]protoreflect.EnumValueDescriptor, + msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) { + for i := 0; i < msgTypes.Len(); i++ { + msgType := msgTypes.Get(i) + msgTypeMap[string(msgType.FullName())] = msgType + nestedMsgTypes := msgType.Messages() + if nestedMsgTypes.Len() != 0 { + collectMsgTypes(nestedMsgTypes, msgTypeMap, enumValueMap, msgExtensionMap) + } + nestedEnumTypes := msgType.Enums() + if nestedEnumTypes.Len() != 0 { + collectEnumValues(nestedEnumTypes, enumValueMap) + } + nestedExtensions := msgType.Extensions() + if nestedExtensions.Len() != 0 { + collectExtensions(nestedExtensions, msgExtensionMap) + } + } +} + +// collectEnumValues accumulates the enum values within an enum declaration. 
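+// For example, a value GOOD of a (hypothetical) enum test.TestEnum is indexed under the
+// fully qualified name "test.TestEnum.GOOD".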
+func collectEnumValues(enumTypes protoreflect.EnumDescriptors, enumValueMap map[string]protoreflect.EnumValueDescriptor) {
+	for i := 0; i < enumTypes.Len(); i++ {
+		enumType := enumTypes.Get(i)
+		enumTypeValues := enumType.Values()
+		for j := 0; j < enumTypeValues.Len(); j++ {
+			enumValue := enumTypeValues.Get(j)
+			enumValueName := fmt.Sprintf("%s.%s", string(enumType.FullName()), string(enumValue.Name()))
+			enumValueMap[enumValueName] = enumValue
+		}
+	}
+}
+
+func collectExtensions(extensions protoreflect.ExtensionDescriptors, msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) {
+	for i := 0; i < extensions.Len(); i++ {
+		ext := extensions.Get(i)
+		extendsMsg := string(ext.ContainingMessage().FullName())
+		msgExts, found := msgExtensionMap[extendsMsg]
+		if !found {
+			msgExts = []protoreflect.ExtensionDescriptor{}
+		}
+		msgExts = append(msgExts, ext)
+		msgExtensionMap[extendsMsg] = msgExts
+	}
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/pb.go b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/pb.go
new file mode 100644
index 0000000000..eadebcb04e
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/pb.go
@@ -0,0 +1,258 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pb reflects over protocol buffer descriptors to generate objects
+// that simplify type, enum, and field lookup.
+package pb
+
+import (
+	"fmt"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	durpb "google.golang.org/protobuf/types/known/durationpb"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	tspb "google.golang.org/protobuf/types/known/timestamppb"
+	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Db maps from file / message / enum name to file description.
+//
+// Each Db is isolated from the others, and while information about protobuf descriptors may be
+// fetched from the global protobuf registry, no descriptors are added to this registry, else
+// the isolation guarantees of the Db object would be violated.
+type Db struct {
+	revFileDescriptorMap map[string]*FileDescription
+	// files contains the deduped set of FileDescriptions whose types are contained in the pb.Db.
+	files []*FileDescription
+	// extensions contains the mapping between a given type name, extension name and its FieldDescription
+	extensions map[string]map[string]*FieldDescription
+}
+
+// extensionMap is a type alias to a map[typeName]map[extensionName]*FieldDescription
+type extensionMap = map[string]map[string]*FieldDescription
+
+var (
+	// DefaultDb is used at evaluation time unless overridden at check time.
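+	//
+	// A lookup sketch (illustrative; DescribeType is defined later in this file, and the
+	// well-known types are registered by this package's init function):
+	//
+	//	td, found := DefaultDb.DescribeType("google.protobuf.Duration")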
+	DefaultDb = &Db{
+		revFileDescriptorMap: make(map[string]*FileDescription),
+		files:                []*FileDescription{},
+		extensions:           make(extensionMap),
+	}
+)
+
+// Merge will copy the source proto message into the destination, or error if the merge cannot be completed.
+//
+// Unlike proto.Merge, this method will fall back to proto.Marshal/Unmarshal if the two proto messages do not
+// share the same instance of their type descriptor.
+func Merge(dstPB, srcPB proto.Message) error {
+	src, dst := srcPB.ProtoReflect(), dstPB.ProtoReflect()
+	if src.Descriptor() == dst.Descriptor() {
+		proto.Merge(dstPB, srcPB)
+		return nil
+	}
+	if src.Descriptor().FullName() != dst.Descriptor().FullName() {
+		return fmt.Errorf("pb.Merge() arguments must be the same type. got: %v, %v",
+			dst.Descriptor().FullName(), src.Descriptor().FullName())
+	}
+	bytes, err := proto.Marshal(srcPB)
+	if err != nil {
+		return fmt.Errorf("pb.Merge(dstPB, srcPB) failed to marshal source proto: %v", err)
+	}
+	err = proto.Unmarshal(bytes, dstPB)
+	if err != nil {
+		return fmt.Errorf("pb.Merge(dstPB, srcPB) failed to unmarshal to dest proto: %v", err)
+	}
+	return nil
+}
+
+// NewDb creates a new `pb.Db` with an empty type name to file description map.
+func NewDb() *Db {
+	pbdb := &Db{
+		revFileDescriptorMap: make(map[string]*FileDescription),
+		files:                []*FileDescription{},
+		extensions:           make(extensionMap),
+	}
+	// The FileDescription objects in the default db contain lazily initialized TypeDescription
+	// values which may point to the state contained in the DefaultDb irrespective of this shallow
+	// copy; however, the type graph for a field is idempotently computed, and is guaranteed to
+	// only be initialized once thanks to atomic values within the TypeDescription objects, so it
+	// is safe to share these values across instances.
+	for k, v := range DefaultDb.revFileDescriptorMap {
+		pbdb.revFileDescriptorMap[k] = v
+	}
+	pbdb.files = append(pbdb.files, DefaultDb.files...)
+	return pbdb
+}
+
+// Copy creates a copy of the current database with its own internal descriptor mapping.
+func (pbdb *Db) Copy() *Db {
+	copy := NewDb()
+	for _, fd := range pbdb.files {
+		hasFile := false
+		for _, fd2 := range copy.files {
+			if fd2 == fd {
+				hasFile = true
+			}
+		}
+		if !hasFile {
+			fd = fd.Copy(copy)
+			copy.files = append(copy.files, fd)
+		}
+		for _, enumValName := range fd.GetEnumNames() {
+			copy.revFileDescriptorMap[enumValName] = fd
+		}
+		for _, msgTypeName := range fd.GetTypeNames() {
+			copy.revFileDescriptorMap[msgTypeName] = fd
+		}
+		copy.revFileDescriptorMap[fd.GetName()] = fd
+	}
+	for typeName, extFieldMap := range pbdb.extensions {
+		copyExtFieldMap, found := copy.extensions[typeName]
+		if !found {
+			copyExtFieldMap = make(map[string]*FieldDescription, len(extFieldMap))
+		}
+		for extFieldName, fd := range extFieldMap {
+			copyExtFieldMap[extFieldName] = fd
+		}
+		copy.extensions[typeName] = copyExtFieldMap
+	}
+	return copy
+}
+
+// FileDescriptions returns the set of file descriptions associated with this db.
+func (pbdb *Db) FileDescriptions() []*FileDescription {
+	return pbdb.files
+}
+
+// RegisterDescriptor produces a `FileDescription` from a `FileDescriptor` and registers the
+// message and enum types into the `pb.Db`.
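+//
+// A minimal usage sketch (illustrative; relies on the timestamppb descriptor already
+// imported by this file):
+//
+//	pbdb := NewDb()
+//	fd, err := pbdb.RegisterDescriptor((&tspb.Timestamp{}).ProtoReflect().Descriptor().ParentFile())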
+func (pbdb *Db) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) (*FileDescription, error) { + fd, found := pbdb.revFileDescriptorMap[fileDesc.Path()] + if found { + return fd, nil + } + // Make sure to search the global registry to see if a protoreflect.FileDescriptor for + // the file specified has been linked into the binary. If so, use the copy of the descriptor + // from the global cache. + // + // Note: Proto reflection relies on descriptor values being object equal rather than object + // equivalence. This choice means that a FieldDescriptor generated from a FileDescriptorProto + // will be incompatible with the FieldDescriptor in the global registry and any message created + // from that global registry. + globalFD, err := protoregistry.GlobalFiles.FindFileByPath(fileDesc.Path()) + if err == nil { + fileDesc = globalFD + } + var fileExtMap extensionMap + fd, fileExtMap = newFileDescription(fileDesc, pbdb) + for _, enumValName := range fd.GetEnumNames() { + pbdb.revFileDescriptorMap[enumValName] = fd + } + for _, msgTypeName := range fd.GetTypeNames() { + pbdb.revFileDescriptorMap[msgTypeName] = fd + } + pbdb.revFileDescriptorMap[fd.GetName()] = fd + + // Return the specific file descriptor registered. + pbdb.files = append(pbdb.files, fd) + + // Index the protobuf message extensions from the file into the pbdb + for typeName, extMap := range fileExtMap { + typeExtMap, found := pbdb.extensions[typeName] + if !found { + pbdb.extensions[typeName] = extMap + continue + } + for extName, field := range extMap { + typeExtMap[extName] = field + } + } + return fd, nil +} + +// RegisterMessage produces a `FileDescription` from a `message` and registers the message and all +// other definitions within the message file into the `pb.Db`. +func (pbdb *Db) RegisterMessage(message proto.Message) (*FileDescription, error) { + msgDesc := message.ProtoReflect().Descriptor() + msgName := msgDesc.FullName() + typeName := sanitizeProtoName(string(msgName)) + if fd, found := pbdb.revFileDescriptorMap[typeName]; found { + return fd, nil + } + return pbdb.RegisterDescriptor(msgDesc.ParentFile()) +} + +// DescribeEnum takes a qualified enum name and returns an `EnumDescription` if it exists in the +// `pb.Db`. +func (pbdb *Db) DescribeEnum(enumName string) (*EnumValueDescription, bool) { + enumName = sanitizeProtoName(enumName) + if fd, found := pbdb.revFileDescriptorMap[enumName]; found { + return fd.GetEnumDescription(enumName) + } + return nil, false +} + +// DescribeType returns a `TypeDescription` for the `typeName` if it exists in the `pb.Db`. +func (pbdb *Db) DescribeType(typeName string) (*TypeDescription, bool) { + typeName = sanitizeProtoName(typeName) + if fd, found := pbdb.revFileDescriptorMap[typeName]; found { + return fd.GetTypeDescription(typeName) + } + return nil, false +} + +// CollectFileDescriptorSet builds a file descriptor set associated with the file where the input +// message is declared. 
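+//
+// For example (an illustrative sketch):
+//
+//	fds := CollectFileDescriptorSet(&anypb.Any{})
+//	// fds now contains "google/protobuf/any.proto" plus any transitive imports.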
+func CollectFileDescriptorSet(message proto.Message) map[string]protoreflect.FileDescriptor {
+	fdMap := map[string]protoreflect.FileDescriptor{}
+	parentFile := message.ProtoReflect().Descriptor().ParentFile()
+	fdMap[parentFile.Path()] = parentFile
+	// Initialize list of dependencies
+	deps := make([]protoreflect.FileImport, parentFile.Imports().Len())
+	for i := 0; i < parentFile.Imports().Len(); i++ {
+		deps[i] = parentFile.Imports().Get(i)
+	}
+	// Expand list for new dependencies
+	for i := 0; i < len(deps); i++ {
+		dep := deps[i]
+		if _, found := fdMap[dep.Path()]; found {
+			continue
+		}
+		fdMap[dep.Path()] = dep.FileDescriptor
+		for j := 0; j < dep.FileDescriptor.Imports().Len(); j++ {
+			deps = append(deps, dep.FileDescriptor.Imports().Get(j))
+		}
+	}
+	return fdMap
+}
+
+func init() {
+	// Describe well-known types to ensure they can always be resolved by the check and interpret
+	// execution phases.
+	//
+	// The following subset of message types is enough to ensure that all well-known types can
+	// be resolved in the runtime, since describing the value results in describing the whole file
+	// where the message is declared.
+	DefaultDb.RegisterMessage(&anypb.Any{})
+	DefaultDb.RegisterMessage(&durpb.Duration{})
+	DefaultDb.RegisterMessage(&emptypb.Empty{})
+	DefaultDb.RegisterMessage(&tspb.Timestamp{})
+	DefaultDb.RegisterMessage(&structpb.Value{})
+	DefaultDb.RegisterMessage(&wrapperspb.BoolValue{})
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/pb/type.go b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/type.go
new file mode 100644
index 0000000000..bdd474c95a
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/pb/type.go
@@ -0,0 +1,614 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+	"fmt"
+	"reflect"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+	dynamicpb "google.golang.org/protobuf/types/dynamicpb"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	dpb "google.golang.org/protobuf/types/known/durationpb"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	tpb "google.golang.org/protobuf/types/known/timestamppb"
+	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// description is a private interface used to make it convenient to perform type unwrapping at
+// the TypeDescription or FieldDescription level.
+type description interface {
+	// Zero returns an empty immutable protobuf message when the description is a protobuf message
+	// type.
+	Zero() proto.Message
+}
+
+// newTypeDescription produces a TypeDescription value for the fully-qualified proto type name
+// with a given descriptor.
+func newTypeDescription(typeName string, desc protoreflect.MessageDescriptor, extensions extensionMap) *TypeDescription { + msgType := dynamicpb.NewMessageType(desc) + msgZero := dynamicpb.NewMessage(desc) + fieldMap := map[string]*FieldDescription{} + fields := desc.Fields() + for i := 0; i < fields.Len(); i++ { + f := fields.Get(i) + fieldMap[string(f.Name())] = newFieldDescription(f) + } + return &TypeDescription{ + typeName: typeName, + desc: desc, + msgType: msgType, + fieldMap: fieldMap, + extensions: extensions, + reflectType: reflectTypeOf(msgZero), + zeroMsg: zeroValueOf(msgZero), + } +} + +// TypeDescription is a collection of type metadata relevant to expression +// checking and evaluation. +type TypeDescription struct { + typeName string + desc protoreflect.MessageDescriptor + msgType protoreflect.MessageType + fieldMap map[string]*FieldDescription + extensions extensionMap + reflectType reflect.Type + zeroMsg proto.Message +} + +// Copy copies the type description with updated references to the Db. +func (td *TypeDescription) Copy(pbdb *Db) *TypeDescription { + return &TypeDescription{ + typeName: td.typeName, + desc: td.desc, + msgType: td.msgType, + fieldMap: td.fieldMap, + extensions: pbdb.extensions, + reflectType: td.reflectType, + zeroMsg: td.zeroMsg, + } +} + +// FieldMap returns a string field name to FieldDescription map. +func (td *TypeDescription) FieldMap() map[string]*FieldDescription { + return td.fieldMap +} + +// FieldByName returns (FieldDescription, true) if the field name is declared within the type. +func (td *TypeDescription) FieldByName(name string) (*FieldDescription, bool) { + fd, found := td.fieldMap[name] + if found { + return fd, true + } + extFieldMap, found := td.extensions[td.typeName] + if !found { + return nil, false + } + fd, found = extFieldMap[name] + return fd, found +} + +// MaybeUnwrap accepts a proto message as input and unwraps it to a primitive CEL type if possible. +// +// This method returns the unwrapped value and 'true', else the original value and 'false'. +func (td *TypeDescription) MaybeUnwrap(msg proto.Message) (any, bool, error) { + return unwrap(td, msg) +} + +// Name returns the fully-qualified name of the type. +func (td *TypeDescription) Name() string { + return string(td.desc.FullName()) +} + +// New returns a mutable proto message +func (td *TypeDescription) New() protoreflect.Message { + return td.msgType.New() +} + +// ReflectType returns the Golang reflect.Type for this type. +func (td *TypeDescription) ReflectType() reflect.Type { + return td.reflectType +} + +// Zero returns the zero proto.Message value for this type. +func (td *TypeDescription) Zero() proto.Message { + return td.zeroMsg +} + +// newFieldDescription creates a new field description from a protoreflect.FieldDescriptor. 
+func newFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescription { + var reflectType reflect.Type + var zeroMsg proto.Message + switch fieldDesc.Kind() { + case protoreflect.EnumKind: + reflectType = reflectTypeOf(protoreflect.EnumNumber(0)) + case protoreflect.GroupKind, protoreflect.MessageKind: + zeroMsg = dynamicpb.NewMessage(fieldDesc.Message()) + reflectType = reflectTypeOf(zeroMsg) + default: + reflectType = reflectTypeOf(fieldDesc.Default().Interface()) + if fieldDesc.IsList() { + var elemValue protoreflect.Value + if fieldDesc.IsExtension() { + et := dynamicpb.NewExtensionType(fieldDesc) + elemValue = et.New().List().NewElement() + } else { + parentMsgType := fieldDesc.ContainingMessage() + parentMsg := dynamicpb.NewMessage(parentMsgType) + listField := parentMsg.NewField(fieldDesc).List() + elemValue = listField.NewElement() + } + elem := elemValue.Interface() + switch elemType := elem.(type) { + case protoreflect.Message: + elem = elemType.Interface() + } + reflectType = reflectTypeOf(elem) + } + } + // Ensure the list type is appropriately reflected as a Go-native list. + if fieldDesc.IsList() { + reflectType = reflect.SliceOf(reflectType) + } + var keyType, valType *FieldDescription + if fieldDesc.IsMap() { + keyType = newFieldDescription(fieldDesc.MapKey()) + valType = newFieldDescription(fieldDesc.MapValue()) + } + return &FieldDescription{ + desc: fieldDesc, + KeyType: keyType, + ValueType: valType, + reflectType: reflectType, + zeroMsg: zeroValueOf(zeroMsg), + } +} + +// FieldDescription holds metadata related to fields declared within a type. +type FieldDescription struct { + // KeyType holds the key FieldDescription for map fields. + KeyType *FieldDescription + // ValueType holds the value FieldDescription for map fields. + ValueType *FieldDescription + + desc protoreflect.FieldDescriptor + reflectType reflect.Type + zeroMsg proto.Message +} + +// CheckedType returns the type-definition used at type-check time. +func (fd *FieldDescription) CheckedType() *exprpb.Type { + if fd.desc.IsMap() { + return &exprpb.Type{ + TypeKind: &exprpb.Type_MapType_{ + MapType: &exprpb.Type_MapType{ + KeyType: fd.KeyType.typeDefToType(), + ValueType: fd.ValueType.typeDefToType(), + }, + }, + } + } + if fd.desc.IsList() { + return &exprpb.Type{ + TypeKind: &exprpb.Type_ListType_{ + ListType: &exprpb.Type_ListType{ + ElemType: fd.typeDefToType()}}} + } + return fd.typeDefToType() +} + +// Descriptor returns the protoreflect.FieldDescriptor for this type. +func (fd *FieldDescription) Descriptor() protoreflect.FieldDescriptor { + return fd.desc +} + +// IsSet returns whether the field is set on the target value, per the proto presence conventions +// of proto2 or proto3 accordingly. +// +// This function implements the FieldType.IsSet function contract which can be used to operate on +// more than just protobuf field accesses; however, the target here must be a protobuf.Message. +func (fd *FieldDescription) IsSet(target any) bool { + switch v := target.(type) { + case proto.Message: + pbRef := v.ProtoReflect() + pbDesc := pbRef.Descriptor() + if pbDesc == fd.desc.ContainingMessage() { + // When the target protobuf shares the same message descriptor instance as the field + // descriptor, use the cached field descriptor value. + return pbRef.Has(fd.desc) + } + // Otherwise, fallback to a dynamic lookup of the field descriptor from the target + // instance as an attempt to use the cached field descriptor will result in a panic. 
+ return pbRef.Has(pbDesc.Fields().ByName(protoreflect.Name(fd.Name()))) + default: + return false + } +} + +// GetFrom returns the accessor method associated with the field on the proto generated struct. +// +// If the field is not set, the proto default value is returned instead. +// +// This function implements the FieldType.GetFrom function contract which can be used to operate +// on more than just protobuf field accesses; however, the target here must be a protobuf.Message. +func (fd *FieldDescription) GetFrom(target any) (any, error) { + v, ok := target.(proto.Message) + if !ok { + return nil, fmt.Errorf("unsupported field selection target: (%T)%v", target, target) + } + pbRef := v.ProtoReflect() + pbDesc := pbRef.Descriptor() + var fieldVal any + if pbDesc == fd.desc.ContainingMessage() { + // When the target protobuf shares the same message descriptor instance as the field + // descriptor, use the cached field descriptor value. + fieldVal = pbRef.Get(fd.desc).Interface() + } else { + // Otherwise, fallback to a dynamic lookup of the field descriptor from the target + // instance as an attempt to use the cached field descriptor will result in a panic. + fieldVal = pbRef.Get(pbDesc.Fields().ByName(protoreflect.Name(fd.Name()))).Interface() + } + switch fv := fieldVal.(type) { + // Fast-path return for primitive types. + case bool, []byte, float32, float64, int32, int64, string, uint32, uint64, protoreflect.List: + return fv, nil + case protoreflect.EnumNumber: + return int64(fv), nil + case protoreflect.Map: + // Return a wrapper around the protobuf-reflected Map types which carries additional + // information about the key and value definitions of the map. + return &Map{Map: fv, KeyType: fd.KeyType, ValueType: fd.ValueType}, nil + case protoreflect.Message: + // Make sure to unwrap well-known protobuf types before returning. + unwrapped, _, err := fd.MaybeUnwrapDynamic(fv) + return unwrapped, err + default: + return fv, nil + } +} + +// IsEnum returns true if the field type refers to an enum value. +func (fd *FieldDescription) IsEnum() bool { + return fd.ProtoKind() == protoreflect.EnumKind +} + +// IsMap returns true if the field is of map type. +func (fd *FieldDescription) IsMap() bool { + return fd.desc.IsMap() +} + +// IsMessage returns true if the field is of message type. +func (fd *FieldDescription) IsMessage() bool { + kind := fd.ProtoKind() + return kind == protoreflect.MessageKind || kind == protoreflect.GroupKind +} + +// IsOneof returns true if the field is declared within a oneof block. +func (fd *FieldDescription) IsOneof() bool { + return fd.desc.ContainingOneof() != nil +} + +// IsList returns true if the field is a repeated value. +// +// This method will also return true for map values, so check whether the +// field is also a map. +func (fd *FieldDescription) IsList() bool { + return fd.desc.IsList() +} + +// MaybeUnwrapDynamic takes the reflected protoreflect.Message and determines whether the +// value can be unwrapped to a more primitive CEL type. +// +// This function returns the unwrapped value and 'true' on success, or the original value +// and 'false' otherwise. +func (fd *FieldDescription) MaybeUnwrapDynamic(msg protoreflect.Message) (any, bool, error) { + return unwrapDynamic(fd, msg) +} + +// Name returns the CamelCase name of the field within the proto-based struct. +func (fd *FieldDescription) Name() string { + return string(fd.desc.Name()) +} + +// ProtoKind returns the protobuf reflected kind of the field. 
+func (fd *FieldDescription) ProtoKind() protoreflect.Kind { + return fd.desc.Kind() +} + +// ReflectType returns the Golang reflect.Type for this field. +func (fd *FieldDescription) ReflectType() reflect.Type { + return fd.reflectType +} + +// String returns the fully qualified name of the field within its type as well as whether the +// field occurs within a oneof. +func (fd *FieldDescription) String() string { + return fmt.Sprintf("%v.%s `oneof=%t`", fd.desc.ContainingMessage().FullName(), fd.Name(), fd.IsOneof()) +} + +// Zero returns the zero value for the protobuf message represented by this field. +// +// If the field is not a proto.Message type, the zero value is nil. +func (fd *FieldDescription) Zero() proto.Message { + return fd.zeroMsg +} + +func (fd *FieldDescription) typeDefToType() *exprpb.Type { + if fd.IsMessage() { + msgType := string(fd.desc.Message().FullName()) + if wk, found := CheckedWellKnowns[msgType]; found { + return wk + } + return checkedMessageType(msgType) + } + if fd.IsEnum() { + return checkedInt + } + return CheckedPrimitives[fd.ProtoKind()] +} + +// Map wraps the protoreflect.Map object with a key and value FieldDescription for use in +// retrieving individual elements within CEL value data types. +type Map struct { + protoreflect.Map + KeyType *FieldDescription + ValueType *FieldDescription +} + +func checkedMessageType(name string) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_MessageType{MessageType: name}} +} + +func checkedPrimitive(primitive exprpb.Type_PrimitiveType) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_Primitive{Primitive: primitive}} +} + +func checkedWellKnown(wellKnown exprpb.Type_WellKnownType) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_WellKnown{WellKnown: wellKnown}} +} + +func checkedWrap(t *exprpb.Type) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_Wrapper{Wrapper: t.GetPrimitive()}} +} + +// unwrap unwraps the provided proto.Message value, potentially based on the description if the +// input message is a *dynamicpb.Message which obscures the typing information from Go. +// +// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'. 
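+//
+// For example (an illustrative sketch, where desc may be any description implementation):
+//
+//	val, ok, _ := unwrap(desc, wrapperspb.Int32(42)) // yields int64(42), true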
+func unwrap(desc description, msg proto.Message) (any, bool, error) {
+	switch v := msg.(type) {
+	case *anypb.Any:
+		dynMsg, err := v.UnmarshalNew()
+		if err != nil {
+			return v, false, err
+		}
+		return unwrapDynamic(desc, dynMsg.ProtoReflect())
+	case *dynamicpb.Message:
+		return unwrapDynamic(desc, v)
+	case *dpb.Duration:
+		return v.AsDuration(), true, nil
+	case *tpb.Timestamp:
+		return v.AsTime(), true, nil
+	case *structpb.Value:
+		switch v.GetKind().(type) {
+		case *structpb.Value_BoolValue:
+			return v.GetBoolValue(), true, nil
+		case *structpb.Value_ListValue:
+			return v.GetListValue(), true, nil
+		case *structpb.Value_NullValue:
+			return structpb.NullValue_NULL_VALUE, true, nil
+		case *structpb.Value_NumberValue:
+			return v.GetNumberValue(), true, nil
+		case *structpb.Value_StringValue:
+			return v.GetStringValue(), true, nil
+		case *structpb.Value_StructValue:
+			return v.GetStructValue(), true, nil
+		default:
+			return structpb.NullValue_NULL_VALUE, true, nil
+		}
+	case *wrapperspb.BoolValue:
+		if v == nil {
+			return nil, true, nil
+		}
+		return v.GetValue(), true, nil
+	case *wrapperspb.BytesValue:
+		if v == nil {
+			return nil, true, nil
+		}
+		return v.GetValue(), true, nil
+	case *wrapperspb.DoubleValue:
+		if v == nil {
+			return nil, true, nil
+		}
+		return v.GetValue(), true, nil
+	case *wrapperspb.FloatValue:
+		if v == nil {
+			return nil, true, nil
+		}
+		return float64(v.GetValue()), true, nil
+	case *wrapperspb.Int32Value:
+		if v == nil {
+			return nil, true, nil
+		}
+		return int64(v.GetValue()), true, nil
+	case *wrapperspb.Int64Value:
+		if v == nil {
+			return nil, true, nil
+		}
+		return v.GetValue(), true, nil
+	case *wrapperspb.StringValue:
+		if v == nil {
+			return nil, true, nil
+		}
+		return v.GetValue(), true, nil
+	case *wrapperspb.UInt32Value:
+		if v == nil {
+			return nil, true, nil
+		}
+		return uint64(v.GetValue()), true, nil
+	case *wrapperspb.UInt64Value:
+		if v == nil {
+			return nil, true, nil
+		}
+		return v.GetValue(), true, nil
+	}
+	return msg, false, nil
+}
+
+// unwrapDynamic unwraps a reflected protobuf Message value.
+//
+// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
+func unwrapDynamic(desc description, refMsg protoreflect.Message) (any, bool, error) {
+	msg := refMsg.Interface()
+	if !refMsg.IsValid() {
+		msg = desc.Zero()
+	}
+	// In order to ensure that these wrapped types match the expectations of the CEL type system
+	// the dynamicpb.Message must be merged with a protobuf instance of the well-known type value.
+	typeName := string(refMsg.Descriptor().FullName())
+	switch typeName {
+	case "google.protobuf.Any":
+		// Note, Any values require further unwrapping; however, this unwrapping may or may not
+		// be to a well-known type. If the unwrapped value is a well-known type it will be further
+		// unwrapped before being returned to the caller. Otherwise, the dynamic protobuf object
+		// represented by the Any will be returned.
+		unwrappedAny := &anypb.Any{}
+		err := Merge(unwrappedAny, msg)
+		if err != nil {
+			return nil, false, fmt.Errorf("unwrap dynamic field failed: %v", err)
+		}
+		dynMsg, err := unwrappedAny.UnmarshalNew()
+		if err != nil {
+			// Allow the error to move further up the stack as it should result in a type
+			// conversion error if the caller does not recover it somehow.
+			return nil, false, fmt.Errorf("unmarshal dynamic any failed: %v", err)
+		}
+		// Attempt to unwrap the dynamic type, otherwise return the dynamic message.
+ unwrapped, nested, err := unwrapDynamic(desc, dynMsg.ProtoReflect()) + if err == nil && nested { + return unwrapped, true, nil + } + return dynMsg, true, err + case "google.protobuf.BoolValue", + "google.protobuf.BytesValue", + "google.protobuf.DoubleValue", + "google.protobuf.FloatValue", + "google.protobuf.Int32Value", + "google.protobuf.Int64Value", + "google.protobuf.StringValue", + "google.protobuf.UInt32Value", + "google.protobuf.UInt64Value": + // The msg value is ignored when dealing with wrapper types as they have a null or value + // behavior, rather than the standard zero value behavior of other proto message types. + if !refMsg.IsValid() { + return structpb.NullValue_NULL_VALUE, true, nil + } + valueField := refMsg.Descriptor().Fields().ByName("value") + return refMsg.Get(valueField).Interface(), true, nil + case "google.protobuf.Duration": + unwrapped := &dpb.Duration{} + err := Merge(unwrapped, msg) + if err != nil { + return nil, false, err + } + return unwrapped.AsDuration(), true, nil + case "google.protobuf.ListValue": + unwrapped := &structpb.ListValue{} + err := Merge(unwrapped, msg) + if err != nil { + return nil, false, err + } + return unwrapped, true, nil + case "google.protobuf.NullValue": + return structpb.NullValue_NULL_VALUE, true, nil + case "google.protobuf.Struct": + unwrapped := &structpb.Struct{} + err := Merge(unwrapped, msg) + if err != nil { + return nil, false, err + } + return unwrapped, true, nil + case "google.protobuf.Timestamp": + unwrapped := &tpb.Timestamp{} + err := Merge(unwrapped, msg) + if err != nil { + return nil, false, err + } + return unwrapped.AsTime(), true, nil + case "google.protobuf.Value": + unwrapped := &structpb.Value{} + err := Merge(unwrapped, msg) + if err != nil { + return nil, false, err + } + return unwrap(desc, unwrapped) + } + return msg, false, nil +} + +// reflectTypeOf intercepts the reflect.Type call to ensure that dynamicpb.Message types preserve +// well-known protobuf reflected types expected by the CEL type system. +func reflectTypeOf(val any) reflect.Type { + switch v := val.(type) { + case proto.Message: + return reflect.TypeOf(zeroValueOf(v)) + default: + return reflect.TypeOf(v) + } +} + +// zeroValueOf will return the strongest possible proto.Message representing the default protobuf +// message value of the input msg type. 
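+//
+// For example, zeroValueOf(&wrapperspb.Int64Value{}) yields the shared wrapperspb.Int64(0)
+// instance from zeroValueMap, while message types without an entry are returned as-is.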
+func zeroValueOf(msg proto.Message) proto.Message {
+	if msg == nil {
+		return nil
+	}
+	typeName := string(msg.ProtoReflect().Descriptor().FullName())
+	zeroVal, found := zeroValueMap[typeName]
+	if found {
+		return zeroVal
+	}
+	return msg
+}
+
+var (
+	jsonValueTypeURL = "types.googleapis.com/google.protobuf.Value"
+
+	zeroValueMap = map[string]proto.Message{
+		"google.protobuf.Any":         &anypb.Any{TypeUrl: jsonValueTypeURL},
+		"google.protobuf.Duration":    &dpb.Duration{},
+		"google.protobuf.ListValue":   &structpb.ListValue{},
+		"google.protobuf.Struct":      &structpb.Struct{},
+		"google.protobuf.Timestamp":   &tpb.Timestamp{},
+		"google.protobuf.Value":       &structpb.Value{},
+		"google.protobuf.BoolValue":   wrapperspb.Bool(false),
+		"google.protobuf.BytesValue":  wrapperspb.Bytes([]byte{}),
+		"google.protobuf.DoubleValue": wrapperspb.Double(0.0),
+		"google.protobuf.FloatValue":  wrapperspb.Float(0.0),
+		"google.protobuf.Int32Value":  wrapperspb.Int32(0),
+		"google.protobuf.Int64Value":  wrapperspb.Int64(0),
+		"google.protobuf.StringValue": wrapperspb.String(""),
+		"google.protobuf.UInt32Value": wrapperspb.UInt32(0),
+		"google.protobuf.UInt64Value": wrapperspb.UInt64(0),
+	}
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/provider.go b/hack/tools/vendor/github.com/google/cel-go/common/types/provider.go
new file mode 100644
index 0000000000..936a4e28b2
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/provider.go
@@ -0,0 +1,766 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+
+	"github.com/google/cel-go/common/types/pb"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	dpb "google.golang.org/protobuf/types/known/durationpb"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	tpb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Adapter converts native Go values of varying type and complexity to equivalent CEL values.
+type Adapter = ref.TypeAdapter
+
+// Provider specifies functions for creating new object instances and for resolving
+// enum values by name.
+type Provider interface {
+	// EnumValue returns the numeric value of the given enum value name.
+	EnumValue(enumName string) ref.Val
+
+	// FindIdent takes a qualified identifier name and returns a ref.Val if one exists.
+	FindIdent(identName string) (ref.Val, bool)
+
+	// FindStructType returns the Type given a qualified type name.
+	//
+	// For historical reasons, only struct types are expected to be returned through this
+	// method, and the type values are expected to be wrapped in a TypeType instance using
+	// TypeTypeWithParam().
+	//
+	// Returns false if not found.
+	FindStructType(structType string) (*Type, bool)
+
+	// FindStructFieldNames returns the field names associated with the type, if the type
+	// is found.
+	FindStructFieldNames(structType string) ([]string, bool)
+
+	// FindStructFieldType returns the field type for a checked type value. Returns
+	// false if the field could not be found.
+	FindStructFieldType(structType, fieldName string) (*FieldType, bool)
+
+	// NewValue creates a new type value from a qualified name and map of field
+	// name to value.
+	//
+	// Note, for each value, the Val.ConvertToNative function will be invoked
+	// to convert the Val to the field's native type. If an error occurs during
+	// conversion, the NewValue will be a types.Err.
+	NewValue(structType string, fields map[string]ref.Val) ref.Val
+}
+
+// FieldType represents a field's type value and whether that field supports presence detection.
+type FieldType struct {
+	// Type of the field as a CEL native type value.
+	Type *Type
+
+	// IsSet indicates whether the field is set on an input object.
+	IsSet ref.FieldTester
+
+	// GetFrom retrieves the field value on the input object, if set.
+	GetFrom ref.FieldGetter
+}
+
+// Registry provides type information for a set of registered types.
+type Registry struct {
+	revTypeMap map[string]*Type
+	pbdb       *pb.Db
+}
+
+// NewRegistry accepts a list of proto message instances and returns a type
+// provider which can create new instances of the provided message or any
+// message that proto depends upon in its FileDescriptor.
+func NewRegistry(types ...proto.Message) (*Registry, error) {
+	p := &Registry{
+		revTypeMap: make(map[string]*Type),
+		pbdb:       pb.NewDb(),
+	}
+	err := p.RegisterType(
+		BoolType,
+		BytesType,
+		DoubleType,
+		DurationType,
+		IntType,
+		ListType,
+		MapType,
+		NullType,
+		StringType,
+		TimestampType,
+		TypeType,
+		UintType)
+	if err != nil {
+		return nil, err
+	}
+	// This block ensures that the well-known protobuf types are registered by default.
+	for _, fd := range p.pbdb.FileDescriptions() {
+		err = p.registerAllTypes(fd)
+		if err != nil {
+			return nil, err
+		}
+	}
+	for _, msgType := range types {
+		err = p.RegisterMessage(msgType)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return p, nil
+}
+
+// NewEmptyRegistry returns a registry which is completely unconfigured.
+func NewEmptyRegistry() *Registry {
+	return &Registry{
+		revTypeMap: make(map[string]*Type),
+		pbdb:       pb.NewDb(),
+	}
+}
+
+// Copy copies the current state of the registry into its own memory space.
+func (p *Registry) Copy() *Registry {
+	copy := &Registry{
+		revTypeMap: make(map[string]*Type),
+		pbdb:       p.pbdb.Copy(),
+	}
+	for k, v := range p.revTypeMap {
+		copy.revTypeMap[k] = v
+	}
+	return copy
+}
+
+// EnumValue returns the numeric value of the given enum value name.
+func (p *Registry) EnumValue(enumName string) ref.Val {
+	enumVal, found := p.pbdb.DescribeEnum(enumName)
+	if !found {
+		return NewErr("unknown enum name '%s'", enumName)
+	}
+	return Int(enumVal.Value())
+}
+
+// FindFieldType returns the field type for a checked type value. Returns false if
+// the field could not be found.
+//
+// Deprecated: use FindStructFieldType
+func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
+	msgType, found := p.pbdb.DescribeType(structType)
+	if !found {
+		return nil, false
+	}
+	field, found := msgType.FieldByName(fieldName)
+	if !found {
+		return nil, false
+	}
+	return &ref.FieldType{
+		Type:    field.CheckedType(),
+		IsSet:   field.IsSet,
+		GetFrom: field.GetFrom}, true
+}
+
+// FindStructFieldNames returns the set of field names for the given struct type,
+// if the type exists in the registry.
+func (p *Registry) FindStructFieldNames(structType string) ([]string, bool) {
+	msgType, found := p.pbdb.DescribeType(structType)
+	if !found {
+		return []string{}, false
+	}
+	fieldMap := msgType.FieldMap()
+	fields := make([]string, len(fieldMap))
+	idx := 0
+	for f := range fieldMap {
+		fields[idx] = f
+		idx++
+	}
+	return fields, true
+}
+
+// FindStructFieldType returns the field type for a checked type value. Returns
+// false if the field could not be found.
+func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType, bool) {
+	msgType, found := p.pbdb.DescribeType(structType)
+	if !found {
+		return nil, false
+	}
+	field, found := msgType.FieldByName(fieldName)
+	if !found {
+		return nil, false
+	}
+	return &FieldType{
+		Type:    fieldDescToCELType(field),
+		IsSet:   field.IsSet,
+		GetFrom: field.GetFrom}, true
+}
+
+// FindIdent takes a qualified identifier name and returns a ref.Val if one exists.
+func (p *Registry) FindIdent(identName string) (ref.Val, bool) {
+	if t, found := p.revTypeMap[identName]; found {
+		return t, true
+	}
+	if enumVal, found := p.pbdb.DescribeEnum(identName); found {
+		return Int(enumVal.Value()), true
+	}
+	return nil, false
+}
+
+// FindType looks up the Type given a qualified typeName. Returns false if not found.
+//
+// Deprecated: use FindStructType
+func (p *Registry) FindType(structType string) (*exprpb.Type, bool) {
+	if _, found := p.pbdb.DescribeType(structType); !found {
+		return nil, false
+	}
+	if structType != "" && structType[0] == '.' {
+		structType = structType[1:]
+	}
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_Type{
+			Type: &exprpb.Type{
+				TypeKind: &exprpb.Type_MessageType{
+					MessageType: structType}}}}, true
+}
+
+// FindStructType returns the Type given a qualified type name.
+//
+// For historical reasons, only struct types are expected to be returned through this
+// method, and the type values are expected to be wrapped in a TypeType instance using
+// TypeTypeWithParam().
+//
+// Returns false if not found.
+func (p *Registry) FindStructType(structType string) (*Type, bool) {
+	if _, found := p.pbdb.DescribeType(structType); !found {
+		return nil, false
+	}
+	if structType != "" && structType[0] == '.' {
+		structType = structType[1:]
+	}
+	return NewTypeTypeWithParam(NewObjectType(structType)), true
+}
+
+// NewValue creates a new type value from a qualified name and map of field
+// name to value.
+//
+// Note, for each value, the Val.ConvertToNative function will be invoked
+// to convert the Val to the field's native type. If an error occurs during
+// conversion, the NewValue will be a types.Err.
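+//
+// A usage sketch (illustrative; Duration is one of the well-known types registered
+// by default):
+//
+//	val := p.NewValue("google.protobuf.Duration", map[string]ref.Val{"seconds": Int(60)})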
+func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Val {
+	td, found := p.pbdb.DescribeType(structType)
+	if !found {
+		return NewErr("unknown type '%s'", structType)
+	}
+	msg := td.New()
+	fieldMap := td.FieldMap()
+	for name, value := range fields {
+		field, found := fieldMap[name]
+		if !found {
+			return NewErr("no such field: %s", name)
+		}
+		err := msgSetField(msg, field, value)
+		if err != nil {
+			return &Err{error: err}
+		}
+	}
+	return p.NativeToValue(msg.Interface())
+}
+
+// RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
+func (p *Registry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error {
+	fd, err := p.pbdb.RegisterDescriptor(fileDesc)
+	if err != nil {
+		return err
+	}
+	return p.registerAllTypes(fd)
+}
+
+// RegisterMessage registers a protocol buffer message and its dependencies.
+func (p *Registry) RegisterMessage(message proto.Message) error {
+	fd, err := p.pbdb.RegisterMessage(message)
+	if err != nil {
+		return err
+	}
+	return p.registerAllTypes(fd)
+}
+
+// RegisterType registers a type value with the provider which ensures the provider is aware of how to
+// map the type to an identifier.
+//
+// If the `ref.Type` value is a `*types.Type` it will be registered directly by its runtime type name.
+// If the `ref.Type` value is not a `*types.Type` instance, a `*types.Type` instance which reflects the
+// traits present on the input and the runtime type name is generated and registered. By default this
+// foreign type will be treated as a types.StructKind. To avoid potential issues where the `ref.Type`
+// value does not match the generated `*types.Type` instance, consider always using the `*types.Type`
+// to represent type extensions to CEL, even when they're not based on protobuf types.
+func (p *Registry) RegisterType(types ...ref.Type) error {
+	for _, t := range types {
+		celType := maybeForeignType(t)
+		existing, found := p.revTypeMap[t.TypeName()]
+		if !found {
+			p.revTypeMap[t.TypeName()] = celType
+			continue
+		}
+		if !existing.IsEquivalentType(celType) {
+			return fmt.Errorf("type registration conflict. found: %v, input: %v", existing, celType)
+		}
+		if existing.traitMask != celType.traitMask {
+			return fmt.Errorf(
+				"type registered with conflicting traits: %v with traits %v, input: %v",
+				existing.TypeName(), existing.traitMask, celType.traitMask)
+		}
+	}
+	return nil
+}
+
+// NativeToValue converts various "native" types to ref.Val with this specific implementation
+// providing support for custom proto-based types.
+//
+// This method should be the inverse of ref.Val.ConvertToNative.
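+//
+// For example (an illustrative sketch):
+//
+//	ts := p.NativeToValue(&tpb.Timestamp{Seconds: 1}) // yields a CEL timestamp value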
+func (p *Registry) NativeToValue(value any) ref.Val { + if val, found := nativeToValue(p, value); found { + return val + } + switch v := value.(type) { + case proto.Message: + typeName := string(v.ProtoReflect().Descriptor().FullName()) + td, found := p.pbdb.DescribeType(typeName) + if !found { + return NewErr("unknown type: '%s'", typeName) + } + unwrapped, isUnwrapped, err := td.MaybeUnwrap(v) + if err != nil { + return UnsupportedRefValConversionErr(v) + } + if isUnwrapped { + return p.NativeToValue(unwrapped) + } + typeVal, found := p.FindIdent(typeName) + if !found { + return NewErr("unknown type: '%s'", typeName) + } + return NewObject(p, td, typeVal, v) + case *pb.Map: + return NewProtoMap(p, v) + case protoreflect.List: + return NewProtoList(p, v) + case protoreflect.Message: + return p.NativeToValue(v.Interface()) + case protoreflect.Value: + return p.NativeToValue(v.Interface()) + } + return UnsupportedRefValConversionErr(value) +} + +func (p *Registry) registerAllTypes(fd *pb.FileDescription) error { + for _, typeName := range fd.GetTypeNames() { + // skip well-known type names since they're automatically sanitized + // during NewObjectType() calls. + if _, found := checkedWellKnowns[typeName]; found { + continue + } + err := p.RegisterType(NewObjectTypeValue(typeName)) + if err != nil { + return err + } + } + return nil +} + +func fieldDescToCELType(field *pb.FieldDescription) *Type { + if field.IsMap() { + return NewMapType( + singularFieldDescToCELType(field.KeyType), + singularFieldDescToCELType(field.ValueType)) + } + if field.IsList() { + return NewListType(singularFieldDescToCELType(field)) + } + return singularFieldDescToCELType(field) +} + +func singularFieldDescToCELType(field *pb.FieldDescription) *Type { + if field.IsMessage() { + return NewObjectType(string(field.Descriptor().Message().FullName())) + } + if field.IsEnum() { + return IntType + } + return ProtoCELPrimitives[field.ProtoKind()] +} + +// defaultTypeAdapter converts go native types to CEL values. +type defaultTypeAdapter struct{} + +var ( + // DefaultTypeAdapter adapts canonical CEL types from their equivalent Go values. + DefaultTypeAdapter = &defaultTypeAdapter{} +) + +// NativeToValue implements the ref.TypeAdapter interface. 
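+//
+// For example (an illustrative sketch):
+//
+// v := DefaultTypeAdapter.NativeToValue([]string{"a", "b"})
+// // v is a CEL list of strings; unsupported inputs yield an error value.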
+func (a *defaultTypeAdapter) NativeToValue(value any) ref.Val {
+ if val, found := nativeToValue(a, value); found {
+ return val
+ }
+ return UnsupportedRefValConversionErr(value)
+}
+
+// nativeToValue returns the converted (ref.Val, true) if a conversion is found,
+// otherwise (nil, false)
+func nativeToValue(a Adapter, value any) (ref.Val, bool) {
+ switch v := value.(type) {
+ case nil:
+ return NullValue, true
+ case *Bool:
+ if v != nil {
+ return *v, true
+ }
+ case *Bytes:
+ if v != nil {
+ return *v, true
+ }
+ case *Double:
+ if v != nil {
+ return *v, true
+ }
+ case *Int:
+ if v != nil {
+ return *v, true
+ }
+ case *String:
+ if v != nil {
+ return *v, true
+ }
+ case *Uint:
+ if v != nil {
+ return *v, true
+ }
+ case bool:
+ return Bool(v), true
+ case int:
+ return Int(v), true
+ case int32:
+ return Int(v), true
+ case int64:
+ return Int(v), true
+ case uint:
+ return Uint(v), true
+ case uint32:
+ return Uint(v), true
+ case uint64:
+ return Uint(v), true
+ case float32:
+ return Double(v), true
+ case float64:
+ return Double(v), true
+ case string:
+ return String(v), true
+ case *dpb.Duration:
+ return Duration{Duration: v.AsDuration()}, true
+ case time.Duration:
+ return Duration{Duration: v}, true
+ case *tpb.Timestamp:
+ return Timestamp{Time: v.AsTime()}, true
+ case time.Time:
+ return Timestamp{Time: v}, true
+ case *bool:
+ if v != nil {
+ return Bool(*v), true
+ }
+ case *float32:
+ if v != nil {
+ return Double(*v), true
+ }
+ case *float64:
+ if v != nil {
+ return Double(*v), true
+ }
+ case *int:
+ if v != nil {
+ return Int(*v), true
+ }
+ case *int32:
+ if v != nil {
+ return Int(*v), true
+ }
+ case *int64:
+ if v != nil {
+ return Int(*v), true
+ }
+ case *string:
+ if v != nil {
+ return String(*v), true
+ }
+ case *uint:
+ if v != nil {
+ return Uint(*v), true
+ }
+ case *uint32:
+ if v != nil {
+ return Uint(*v), true
+ }
+ case *uint64:
+ if v != nil {
+ return Uint(*v), true
+ }
+ case []byte:
+ return Bytes(v), true
+ // specializations for common list types.
+ case []string:
+ return NewStringList(a, v), true
+ case []ref.Val:
+ return NewRefValList(a, v), true
+ // specializations for common map types.
+ case map[string]string:
+ return NewStringStringMap(a, v), true
+ case map[string]any:
+ return NewStringInterfaceMap(a, v), true
+ case map[ref.Val]ref.Val:
+ return NewRefValMap(a, v), true
+ // additional specializations may be added upon request / need.
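+ // Well-known protobuf types (Any, JSON values) and arbitrary proto
+ // messages are handled next, before falling back to reflection.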
+ case *anypb.Any: + if v == nil { + return UnsupportedRefValConversionErr(v), true + } + unpackedAny, err := v.UnmarshalNew() + if err != nil { + return NewErr("anypb.UnmarshalNew() failed for type %q: %v", v.GetTypeUrl(), err), true + } + return a.NativeToValue(unpackedAny), true + case *structpb.NullValue, structpb.NullValue: + return NullValue, true + case *structpb.ListValue: + return NewJSONList(a, v), true + case *structpb.Struct: + return NewJSONStruct(a, v), true + case ref.Val: + return v, true + case protoreflect.EnumNumber: + return Int(v), true + case proto.Message: + if v == nil { + return UnsupportedRefValConversionErr(v), true + } + typeName := string(v.ProtoReflect().Descriptor().FullName()) + td, found := pb.DefaultDb.DescribeType(typeName) + if !found { + return nil, false + } + val, unwrapped, err := td.MaybeUnwrap(v) + if err != nil { + return UnsupportedRefValConversionErr(v), true + } + if !unwrapped { + return nil, false + } + return a.NativeToValue(val), true + // Note: dynamicpb.Message implements the proto.Message _and_ protoreflect.Message interfaces + // which means that this case must appear after handling a proto.Message type. + case protoreflect.Message: + return a.NativeToValue(v.Interface()), true + default: + refValue := reflect.ValueOf(v) + if refValue.Kind() == reflect.Ptr { + if refValue.IsNil() { + return UnsupportedRefValConversionErr(v), true + } + refValue = refValue.Elem() + } + refKind := refValue.Kind() + switch refKind { + case reflect.Array, reflect.Slice: + if refValue.Type().Elem() == reflect.TypeOf(byte(0)) { + if refValue.CanAddr() { + return Bytes(refValue.Bytes()), true + } + tmp := reflect.New(refValue.Type()) + tmp.Elem().Set(refValue) + return Bytes(tmp.Elem().Bytes()), true + } + return NewDynamicList(a, v), true + case reflect.Map: + return NewDynamicMap(a, v), true + // type aliases of primitive types cannot be asserted as that type, but rather need + // to be downcast to int32 before being converted to a CEL representation. 
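+ // For example, a value declared as `type logLevel int32` reaches this
+ // switch and is converted via reflect.Value.Convert below (the name
+ // logLevel is purely illustrative).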
+ case reflect.Bool:
+ boolType := reflect.TypeOf(false)
+ return Bool(refValue.Convert(boolType).Interface().(bool)), true
+ case reflect.Int:
+ intType := reflect.TypeOf(int(0))
+ return Int(refValue.Convert(intType).Interface().(int)), true
+ case reflect.Int8:
+ intType := reflect.TypeOf(int8(0))
+ return Int(refValue.Convert(intType).Interface().(int8)), true
+ case reflect.Int16:
+ intType := reflect.TypeOf(int16(0))
+ return Int(refValue.Convert(intType).Interface().(int16)), true
+ case reflect.Int32:
+ intType := reflect.TypeOf(int32(0))
+ return Int(refValue.Convert(intType).Interface().(int32)), true
+ case reflect.Int64:
+ intType := reflect.TypeOf(int64(0))
+ return Int(refValue.Convert(intType).Interface().(int64)), true
+ case reflect.Uint:
+ uintType := reflect.TypeOf(uint(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint)), true
+ case reflect.Uint8:
+ uintType := reflect.TypeOf(uint8(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint8)), true
+ case reflect.Uint16:
+ uintType := reflect.TypeOf(uint16(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint16)), true
+ case reflect.Uint32:
+ uintType := reflect.TypeOf(uint32(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint32)), true
+ case reflect.Uint64:
+ uintType := reflect.TypeOf(uint64(0))
+ return Uint(refValue.Convert(uintType).Interface().(uint64)), true
+ case reflect.Float32:
+ doubleType := reflect.TypeOf(float32(0))
+ return Double(refValue.Convert(doubleType).Interface().(float32)), true
+ case reflect.Float64:
+ doubleType := reflect.TypeOf(float64(0))
+ return Double(refValue.Convert(doubleType).Interface().(float64)), true
+ case reflect.String:
+ stringType := reflect.TypeOf("")
+ return String(refValue.Convert(stringType).Interface().(string)), true
+ }
+ }
+ return nil, false
+}
+
+func msgSetField(target protoreflect.Message, field *pb.FieldDescription, val ref.Val) error {
+ if field.IsList() {
+ lv := target.NewField(field.Descriptor())
+ list, ok := val.(traits.Lister)
+ if !ok {
+ return unsupportedTypeConversionError(field, val)
+ }
+ err := msgSetListField(lv.List(), field, list)
+ if err != nil {
+ return err
+ }
+ target.Set(field.Descriptor(), lv)
+ return nil
+ }
+ if field.IsMap() {
+ mv := target.NewField(field.Descriptor())
+ mp, ok := val.(traits.Mapper)
+ if !ok {
+ return unsupportedTypeConversionError(field, val)
+ }
+ err := msgSetMapField(mv.Map(), field, mp)
+ if err != nil {
+ return err
+ }
+ target.Set(field.Descriptor(), mv)
+ return nil
+ }
+ v, err := val.ConvertToNative(field.ReflectType())
+ if err != nil {
+ return fieldTypeConversionError(field, err)
+ }
+ if v == nil {
+ return nil
+ }
+ switch pv := v.(type) {
+ case proto.Message:
+ v = pv.ProtoReflect()
+ }
+ target.Set(field.Descriptor(), protoreflect.ValueOf(v))
+ return nil
+}
+
+func msgSetListField(target protoreflect.List, listField *pb.FieldDescription, listVal traits.Lister) error {
+ elemReflectType := listField.ReflectType().Elem()
+ for i := Int(0); i < listVal.Size().(Int); i++ {
+ elem := listVal.Get(i)
+ elemVal, err := elem.ConvertToNative(elemReflectType)
+ if err != nil {
+ return fieldTypeConversionError(listField, err)
+ }
+ if elemVal == nil {
+ continue
+ }
+ switch ev := elemVal.(type) {
+ case proto.Message:
+ elemVal = ev.ProtoReflect()
+ }
+ target.Append(protoreflect.ValueOf(elemVal))
+ }
+ return nil
+}
+
+func msgSetMapField(target protoreflect.Map, mapField *pb.FieldDescription, mapVal traits.Mapper) error {
+ targetKeyType :=
mapField.KeyType.ReflectType() + targetValType := mapField.ValueType.ReflectType() + it := mapVal.Iterator() + for it.HasNext() == True { + key := it.Next() + val := mapVal.Get(key) + k, err := key.ConvertToNative(targetKeyType) + if err != nil { + return fieldTypeConversionError(mapField, err) + } + v, err := val.ConvertToNative(targetValType) + if err != nil { + return fieldTypeConversionError(mapField, err) + } + if v == nil { + continue + } + switch pv := v.(type) { + case proto.Message: + v = pv.ProtoReflect() + } + target.Set(protoreflect.ValueOf(k).MapKey(), protoreflect.ValueOf(v)) + } + return nil +} + +func unsupportedTypeConversionError(field *pb.FieldDescription, val ref.Val) error { + msgName := field.Descriptor().ContainingMessage().FullName() + return fmt.Errorf("unsupported field type for %v.%v: %v", msgName, field.Name(), val.Type()) +} + +func fieldTypeConversionError(field *pb.FieldDescription, err error) error { + msgName := field.Descriptor().ContainingMessage().FullName() + return fmt.Errorf("field type conversion error for %v.%v value type: %v", msgName, field.Name(), err) +} + +var ( + // ProtoCELPrimitives provides a map from the protoreflect Kind to the equivalent CEL type. + ProtoCELPrimitives = map[protoreflect.Kind]*Type{ + protoreflect.BoolKind: BoolType, + protoreflect.BytesKind: BytesType, + protoreflect.DoubleKind: DoubleType, + protoreflect.FloatKind: DoubleType, + protoreflect.Int32Kind: IntType, + protoreflect.Int64Kind: IntType, + protoreflect.Sint32Kind: IntType, + protoreflect.Sint64Kind: IntType, + protoreflect.Uint32Kind: UintType, + protoreflect.Uint64Kind: UintType, + protoreflect.Fixed32Kind: UintType, + protoreflect.Fixed64Kind: UintType, + protoreflect.Sfixed32Kind: IntType, + protoreflect.Sfixed64Kind: IntType, + protoreflect.StringKind: StringType, + } +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel new file mode 100644 index 0000000000..79330c3321 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "provider.go", + "reference.go", + ], + importpath = "github.com/google/cel-go/common/types/ref", + deps = [ + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/ref/provider.go b/hack/tools/vendor/github.com/google/cel-go/common/types/ref/provider.go new file mode 100644 index 0000000000..b9820023d6 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/ref/provider.go @@ -0,0 +1,102 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ref
+
+import (
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// TypeProvider specifies functions for creating new object instances and for
+// resolving enum values by name.
+//
+// Deprecated: use types.Provider
+type TypeProvider interface {
+ // EnumValue returns the numeric value of the given enum value name.
+ EnumValue(enumName string) Val
+
+ // FindIdent takes a qualified identifier name and returns a Value if one exists.
+ FindIdent(identName string) (Val, bool)
+
+ // FindType looks up the Type given a qualified typeName. Returns false if not found.
+ FindType(typeName string) (*exprpb.Type, bool)
+
+ // FindFieldType returns the field type for a checked type value. Returns false if
+ // the field could not be found.
+ FindFieldType(messageType, fieldName string) (*FieldType, bool)
+
+ // NewValue creates a new type value from a qualified name and map of field name
+ // to value.
+ //
+ // Note, for each value, the Val.ConvertToNative function will be invoked to convert
+ // the Val to the field's native type. If an error occurs during conversion, the
+ // result of NewValue will be a types.Err.
+ NewValue(typeName string, fields map[string]Val) Val
+}
+
+// TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values.
+//
+// Deprecated: use types.Adapter
+type TypeAdapter interface {
+ // NativeToValue converts the input `value` to a CEL `ref.Val`.
+ NativeToValue(value any) Val
+}
+
+// TypeRegistry allows third-parties to add custom types to CEL. Not all `TypeProvider`
+// implementations support type-customization, so these features are optional. However, a
+// `TypeRegistry` should be a `TypeProvider` and a `TypeAdapter` to ensure that types
+// which are registered can be converted to CEL representations.
+//
+// Deprecated: use types.Registry
+type TypeRegistry interface {
+ TypeAdapter
+ TypeProvider
+
+ // RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
+ RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error
+
+ // RegisterMessage registers a protocol buffer message and its dependencies.
+ RegisterMessage(message proto.Message) error
+
+ // RegisterType registers a type value with the provider which ensures the
+ // provider is aware of how to map the type to an identifier.
+ //
+ // If a type is provided more than once with an alternative definition, the
+ // call will result in an error.
+ RegisterType(types ...Type) error
+}
+
+// FieldType represents a field's type value and whether that field supports
+// presence detection.
+//
+// Deprecated: use types.FieldType
+type FieldType struct {
+ // Type of the field as a protobuf type value.
+ Type *exprpb.Type
+
+ // IsSet indicates whether the field is set on an input object.
+ IsSet FieldTester
+
+ // GetFrom retrieves the field value on the input object, if set.
+ GetFrom FieldGetter
+}
+
+// FieldTester is used to test field presence on an input object.
+type FieldTester func(target any) bool
+
+// FieldGetter is used to get the field value from an input object, if set.
+type FieldGetter func(target any) (any, error)
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/ref/reference.go b/hack/tools/vendor/github.com/google/cel-go/common/types/ref/reference.go
new file mode 100644
index 0000000000..e0d58145cd
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/ref/reference.go
@@ -0,0 +1,63 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ref contains the reference interfaces used throughout the types components.
+package ref
+
+import (
+ "reflect"
+)
+
+// Type interface indicates the name of a given type.
+type Type interface {
+ // HasTrait returns whether the type has a given trait associated with it.
+ //
+ // See common/types/traits/traits.go for a list of supported traits.
+ HasTrait(trait int) bool
+
+ // TypeName returns the qualified type name of the type.
+ //
+ // The type name is also used as the type's identifier name at type-check and interpretation time.
+ TypeName() string
+}
+
+// Val interface defines the functions supported by all expression values.
+// Val implementations may specialize the behavior of the value through the addition of traits.
+type Val interface {
+ // ConvertToNative converts the Value to a native Go struct according to the
+ // reflected type description, or returns an error if the conversion is not feasible.
+ //
+ // The ConvertToNative method is intended to be used to support conversion between CEL types
+ // and native types during object creation expressions or by clients who need to adapt the
+ // returned CEL value into an equivalent Go value instance.
+ //
+ // When implementing or using ConvertToNative, the following guidelines apply:
+ // - Use ConvertToNative when marshalling CEL evaluation results to native types.
+ // - Do not use ConvertToNative within CEL extension functions.
+ // - Document whether your implementation supports non-CEL field types, such as Go or Protobuf.
+ ConvertToNative(typeDesc reflect.Type) (any, error)
+
+ // ConvertToType supports type conversions between CEL value types supported by the expression language.
+ ConvertToType(typeValue Type) Val
+
+ // Equal returns true if the `other` value has the same type and content as the implementing struct.
+ Equal(other Val) Val
+
+ // Type returns the TypeValue of the value.
+ Type() Type
+
+ // Value returns the raw value of the instance which may not be directly compatible with the expression
+ // language types.
+ Value() any
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/string.go b/hack/tools/vendor/github.com/google/cel-go/common/types/string.go
new file mode 100644
index 0000000000..3a93743f24
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/string.go
@@ -0,0 +1,226 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +// String type implementation which supports addition, comparison, matching, +// and size functions. +type String string + +var ( + stringOneArgOverloads = map[string]func(ref.Val, ref.Val) ref.Val{ + overloads.Contains: StringContains, + overloads.EndsWith: StringEndsWith, + overloads.StartsWith: StringStartsWith, + } + + stringWrapperType = reflect.TypeOf(&wrapperspb.StringValue{}) +) + +// Add implements traits.Adder.Add. +func (s String) Add(other ref.Val) ref.Val { + otherString, ok := other.(String) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + return s + otherString +} + +// Compare implements traits.Comparer.Compare. +func (s String) Compare(other ref.Val) ref.Val { + otherString, ok := other.(String) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + return Int(strings.Compare(s.Value().(string), otherString.Value().(string))) +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (s String) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.String: + return reflect.ValueOf(s).Convert(typeDesc).Interface(), nil + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Primitives must be wrapped before being set on an Any field. + return anypb.New(wrapperspb.String(string(s))) + case jsonValueType: + // Convert to a protobuf representation of a JSON String. + return structpb.NewStringValue(string(s)), nil + case stringWrapperType: + // Convert to a wrapperspb.StringValue. + return wrapperspb.String(string(s)), nil + } + if typeDesc.Elem().Kind() == reflect.String { + p := s.Value().(string) + return &p, nil + } + case reflect.Interface: + sv := s.Value() + if reflect.TypeOf(sv).Implements(typeDesc) { + return sv, nil + } + if reflect.TypeOf(s).Implements(typeDesc) { + return s, nil + } + } + return nil, fmt.Errorf( + "unsupported native conversion from string to '%v'", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. 
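+//
+// For example, String("42").ConvertToType(IntType) yields Int(42), and
+// String("true").ConvertToType(BoolType) yields Bool(true); inputs that
+// cannot be parsed fall through to the conversion error below.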
+func (s String) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case IntType: + if n, err := strconv.ParseInt(s.Value().(string), 10, 64); err == nil { + return Int(n) + } + case UintType: + if n, err := strconv.ParseUint(s.Value().(string), 10, 64); err == nil { + return Uint(n) + } + case DoubleType: + if n, err := strconv.ParseFloat(s.Value().(string), 64); err == nil { + return Double(n) + } + case BoolType: + if b, err := strconv.ParseBool(s.Value().(string)); err == nil { + return Bool(b) + } + case BytesType: + return Bytes(s) + case DurationType: + if d, err := time.ParseDuration(s.Value().(string)); err == nil { + return durationOf(d) + } + case TimestampType: + if t, err := time.Parse(time.RFC3339, s.Value().(string)); err == nil { + if t.Unix() < minUnixTime || t.Unix() > maxUnixTime { + return celErrTimestampOverflow + } + return timestampOf(t) + } + case StringType: + return s + case TypeType: + return StringType + } + return NewErr("type conversion error from '%s' to '%s'", StringType, typeVal) +} + +// Equal implements ref.Val.Equal. +func (s String) Equal(other ref.Val) ref.Val { + otherString, ok := other.(String) + return Bool(ok && s == otherString) +} + +// IsZeroValue returns true if the string is empty. +func (s String) IsZeroValue() bool { + return len(s) == 0 +} + +// Match implements traits.Matcher.Match. +func (s String) Match(pattern ref.Val) ref.Val { + pat, ok := pattern.(String) + if !ok { + return MaybeNoSuchOverloadErr(pattern) + } + matched, err := regexp.MatchString(pat.Value().(string), s.Value().(string)) + if err != nil { + return &Err{error: err} + } + return Bool(matched) +} + +// Receive implements traits.Receiver.Receive. +func (s String) Receive(function string, overload string, args []ref.Val) ref.Val { + switch len(args) { + case 1: + if f, found := stringOneArgOverloads[function]; found { + return f(s, args[0]) + } + } + return NoSuchOverloadErr() +} + +// Size implements traits.Sizer.Size. +func (s String) Size() ref.Val { + return Int(len([]rune(s.Value().(string)))) +} + +// Type implements ref.Val.Type. +func (s String) Type() ref.Type { + return StringType +} + +// Value implements ref.Val.Value. +func (s String) Value() any { + return string(s) +} + +// StringContains returns whether the string contains a substring. +func StringContains(s, sub ref.Val) ref.Val { + str, ok := s.(String) + if !ok { + return MaybeNoSuchOverloadErr(s) + } + subStr, ok := sub.(String) + if !ok { + return MaybeNoSuchOverloadErr(sub) + } + return Bool(strings.Contains(string(str), string(subStr))) +} + +// StringEndsWith returns whether the target string contains the input suffix. +func StringEndsWith(s, suf ref.Val) ref.Val { + str, ok := s.(String) + if !ok { + return MaybeNoSuchOverloadErr(s) + } + sufStr, ok := suf.(String) + if !ok { + return MaybeNoSuchOverloadErr(suf) + } + return Bool(strings.HasSuffix(string(str), string(sufStr))) +} + +// StringStartsWith returns whether the target string contains the input prefix. 
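+//
+// For example, StringStartsWith(String("hello"), String("he")) returns Bool(true).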
+func StringStartsWith(s, pre ref.Val) ref.Val { + str, ok := s.(String) + if !ok { + return MaybeNoSuchOverloadErr(s) + } + preStr, ok := pre.(String) + if !ok { + return MaybeNoSuchOverloadErr(pre) + } + return Bool(strings.HasPrefix(string(str), string(preStr))) +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/timestamp.go b/hack/tools/vendor/github.com/google/cel-go/common/types/timestamp.go new file mode 100644 index 0000000000..33acdea8ef --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/timestamp.go @@ -0,0 +1,311 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + tpb "google.golang.org/protobuf/types/known/timestamppb" +) + +// Timestamp type implementation which supports add, compare, and subtract +// operations. Timestamps are also capable of participating in dynamic +// function dispatch to instance methods. +type Timestamp struct { + time.Time +} + +func timestampOf(t time.Time) Timestamp { + // Note that this function does not validate that time.Time is in our supported range. + return Timestamp{Time: t} +} + +const ( + // The number of seconds between year 1 and year 1970. This is borrowed from + // https://golang.org/src/time/time.go. + unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * (60 * 60 * 24) + + // Number of seconds between `0001-01-01T00:00:00Z` and the Unix epoch. + minUnixTime int64 = -62135596800 + // Number of seconds between `9999-12-31T23:59:59.999999999Z` and the Unix epoch. + maxUnixTime int64 = 253402300799 +) + +// Add implements traits.Adder.Add. +func (t Timestamp) Add(other ref.Val) ref.Val { + switch other.Type() { + case DurationType: + return other.(Duration).Add(t) + } + return MaybeNoSuchOverloadErr(other) +} + +// Compare implements traits.Comparer.Compare. +func (t Timestamp) Compare(other ref.Val) ref.Val { + if TimestampType != other.Type() { + return MaybeNoSuchOverloadErr(other) + } + ts1 := t.Time + ts2 := other.(Timestamp).Time + switch { + case ts1.Before(ts2): + return IntNegOne + case ts1.After(ts2): + return IntOne + default: + return IntZero + } +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (any, error) { + // If the timestamp is already assignable to the desired type return it. + if reflect.TypeOf(t.Time).AssignableTo(typeDesc) { + return t.Time, nil + } + if reflect.TypeOf(t).AssignableTo(typeDesc) { + return t, nil + } + switch typeDesc { + case anyValueType: + // Pack the underlying time as a tpb.Timestamp into an Any value. 
+ return anypb.New(tpb.New(t.Time)) + case jsonValueType: + // CEL follows the proto3 to JSON conversion which formats as an RFC 3339 encoded JSON + // string. + v := t.ConvertToType(StringType) + if IsError(v) { + return nil, v.(*Err) + } + return structpb.NewStringValue(string(v.(String))), nil + case timestampValueType: + // Unwrap the underlying tpb.Timestamp. + return tpb.New(t.Time), nil + } + return nil, fmt.Errorf("type conversion error from 'Timestamp' to '%v'", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. +func (t Timestamp) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case StringType: + return String(t.Format(time.RFC3339Nano)) + case IntType: + // Return the Unix time in seconds since 1970 + return Int(t.Unix()) + case TimestampType: + return t + case TypeType: + return TimestampType + } + return NewErr("type conversion error from '%s' to '%s'", TimestampType, typeVal) +} + +// Equal implements ref.Val.Equal. +func (t Timestamp) Equal(other ref.Val) ref.Val { + otherTime, ok := other.(Timestamp) + return Bool(ok && t.Time.Equal(otherTime.Time)) +} + +// IsZeroValue returns true if the timestamp is epoch 0. +func (t Timestamp) IsZeroValue() bool { + return t.IsZero() +} + +// Receive implements traits.Receiver.Receive. +func (t Timestamp) Receive(function string, overload string, args []ref.Val) ref.Val { + switch len(args) { + case 0: + if f, found := timestampZeroArgOverloads[function]; found { + return f(t.Time) + } + case 1: + if f, found := timestampOneArgOverloads[function]; found { + return f(t.Time, args[0]) + } + } + return NoSuchOverloadErr() +} + +// Subtract implements traits.Subtractor.Subtract. +func (t Timestamp) Subtract(subtrahend ref.Val) ref.Val { + switch subtrahend.Type() { + case DurationType: + dur := subtrahend.(Duration) + val, err := subtractTimeDurationChecked(t.Time, dur.Duration) + if err != nil { + return WrapErr(err) + } + return timestampOf(val) + case TimestampType: + t2 := subtrahend.(Timestamp).Time + val, err := subtractTimeChecked(t.Time, t2) + if err != nil { + return WrapErr(err) + } + return durationOf(val) + } + return MaybeNoSuchOverloadErr(subtrahend) +} + +// Type implements ref.Val.Type. +func (t Timestamp) Type() ref.Type { + return TimestampType +} + +// Value implements ref.Val.Value. 
+func (t Timestamp) Value() any { + return t.Time +} + +var ( + timestampValueType = reflect.TypeOf(&tpb.Timestamp{}) + + timestampZeroArgOverloads = map[string]func(time.Time) ref.Val{ + overloads.TimeGetFullYear: timestampGetFullYear, + overloads.TimeGetMonth: timestampGetMonth, + overloads.TimeGetDayOfYear: timestampGetDayOfYear, + overloads.TimeGetDate: timestampGetDayOfMonthOneBased, + overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBased, + overloads.TimeGetDayOfWeek: timestampGetDayOfWeek, + overloads.TimeGetHours: timestampGetHours, + overloads.TimeGetMinutes: timestampGetMinutes, + overloads.TimeGetSeconds: timestampGetSeconds, + overloads.TimeGetMilliseconds: timestampGetMilliseconds} + + timestampOneArgOverloads = map[string]func(time.Time, ref.Val) ref.Val{ + overloads.TimeGetFullYear: timestampGetFullYearWithTz, + overloads.TimeGetMonth: timestampGetMonthWithTz, + overloads.TimeGetDayOfYear: timestampGetDayOfYearWithTz, + overloads.TimeGetDate: timestampGetDayOfMonthOneBasedWithTz, + overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBasedWithTz, + overloads.TimeGetDayOfWeek: timestampGetDayOfWeekWithTz, + overloads.TimeGetHours: timestampGetHoursWithTz, + overloads.TimeGetMinutes: timestampGetMinutesWithTz, + overloads.TimeGetSeconds: timestampGetSecondsWithTz, + overloads.TimeGetMilliseconds: timestampGetMillisecondsWithTz} +) + +type timestampVisitor func(time.Time) ref.Val + +func timestampGetFullYear(t time.Time) ref.Val { + return Int(t.Year()) +} +func timestampGetMonth(t time.Time) ref.Val { + // CEL spec indicates that the month should be 0-based, but the Time value + // for Month() is 1-based. + return Int(t.Month() - 1) +} +func timestampGetDayOfYear(t time.Time) ref.Val { + return Int(t.YearDay() - 1) +} +func timestampGetDayOfMonthZeroBased(t time.Time) ref.Val { + return Int(t.Day() - 1) +} +func timestampGetDayOfMonthOneBased(t time.Time) ref.Val { + return Int(t.Day()) +} +func timestampGetDayOfWeek(t time.Time) ref.Val { + return Int(t.Weekday()) +} +func timestampGetHours(t time.Time) ref.Val { + return Int(t.Hour()) +} +func timestampGetMinutes(t time.Time) ref.Val { + return Int(t.Minute()) +} +func timestampGetSeconds(t time.Time) ref.Val { + return Int(t.Second()) +} +func timestampGetMilliseconds(t time.Time) ref.Val { + return Int(t.Nanosecond() / 1000000) +} + +func timestampGetFullYearWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetFullYear)(t) +} +func timestampGetMonthWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetMonth)(t) +} +func timestampGetDayOfYearWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetDayOfYear)(t) +} +func timestampGetDayOfMonthZeroBasedWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetDayOfMonthZeroBased)(t) +} +func timestampGetDayOfMonthOneBasedWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetDayOfMonthOneBased)(t) +} +func timestampGetDayOfWeekWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetDayOfWeek)(t) +} +func timestampGetHoursWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetHours)(t) +} +func timestampGetMinutesWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetMinutes)(t) +} +func timestampGetSecondsWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetSeconds)(t) +} +func timestampGetMillisecondsWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, 
timestampGetMilliseconds)(t) +} + +func timeZone(tz ref.Val, visitor timestampVisitor) timestampVisitor { + return func(t time.Time) ref.Val { + if StringType != tz.Type() { + return MaybeNoSuchOverloadErr(tz) + } + val := string(tz.(String)) + ind := strings.Index(val, ":") + if ind == -1 { + loc, err := time.LoadLocation(val) + if err != nil { + return WrapErr(err) + } + return visitor(t.In(loc)) + } + + // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC + // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes. + hr, err := strconv.Atoi(string(val[0:ind])) + if err != nil { + return WrapErr(err) + } + min, err := strconv.Atoi(string(val[ind+1:])) + if err != nil { + return WrapErr(err) + } + var offset int + if string(val[0]) == "-" { + offset = hr*60 - min + } else { + offset = hr*60 + min + } + secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds()) + timezone := time.FixedZone("", secondsEastOfUTC) + return visitor(t.In(timezone)) + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel new file mode 100644 index 0000000000..b19eb8301e --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "comparer.go", + "container.go", + "field_tester.go", + "indexer.go", + "iterator.go", + "lister.go", + "mapper.go", + "matcher.go", + "math.go", + "receiver.go", + "sizer.go", + "traits.go", + "zeroer.go", + ], + importpath = "github.com/google/cel-go/common/types/traits", + deps = [ + "//common/types/ref:go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/comparer.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/comparer.go new file mode 100644 index 0000000000..b531d9ae2b --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/comparer.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// Comparer interface for ordering comparisons between values in order to +// support '<', '<=', '>=', '>' overloads. +type Comparer interface { + // Compare this value to the input other value, returning an Int: + // + // this < other -> Int(-1) + // this == other -> Int(0) + // this > other -> Int(1) + // + // If the comparison cannot be made or is not supported, an error should + // be returned. 
+ Compare(other ref.Val) ref.Val +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/container.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/container.go new file mode 100644 index 0000000000..cf5c621ae9 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/container.go @@ -0,0 +1,23 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import "github.com/google/cel-go/common/types/ref" + +// Container interface which permits containment tests such as 'a in b'. +type Container interface { + // Contains returns true if the value exists within the object. + Contains(value ref.Val) ref.Val +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/field_tester.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/field_tester.go new file mode 100644 index 0000000000..816a956523 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/field_tester.go @@ -0,0 +1,30 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// FieldTester indicates if a defined field on an object type is set to a +// non-default value. +// +// For use with the `has()` macro. +type FieldTester interface { + // IsSet returns true if the field is defined and set to a non-default + // value. The method will return false if defined and not set, and an error + // if the field is not defined. + IsSet(field ref.Val) ref.Val +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/indexer.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/indexer.go new file mode 100644 index 0000000000..662c6836c3 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/indexer.go @@ -0,0 +1,25 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// Indexer permits random access of elements by index 'a[b()]'. +type Indexer interface { + // Get the value at the specified index or error. + Get(index ref.Val) ref.Val +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/iterator.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/iterator.go new file mode 100644 index 0000000000..91c10f08fc --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/iterator.go @@ -0,0 +1,49 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// Iterable aggregate types permit traversal over their elements. +type Iterable interface { + // Iterator returns a new iterator view of the struct. + Iterator() Iterator +} + +// Iterator permits safe traversal over the contents of an aggregate type. +type Iterator interface { + ref.Val + + // HasNext returns true if there are unvisited elements in the Iterator. + HasNext() ref.Val + + // Next returns the next element. + Next() ref.Val +} + +// Foldable aggregate types support iteration over (key, value) or (index, value) pairs. +type Foldable interface { + // Fold invokes the Folder.FoldEntry for all entries in the type + Fold(Folder) +} + +// Folder performs a fold on a given entry and indicates whether to continue folding. +type Folder interface { + // FoldEntry indicates the key, value pair associated with the entry. + // If the output is true, continue folding. Otherwise, terminate the fold. + FoldEntry(key, val any) bool +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/lister.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/lister.go new file mode 100644 index 0000000000..e54781a602 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/lister.go @@ -0,0 +1,36 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import "github.com/google/cel-go/common/types/ref" + +// Lister interface which aggregates the traits of a list. +type Lister interface { + ref.Val + Adder + Container + Indexer + Iterable + Sizer +} + +// MutableLister interface which emits an immutable result after an intermediate computation. 
+//
+// Note, this interface is intended only to be used within Comprehensions where the mutable
+// value is not directly observable within the user-authored CEL expression.
+type MutableLister interface {
+ Lister
+ ToImmutableList() Lister
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/mapper.go
new file mode 100644
index 0000000000..d13333f3f6
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/mapper.go
@@ -0,0 +1,48 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Mapper interface which aggregates the traits of a map.
+type Mapper interface {
+ ref.Val
+ Container
+ Indexer
+ Iterable
+ Sizer
+
+ // Find returns a value, if one exists, for the input key.
+ //
+ // If the key is not found the function returns (nil, false).
+ // If the input key is not valid for the map, or is Err or Unknown, the function returns
+ // (Unknown|Err, false).
+ Find(key ref.Val) (ref.Val, bool)
+}
+
+// MutableMapper interface which emits an immutable result after an intermediate computation.
+//
+// Note, this interface is intended only to be used within Comprehensions where the mutable
+// value is not directly observable within the user-authored CEL expression.
+type MutableMapper interface {
+ Mapper
+
+ // Insert a key, value pair into the map, returning the map if the insert is successful
+ // and an error if the key already exists in the mutable map.
+ Insert(k, v ref.Val) ref.Val
+
+ // ToImmutableMap converts a mutable map into an immutable map.
+ ToImmutableMap() Mapper
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/matcher.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/matcher.go
new file mode 100644
index 0000000000..085dc94ff4
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/matcher.go
@@ -0,0 +1,23 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Matcher interface for supporting 'matches()' overloads.
+type Matcher interface {
+ // Match returns true if the pattern matches the current value.
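+ // For the String type in the types package, the pattern is treated as a
+ // RE2 regular expression via Go's regexp package.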
+ Match(pattern ref.Val) ref.Val +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/math.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/math.go new file mode 100644 index 0000000000..86d5b9137e --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/math.go @@ -0,0 +1,62 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import "github.com/google/cel-go/common/types/ref" + +// Adder interface to support '+' operator overloads. +type Adder interface { + // Add returns a combination of the current value and other value. + // + // If the other value is an unsupported type, an error is returned. + Add(other ref.Val) ref.Val +} + +// Divider interface to support '/' operator overloads. +type Divider interface { + // Divide returns the result of dividing the current value by the input + // denominator. + // + // A denominator value of zero results in an error. + Divide(denominator ref.Val) ref.Val +} + +// Modder interface to support '%' operator overloads. +type Modder interface { + // Modulo returns the result of taking the modulus of the current value + // by the denominator. + // + // A denominator value of zero results in an error. + Modulo(denominator ref.Val) ref.Val +} + +// Multiplier interface to support '*' operator overloads. +type Multiplier interface { + // Multiply returns the result of multiplying the current and input value. + Multiply(other ref.Val) ref.Val +} + +// Negater interface to support unary '-' and '!' operator overloads. +type Negater interface { + // Negate returns the complement of the current value. + Negate() ref.Val +} + +// Subtractor interface to support binary '-' operator overloads. +type Subtractor interface { + // Subtract returns the result of subtracting the input from the current + // value. + Subtract(subtrahend ref.Val) ref.Val +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/receiver.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/receiver.go new file mode 100644 index 0000000000..8f41db45e8 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/receiver.go @@ -0,0 +1,24 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import "github.com/google/cel-go/common/types/ref" + +// Receiver interface for routing instance method calls within a value. 
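+//
+// For example, the String and Timestamp types in the types package implement
+// Receiver to dispatch calls such as contains() or getHours().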
+type Receiver interface { + // Receive accepts a function name, overload id, and arguments and returns + // a value. + Receive(function string, overload string, args []ref.Val) ref.Val +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/sizer.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/sizer.go new file mode 100644 index 0000000000..b80d25137a --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/sizer.go @@ -0,0 +1,25 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// Sizer interface for supporting 'size()' overloads. +type Sizer interface { + // Size returns the number of elements or length of the value. + Size() ref.Val +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/traits.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/traits.go new file mode 100644 index 0000000000..51a09df564 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/traits.go @@ -0,0 +1,79 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package traits defines interfaces that a type may implement to participate +// in operator overloads and function dispatch. +package traits + +const ( + // AdderType types provide a '+' operator overload. + AdderType = 1 << iota + + // ComparerType types support ordering comparisons '<', '<=', '>', '>='. + ComparerType + + // ContainerType types support 'in' operations. + ContainerType + + // DividerType types support '/' operations. + DividerType + + // FieldTesterType types support the detection of field value presence. + FieldTesterType + + // IndexerType types support index access with dynamic values. + IndexerType + + // IterableType types can be iterated over in comprehensions. + IterableType + + // IteratorType types support iterator semantics. + IteratorType + + // MatcherType types support pattern matching via 'matches' method. + MatcherType + + // ModderType types support modulus operations '%' + ModderType + + // MultiplierType types support '*' operations. + MultiplierType + + // NegatorType types support either negation via '!' or '-' + NegatorType + + // ReceiverType types support dynamic dispatch to instance methods. + ReceiverType + + // SizerType types support the size() method. + SizerType + + // SubtractorType types support '-' operations. 
+ SubtractorType + + // FoldableType types support comprehensions v2 macros which iterate over (key, value) pairs. + FoldableType +) + +const ( + // ListerType supports a set of traits necessary for list operations. + // + // The ListerType is syntactic sugar and not intended to be a perfect reflection of all List operators. + ListerType = AdderType | ContainerType | IndexerType | IterableType | SizerType + + // MapperType supports a set of traits necessary for map operations. + // + // The MapperType is syntactic sugar and not intended to be a perfect reflection of all Map operators. + MapperType = ContainerType | IndexerType | IterableType | SizerType +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/traits/zeroer.go b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/zeroer.go new file mode 100644 index 0000000000..0b7c830a24 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/traits/zeroer.go @@ -0,0 +1,21 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +// Zeroer interface for testing whether a CEL value is a zero value for its type. +type Zeroer interface { + // IsZeroValue indicates whether the object is the zero value for the type. + IsZeroValue() bool +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/types.go b/hack/tools/vendor/github.com/google/cel-go/common/types/types.go new file mode 100644 index 0000000000..1c5b6c40c3 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/types.go @@ -0,0 +1,864 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "strings" + + "google.golang.org/protobuf/proto" + + chkdecls "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + + celpb "cel.dev/expr" + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +// Kind indicates a CEL type's kind which is used to differentiate quickly between simple +// and complex types. +type Kind uint + +const ( + // UnspecifiedKind is returned when the type is nil or its kind is not specified. + UnspecifiedKind Kind = iota + + // DynKind represents a dynamic type. This kind only exists at type-check time. + DynKind + + // AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time. 
+	// Prefer DynKind to AnyKind as AnyKind has a specific meaning which is based on protobuf
+	// well-known types.
+	AnyKind
+
+	// BoolKind represents a boolean type.
+	BoolKind
+
+	// BytesKind represents a bytes type.
+	BytesKind
+
+	// DoubleKind represents a double type.
+	DoubleKind
+
+	// DurationKind represents a CEL duration type.
+	DurationKind
+
+	// ErrorKind represents a CEL error type.
+	ErrorKind
+
+	// IntKind represents an integer type.
+	IntKind
+
+	// ListKind represents a list type.
+	ListKind
+
+	// MapKind represents a map type.
+	MapKind
+
+	// NullTypeKind represents a null type.
+	NullTypeKind
+
+	// OpaqueKind represents an abstract type which has no accessible fields.
+	OpaqueKind
+
+	// StringKind represents a string type.
+	StringKind
+
+	// StructKind represents a structured object with typed fields.
+	StructKind
+
+	// TimestampKind represents a CEL time type.
+	TimestampKind
+
+	// TypeKind represents the CEL type.
+	TypeKind
+
+	// TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible.
+	TypeParamKind
+
+	// UintKind represents a uint type.
+	UintKind
+
+	// UnknownKind represents an unknown value type.
+	UnknownKind
+)
+
+var (
+	// AnyType represents the google.protobuf.Any type.
+	AnyType = &Type{
+		kind:            AnyKind,
+		runtimeTypeName: "google.protobuf.Any",
+		traitMask: traits.FieldTesterType |
+			traits.IndexerType,
+	}
+	// BoolType represents the bool type.
+	BoolType = &Type{
+		kind:            BoolKind,
+		runtimeTypeName: "bool",
+		traitMask: traits.ComparerType |
+			traits.NegatorType,
+	}
+	// BytesType represents the bytes type.
+	BytesType = &Type{
+		kind:            BytesKind,
+		runtimeTypeName: "bytes",
+		traitMask: traits.AdderType |
+			traits.ComparerType |
+			traits.SizerType,
+	}
+	// DoubleType represents the double type.
+	DoubleType = &Type{
+		kind:            DoubleKind,
+		runtimeTypeName: "double",
+		traitMask: traits.AdderType |
+			traits.ComparerType |
+			traits.DividerType |
+			traits.MultiplierType |
+			traits.NegatorType |
+			traits.SubtractorType,
+	}
+	// DurationType represents the CEL duration type.
+	DurationType = &Type{
+		kind:            DurationKind,
+		runtimeTypeName: "google.protobuf.Duration",
+		traitMask: traits.AdderType |
+			traits.ComparerType |
+			traits.NegatorType |
+			traits.ReceiverType |
+			traits.SubtractorType,
+	}
+	// DynType represents a dynamic CEL type whose type will be determined at runtime from context.
+	DynType = &Type{
+		kind:            DynKind,
+		runtimeTypeName: "dyn",
+	}
+	// ErrorType represents a CEL error value.
+	ErrorType = &Type{
+		kind:            ErrorKind,
+		runtimeTypeName: "error",
+	}
+	// IntType represents the int type.
+	IntType = &Type{
+		kind:            IntKind,
+		runtimeTypeName: "int",
+		traitMask: traits.AdderType |
+			traits.ComparerType |
+			traits.DividerType |
+			traits.ModderType |
+			traits.MultiplierType |
+			traits.NegatorType |
+			traits.SubtractorType,
+	}
+	// ListType represents the runtime list type.
+	ListType = NewListType(nil)
+	// MapType represents the runtime map type.
+	MapType = NewMapType(nil, nil)
+	// NullType represents the type of a null value.
+	NullType = &Type{
+		kind:            NullTypeKind,
+		runtimeTypeName: "null_type",
+	}
+	// StringType represents the string type.
+	StringType = &Type{
+		kind:            StringKind,
+		runtimeTypeName: "string",
+		traitMask: traits.AdderType |
+			traits.ComparerType |
+			traits.MatcherType |
+			traits.ReceiverType |
+			traits.SizerType,
+	}
+	// TimestampType represents the time type.
+ TimestampType = &Type{ + kind: TimestampKind, + runtimeTypeName: "google.protobuf.Timestamp", + traitMask: traits.AdderType | + traits.ComparerType | + traits.ReceiverType | + traits.SubtractorType, + } + // TypeType represents a CEL type + TypeType = &Type{ + kind: TypeKind, + runtimeTypeName: "type", + } + // UintType represents a uint type. + UintType = &Type{ + kind: UintKind, + runtimeTypeName: "uint", + traitMask: traits.AdderType | + traits.ComparerType | + traits.DividerType | + traits.ModderType | + traits.MultiplierType | + traits.SubtractorType, + } + // UnknownType represents an unknown value type. + UnknownType = &Type{ + kind: UnknownKind, + runtimeTypeName: "unknown", + } +) + +var _ ref.Type = &Type{} +var _ ref.Val = &Type{} + +// Type holds a reference to a runtime type with an optional type-checked set of type parameters. +type Type struct { + // kind indicates general category of the type. + kind Kind + + // parameters holds the optional type-checked set of type Parameters that are used during static analysis. + parameters []*Type + + // runtimeTypeName indicates the runtime type name of the type. + runtimeTypeName string + + // isAssignableType function determines whether one type is assignable to this type. + // A nil value for the isAssignableType function falls back to equality of kind, runtimeType, and parameters. + isAssignableType func(other *Type) bool + + // isAssignableRuntimeType function determines whether the runtime type (with erasure) is assignable to this type. + // A nil value for the isAssignableRuntimeType function falls back to the equality of the type or type name. + isAssignableRuntimeType func(other ref.Val) bool + + // traitMask is a mask of flags which indicate the capabilities of the type. + traitMask int +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (t *Type) ConvertToNative(typeDesc reflect.Type) (any, error) { + return nil, fmt.Errorf("type conversion not supported for 'type'") +} + +// ConvertToType implements ref.Val.ConvertToType. +func (t *Type) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case TypeType: + return TypeType + case StringType: + return String(t.TypeName()) + } + return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal) +} + +// Equal indicates whether two types have the same runtime type name. +// +// The name Equal is a bit of a misnomer, but for historical reasons, this is the +// runtime behavior. For a more accurate definition see IsType(). +func (t *Type) Equal(other ref.Val) ref.Val { + otherType, ok := other.(ref.Type) + return Bool(ok && t.TypeName() == otherType.TypeName()) +} + +// HasTrait implements the ref.Type interface method. +func (t *Type) HasTrait(trait int) bool { + return trait&t.traitMask == trait +} + +// IsExactType indicates whether the two types are exactly the same. This check also verifies type parameter type names. +func (t *Type) IsExactType(other *Type) bool { + return t.isTypeInternal(other, true) +} + +// IsEquivalentType indicates whether two types are equivalent. This check ignores type parameter type names. +func (t *Type) IsEquivalentType(other *Type) bool { + return t.isTypeInternal(other, false) +} + +// Kind indicates general category of the type. +func (t *Type) Kind() Kind { + if t == nil { + return UnspecifiedKind + } + return t.kind +} + +// isTypeInternal checks whether the two types are equivalent or exactly the same based on the checkTypeParamName flag. 
+func (t *Type) isTypeInternal(other *Type, checkTypeParamName bool) bool { + if t == nil { + return false + } + if t == other { + return true + } + if t.Kind() != other.Kind() || len(t.Parameters()) != len(other.Parameters()) { + return false + } + if (checkTypeParamName || t.Kind() != TypeParamKind) && t.TypeName() != other.TypeName() { + return false + } + for i, p := range t.Parameters() { + if !p.isTypeInternal(other.Parameters()[i], checkTypeParamName) { + return false + } + } + return true +} + +// IsAssignableType determines whether the current type is type-check assignable from the input fromType. +func (t *Type) IsAssignableType(fromType *Type) bool { + if t == nil { + return false + } + if t.isAssignableType != nil { + return t.isAssignableType(fromType) + } + return t.defaultIsAssignableType(fromType) +} + +// IsAssignableRuntimeType determines whether the current type is runtime assignable from the input runtimeType. +// +// At runtime, parameterized types are erased and so a function which type-checks to support a map(string, string) +// will have a runtime assignable type of a map. +func (t *Type) IsAssignableRuntimeType(val ref.Val) bool { + if t == nil { + return false + } + if t.isAssignableRuntimeType != nil { + return t.isAssignableRuntimeType(val) + } + return t.defaultIsAssignableRuntimeType(val) +} + +// Parameters returns the list of type parameters if set. +// +// For ListKind, Parameters()[0] represents the list element type +// For MapKind, Parameters()[0] represents the map key type, and Parameters()[1] represents the map +// value type. +func (t *Type) Parameters() []*Type { + if t == nil { + return emptyParams + } + return t.parameters +} + +// DeclaredTypeName indicates the fully qualified and parameterized type-check type name. +func (t *Type) DeclaredTypeName() string { + // if the type itself is neither null, nor dyn, but is assignable to null, then it's a wrapper type. + if t.Kind() != NullTypeKind && !t.isDyn() && t.IsAssignableType(NullType) { + return fmt.Sprintf("wrapper(%s)", t.TypeName()) + } + return t.TypeName() +} + +// Type implements the ref.Val interface method. +func (t *Type) Type() ref.Type { + return TypeType +} + +// Value implements the ref.Val interface method. +func (t *Type) Value() any { + return t.TypeName() +} + +// TypeName returns the type-erased fully qualified runtime type name. +// +// TypeName implements the ref.Type interface method. +func (t *Type) TypeName() string { + if t == nil { + return "" + } + return t.runtimeTypeName +} + +// WithTraits creates a copy of the current Type and sets the trait mask to the traits parameter. +// +// This method should be used with Opaque types where the type acts like a container, e.g. vector. +func (t *Type) WithTraits(traits int) *Type { + if t == nil { + return nil + } + return &Type{ + kind: t.kind, + parameters: t.parameters, + runtimeTypeName: t.runtimeTypeName, + isAssignableType: t.isAssignableType, + isAssignableRuntimeType: t.isAssignableRuntimeType, + traitMask: traits, + } +} + +// String returns a human-readable definition of the type name. +func (t *Type) String() string { + if len(t.Parameters()) == 0 { + return t.DeclaredTypeName() + } + params := make([]string, len(t.Parameters())) + for i, p := range t.Parameters() { + params[i] = p.String() + } + return fmt.Sprintf("%s(%s)", t.DeclaredTypeName(), strings.Join(params, ", ")) +} + +// isDyn indicates whether the type is dynamic in any way. 
+func (t *Type) isDyn() bool {
+	k := t.Kind()
+	return k == DynKind || k == AnyKind || k == TypeParamKind
+}
+
+// defaultIsAssignableType provides the standard definition of what it means for one type to be assignable to another
+// where any of the following may return a true result:
+// - The from types are the same instance
+// - The target type is dynamic
+// - The fromType has the same kind and type name as the target type, and all parameters of the target type
+//
+//	are IsAssignableType() from the parameters of the fromType.
+func (t *Type) defaultIsAssignableType(fromType *Type) bool {
+	if t == fromType || t.isDyn() {
+		return true
+	}
+	if t.Kind() != fromType.Kind() ||
+		t.TypeName() != fromType.TypeName() ||
+		len(t.Parameters()) != len(fromType.Parameters()) {
+		return false
+	}
+	for i, tp := range t.Parameters() {
+		fp := fromType.Parameters()[i]
+		if !tp.IsAssignableType(fp) {
+			return false
+		}
+	}
+	return true
+}
+
+// defaultIsAssignableRuntimeType inspects the type and in the case of list and map elements, the key and element types
+// to determine whether a ref.Val is assignable to the declared type for a function signature.
+func (t *Type) defaultIsAssignableRuntimeType(val ref.Val) bool {
+	valType := val.Type()
+	// If the current type and value type don't agree, then return
+	if !(t.isDyn() || t.TypeName() == valType.TypeName()) {
+		return false
+	}
+	switch t.Kind() {
+	case ListKind:
+		elemType := t.Parameters()[0]
+		l := val.(traits.Lister)
+		if l.Size() == IntZero {
+			return true
+		}
+		it := l.Iterator()
+		elemVal := it.Next()
+		return elemType.IsAssignableRuntimeType(elemVal)
+	case MapKind:
+		keyType := t.Parameters()[0]
+		elemType := t.Parameters()[1]
+		m := val.(traits.Mapper)
+		if m.Size() == IntZero {
+			return true
+		}
+		it := m.Iterator()
+		keyVal := it.Next()
+		elemVal := m.Get(keyVal)
+		return keyType.IsAssignableRuntimeType(keyVal) && elemType.IsAssignableRuntimeType(elemVal)
+	}
+	return true
+}
+
+// NewListType creates an instance of a list type value with the provided element type.
+func NewListType(elemType *Type) *Type {
+	return &Type{
+		kind:            ListKind,
+		parameters:      []*Type{elemType},
+		runtimeTypeName: "list",
+		traitMask: traits.AdderType |
+			traits.ContainerType |
+			traits.IndexerType |
+			traits.IterableType |
+			traits.SizerType,
+	}
+}
+
+// NewMapType creates an instance of a map type value with the provided key and value types.
+func NewMapType(keyType, valueType *Type) *Type {
+	return &Type{
+		kind:            MapKind,
+		parameters:      []*Type{keyType, valueType},
+		runtimeTypeName: "map",
+		traitMask: traits.ContainerType |
+			traits.IndexerType |
+			traits.IterableType |
+			traits.SizerType,
+	}
+}
+
+// NewNullableType creates an instance of a nullable type with the provided wrapped type.
+//
+// Note: only primitive types are supported as wrapped types.
+func NewNullableType(wrapped *Type) *Type {
+	return &Type{
+		kind:            wrapped.Kind(),
+		parameters:      wrapped.Parameters(),
+		runtimeTypeName: wrapped.TypeName(),
+		traitMask:       wrapped.traitMask,
+		isAssignableType: func(other *Type) bool {
+			return NullType.IsAssignableType(other) || wrapped.IsAssignableType(other)
+		},
+		isAssignableRuntimeType: func(other ref.Val) bool {
+			return NullType.IsAssignableRuntimeType(other) || wrapped.IsAssignableRuntimeType(other)
+		},
+	}
+}
+
+// NewOptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
+func NewOptionalType(param *Type) *Type { + return NewOpaqueType("optional_type", param) +} + +// NewOpaqueType creates an abstract parameterized type with a given name. +func NewOpaqueType(name string, params ...*Type) *Type { + return &Type{ + kind: OpaqueKind, + parameters: params, + runtimeTypeName: name, + } +} + +// NewObjectType creates a type reference to an externally defined type, e.g. a protobuf message type. +// +// An object type is assumed to support field presence testing and field indexing. Additionally, the +// type may also indicate additional traits through the use of the optional traits vararg argument. +func NewObjectType(typeName string, traits ...int) *Type { + // Function sanitizes object types on the fly + if wkt, found := checkedWellKnowns[typeName]; found { + return wkt + } + traitMask := 0 + for _, trait := range traits { + traitMask |= trait + } + return &Type{ + kind: StructKind, + parameters: emptyParams, + runtimeTypeName: typeName, + traitMask: structTypeTraitMask | traitMask, + } +} + +// NewObjectTypeValue creates a type reference to an externally defined type. +// +// Deprecated: use cel.ObjectType(typeName) +func NewObjectTypeValue(typeName string) *Type { + return NewObjectType(typeName) +} + +// NewTypeValue creates an opaque type which has a set of optional type traits as defined in +// the common/types/traits package. +// +// Deprecated: use cel.ObjectType(typeName, traits) +func NewTypeValue(typeName string, traits ...int) *Type { + traitMask := 0 + for _, trait := range traits { + traitMask |= trait + } + return &Type{ + kind: StructKind, + parameters: emptyParams, + runtimeTypeName: typeName, + traitMask: traitMask, + } +} + +// NewTypeParamType creates a parameterized type instance. +func NewTypeParamType(paramName string) *Type { + return &Type{ + kind: TypeParamKind, + runtimeTypeName: paramName, + } +} + +// NewTypeTypeWithParam creates a type with a type parameter. +// Used for type-checking purposes, but equivalent to TypeType otherwise. +func NewTypeTypeWithParam(param *Type) *Type { + return &Type{ + kind: TypeKind, + runtimeTypeName: "type", + parameters: []*Type{param}, + } +} + +// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation. 
+func TypeToExprType(t *Type) (*exprpb.Type, error) { + switch t.Kind() { + case AnyKind: + return chkdecls.Any, nil + case BoolKind: + return maybeWrapper(t, chkdecls.Bool), nil + case BytesKind: + return maybeWrapper(t, chkdecls.Bytes), nil + case DoubleKind: + return maybeWrapper(t, chkdecls.Double), nil + case DurationKind: + return chkdecls.Duration, nil + case DynKind: + return chkdecls.Dyn, nil + case ErrorKind: + return chkdecls.Error, nil + case IntKind: + return maybeWrapper(t, chkdecls.Int), nil + case ListKind: + if len(t.Parameters()) != 1 { + return nil, fmt.Errorf("invalid list, got %d parameters, wanted one", len(t.Parameters())) + } + et, err := TypeToExprType(t.Parameters()[0]) + if err != nil { + return nil, err + } + return chkdecls.NewListType(et), nil + case MapKind: + if len(t.Parameters()) != 2 { + return nil, fmt.Errorf("invalid map, got %d parameters, wanted two", len(t.Parameters())) + } + kt, err := TypeToExprType(t.Parameters()[0]) + if err != nil { + return nil, err + } + vt, err := TypeToExprType(t.Parameters()[1]) + if err != nil { + return nil, err + } + return chkdecls.NewMapType(kt, vt), nil + case NullTypeKind: + return chkdecls.Null, nil + case OpaqueKind: + params := make([]*exprpb.Type, len(t.Parameters())) + for i, p := range t.Parameters() { + pt, err := TypeToExprType(p) + if err != nil { + return nil, err + } + params[i] = pt + } + return chkdecls.NewAbstractType(t.TypeName(), params...), nil + case StringKind: + return maybeWrapper(t, chkdecls.String), nil + case StructKind: + return chkdecls.NewObjectType(t.TypeName()), nil + case TimestampKind: + return chkdecls.Timestamp, nil + case TypeParamKind: + return chkdecls.NewTypeParamType(t.TypeName()), nil + case TypeKind: + if len(t.Parameters()) == 1 { + p, err := TypeToExprType(t.Parameters()[0]) + if err != nil { + return nil, err + } + return chkdecls.NewTypeType(p), nil + } + return chkdecls.NewTypeType(nil), nil + case UintKind: + return maybeWrapper(t, chkdecls.Uint), nil + } + return nil, fmt.Errorf("missing type conversion to proto: %v", t) +} + +// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation. +func ExprTypeToType(t *exprpb.Type) (*Type, error) { + return AlphaProtoAsType(t) +} + +// AlphaProtoAsType converts a CEL v1alpha1.Type protobuf type to a CEL-native type representation. +func AlphaProtoAsType(t *exprpb.Type) (*Type, error) { + canonical := &celpb.Type{} + if err := convertProto(t, canonical); err != nil { + return nil, err + } + return ProtoAsType(canonical) +} + +// ProtoAsType converts a canonical CEL celpb.Type protobuf type to a CEL-native type representation. 
+func ProtoAsType(t *celpb.Type) (*Type, error) { + switch t.GetTypeKind().(type) { + case *celpb.Type_Dyn: + return DynType, nil + case *celpb.Type_AbstractType_: + paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes())) + for i, p := range t.GetAbstractType().GetParameterTypes() { + pt, err := ProtoAsType(p) + if err != nil { + return nil, err + } + paramTypes[i] = pt + } + return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil + case *celpb.Type_ListType_: + et, err := ProtoAsType(t.GetListType().GetElemType()) + if err != nil { + return nil, err + } + return NewListType(et), nil + case *celpb.Type_MapType_: + kt, err := ProtoAsType(t.GetMapType().GetKeyType()) + if err != nil { + return nil, err + } + vt, err := ProtoAsType(t.GetMapType().GetValueType()) + if err != nil { + return nil, err + } + return NewMapType(kt, vt), nil + case *celpb.Type_MessageType: + return NewObjectType(t.GetMessageType()), nil + case *celpb.Type_Null: + return NullType, nil + case *celpb.Type_Primitive: + switch t.GetPrimitive() { + case celpb.Type_BOOL: + return BoolType, nil + case celpb.Type_BYTES: + return BytesType, nil + case celpb.Type_DOUBLE: + return DoubleType, nil + case celpb.Type_INT64: + return IntType, nil + case celpb.Type_STRING: + return StringType, nil + case celpb.Type_UINT64: + return UintType, nil + default: + return nil, fmt.Errorf("unsupported primitive type: %v", t) + } + case *celpb.Type_TypeParam: + return NewTypeParamType(t.GetTypeParam()), nil + case *celpb.Type_Type: + if t.GetType().GetTypeKind() != nil { + p, err := ProtoAsType(t.GetType()) + if err != nil { + return nil, err + } + return NewTypeTypeWithParam(p), nil + } + return TypeType, nil + case *celpb.Type_WellKnown: + switch t.GetWellKnown() { + case celpb.Type_ANY: + return AnyType, nil + case celpb.Type_DURATION: + return DurationType, nil + case celpb.Type_TIMESTAMP: + return TimestampType, nil + default: + return nil, fmt.Errorf("unsupported well-known type: %v", t) + } + case *celpb.Type_Wrapper: + t, err := ProtoAsType(&celpb.Type{TypeKind: &celpb.Type_Primitive{Primitive: t.GetWrapper()}}) + if err != nil { + return nil, err + } + return NewNullableType(t), nil + case *celpb.Type_Error: + return ErrorType, nil + default: + return nil, fmt.Errorf("unsupported type: %v", t) + } +} + +func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type { + if t.IsAssignableType(NullType) { + return chkdecls.NewWrapperType(pbType) + } + return pbType +} + +func maybeForeignType(t ref.Type) *Type { + if celType, ok := t.(*Type); ok { + return celType + } + // Inspect the incoming type to determine its traits. The assumption will be that the incoming + // type does not have any field values; however, if the trait mask indicates that field testing + // and indexing are supported, the foreign type is marked as a struct. + traitMask := 0 + for _, trait := range allTraits { + if t.HasTrait(trait) { + traitMask |= trait + } + } + // Treat the value like a struct. If it has no fields, this is harmless to denote the type + // as such since it basically becomes an opaque type by convention. 
+ return NewObjectType(t.TypeName(), traitMask) +} + +func convertProto(src, dst proto.Message) error { + pb, err := proto.Marshal(src) + if err != nil { + return err + } + err = proto.Unmarshal(pb, dst) + return err +} + +func primitiveType(primitive celpb.Type_PrimitiveType) *celpb.Type { + return &celpb.Type{ + TypeKind: &celpb.Type_Primitive{ + Primitive: primitive, + }, + } +} + +var ( + checkedWellKnowns = map[string]*Type{ + // Wrapper types. + "google.protobuf.BoolValue": NewNullableType(BoolType), + "google.protobuf.BytesValue": NewNullableType(BytesType), + "google.protobuf.DoubleValue": NewNullableType(DoubleType), + "google.protobuf.FloatValue": NewNullableType(DoubleType), + "google.protobuf.Int64Value": NewNullableType(IntType), + "google.protobuf.Int32Value": NewNullableType(IntType), + "google.protobuf.UInt64Value": NewNullableType(UintType), + "google.protobuf.UInt32Value": NewNullableType(UintType), + "google.protobuf.StringValue": NewNullableType(StringType), + // Well-known types. + "google.protobuf.Any": AnyType, + "google.protobuf.Duration": DurationType, + "google.protobuf.Timestamp": TimestampType, + // Json types. + "google.protobuf.ListValue": NewListType(DynType), + "google.protobuf.NullValue": NullType, + "google.protobuf.Struct": NewMapType(StringType, DynType), + "google.protobuf.Value": DynType, + } + + emptyParams = []*Type{} + + allTraits = []int{ + traits.AdderType, + traits.ComparerType, + traits.ContainerType, + traits.DividerType, + traits.FieldTesterType, + traits.IndexerType, + traits.IterableType, + traits.IteratorType, + traits.MatcherType, + traits.ModderType, + traits.MultiplierType, + traits.NegatorType, + traits.ReceiverType, + traits.SizerType, + traits.SubtractorType, + } + + structTypeTraitMask = traits.FieldTesterType | traits.IndexerType + + boolType = primitiveType(celpb.Type_BOOL) + bytesType = primitiveType(celpb.Type_BYTES) + doubleType = primitiveType(celpb.Type_DOUBLE) + intType = primitiveType(celpb.Type_INT64) + stringType = primitiveType(celpb.Type_STRING) + uintType = primitiveType(celpb.Type_UINT64) +) diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/uint.go b/hack/tools/vendor/github.com/google/cel-go/common/types/uint.go new file mode 100644 index 0000000000..6d74f30d88 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/uint.go @@ -0,0 +1,256 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "math" + "reflect" + "strconv" + + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +// Uint type implementation which supports comparison and math operators. 
+type Uint uint64 + +var ( + uint32WrapperType = reflect.TypeOf(&wrapperspb.UInt32Value{}) + + uint64WrapperType = reflect.TypeOf(&wrapperspb.UInt64Value{}) +) + +// Uint constants +const ( + uintZero = Uint(0) +) + +// Add implements traits.Adder.Add. +func (i Uint) Add(other ref.Val) ref.Val { + otherUint, ok := other.(Uint) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + val, err := addUint64Checked(uint64(i), uint64(otherUint)) + if err != nil { + return WrapErr(err) + } + return Uint(val) +} + +// Compare implements traits.Comparer.Compare. +func (i Uint) Compare(other ref.Val) ref.Val { + switch ov := other.(type) { + case Double: + if math.IsNaN(float64(ov)) { + return NewErr("NaN values cannot be ordered") + } + return compareUintDouble(i, ov) + case Int: + return compareUintInt(i, ov) + case Uint: + return compareUint(i, ov) + default: + return MaybeNoSuchOverloadErr(other) + } +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (i Uint) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.Uint, reflect.Uint32: + v, err := uint64ToUint32Checked(uint64(i)) + if err != nil { + return 0, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Uint8: + v, err := uint64ToUint8Checked(uint64(i)) + if err != nil { + return 0, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Uint16: + v, err := uint64ToUint16Checked(uint64(i)) + if err != nil { + return 0, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Uint64: + return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Primitives must be wrapped before being set on an Any field. + return anypb.New(wrapperspb.UInt64(uint64(i))) + case jsonValueType: + // JSON can accurately represent 32-bit uints as floating point values. + if i.isJSONSafe() { + return structpb.NewNumberValue(float64(i)), nil + } + // Proto3 to JSON conversion requires string-formatted uint64 values + // since the conversion to floating point would result in truncation. + return structpb.NewStringValue(strconv.FormatUint(uint64(i), 10)), nil + case uint32WrapperType: + // Convert the value to a wrapperspb.UInt32Value, error on overflow. + v, err := uint64ToUint32Checked(uint64(i)) + if err != nil { + return 0, err + } + return wrapperspb.UInt32(v), nil + case uint64WrapperType: + // Convert the value to a wrapperspb.UInt64Value. + return wrapperspb.UInt64(uint64(i)), nil + } + switch typeDesc.Elem().Kind() { + case reflect.Uint32: + v, err := uint64ToUint32Checked(uint64(i)) + if err != nil { + return 0, err + } + p := reflect.New(typeDesc.Elem()) + p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) + return p.Interface(), nil + case reflect.Uint64: + v := uint64(i) + p := reflect.New(typeDesc.Elem()) + p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) + return p.Interface(), nil + } + case reflect.Interface: + iv := i.Value() + if reflect.TypeOf(iv).Implements(typeDesc) { + return iv, nil + } + if reflect.TypeOf(i).Implements(typeDesc) { + return i, nil + } + } + return nil, fmt.Errorf("unsupported type conversion from 'uint' to %v", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. 
+func (i Uint) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case IntType: + v, err := uint64ToInt64Checked(uint64(i)) + if err != nil { + return WrapErr(err) + } + return Int(v) + case UintType: + return i + case DoubleType: + return Double(i) + case StringType: + return String(fmt.Sprintf("%d", uint64(i))) + case TypeType: + return UintType + } + return NewErr("type conversion error from '%s' to '%s'", UintType, typeVal) +} + +// Divide implements traits.Divider.Divide. +func (i Uint) Divide(other ref.Val) ref.Val { + otherUint, ok := other.(Uint) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + div, err := divideUint64Checked(uint64(i), uint64(otherUint)) + if err != nil { + return WrapErr(err) + } + return Uint(div) +} + +// Equal implements ref.Val.Equal. +func (i Uint) Equal(other ref.Val) ref.Val { + switch ov := other.(type) { + case Double: + if math.IsNaN(float64(ov)) { + return False + } + return Bool(compareUintDouble(i, ov) == 0) + case Int: + return Bool(compareUintInt(i, ov) == 0) + case Uint: + return Bool(i == ov) + default: + return False + } +} + +// IsZeroValue returns true if the uint is zero. +func (i Uint) IsZeroValue() bool { + return i == 0 +} + +// Modulo implements traits.Modder.Modulo. +func (i Uint) Modulo(other ref.Val) ref.Val { + otherUint, ok := other.(Uint) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + mod, err := moduloUint64Checked(uint64(i), uint64(otherUint)) + if err != nil { + return WrapErr(err) + } + return Uint(mod) +} + +// Multiply implements traits.Multiplier.Multiply. +func (i Uint) Multiply(other ref.Val) ref.Val { + otherUint, ok := other.(Uint) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + val, err := multiplyUint64Checked(uint64(i), uint64(otherUint)) + if err != nil { + return WrapErr(err) + } + return Uint(val) +} + +// Subtract implements traits.Subtractor.Subtract. +func (i Uint) Subtract(subtrahend ref.Val) ref.Val { + subtraUint, ok := subtrahend.(Uint) + if !ok { + return MaybeNoSuchOverloadErr(subtrahend) + } + val, err := subtractUint64Checked(uint64(i), uint64(subtraUint)) + if err != nil { + return WrapErr(err) + } + return Uint(val) +} + +// Type implements ref.Val.Type. +func (i Uint) Type() ref.Type { + return UintType +} + +// Value implements ref.Val.Value. +func (i Uint) Value() any { + return uint64(i) +} + +// isJSONSafe indicates whether the uint is safely representable as a floating point value in JSON. +func (i Uint) isJSONSafe() bool { + return i <= maxIntJSON +} diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/unknown.go b/hack/tools/vendor/github.com/google/cel-go/common/types/unknown.go new file mode 100644 index 0000000000..9dd2b25794 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/unknown.go @@ -0,0 +1,326 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package types
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strings"
+	"unicode"
+
+	"github.com/google/cel-go/common/types/ref"
+)
+
+var (
+	unspecifiedAttribute = &AttributeTrail{qualifierPath: []any{}}
+)
+
+// NewAttributeTrail creates a new simple attribute from a variable name.
+func NewAttributeTrail(variable string) *AttributeTrail {
+	if variable == "" {
+		return unspecifiedAttribute
+	}
+	return &AttributeTrail{variable: variable}
+}
+
+// AttributeTrail specifies a variable with an optional qualifier path. An attribute value is expected to
+// correspond to an AbsoluteAttribute, meaning a field selection which starts with a top-level variable.
+//
+// The qualifier path elements adhere to the AttributeQualifier type constraint.
+type AttributeTrail struct {
+	variable      string
+	qualifierPath []any
+}
+
+// Equal returns whether two attribute values have the same variable name and qualifier paths.
+func (a *AttributeTrail) Equal(other *AttributeTrail) bool {
+	if a.Variable() != other.Variable() || len(a.QualifierPath()) != len(other.QualifierPath()) {
+		return false
+	}
+	for i, q := range a.QualifierPath() {
+		qual := other.QualifierPath()[i]
+		if !qualifiersEqual(q, qual) {
+			return false
+		}
+	}
+	return true
+}
+
+func qualifiersEqual(a, b any) bool {
+	if a == b {
+		return true
+	}
+	switch numA := a.(type) {
+	case int64:
+		numB, ok := b.(uint64)
+		if !ok {
+			return false
+		}
+		return intUintEqual(numA, numB)
+	case uint64:
+		numB, ok := b.(int64)
+		if !ok {
+			return false
+		}
+		return intUintEqual(numB, numA)
+	default:
+		return false
+	}
+}
+
+func intUintEqual(i int64, u uint64) bool {
+	if i < 0 || u > math.MaxInt64 {
+		return false
+	}
+	return i == int64(u)
+}
+
+// Variable returns the variable name associated with the attribute.
+func (a *AttributeTrail) Variable() string {
+	return a.variable
+}
+
+// QualifierPath returns the optional set of qualifying fields or indices applied to the variable.
+func (a *AttributeTrail) QualifierPath() []any {
+	return a.qualifierPath
+}
+
+// String returns the string representation of the Attribute.
+func (a *AttributeTrail) String() string {
+	if a.variable == "" {
+		return "<unspecified>"
+	}
+	var str strings.Builder
+	str.WriteString(a.variable)
+	for _, q := range a.qualifierPath {
+		switch q := q.(type) {
+		case bool, int64:
+			str.WriteString(fmt.Sprintf("[%v]", q))
+		case uint64:
+			str.WriteString(fmt.Sprintf("[%vu]", q))
+		case string:
+			if isIdentifierCharacter(q) {
+				str.WriteString(fmt.Sprintf(".%v", q))
+			} else {
+				str.WriteString(fmt.Sprintf("[%q]", q))
+			}
+		}
+	}
+	return str.String()
+}
+
+func isIdentifierCharacter(str string) bool {
+	for _, c := range str {
+		if unicode.IsLetter(c) || unicode.IsDigit(c) || string(c) == "_" {
+			continue
+		}
+		return false
+	}
+	return true
+}
+
+// AttributeQualifier constrains the possible types which may be used to qualify an attribute.
+type AttributeQualifier interface {
+	bool | int64 | uint64 | string
+}
+
+// QualifyAttribute qualifies an attribute using a valid AttributeQualifier type.
+func QualifyAttribute[T AttributeQualifier](attr *AttributeTrail, qualifier T) *AttributeTrail {
+	attr.qualifierPath = append(attr.qualifierPath, qualifier)
+	return attr
+}
+
+// Unknown type which collects expression ids which caused the current value to become unknown.
+type Unknown struct {
+	attributeTrails map[int64][]*AttributeTrail
+}
+
+// NewUnknown creates a new unknown at a given expression id for an attribute.
+// +// If the attribute is nil, the attribute value will be the `unspecifiedAttribute`. +func NewUnknown(id int64, attr *AttributeTrail) *Unknown { + if attr == nil { + attr = unspecifiedAttribute + } + return &Unknown{ + attributeTrails: map[int64][]*AttributeTrail{id: {attr}}, + } +} + +// IDs returns the set of unknown expression ids contained by this value. +// +// Numeric identifiers are guaranteed to be in sorted order. +func (u *Unknown) IDs() []int64 { + ids := make(int64Slice, len(u.attributeTrails)) + i := 0 + for id := range u.attributeTrails { + ids[i] = id + i++ + } + ids.Sort() + return ids +} + +// GetAttributeTrails returns the attribute trails, if present, missing for a given expression id. +func (u *Unknown) GetAttributeTrails(id int64) ([]*AttributeTrail, bool) { + trails, found := u.attributeTrails[id] + return trails, found +} + +// Contains returns true if the input unknown is a subset of the current unknown. +func (u *Unknown) Contains(other *Unknown) bool { + for id, otherTrails := range other.attributeTrails { + trails, found := u.attributeTrails[id] + if !found || len(otherTrails) != len(trails) { + return false + } + for _, ot := range otherTrails { + found := false + for _, t := range trails { + if t.Equal(ot) { + found = true + break + } + } + if !found { + return false + } + } + } + return true +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (u *Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) { + return u.Value(), nil +} + +// ConvertToType is an identity function since unknown values cannot be modified. +func (u *Unknown) ConvertToType(typeVal ref.Type) ref.Val { + return u +} + +// Equal is an identity function since unknown values cannot be modified. +func (u *Unknown) Equal(other ref.Val) ref.Val { + return u +} + +// String implements the Stringer interface +func (u *Unknown) String() string { + var str strings.Builder + for id, attrs := range u.attributeTrails { + if str.Len() != 0 { + str.WriteString(", ") + } + if len(attrs) == 1 { + str.WriteString(fmt.Sprintf("%v (%d)", attrs[0], id)) + } else { + str.WriteString(fmt.Sprintf("%v (%d)", attrs, id)) + } + } + return str.String() +} + +// Type implements ref.Val.Type. +func (u *Unknown) Type() ref.Type { + return UnknownType +} + +// Value implements ref.Val.Value. +func (u *Unknown) Value() any { + return u +} + +// IsUnknown returns whether the element ref.Val is in instance of *types.Unknown +func IsUnknown(val ref.Val) bool { + switch val.(type) { + case *Unknown: + return true + default: + return false + } +} + +// MaybeMergeUnknowns determines whether an input value and another, possibly nil, unknown will produce +// an unknown result. +// +// If the input `val` is another Unknown, then the result will be the merge of the `val` and the input +// `unk`. If the `val` is not unknown, then the result will depend on whether the input `unk` is nil. +// If both values are non-nil and unknown, then the return value will be a merge of both unknowns. +func MaybeMergeUnknowns(val ref.Val, unk *Unknown) (*Unknown, bool) { + src, isUnk := val.(*Unknown) + if !isUnk { + if unk != nil { + return unk, true + } + return unk, false + } + return MergeUnknowns(src, unk), true +} + +// MergeUnknowns combines two unknown values into a new unknown value. 
+func MergeUnknowns(unk1, unk2 *Unknown) *Unknown { + if unk1 == nil { + return unk2 + } + if unk2 == nil { + return unk1 + } + out := &Unknown{ + attributeTrails: make(map[int64][]*AttributeTrail, len(unk1.attributeTrails)+len(unk2.attributeTrails)), + } + for id, ats := range unk1.attributeTrails { + out.attributeTrails[id] = ats + } + for id, ats := range unk2.attributeTrails { + existing, found := out.attributeTrails[id] + if !found { + out.attributeTrails[id] = ats + continue + } + + for _, at := range ats { + found := false + for _, et := range existing { + if at.Equal(et) { + found = true + break + } + } + if !found { + existing = append(existing, at) + } + } + out.attributeTrails[id] = existing + } + return out +} + +// int64Slice is an implementation of the sort.Interface +type int64Slice []int64 + +// Len returns the number of elements in the slice. +func (x int64Slice) Len() int { return len(x) } + +// Less indicates whether the value at index i is less than the value at index j. +func (x int64Slice) Less(i, j int) bool { return x[i] < x[j] } + +// Swap swaps the values at indices i and j in place. +func (x int64Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +// Sort is a convenience method: x.Sort() calls Sort(x). +func (x int64Slice) Sort() { sort.Sort(x) } diff --git a/hack/tools/vendor/github.com/google/cel-go/common/types/util.go b/hack/tools/vendor/github.com/google/cel-go/common/types/util.go new file mode 100644 index 0000000000..71662eee31 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/common/types/util.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// IsUnknownOrError returns whether the input element ref.Val is an ErrType or UnknownType. +func IsUnknownOrError(val ref.Val) bool { + switch val.(type) { + case *Unknown, *Err: + return true + } + return false +} + +// IsPrimitiveType returns whether the input element ref.Val is a primitive type. +// Note, primitive types do not include well-known types such as Duration and Timestamp. +func IsPrimitiveType(val ref.Val) bool { + switch val.Type() { + case BoolType, BytesType, DoubleType, IntType, StringType, UintType: + return true + } + return false +} + +// Equal returns whether the two ref.Value are heterogeneously equivalent. 
+func Equal(lhs ref.Val, rhs ref.Val) ref.Val {
+	lNull := lhs == NullValue
+	rNull := rhs == NullValue
+	if lNull || rNull {
+		return Bool(lNull == rNull)
+	}
+	return lhs.Equal(rhs)
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/ext/BUILD.bazel
new file mode 100644
index 0000000000..1fece70066
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/ext/BUILD.bazel
@@ -0,0 +1,72 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+    licenses = ["notice"],  # Apache 2.0
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "bindings.go",
+        "encoders.go",
+        "formatting.go",
+        "guards.go",
+        "lists.go",
+        "math.go",
+        "native.go",
+        "protos.go",
+        "sets.go",
+        "strings.go",
+    ],
+    importpath = "github.com/google/cel-go/ext",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//cel:go_default_library",
+        "//checker:go_default_library",
+        "//common/ast:go_default_library",
+        "//common/decls:go_default_library",
+        "//common/overloads:go_default_library",
+        "//common/operators:go_default_library",
+        "//common/types:go_default_library",
+        "//common/types/pb:go_default_library",
+        "//common/types/ref:go_default_library",
+        "//common/types/traits:go_default_library",
+        "//interpreter:go_default_library",
+        "//parser:go_default_library",
+        "@org_golang_google_protobuf//proto:go_default_library",
+        "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+        "@org_golang_google_protobuf//types/known/structpb",
+        "@org_golang_x_text//language:go_default_library",
+        "@org_golang_x_text//message:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = [
+        "encoders_test.go",
+        "lists_test.go",
+        "math_test.go",
+        "native_test.go",
+        "protos_test.go",
+        "sets_test.go",
+        "strings_test.go",
+    ],
+    embed = [
+        ":go_default_library",
+    ],
+    deps = [
+        "//cel:go_default_library",
+        "//checker:go_default_library",
+        "//common/types:go_default_library",
+        "//common/types/ref:go_default_library",
+        "//common/types/traits:go_default_library",
+        "//test:go_default_library",
+        "//test/proto2pb:go_default_library",
+        "//test/proto3pb:go_default_library",
+        "@org_golang_google_protobuf//proto:go_default_library",
+        "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+        "@org_golang_google_protobuf//encoding/protojson:go_default_library",
+    ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/README.md b/hack/tools/vendor/github.com/google/cel-go/ext/README.md
new file mode 100644
index 0000000000..07e544d0dc
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/ext/README.md
@@ -0,0 +1,899 @@
+# Extensions
+
+CEL extensions are a related set of constants, functions, macros, or other
+features which may not be covered by the core CEL spec.
+
+## Bindings
+
+Returns a cel.EnvOption to configure support for local variable bindings
+in expressions.
+
+### Cel.Bind
+
+Binds a simple identifier to an initialization expression which may be used
+in a subsequent result expression. Bindings may also be nested within each
+other.
+
+    cel.bind(<varName>, <initExpr>, <resultExpr>)
+
+Examples:
+
+    cel.bind(a, 'hello',
+        cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello"
+
+    // Avoid a list allocation within the exists comprehension.
+    cel.bind(valid_values, [a, b, c],
+        [d, e, f].exists(elem, elem in valid_values))
+
+Local bindings are not guaranteed to be evaluated before use.
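The vendored README above documents only the CEL-side surface. As a point of reference, a minimal Go sketch of wiring the bindings extension into an evaluation pipeline might look as follows; it assumes cel-go's public `cel` and `ext` packages, and `compileAndEval` is a hypothetical convenience helper (reused by the later sketches), not part of the vendored sources:

    package main

    import (
        "fmt"

        "github.com/google/cel-go/cel"
        "github.com/google/cel-go/common/types/ref"
        "github.com/google/cel-go/ext"
    )

    // compileAndEval builds a CEL environment with one extension option,
    // then compiles and evaluates a single expression with no input variables.
    func compileAndEval(opt cel.EnvOption, expr string) (ref.Val, error) {
        env, err := cel.NewEnv(opt)
        if err != nil {
            return nil, err
        }
        ast, iss := env.Compile(expr)
        if iss != nil && iss.Err() != nil {
            return nil, iss.Err()
        }
        prg, err := env.Program(ast)
        if err != nil {
            return nil, err
        }
        out, _, err := prg.Eval(map[string]any{})
        if err != nil {
            return nil, err
        }
        return out, nil
    }

    func main() {
        // cel.bind() parses only when ext.Bindings() is installed in the env.
        out, err := compileAndEval(ext.Bindings(), `cel.bind(a, 'hello', a + ' ' + a)`)
        if err != nil {
            panic(err)
        }
        fmt.Println(out.Value()) // hello hello
    }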
+
+## Encoders
+
+Encoding utilities for marshalling data into standardized representations.
+
+### Base64.Decode
+
+Decodes base64-encoded string to bytes.
+
+This function will return an error if the string input is not
+base64-encoded.
+
+    base64.decode(<string>) -> <bytes>
+
+Examples:
+
+    base64.decode('aGVsbG8=') // return b'hello'
+    base64.decode('aGVsbG8') // error
+
+### Base64.Encode
+
+Encodes bytes to a base64-encoded string.
+
+    base64.encode(<bytes>) -> <string>
+
+Example:
+
+    base64.encode(b'hello') // return 'aGVsbG8='
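Reusing the hypothetical `compileAndEval` helper from the bindings sketch, the encoder functions can be exercised the same way (`ext.Encoders()` is the cel-go option that registers them):

    // base64.decode yields a CEL bytes value, surfaced in Go as []byte.
    out, err := compileAndEval(ext.Encoders(), `base64.decode('aGVsbG8=')`)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", out.Value()) // hello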
+
+## Math
+
+Math helper macros and functions.
+
+Note, all macros use the 'math' namespace; however, at the time of macro
+expansion the namespace looks just like any other identifier. If you are
+currently using a variable named 'math', the macro will likely work just as
+intended; however, there is some chance for collision.
+
+### Math.Greatest
+
+Returns the greatest valued number present in the arguments to the macro.
+
+Greatest is a variable argument count macro which must take at least one
+argument. Simple numeric and list literals are supported as valid argument
+types; however, other literals will be flagged as errors during macro
+expansion. If the argument expression does not resolve to a numeric or
+list(numeric) type during type-checking, or during runtime then an error
+will be produced. If a list argument is empty, this too will produce an
+error.
+
+    math.greatest(<arg>, ...) -> <double|int|uint>
+
+Examples:
+
+    math.greatest(1) // 1
+    math.greatest(1u, 2u) // 2u
+    math.greatest(-42.0, -21.5, -100.0) // -21.5
+    math.greatest([-42.0, -21.5, -100.0]) // -21.5
+    math.greatest(numbers) // numbers must be list(numeric)
+
+    math.greatest() // parse error
+    math.greatest('string') // parse error
+    math.greatest(a, b) // check-time error if a or b is non-numeric
+    math.greatest(dyn('string')) // runtime error
+
+### Math.Least
+
+Returns the least valued number present in the arguments to the macro.
+
+Least is a variable argument count macro which must take at least one
+argument. Simple numeric and list literals are supported as valid argument
+types; however, other literals will be flagged as errors during macro
+expansion. If the argument expression does not resolve to a numeric or
+list(numeric) type during type-checking, or during runtime then an error
+will be produced. If a list argument is empty, this too will produce an
+error.
+
+    math.least(<arg>, ...) -> <double|int|uint>
+
+Examples:
+
+    math.least(1) // 1
+    math.least(1u, 2u) // 1u
+    math.least(-42.0, -21.5, -100.0) // -100.0
+    math.least([-42.0, -21.5, -100.0]) // -100.0
+    math.least(numbers) // numbers must be list(numeric)
+
+    math.least() // parse error
+    math.least('string') // parse error
+    math.least(a, b) // check-time error if a or b is non-numeric
+    math.least(dyn('string')) // runtime error
+
+### Math.BitOr
+
+Introduced at version: 1
+
+Performs a bitwise-OR operation over two int or uint values.
+
+    math.bitOr(<int>, <int>) -> <int>
+    math.bitOr(<uint>, <uint>) -> <uint>
+
+Examples:
+
+    math.bitOr(1u, 2u) // returns 3u
+    math.bitOr(-2, -4) // returns -2
+
+### Math.BitAnd
+
+Introduced at version: 1
+
+Performs a bitwise-AND operation over two int or uint values.
+
+    math.bitAnd(<int>, <int>) -> <int>
+    math.bitAnd(<uint>, <uint>) -> <uint>
+
+Examples:
+
+    math.bitAnd(3u, 2u) // return 2u
+    math.bitAnd(3, 5) // returns 3
+    math.bitAnd(-3, -5) // returns -7
+
+### Math.BitXor
+
+Introduced at version: 1
+
+    math.bitXor(<int>, <int>) -> <int>
+    math.bitXor(<uint>, <uint>) -> <uint>
+
+Performs a bitwise-XOR operation over two int or uint values.
+
+Examples:
+
+    math.bitXor(3u, 5u) // returns 6u
+    math.bitXor(1, 3) // returns 2
+
+### Math.BitNot
+
+Introduced at version: 1
+
+Function which accepts a single int or uint and performs a bitwise-NOT
+ones-complement of the given binary value.
+
+    math.bitNot(<int>) -> <int>
+    math.bitNot(<uint>) -> <uint>
+
+Examples
+
+    math.bitNot(1) // returns -1
+    math.bitNot(-1) // return 0
+    math.bitNot(0u) // returns 18446744073709551615u
+
+### Math.BitShiftLeft
+
+Introduced at version: 1
+
+Perform a left shift of bits on the first parameter, by the amount of bits
+specified in the second parameter. The first parameter is either a uint or
+an int. The second parameter must be an int.
+
+When the second parameter is 64 or greater, 0 will always be returned
+since the number of bits shifted is greater than or equal to the total bit
+length of the number being shifted. Negative valued bit shifts will result
+in a runtime error.
+
+    math.bitShiftLeft(<int>, <int>) -> <int>
+    math.bitShiftLeft(<uint>, <int>) -> <uint>
+
+Examples
+
+    math.bitShiftLeft(1, 2) // returns 4
+    math.bitShiftLeft(-1, 2) // returns -4
+    math.bitShiftLeft(1u, 2) // return 4u
+    math.bitShiftLeft(1u, 200) // returns 0u
+
+### Math.BitShiftRight
+
+Introduced at version: 1
+
+Perform a right shift of bits on the first parameter, by the amount of bits
+specified in the second parameter. The first parameter is either a uint or
+an int. The second parameter must be an int.
+
+When the second parameter is 64 or greater, 0 will always be returned since
+the number of bits shifted is greater than or equal to the total bit length
+of the number being shifted. Negative valued bit shifts will result in a
+runtime error.
+
+The sign bit extension will not be preserved for this operation: vacant bits
+on the left are filled with 0.
+
+    math.bitShiftRight(<int>, <int>) -> <int>
+    math.bitShiftRight(<uint>, <int>) -> <uint>
+
+Examples
+
+    math.bitShiftRight(1024, 2) // returns 256
+    math.bitShiftRight(1024u, 2) // returns 256u
+    math.bitShiftRight(1024u, 64) // returns 0u
+
+### Math.Ceil
+
+Introduced at version: 1
+
+Compute the ceiling of a double value.
+
+    math.ceil(<double>) -> <double>
+
+Examples:
+
+    math.ceil(1.2) // returns 2.0
+    math.ceil(-1.2) // returns -1.0
+
+### Math.Floor
+
+Introduced at version: 1
+
+Compute the floor of a double value.
+
+    math.floor(<double>) -> <double>
+
+Examples:
+
+    math.floor(1.2) // returns 1.0
+    math.floor(-1.2) // returns -2.0
+
+### Math.Round
+
+Introduced at version: 1
+
+Rounds the double value to the nearest whole number with ties rounding away
+from zero, e.g. 1.5 -> 2.0, -1.5 -> -2.0.
+
+    math.round(<double>) -> <double>
+
+Examples:
+
+    math.round(1.2) // returns 1.0
+    math.round(1.5) // returns 2.0
+    math.round(-1.5) // returns -2.0
+
+### Math.Trunc
+
+Introduced at version: 1
+
+Truncates the fractional portion of the double value.
+
+    math.trunc(<double>) -> <double>
+
+Examples:
+
+    math.trunc(-1.3) // returns -1.0
+    math.trunc(1.3) // returns 1.0
+
+### Math.Abs
+
+Introduced at version: 1
+
+Returns the absolute value of the numeric type provided as input. If the
+value is NaN, the output is NaN. If the input is int64 min, the function
+will result in an overflow error.
+
+    math.abs(<double>) -> <double>
+    math.abs(<int>) -> <int>
+    math.abs(<uint>) -> <uint>
+
+Examples:
+
+    math.abs(-1) // returns 1
+    math.abs(1) // returns 1
+    math.abs(-9223372036854775808) // overflow error
+
+### Math.Sign
+
+Introduced at version: 1
+
+Returns the sign of the numeric type, either -1, 0, 1 as an int, double, or
+uint depending on the overload. For floating point values, if NaN is
+provided as input, the output is also NaN. The implementation does not
+differentiate between positive and negative zero.
+
+    math.sign(<double>) -> <double>
+    math.sign(<int>) -> <int>
+    math.sign(<uint>) -> <uint>
+
+Examples:
+
+    math.sign(-42) // returns -1
+    math.sign(0) // returns 0
+    math.sign(42) // returns 1
+
+### Math.IsInf
+
+Introduced at version: 1
+
+Returns true if the input double value is -Inf or +Inf.
+
+    math.isInf(<double>) -> <bool>
+
+Examples:
+
+    math.isInf(1.0/0.0) // returns true
+    math.isInf(1.2) // returns false
+
+### Math.IsNaN
+
+Introduced at version: 1
+
+Returns true if the input double value is NaN, false otherwise.
+
+    math.isNaN(<double>) -> <bool>
+
+Examples:
+
+    math.isNaN(0.0/0.0) // returns true
+    math.isNaN(1.2) // returns false
+
+### Math.IsFinite
+
+Introduced at version: 1
+
+Returns true if the value is a finite number. Equivalent in behavior to:
+!math.isNaN(double) && !math.isInf(double)
+
+    math.isFinite(<double>) -> <bool>
+
+Examples:
+
+    math.isFinite(0.0/0.0) // returns false
+    math.isFinite(1.2) // returns true
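A corresponding sketch for the math helpers, again via the hypothetical `compileAndEval` helper and cel-go's `ext.Math()` option:

    // math.greatest over int literals type-checks to an int result.
    out, err := compileAndEval(ext.Math(), `math.greatest(1, 2, 3)`)
    if err != nil {
        panic(err)
    }
    fmt.Println(out.Value()) // 3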
+
+## Protos
+
+Protos configure extended macros and functions for proto manipulation.
+
+Note, all macros use the 'proto' namespace; however, at the time of macro
+expansion the namespace looks just like any other identifier. If you are
+currently using a variable named 'proto', the macro will likely work just as
+you intend; however, there is some chance for collision.
+
+### Protos.GetExt
+
+Macro which generates a select expression that retrieves an extension field
+from the input proto2 syntax message. If the field is not set, the default
+value for the extension field is returned according to safe-traversal semantics.
+
+    proto.getExt(<msg>, <fully.qualified.extension.name>) -> <field-type>
+
+Example:
+
+    proto.getExt(msg, google.expr.proto2.test.int32_ext) // returns int value
+
+### Protos.HasExt
+
+Macro which generates a test-only select expression that determines whether
+an extension field is set on a proto2 syntax message.
+
+    proto.hasExt(<msg>, <fully.qualified.extension.name>) -> <bool>
+
+Example:
+
+    proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false
+
+## Lists
+
+Extended functions for list manipulation. As a general note, all indices are
+zero-based.
+
+### Distinct
+
+**Introduced in version 2**
+
+Returns the distinct elements of a list.
+
+    <list(T)>.distinct() -> <list(T)>
+
+Examples:
+
+    [1, 2, 2, 3, 3, 3].distinct() // return [1, 2, 3]
+    ["b", "b", "c", "a", "c"].distinct() // return ["b", "c", "a"]
+    [1, "b", 2, "b"].distinct() // return [1, "b", 2]
+
+### Flatten
+
+**Introduced in version 1**
+
+Flattens a list recursively.
+If an optional depth is provided, the list is flattened to the specified level.
+A negative depth value will result in an error.
+
+    <list>.flatten(<list>) -> <list>
+    <list>.flatten(<list>, <int>) -> <list>
+
+Examples:
+
+    [1,[2,3],[4]].flatten() // return [1, 2, 3, 4]
+    [1,[2,[3,4]]].flatten() // return [1, 2, [3, 4]]
+    [1,2,[],[],[3,4]].flatten() // return [1, 2, 3, 4]
+    [1,[2,[3,[4]]]].flatten(2) // return [1, 2, 3, [4]]
+    [1,[2,[3,[4]]]].flatten(-1) // error
+
+### Range
+
+**Introduced in version 2**
+
+Returns a list of integers from 0 to n-1.
+
+    lists.range(<int>) -> <list(int)>
+
+Examples:
+
+    lists.range(5) -> [0, 1, 2, 3, 4]
+
+
+### Reverse
+
+**Introduced in version 2**
+
+Returns the elements of a list in reverse order.
+
+    <list(T)>.reverse() -> <list(T)>
+
+Examples:
+
+    [5, 3, 1, 2].reverse() // return [2, 1, 3, 5]
+
+
+### Slice
+
+
+Returns a new sub-list using the indexes provided.
+
+    <list(T)>.slice(<int>, <int>) -> <list(T)>
+
+Examples:
+
+    [1,2,3,4].slice(1, 3) // return [2, 3]
+    [1,2,3,4].slice(2, 4) // return [3, 4]
+
+### Sort
+
+**Introduced in version 2**
+
+Sorts a list with comparable elements. If the element type is not comparable
+or the element types are not the same, the function will produce an error.
+
+    <list(T)>.sort() -> <list(T)>
+    T in {int, uint, double, bool, duration, timestamp, string, bytes}
+
+Examples:
+
+    [3, 2, 1].sort() // return [1, 2, 3]
+    ["b", "c", "a"].sort() // return ["a", "b", "c"]
+    [1, "b"].sort() // error
+    [[1, 2, 3]].sort() // error
+
+### SortBy
+
+**Introduced in version 2**
+
+Sorts a list by a key value, i.e., the order is determined by the result of
+an expression applied to each element of the list.
+
+    <list(T)>.sortBy(<bindingName>, <keyExpr>) -> <list(T)>
+    keyExpr returns a value in {int, uint, double, bool, duration, timestamp, string, bytes}
+
+Examples:
+
+    [
+      Player { name: "foo", score: 0 },
+      Player { name: "bar", score: -10 },
+      Player { name: "baz", score: 1000 },
+    ].sortBy(e, e.score).map(e, e.name)
+    == ["bar", "foo", "baz"]
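The list helpers follow the same pattern; the sketch below compares the slice result against a literal so the evaluation yields a plain bool (assumes `ext.Lists()` and the hypothetical `compileAndEval` helper from earlier):

    // slice is part of the lists extension; == compares lists element-wise.
    out, err := compileAndEval(ext.Lists(), `[1, 2, 3, 4].slice(1, 3) == [2, 3]`)
    if err != nil {
        panic(err)
    }
    fmt.Println(out.Value()) // true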
+
+## Sets
+
+Sets provides set relationship tests.
+
+There is no set type within CEL, and while one may be introduced in the
+future, there are cases where a `list` type is known to behave like a set.
+For such cases, this library provides some basic functionality for
+determining set containment, equivalence, and intersection.
+
+### Sets.Contains
+
+Returns whether the first list argument contains all elements in the second
+list argument. The list may contain elements of any type and standard CEL
+equality is used to determine whether a value exists in both lists. If the
+second list is empty, the result will always return true.
+
+    sets.contains(list(T), list(T)) -> bool
+
+Examples:
+
+    sets.contains([], []) // true
+    sets.contains([], [1]) // false
+    sets.contains([1, 2, 3, 4], [2, 3]) // true
+    sets.contains([1, 2.0, 3u], [1.0, 2u, 3]) // true
+
+### Sets.Equivalent
+
+Returns whether the first and second list are set equivalent. Lists are set
+equivalent if for every item in the first list, there is an element in the
+second which is equal. The lists may not be of the same size as they do not
+guarantee the elements within them are unique, so size does not factor into
+the computation.
+
+    sets.equivalent(list(T), list(T)) -> bool
+
+Examples:
+
+    sets.equivalent([], []) // true
+    sets.equivalent([1], [1, 1]) // true
+    sets.equivalent([1], [1u, 1.0]) // true
+    sets.equivalent([1, 2, 3], [3u, 2.0, 1]) // true
+
+### Sets.Intersects
+
+Returns whether the first list has at least one element whose value is equal
+to an element in the second list. If either list is empty, the result will
+be false.
+
+    sets.intersects(list(T), list(T)) -> bool
+
+Examples:
+
+    sets.intersects([1], []) // false
+    sets.intersects([1], [1, 2]) // true
+    sets.intersects([[1], [2, 3]], [[1, 2], [2, 3.0]]) // true
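A matching sketch for the set relationship tests, mirroring one of the documented examples (assumes `ext.Sets()` plus the same hypothetical helper):

    // sets.contains applies standard CEL equality across numeric types.
    out, err := compileAndEval(ext.Sets(), `sets.contains([1, 2.0, 3u], [1.0, 2u, 3])`)
    if err != nil {
        panic(err)
    }
    fmt.Println(out.Value()) // true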
+
+### Sort
+
+**Introduced in version 2**
+
+Sorts a list with comparable elements. If the element type is not comparable
+or the element types are not the same, the function will produce an error.
+
+    <list(T)>.sort() -> <list(T)>
+    T in {int, uint, double, bool, duration, timestamp, string, bytes}
+
+Examples:
+
+    [3, 2, 1].sort() // return [1, 2, 3]
+    ["b", "c", "a"].sort() // return ["a", "b", "c"]
+    [1, "b"].sort() // error
+    [[1, 2, 3]].sort() // error
+
+### SortBy
+
+**Introduced in version 2**
+
+Sorts a list by a key value, i.e., the order is determined by the result of
+an expression applied to each element of the list.
+
+    <list(T)>.sortBy(<bindingName>, <keyExpr>) -> <list(T)>
+    keyExpr returns a value in {int, uint, double, bool, duration, timestamp, string, bytes}
+
+Examples:
+
+    [
+        Player { name: "foo", score: 0 },
+        Player { name: "bar", score: -10 },
+        Player { name: "baz", score: 1000 },
+    ].sortBy(e, e.score).map(e, e.name)
+    == ["bar", "foo", "baz"]
+
+## Sets
+
+Sets provides set relationship tests.
+
+There is no set type within CEL, and while one may be introduced in the
+future, there are cases where a `list` type is known to behave like a set.
+For such cases, this library provides some basic functionality for
+determining set containment, equivalence, and intersection.
+
+### Sets.Contains
+
+Returns whether the first list argument contains all elements in the second
+list argument. The list may contain elements of any type and standard CEL
+equality is used to determine whether a value exists in both lists. If the
+second list is empty, the result will always return true.
+
+    sets.contains(list(T), list(T)) -> bool
+
+Examples:
+
+    sets.contains([], []) // true
+    sets.contains([], [1]) // false
+    sets.contains([1, 2, 3, 4], [2, 3]) // true
+    sets.contains([1, 2.0, 3u], [1.0, 2u, 3]) // true
+
+### Sets.Equivalent
+
+Returns whether the first and second list are set equivalent. Lists are set
+equivalent if for every item in the first list, there is an element in the
+second which is equal. The lists may not be of the same size as they do not
+guarantee the elements within them are unique, so size does not factor into
+the computation.
+
+    sets.equivalent(list(T), list(T)) -> bool
+
+Examples:
+
+    sets.equivalent([], []) // true
+    sets.equivalent([1], [1, 1]) // true
+    sets.equivalent([1], [1u, 1.0]) // true
+    sets.equivalent([1, 2, 3], [3u, 2.0, 1]) // true
+
+### Sets.Intersects
+
+Returns whether the first list has at least one element whose value is equal
+to an element in the second list. If either list is empty, the result will
+be false.
+
+    sets.intersects(list(T), list(T)) -> bool
+
+Examples:
+
+    sets.intersects([1], []) // false
+    sets.intersects([1], [1, 2]) // true
+    sets.intersects([[1], [2, 3]], [[1, 2], [2, 3.0]]) // true
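+
+The set predicates follow the same pattern, again reusing the hypothetical
+`eval` helper from the math sketch above:
+
+    eval(ext.Sets(), `sets.contains([1, 2, 3, 4], [2, 3])`) // true
+    eval(ext.Sets(), `sets.equivalent([1], [1, 1])`)        // true
+    eval(ext.Sets(), `sets.intersects([1], [1, 2])`)        // true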
+
+## Strings
+
+Extended functions for string manipulation. As a general note, all indices are
+zero-based.
+
+### CharAt
+
+Returns the character at the given position. If the position is negative, or
+greater than the length of the string, the function will produce an error:
+
+    <string>.charAt(<int>) -> <string>
+
+Examples:
+
+    'hello'.charAt(4) // return 'o'
+    'hello'.charAt(5) // return ''
+    'hello'.charAt(-1) // error
+
+### IndexOf
+
+Returns the integer index of the first occurrence of the search string. If the
+search string is not found the function returns -1.
+
+The function also accepts an optional position from which to begin the
+substring search. If the substring is the empty string, the index where the
+search starts is returned (zero or custom).
+
+    <string>.indexOf(<string>) -> <int>
+    <string>.indexOf(<string>, <int>) -> <int>
+
+Examples:
+
+    'hello mellow'.indexOf('') // returns 0
+    'hello mellow'.indexOf('ello') // returns 1
+    'hello mellow'.indexOf('jello') // returns -1
+    'hello mellow'.indexOf('', 2) // returns 2
+    'hello mellow'.indexOf('ello', 2) // returns 7
+    'hello mellow'.indexOf('ello', 20) // returns -1
+    'hello mellow'.indexOf('ello', -1) // error
+
+### Join
+
+Returns a new string where the elements of string list are concatenated.
+
+The function also accepts an optional separator which is placed between
+elements in the resulting string.
+
+    <list(string)>.join() -> <string>
+    <list(string)>.join(<string>) -> <string>
+
+Examples:
+
+    ['hello', 'mellow'].join() // returns 'hellomellow'
+    ['hello', 'mellow'].join(' ') // returns 'hello mellow'
+    [].join() // returns ''
+    [].join('/') // returns ''
+
+### LastIndexOf
+
+Returns the integer index of the last occurrence of the search string. If the
+search string is not found the function returns -1.
+
+The function also accepts an optional position which represents the last index
+to be considered as the beginning of the substring match. If the substring is
+the empty string, the index where the search starts is returned (string length
+or custom).
+
+    <string>.lastIndexOf(<string>) -> <int>
+    <string>.lastIndexOf(<string>, <int>) -> <int>
+
+Examples:
+
+    'hello mellow'.lastIndexOf('') // returns 12
+    'hello mellow'.lastIndexOf('ello') // returns 7
+    'hello mellow'.lastIndexOf('jello') // returns -1
+    'hello mellow'.lastIndexOf('ello', 6) // returns 1
+    'hello mellow'.lastIndexOf('ello', 20) // returns -1
+    'hello mellow'.lastIndexOf('ello', -1) // error
+
+### LowerAscii
+
+Returns a new string where all ASCII characters are lower-cased.
+
+This function does not perform Unicode case-mapping for characters outside the
+ASCII range.
+
+    <string>.lowerAscii() -> <string>
+
+Examples:
+
+    'TacoCat'.lowerAscii() // returns 'tacocat'
+    'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
+
+### Quote
+
+**Introduced in version 1**
+
+Takes the given string and makes it safe to print (without any formatting due to escape sequences).
+If any invalid UTF-8 characters are encountered, they are replaced with \uFFFD.
+
+    strings.quote(<string>)
+
+Examples:
+
+    strings.quote('single-quote with "double quote"') // returns '"single-quote with \"double quote\""'
+    strings.quote("two escape sequences \a\n") // returns '"two escape sequences \\a\\n"'
+
+### Replace
+
+Returns a new string based on the target, which replaces the occurrences of a
+search string with a replacement string if present. The function accepts an
+optional limit on the number of substring replacements to be made.
+
+When the replacement limit is 0, the result is the original string. When the
+limit is a negative number, the function behaves the same as replace all.
+
+    <string>.replace(<string>, <string>) -> <string>
+    <string>.replace(<string>, <string>, <int>) -> <string>
+
+Examples:
+
+    'hello hello'.replace('he', 'we') // returns 'wello wello'
+    'hello hello'.replace('he', 'we', -1) // returns 'wello wello'
+    'hello hello'.replace('he', 'we', 1) // returns 'wello hello'
+    'hello hello'.replace('he', 'we', 0) // returns 'hello hello'
+
+### Split
+
+Returns a list of strings split from the input by the given separator. The
+function accepts an optional argument specifying a limit on the number of
+substrings produced by the split.
+
+When the split limit is 0, the result is an empty list. When the limit is 1,
+the result is the target string to split. When the limit is a negative
+number, the function behaves the same as split all.
+
+    <string>.split(<string>) -> <list(string)>
+    <string>.split(<string>, <int>) -> <list(string)>
+
+Examples:
+
+    'hello hello hello'.split(' ') // returns ['hello', 'hello', 'hello']
+    'hello hello hello'.split(' ', 0) // returns []
+    'hello hello hello'.split(' ', 1) // returns ['hello hello hello']
+    'hello hello hello'.split(' ', 2) // returns ['hello', 'hello hello']
+    'hello hello hello'.split(' ', -1) // returns ['hello', 'hello', 'hello']
+
+### Substring
+
+Returns the substring given a numeric range corresponding to character
+positions. Optionally may omit the trailing range for a substring from a given
+character position until the end of a string.
+
+Character offsets are 0-based with an inclusive start range and exclusive end
+range. It is an error to specify an end range that is lower than the start
+range, or for either the start or end index to be negative or exceed the string
+length.
+
+    <string>.substring(<int>) -> <string>
+    <string>.substring(<int>, <int>) -> <string>
+
+Examples:
+
+    'tacocat'.substring(4) // returns 'cat'
+    'tacocat'.substring(0, 4) // returns 'taco'
+    'tacocat'.substring(-1) // error
+    'tacocat'.substring(2, 1) // error
+
+### Trim
+
+Returns a new string which removes the leading and trailing whitespace in the
+target string. The trim function uses the Unicode definition of whitespace
+which does not include the zero-width spaces. See:
+https://en.wikipedia.org/wiki/Whitespace_character#Unicode
+
+    <string>.trim() -> <string>
+
+Examples:
+
+    ' \ttrim\n '.trim() // returns 'trim'
+
+### UpperAscii
+
+Returns a new string where all ASCII characters are upper-cased.
+
+This function does not perform Unicode case-mapping for characters outside the
+ASCII range.
+
+    <string>.upperAscii() -> <string>
+
+Examples:
+
+    'TacoCat'.upperAscii() // returns 'TACOCAT'
+    'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
+
+### Reverse
+
+**Introduced in version 3**
+
+Returns a new string whose characters are the same as the target string, only
+formatted in reverse order. This function relies on converting strings to rune
+arrays in order to reverse.
+
+    <string>.reverse() -> <string>
+
+Examples:
+
+    'gums'.reverse() // returns 'smug'
+    'John Smith'.reverse() // returns 'htimS nhoJ'
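+
+The string functions can be exercised the same way (reusing the hypothetical
+`eval` helper from the math sketch above; `ext.StringsVersion` selects the
+library version, which matters for version-gated functions such as `reverse`):
+
+    eval(ext.Strings(ext.StringsVersion(3)), `'John Smith'.reverse()`) // 'htimS nhoJ'
+    eval(ext.Strings(), `'hello mellow'.indexOf('ello', 2)`)           // 7
+    eval(ext.Strings(), `['hello', 'mellow'].join(' ')`)               // 'hello mellow'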
+
+## TwoVarComprehensions
+
+TwoVarComprehensions introduces support for two-variable comprehensions.
+
+The two-variable form of comprehensions looks similar to the one-variable
+counterparts. Where possible, the same macro names were used and additional
+macro signatures added. The notable distinction for two-variable comprehensions
+is the introduction of `transformList`, `transformMap`, and `transformMapEntry`
+support for list and map types rather than the more traditional `map` and
+`filter` macros.
+
+### All
+
+Comprehension which tests whether all elements in the list or map satisfy a
+given predicate. The `all` macro evaluates in a manner consistent with logical
+AND and will short-circuit when encountering a `false` value.
+
+    <list>.all(indexVar, valueVar, <predicate>) -> bool
+    <map>.all(keyVar, valueVar, <predicate>) -> bool
+
+Examples:
+
+    [1, 2, 3].all(i, j, i < j) // returns true
+    {'hello': 'world', 'taco': 'taco'}.all(k, v, k != v) // returns false
+
+    // Combines two-variable comprehension with single variable
+    {'h': ['hello', 'hi'], 'j': ['joke', 'jog']}
+        .all(k, vals, vals.all(v, v.startsWith(k))) // returns true
+
+### Exists
+
+Comprehension which tests whether any element in a list or map exists which
+satisfies a given predicate. The `exists` macro evaluates in a manner consistent
+with logical OR and will short-circuit when encountering a `true` value.
+
+    <list>.exists(indexVar, valueVar, <predicate>) -> bool
+    <map>.exists(keyVar, valueVar, <predicate>) -> bool
+
+Examples:
+
+    {'greeting': 'hello', 'farewell': 'goodbye'}
+        .exists(k, v, k.startsWith('good') || v.endsWith('bye')) // returns true
+    [1, 2, 4, 8, 16].exists(i, v, v == 1024 && i == 10) // returns false
+
+### ExistsOne
+
+Comprehension which tests whether exactly one element in a list or map exists
+which satisfies a given predicate expression. This comprehension does not
+short-circuit in keeping with the one-variable exists one macro semantics.
+
+    <list>.existsOne(indexVar, valueVar, <predicate>)
+    <map>.existsOne(keyVar, valueVar, <predicate>)
+
+This macro may also be used with the `exists_one` function name, for
+compatibility with the one-variable macro of the same name.
+
+Examples:
+
+    [1, 2, 1, 3, 1, 4].existsOne(i, v, i == 1 || v == 1) // returns false
+    [1, 1, 2, 2, 3, 3].existsOne(i, v, i == 2 && v == 2) // returns true
+    {'i': 0, 'j': 1, 'k': 2}.existsOne(i, v, i == 'l' || v == 1) // returns true
+
+### TransformList
+
+Comprehension which converts a map or a list into a list value. The output
+expression of the comprehension determines the contents of the output list.
+Elements in the list may optionally be filtered according to a predicate
+expression, where elements that satisfy the predicate are transformed.
+
+    <list>.transformList(indexVar, valueVar, <transform>)
+    <list>.transformList(indexVar, valueVar, <filter>, <transform>)
+    <map>.transformList(keyVar, valueVar, <transform>)
+    <map>.transformList(keyVar, valueVar, <filter>, <transform>)
+
+Examples:
+
+    [1, 2, 3].transformList(indexVar, valueVar,
+        (indexVar * valueVar) + valueVar) // returns [1, 4, 9]
+    [1, 2, 3].transformList(indexVar, valueVar, indexVar % 2 == 0,
+        (indexVar * valueVar) + valueVar) // returns [1, 9]
+    {'greeting': 'hello', 'farewell': 'goodbye'}
+        .transformList(k, _, k) // returns ['greeting', 'farewell']
+    {'greeting': 'hello', 'farewell': 'goodbye'}
+        .transformList(_, v, v) // returns ['hello', 'goodbye']
+
+### TransformMap
+
+Comprehension which converts a map or a list into a map value. The output
+expression of the comprehension determines the value of the output map entry;
+however, the key remains fixed. Elements in the map may optionally be filtered
+according to a predicate expression, where elements that satisfy the predicate
+are transformed.
+
+    <list>.transformMap(indexVar, valueVar, <transform>)
+    <list>.transformMap(indexVar, valueVar, <filter>, <transform>)
+    <map>.transformMap(keyVar, valueVar, <transform>)
+    <map>.transformMap(keyVar, valueVar, <filter>, <transform>)
+
+Examples:
+
+    [1, 2, 3].transformMap(indexVar, valueVar,
+        (indexVar * valueVar) + valueVar) // returns {0: 1, 1: 4, 2: 9}
+    [1, 2, 3].transformMap(indexVar, valueVar, indexVar % 2 == 0,
+        (indexVar * valueVar) + valueVar) // returns {0: 1, 2: 9}
+    {'greeting': 'hello'}.transformMap(k, v, v + '!') // returns {'greeting': 'hello!'}
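+
+A short sketch of the two-variable comprehensions in use, again reusing the
+hypothetical `eval` helper from the math sketch above:
+
+    eval(ext.TwoVarComprehensions(), `[1, 2, 3].all(i, v, i < v)`)         // true
+    eval(ext.TwoVarComprehensions(), `{'a': 1}.transformMap(k, v, v + 1)`) // {'a': 2}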
+
+### TransformMapEntry
+
+Comprehension which converts a map or a list into a map value; however, this
+transform expects the entry expression to be a map literal. If the transform
+produces an entry which duplicates a key in the target map, the comprehension
+will error. Note that key equality is determined using CEL equality, so numeric
+values which are equal will cause a key collision even when they do not share
+the same type.
+
+Elements in the map may optionally be filtered according to a predicate
+expression, where elements that satisfy the predicate are transformed.
+
+    <list>.transformMapEntry(indexVar, valueVar, <transform>)
+    <list>.transformMapEntry(indexVar, valueVar, <filter>, <transform>)
+    <map>.transformMapEntry(keyVar, valueVar, <transform>)
+    <map>.transformMapEntry(keyVar, valueVar, <filter>, <transform>)
+
+Examples:
+
+    // returns {'hello': 'greeting'}
+    {'greeting': 'hello'}.transformMapEntry(keyVar, valueVar, {valueVar: keyVar})
+    // reverse lookup, requires all values in the list to be unique
+    [1, 2, 3].transformMapEntry(indexVar, valueVar, {valueVar: indexVar})
+
+    {'greeting': 'aloha', 'farewell': 'aloha'}
+        .transformMapEntry(keyVar, valueVar, {valueVar: keyVar}) // error, duplicate key
diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/bindings.go b/hack/tools/vendor/github.com/google/cel-go/ext/bindings.go
new file mode 100644
index 0000000000..50cf4fb3d6
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/ext/bindings.go
@@ -0,0 +1,336 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+	"github.com/google/cel-go/interpreter"
+)
+
+// Bindings returns a cel.EnvOption to configure support for local variable
+// bindings in expressions.
+//
+// # Cel.Bind
+//
+// Binds a simple identifier to an initialization expression which may be used
+// in a subsequent result expression. Bindings may also be nested within each
+// other.
+//
+//	cel.bind(<varName>, <initExpr>, <resultExpr>)
+//
+// Examples:
+//
+//	cel.bind(a, 'hello',
+//	cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello"
+//
+//	// Avoid a list allocation within the exists comprehension.
+//	cel.bind(valid_values, [a, b, c],
+//	[d, e, f].exists(elem, elem in valid_values))
+//
+// Local bindings are not guaranteed to be evaluated before use.
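+//
+// A rough usage sketch (hypothetical caller code; this package is assumed
+// to be imported as `ext` alongside `cel`):
+//
+//	env, _ := cel.NewEnv(ext.Bindings())
+//	ast, iss := env.Compile(`cel.bind(a, 'hi', a + ' ' + a)`)
+//	if iss.Err() != nil { /* handle error */ }
+//	prg, _ := env.Program(ast)
+//	out, _, _ := prg.Eval(cel.NoVars()) // out == "hi hi"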
+func Bindings(options ...BindingsOption) cel.EnvOption {
+	b := &celBindings{version: math.MaxUint32}
+	for _, o := range options {
+		b = o(b)
+	}
+	return cel.Lib(b)
+}
+
+const (
+	celNamespace  = "cel"
+	bindMacro     = "bind"
+	blockFunc     = "@block"
+	unusedIterVar = "#unused"
+)
+
+// BindingsOption declares a functional operator for configuring the Bindings library behavior.
+type BindingsOption func(*celBindings) *celBindings
+
+// BindingsVersion sets the version of the bindings library to an explicit version.
+func BindingsVersion(version uint32) BindingsOption {
+	return func(lib *celBindings) *celBindings {
+		lib.version = version
+		return lib
+	}
+}
+
+type celBindings struct {
+	version uint32
+}
+
+func (*celBindings) LibraryName() string {
+	return "cel.lib.ext.cel.bindings"
+}
+
+func (lib *celBindings) CompileOptions() []cel.EnvOption {
+	opts := []cel.EnvOption{
+		cel.Macros(
+			// cel.bind(var, <init>, <expr>)
+			cel.ReceiverMacro(bindMacro, 3, celBind),
+		),
+	}
+	if lib.version >= 1 {
+		// The cel.@block signature takes a list of subexpressions and a typed expression which is
+		// used as the output type.
+		paramType := cel.TypeParamType("T")
+		opts = append(opts,
+			cel.Function("cel.@block",
+				cel.Overload("cel_block_list",
+					[]*cel.Type{cel.ListType(cel.DynType), paramType}, paramType)),
+		)
+		opts = append(opts, cel.ASTValidators(blockValidationExemption{}))
+	}
+	return opts
+}
+
+func (lib *celBindings) ProgramOptions() []cel.ProgramOption {
+	if lib.version >= 1 {
+		celBlockPlan := func(i interpreter.Interpretable) (interpreter.Interpretable, error) {
+			call, ok := i.(interpreter.InterpretableCall)
+			if !ok {
+				return i, nil
+			}
+			switch call.Function() {
+			case "cel.@block":
+				args := call.Args()
+				if len(args) != 2 {
+					return nil, fmt.Errorf("cel.@block expects two arguments, but got %d", len(args))
+				}
+				expr := args[1]
+				// Non-empty block
+				if block, ok := args[0].(interpreter.InterpretableConstructor); ok {
+					slotExprs := block.InitVals()
+					return newDynamicBlock(slotExprs, expr), nil
+				}
+				// Constant valued block which can happen during runtime optimization.
+				if cons, ok := args[0].(interpreter.InterpretableConst); ok {
+					if cons.Value().Type() == types.ListType {
+						l := cons.Value().(traits.Lister)
+						if l.Size().Equal(types.IntZero) == types.True {
+							return args[1], nil
+						}
+						return newConstantBlock(l, expr), nil
+					}
+				}
+				return nil, errors.New("cel.@block expects a list constructor as the first argument")
+			default:
+				return i, nil
+			}
+		}
+		return []cel.ProgramOption{cel.CustomDecorator(celBlockPlan)}
+	}
+	return []cel.ProgramOption{}
+}
+
+type blockValidationExemption struct{}
+
+// Name returns the name of the validator.
+func (blockValidationExemption) Name() string {
+	return "cel.lib.ext.validate.functions.cel.block"
+}
+
+// Configure implements the ASTValidatorConfigurer interface and augments the list of functions to skip
+// during homogeneous aggregate literal type-checks.
+func (blockValidationExemption) Configure(config cel.MutableValidatorConfig) error {
+	functions := config.GetOrDefault(cel.HomogeneousAggregateLiteralExemptFunctions, []string{}).([]string)
+	functions = append(functions, "cel.@block")
+	return config.Set(cel.HomogeneousAggregateLiteralExemptFunctions, functions)
+}
+
+// Validate is a no-op as the intent is to simply disable strong type-checks for list literals
+// when they occur within cel.@block calls, as the arg types have already been validated.
+func (blockValidationExemption) Validate(env *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) { +} + +func celBind(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + if !macroTargetMatchesNamespace(celNamespace, target) { + return nil, nil + } + varIdent := args[0] + varName := "" + switch varIdent.Kind() { + case ast.IdentKind: + varName = varIdent.AsIdent() + default: + return nil, mef.NewError(varIdent.ID(), "cel.bind() variable names must be simple identifiers") + } + varInit := args[1] + resultExpr := args[2] + return mef.NewComprehension( + mef.NewList(), + unusedIterVar, + varName, + varInit, + mef.NewLiteral(types.False), + mef.NewIdent(varName), + resultExpr, + ), nil +} + +func newDynamicBlock(slotExprs []interpreter.Interpretable, expr interpreter.Interpretable) interpreter.Interpretable { + bs := &dynamicBlock{ + slotExprs: slotExprs, + expr: expr, + } + bs.slotActivationPool = &sync.Pool{ + New: func() any { + slotCount := len(slotExprs) + sa := &dynamicSlotActivation{ + slotExprs: slotExprs, + slotCount: slotCount, + slotVals: make([]*slotVal, slotCount), + } + for i := 0; i < slotCount; i++ { + sa.slotVals[i] = &slotVal{} + } + return sa + }, + } + return bs +} + +type dynamicBlock struct { + slotExprs []interpreter.Interpretable + expr interpreter.Interpretable + slotActivationPool *sync.Pool +} + +// ID implements the Interpretable interface method. +func (b *dynamicBlock) ID() int64 { + return b.expr.ID() +} + +// Eval implements the Interpretable interface method. +func (b *dynamicBlock) Eval(activation interpreter.Activation) ref.Val { + sa := b.slotActivationPool.Get().(*dynamicSlotActivation) + sa.Activation = activation + defer b.clearSlots(sa) + return b.expr.Eval(sa) +} + +func (b *dynamicBlock) clearSlots(sa *dynamicSlotActivation) { + sa.reset() + b.slotActivationPool.Put(sa) +} + +type slotVal struct { + value *ref.Val + visited bool +} + +type dynamicSlotActivation struct { + interpreter.Activation + slotExprs []interpreter.Interpretable + slotCount int + slotVals []*slotVal +} + +// ResolveName implements the Activation interface method but handles variables prefixed with `@index` +// as special variables which exist within the slot-based memory of the cel.@block() where each slot +// refers to an expression which must be computed only once. +func (sa *dynamicSlotActivation) ResolveName(name string) (any, bool) { + if idx, found := matchSlot(name, sa.slotCount); found { + v := sa.slotVals[idx] + if v.visited { + // Return not found if the index expression refers to itself + if v.value == nil { + return nil, false + } + return *v.value, true + } + v.visited = true + val := sa.slotExprs[idx].Eval(sa) + v.value = &val + return val, true + } + return sa.Activation.ResolveName(name) +} + +func (sa *dynamicSlotActivation) reset() { + sa.Activation = nil + for _, sv := range sa.slotVals { + sv.visited = false + sv.value = nil + } +} + +func newConstantBlock(slots traits.Lister, expr interpreter.Interpretable) interpreter.Interpretable { + count := slots.Size().(types.Int) + return &constantBlock{slots: slots, slotCount: int(count), expr: expr} +} + +type constantBlock struct { + slots traits.Lister + slotCount int + expr interpreter.Interpretable +} + +// ID implements the interpreter.Interpretable interface method. 
+func (b *constantBlock) ID() int64 { + return b.expr.ID() +} + +// Eval implements the interpreter.Interpretable interface method, and will proxy @index prefixed variable +// lookups into a set of constant slots determined from the plan step. +func (b *constantBlock) Eval(activation interpreter.Activation) ref.Val { + vars := constantSlotActivation{Activation: activation, slots: b.slots, slotCount: b.slotCount} + return b.expr.Eval(vars) +} + +type constantSlotActivation struct { + interpreter.Activation + slots traits.Lister + slotCount int +} + +// ResolveName implements Activation interface method and proxies @index prefixed lookups into the slot +// activation associated with the block scope. +func (sa constantSlotActivation) ResolveName(name string) (any, bool) { + if idx, found := matchSlot(name, sa.slotCount); found { + return sa.slots.Get(types.Int(idx)), true + } + return sa.Activation.ResolveName(name) +} + +func matchSlot(name string, slotCount int) (int, bool) { + if idx, found := strings.CutPrefix(name, indexPrefix); found { + idx, err := strconv.Atoi(idx) + // Return not found if the index is not numeric + if err != nil { + return -1, false + } + // Return not found if the index is not a valid slot + if idx < 0 || idx >= slotCount { + return -1, false + } + return idx, true + } + return -1, false +} + +var ( + indexPrefix = "@index" +) diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/comprehensions.go b/hack/tools/vendor/github.com/google/cel-go/ext/comprehensions.go new file mode 100644 index 0000000000..1428558d82 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/ext/comprehensions.go @@ -0,0 +1,410 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ext + +import ( + "fmt" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/parser" +) + +const ( + mapInsert = "cel.@mapInsert" + mapInsertOverloadMap = "@mapInsert_map_map" + mapInsertOverloadKeyValue = "@mapInsert_map_key_value" +) + +// TwoVarComprehensions introduces support for two-variable comprehensions. +// +// The two-variable form of comprehensions looks similar to the one-variable counterparts. +// Where possible, the same macro names were used and additional macro signatures added. +// The notable distinction for two-variable comprehensions is the introduction of +// `transformList`, `transformMap`, and `transformMapEntry` support for list and map types +// rather than the more traditional `map` and `filter` macros. +// +// # All +// +// Comprehension which tests whether all elements in the list or map satisfy a given +// predicate. The `all` macro evaluates in a manner consistent with logical AND and will +// short-circuit when encountering a `false` value. 
+//
+//	<list>.all(indexVar, valueVar, <predicate>) -> bool
+//	<map>.all(keyVar, valueVar, <predicate>) -> bool
+//
+// Examples:
+//
+//	[1, 2, 3].all(i, j, i < j) // returns true
+//	{'hello': 'world', 'taco': 'taco'}.all(k, v, k != v) // returns false
+//
+//	// Combines two-variable comprehension with single variable
+//	{'h': ['hello', 'hi'], 'j': ['joke', 'jog']}
+//	    .all(k, vals, vals.all(v, v.startsWith(k))) // returns true
+//
+// # Exists
+//
+// Comprehension which tests whether any element in a list or map exists which satisfies
+// a given predicate. The `exists` macro evaluates in a manner consistent with logical OR
+// and will short-circuit when encountering a `true` value.
+//
+//	<list>.exists(indexVar, valueVar, <predicate>) -> bool
+//	<map>.exists(keyVar, valueVar, <predicate>) -> bool
+//
+// Examples:
+//
+//	{'greeting': 'hello', 'farewell': 'goodbye'}
+//	    .exists(k, v, k.startsWith('good') || v.endsWith('bye')) // returns true
+//	[1, 2, 4, 8, 16].exists(i, v, v == 1024 && i == 10) // returns false
+//
+// # ExistsOne
+//
+// Comprehension which tests whether exactly one element in a list or map exists which
+// satisfies a given predicate expression. This comprehension does not short-circuit in
+// keeping with the one-variable exists one macro semantics.
+//
+//	<list>.existsOne(indexVar, valueVar, <predicate>)
+//	<map>.existsOne(keyVar, valueVar, <predicate>)
+//
+// This macro may also be used with the `exists_one` function name, for compatibility
+// with the one-variable macro of the same name.
+//
+// Examples:
+//
+//	[1, 2, 1, 3, 1, 4].existsOne(i, v, i == 1 || v == 1) // returns false
+//	[1, 1, 2, 2, 3, 3].existsOne(i, v, i == 2 && v == 2) // returns true
+//	{'i': 0, 'j': 1, 'k': 2}.existsOne(i, v, i == 'l' || v == 1) // returns true
+//
+// # TransformList
+//
+// Comprehension which converts a map or a list into a list value. The output expression
+// of the comprehension determines the contents of the output list. Elements in the list
+// may optionally be filtered according to a predicate expression, where elements that
+// satisfy the predicate are transformed.
+//
+//	<list>.transformList(indexVar, valueVar, <transform>)
+//	<list>.transformList(indexVar, valueVar, <filter>, <transform>)
+//	<map>.transformList(keyVar, valueVar, <transform>)
+//	<map>.transformList(keyVar, valueVar, <filter>, <transform>)
+//
+// Examples:
+//
+//	[1, 2, 3].transformList(indexVar, valueVar,
+//	    (indexVar * valueVar) + valueVar) // returns [1, 4, 9]
+//	[1, 2, 3].transformList(indexVar, valueVar, indexVar % 2 == 0,
+//	    (indexVar * valueVar) + valueVar) // returns [1, 9]
+//	{'greeting': 'hello', 'farewell': 'goodbye'}
+//	    .transformList(k, _, k) // returns ['greeting', 'farewell']
+//	{'greeting': 'hello', 'farewell': 'goodbye'}
+//	    .transformList(_, v, v) // returns ['hello', 'goodbye']
+//
+// # TransformMap
+//
+// Comprehension which converts a map or a list into a map value. The output expression
+// of the comprehension determines the value of the output map entry; however, the key
+// remains fixed. Elements in the map may optionally be filtered according to a predicate
+// expression, where elements that satisfy the predicate are transformed.
+//
+//	<list>.transformMap(indexVar, valueVar, <transform>)
+//	<list>.transformMap(indexVar, valueVar, <filter>, <transform>)
+//	<map>.transformMap(keyVar, valueVar, <transform>)
+//	<map>.transformMap(keyVar, valueVar, <filter>, <transform>)
+//
+// Examples:
+//
+//	[1, 2, 3].transformMap(indexVar, valueVar,
+//	    (indexVar * valueVar) + valueVar) // returns {0: 1, 1: 4, 2: 9}
+//	[1, 2, 3].transformMap(indexVar, valueVar, indexVar % 2 == 0,
+//	    (indexVar * valueVar) + valueVar) // returns {0: 1, 2: 9}
+//	{'greeting': 'hello'}.transformMap(k, v, v + '!') // returns {'greeting': 'hello!'}
+//
+// # TransformMapEntry
+//
+// Comprehension which converts a map or a list into a map value; however, this transform
+// expects the entry expression to be a map literal. If the transform produces an entry
+// which duplicates a key in the target map, the comprehension will error. Note that key
+// equality is determined using CEL equality, so numeric values which are equal will cause
+// a key collision even when they do not share the same type.
+//
+// Elements in the map may optionally be filtered according to a predicate expression, where
+// elements that satisfy the predicate are transformed.
+//
+//	<list>.transformMapEntry(indexVar, valueVar, <transform>)
+//	<list>.transformMapEntry(indexVar, valueVar, <filter>, <transform>)
+//	<map>.transformMapEntry(keyVar, valueVar, <transform>)
+//	<map>.transformMapEntry(keyVar, valueVar, <filter>, <transform>)
+//
+// Examples:
+//
+//	// returns {'hello': 'greeting'}
+//	{'greeting': 'hello'}.transformMapEntry(keyVar, valueVar, {valueVar: keyVar})
+//	// reverse lookup, requires all values in the list to be unique
+//	[1, 2, 3].transformMapEntry(indexVar, valueVar, {valueVar: indexVar})
+//
+//	{'greeting': 'aloha', 'farewell': 'aloha'}
+//	    .transformMapEntry(keyVar, valueVar, {valueVar: keyVar}) // error, duplicate key
+func TwoVarComprehensions() cel.EnvOption {
+	return cel.Lib(compreV2Lib{})
+}
+
+type compreV2Lib struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (compreV2Lib) LibraryName() string {
+	return "cel.lib.ext.comprev2"
+}
+
+// CompileOptions implements the cel.Library interface method.
+func (compreV2Lib) CompileOptions() []cel.EnvOption { + kType := cel.TypeParamType("K") + vType := cel.TypeParamType("V") + mapKVType := cel.MapType(kType, vType) + opts := []cel.EnvOption{ + cel.Macros( + cel.ReceiverMacro("all", 3, quantifierAll), + cel.ReceiverMacro("exists", 3, quantifierExists), + cel.ReceiverMacro("existsOne", 3, quantifierExistsOne), + cel.ReceiverMacro("exists_one", 3, quantifierExistsOne), + cel.ReceiverMacro("transformList", 3, transformList), + cel.ReceiverMacro("transformList", 4, transformList), + cel.ReceiverMacro("transformMap", 3, transformMap), + cel.ReceiverMacro("transformMap", 4, transformMap), + cel.ReceiverMacro("transformMapEntry", 3, transformMapEntry), + cel.ReceiverMacro("transformMapEntry", 4, transformMapEntry), + ), + cel.Function(mapInsert, + cel.Overload(mapInsertOverloadKeyValue, []*cel.Type{mapKVType, kType, vType}, mapKVType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + m := args[0].(traits.Mapper) + k := args[1] + v := args[2] + return types.InsertMapKeyValue(m, k, v) + })), + cel.Overload(mapInsertOverloadMap, []*cel.Type{mapKVType, mapKVType}, mapKVType, + cel.BinaryBinding(func(targetMap, updateMap ref.Val) ref.Val { + tm := targetMap.(traits.Mapper) + um := updateMap.(traits.Mapper) + umIt := um.Iterator() + for umIt.HasNext() == types.True { + k := umIt.Next() + updateOrErr := types.InsertMapKeyValue(tm, k, um.Get(k)) + if types.IsError(updateOrErr) { + return updateOrErr + } + tm = updateOrErr.(traits.Mapper) + } + return tm + })), + ), + } + return opts +} + +// ProgramOptions implements the cel.Library interface method +func (compreV2Lib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func quantifierAll(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewLiteral(types.True), + /*condition=*/ mef.NewCall(operators.NotStrictlyFalse, mef.NewAccuIdent()), + /*step=*/ mef.NewCall(operators.LogicalAnd, mef.NewAccuIdent(), args[2]), + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func quantifierExists(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewLiteral(types.False), + /*condition=*/ mef.NewCall(operators.NotStrictlyFalse, mef.NewCall(operators.LogicalNot, mef.NewAccuIdent())), + /*step=*/ mef.NewCall(operators.LogicalOr, mef.NewAccuIdent(), args[2]), + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func quantifierExistsOne(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewLiteral(types.Int(0)), + /*condition=*/ mef.NewLiteral(types.True), + /*step=*/ mef.NewCall(operators.Conditional, args[2], + mef.NewCall(operators.Add, mef.NewAccuIdent(), mef.NewLiteral(types.Int(1))), + mef.NewAccuIdent()), + /*result=*/ mef.NewCall(operators.Equals, mef.NewAccuIdent(), mef.NewLiteral(types.Int(1))), + ), nil +} + +func 
transformList(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + var transform ast.Expr + var filter ast.Expr + if len(args) == 4 { + filter = args[2] + transform = args[3] + } else { + filter = nil + transform = args[2] + } + + // __result__ = __result__ + [transform] + step := mef.NewCall(operators.Add, mef.NewAccuIdent(), mef.NewList(transform)) + if filter != nil { + // __result__ = (filter) ? __result__ + [transform] : __result__ + step = mef.NewCall(operators.Conditional, filter, step, mef.NewAccuIdent()) + } + + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewList(), + /*condition=*/ mef.NewLiteral(types.True), + step, + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func transformMap(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + var transform ast.Expr + var filter ast.Expr + if len(args) == 4 { + filter = args[2] + transform = args[3] + } else { + filter = nil + transform = args[2] + } + + // __result__ = cel.@mapInsert(__result__, iterVar1, transform) + step := mef.NewCall(mapInsert, mef.NewAccuIdent(), mef.NewIdent(iterVar1), transform) + if filter != nil { + // __result__ = (filter) ? cel.@mapInsert(__result__, iterVar1, transform) : __result__ + step = mef.NewCall(operators.Conditional, filter, step, mef.NewAccuIdent()) + } + return mef.NewComprehensionTwoVar( + target, + iterVar1, + iterVar2, + parser.AccumulatorName, + /*accuInit=*/ mef.NewMap(), + /*condition=*/ mef.NewLiteral(types.True), + step, + /*result=*/ mef.NewAccuIdent(), + ), nil +} + +func transformMapEntry(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + iterVar1, iterVar2, err := extractIterVars(mef, args[0], args[1]) + if err != nil { + return nil, err + } + + var transform ast.Expr + var filter ast.Expr + if len(args) == 4 { + filter = args[2] + transform = args[3] + } else { + filter = nil + transform = args[2] + } + + // __result__ = cel.@mapInsert(__result__, transform) + step := mef.NewCall(mapInsert, mef.NewAccuIdent(), transform) + if filter != nil { + // __result__ = (filter) ? 
cel.@mapInsert(__result__, transform) : __result__
+		step = mef.NewCall(operators.Conditional, filter, step, mef.NewAccuIdent())
+	}
+	return mef.NewComprehensionTwoVar(
+		target,
+		iterVar1,
+		iterVar2,
+		parser.AccumulatorName,
+		/*accuInit=*/ mef.NewMap(),
+		/*condition=*/ mef.NewLiteral(types.True),
+		step,
+		/*result=*/ mef.NewAccuIdent(),
+	), nil
+}
+
+func extractIterVars(mef cel.MacroExprFactory, arg0, arg1 ast.Expr) (string, string, *cel.Error) {
+	iterVar1, err := extractIterVar(mef, arg0)
+	if err != nil {
+		return "", "", err
+	}
+	iterVar2, err := extractIterVar(mef, arg1)
+	if err != nil {
+		return "", "", err
+	}
+	if iterVar1 == iterVar2 {
+		return "", "", mef.NewError(arg1.ID(), fmt.Sprintf("duplicate variable name: %s", iterVar1))
+	}
+	if iterVar1 == parser.AccumulatorName {
+		return "", "", mef.NewError(arg0.ID(), "iteration variable overwrites accumulator variable")
+	}
+	if iterVar2 == parser.AccumulatorName {
+		return "", "", mef.NewError(arg1.ID(), "iteration variable overwrites accumulator variable")
+	}
+	return iterVar1, iterVar2, nil
+}
+
+func extractIterVar(mef cel.MacroExprFactory, target ast.Expr) (string, *cel.Error) {
+	iterVar, found := extractIdent(target)
+	if !found {
+		return "", mef.NewError(target.ID(), "argument must be a simple name")
+	}
+	return iterVar, nil
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/encoders.go b/hack/tools/vendor/github.com/google/cel-go/ext/encoders.go
new file mode 100644
index 0000000000..ac04b1a7b0
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/ext/encoders.go
@@ -0,0 +1,94 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+	"encoding/base64"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// Encoders returns a cel.EnvOption to configure extended functions for string, byte, and object
+// encodings.
+//
+// # Base64.Decode
+//
+// Decodes base64-encoded string to bytes.
+//
+// This function will return an error if the string input is not base64-encoded.
+//
+//	base64.decode(<string>) -> <bytes>
+//
+// Examples:
+//
+//	base64.decode('aGVsbG8=') // return b'hello'
+//	base64.decode('aGVsbG8') // return b'hello'
+//
+// # Base64.Encode
+//
+// Encodes bytes to a base64-encoded string.
+//
+//	base64.encode(<bytes>) -> <string>
+//
+// Examples:
+//
+//	base64.encode(b'hello') // return 'aGVsbG8='
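+//
+// A rough usage sketch (hypothetical caller code; this package is assumed to
+// be imported as `ext` alongside `cel`):
+//
+//	env, _ := cel.NewEnv(ext.Encoders())
+//	ast, _ := env.Compile(`base64.encode(b'hello')`)
+//	prg, _ := env.Program(ast)
+//	out, _, _ := prg.Eval(cel.NoVars()) // out == "aGVsbG8="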
+func Encoders() cel.EnvOption {
+	return cel.Lib(encoderLib{})
+}
+
+type encoderLib struct{}
+
+func (encoderLib) LibraryName() string {
+	return "cel.lib.ext.encoders"
+}
+
+func (encoderLib) CompileOptions() []cel.EnvOption {
+	return []cel.EnvOption{
+		cel.Function("base64.decode",
+			cel.Overload("base64_decode_string", []*cel.Type{cel.StringType}, cel.BytesType,
+				cel.UnaryBinding(func(str ref.Val) ref.Val {
+					s := str.(types.String)
+					return bytesOrError(base64DecodeString(string(s)))
+				}))),
+		cel.Function("base64.encode",
+			cel.Overload("base64_encode_bytes", []*cel.Type{cel.BytesType}, cel.StringType,
+				cel.UnaryBinding(func(bytes ref.Val) ref.Val {
+					b := bytes.(types.Bytes)
+					return stringOrError(base64EncodeBytes([]byte(b)))
+				}))),
+	}
+}
+
+func (encoderLib) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func base64DecodeString(str string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(str)
+	if err == nil {
+		return b, nil
+	}
+	if _, tryAltEncoding := err.(base64.CorruptInputError); tryAltEncoding {
+		return base64.RawStdEncoding.DecodeString(str)
+	}
+	return nil, err
+}
+
+func base64EncodeBytes(bytes []byte) (string, error) {
+	return base64.StdEncoding.EncodeToString(bytes), nil
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/formatting.go b/hack/tools/vendor/github.com/google/cel-go/ext/formatting.go
new file mode 100644
index 0000000000..dbff613b2a
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/ext/formatting.go
@@ -0,0 +1,904 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package ext + +import ( + "errors" + "fmt" + "math" + "sort" + "strconv" + "strings" + "unicode" + + "golang.org/x/text/language" + "golang.org/x/text/message" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +type clauseImpl func(ref.Val, string) (string, error) + +func clauseForType(argType ref.Type) (clauseImpl, error) { + switch argType { + case types.IntType, types.UintType: + return formatDecimal, nil + case types.StringType, types.BytesType, types.BoolType, types.NullType, types.TypeType: + return FormatString, nil + case types.TimestampType, types.DurationType: + // special case to ensure timestamps/durations get printed as CEL literals + return func(arg ref.Val, locale string) (string, error) { + argStrVal := arg.ConvertToType(types.StringType) + argStr := argStrVal.Value().(string) + if arg.Type() == types.TimestampType { + return fmt.Sprintf("timestamp(%q)", argStr), nil + } + if arg.Type() == types.DurationType { + return fmt.Sprintf("duration(%q)", argStr), nil + } + return "", fmt.Errorf("cannot convert argument of type %s to timestamp/duration", arg.Type().TypeName()) + }, nil + case types.ListType: + return formatList, nil + case types.MapType: + return formatMap, nil + case types.DoubleType: + // avoid formatFixed so we can output a period as the decimal separator in order + // to always be a valid CEL literal + return func(arg ref.Val, locale string) (string, error) { + argDouble, ok := arg.Value().(float64) + if !ok { + return "", fmt.Errorf("couldn't convert %s to float64", arg.Type().TypeName()) + } + fmtStr := fmt.Sprintf("%%.%df", defaultPrecision) + return fmt.Sprintf(fmtStr, argDouble), nil + }, nil + case types.TypeType: + return func(arg ref.Val, locale string) (string, error) { + return fmt.Sprintf("type(%s)", arg.Value().(string)), nil + }, nil + default: + return nil, fmt.Errorf("no formatting function for %s", argType.TypeName()) + } +} + +func formatList(arg ref.Val, locale string) (string, error) { + argList := arg.(traits.Lister) + argIterator := argList.Iterator() + var listStrBuilder strings.Builder + _, err := listStrBuilder.WriteRune('[') + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + for argIterator.HasNext() == types.True { + member := argIterator.Next() + memberFormat, err := clauseForType(member.Type()) + if err != nil { + return "", err + } + unquotedStr, err := memberFormat(member, locale) + if err != nil { + return "", err + } + str := quoteForCEL(member, unquotedStr) + _, err = listStrBuilder.WriteString(str) + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + if argIterator.HasNext() == types.True { + _, err = listStrBuilder.WriteString(", ") + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + } + } + _, err = listStrBuilder.WriteRune(']') + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + return listStrBuilder.String(), nil +} + +func formatMap(arg ref.Val, locale string) (string, error) { + argMap := arg.(traits.Mapper) + argIterator := argMap.Iterator() + type mapPair struct { + key string + value string + } + argPairs := make([]mapPair, argMap.Size().Value().(int64)) + i := 0 + for argIterator.HasNext() == types.True { + key := argIterator.Next() + var keyFormat clauseImpl + 
switch key.Type() { + case types.StringType, types.BoolType: + keyFormat = FormatString + case types.IntType, types.UintType: + keyFormat = formatDecimal + default: + return "", fmt.Errorf("no formatting function for map key of type %s", key.Type().TypeName()) + } + unquotedKeyStr, err := keyFormat(key, locale) + if err != nil { + return "", err + } + keyStr := quoteForCEL(key, unquotedKeyStr) + value, found := argMap.Find(key) + if !found { + return "", fmt.Errorf("could not find key: %q", key) + } + valueFormat, err := clauseForType(value.Type()) + if err != nil { + return "", err + } + unquotedValueStr, err := valueFormat(value, locale) + if err != nil { + return "", err + } + valueStr := quoteForCEL(value, unquotedValueStr) + argPairs[i] = mapPair{keyStr, valueStr} + i++ + } + sort.SliceStable(argPairs, func(x, y int) bool { + return argPairs[x].key < argPairs[y].key + }) + var mapStrBuilder strings.Builder + _, err := mapStrBuilder.WriteRune('{') + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + for i, entry := range argPairs { + _, err = mapStrBuilder.WriteString(fmt.Sprintf("%s:%s", entry.key, entry.value)) + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + if i < len(argPairs)-1 { + _, err = mapStrBuilder.WriteString(", ") + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + } + } + _, err = mapStrBuilder.WriteRune('}') + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + return mapStrBuilder.String(), nil +} + +// quoteForCEL takes a formatted, unquoted value and quotes it in a manner suitable +// for embedding directly in CEL. +func quoteForCEL(refVal ref.Val, unquotedValue string) string { + switch refVal.Type() { + case types.StringType: + return fmt.Sprintf("%q", unquotedValue) + case types.BytesType: + return fmt.Sprintf("b%q", unquotedValue) + case types.DoubleType: + // special case to handle infinity/NaN + num := refVal.Value().(float64) + if math.IsInf(num, 1) || math.IsInf(num, -1) || math.IsNaN(num) { + return fmt.Sprintf("%q", unquotedValue) + } + return unquotedValue + default: + return unquotedValue + } +} + +// FormatString returns the string representation of a CEL value. +// +// It is used to implement the %s specifier in the (string).format() extension function. 
+func FormatString(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.ListType: + return formatList(arg, locale) + case types.MapType: + return formatMap(arg, locale) + case types.IntType, types.UintType, types.DoubleType, + types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType: + argStrVal := arg.ConvertToType(types.StringType) + argStr, ok := argStrVal.Value().(string) + if !ok { + return "", fmt.Errorf("could not convert argument %q to string", argStrVal) + } + return argStr, nil + case types.NullType: + return "null", nil + default: + return "", stringFormatError(runtimeID, arg.Type().TypeName()) + } +} + +func formatDecimal(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.IntType: + argInt, ok := arg.ConvertToType(types.IntType).Value().(int64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value()) + } + return fmt.Sprintf("%d", argInt), nil + case types.UintType: + argInt, ok := arg.ConvertToType(types.UintType).Value().(uint64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value()) + } + return fmt.Sprintf("%d", argInt), nil + default: + return "", decimalFormatError(runtimeID, arg.Type().TypeName()) + } +} + +func matchLanguage(locale string) (language.Tag, error) { + matcher, err := makeMatcher(locale) + if err != nil { + return language.Und, err + } + tag, _ := language.MatchStrings(matcher, locale) + return tag, nil +} + +func makeMatcher(locale string) (language.Matcher, error) { + tags := make([]language.Tag, 0) + tag, err := language.Parse(locale) + if err != nil { + return nil, err + } + tags = append(tags, tag) + return language.NewMatcher(tags), nil +} + +type stringFormatter struct{} + +func (c *stringFormatter) String(arg ref.Val, locale string) (string, error) { + return FormatString(arg, locale) +} + +func (c *stringFormatter) Decimal(arg ref.Val, locale string) (string, error) { + return formatDecimal(arg, locale) +} + +func (c *stringFormatter) Fixed(precision *int) func(ref.Val, string) (string, error) { + if precision == nil { + precision = new(int) + *precision = defaultPrecision + } + return func(arg ref.Val, locale string) (string, error) { + strException := false + if arg.Type() == types.StringType { + argStr := arg.Value().(string) + if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" { + strException = true + } + } + if arg.Type() != types.DoubleType && !strException { + return "", fixedPointFormatError(runtimeID, arg.Type().TypeName()) + } + argFloatVal := arg.ConvertToType(types.DoubleType) + argFloat, ok := argFloatVal.Value().(float64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value()) + } + fmtStr := fmt.Sprintf("%%.%df", *precision) + + matchedLocale, err := matchLanguage(locale) + if err != nil { + return "", fmt.Errorf("error matching locale: %w", err) + } + return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil + } +} + +func (c *stringFormatter) Scientific(precision *int) func(ref.Val, string) (string, error) { + if precision == nil { + precision = new(int) + *precision = defaultPrecision + } + return func(arg ref.Val, locale string) (string, error) { + strException := false + if arg.Type() == types.StringType { + argStr := arg.Value().(string) + if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" { + strException = true + } + } + if arg.Type() != types.DoubleType && !strException { + 
return "", scientificFormatError(runtimeID, arg.Type().TypeName()) + } + argFloatVal := arg.ConvertToType(types.DoubleType) + argFloat, ok := argFloatVal.Value().(float64) + if !ok { + return "", fmt.Errorf("could not convert \"%v\" to float64", argFloatVal.Value()) + } + matchedLocale, err := matchLanguage(locale) + if err != nil { + return "", fmt.Errorf("error matching locale: %w", err) + } + fmtStr := fmt.Sprintf("%%%de", *precision) + return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil + } +} + +func (c *stringFormatter) Binary(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.IntType: + argInt := arg.Value().(int64) + // locale is intentionally unused as integers formatted as binary + // strings are locale-independent + return fmt.Sprintf("%b", argInt), nil + case types.UintType: + argInt := arg.Value().(uint64) + return fmt.Sprintf("%b", argInt), nil + case types.BoolType: + argBool := arg.Value().(bool) + if argBool { + return "1", nil + } + return "0", nil + default: + return "", binaryFormatError(runtimeID, arg.Type().TypeName()) + } +} + +func (c *stringFormatter) Hex(useUpper bool) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) (string, error) { + fmtStr := "%x" + if useUpper { + fmtStr = "%X" + } + switch arg.Type() { + case types.StringType, types.BytesType: + if arg.Type() == types.BytesType { + return fmt.Sprintf(fmtStr, arg.Value().([]byte)), nil + } + return fmt.Sprintf(fmtStr, arg.Value().(string)), nil + case types.IntType: + argInt, ok := arg.Value().(int64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value()) + } + return fmt.Sprintf(fmtStr, argInt), nil + case types.UintType: + argInt, ok := arg.Value().(uint64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value()) + } + return fmt.Sprintf(fmtStr, argInt), nil + default: + return "", hexFormatError(runtimeID, arg.Type().TypeName()) + } + } +} + +func (c *stringFormatter) Octal(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.IntType: + argInt := arg.Value().(int64) + return fmt.Sprintf("%o", argInt), nil + case types.UintType: + argInt := arg.Value().(uint64) + return fmt.Sprintf("%o", argInt), nil + default: + return "", octalFormatError(runtimeID, arg.Type().TypeName()) + } +} + +// stringFormatValidator implements the cel.ASTValidator interface allowing for static validation +// of string.format calls. +type stringFormatValidator struct{} + +// Name returns the name of the validator. +func (stringFormatValidator) Name() string { + return "cel.lib.ext.validate.functions.string.format" +} + +// Configure implements the ASTValidatorConfigurer interface and augments the list of functions to skip +// during homogeneous aggregate literal type-checks. +func (stringFormatValidator) Configure(config cel.MutableValidatorConfig) error { + functions := config.GetOrDefault(cel.HomogeneousAggregateLiteralExemptFunctions, []string{}).([]string) + functions = append(functions, "format") + return config.Set(cel.HomogeneousAggregateLiteralExemptFunctions, functions) +} + +// Validate parses all literal format strings and type checks the format clause against the argument +// at the corresponding ordinal within the list literal argument to the function, if one is specified. 
+func (stringFormatValidator) Validate(env *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) { + root := ast.NavigateAST(a) + formatCallExprs := ast.MatchDescendants(root, matchConstantFormatStringWithListLiteralArgs(a)) + for _, e := range formatCallExprs { + call := e.AsCall() + formatStr := call.Target().AsLiteral().Value().(string) + args := call.Args()[0].AsList().Elements() + formatCheck := &stringFormatChecker{ + args: args, + ast: a, + } + // use a placeholder locale, since locale doesn't affect syntax + _, err := parseFormatString(formatStr, formatCheck, formatCheck, "en_US") + if err != nil { + iss.ReportErrorAtID(getErrorExprID(e.ID(), err), err.Error()) + continue + } + seenArgs := formatCheck.argsRequested + if len(args) > seenArgs { + iss.ReportErrorAtID(e.ID(), + "too many arguments supplied to string.format (expected %d, got %d)", seenArgs, len(args)) + } + } +} + +// getErrorExprID determines which list literal argument triggered a type-disagreement for the +// purposes of more accurate error message reports. +func getErrorExprID(id int64, err error) int64 { + fmtErr, ok := err.(formatError) + if ok { + return fmtErr.id + } + wrapped := errors.Unwrap(err) + if wrapped != nil { + return getErrorExprID(id, wrapped) + } + return id +} + +// matchConstantFormatStringWithListLiteralArgs matches all valid expression nodes for string +// format checking. +func matchConstantFormatStringWithListLiteralArgs(a *ast.AST) ast.ExprMatcher { + return func(e ast.NavigableExpr) bool { + if e.Kind() != ast.CallKind { + return false + } + call := e.AsCall() + if !call.IsMemberFunction() || call.FunctionName() != "format" { + return false + } + overloadIDs := a.GetOverloadIDs(e.ID()) + if len(overloadIDs) != 0 { + found := false + for _, overload := range overloadIDs { + if overload == overloads.ExtFormatString { + found = true + break + } + } + if !found { + return false + } + } + formatString := call.Target() + if formatString.Kind() != ast.LiteralKind || formatString.AsLiteral().Type() != cel.StringType { + return false + } + args := call.Args() + if len(args) != 1 { + return false + } + formatArgs := args[0] + return formatArgs.Kind() == ast.ListKind + } +} + +// stringFormatChecker implements the formatStringInterpolater interface +type stringFormatChecker struct { + args []ast.Expr + argsRequested int + currArgIndex int64 + ast *ast.AST +} + +func (c *stringFormatChecker) String(arg ref.Val, locale string) (string, error) { + formatArg := c.args[c.currArgIndex] + valid, badID := c.verifyString(formatArg) + if !valid { + return "", stringFormatError(badID, c.typeOf(badID).TypeName()) + } + return "", nil +} + +func (c *stringFormatChecker) Decimal(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.IntType, types.UintType) + if !valid { + return "", decimalFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil +} + +func (c *stringFormatChecker) Fixed(precision *int) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + // we allow StringType since "NaN", "Infinity", and "-Infinity" are also valid values + valid := c.verifyTypeOneOf(id, types.DoubleType, types.StringType) + if !valid { + return "", fixedPointFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil + } +} + +func (c *stringFormatChecker) Scientific(precision *int) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) 
(string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.DoubleType, types.StringType) + if !valid { + return "", scientificFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil + } +} + +func (c *stringFormatChecker) Binary(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.IntType, types.UintType, types.BoolType) + if !valid { + return "", binaryFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil +} + +func (c *stringFormatChecker) Hex(useUpper bool) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.IntType, types.UintType, types.StringType, types.BytesType) + if !valid { + return "", hexFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil + } +} + +func (c *stringFormatChecker) Octal(arg ref.Val, locale string) (string, error) { + id := c.args[c.currArgIndex].ID() + valid := c.verifyTypeOneOf(id, types.IntType, types.UintType) + if !valid { + return "", octalFormatError(id, c.typeOf(id).TypeName()) + } + return "", nil +} + +func (c *stringFormatChecker) Arg(index int64) (ref.Val, error) { + c.argsRequested++ + c.currArgIndex = index + // return a dummy value - this is immediately passed to back to us + // through one of the FormatCallback functions, so anything will do + return types.Int(0), nil +} + +func (c *stringFormatChecker) Size() int64 { + return int64(len(c.args)) +} + +func (c *stringFormatChecker) typeOf(id int64) *cel.Type { + return c.ast.GetType(id) +} + +func (c *stringFormatChecker) verifyTypeOneOf(id int64, validTypes ...*cel.Type) bool { + t := c.typeOf(id) + if t == cel.DynType { + return true + } + for _, vt := range validTypes { + // Only check runtime type compatibility without delving deeper into parameterized types + if t.Kind() == vt.Kind() { + return true + } + } + return false +} + +func (c *stringFormatChecker) verifyString(sub ast.Expr) (bool, int64) { + paramA := cel.TypeParamType("A") + paramB := cel.TypeParamType("B") + subVerified := c.verifyTypeOneOf(sub.ID(), + cel.ListType(paramA), cel.MapType(paramA, paramB), + cel.IntType, cel.UintType, cel.DoubleType, cel.BoolType, cel.StringType, + cel.TimestampType, cel.BytesType, cel.DurationType, cel.TypeType, cel.NullType) + if !subVerified { + return false, sub.ID() + } + switch sub.Kind() { + case ast.ListKind: + for _, e := range sub.AsList().Elements() { + // recursively verify if we're dealing with a list/map + verified, id := c.verifyString(e) + if !verified { + return false, id + } + } + return true, sub.ID() + case ast.MapKind: + for _, e := range sub.AsMap().Entries() { + // recursively verify if we're dealing with a list/map + entry := e.AsMapEntry() + verified, id := c.verifyString(entry.Key()) + if !verified { + return false, id + } + verified, id = c.verifyString(entry.Value()) + if !verified { + return false, id + } + } + return true, sub.ID() + default: + return true, sub.ID() + } +} + +// helper routines for reporting common errors during string formatting static validation and +// runtime execution. 
+ +func binaryFormatError(id int64, badType string) error { + return newFormatError(id, "only integers and bools can be formatted as binary, was given %s", badType) +} + +func decimalFormatError(id int64, badType string) error { + return newFormatError(id, "decimal clause can only be used on integers, was given %s", badType) +} + +func fixedPointFormatError(id int64, badType string) error { + return newFormatError(id, "fixed-point clause can only be used on doubles, was given %s", badType) +} + +func hexFormatError(id int64, badType string) error { + return newFormatError(id, "only integers, byte buffers, and strings can be formatted as hex, was given %s", badType) +} + +func octalFormatError(id int64, badType string) error { + return newFormatError(id, "octal clause can only be used on integers, was given %s", badType) +} + +func scientificFormatError(id int64, badType string) error { + return newFormatError(id, "scientific clause can only be used on doubles, was given %s", badType) +} + +func stringFormatError(id int64, badType string) error { + return newFormatError(id, "string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps, was given %s", badType) +} + +type formatError struct { + id int64 + msg string +} + +func newFormatError(id int64, msg string, args ...any) error { + return formatError{ + id: id, + msg: fmt.Sprintf(msg, args...), + } +} + +func (e formatError) Error() string { + return e.msg +} + +func (e formatError) Is(target error) bool { + return e.msg == target.Error() +} + +// stringArgList implements the formatListArgs interface. +type stringArgList struct { + args traits.Lister +} + +func (c *stringArgList) Arg(index int64) (ref.Val, error) { + if index >= c.args.Size().Value().(int64) { + return nil, fmt.Errorf("index %d out of range", index) + } + return c.args.Get(types.Int(index)), nil +} + +func (c *stringArgList) Size() int64 { + return c.args.Size().Value().(int64) +} + +// formatStringInterpolator is an interface that allows user-defined behavior +// for formatting clause implementations, as well as argument retrieval. +// Each function is expected to support the appropriate types as laid out in +// the string.format documentation, and to return an error if given an inappropriate type. +type formatStringInterpolator interface { + // String takes a ref.Val and a string representing the current locale identifier + // and returns the Val formatted as a string, or an error if one occurred. + String(ref.Val, string) (string, error) + + // Decimal takes a ref.Val and a string representing the current locale identifier + // and returns the Val formatted as a decimal integer, or an error if one occurred. + Decimal(ref.Val, string) (string, error) + + // Fixed takes an int pointer representing precision (or nil if none was given) and + // returns a function operating in a similar manner to String and Decimal, taking a + // ref.Val and locale and returning the appropriate string. A closure is returned + // so precision can be set without needing an additional function call/configuration. + Fixed(*int) func(ref.Val, string) (string, error) + + // Scientific functions identically to Fixed, except the string returned from the closure + // is expected to be in scientific notation. + Scientific(*int) func(ref.Val, string) (string, error) + + // Binary takes a ref.Val and a string representing the current locale identifier + // and returns the Val formatted as a binary integer, or an error if one occurred. 
+ Binary(ref.Val, string) (string, error) + + // Hex takes a boolean that, if true, indicates the hex string output by the returned + // closure should use uppercase letters for A-F. + Hex(bool) func(ref.Val, string) (string, error) + + // Octal takes a ref.Val and a string representing the current locale identifier and + // returns the Val formatted in octal, or an error if one occurred. + Octal(ref.Val, string) (string, error) +} + +// formatListArgs is an interface that allows user-defined list-like datatypes to be used +// for formatting clause implementations. +type formatListArgs interface { + // Arg returns the ref.Val at the given index, or an error if one occurred. + Arg(int64) (ref.Val, error) + + // Size returns the length of the argument list. + Size() int64 +} + +// parseFormatString formats a string according to the string.format syntax, taking the clause implementations +// from the provided FormatCallback and the args from the given FormatList. +func parseFormatString(formatStr string, callback formatStringInterpolator, list formatListArgs, locale string) (string, error) { + i := 0 + argIndex := 0 + var builtStr strings.Builder + for i < len(formatStr) { + if formatStr[i] == '%' { + if i+1 < len(formatStr) && formatStr[i+1] == '%' { + err := builtStr.WriteByte('%') + if err != nil { + return "", fmt.Errorf("error writing format string: %w", err) + } + i += 2 + continue + } else { + argAny, err := list.Arg(int64(argIndex)) + if err != nil { + return "", err + } + if i+1 >= len(formatStr) { + return "", errors.New("unexpected end of string") + } + if int64(argIndex) >= list.Size() { + return "", fmt.Errorf("index %d out of range", argIndex) + } + numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale) + if refErr != nil { + return "", refErr + } + _, err = builtStr.WriteString(val) + if err != nil { + return "", fmt.Errorf("error writing format string: %w", err) + } + i += numRead + argIndex++ + } + } else { + err := builtStr.WriteByte(formatStr[i]) + if err != nil { + return "", fmt.Errorf("error writing format string: %w", err) + } + i++ + } + } + return builtStr.String(), nil +} + +// parseAndFormatClause parses the format clause at the start of the given string with val, and returns +// how many characters were consumed and the substituted string form of val, or an error if one occurred. 
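The clause parsing continues below with parseAndFormatClause and parsePrecision. As a hedged runtime sketch of the grammar they implement (a '%' introduces a clause, '.N' sets precision, and '%%' escapes a literal percent):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Strings())
	if err != nil {
		panic(err)
	}
	// ".3" is consumed by the precision parser, 'f' selects the fixed-point
	// clause, and "%%" is the literal-percent escape handled by the parser.
	ast, iss := env.Compile(`"%.3f%% done".format([2.0 / 3.0])`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // expect "0.667% done"
}
```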
+func parseAndFormatClause(formatStr string, val ref.Val, callback formatStringInterpolator, list formatListArgs, locale string) (int, string, error) { + i := 1 + read, formatter, err := parseFormattingClause(formatStr[i:], callback) + i += read + if err != nil { + return -1, "", newParseFormatError("could not parse formatting clause", err) + } + + valStr, err := formatter(val, locale) + if err != nil { + return -1, "", newParseFormatError("error during formatting", err) + } + return i, valStr, nil +} + +func parseFormattingClause(formatStr string, callback formatStringInterpolator) (int, clauseImpl, error) { + i := 0 + read, precision, err := parsePrecision(formatStr[i:]) + i += read + if err != nil { + return -1, nil, fmt.Errorf("error while parsing precision: %w", err) + } + r := rune(formatStr[i]) + i++ + switch r { + case 's': + return i, callback.String, nil + case 'd': + return i, callback.Decimal, nil + case 'f': + return i, callback.Fixed(precision), nil + case 'e': + return i, callback.Scientific(precision), nil + case 'b': + return i, callback.Binary, nil + case 'x', 'X': + return i, callback.Hex(unicode.IsUpper(r)), nil + case 'o': + return i, callback.Octal, nil + default: + return -1, nil, fmt.Errorf("unrecognized formatting clause \"%c\"", r) + } +} + +func parsePrecision(formatStr string) (int, *int, error) { + i := 0 + if formatStr[i] != '.' { + return i, nil, nil + } + i++ + var buffer strings.Builder + for { + if i >= len(formatStr) { + return -1, nil, errors.New("could not find end of precision specifier") + } + if !isASCIIDigit(rune(formatStr[i])) { + break + } + buffer.WriteByte(formatStr[i]) + i++ + } + precision, err := strconv.Atoi(buffer.String()) + if err != nil { + return -1, nil, fmt.Errorf("error while converting precision to integer: %w", err) + } + return i, &precision, nil +} + +func isASCIIDigit(r rune) bool { + return r <= unicode.MaxASCII && unicode.IsDigit(r) +} + +type parseFormatError struct { + msg string + wrapped error +} + +func newParseFormatError(msg string, wrapped error) error { + return parseFormatError{msg: msg, wrapped: wrapped} +} + +func (e parseFormatError) Error() string { + return fmt.Sprintf("%s: %s", e.msg, e.wrapped.Error()) +} + +func (e parseFormatError) Is(target error) bool { + return e.Error() == target.Error() +} + +func (e parseFormatError) Unwrap() error { + return e.wrapped +} + +const ( + runtimeID = int64(-1) +) diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/guards.go b/hack/tools/vendor/github.com/google/cel-go/ext/guards.go new file mode 100644 index 0000000000..ccede289fb --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/ext/guards.go @@ -0,0 +1,67 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ext + +import ( + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// function invocation guards for common call signatures within extension functions. 
+
+func intOrError(i int64, err error) ref.Val {
+	if err != nil {
+		return types.NewErr(err.Error())
+	}
+	return types.Int(i)
+}
+
+func bytesOrError(bytes []byte, err error) ref.Val {
+	if err != nil {
+		return types.NewErr(err.Error())
+	}
+	return types.Bytes(bytes)
+}
+
+func stringOrError(str string, err error) ref.Val {
+	if err != nil {
+		return types.NewErr(err.Error())
+	}
+	return types.String(str)
+}
+
+func listStringOrError(strs []string, err error) ref.Val {
+	if err != nil {
+		return types.NewErr(err.Error())
+	}
+	return types.DefaultTypeAdapter.NativeToValue(strs)
+}
+
+func extractIdent(target ast.Expr) (string, bool) {
+	switch target.Kind() {
+	case ast.IdentKind:
+		return target.AsIdent(), true
+	default:
+		return "", false
+	}
+}
+
+func macroTargetMatchesNamespace(ns string, target ast.Expr) bool {
+	if id, found := extractIdent(target); found {
+		return id == ns
+	}
+	return false
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/lists.go b/hack/tools/vendor/github.com/google/cel-go/ext/lists.go
new file mode 100644
index 0000000000..d0b90ea92e
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/ext/lists.go
@@ -0,0 +1,560 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+	"fmt"
+	"math"
+	"sort"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/decls"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+	"github.com/google/cel-go/parser"
+)
+
+var comparableTypes = []*cel.Type{
+	cel.IntType,
+	cel.UintType,
+	cel.DoubleType,
+	cel.BoolType,
+	cel.DurationType,
+	cel.TimestampType,
+	cel.StringType,
+	cel.BytesType,
+}
+
+// Lists returns a cel.EnvOption to configure extended functions for list manipulation.
+// As a general note, all indices are zero-based.
+//
+// # Distinct
+//
+// Introduced in version: 2
+//
+// Returns the distinct elements of a list.
+//
+//	<list(T)>.distinct() -> <list(T)>
+//
+// Examples:
+//
+//	[1, 2, 2, 3, 3, 3].distinct() // return [1, 2, 3]
+//	["b", "b", "c", "a", "c"].distinct() // return ["b", "c", "a"]
+//	[1, "b", 2, "b"].distinct() // return [1, "b", 2]
+//
+// # Range
+//
+// Introduced in version: 2
+//
+// Returns a list of integers from 0 to n-1.
+//
+//	lists.range(<int>) -> <list(int)>
+//
+// Examples:
+//
+//	lists.range(5) -> [0, 1, 2, 3, 4]
+//
+// # Reverse
+//
+// Introduced in version: 2
+//
+// Returns the elements of a list in reverse order.
+//
+//	<list(T)>.reverse() -> <list(T)>
+//
+// Examples:
+//
+//	[5, 3, 1, 2].reverse() // return [2, 1, 3, 5]
+//
+// # Slice
+//
+// Returns a new sub-list using the indexes provided.
+//
+//	<list>.slice(<int>, <int>) -> <list>
+//
+// Examples:
+//
+//	[1,2,3,4].slice(1, 3) // return [2, 3]
+//	[1,2,3,4].slice(2, 4) // return [3 ,4]
+//
+// # Flatten
+//
+// Flattens a list recursively.
+// If an optional depth is provided, the list is flattened to the specified level.
+// A negative depth value will result in an error.
+//
+//	<list>.flatten(<list>) -> <list>
+//	<list>.flatten(<list>, <int>) -> <list>
+//
+// Examples:
+//
+//	[1,[2,3],[4]].flatten() // return [1, 2, 3, 4]
+//	[1,[2,[3,4]]].flatten() // return [1, 2, [3, 4]]
+//	[1,2,[],[],[3,4]].flatten() // return [1, 2, 3, 4]
+//	[1,[2,[3,[4]]]].flatten(2) // return [1, 2, 3, [4]]
+//	[1,[2,[3,[4]]]].flatten(-1) // error
+//
+// # Sort
+//
+// Introduced in version: 2
+//
+// Sorts a list with comparable elements. If the element type is not comparable
+// or the element types are not the same, the function will produce an error.
+//
+//	<list(T)>.sort() -> <list(T)>
+//	T in {int, uint, double, bool, duration, timestamp, string, bytes}
+//
+// Examples:
+//
+//	[3, 2, 1].sort() // return [1, 2, 3]
+//	["b", "c", "a"].sort() // return ["a", "b", "c"]
+//	[1, "b"].sort() // error
+//	[[1, 2, 3]].sort() // error
+//
+// # SortBy
+//
+// Sorts a list by a key value, i.e., the order is determined by the result of
+// an expression applied to each element of the list.
+// The output of the key expression must be a comparable type, otherwise the
+// function will return an error.
+//
+//	<list(T)>.sortBy(<bindingName>, <keyExpr>) -> <list(T)>
+//	keyExpr returns a value in {int, uint, double, bool, duration, timestamp, string, bytes}
+//
+// Examples:
+//
+//	[
+//		Player { name: "foo", score: 0 },
+//		Player { name: "bar", score: -10 },
+//		Player { name: "baz", score: 1000 },
+//	].sortBy(e, e.score).map(e, e.name)
+//	== ["bar", "foo", "baz"]
+func Lists(options ...ListsOption) cel.EnvOption {
+	l := &listsLib{
+		version: math.MaxUint32,
+	}
+	for _, o := range options {
+		l = o(l)
+	}
+
+	return cel.Lib(l)
+}
+
+type listsLib struct {
+	version uint32
+}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (listsLib) LibraryName() string {
+	return "cel.lib.ext.lists"
+}
+
+// ListsOption is a functional interface for configuring the lists library.
+type ListsOption func(*listsLib) *listsLib
+
+// ListsVersion configures the version of the lists library.
+//
+// The version limits which functions are available. Only functions introduced
+// at or below the given version are included in the library. If this option
+// is not set, all functions are available.
+//
+// See the library documentation to determine which version a function was introduced.
+// If the documentation does not state which version a function was introduced, it can
+// be assumed to be introduced at version 0, when the library was first created.
+func ListsVersion(version uint32) ListsOption {
+	return func(lib *listsLib) *listsLib {
+		lib.version = version
+		return lib
+	}
+}
+
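Before the option wiring that follows, a hedged usage sketch of the functions documented above, assuming the default (latest) library version so the version-2 functions and the `sortBy` macro are all registered:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Lists())
	if err != nil {
		panic(err)
	}
	// flatten (v1), distinct and sort (v2), plus the sortBy macro.
	ast, iss := env.Compile(
		`[3, [1, 2], [3]].flatten().distinct().sort() == [1, 2, 3] &&
		 ['foo', 'ba', 'b'].sortBy(s, s.size()) == ['b', 'ba', 'foo']`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // expect true
}
```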
+// CompileOptions implements the Library interface method.
+func (lib listsLib) CompileOptions() []cel.EnvOption {
+	listType := cel.ListType(cel.TypeParamType("T"))
+	listListType := cel.ListType(listType)
+	listDyn := cel.ListType(cel.DynType)
+	opts := []cel.EnvOption{
+		cel.Function("slice",
+			cel.MemberOverload("list_slice",
+				[]*cel.Type{listType, cel.IntType, cel.IntType}, listType,
+				cel.FunctionBinding(func(args ...ref.Val) ref.Val {
+					list := args[0].(traits.Lister)
+					start := args[1].(types.Int)
+					end := args[2].(types.Int)
+					result, err := slice(list, start, end)
+					if err != nil {
+						return types.WrapErr(err)
+					}
+					return result
+				}),
+			),
+		),
+	}
+	if lib.version >= 1 {
+		opts = append(opts,
+			cel.Function("flatten",
+				cel.MemberOverload("list_flatten",
+					[]*cel.Type{listListType}, listType,
+					cel.UnaryBinding(func(arg ref.Val) ref.Val {
+						list, ok := arg.(traits.Lister)
+						if !ok {
+							return types.MaybeNoSuchOverloadErr(arg)
+						}
+						flatList, err := flatten(list, 1)
+						if err != nil {
+							return types.WrapErr(err)
+						}
+
+						return types.DefaultTypeAdapter.NativeToValue(flatList)
+					}),
+				),
+				cel.MemberOverload("list_flatten_int",
+					[]*cel.Type{listDyn, types.IntType}, listDyn,
+					cel.BinaryBinding(func(arg1, arg2 ref.Val) ref.Val {
+						list, ok := arg1.(traits.Lister)
+						if !ok {
+							return types.MaybeNoSuchOverloadErr(arg1)
+						}
+						depth, ok := arg2.(types.Int)
+						if !ok {
+							return types.MaybeNoSuchOverloadErr(arg2)
+						}
+						flatList, err := flatten(list, int64(depth))
+						if err != nil {
+							return types.WrapErr(err)
+						}
+
+						return types.DefaultTypeAdapter.NativeToValue(flatList)
+					}),
+				),
+				// To handle the case where a variable of just `list(T)` is provided at runtime
+				// with a graceful failure mode, disable the type guards since the implementation
+				// can handle lists which are already flat.
+				decls.DisableTypeGuards(true),
+			),
+		)
+	}
+	if lib.version >= 2 {
+		sortDecl := cel.Function("sort",
+			append(
+				templatedOverloads(comparableTypes, func(t *cel.Type) cel.FunctionOpt {
+					return cel.MemberOverload(
+						fmt.Sprintf("list_%s_sort", t.TypeName()),
+						[]*cel.Type{cel.ListType(t)}, cel.ListType(t),
+					)
+				}),
+				cel.SingletonUnaryBinding(
+					func(arg ref.Val) ref.Val {
+						list, ok := arg.(traits.Lister)
+						if !ok {
+							return types.MaybeNoSuchOverloadErr(arg)
+						}
+						sorted, err := sortList(list)
+						if err != nil {
+							return types.WrapErr(err)
+						}
+
+						return sorted
+					},
+					// List traits
+					traits.ListerType,
+				),
+			)...,
+		)
+		opts = append(opts, sortDecl)
+		opts = append(opts, cel.Macros(cel.ReceiverMacro("sortBy", 2, sortByMacro)))
+		opts = append(opts, cel.Function("@sortByAssociatedKeys",
+			append(
+				templatedOverloads(comparableTypes, func(u *cel.Type) cel.FunctionOpt {
+					return cel.MemberOverload(
+						fmt.Sprintf("list_%s_sortByAssociatedKeys", u.TypeName()),
+						[]*cel.Type{listType, cel.ListType(u)}, listType,
+					)
+				}),
+				cel.SingletonBinaryBinding(
+					func(arg1 ref.Val, arg2 ref.Val) ref.Val {
+						list, ok := arg1.(traits.Lister)
+						if !ok {
+							return types.MaybeNoSuchOverloadErr(arg1)
+						}
+						keys, ok := arg2.(traits.Lister)
+						if !ok {
+							return types.MaybeNoSuchOverloadErr(arg2)
+						}
+						sorted, err := sortListByAssociatedKeys(list, keys)
+						if err != nil {
+							return types.WrapErr(err)
+						}
+
+						return sorted
+					},
+					// List traits
+					traits.ListerType,
+				),
+			)...,
+		))
+
+		opts = append(opts, cel.Function("lists.range",
+			cel.Overload("lists_range",
+				[]*cel.Type{cel.IntType}, cel.ListType(cel.IntType),
+				cel.FunctionBinding(func(args ...ref.Val) ref.Val {
+					n := args[0].(types.Int)
+					result, err := genRange(n)
+					if err != nil {
+						return types.WrapErr(err)
+					}
+					return
result + }), + ), + )) + opts = append(opts, cel.Function("reverse", + cel.MemberOverload("list_reverse", + []*cel.Type{listType}, listType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + list := args[0].(traits.Lister) + result, err := reverseList(list) + if err != nil { + return types.WrapErr(err) + } + return result + }), + ), + )) + opts = append(opts, cel.Function("distinct", + cel.MemberOverload("list_distinct", + []*cel.Type{listType}, listType, + cel.UnaryBinding(func(list ref.Val) ref.Val { + result, err := distinctList(list.(traits.Lister)) + if err != nil { + return types.WrapErr(err) + } + return result + }), + ), + )) + } + + return opts +} + +// ProgramOptions implements the Library interface method. +func (listsLib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func genRange(n types.Int) (ref.Val, error) { + var newList []ref.Val + for i := types.Int(0); i < n; i++ { + newList = append(newList, i) + } + return types.DefaultTypeAdapter.NativeToValue(newList), nil +} + +func reverseList(list traits.Lister) (ref.Val, error) { + var newList []ref.Val + listLength := list.Size().(types.Int) + for i := types.Int(0); i < listLength; i++ { + val := list.Get(listLength - i - 1) + newList = append(newList, val) + } + return types.DefaultTypeAdapter.NativeToValue(newList), nil +} + +func slice(list traits.Lister, start, end types.Int) (ref.Val, error) { + listLength := list.Size().(types.Int) + if start < 0 || end < 0 { + return nil, fmt.Errorf("cannot slice(%d, %d), negative indexes not supported", start, end) + } + if start > end { + return nil, fmt.Errorf("cannot slice(%d, %d), start index must be less than or equal to end index", start, end) + } + if listLength < end { + return nil, fmt.Errorf("cannot slice(%d, %d), list is length %d", start, end, listLength) + } + + var newList []ref.Val + for i := types.Int(start); i < end; i++ { + val := list.Get(i) + newList = append(newList, val) + } + return types.DefaultTypeAdapter.NativeToValue(newList), nil +} + +func flatten(list traits.Lister, depth int64) ([]ref.Val, error) { + if depth < 0 { + return nil, fmt.Errorf("level must be non-negative") + } + + var newList []ref.Val + iter := list.Iterator() + + for iter.HasNext() == types.True { + val := iter.Next() + nestedList, isList := val.(traits.Lister) + + if !isList || depth == 0 { + newList = append(newList, val) + continue + } else { + flattenedList, err := flatten(nestedList, depth-1) + if err != nil { + return nil, err + } + + newList = append(newList, flattenedList...) + } + } + + return newList, nil +} + +func sortList(list traits.Lister) (ref.Val, error) { + return sortListByAssociatedKeys(list, list) +} + +// Internal function used for the implementation of sort() and sortBy(). +// +// Sorts a list of arbitrary elements, according to the order produced by sorting +// another list of comparable elements. If the element type of the keys is not +// comparable or the element types are not the same, the function will produce an error. 
+// +// .@sortByAssociatedKeys() -> +// U in {int, uint, double, bool, duration, timestamp, string, bytes} +// +// Example: +// +// ["foo", "bar", "baz"].@sortByAssociatedKeys([3, 1, 2]) // return ["bar", "baz", "foo"] +func sortListByAssociatedKeys(list, keys traits.Lister) (ref.Val, error) { + listLength := list.Size().(types.Int) + keysLength := keys.Size().(types.Int) + if listLength != keysLength { + return nil, fmt.Errorf( + "@sortByAssociatedKeys() expected a list of the same size as the associated keys list, but got %d and %d elements respectively", + listLength, + keysLength, + ) + } + if listLength == 0 { + return list, nil + } + elem := keys.Get(types.IntZero) + if _, ok := elem.(traits.Comparer); !ok { + return nil, fmt.Errorf("list elements must be comparable") + } + + sortedIndices := make([]ref.Val, 0, listLength) + for i := types.IntZero; i < listLength; i++ { + if keys.Get(i).Type() != elem.Type() { + return nil, fmt.Errorf("list elements must have the same type") + } + sortedIndices = append(sortedIndices, i) + } + + sort.Slice(sortedIndices, func(i, j int) bool { + iKey := keys.Get(sortedIndices[i]) + jKey := keys.Get(sortedIndices[j]) + return iKey.(traits.Comparer).Compare(jKey) == types.IntNegOne + }) + + sorted := make([]ref.Val, 0, listLength) + + for _, sortedIdx := range sortedIndices { + sorted = append(sorted, list.Get(sortedIdx)) + } + return types.DefaultTypeAdapter.NativeToValue(sorted), nil +} + +// sortByMacro transforms an expression like: +// +// mylistExpr.sortBy(e, -math.abs(e)) +// +// into something equivalent to: +// +// cel.bind( +// __sortBy_input__, +// myListExpr, +// __sortBy_input__.@sortByAssociatedKeys(__sortBy_input__.map(e, -math.abs(e)) +// ) +func sortByMacro(meh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + varIdent := meh.NewIdent("@__sortBy_input__") + varName := varIdent.AsIdent() + + targetKind := target.Kind() + if targetKind != ast.ListKind && + targetKind != ast.SelectKind && + targetKind != ast.IdentKind && + targetKind != ast.ComprehensionKind && targetKind != ast.CallKind { + return nil, meh.NewError(target.ID(), fmt.Sprintf("sortBy can only be applied to a list, identifier, comprehension, call or select expression")) + } + + mapCompr, err := parser.MakeMap(meh, meh.Copy(varIdent), args) + if err != nil { + return nil, err + } + callExpr := meh.NewMemberCall("@sortByAssociatedKeys", + meh.Copy(varIdent), + mapCompr, + ) + + bindExpr := meh.NewComprehension( + meh.NewList(), + "#unused", + varName, + target, + meh.NewLiteral(types.False), + varIdent, + callExpr, + ) + + return bindExpr, nil +} + +func distinctList(list traits.Lister) (ref.Val, error) { + listLength := list.Size().(types.Int) + if listLength == 0 { + return list, nil + } + uniqueList := make([]ref.Val, 0, listLength) + for i := types.IntZero; i < listLength; i++ { + val := list.Get(i) + seen := false + for j := types.IntZero; j < types.Int(len(uniqueList)); j++ { + if i == j { + continue + } + other := uniqueList[j] + if val.Equal(other) == types.True { + seen = true + break + } + } + if !seen { + uniqueList = append(uniqueList, val) + } + } + + return types.DefaultTypeAdapter.NativeToValue(uniqueList), nil +} + +func templatedOverloads(types []*cel.Type, template func(t *cel.Type) cel.FunctionOpt) []cel.FunctionOpt { + overloads := make([]cel.FunctionOpt, len(types)) + for i, t := range types { + overloads[i] = template(t) + } + return overloads +} diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/math.go 
b/hack/tools/vendor/github.com/google/cel-go/ext/math.go
new file mode 100644
index 0000000000..250246db15
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/ext/math.go
@@ -0,0 +1,901 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+	"fmt"
+	"math"
+	"strings"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+// Math returns a cel.EnvOption to configure namespaced math helper macros and
+// functions.
+//
+// Note, all macros use the 'math' namespace; however, at the time of macro
+// expansion the namespace looks just like any other identifier. If you are
+// currently using a variable named 'math', the macro will likely work just as
+// intended; however, there is some chance for collision.
+//
+// # Math.Greatest
+//
+// Returns the greatest valued number present in the arguments to the macro.
+//
+// Greatest is a variable argument count macro which must take at least one
+// argument. Simple numeric and list literals are supported as valid argument
+// types; however, other literals will be flagged as errors during macro
+// expansion. If the argument expression does not resolve to a numeric or
+// list(numeric) type during type-checking, or during runtime then an error
+// will be produced. If a list argument is empty, this too will produce an
+// error.
+//
+//	math.greatest(<arg>, ...) -> <double|int|uint>
+//
+// Examples:
+//
+//	math.greatest(1) // 1
+//	math.greatest(1u, 2u) // 2u
+//	math.greatest(-42.0, -21.5, -100.0) // -21.5
+//	math.greatest([-42.0, -21.5, -100.0]) // -21.5
+//	math.greatest(numbers) // numbers must be list(numeric)
+//
+//	math.greatest() // parse error
+//	math.greatest('string') // parse error
+//	math.greatest(a, b) // check-time error if a or b is non-numeric
+//	math.greatest(dyn('string')) // runtime error
+//
+// # Math.Least
+//
+// Returns the least valued number present in the arguments to the macro.
+//
+// Least is a variable argument count macro which must take at least one
+// argument. Simple numeric and list literals are supported as valid argument
+// types; however, other literals will be flagged as errors during macro
+// expansion. If the argument expression does not resolve to a numeric or
+// list(numeric) type during type-checking, or during runtime then an error
+// will be produced. If a list argument is empty, this too will produce an
+// error.
+//
+//	math.least(<arg>, ...) -> <double|int|uint>
+//
+// Examples:
+//
+//	math.least(1) // 1
+//	math.least(1u, 2u) // 1u
+//	math.least(-42.0, -21.5, -100.0) // -100.0
+//	math.least([-42.0, -21.5, -100.0]) // -100.0
+//	math.least(numbers) // numbers must be list(numeric)
+//
+//	math.least() // parse error
+//	math.least('string') // parse error
+//	math.least(a, b) // check-time error if a or b is non-numeric
+//	math.least(dyn('string')) // runtime error
+//
+// # Math.BitOr
+//
+// Introduced at version: 1
+//
+// Performs a bitwise-OR operation over two int or uint values.
+//
+//	math.bitOr(<int>, <int>) -> <int>
+//	math.bitOr(<uint>, <uint>) -> <uint>
+//
+// Examples:
+//
+//	math.bitOr(1u, 2u) // returns 3u
+//	math.bitOr(-2, -4) // returns -2
+//
+// # Math.BitAnd
+//
+// Introduced at version: 1
+//
+// Performs a bitwise-AND operation over two int or uint values.
+//
+//	math.bitAnd(<int>, <int>) -> <int>
+//	math.bitAnd(<uint>, <uint>) -> <uint>
+//
+// Examples:
+//
+//	math.bitAnd(3u, 2u) // returns 2u
+//	math.bitAnd(3, 5) // returns 3
+//	math.bitAnd(-3, -5) // returns -7
+//
+// # Math.BitXor
+//
+// Introduced at version: 1
+//
+//	math.bitXor(<int>, <int>) -> <int>
+//	math.bitXor(<uint>, <uint>) -> <uint>
+//
+// Performs a bitwise-XOR operation over two int or uint values.
+//
+// Examples:
+//
+//	math.bitXor(3u, 5u) // returns 6u
+//	math.bitXor(1, 3) // returns 2
+//
+// # Math.BitNot
+//
+// Introduced at version: 1
+//
+// Function which accepts a single int or uint and performs a bitwise-NOT
+// ones-complement of the given binary value.
+//
+//	math.bitNot(<int>) -> <int>
+//	math.bitNot(<uint>) -> <uint>
+//
+// Examples
+//
+//	math.bitNot(1) // returns -1
+//	math.bitNot(-1) // returns 0
+//	math.bitNot(0u) // returns 18446744073709551615u
+//
+// # Math.BitShiftLeft
+//
+// Introduced at version: 1
+//
+// Perform a left shift of bits on the first parameter, by the amount of bits
+// specified in the second parameter. The first parameter is either a uint or
+// an int. The second parameter must be an int.
+//
+// When the second parameter is 64 or greater, 0 will always be returned
+// since the number of bits shifted is greater than or equal to the total bit
+// length of the number being shifted. Negative valued bit shifts will result
+// in a runtime error.
+//
+//	math.bitShiftLeft(<int>, <int>) -> <int>
+//	math.bitShiftLeft(<uint>, <int>) -> <uint>
+//
+// Examples
+//
+//	math.bitShiftLeft(1, 2) // returns 4
+//	math.bitShiftLeft(-1, 2) // returns -4
+//	math.bitShiftLeft(1u, 2) // returns 4u
+//	math.bitShiftLeft(1u, 200) // returns 0u
+//
+// # Math.BitShiftRight
+//
+// Introduced at version: 1
+//
+// Perform a right shift of bits on the first parameter, by the amount of bits
+// specified in the second parameter. The first parameter is either a uint or
+// an int. The second parameter must be an int.
+//
+// When the second parameter is 64 or greater, 0 will always be returned since
+// the number of bits shifted is greater than or equal to the total bit length
+// of the number being shifted. Negative valued bit shifts will result in a
+// runtime error.
+//
+// The sign bit extension will not be preserved for this operation: vacant bits
+// on the left are filled with 0.
+//
+//	math.bitShiftRight(<int>, <int>) -> <int>
+//	math.bitShiftRight(<uint>, <int>) -> <uint>
+//
+// Examples
+//
+//	math.bitShiftRight(1024, 2) // returns 256
+//	math.bitShiftRight(1024u, 2) // returns 256u
+//	math.bitShiftRight(1024u, 64) // returns 0u
+//
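The rounding helpers are documented next; first, a hedged sketch exercising the bitwise functions just described, with expected values taken from the examples above:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	// Default (latest) math library version, so the version-1 bitwise
	// helpers documented above are available.
	env, err := cel.NewEnv(ext.Math())
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(
		`math.bitAnd(12, 10) == 8 &&
		 math.bitShiftLeft(1u, 3) == 8u &&
		 math.bitShiftRight(1024, 2) == 256`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // expect true
}
```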
+// # Math.Ceil
+//
+// Introduced at version: 1
+//
+// Compute the ceiling of a double value.
+//
+//	math.ceil(<double>) -> <double>
+//
+// Examples:
+//
+//	math.ceil(1.2) // returns 2.0
+//	math.ceil(-1.2) // returns -1.0
+//
+// # Math.Floor
+//
+// Introduced at version: 1
+//
+// Compute the floor of a double value.
+//
+//	math.floor(<double>) -> <double>
+//
+// Examples:
+//
+//	math.floor(1.2) // returns 1.0
+//	math.floor(-1.2) // returns -2.0
+//
+// # Math.Round
+//
+// Introduced at version: 1
+//
+// Rounds the double value to the nearest whole number with ties rounding away
+// from zero, e.g. 1.5 -> 2.0, -1.5 -> -2.0.
+//
+//	math.round(<double>) -> <double>
+//
+// Examples:
+//
+//	math.round(1.2) // returns 1.0
+//	math.round(1.5) // returns 2.0
+//	math.round(-1.5) // returns -2.0
+//
+// # Math.Trunc
+//
+// Introduced at version: 1
+//
+// Truncates the fractional portion of the double value.
+//
+//	math.trunc(<double>) -> <double>
+//
+// Examples:
+//
+//	math.trunc(-1.3) // returns -1.0
+//	math.trunc(1.3) // returns 1.0
+//
+// # Math.Abs
+//
+// Introduced at version: 1
+//
+// Returns the absolute value of the numeric type provided as input. If the
+// value is NaN, the output is NaN. If the input is int64 min, the function
+// will result in an overflow error.
+//
+//	math.abs(<double>) -> <double>
+//	math.abs(<int>) -> <int>
+//	math.abs(<uint>) -> <uint>
+//
+// Examples:
+//
+//	math.abs(-1) // returns 1
+//	math.abs(1) // returns 1
+//	math.abs(-9223372036854775808) // overflow error
+//
+// # Math.Sign
+//
+// Introduced at version: 1
+//
+// Returns the sign of the numeric type, either -1, 0, 1 as an int, double, or
+// uint depending on the overload. For floating point values, if NaN is
+// provided as input, the output is also NaN. The implementation does not
+// differentiate between positive and negative zero.
+//
+//	math.sign(<double>) -> <double>
+//	math.sign(<int>) -> <int>
+//	math.sign(<uint>) -> <uint>
+//
+// Examples:
+//
+//	math.sign(-42) // returns -1
+//	math.sign(0) // returns 0
+//	math.sign(42) // returns 1
+//
+// # Math.IsInf
+//
+// Introduced at version: 1
+//
+// Returns true if the input double value is -Inf or +Inf.
+//
+//	math.isInf(<double>) -> <bool>
+//
+// Examples:
+//
+//	math.isInf(1.0/0.0) // returns true
+//	math.isInf(1.2) // returns false
+//
+// # Math.IsNaN
+//
+// Introduced at version: 1
+//
+// Returns true if the input double value is NaN, false otherwise.
+//
+//	math.isNaN(<double>) -> <bool>
+//
+// Examples:
+//
+//	math.isNaN(0.0/0.0) // returns true
+//	math.isNaN(1.2) // returns false
+//
+// # Math.IsFinite
+//
+// Introduced at version: 1
+//
+// Returns true if the value is a finite number. Equivalent in behavior to:
+// !math.isNaN(double) && !math.isInf(double)
+//
+//	math.isFinite(<double>) -> <bool>
+//
+// Examples:
+//
+//	math.isFinite(0.0/0.0) // returns false
+//	math.isFinite(1.2) // returns true
+func Math(options ...MathOption) cel.EnvOption {
+	m := &mathLib{version: math.MaxUint32}
+	for _, o := range options {
+		m = o(m)
+	}
+	return cel.Lib(m)
+}
+
+const (
+	mathNamespace = "math"
+	leastMacro    = "least"
+	greatestMacro = "greatest"
+
+	// Min-max functions
+	minFunc = "math.@min"
+	maxFunc = "math.@max"
+
+	// Rounding functions
+	ceilFunc  = "math.ceil"
+	floorFunc = "math.floor"
+	roundFunc = "math.round"
+	truncFunc = "math.trunc"
+
+	// Floating point helper functions
+	isInfFunc    = "math.isInf"
+	isNanFunc    = "math.isNaN"
+	isFiniteFunc = "math.isFinite"
+
+	// Signedness functions
+	absFunc  = "math.abs"
+	signFunc = "math.sign"
+
+	// Bitwise functions
+	bitAndFunc        = "math.bitAnd"
+	bitOrFunc         = "math.bitOr"
+	bitXorFunc        = "math.bitXor"
+	bitNotFunc        = "math.bitNot"
+	bitShiftLeftFunc  = "math.bitShiftLeft"
+	bitShiftRightFunc = "math.bitShiftRight"
+)
+
+var (
+	errIntOverflow = types.NewErr("integer overflow")
+)
+
+// MathOption declares a functional operator for configuring math extensions.
+type MathOption func(*mathLib) *mathLib
+
+// MathVersion sets the library version for math extensions.
+func MathVersion(version uint32) MathOption {
+	return func(lib *mathLib) *mathLib {
+		lib.version = version
+		return lib
+	}
+}
+
+type mathLib struct {
+	version uint32
+}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (*mathLib) LibraryName() string {
+	return "cel.lib.ext.math"
+}
+
+// CompileOptions implements the Library interface method.
+func (lib *mathLib) CompileOptions() []cel.EnvOption {
+	opts := []cel.EnvOption{
+		cel.Macros(
+			// math.least(num, ...)
+			cel.ReceiverVarArgMacro(leastMacro, mathLeast),
+			// math.greatest(num, ...)
+ cel.ReceiverVarArgMacro(greatestMacro, mathGreatest), + ), + cel.Function(minFunc, + cel.Overload("math_@min_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(identity)), + cel.Overload("math_@min_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(identity)), + cel.Overload("math_@min_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(identity)), + cel.Overload("math_@min_double_double", []*cel.Type{cel.DoubleType, cel.DoubleType}, cel.DoubleType, + cel.BinaryBinding(minPair)), + cel.Overload("math_@min_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(minPair)), + cel.Overload("math_@min_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(minPair)), + cel.Overload("math_@min_int_uint", []*cel.Type{cel.IntType, cel.UintType}, cel.DynType, + cel.BinaryBinding(minPair)), + cel.Overload("math_@min_int_double", []*cel.Type{cel.IntType, cel.DoubleType}, cel.DynType, + cel.BinaryBinding(minPair)), + cel.Overload("math_@min_double_int", []*cel.Type{cel.DoubleType, cel.IntType}, cel.DynType, + cel.BinaryBinding(minPair)), + cel.Overload("math_@min_double_uint", []*cel.Type{cel.DoubleType, cel.UintType}, cel.DynType, + cel.BinaryBinding(minPair)), + cel.Overload("math_@min_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.DynType, + cel.BinaryBinding(minPair)), + cel.Overload("math_@min_uint_double", []*cel.Type{cel.UintType, cel.DoubleType}, cel.DynType, + cel.BinaryBinding(minPair)), + cel.Overload("math_@min_list_double", []*cel.Type{cel.ListType(cel.DoubleType)}, cel.DoubleType, + cel.UnaryBinding(minList)), + cel.Overload("math_@min_list_int", []*cel.Type{cel.ListType(cel.IntType)}, cel.IntType, + cel.UnaryBinding(minList)), + cel.Overload("math_@min_list_uint", []*cel.Type{cel.ListType(cel.UintType)}, cel.UintType, + cel.UnaryBinding(minList)), + ), + cel.Function(maxFunc, + cel.Overload("math_@max_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(identity)), + cel.Overload("math_@max_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(identity)), + cel.Overload("math_@max_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(identity)), + cel.Overload("math_@max_double_double", []*cel.Type{cel.DoubleType, cel.DoubleType}, cel.DoubleType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_int_uint", []*cel.Type{cel.IntType, cel.UintType}, cel.DynType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_int_double", []*cel.Type{cel.IntType, cel.DoubleType}, cel.DynType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_double_int", []*cel.Type{cel.DoubleType, cel.IntType}, cel.DynType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_double_uint", []*cel.Type{cel.DoubleType, cel.UintType}, cel.DynType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.DynType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_uint_double", []*cel.Type{cel.UintType, cel.DoubleType}, cel.DynType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_list_double", []*cel.Type{cel.ListType(cel.DoubleType)}, cel.DoubleType, + cel.UnaryBinding(maxList)), + cel.Overload("math_@max_list_int", 
[]*cel.Type{cel.ListType(cel.IntType)}, cel.IntType, + cel.UnaryBinding(maxList)), + cel.Overload("math_@max_list_uint", []*cel.Type{cel.ListType(cel.UintType)}, cel.UintType, + cel.UnaryBinding(maxList)), + ), + } + if lib.version >= 1 { + opts = append(opts, + // Rounding function declarations + cel.Function(ceilFunc, + cel.Overload("math_ceil_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(ceil))), + cel.Function(floorFunc, + cel.Overload("math_floor_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(floor))), + cel.Function(roundFunc, + cel.Overload("math_round_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(round))), + cel.Function(truncFunc, + cel.Overload("math_trunc_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(trunc))), + + // Floating point helpers + cel.Function(isInfFunc, + cel.Overload("math_isInf_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isInf))), + cel.Function(isNanFunc, + cel.Overload("math_isNaN_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isNaN))), + cel.Function(isFiniteFunc, + cel.Overload("math_isFinite_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isFinite))), + + // Signedness functions + cel.Function(absFunc, + cel.Overload("math_abs_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(absDouble)), + cel.Overload("math_abs_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(absInt)), + cel.Overload("math_abs_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(identity)), + ), + cel.Function(signFunc, + cel.Overload("math_sign_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(sign)), + cel.Overload("math_sign_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(sign)), + cel.Overload("math_sign_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(sign)), + ), + + // Bitwise operator declarations + cel.Function(bitAndFunc, + cel.Overload("math_bitAnd_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitAndPairInt)), + cel.Overload("math_bitAnd_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitAndPairUint)), + ), + cel.Function(bitOrFunc, + cel.Overload("math_bitOr_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitOrPairInt)), + cel.Overload("math_bitOr_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitOrPairUint)), + ), + cel.Function(bitXorFunc, + cel.Overload("math_bitXor_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitXorPairInt)), + cel.Overload("math_bitXor_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitXorPairUint)), + ), + cel.Function(bitNotFunc, + cel.Overload("math_bitNot_int_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(bitNotInt)), + cel.Overload("math_bitNot_uint_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(bitNotUint)), + ), + cel.Function(bitShiftLeftFunc, + cel.Overload("math_bitShiftLeft_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitShiftLeftIntInt)), + cel.Overload("math_bitShiftLeft_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.UintType, + cel.BinaryBinding(bitShiftLeftUintInt)), + ), + cel.Function(bitShiftRightFunc, + cel.Overload("math_bitShiftRight_int_int", 
[]*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitShiftRightIntInt)), + cel.Overload("math_bitShiftRight_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.UintType, + cel.BinaryBinding(bitShiftRightUintInt)), + ), + ) + } + return opts +} + +// ProgramOptions implements the Library interface method. +func (*mathLib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func mathLeast(meh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + if !macroTargetMatchesNamespace(mathNamespace, target) { + return nil, nil + } + switch len(args) { + case 0: + return nil, meh.NewError(target.ID(), "math.least() requires at least one argument") + case 1: + if isListLiteralWithNumericArgs(args[0]) || isNumericArgType(args[0]) { + return meh.NewCall(minFunc, args[0]), nil + } + return nil, meh.NewError(args[0].ID(), "math.least() invalid single argument value") + case 2: + err := checkInvalidArgs(meh, "math.least()", args) + if err != nil { + return nil, err + } + return meh.NewCall(minFunc, args...), nil + default: + err := checkInvalidArgs(meh, "math.least()", args) + if err != nil { + return nil, err + } + return meh.NewCall(minFunc, meh.NewList(args...)), nil + } +} + +func mathGreatest(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + if !macroTargetMatchesNamespace(mathNamespace, target) { + return nil, nil + } + switch len(args) { + case 0: + return nil, mef.NewError(target.ID(), "math.greatest() requires at least one argument") + case 1: + if isListLiteralWithNumericArgs(args[0]) || isNumericArgType(args[0]) { + return mef.NewCall(maxFunc, args[0]), nil + } + return nil, mef.NewError(args[0].ID(), "math.greatest() invalid single argument value") + case 2: + err := checkInvalidArgs(mef, "math.greatest()", args) + if err != nil { + return nil, err + } + return mef.NewCall(maxFunc, args...), nil + default: + err := checkInvalidArgs(mef, "math.greatest()", args) + if err != nil { + return nil, err + } + return mef.NewCall(maxFunc, mef.NewList(args...)), nil + } +} + +func identity(val ref.Val) ref.Val { + return val +} + +func ceil(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Ceil(float64(v))) +} + +func floor(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Floor(float64(v))) +} + +func round(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Round(float64(v))) +} + +func trunc(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Trunc(float64(v))) +} + +func isInf(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Bool(math.IsInf(float64(v), 0)) +} + +func isFinite(val ref.Val) ref.Val { + v := float64(val.(types.Double)) + return types.Bool(!math.IsInf(v, 0) && !math.IsNaN(v)) +} + +func isNaN(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Bool(math.IsNaN(float64(v))) +} + +func absDouble(val ref.Val) ref.Val { + v := float64(val.(types.Double)) + return types.Double(math.Abs(v)) +} + +func absInt(val ref.Val) ref.Val { + v := int64(val.(types.Int)) + if v == math.MinInt64 { + return errIntOverflow + } + if v >= 0 { + return val + } + return -types.Int(v) +} + +func sign(val ref.Val) ref.Val { + switch v := val.(type) { + case types.Double: + if isNaN(v) == types.True { + return v + } + zero := types.Double(0) + if v > zero { + return types.Double(1) + } + if v < zero { + return types.Double(-1) + } + return zero + case types.Int: + return 
v.Compare(types.IntZero) + case types.Uint: + if v == types.Uint(0) { + return types.Uint(0) + } + return types.Uint(1) + default: + return maybeSuffixError(val, "math.sign") + } +} + +func bitAndPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l & r +} + +func bitAndPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l & r +} + +func bitOrPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l | r +} + +func bitOrPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l | r +} + +func bitXorPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l ^ r +} + +func bitXorPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l ^ r +} + +func bitNotInt(value ref.Val) ref.Val { + v := value.(types.Int) + return ^v +} + +func bitNotUint(value ref.Val) ref.Val { + v := value.(types.Uint) + return ^v +} + +func bitShiftLeftIntInt(value, bits ref.Val) ref.Val { + v := value.(types.Int) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftLeft() negative offset: %d", bs) + } + return v << bs +} + +func bitShiftLeftUintInt(value, bits ref.Val) ref.Val { + v := value.(types.Uint) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftLeft() negative offset: %d", bs) + } + return v << bs +} + +func bitShiftRightIntInt(value, bits ref.Val) ref.Val { + v := value.(types.Int) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftRight() negative offset: %d", bs) + } + return types.Int(types.Uint(v) >> bs) +} + +func bitShiftRightUintInt(value, bits ref.Val) ref.Val { + v := value.(types.Uint) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftRight() negative offset: %d", bs) + } + return v >> bs +} + +func minPair(first, second ref.Val) ref.Val { + cmp, ok := first.(traits.Comparer) + if !ok { + return types.MaybeNoSuchOverloadErr(first) + } + out := cmp.Compare(second) + if types.IsUnknownOrError(out) { + return maybeSuffixError(out, "math.@min") + } + if out == types.IntOne { + return second + } + return first +} + +func minList(numList ref.Val) ref.Val { + l := numList.(traits.Lister) + size := l.Size().(types.Int) + if size == types.IntZero { + return types.NewErr("math.@min(list) argument must not be empty") + } + min := l.Get(types.IntZero) + for i := types.IntOne; i < size; i++ { + min = minPair(min, l.Get(i)) + } + switch min.Type() { + case types.IntType, types.DoubleType, types.UintType, types.UnknownType: + return min + default: + return types.NewErr("no such overload: math.@min") + } +} + +func maxPair(first, second ref.Val) ref.Val { + cmp, ok := first.(traits.Comparer) + if !ok { + return types.MaybeNoSuchOverloadErr(first) + } + out := cmp.Compare(second) + if types.IsUnknownOrError(out) { + return maybeSuffixError(out, "math.@max") + } + if out == types.IntNegOne { + return second + } + return first +} + +func maxList(numList ref.Val) ref.Val { + l := numList.(traits.Lister) + size := l.Size().(types.Int) + if size == types.IntZero { + return types.NewErr("math.@max(list) argument must not be empty") + } + max := l.Get(types.IntZero) + for i := types.IntOne; i < size; i++ { + max = maxPair(max, l.Get(i)) + } + switch max.Type() { + case types.IntType, 
types.DoubleType, types.UintType, types.UnknownType: + return max + default: + return types.NewErr("no such overload: math.@max") + } +} + +func checkInvalidArgs(meh cel.MacroExprFactory, funcName string, args []ast.Expr) *cel.Error { + for _, arg := range args { + err := checkInvalidArgLiteral(funcName, arg) + if err != nil { + return meh.NewError(arg.ID(), err.Error()) + } + } + return nil +} + +func checkInvalidArgLiteral(funcName string, arg ast.Expr) error { + if !isNumericArgType(arg) { + return fmt.Errorf("%s simple literal arguments must be numeric", funcName) + } + return nil +} + +func isNumericArgType(arg ast.Expr) bool { + switch arg.Kind() { + case ast.LiteralKind: + c := ref.Val(arg.AsLiteral()) + switch c.(type) { + case types.Double, types.Int, types.Uint: + return true + default: + return false + } + case ast.ListKind, ast.MapKind, ast.StructKind: + return false + default: + return true + } +} + +func isListLiteralWithNumericArgs(arg ast.Expr) bool { + switch arg.Kind() { + case ast.ListKind: + list := arg.AsList() + if list.Size() == 0 { + return false + } + for _, e := range list.Elements() { + if !isNumericArgType(e) { + return false + } + } + return true + } + return false +} + +func maybeSuffixError(val ref.Val, suffix string) ref.Val { + if types.IsError(val) { + msg := val.(*types.Err).String() + if !strings.Contains(msg, suffix) { + return types.NewErr("%s: %s", msg, suffix) + } + } + return val +} diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/native.go b/hack/tools/vendor/github.com/google/cel-go/ext/native.go new file mode 100644 index 0000000000..36ab4a7aec --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/ext/native.go @@ -0,0 +1,782 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ext + +import ( + "errors" + "fmt" + "reflect" + "strings" + "time" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/pb" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + + structpb "google.golang.org/protobuf/types/known/structpb" +) + +var ( + nativeObjTraitMask = traits.FieldTesterType | traits.IndexerType + jsonValueType = reflect.TypeOf(&structpb.Value{}) + jsonStructType = reflect.TypeOf(&structpb.Struct{}) +) + +// NativeTypes creates a type provider which uses reflect.Type and reflect.Value instances +// to produce type definitions that can be used within CEL. +// +// All struct types in Go are exposed to CEL via their simple package name and struct type name: +// +// ```go +// package identity +// +// type Account struct { +// ID int +// } +// +// ``` +// +// The type `identity.Account` would be exported to CEL using the same qualified name, e.g. +// `identity.Account{ID: 1234}` would create a new `Account` instance with the `ID` field +// populated. 
+//
+// Only exported fields are exposed via NativeTypes, and the type-mapping between Go and CEL
+// is as follows:
+//
+//	| Go type                             | CEL type  |
+//	|-------------------------------------|-----------|
+//	| bool                                | bool      |
+//	| []byte                              | bytes     |
+//	| float32, float64                    | double    |
+//	| int, int8, int16, int32, int64      | int       |
+//	| string                              | string    |
+//	| uint, uint8, uint16, uint32, uint64 | uint      |
+//	| time.Duration                       | duration  |
+//	| time.Time                           | timestamp |
+//	| array, slice                        | list      |
+//	| map                                 | map       |
+//
+// Please note, if you intend to configure support for proto messages in addition to native
+// types, you will need to provide the protobuf types before the golang native types. The
+// same advice holds if you are using custom type adapters and type providers. The native type
+// provider composes over whichever type adapter and provider is configured in the cel.Env at
+// the time that it is invoked.
+//
+// There is also the possibility to rename the fields of native structs by setting the `cel` tag
+// for fields you want to override. In order to enable this feature, pass in the `ParseStructTags(true)`
+// option. Here is an example to see it in action:
+//
+// ```go
+// package identity
+//
+// type Account struct {
+//	ID int
+//	OwnerName string `cel:"owner"`
+// }
+//
+// ```
+//
+// The `OwnerName` field is now accessible in CEL via `owner`, e.g. `identity.Account{owner: 'bob'}`.
+// In case there are duplicated field names in the struct, an error will be returned.
+func NativeTypes(args ...any) cel.EnvOption {
+	return func(env *cel.Env) (*cel.Env, error) {
+		nativeTypes := make([]any, 0, len(args))
+		tpOptions := nativeTypeOptions{}
+
+		for _, v := range args {
+			switch v := v.(type) {
+			case NativeTypesOption:
+				err := v(&tpOptions)
+				if err != nil {
+					return nil, err
+				}
+			default:
+				nativeTypes = append(nativeTypes, v)
+			}
+		}
+
+		tp, err := newNativeTypeProvider(tpOptions, env.CELTypeAdapter(), env.CELTypeProvider(), nativeTypes...)
+		if err != nil {
+			return nil, err
+		}
+
+		env, err = cel.CustomTypeAdapter(tp)(env)
+		if err != nil {
+			return nil, err
+		}
+		return cel.CustomTypeProvider(tp)(env)
+	}
+}
+
+// NativeTypesOption is a functional interface for configuring handling of native types.
+type NativeTypesOption func(*nativeTypeOptions) error
+
+// NativeTypesFieldNameHandler is a handler for mapping a reflect.StructField to a CEL field name.
+// This can be used to override the default Go struct field to CEL field name mapping.
+type NativeTypesFieldNameHandler = func(field reflect.StructField) string
+
+func fieldNameByTag(structTagToParse string) func(field reflect.StructField) string {
+	return func(field reflect.StructField) string {
+		tag, found := field.Tag.Lookup(structTagToParse)
+		if found {
+			splits := strings.Split(tag, ",")
+			if len(splits) > 0 {
+				// We make the assumption that the leftmost entry in the tag is the name.
+				// This seems to be true for most tags that have the concept of a name/key, such as:
+				// https://pkg.go.dev/encoding/xml#Marshal
+				// https://pkg.go.dev/encoding/json#Marshal
+				// https://pkg.go.dev/go.mongodb.org/mongo-driver/bson#hdr-Structs
+				// https://pkg.go.dev/gopkg.in/yaml.v2#Marshal
+				name := splits[0]
+				return name
+			}
+		}
+
+		return field.Name
+	}
+}
+
+type nativeTypeOptions struct {
+	// fieldNameHandler controls how CEL should perform struct field renames.
+	// This is most commonly used for switching to parsing based off the struct field tag,
+	// such as "cel" or "json".
+ fieldNameHandler NativeTypesFieldNameHandler +} + +// ParseStructTags configures if native types field names should be overridable by CEL struct tags. +// This is equivalent to ParseStructTag("cel") +func ParseStructTags(enabled bool) NativeTypesOption { + return func(ntp *nativeTypeOptions) error { + if enabled { + ntp.fieldNameHandler = fieldNameByTag("cel") + } else { + ntp.fieldNameHandler = nil + } + return nil + } +} + +// ParseStructTag configures the struct tag to parse. The 0th item in the tag is used as the name of the CEL field. +// For example: +// If the tag to parse is "cel" and the struct field has tag cel:"foo", the CEL struct field will be "foo". +// If the tag to parse is "json" and the struct field has tag json:"foo,omitempty", the CEL struct field will be "foo". +func ParseStructTag(tag string) NativeTypesOption { + return func(ntp *nativeTypeOptions) error { + ntp.fieldNameHandler = fieldNameByTag(tag) + return nil + } +} + +// ParseStructField configures how to parse Go struct fields. It can be used to customize struct field parsing. +func ParseStructField(handler NativeTypesFieldNameHandler) NativeTypesOption { + return func(ntp *nativeTypeOptions) error { + ntp.fieldNameHandler = handler + return nil + } +} + +func newNativeTypeProvider(tpOptions nativeTypeOptions, adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) { + nativeTypes := make(map[string]*nativeType, len(refTypes)) + for _, refType := range refTypes { + switch rt := refType.(type) { + case reflect.Type: + result, err := newNativeTypes(tpOptions.fieldNameHandler, rt) + if err != nil { + return nil, err + } + for idx := range result { + nativeTypes[result[idx].TypeName()] = result[idx] + } + case reflect.Value: + result, err := newNativeTypes(tpOptions.fieldNameHandler, rt.Type()) + if err != nil { + return nil, err + } + for idx := range result { + nativeTypes[result[idx].TypeName()] = result[idx] + } + default: + return nil, fmt.Errorf("unsupported native type: %v (%T) must be reflect.Type or reflect.Value", rt, rt) + } + } + return &nativeTypeProvider{ + nativeTypes: nativeTypes, + baseAdapter: adapter, + baseProvider: provider, + options: tpOptions, + }, nil +} + +type nativeTypeProvider struct { + nativeTypes map[string]*nativeType + baseAdapter types.Adapter + baseProvider types.Provider + options nativeTypeOptions +} + +// EnumValue proxies to the types.Provider configured at the times the NativeTypes +// option was configured. +func (tp *nativeTypeProvider) EnumValue(enumName string) ref.Val { + return tp.baseProvider.EnumValue(enumName) +} + +// FindIdent looks up natives type instances by qualified identifier, and if not found +// proxies to the composed types.Provider. +func (tp *nativeTypeProvider) FindIdent(typeName string) (ref.Val, bool) { + if t, found := tp.nativeTypes[typeName]; found { + return t, true + } + return tp.baseProvider.FindIdent(typeName) +} + +// FindStructType looks up the CEL type definition by qualified identifier, and if not found +// proxies to the composed types.Provider. 
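The ParseStructTags option above changes which field names CEL sees. A short sketch of the tag-driven renaming, reusing the hypothetical Account shape from the docs with a `cel` tag (names and values are illustrative):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

// Account renames OwnerName to "owner" for CEL via the cel struct tag.
type Account struct {
	ID        int
	OwnerName string `cel:"owner"`
}

func main() {
	env, err := cel.NewEnv(
		// Options and reflect.Type values may be mixed in the NativeTypes args.
		ext.NativeTypes(reflect.TypeOf(&Account{}), ext.ParseStructTags(true)),
		cel.Variable("account", cel.DynType),
	)
	if err != nil {
		panic(err)
	}
	// The tagged field is addressed as `owner`, not `OwnerName`.
	ast, iss := env.Compile(`account.owner == 'bob'`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"account": &Account{ID: 1, OwnerName: "bob"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```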
+func (tp *nativeTypeProvider) FindStructType(typeName string) (*types.Type, bool) { + if _, found := tp.nativeTypes[typeName]; found { + return types.NewTypeTypeWithParam(types.NewObjectType(typeName)), true + } + if celType, found := tp.baseProvider.FindStructType(typeName); found { + return celType, true + } + return tp.baseProvider.FindStructType(typeName) +} + +func toFieldName(fieldNameHandler NativeTypesFieldNameHandler, f reflect.StructField) string { + if fieldNameHandler == nil { + return f.Name + } + + return fieldNameHandler(f) +} + +// FindStructFieldNames looks up the type definition first from the native types, then from +// the backing provider type set. If found, a set of field names corresponding to the type +// will be returned. +func (tp *nativeTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) { + if t, found := tp.nativeTypes[typeName]; found { + fieldCount := t.refType.NumField() + fields := make([]string, fieldCount) + for i := 0; i < fieldCount; i++ { + fields[i] = toFieldName(tp.options.fieldNameHandler, t.refType.Field(i)) + } + return fields, true + } + if celTypeFields, found := tp.baseProvider.FindStructFieldNames(typeName); found { + return celTypeFields, true + } + return tp.baseProvider.FindStructFieldNames(typeName) +} + +// FindStructFieldType looks up a native type's field definition, and if the type name is not a native +// type then proxies to the composed types.Provider. +func (tp *nativeTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) { + t, found := tp.nativeTypes[typeName] + if !found { + return tp.baseProvider.FindStructFieldType(typeName, fieldName) + } + refField, isDefined := t.hasField(fieldName) + if !found || !isDefined { + return nil, false + } + celType, ok := convertToCelType(refField.Type) + if !ok { + return nil, false + } + return &types.FieldType{ + Type: celType, + IsSet: func(obj any) bool { + refVal := reflect.Indirect(reflect.ValueOf(obj)) + refField := refVal.FieldByName(refField.Name) + return !refField.IsZero() + }, + GetFrom: func(obj any) (any, error) { + refVal := reflect.Indirect(reflect.ValueOf(obj)) + refField := refVal.FieldByName(refField.Name) + return getFieldValue(refField), nil + }, + }, true +} + +// NewValue implements the ref.TypeProvider interface method. +func (tp *nativeTypeProvider) NewValue(typeName string, fields map[string]ref.Val) ref.Val { + t, found := tp.nativeTypes[typeName] + if !found { + return tp.baseProvider.NewValue(typeName, fields) + } + refPtr := reflect.New(t.refType) + refVal := refPtr.Elem() + for fieldName, val := range fields { + refFieldDef, isDefined := t.hasField(fieldName) + if !isDefined { + return types.NewErr("no such field: %s", fieldName) + } + fieldVal, err := val.ConvertToNative(refFieldDef.Type) + if err != nil { + return types.NewErr(err.Error()) + } + refField := refVal.FieldByIndex(refFieldDef.Index) + refFieldVal := reflect.ValueOf(fieldVal) + refField.Set(refFieldVal) + } + return tp.NativeToValue(refPtr.Interface()) +} + +// NativeToValue adapts native values to CEL values and will proxy to the composed type adapter +// for non-native types.
+func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val { + if val == nil { + return types.NullValue + } + if v, ok := val.(ref.Val); ok { + return v + } + rawVal := reflect.ValueOf(val) + refVal := rawVal + if refVal.Kind() == reflect.Ptr { + refVal = reflect.Indirect(refVal) + } + // This isn't quite right if you're also supporting proto, + // but maybe an acceptable limitation. + switch refVal.Kind() { + case reflect.Array, reflect.Slice: + switch val := val.(type) { + case []byte: + return tp.baseAdapter.NativeToValue(val) + default: + if refVal.Type().Elem() == reflect.TypeOf(byte(0)) { + return tp.baseAdapter.NativeToValue(val) + } + return types.NewDynamicList(tp, val) + } + case reflect.Map: + return types.NewDynamicMap(tp, val) + case reflect.Struct: + switch val := val.(type) { + case proto.Message, *pb.Map, protoreflect.List, protoreflect.Message, protoreflect.Value, + time.Time: + return tp.baseAdapter.NativeToValue(val) + default: + return tp.newNativeObject(val, rawVal) + } + default: + return tp.baseAdapter.NativeToValue(val) + } +} + +func convertToCelType(refType reflect.Type) (*cel.Type, bool) { + switch refType.Kind() { + case reflect.Bool: + return cel.BoolType, true + case reflect.Float32, reflect.Float64: + return cel.DoubleType, true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if refType == durationType { + return cel.DurationType, true + } + return cel.IntType, true + case reflect.String: + return cel.StringType, true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cel.UintType, true + case reflect.Array, reflect.Slice: + refElem := refType.Elem() + if refElem == reflect.TypeOf(byte(0)) { + return cel.BytesType, true + } + elemType, ok := convertToCelType(refElem) + if !ok { + return nil, false + } + return cel.ListType(elemType), true + case reflect.Map: + keyType, ok := convertToCelType(refType.Key()) + if !ok { + return nil, false + } + // Ensure the key type is a int, bool, uint, string + elemType, ok := convertToCelType(refType.Elem()) + if !ok { + return nil, false + } + return cel.MapType(keyType, elemType), true + case reflect.Struct: + if refType == timestampType { + return cel.TimestampType, true + } + return cel.ObjectType( + fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), + ), true + case reflect.Pointer: + if refType.Implements(pbMsgInterfaceType) { + pbMsg := reflect.New(refType.Elem()).Interface().(protoreflect.ProtoMessage) + return cel.ObjectType(string(pbMsg.ProtoReflect().Descriptor().FullName())), true + } + return convertToCelType(refType.Elem()) + } + return nil, false +} + +func (tp *nativeTypeProvider) newNativeObject(val any, refValue reflect.Value) ref.Val { + valType, err := newNativeType(tp.options.fieldNameHandler, refValue.Type()) + if err != nil { + return types.NewErr(err.Error()) + } + return &nativeObj{ + Adapter: tp, + val: val, + valType: valType, + refValue: refValue, + } +} + +type nativeObj struct { + types.Adapter + val any + valType *nativeType + refValue reflect.Value +} + +// ConvertToNative implements the ref.Val interface method. +// +// CEL does not have a notion of pointers, so whether a field is a pointer or value +// is handled as part of this conversion step. 
+func (o *nativeObj) ConvertToNative(typeDesc reflect.Type) (any, error) { + if o.refValue.Type() == typeDesc { + return o.val, nil + } + if o.refValue.Kind() == reflect.Pointer && o.refValue.Type().Elem() == typeDesc { + return o.refValue.Elem().Interface(), nil + } + if typeDesc.Kind() == reflect.Pointer && o.refValue.Type() == typeDesc.Elem() { + ptr := reflect.New(typeDesc.Elem()) + ptr.Elem().Set(o.refValue) + return ptr.Interface(), nil + } + switch typeDesc { + case jsonValueType: + jsonStruct, err := o.ConvertToNative(jsonStructType) + if err != nil { + return nil, err + } + return structpb.NewStructValue(jsonStruct.(*structpb.Struct)), nil + case jsonStructType: + refVal := reflect.Indirect(o.refValue) + refType := refVal.Type() + fields := make(map[string]*structpb.Value, refVal.NumField()) + for i := 0; i < refVal.NumField(); i++ { + fieldType := refType.Field(i) + fieldValue := refVal.Field(i) + if !fieldValue.IsValid() || fieldValue.IsZero() { + continue + } + fieldName := toFieldName(o.valType.fieldNameHandler, fieldType) + fieldCELVal := o.NativeToValue(fieldValue.Interface()) + fieldJSONVal, err := fieldCELVal.ConvertToNative(jsonValueType) + if err != nil { + return nil, err + } + fields[fieldName] = fieldJSONVal.(*structpb.Value) + } + return &structpb.Struct{Fields: fields}, nil + } + return nil, fmt.Errorf("type conversion error from '%v' to '%v'", o.Type(), typeDesc) +} + +// ConvertToType implements the ref.Val interface method. +func (o *nativeObj) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case types.TypeType: + return o.valType + default: + if typeVal.TypeName() == o.valType.typeName { + return o + } + } + return types.NewErr("type conversion error from '%s' to '%s'", o.Type(), typeVal) +} + +// Equal implements the ref.Val interface method. +// +// Note that in Go, a pointer to a value is not equal to the value it contains. +// In CEL, pointers and the values to which they point are equal. +func (o *nativeObj) Equal(other ref.Val) ref.Val { + otherNtv, ok := other.(*nativeObj) + if !ok { + return types.False + } + val := o.val + otherVal := otherNtv.val + refVal := o.refValue + otherRefVal := otherNtv.refValue + if refVal.Kind() != otherRefVal.Kind() { + if refVal.Kind() == reflect.Pointer { + val = refVal.Elem().Interface() + } else if otherRefVal.Kind() == reflect.Pointer { + otherVal = otherRefVal.Elem().Interface() + } + } + return types.Bool(reflect.DeepEqual(val, otherVal)) +} + +// IsZeroValue indicates whether the contained Golang value is a zero value. +// +// Golang largely follows proto3 semantics for zero values. +func (o *nativeObj) IsZeroValue() bool { + return reflect.Indirect(o.refValue).IsZero() +} + +// IsSet tests whether a field which is defined is set to a non-default value. +func (o *nativeObj) IsSet(field ref.Val) ref.Val { + refField, refErr := o.getReflectedField(field) + if refErr != nil { + return refErr + } + return types.Bool(!refField.IsZero()) +} + +// Get returns the value of a field name.
+func (o *nativeObj) Get(field ref.Val) ref.Val { + refField, refErr := o.getReflectedField(field) + if refErr != nil { + return refErr + } + return adaptFieldValue(o, refField) +} + +func (o *nativeObj) getReflectedField(field ref.Val) (reflect.Value, ref.Val) { + fieldName, ok := field.(types.String) + if !ok { + return reflect.Value{}, types.MaybeNoSuchOverloadErr(field) + } + fieldNameStr := string(fieldName) + refField, isDefined := o.valType.hasField(fieldNameStr) + if !isDefined { + return reflect.Value{}, types.NewErr("no such field: %s", fieldName) + } + refVal := reflect.Indirect(o.refValue) + return refVal.FieldByIndex(refField.Index), nil +} + +// Type implements the ref.Val interface method. +func (o *nativeObj) Type() ref.Type { + return o.valType +} + +// Value implements the ref.Val interface method. +func (o *nativeObj) Value() any { + return o.val +} + +func newNativeTypes(fieldNameHandler NativeTypesFieldNameHandler, rawType reflect.Type) ([]*nativeType, error) { + nt, err := newNativeType(fieldNameHandler, rawType) + if err != nil { + return nil, err + } + result := []*nativeType{nt} + + alreadySeen := make(map[string]struct{}) + var iterateStructMembers func(reflect.Type) + iterateStructMembers = func(t reflect.Type) { + if k := t.Kind(); k == reflect.Pointer || k == reflect.Slice || k == reflect.Array || k == reflect.Map { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return + } + if _, seen := alreadySeen[t.String()]; seen { + return + } + alreadySeen[t.String()] = struct{}{} + nt, ntErr := newNativeType(fieldNameHandler, t) + if ntErr != nil { + err = ntErr + return + } + result = append(result, nt) + + for idx := 0; idx < t.NumField(); idx++ { + iterateStructMembers(t.Field(idx).Type) + } + } + iterateStructMembers(rawType) + + return result, err +} + +var ( + errDuplicatedFieldName = errors.New("field name already exists in struct") +) + +func newNativeType(fieldNameHandler NativeTypesFieldNameHandler, rawType reflect.Type) (*nativeType, error) { + refType := rawType + if refType.Kind() == reflect.Pointer { + refType = refType.Elem() + } + if !isValidObjectType(refType) { + return nil, fmt.Errorf("unsupported reflect.Type %v, must be reflect.Struct", rawType) + } + + // Since naming collisions can only happen with struct tag parsing, we only check for them if it is enabled. + if fieldNameHandler != nil { + fieldNames := make(map[string]struct{}) + + for idx := 0; idx < refType.NumField(); idx++ { + field := refType.Field(idx) + fieldName := toFieldName(fieldNameHandler, field) + + if _, found := fieldNames[fieldName]; found { + return nil, fmt.Errorf("invalid field name `%s` in struct `%s`: %w", fieldName, refType.Name(), errDuplicatedFieldName) + } else { + fieldNames[fieldName] = struct{}{} + } + } + } + + return &nativeType{ + typeName: fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), + refType: refType, + fieldNameHandler: fieldNameHandler, + }, nil +} + +type nativeType struct { + typeName string + refType reflect.Type + fieldNameHandler NativeTypesFieldNameHandler +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (t *nativeType) ConvertToNative(typeDesc reflect.Type) (any, error) { + return nil, fmt.Errorf("type conversion error for type to '%v'", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. 
+func (t *nativeType) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case types.TypeType: + return types.TypeType + } + return types.NewErr("type conversion error from '%s' to '%s'", types.TypeType, typeVal) +} + +// Equal returns true if both type names are equal to each other. +func (t *nativeType) Equal(other ref.Val) ref.Val { + otherType, ok := other.(ref.Type) + return types.Bool(ok && t.TypeName() == otherType.TypeName()) +} + +// HasTrait implements the ref.Type interface method. +func (t *nativeType) HasTrait(trait int) bool { + return nativeObjTraitMask&trait == trait +} + +// String implements the strings.Stringer interface method. +func (t *nativeType) String() string { + return t.typeName +} + +// Type implements the ref.Val interface method. +func (t *nativeType) Type() ref.Type { + return types.TypeType +} + +// TypeName implements the ref.Type interface method. +func (t *nativeType) TypeName() string { + return t.typeName +} + +// Value implements the ref.Val interface method. +func (t *nativeType) Value() any { + return t.typeName +} + +// fieldByName returns the corresponding reflect.StructField for the given name either by matching +// field tag or field name. +func (t *nativeType) fieldByName(fieldName string) (reflect.StructField, bool) { + if t.fieldNameHandler == nil { + return t.refType.FieldByName(fieldName) + } + + for i := 0; i < t.refType.NumField(); i++ { + f := t.refType.Field(i) + if toFieldName(t.fieldNameHandler, f) == fieldName { + return f, true + } + } + + return reflect.StructField{}, false +} + +// hasField returns whether a field name has a corresponding Golang reflect.StructField. +func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) { + f, found := t.fieldByName(fieldName) + if !found || !f.IsExported() || !isSupportedType(f.Type) { + return reflect.StructField{}, false + } + return f, true +} + +func adaptFieldValue(adapter types.Adapter, refField reflect.Value) ref.Val { + return adapter.NativeToValue(getFieldValue(refField)) +} + +func getFieldValue(refField reflect.Value) any { + if refField.IsZero() { + switch refField.Kind() { + case reflect.Struct: + if refField.Type() == timestampType { + return time.Unix(0, 0) + } + case reflect.Pointer: + return reflect.New(refField.Type().Elem()).Interface() + } + } + return refField.Interface() +} + +func simplePkgAlias(pkgPath string) string { + paths := strings.Split(pkgPath, "/") + if len(paths) == 0 { + return "" + } + return paths[len(paths)-1] +} + +func isValidObjectType(refType reflect.Type) bool { + return refType.Kind() == reflect.Struct +} + +func isSupportedType(refType reflect.Type) bool { + switch refType.Kind() { + case reflect.Chan, reflect.Complex64, reflect.Complex128, reflect.Func, reflect.UnsafePointer, reflect.Uintptr: + return false + case reflect.Array, reflect.Slice: + return isSupportedType(refType.Elem()) + case reflect.Map: + return isSupportedType(refType.Key()) && isSupportedType(refType.Elem()) + } + return true +} + +var ( + pbMsgInterfaceType = reflect.TypeOf((*protoreflect.ProtoMessage)(nil)).Elem() + timestampType = reflect.TypeOf(time.Now()) + durationType = reflect.TypeOf(time.Nanosecond) +) diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/protos.go b/hack/tools/vendor/github.com/google/cel-go/ext/protos.go new file mode 100644 index 0000000000..68796f60ad --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/ext/protos.go @@ -0,0 +1,140 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License,
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ext + +import ( + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" +) + +// Protos returns a cel.EnvOption to configure extended macros and functions for +// proto manipulation. +// +// Note, all macros use the 'proto' namespace; however, at the time of macro +// expansion the namespace looks just like any other identifier. If you are +// currently using a variable named 'proto', the macro will likely work just as +// intended; however, there is some chance for collision. +// +// # Protos.GetExt +// +// Macro which generates a select expression that retrieves an extension field +// from the input proto2 syntax message. If the field is not set, the default +// value for the extension field is returned according to safe-traversal semantics. +// +// proto.getExt(<msg>, <fully.qualified.extension.name>) -> <field-value> +// +// Examples: +// +// proto.getExt(msg, google.expr.proto2.test.int32_ext) // returns int value +// +// # Protos.HasExt +// +// Macro which generates a test-only select expression that determines whether +// an extension field is set on a proto2 syntax message. +// +// proto.hasExt(<msg>, <fully.qualified.extension.name>) -> <bool> +// +// Examples: +// +// proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false +func Protos() cel.EnvOption { + return cel.Lib(protoLib{}) +} + +var ( + protoNamespace = "proto" + hasExtension = "hasExt" + getExtension = "getExt" +) + +type protoLib struct{} + +// LibraryName implements the SingletonLibrary interface method. +func (protoLib) LibraryName() string { + return "cel.lib.ext.protos" +} + +// CompileOptions implements the Library interface method. +func (protoLib) CompileOptions() []cel.EnvOption { + return []cel.EnvOption{ + cel.Macros( + // proto.getExt(msg, select_expression) + cel.ReceiverMacro(getExtension, 2, getProtoExt), + // proto.hasExt(msg, select_expression) + cel.ReceiverMacro(hasExtension, 2, hasProtoExt), + ), + } +} + +// ProgramOptions implements the Library interface method. +func (protoLib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +// hasProtoExt generates a test-only select expression for a fully-qualified extension name on a protobuf message. +func hasProtoExt(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + if !macroTargetMatchesNamespace(protoNamespace, target) { + return nil, nil + } + extensionField, err := getExtFieldName(mef, args[1]) + if err != nil { + return nil, err + } + return mef.NewPresenceTest(args[0], extensionField), nil +} + +// getProtoExt generates a select expression for a fully-qualified extension name on a protobuf message.
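Because the macros expand at parse time, their effect can be observed without registering any proto descriptors. A parse-only sketch (the msg identifier and extension name are the placeholders from the docs above; the printed form assumes the standard unparser):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Protos())
	if err != nil {
		panic(err)
	}
	// Parse-only: no type checking, so neither msg nor the extension
	// needs to be declared for the macro expansion to be visible.
	ast, iss := env.Parse(`proto.hasExt(msg, google.expr.proto2.test.int32_ext)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	str, err := cel.AstToString(ast)
	if err != nil {
		panic(err)
	}
	// Should print the expanded presence test:
	// has(msg.google.expr.proto2.test.int32_ext)
	fmt.Println(str)
}
```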
+func getProtoExt(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + if !macroTargetMatchesNamespace(protoNamespace, target) { + return nil, nil + } + extFieldName, err := getExtFieldName(mef, args[1]) + if err != nil { + return nil, err + } + return mef.NewSelect(args[0], extFieldName), nil +} + +func getExtFieldName(mef cel.MacroExprFactory, expr ast.Expr) (string, *cel.Error) { + isValid := false + extensionField := "" + switch expr.Kind() { + case ast.SelectKind: + extensionField, isValid = validateIdentifier(expr) + } + if !isValid { + return "", mef.NewError(expr.ID(), "invalid extension field") + } + return extensionField, nil +} + +func validateIdentifier(expr ast.Expr) (string, bool) { + switch expr.Kind() { + case ast.IdentKind: + return expr.AsIdent(), true + case ast.SelectKind: + sel := expr.AsSelect() + if sel.IsTestOnly() { + return "", false + } + opStr, isIdent := validateIdentifier(sel.Operand()) + if !isIdent { + return "", false + } + return opStr + "." + sel.FieldName(), true + default: + return "", false + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/sets.go b/hack/tools/vendor/github.com/google/cel-go/ext/sets.go new file mode 100644 index 0000000000..7e94166552 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/ext/sets.go @@ -0,0 +1,261 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ext + +import ( + "math" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/interpreter" +) + +// Sets returns a cel.EnvOption to configure namespaced set relationship +// functions. +// +// There is no set type within CEL, and while one may be introduced in the +// future, there are cases where a `list` type is known to behave like a set. +// For such cases, this library provides some basic functionality for +// determining set containment, equivalence, and intersection. +// +// # Sets.Contains +// +// Returns whether the first list argument contains all elements in the second +// list argument. The list may contain elements of any type and standard CEL +// equality is used to determine whether a value exists in both lists. If the +// second list is empty, the result will always return true. +// +// sets.contains(list(T), list(T)) -> bool +// +// Examples: +// +// sets.contains([], []) // true +// sets.contains([], [1]) // false +// sets.contains([1, 2, 3, 4], [2, 3]) // true +// sets.contains([1, 2.0, 3u], [1.0, 2u, 3]) // true +// +// # Sets.Equivalent +// +// Returns whether the first and second list are set equivalent. Lists are set +// equivalent if for every item in the first list, there is an element in the +// second which is equal. 
The lists may not be of the same size as they do not +// guarantee the elements within them are unique, so size does not factor into +// the computation. +// +// Examples: +// +// sets.equivalent([], []) // true +// sets.equivalent([1], [1, 1]) // true +// sets.equivalent([1], [1u, 1.0]) // true +// sets.equivalent([1, 2, 3], [3u, 2.0, 1]) // true +// +// # Sets.Intersects +// +// Returns whether the first list has at least one element whose value is equal +// to an element in the second list. If either list is empty, the result will +// be false. +// +// Examples: +// +// sets.intersects([1], []) // false +// sets.intersects([1], [1, 2]) // true +// sets.intersects([[1], [2, 3]], [[1, 2], [2, 3.0]]) // true +func Sets() cel.EnvOption { + return cel.Lib(setsLib{}) +} + +type setsLib struct{} + +// LibraryName implements the SingletonLibrary interface method. +func (setsLib) LibraryName() string { + return "cel.lib.ext.sets" +} + +// CompileOptions implements the Library interface method. +func (setsLib) CompileOptions() []cel.EnvOption { + listType := cel.ListType(cel.TypeParamType("T")) + return []cel.EnvOption{ + cel.Function("sets.contains", + cel.Overload("list_sets_contains_list", []*cel.Type{listType, listType}, cel.BoolType, + cel.BinaryBinding(setsContains))), + cel.Function("sets.equivalent", + cel.Overload("list_sets_equivalent_list", []*cel.Type{listType, listType}, cel.BoolType, + cel.BinaryBinding(setsEquivalent))), + cel.Function("sets.intersects", + cel.Overload("list_sets_intersects_list", []*cel.Type{listType, listType}, cel.BoolType, + cel.BinaryBinding(setsIntersects))), + cel.CostEstimatorOptions( + checker.OverloadCostEstimate("list_sets_contains_list", estimateSetsCost(1)), + checker.OverloadCostEstimate("list_sets_intersects_list", estimateSetsCost(1)), + // equivalence requires potentially two m*n comparisons to ensure each list is contained by the other + checker.OverloadCostEstimate("list_sets_equivalent_list", estimateSetsCost(2)), + ), + } +} + +// ProgramOptions implements the Library interface method. +func (setsLib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{ + cel.CostTrackerOptions( + interpreter.OverloadCostTracker("list_sets_contains_list", trackSetsCost(1)), + interpreter.OverloadCostTracker("list_sets_intersects_list", trackSetsCost(1)), + interpreter.OverloadCostTracker("list_sets_equivalent_list", trackSetsCost(2)), + ), + } +} + +// NewSetMembershipOptimizer rewrites set membership tests using the `in` operator against a list +// of constant values of enum, int, uint, string, or boolean type into a set membership test against +// a map where the map keys are the elements of the list. 
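A quick usage sketch for the library; the expression values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	env, err := cel.NewEnv(ext.Sets())
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`sets.contains([1, 2, 3, 4], [2, 3]) && sets.intersects([1], [1, 2])`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	// No variables are needed; the lists are literals in the expression.
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```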
+func NewSetMembershipOptimizer() (cel.ASTOptimizer, error) { + return setsLib{}, nil +} + +func (setsLib) Optimize(ctx *cel.OptimizerContext, a *ast.AST) *ast.AST { + root := ast.NavigateAST(a) + matches := ast.MatchDescendants(root, matchInConstantList(a)) + for _, match := range matches { + call := match.AsCall() + listArg := call.Args()[1] + entries := make([]ast.EntryExpr, len(listArg.AsList().Elements())) + for i, elem := range listArg.AsList().Elements() { + var entry ast.EntryExpr + if r, found := a.ReferenceMap()[elem.ID()]; found && r.Value != nil { + entry = ctx.NewMapEntry(ctx.NewLiteral(r.Value), ctx.NewLiteral(types.True), false) + } else { + entry = ctx.NewMapEntry(elem, ctx.NewLiteral(types.True), false) + } + entries[i] = entry + } + mapArg := ctx.NewMap(entries) + ctx.UpdateExpr(listArg, mapArg) + } + return a +} + +func matchInConstantList(a *ast.AST) ast.ExprMatcher { + return func(e ast.NavigableExpr) bool { + if e.Kind() != ast.CallKind { + return false + } + call := e.AsCall() + if call.FunctionName() != operators.In { + return false + } + aggregateVal := call.Args()[1] + if aggregateVal.Kind() != ast.ListKind { + return false + } + listVal := aggregateVal.AsList() + for _, elem := range listVal.Elements() { + if r, found := a.ReferenceMap()[elem.ID()]; found { + if r.Value != nil { + continue + } + } + if elem.Kind() != ast.LiteralKind { + return false + } + lit := elem.AsLiteral() + if !(lit.Type() == cel.StringType || lit.Type() == cel.IntType || + lit.Type() == cel.UintType || lit.Type() == cel.BoolType) { + return false + } + } + return true + } +} + +func setsIntersects(listA, listB ref.Val) ref.Val { + lA := listA.(traits.Lister) + lB := listB.(traits.Lister) + it := lA.Iterator() + for it.HasNext() == types.True { + exists := lB.Contains(it.Next()) + if exists == types.True { + return types.True + } + } + return types.False +} + +func setsContains(list, sublist ref.Val) ref.Val { + l := list.(traits.Lister) + sub := sublist.(traits.Lister) + it := sub.Iterator() + for it.HasNext() == types.True { + exists := l.Contains(it.Next()) + if exists != types.True { + return exists + } + } + return types.True +} + +func setsEquivalent(listA, listB ref.Val) ref.Val { + aContainsB := setsContains(listA, listB) + if aContainsB != types.True { + return aContainsB + } + return setsContains(listB, listA) +} + +func estimateSetsCost(costFactor float64) checker.FunctionEstimator { + return func(estimator checker.CostEstimator, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + if len(args) == 2 { + arg0Size := estimateSize(estimator, args[0]) + arg1Size := estimateSize(estimator, args[1]) + costEstimate := arg0Size.Multiply(arg1Size).MultiplyByCostFactor(costFactor).Add(callCostEstimate) + return &checker.CallEstimate{CostEstimate: costEstimate} + } + return nil + } +} + +func estimateSize(estimator checker.CostEstimator, node checker.AstNode) checker.SizeEstimate { + if l := node.ComputedSize(); l != nil { + return *l + } + if l := estimator.EstimateSize(node); l != nil { + return *l + } + return checker.SizeEstimate{Min: 0, Max: math.MaxUint64} +} + +func trackSetsCost(costFactor float64) interpreter.FunctionTracker { + return func(args []ref.Val, _ ref.Val) *uint64 { + lhsSize := actualSize(args[0]) + rhsSize := actualSize(args[1]) + cost := callCost + uint64(float64(lhsSize*rhsSize)*costFactor) + return &cost + } +} + +func actualSize(value ref.Val) uint64 { + if sz, ok := value.(traits.Sizer); ok { + return uint64(sz.Size().(types.Int)) + } + 
return 1 +} + +var ( + callCostEstimate = checker.CostEstimate{Min: 1, Max: 1} + callCost = uint64(1) +) diff --git a/hack/tools/vendor/github.com/google/cel-go/ext/strings.go b/hack/tools/vendor/github.com/google/cel-go/ext/strings.go new file mode 100644 index 0000000000..2e590a4c57 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/ext/strings.go @@ -0,0 +1,775 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ext contains CEL extension libraries where each library defines a related set of +// constants, functions, macros, or other configuration settings which may not be covered by +// the core CEL spec. +package ext + +import ( + "fmt" + "math" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/text/language" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +const ( + defaultLocale = "en-US" + defaultPrecision = 6 +) + +// Strings returns a cel.EnvOption to configure extended functions for string manipulation. +// As a general note, all indices are zero-based. +// +// # CharAt +// +// Returns the character at the given position. If the position is negative, or greater than +// the length of the string, the function will produce an error: +// +// <string>.charAt(<int>) -> <string> +// +// Examples: +// +// 'hello'.charAt(4) // return 'o' +// 'hello'.charAt(5) // return '' +// 'hello'.charAt(-1) // error +// +// # Format +// +// Introduced at version: 1 +// +// Returns a new string with substitutions being performed, printf-style. +// The valid formatting clauses are: +// +// `%s` - substitutes a string. This can also be used on bools, lists, maps, bytes, +// Duration and Timestamp, in addition to all numerical types (int, uint, and double). +// Note that the dot/period decimal separator will always be used when printing a list +// or map that contains a double, and that null can be passed (which results in the +// string "null") in addition to types. +// `%d` - substitutes an integer. +// `%f` - substitutes a double with fixed-point precision. The default precision is 6, but +// this can be adjusted. The strings `Infinity`, `-Infinity`, and `NaN` are also valid input +// for this clause. +// `%e` - substitutes a double in scientific notation. The default precision is 6, but this +// can be adjusted. +// `%b` - substitutes an integer with its equivalent binary string. Can also be used on bools. +// `%x` - substitutes an integer with its equivalent in hexadecimal, or if given a string or +// bytes, will output each character's equivalent in hexadecimal. +// `%X` - same as above, but with A-F capitalized. +// `%o` - substitutes an integer with its equivalent in octal.
+// +// <string>.format(<list>) -> <string> +// +// Examples: +// +// "this is a string: %s\nand an integer: %d".format(["str", 42]) // returns "this is a string: str\nand an integer: 42" +// "a double substituted with %%s: %s".format([64.2]) // returns "a double substituted with %s: 64.2" +// "string type: %s".format([type(string)]) // returns "string type: string" +// "timestamp: %s".format([timestamp("2023-02-03T23:31:20+00:00")]) // returns "timestamp: 2023-02-03T23:31:20Z" +// "duration: %s".format([duration("1h45m47s")]) // returns "duration: 6347s" +// "%f".format([3.14]) // returns "3.140000" +// "scientific notation: %e".format([2.71828]) // returns "scientific notation: 2.718280\u202f\u00d7\u202f10\u2070\u2070" +// "5 in binary: %b".format([5]), // returns "5 in binary: 101" +// "26 in hex: %x".format([26]), // returns "26 in hex: 1a" +// "26 in hex (uppercase): %X".format([26]) // returns "26 in hex (uppercase): 1A" +// "30 in octal: %o".format([30]) // returns "30 in octal: 36" +// "a map inside a list: %s".format([[1, 2, 3, {"a": "x", "b": "y", "c": "z"}]]) // returns "a map inside a list: [1, 2, 3, {"a":"x", "b":"y", "c":"z"}]" +// "true bool: %s - false bool: %s\nbinary bool: %b".format([true, false, true]) // returns "true bool: true - false bool: false\nbinary bool: 1" +// +// Passing an incorrect type (a string to `%b`) is considered an error, as well as attempting +// to use more formatting clauses than there are arguments (`%d %d %d` while passing two ints, for instance). +// If compile-time checking is enabled, and the formatting string is a constant, and the argument list is a literal, +// then letting any arguments go unused/unformatted is also considered an error. +// +// # IndexOf +// +// Returns the integer index of the first occurrence of the search string. If the search string is +// not found the function returns -1. +// +// The function also accepts an optional position from which to begin the substring search. If the +// substring is the empty string, the index where the search starts is returned (zero or custom). +// +// <string>.indexOf(<string>) -> <int> +// <string>.indexOf(<string>, <int>) -> <int> +// +// Examples: +// +// 'hello mellow'.indexOf('') // returns 0 +// 'hello mellow'.indexOf('ello') // returns 1 +// 'hello mellow'.indexOf('jello') // returns -1 +// 'hello mellow'.indexOf('', 2) // returns 2 +// 'hello mellow'.indexOf('ello', 2) // returns 7 +// 'hello mellow'.indexOf('ello', 20) // returns -1 +// 'hello mellow'.indexOf('ello', -1) // error +// +// # Join +// +// Returns a new string where the elements of string list are concatenated. +// +// The function also accepts an optional separator which is placed between elements in the resulting string. +// +// <list<string>>.join() -> <string> +// <list<string>>.join(<string>) -> <string> +// +// Examples: +// +// ['hello', 'mellow'].join() // returns 'hellomellow' +// ['hello', 'mellow'].join(' ') // returns 'hello mellow' +// [].join() // returns '' +// [].join('/') // returns '' +// +// # LastIndexOf +// +// Returns the integer index at the start of the last occurrence of the search string. If the +// search string is not found the function returns -1. +// +// The function also accepts an optional position which represents the last index to be +// considered as the beginning of the substring match. If the substring is the empty string, +// the index where the search starts is returned (string length or custom).
+// +// <string>.lastIndexOf(<string>) -> <int> +// <string>.lastIndexOf(<string>, <int>) -> <int> +// +// Examples: +// +// 'hello mellow'.lastIndexOf('') // returns 12 +// 'hello mellow'.lastIndexOf('ello') // returns 7 +// 'hello mellow'.lastIndexOf('jello') // returns -1 +// 'hello mellow'.lastIndexOf('ello', 6) // returns 1 +// 'hello mellow'.lastIndexOf('ello', 20) // returns -1 +// 'hello mellow'.lastIndexOf('ello', -1) // error +// +// # LowerAscii +// +// Returns a new string where all ASCII characters are lower-cased. +// +// This function does not perform Unicode case-mapping for characters outside the ASCII range. +// +// <string>.lowerAscii() -> <string> +// +// Examples: +// +// 'TacoCat'.lowerAscii() // returns 'tacocat' +// 'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii' +// +// # Strings.Quote +// +// Introduced in version: 1 +// +// Takes the given string and makes it safe to print (without any formatting due to escape sequences). +// If any invalid UTF-8 characters are encountered, they are replaced with \uFFFD. +// +// strings.quote(<string>) +// +// Examples: +// +// strings.quote('single-quote with "double quote"') // returns '"single-quote with \"double quote\""' +// strings.quote("two escape sequences \a\n") // returns '"two escape sequences \\a\\n"' +// +// # Replace +// +// Returns a new string based on the target, which replaces the occurrences of a search string +// with a replacement string if present. The function accepts an optional limit on the number of +// substring replacements to be made. +// +// When the replacement limit is 0, the result is the original string. When the limit is a negative +// number, the function behaves the same as replace all. +// +// <string>.replace(<string>, <string>) -> <string> +// <string>.replace(<string>, <string>, <int>) -> <string> +// +// Examples: +// +// 'hello hello'.replace('he', 'we') // returns 'wello wello' +// 'hello hello'.replace('he', 'we', -1) // returns 'wello wello' +// 'hello hello'.replace('he', 'we', 1) // returns 'wello hello' +// 'hello hello'.replace('he', 'we', 0) // returns 'hello hello' +// 'hello hello'.replace('', '_') // returns '_h_e_l_l_o_ _h_e_l_l_o_' +// 'hello hello'.replace('h', '') // returns 'ello ello' +// +// # Split +// +// Returns a list of strings split from the input by the given separator. The function accepts +// an optional argument specifying a limit on the number of substrings produced by the split. +// +// When the split limit is 0, the result is an empty list. When the limit is 1, the result is the +// target string to split. When the limit is a negative number, the function behaves the same as +// split all. +// +// <string>.split(<string>) -> <list<string>> +// <string>.split(<string>, <int>) -> <list<string>> +// +// Examples: +// +// 'hello hello hello'.split(' ') // returns ['hello', 'hello', 'hello'] +// 'hello hello hello'.split(' ', 0) // returns [] +// 'hello hello hello'.split(' ', 1) // returns ['hello hello hello'] +// 'hello hello hello'.split(' ', 2) // returns ['hello', 'hello hello'] +// 'hello hello hello'.split(' ', -1) // returns ['hello', 'hello', 'hello'] +// +// # Substring +// +// Returns the substring given a numeric range corresponding to character positions. Optionally +// may omit the trailing range for a substring from a given character position until the end of +// a string. +// +// Character offsets are 0-based with an inclusive start range and exclusive end range. It is an +// error to specify an end range that is lower than the start range, or for either the start or end +// index to be negative or exceed the string length.
+// +// <string>.substring(<int>) -> <string> +// <string>.substring(<int>, <int>) -> <string> +// +// Examples: +// +// 'tacocat'.substring(4) // returns 'cat' +// 'tacocat'.substring(0, 4) // returns 'taco' +// 'tacocat'.substring(-1) // error +// 'tacocat'.substring(2, 1) // error +// +// # Trim +// +// Returns a new string which removes the leading and trailing whitespace in the target string. +// The trim function uses the Unicode definition of whitespace which does not include the +// zero-width spaces. See: https://en.wikipedia.org/wiki/Whitespace_character#Unicode +// +// <string>.trim() -> <string> +// +// Examples: +// +// ' \ttrim\n '.trim() // returns 'trim' +// +// # UpperAscii +// +// Returns a new string where all ASCII characters are upper-cased. +// +// This function does not perform Unicode case-mapping for characters outside the ASCII range. +// +// <string>.upperAscii() -> <string> +// +// Examples: +// +// 'TacoCat'.upperAscii() // returns 'TACOCAT' +// 'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII' +// +// # Reverse +// +// Introduced at version: 3 +// +// Returns a new string whose characters are the same as the target string, only formatted in +// reverse order. +// This function relies on converting strings to rune arrays in order to reverse. +// +// <string>.reverse() -> <string> +// +// Examples: +// +// 'gums'.reverse() // returns 'smug' +// 'John Smith'.reverse() // returns 'htimS nhoJ' +func Strings(options ...StringsOption) cel.EnvOption { + s := &stringLib{ + version: math.MaxUint32, + validateFormat: true, + } + for _, o := range options { + s = o(s) + } + return cel.Lib(s) +} + +type stringLib struct { + locale string + version uint32 + validateFormat bool +} + +// LibraryName implements the SingletonLibrary interface method. +func (*stringLib) LibraryName() string { + return "cel.lib.ext.strings" +} + +// StringsOption is a functional interface for configuring the strings library. +type StringsOption func(*stringLib) *stringLib + +// StringsLocale configures the library with the given locale. The locale tag will +// be checked for validity at the time that EnvOptions are configured. If this option +// is not passed, string.format will behave as if en_US was passed as the locale. +func StringsLocale(locale string) StringsOption { + return func(sl *stringLib) *stringLib { + sl.locale = locale + return sl + } +} + +// StringsVersion configures the version of the string library. +// +// The version limits which functions are available. Only functions introduced +// at or below the given version are included in the library. If this option +// is not set, all functions are available. +// +// See the library documentation to determine the version at which a function was +// introduced. If the documentation does not state a version, the function can be +// assumed to have been introduced at version 0, when the library was first created. +func StringsVersion(version uint32) StringsOption { + return func(lib *stringLib) *stringLib { + lib.version = version + return lib + } +} + +// StringsValidateFormatCalls validates type-checked ASTs to ensure that string.format() calls have +// valid formatting clauses and valid argument types for each clause. +// +// Enabled by default. +func StringsValidateFormatCalls(value bool) StringsOption { + return func(s *stringLib) *stringLib { + s.validateFormat = value + return s + } +} + +// CompileOptions implements the Library interface method.
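A brief sketch of enabling the library with a pinned version via the options above; the expression is illustrative:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	// Version 3 enables everything documented above, including reverse().
	env, err := cel.NewEnv(ext.Strings(ext.StringsVersion(3)))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`'hello mellow'.indexOf('ello') == 1 && 'gums'.reverse() == 'smug'`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```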
+func (lib *stringLib) CompileOptions() []cel.EnvOption { + formatLocale := "en_US" + if lib.locale != "" { + // ensure locale is properly-formed if set + _, err := language.Parse(lib.locale) + if err != nil { + return []cel.EnvOption{ + func(e *cel.Env) (*cel.Env, error) { + return nil, fmt.Errorf("failed to parse locale: %w", err) + }, + } + } + formatLocale = lib.locale + } + + opts := []cel.EnvOption{ + cel.Function("charAt", + cel.MemberOverload("string_char_at_int", []*cel.Type{cel.StringType, cel.IntType}, cel.StringType, + cel.BinaryBinding(func(str, ind ref.Val) ref.Val { + s := str.(types.String) + i := ind.(types.Int) + return stringOrError(charAt(string(s), int64(i))) + }))), + cel.Function("indexOf", + cel.MemberOverload("string_index_of_string", []*cel.Type{cel.StringType, cel.StringType}, cel.IntType, + cel.BinaryBinding(func(str, substr ref.Val) ref.Val { + s := str.(types.String) + sub := substr.(types.String) + return intOrError(indexOf(string(s), string(sub))) + })), + cel.MemberOverload("string_index_of_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.IntType}, cel.IntType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + s := args[0].(types.String) + sub := args[1].(types.String) + offset := args[2].(types.Int) + return intOrError(indexOfOffset(string(s), string(sub), int64(offset))) + }))), + cel.Function("lastIndexOf", + cel.MemberOverload("string_last_index_of_string", []*cel.Type{cel.StringType, cel.StringType}, cel.IntType, + cel.BinaryBinding(func(str, substr ref.Val) ref.Val { + s := str.(types.String) + sub := substr.(types.String) + return intOrError(lastIndexOf(string(s), string(sub))) + })), + cel.MemberOverload("string_last_index_of_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.IntType}, cel.IntType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + s := args[0].(types.String) + sub := args[1].(types.String) + offset := args[2].(types.Int) + return intOrError(lastIndexOfOffset(string(s), string(sub), int64(offset))) + }))), + cel.Function("lowerAscii", + cel.MemberOverload("string_lower_ascii", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(func(str ref.Val) ref.Val { + s := str.(types.String) + return stringOrError(lowerASCII(string(s))) + }))), + cel.Function("replace", + cel.MemberOverload( + "string_replace_string_string", []*cel.Type{cel.StringType, cel.StringType, cel.StringType}, cel.StringType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + str := args[0].(types.String) + old := args[1].(types.String) + new := args[2].(types.String) + return stringOrError(replace(string(str), string(old), string(new))) + })), + cel.MemberOverload( + "string_replace_string_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.StringType, cel.IntType}, cel.StringType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + str := args[0].(types.String) + old := args[1].(types.String) + new := args[2].(types.String) + n := args[3].(types.Int) + return stringOrError(replaceN(string(str), string(old), string(new), int64(n))) + }))), + cel.Function("split", + cel.MemberOverload("string_split_string", []*cel.Type{cel.StringType, cel.StringType}, cel.ListType(cel.StringType), + cel.BinaryBinding(func(str, separator ref.Val) ref.Val { + s := str.(types.String) + sep := separator.(types.String) + return listStringOrError(split(string(s), string(sep))) + })), + cel.MemberOverload("string_split_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.IntType}, cel.ListType(cel.StringType), + 
cel.FunctionBinding(func(args ...ref.Val) ref.Val { + s := args[0].(types.String) + sep := args[1].(types.String) + n := args[2].(types.Int) + return listStringOrError(splitN(string(s), string(sep), int64(n))) + }))), + cel.Function("substring", + cel.MemberOverload("string_substring_int", []*cel.Type{cel.StringType, cel.IntType}, cel.StringType, + cel.BinaryBinding(func(str, offset ref.Val) ref.Val { + s := str.(types.String) + off := offset.(types.Int) + return stringOrError(substr(string(s), int64(off))) + })), + cel.MemberOverload("string_substring_int_int", []*cel.Type{cel.StringType, cel.IntType, cel.IntType}, cel.StringType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + s := args[0].(types.String) + start := args[1].(types.Int) + end := args[2].(types.Int) + return stringOrError(substrRange(string(s), int64(start), int64(end))) + }))), + cel.Function("trim", + cel.MemberOverload("string_trim", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(func(str ref.Val) ref.Val { + s := str.(types.String) + return stringOrError(trimSpace(string(s))) + }))), + cel.Function("upperAscii", + cel.MemberOverload("string_upper_ascii", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(func(str ref.Val) ref.Val { + s := str.(types.String) + return stringOrError(upperASCII(string(s))) + }))), + } + if lib.version >= 1 { + opts = append(opts, cel.Function("format", + cel.MemberOverload("string_format", []*cel.Type{cel.StringType, cel.ListType(cel.DynType)}, cel.StringType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + s := string(args[0].(types.String)) + formatArgs := args[1].(traits.Lister) + return stringOrError(parseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale)) + }))), + cel.Function("strings.quote", cel.Overload("strings_quote", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(func(str ref.Val) ref.Val { + s := str.(types.String) + return stringOrError(quote(string(s))) + }))), + + cel.ASTValidators(stringFormatValidator{})) + + } + if lib.version >= 2 { + opts = append(opts, + cel.Function("join", + cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType, + cel.UnaryBinding(func(list ref.Val) ref.Val { + l := list.(traits.Lister) + return stringOrError(joinValSeparator(l, "")) + })), + cel.MemberOverload("list_join_string", []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, cel.StringType, + cel.BinaryBinding(func(list, delim ref.Val) ref.Val { + l := list.(traits.Lister) + d := delim.(types.String) + return stringOrError(joinValSeparator(l, string(d))) + }))), + ) + } else { + opts = append(opts, + cel.Function("join", + cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType, + cel.UnaryBinding(func(list ref.Val) ref.Val { + l, err := list.ConvertToNative(stringListType) + if err != nil { + return types.WrapErr(err) + } + return stringOrError(join(l.([]string))) + })), + cel.MemberOverload("list_join_string", []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, cel.StringType, + cel.BinaryBinding(func(list, delim ref.Val) ref.Val { + l, err := list.ConvertToNative(stringListType) + if err != nil { + return types.WrapErr(err) + } + d := delim.(types.String) + return stringOrError(joinSeparator(l.([]string), string(d))) + }))), + ) + } + if lib.version >= 3 { + opts = append(opts, + cel.Function("reverse", + cel.MemberOverload("string_reverse", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(func(str 
ref.Val) ref.Val { + s := str.(types.String) + return stringOrError(reverse(string(s))) + }))), + ) + } + if lib.validateFormat { + opts = append(opts, cel.ASTValidators(stringFormatValidator{})) + } + return opts +} + +// ProgramOptions implements the Library interface method. +func (*stringLib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func charAt(str string, ind int64) (string, error) { + i := int(ind) + runes := []rune(str) + if i < 0 || i > len(runes) { + return "", fmt.Errorf("index out of range: %d", ind) + } + if i == len(runes) { + return "", nil + } + return string(runes[i]), nil +} + +func indexOf(str, substr string) (int64, error) { + return indexOfOffset(str, substr, int64(0)) +} + +func indexOfOffset(str, substr string, offset int64) (int64, error) { + if substr == "" { + return offset, nil + } + off := int(offset) + runes := []rune(str) + subrunes := []rune(substr) + if off < 0 { + return -1, fmt.Errorf("index out of range: %d", off) + } + // If the offset exceeds the length, return -1 rather than error. + if off >= len(runes) { + return -1, nil + } + for i := off; i < len(runes)-(len(subrunes)-1); i++ { + found := true + for j := 0; j < len(subrunes); j++ { + if runes[i+j] != subrunes[j] { + found = false + break + } + } + if found { + return int64(i), nil + } + } + return -1, nil +} + +func lastIndexOf(str, substr string) (int64, error) { + runes := []rune(str) + if substr == "" { + return int64(len(runes)), nil + } + return lastIndexOfOffset(str, substr, int64(len(runes)-1)) +} + +func lastIndexOfOffset(str, substr string, offset int64) (int64, error) { + if substr == "" { + return offset, nil + } + off := int(offset) + runes := []rune(str) + subrunes := []rune(substr) + if off < 0 { + return -1, fmt.Errorf("index out of range: %d", off) + } + // If the offset is far greater than the length return -1 + if off >= len(runes) { + return -1, nil + } + if off > len(runes)-len(subrunes) { + off = len(runes) - len(subrunes) + } + for i := off; i >= 0; i-- { + found := true + for j := 0; j < len(subrunes); j++ { + if runes[i+j] != subrunes[j] { + found = false + break + } + } + if found { + return int64(i), nil + } + } + return -1, nil +} + +func lowerASCII(str string) (string, error) { + runes := []rune(str) + for i, r := range runes { + if r <= unicode.MaxASCII { + r = unicode.ToLower(r) + runes[i] = r + } + } + return string(runes), nil +} + +func replace(str, old, new string) (string, error) { + return strings.ReplaceAll(str, old, new), nil +} + +func replaceN(str, old, new string, n int64) (string, error) { + return strings.Replace(str, old, new, int(n)), nil +} + +func split(str, sep string) ([]string, error) { + return strings.Split(str, sep), nil +} + +func splitN(str, sep string, n int64) ([]string, error) { + return strings.SplitN(str, sep, int(n)), nil +} + +func substr(str string, start int64) (string, error) { + runes := []rune(str) + if int(start) < 0 || int(start) > len(runes) { + return "", fmt.Errorf("index out of range: %d", start) + } + return string(runes[start:]), nil +} + +func substrRange(str string, start, end int64) (string, error) { + runes := []rune(str) + l := len(runes) + if start > end { + return "", fmt.Errorf("invalid substring range. 
start: %d, end: %d", start, end) + } + if int(start) < 0 || int(start) > l { + return "", fmt.Errorf("index out of range: %d", start) + } + if int(end) < 0 || int(end) > l { + return "", fmt.Errorf("index out of range: %d", end) + } + return string(runes[int(start):int(end)]), nil +} + +func trimSpace(str string) (string, error) { + return strings.TrimSpace(str), nil +} + +func upperASCII(str string) (string, error) { + runes := []rune(str) + for i, r := range runes { + if r <= unicode.MaxASCII { + r = unicode.ToUpper(r) + runes[i] = r + } + } + return string(runes), nil +} + +func reverse(str string) (string, error) { + chars := []rune(str) + for i, j := 0, len(chars)-1; i < j; i, j = i+1, j-1 { + chars[i], chars[j] = chars[j], chars[i] + } + return string(chars), nil +} + +func joinSeparator(strs []string, separator string) (string, error) { + return strings.Join(strs, separator), nil +} + +func join(strs []string) (string, error) { + return strings.Join(strs, ""), nil +} + +func joinValSeparator(strs traits.Lister, separator string) (string, error) { + sz := strs.Size().(types.Int) + var sb strings.Builder + for i := types.Int(0); i < sz; i++ { + if i != 0 { + sb.WriteString(separator) + } + elem := strs.Get(i) + str, ok := elem.(types.String) + if !ok { + return "", fmt.Errorf("join: invalid input: %v", elem) + } + sb.WriteString(string(str)) + } + return sb.String(), nil +} + +// quote implements a string quoting function. The string will be wrapped in +// double quotes, and all valid CEL escape sequences will be escaped to show up +// literally if printed. If the input contains any invalid UTF-8, the invalid runes +// will be replaced with utf8.RuneError. +func quote(s string) (string, error) { + var quotedStrBuilder strings.Builder + for _, c := range sanitize(s) { + switch c { + case '\a': + quotedStrBuilder.WriteString("\\a") + case '\b': + quotedStrBuilder.WriteString("\\b") + case '\f': + quotedStrBuilder.WriteString("\\f") + case '\n': + quotedStrBuilder.WriteString("\\n") + case '\r': + quotedStrBuilder.WriteString("\\r") + case '\t': + quotedStrBuilder.WriteString("\\t") + case '\v': + quotedStrBuilder.WriteString("\\v") + case '\\': + quotedStrBuilder.WriteString("\\\\") + case '"': + quotedStrBuilder.WriteString("\\\"") + default: + quotedStrBuilder.WriteRune(c) + } + } + escapedStr := quotedStrBuilder.String() + return "\"" + escapedStr + "\"", nil +} + +// sanitize replaces all invalid runes in the given string with utf8.RuneError. 
+func sanitize(s string) string { + var sanitizedStringBuilder strings.Builder + for _, r := range s { + if !utf8.ValidRune(r) { + sanitizedStringBuilder.WriteRune(utf8.RuneError) + } else { + sanitizedStringBuilder.WriteRune(r) + } + } + return sanitizedStringBuilder.String() +} + +var ( + stringListType = reflect.TypeOf([]string{}) +) diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/interpreter/BUILD.bazel new file mode 100644 index 0000000000..220e23d475 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/BUILD.bazel @@ -0,0 +1,74 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "activation.go", + "attribute_patterns.go", + "attributes.go", + "decorators.go", + "dispatcher.go", + "evalstate.go", + "interpretable.go", + "interpreter.go", + "optimizations.go", + "planner.go", + "prune.go", + "runtimecost.go", + ], + importpath = "github.com/google/cel-go/interpreter", + deps = [ + "//common:go_default_library", + "//common/ast:go_default_library", + "//common/containers:go_default_library", + "//common/functions:go_default_library", + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//types/known/durationpb:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", + "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "activation_test.go", + "attribute_patterns_test.go", + "attributes_test.go", + "interpreter_test.go", + "prune_test.go", + "runtimecost_test.go", + ], + embed = [ + ":go_default_library", + ], + deps = [ + "//checker:go_default_library", + "//common/containers:go_default_library", + "//common/debug:go_default_library", + "//common/decls:go_default_library", + "//common/functions:go_default_library", + "//common/operators:go_default_library", + "//common/stdlib:go_default_library", + "//common/types:go_default_library", + "//parser:go_default_library", + "//test:go_default_library", + "//test/proto2pb:go_default_library", + "//test/proto3pb:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//types/known/anypb:go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/activation.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/activation.go new file mode 100644 index 0000000000..1577f35909 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/activation.go @@ -0,0 +1,168 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// Activation is used to resolve identifiers by name and references by id.
+//
+// An Activation is the primary mechanism by which a caller supplies input into a CEL program.
+type Activation interface {
+	// ResolveName returns a value from the activation by qualified name, or false if the name
+	// could not be found.
+	ResolveName(name string) (any, bool)
+
+	// Parent returns the parent of the current activation, which may be nil.
+	// If non-nil, the parent will be searched during resolve calls.
+	Parent() Activation
+}
+
+// EmptyActivation returns a variable-free activation.
+func EmptyActivation() Activation {
+	return emptyActivation{}
+}
+
+// emptyActivation is a variable-free activation.
+type emptyActivation struct{}
+
+func (emptyActivation) ResolveName(string) (any, bool) { return nil, false }
+func (emptyActivation) Parent() Activation             { return nil }
+
+// NewActivation returns an activation based on a map-based binding where the map keys are
+// expected to be qualified names used with ResolveName calls.
+//
+// The input `bindings` may either be of type `Activation` or `map[string]any`.
+//
+// Lazy bindings may be supplied within the map-based input in either of the following forms:
+//   - func() any
+//   - func() ref.Val
+//
+// The output of the lazy binding will overwrite the variable reference in the internal map.
+//
+// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using
+// the types.Adapter configured in the environment.
+func NewActivation(bindings any) (Activation, error) {
+	if bindings == nil {
+		return nil, errors.New("bindings must be non-nil")
+	}
+	a, isActivation := bindings.(Activation)
+	if isActivation {
+		return a, nil
+	}
+	m, isMap := bindings.(map[string]any)
+	if !isMap {
+		return nil, fmt.Errorf(
+			"activation input must be an activation or map[string]interface: got %T",
+			bindings)
+	}
+	return &mapActivation{bindings: m}, nil
+}
+
+// mapActivation implements Activation for a map of named values.
+//
+// Named bindings may lazily supply values by providing a function which accepts no arguments and
+// produces an interface value.
+type mapActivation struct {
+	bindings map[string]any
+}
+
+// Parent implements the Activation interface method.
+func (a *mapActivation) Parent() Activation {
+	return nil
+}
+
+// ResolveName implements the Activation interface method.
+func (a *mapActivation) ResolveName(name string) (any, bool) {
+	obj, found := a.bindings[name]
+	if !found {
+		return nil, false
+	}
+	fn, isLazy := obj.(func() ref.Val)
+	if isLazy {
+		obj = fn()
+		a.bindings[name] = obj
+	}
+	fnRaw, isLazy := obj.(func() any)
+	if isLazy {
+		obj = fnRaw()
+		a.bindings[name] = obj
+	}
+	return obj, found
+}
+
+// hierarchicalActivation implements Activation and contains a parent and a child activation.
+type hierarchicalActivation struct {
+	parent Activation
+	child  Activation
+}
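A small illustration of the lazy-binding behavior above (a sketch, not part of the vendored sources; the binding names are made up):

	package main

	import (
		"fmt"

		"github.com/google/cel-go/interpreter"
	)

	func main() {
		calls := 0
		vars, err := interpreter.NewActivation(map[string]any{
			"greeting": "hello",
			// Lazy binding: invoked on the first ResolveName call, after which
			// the computed value overwrites the function in the internal map.
			"expensive": func() any {
				calls++
				return int64(42)
			},
		})
		if err != nil {
			panic(err)
		}
		v1, _ := vars.ResolveName("expensive")
		v2, _ := vars.ResolveName("expensive")
		fmt.Println(v1, v2, calls) // 42 42 1
	}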
+// Parent implements the Activation interface method.
+func (a *hierarchicalActivation) Parent() Activation {
+	return a.parent
+}
+
+// ResolveName implements the Activation interface method.
+func (a *hierarchicalActivation) ResolveName(name string) (any, bool) {
+	if object, found := a.child.ResolveName(name); found {
+		return object, found
+	}
+	return a.parent.ResolveName(name)
+}
+
+// NewHierarchicalActivation takes two activations and produces a new one which prioritizes
+// resolution in the child first and parent(s) second.
+func NewHierarchicalActivation(parent Activation, child Activation) Activation {
+	return &hierarchicalActivation{parent, child}
+}
+
+// NewPartialActivation returns an Activation which contains a list of AttributePattern values
+// representing field and index operations that should result in a 'types.Unknown' result.
+//
+// The `bindings` value may be any value type supported by the interpreter.NewActivation call,
+// but is typically either an existing Activation or map[string]any.
+func NewPartialActivation(bindings any,
+	unknowns ...*AttributePattern) (PartialActivation, error) {
+	a, err := NewActivation(bindings)
+	if err != nil {
+		return nil, err
+	}
+	return &partActivation{Activation: a, unknowns: unknowns}, nil
+}
+
+// PartialActivation extends the Activation interface with a set of UnknownAttributePatterns.
+type PartialActivation interface {
+	Activation
+
+	// UnknownAttributePatterns returns a set of AttributePattern values which match Attribute
+	// expressions for data accesses whose values are not yet known.
+	UnknownAttributePatterns() []*AttributePattern
+}
+
+// partActivation is the default implementation of the PartialActivation interface.
+type partActivation struct {
+	Activation
+	unknowns []*AttributePattern
+}
+
+// UnknownAttributePatterns implements the PartialActivation interface method.
+func (a *partActivation) UnknownAttributePatterns() []*AttributePattern {
+	return a.unknowns
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
new file mode 100644
index 0000000000..8f19bde7e1
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
@@ -0,0 +1,397 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/common/containers"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// AttributePattern represents a top-level variable with an optional set of qualifier patterns.
+//
+// When using a CEL expression within a container, e.g. a package or namespace, the variable name
+// in the pattern must match the qualified name produced during the variable namespace resolution.
+// For example, if variable `c` appears in an expression whose container is `a.b`, the variable
+// name supplied to the pattern must be `a.b.c`.
+//
+// The qualifier patterns for attribute matching must be one of the following:
+//
+//   - valid map key type: string, int, uint, bool
+//   - wildcard (*)
+//
+// Examples:
+//
+//  1. ns.myvar["complex-value"]
+//  2. ns.myvar["complex-value"][0]
+//  3. ns.myvar["complex-value"].*.name
+//
+// The first example is simple: match an attribute where the variable is 'ns.myvar' with a
+// field access on 'complex-value'. The second example expands the match to indicate that only
+// a specific index `0` should match. And lastly, the third example matches any indexed access
+// that later selects the 'name' field.
+type AttributePattern struct {
+	variable          string
+	qualifierPatterns []*AttributeQualifierPattern
+}
+
+// NewAttributePattern produces a new mutable AttributePattern based on a variable name.
+func NewAttributePattern(variable string) *AttributePattern {
+	return &AttributePattern{
+		variable:          variable,
+		qualifierPatterns: []*AttributeQualifierPattern{},
+	}
+}
+
+// QualString adds a string qualifier pattern to the AttributePattern. The string may be a valid
+// identifier, or string map key including empty string.
+func (apat *AttributePattern) QualString(pattern string) *AttributePattern {
+	apat.qualifierPatterns = append(apat.qualifierPatterns,
+		&AttributeQualifierPattern{value: pattern})
+	return apat
+}
+
+// QualInt adds an int qualifier pattern to the AttributePattern. The index may be either a map or
+// list index.
+func (apat *AttributePattern) QualInt(pattern int64) *AttributePattern {
+	apat.qualifierPatterns = append(apat.qualifierPatterns,
+		&AttributeQualifierPattern{value: pattern})
+	return apat
+}
+
+// QualUint adds a uint qualifier pattern for a map index operation to the AttributePattern.
+func (apat *AttributePattern) QualUint(pattern uint64) *AttributePattern {
+	apat.qualifierPatterns = append(apat.qualifierPatterns,
+		&AttributeQualifierPattern{value: pattern})
+	return apat
+}
+
+// QualBool adds a bool qualifier pattern for a map index operation to the AttributePattern.
+func (apat *AttributePattern) QualBool(pattern bool) *AttributePattern {
+	apat.qualifierPatterns = append(apat.qualifierPatterns,
+		&AttributeQualifierPattern{value: pattern})
+	return apat
+}
+
+// Wildcard adds a special sentinel qualifier pattern that will match any single qualifier.
+func (apat *AttributePattern) Wildcard() *AttributePattern {
+	apat.qualifierPatterns = append(apat.qualifierPatterns,
+		&AttributeQualifierPattern{wildcard: true})
+	return apat
+}
+
+// VariableMatches returns true if the fully qualified variable matches the AttributePattern
+// fully qualified variable name.
+func (apat *AttributePattern) VariableMatches(variable string) bool {
+	return apat.variable == variable
+}
+
+// QualifierPatterns returns the set of AttributeQualifierPattern values on the AttributePattern.
+func (apat *AttributePattern) QualifierPatterns() []*AttributeQualifierPattern {
+	return apat.qualifierPatterns
+}
+
+// AttributeQualifierPattern holds a wildcard or valued qualifier pattern.
+type AttributeQualifierPattern struct {
+	wildcard bool
+	value    any
+}
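The builder composes left to right. A sketch of the documented examples (illustrative only, not part of the vendored file):

	package main

	import "github.com/google/cel-go/interpreter"

	func main() {
		// Matches ns.myvar['complex-value'] and any deeper qualification of it.
		p1 := interpreter.NewAttributePattern("ns.myvar").QualString("complex-value")
		// Matches any single index under ns.myvar['complex-value'] that then
		// selects the 'name' field, e.g. ns.myvar['complex-value'][0].name.
		p3 := interpreter.NewAttributePattern("ns.myvar").
			QualString("complex-value").
			Wildcard().
			QualString("name")
		// Patterns are typically handed to NewPartialActivation as unknowns.
		_, _ = p1, p3
	}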
+// Matches returns true if the qualifier pattern is a wildcard, or the Qualifier implements the
+// qualifierValueEquator interface and its QualifierValueEquals returns true for the qualifier
+// pattern.
+func (qpat *AttributeQualifierPattern) Matches(q Qualifier) bool {
+	if qpat.wildcard {
+		return true
+	}
+	qve, ok := q.(qualifierValueEquator)
+	return ok && qve.QualifierValueEquals(qpat.value)
+}
+
+// qualifierValueEquator defines an interface for determining if an input value, of valid map key
+// type, is equal to the value held in the Qualifier. This interface is used by the
+// AttributeQualifierPattern to determine pattern matches for non-wildcard qualifier patterns.
+//
+// Note: Attribute values are also Qualifier values; however, Attributes are resolved before
+// qualification happens. This is an implementation detail, but one relevant to why the Attribute
+// types do not surface in the list of implementations.
+//
+// See: partialAttributeFactory.matchesUnknownPatterns for more details on how this interface is
+// used.
+type qualifierValueEquator interface {
+	// QualifierValueEquals returns true if the input value is equal to the value held in the
+	// Qualifier.
+	QualifierValueEquals(value any) bool
+}
+
+// QualifierValueEquals implementation for boolean qualifiers.
+func (q *boolQualifier) QualifierValueEquals(value any) bool {
+	bval, ok := value.(bool)
+	return ok && q.value == bval
+}
+
+// QualifierValueEquals implementation for field qualifiers.
+func (q *fieldQualifier) QualifierValueEquals(value any) bool {
+	sval, ok := value.(string)
+	return ok && q.Name == sval
+}
+
+// QualifierValueEquals implementation for string qualifiers.
+func (q *stringQualifier) QualifierValueEquals(value any) bool {
+	sval, ok := value.(string)
+	return ok && q.value == sval
+}
+
+// QualifierValueEquals implementation for int qualifiers.
+func (q *intQualifier) QualifierValueEquals(value any) bool {
+	return numericValueEquals(value, q.celValue)
+}
+
+// QualifierValueEquals implementation for uint qualifiers.
+func (q *uintQualifier) QualifierValueEquals(value any) bool {
+	return numericValueEquals(value, q.celValue)
+}
+
+// QualifierValueEquals implementation for double qualifiers.
+func (q *doubleQualifier) QualifierValueEquals(value any) bool {
+	return numericValueEquals(value, q.celValue)
+}
+
+// numericValueEquals uses CEL equality to determine whether two number values are equal.
+func numericValueEquals(value any, celValue ref.Val) bool {
+	val := types.DefaultTypeAdapter.NativeToValue(value)
+	return celValue.Equal(val) == types.True
+}
+
+// NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing
+// AttributePattern matches with PartialActivation inputs.
+func NewPartialAttributeFactory(container *containers.Container, adapter types.Adapter, provider types.Provider, opts ...AttrFactoryOption) AttributeFactory {
+	fac := NewAttributeFactory(container, adapter, provider, opts...)
+	return &partialAttributeFactory{
+		AttributeFactory: fac,
+		container:        container,
+		adapter:          adapter,
+		provider:         provider,
+	}
+}
+
+type partialAttributeFactory struct {
+	AttributeFactory
+	container *containers.Container
+	adapter   types.Adapter
+	provider  types.Provider
+}
+
+// AbsoluteAttribute implementation of the AttributeFactory interface which wraps the
+// NamespacedAttribute resolution in an internal attributeMatcher object to dynamically match
+// unknown patterns from PartialActivation inputs if given.
+func (fac *partialAttributeFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute {
+	attr := fac.AttributeFactory.AbsoluteAttribute(id, names...)
+	return &attributeMatcher{fac: fac, NamespacedAttribute: attr}
+}
+
+// MaybeAttribute implementation of the AttributeFactory interface which ensures that the set of
+// 'maybe' NamespacedAttribute values are produced using the partialAttributeFactory rather than
+// the base AttributeFactory implementation.
+func (fac *partialAttributeFactory) MaybeAttribute(id int64, name string) Attribute {
+	return &maybeAttribute{
+		id: id,
+		attrs: []NamespacedAttribute{
+			fac.AbsoluteAttribute(id, fac.container.ResolveCandidateNames(name)...),
+		},
+		adapter:  fac.adapter,
+		provider: fac.provider,
+		fac:      fac,
+	}
+}
+
+// matchesUnknownPatterns returns a non-nil types.Unknown if the variable names and qualifiers
+// for a given Attribute value match any of the AttributePattern objects in the set of unknown
+// activation patterns on the given PartialActivation.
+//
+// For example, in the expression `a.b`, the Attribute is composed of variable `a`, with string
+// qualifier `b`. When a PartialActivation is supplied, it indicates that some or all of the data
+// provided in the input is unknown by specifying unknown AttributePatterns. An AttributePattern
+// that refers to variable `a` with a string qualifier of `c` will not match `a.b`; however, any
+// of the following patterns will match Attribute `a.b`:
+//
+// - `AttributePattern("a")`
+// - `AttributePattern("a").Wildcard()`
+// - `AttributePattern("a").QualString("b")`
+// - `AttributePattern("a").QualString("b").QualInt(0)`
+//
+// Any AttributePattern which overlaps an Attribute or vice-versa will produce an Unknown result
+// for the last pattern matched variable or qualifier in the Attribute. In the first matching
+// example, the expression id representing variable `a` would be listed in the Unknown result,
+// whereas in the other pattern examples, the qualifier `b` would be returned as the Unknown.
+func (fac *partialAttributeFactory) matchesUnknownPatterns(
+	vars PartialActivation,
+	attrID int64,
+	variableNames []string,
+	qualifiers []Qualifier) (*types.Unknown, error) {
+	patterns := vars.UnknownAttributePatterns()
+	candidateIndices := map[int]struct{}{}
+	for _, variable := range variableNames {
+		for i, pat := range patterns {
+			if pat.VariableMatches(variable) {
+				if len(qualifiers) == 0 {
+					return types.NewUnknown(attrID, types.NewAttributeTrail(variable)), nil
+				}
+				candidateIndices[i] = struct{}{}
+			}
+		}
+	}
+	// Return early if there are no candidate unknown patterns.
+	if len(candidateIndices) == 0 {
+		return nil, nil
+	}
+	// Resolve the attribute qualifiers into a static set. This prevents more dynamic
+	// Attribute resolutions than necessary when there are multiple unknown patterns
+	// that traverse the same Attribute-based qualifier field.
+	newQuals := make([]Qualifier, len(qualifiers))
+	for i, qual := range qualifiers {
+		attr, isAttr := qual.(Attribute)
+		if isAttr {
+			val, err := attr.Resolve(vars)
+			if err != nil {
+				return nil, err
+			}
+			// If this resolution behavior ever changes, new implementations of the
+			// qualifierValueEquator may be required to handle proper resolution.
+			qual, err = fac.NewQualifier(nil, qual.ID(), val, attr.IsOptional())
+			if err != nil {
+				return nil, err
+			}
+		}
+		newQuals[i] = qual
+	}
+	// Determine whether any of the unknown patterns match.
+ for patIdx := range candidateIndices { + pat := patterns[patIdx] + isUnk := true + matchExprID := attrID + qualPats := pat.QualifierPatterns() + for i, qual := range newQuals { + if i >= len(qualPats) { + break + } + matchExprID = qual.ID() + qualPat := qualPats[i] + // Note, the AttributeQualifierPattern relies on the input Qualifier not being an + // Attribute, since there is no way to resolve the Attribute with the information + // provided to the Matches call. + if !qualPat.Matches(qual) { + isUnk = false + break + } + } + if isUnk { + attr := types.NewAttributeTrail(pat.variable) + for i := 0; i < len(qualPats) && i < len(newQuals); i++ { + if qual, ok := newQuals[i].(ConstantQualifier); ok { + switch v := qual.Value().Value().(type) { + case bool: + types.QualifyAttribute[bool](attr, v) + case float64: + types.QualifyAttribute[int64](attr, int64(v)) + case int64: + types.QualifyAttribute[int64](attr, v) + case string: + types.QualifyAttribute[string](attr, v) + case uint64: + types.QualifyAttribute[uint64](attr, v) + default: + types.QualifyAttribute[string](attr, fmt.Sprintf("%v", v)) + } + } else { + types.QualifyAttribute[string](attr, "*") + } + } + return types.NewUnknown(matchExprID, attr), nil + } + } + return nil, nil +} + +// attributeMatcher embeds the NamespacedAttribute interface which allows it to participate in +// AttributePattern matching against Attribute values without having to modify the code paths that +// identify Attributes in expressions. +type attributeMatcher struct { + NamespacedAttribute + qualifiers []Qualifier + fac *partialAttributeFactory +} + +// AddQualifier implements the Attribute interface method. +func (m *attributeMatcher) AddQualifier(qual Qualifier) (Attribute, error) { + // Add the qualifier to the embedded NamespacedAttribute. If the input to the Resolve + // method is not a PartialActivation, or does not match an unknown attribute pattern, the + // Resolve method is directly invoked on the underlying NamespacedAttribute. + _, err := m.NamespacedAttribute.AddQualifier(qual) + if err != nil { + return nil, err + } + // The attributeMatcher overloads TryResolve and will attempt to match unknown patterns against + // the variable name and qualifier set contained within the Attribute. These values are not + // directly inspectable on the top-level NamespacedAttribute interface and so are tracked within + // the attributeMatcher. + m.qualifiers = append(m.qualifiers, qual) + return m, nil +} + +// Resolve is an implementation of the NamespacedAttribute interface method which tests +// for matching unknown attribute patterns and returns types.Unknown if present. Otherwise, +// the standard Resolve logic applies. +func (m *attributeMatcher) Resolve(vars Activation) (any, error) { + id := m.NamespacedAttribute.ID() + // Bug in how partial activation is resolved, should search parents as well. + partial, isPartial := toPartialActivation(vars) + if isPartial { + unk, err := m.fac.matchesUnknownPatterns( + partial, + id, + m.CandidateVariableNames(), + m.qualifiers) + if err != nil { + return nil, err + } + if unk != nil { + return unk, nil + } + } + return m.NamespacedAttribute.Resolve(vars) +} + +// Qualify is an implementation of the Qualifier interface method. +func (m *attributeMatcher) Qualify(vars Activation, obj any) (any, error) { + return attrQualify(m.fac, vars, obj, m) +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. 
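The matcher machinery above is what the top-level partial-evaluation API exercises. A sketch using the cel package's partial-eval options, which are assumed to be available alongside this vendored copy:

	package main

	import (
		"fmt"

		"github.com/google/cel-go/cel"
	)

	func main() {
		env, err := cel.NewEnv(cel.Variable("a", cel.MapType(cel.StringType, cel.StringType)))
		if err != nil {
			panic(err)
		}
		ast, iss := env.Compile(`a.b == 'ok'`)
		if iss.Err() != nil {
			panic(iss.Err())
		}
		prg, err := env.Program(ast, cel.EvalOptions(cel.OptPartialEval))
		if err != nil {
			panic(err)
		}
		// Declare a.b unknown: evaluation yields a *types.Unknown naming the
		// matched expression id rather than a missing-attribute error.
		vars, err := cel.PartialVars(map[string]any{}, cel.AttributePattern("a").QualString("b"))
		if err != nil {
			panic(err)
		}
		out, _, _ := prg.Eval(vars)
		fmt.Println(out)
	}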
+func (m *attributeMatcher) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	return attrQualifyIfPresent(m.fac, vars, obj, m, presenceOnly)
+}
+
+func toPartialActivation(vars Activation) (PartialActivation, bool) {
+	pv, ok := vars.(PartialActivation)
+	if ok {
+		return pv, true
+	}
+	if vars.Parent() != nil {
+		return toPartialActivation(vars.Parent())
+	}
+	return nil, false
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/attributes.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/attributes.go
new file mode 100644
index 0000000000..b1b3aacc83
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/attributes.go
@@ -0,0 +1,1436 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/google/cel-go/common/containers"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+// AttributeFactory provides methods for creating Attribute and Qualifier values.
+type AttributeFactory interface {
+	// AbsoluteAttribute creates an attribute that refers to a top-level variable name.
+	//
+	// Checked expressions generate an absolute attribute with a single name.
+	// Parse-only expressions may have more than one possible absolute identifier when the
+	// expression is created within a container, e.g. package or namespace.
+	//
+	// When there is more than one name supplied to the AbsoluteAttribute call, the names
+	// must be in CEL's namespace resolution order. The name arguments provided here are
+	// returned in the same order as they were provided by the NamespacedAttribute
+	// CandidateVariableNames method.
+	AbsoluteAttribute(id int64, names ...string) NamespacedAttribute
+
+	// ConditionalAttribute creates an attribute with two Attribute branches, where the Attribute
+	// that is resolved depends on the boolean evaluation of the input 'expr'.
+	ConditionalAttribute(id int64, expr Interpretable, t, f Attribute) Attribute
+
+	// MaybeAttribute creates an attribute that refers to either a field selection or a namespaced
+	// variable name.
+	//
+	// Only expressions which have not been type-checked may generate oneof attributes.
+	MaybeAttribute(id int64, name string) Attribute
+
+	// RelativeAttribute creates an attribute whose value is a qualification of a dynamic
+	// computation rather than a static variable reference.
+	RelativeAttribute(id int64, operand Interpretable) Attribute
+
+	// NewQualifier creates a qualifier on the target object with a given value.
+	//
+	// The 'val' may be an Attribute or any proto-supported map key type: bool, int, string, uint.
+	//
+	// The qualifier may consider the object type being qualified, if present. If absent, the
+	// qualification should be considered dynamic and the qualification should still work, though
+	// it may be sub-optimal.
+	NewQualifier(objType *types.Type, qualID int64, val any, opt bool) (Qualifier, error)
+}
+
+// Qualifier is a marker interface for designating different qualifier values and where they appear
+// within field selections and index call expressions (`_[_]`).
+type Qualifier interface {
+	// ID returns the expression id where the qualifier appears.
+	ID() int64
+
+	// IsOptional specifies whether the qualifier is optional.
+	// Instead of a direct qualification, an optional qualifier will be resolved via QualifyIfPresent
+	// rather than Qualify. A non-optional qualifier may also be resolved through QualifyIfPresent if
+	// the object to qualify is itself optional.
+	IsOptional() bool
+
+	// Qualify performs a qualification, e.g. field selection, on the input object and returns
+	// the value of the access and whether the value was set. A non-nil value with a false presence
+	// test result indicates that the value being returned is the default value.
+	Qualify(vars Activation, obj any) (any, error)
+
+	// QualifyIfPresent qualifies the object if the qualifier is declared or defined on the object.
+	// The 'presenceOnly' flag indicates that the value is not necessary, just a boolean status as
+	// to whether the qualifier is present.
+	QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error)
+}
+
+// ConstantQualifier interface embeds the Qualifier interface and provides an option to inspect the
+// qualifier's constant value.
+//
+// Non-constant qualifiers are of Attribute type.
+type ConstantQualifier interface {
+	Qualifier
+
+	// Value returns the constant value associated with the qualifier.
+	Value() ref.Val
+}
+
+// Attribute values are a variable or value with an optional set of qualifiers, such as field, key,
+// or index accesses.
+type Attribute interface {
+	Qualifier
+
+	// AddQualifier adds a qualifier on the Attribute or errors if the qualification is not a valid
+	// qualifier type.
+	AddQualifier(Qualifier) (Attribute, error)
+
+	// Resolve returns the value of the Attribute and whether it was present given an Activation.
+	// For objects which support safe traversal, the value may be non-nil and the presence flag be false.
+	//
+	// If an error is encountered during attribute resolution, it will be returned immediately.
+	// If the attribute cannot be resolved within the Activation, the result must be: `nil`, `error`
+	// with the error indicating which variable was missing.
+	Resolve(Activation) (any, error)
+}
+
+// NamespacedAttribute values are a variable within a namespace, and an optional set of qualifiers
+// such as field, key, or index accesses.
+type NamespacedAttribute interface {
+	Attribute
+
+	// CandidateVariableNames returns the possible namespaced variable names for this Attribute in
+	// the CEL namespace resolution order.
+	CandidateVariableNames() []string
+
+	// Qualifiers returns the list of qualifiers associated with the Attribute.
+	Qualifiers() []Qualifier
+}
+
+// AttrFactoryOption specifies a functional option for configuring an attribute factory.
+type AttrFactoryOption func(*attrFactory) *attrFactory
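A sketch of a factory producing and applying a constant string qualifier (illustrative only; the empty registry and nil default container are assumptions for a minimal setup):

	package main

	import (
		"fmt"

		"github.com/google/cel-go/common/containers"
		"github.com/google/cel-go/common/types"
		"github.com/google/cel-go/interpreter"
	)

	func main() {
		reg := types.NewEmptyRegistry() // acts as both the Adapter and the Provider
		fac := interpreter.NewAttributeFactory(containers.DefaultContainer, reg, reg)
		// The qualifier produced for a field selection such as `obj.role`.
		qual, err := fac.NewQualifier(nil, 1, "role", false)
		if err != nil {
			panic(err)
		}
		out, err := qual.Qualify(interpreter.EmptyActivation(), map[string]any{"role": "admin"})
		fmt.Println(out, err) // admin <nil>
	}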
+// EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field
+// selection is performed on a primitive type.
+func EnableErrorOnBadPresenceTest(value bool) AttrFactoryOption {
+	return func(fac *attrFactory) *attrFactory {
+		fac.errorOnBadPresenceTest = value
+		return fac
+	}
+}
+
+// NewAttributeFactory returns a default AttributeFactory which produces Attribute values
+// capable of resolving types by simple names and qualifying the values using the supported
+// qualifier types: bool, int, string, and uint.
+func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider, opts ...AttrFactoryOption) AttributeFactory {
+	fac := &attrFactory{
+		container: cont,
+		adapter:   a,
+		provider:  p,
+	}
+	for _, o := range opts {
+		fac = o(fac)
+	}
+	return fac
+}
+
+type attrFactory struct {
+	container *containers.Container
+	adapter   types.Adapter
+	provider  types.Provider
+
+	errorOnBadPresenceTest bool
+}
+
+// AbsoluteAttribute refers to a variable value and an optional qualifier path.
+//
+// The namespaceNames represent the names the variable could have based on namespace
+// resolution rules.
+func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute {
+	return &absoluteAttribute{
+		id:                     id,
+		namespaceNames:         names,
+		qualifiers:             []Qualifier{},
+		adapter:                r.adapter,
+		provider:               r.provider,
+		fac:                    r,
+		errorOnBadPresenceTest: r.errorOnBadPresenceTest,
+	}
+}
+
+// ConditionalAttribute supports the case where an attribute selection may occur on a conditional
+// expression, e.g. (cond ? a : b).c
+func (r *attrFactory) ConditionalAttribute(id int64, expr Interpretable, t, f Attribute) Attribute {
+	return &conditionalAttribute{
+		id:      id,
+		expr:    expr,
+		truthy:  t,
+		falsy:   f,
+		adapter: r.adapter,
+		fac:     r,
+	}
+}
+
+// MaybeAttribute collects variants of unchecked AbsoluteAttribute values which could either be
+// direct variable accesses or some combination of variable access with qualification.
+func (r *attrFactory) MaybeAttribute(id int64, name string) Attribute {
+	return &maybeAttribute{
+		id: id,
+		attrs: []NamespacedAttribute{
+			r.AbsoluteAttribute(id, r.container.ResolveCandidateNames(name)...),
+		},
+		adapter:  r.adapter,
+		provider: r.provider,
+		fac:      r,
+	}
+}
+
+// RelativeAttribute refers to an expression and an optional qualifier path.
+func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribute {
+	return &relativeAttribute{
+		id:                     id,
+		operand:                operand,
+		qualifiers:             []Qualifier{},
+		adapter:                r.adapter,
+		fac:                    r,
+		errorOnBadPresenceTest: r.errorOnBadPresenceTest,
+	}
+}
+
+// NewQualifier is an implementation of the AttributeFactory interface.
+func (r *attrFactory) NewQualifier(objType *types.Type, qualID int64, val any, opt bool) (Qualifier, error) {
+	// Before creating a new qualifier, check to see if this is a protobuf message field access.
+	// If so, use the precomputed GetFrom qualification method rather than the standard
+	// stringQualifier.
+	str, isStr := val.(string)
+	if isStr && objType != nil && objType.Kind() == types.StructKind {
+		ft, found := r.provider.FindStructFieldType(objType.TypeName(), str)
+		if found && ft.IsSet != nil && ft.GetFrom != nil {
+			return &fieldQualifier{
+				id:        qualID,
+				Name:      str,
+				FieldType: ft,
+				adapter:   r.adapter,
+				optional:  opt,
+			}, nil
+		}
+	}
+	return newQualifier(r.adapter, qualID, val, opt, r.errorOnBadPresenceTest)
+}
+
+type absoluteAttribute struct {
+	id int64
+	// namespaceNames represent the names the variable could have based on the declared container
+	// (package) of the expression.
+	namespaceNames []string
+	qualifiers     []Qualifier
+	adapter        types.Adapter
+	provider       types.Provider
+	fac            AttributeFactory
+
+	errorOnBadPresenceTest bool
+}
+
+// ID implements the Attribute interface method.
+func (a *absoluteAttribute) ID() int64 {
+	qualCount := len(a.qualifiers)
+	if qualCount == 0 {
+		return a.id
+	}
+	return a.qualifiers[qualCount-1].ID()
+}
+
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *absoluteAttribute) IsOptional() bool {
+	return false
+}
+
+// AddQualifier implements the Attribute interface method.
+func (a *absoluteAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
+	a.qualifiers = append(a.qualifiers, qual)
+	return a, nil
+}
+
+// CandidateVariableNames implements the NamespacedAttribute interface method.
+func (a *absoluteAttribute) CandidateVariableNames() []string {
+	return a.namespaceNames
+}
+
+// Qualifiers returns the list of Qualifier instances associated with the namespaced attribute.
+func (a *absoluteAttribute) Qualifiers() []Qualifier {
+	return a.qualifiers
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (a *absoluteAttribute) Qualify(vars Activation, obj any) (any, error) {
+	return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *absoluteAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
+}
+
+// String implements the Stringer interface method.
+func (a *absoluteAttribute) String() string {
+	return fmt.Sprintf("id: %v, names: %v", a.id, a.namespaceNames)
+}
+
+// Resolve returns the resolved Attribute value given the Activation, or error if the Attribute
+// variable is not found, or if its Qualifiers cannot be applied successfully.
+//
+// If the variable name cannot be found as an Activation variable or in the TypeProvider as
+// a type, then the result is `nil`, `error` with the error indicating the name of the first
+// variable searched as missing.
+func (a *absoluteAttribute) Resolve(vars Activation) (any, error) {
+	for _, nm := range a.namespaceNames {
+		// If the variable is found, process it. Otherwise, wait until the checks to
+		// determine whether the type is unknown before returning.
+		obj, found := vars.ResolveName(nm)
+		if found {
+			if celErr, ok := obj.(*types.Err); ok {
+				return nil, celErr.Unwrap()
+			}
+			obj, isOpt, err := applyQualifiers(vars, obj, a.qualifiers)
+			if err != nil {
+				return nil, err
+			}
+			if isOpt {
+				val := a.adapter.NativeToValue(obj)
+				if types.IsUnknown(val) {
+					return val, nil
+				}
+				return types.OptionalOf(val), nil
+			}
+			return obj, nil
+		}
+		// Attempt to resolve the qualified type name if the name is not a variable identifier.
+		typ, found := a.provider.FindIdent(nm)
+		if found {
+			if len(a.qualifiers) == 0 {
+				return typ, nil
+			}
+		}
+	}
+	var attrNames strings.Builder
+	for i, nm := range a.namespaceNames {
+		if i != 0 {
+			attrNames.WriteString(", ")
+		}
+		attrNames.WriteString(nm)
+	}
+	return nil, missingAttribute(attrNames.String())
+}
+
+type conditionalAttribute struct {
+	id      int64
+	expr    Interpretable
+	truthy  Attribute
+	falsy   Attribute
+	adapter types.Adapter
+	fac     AttributeFactory
+}
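A sketch of the candidate-name search order during Resolve (illustrative bindings; the namespaced name wins because it is listed first):

	package main

	import (
		"fmt"

		"github.com/google/cel-go/common/containers"
		"github.com/google/cel-go/common/types"
		"github.com/google/cel-go/interpreter"
	)

	func main() {
		reg := types.NewEmptyRegistry()
		fac := interpreter.NewAttributeFactory(containers.DefaultContainer, reg, reg)
		// Candidate names appear in CEL namespace-resolution order, so `ns.x`
		// shadows the top-level `x` when both are bound.
		attr := fac.AbsoluteAttribute(1, "ns.x", "x")
		vars, _ := interpreter.NewActivation(map[string]any{
			"ns.x": "namespaced",
			"x":    "global",
		})
		out, err := attr.Resolve(vars)
		fmt.Println(out, err) // namespaced <nil>
	}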
+// ID is an implementation of the Attribute interface method.
+func (a *conditionalAttribute) ID() int64 {
+	// There's a field access after the conditional.
+	if a.truthy.ID() == a.falsy.ID() {
+		return a.truthy.ID()
+	}
+	// Otherwise return the conditional id as the consistent id being tracked.
+	return a.id
+}
+
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *conditionalAttribute) IsOptional() bool {
+	return false
+}
+
+// AddQualifier appends the same qualifier to both sides of the conditional, in effect managing
+// the qualification of alternate attributes.
+func (a *conditionalAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
+	_, err := a.truthy.AddQualifier(qual)
+	if err != nil {
+		return nil, err
+	}
+	_, err = a.falsy.AddQualifier(qual)
+	if err != nil {
+		return nil, err
+	}
+	return a, nil
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (a *conditionalAttribute) Qualify(vars Activation, obj any) (any, error) {
+	return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *conditionalAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
+}
+
+// Resolve evaluates the condition, and then resolves the truthy or falsy branch accordingly.
+func (a *conditionalAttribute) Resolve(vars Activation) (any, error) {
+	val := a.expr.Eval(vars)
+	if val == types.True {
+		return a.truthy.Resolve(vars)
+	}
+	if val == types.False {
+		return a.falsy.Resolve(vars)
+	}
+	if types.IsUnknown(val) {
+		return val, nil
+	}
+	return nil, types.MaybeNoSuchOverloadErr(val).(*types.Err)
+}
+
+// String is an implementation of the Stringer interface method.
+func (a *conditionalAttribute) String() string {
+	return fmt.Sprintf("id: %v, truthy attribute: %v, falsy attribute: %v", a.id, a.truthy, a.falsy)
+}
+
+type maybeAttribute struct {
+	id       int64
+	attrs    []NamespacedAttribute
+	adapter  types.Adapter
+	provider types.Provider
+	fac      AttributeFactory
+}
+
+// ID is an implementation of the Attribute interface method.
+func (a *maybeAttribute) ID() int64 {
+	return a.attrs[0].ID()
+}
+
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *maybeAttribute) IsOptional() bool {
+	return false
+}
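A sketch of the two resolutions a maybe attribute tolerates after qualification, per the algorithm documented below (illustrative bindings, not part of the vendored sources):

	package main

	import (
		"fmt"

		"github.com/google/cel-go/common/containers"
		"github.com/google/cel-go/common/types"
		"github.com/google/cel-go/interpreter"
	)

	func main() {
		reg := types.NewEmptyRegistry()
		fac := interpreter.NewAttributeFactory(containers.DefaultContainer, reg, reg)
		mb := fac.MaybeAttribute(1, "a")
		qual, _ := fac.NewQualifier(nil, 2, "b", false)
		mb, _ = mb.AddQualifier(qual)
		// Either a flat variable named "a.b" or a field selection a['b'] now
		// satisfies the attribute; the longer qualified name is tried first.
		flat, _ := interpreter.NewActivation(map[string]any{"a.b": 1})
		nested, _ := interpreter.NewActivation(map[string]any{"a": map[string]any{"b": 2}})
		v1, _ := mb.Resolve(flat)
		v2, _ := mb.Resolve(nested)
		fmt.Println(v1, v2) // 1 2
	}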
+// AddQualifier adds a qualifier to each possible attribute variant, and also creates
+// a new namespaced variable from the qualified value.
+//
+// The algorithm for building the maybe attribute is as follows:
+//
+// 1. Create a maybe attribute from a simple identifier when it occurs in a parsed-only expression
+//
+//	mb = MaybeAttribute(<id>, "a")
+//
+// Initializing the maybe attribute creates an absolute attribute internally which includes the
+// possible namespaced names of the attribute. In this example, let's assume we are in namespace
+// 'ns', then the maybe is either one of the following variable names:
+//
+//	possible variable names -- ns.a, a
+//
+// 2. Adding a qualifier to the maybe means that the variable name could be a longer qualified
+// name, or a field selection on one of the possible variable names produced earlier:
+//
+//	mb.AddQualifier("b")
+//
+//	possible variable names -- ns.a.b, a.b
+//	possible field selection -- ns.a['b'], a['b']
+//
+// If none of the attributes within the maybe resolves a value, the result is an error.
+func (a *maybeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
+	str := ""
+	isStr := false
+	cq, isConst := qual.(ConstantQualifier)
+	if isConst {
+		str, isStr = cq.Value().Value().(string)
+	}
+	var augmentedNames []string
+	// First add the qualifier to all existing attributes in the oneof.
+	for _, attr := range a.attrs {
+		if isStr && len(attr.Qualifiers()) == 0 {
+			candidateVars := attr.CandidateVariableNames()
+			augmentedNames = make([]string, len(candidateVars))
+			for i, name := range candidateVars {
+				augmentedNames[i] = fmt.Sprintf("%s.%s", name, str)
+			}
+		}
+		_, err := attr.AddQualifier(qual)
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Next, ensure the most specific variable / type reference is searched first.
+	if len(augmentedNames) != 0 {
+		a.attrs = append([]NamespacedAttribute{a.fac.AbsoluteAttribute(qual.ID(), augmentedNames...)}, a.attrs...)
+	}
+	return a, nil
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (a *maybeAttribute) Qualify(vars Activation, obj any) (any, error) {
+	return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *maybeAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
+}
+
+// Resolve follows the variable resolution rules to determine whether the attribute is a variable
+// or a field selection.
+func (a *maybeAttribute) Resolve(vars Activation) (any, error) {
+	var maybeErr error
+	for _, attr := range a.attrs {
+		obj, err := attr.Resolve(vars)
+		// Return an error if one is encountered.
+		if err != nil {
+			resErr, ok := err.(*resolutionError)
+			if !ok {
+				return nil, err
+			}
+			// If this was not a missing variable error, return it.
+			if !resErr.isMissingAttribute() {
+				return nil, err
+			}
+			// When the variable is missing in a maybe attribute we defer erroring.
+			if maybeErr == nil {
+				maybeErr = resErr
+			}
+			// Continue attempting to resolve possible variables.
+			continue
+		}
+		return obj, nil
+	}
+	// Else, produce a no such attribute error.
+	return nil, maybeErr
+}
+
+// String is an implementation of the Stringer interface method.
+func (a *maybeAttribute) String() string {
+	return fmt.Sprintf("id: %v, attributes: %v", a.id, a.attrs)
+}
+
+type relativeAttribute struct {
+	id         int64
+	operand    Interpretable
+	qualifiers []Qualifier
+	adapter    types.Adapter
+	fac        AttributeFactory
+
+	errorOnBadPresenceTest bool
+}
+
+// ID is an implementation of the Attribute interface method.
+func (a *relativeAttribute) ID() int64 {
+	qualCount := len(a.qualifiers)
+	if qualCount == 0 {
+		return a.id
+	}
+	return a.qualifiers[qualCount-1].ID()
+}
+
+// IsOptional returns trivially false for an attribute as the attribute represents a fully
+// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *relativeAttribute) IsOptional() bool {
+	return false
+}
+
+// AddQualifier implements the Attribute interface method.
+func (a *relativeAttribute) AddQualifier(qual Qualifier) (Attribute, error) { + a.qualifiers = append(a.qualifiers, qual) + return a, nil +} + +// Qualify is an implementation of the Qualifier interface method. +func (a *relativeAttribute) Qualify(vars Activation, obj any) (any, error) { + return attrQualify(a.fac, vars, obj, a) +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. +func (a *relativeAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly) +} + +// Resolve expression value and qualifier relative to the expression result. +func (a *relativeAttribute) Resolve(vars Activation) (any, error) { + // First, evaluate the operand. + v := a.operand.Eval(vars) + if types.IsError(v) { + return nil, v.(*types.Err) + } + if types.IsUnknown(v) { + return v, nil + } + obj, isOpt, err := applyQualifiers(vars, v, a.qualifiers) + if err != nil { + return nil, err + } + if isOpt { + val := a.adapter.NativeToValue(obj) + if types.IsUnknown(val) { + return val, nil + } + return types.OptionalOf(val), nil + } + return obj, nil +} + +// String is an implementation of the Stringer interface method. +func (a *relativeAttribute) String() string { + return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand) +} + +func newQualifier(adapter types.Adapter, id int64, v any, opt, errorOnBadPresenceTest bool) (Qualifier, error) { + var qual Qualifier + switch val := v.(type) { + case Attribute: + // Note, attributes are initially identified as non-optional since they represent a top-level + // field access; however, when used as a relative qualifier, e.g. a[?b.c], then an attrQualifier + // is created which intercepts the IsOptional check for the attribute in order to return the + // correct result. 
+ return &attrQualifier{ + id: id, + Attribute: val, + optional: opt, + }, nil + case string: + qual = &stringQualifier{ + id: id, + value: val, + celValue: types.String(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case int: + qual = &intQualifier{ + id: id, + value: int64(val), + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case int32: + qual = &intQualifier{ + id: id, + value: int64(val), + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case int64: + qual = &intQualifier{ + id: id, + value: val, + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case uint: + qual = &uintQualifier{ + id: id, + value: uint64(val), + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case uint32: + qual = &uintQualifier{ + id: id, + value: uint64(val), + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case uint64: + qual = &uintQualifier{ + id: id, + value: val, + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case bool: + qual = &boolQualifier{ + id: id, + value: val, + celValue: types.Bool(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case float32: + qual = &doubleQualifier{ + id: id, + value: float64(val), + celValue: types.Double(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case float64: + qual = &doubleQualifier{ + id: id, + value: val, + celValue: types.Double(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case types.String: + qual = &stringQualifier{ + id: id, + value: string(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case types.Int: + qual = &intQualifier{ + id: id, + value: int64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case types.Uint: + qual = &uintQualifier{ + id: id, + value: uint64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case types.Bool: + qual = &boolQualifier{ + id: id, + value: bool(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case types.Double: + qual = &doubleQualifier{ + id: id, + value: float64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case *types.Unknown: + qual = &unknownQualifier{id: id, value: val} + default: + if q, ok := v.(Qualifier); ok { + return q, nil + } + return nil, fmt.Errorf("invalid qualifier type: %T", v) + } + return qual, nil +} + +type attrQualifier struct { + id int64 + Attribute + optional bool +} + +// ID implements the Qualifier interface method and returns the qualification instruction id +// rather than the attribute id. +func (q *attrQualifier) ID() int64 { + return q.id +} + +// IsOptional implements the Qualifier interface method. 
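The switch above normalizes every supported Go and CEL scalar into one of a handful of qualifier shapes. A sketch with illustrative values:

	package main

	import (
		"fmt"

		"github.com/google/cel-go/common/containers"
		"github.com/google/cel-go/common/types"
		"github.com/google/cel-go/interpreter"
	)

	func main() {
		reg := types.NewEmptyRegistry()
		fac := interpreter.NewAttributeFactory(containers.DefaultContainer, reg, reg)
		vars := interpreter.EmptyActivation()
		list := []string{"a", "b", "c"}
		// int, int32, int64, and types.Int all collapse to the same int
		// qualifier, so each value below indexes the list identically.
		for _, idx := range []any{int(1), int32(1), int64(1), types.Int(1)} {
			qual, err := fac.NewQualifier(nil, 1, idx, false)
			if err != nil {
				panic(err)
			}
			out, err := qual.Qualify(vars, list)
			fmt.Println(out, err) // b <nil>
		}
	}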
+func (q *attrQualifier) IsOptional() bool { + return q.optional +} + +type stringQualifier struct { + id int64 + value string + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool +} + +// ID is an implementation of the Qualifier interface method. +func (q *stringQualifier) ID() int64 { + return q.id +} + +// IsOptional implements the Qualifier interface method. +func (q *stringQualifier) IsOptional() bool { + return q.optional +} + +// Qualify implements the Qualifier interface method. +func (q *stringQualifier) Qualify(vars Activation, obj any) (any, error) { + val, _, err := q.qualifyInternal(vars, obj, false, false) + return val, err +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. +func (q *stringQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + return q.qualifyInternal(vars, obj, true, presenceOnly) +} + +func (q *stringQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) { + s := q.value + switch o := obj.(type) { + case map[string]any: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]string: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]int: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]int32: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]int64: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]uint: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]uint32: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]uint64: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]float32: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]float64: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]bool: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + default: + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) + } + if presenceTest { + return nil, false, nil + } + return nil, false, missingKey(q.celValue) +} + +// Value implements the ConstantQualifier interface +func (q *stringQualifier) Value() ref.Val { + return q.celValue +} + +type intQualifier struct { + id int64 + value int64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool +} + +// ID is an implementation of the Qualifier interface method. +func (q *intQualifier) ID() int64 { + return q.id +} + +// IsOptional implements the Qualifier interface method. +func (q *intQualifier) IsOptional() bool { + return q.optional +} + +// Qualify implements the Qualifier interface method. +func (q *intQualifier) Qualify(vars Activation, obj any) (any, error) { + val, _, err := q.qualifyInternal(vars, obj, false, false) + return val, err +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. 
+func (q *intQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + return q.qualifyInternal(vars, obj, true, presenceOnly) +} + +func (q *intQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) { + i := q.value + var isMap bool + switch o := obj.(type) { + // The specialized map types supported by an int qualifier are considerably fewer than the set + // of specialized map types supported by string qualifiers since they are less frequently used + // than string-based map keys. Additional specializations may be added in the future if + // desired. + case map[int]any: + isMap = true + obj, isKey := o[int(i)] + if isKey { + return obj, true, nil + } + case map[int32]any: + isMap = true + obj, isKey := o[int32(i)] + if isKey { + return obj, true, nil + } + case map[int64]any: + isMap = true + obj, isKey := o[i] + if isKey { + return obj, true, nil + } + case []any: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []string: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []int: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []int32: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []int64: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []uint: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []uint32: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []uint64: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []float32: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []float64: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []bool: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + default: + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) + } + if presenceTest { + return nil, false, nil + } + if isMap { + return nil, false, missingKey(q.celValue) + } + return nil, false, missingIndex(q.celValue) +} + +// Value implements the ConstantQualifier interface +func (q *intQualifier) Value() ref.Val { + return q.celValue +} + +type uintQualifier struct { + id int64 + value uint64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool +} + +// ID is an implementation of the Qualifier interface method. +func (q *uintQualifier) ID() int64 { + return q.id +} + +// IsOptional implements the Qualifier interface method. +func (q *uintQualifier) IsOptional() bool { + return q.optional +} + +// Qualify implements the Qualifier interface method. +func (q *uintQualifier) Qualify(vars Activation, obj any) (any, error) { + val, _, err := q.qualifyInternal(vars, obj, false, false) + return val, err +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. 
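A sketch of the map-versus-list error split implemented above (illustrative; the exact error text comes from the resolutionError helpers defined elsewhere in this file):

	package main

	import (
		"fmt"

		"github.com/google/cel-go/common/containers"
		"github.com/google/cel-go/common/types"
		"github.com/google/cel-go/interpreter"
	)

	func main() {
		reg := types.NewEmptyRegistry()
		fac := interpreter.NewAttributeFactory(containers.DefaultContainer, reg, reg)
		vars := interpreter.EmptyActivation()
		qual, _ := fac.NewQualifier(nil, 1, int64(3), false)
		// An out-of-range list access reports a missing index...
		_, errList := qual.Qualify(vars, []string{"a", "b"})
		// ...while an absent map key reports a missing key.
		_, errMap := qual.Qualify(vars, map[int64]any{1: "x"})
		fmt.Println(errList)
		fmt.Println(errMap)
	}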
+func (q *uintQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + return q.qualifyInternal(vars, obj, true, presenceOnly) +} + +func (q *uintQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) { + u := q.value + switch o := obj.(type) { + // The specialized map types supported by a uint qualifier are considerably fewer than the set + // of specialized map types supported by string qualifiers since they are less frequently used + // than string-based map keys. Additional specializations may be added in the future if + // desired. + case map[uint]any: + obj, isKey := o[uint(u)] + if isKey { + return obj, true, nil + } + case map[uint32]any: + obj, isKey := o[uint32(u)] + if isKey { + return obj, true, nil + } + case map[uint64]any: + obj, isKey := o[u] + if isKey { + return obj, true, nil + } + default: + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) + } + if presenceTest { + return nil, false, nil + } + return nil, false, missingKey(q.celValue) +} + +// Value implements the ConstantQualifier interface +func (q *uintQualifier) Value() ref.Val { + return q.celValue +} + +type boolQualifier struct { + id int64 + value bool + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool +} + +// ID is an implementation of the Qualifier interface method. +func (q *boolQualifier) ID() int64 { + return q.id +} + +// IsOptional implements the Qualifier interface method. +func (q *boolQualifier) IsOptional() bool { + return q.optional +} + +// Qualify implements the Qualifier interface method. +func (q *boolQualifier) Qualify(vars Activation, obj any) (any, error) { + val, _, err := q.qualifyInternal(vars, obj, false, false) + return val, err +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. +func (q *boolQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + return q.qualifyInternal(vars, obj, true, presenceOnly) +} + +func (q *boolQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) { + b := q.value + switch o := obj.(type) { + case map[bool]any: + obj, isKey := o[b] + if isKey { + return obj, true, nil + } + default: + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) + } + if presenceTest { + return nil, false, nil + } + return nil, false, missingKey(q.celValue) +} + +// Value implements the ConstantQualifier interface +func (q *boolQualifier) Value() ref.Val { + return q.celValue +} + +// fieldQualifier indicates that the qualification is a well-defined field with a known +// field type. When the field type is known this can be used to improve the speed and +// efficiency of field resolution. +type fieldQualifier struct { + id int64 + Name string + FieldType *types.FieldType + adapter types.Adapter + optional bool +} + +// ID is an implementation of the Qualifier interface method. +func (q *fieldQualifier) ID() int64 { + return q.id +} + +// IsOptional implements the Qualifier interface method. +func (q *fieldQualifier) IsOptional() bool { + return q.optional +} + +// Qualify implements the Qualifier interface method. 
+func (q *fieldQualifier) Qualify(vars Activation, obj any) (any, error) {
+	if rv, ok := obj.(ref.Val); ok {
+		obj = rv.Value()
+	}
+	val, err := q.FieldType.GetFrom(obj)
+	if err != nil {
+		return nil, err
+	}
+	return val, nil
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *fieldQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	if rv, ok := obj.(ref.Val); ok {
+		obj = rv.Value()
+	}
+	if !q.FieldType.IsSet(obj) {
+		return nil, false, nil
+	}
+	if presenceOnly {
+		return nil, true, nil
+	}
+	val, err := q.FieldType.GetFrom(obj)
+	if err != nil {
+		return nil, false, err
+	}
+	return val, true, nil
+}
+
+// Value implements the ConstantQualifier interface
+func (q *fieldQualifier) Value() ref.Val {
+	return types.String(q.Name)
+}
+
+// doubleQualifier qualifies a CEL object, map, or list using a double value.
+//
+// This qualifier is used for working with dynamic data like JSON or protobuf.Any where the value
+// type may not be known ahead of time and may not conform to the standard types supported as valid
+// protobuf map key types.
+type doubleQualifier struct {
+	id                     int64
+	value                  float64
+	celValue               ref.Val
+	adapter                types.Adapter
+	optional               bool
+	errorOnBadPresenceTest bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *doubleQualifier) ID() int64 {
+	return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *doubleQualifier) IsOptional() bool {
+	return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *doubleQualifier) Qualify(vars Activation, obj any) (any, error) {
+	val, _, err := q.qualifyInternal(vars, obj, false, false)
+	return val, err
+}
+
+func (q *doubleQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *doubleQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+	return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *doubleQualifier) Value() ref.Val {
+	return q.celValue
+}
+
+// unknownQualifier is a simple qualifier which always returns a preconfigured set of unknown values
+// for any value subject to qualification. This is consistent with CEL's unknown handling elsewhere.
+type unknownQualifier struct {
+	id    int64
+	value *types.Unknown
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *unknownQualifier) ID() int64 {
+	return q.id
+}
+
+// IsOptional returns trivially false as the unknown value is always returned.
+func (q *unknownQualifier) IsOptional() bool {
+	return false
+}
+
+// Qualify returns the unknown value associated with this qualifier.
+func (q *unknownQualifier) Qualify(vars Activation, obj any) (any, error) {
+	return q.value, nil
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
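+//
+// The unknown value is treated as always present: qualification simply
+// propagates the preconfigured unknown set.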
+func (q *unknownQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	return q.value, true, nil
+}
+
+// Value implements the ConstantQualifier interface
+func (q *unknownQualifier) Value() ref.Val {
+	return q.value
+}
+
+func applyQualifiers(vars Activation, obj any, qualifiers []Qualifier) (any, bool, error) {
+	optObj, isOpt := obj.(*types.Optional)
+	if isOpt {
+		if !optObj.HasValue() {
+			return optObj, false, nil
+		}
+		obj = optObj.GetValue().Value()
+	}
+
+	var err error
+	for _, qual := range qualifiers {
+		var qualObj any
+		isOpt = isOpt || qual.IsOptional()
+		if isOpt {
+			var present bool
+			qualObj, present, err = qual.QualifyIfPresent(vars, obj, false)
+			if err != nil {
+				return nil, false, err
+			}
+			if !present {
+				// We return optional none here with a presence of 'false' as the layers
+				// above will attempt to call types.OptionalOf() on a present value if any
+				// of the qualifiers is optional.
+				return types.OptionalNone, false, nil
+			}
+		} else {
+			qualObj, err = qual.Qualify(vars, obj)
+			if err != nil {
+				return nil, false, err
+			}
+		}
+		obj = qualObj
+	}
+	return obj, isOpt, nil
+}
+
+// attrQualify performs a qualification using the result of an attribute evaluation.
+func attrQualify(fac AttributeFactory, vars Activation, obj any, qualAttr Attribute) (any, error) {
+	val, err := qualAttr.Resolve(vars)
+	if err != nil {
+		return nil, err
+	}
+	qual, err := fac.NewQualifier(nil, qualAttr.ID(), val, qualAttr.IsOptional())
+	if err != nil {
+		return nil, err
+	}
+	return qual.Qualify(vars, obj)
+}
+
+// attrQualifyIfPresent conditionally performs the qualification when the result of the attribute
+// evaluation is present on the target object.
+func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAttr Attribute,
+	presenceOnly bool) (any, bool, error) {
+	val, err := qualAttr.Resolve(vars)
+	if err != nil {
+		return nil, false, err
+	}
+	qual, err := fac.NewQualifier(nil, qualAttr.ID(), val, qualAttr.IsOptional())
+	if err != nil {
+		return nil, false, err
+	}
+	return qual.QualifyIfPresent(vars, obj, presenceOnly)
+}
+
+// refQualify attempts to convert the value to a CEL value and then uses reflection methods to try and
+// apply the qualifier with the option to presence test field accesses before retrieving field values.
+func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly, errorOnBadPresenceTest bool) (ref.Val, bool, error) {
+	celVal := adapter.NativeToValue(obj)
+	switch v := celVal.(type) {
+	case *types.Unknown:
+		return v, true, nil
+	case *types.Err:
+		return nil, false, v
+	case traits.Mapper:
+		val, found := v.Find(idx)
+		// If the index is of the wrong type for the map, then it is possible
+		// for the Find call to produce an error.
+		if types.IsError(val) {
+			return nil, false, val.(*types.Err)
+		}
+		if found {
+			return val, true, nil
+		}
+		if presenceTest {
+			return nil, false, nil
+		}
+		return nil, false, missingKey(idx)
+	case traits.Lister:
+		// If the index argument is not a valid numeric type, then it is possible
+		// for the index operation to produce an error.
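+		// For example (illustrative), list['1'] produces an error here rather
+		// than coercing the string index to an integer.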
+ i, err := types.IndexOrError(idx) + if err != nil { + return nil, false, err + } + celIndex := types.Int(i) + if i >= 0 && celIndex < v.Size().(types.Int) { + return v.Get(idx), true, nil + } + if presenceTest { + return nil, false, nil + } + return nil, false, missingIndex(idx) + case traits.Indexer: + if presenceTest { + ft, ok := v.(traits.FieldTester) + if ok { + presence := ft.IsSet(idx) + if types.IsError(presence) { + return nil, false, presence.(*types.Err) + } + // If not found or presence only test, then return. + // Otherwise, if found, obtain the value later on. + if presenceOnly || presence == types.False { + return nil, presence == types.True, nil + } + } + } + val := v.Get(idx) + if types.IsError(val) { + return nil, false, val.(*types.Err) + } + return val, true, nil + default: + if presenceTest && !errorOnBadPresenceTest { + return nil, false, nil + } + return nil, false, missingKey(idx) + } +} + +// resolutionError is a custom error type which encodes the different error states which may +// occur during attribute resolution. +type resolutionError struct { + missingAttribute string + missingIndex ref.Val + missingKey ref.Val +} + +func (e *resolutionError) isMissingAttribute() bool { + return e.missingAttribute != "" +} + +func missingIndex(missing ref.Val) *resolutionError { + return &resolutionError{ + missingIndex: missing, + } +} + +func missingKey(missing ref.Val) *resolutionError { + return &resolutionError{ + missingKey: missing, + } +} + +func missingAttribute(attr string) *resolutionError { + return &resolutionError{ + missingAttribute: attr, + } +} + +// Error implements the error interface method. +func (e *resolutionError) Error() string { + if e.missingKey != nil { + return fmt.Sprintf("no such key: %v", e.missingKey) + } + if e.missingIndex != nil { + return fmt.Sprintf("index out of bounds: %v", e.missingIndex) + } + if e.missingAttribute != "" { + return fmt.Sprintf("no such attribute(s): %s", e.missingAttribute) + } + return "invalid attribute" +} + +// Is implements the errors.Is() method used by more recent versions of Go. +func (e *resolutionError) Is(err error) bool { + return err.Error() == e.Error() +} diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/decorators.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/decorators.go new file mode 100644 index 0000000000..502db35fc0 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/decorators.go @@ -0,0 +1,272 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package interpreter + +import ( + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +// InterpretableDecorator is a functional interface for decorating or replacing +// Interpretable expression nodes at construction time. 
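+//
+// A minimal sketch of a custom decorator (decCountNodes is hypothetical and
+// shown only to illustrate the contract): it counts planned nodes while
+// leaving every Interpretable unchanged.
+//
+//	func decCountNodes(counter *int) InterpretableDecorator {
+//		return func(i Interpretable) (Interpretable, error) {
+//			*counter++
+//			return i, nil
+//		}
+//	}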
+type InterpretableDecorator func(Interpretable) (Interpretable, error)
+
+// decObserveEval records evaluation state into an EvalState object.
+func decObserveEval(observer EvalObserver) InterpretableDecorator {
+	return func(i Interpretable) (Interpretable, error) {
+		switch inst := i.(type) {
+		case *evalWatch, *evalWatchAttr, *evalWatchConst, *evalWatchConstructor:
+			// these instructions are already watching, return straight away.
+			return i, nil
+		case InterpretableAttribute:
+			return &evalWatchAttr{
+				InterpretableAttribute: inst,
+				observer:               observer,
+			}, nil
+		case InterpretableConst:
+			return &evalWatchConst{
+				InterpretableConst: inst,
+				observer:           observer,
+			}, nil
+		case InterpretableConstructor:
+			return &evalWatchConstructor{
+				constructor: inst,
+				observer:    observer,
+			}, nil
+		default:
+			return &evalWatch{
+				Interpretable: i,
+				observer:      observer,
+			}, nil
+		}
+	}
+}
+
+// decInterruptFolds creates an interpretable decorator which marks comprehensions as interruptable
+// where the interrupt state is communicated via a hidden variable on the Activation.
+func decInterruptFolds() InterpretableDecorator {
+	return func(i Interpretable) (Interpretable, error) {
+		fold, ok := i.(*evalFold)
+		if !ok {
+			return i, nil
+		}
+		fold.interruptable = true
+		return fold, nil
+	}
+}
+
+// decDisableShortcircuits ensures that all branches of an expression will be evaluated, no short-circuiting.
+func decDisableShortcircuits() InterpretableDecorator {
+	return func(i Interpretable) (Interpretable, error) {
+		switch expr := i.(type) {
+		case *evalOr:
+			return &evalExhaustiveOr{
+				id:    expr.id,
+				terms: expr.terms,
+			}, nil
+		case *evalAnd:
+			return &evalExhaustiveAnd{
+				id:    expr.id,
+				terms: expr.terms,
+			}, nil
+		case *evalFold:
+			expr.exhaustive = true
+			return expr, nil
+		case InterpretableAttribute:
+			cond, isCond := expr.Attr().(*conditionalAttribute)
+			if isCond {
+				return &evalExhaustiveConditional{
+					id:      cond.id,
+					attr:    cond,
+					adapter: expr.Adapter(),
+				}, nil
+			}
+		}
+		return i, nil
+	}
+}
+
+// decOptimize optimizes the program plan by looking for common evaluation patterns and
+// conditionally precomputing the result.
+// - build list and map values with constant elements.
+// - convert 'in' operations to set membership tests if possible.
+func decOptimize() InterpretableDecorator {
+	return func(i Interpretable) (Interpretable, error) {
+		switch inst := i.(type) {
+		case *evalList:
+			return maybeBuildListLiteral(i, inst)
+		case *evalMap:
+			return maybeBuildMapLiteral(i, inst)
+		case InterpretableCall:
+			if inst.OverloadID() == overloads.InList {
+				return maybeOptimizeSetMembership(i, inst)
+			}
+			if overloads.IsTypeConversionFunction(inst.Function()) {
+				return maybeOptimizeConstUnary(i, inst)
+			}
+		}
+		return i, nil
+	}
+}
+
+// decRegexOptimizer compiles regex pattern string constants.
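+//
+// For example (illustrative, assuming an optimization registered for the
+// standard 'matches' function), the constant pattern in
+// msg.matches('^[a-z]+$') can be compiled once at plan time instead of on
+// every evaluation.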
+func decRegexOptimizer(regexOptimizations ...*RegexOptimization) InterpretableDecorator {
+	functionMatchMap := make(map[string]*RegexOptimization)
+	overloadMatchMap := make(map[string]*RegexOptimization)
+	for _, m := range regexOptimizations {
+		functionMatchMap[m.Function] = m
+		if m.OverloadID != "" {
+			overloadMatchMap[m.OverloadID] = m
+		}
+	}
+
+	return func(i Interpretable) (Interpretable, error) {
+		call, ok := i.(InterpretableCall)
+		if !ok {
+			return i, nil
+		}
+
+		var matcher *RegexOptimization
+		var found bool
+		if call.OverloadID() != "" {
+			matcher, found = overloadMatchMap[call.OverloadID()]
+		}
+		if !found {
+			matcher, found = functionMatchMap[call.Function()]
+		}
+		if !found || matcher.RegexIndex >= len(call.Args()) {
+			return i, nil
+		}
+		args := call.Args()
+		regexArg := args[matcher.RegexIndex]
+		regexStr, isConst := regexArg.(InterpretableConst)
+		if !isConst {
+			return i, nil
+		}
+		pattern, ok := regexStr.Value().(types.String)
+		if !ok {
+			return i, nil
+		}
+		return matcher.Factory(call, string(pattern))
+	}
+}
+
+func maybeOptimizeConstUnary(i Interpretable, call InterpretableCall) (Interpretable, error) {
+	args := call.Args()
+	if len(args) != 1 {
+		return i, nil
+	}
+	_, isConst := args[0].(InterpretableConst)
+	if !isConst {
+		return i, nil
+	}
+	val := call.Eval(EmptyActivation())
+	if types.IsError(val) {
+		return nil, val.(*types.Err)
+	}
+	return NewConstValue(call.ID(), val), nil
+}
+
+func maybeBuildListLiteral(i Interpretable, l *evalList) (Interpretable, error) {
+	for _, elem := range l.elems {
+		_, isConst := elem.(InterpretableConst)
+		if !isConst {
+			return i, nil
+		}
+	}
+	return NewConstValue(l.ID(), l.Eval(EmptyActivation())), nil
+}
+
+func maybeBuildMapLiteral(i Interpretable, mp *evalMap) (Interpretable, error) {
+	for idx, key := range mp.keys {
+		_, isConst := key.(InterpretableConst)
+		if !isConst {
+			return i, nil
+		}
+		_, isConst = mp.vals[idx].(InterpretableConst)
+		if !isConst {
+			return i, nil
+		}
+	}
+	return NewConstValue(mp.ID(), mp.Eval(EmptyActivation())), nil
+}
+
+// maybeOptimizeSetMembership may convert an 'in' operation against a list to a map key membership
+// test if the following conditions are true:
+// - the list is a constant with homogeneous element types.
+// - the elements are all of primitive type.
+func maybeOptimizeSetMembership(i Interpretable, inlist InterpretableCall) (Interpretable, error) {
+	args := inlist.Args()
+	lhs := args[0]
+	rhs := args[1]
+	l, isConst := rhs.(InterpretableConst)
+	if !isConst {
+		return i, nil
+	}
+	// When the incoming binary call is flagged as the InList overload, the value will
+	// always be convertible to a `traits.Lister` type.
+	list := l.Value().(traits.Lister)
+	if list.Size() == types.IntZero {
+		return NewConstValue(inlist.ID(), types.False), nil
+	}
+	it := list.Iterator()
+	valueSet := make(map[ref.Val]ref.Val)
+	for it.HasNext() == types.True {
+		elem := it.Next()
+		if !types.IsPrimitiveType(elem) || elem.Type() == types.BytesType {
+			// Note: non-primitive types are not yet supported, and []byte isn't hashable.
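+			// For instance (illustrative), x in [b'one', b'two'] takes this branch
+			// and falls back to the generic list-membership evaluation.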
+ return i, nil + } + valueSet[elem] = types.True + switch ev := elem.(type) { + case types.Double: + iv := ev.ConvertToType(types.IntType) + // Ensure that only lossless conversions are added to the set + if !types.IsError(iv) && iv.Equal(ev) == types.True { + valueSet[iv] = types.True + } + // Ensure that only lossless conversions are added to the set + uv := ev.ConvertToType(types.UintType) + if !types.IsError(uv) && uv.Equal(ev) == types.True { + valueSet[uv] = types.True + } + case types.Int: + dv := ev.ConvertToType(types.DoubleType) + if !types.IsError(dv) { + valueSet[dv] = types.True + } + uv := ev.ConvertToType(types.UintType) + if !types.IsError(uv) { + valueSet[uv] = types.True + } + case types.Uint: + dv := ev.ConvertToType(types.DoubleType) + if !types.IsError(dv) { + valueSet[dv] = types.True + } + iv := ev.ConvertToType(types.IntType) + if !types.IsError(iv) { + valueSet[iv] = types.True + } + } + } + return &evalSetMembership{ + inst: inlist, + arg: lhs, + valueSet: valueSet, + }, nil +} diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/dispatcher.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/dispatcher.go new file mode 100644 index 0000000000..8f0bdb7b8e --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/dispatcher.go @@ -0,0 +1,100 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package interpreter + +import ( + "fmt" + + "github.com/google/cel-go/common/functions" +) + +// Dispatcher resolves function calls to their appropriate overload. +type Dispatcher interface { + // Add one or more overloads, returning an error if any Overload has the same Overload#Name. + Add(overloads ...*functions.Overload) error + + // FindOverload returns an Overload definition matching the provided name. + FindOverload(overload string) (*functions.Overload, bool) + + // OverloadIds returns the set of all overload identifiers configured for dispatch. + OverloadIds() []string +} + +// NewDispatcher returns an empty Dispatcher instance. +func NewDispatcher() Dispatcher { + return &defaultDispatcher{ + overloads: make(map[string]*functions.Overload)} +} + +// ExtendDispatcher returns a Dispatcher which inherits the overloads of its parent, and +// provides an isolation layer between built-ins and extension functions which is useful +// for forward compatibility. +func ExtendDispatcher(parent Dispatcher) Dispatcher { + return &defaultDispatcher{ + parent: parent, + overloads: make(map[string]*functions.Overload)} +} + +// overloadMap helper type for indexing overloads by function name. +type overloadMap map[string]*functions.Overload + +// defaultDispatcher struct which contains an overload map. +type defaultDispatcher struct { + parent Dispatcher + overloads overloadMap +} + +// Add implements the Dispatcher.Add interface method. 
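+//
+// A minimal usage sketch (myFnImpl is hypothetical, not part of this package):
+//
+//	d := NewDispatcher()
+//	err := d.Add(&functions.Overload{Operator: "my_fn", Unary: myFnImpl})
+//	// A second Add with Operator "my_fn" would return an error.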
+func (d *defaultDispatcher) Add(overloads ...*functions.Overload) error {
+	for _, o := range overloads {
+		// add the overload unless an overload of the same name has already been provided.
+		if _, found := d.overloads[o.Operator]; found {
+			return fmt.Errorf("overload already exists '%s'", o.Operator)
+		}
+		// index the overload by function name.
+		d.overloads[o.Operator] = o
+	}
+	return nil
+}
+
+// FindOverload implements the Dispatcher.FindOverload interface method.
+func (d *defaultDispatcher) FindOverload(overload string) (*functions.Overload, bool) {
+	o, found := d.overloads[overload]
+	// Attempt to dispatch to an overload defined in the parent.
+	if !found && d.parent != nil {
+		return d.parent.FindOverload(overload)
+	}
+	return o, found
+}
+
+// OverloadIds implements the Dispatcher interface method.
+func (d *defaultDispatcher) OverloadIds() []string {
+	i := 0
+	overloads := make([]string, len(d.overloads))
+	for name := range d.overloads {
+		overloads[i] = name
+		i++
+	}
+	if d.parent == nil {
+		return overloads
+	}
+	parentOverloads := d.parent.OverloadIds()
+	for _, pName := range parentOverloads {
+		if _, found := d.overloads[pName]; !found {
+			overloads = append(overloads, pName)
+		}
+	}
+	return overloads
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/evalstate.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/evalstate.go
new file mode 100644
index 0000000000..4bdd1fdc73
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/evalstate.go
@@ -0,0 +1,79 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// EvalState tracks the values associated with expression ids during execution.
+type EvalState interface {
+	// IDs returns the list of ids with recorded values.
+	IDs() []int64
+
+	// Value returns the observed value of the given expression id if found, and a nil false
+	// result if not.
+	Value(int64) (ref.Val, bool)
+
+	// SetValue sets the observed value of the expression id.
+	SetValue(int64, ref.Val)
+
+	// Reset clears the previously recorded expression values.
+	Reset()
+}
+
+// evalState permits the mutation of evaluation state for a given expression id.
+type evalState struct {
+	values map[int64]ref.Val
+}
+
+// NewEvalState returns an EvalState instance used to observe the intermediate
+// evaluations of an expression.
+func NewEvalState() EvalState {
+	return &evalState{
+		values: make(map[int64]ref.Val),
+	}
+}
+
+// IDs implements the EvalState interface method.
+func (s *evalState) IDs() []int64 {
+	var ids []int64
+	for k, v := range s.values {
+		if v != nil {
+			ids = append(ids, k)
+		}
+	}
+	return ids
+}
+
+// Value is an implementation of the EvalState interface method.
+func (s *evalState) Value(exprID int64) (ref.Val, bool) {
+	val, found := s.values[exprID]
+	return val, found
+}
+
+// SetValue is an implementation of the EvalState interface method.
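+//
+// Note the nil-delete contract implemented below: SetValue(id, nil) removes
+// any recorded value, so a subsequent Value(id) reports found == false.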
+func (s *evalState) SetValue(exprID int64, val ref.Val) {
+	if val == nil {
+		delete(s.values, exprID)
+	} else {
+		s.values[exprID] = val
+	}
+}
+
+// Reset implements the EvalState interface method.
+func (s *evalState) Reset() {
+	s.values = map[int64]ref.Val{}
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel
new file mode 100644
index 0000000000..4a80c3ea08
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel
@@ -0,0 +1,17 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+    default_visibility = ["//visibility:public"],
+    licenses = ["notice"],  # Apache 2.0
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "functions.go",
+    ],
+    importpath = "github.com/google/cel-go/interpreter/functions",
+    deps = [
+        "//common/functions:go_default_library",
+    ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/functions/functions.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/functions/functions.go
new file mode 100644
index 0000000000..21ffb69246
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/functions/functions.go
@@ -0,0 +1,39 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package functions defines the standard builtin functions supported by the
+// interpreter, as declared within the checker#StandardDeclarations.
package functions
+
+import fn "github.com/google/cel-go/common/functions"
+
+// Overload defines a named overload of a function, indicating an operand trait
+// which must be present on the first argument to the overload as well as one
+// of either a unary, binary, or function implementation.
+//
+// The majority of operators within the expression language are unary or binary
+// and the specializations simplify the call contract for implementers of
+// types with operator overloads. Any added complexity is assumed to be handled
+// by the generic FunctionOp.
+type Overload = fn.Overload
+
+// UnaryOp is a function that takes a single value and produces an output.
+type UnaryOp = fn.UnaryOp
+
+// BinaryOp is a function that takes two values and produces an output.
+type BinaryOp = fn.BinaryOp
+
+// FunctionOp is a function which accepts zero or more arguments and produces
+// a value or error as a result.
+type FunctionOp = fn.FunctionOp
diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/interpretable.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/interpretable.go
new file mode 100644
index 0000000000..ebc432e9dd
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/interpretable.go
@@ -0,0 +1,1407 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/google/cel-go/common/functions"
+	"github.com/google/cel-go/common/operators"
+	"github.com/google/cel-go/common/overloads"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+// Interpretable can accept a given Activation and produce a value along with
+// an accompanying EvalState which can be used to inspect whether additional
+// data might be necessary to complete the evaluation.
+type Interpretable interface {
+	// ID value corresponding to the expression node.
+	ID() int64
+
+	// Eval an Activation to produce an output.
+	Eval(activation Activation) ref.Val
+}
+
+// InterpretableConst interface for tracking whether the Interpretable is a constant value.
+type InterpretableConst interface {
+	Interpretable
+
+	// Value returns the constant value of the instruction.
+	Value() ref.Val
+}
+
+// InterpretableAttribute interface for tracking whether the Interpretable is an attribute.
+type InterpretableAttribute interface {
+	Interpretable
+
+	// Attr returns the Attribute value.
+	Attr() Attribute
+
+	// Adapter returns the type adapter to be used for adapting resolved Attribute values.
+	Adapter() types.Adapter
+
+	// AddQualifier proxies the Attribute.AddQualifier method.
+	//
+	// Note, this method may mutate the current attribute state. If the desire is to clone the
+	// Attribute, the Attribute should first be copied before adding the qualifier. Attributes
+	// are not copyable by default, so this is a capability that would need to be added to the
+	// AttributeFactory or specifically to the underlying Attribute implementation.
+	AddQualifier(Qualifier) (Attribute, error)
+
+	// Qualify replicates the Attribute.Qualify method to permit extension and interception
+	// of object qualification.
+	Qualify(vars Activation, obj any) (any, error)
+
+	// QualifyIfPresent qualifies the object if the qualifier is declared or defined on the object.
+	// The 'presenceOnly' flag indicates that the value is not necessary, just a boolean status as
+	// to whether the qualifier is present.
+	QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error)
+
+	// IsOptional indicates whether the resulting value is an optional type.
+	IsOptional() bool
+
+	// Resolve returns the value of the Attribute given the current Activation.
+	Resolve(Activation) (any, error)
+}
+
+// InterpretableCall interface for inspecting Interpretable instructions related to function calls.
+type InterpretableCall interface {
+	Interpretable
+
+	// Function returns the function name as it appears in text or mangled operator name as it
+	// appears in the operators.go file.
+	Function() string
+
+	// OverloadID returns the overload id associated with the function specialization.
+	// Overload ids are stable across language boundaries and can be treated as synonymous with a
+	// unique function signature.
+	OverloadID() string
+
+	// Args returns the normalized arguments to the function overload.
+ // For receiver-style functions, the receiver target is arg 0. + Args() []Interpretable +} + +// InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map +// or struct. +type InterpretableConstructor interface { + Interpretable + + // InitVals returns all the list elements, map key and values or struct field values. + InitVals() []Interpretable + + // Type returns the type constructed. + Type() ref.Type +} + +// Core Interpretable implementations used during the program planning phase. + +type evalTestOnly struct { + id int64 + InterpretableAttribute +} + +// ID implements the Interpretable interface method. +func (test *evalTestOnly) ID() int64 { + return test.id +} + +// Eval implements the Interpretable interface method. +func (test *evalTestOnly) Eval(ctx Activation) ref.Val { + val, err := test.Resolve(ctx) + // Return an error if the resolve step fails + if err != nil { + return types.LabelErrNode(test.id, types.WrapErr(err)) + } + if optVal, isOpt := val.(*types.Optional); isOpt { + return types.Bool(optVal.HasValue()) + } + return test.Adapter().NativeToValue(val) +} + +// AddQualifier appends a qualifier that will always and only perform a presence test. +func (test *evalTestOnly) AddQualifier(q Qualifier) (Attribute, error) { + cq, ok := q.(ConstantQualifier) + if !ok { + return nil, fmt.Errorf("test only expressions must have constant qualifiers: %v", q) + } + return test.InterpretableAttribute.AddQualifier(&testOnlyQualifier{ConstantQualifier: cq}) +} + +type testOnlyQualifier struct { + ConstantQualifier +} + +// Qualify determines whether the test-only qualifier is present on the input object. +func (q *testOnlyQualifier) Qualify(vars Activation, obj any) (any, error) { + out, present, err := q.ConstantQualifier.QualifyIfPresent(vars, obj, true) + if err != nil { + return nil, err + } + if unk, isUnk := out.(types.Unknown); isUnk { + return unk, nil + } + if opt, isOpt := out.(types.Optional); isOpt { + return opt.HasValue(), nil + } + return present, nil +} + +// QualifyIfPresent returns whether the target field in the test-only expression is present. +func (q *testOnlyQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + // Only ever test for presence. + return q.ConstantQualifier.QualifyIfPresent(vars, obj, true) +} + +// QualifierValueEquals determines whether the test-only constant qualifier equals the input value. +func (q *testOnlyQualifier) QualifierValueEquals(value any) bool { + // The input qualifier will always be of type string + return q.ConstantQualifier.Value().Value() == value +} + +// NewConstValue creates a new constant valued Interpretable. +func NewConstValue(id int64, val ref.Val) InterpretableConst { + return &evalConst{ + id: id, + val: val, + } +} + +type evalConst struct { + id int64 + val ref.Val +} + +// ID implements the Interpretable interface method. +func (cons *evalConst) ID() int64 { + return cons.id +} + +// Eval implements the Interpretable interface method. +func (cons *evalConst) Eval(ctx Activation) ref.Val { + return cons.val +} + +// Value implements the InterpretableConst interface method. +func (cons *evalConst) Value() ref.Val { + return cons.val +} + +type evalOr struct { + id int64 + terms []Interpretable +} + +// ID implements the Interpretable interface method. +func (or *evalOr) ID() int64 { + return or.id +} + +// Eval implements the Interpretable interface method. 
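+//
+// For intuition (an illustrative example): with terms [1/0 != 0, true], the
+// error from the first term is discarded because a later term evaluates to
+// true, so the overall result is true.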
+func (or *evalOr) Eval(ctx Activation) ref.Val { + var err ref.Val = nil + var unk *types.Unknown + for _, term := range or.terms { + val := term.Eval(ctx) + boolVal, ok := val.(types.Bool) + // short-circuit on true. + if ok && boolVal == types.True { + return types.True + } + if !ok { + isUnk := false + unk, isUnk = types.MaybeMergeUnknowns(val, unk) + if !isUnk && err == nil { + if types.IsError(val) { + err = val + } else { + err = types.MaybeNoSuchOverloadErr(val) + } + err = types.LabelErrNode(or.id, err) + } + } + } + if unk != nil { + return unk + } + if err != nil { + return err + } + return types.False +} + +type evalAnd struct { + id int64 + terms []Interpretable +} + +// ID implements the Interpretable interface method. +func (and *evalAnd) ID() int64 { + return and.id +} + +// Eval implements the Interpretable interface method. +func (and *evalAnd) Eval(ctx Activation) ref.Val { + var err ref.Val = nil + var unk *types.Unknown + for _, term := range and.terms { + val := term.Eval(ctx) + boolVal, ok := val.(types.Bool) + // short-circuit on false. + if ok && boolVal == types.False { + return types.False + } + if !ok { + isUnk := false + unk, isUnk = types.MaybeMergeUnknowns(val, unk) + if !isUnk && err == nil { + if types.IsError(val) { + err = val + } else { + err = types.MaybeNoSuchOverloadErr(val) + } + err = types.LabelErrNode(and.id, err) + } + } + } + if unk != nil { + return unk + } + if err != nil { + return err + } + return types.True +} + +type evalEq struct { + id int64 + lhs Interpretable + rhs Interpretable +} + +// ID implements the Interpretable interface method. +func (eq *evalEq) ID() int64 { + return eq.id +} + +// Eval implements the Interpretable interface method. +func (eq *evalEq) Eval(ctx Activation) ref.Val { + lVal := eq.lhs.Eval(ctx) + rVal := eq.rhs.Eval(ctx) + if types.IsUnknownOrError(lVal) { + return lVal + } + if types.IsUnknownOrError(rVal) { + return rVal + } + return types.Equal(lVal, rVal) +} + +// Function implements the InterpretableCall interface method. +func (*evalEq) Function() string { + return operators.Equals +} + +// OverloadID implements the InterpretableCall interface method. +func (*evalEq) OverloadID() string { + return overloads.Equals +} + +// Args implements the InterpretableCall interface method. +func (eq *evalEq) Args() []Interpretable { + return []Interpretable{eq.lhs, eq.rhs} +} + +type evalNe struct { + id int64 + lhs Interpretable + rhs Interpretable +} + +// ID implements the Interpretable interface method. +func (ne *evalNe) ID() int64 { + return ne.id +} + +// Eval implements the Interpretable interface method. +func (ne *evalNe) Eval(ctx Activation) ref.Val { + lVal := ne.lhs.Eval(ctx) + rVal := ne.rhs.Eval(ctx) + if types.IsUnknownOrError(lVal) { + return lVal + } + if types.IsUnknownOrError(rVal) { + return rVal + } + return types.Bool(types.Equal(lVal, rVal) != types.True) +} + +// Function implements the InterpretableCall interface method. +func (*evalNe) Function() string { + return operators.NotEquals +} + +// OverloadID implements the InterpretableCall interface method. +func (*evalNe) OverloadID() string { + return overloads.NotEquals +} + +// Args implements the InterpretableCall interface method. +func (ne *evalNe) Args() []Interpretable { + return []Interpretable{ne.lhs, ne.rhs} +} + +type evalZeroArity struct { + id int64 + function string + overload string + impl functions.FunctionOp +} + +// ID implements the Interpretable interface method. 
+func (zero *evalZeroArity) ID() int64 {
+	return zero.id
+}
+
+// Eval implements the Interpretable interface method.
+func (zero *evalZeroArity) Eval(ctx Activation) ref.Val {
+	return types.LabelErrNode(zero.id, zero.impl())
+}
+
+// Function implements the InterpretableCall interface method.
+func (zero *evalZeroArity) Function() string {
+	return zero.function
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (zero *evalZeroArity) OverloadID() string {
+	return zero.overload
+}
+
+// Args returns the empty argument list for the zero-arity function.
+func (zero *evalZeroArity) Args() []Interpretable {
+	return []Interpretable{}
+}
+
+type evalUnary struct {
+	id        int64
+	function  string
+	overload  string
+	arg       Interpretable
+	trait     int
+	impl      functions.UnaryOp
+	nonStrict bool
+}
+
+// ID implements the Interpretable interface method.
+func (un *evalUnary) ID() int64 {
+	return un.id
+}
+
+// Eval implements the Interpretable interface method.
+func (un *evalUnary) Eval(ctx Activation) ref.Val {
+	argVal := un.arg.Eval(ctx)
+	// Early return if the argument to the function is unknown or error.
+	strict := !un.nonStrict
+	if strict && types.IsUnknownOrError(argVal) {
+		return argVal
+	}
+	// If the implementation is bound and the argument value has the right traits required to
+	// invoke it, then call the implementation.
+	if un.impl != nil && (un.trait == 0 || (!strict && types.IsUnknownOrError(argVal)) || argVal.Type().HasTrait(un.trait)) {
+		return types.LabelErrNode(un.id, un.impl(argVal))
+	}
+	// Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
+	// operand (arg0).
+	if argVal.Type().HasTrait(traits.ReceiverType) {
+		return types.LabelErrNode(un.id, argVal.(traits.Receiver).Receive(un.function, un.overload, []ref.Val{}))
+	}
+	return types.NewErrWithNodeID(un.id, "no such overload: %s", un.function)
+}
+
+// Function implements the InterpretableCall interface method.
+func (un *evalUnary) Function() string {
+	return un.function
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (un *evalUnary) OverloadID() string {
+	return un.overload
+}
+
+// Args returns the argument to the unary function.
+func (un *evalUnary) Args() []Interpretable {
+	return []Interpretable{un.arg}
+}
+
+type evalBinary struct {
+	id        int64
+	function  string
+	overload  string
+	lhs       Interpretable
+	rhs       Interpretable
+	trait     int
+	impl      functions.BinaryOp
+	nonStrict bool
+}
+
+// ID implements the Interpretable interface method.
+func (bin *evalBinary) ID() int64 {
+	return bin.id
+}
+
+// Eval implements the Interpretable interface method.
+func (bin *evalBinary) Eval(ctx Activation) ref.Val {
+	lVal := bin.lhs.Eval(ctx)
+	rVal := bin.rhs.Eval(ctx)
+	// Early return if any argument to the function is unknown or error.
+	strict := !bin.nonStrict
+	if strict {
+		if types.IsUnknownOrError(lVal) {
+			return lVal
+		}
+		if types.IsUnknownOrError(rVal) {
+			return rVal
+		}
+	}
+	// If the implementation is bound and the argument value has the right traits required to
+	// invoke it, then call the implementation.
+	if bin.impl != nil && (bin.trait == 0 || (!strict && types.IsUnknownOrError(lVal)) || lVal.Type().HasTrait(bin.trait)) {
+		return types.LabelErrNode(bin.id, bin.impl(lVal, rVal))
+	}
+	// Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
+	// operand (arg0).
+	if lVal.Type().HasTrait(traits.ReceiverType) {
+		return types.LabelErrNode(bin.id, lVal.(traits.Receiver).Receive(bin.function, bin.overload, []ref.Val{rVal}))
+	}
+	return types.NewErrWithNodeID(bin.id, "no such overload: %s", bin.function)
+}
+
+// Function implements the InterpretableCall interface method.
+func (bin *evalBinary) Function() string {
+	return bin.function
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (bin *evalBinary) OverloadID() string {
+	return bin.overload
+}
+
+// Args returns the arguments to the binary function.
+func (bin *evalBinary) Args() []Interpretable {
+	return []Interpretable{bin.lhs, bin.rhs}
+}
+
+type evalVarArgs struct {
+	id        int64
+	function  string
+	overload  string
+	args      []Interpretable
+	trait     int
+	impl      functions.FunctionOp
+	nonStrict bool
+}
+
+// NewCall creates a new call Interpretable.
+func NewCall(id int64, function, overload string, args []Interpretable, impl functions.FunctionOp) InterpretableCall {
+	return &evalVarArgs{
+		id:       id,
+		function: function,
+		overload: overload,
+		args:     args,
+		impl:     impl,
+	}
+}
+
+// ID implements the Interpretable interface method.
+func (fn *evalVarArgs) ID() int64 {
+	return fn.id
+}
+
+// Eval implements the Interpretable interface method.
+func (fn *evalVarArgs) Eval(ctx Activation) ref.Val {
+	argVals := make([]ref.Val, len(fn.args))
+	// Early return if any argument to the function is unknown or error.
+	strict := !fn.nonStrict
+	for i, arg := range fn.args {
+		argVals[i] = arg.Eval(ctx)
+		if strict && types.IsUnknownOrError(argVals[i]) {
+			return argVals[i]
+		}
+	}
+	// If the implementation is bound and the argument value has the right traits required to
+	// invoke it, then call the implementation.
+	arg0 := argVals[0]
+	if fn.impl != nil && (fn.trait == 0 || (!strict && types.IsUnknownOrError(arg0)) || arg0.Type().HasTrait(fn.trait)) {
+		return types.LabelErrNode(fn.id, fn.impl(argVals...))
+	}
+	// Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the
+	// operand (arg0).
+	if arg0.Type().HasTrait(traits.ReceiverType) {
+		return types.LabelErrNode(fn.id, arg0.(traits.Receiver).Receive(fn.function, fn.overload, argVals[1:]))
+	}
+	return types.NewErrWithNodeID(fn.id, "no such overload: %s %d", fn.function, fn.id)
+}
+
+// Function implements the InterpretableCall interface method.
+func (fn *evalVarArgs) Function() string {
+	return fn.function
+}
+
+// OverloadID implements the InterpretableCall interface method.
+func (fn *evalVarArgs) OverloadID() string {
+	return fn.overload
+}
+
+// Args returns the arguments to the variadic function.
+func (fn *evalVarArgs) Args() []Interpretable {
+	return fn.args
+}
+
+type evalList struct {
+	id           int64
+	elems        []Interpretable
+	optionals    []bool
+	hasOptionals bool
+	adapter      types.Adapter
+}
+
+// ID implements the Interpretable interface method.
+func (l *evalList) ID() int64 {
+	return l.id
+}
+
+// Eval implements the Interpretable interface method.
+func (l *evalList) Eval(ctx Activation) ref.Val {
+	elemVals := make([]ref.Val, 0, len(l.elems))
+	// If any argument is unknown or error, early terminate.
+	for i, elem := range l.elems {
+		elemVal := elem.Eval(ctx)
+		if types.IsUnknownOrError(elemVal) {
+			return elemVal
+		}
+		if l.hasOptionals && l.optionals[i] {
+			optVal, ok := elemVal.(*types.Optional)
+			if !ok {
+				return types.LabelErrNode(l.id, invalidOptionalElementInit(elemVal))
+			}
+			if !optVal.HasValue() {
+				continue
+			}
+			elemVal = optVal.GetValue()
+		}
+		elemVals = append(elemVals, elemVal)
+	}
+	return l.adapter.NativeToValue(elemVals)
+}
+
+func (l *evalList) InitVals() []Interpretable {
+	return l.elems
+}
+
+func (l *evalList) Type() ref.Type {
+	return types.ListType
+}
+
+type evalMap struct {
+	id           int64
+	keys         []Interpretable
+	vals         []Interpretable
+	optionals    []bool
+	hasOptionals bool
+	adapter      types.Adapter
+}
+
+// ID implements the Interpretable interface method.
+func (m *evalMap) ID() int64 {
+	return m.id
+}
+
+// Eval implements the Interpretable interface method.
+func (m *evalMap) Eval(ctx Activation) ref.Val {
+	entries := make(map[ref.Val]ref.Val)
+	// If any argument is unknown or error, early terminate.
+	for i, key := range m.keys {
+		keyVal := key.Eval(ctx)
+		if types.IsUnknownOrError(keyVal) {
+			return keyVal
+		}
+		valVal := m.vals[i].Eval(ctx)
+		if types.IsUnknownOrError(valVal) {
+			return valVal
+		}
+		if m.hasOptionals && m.optionals[i] {
+			optVal, ok := valVal.(*types.Optional)
+			if !ok {
+				return types.LabelErrNode(m.id, invalidOptionalEntryInit(keyVal, valVal))
+			}
+			if !optVal.HasValue() {
+				delete(entries, keyVal)
+				continue
+			}
+			valVal = optVal.GetValue()
+		}
+		entries[keyVal] = valVal
+	}
+	return m.adapter.NativeToValue(entries)
+}
+
+func (m *evalMap) InitVals() []Interpretable {
+	if len(m.keys) != len(m.vals) {
+		return nil
+	}
+	result := make([]Interpretable, len(m.keys)+len(m.vals))
+	idx := 0
+	for i, k := range m.keys {
+		v := m.vals[i]
+		result[idx] = k
+		idx++
+		result[idx] = v
+		idx++
+	}
+	return result
+}
+
+func (m *evalMap) Type() ref.Type {
+	return types.MapType
+}
+
+type evalObj struct {
+	id           int64
+	typeName     string
+	fields       []string
+	vals         []Interpretable
+	optionals    []bool
+	hasOptionals bool
+	provider     types.Provider
+}
+
+// ID implements the Interpretable interface method.
+func (o *evalObj) ID() int64 {
+	return o.id
+}
+
+// Eval implements the Interpretable interface method.
+func (o *evalObj) Eval(ctx Activation) ref.Val {
+	fieldVals := make(map[string]ref.Val)
+	// If any argument is unknown or error, early terminate.
+	for i, field := range o.fields {
+		val := o.vals[i].Eval(ctx)
+		if types.IsUnknownOrError(val) {
+			return val
+		}
+		if o.hasOptionals && o.optionals[i] {
+			optVal, ok := val.(*types.Optional)
+			if !ok {
+				return types.LabelErrNode(o.id, invalidOptionalEntryInit(field, val))
+			}
+			if !optVal.HasValue() {
+				delete(fieldVals, field)
+				continue
+			}
+			val = optVal.GetValue()
+		}
+		fieldVals[field] = val
+	}
+	return types.LabelErrNode(o.id, o.provider.NewValue(o.typeName, fieldVals))
+}
+
+// InitVals implements the InterpretableConstructor interface method.
+func (o *evalObj) InitVals() []Interpretable {
+	return o.vals
+}
+
+// Type implements the InterpretableConstructor interface method.
+func (o *evalObj) Type() ref.Type {
+	return types.NewObjectType(o.typeName)
+}
+
+type evalFold struct {
+	id        int64
+	accuVar   string
+	iterVar   string
+	iterVar2  string
+	iterRange Interpretable
+	accu      Interpretable
+	cond      Interpretable
+	step      Interpretable
+	result    Interpretable
+	adapter   types.Adapter
+
+	// Note: an exhaustive fold ensures that all branches are evaluated;
+	// when using mutable values, these branches mutate the final result
+	// rather than making a throw-away computation.
+	exhaustive    bool
+	interruptable bool
+}
+
+// ID implements the Interpretable interface method.
+func (fold *evalFold) ID() int64 {
+	return fold.id
+}
+
+// Eval implements the Interpretable interface method.
+func (fold *evalFold) Eval(ctx Activation) ref.Val {
+	// Initialize the folder interface
+	f := newFolder(fold, ctx)
+	defer releaseFolder(f)
+
+	foldRange := fold.iterRange.Eval(ctx)
+	if fold.iterVar2 != "" {
+		var foldable traits.Foldable
+		switch r := foldRange.(type) {
+		case traits.Mapper:
+			foldable = types.ToFoldableMap(r)
+		case traits.Lister:
+			foldable = types.ToFoldableList(r)
+		default:
+			return types.NewErrWithNodeID(fold.ID(), "unsupported comprehension range type: %T", foldRange)
+		}
+		foldable.Fold(f)
+		return f.evalResult()
+	}
+
+	if !foldRange.Type().HasTrait(traits.IterableType) {
+		return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange)
+	}
+	iterable := foldRange.(traits.Iterable)
+	return f.foldIterable(iterable)
+}
+
+// Optional Interpretable implementations that specialize, subsume, or extend the core evaluation
+// plan via decorators.
+
+// evalSetMembership is an Interpretable implementation which tests whether an input value
+// exists within the set of map keys used to model a set.
+type evalSetMembership struct {
+	inst     Interpretable
+	arg      Interpretable
+	valueSet map[ref.Val]ref.Val
+}
+
+// ID implements the Interpretable interface method.
+func (e *evalSetMembership) ID() int64 {
+	return e.inst.ID()
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalSetMembership) Eval(ctx Activation) ref.Val {
+	val := e.arg.Eval(ctx)
+	if types.IsUnknownOrError(val) {
+		return val
+	}
+	if ret, found := e.valueSet[val]; found {
+		return ret
+	}
+	return types.False
+}
+
+// evalWatch is an Interpretable implementation that wraps the execution of a given
+// expression so that it may observe the computed value and send it to an observer.
+type evalWatch struct {
+	Interpretable
+	observer EvalObserver
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalWatch) Eval(ctx Activation) ref.Val {
+	val := e.Interpretable.Eval(ctx)
+	e.observer(e.ID(), e.Interpretable, val)
+	return val
+}
+
+// evalWatchAttr describes a watcher of an InterpretableAttribute Interpretable.
+//
+// Since the watcher may be selected against at a later stage in program planning, the watcher
+// must implement the InterpretableAttribute interface by proxy.
+type evalWatchAttr struct {
+	InterpretableAttribute
+	observer EvalObserver
+}
+
+// AddQualifier creates a wrapper over the incoming qualifier which observes the qualification
+// result.
+func (e *evalWatchAttr) AddQualifier(q Qualifier) (Attribute, error) {
+	switch qual := q.(type) {
+	// By default, the qualifier is either a constant or an attribute
+	// There may be some custom cases where the attribute is neither.
+	case ConstantQualifier:
+		// Expose a method to test whether the qualifier matches the input pattern.
+		q = &evalWatchConstQual{
+			ConstantQualifier: qual,
+			observer:          e.observer,
+			adapter:           e.Adapter(),
+		}
+	case *evalWatchAttr:
+		// Unwrap the evalWatchAttr since the observation will be applied during Qualify or
+		// QualifyIfPresent rather than Eval.
+		q = &evalWatchAttrQual{
+			Attribute: qual.InterpretableAttribute,
+			observer:  e.observer,
+			adapter:   e.Adapter(),
+		}
+	case Attribute:
+		// Expose methods which intercept the qualification prior to being applied as a qualifier.
+		// Using this interface ensures that the qualifier is converted to a constant value one
+		// time during attribute pattern matching as the method embeds the Attribute interface
+		// needed to trip the conversion to a constant.
+		q = &evalWatchAttrQual{
+			Attribute: qual,
+			observer:  e.observer,
+			adapter:   e.Adapter(),
+		}
+	default:
+		// This is likely a custom qualifier type.
+		q = &evalWatchQual{
+			Qualifier: qual,
+			observer:  e.observer,
+			adapter:   e.Adapter(),
+		}
+	}
+	_, err := e.InterpretableAttribute.AddQualifier(q)
+	return e, err
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalWatchAttr) Eval(vars Activation) ref.Val {
+	val := e.InterpretableAttribute.Eval(vars)
+	e.observer(e.ID(), e.InterpretableAttribute, val)
+	return val
+}
+
+// evalWatchConstQual observes the qualification of an object using a constant boolean, int,
+// string, or uint.
+type evalWatchConstQual struct {
+	ConstantQualifier
+	observer EvalObserver
+	adapter  types.Adapter
+}
+
+// Qualify observes the qualification of an object via a constant boolean, int, string, or uint.
+func (e *evalWatchConstQual) Qualify(vars Activation, obj any) (any, error) {
+	out, err := e.ConstantQualifier.Qualify(vars, obj)
+	var val ref.Val
+	if err != nil {
+		val = types.LabelErrNode(e.ID(), types.WrapErr(err))
+	} else {
+		val = e.adapter.NativeToValue(out)
+	}
+	e.observer(e.ID(), e.ConstantQualifier, val)
+	return out, err
+}
+
+// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present.
+func (e *evalWatchConstQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	out, present, err := e.ConstantQualifier.QualifyIfPresent(vars, obj, presenceOnly)
+	var val ref.Val
+	if err != nil {
+		val = types.LabelErrNode(e.ID(), types.WrapErr(err))
+	} else if out != nil {
+		val = e.adapter.NativeToValue(out)
+	} else if presenceOnly {
+		val = types.Bool(present)
+	}
+	if present || presenceOnly {
+		e.observer(e.ID(), e.ConstantQualifier, val)
+	}
+	return out, present, err
+}
+
+// QualifierValueEquals tests whether the incoming value is equal to the qualifying constant.
+func (e *evalWatchConstQual) QualifierValueEquals(value any) bool {
+	qve, ok := e.ConstantQualifier.(qualifierValueEquator)
+	return ok && qve.QualifierValueEquals(value)
+}
+
+// evalWatchAttrQual observes the qualification of an object by a value computed at runtime.
+type evalWatchAttrQual struct {
+	Attribute
+	observer EvalObserver
+	adapter  ref.TypeAdapter
+}
+
+// Qualify observes the qualification of an object via a value computed at runtime.
+func (e *evalWatchAttrQual) Qualify(vars Activation, obj any) (any, error) {
+	out, err := e.Attribute.Qualify(vars, obj)
+	var val ref.Val
+	if err != nil {
+		val = types.LabelErrNode(e.ID(), types.WrapErr(err))
+	} else {
+		val = e.adapter.NativeToValue(out)
+	}
+	e.observer(e.ID(), e.Attribute, val)
+	return out, err
+}
+
+// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present.
+func (e *evalWatchAttrQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	out, present, err := e.Attribute.QualifyIfPresent(vars, obj, presenceOnly)
+	var val ref.Val
+	if err != nil {
+		val = types.LabelErrNode(e.ID(), types.WrapErr(err))
+	} else if out != nil {
+		val = e.adapter.NativeToValue(out)
+	} else if presenceOnly {
+		val = types.Bool(present)
+	}
+	if present || presenceOnly {
+		e.observer(e.ID(), e.Attribute, val)
+	}
+	return out, present, err
+}
+
+// evalWatchQual observes the qualification of an object by a value computed at runtime.
+type evalWatchQual struct {
+	Qualifier
+	observer EvalObserver
+	adapter  types.Adapter
+}
+
+// Qualify observes the qualification of an object via a value computed at runtime.
+func (e *evalWatchQual) Qualify(vars Activation, obj any) (any, error) {
+	out, err := e.Qualifier.Qualify(vars, obj)
+	var val ref.Val
+	if err != nil {
+		val = types.LabelErrNode(e.ID(), types.WrapErr(err))
+	} else {
+		val = e.adapter.NativeToValue(out)
+	}
+	e.observer(e.ID(), e.Qualifier, val)
+	return out, err
+}
+
+// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present.
+func (e *evalWatchQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	out, present, err := e.Qualifier.QualifyIfPresent(vars, obj, presenceOnly)
+	var val ref.Val
+	if err != nil {
+		val = types.LabelErrNode(e.ID(), types.WrapErr(err))
+	} else if out != nil {
+		val = e.adapter.NativeToValue(out)
+	} else if presenceOnly {
+		val = types.Bool(present)
+	}
+	if present || presenceOnly {
+		e.observer(e.ID(), e.Qualifier, val)
+	}
+	return out, present, err
+}
+
+// evalWatchConst describes a watcher of an instConst Interpretable.
+type evalWatchConst struct {
+	InterpretableConst
+	observer EvalObserver
+}
+
+// Eval implements the Interpretable interface method.
+func (e *evalWatchConst) Eval(vars Activation) ref.Val {
+	val := e.Value()
+	e.observer(e.ID(), e.InterpretableConst, val)
+	return val
+}
+
+// evalExhaustiveOr is just like evalOr, but does not short-circuit argument evaluation.
+type evalExhaustiveOr struct {
+	id    int64
+	terms []Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (or *evalExhaustiveOr) ID() int64 {
+	return or.id
+}
+
+// Eval implements the Interpretable interface method.
+func (or *evalExhaustiveOr) Eval(ctx Activation) ref.Val {
+	var err ref.Val = nil
+	var unk *types.Unknown
+	isTrue := false
+	for _, term := range or.terms {
+		val := term.Eval(ctx)
+		boolVal, ok := val.(types.Bool)
+		// flag the result as true
+		if ok && boolVal == types.True {
+			isTrue = true
+		}
+		if !ok && !isTrue {
+			isUnk := false
+			unk, isUnk = types.MaybeMergeUnknowns(val, unk)
+			if !isUnk && err == nil {
+				if types.IsError(val) {
+					err = val
+				} else {
+					err = types.MaybeNoSuchOverloadErr(val)
+				}
+			}
+		}
+	}
+	if isTrue {
+		return types.True
+	}
+	if unk != nil {
+		return unk
+	}
+	if err != nil {
+		return err
+	}
+	return types.False
+}
+
+// evalExhaustiveAnd is just like evalAnd, but does not short-circuit argument evaluation.
+type evalExhaustiveAnd struct {
+	id    int64
+	terms []Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (and *evalExhaustiveAnd) ID() int64 {
+	return and.id
+}
+
+// Eval implements the Interpretable interface method.
+func (and *evalExhaustiveAnd) Eval(ctx Activation) ref.Val {
+	var err ref.Val = nil
+	var unk *types.Unknown
+	isFalse := false
+	for _, term := range and.terms {
+		val := term.Eval(ctx)
+		boolVal, ok := val.(types.Bool)
+		// flag the result as false
+		if ok && boolVal == types.False {
+			isFalse = true
+		}
+		if !ok && !isFalse {
+			isUnk := false
+			unk, isUnk = types.MaybeMergeUnknowns(val, unk)
+			if !isUnk && err == nil {
+				if types.IsError(val) {
+					err = val
+				} else {
+					err = types.MaybeNoSuchOverloadErr(val)
+				}
+			}
+		}
+	}
+	if isFalse {
+		return types.False
+	}
+	if unk != nil {
+		return unk
+	}
+	if err != nil {
+		return err
+	}
+	return types.True
+}
+
+// evalExhaustiveConditional is like evalConditional, but does not short-circuit argument
+// evaluation.
+type evalExhaustiveConditional struct {
+	id      int64
+	adapter types.Adapter
+	attr    *conditionalAttribute
+}
+
+// ID implements the Interpretable interface method.
+func (cond *evalExhaustiveConditional) ID() int64 {
+	return cond.id
+}
+
+// Eval implements the Interpretable interface method.
+func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val {
+	cVal := cond.attr.expr.Eval(ctx)
+	tVal, tErr := cond.attr.truthy.Resolve(ctx)
+	fVal, fErr := cond.attr.falsy.Resolve(ctx)
+	cBool, ok := cVal.(types.Bool)
+	if !ok {
+		return types.ValOrErr(cVal, "no such overload")
+	}
+	if cBool {
+		if tErr != nil {
+			return types.LabelErrNode(cond.id, types.WrapErr(tErr))
+		}
+		return cond.adapter.NativeToValue(tVal)
+	}
+	if fErr != nil {
+		return types.LabelErrNode(cond.id, types.WrapErr(fErr))
+	}
+	return cond.adapter.NativeToValue(fVal)
+}
+
+// evalAttr evaluates an Attribute value.
+type evalAttr struct {
+	adapter  types.Adapter
+	attr     Attribute
+	optional bool
+}
+
+var _ InterpretableAttribute = &evalAttr{}
+
+// ID of the attribute instruction.
+func (a *evalAttr) ID() int64 {
+	return a.attr.ID()
+}
+
+// AddQualifier implements the InterpretableAttribute interface method.
+func (a *evalAttr) AddQualifier(qual Qualifier) (Attribute, error) {
+	attr, err := a.attr.AddQualifier(qual)
+	a.attr = attr
+	return attr, err
+}
+
+// Attr implements the InterpretableAttribute interface method.
+func (a *evalAttr) Attr() Attribute {
+	return a.attr
+}
+
+// Adapter implements the InterpretableAttribute interface method.
+func (a *evalAttr) Adapter() types.Adapter {
+	return a.adapter
+}
+
+// Eval implements the Interpretable interface method.
+func (a *evalAttr) Eval(ctx Activation) ref.Val {
+	v, err := a.attr.Resolve(ctx)
+	if err != nil {
+		return types.LabelErrNode(a.ID(), types.WrapErr(err))
+	}
+	return a.adapter.NativeToValue(v)
+}
+
+// Qualify proxies to the Attribute's Qualify method.
+func (a *evalAttr) Qualify(ctx Activation, obj any) (any, error) {
+	return a.attr.Qualify(ctx, obj)
+}
+
+// QualifyIfPresent proxies to the Attribute's QualifyIfPresent method.
+func (a *evalAttr) QualifyIfPresent(ctx Activation, obj any, presenceOnly bool) (any, bool, error) {
+	return a.attr.QualifyIfPresent(ctx, obj, presenceOnly)
+}
+
+func (a *evalAttr) IsOptional() bool {
+	return a.optional
+}
+
+// Resolve proxies to the Attribute's Resolve method.
+func (a *evalAttr) Resolve(ctx Activation) (any, error) {
+	return a.attr.Resolve(ctx)
+}
+
+type evalWatchConstructor struct {
+	constructor InterpretableConstructor
+	observer    EvalObserver
+}
+
+// InitVals implements the InterpretableConstructor InitVals function.
+func (c *evalWatchConstructor) InitVals() []Interpretable {
+	return c.constructor.InitVals()
+}
+
+// Type implements the InterpretableConstructor Type function.
+func (c *evalWatchConstructor) Type() ref.Type {
+	return c.constructor.Type()
+}
+
+// ID implements the Interpretable ID function.
+func (c *evalWatchConstructor) ID() int64 {
+	return c.constructor.ID()
+}
+
+// Eval implements the Interpretable Eval function.
+func (c *evalWatchConstructor) Eval(ctx Activation) ref.Val {
+	val := c.constructor.Eval(ctx)
+	c.observer(c.ID(), c.constructor, val)
+	return val
+}
+
+func invalidOptionalEntryInit(field any, value ref.Val) ref.Val {
+	return types.NewErr("cannot initialize optional entry '%v' from non-optional value %v", field, value)
+}
+
+func invalidOptionalElementInit(value ref.Val) ref.Val {
+	return types.NewErr("cannot initialize optional list element from non-optional value %v", value)
+}
+
+// newFolder creates or initializes a pooled folder instance.
+func newFolder(eval *evalFold, ctx Activation) *folder {
+	f := folderPool.Get().(*folder)
+	f.evalFold = eval
+	f.Activation = ctx
+	return f
+}
+
+// releaseFolder resets and releases a pooled folder instance.
+func releaseFolder(f *folder) {
+	f.reset()
+	folderPool.Put(f)
+}
+
+// folder tracks the state associated with folding a list or map with a comprehension v2 style macro.
+//
+// The folder embeds an interpreter.Activation and Interpretable evalFold value as well as implements
+// the traits.Folder interface methods.
+//
+// Instances of a folder are intended to be pooled to minimize allocation overhead with this temporary
+// bookkeeping object which supports lazy evaluation of the accumulator init expression, which is useful
+// in preserving evaluation order semantics that might otherwise be disrupted through the use of
+// cel.bind or cel.@block.
+type folder struct {
+	*evalFold
+	Activation
+
+	// fold state objects.
+	accuVal     ref.Val
+	iterVar1Val any
+	iterVar2Val any
+
+	// bookkeeping flags to modify Activation and fold behaviors.
+	initialized   bool
+	mutableValue  bool
+	interrupted   bool
+	computeResult bool
+}
+
+func (f *folder) foldIterable(iterable traits.Iterable) ref.Val {
+	it := iterable.Iterator()
+	for it.HasNext() == types.True {
+		f.iterVar1Val = it.Next()
+
+		cond := f.cond.Eval(f)
+		condBool, ok := cond.(types.Bool)
+		if f.interrupted || (!f.exhaustive && ok && condBool != types.True) {
+			return f.evalResult()
+		}
+
+		// Update the accumulation value and check for eval interruption.
+		f.accuVal = f.step.Eval(f)
+		f.initialized = true
+		if f.interruptable && checkInterrupt(f.Activation) {
+			f.interrupted = true
+			return f.evalResult()
+		}
+	}
+	return f.evalResult()
+}
+
+// FoldEntry will either fold comprehension v1 style macros if iterVar2 is unset, or comprehension v2 style
+// macros if both the iterVar and iterVar2 are set to non-empty strings.
+func (f *folder) FoldEntry(key, val any) bool {
+	// Default to referencing both values.
+	f.iterVar1Val = key
+	f.iterVar2Val = val
+
+	// Terminate evaluation if evaluation is interrupted or the condition is not true and exhaustive
+	// eval is not enabled.
+	cond := f.cond.Eval(f)
+	condBool, ok := cond.(types.Bool)
+	if f.interrupted || (!f.exhaustive && ok && condBool != types.True) {
+		return false
+	}
+
+	// Update the accumulation value and check for eval interruption.
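+	// For a two-variable comprehension such as `m.transformMap(k, v, expr)` (from the CEL
+	// comprehensions extension), this step evaluates `expr` with the current key/value bound.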
+ f.accuVal = f.step.Eval(f) + f.initialized = true + if f.interruptable && checkInterrupt(f.Activation) { + f.interrupted = true + return false + } + return true +} + +// ResolveName overrides the default Activation lookup to perform lazy initialization of the accumulator +// and specialized lookups of iteration values with consideration for whether the final result is being +// computed and the iteration variables should be ignored. +func (f *folder) ResolveName(name string) (any, bool) { + if name == f.accuVar { + if !f.initialized { + f.initialized = true + initVal := f.accu.Eval(f.Activation) + if !f.exhaustive { + if l, isList := initVal.(traits.Lister); isList && l.Size() == types.IntZero { + initVal = types.NewMutableList(f.adapter) + f.mutableValue = true + } + if m, isMap := initVal.(traits.Mapper); isMap && m.Size() == types.IntZero { + initVal = types.NewMutableMap(f.adapter, map[ref.Val]ref.Val{}) + f.mutableValue = true + } + } + f.accuVal = initVal + } + return f.accuVal, true + } + if !f.computeResult { + if name == f.iterVar { + f.iterVar1Val = f.adapter.NativeToValue(f.iterVar1Val) + return f.iterVar1Val, true + } + if name == f.iterVar2 { + f.iterVar2Val = f.adapter.NativeToValue(f.iterVar2Val) + return f.iterVar2Val, true + } + } + return f.Activation.ResolveName(name) +} + +// evalResult computes the final result of the fold after all entries have been folded and accumulated. +func (f *folder) evalResult() ref.Val { + f.computeResult = true + if f.interrupted { + return types.NewErr("operation interrupted") + } + res := f.result.Eval(f) + // Convert a mutable list or map to an immutable one if the comprehension has generated a list or + // map as a result. + if !types.IsUnknownOrError(res) && f.mutableValue { + if _, ok := res.(traits.MutableLister); ok { + res = res.(traits.MutableLister).ToImmutableList() + } + if _, ok := res.(traits.MutableMapper); ok { + res = res.(traits.MutableMapper).ToImmutableMap() + } + } + return res +} + +// reset clears any state associated with folder evaluation. +func (f *folder) reset() { + f.evalFold = nil + f.Activation = nil + f.accuVal = nil + f.iterVar1Val = nil + f.iterVar2Val = nil + + f.initialized = false + f.mutableValue = false + f.interrupted = false + f.computeResult = false +} + +func checkInterrupt(a Activation) bool { + stop, found := a.ResolveName("#interrupted") + return found && stop == true +} + +var ( + // pool of var folders to reduce allocations during folds. + folderPool = &sync.Pool{ + New: func() any { + return &folder{} + }, + } +) diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/interpreter.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/interpreter.go new file mode 100644 index 0000000000..0aca74d88b --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/interpreter.go @@ -0,0 +1,185 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+// Package interpreter provides functions to evaluate parsed expressions with
+// the option to augment the evaluation with inputs and functions supplied at
+// evaluation time.
+package interpreter
+
+import (
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/containers"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// Interpreter generates a new Interpretable from a checked or unchecked expression.
+type Interpreter interface {
+	// NewInterpretable creates an Interpretable from a checked expression and an
+	// optional list of InterpretableDecorator values.
+	NewInterpretable(exprAST *ast.AST, decorators ...InterpretableDecorator) (Interpretable, error)
+}
+
+// EvalObserver is a functional interface that accepts an expression id and an observed value.
+// The id identifies the expression that was evaluated, the programStep is the Interpretable or Qualifier that
+// was evaluated, and value is the result of the evaluation.
+type EvalObserver func(id int64, programStep any, value ref.Val)
+
+// Observe constructs a decorator that calls all the provided observers in order after evaluating each Interpretable
+// or Qualifier during program evaluation.
+func Observe(observers ...EvalObserver) InterpretableDecorator {
+	if len(observers) == 1 {
+		return decObserveEval(observers[0])
+	}
+	observeFn := func(id int64, programStep any, val ref.Val) {
+		for _, observer := range observers {
+			observer(id, programStep, val)
+		}
+	}
+	return decObserveEval(observeFn)
+}
+
+// EvalCancelledError represents a cancelled program evaluation operation.
+type EvalCancelledError struct {
+	Message string
+	// Cause identifies the reason for the cancellation.
+	Cause CancellationCause
+}
+
+func (e EvalCancelledError) Error() string {
+	return e.Message
+}
+
+// CancellationCause enumerates the ways a program evaluation operation can be cancelled.
+type CancellationCause int
+
+const (
+	// ContextCancelled indicates that the operation was cancelled in response to a Golang context cancellation.
+	ContextCancelled CancellationCause = iota
+
+	// CostLimitExceeded indicates that the operation was cancelled in response to the actual cost limit being
+	// exceeded.
+	CostLimitExceeded
+)
+
+// TODO: Replace all usages of TrackState with EvalStateObserver
+
+// TrackState decorates each expression node with an observer which records the value
+// associated with the given expression id. EvalState must be provided to the decorator.
+// This decorator is not thread-safe, and the EvalState must be reset between Eval()
+// calls.
+// DEPRECATED: Please use EvalStateObserver instead. It composes gracefully with additional observers.
+func TrackState(state EvalState) InterpretableDecorator {
+	return Observe(EvalStateObserver(state))
+}
+
+// EvalStateObserver provides an observer which records the value
+// associated with the given expression id. EvalState must be provided to the observer.
+// This observer is not thread-safe, and the EvalState must be reset between Eval()
+// calls.
+func EvalStateObserver(state EvalState) EvalObserver {
+	return func(id int64, programStep any, val ref.Val) {
+		state.SetValue(id, val)
+	}
+}
+
+// ExhaustiveEval replaces operations that short-circuit with versions that evaluate
+// expressions and couples this behavior with the TrackState() decorator to provide
+// insight into the evaluation state of the entire expression. EvalState must be
+// provided to the decorator. This decorator is not thread-safe, and the EvalState
+// must be reset between Eval() calls.
+func ExhaustiveEval() InterpretableDecorator {
+	ex := decDisableShortcircuits()
+	return func(i Interpretable) (Interpretable, error) {
+		return ex(i)
+	}
+}
+
+// InterruptableEval annotates comprehension loops with information that indicates they
+// should check the `#interrupted` state within a custom Activation.
+//
+// The custom activation is currently managed higher up in the stack within the 'cel' package
+// and should not require any custom support on behalf of callers.
+func InterruptableEval() InterpretableDecorator {
+	return decInterruptFolds()
+}
+
+// Optimize will pre-compute operations such as list and map construction and optimize
+// call arguments to set membership tests. The set of optimizations will increase over time.
+func Optimize() InterpretableDecorator {
+	return decOptimize()
+}
+
+// RegexOptimization provides a way to replace an InterpretableCall for a regex function when the
+// RegexIndex argument is a string constant. Typically, the Factory would compile the regex pattern at
+// RegexIndex and report any errors (at program creation time) and then use the compiled regex for
+// all regex function invocations.
+type RegexOptimization struct {
+	// Function is the name of the function to optimize.
+	Function string
+	// OverloadID is the ID of the overload to optimize.
+	OverloadID string
+	// RegexIndex is the index position of the regex pattern argument. Only calls to the function where this argument is
+	// a string constant will be delegated to this optimizer.
+	RegexIndex int
+	// Factory constructs a replacement InterpretableCall node that optimizes the regex function call. Factory is
+	// provided with the unoptimized regex call and the string constant at the RegexIndex argument.
+	// The Factory may compile the regex for use across all invocations of the call, return any errors and
+	// return an interpreter.NewCall with the desired regex optimized function impl.
+	Factory func(call InterpretableCall, regexPattern string) (InterpretableCall, error)
+}
+
+// CompileRegexConstants compiles regex pattern string constants at program creation time and reports any regex pattern
+// compile errors.
+func CompileRegexConstants(regexOptimizations ...*RegexOptimization) InterpretableDecorator {
+	return decRegexOptimizer(regexOptimizations...)
+}
+
+type exprInterpreter struct {
+	dispatcher  Dispatcher
+	container   *containers.Container
+	provider    types.Provider
+	adapter     types.Adapter
+	attrFactory AttributeFactory
+}
+
+// NewInterpreter builds an Interpreter from a Dispatcher and TypeProvider which will be used
+// throughout the Eval of all Interpretable instances generated from it.
+func NewInterpreter(dispatcher Dispatcher,
+	container *containers.Container,
+	provider types.Provider,
+	adapter types.Adapter,
+	attrFactory AttributeFactory) Interpreter {
+	return &exprInterpreter{
+		dispatcher:  dispatcher,
+		container:   container,
+		provider:    provider,
+		adapter:     adapter,
+		attrFactory: attrFactory}
+}
+
+// NewInterpretable implements the Interpreter interface method.
+func (i *exprInterpreter) NewInterpretable(
+	checked *ast.AST,
+	decorators ...InterpretableDecorator) (Interpretable, error) {
+	p := newPlanner(
+		i.dispatcher,
+		i.provider,
+		i.adapter,
+		i.attrFactory,
+		i.container,
+		checked,
+		decorators...)
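+	// Plan the root expression; the planner applies the configured decorators to each node it produces.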
+ return p.Plan(checked.Expr()) +} diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/optimizations.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/optimizations.go new file mode 100644 index 0000000000..2fc87e693b --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/optimizations.go @@ -0,0 +1,46 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package interpreter + +import ( + "regexp" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// MatchesRegexOptimization optimizes the 'matches' standard library function by compiling the regex pattern and +// reporting any compilation errors at program creation time, and using the compiled regex pattern for all function +// call invocations. +var MatchesRegexOptimization = &RegexOptimization{ + Function: "matches", + RegexIndex: 1, + Factory: func(call InterpretableCall, regexPattern string) (InterpretableCall, error) { + compiledRegex, err := regexp.Compile(regexPattern) + if err != nil { + return nil, err + } + return NewCall(call.ID(), call.Function(), call.OverloadID(), call.Args(), func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NoSuchOverloadErr() + } + in, ok := values[0].Value().(string) + if !ok { + return types.NoSuchOverloadErr() + } + return types.Bool(compiledRegex.MatchString(in)) + }), nil + }, +} diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/planner.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/planner.go new file mode 100644 index 0000000000..3d918ce872 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/planner.go @@ -0,0 +1,757 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package interpreter + +import ( + "fmt" + "strings" + + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/types" +) + +// interpretablePlanner creates an Interpretable evaluation plan from a proto Expr value. +type interpretablePlanner interface { + // Plan generates an Interpretable value (or error) from the input proto Expr. + Plan(expr ast.Expr) (Interpretable, error) +} + +// newPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider, +// TypeAdapter, Container, and CheckedExpr value. 
These pieces of data are used to resolve +// functions, types, and namespaced identifiers at plan time rather than at runtime since +// it only needs to be done once and may be semi-expensive to compute. +func newPlanner(disp Dispatcher, + provider types.Provider, + adapter types.Adapter, + attrFactory AttributeFactory, + cont *containers.Container, + exprAST *ast.AST, + decorators ...InterpretableDecorator) interpretablePlanner { + return &planner{ + disp: disp, + provider: provider, + adapter: adapter, + attrFactory: attrFactory, + container: cont, + refMap: exprAST.ReferenceMap(), + typeMap: exprAST.TypeMap(), + decorators: decorators, + } +} + +// planner is an implementation of the interpretablePlanner interface. +type planner struct { + disp Dispatcher + provider types.Provider + adapter types.Adapter + attrFactory AttributeFactory + container *containers.Container + refMap map[int64]*ast.ReferenceInfo + typeMap map[int64]*types.Type + decorators []InterpretableDecorator +} + +// Plan implements the interpretablePlanner interface. This implementation of the Plan method also +// applies decorators to each Interpretable generated as part of the overall plan. Decorators are +// useful for layering functionality into the evaluation that is not natively understood by CEL, +// such as state-tracking, expression re-write, and possibly efficient thread-safe memoization of +// repeated expressions. +func (p *planner) Plan(expr ast.Expr) (Interpretable, error) { + switch expr.Kind() { + case ast.CallKind: + return p.decorate(p.planCall(expr)) + case ast.IdentKind: + return p.decorate(p.planIdent(expr)) + case ast.LiteralKind: + return p.decorate(p.planConst(expr)) + case ast.SelectKind: + return p.decorate(p.planSelect(expr)) + case ast.ListKind: + return p.decorate(p.planCreateList(expr)) + case ast.MapKind: + return p.decorate(p.planCreateMap(expr)) + case ast.StructKind: + return p.decorate(p.planCreateStruct(expr)) + case ast.ComprehensionKind: + return p.decorate(p.planComprehension(expr)) + } + return nil, fmt.Errorf("unsupported expr: %v", expr) +} + +// decorate applies the InterpretableDecorator functions to the given Interpretable. +// Both the Interpretable and error generated by a Plan step are accepted as arguments +// for convenience. +func (p *planner) decorate(i Interpretable, err error) (Interpretable, error) { + if err != nil { + return nil, err + } + for _, dec := range p.decorators { + i, err = dec(i) + if err != nil { + return nil, err + } + } + return i, nil +} + +// planIdent creates an Interpretable that resolves an identifier from an Activation. +func (p *planner) planIdent(expr ast.Expr) (Interpretable, error) { + // Establish whether the identifier is in the reference map. + if identRef, found := p.refMap[expr.ID()]; found { + return p.planCheckedIdent(expr.ID(), identRef) + } + // Create the possible attribute list for the unresolved reference. + ident := expr.AsIdent() + return &evalAttr{ + adapter: p.adapter, + attr: p.attrFactory.MaybeAttribute(expr.ID(), ident), + }, nil +} + +func (p *planner) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Interpretable, error) { + // Plan a constant reference if this is the case for this simple identifier. + if identRef.Value != nil { + return NewConstValue(id, identRef.Value), nil + } + + // Check to see whether the type map indicates this is a type name. All types should be + // registered with the provider. 
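+	// For example, a checked identifier such as `google.protobuf.Duration` resolves to the
+	// type value registered with the provider rather than to a runtime attribute.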
+	cType := p.typeMap[id]
+	if cType.Kind() == types.TypeKind {
+		cVal, found := p.provider.FindIdent(identRef.Name)
+		if !found {
+			return nil, fmt.Errorf("reference to undefined type: %s", identRef.Name)
+		}
+		return NewConstValue(id, cVal), nil
+	}
+
+	// Otherwise, return the attribute for the resolved identifier name.
+	return &evalAttr{
+		adapter: p.adapter,
+		attr:    p.attrFactory.AbsoluteAttribute(id, identRef.Name),
+	}, nil
+}
+
+// planSelect creates an Interpretable that does one of the following:
+//
+// a) selects a field from a map or proto.
+// b) creates a field presence test for a select within a has() macro.
+// c) resolves the select expression to a namespaced identifier.
+func (p *planner) planSelect(expr ast.Expr) (Interpretable, error) {
+	// If the Select id appears in the reference map from the CheckedExpr proto then it is either
+	// a namespaced identifier or enum value.
+	if identRef, found := p.refMap[expr.ID()]; found {
+		return p.planCheckedIdent(expr.ID(), identRef)
+	}
+
+	sel := expr.AsSelect()
+	// Plan the operand evaluation.
+	op, err := p.Plan(sel.Operand())
+	if err != nil {
+		return nil, err
+	}
+	opType := p.typeMap[sel.Operand().ID()]
+
+	// If the Select was marked TestOnly, this is a presence test.
+	//
+	// Note: presence tests are defined for structured (e.g. proto) and dynamic values (map, json)
+	// as follows:
+	//   - True if the object field has a non-default value, e.g. obj.str != ""
+	//   - True if the dynamic value has the field defined, e.g. key in map
+	//
+	// However, presence tests are not defined for qualified identifier names with primitive types.
+	// If a string named 'a.b.c' is declared in the environment and referenced within `has(a.b.c)`,
+	// it is not clear whether has should error or follow the convention defined for structured
+	// values.
+
+	// Establish the attribute reference.
+	attr, isAttr := op.(InterpretableAttribute)
+	if !isAttr {
+		attr, err = p.relativeAttr(op.ID(), op, false)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Build a qualifier for the attribute.
+	qual, err := p.attrFactory.NewQualifier(opType, expr.ID(), sel.FieldName(), false)
+	if err != nil {
+		return nil, err
+	}
+	// Modify the attribute to be test-only.
+	if sel.IsTestOnly() {
+		attr = &evalTestOnly{
+			id:                     expr.ID(),
+			InterpretableAttribute: attr,
+		}
+	}
+	// Append the qualifier on the attribute.
+	_, err = attr.AddQualifier(qual)
+	return attr, err
+}
+
+// planCall creates a callable Interpretable while specializing for common functions and invocation
+// patterns. Specifically, conditional operators &&, ||, ?:, and (in)equality functions result in
+// optimized Interpretable values.
+func (p *planner) planCall(expr ast.Expr) (Interpretable, error) {
+	call := expr.AsCall()
+	target, fnName, oName := p.resolveFunction(expr)
+	argCount := len(call.Args())
+	var offset int
+	if target != nil {
+		argCount++
+		offset++
+	}
+
+	args := make([]Interpretable, argCount)
+	if target != nil {
+		arg, err := p.Plan(target)
+		if err != nil {
+			return nil, err
+		}
+		args[0] = arg
+	}
+	for i, argExpr := range call.Args() {
+		arg, err := p.Plan(argExpr)
+		if err != nil {
+			return nil, err
+		}
+		args[i+offset] = arg
+	}
+
+	// Generate specialized Interpretable operators by function name if possible.
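+	// For example, `a || b` plans to evalOr and `a == b` plans to evalEq rather than to a
+	// dispatcher-backed call.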
+ switch fnName { + case operators.LogicalAnd: + return p.planCallLogicalAnd(expr, args) + case operators.LogicalOr: + return p.planCallLogicalOr(expr, args) + case operators.Conditional: + return p.planCallConditional(expr, args) + case operators.Equals: + return p.planCallEqual(expr, args) + case operators.NotEquals: + return p.planCallNotEqual(expr, args) + case operators.Index: + return p.planCallIndex(expr, args, false) + case operators.OptSelect, operators.OptIndex: + return p.planCallIndex(expr, args, true) + } + + // Otherwise, generate Interpretable calls specialized by argument count. + // Try to find the specific function by overload id. + var fnDef *functions.Overload + if oName != "" { + fnDef, _ = p.disp.FindOverload(oName) + } + // If the overload id couldn't resolve the function, try the simple function name. + if fnDef == nil { + fnDef, _ = p.disp.FindOverload(fnName) + } + switch argCount { + case 0: + return p.planCallZero(expr, fnName, oName, fnDef) + case 1: + // If the FunctionOp has been used, then use it as it may exist for the purposes + // of dynamic dispatch within a singleton function implementation. + if fnDef != nil && fnDef.Unary == nil && fnDef.Function != nil { + return p.planCallVarArgs(expr, fnName, oName, fnDef, args) + } + return p.planCallUnary(expr, fnName, oName, fnDef, args) + case 2: + // If the FunctionOp has been used, then use it as it may exist for the purposes + // of dynamic dispatch within a singleton function implementation. + if fnDef != nil && fnDef.Binary == nil && fnDef.Function != nil { + return p.planCallVarArgs(expr, fnName, oName, fnDef, args) + } + return p.planCallBinary(expr, fnName, oName, fnDef, args) + default: + return p.planCallVarArgs(expr, fnName, oName, fnDef, args) + } +} + +// planCallZero generates a zero-arity callable Interpretable. +func (p *planner) planCallZero(expr ast.Expr, + function string, + overload string, + impl *functions.Overload) (Interpretable, error) { + if impl == nil || impl.Function == nil { + return nil, fmt.Errorf("no such overload: %s()", function) + } + return &evalZeroArity{ + id: expr.ID(), + function: function, + overload: overload, + impl: impl.Function, + }, nil +} + +// planCallUnary generates a unary callable Interpretable. +func (p *planner) planCallUnary(expr ast.Expr, + function string, + overload string, + impl *functions.Overload, + args []Interpretable) (Interpretable, error) { + var fn functions.UnaryOp + var trait int + var nonStrict bool + if impl != nil { + if impl.Unary == nil { + return nil, fmt.Errorf("no such overload: %s(arg)", function) + } + fn = impl.Unary + trait = impl.OperandTrait + nonStrict = impl.NonStrict + } + return &evalUnary{ + id: expr.ID(), + function: function, + overload: overload, + arg: args[0], + trait: trait, + impl: fn, + nonStrict: nonStrict, + }, nil +} + +// planCallBinary generates a binary callable Interpretable. 
+func (p *planner) planCallBinary(expr ast.Expr, + function string, + overload string, + impl *functions.Overload, + args []Interpretable) (Interpretable, error) { + var fn functions.BinaryOp + var trait int + var nonStrict bool + if impl != nil { + if impl.Binary == nil { + return nil, fmt.Errorf("no such overload: %s(lhs, rhs)", function) + } + fn = impl.Binary + trait = impl.OperandTrait + nonStrict = impl.NonStrict + } + return &evalBinary{ + id: expr.ID(), + function: function, + overload: overload, + lhs: args[0], + rhs: args[1], + trait: trait, + impl: fn, + nonStrict: nonStrict, + }, nil +} + +// planCallVarArgs generates a variable argument callable Interpretable. +func (p *planner) planCallVarArgs(expr ast.Expr, + function string, + overload string, + impl *functions.Overload, + args []Interpretable) (Interpretable, error) { + var fn functions.FunctionOp + var trait int + var nonStrict bool + if impl != nil { + if impl.Function == nil { + return nil, fmt.Errorf("no such overload: %s(...)", function) + } + fn = impl.Function + trait = impl.OperandTrait + nonStrict = impl.NonStrict + } + return &evalVarArgs{ + id: expr.ID(), + function: function, + overload: overload, + args: args, + trait: trait, + impl: fn, + nonStrict: nonStrict, + }, nil +} + +// planCallEqual generates an equals (==) Interpretable. +func (p *planner) planCallEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) { + return &evalEq{ + id: expr.ID(), + lhs: args[0], + rhs: args[1], + }, nil +} + +// planCallNotEqual generates a not equals (!=) Interpretable. +func (p *planner) planCallNotEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) { + return &evalNe{ + id: expr.ID(), + lhs: args[0], + rhs: args[1], + }, nil +} + +// planCallLogicalAnd generates a logical and (&&) Interpretable. +func (p *planner) planCallLogicalAnd(expr ast.Expr, args []Interpretable) (Interpretable, error) { + return &evalAnd{ + id: expr.ID(), + terms: args, + }, nil +} + +// planCallLogicalOr generates a logical or (||) Interpretable. +func (p *planner) planCallLogicalOr(expr ast.Expr, args []Interpretable) (Interpretable, error) { + return &evalOr{ + id: expr.ID(), + terms: args, + }, nil +} + +// planCallConditional generates a conditional / ternary (c ? t : f) Interpretable. +func (p *planner) planCallConditional(expr ast.Expr, args []Interpretable) (Interpretable, error) { + cond := args[0] + t := args[1] + var tAttr Attribute + truthyAttr, isTruthyAttr := t.(InterpretableAttribute) + if isTruthyAttr { + tAttr = truthyAttr.Attr() + } else { + tAttr = p.attrFactory.RelativeAttribute(t.ID(), t) + } + + f := args[2] + var fAttr Attribute + falsyAttr, isFalsyAttr := f.(InterpretableAttribute) + if isFalsyAttr { + fAttr = falsyAttr.Attr() + } else { + fAttr = p.attrFactory.RelativeAttribute(f.ID(), f) + } + + return &evalAttr{ + adapter: p.adapter, + attr: p.attrFactory.ConditionalAttribute(expr.ID(), cond, tAttr, fAttr), + }, nil +} + +// planCallIndex either extends an attribute with the argument to the index operation, or creates +// a relative attribute based on the return of a function call or operation. +func (p *planner) planCallIndex(expr ast.Expr, args []Interpretable, optional bool) (Interpretable, error) { + op := args[0] + ind := args[1] + opType := p.typeMap[op.ID()] + + // Establish the attribute reference. 
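+	// For an index expression such as `msg.items[i]`, the operand is either already an attribute
+	// or gets wrapped into a relative one so the index qualifier can be appended below.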
+	var err error
+	attr, isAttr := op.(InterpretableAttribute)
+	if !isAttr {
+		attr, err = p.relativeAttr(op.ID(), op, false)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Construct the qualifier type.
+	var qual Qualifier
+	switch ind := ind.(type) {
+	case InterpretableConst:
+		qual, err = p.attrFactory.NewQualifier(opType, expr.ID(), ind.Value(), optional)
+	case InterpretableAttribute:
+		qual, err = p.attrFactory.NewQualifier(opType, expr.ID(), ind, optional)
+	default:
+		qual, err = p.relativeAttr(expr.ID(), ind, optional)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// Add the qualifier to the attribute.
+	_, err = attr.AddQualifier(qual)
+	return attr, err
+}
+
+// planCreateList generates a list construction Interpretable.
+func (p *planner) planCreateList(expr ast.Expr) (Interpretable, error) {
+	list := expr.AsList()
+	optionalIndices := list.OptionalIndices()
+	elements := list.Elements()
+	optionals := make([]bool, len(elements))
+	for _, index := range optionalIndices {
+		if index < 0 || index >= int32(len(elements)) {
+			return nil, fmt.Errorf("optional index %d out of element bounds [0, %d]", index, len(elements))
+		}
+		optionals[index] = true
+	}
+	elems := make([]Interpretable, len(elements))
+	for i, elem := range elements {
+		elemVal, err := p.Plan(elem)
+		if err != nil {
+			return nil, err
+		}
+		elems[i] = elemVal
+	}
+	return &evalList{
+		id:           expr.ID(),
+		elems:        elems,
+		optionals:    optionals,
+		hasOptionals: len(optionals) != 0,
+		adapter:      p.adapter,
+	}, nil
+}
+
+// planCreateMap generates a map construction Interpretable.
+func (p *planner) planCreateMap(expr ast.Expr) (Interpretable, error) {
+	m := expr.AsMap()
+	entries := m.Entries()
+	optionals := make([]bool, len(entries))
+	keys := make([]Interpretable, len(entries))
+	vals := make([]Interpretable, len(entries))
+	for i, e := range entries {
+		entry := e.AsMapEntry()
+		keyVal, err := p.Plan(entry.Key())
+		if err != nil {
+			return nil, err
+		}
+		keys[i] = keyVal
+
+		valVal, err := p.Plan(entry.Value())
+		if err != nil {
+			return nil, err
+		}
+		vals[i] = valVal
+		optionals[i] = entry.IsOptional()
+	}
+	return &evalMap{
+		id:           expr.ID(),
+		keys:         keys,
+		vals:         vals,
+		optionals:    optionals,
+		hasOptionals: len(optionals) != 0,
+		adapter:      p.adapter,
+	}, nil
+}
+
+// planCreateStruct generates an object construction Interpretable.
+func (p *planner) planCreateStruct(expr ast.Expr) (Interpretable, error) {
+	obj := expr.AsStruct()
+	typeName, defined := p.resolveTypeName(obj.TypeName())
+	if !defined {
+		return nil, fmt.Errorf("unknown type: %s", obj.TypeName())
+	}
+	objFields := obj.Fields()
+	optionals := make([]bool, len(objFields))
+	fields := make([]string, len(objFields))
+	vals := make([]Interpretable, len(objFields))
+	for i, f := range objFields {
+		field := f.AsStructField()
+		fields[i] = field.Name()
+		val, err := p.Plan(field.Value())
+		if err != nil {
+			return nil, err
+		}
+		vals[i] = val
+		optionals[i] = field.IsOptional()
+	}
+	return &evalObj{
+		id:           expr.ID(),
+		typeName:     typeName,
+		fields:       fields,
+		vals:         vals,
+		optionals:    optionals,
+		hasOptionals: len(optionals) != 0,
+		provider:     p.provider,
+	}, nil
+}
+
+// planComprehension generates an Interpretable fold operation.
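+// Comprehensions are the expanded form of macros such as `list.exists(x, x > 0)` or
+// `list.map(x, x * 2)`, which the parser rewrites into folds over an iteration range.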
+func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) { + fold := expr.AsComprehension() + accu, err := p.Plan(fold.AccuInit()) + if err != nil { + return nil, err + } + iterRange, err := p.Plan(fold.IterRange()) + if err != nil { + return nil, err + } + cond, err := p.Plan(fold.LoopCondition()) + if err != nil { + return nil, err + } + step, err := p.Plan(fold.LoopStep()) + if err != nil { + return nil, err + } + result, err := p.Plan(fold.Result()) + if err != nil { + return nil, err + } + return &evalFold{ + id: expr.ID(), + accuVar: fold.AccuVar(), + accu: accu, + iterVar: fold.IterVar(), + iterVar2: fold.IterVar2(), + iterRange: iterRange, + cond: cond, + step: step, + result: result, + adapter: p.adapter, + }, nil +} + +// planConst generates a constant valued Interpretable. +func (p *planner) planConst(expr ast.Expr) (Interpretable, error) { + return NewConstValue(expr.ID(), expr.AsLiteral()), nil +} + +// resolveTypeName takes a qualified string constructed at parse time, applies the proto +// namespace resolution rules to it in a scan over possible matching types in the TypeProvider. +func (p *planner) resolveTypeName(typeName string) (string, bool) { + for _, qualifiedTypeName := range p.container.ResolveCandidateNames(typeName) { + if _, found := p.provider.FindStructType(qualifiedTypeName); found { + return qualifiedTypeName, true + } + } + return "", false +} + +// resolveFunction determines the call target, function name, and overload name from a given Expr +// value. +// +// The resolveFunction resolves ambiguities where a function may either be a receiver-style +// invocation or a qualified global function name. +// - The target expression may only consist of ident and select expressions. +// - The function is declared in the environment using its fully-qualified name. +// - The fully-qualified function name matches the string serialized target value. +func (p *planner) resolveFunction(expr ast.Expr) (ast.Expr, string, string) { + // Note: similar logic exists within the `checker/checker.go`. If making changes here + // please consider the impact on checker.go and consolidate implementations or mirror code + // as appropriate. + call := expr.AsCall() + var target ast.Expr = nil + if call.IsMemberFunction() { + target = call.Target() + } + fnName := call.FunctionName() + + // Checked expressions always have a reference map entry, and _should_ have the fully qualified + // function name as the fnName value. + oRef, hasOverload := p.refMap[expr.ID()] + if hasOverload { + if len(oRef.OverloadIDs) == 1 { + return target, fnName, oRef.OverloadIDs[0] + } + // Note, this namespaced function name will not appear as a fully qualified name in ASTs + // built and stored before cel-go v0.5.0; however, this functionality did not work at all + // before the v0.5.0 release. + return target, fnName, "" + } + + // Parse-only expressions need to handle the same logic as is normally performed at check time, + // but with potentially much less information. The only reliable source of information about + // which functions are configured is the dispatcher. + if target == nil { + // If the user has a parse-only expression, then it should have been configured as such in + // the interpreter dispatcher as it may have been omitted from the checker environment. 
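+		// For example, within container `a.b`, a call to `f()` tries the candidates
+		// `a.b.f`, `a.f`, and then `f` against the dispatcher.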
+		for _, qualifiedName := range p.container.ResolveCandidateNames(fnName) {
+			_, found := p.disp.FindOverload(qualifiedName)
+			if found {
+				return nil, qualifiedName, ""
+			}
+		}
+		// It's possible that the overload was not found, but this situation is accounted for in
+		// the planCall phase; however, the leading dot used for denoting fully-qualified
+		// namespaced identifiers must be stripped, as all declarations already use fully-qualified
+		// names. This stripping behavior is handled automatically by the ResolveCandidateNames
+		// call.
+		return target, stripLeadingDot(fnName), ""
+	}
+
+	// Handle the situation where the function target actually indicates a qualified function name.
+	qualifiedPrefix, maybeQualified := p.toQualifiedName(target)
+	if maybeQualified {
+		maybeQualifiedName := qualifiedPrefix + "." + fnName
+		for _, qualifiedName := range p.container.ResolveCandidateNames(maybeQualifiedName) {
+			_, found := p.disp.FindOverload(qualifiedName)
+			if found {
+				// Clear the target to ensure the proper arity is used for finding the
+				// implementation.
+				return nil, qualifiedName, ""
+			}
+		}
+	}
+	// In the default case, the function is exactly as it was advertised: a receiver call with
+	// an expression-based target and the given simple function name.
+	return target, fnName, ""
+}
+
+// relativeAttr indicates that the attribute in this case acts as a qualifier and as such needs to
+// be observed to ensure that its evaluation value is properly recorded for state tracking.
+func (p *planner) relativeAttr(id int64, eval Interpretable, opt bool) (InterpretableAttribute, error) {
+	eAttr, ok := eval.(InterpretableAttribute)
+	if !ok {
+		eAttr = &evalAttr{
+			adapter:  p.adapter,
+			attr:     p.attrFactory.RelativeAttribute(id, eval),
+			optional: opt,
+		}
+	}
+	// This looks like it should either decorate the new evalAttr node, or early return the InterpretableAttribute
+	decAttr, err := p.decorate(eAttr, nil)
+	if err != nil {
+		return nil, err
+	}
+	eAttr, ok = decAttr.(InterpretableAttribute)
+	if !ok {
+		return nil, fmt.Errorf("invalid attribute decoration: %v(%T)", decAttr, decAttr)
+	}
+	return eAttr, nil
+}
+
+// toQualifiedName converts an expression AST into a qualified name if possible, with a boolean
+// 'found' value that indicates if the conversion is successful.
+func (p *planner) toQualifiedName(operand ast.Expr) (string, bool) {
+	// If the type-checker identified the expression as an attribute, then it can't
+	// possibly be part of a qualified name in a namespace.
+	_, isAttr := p.refMap[operand.ID()]
+	if isAttr {
+		return "", false
+	}
+	// Since functions cannot be both namespaced and receiver functions, if the operand is not a
+	// qualified variable name, return the (possibly) qualified name given the expressions.
+	switch operand.Kind() {
+	case ast.IdentKind:
+		id := operand.AsIdent()
+		return id, true
+	case ast.SelectKind:
+		sel := operand.AsSelect()
+		// Test-only expressions are not valid as qualified names.
+		if sel.IsTestOnly() {
+			return "", false
+		}
+		if qual, found := p.toQualifiedName(sel.Operand()); found {
+			return qual + "." + sel.FieldName(), true
+		}
+	}
+	return "", false
+}
+
+func stripLeadingDot(name string) string {
+	if strings.HasPrefix(name, ".") {
+		return name[1:]
+	}
+	return name
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/prune.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/prune.go
new file mode 100644
index 0000000000..410d80dc43
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/prune.go
@@ -0,0 +1,543 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/operators"
+	"github.com/google/cel-go/common/overloads"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+type astPruner struct {
+	ast.ExprFactory
+	expr       ast.Expr
+	macroCalls map[int64]ast.Expr
+	state      EvalState
+	nextExprID int64
+}
+
+// TODO Consider having a separate walk of the AST that finds common
+// subexpressions. This can be called before or after constant folding to find
+// common subexpressions.
+
+// PruneAst prunes the given AST based on the given EvalState and generates a new AST.
+// The given AST is copied on write and a new AST is returned.
+// A couple of typical use cases for this interface would be:
+//
+// A)
+// 1) Evaluate expr with some unknowns.
+// 2) If the result is unknown:
+//
+//	a) PruneAst
+//	b) Goto 1
+//
+// Function call results which are known would be effectively cached across
+// iterations.
+//
+// B)
+// 1) Compile the expression (maybe via a service and maybe after checking that
+// a compiled expression does not exist in a local cache).
+// 2) Prepare the environment and the interpreter. The Activation might be empty.
+// 3) Eval the expression. This might return an unknown, an error, or a concrete
+// value.
+// 4) PruneAst.
+// 5) Maybe cache the expression.
+//
+// This is effectively constant folding the expression. How the environment is
+// prepared in step 2 is flexible. For example, if the caller caches the
+// compiled and constant-folded expressions, but is not willing to constant
+// fold (and thus cache the results of) some external calls, then they can prepare
+// the overloads accordingly.
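+//
+// As a small illustration (variable names are hypothetical): if `x && y` was
+// evaluated with `x` unknown and `y` known to be true, pruning yields the
+// residual expression `x`; had `y` been false, the whole call would prune to
+// the literal `false`.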
+func PruneAst(expr ast.Expr, macroCalls map[int64]ast.Expr, state EvalState) *ast.AST {
+	pruneState := NewEvalState()
+	for _, id := range state.IDs() {
+		v, _ := state.Value(id)
+		pruneState.SetValue(id, v)
+	}
+	pruner := &astPruner{
+		ExprFactory: ast.NewExprFactory(),
+		expr:        expr,
+		macroCalls:  macroCalls,
+		state:       pruneState,
+		nextExprID:  getMaxID(expr)}
+	newExpr, _ := pruner.maybePrune(expr)
+	newInfo := ast.NewSourceInfo(nil)
+	for id, call := range pruner.macroCalls {
+		newInfo.SetMacroCall(id, call)
+	}
+	return ast.NewAST(newExpr, newInfo)
+}
+
+func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (ast.Expr, bool) {
+	switch v := val.(type) {
+	case types.Bool, types.Bytes, types.Double, types.Int, types.Null, types.String, types.Uint:
+		p.state.SetValue(id, val)
+		return p.NewLiteral(id, val), true
+	case types.Duration:
+		p.state.SetValue(id, val)
+		durationString := v.ConvertToType(types.StringType).(types.String)
+		return p.NewCall(id, overloads.TypeConvertDuration, p.NewLiteral(p.nextID(), durationString)), true
+	case types.Timestamp:
+		timestampString := v.ConvertToType(types.StringType).(types.String)
+		return p.NewCall(id, overloads.TypeConvertTimestamp, p.NewLiteral(p.nextID(), timestampString)), true
+	}
+
+	// Attempt to build a list literal.
+	if list, isList := val.(traits.Lister); isList {
+		sz := list.Size().(types.Int)
+		elemExprs := make([]ast.Expr, sz)
+		for i := types.Int(0); i < sz; i++ {
+			elem := list.Get(i)
+			if types.IsUnknownOrError(elem) {
+				return nil, false
+			}
+			elemExpr, ok := p.maybeCreateLiteral(p.nextID(), elem)
+			if !ok {
+				return nil, false
+			}
+			elemExprs[i] = elemExpr
+		}
+		p.state.SetValue(id, val)
+		return p.NewList(id, elemExprs, []int32{}), true
+	}
+
+	// Create a map literal if possible.
+	if mp, isMap := val.(traits.Mapper); isMap {
+		it := mp.Iterator()
+		entries := make([]ast.EntryExpr, mp.Size().(types.Int))
+		i := 0
+		for it.HasNext() != types.False {
+			key := it.Next()
+			val := mp.Get(key)
+			if types.IsUnknownOrError(key) || types.IsUnknownOrError(val) {
+				return nil, false
+			}
+			keyExpr, ok := p.maybeCreateLiteral(p.nextID(), key)
+			if !ok {
+				return nil, false
+			}
+			valExpr, ok := p.maybeCreateLiteral(p.nextID(), val)
+			if !ok {
+				return nil, false
+			}
+			entry := p.NewMapEntry(p.nextID(), keyExpr, valExpr, false)
+			entries[i] = entry
+			i++
+		}
+		p.state.SetValue(id, val)
+		return p.NewMap(id, entries), true
+	}
+
+	// TODO(issues/377) To construct message literals, the type provider will need to support
+	// the enumeration of the fields for a given message.
+ return nil, false +} + +func (p *astPruner) maybePruneOptional(elem ast.Expr) (ast.Expr, bool) { + elemVal, found := p.value(elem.ID()) + if found && elemVal.Type() == types.OptionalType { + opt := elemVal.(*types.Optional) + if !opt.HasValue() { + return nil, true + } + if newElem, pruned := p.maybeCreateLiteral(elem.ID(), opt.GetValue()); pruned { + return newElem, true + } + } + return elem, false +} + +func (p *astPruner) maybePruneIn(node ast.Expr) (ast.Expr, bool) { + // elem in list + call := node.AsCall() + val, exists := p.maybeValue(call.Args()[1].ID()) + if !exists { + return nil, false + } + if sz, ok := val.(traits.Sizer); ok && sz.Size() == types.IntZero { + return p.maybeCreateLiteral(node.ID(), types.False) + } + return nil, false +} + +func (p *astPruner) maybePruneLogicalNot(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() + arg := call.Args()[0] + val, exists := p.maybeValue(arg.ID()) + if !exists { + return nil, false + } + if b, ok := val.(types.Bool); ok { + return p.maybeCreateLiteral(node.ID(), !b) + } + return nil, false +} + +func (p *astPruner) maybePruneOr(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() + // We know result is unknown, so we have at least one unknown arg + // and if one side is a known value, we know we can ignore it. + if v, exists := p.maybeValue(call.Args()[0].ID()); exists { + if v == types.True { + return p.maybeCreateLiteral(node.ID(), types.True) + } + return call.Args()[1], true + } + if v, exists := p.maybeValue(call.Args()[1].ID()); exists { + if v == types.True { + return p.maybeCreateLiteral(node.ID(), types.True) + } + return call.Args()[0], true + } + return nil, false +} + +func (p *astPruner) maybePruneAnd(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() + // We know result is unknown, so we have at least one unknown arg + // and if one side is a known value, we know we can ignore it. 
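+	// e.g. `unknownVar && true` prunes to the expression for `unknownVar`, while
+	// `unknownVar && false` prunes to the literal `false`.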
+	if v, exists := p.maybeValue(call.Args()[0].ID()); exists {
+		if v == types.False {
+			return p.maybeCreateLiteral(node.ID(), types.False)
+		}
+		return call.Args()[1], true
+	}
+	if v, exists := p.maybeValue(call.Args()[1].ID()); exists {
+		if v == types.False {
+			return p.maybeCreateLiteral(node.ID(), types.False)
+		}
+		return call.Args()[0], true
+	}
+	return nil, false
+}
+
+func (p *astPruner) maybePruneConditional(node ast.Expr) (ast.Expr, bool) {
+	call := node.AsCall()
+	cond, exists := p.maybeValue(call.Args()[0].ID())
+	if !exists {
+		return nil, false
+	}
+	if cond.Value().(bool) {
+		return call.Args()[1], true
+	}
+	return call.Args()[2], true
+}
+
+func (p *astPruner) maybePruneFunction(node ast.Expr) (ast.Expr, bool) {
+	if _, exists := p.value(node.ID()); !exists {
+		return nil, false
+	}
+	call := node.AsCall()
+	if call.FunctionName() == operators.LogicalOr {
+		return p.maybePruneOr(node)
+	}
+	if call.FunctionName() == operators.LogicalAnd {
+		return p.maybePruneAnd(node)
+	}
+	if call.FunctionName() == operators.Conditional {
+		return p.maybePruneConditional(node)
+	}
+	if call.FunctionName() == operators.In {
+		return p.maybePruneIn(node)
+	}
+	if call.FunctionName() == operators.LogicalNot {
+		return p.maybePruneLogicalNot(node)
+	}
+	return nil, false
+}
+
+func (p *astPruner) maybePrune(node ast.Expr) (ast.Expr, bool) {
+	return p.prune(node)
+}
+
+func (p *astPruner) prune(node ast.Expr) (ast.Expr, bool) {
+	if node == nil {
+		return node, false
+	}
+	val, valueExists := p.maybeValue(node.ID())
+	if valueExists {
+		if newNode, ok := p.maybeCreateLiteral(node.ID(), val); ok {
+			delete(p.macroCalls, node.ID())
+			return newNode, true
+		}
+	}
+	if macro, found := p.macroCalls[node.ID()]; found {
+		// Ensure that intermediate values for the comprehension are cleared during pruning.
+		if node.Kind() == ast.ComprehensionKind {
+			compre := node.AsComprehension()
+			visit(macro, clearIterVarVisitor(compre.IterVar(), p.state))
+		}
+		// Prune the expression in terms of the macro call instead of the expanded form.
+		if newMacro, pruned := p.prune(macro); pruned {
+			p.macroCalls[node.ID()] = newMacro
+		}
+	}
+
+	// We have either an unknown/error value, or something we don't want to
+	// transform, or the expression was not evaluated. If possible, drill down
+	// more.
+	switch node.Kind() {
+	case ast.SelectKind:
+		sel := node.AsSelect()
+		if operand, isPruned := p.maybePrune(sel.Operand()); isPruned {
+			if sel.IsTestOnly() {
+				return p.NewPresenceTest(node.ID(), operand, sel.FieldName()), true
+			}
+			return p.NewSelect(node.ID(), operand, sel.FieldName()), true
+		}
+	case ast.CallKind:
+		argsPruned := false
+		call := node.AsCall()
+		args := call.Args()
+		newArgs := make([]ast.Expr, len(args))
+		for i, a := range args {
+			newArgs[i] = a
+			if arg, isPruned := p.maybePrune(a); isPruned {
+				argsPruned = true
+				newArgs[i] = arg
+			}
+		}
+		if !call.IsMemberFunction() {
+			newCall := p.NewCall(node.ID(), call.FunctionName(), newArgs...)
+			if prunedCall, isPruned := p.maybePruneFunction(newCall); isPruned {
+				return prunedCall, true
+			}
+			return newCall, argsPruned
+		}
+		newTarget := call.Target()
+		targetPruned := false
+		if prunedTarget, isPruned := p.maybePrune(call.Target()); isPruned {
+			targetPruned = true
+			newTarget = prunedTarget
+		}
+		newCall := p.NewMemberCall(node.ID(), call.FunctionName(), newTarget, newArgs...)
+		if prunedCall, isPruned := p.maybePruneFunction(newCall); isPruned {
+			return prunedCall, true
+		}
+		return newCall, targetPruned || argsPruned
+	case ast.ListKind:
+		l := node.AsList()
+		elems := l.Elements()
+		optIndices := l.OptionalIndices()
+		optIndexMap := map[int32]bool{}
+		for _, i := range optIndices {
+			optIndexMap[i] = true
+		}
+		newOptIndexMap := make(map[int32]bool, len(optIndexMap))
+		newElems := make([]ast.Expr, 0, len(elems))
+		var listPruned bool
+		prunedIdx := 0
+		for i, elem := range elems {
+			_, isOpt := optIndexMap[int32(i)]
+			if isOpt {
+				newElem, pruned := p.maybePruneOptional(elem)
+				if pruned {
+					listPruned = true
+					if newElem != nil {
+						newElems = append(newElems, newElem)
+						prunedIdx++
+					}
+					continue
+				}
+				newOptIndexMap[int32(prunedIdx)] = true
+			}
+			if newElem, prunedElem := p.maybePrune(elem); prunedElem {
+				newElems = append(newElems, newElem)
+				listPruned = true
+			} else {
+				newElems = append(newElems, elem)
+			}
+			prunedIdx++
+		}
+		optIndices = make([]int32, len(newOptIndexMap))
+		idx := 0
+		for i := range newOptIndexMap {
+			optIndices[idx] = i
+			idx++
+		}
+		if listPruned {
+			return p.NewList(node.ID(), newElems, optIndices), true
+		}
+	case ast.MapKind:
+		var mapPruned bool
+		m := node.AsMap()
+		entries := m.Entries()
+		newEntries := make([]ast.EntryExpr, len(entries))
+		for i, entry := range entries {
+			newEntries[i] = entry
+			e := entry.AsMapEntry()
+			newKey, keyPruned := p.maybePrune(e.Key())
+			newValue, valuePruned := p.maybePrune(e.Value())
+			if !keyPruned && !valuePruned {
+				continue
+			}
+			mapPruned = true
+			newEntry := p.NewMapEntry(entry.ID(), newKey, newValue, e.IsOptional())
+			newEntries[i] = newEntry
+		}
+		if mapPruned {
+			return p.NewMap(node.ID(), newEntries), true
+		}
+	case ast.StructKind:
+		var structPruned bool
+		obj := node.AsStruct()
+		fields := obj.Fields()
+		newFields := make([]ast.EntryExpr, len(fields))
+		for i, field := range fields {
+			newFields[i] = field
+			f := field.AsStructField()
+			newValue, prunedValue := p.maybePrune(f.Value())
+			if !prunedValue {
+				continue
+			}
+			structPruned = true
+			newEntry := p.NewStructField(field.ID(), f.Name(), newValue, f.IsOptional())
+			newFields[i] = newEntry
+		}
+		if structPruned {
+			return p.NewStruct(node.ID(), obj.TypeName(), newFields), true
+		}
+	case ast.ComprehensionKind:
+		compre := node.AsComprehension()
+		// Only the range of the comprehension is pruned since the state tracking only records
+		// the last iteration of the comprehension and not each step in the evaluation, which
+		// means that any residuals computed in between might be inaccurate.
+		if newRange, pruned := p.maybePrune(compre.IterRange()); pruned {
+			return p.NewComprehension(
+				node.ID(),
+				newRange,
+				compre.IterVar(),
+				compre.AccuVar(),
+				compre.AccuInit(),
+				compre.LoopCondition(),
+				compre.LoopStep(),
+				compre.Result(),
+			), true
+		}
+	}
+	return node, false
+}
+
+func (p *astPruner) value(id int64) (ref.Val, bool) {
+	val, found := p.state.Value(id)
+	return val, (found && val != nil)
+}
+
+func (p *astPruner) maybeValue(id int64) (ref.Val, bool) {
+	val, found := p.value(id)
+	if !found || types.IsUnknownOrError(val) {
+		return nil, false
+	}
+	return val, true
+}
+
+func (p *astPruner) nextID() int64 {
+	next := p.nextExprID
+	p.nextExprID++
+	return next
+}
+
+type astVisitor struct {
+	// visitExpr is called on every expr node, including those within a map/struct entry.
+	visitExpr func(expr ast.Expr)
+	// visitEntry is called before entering the key, value of a map/struct entry.
+	visitEntry func(entry ast.EntryExpr)
+}
+
+func getMaxID(expr ast.Expr) int64 {
+	maxID := int64(1)
+	visit(expr, maxIDVisitor(&maxID))
+	return maxID
+}
+
+func clearIterVarVisitor(varName string, state EvalState) astVisitor {
+	return astVisitor{
+		visitExpr: func(e ast.Expr) {
+			if e.Kind() == ast.IdentKind && e.AsIdent() == varName {
+				state.SetValue(e.ID(), nil)
+			}
+		},
+	}
+}
+
+func maxIDVisitor(maxID *int64) astVisitor {
+	return astVisitor{
+		visitExpr: func(e ast.Expr) {
+			if e.ID() >= *maxID {
+				*maxID = e.ID() + 1
+			}
+		},
+		visitEntry: func(e ast.EntryExpr) {
+			if e.ID() >= *maxID {
+				*maxID = e.ID() + 1
+			}
+		},
+	}
+}
+
+func visit(expr ast.Expr, visitor astVisitor) {
+	exprs := []ast.Expr{expr}
+	for len(exprs) != 0 {
+		e := exprs[0]
+		if visitor.visitExpr != nil {
+			visitor.visitExpr(e)
+		}
+		exprs = exprs[1:]
+		switch e.Kind() {
+		case ast.SelectKind:
+			exprs = append(exprs, e.AsSelect().Operand())
+		case ast.CallKind:
+			call := e.AsCall()
+			if call.Target() != nil {
+				exprs = append(exprs, call.Target())
+			}
+			exprs = append(exprs, call.Args()...)
+		case ast.ComprehensionKind:
+			compre := e.AsComprehension()
+			exprs = append(exprs,
+				compre.IterRange(),
+				compre.AccuInit(),
+				compre.LoopCondition(),
+				compre.LoopStep(),
+				compre.Result())
+		case ast.ListKind:
+			list := e.AsList()
+			exprs = append(exprs, list.Elements()...)
+		case ast.MapKind:
+			for _, entry := range e.AsMap().Entries() {
+				e := entry.AsMapEntry()
+				if visitor.visitEntry != nil {
+					visitor.visitEntry(entry)
+				}
+				exprs = append(exprs, e.Key())
+				exprs = append(exprs, e.Value())
+			}
+		case ast.StructKind:
+			for _, entry := range e.AsStruct().Fields() {
+				f := entry.AsStructField()
+				if visitor.visitEntry != nil {
+					visitor.visitEntry(entry)
+				}
+				exprs = append(exprs, f.Value())
+			}
+		}
+	}
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/interpreter/runtimecost.go b/hack/tools/vendor/github.com/google/cel-go/interpreter/runtimecost.go
new file mode 100644
index 0000000000..b9b307c155
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/interpreter/runtimecost.go
@@ -0,0 +1,316 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+	"math"
+
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/overloads"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+// WARNING: Any changes to cost calculations in this file require a corresponding change in checker/cost.go
+
+// ActualCostEstimator provides function call cost estimations at runtime.
+//
+// CallCost returns an estimated cost for the function overload invocation with the given args, or nil if it has no
+// estimate to provide. CEL attempts to provide reasonable estimates for its standard function library, so CallCost
+// should typically not need to provide an estimate for CEL's standard functions.
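+//
+// A minimal sketch of a custom estimator (the type and its flat cost value are hypothetical):
+//
+//	type flatCost struct{ perCall uint64 }
+//
+//	func (f flatCost) CallCost(function, overloadID string, args []ref.Val, result ref.Val) *uint64 {
+//		return &f.perCall // charge a flat cost for every tracked call
+//	}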
+type ActualCostEstimator interface {
+	CallCost(function, overloadID string, args []ref.Val, result ref.Val) *uint64
+}
+
+// CostObserver provides an observer that tracks runtime cost.
+func CostObserver(tracker *CostTracker) EvalObserver {
+	observer := func(id int64, programStep any, val ref.Val) {
+		switch t := programStep.(type) {
+		case ConstantQualifier:
+			// TODO: Push identifiers onto the stack before observing constant qualifiers that apply to them
+			// and enable the below pop. Once enabled, this case can be collapsed into the Qualifier case.
+			tracker.cost++
+		case InterpretableConst:
+			// zero cost
+		case InterpretableAttribute:
+			switch a := t.Attr().(type) {
+			case *conditionalAttribute:
+				// Ternary has no direct cost. All cost is from the conditional and the true/false branch expressions.
+				tracker.stack.drop(a.falsy.ID(), a.truthy.ID(), a.expr.ID())
+			default:
+				tracker.stack.drop(t.Attr().ID())
+				tracker.cost += common.SelectAndIdentCost
+			}
+			if !tracker.presenceTestHasCost {
+				if _, isTestOnly := programStep.(*evalTestOnly); isTestOnly {
+					tracker.cost -= common.SelectAndIdentCost
+				}
+			}
+		case *evalExhaustiveConditional:
+			// Ternary has no direct cost. All cost is from the conditional and the true/false branch expressions.
+			tracker.stack.drop(t.attr.falsy.ID(), t.attr.truthy.ID(), t.attr.expr.ID())
+
+		// While the field names are identical, the boolean operation eval structs do not share an interface and so
+		// must be handled individually.
+		case *evalOr:
+			for _, term := range t.terms {
+				tracker.stack.drop(term.ID())
+			}
+		case *evalAnd:
+			for _, term := range t.terms {
+				tracker.stack.drop(term.ID())
+			}
+		case *evalExhaustiveOr:
+			for _, term := range t.terms {
+				tracker.stack.drop(term.ID())
+			}
+		case *evalExhaustiveAnd:
+			for _, term := range t.terms {
+				tracker.stack.drop(term.ID())
+			}
+		case *evalFold:
+			tracker.stack.drop(t.iterRange.ID())
+		case Qualifier:
+			tracker.cost++
+		case InterpretableCall:
+			if argVals, ok := tracker.stack.dropArgs(t.Args()); ok {
+				tracker.cost += tracker.costCall(t, argVals, val)
+			}
+		case InterpretableConstructor:
+			tracker.stack.dropArgs(t.InitVals())
+			switch t.Type() {
+			case types.ListType:
+				tracker.cost += common.ListCreateBaseCost
+			case types.MapType:
+				tracker.cost += common.MapCreateBaseCost
+			default:
+				tracker.cost += common.StructCreateBaseCost
+			}
+		}
+		tracker.stack.push(val, id)
+
+		if tracker.Limit != nil && tracker.cost > *tracker.Limit {
+			panic(EvalCancelledError{Cause: CostLimitExceeded, Message: "operation cancelled: actual cost limit exceeded"})
+		}
+	}
+	return observer
+}
+
+// CostTrackerOption configures the behavior of CostTracker objects.
+type CostTrackerOption func(*CostTracker) error
+
+// CostTrackerLimit sets the runtime limit on the evaluation cost during execution and will terminate the expression
+// evaluation if the limit is exceeded.
+func CostTrackerLimit(limit uint64) CostTrackerOption {
+	return func(tracker *CostTracker) error {
+		tracker.Limit = &limit
+		return nil
+	}
+}
+
+// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
+// By default, a presence test has a cost of one.
+func PresenceTestHasCost(hasCost bool) CostTrackerOption {
+	return func(tracker *CostTracker) error {
+		tracker.presenceTestHasCost = hasCost
+		return nil
+	}
+}
+
+// NewCostTracker creates a new CostTracker with a given estimator and a set of functional CostTrackerOption values.
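+//
+// A minimal construction (illustrative only) that relies on the default cost
+// estimates and just enforces a limit would be:
+//
+//	tracker, err := NewCostTracker(nil, CostTrackerLimit(10000))
+//	if err != nil {
+//		// handle the option configuration error
+//	}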
+func NewCostTracker(estimator ActualCostEstimator, opts ...CostTrackerOption) (*CostTracker, error) {
+	tracker := &CostTracker{
+		Estimator:           estimator,
+		overloadTrackers:    map[string]FunctionTracker{},
+		presenceTestHasCost: true,
+	}
+	for _, opt := range opts {
+		err := opt(tracker)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return tracker, nil
+}
+
+// OverloadCostTracker binds an overload ID to a runtime FunctionTracker implementation.
+//
+// OverloadCostTracker instances augment or override ActualCostEstimator decisions, allowing for versioned and/or
+// optional cost tracking changes.
+func OverloadCostTracker(overloadID string, fnTracker FunctionTracker) CostTrackerOption {
+	return func(tracker *CostTracker) error {
+		tracker.overloadTrackers[overloadID] = fnTracker
+		return nil
+	}
+}
+
+// FunctionTracker computes the actual cost of evaluating the functions with the given arguments and result.
+type FunctionTracker func(args []ref.Val, result ref.Val) *uint64
+
+// CostTracker represents the information needed for tracking runtime cost.
+type CostTracker struct {
+	Estimator           ActualCostEstimator
+	overloadTrackers    map[string]FunctionTracker
+	Limit               *uint64
+	presenceTestHasCost bool
+
+	cost  uint64
+	stack refValStack
+}
+
+// ActualCost returns the runtime cost accumulated so far.
+func (c *CostTracker) ActualCost() uint64 {
+	return c.cost
+}
+
+func (c *CostTracker) costCall(call InterpretableCall, args []ref.Val, result ref.Val) uint64 {
+	var cost uint64
+	if len(c.overloadTrackers) != 0 {
+		if tracker, found := c.overloadTrackers[call.OverloadID()]; found {
+			callCost := tracker(args, result)
+			if callCost != nil {
+				cost += *callCost
+				return cost
+			}
+		}
+	}
+	if c.Estimator != nil {
+		callCost := c.Estimator.CallCost(call.Function(), call.OverloadID(), args, result)
+		if callCost != nil {
+			cost += *callCost
+			return cost
+		}
+	}
+	// If the user didn't specify a cost, the default way of calculating runtime cost below is used.
+	// Users with their own ActualCostEstimator implementation should make sure it covers the mapping
+	// between overload ID and cost calculation.
+	switch call.OverloadID() {
+	// O(n) functions
+	case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString, overloads.ExtQuoteString, overloads.ExtFormatString:
+		cost += uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor))
+	case overloads.InList:
+		// If a list is composed entirely of constant values this is O(1), but we don't account for that here.
+		// We just assume all list containment checks are O(n).
+		cost += c.actualSize(args[1])
+	// O(min(m, n)) functions
+	case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString,
+		overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes,
+		overloads.Equals, overloads.NotEquals:
+		// When we check the equality of 2 scalar values (e.g. 2 integers, 2 floating-point numbers, 2 booleans etc.),
+		// the CostTracker.actualSize() function by definition returns 1 for each operand, resulting in an overall cost
+		// of 1.
+		lhsSize := c.actualSize(args[0])
+		rhsSize := c.actualSize(args[1])
+		minSize := lhsSize
+		if rhsSize < minSize {
+			minSize = rhsSize
+		}
+		cost += uint64(math.Ceil(float64(minSize) * common.StringTraversalCostFactor))
+	// O(m+n) functions
+	case overloads.AddString, overloads.AddBytes:
+		// In the worst-case scenario, we would need to reallocate a new backing store and copy both operands over.
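+		// For example, with operand sizes m and n the charge is ceil((m+n) * StringTraversalCostFactor);
+		// if the factor were 0.1, operands of sizes 40 and 60 would add ceil(100 * 0.1) = 10 to the cost.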
+		cost += uint64(math.Ceil(float64(c.actualSize(args[0])+c.actualSize(args[1])) * common.StringTraversalCostFactor))
+	// O(nm) functions
+	case overloads.MatchesString:
+		// https://swtch.com/~rsc/regexp/regexp1.html applies to the RE2 implementation supported by CEL.
+		// Add one to the string length for purposes of cost calculation to prevent the product of string and regex
+		// from being 0 in the case where the string is empty but the regex is still expensive.
+		strCost := uint64(math.Ceil((1.0 + float64(c.actualSize(args[0]))) * common.StringTraversalCostFactor))
+		// We don't know how many expressions are in the regex, just the string length (a huge
+		// improvement here would be to somehow get a count of the number of expressions in the regex or
+		// how many states are in the regex state machine and use that to measure regex cost).
+		// For now, we're making a guess that each expression in a regex is typically at least 4 chars
+		// in length.
+		regexCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.RegexStringLengthCostFactor))
+		cost += strCost * regexCost
+	case overloads.ContainsString:
+		strCost := uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor))
+		substrCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.StringTraversalCostFactor))
+		cost += strCost * substrCost
+
+	default:
+		// The following operations are assumed to have O(1) complexity.
+		// - AddList due to the implementation. Index lookup can be O(c), where c is the
+		//   number of concatenated lists, but we don't track that in cost calculations.
+		// - Conversions, since none perform a traversal of a type of unbound length.
+		// - Computing the size of strings, byte sequences, lists and maps.
+		// - Logical operations and all operators on fixed width scalars (comparisons, equality).
+		// - Any functions that don't have a declared cost either here or in the provided ActualCostEstimator.
+		cost++
+
+	}
+	return cost
+}
+
+// actualSize returns the size of the value.
+func (c *CostTracker) actualSize(value ref.Val) uint64 {
+	if sz, ok := value.(traits.Sizer); ok {
+		return uint64(sz.Size().(types.Int))
+	}
+	return 1
+}
+
+type stackVal struct {
+	Val ref.Val
+	ID  int64
+}
+
+// refValStack keeps track of values on the stack for cost calculation purposes.
+type refValStack []stackVal
+
+func (s *refValStack) push(val ref.Val, id int64) {
+	value := stackVal{Val: val, ID: id}
+	*s = append(*s, value)
+}
+
+// TODO: Allowing drop and dropArgs to remove stack items above the IDs they are provided is a workaround. drop and dropArgs
+// should find and remove only the stack items matching the provided IDs once all attributes are properly pushed and popped from stack.
+
+// drop searches the stack for each ID and removes the ID and all stack items above it.
+// If none of the IDs are found, the stack is not modified.
+// WARNING: It is possible for multiple expressions with the same ID to exist (due to how macros are implemented) so it's
+// possible that a dropped ID will remain on the stack. They should be removed when IDs on the stack are popped.
+func (s *refValStack) drop(ids ...int64) {
+	for _, id := range ids {
+		for idx := len(*s) - 1; idx >= 0; idx-- {
+			if (*s)[idx].ID == id {
+				*s = (*s)[:idx]
+				break
+			}
+		}
+	}
+}
+
+// dropArgs searches the stack for all the args by their IDs, accumulates their associated ref.Vals and drops any
+// stack items above any of the arg IDs. If any of the IDs are not found on the stack, false is returned.
+// Args are assumed to be found in the stack in reverse order, i.e. the last arg is expected to be found highest in
+// the stack.
+// WARNING: It is possible for multiple expressions with the same ID to exist (due to how macros are implemented) so it's
+// possible that a dropped ID will remain on the stack. They should be removed when IDs on the stack are popped.
+func (s *refValStack) dropArgs(args []Interpretable) ([]ref.Val, bool) {
+	result := make([]ref.Val, len(args))
+argloop:
+	for nIdx := len(args) - 1; nIdx >= 0; nIdx-- {
+		for idx := len(*s) - 1; idx >= 0; idx-- {
+			if (*s)[idx].ID == args[nIdx].ID() {
+				el := (*s)[idx]
+				*s = (*s)[:idx]
+				result[nIdx] = el.Val
+				continue argloop
+			}
+		}
+		return nil, false
+	}
+	return result, true
+}
diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/parser/BUILD.bazel
new file mode 100644
index 0000000000..97bc9bd434
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/parser/BUILD.bazel
@@ -0,0 +1,58 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+    licenses = ["notice"],  # Apache 2.0
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "errors.go",
+        "helper.go",
+        "input.go",
+        "macro.go",
+        "options.go",
+        "parser.go",
+        "unescape.go",
+        "unparser.go",
+    ],
+    importpath = "github.com/google/cel-go/parser",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//common:go_default_library",
+        "//common/ast:go_default_library",
+        "//common/operators:go_default_library",
+        "//common/runes:go_default_library",
+        "//common/types:go_default_library",
+        "//common/types/ref:go_default_library",
+        "//parser/gen:go_default_library",
+        "@com_github_antlr4_go_antlr_v4//:go_default_library",
+        "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library",
+        "@org_golang_google_protobuf//proto:go_default_library",
+        "@org_golang_google_protobuf//types/known/structpb:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = [
+        "helper_test.go",
+        "parser_test.go",
+        "unescape_test.go",
+        "unparser_test.go",
+    ],
+    embed = [
+        ":go_default_library",
+    ],
+    deps = [
+        "//common/ast:go_default_library",
+        "//common/debug:go_default_library",
+        "//common/types:go_default_library",
+        "//parser/gen:go_default_library",
+        "//test:go_default_library",
+        "@com_github_antlr4_go_antlr_v4//:go_default_library",
+        "@org_golang_google_protobuf//proto:go_default_library",
+        "@org_golang_google_protobuf//testing/protocmp:go_default_library",
+    ],
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/errors.go b/hack/tools/vendor/github.com/google/cel-go/parser/errors.go
new file mode 100644
index 0000000000..93ae7a3ad8
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/parser/errors.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/common"
+)
+
+// parseErrors is a specialization of Errors.
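+// It wraps the shared common.Errors collector with parser-specific helpers for
+// counting errors and reporting internal and syntax errors (see the methods that follow).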
+type parseErrors struct { + errs *common.Errors +} + +// errorCount indicates the number of errors reported. +func (e *parseErrors) errorCount() int { + return len(e.errs.GetErrors()) +} + +func (e *parseErrors) internalError(message string) { + e.errs.ReportErrorAtID(0, common.NoLocation, message) +} + +func (e *parseErrors) syntaxError(l common.Location, message string) { + e.errs.ReportErrorAtID(0, l, fmt.Sprintf("Syntax error: %s", message)) +} + +func (e *parseErrors) reportErrorAtID(id int64, l common.Location, message string, args ...any) { + e.errs.ReportErrorAtID(id, l, message, args...) +} diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/hack/tools/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel new file mode 100644 index 0000000000..3efed87b70 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//:__subpackages__"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "cel_base_listener.go", + "cel_base_visitor.go", + "cel_lexer.go", + "cel_listener.go", + "cel_parser.go", + "cel_visitor.go", + ], + data = [ + "CEL.tokens", + "CELLexer.tokens", + ], + importpath = "github.com/google/cel-go/parser/gen", + deps = [ + "@com_github_antlr4_go_antlr_v4//:go_default_library", + ], +) diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/CEL.g4 b/hack/tools/vendor/github.com/google/cel-go/parser/gen/CEL.g4 new file mode 100644 index 0000000000..b011da803c --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/CEL.g4 @@ -0,0 +1,200 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +grammar CEL; + +// Grammar Rules +// ============= + +start + : e=expr EOF + ; + +expr + : e=conditionalOr (op='?' e1=conditionalOr ':' e2=expr)? + ; + +conditionalOr + : e=conditionalAnd (ops+='||' e1+=conditionalAnd)* + ; + +conditionalAnd + : e=relation (ops+='&&' e1+=relation)* + ; + +relation + : calc + | relation op=('<'|'<='|'>='|'>'|'=='|'!='|'in') relation + ; + +calc + : unary + | calc op=('*'|'/'|'%') calc + | calc op=('+'|'-') calc + ; + +unary + : member # MemberExpr + | (ops+='!')+ member # LogicalNot + | (ops+='-')+ member # Negate + ; + +member + : primary # PrimaryExpr + | member op='.' (opt='?')? id=IDENTIFIER # Select + | member op='.' id=IDENTIFIER open='(' args=exprList? ')' # MemberCall + | member op='[' (opt='?')? index=expr ']' # Index + ; + +primary + : leadingDot='.'? id=IDENTIFIER (op='(' args=exprList? ')')? # IdentOrGlobalCall + | '(' e=expr ')' # Nested + | op='[' elems=listInit? ','? ']' # CreateList + | op='{' entries=mapInitializerList? ','? '}' # CreateStruct + | leadingDot='.'? ids+=IDENTIFIER (ops+='.' ids+=IDENTIFIER)* + op='{' entries=fieldInitializerList? ','? 
'}' # CreateMessage + | literal # ConstantLiteral + ; + +exprList + : e+=expr (',' e+=expr)* + ; + +listInit + : elems+=optExpr (',' elems+=optExpr)* + ; + +fieldInitializerList + : fields+=optField cols+=':' values+=expr (',' fields+=optField cols+=':' values+=expr)* + ; + +optField + : (opt='?')? IDENTIFIER + ; + +mapInitializerList + : keys+=optExpr cols+=':' values+=expr (',' keys+=optExpr cols+=':' values+=expr)* + ; + +optExpr + : (opt='?')? e=expr + ; + +literal + : sign=MINUS? tok=NUM_INT # Int + | tok=NUM_UINT # Uint + | sign=MINUS? tok=NUM_FLOAT # Double + | tok=STRING # String + | tok=BYTES # Bytes + | tok=CEL_TRUE # BoolTrue + | tok=CEL_FALSE # BoolFalse + | tok=NUL # Null + ; + +// Lexer Rules +// =========== + +EQUALS : '=='; +NOT_EQUALS : '!='; +IN: 'in'; +LESS : '<'; +LESS_EQUALS : '<='; +GREATER_EQUALS : '>='; +GREATER : '>'; +LOGICAL_AND : '&&'; +LOGICAL_OR : '||'; + +LBRACKET : '['; +RPRACKET : ']'; +LBRACE : '{'; +RBRACE : '}'; +LPAREN : '('; +RPAREN : ')'; +DOT : '.'; +COMMA : ','; +MINUS : '-'; +EXCLAM : '!'; +QUESTIONMARK : '?'; +COLON : ':'; +PLUS : '+'; +STAR : '*'; +SLASH : '/'; +PERCENT : '%'; +CEL_TRUE : 'true'; +CEL_FALSE : 'false'; +NUL : 'null'; + +fragment BACKSLASH : '\\'; +fragment LETTER : 'A'..'Z' | 'a'..'z' ; +fragment DIGIT : '0'..'9' ; +fragment EXPONENT : ('e' | 'E') ( '+' | '-' )? DIGIT+ ; +fragment HEXDIGIT : ('0'..'9'|'a'..'f'|'A'..'F') ; +fragment RAW : 'r' | 'R'; + +fragment ESC_SEQ + : ESC_CHAR_SEQ + | ESC_BYTE_SEQ + | ESC_UNI_SEQ + | ESC_OCT_SEQ + ; + +fragment ESC_CHAR_SEQ + : BACKSLASH ('a'|'b'|'f'|'n'|'r'|'t'|'v'|'"'|'\''|'\\'|'?'|'`') + ; + +fragment ESC_OCT_SEQ + : BACKSLASH ('0'..'3') ('0'..'7') ('0'..'7') + ; + +fragment ESC_BYTE_SEQ + : BACKSLASH ( 'x' | 'X' ) HEXDIGIT HEXDIGIT + ; + +fragment ESC_UNI_SEQ + : BACKSLASH 'u' HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT + | BACKSLASH 'U' HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT + ; + +WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ -> channel(HIDDEN) ; +COMMENT : '//' (~'\n')* -> channel(HIDDEN) ; + +NUM_FLOAT + : ( DIGIT+ ('.' DIGIT+) EXPONENT? + | DIGIT+ EXPONENT + | '.' DIGIT+ EXPONENT? + ) + ; + +NUM_INT + : ( DIGIT+ | '0x' HEXDIGIT+ ); + +NUM_UINT + : DIGIT+ ( 'u' | 'U' ) + | '0x' HEXDIGIT+ ( 'u' | 'U' ) + ; + +STRING + : '"' (ESC_SEQ | ~('\\'|'"'|'\n'|'\r'))* '"' + | '\'' (ESC_SEQ | ~('\\'|'\''|'\n'|'\r'))* '\'' + | '"""' (ESC_SEQ | ~('\\'))*? '"""' + | '\'\'\'' (ESC_SEQ | ~('\\'))*? '\'\'\'' + | RAW '"' ~('"'|'\n'|'\r')* '"' + | RAW '\'' ~('\''|'\n'|'\r')* '\'' + | RAW '"""' .*? '"""' + | RAW '\'\'\'' .*? '\'\'\'' + ; + +BYTES : ('b' | 'B') STRING; + +IDENTIFIER : (LETTER | '_') ( LETTER | DIGIT | '_')*; diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/CEL.interp b/hack/tools/vendor/github.com/google/cel-go/parser/gen/CEL.interp new file mode 100644 index 0000000000..75b8bb3e20 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/CEL.interp @@ -0,0 +1,99 @@ +token literal names: +null +'==' +'!=' +'in' +'<' +'<=' +'>=' +'>' +'&&' +'||' +'[' +']' +'{' +'}' +'(' +')' +'.' +',' +'-' +'!' +'?' 
+':' +'+' +'*' +'/' +'%' +'true' +'false' +'null' +null +null +null +null +null +null +null +null + +token symbolic names: +null +EQUALS +NOT_EQUALS +IN +LESS +LESS_EQUALS +GREATER_EQUALS +GREATER +LOGICAL_AND +LOGICAL_OR +LBRACKET +RPRACKET +LBRACE +RBRACE +LPAREN +RPAREN +DOT +COMMA +MINUS +EXCLAM +QUESTIONMARK +COLON +PLUS +STAR +SLASH +PERCENT +CEL_TRUE +CEL_FALSE +NUL +WHITESPACE +COMMENT +NUM_FLOAT +NUM_INT +NUM_UINT +STRING +BYTES +IDENTIFIER + +rule names: +start +expr +conditionalOr +conditionalAnd +relation +calc +unary +member +primary +exprList +listInit +fieldInitializerList +optField +mapInitializerList +optExpr +literal + + +atn: +[4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 
4, -1, 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, 
14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219, 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, 240, 248] \ No newline at end of file diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/CEL.tokens b/hack/tools/vendor/github.com/google/cel-go/parser/gen/CEL.tokens new file mode 100644 index 0000000000..b305bdad32 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/CEL.tokens @@ -0,0 +1,64 @@ +EQUALS=1 +NOT_EQUALS=2 +IN=3 +LESS=4 +LESS_EQUALS=5 +GREATER_EQUALS=6 +GREATER=7 +LOGICAL_AND=8 +LOGICAL_OR=9 +LBRACKET=10 +RPRACKET=11 +LBRACE=12 +RBRACE=13 +LPAREN=14 +RPAREN=15 +DOT=16 +COMMA=17 +MINUS=18 +EXCLAM=19 +QUESTIONMARK=20 +COLON=21 +PLUS=22 +STAR=23 +SLASH=24 +PERCENT=25 +CEL_TRUE=26 +CEL_FALSE=27 +NUL=28 +WHITESPACE=29 +COMMENT=30 +NUM_FLOAT=31 +NUM_INT=32 +NUM_UINT=33 +STRING=34 +BYTES=35 +IDENTIFIER=36 +'=='=1 +'!='=2 +'in'=3 +'<'=4 +'<='=5 +'>='=6 +'>'=7 +'&&'=8 +'||'=9 +'['=10 +']'=11 +'{'=12 +'}'=13 +'('=14 +')'=15 +'.'=16 +','=17 +'-'=18 +'!'=19 +'?'=20 +':'=21 +'+'=22 +'*'=23 +'/'=24 +'%'=25 +'true'=26 +'false'=27 +'null'=28 diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp b/hack/tools/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp new file mode 100644 index 0000000000..26e7f471e8 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp @@ -0,0 +1,136 @@ +token literal names: +null +'==' +'!=' +'in' +'<' +'<=' +'>=' +'>' +'&&' +'||' +'[' +']' +'{' +'}' +'(' +')' +'.' +',' +'-' +'!' +'?' 
+':' +'+' +'*' +'/' +'%' +'true' +'false' +'null' +null +null +null +null +null +null +null +null + +token symbolic names: +null +EQUALS +NOT_EQUALS +IN +LESS +LESS_EQUALS +GREATER_EQUALS +GREATER +LOGICAL_AND +LOGICAL_OR +LBRACKET +RPRACKET +LBRACE +RBRACE +LPAREN +RPAREN +DOT +COMMA +MINUS +EXCLAM +QUESTIONMARK +COLON +PLUS +STAR +SLASH +PERCENT +CEL_TRUE +CEL_FALSE +NUL +WHITESPACE +COMMENT +NUM_FLOAT +NUM_INT +NUM_UINT +STRING +BYTES +IDENTIFIER + +rule names: +EQUALS +NOT_EQUALS +IN +LESS +LESS_EQUALS +GREATER_EQUALS +GREATER +LOGICAL_AND +LOGICAL_OR +LBRACKET +RPRACKET +LBRACE +RBRACE +LPAREN +RPAREN +DOT +COMMA +MINUS +EXCLAM +QUESTIONMARK +COLON +PLUS +STAR +SLASH +PERCENT +CEL_TRUE +CEL_FALSE +NUL +BACKSLASH +LETTER +DIGIT +EXPONENT +HEXDIGIT +RAW +ESC_SEQ +ESC_CHAR_SEQ +ESC_OCT_SEQ +ESC_BYTE_SEQ +ESC_UNI_SEQ +WHITESPACE +COMMENT +NUM_FLOAT +NUM_INT +NUM_UINT +STRING +BYTES +IDENTIFIER + +channel names: +DEFAULT_TOKEN_CHANNEL +HIDDEN + +mode names: +DEFAULT_MODE + +atn: +[4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, 44, 1, 
44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183, 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0, 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, 
12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5, 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0, 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2, 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3, 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, 1, 0, 0, 
0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273, 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1, 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69, 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0, 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, 5, 39, 0, 0, 394, 
395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294, 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, 413, 418, 420, 1, 0, 1, 0] \ No newline at end of file diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens b/hack/tools/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens new file mode 100644 index 0000000000..b305bdad32 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens @@ -0,0 +1,64 @@ +EQUALS=1 +NOT_EQUALS=2 +IN=3 +LESS=4 +LESS_EQUALS=5 +GREATER_EQUALS=6 +GREATER=7 +LOGICAL_AND=8 +LOGICAL_OR=9 +LBRACKET=10 +RPRACKET=11 +LBRACE=12 +RBRACE=13 +LPAREN=14 +RPAREN=15 +DOT=16 +COMMA=17 +MINUS=18 +EXCLAM=19 +QUESTIONMARK=20 +COLON=21 +PLUS=22 +STAR=23 +SLASH=24 +PERCENT=25 +CEL_TRUE=26 +CEL_FALSE=27 +NUL=28 +WHITESPACE=29 +COMMENT=30 +NUM_FLOAT=31 +NUM_INT=32 +NUM_UINT=33 +STRING=34 +BYTES=35 +IDENTIFIER=36 +'=='=1 +'!='=2 +'in'=3 +'<'=4 +'<='=5 +'>='=6 +'>'=7 +'&&'=8 +'||'=9 +'['=10 +']'=11 +'{'=12 +'}'=13 +'('=14 +')'=15 +'.'=16 +','=17 +'-'=18 +'!'=19 +'?'=20 +':'=21 +'+'=22 +'*'=23 +'/'=24 +'%'=25 +'true'=26 +'false'=27 +'null'=28 diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go new file mode 100644 index 0000000000..c49d038676 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go @@ -0,0 +1,219 @@ +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. + +package gen // CEL +import "github.com/antlr4-go/antlr/v4" + +// BaseCELListener is a complete listener for a parse tree produced by CELParser. +type BaseCELListener struct{} + +var _ CELListener = &BaseCELListener{} + +// VisitTerminal is called when a terminal node is visited. +func (s *BaseCELListener) VisitTerminal(node antlr.TerminalNode) {} + +// VisitErrorNode is called when an error node is visited. +func (s *BaseCELListener) VisitErrorNode(node antlr.ErrorNode) {} + +// EnterEveryRule is called when any rule is entered. +func (s *BaseCELListener) EnterEveryRule(ctx antlr.ParserRuleContext) {} + +// ExitEveryRule is called when any rule is exited. +func (s *BaseCELListener) ExitEveryRule(ctx antlr.ParserRuleContext) {} + +// EnterStart is called when production start is entered. +func (s *BaseCELListener) EnterStart(ctx *StartContext) {} + +// ExitStart is called when production start is exited. 
+func (s *BaseCELListener) ExitStart(ctx *StartContext) {} + +// EnterExpr is called when production expr is entered. +func (s *BaseCELListener) EnterExpr(ctx *ExprContext) {} + +// ExitExpr is called when production expr is exited. +func (s *BaseCELListener) ExitExpr(ctx *ExprContext) {} + +// EnterConditionalOr is called when production conditionalOr is entered. +func (s *BaseCELListener) EnterConditionalOr(ctx *ConditionalOrContext) {} + +// ExitConditionalOr is called when production conditionalOr is exited. +func (s *BaseCELListener) ExitConditionalOr(ctx *ConditionalOrContext) {} + +// EnterConditionalAnd is called when production conditionalAnd is entered. +func (s *BaseCELListener) EnterConditionalAnd(ctx *ConditionalAndContext) {} + +// ExitConditionalAnd is called when production conditionalAnd is exited. +func (s *BaseCELListener) ExitConditionalAnd(ctx *ConditionalAndContext) {} + +// EnterRelation is called when production relation is entered. +func (s *BaseCELListener) EnterRelation(ctx *RelationContext) {} + +// ExitRelation is called when production relation is exited. +func (s *BaseCELListener) ExitRelation(ctx *RelationContext) {} + +// EnterCalc is called when production calc is entered. +func (s *BaseCELListener) EnterCalc(ctx *CalcContext) {} + +// ExitCalc is called when production calc is exited. +func (s *BaseCELListener) ExitCalc(ctx *CalcContext) {} + +// EnterMemberExpr is called when production MemberExpr is entered. +func (s *BaseCELListener) EnterMemberExpr(ctx *MemberExprContext) {} + +// ExitMemberExpr is called when production MemberExpr is exited. +func (s *BaseCELListener) ExitMemberExpr(ctx *MemberExprContext) {} + +// EnterLogicalNot is called when production LogicalNot is entered. +func (s *BaseCELListener) EnterLogicalNot(ctx *LogicalNotContext) {} + +// ExitLogicalNot is called when production LogicalNot is exited. +func (s *BaseCELListener) ExitLogicalNot(ctx *LogicalNotContext) {} + +// EnterNegate is called when production Negate is entered. +func (s *BaseCELListener) EnterNegate(ctx *NegateContext) {} + +// ExitNegate is called when production Negate is exited. +func (s *BaseCELListener) ExitNegate(ctx *NegateContext) {} + +// EnterMemberCall is called when production MemberCall is entered. +func (s *BaseCELListener) EnterMemberCall(ctx *MemberCallContext) {} + +// ExitMemberCall is called when production MemberCall is exited. +func (s *BaseCELListener) ExitMemberCall(ctx *MemberCallContext) {} + +// EnterSelect is called when production Select is entered. +func (s *BaseCELListener) EnterSelect(ctx *SelectContext) {} + +// ExitSelect is called when production Select is exited. +func (s *BaseCELListener) ExitSelect(ctx *SelectContext) {} + +// EnterPrimaryExpr is called when production PrimaryExpr is entered. +func (s *BaseCELListener) EnterPrimaryExpr(ctx *PrimaryExprContext) {} + +// ExitPrimaryExpr is called when production PrimaryExpr is exited. +func (s *BaseCELListener) ExitPrimaryExpr(ctx *PrimaryExprContext) {} + +// EnterIndex is called when production Index is entered. +func (s *BaseCELListener) EnterIndex(ctx *IndexContext) {} + +// ExitIndex is called when production Index is exited. +func (s *BaseCELListener) ExitIndex(ctx *IndexContext) {} + +// EnterIdentOrGlobalCall is called when production IdentOrGlobalCall is entered. +func (s *BaseCELListener) EnterIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {} + +// ExitIdentOrGlobalCall is called when production IdentOrGlobalCall is exited. 
+func (s *BaseCELListener) ExitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {} + +// EnterNested is called when production Nested is entered. +func (s *BaseCELListener) EnterNested(ctx *NestedContext) {} + +// ExitNested is called when production Nested is exited. +func (s *BaseCELListener) ExitNested(ctx *NestedContext) {} + +// EnterCreateList is called when production CreateList is entered. +func (s *BaseCELListener) EnterCreateList(ctx *CreateListContext) {} + +// ExitCreateList is called when production CreateList is exited. +func (s *BaseCELListener) ExitCreateList(ctx *CreateListContext) {} + +// EnterCreateStruct is called when production CreateStruct is entered. +func (s *BaseCELListener) EnterCreateStruct(ctx *CreateStructContext) {} + +// ExitCreateStruct is called when production CreateStruct is exited. +func (s *BaseCELListener) ExitCreateStruct(ctx *CreateStructContext) {} + +// EnterCreateMessage is called when production CreateMessage is entered. +func (s *BaseCELListener) EnterCreateMessage(ctx *CreateMessageContext) {} + +// ExitCreateMessage is called when production CreateMessage is exited. +func (s *BaseCELListener) ExitCreateMessage(ctx *CreateMessageContext) {} + +// EnterConstantLiteral is called when production ConstantLiteral is entered. +func (s *BaseCELListener) EnterConstantLiteral(ctx *ConstantLiteralContext) {} + +// ExitConstantLiteral is called when production ConstantLiteral is exited. +func (s *BaseCELListener) ExitConstantLiteral(ctx *ConstantLiteralContext) {} + +// EnterExprList is called when production exprList is entered. +func (s *BaseCELListener) EnterExprList(ctx *ExprListContext) {} + +// ExitExprList is called when production exprList is exited. +func (s *BaseCELListener) ExitExprList(ctx *ExprListContext) {} + +// EnterListInit is called when production listInit is entered. +func (s *BaseCELListener) EnterListInit(ctx *ListInitContext) {} + +// ExitListInit is called when production listInit is exited. +func (s *BaseCELListener) ExitListInit(ctx *ListInitContext) {} + +// EnterFieldInitializerList is called when production fieldInitializerList is entered. +func (s *BaseCELListener) EnterFieldInitializerList(ctx *FieldInitializerListContext) {} + +// ExitFieldInitializerList is called when production fieldInitializerList is exited. +func (s *BaseCELListener) ExitFieldInitializerList(ctx *FieldInitializerListContext) {} + +// EnterOptField is called when production optField is entered. +func (s *BaseCELListener) EnterOptField(ctx *OptFieldContext) {} + +// ExitOptField is called when production optField is exited. +func (s *BaseCELListener) ExitOptField(ctx *OptFieldContext) {} + +// EnterMapInitializerList is called when production mapInitializerList is entered. +func (s *BaseCELListener) EnterMapInitializerList(ctx *MapInitializerListContext) {} + +// ExitMapInitializerList is called when production mapInitializerList is exited. +func (s *BaseCELListener) ExitMapInitializerList(ctx *MapInitializerListContext) {} + +// EnterOptExpr is called when production optExpr is entered. +func (s *BaseCELListener) EnterOptExpr(ctx *OptExprContext) {} + +// ExitOptExpr is called when production optExpr is exited. +func (s *BaseCELListener) ExitOptExpr(ctx *OptExprContext) {} + +// EnterInt is called when production Int is entered. +func (s *BaseCELListener) EnterInt(ctx *IntContext) {} + +// ExitInt is called when production Int is exited. +func (s *BaseCELListener) ExitInt(ctx *IntContext) {} + +// EnterUint is called when production Uint is entered. 
+func (s *BaseCELListener) EnterUint(ctx *UintContext) {} + +// ExitUint is called when production Uint is exited. +func (s *BaseCELListener) ExitUint(ctx *UintContext) {} + +// EnterDouble is called when production Double is entered. +func (s *BaseCELListener) EnterDouble(ctx *DoubleContext) {} + +// ExitDouble is called when production Double is exited. +func (s *BaseCELListener) ExitDouble(ctx *DoubleContext) {} + +// EnterString is called when production String is entered. +func (s *BaseCELListener) EnterString(ctx *StringContext) {} + +// ExitString is called when production String is exited. +func (s *BaseCELListener) ExitString(ctx *StringContext) {} + +// EnterBytes is called when production Bytes is entered. +func (s *BaseCELListener) EnterBytes(ctx *BytesContext) {} + +// ExitBytes is called when production Bytes is exited. +func (s *BaseCELListener) ExitBytes(ctx *BytesContext) {} + +// EnterBoolTrue is called when production BoolTrue is entered. +func (s *BaseCELListener) EnterBoolTrue(ctx *BoolTrueContext) {} + +// ExitBoolTrue is called when production BoolTrue is exited. +func (s *BaseCELListener) ExitBoolTrue(ctx *BoolTrueContext) {} + +// EnterBoolFalse is called when production BoolFalse is entered. +func (s *BaseCELListener) EnterBoolFalse(ctx *BoolFalseContext) {} + +// ExitBoolFalse is called when production BoolFalse is exited. +func (s *BaseCELListener) ExitBoolFalse(ctx *BoolFalseContext) {} + +// EnterNull is called when production Null is entered. +func (s *BaseCELListener) EnterNull(ctx *NullContext) {} + +// ExitNull is called when production Null is exited. +func (s *BaseCELListener) ExitNull(ctx *NullContext) {} diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go new file mode 100644 index 0000000000..b2c0783d39 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go @@ -0,0 +1,141 @@ +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. 
+ +package gen // CEL +import "github.com/antlr4-go/antlr/v4" + + +type BaseCELVisitor struct { + *antlr.BaseParseTreeVisitor +} + +func (v *BaseCELVisitor) VisitStart(ctx *StartContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitExpr(ctx *ExprContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitConditionalOr(ctx *ConditionalOrContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitConditionalAnd(ctx *ConditionalAndContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitRelation(ctx *RelationContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitCalc(ctx *CalcContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitMemberExpr(ctx *MemberExprContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitLogicalNot(ctx *LogicalNotContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitNegate(ctx *NegateContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitMemberCall(ctx *MemberCallContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitSelect(ctx *SelectContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitIndex(ctx *IndexContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitNested(ctx *NestedContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitCreateList(ctx *CreateListContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitCreateStruct(ctx *CreateStructContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitCreateMessage(ctx *CreateMessageContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitConstantLiteral(ctx *ConstantLiteralContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitExprList(ctx *ExprListContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitListInit(ctx *ListInitContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitOptField(ctx *OptFieldContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitMapInitializerList(ctx *MapInitializerListContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitOptExpr(ctx *OptExprContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitInt(ctx *IntContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitUint(ctx *UintContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitDouble(ctx *DoubleContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitString(ctx *StringContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitBytes(ctx *BytesContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) 
VisitBoolTrue(ctx *BoolTrueContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitBoolFalse(ctx *BoolFalseContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitNull(ctx *NullContext) interface{} { + return v.VisitChildren(ctx) +} diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go new file mode 100644 index 0000000000..e026cc46f0 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go @@ -0,0 +1,344 @@ +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. + +package gen +import ( + "fmt" + "sync" + "unicode" + "github.com/antlr4-go/antlr/v4" +) +// Suppress unused import error +var _ = fmt.Printf +var _ = sync.Once{} +var _ = unicode.IsLetter + + +type CELLexer struct { + *antlr.BaseLexer + channelNames []string + modeNames []string + // TODO: EOF string +} + +var CELLexerLexerStaticData struct { + once sync.Once + serializedATN []int32 + ChannelNames []string + ModeNames []string + LiteralNames []string + SymbolicNames []string + RuleNames []string + PredictionContextCache *antlr.PredictionContextCache + atn *antlr.ATN + decisionToDFA []*antlr.DFA +} + +func cellexerLexerInit() { + staticData := &CELLexerLexerStaticData + staticData.ChannelNames = []string{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", + } + staticData.ModeNames = []string{ + "DEFAULT_MODE", + } + staticData.LiteralNames = []string{ + "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", + "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", + "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", + } + staticData.SymbolicNames = []string{ + "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", + "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", + "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", + "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", + "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", + "STRING", "BYTES", "IDENTIFIER", + } + staticData.RuleNames = []string{ + "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", + "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", + "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", + "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", + "NUL", "BACKSLASH", "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW", + "ESC_SEQ", "ESC_CHAR_SEQ", "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ", + "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", + "BYTES", "IDENTIFIER", + } + staticData.PredictionContextCache = antlr.NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, + 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, + 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, + 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, + 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, + 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, + 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, + 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 
40, 7, 40, 2, 41, 7, + 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, + 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, + 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, + 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, + 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, + 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, + 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, + 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, + 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, + 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, + 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, + 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, + 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, + 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, + 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, + 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, + 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, + 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, + 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, + 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, + 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, + 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, + 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, + 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, + 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, + 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, + 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, + 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, + 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, + 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, + 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, + 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, + 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, + 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, + 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, + 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, + 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, + 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, + 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, + 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, + 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, + 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, + 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, + 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, + 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, + 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, + 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, + 92, 1, 0, 92, 92, 3, 0, 10, 
10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, + 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, + 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, + 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, + 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, + 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, + 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, + 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, + 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, + 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, + 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, + 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, + 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, + 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, + 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, + 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, + 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, + 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, + 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, + 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183, + 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, + 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, + 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, + 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, + 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, + 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, + 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, + 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0, + 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, + 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, + 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, + 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, + 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, + 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, + 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, + 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, + 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, + 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5, + 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, + 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, + 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, + 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, + 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0, + 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, + 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, + 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, + 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, + 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 
173, 2, + 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, + 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, + 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, + 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3, + 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, + 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, + 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, + 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, + 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, + 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, + 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, + 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, + 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, + 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, + 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, + 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, + 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, + 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, + 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, + 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, + 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, + 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, + 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, + 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, + 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, + 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, + 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, + 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, + 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, + 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, + 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, + 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, + 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, + 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, + 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273, + 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, + 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, + 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, + 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, + 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, + 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, + 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, + 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, + 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1, + 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, + 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, + 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, + 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 
0, 0, 305, 306, + 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, + 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, + 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, + 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, + 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, + 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, + 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, + 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, + 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, + 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, + 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, + 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, + 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, + 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, + 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69, + 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, + 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, + 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, + 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, + 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0, + 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, + 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, + 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, + 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, + 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, + 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, + 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, + 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, + 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, + 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, + 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, + 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, + 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, + 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, + 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, + 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, + 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, + 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, + 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, + 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, + 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, + 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, + 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, + 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, + 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294, + 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, + 413, 418, 420, 1, 0, 1, 0, +} + deserializer := antlr.NewATNDeserializer(nil) + staticData.atn = 
deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = antlr.NewDFA(state, index) + } +} + +// CELLexerInit initializes any static state used to implement CELLexer. By default the +// static state used to implement the lexer is lazily initialized during the first call to +// NewCELLexer(). You can call this function if you wish to initialize the static state ahead +// of time. +func CELLexerInit() { + staticData := &CELLexerLexerStaticData + staticData.once.Do(cellexerLexerInit) +} + +// NewCELLexer produces a new lexer instance for the optional input antlr.CharStream. +func NewCELLexer(input antlr.CharStream) *CELLexer { + CELLexerInit() + l := new(CELLexer) + l.BaseLexer = antlr.NewBaseLexer(input) + staticData := &CELLexerLexerStaticData + l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache) + l.channelNames = staticData.ChannelNames + l.modeNames = staticData.ModeNames + l.RuleNames = staticData.RuleNames + l.LiteralNames = staticData.LiteralNames + l.SymbolicNames = staticData.SymbolicNames + l.GrammarFileName = "CEL.g4" + // TODO: l.EOF = antlr.TokenEOF + + return l +} + +// CELLexer tokens. +const ( + CELLexerEQUALS = 1 + CELLexerNOT_EQUALS = 2 + CELLexerIN = 3 + CELLexerLESS = 4 + CELLexerLESS_EQUALS = 5 + CELLexerGREATER_EQUALS = 6 + CELLexerGREATER = 7 + CELLexerLOGICAL_AND = 8 + CELLexerLOGICAL_OR = 9 + CELLexerLBRACKET = 10 + CELLexerRPRACKET = 11 + CELLexerLBRACE = 12 + CELLexerRBRACE = 13 + CELLexerLPAREN = 14 + CELLexerRPAREN = 15 + CELLexerDOT = 16 + CELLexerCOMMA = 17 + CELLexerMINUS = 18 + CELLexerEXCLAM = 19 + CELLexerQUESTIONMARK = 20 + CELLexerCOLON = 21 + CELLexerPLUS = 22 + CELLexerSTAR = 23 + CELLexerSLASH = 24 + CELLexerPERCENT = 25 + CELLexerCEL_TRUE = 26 + CELLexerCEL_FALSE = 27 + CELLexerNUL = 28 + CELLexerWHITESPACE = 29 + CELLexerCOMMENT = 30 + CELLexerNUM_FLOAT = 31 + CELLexerNUM_INT = 32 + CELLexerNUM_UINT = 33 + CELLexerSTRING = 34 + CELLexerBYTES = 35 + CELLexerIDENTIFIER = 36 +) + diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_listener.go new file mode 100644 index 0000000000..22dc997898 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_listener.go @@ -0,0 +1,208 @@ +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. + +package gen // CEL +import "github.com/antlr4-go/antlr/v4" + + +// CELListener is a complete listener for a parse tree produced by CELParser. +type CELListener interface { + antlr.ParseTreeListener + + // EnterStart is called when entering the start production. + EnterStart(c *StartContext) + + // EnterExpr is called when entering the expr production. + EnterExpr(c *ExprContext) + + // EnterConditionalOr is called when entering the conditionalOr production. + EnterConditionalOr(c *ConditionalOrContext) + + // EnterConditionalAnd is called when entering the conditionalAnd production. + EnterConditionalAnd(c *ConditionalAndContext) + + // EnterRelation is called when entering the relation production. + EnterRelation(c *RelationContext) + + // EnterCalc is called when entering the calc production. 
+ EnterCalc(c *CalcContext) + + // EnterMemberExpr is called when entering the MemberExpr production. + EnterMemberExpr(c *MemberExprContext) + + // EnterLogicalNot is called when entering the LogicalNot production. + EnterLogicalNot(c *LogicalNotContext) + + // EnterNegate is called when entering the Negate production. + EnterNegate(c *NegateContext) + + // EnterMemberCall is called when entering the MemberCall production. + EnterMemberCall(c *MemberCallContext) + + // EnterSelect is called when entering the Select production. + EnterSelect(c *SelectContext) + + // EnterPrimaryExpr is called when entering the PrimaryExpr production. + EnterPrimaryExpr(c *PrimaryExprContext) + + // EnterIndex is called when entering the Index production. + EnterIndex(c *IndexContext) + + // EnterIdentOrGlobalCall is called when entering the IdentOrGlobalCall production. + EnterIdentOrGlobalCall(c *IdentOrGlobalCallContext) + + // EnterNested is called when entering the Nested production. + EnterNested(c *NestedContext) + + // EnterCreateList is called when entering the CreateList production. + EnterCreateList(c *CreateListContext) + + // EnterCreateStruct is called when entering the CreateStruct production. + EnterCreateStruct(c *CreateStructContext) + + // EnterCreateMessage is called when entering the CreateMessage production. + EnterCreateMessage(c *CreateMessageContext) + + // EnterConstantLiteral is called when entering the ConstantLiteral production. + EnterConstantLiteral(c *ConstantLiteralContext) + + // EnterExprList is called when entering the exprList production. + EnterExprList(c *ExprListContext) + + // EnterListInit is called when entering the listInit production. + EnterListInit(c *ListInitContext) + + // EnterFieldInitializerList is called when entering the fieldInitializerList production. + EnterFieldInitializerList(c *FieldInitializerListContext) + + // EnterOptField is called when entering the optField production. + EnterOptField(c *OptFieldContext) + + // EnterMapInitializerList is called when entering the mapInitializerList production. + EnterMapInitializerList(c *MapInitializerListContext) + + // EnterOptExpr is called when entering the optExpr production. + EnterOptExpr(c *OptExprContext) + + // EnterInt is called when entering the Int production. + EnterInt(c *IntContext) + + // EnterUint is called when entering the Uint production. + EnterUint(c *UintContext) + + // EnterDouble is called when entering the Double production. + EnterDouble(c *DoubleContext) + + // EnterString is called when entering the String production. + EnterString(c *StringContext) + + // EnterBytes is called when entering the Bytes production. + EnterBytes(c *BytesContext) + + // EnterBoolTrue is called when entering the BoolTrue production. + EnterBoolTrue(c *BoolTrueContext) + + // EnterBoolFalse is called when entering the BoolFalse production. + EnterBoolFalse(c *BoolFalseContext) + + // EnterNull is called when entering the Null production. + EnterNull(c *NullContext) + + // ExitStart is called when exiting the start production. + ExitStart(c *StartContext) + + // ExitExpr is called when exiting the expr production. + ExitExpr(c *ExprContext) + + // ExitConditionalOr is called when exiting the conditionalOr production. + ExitConditionalOr(c *ConditionalOrContext) + + // ExitConditionalAnd is called when exiting the conditionalAnd production. + ExitConditionalAnd(c *ConditionalAndContext) + + // ExitRelation is called when exiting the relation production. 
+ ExitRelation(c *RelationContext) + + // ExitCalc is called when exiting the calc production. + ExitCalc(c *CalcContext) + + // ExitMemberExpr is called when exiting the MemberExpr production. + ExitMemberExpr(c *MemberExprContext) + + // ExitLogicalNot is called when exiting the LogicalNot production. + ExitLogicalNot(c *LogicalNotContext) + + // ExitNegate is called when exiting the Negate production. + ExitNegate(c *NegateContext) + + // ExitMemberCall is called when exiting the MemberCall production. + ExitMemberCall(c *MemberCallContext) + + // ExitSelect is called when exiting the Select production. + ExitSelect(c *SelectContext) + + // ExitPrimaryExpr is called when exiting the PrimaryExpr production. + ExitPrimaryExpr(c *PrimaryExprContext) + + // ExitIndex is called when exiting the Index production. + ExitIndex(c *IndexContext) + + // ExitIdentOrGlobalCall is called when exiting the IdentOrGlobalCall production. + ExitIdentOrGlobalCall(c *IdentOrGlobalCallContext) + + // ExitNested is called when exiting the Nested production. + ExitNested(c *NestedContext) + + // ExitCreateList is called when exiting the CreateList production. + ExitCreateList(c *CreateListContext) + + // ExitCreateStruct is called when exiting the CreateStruct production. + ExitCreateStruct(c *CreateStructContext) + + // ExitCreateMessage is called when exiting the CreateMessage production. + ExitCreateMessage(c *CreateMessageContext) + + // ExitConstantLiteral is called when exiting the ConstantLiteral production. + ExitConstantLiteral(c *ConstantLiteralContext) + + // ExitExprList is called when exiting the exprList production. + ExitExprList(c *ExprListContext) + + // ExitListInit is called when exiting the listInit production. + ExitListInit(c *ListInitContext) + + // ExitFieldInitializerList is called when exiting the fieldInitializerList production. + ExitFieldInitializerList(c *FieldInitializerListContext) + + // ExitOptField is called when exiting the optField production. + ExitOptField(c *OptFieldContext) + + // ExitMapInitializerList is called when exiting the mapInitializerList production. + ExitMapInitializerList(c *MapInitializerListContext) + + // ExitOptExpr is called when exiting the optExpr production. + ExitOptExpr(c *OptExprContext) + + // ExitInt is called when exiting the Int production. + ExitInt(c *IntContext) + + // ExitUint is called when exiting the Uint production. + ExitUint(c *UintContext) + + // ExitDouble is called when exiting the Double production. + ExitDouble(c *DoubleContext) + + // ExitString is called when exiting the String production. + ExitString(c *StringContext) + + // ExitBytes is called when exiting the Bytes production. + ExitBytes(c *BytesContext) + + // ExitBoolTrue is called when exiting the BoolTrue production. + ExitBoolTrue(c *BoolTrueContext) + + // ExitBoolFalse is called when exiting the BoolFalse production. + ExitBoolFalse(c *BoolFalseContext) + + // ExitNull is called when exiting the Null production. + ExitNull(c *NullContext) +} diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_parser.go new file mode 100644 index 0000000000..35334af615 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_parser.go @@ -0,0 +1,6274 @@ +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. 
+ +package gen // CEL +import ( + "fmt" + "strconv" + "sync" + + "github.com/antlr4-go/antlr/v4" +) + +// Suppress unused import errors +var _ = fmt.Printf +var _ = strconv.Itoa +var _ = sync.Once{} + + +type CELParser struct { + *antlr.BaseParser +} + +var CELParserStaticData struct { + once sync.Once + serializedATN []int32 + LiteralNames []string + SymbolicNames []string + RuleNames []string + PredictionContextCache *antlr.PredictionContextCache + atn *antlr.ATN + decisionToDFA []*antlr.DFA +} + +func celParserInit() { + staticData := &CELParserStaticData + staticData.LiteralNames = []string{ + "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", + "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", + "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", + } + staticData.SymbolicNames = []string{ + "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", + "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", + "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", + "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", + "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", + "STRING", "BYTES", "IDENTIFIER", + } + staticData.RuleNames = []string{ + "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc", + "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList", + "optField", "mapInitializerList", "optExpr", "literal", + } + staticData.PredictionContextCache = antlr.NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, + 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, + 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, + 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, + 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, + 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, + 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, + 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, + 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, + 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, + 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, + 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, + 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, + 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, + 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, + 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, + 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, + 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, + 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, + 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, + 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, + 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, + 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, + 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, + 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, + 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 
4, 6, 8, 10, 12, 14, 16, 18, 20, + 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, + 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, + 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, + 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, + 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, + 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, + 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, + 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, + 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, + 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, + 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, + 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, + 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, + 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1, + 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, + 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, + 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, + 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, + 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, + 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, + 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, + 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, + 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, + 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, + 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, + 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, + 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, + 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, + 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, + 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, + 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, + 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, + 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, + 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, + 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, + 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, + 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, + 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, + 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, + 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, + 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, + 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, + 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, + 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, + 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, + 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, + 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, + 0, 153, 155, 3, 
26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, + 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, + 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, + 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, + 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, + 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, + 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, + 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, + 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, + 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, + 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, + 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, + 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, + 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, + 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, + 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, + 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, + 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, + 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, + 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, + 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, + 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, + 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, + 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, + 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219, + 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, + 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, + 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, + 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, + 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, + 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, + 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, + 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, + 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, + 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, + 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, + 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, + 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, + 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, + 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, + 240, 248, +} + deserializer := antlr.NewATNDeserializer(nil) + staticData.atn = deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = antlr.NewDFA(state, index) + } +} + +// CELParserInit initializes any static state used to implement CELParser. 
By default the +// static state used to implement the parser is lazily initialized during the first call to +// NewCELParser(). You can call this function if you wish to initialize the static state ahead +// of time. +func CELParserInit() { + staticData := &CELParserStaticData + staticData.once.Do(celParserInit) +} + +// NewCELParser produces a new parser instance for the optional input antlr.TokenStream. +func NewCELParser(input antlr.TokenStream) *CELParser { + CELParserInit() + this := new(CELParser) + this.BaseParser = antlr.NewBaseParser(input) + staticData := &CELParserStaticData + this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache) + this.RuleNames = staticData.RuleNames + this.LiteralNames = staticData.LiteralNames + this.SymbolicNames = staticData.SymbolicNames + this.GrammarFileName = "CEL.g4" + + return this +} + + +// CELParser tokens. +const ( + CELParserEOF = antlr.TokenEOF + CELParserEQUALS = 1 + CELParserNOT_EQUALS = 2 + CELParserIN = 3 + CELParserLESS = 4 + CELParserLESS_EQUALS = 5 + CELParserGREATER_EQUALS = 6 + CELParserGREATER = 7 + CELParserLOGICAL_AND = 8 + CELParserLOGICAL_OR = 9 + CELParserLBRACKET = 10 + CELParserRPRACKET = 11 + CELParserLBRACE = 12 + CELParserRBRACE = 13 + CELParserLPAREN = 14 + CELParserRPAREN = 15 + CELParserDOT = 16 + CELParserCOMMA = 17 + CELParserMINUS = 18 + CELParserEXCLAM = 19 + CELParserQUESTIONMARK = 20 + CELParserCOLON = 21 + CELParserPLUS = 22 + CELParserSTAR = 23 + CELParserSLASH = 24 + CELParserPERCENT = 25 + CELParserCEL_TRUE = 26 + CELParserCEL_FALSE = 27 + CELParserNUL = 28 + CELParserWHITESPACE = 29 + CELParserCOMMENT = 30 + CELParserNUM_FLOAT = 31 + CELParserNUM_INT = 32 + CELParserNUM_UINT = 33 + CELParserSTRING = 34 + CELParserBYTES = 35 + CELParserIDENTIFIER = 36 +) + +// CELParser rules. +const ( + CELParserRULE_start = 0 + CELParserRULE_expr = 1 + CELParserRULE_conditionalOr = 2 + CELParserRULE_conditionalAnd = 3 + CELParserRULE_relation = 4 + CELParserRULE_calc = 5 + CELParserRULE_unary = 6 + CELParserRULE_member = 7 + CELParserRULE_primary = 8 + CELParserRULE_exprList = 9 + CELParserRULE_listInit = 10 + CELParserRULE_fieldInitializerList = 11 + CELParserRULE_optField = 12 + CELParserRULE_mapInitializerList = 13 + CELParserRULE_optExpr = 14 + CELParserRULE_literal = 15 +) + +// IStartContext is an interface to support dynamic dispatch. +type IStartContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetE returns the e rule contexts. + GetE() IExprContext + + + // SetE sets the e rule contexts. + SetE(IExprContext) + + + // Getter signatures + EOF() antlr.TerminalNode + Expr() IExprContext + + // IsStartContext differentiates from other interfaces. 
+ IsStartContext() +} + +type StartContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + e IExprContext +} + +func NewEmptyStartContext() *StartContext { + var p = new(StartContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_start + return p +} + +func InitEmptyStartContext(p *StartContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_start +} + +func (*StartContext) IsStartContext() {} + +func NewStartContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *StartContext { + var p = new(StartContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_start + + return p +} + +func (s *StartContext) GetParser() antlr.Parser { return s.parser } + +func (s *StartContext) GetE() IExprContext { return s.e } + + +func (s *StartContext) SetE(v IExprContext) { s.e = v } + + +func (s *StartContext) EOF() antlr.TerminalNode { + return s.GetToken(CELParserEOF, 0) +} + +func (s *StartContext) Expr() IExprContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *StartContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *StartContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *StartContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterStart(s) + } +} + +func (s *StartContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitStart(s) + } +} + +func (s *StartContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitStart(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) Start_() (localctx IStartContext) { + localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 0, CELParserRULE_start) + p.EnterOuterAlt(localctx, 1) + { + p.SetState(32) + + var _x = p.Expr() + + + localctx.(*StartContext).e = _x + } + { + p.SetState(33) + p.Match(CELParserEOF) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IExprContext is an interface to support dynamic dispatch. +type IExprContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetOp returns the op token. + GetOp() antlr.Token + + + // SetOp sets the op token. + SetOp(antlr.Token) + + + // GetE returns the e rule contexts. + GetE() IConditionalOrContext + + // GetE1 returns the e1 rule contexts. + GetE1() IConditionalOrContext + + // GetE2 returns the e2 rule contexts. + GetE2() IExprContext + + + // SetE sets the e rule contexts. + SetE(IConditionalOrContext) + + // SetE1 sets the e1 rule contexts. + SetE1(IConditionalOrContext) + + // SetE2 sets the e2 rule contexts. 
+ SetE2(IExprContext) + + + // Getter signatures + AllConditionalOr() []IConditionalOrContext + ConditionalOr(i int) IConditionalOrContext + COLON() antlr.TerminalNode + QUESTIONMARK() antlr.TerminalNode + Expr() IExprContext + + // IsExprContext differentiates from other interfaces. + IsExprContext() +} + +type ExprContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + e IConditionalOrContext + op antlr.Token + e1 IConditionalOrContext + e2 IExprContext +} + +func NewEmptyExprContext() *ExprContext { + var p = new(ExprContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_expr + return p +} + +func InitEmptyExprContext(p *ExprContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_expr +} + +func (*ExprContext) IsExprContext() {} + +func NewExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprContext { + var p = new(ExprContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_expr + + return p +} + +func (s *ExprContext) GetParser() antlr.Parser { return s.parser } + +func (s *ExprContext) GetOp() antlr.Token { return s.op } + + +func (s *ExprContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *ExprContext) GetE() IConditionalOrContext { return s.e } + +func (s *ExprContext) GetE1() IConditionalOrContext { return s.e1 } + +func (s *ExprContext) GetE2() IExprContext { return s.e2 } + + +func (s *ExprContext) SetE(v IConditionalOrContext) { s.e = v } + +func (s *ExprContext) SetE1(v IConditionalOrContext) { s.e1 = v } + +func (s *ExprContext) SetE2(v IExprContext) { s.e2 = v } + + +func (s *ExprContext) AllConditionalOr() []IConditionalOrContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IConditionalOrContext); ok { + len++ + } + } + + tst := make([]IConditionalOrContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IConditionalOrContext); ok { + tst[i] = t.(IConditionalOrContext) + i++ + } + } + + return tst +} + +func (s *ExprContext) ConditionalOr(i int) IConditionalOrContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IConditionalOrContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IConditionalOrContext) +} + +func (s *ExprContext) COLON() antlr.TerminalNode { + return s.GetToken(CELParserCOLON, 0) +} + +func (s *ExprContext) QUESTIONMARK() antlr.TerminalNode { + return s.GetToken(CELParserQUESTIONMARK, 0) +} + +func (s *ExprContext) Expr() IExprContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *ExprContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterExpr(s) + } +} + +func (s *ExprContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitExpr(s) + } +} + +func (s *ExprContext) Accept(visitor 
antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitExpr(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) Expr() (localctx IExprContext) { + localctx = NewExprContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 2, CELParserRULE_expr) + var _la int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(35) + + var _x = p.ConditionalOr() + + + localctx.(*ExprContext).e = _x + } + p.SetState(41) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserQUESTIONMARK { + { + p.SetState(36) + + var _m = p.Match(CELParserQUESTIONMARK) + + localctx.(*ExprContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(37) + + var _x = p.ConditionalOr() + + + localctx.(*ExprContext).e1 = _x + } + { + p.SetState(38) + p.Match(CELParserCOLON) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(39) + + var _x = p.Expr() + + + localctx.(*ExprContext).e2 = _x + } + + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IConditionalOrContext is an interface to support dynamic dispatch. +type IConditionalOrContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetS9 returns the s9 token. + GetS9() antlr.Token + + + // SetS9 sets the s9 token. + SetS9(antlr.Token) + + + // GetOps returns the ops token list. + GetOps() []antlr.Token + + + // SetOps sets the ops token list. + SetOps([]antlr.Token) + + + // GetE returns the e rule contexts. + GetE() IConditionalAndContext + + // Get_conditionalAnd returns the _conditionalAnd rule contexts. + Get_conditionalAnd() IConditionalAndContext + + + // SetE sets the e rule contexts. + SetE(IConditionalAndContext) + + // Set_conditionalAnd sets the _conditionalAnd rule contexts. + Set_conditionalAnd(IConditionalAndContext) + + + // GetE1 returns the e1 rule context list. + GetE1() []IConditionalAndContext + + + // SetE1 sets the e1 rule context list. + SetE1([]IConditionalAndContext) + + + // Getter signatures + AllConditionalAnd() []IConditionalAndContext + ConditionalAnd(i int) IConditionalAndContext + AllLOGICAL_OR() []antlr.TerminalNode + LOGICAL_OR(i int) antlr.TerminalNode + + // IsConditionalOrContext differentiates from other interfaces. 
+ IsConditionalOrContext() +} + +type ConditionalOrContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + e IConditionalAndContext + s9 antlr.Token + ops []antlr.Token + _conditionalAnd IConditionalAndContext + e1 []IConditionalAndContext +} + +func NewEmptyConditionalOrContext() *ConditionalOrContext { + var p = new(ConditionalOrContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_conditionalOr + return p +} + +func InitEmptyConditionalOrContext(p *ConditionalOrContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_conditionalOr +} + +func (*ConditionalOrContext) IsConditionalOrContext() {} + +func NewConditionalOrContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalOrContext { + var p = new(ConditionalOrContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_conditionalOr + + return p +} + +func (s *ConditionalOrContext) GetParser() antlr.Parser { return s.parser } + +func (s *ConditionalOrContext) GetS9() antlr.Token { return s.s9 } + + +func (s *ConditionalOrContext) SetS9(v antlr.Token) { s.s9 = v } + + +func (s *ConditionalOrContext) GetOps() []antlr.Token { return s.ops } + + +func (s *ConditionalOrContext) SetOps(v []antlr.Token) { s.ops = v } + + +func (s *ConditionalOrContext) GetE() IConditionalAndContext { return s.e } + +func (s *ConditionalOrContext) Get_conditionalAnd() IConditionalAndContext { return s._conditionalAnd } + + +func (s *ConditionalOrContext) SetE(v IConditionalAndContext) { s.e = v } + +func (s *ConditionalOrContext) Set_conditionalAnd(v IConditionalAndContext) { s._conditionalAnd = v } + + +func (s *ConditionalOrContext) GetE1() []IConditionalAndContext { return s.e1 } + + +func (s *ConditionalOrContext) SetE1(v []IConditionalAndContext) { s.e1 = v } + + +func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IConditionalAndContext); ok { + len++ + } + } + + tst := make([]IConditionalAndContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IConditionalAndContext); ok { + tst[i] = t.(IConditionalAndContext) + i++ + } + } + + return tst +} + +func (s *ConditionalOrContext) ConditionalAnd(i int) IConditionalAndContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IConditionalAndContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IConditionalAndContext) +} + +func (s *ConditionalOrContext) AllLOGICAL_OR() []antlr.TerminalNode { + return s.GetTokens(CELParserLOGICAL_OR) +} + +func (s *ConditionalOrContext) LOGICAL_OR(i int) antlr.TerminalNode { + return s.GetToken(CELParserLOGICAL_OR, i) +} + +func (s *ConditionalOrContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ConditionalOrContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *ConditionalOrContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterConditionalOr(s) + } +} + +func (s *ConditionalOrContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + 
listenerT.ExitConditionalOr(s) + } +} + +func (s *ConditionalOrContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitConditionalOr(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) { + localctx = NewConditionalOrContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 4, CELParserRULE_conditionalOr) + var _la int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(43) + + var _x = p.ConditionalAnd() + + + localctx.(*ConditionalOrContext).e = _x + } + p.SetState(48) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + for _la == CELParserLOGICAL_OR { + { + p.SetState(44) + + var _m = p.Match(CELParserLOGICAL_OR) + + localctx.(*ConditionalOrContext).s9 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9) + { + p.SetState(45) + + var _x = p.ConditionalAnd() + + + localctx.(*ConditionalOrContext)._conditionalAnd = _x + } + localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd) + + + p.SetState(50) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IConditionalAndContext is an interface to support dynamic dispatch. +type IConditionalAndContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetS8 returns the s8 token. + GetS8() antlr.Token + + + // SetS8 sets the s8 token. + SetS8(antlr.Token) + + + // GetOps returns the ops token list. + GetOps() []antlr.Token + + + // SetOps sets the ops token list. + SetOps([]antlr.Token) + + + // GetE returns the e rule contexts. + GetE() IRelationContext + + // Get_relation returns the _relation rule contexts. + Get_relation() IRelationContext + + + // SetE sets the e rule contexts. + SetE(IRelationContext) + + // Set_relation sets the _relation rule contexts. + Set_relation(IRelationContext) + + + // GetE1 returns the e1 rule context list. + GetE1() []IRelationContext + + + // SetE1 sets the e1 rule context list. + SetE1([]IRelationContext) + + + // Getter signatures + AllRelation() []IRelationContext + Relation(i int) IRelationContext + AllLOGICAL_AND() []antlr.TerminalNode + LOGICAL_AND(i int) antlr.TerminalNode + + // IsConditionalAndContext differentiates from other interfaces. 
+ IsConditionalAndContext() +} + +type ConditionalAndContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + e IRelationContext + s8 antlr.Token + ops []antlr.Token + _relation IRelationContext + e1 []IRelationContext +} + +func NewEmptyConditionalAndContext() *ConditionalAndContext { + var p = new(ConditionalAndContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_conditionalAnd + return p +} + +func InitEmptyConditionalAndContext(p *ConditionalAndContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_conditionalAnd +} + +func (*ConditionalAndContext) IsConditionalAndContext() {} + +func NewConditionalAndContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalAndContext { + var p = new(ConditionalAndContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_conditionalAnd + + return p +} + +func (s *ConditionalAndContext) GetParser() antlr.Parser { return s.parser } + +func (s *ConditionalAndContext) GetS8() antlr.Token { return s.s8 } + + +func (s *ConditionalAndContext) SetS8(v antlr.Token) { s.s8 = v } + + +func (s *ConditionalAndContext) GetOps() []antlr.Token { return s.ops } + + +func (s *ConditionalAndContext) SetOps(v []antlr.Token) { s.ops = v } + + +func (s *ConditionalAndContext) GetE() IRelationContext { return s.e } + +func (s *ConditionalAndContext) Get_relation() IRelationContext { return s._relation } + + +func (s *ConditionalAndContext) SetE(v IRelationContext) { s.e = v } + +func (s *ConditionalAndContext) Set_relation(v IRelationContext) { s._relation = v } + + +func (s *ConditionalAndContext) GetE1() []IRelationContext { return s.e1 } + + +func (s *ConditionalAndContext) SetE1(v []IRelationContext) { s.e1 = v } + + +func (s *ConditionalAndContext) AllRelation() []IRelationContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IRelationContext); ok { + len++ + } + } + + tst := make([]IRelationContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IRelationContext); ok { + tst[i] = t.(IRelationContext) + i++ + } + } + + return tst +} + +func (s *ConditionalAndContext) Relation(i int) IRelationContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IRelationContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IRelationContext) +} + +func (s *ConditionalAndContext) AllLOGICAL_AND() []antlr.TerminalNode { + return s.GetTokens(CELParserLOGICAL_AND) +} + +func (s *ConditionalAndContext) LOGICAL_AND(i int) antlr.TerminalNode { + return s.GetToken(CELParserLOGICAL_AND, i) +} + +func (s *ConditionalAndContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ConditionalAndContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *ConditionalAndContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterConditionalAnd(s) + } +} + +func (s *ConditionalAndContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitConditionalAnd(s) + } +} + +func (s *ConditionalAndContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { 
+ switch t := visitor.(type) { + case CELVisitor: + return t.VisitConditionalAnd(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { + localctx = NewConditionalAndContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 6, CELParserRULE_conditionalAnd) + var _la int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(51) + + var _x = p.relation(0) + + localctx.(*ConditionalAndContext).e = _x + } + p.SetState(56) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + for _la == CELParserLOGICAL_AND { + { + p.SetState(52) + + var _m = p.Match(CELParserLOGICAL_AND) + + localctx.(*ConditionalAndContext).s8 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8) + { + p.SetState(53) + + var _x = p.relation(0) + + localctx.(*ConditionalAndContext)._relation = _x + } + localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation) + + + p.SetState(58) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IRelationContext is an interface to support dynamic dispatch. +type IRelationContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetOp returns the op token. + GetOp() antlr.Token + + + // SetOp sets the op token. + SetOp(antlr.Token) + + + // Getter signatures + Calc() ICalcContext + AllRelation() []IRelationContext + Relation(i int) IRelationContext + LESS() antlr.TerminalNode + LESS_EQUALS() antlr.TerminalNode + GREATER_EQUALS() antlr.TerminalNode + GREATER() antlr.TerminalNode + EQUALS() antlr.TerminalNode + NOT_EQUALS() antlr.TerminalNode + IN() antlr.TerminalNode + + // IsRelationContext differentiates from other interfaces. 
+ IsRelationContext() +} + +type RelationContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + op antlr.Token +} + +func NewEmptyRelationContext() *RelationContext { + var p = new(RelationContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_relation + return p +} + +func InitEmptyRelationContext(p *RelationContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_relation +} + +func (*RelationContext) IsRelationContext() {} + +func NewRelationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *RelationContext { + var p = new(RelationContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_relation + + return p +} + +func (s *RelationContext) GetParser() antlr.Parser { return s.parser } + +func (s *RelationContext) GetOp() antlr.Token { return s.op } + + +func (s *RelationContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *RelationContext) Calc() ICalcContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICalcContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(ICalcContext) +} + +func (s *RelationContext) AllRelation() []IRelationContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IRelationContext); ok { + len++ + } + } + + tst := make([]IRelationContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IRelationContext); ok { + tst[i] = t.(IRelationContext) + i++ + } + } + + return tst +} + +func (s *RelationContext) Relation(i int) IRelationContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IRelationContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IRelationContext) +} + +func (s *RelationContext) LESS() antlr.TerminalNode { + return s.GetToken(CELParserLESS, 0) +} + +func (s *RelationContext) LESS_EQUALS() antlr.TerminalNode { + return s.GetToken(CELParserLESS_EQUALS, 0) +} + +func (s *RelationContext) GREATER_EQUALS() antlr.TerminalNode { + return s.GetToken(CELParserGREATER_EQUALS, 0) +} + +func (s *RelationContext) GREATER() antlr.TerminalNode { + return s.GetToken(CELParserGREATER, 0) +} + +func (s *RelationContext) EQUALS() antlr.TerminalNode { + return s.GetToken(CELParserEQUALS, 0) +} + +func (s *RelationContext) NOT_EQUALS() antlr.TerminalNode { + return s.GetToken(CELParserNOT_EQUALS, 0) +} + +func (s *RelationContext) IN() antlr.TerminalNode { + return s.GetToken(CELParserIN, 0) +} + +func (s *RelationContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *RelationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *RelationContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterRelation(s) + } +} + +func (s *RelationContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitRelation(s) + } +} + +func (s *RelationContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitRelation(s) + + default: + return t.VisitChildren(s) + } +} + + 
+ + + +func (p *CELParser) Relation() (localctx IRelationContext) { + return p.relation(0) +} + +func (p *CELParser) relation(_p int) (localctx IRelationContext) { + var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + + _parentState := p.GetState() + localctx = NewRelationContext(p, p.GetParserRuleContext(), _parentState) + var _prevctx IRelationContext = localctx + var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning. + _startState := 8 + p.EnterRecursionRule(localctx, 8, CELParserRULE_relation, _p) + var _la int + + var _alt int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(60) + p.calc(0) + } + + p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) + p.SetState(67) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + if p.GetParseListeners() != nil { + p.TriggerExitRuleEvent() + } + _prevctx = localctx + localctx = NewRelationContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_relation) + p.SetState(62) + + if !(p.Precpred(p.GetParserRuleContext(), 1)) { + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + goto errorExit + } + { + p.SetState(63) + + var _lt = p.GetTokenStream().LT(1) + + localctx.(*RelationContext).op = _lt + + _la = p.GetTokenStream().LA(1) + + if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 254) != 0)) { + var _ri = p.GetErrorHandler().RecoverInline(p) + + localctx.(*RelationContext).op = _ri + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(64) + p.relation(2) + } + + + } + p.SetState(69) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + + + + errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.UnrollRecursionContexts(_parentctx) + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// ICalcContext is an interface to support dynamic dispatch. +type ICalcContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetOp returns the op token. + GetOp() antlr.Token + + + // SetOp sets the op token. + SetOp(antlr.Token) + + + // Getter signatures + Unary() IUnaryContext + AllCalc() []ICalcContext + Calc(i int) ICalcContext + STAR() antlr.TerminalNode + SLASH() antlr.TerminalNode + PERCENT() antlr.TerminalNode + PLUS() antlr.TerminalNode + MINUS() antlr.TerminalNode + + // IsCalcContext differentiates from other interfaces. 
+ IsCalcContext() +} + +type CalcContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + op antlr.Token +} + +func NewEmptyCalcContext() *CalcContext { + var p = new(CalcContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_calc + return p +} + +func InitEmptyCalcContext(p *CalcContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_calc +} + +func (*CalcContext) IsCalcContext() {} + +func NewCalcContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *CalcContext { + var p = new(CalcContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_calc + + return p +} + +func (s *CalcContext) GetParser() antlr.Parser { return s.parser } + +func (s *CalcContext) GetOp() antlr.Token { return s.op } + + +func (s *CalcContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *CalcContext) Unary() IUnaryContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IUnaryContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IUnaryContext) +} + +func (s *CalcContext) AllCalc() []ICalcContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ICalcContext); ok { + len++ + } + } + + tst := make([]ICalcContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ICalcContext); ok { + tst[i] = t.(ICalcContext) + i++ + } + } + + return tst +} + +func (s *CalcContext) Calc(i int) ICalcContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICalcContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ICalcContext) +} + +func (s *CalcContext) STAR() antlr.TerminalNode { + return s.GetToken(CELParserSTAR, 0) +} + +func (s *CalcContext) SLASH() antlr.TerminalNode { + return s.GetToken(CELParserSLASH, 0) +} + +func (s *CalcContext) PERCENT() antlr.TerminalNode { + return s.GetToken(CELParserPERCENT, 0) +} + +func (s *CalcContext) PLUS() antlr.TerminalNode { + return s.GetToken(CELParserPLUS, 0) +} + +func (s *CalcContext) MINUS() antlr.TerminalNode { + return s.GetToken(CELParserMINUS, 0) +} + +func (s *CalcContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *CalcContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *CalcContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterCalc(s) + } +} + +func (s *CalcContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitCalc(s) + } +} + +func (s *CalcContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitCalc(s) + + default: + return t.VisitChildren(s) + } +} + + + + + +func (p *CELParser) Calc() (localctx ICalcContext) { + return p.calc(0) +} + +func (p *CELParser) calc(_p int) (localctx ICalcContext) { + var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + + _parentState := p.GetState() + localctx = NewCalcContext(p, p.GetParserRuleContext(), _parentState) + var _prevctx ICalcContext = localctx + var _ antlr.ParserRuleContext = _prevctx // TODO: To 
prevent unused variable warning. + _startState := 10 + p.EnterRecursionRule(localctx, 10, CELParserRULE_calc, _p) + var _la int + + var _alt int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(71) + p.Unary() + } + + p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) + p.SetState(81) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + if p.GetParseListeners() != nil { + p.TriggerExitRuleEvent() + } + _prevctx = localctx + p.SetState(79) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 4, p.GetParserRuleContext()) { + case 1: + localctx = NewCalcContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc) + p.SetState(73) + + if !(p.Precpred(p.GetParserRuleContext(), 2)) { + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) + goto errorExit + } + { + p.SetState(74) + + var _lt = p.GetTokenStream().LT(1) + + localctx.(*CalcContext).op = _lt + + _la = p.GetTokenStream().LA(1) + + if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 58720256) != 0)) { + var _ri = p.GetErrorHandler().RecoverInline(p) + + localctx.(*CalcContext).op = _ri + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(75) + p.calc(3) + } + + + case 2: + localctx = NewCalcContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc) + p.SetState(76) + + if !(p.Precpred(p.GetParserRuleContext(), 1)) { + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + goto errorExit + } + { + p.SetState(77) + + var _lt = p.GetTokenStream().LT(1) + + localctx.(*CalcContext).op = _lt + + _la = p.GetTokenStream().LA(1) + + if !(_la == CELParserMINUS || _la == CELParserPLUS) { + var _ri = p.GetErrorHandler().RecoverInline(p) + + localctx.(*CalcContext).op = _ri + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(78) + p.calc(2) + } + + case antlr.ATNInvalidAltNumber: + goto errorExit + } + + } + p.SetState(83) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + + + + errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.UnrollRecursionContexts(_parentctx) + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IUnaryContext is an interface to support dynamic dispatch. +type IUnaryContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + // IsUnaryContext differentiates from other interfaces. 
+ IsUnaryContext() +} + +type UnaryContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyUnaryContext() *UnaryContext { + var p = new(UnaryContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_unary + return p +} + +func InitEmptyUnaryContext(p *UnaryContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_unary +} + +func (*UnaryContext) IsUnaryContext() {} + +func NewUnaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *UnaryContext { + var p = new(UnaryContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_unary + + return p +} + +func (s *UnaryContext) GetParser() antlr.Parser { return s.parser } + +func (s *UnaryContext) CopyAll(ctx *UnaryContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) +} + +func (s *UnaryContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *UnaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + + + +type LogicalNotContext struct { + UnaryContext + s19 antlr.Token + ops []antlr.Token +} + +func NewLogicalNotContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *LogicalNotContext { + var p = new(LogicalNotContext) + + InitEmptyUnaryContext(&p.UnaryContext) + p.parser = parser + p.CopyAll(ctx.(*UnaryContext)) + + return p +} + + +func (s *LogicalNotContext) GetS19() antlr.Token { return s.s19 } + + +func (s *LogicalNotContext) SetS19(v antlr.Token) { s.s19 = v } + + +func (s *LogicalNotContext) GetOps() []antlr.Token { return s.ops } + + +func (s *LogicalNotContext) SetOps(v []antlr.Token) { s.ops = v } + +func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *LogicalNotContext) Member() IMemberContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IMemberContext) +} + +func (s *LogicalNotContext) AllEXCLAM() []antlr.TerminalNode { + return s.GetTokens(CELParserEXCLAM) +} + +func (s *LogicalNotContext) EXCLAM(i int) antlr.TerminalNode { + return s.GetToken(CELParserEXCLAM, i) +} + + +func (s *LogicalNotContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterLogicalNot(s) + } +} + +func (s *LogicalNotContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitLogicalNot(s) + } +} + +func (s *LogicalNotContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitLogicalNot(s) + + default: + return t.VisitChildren(s) + } +} + + +type MemberExprContext struct { + UnaryContext +} + +func NewMemberExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberExprContext { + var p = new(MemberExprContext) + + InitEmptyUnaryContext(&p.UnaryContext) + p.parser = parser + p.CopyAll(ctx.(*UnaryContext)) + + return p +} + +func (s *MemberExprContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *MemberExprContext) Member() IMemberContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { 
+ return nil + } + + return t.(IMemberContext) +} + + +func (s *MemberExprContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterMemberExpr(s) + } +} + +func (s *MemberExprContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitMemberExpr(s) + } +} + +func (s *MemberExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitMemberExpr(s) + + default: + return t.VisitChildren(s) + } +} + + +type NegateContext struct { + UnaryContext + s18 antlr.Token + ops []antlr.Token +} + +func NewNegateContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NegateContext { + var p = new(NegateContext) + + InitEmptyUnaryContext(&p.UnaryContext) + p.parser = parser + p.CopyAll(ctx.(*UnaryContext)) + + return p +} + + +func (s *NegateContext) GetS18() antlr.Token { return s.s18 } + + +func (s *NegateContext) SetS18(v antlr.Token) { s.s18 = v } + + +func (s *NegateContext) GetOps() []antlr.Token { return s.ops } + + +func (s *NegateContext) SetOps(v []antlr.Token) { s.ops = v } + +func (s *NegateContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *NegateContext) Member() IMemberContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IMemberContext) +} + +func (s *NegateContext) AllMINUS() []antlr.TerminalNode { + return s.GetTokens(CELParserMINUS) +} + +func (s *NegateContext) MINUS(i int) antlr.TerminalNode { + return s.GetToken(CELParserMINUS, i) +} + + +func (s *NegateContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterNegate(s) + } +} + +func (s *NegateContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitNegate(s) + } +} + +func (s *NegateContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitNegate(s) + + default: + return t.VisitChildren(s) + } +} + + + +func (p *CELParser) Unary() (localctx IUnaryContext) { + localctx = NewUnaryContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 12, CELParserRULE_unary) + var _la int + + var _alt int + + p.SetState(97) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 8, p.GetParserRuleContext()) { + case 1: + localctx = NewMemberExprContext(p, localctx) + p.EnterOuterAlt(localctx, 1) + { + p.SetState(84) + p.member(0) + } + + + case 2: + localctx = NewLogicalNotContext(p, localctx) + p.EnterOuterAlt(localctx, 2) + p.SetState(86) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + for ok := true; ok; ok = _la == CELParserEXCLAM { + { + p.SetState(85) + + var _m = p.Match(CELParserEXCLAM) + + localctx.(*LogicalNotContext).s19 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*LogicalNotContext).ops = append(localctx.(*LogicalNotContext).ops, localctx.(*LogicalNotContext).s19) + + + p.SetState(88) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(90) + p.member(0) + } + + + case 3: + 
localctx = NewNegateContext(p, localctx) + p.EnterOuterAlt(localctx, 3) + p.SetState(92) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = 1 + for ok := true; ok; ok = _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + switch _alt { + case 1: + { + p.SetState(91) + + var _m = p.Match(CELParserMINUS) + + localctx.(*NegateContext).s18 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18) + + + + + default: + p.SetError(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + goto errorExit + } + + p.SetState(94) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 7, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + { + p.SetState(96) + p.member(0) + } + + case antlr.ATNInvalidAltNumber: + goto errorExit + } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IMemberContext is an interface to support dynamic dispatch. +type IMemberContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + // IsMemberContext differentiates from other interfaces. + IsMemberContext() +} + +type MemberContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyMemberContext() *MemberContext { + var p = new(MemberContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_member + return p +} + +func InitEmptyMemberContext(p *MemberContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_member +} + +func (*MemberContext) IsMemberContext() {} + +func NewMemberContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MemberContext { + var p = new(MemberContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_member + + return p +} + +func (s *MemberContext) GetParser() antlr.Parser { return s.parser } + +func (s *MemberContext) CopyAll(ctx *MemberContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) +} + +func (s *MemberContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *MemberContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + + + + +type MemberCallContext struct { + MemberContext + op antlr.Token + id antlr.Token + open antlr.Token + args IExprListContext +} + +func NewMemberCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberCallContext { + var p = new(MemberCallContext) + + InitEmptyMemberContext(&p.MemberContext) + p.parser = parser + p.CopyAll(ctx.(*MemberContext)) + + return p +} + + +func (s *MemberCallContext) GetOp() antlr.Token { return s.op } + +func (s *MemberCallContext) GetId() antlr.Token { return s.id } + +func (s *MemberCallContext) GetOpen() antlr.Token { return s.open } + + +func (s *MemberCallContext) SetOp(v antlr.Token) { s.op = v } + +func (s *MemberCallContext) SetId(v antlr.Token) { s.id = v } + +func (s *MemberCallContext) SetOpen(v antlr.Token) { s.open = v } + + +func (s *MemberCallContext) 
GetArgs() IExprListContext { return s.args } + + +func (s *MemberCallContext) SetArgs(v IExprListContext) { s.args = v } + +func (s *MemberCallContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *MemberCallContext) Member() IMemberContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IMemberContext) +} + +func (s *MemberCallContext) RPAREN() antlr.TerminalNode { + return s.GetToken(CELParserRPAREN, 0) +} + +func (s *MemberCallContext) DOT() antlr.TerminalNode { + return s.GetToken(CELParserDOT, 0) +} + +func (s *MemberCallContext) IDENTIFIER() antlr.TerminalNode { + return s.GetToken(CELParserIDENTIFIER, 0) +} + +func (s *MemberCallContext) LPAREN() antlr.TerminalNode { + return s.GetToken(CELParserLPAREN, 0) +} + +func (s *MemberCallContext) ExprList() IExprListContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprListContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprListContext) +} + + +func (s *MemberCallContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterMemberCall(s) + } +} + +func (s *MemberCallContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitMemberCall(s) + } +} + +func (s *MemberCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitMemberCall(s) + + default: + return t.VisitChildren(s) + } +} + + +type SelectContext struct { + MemberContext + op antlr.Token + opt antlr.Token + id antlr.Token +} + +func NewSelectContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SelectContext { + var p = new(SelectContext) + + InitEmptyMemberContext(&p.MemberContext) + p.parser = parser + p.CopyAll(ctx.(*MemberContext)) + + return p +} + + +func (s *SelectContext) GetOp() antlr.Token { return s.op } + +func (s *SelectContext) GetOpt() antlr.Token { return s.opt } + +func (s *SelectContext) GetId() antlr.Token { return s.id } + + +func (s *SelectContext) SetOp(v antlr.Token) { s.op = v } + +func (s *SelectContext) SetOpt(v antlr.Token) { s.opt = v } + +func (s *SelectContext) SetId(v antlr.Token) { s.id = v } + +func (s *SelectContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *SelectContext) Member() IMemberContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IMemberContext) +} + +func (s *SelectContext) DOT() antlr.TerminalNode { + return s.GetToken(CELParserDOT, 0) +} + +func (s *SelectContext) IDENTIFIER() antlr.TerminalNode { + return s.GetToken(CELParserIDENTIFIER, 0) +} + +func (s *SelectContext) QUESTIONMARK() antlr.TerminalNode { + return s.GetToken(CELParserQUESTIONMARK, 0) +} + + +func (s *SelectContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterSelect(s) + } +} + +func (s *SelectContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitSelect(s) + } +} + +func (s *SelectContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case 
CELVisitor: + return t.VisitSelect(s) + + default: + return t.VisitChildren(s) + } +} + + +type PrimaryExprContext struct { + MemberContext +} + +func NewPrimaryExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *PrimaryExprContext { + var p = new(PrimaryExprContext) + + InitEmptyMemberContext(&p.MemberContext) + p.parser = parser + p.CopyAll(ctx.(*MemberContext)) + + return p +} + +func (s *PrimaryExprContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *PrimaryExprContext) Primary() IPrimaryContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IPrimaryContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IPrimaryContext) +} + + +func (s *PrimaryExprContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterPrimaryExpr(s) + } +} + +func (s *PrimaryExprContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitPrimaryExpr(s) + } +} + +func (s *PrimaryExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitPrimaryExpr(s) + + default: + return t.VisitChildren(s) + } +} + + +type IndexContext struct { + MemberContext + op antlr.Token + opt antlr.Token + index IExprContext +} + +func NewIndexContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IndexContext { + var p = new(IndexContext) + + InitEmptyMemberContext(&p.MemberContext) + p.parser = parser + p.CopyAll(ctx.(*MemberContext)) + + return p +} + + +func (s *IndexContext) GetOp() antlr.Token { return s.op } + +func (s *IndexContext) GetOpt() antlr.Token { return s.opt } + + +func (s *IndexContext) SetOp(v antlr.Token) { s.op = v } + +func (s *IndexContext) SetOpt(v antlr.Token) { s.opt = v } + + +func (s *IndexContext) GetIndex() IExprContext { return s.index } + + +func (s *IndexContext) SetIndex(v IExprContext) { s.index = v } + +func (s *IndexContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *IndexContext) Member() IMemberContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IMemberContext) +} + +func (s *IndexContext) RPRACKET() antlr.TerminalNode { + return s.GetToken(CELParserRPRACKET, 0) +} + +func (s *IndexContext) LBRACKET() antlr.TerminalNode { + return s.GetToken(CELParserLBRACKET, 0) +} + +func (s *IndexContext) Expr() IExprContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *IndexContext) QUESTIONMARK() antlr.TerminalNode { + return s.GetToken(CELParserQUESTIONMARK, 0) +} + + +func (s *IndexContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterIndex(s) + } +} + +func (s *IndexContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitIndex(s) + } +} + +func (s *IndexContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitIndex(s) + + default: + return t.VisitChildren(s) + } +} + + + +func (p *CELParser) Member() (localctx 
IMemberContext) { + return p.member(0) +} + +func (p *CELParser) member(_p int) (localctx IMemberContext) { + var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + + _parentState := p.GetState() + localctx = NewMemberContext(p, p.GetParserRuleContext(), _parentState) + var _prevctx IMemberContext = localctx + var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning. + _startState := 14 + p.EnterRecursionRule(localctx, 14, CELParserRULE_member, _p) + var _la int + + var _alt int + + p.EnterOuterAlt(localctx, 1) + localctx = NewPrimaryExprContext(p, localctx) + p.SetParserRuleContext(localctx) + _prevctx = localctx + + { + p.SetState(100) + p.Primary() + } + + p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) + p.SetState(126) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + if p.GetParseListeners() != nil { + p.TriggerExitRuleEvent() + } + _prevctx = localctx + p.SetState(124) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 12, p.GetParserRuleContext()) { + case 1: + localctx = NewSelectContext(p, NewMemberContext(p, _parentctx, _parentState)) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member) + p.SetState(102) + + if !(p.Precpred(p.GetParserRuleContext(), 3)) { + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", "")) + goto errorExit + } + { + p.SetState(103) + + var _m = p.Match(CELParserDOT) + + localctx.(*SelectContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(105) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserQUESTIONMARK { + { + p.SetState(104) + + var _m = p.Match(CELParserQUESTIONMARK) + + localctx.(*SelectContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(107) + + var _m = p.Match(CELParserIDENTIFIER) + + localctx.(*SelectContext).id = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 2: + localctx = NewMemberCallContext(p, NewMemberContext(p, _parentctx, _parentState)) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member) + p.SetState(108) + + if !(p.Precpred(p.GetParserRuleContext(), 2)) { + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) + goto errorExit + } + { + p.SetState(109) + + var _m = p.Match(CELParserDOT) + + localctx.(*MemberCallContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(110) + + var _m = p.Match(CELParserIDENTIFIER) + + localctx.(*MemberCallContext).id = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(111) + + var _m = p.Match(CELParserLPAREN) + + localctx.(*MemberCallContext).open = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(113) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) { + { 
+ p.SetState(112) + + var _x = p.ExprList() + + + localctx.(*MemberCallContext).args = _x + } + + } + { + p.SetState(115) + p.Match(CELParserRPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 3: + localctx = NewIndexContext(p, NewMemberContext(p, _parentctx, _parentState)) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member) + p.SetState(116) + + if !(p.Precpred(p.GetParserRuleContext(), 1)) { + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + goto errorExit + } + { + p.SetState(117) + + var _m = p.Match(CELParserLBRACKET) + + localctx.(*IndexContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(119) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserQUESTIONMARK { + { + p.SetState(118) + + var _m = p.Match(CELParserQUESTIONMARK) + + localctx.(*IndexContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(121) + + var _x = p.Expr() + + + localctx.(*IndexContext).index = _x + } + { + p.SetState(122) + p.Match(CELParserRPRACKET) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + case antlr.ATNInvalidAltNumber: + goto errorExit + } + + } + p.SetState(128) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + + + + errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.UnrollRecursionContexts(_parentctx) + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IPrimaryContext is an interface to support dynamic dispatch. +type IPrimaryContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + // IsPrimaryContext differentiates from other interfaces. 
+ IsPrimaryContext() +} + +type PrimaryContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyPrimaryContext() *PrimaryContext { + var p = new(PrimaryContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_primary + return p +} + +func InitEmptyPrimaryContext(p *PrimaryContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_primary +} + +func (*PrimaryContext) IsPrimaryContext() {} + +func NewPrimaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *PrimaryContext { + var p = new(PrimaryContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_primary + + return p +} + +func (s *PrimaryContext) GetParser() antlr.Parser { return s.parser } + +func (s *PrimaryContext) CopyAll(ctx *PrimaryContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) +} + +func (s *PrimaryContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *PrimaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + + + +type CreateListContext struct { + PrimaryContext + op antlr.Token + elems IListInitContext +} + +func NewCreateListContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateListContext { + var p = new(CreateListContext) + + InitEmptyPrimaryContext(&p.PrimaryContext) + p.parser = parser + p.CopyAll(ctx.(*PrimaryContext)) + + return p +} + + +func (s *CreateListContext) GetOp() antlr.Token { return s.op } + + +func (s *CreateListContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *CreateListContext) GetElems() IListInitContext { return s.elems } + + +func (s *CreateListContext) SetElems(v IListInitContext) { s.elems = v } + +func (s *CreateListContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *CreateListContext) RPRACKET() antlr.TerminalNode { + return s.GetToken(CELParserRPRACKET, 0) +} + +func (s *CreateListContext) LBRACKET() antlr.TerminalNode { + return s.GetToken(CELParserLBRACKET, 0) +} + +func (s *CreateListContext) COMMA() antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, 0) +} + +func (s *CreateListContext) ListInit() IListInitContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IListInitContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IListInitContext) +} + + +func (s *CreateListContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterCreateList(s) + } +} + +func (s *CreateListContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitCreateList(s) + } +} + +func (s *CreateListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitCreateList(s) + + default: + return t.VisitChildren(s) + } +} + + +type CreateStructContext struct { + PrimaryContext + op antlr.Token + entries IMapInitializerListContext +} + +func NewCreateStructContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateStructContext { + var p = new(CreateStructContext) + + InitEmptyPrimaryContext(&p.PrimaryContext) + p.parser = parser + p.CopyAll(ctx.(*PrimaryContext)) + + return p +} + + +func (s *CreateStructContext) GetOp() antlr.Token { return s.op 
} + + +func (s *CreateStructContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *CreateStructContext) GetEntries() IMapInitializerListContext { return s.entries } + + +func (s *CreateStructContext) SetEntries(v IMapInitializerListContext) { s.entries = v } + +func (s *CreateStructContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *CreateStructContext) RBRACE() antlr.TerminalNode { + return s.GetToken(CELParserRBRACE, 0) +} + +func (s *CreateStructContext) LBRACE() antlr.TerminalNode { + return s.GetToken(CELParserLBRACE, 0) +} + +func (s *CreateStructContext) COMMA() antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, 0) +} + +func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMapInitializerListContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IMapInitializerListContext) +} + + +func (s *CreateStructContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterCreateStruct(s) + } +} + +func (s *CreateStructContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitCreateStruct(s) + } +} + +func (s *CreateStructContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitCreateStruct(s) + + default: + return t.VisitChildren(s) + } +} + + +type ConstantLiteralContext struct { + PrimaryContext +} + +func NewConstantLiteralContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ConstantLiteralContext { + var p = new(ConstantLiteralContext) + + InitEmptyPrimaryContext(&p.PrimaryContext) + p.parser = parser + p.CopyAll(ctx.(*PrimaryContext)) + + return p +} + +func (s *ConstantLiteralContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ConstantLiteralContext) Literal() ILiteralContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ILiteralContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(ILiteralContext) +} + + +func (s *ConstantLiteralContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterConstantLiteral(s) + } +} + +func (s *ConstantLiteralContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitConstantLiteral(s) + } +} + +func (s *ConstantLiteralContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitConstantLiteral(s) + + default: + return t.VisitChildren(s) + } +} + + +type NestedContext struct { + PrimaryContext + e IExprContext +} + +func NewNestedContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NestedContext { + var p = new(NestedContext) + + InitEmptyPrimaryContext(&p.PrimaryContext) + p.parser = parser + p.CopyAll(ctx.(*PrimaryContext)) + + return p +} + + +func (s *NestedContext) GetE() IExprContext { return s.e } + + +func (s *NestedContext) SetE(v IExprContext) { s.e = v } + +func (s *NestedContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *NestedContext) LPAREN() antlr.TerminalNode { + return s.GetToken(CELParserLPAREN, 0) +} + +func (s *NestedContext) RPAREN() antlr.TerminalNode { + return s.GetToken(CELParserRPAREN, 0) +} + +func (s 
*NestedContext) Expr() IExprContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + + +func (s *NestedContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterNested(s) + } +} + +func (s *NestedContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitNested(s) + } +} + +func (s *NestedContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitNested(s) + + default: + return t.VisitChildren(s) + } +} + + +type CreateMessageContext struct { + PrimaryContext + leadingDot antlr.Token + _IDENTIFIER antlr.Token + ids []antlr.Token + s16 antlr.Token + ops []antlr.Token + op antlr.Token + entries IFieldInitializerListContext +} + +func NewCreateMessageContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateMessageContext { + var p = new(CreateMessageContext) + + InitEmptyPrimaryContext(&p.PrimaryContext) + p.parser = parser + p.CopyAll(ctx.(*PrimaryContext)) + + return p +} + + +func (s *CreateMessageContext) GetLeadingDot() antlr.Token { return s.leadingDot } + +func (s *CreateMessageContext) Get_IDENTIFIER() antlr.Token { return s._IDENTIFIER } + +func (s *CreateMessageContext) GetS16() antlr.Token { return s.s16 } + +func (s *CreateMessageContext) GetOp() antlr.Token { return s.op } + + +func (s *CreateMessageContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v } + +func (s *CreateMessageContext) Set_IDENTIFIER(v antlr.Token) { s._IDENTIFIER = v } + +func (s *CreateMessageContext) SetS16(v antlr.Token) { s.s16 = v } + +func (s *CreateMessageContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *CreateMessageContext) GetIds() []antlr.Token { return s.ids } + +func (s *CreateMessageContext) GetOps() []antlr.Token { return s.ops } + + +func (s *CreateMessageContext) SetIds(v []antlr.Token) { s.ids = v } + +func (s *CreateMessageContext) SetOps(v []antlr.Token) { s.ops = v } + + +func (s *CreateMessageContext) GetEntries() IFieldInitializerListContext { return s.entries } + + +func (s *CreateMessageContext) SetEntries(v IFieldInitializerListContext) { s.entries = v } + +func (s *CreateMessageContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *CreateMessageContext) RBRACE() antlr.TerminalNode { + return s.GetToken(CELParserRBRACE, 0) +} + +func (s *CreateMessageContext) AllIDENTIFIER() []antlr.TerminalNode { + return s.GetTokens(CELParserIDENTIFIER) +} + +func (s *CreateMessageContext) IDENTIFIER(i int) antlr.TerminalNode { + return s.GetToken(CELParserIDENTIFIER, i) +} + +func (s *CreateMessageContext) LBRACE() antlr.TerminalNode { + return s.GetToken(CELParserLBRACE, 0) +} + +func (s *CreateMessageContext) COMMA() antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, 0) +} + +func (s *CreateMessageContext) AllDOT() []antlr.TerminalNode { + return s.GetTokens(CELParserDOT) +} + +func (s *CreateMessageContext) DOT(i int) antlr.TerminalNode { + return s.GetToken(CELParserDOT, i) +} + +func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFieldInitializerListContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return 
t.(IFieldInitializerListContext) +} + + +func (s *CreateMessageContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterCreateMessage(s) + } +} + +func (s *CreateMessageContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitCreateMessage(s) + } +} + +func (s *CreateMessageContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitCreateMessage(s) + + default: + return t.VisitChildren(s) + } +} + + +type IdentOrGlobalCallContext struct { + PrimaryContext + leadingDot antlr.Token + id antlr.Token + op antlr.Token + args IExprListContext +} + +func NewIdentOrGlobalCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IdentOrGlobalCallContext { + var p = new(IdentOrGlobalCallContext) + + InitEmptyPrimaryContext(&p.PrimaryContext) + p.parser = parser + p.CopyAll(ctx.(*PrimaryContext)) + + return p +} + + +func (s *IdentOrGlobalCallContext) GetLeadingDot() antlr.Token { return s.leadingDot } + +func (s *IdentOrGlobalCallContext) GetId() antlr.Token { return s.id } + +func (s *IdentOrGlobalCallContext) GetOp() antlr.Token { return s.op } + + +func (s *IdentOrGlobalCallContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v } + +func (s *IdentOrGlobalCallContext) SetId(v antlr.Token) { s.id = v } + +func (s *IdentOrGlobalCallContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *IdentOrGlobalCallContext) GetArgs() IExprListContext { return s.args } + + +func (s *IdentOrGlobalCallContext) SetArgs(v IExprListContext) { s.args = v } + +func (s *IdentOrGlobalCallContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *IdentOrGlobalCallContext) IDENTIFIER() antlr.TerminalNode { + return s.GetToken(CELParserIDENTIFIER, 0) +} + +func (s *IdentOrGlobalCallContext) RPAREN() antlr.TerminalNode { + return s.GetToken(CELParserRPAREN, 0) +} + +func (s *IdentOrGlobalCallContext) DOT() antlr.TerminalNode { + return s.GetToken(CELParserDOT, 0) +} + +func (s *IdentOrGlobalCallContext) LPAREN() antlr.TerminalNode { + return s.GetToken(CELParserLPAREN, 0) +} + +func (s *IdentOrGlobalCallContext) ExprList() IExprListContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprListContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprListContext) +} + + +func (s *IdentOrGlobalCallContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterIdentOrGlobalCall(s) + } +} + +func (s *IdentOrGlobalCallContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitIdentOrGlobalCall(s) + } +} + +func (s *IdentOrGlobalCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitIdentOrGlobalCall(s) + + default: + return t.VisitChildren(s) + } +} + + + +func (p *CELParser) Primary() (localctx IPrimaryContext) { + localctx = NewPrimaryContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 16, CELParserRULE_primary) + var _la int + + p.SetState(180) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 25, p.GetParserRuleContext()) { + case 1: + localctx = NewIdentOrGlobalCallContext(p, localctx) + 
p.EnterOuterAlt(localctx, 1) + p.SetState(130) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserDOT { + { + p.SetState(129) + + var _m = p.Match(CELParserDOT) + + localctx.(*IdentOrGlobalCallContext).leadingDot = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(132) + + var _m = p.Match(CELParserIDENTIFIER) + + localctx.(*IdentOrGlobalCallContext).id = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(138) + p.GetErrorHandler().Sync(p) + + + if p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 { + { + p.SetState(133) + + var _m = p.Match(CELParserLPAREN) + + localctx.(*IdentOrGlobalCallContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(135) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) { + { + p.SetState(134) + + var _x = p.ExprList() + + + localctx.(*IdentOrGlobalCallContext).args = _x + } + + } + { + p.SetState(137) + p.Match(CELParserRPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } else if p.HasError() { // JIM + goto errorExit + } + + + case 2: + localctx = NewNestedContext(p, localctx) + p.EnterOuterAlt(localctx, 2) + { + p.SetState(140) + p.Match(CELParserLPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(141) + + var _x = p.Expr() + + + localctx.(*NestedContext).e = _x + } + { + p.SetState(142) + p.Match(CELParserRPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 3: + localctx = NewCreateListContext(p, localctx) + p.EnterOuterAlt(localctx, 3) + { + p.SetState(144) + + var _m = p.Match(CELParserLBRACKET) + + localctx.(*CreateListContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(146) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) { + { + p.SetState(145) + + var _x = p.ListInit() + + + localctx.(*CreateListContext).elems = _x + } + + } + p.SetState(149) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserCOMMA { + { + p.SetState(148) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(151) + p.Match(CELParserRPRACKET) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 4: + localctx = NewCreateStructContext(p, localctx) + p.EnterOuterAlt(localctx, 4) + { + p.SetState(152) + + var _m = p.Match(CELParserLBRACE) + + localctx.(*CreateStructContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(154) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) { + { + p.SetState(153) + + var _x = p.MapInitializerList() + + + localctx.(*CreateStructContext).entries = _x + } + + } + p.SetState(157) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto 
errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserCOMMA { + { + p.SetState(156) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(159) + p.Match(CELParserRBRACE) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 5: + localctx = NewCreateMessageContext(p, localctx) + p.EnterOuterAlt(localctx, 5) + p.SetState(161) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserDOT { + { + p.SetState(160) + + var _m = p.Match(CELParserDOT) + + localctx.(*CreateMessageContext).leadingDot = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(163) + + var _m = p.Match(CELParserIDENTIFIER) + + localctx.(*CreateMessageContext)._IDENTIFIER = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER) + p.SetState(168) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + for _la == CELParserDOT { + { + p.SetState(164) + + var _m = p.Match(CELParserDOT) + + localctx.(*CreateMessageContext).s16 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*CreateMessageContext).ops = append(localctx.(*CreateMessageContext).ops, localctx.(*CreateMessageContext).s16) + { + p.SetState(165) + + var _m = p.Match(CELParserIDENTIFIER) + + localctx.(*CreateMessageContext)._IDENTIFIER = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER) + + + p.SetState(170) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(171) + + var _m = p.Match(CELParserLBRACE) + + localctx.(*CreateMessageContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(173) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserQUESTIONMARK || _la == CELParserIDENTIFIER { + { + p.SetState(172) + + var _x = p.FieldInitializerList() + + + localctx.(*CreateMessageContext).entries = _x + } + + } + p.SetState(176) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserCOMMA { + { + p.SetState(175) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(178) + p.Match(CELParserRBRACE) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 6: + localctx = NewConstantLiteralContext(p, localctx) + p.EnterOuterAlt(localctx, 6) + { + p.SetState(179) + p.Literal() + } + + case antlr.ATNInvalidAltNumber: + goto errorExit + } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IExprListContext is an interface to support dynamic dispatch. 
+type IExprListContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Get_expr returns the _expr rule contexts. + Get_expr() IExprContext + + + // Set_expr sets the _expr rule contexts. + Set_expr(IExprContext) + + + // GetE returns the e rule context list. + GetE() []IExprContext + + + // SetE sets the e rule context list. + SetE([]IExprContext) + + + // Getter signatures + AllExpr() []IExprContext + Expr(i int) IExprContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsExprListContext differentiates from other interfaces. + IsExprListContext() +} + +type ExprListContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + _expr IExprContext + e []IExprContext +} + +func NewEmptyExprListContext() *ExprListContext { + var p = new(ExprListContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_exprList + return p +} + +func InitEmptyExprListContext(p *ExprListContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_exprList +} + +func (*ExprListContext) IsExprListContext() {} + +func NewExprListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprListContext { + var p = new(ExprListContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_exprList + + return p +} + +func (s *ExprListContext) GetParser() antlr.Parser { return s.parser } + +func (s *ExprListContext) Get_expr() IExprContext { return s._expr } + + +func (s *ExprListContext) Set_expr(v IExprContext) { s._expr = v } + + +func (s *ExprListContext) GetE() []IExprContext { return s.e } + + +func (s *ExprListContext) SetE(v []IExprContext) { s.e = v } + + +func (s *ExprListContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *ExprListContext) Expr(i int) IExprContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *ExprListContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(CELParserCOMMA) +} + +func (s *ExprListContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, i) +} + +func (s *ExprListContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExprListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *ExprListContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterExprList(s) + } +} + +func (s *ExprListContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitExprList(s) + } +} + +func (s *ExprListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitExprList(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) ExprList() (localctx 
IExprListContext) { + localctx = NewExprListContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 18, CELParserRULE_exprList) + var _la int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(182) + + var _x = p.Expr() + + + localctx.(*ExprListContext)._expr = _x + } + localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr) + p.SetState(187) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + for _la == CELParserCOMMA { + { + p.SetState(183) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(184) + + var _x = p.Expr() + + + localctx.(*ExprListContext)._expr = _x + } + localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr) + + + p.SetState(189) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IListInitContext is an interface to support dynamic dispatch. +type IListInitContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Get_optExpr returns the _optExpr rule contexts. + Get_optExpr() IOptExprContext + + + // Set_optExpr sets the _optExpr rule contexts. + Set_optExpr(IOptExprContext) + + + // GetElems returns the elems rule context list. + GetElems() []IOptExprContext + + + // SetElems sets the elems rule context list. + SetElems([]IOptExprContext) + + + // Getter signatures + AllOptExpr() []IOptExprContext + OptExpr(i int) IOptExprContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsListInitContext differentiates from other interfaces. 
+ IsListInitContext() +} + +type ListInitContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + _optExpr IOptExprContext + elems []IOptExprContext +} + +func NewEmptyListInitContext() *ListInitContext { + var p = new(ListInitContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_listInit + return p +} + +func InitEmptyListInitContext(p *ListInitContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_listInit +} + +func (*ListInitContext) IsListInitContext() {} + +func NewListInitContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ListInitContext { + var p = new(ListInitContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_listInit + + return p +} + +func (s *ListInitContext) GetParser() antlr.Parser { return s.parser } + +func (s *ListInitContext) Get_optExpr() IOptExprContext { return s._optExpr } + + +func (s *ListInitContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } + + +func (s *ListInitContext) GetElems() []IOptExprContext { return s.elems } + + +func (s *ListInitContext) SetElems(v []IOptExprContext) { s.elems = v } + + +func (s *ListInitContext) AllOptExpr() []IOptExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IOptExprContext); ok { + len++ + } + } + + tst := make([]IOptExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IOptExprContext); ok { + tst[i] = t.(IOptExprContext) + i++ + } + } + + return tst +} + +func (s *ListInitContext) OptExpr(i int) IOptExprContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOptExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IOptExprContext) +} + +func (s *ListInitContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(CELParserCOMMA) +} + +func (s *ListInitContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, i) +} + +func (s *ListInitContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ListInitContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *ListInitContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterListInit(s) + } +} + +func (s *ListInitContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitListInit(s) + } +} + +func (s *ListInitContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitListInit(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) ListInit() (localctx IListInitContext) { + localctx = NewListInitContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 20, CELParserRULE_listInit) + var _alt int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(190) + + var _x = p.OptExpr() + + + localctx.(*ListInitContext)._optExpr = _x + } + localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) + p.SetState(195) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = 
p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + { + p.SetState(191) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(192) + + var _x = p.OptExpr() + + + localctx.(*ListInitContext)._optExpr = _x + } + localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) + + + } + p.SetState(197) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IFieldInitializerListContext is an interface to support dynamic dispatch. +type IFieldInitializerListContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetS21 returns the s21 token. + GetS21() antlr.Token + + + // SetS21 sets the s21 token. + SetS21(antlr.Token) + + + // GetCols returns the cols token list. + GetCols() []antlr.Token + + + // SetCols sets the cols token list. + SetCols([]antlr.Token) + + + // Get_optField returns the _optField rule contexts. + Get_optField() IOptFieldContext + + // Get_expr returns the _expr rule contexts. + Get_expr() IExprContext + + + // Set_optField sets the _optField rule contexts. + Set_optField(IOptFieldContext) + + // Set_expr sets the _expr rule contexts. + Set_expr(IExprContext) + + + // GetFields returns the fields rule context list. + GetFields() []IOptFieldContext + + // GetValues returns the values rule context list. + GetValues() []IExprContext + + + // SetFields sets the fields rule context list. + SetFields([]IOptFieldContext) + + // SetValues sets the values rule context list. + SetValues([]IExprContext) + + + // Getter signatures + AllOptField() []IOptFieldContext + OptField(i int) IOptFieldContext + AllCOLON() []antlr.TerminalNode + COLON(i int) antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsFieldInitializerListContext differentiates from other interfaces. 
+ IsFieldInitializerListContext() +} + +type FieldInitializerListContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + _optField IOptFieldContext + fields []IOptFieldContext + s21 antlr.Token + cols []antlr.Token + _expr IExprContext + values []IExprContext +} + +func NewEmptyFieldInitializerListContext() *FieldInitializerListContext { + var p = new(FieldInitializerListContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_fieldInitializerList + return p +} + +func InitEmptyFieldInitializerListContext(p *FieldInitializerListContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_fieldInitializerList +} + +func (*FieldInitializerListContext) IsFieldInitializerListContext() {} + +func NewFieldInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *FieldInitializerListContext { + var p = new(FieldInitializerListContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_fieldInitializerList + + return p +} + +func (s *FieldInitializerListContext) GetParser() antlr.Parser { return s.parser } + +func (s *FieldInitializerListContext) GetS21() antlr.Token { return s.s21 } + + +func (s *FieldInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } + + +func (s *FieldInitializerListContext) GetCols() []antlr.Token { return s.cols } + + +func (s *FieldInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } + + +func (s *FieldInitializerListContext) Get_optField() IOptFieldContext { return s._optField } + +func (s *FieldInitializerListContext) Get_expr() IExprContext { return s._expr } + + +func (s *FieldInitializerListContext) Set_optField(v IOptFieldContext) { s._optField = v } + +func (s *FieldInitializerListContext) Set_expr(v IExprContext) { s._expr = v } + + +func (s *FieldInitializerListContext) GetFields() []IOptFieldContext { return s.fields } + +func (s *FieldInitializerListContext) GetValues() []IExprContext { return s.values } + + +func (s *FieldInitializerListContext) SetFields(v []IOptFieldContext) { s.fields = v } + +func (s *FieldInitializerListContext) SetValues(v []IExprContext) { s.values = v } + + +func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IOptFieldContext); ok { + len++ + } + } + + tst := make([]IOptFieldContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IOptFieldContext); ok { + tst[i] = t.(IOptFieldContext) + i++ + } + } + + return tst +} + +func (s *FieldInitializerListContext) OptField(i int) IOptFieldContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOptFieldContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IOptFieldContext) +} + +func (s *FieldInitializerListContext) AllCOLON() []antlr.TerminalNode { + return s.GetTokens(CELParserCOLON) +} + +func (s *FieldInitializerListContext) COLON(i int) antlr.TerminalNode { + return s.GetToken(CELParserCOLON, i) +} + +func (s *FieldInitializerListContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok 
:= ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *FieldInitializerListContext) Expr(i int) IExprContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *FieldInitializerListContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(CELParserCOMMA) +} + +func (s *FieldInitializerListContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, i) +} + +func (s *FieldInitializerListContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *FieldInitializerListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *FieldInitializerListContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterFieldInitializerList(s) + } +} + +func (s *FieldInitializerListContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitFieldInitializerList(s) + } +} + +func (s *FieldInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitFieldInitializerList(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContext) { + localctx = NewFieldInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 22, CELParserRULE_fieldInitializerList) + var _alt int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(198) + + var _x = p.OptField() + + + localctx.(*FieldInitializerListContext)._optField = _x + } + localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField) + { + p.SetState(199) + + var _m = p.Match(CELParserCOLON) + + localctx.(*FieldInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21) + { + p.SetState(200) + + var _x = p.Expr() + + + localctx.(*FieldInitializerListContext)._expr = _x + } + localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) + p.SetState(208) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + { + p.SetState(201) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(202) + + var _x = p.OptField() + + + localctx.(*FieldInitializerListContext)._optField = _x + } + localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField) + { + p.SetState(203) + + var _m = p.Match(CELParserCOLON) + + localctx.(*FieldInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + 
} + } + localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21) + { + p.SetState(204) + + var _x = p.Expr() + + + localctx.(*FieldInitializerListContext)._expr = _x + } + localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) + + + } + p.SetState(210) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IOptFieldContext is an interface to support dynamic dispatch. +type IOptFieldContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetOpt returns the opt token. + GetOpt() antlr.Token + + + // SetOpt sets the opt token. + SetOpt(antlr.Token) + + + // Getter signatures + IDENTIFIER() antlr.TerminalNode + QUESTIONMARK() antlr.TerminalNode + + // IsOptFieldContext differentiates from other interfaces. + IsOptFieldContext() +} + +type OptFieldContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + opt antlr.Token +} + +func NewEmptyOptFieldContext() *OptFieldContext { + var p = new(OptFieldContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_optField + return p +} + +func InitEmptyOptFieldContext(p *OptFieldContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_optField +} + +func (*OptFieldContext) IsOptFieldContext() {} + +func NewOptFieldContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptFieldContext { + var p = new(OptFieldContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_optField + + return p +} + +func (s *OptFieldContext) GetParser() antlr.Parser { return s.parser } + +func (s *OptFieldContext) GetOpt() antlr.Token { return s.opt } + + +func (s *OptFieldContext) SetOpt(v antlr.Token) { s.opt = v } + + +func (s *OptFieldContext) IDENTIFIER() antlr.TerminalNode { + return s.GetToken(CELParserIDENTIFIER, 0) +} + +func (s *OptFieldContext) QUESTIONMARK() antlr.TerminalNode { + return s.GetToken(CELParserQUESTIONMARK, 0) +} + +func (s *OptFieldContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *OptFieldContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *OptFieldContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterOptField(s) + } +} + +func (s *OptFieldContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitOptField(s) + } +} + +func (s *OptFieldContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitOptField(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) OptField() (localctx 
IOptFieldContext) { + localctx = NewOptFieldContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 24, CELParserRULE_optField) + var _la int + + p.EnterOuterAlt(localctx, 1) + p.SetState(212) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserQUESTIONMARK { + { + p.SetState(211) + + var _m = p.Match(CELParserQUESTIONMARK) + + localctx.(*OptFieldContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(214) + p.Match(CELParserIDENTIFIER) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IMapInitializerListContext is an interface to support dynamic dispatch. +type IMapInitializerListContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetS21 returns the s21 token. + GetS21() antlr.Token + + + // SetS21 sets the s21 token. + SetS21(antlr.Token) + + + // GetCols returns the cols token list. + GetCols() []antlr.Token + + + // SetCols sets the cols token list. + SetCols([]antlr.Token) + + + // Get_optExpr returns the _optExpr rule contexts. + Get_optExpr() IOptExprContext + + // Get_expr returns the _expr rule contexts. + Get_expr() IExprContext + + + // Set_optExpr sets the _optExpr rule contexts. + Set_optExpr(IOptExprContext) + + // Set_expr sets the _expr rule contexts. + Set_expr(IExprContext) + + + // GetKeys returns the keys rule context list. + GetKeys() []IOptExprContext + + // GetValues returns the values rule context list. + GetValues() []IExprContext + + + // SetKeys sets the keys rule context list. + SetKeys([]IOptExprContext) + + // SetValues sets the values rule context list. + SetValues([]IExprContext) + + + // Getter signatures + AllOptExpr() []IOptExprContext + OptExpr(i int) IOptExprContext + AllCOLON() []antlr.TerminalNode + COLON(i int) antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsMapInitializerListContext differentiates from other interfaces. 
+ IsMapInitializerListContext() +} + +type MapInitializerListContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + _optExpr IOptExprContext + keys []IOptExprContext + s21 antlr.Token + cols []antlr.Token + _expr IExprContext + values []IExprContext +} + +func NewEmptyMapInitializerListContext() *MapInitializerListContext { + var p = new(MapInitializerListContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_mapInitializerList + return p +} + +func InitEmptyMapInitializerListContext(p *MapInitializerListContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_mapInitializerList +} + +func (*MapInitializerListContext) IsMapInitializerListContext() {} + +func NewMapInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MapInitializerListContext { + var p = new(MapInitializerListContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_mapInitializerList + + return p +} + +func (s *MapInitializerListContext) GetParser() antlr.Parser { return s.parser } + +func (s *MapInitializerListContext) GetS21() antlr.Token { return s.s21 } + + +func (s *MapInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } + + +func (s *MapInitializerListContext) GetCols() []antlr.Token { return s.cols } + + +func (s *MapInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } + + +func (s *MapInitializerListContext) Get_optExpr() IOptExprContext { return s._optExpr } + +func (s *MapInitializerListContext) Get_expr() IExprContext { return s._expr } + + +func (s *MapInitializerListContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } + +func (s *MapInitializerListContext) Set_expr(v IExprContext) { s._expr = v } + + +func (s *MapInitializerListContext) GetKeys() []IOptExprContext { return s.keys } + +func (s *MapInitializerListContext) GetValues() []IExprContext { return s.values } + + +func (s *MapInitializerListContext) SetKeys(v []IOptExprContext) { s.keys = v } + +func (s *MapInitializerListContext) SetValues(v []IExprContext) { s.values = v } + + +func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IOptExprContext); ok { + len++ + } + } + + tst := make([]IOptExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IOptExprContext); ok { + tst[i] = t.(IOptExprContext) + i++ + } + } + + return tst +} + +func (s *MapInitializerListContext) OptExpr(i int) IOptExprContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOptExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IOptExprContext) +} + +func (s *MapInitializerListContext) AllCOLON() []antlr.TerminalNode { + return s.GetTokens(CELParserCOLON) +} + +func (s *MapInitializerListContext) COLON(i int) antlr.TerminalNode { + return s.GetToken(CELParserCOLON, i) +} + +func (s *MapInitializerListContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func 
(s *MapInitializerListContext) Expr(i int) IExprContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *MapInitializerListContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(CELParserCOMMA) +} + +func (s *MapInitializerListContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, i) +} + +func (s *MapInitializerListContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *MapInitializerListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *MapInitializerListContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterMapInitializerList(s) + } +} + +func (s *MapInitializerListContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitMapInitializerList(s) + } +} + +func (s *MapInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitMapInitializerList(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { + localctx = NewMapInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 26, CELParserRULE_mapInitializerList) + var _alt int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(216) + + var _x = p.OptExpr() + + + localctx.(*MapInitializerListContext)._optExpr = _x + } + localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr) + { + p.SetState(217) + + var _m = p.Match(CELParserCOLON) + + localctx.(*MapInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21) + { + p.SetState(218) + + var _x = p.Expr() + + + localctx.(*MapInitializerListContext)._expr = _x + } + localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) + p.SetState(226) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + { + p.SetState(219) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(220) + + var _x = p.OptExpr() + + + localctx.(*MapInitializerListContext)._optExpr = _x + } + localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr) + { + p.SetState(221) + + var _m = p.Match(CELParserCOLON) + + localctx.(*MapInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21) + { + p.SetState(222) + + 
var _x = p.Expr() + + + localctx.(*MapInitializerListContext)._expr = _x + } + localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) + + + } + p.SetState(228) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IOptExprContext is an interface to support dynamic dispatch. +type IOptExprContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetOpt returns the opt token. + GetOpt() antlr.Token + + + // SetOpt sets the opt token. + SetOpt(antlr.Token) + + + // GetE returns the e rule contexts. + GetE() IExprContext + + + // SetE sets the e rule contexts. + SetE(IExprContext) + + + // Getter signatures + Expr() IExprContext + QUESTIONMARK() antlr.TerminalNode + + // IsOptExprContext differentiates from other interfaces. + IsOptExprContext() +} + +type OptExprContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + opt antlr.Token + e IExprContext +} + +func NewEmptyOptExprContext() *OptExprContext { + var p = new(OptExprContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_optExpr + return p +} + +func InitEmptyOptExprContext(p *OptExprContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_optExpr +} + +func (*OptExprContext) IsOptExprContext() {} + +func NewOptExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptExprContext { + var p = new(OptExprContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_optExpr + + return p +} + +func (s *OptExprContext) GetParser() antlr.Parser { return s.parser } + +func (s *OptExprContext) GetOpt() antlr.Token { return s.opt } + + +func (s *OptExprContext) SetOpt(v antlr.Token) { s.opt = v } + + +func (s *OptExprContext) GetE() IExprContext { return s.e } + + +func (s *OptExprContext) SetE(v IExprContext) { s.e = v } + + +func (s *OptExprContext) Expr() IExprContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *OptExprContext) QUESTIONMARK() antlr.TerminalNode { + return s.GetToken(CELParserQUESTIONMARK, 0) +} + +func (s *OptExprContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *OptExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *OptExprContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterOptExpr(s) + } +} + +func (s *OptExprContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitOptExpr(s) + } +} + +func (s *OptExprContext) Accept(visitor antlr.ParseTreeVisitor) 
interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitOptExpr(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) OptExpr() (localctx IOptExprContext) { + localctx = NewOptExprContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 28, CELParserRULE_optExpr) + var _la int + + p.EnterOuterAlt(localctx, 1) + p.SetState(230) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserQUESTIONMARK { + { + p.SetState(229) + + var _m = p.Match(CELParserQUESTIONMARK) + + localctx.(*OptExprContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(232) + + var _x = p.Expr() + + + localctx.(*OptExprContext).e = _x + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// ILiteralContext is an interface to support dynamic dispatch. +type ILiteralContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + // IsLiteralContext differentiates from other interfaces. + IsLiteralContext() +} + +type LiteralContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyLiteralContext() *LiteralContext { + var p = new(LiteralContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_literal + return p +} + +func InitEmptyLiteralContext(p *LiteralContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_literal +} + +func (*LiteralContext) IsLiteralContext() {} + +func NewLiteralContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *LiteralContext { + var p = new(LiteralContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_literal + + return p +} + +func (s *LiteralContext) GetParser() antlr.Parser { return s.parser } + +func (s *LiteralContext) CopyAll(ctx *LiteralContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) +} + +func (s *LiteralContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *LiteralContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + + + +type BytesContext struct { + LiteralContext + tok antlr.Token +} + +func NewBytesContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BytesContext { + var p = new(BytesContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *BytesContext) GetTok() antlr.Token { return s.tok } + + +func (s *BytesContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *BytesContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *BytesContext) BYTES() antlr.TerminalNode { + return s.GetToken(CELParserBYTES, 0) +} + + +func (s *BytesContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterBytes(s) + } +} + +func (s *BytesContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitBytes(s) + } +} + +func (s 
*BytesContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitBytes(s) + + default: + return t.VisitChildren(s) + } +} + + +type UintContext struct { + LiteralContext + tok antlr.Token +} + +func NewUintContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UintContext { + var p = new(UintContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *UintContext) GetTok() antlr.Token { return s.tok } + + +func (s *UintContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *UintContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *UintContext) NUM_UINT() antlr.TerminalNode { + return s.GetToken(CELParserNUM_UINT, 0) +} + + +func (s *UintContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterUint(s) + } +} + +func (s *UintContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitUint(s) + } +} + +func (s *UintContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitUint(s) + + default: + return t.VisitChildren(s) + } +} + + +type NullContext struct { + LiteralContext + tok antlr.Token +} + +func NewNullContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NullContext { + var p = new(NullContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *NullContext) GetTok() antlr.Token { return s.tok } + + +func (s *NullContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *NullContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *NullContext) NUL() antlr.TerminalNode { + return s.GetToken(CELParserNUL, 0) +} + + +func (s *NullContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterNull(s) + } +} + +func (s *NullContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitNull(s) + } +} + +func (s *NullContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitNull(s) + + default: + return t.VisitChildren(s) + } +} + + +type BoolFalseContext struct { + LiteralContext + tok antlr.Token +} + +func NewBoolFalseContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolFalseContext { + var p = new(BoolFalseContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *BoolFalseContext) GetTok() antlr.Token { return s.tok } + + +func (s *BoolFalseContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *BoolFalseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *BoolFalseContext) CEL_FALSE() antlr.TerminalNode { + return s.GetToken(CELParserCEL_FALSE, 0) +} + + +func (s *BoolFalseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterBoolFalse(s) + } +} + +func (s *BoolFalseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitBoolFalse(s) + } +} + +func (s *BoolFalseContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitBoolFalse(s) + 
+ default: + return t.VisitChildren(s) + } +} + + +type StringContext struct { + LiteralContext + tok antlr.Token +} + +func NewStringContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *StringContext { + var p = new(StringContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *StringContext) GetTok() antlr.Token { return s.tok } + + +func (s *StringContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *StringContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *StringContext) STRING() antlr.TerminalNode { + return s.GetToken(CELParserSTRING, 0) +} + + +func (s *StringContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterString(s) + } +} + +func (s *StringContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitString(s) + } +} + +func (s *StringContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitString(s) + + default: + return t.VisitChildren(s) + } +} + + +type DoubleContext struct { + LiteralContext + sign antlr.Token + tok antlr.Token +} + +func NewDoubleContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *DoubleContext { + var p = new(DoubleContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *DoubleContext) GetSign() antlr.Token { return s.sign } + +func (s *DoubleContext) GetTok() antlr.Token { return s.tok } + + +func (s *DoubleContext) SetSign(v antlr.Token) { s.sign = v } + +func (s *DoubleContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *DoubleContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *DoubleContext) NUM_FLOAT() antlr.TerminalNode { + return s.GetToken(CELParserNUM_FLOAT, 0) +} + +func (s *DoubleContext) MINUS() antlr.TerminalNode { + return s.GetToken(CELParserMINUS, 0) +} + + +func (s *DoubleContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterDouble(s) + } +} + +func (s *DoubleContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitDouble(s) + } +} + +func (s *DoubleContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitDouble(s) + + default: + return t.VisitChildren(s) + } +} + + +type BoolTrueContext struct { + LiteralContext + tok antlr.Token +} + +func NewBoolTrueContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolTrueContext { + var p = new(BoolTrueContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *BoolTrueContext) GetTok() antlr.Token { return s.tok } + + +func (s *BoolTrueContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *BoolTrueContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *BoolTrueContext) CEL_TRUE() antlr.TerminalNode { + return s.GetToken(CELParserCEL_TRUE, 0) +} + + +func (s *BoolTrueContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterBoolTrue(s) + } +} + +func (s *BoolTrueContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitBoolTrue(s) + } +} + +func 
(s *BoolTrueContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitBoolTrue(s) + + default: + return t.VisitChildren(s) + } +} + + +type IntContext struct { + LiteralContext + sign antlr.Token + tok antlr.Token +} + +func NewIntContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IntContext { + var p = new(IntContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *IntContext) GetSign() antlr.Token { return s.sign } + +func (s *IntContext) GetTok() antlr.Token { return s.tok } + + +func (s *IntContext) SetSign(v antlr.Token) { s.sign = v } + +func (s *IntContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *IntContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *IntContext) NUM_INT() antlr.TerminalNode { + return s.GetToken(CELParserNUM_INT, 0) +} + +func (s *IntContext) MINUS() antlr.TerminalNode { + return s.GetToken(CELParserMINUS, 0) +} + + +func (s *IntContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterInt(s) + } +} + +func (s *IntContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitInt(s) + } +} + +func (s *IntContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitInt(s) + + default: + return t.VisitChildren(s) + } +} + + + +func (p *CELParser) Literal() (localctx ILiteralContext) { + localctx = NewLiteralContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 30, CELParserRULE_literal) + var _la int + + p.SetState(248) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 34, p.GetParserRuleContext()) { + case 1: + localctx = NewIntContext(p, localctx) + p.EnterOuterAlt(localctx, 1) + p.SetState(235) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserMINUS { + { + p.SetState(234) + + var _m = p.Match(CELParserMINUS) + + localctx.(*IntContext).sign = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(237) + + var _m = p.Match(CELParserNUM_INT) + + localctx.(*IntContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 2: + localctx = NewUintContext(p, localctx) + p.EnterOuterAlt(localctx, 2) + { + p.SetState(238) + + var _m = p.Match(CELParserNUM_UINT) + + localctx.(*UintContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 3: + localctx = NewDoubleContext(p, localctx) + p.EnterOuterAlt(localctx, 3) + p.SetState(240) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserMINUS { + { + p.SetState(239) + + var _m = p.Match(CELParserMINUS) + + localctx.(*DoubleContext).sign = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(242) + + var _m = p.Match(CELParserNUM_FLOAT) + + localctx.(*DoubleContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 4: + localctx = NewStringContext(p, localctx) + p.EnterOuterAlt(localctx, 4) + { + p.SetState(243) + + var _m = 
p.Match(CELParserSTRING) + + localctx.(*StringContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 5: + localctx = NewBytesContext(p, localctx) + p.EnterOuterAlt(localctx, 5) + { + p.SetState(244) + + var _m = p.Match(CELParserBYTES) + + localctx.(*BytesContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 6: + localctx = NewBoolTrueContext(p, localctx) + p.EnterOuterAlt(localctx, 6) + { + p.SetState(245) + + var _m = p.Match(CELParserCEL_TRUE) + + localctx.(*BoolTrueContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 7: + localctx = NewBoolFalseContext(p, localctx) + p.EnterOuterAlt(localctx, 7) + { + p.SetState(246) + + var _m = p.Match(CELParserCEL_FALSE) + + localctx.(*BoolFalseContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 8: + localctx = NewNullContext(p, localctx) + p.EnterOuterAlt(localctx, 8) + { + p.SetState(247) + + var _m = p.Match(CELParserNUL) + + localctx.(*NullContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + case antlr.ATNInvalidAltNumber: + goto errorExit + } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int) bool { + switch ruleIndex { + case 4: + var t *RelationContext = nil + if localctx != nil { t = localctx.(*RelationContext) } + return p.Relation_Sempred(t, predIndex) + + case 5: + var t *CalcContext = nil + if localctx != nil { t = localctx.(*CalcContext) } + return p.Calc_Sempred(t, predIndex) + + case 7: + var t *MemberContext = nil + if localctx != nil { t = localctx.(*MemberContext) } + return p.Member_Sempred(t, predIndex) + + + default: + panic("No predicate with index: " + fmt.Sprint(ruleIndex)) + } +} + +func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int) bool { + switch predIndex { + case 0: + return p.Precpred(p.GetParserRuleContext(), 1) + + default: + panic("No predicate with index: " + fmt.Sprint(predIndex)) + } +} + +func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool { + switch predIndex { + case 1: + return p.Precpred(p.GetParserRuleContext(), 2) + + case 2: + return p.Precpred(p.GetParserRuleContext(), 1) + + default: + panic("No predicate with index: " + fmt.Sprint(predIndex)) + } +} + +func (p *CELParser) Member_Sempred(localctx antlr.RuleContext, predIndex int) bool { + switch predIndex { + case 3: + return p.Precpred(p.GetParserRuleContext(), 3) + + case 4: + return p.Precpred(p.GetParserRuleContext(), 2) + + case 5: + return p.Precpred(p.GetParserRuleContext(), 1) + + default: + panic("No predicate with index: " + fmt.Sprint(predIndex)) + } +} + diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go b/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go new file mode 100644 index 0000000000..d2fbd563ab --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go @@ -0,0 +1,110 @@ +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. 
+ +package gen // CEL +import "github.com/antlr4-go/antlr/v4" + + +// A complete Visitor for a parse tree produced by CELParser. +type CELVisitor interface { + antlr.ParseTreeVisitor + + // Visit a parse tree produced by CELParser#start. + VisitStart(ctx *StartContext) interface{} + + // Visit a parse tree produced by CELParser#expr. + VisitExpr(ctx *ExprContext) interface{} + + // Visit a parse tree produced by CELParser#conditionalOr. + VisitConditionalOr(ctx *ConditionalOrContext) interface{} + + // Visit a parse tree produced by CELParser#conditionalAnd. + VisitConditionalAnd(ctx *ConditionalAndContext) interface{} + + // Visit a parse tree produced by CELParser#relation. + VisitRelation(ctx *RelationContext) interface{} + + // Visit a parse tree produced by CELParser#calc. + VisitCalc(ctx *CalcContext) interface{} + + // Visit a parse tree produced by CELParser#MemberExpr. + VisitMemberExpr(ctx *MemberExprContext) interface{} + + // Visit a parse tree produced by CELParser#LogicalNot. + VisitLogicalNot(ctx *LogicalNotContext) interface{} + + // Visit a parse tree produced by CELParser#Negate. + VisitNegate(ctx *NegateContext) interface{} + + // Visit a parse tree produced by CELParser#MemberCall. + VisitMemberCall(ctx *MemberCallContext) interface{} + + // Visit a parse tree produced by CELParser#Select. + VisitSelect(ctx *SelectContext) interface{} + + // Visit a parse tree produced by CELParser#PrimaryExpr. + VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} + + // Visit a parse tree produced by CELParser#Index. + VisitIndex(ctx *IndexContext) interface{} + + // Visit a parse tree produced by CELParser#IdentOrGlobalCall. + VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{} + + // Visit a parse tree produced by CELParser#Nested. + VisitNested(ctx *NestedContext) interface{} + + // Visit a parse tree produced by CELParser#CreateList. + VisitCreateList(ctx *CreateListContext) interface{} + + // Visit a parse tree produced by CELParser#CreateStruct. + VisitCreateStruct(ctx *CreateStructContext) interface{} + + // Visit a parse tree produced by CELParser#CreateMessage. + VisitCreateMessage(ctx *CreateMessageContext) interface{} + + // Visit a parse tree produced by CELParser#ConstantLiteral. + VisitConstantLiteral(ctx *ConstantLiteralContext) interface{} + + // Visit a parse tree produced by CELParser#exprList. + VisitExprList(ctx *ExprListContext) interface{} + + // Visit a parse tree produced by CELParser#listInit. + VisitListInit(ctx *ListInitContext) interface{} + + // Visit a parse tree produced by CELParser#fieldInitializerList. + VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{} + + // Visit a parse tree produced by CELParser#optField. + VisitOptField(ctx *OptFieldContext) interface{} + + // Visit a parse tree produced by CELParser#mapInitializerList. + VisitMapInitializerList(ctx *MapInitializerListContext) interface{} + + // Visit a parse tree produced by CELParser#optExpr. + VisitOptExpr(ctx *OptExprContext) interface{} + + // Visit a parse tree produced by CELParser#Int. + VisitInt(ctx *IntContext) interface{} + + // Visit a parse tree produced by CELParser#Uint. + VisitUint(ctx *UintContext) interface{} + + // Visit a parse tree produced by CELParser#Double. + VisitDouble(ctx *DoubleContext) interface{} + + // Visit a parse tree produced by CELParser#String. + VisitString(ctx *StringContext) interface{} + + // Visit a parse tree produced by CELParser#Bytes. 
+ VisitBytes(ctx *BytesContext) interface{}
+
+ // Visit a parse tree produced by CELParser#BoolTrue.
+ VisitBoolTrue(ctx *BoolTrueContext) interface{}
+
+ // Visit a parse tree produced by CELParser#BoolFalse.
+ VisitBoolFalse(ctx *BoolFalseContext) interface{}
+
+ // Visit a parse tree produced by CELParser#Null.
+ VisitNull(ctx *NullContext) interface{}
+
+} \ No newline at end of file diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/doc.go b/hack/tools/vendor/github.com/google/cel-go/parser/gen/doc.go new file mode 100644 index 0000000000..57edd4434d --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/doc.go @@ -0,0 +1,16 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package gen contains all of the ANTLR-generated sources used by the cel-go parser.
+package gen diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/gen/generate.sh b/hack/tools/vendor/github.com/google/cel-go/parser/gen/generate.sh new file mode 100644 index 0000000000..27a9559f7c --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/gen/generate.sh @@ -0,0 +1,33 @@
+#!/bin/bash -eu
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+# To regenerate the CEL lexer/parser statically do the following:
+# 1. Download the latest antlr tool from https://www.antlr.org/download.html
+# 2. Copy the downloaded jar to the gen directory. It will have a name
+# like antlr-<version>-complete.jar.
+# 3. Modify the script below to refer to the current ANTLR version.
+# 4. Execute the generation script from the gen directory.
+# 5. Delete the jar and commit the regenerated sources.

+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

+# Generate AntLR artifacts.
+java -Xmx500M -cp ${DIR}/antlr-4.13.1-complete.jar org.antlr.v4.Tool \
+ -Dlanguage=Go \
+ -package gen \
+ -o ${DIR} \
+ -visitor ${DIR}/CEL.g4
+ diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/helper.go b/hack/tools/vendor/github.com/google/cel-go/parser/helper.go new file mode 100644 index 0000000000..9f09ead0e7 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/helper.go @@ -0,0 +1,510 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "sync" + + antlr "github.com/antlr4-go/antlr/v4" + + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +type parserHelper struct { + exprFactory ast.ExprFactory + source common.Source + sourceInfo *ast.SourceInfo + nextID int64 +} + +func newParserHelper(source common.Source, fac ast.ExprFactory) *parserHelper { + return &parserHelper{ + exprFactory: fac, + source: source, + sourceInfo: ast.NewSourceInfo(source), + nextID: 1, + } +} + +func (p *parserHelper) getSourceInfo() *ast.SourceInfo { + return p.sourceInfo +} + +func (p *parserHelper) newLiteral(ctx any, value ref.Val) ast.Expr { + return p.exprFactory.NewLiteral(p.newID(ctx), value) +} + +func (p *parserHelper) newLiteralBool(ctx any, value bool) ast.Expr { + return p.newLiteral(ctx, types.Bool(value)) +} + +func (p *parserHelper) newLiteralString(ctx any, value string) ast.Expr { + return p.newLiteral(ctx, types.String(value)) +} + +func (p *parserHelper) newLiteralBytes(ctx any, value []byte) ast.Expr { + return p.newLiteral(ctx, types.Bytes(value)) +} + +func (p *parserHelper) newLiteralInt(ctx any, value int64) ast.Expr { + return p.newLiteral(ctx, types.Int(value)) +} + +func (p *parserHelper) newLiteralUint(ctx any, value uint64) ast.Expr { + return p.newLiteral(ctx, types.Uint(value)) +} + +func (p *parserHelper) newLiteralDouble(ctx any, value float64) ast.Expr { + return p.newLiteral(ctx, types.Double(value)) +} + +func (p *parserHelper) newIdent(ctx any, name string) ast.Expr { + return p.exprFactory.NewIdent(p.newID(ctx), name) +} + +func (p *parserHelper) newSelect(ctx any, operand ast.Expr, field string) ast.Expr { + return p.exprFactory.NewSelect(p.newID(ctx), operand, field) +} + +func (p *parserHelper) newPresenceTest(ctx any, operand ast.Expr, field string) ast.Expr { + return p.exprFactory.NewPresenceTest(p.newID(ctx), operand, field) +} + +func (p *parserHelper) newGlobalCall(ctx any, function string, args ...ast.Expr) ast.Expr { + return p.exprFactory.NewCall(p.newID(ctx), function, args...) +} + +func (p *parserHelper) newReceiverCall(ctx any, function string, target ast.Expr, args ...ast.Expr) ast.Expr { + return p.exprFactory.NewMemberCall(p.newID(ctx), function, target, args...) 
+} + +func (p *parserHelper) newList(ctx any, elements []ast.Expr, optionals ...int32) ast.Expr { + return p.exprFactory.NewList(p.newID(ctx), elements, optionals) +} + +func (p *parserHelper) newMap(ctx any, entries ...ast.EntryExpr) ast.Expr { + return p.exprFactory.NewMap(p.newID(ctx), entries) +} + +func (p *parserHelper) newMapEntry(entryID int64, key ast.Expr, value ast.Expr, optional bool) ast.EntryExpr { + return p.exprFactory.NewMapEntry(entryID, key, value, optional) +} + +func (p *parserHelper) newObject(ctx any, typeName string, fields ...ast.EntryExpr) ast.Expr { + return p.exprFactory.NewStruct(p.newID(ctx), typeName, fields) +} + +func (p *parserHelper) newObjectField(fieldID int64, field string, value ast.Expr, optional bool) ast.EntryExpr { + return p.exprFactory.NewStructField(fieldID, field, value, optional) +} + +func (p *parserHelper) newComprehension(ctx any, + iterRange ast.Expr, + iterVar, + accuVar string, + accuInit ast.Expr, + condition ast.Expr, + step ast.Expr, + result ast.Expr) ast.Expr { + return p.exprFactory.NewComprehension( + p.newID(ctx), iterRange, iterVar, accuVar, accuInit, condition, step, result) +} + +func (p *parserHelper) newComprehensionTwoVar(ctx any, + iterRange ast.Expr, + iterVar, iterVar2, + accuVar string, + accuInit ast.Expr, + condition ast.Expr, + step ast.Expr, + result ast.Expr) ast.Expr { + return p.exprFactory.NewComprehensionTwoVar( + p.newID(ctx), iterRange, iterVar, iterVar2, accuVar, accuInit, condition, step, result) +} + +func (p *parserHelper) newID(ctx any) int64 { + if id, isID := ctx.(int64); isID { + return id + } + return p.id(ctx) +} + +func (p *parserHelper) newExpr(ctx any) ast.Expr { + return p.exprFactory.NewUnspecifiedExpr(p.newID(ctx)) +} + +func (p *parserHelper) id(ctx any) int64 { + var offset ast.OffsetRange + switch c := ctx.(type) { + case antlr.ParserRuleContext: + start := c.GetStart() + offset.Start = p.sourceInfo.ComputeOffset(int32(start.GetLine()), int32(start.GetColumn())) + offset.Stop = offset.Start + int32(len(c.GetText())) + case antlr.Token: + offset.Start = p.sourceInfo.ComputeOffset(int32(c.GetLine()), int32(c.GetColumn())) + offset.Stop = offset.Start + int32(len(c.GetText())) + case common.Location: + offset.Start = p.sourceInfo.ComputeOffset(int32(c.Line()), int32(c.Column())) + offset.Stop = offset.Start + case ast.OffsetRange: + offset = c + default: + // This should only happen if the ctx is nil + return -1 + } + id := p.nextID + p.sourceInfo.SetOffsetRange(id, offset) + p.nextID++ + return id +} + +func (p *parserHelper) deleteID(id int64) { + p.sourceInfo.ClearOffsetRange(id) + if id == p.nextID-1 { + p.nextID-- + } +} + +func (p *parserHelper) getLocation(id int64) common.Location { + return p.sourceInfo.GetStartLocation(id) +} + +func (p *parserHelper) getLocationByOffset(offset int32) common.Location { + return p.getSourceInfo().GetLocationByOffset(offset) +} + +// buildMacroCallArg iterates the expression and returns a new expression +// where all macros have been replaced by their IDs in MacroCalls +func (p *parserHelper) buildMacroCallArg(expr ast.Expr) ast.Expr { + if _, found := p.sourceInfo.GetMacroCall(expr.ID()); found { + return p.exprFactory.NewUnspecifiedExpr(expr.ID()) + } + + switch expr.Kind() { + case ast.CallKind: + // Iterate the AST from `expr` recursively looking for macros. Because we are at most + // starting from the top level macro, this recursion is bounded by the size of the AST. 
This
+ // means that the depth check on the AST during parsing will catch recursion overflows
+ // before we get to here.
+ call := expr.AsCall()
+ macroArgs := make([]ast.Expr, len(call.Args()))
+ for index, arg := range call.Args() {
+ macroArgs[index] = p.buildMacroCallArg(arg)
+ }
+ if !call.IsMemberFunction() {
+ return p.exprFactory.NewCall(expr.ID(), call.FunctionName(), macroArgs...)
+ }
+ macroTarget := p.buildMacroCallArg(call.Target())
+ return p.exprFactory.NewMemberCall(expr.ID(), call.FunctionName(), macroTarget, macroArgs...)
+ case ast.ListKind:
+ list := expr.AsList()
+ macroListArgs := make([]ast.Expr, list.Size())
+ for i, elem := range list.Elements() {
+ macroListArgs[i] = p.buildMacroCallArg(elem)
+ }
+ return p.exprFactory.NewList(expr.ID(), macroListArgs, list.OptionalIndices())
+ }
+ return expr
+}
+
+// addMacroCall adds the macro to the MacroCalls map in source info. If a macro has args/subargs/target
+// that are macros, their ID will be stored instead for later self-lookups.
+func (p *parserHelper) addMacroCall(exprID int64, function string, target ast.Expr, args ...ast.Expr) {
+ macroArgs := make([]ast.Expr, len(args))
+ for index, arg := range args {
+ macroArgs[index] = p.buildMacroCallArg(arg)
+ }
+ if target == nil {
+ p.sourceInfo.SetMacroCall(exprID, p.exprFactory.NewCall(0, function, macroArgs...))
+ return
+ }
+ macroTarget := target
+ if _, found := p.sourceInfo.GetMacroCall(target.ID()); found {
+ macroTarget = p.exprFactory.NewUnspecifiedExpr(target.ID())
+ } else {
+ macroTarget = p.buildMacroCallArg(target)
+ }
+ p.sourceInfo.SetMacroCall(exprID, p.exprFactory.NewMemberCall(0, function, macroTarget, macroArgs...))
+}
+
+// logicManager compacts logical trees into a more efficient structure which is semantically
+// equivalent to how the logic graph is constructed by the ANTLR parser.
+//
+// The purpose of the logicManager is to ensure a compact serialization format for the logical &&, ||
+// operators which have a tendency to create long DAGs which are skewed in one direction. Since the
+// operators are commutative, re-ordering the terms *must not* affect the evaluation result.
+//
+// The logic manager will either render the terms to N-chained && / || operators as a single logical
+// call with N-terms, or will rebalance the tree. Rebalancing the terms is a safe, if somewhat
+// controversial choice as it alters the traditional order of execution assumptions present in most
+// expressions.
+type logicManager struct {
+ exprFactory ast.ExprFactory
+ function string
+ terms []ast.Expr
+ ops []int64
+ variadicASTs bool
+}
+
+// newVariadicLogicManager creates a logic manager instance bound to a specific function and its first term.
+func newVariadicLogicManager(fac ast.ExprFactory, function string, term ast.Expr) *logicManager {
+ return &logicManager{
+ exprFactory: fac,
+ function: function,
+ terms: []ast.Expr{term},
+ ops: []int64{},
+ variadicASTs: true,
+ }
+}
+
+// newBalancingLogicManager creates a logic manager instance bound to a specific function and its first term.
+func newBalancingLogicManager(fac ast.ExprFactory, function string, term ast.Expr) *logicManager {
+ return &logicManager{
+ exprFactory: fac,
+ function: function,
+ terms: []ast.Expr{term},
+ ops: []int64{},
+ variadicASTs: false,
+ }
+}
+
+// addTerm adds an operation identifier and term to the set of terms to be balanced.
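The rebalancing trade-off described above is easiest to see on a concrete input. Below is a standalone sketch (illustrative only, not part of the vendored sources) that mirrors the balancedTree recursion over plain strings, showing how four chained `&&` terms become a depth-two tree instead of a left-skewed chain:

```go
// Standalone sketch mirroring the balancedTree recursion: terms has N
// entries, and lo/hi index the N-1 conceptual operators between them.
package main

import "fmt"

func balanced(terms []string, lo, hi int) string {
	mid := (lo + hi + 1) / 2
	var left, right string
	if mid == lo {
		left = terms[mid]
	} else {
		left = balanced(terms, lo, mid-1)
	}
	if mid == hi {
		right = terms[mid+1]
	} else {
		right = balanced(terms, mid+1, hi)
	}
	return fmt.Sprintf("(%s && %s)", left, right)
}

func main() {
	terms := []string{"a", "b", "c", "d"}
	// Prints ((a && b) && (c && d)): depth stays logarithmic in the number
	// of terms instead of growing linearly as a left-skewed chain.
	fmt.Println(balanced(terms, 0, len(terms)-2))
}
```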
+func (l *logicManager) addTerm(op int64, term ast.Expr) { + l.terms = append(l.terms, term) + l.ops = append(l.ops, op) +} + +// toExpr renders the logic graph into an Expr value, either balancing a tree of logical +// operations or creating a variadic representation of the logical operator. +func (l *logicManager) toExpr() ast.Expr { + if len(l.terms) == 1 { + return l.terms[0] + } + if l.variadicASTs { + return l.exprFactory.NewCall(l.ops[0], l.function, l.terms...) + } + return l.balancedTree(0, len(l.ops)-1) +} + +// balancedTree recursively balances the terms provided to a commutative operator. +func (l *logicManager) balancedTree(lo, hi int) ast.Expr { + mid := (lo + hi + 1) / 2 + + var left ast.Expr + if mid == lo { + left = l.terms[mid] + } else { + left = l.balancedTree(lo, mid-1) + } + + var right ast.Expr + if mid == hi { + right = l.terms[mid+1] + } else { + right = l.balancedTree(mid+1, hi) + } + return l.exprFactory.NewCall(l.ops[mid], l.function, left, right) +} + +type exprHelper struct { + *parserHelper + id int64 +} + +func (e *exprHelper) nextMacroID() int64 { + return e.parserHelper.id(e.parserHelper.getLocation(e.id)) +} + +// Copy implements the ExprHelper interface method by producing a copy of the input Expr value +// with a fresh set of numeric identifiers the Expr and all its descendants. +func (e *exprHelper) Copy(expr ast.Expr) ast.Expr { + offsetRange, _ := e.parserHelper.sourceInfo.GetOffsetRange(expr.ID()) + copyID := e.parserHelper.newID(offsetRange) + switch expr.Kind() { + case ast.LiteralKind: + return e.exprFactory.NewLiteral(copyID, expr.AsLiteral()) + case ast.IdentKind: + return e.exprFactory.NewIdent(copyID, expr.AsIdent()) + case ast.SelectKind: + sel := expr.AsSelect() + op := e.Copy(sel.Operand()) + if sel.IsTestOnly() { + return e.exprFactory.NewPresenceTest(copyID, op, sel.FieldName()) + } + return e.exprFactory.NewSelect(copyID, op, sel.FieldName()) + case ast.CallKind: + call := expr.AsCall() + args := call.Args() + argsCopy := make([]ast.Expr, len(args)) + for i, arg := range args { + argsCopy[i] = e.Copy(arg) + } + if !call.IsMemberFunction() { + return e.exprFactory.NewCall(copyID, call.FunctionName(), argsCopy...) + } + return e.exprFactory.NewMemberCall(copyID, call.FunctionName(), e.Copy(call.Target()), argsCopy...) 
+ case ast.ListKind: + list := expr.AsList() + elems := list.Elements() + elemsCopy := make([]ast.Expr, len(elems)) + for i, elem := range elems { + elemsCopy[i] = e.Copy(elem) + } + return e.exprFactory.NewList(copyID, elemsCopy, list.OptionalIndices()) + case ast.MapKind: + m := expr.AsMap() + entries := m.Entries() + entriesCopy := make([]ast.EntryExpr, len(entries)) + for i, en := range entries { + entry := en.AsMapEntry() + entryID := e.nextMacroID() + entriesCopy[i] = e.exprFactory.NewMapEntry(entryID, + e.Copy(entry.Key()), e.Copy(entry.Value()), entry.IsOptional()) + } + return e.exprFactory.NewMap(copyID, entriesCopy) + case ast.StructKind: + s := expr.AsStruct() + fields := s.Fields() + fieldsCopy := make([]ast.EntryExpr, len(fields)) + for i, f := range fields { + field := f.AsStructField() + fieldID := e.nextMacroID() + fieldsCopy[i] = e.exprFactory.NewStructField(fieldID, + field.Name(), e.Copy(field.Value()), field.IsOptional()) + } + return e.exprFactory.NewStruct(copyID, s.TypeName(), fieldsCopy) + case ast.ComprehensionKind: + compre := expr.AsComprehension() + iterRange := e.Copy(compre.IterRange()) + accuInit := e.Copy(compre.AccuInit()) + cond := e.Copy(compre.LoopCondition()) + step := e.Copy(compre.LoopStep()) + result := e.Copy(compre.Result()) + // All comprehensions can be represented by the two-variable comprehension since the + // differentiation between one and two-variable is whether the iterVar2 value is non-empty. + return e.exprFactory.NewComprehensionTwoVar(copyID, + iterRange, compre.IterVar(), compre.IterVar2(), compre.AccuVar(), accuInit, cond, step, result) + } + return e.exprFactory.NewUnspecifiedExpr(copyID) +} + +// NewLiteral implements the ExprHelper interface method. +func (e *exprHelper) NewLiteral(value ref.Val) ast.Expr { + return e.exprFactory.NewLiteral(e.nextMacroID(), value) +} + +// NewList implements the ExprHelper interface method. +func (e *exprHelper) NewList(elems ...ast.Expr) ast.Expr { + return e.exprFactory.NewList(e.nextMacroID(), elems, []int32{}) +} + +// NewMap implements the ExprHelper interface method. +func (e *exprHelper) NewMap(entries ...ast.EntryExpr) ast.Expr { + return e.exprFactory.NewMap(e.nextMacroID(), entries) +} + +// NewMapEntry implements the ExprHelper interface method. +func (e *exprHelper) NewMapEntry(key ast.Expr, val ast.Expr, optional bool) ast.EntryExpr { + return e.exprFactory.NewMapEntry(e.nextMacroID(), key, val, optional) +} + +// NewStruct implements the ExprHelper interface method. +func (e *exprHelper) NewStruct(typeName string, fieldInits ...ast.EntryExpr) ast.Expr { + return e.exprFactory.NewStruct(e.nextMacroID(), typeName, fieldInits) +} + +// NewStructField implements the ExprHelper interface method. +func (e *exprHelper) NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr { + return e.exprFactory.NewStructField(e.nextMacroID(), field, init, optional) +} + +// NewComprehension implements the ExprHelper interface method. +func (e *exprHelper) NewComprehension( + iterRange ast.Expr, + iterVar string, + accuVar string, + accuInit ast.Expr, + condition ast.Expr, + step ast.Expr, + result ast.Expr) ast.Expr { + return e.exprFactory.NewComprehension( + e.nextMacroID(), iterRange, iterVar, accuVar, accuInit, condition, step, result) +} + +// NewComprehensionTwoVar implements the ExprHelper interface method. 
+func (e *exprHelper) NewComprehensionTwoVar( + iterRange ast.Expr, + iterVar, + iterVar2, + accuVar string, + accuInit, + condition, + step, + result ast.Expr) ast.Expr { + return e.exprFactory.NewComprehensionTwoVar( + e.nextMacroID(), iterRange, iterVar, iterVar2, accuVar, accuInit, condition, step, result) +} + +// NewIdent implements the ExprHelper interface method. +func (e *exprHelper) NewIdent(name string) ast.Expr { + return e.exprFactory.NewIdent(e.nextMacroID(), name) +} + +// NewAccuIdent implements the ExprHelper interface method. +func (e *exprHelper) NewAccuIdent() ast.Expr { + return e.exprFactory.NewAccuIdent(e.nextMacroID()) +} + +// NewGlobalCall implements the ExprHelper interface method. +func (e *exprHelper) NewCall(function string, args ...ast.Expr) ast.Expr { + return e.exprFactory.NewCall(e.nextMacroID(), function, args...) +} + +// NewMemberCall implements the ExprHelper interface method. +func (e *exprHelper) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr { + return e.exprFactory.NewMemberCall(e.nextMacroID(), function, target, args...) +} + +// NewPresenceTest implements the ExprHelper interface method. +func (e *exprHelper) NewPresenceTest(operand ast.Expr, field string) ast.Expr { + return e.exprFactory.NewPresenceTest(e.nextMacroID(), operand, field) +} + +// NewSelect implements the ExprHelper interface method. +func (e *exprHelper) NewSelect(operand ast.Expr, field string) ast.Expr { + return e.exprFactory.NewSelect(e.nextMacroID(), operand, field) +} + +// OffsetLocation implements the ExprHelper interface method. +func (e *exprHelper) OffsetLocation(exprID int64) common.Location { + return e.parserHelper.sourceInfo.GetStartLocation(exprID) +} + +// NewError associates an error message with a given expression id, populating the source offset location of the error if possible. +func (e *exprHelper) NewError(exprID int64, message string) *common.Error { + return common.NewError(exprID, message, e.OffsetLocation(exprID)) +} + +var ( + // Thread-safe pool of ExprHelper values to minimize alloc overhead of ExprHelper creations. + exprHelperPool = &sync.Pool{ + New: func() any { + return &exprHelper{} + }, + } +) diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/input.go b/hack/tools/vendor/github.com/google/cel-go/parser/input.go new file mode 100644 index 0000000000..44792455d8 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/input.go @@ -0,0 +1,129 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + antlr "github.com/antlr4-go/antlr/v4" + + "github.com/google/cel-go/common/runes" +) + +type charStream struct { + buf runes.Buffer + pos int + src string +} + +// Consume implements (antlr.CharStream).Consume. +func (c *charStream) Consume() { + if c.pos >= c.buf.Len() { + panic("cannot consume EOF") + } + c.pos++ +} + +// LA implements (antlr.CharStream).LA. 
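For orientation, the CharStream contract that charStream implements below can be exercised through antlr's own exported InputStream, which obeys the same conventions: LA(1) peeks at the next unconsumed rune, negative offsets look backward, and reads past the end yield EOF. A minimal sketch, assuming the github.com/antlr4-go/antlr/v4 module vendored in this change:

```go
// Illustrative sketch of the antlr.CharStream lookahead contract (it runs
// against antlr's exported InputStream; charStream itself is unexported
// but implements the same interface).
package main

import (
	"fmt"

	antlr "github.com/antlr4-go/antlr/v4"
)

func main() {
	in := antlr.NewInputStream("ab")
	fmt.Println(in.LA(1) == 'a') // LA(1) peeks at the next unconsumed rune
	in.Consume()
	fmt.Println(in.LA(-1) == 'a') // negative offsets look backward
	fmt.Println(in.LA(1) == 'b')
	in.Consume()
	fmt.Println(in.LA(1) == antlr.TokenEOF) // reads past the end yield EOF
}
```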
+func (c *charStream) LA(offset int) int { + if offset == 0 { + return 0 + } + if offset < 0 { + offset++ + } + pos := c.pos + offset - 1 + if pos < 0 || pos >= c.buf.Len() { + return antlr.TokenEOF + } + return int(c.buf.Get(pos)) +} + +// LT mimics (*antlr.InputStream).LT. +func (c *charStream) LT(offset int) int { + return c.LA(offset) +} + +// Mark implements (antlr.CharStream).Mark. +func (c *charStream) Mark() int { + return -1 +} + +// Release implements (antlr.CharStream).Release. +func (c *charStream) Release(marker int) {} + +// Index implements (antlr.CharStream).Index. +func (c *charStream) Index() int { + return c.pos +} + +// Seek implements (antlr.CharStream).Seek. +func (c *charStream) Seek(index int) { + if index <= c.pos { + c.pos = index + return + } + if index < c.buf.Len() { + c.pos = index + } else { + c.pos = c.buf.Len() + } +} + +// Size implements (antlr.CharStream).Size. +func (c *charStream) Size() int { + return c.buf.Len() +} + +// GetSourceName implements (antlr.CharStream).GetSourceName. +func (c *charStream) GetSourceName() string { + return c.src +} + +// GetText implements (antlr.CharStream).GetText. +func (c *charStream) GetText(start, stop int) string { + if stop >= c.buf.Len() { + stop = c.buf.Len() - 1 + } + if start >= c.buf.Len() { + return "" + } + return c.buf.Slice(start, stop+1) +} + +// GetTextFromTokens implements (antlr.CharStream).GetTextFromTokens. +func (c *charStream) GetTextFromTokens(start, stop antlr.Token) string { + if start != nil && stop != nil { + return c.GetText(start.GetTokenIndex(), stop.GetTokenIndex()) + } + return "" +} + +// GetTextFromInterval implements (antlr.CharStream).GetTextFromInterval. +func (c *charStream) GetTextFromInterval(i antlr.Interval) string { + return c.GetText(i.Start, i.Stop) +} + +// String mimics (*antlr.InputStream).String. +func (c *charStream) String() string { + return c.buf.Slice(0, c.buf.Len()) +} + +var _ antlr.CharStream = &charStream{} + +func newCharStream(buf runes.Buffer, desc string) antlr.CharStream { + return &charStream{ + buf: buf, + src: desc, + } +} diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/macro.go b/hack/tools/vendor/github.com/google/cel-go/parser/macro.go new file mode 100644 index 0000000000..dc47b4203c --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/macro.go @@ -0,0 +1,428 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "fmt" + + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// NewGlobalMacro creates a Macro for a global function with the specified arg count. 
+func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
+ return &macro{
+ function: function,
+ argCount: argCount,
+ expander: expander}
+}
+
+// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
+ return &macro{
+ function: function,
+ argCount: argCount,
+ expander: expander,
+ receiverStyle: true}
+}
+
+// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
+ return &macro{
+ function: function,
+ expander: expander,
+ varArgStyle: true}
+}
+
+// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
+ return &macro{
+ function: function,
+ expander: expander,
+ receiverStyle: true,
+ varArgStyle: true}
+}
+
+// Macro interface for describing the function signature to match and the MacroExpander to apply.
+//
+// Note: when a Macro should apply to multiple overloads (based on arg count) of a given function,
+// a Macro should be created per arg-count.
+type Macro interface {
+ // Function name to match.
+ Function() string
+
+ // ArgCount for the function call.
+ //
+ // When the macro is a var-arg style macro, the return value will be zero, but the MacroKey
+ // will contain a `*` where the arg count would have been.
+ ArgCount() int
+
+ // IsReceiverStyle returns true if the macro matches a receiver style call.
+ IsReceiverStyle() bool
+
+ // MacroKey returns the macro signatures accepted by this macro.
+ //
+ // Format: `<function>:<arg-count>:<is-receiver>`.
+ //
+ // When the macro is a var-arg style macro, the `arg-count` value is represented as a `*`.
+ MacroKey() string
+
+ // Expander returns the MacroExpander to apply when the macro key matches the parsed call
+ // signature.
+ Expander() MacroExpander
+}
+
+// Macro type which declares the function name and arg count expected for the
+// macro, as well as a macro expansion function.
+type macro struct {
+ function string
+ receiverStyle bool
+ varArgStyle bool
+ argCount int
+ expander MacroExpander
+}
+
+// Function returns the macro's function name (i.e. the function whose syntax it mimics).
+func (m *macro) Function() string {
+ return m.function
+}
+
+// ArgCount returns the number of arguments the macro expects.
+func (m *macro) ArgCount() int {
+ return m.argCount
+}
+
+// IsReceiverStyle returns whether the macro is receiver style.
+func (m *macro) IsReceiverStyle() bool {
+ return m.receiverStyle
+}
+
+// Expander implements the Macro interface method.
+func (m *macro) Expander() MacroExpander {
+ return m.expander
+}
+
+// MacroKey implements the Macro interface method.
+func (m *macro) MacroKey() string {
+ if m.varArgStyle {
+ return makeVarArgMacroKey(m.function, m.receiverStyle)
+ }
+ return makeMacroKey(m.function, m.argCount, m.receiverStyle)
+}
+
+func makeMacroKey(name string, args int, receiverStyle bool) string {
+ return fmt.Sprintf("%s:%d:%v", name, args, receiverStyle)
+}
+
+func makeVarArgMacroKey(name string, receiverStyle bool) string {
+ return fmt.Sprintf("%s:*:%v", name, receiverStyle)
+}
+
+// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree.
+//
+// If the MacroExpander determines within the implementation that an expansion is not needed it may return
+// a nil Expr value to indicate a non-match.
However, if an expansion is to be performed, but the arguments
+// are not well-formed, the result of the expansion will be an error.
+//
+// The MacroExpander accepts as arguments an ExprHelper as well as the arguments used in the function call
+// and produces as output an Expr ast node.
+//
+// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil.
+type MacroExpander func(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error)
+
+// ExprHelper assists with the creation of Expr values in a manner which is consistent with
+// the internal semantics and id generation behaviors of the parser and checker libraries.
+type ExprHelper interface {
+ // Copy the input expression with a brand new set of identifiers.
+ Copy(ast.Expr) ast.Expr
+
+ // NewLiteral creates an Expr value for a scalar literal value.
+ NewLiteral(value ref.Val) ast.Expr
+
+ // NewList creates a list literal instruction with an optional set of elements.
+ NewList(elems ...ast.Expr) ast.Expr
+
+ // NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+ // optional set of key, value entries.
+ NewMap(entries ...ast.EntryExpr) ast.Expr
+
+ // NewMapEntry creates a Map Entry for the key, value pair.
+ NewMapEntry(key ast.Expr, val ast.Expr, optional bool) ast.EntryExpr
+
+ // NewStruct creates a struct literal expression with an optional set of field initializers.
+ NewStruct(typeName string, fieldInits ...ast.EntryExpr) ast.Expr
+
+ // NewStructField creates a new struct field initializer from the field name and value.
+ NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr
+
+ // NewComprehension creates a new one-variable comprehension instruction.
+ //
+ // - iterRange represents the expression that resolves to a list or map where the elements or
+ // keys (respectively) will be iterated over.
+ // - iterVar is the variable name for the list element value, or the map key, depending on the
+ // range type.
+ // - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+ // - accuInit is the initial expression whose value will be set for the accuVar prior to
+ // folding.
+ // - condition is the expression to test to determine whether to continue folding.
+ // - step is the expression to evaluate at the conclusion of a single fold iteration.
+ // - result is the computation to evaluate at the conclusion of the fold.
+ //
+ // The accuVar should not shadow variable names that you would like to reference within the
+ // environment in the step and condition expressions. Presently, the name __result__ is commonly
+ // used by built-in macros but this may change in the future.
+ NewComprehension(iterRange ast.Expr,
+ iterVar,
+ accuVar string,
+ accuInit,
+ condition,
+ step,
+ result ast.Expr) ast.Expr
+
+ // NewComprehensionTwoVar creates a new two-variable comprehension instruction.
+ //
+ // - iterRange represents the expression that resolves to a list or map where the elements or
+ // keys (respectively) will be iterated over.
+ // - iterVar is the iteration variable assigned to the list index or the map key.
+ // - iterVar2 is the iteration variable assigned to the list element value or the map key value.
+ // - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+ // - accuInit is the initial expression whose value will be set for the accuVar prior to
+ // folding.
+ // - condition is the expression to test to determine whether to continue folding.
+ // - step is the expression to evaluate at the conclusion of a single fold iteration.
+ // - result is the computation to evaluate at the conclusion of the fold.
+ //
+ // The accuVar should not shadow variable names that you would like to reference within the
+ // environment in the step and condition expressions. Presently, the name __result__ is commonly
+ // used by built-in macros but this may change in the future.
+ NewComprehensionTwoVar(iterRange ast.Expr,
+ iterVar,
+ iterVar2,
+ accuVar string,
+ accuInit,
+ condition,
+ step,
+ result ast.Expr) ast.Expr
+
+ // NewIdent creates an identifier Expr value.
+ NewIdent(name string) ast.Expr
+
+ // NewAccuIdent returns an accumulator identifier for use with comprehension results.
+ NewAccuIdent() ast.Expr
+
+ // NewCall creates a function call Expr value for a global (free) function.
+ NewCall(function string, args ...ast.Expr) ast.Expr
+
+ // NewMemberCall creates a function call Expr value for a receiver-style function.
+ NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr
+
+ // NewPresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+ NewPresenceTest(operand ast.Expr, field string) ast.Expr
+
+ // NewSelect creates a field traversal Expr value.
+ NewSelect(operand ast.Expr, field string) ast.Expr
+
+ // OffsetLocation returns the Location of the expression identifier.
+ OffsetLocation(exprID int64) common.Location
+
+ // NewError associates an error message with a given expression id.
+ NewError(exprID int64, message string) *common.Error
+}
+
+var (
+ // HasMacro expands "has(m.f)" which tests the presence of a field, avoiding the need to
+ // specify the field as a string.
+ HasMacro = NewGlobalMacro(operators.Has, 1, MakeHas)
+
+ // AllMacro expands "range.all(var, predicate)" into a comprehension which ensures that all
+ // elements in the range satisfy the predicate.
+ AllMacro = NewReceiverMacro(operators.All, 2, MakeAll)
+
+ // ExistsMacro expands "range.exists(var, predicate)" into a comprehension which ensures that
+ // some element in the range satisfies the predicate.
+ ExistsMacro = NewReceiverMacro(operators.Exists, 2, MakeExists)
+
+ // ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one
+ // element in range the predicate holds.
+ ExistsOneMacro = NewReceiverMacro(operators.ExistsOne, 2, MakeExistsOne)
+
+ // MapMacro expands "range.map(var, function)" into a comprehension which applies the function
+ // to each element in the range to produce a new list.
+ MapMacro = NewReceiverMacro(operators.Map, 2, MakeMap)
+
+ // MapFilterMacro expands "range.map(var, predicate, function)" into a comprehension which
+ // first filters the elements in the range by the predicate, then applies the transform function
+ // to produce a new list.
+ MapFilterMacro = NewReceiverMacro(operators.Map, 3, MakeMap)
+
+ // FilterMacro expands "range.filter(var, predicate)" into a comprehension which filters
+ // elements in the range, producing a new list from the elements that satisfy the predicate.
+ FilterMacro = NewReceiverMacro(operators.Filter, 2, MakeFilter)
+
+ // AllMacros includes the list of all spec-supported macros.
+ AllMacros = []Macro{
+ HasMacro,
+ AllMacro,
+ ExistsMacro,
+ ExistsOneMacro,
+ MapMacro,
+ MapFilterMacro,
+ FilterMacro,
+ }
+
+ // NoMacros list.
+ NoMacros = []Macro{}
+)
+
+// AccumulatorName is the traditional variable name assigned to the fold accumulator variable.
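Because Macro, MacroKey, and AllMacros are all exported, the key format documented above can be inspected directly. A small sketch, assuming only the exported parser API introduced in this file:

```go
// Sketch enumerating the spec macros and the lookup keys the parser
// matches call signatures against (format <function>:<arg-count>:<is-receiver>).
package main

import (
	"fmt"

	"github.com/google/cel-go/parser"
)

func main() {
	for _, m := range parser.AllMacros {
		fmt.Println(m.MacroKey()) // e.g. "has:1:false", "all:2:true", "map:3:true"
	}
}
```

Keys such as has:1:false and map:3:true show why a separate Macro is registered per arg count: the count is part of the lookup key.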
+const AccumulatorName = "__result__"
+
+type quantifierKind int
+
+const (
+ quantifierAll quantifierKind = iota
+ quantifierExists
+ quantifierExistsOne
+)
+
+// MakeAll expands the input call arguments into a comprehension that returns true if all of the
+// elements in the range match the predicate expressions:
+// <iterRange>.all(<iterVar>, <predicate>)
+func MakeAll(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ return makeQuantifier(quantifierAll, eh, target, args)
+}
+
+// MakeExists expands the input call arguments into a comprehension that returns true if any of the
+// elements in the range match the predicate expressions:
+// <iterRange>.exists(<iterVar>, <predicate>)
+func MakeExists(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ return makeQuantifier(quantifierExists, eh, target, args)
+}
+
+// MakeExistsOne expands the input call arguments into a comprehension that returns true if exactly
+// one of the elements in the range match the predicate expressions:
+// <iterRange>.exists_one(<iterVar>, <predicate>)
+func MakeExistsOne(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ return makeQuantifier(quantifierExistsOne, eh, target, args)
+}
+
+// MakeMap expands the input call arguments into a comprehension that transforms each element in the
+// input to produce an output list.
+//
+// There are two call patterns supported by map:
+//
+// <iterRange>.map(<iterVar>, <transform>)
+// <iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
+// In the second form only iterVar values which return true when provided to the predicate expression
+// are transformed.
+func MakeMap(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ v, found := extractIdent(args[0])
+ if !found {
+ return nil, eh.NewError(args[0].ID(), "argument is not an identifier")
+ }
+
+ var fn ast.Expr
+ var filter ast.Expr
+
+ if len(args) == 3 {
+ filter = args[1]
+ fn = args[2]
+ } else {
+ filter = nil
+ fn = args[1]
+ }
+
+ init := eh.NewList()
+ condition := eh.NewLiteral(types.True)
+ step := eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewList(fn))
+
+ if filter != nil {
+ step = eh.NewCall(operators.Conditional, filter, step, eh.NewAccuIdent())
+ }
+ return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, eh.NewAccuIdent()), nil
+}
+
+// MakeFilter expands the input call arguments into a comprehension which produces a list which contains
+// only elements which match the provided predicate expression:
+// <iterRange>.filter(<iterVar>, <predicate>)
+func MakeFilter(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ v, found := extractIdent(args[0])
+ if !found {
+ return nil, eh.NewError(args[0].ID(), "argument is not an identifier")
+ }
+
+ filter := args[1]
+ init := eh.NewList()
+ condition := eh.NewLiteral(types.True)
+ step := eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewList(args[0]))
+ step = eh.NewCall(operators.Conditional, filter, step, eh.NewAccuIdent())
+ return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, eh.NewAccuIdent()), nil
+}
+
+// MakeHas expands the input call arguments into a presence test, e.g.
has(<operand>.field)
+func MakeHas(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ if args[0].Kind() == ast.SelectKind {
+ s := args[0].AsSelect()
+ return eh.NewPresenceTest(s.Operand(), s.FieldName()), nil
+ }
+ return nil, eh.NewError(args[0].ID(), "invalid argument to has() macro")
+}
+
+func makeQuantifier(kind quantifierKind, eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+ v, found := extractIdent(args[0])
+ if !found {
+ return nil, eh.NewError(args[0].ID(), "argument must be a simple name")
+ }
+
+ var init ast.Expr
+ var condition ast.Expr
+ var step ast.Expr
+ var result ast.Expr
+ switch kind {
+ case quantifierAll:
+ init = eh.NewLiteral(types.True)
+ condition = eh.NewCall(operators.NotStrictlyFalse, eh.NewAccuIdent())
+ step = eh.NewCall(operators.LogicalAnd, eh.NewAccuIdent(), args[1])
+ result = eh.NewAccuIdent()
+ case quantifierExists:
+ init = eh.NewLiteral(types.False)
+ condition = eh.NewCall(
+ operators.NotStrictlyFalse,
+ eh.NewCall(operators.LogicalNot, eh.NewAccuIdent()))
+ step = eh.NewCall(operators.LogicalOr, eh.NewAccuIdent(), args[1])
+ result = eh.NewAccuIdent()
+ case quantifierExistsOne:
+ init = eh.NewLiteral(types.Int(0))
+ condition = eh.NewLiteral(types.True)
+ step = eh.NewCall(operators.Conditional, args[1],
+ eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1))), eh.NewAccuIdent())
+ result = eh.NewCall(operators.Equals, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1)))
+ default:
+ return nil, eh.NewError(args[0].ID(), fmt.Sprintf("unrecognized quantifier '%v'", kind))
+ }
+ return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, result), nil
+}
+
+func extractIdent(e ast.Expr) (string, bool) {
+ switch e.Kind() {
+ case ast.IdentKind:
+ return e.AsIdent(), true
+ }
+ return "", false
+} diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/options.go b/hack/tools/vendor/github.com/google/cel-go/parser/options.go new file mode 100644 index 0000000000..61fc3adec4 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/options.go @@ -0,0 +1,140 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import "fmt"
+
+type options struct {
+ maxRecursionDepth int
+ errorReportingLimit int
+ errorRecoveryTokenLookaheadLimit int
+ errorRecoveryLimit int
+ expressionSizeCodePointLimit int
+ macros map[string]Macro
+ populateMacroCalls bool
+ enableOptionalSyntax bool
+ enableVariadicOperatorASTs bool
+}
+
+// Option configures the behavior of the parser.
+type Option func(*options) error
+
+// MaxRecursionDepth limits the maximum depth the parser will attempt to parse the expression before giving up.
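To see makeQuantifier's output shape from the outside, the sketch below parses an exists() call and checks that the macro was rewritten into a comprehension. Hedged: common.NewTextSource comes from the vendored cel-go common package and is not shown in this diff.

```go
// The exists() call parses into a comprehension whose accumulator starts
// false and is OR-ed with the predicate for each element of the range.
package main

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/parser"
)

func main() {
	p, err := parser.NewParser(parser.Macros(parser.ExistsMacro))
	if err != nil {
		panic(err)
	}
	parsed, errs := p.Parse(common.NewTextSource(`[1, 2, 3].exists(x, x > 2)`))
	if len(errs.GetErrors()) > 0 {
		panic(errs.ToDisplayString())
	}
	fmt.Println(parsed.Expr().Kind() == ast.ComprehensionKind) // true
}
```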
+func MaxRecursionDepth(limit int) Option {
+ return func(opts *options) error {
+ if limit < -1 {
+ return fmt.Errorf("max recursion depth must be greater than or equal to -1: %d", limit)
+ }
+ opts.maxRecursionDepth = limit
+ return nil
+ }
+}
+
+// ErrorRecoveryLookaheadTokenLimit limits the number of lexer tokens that may be considered during error recovery.
+//
+// Error recovery often involves looking ahead in the input to determine if there's a point at which parsing may
+// successfully resume. In some pathological cases, the parser can look through quite a large set of input which
+// in turn generates a lot of back-tracking and performance degradation.
+//
+// The limit must be >= 1, and is recommended to be less than the default of 256.
+func ErrorRecoveryLookaheadTokenLimit(limit int) Option {
+ return func(opts *options) error {
+ if limit < 1 {
+ return fmt.Errorf("error recovery lookahead token limit must be at least 1: %d", limit)
+ }
+ opts.errorRecoveryTokenLookaheadLimit = limit
+ return nil
+ }
+}
+
+// ErrorRecoveryLimit limits the number of attempts the parser will perform to recover from an error.
+func ErrorRecoveryLimit(limit int) Option {
+ return func(opts *options) error {
+ if limit < -1 {
+ return fmt.Errorf("error recovery limit must be greater than or equal to -1: %d", limit)
+ }
+ opts.errorRecoveryLimit = limit
+ return nil
+ }
+}
+
+// ErrorReportingLimit limits the number of syntax error reports before terminating parsing.
+//
+// The limit must be at least 1. If unset, the limit will be 100.
+func ErrorReportingLimit(limit int) Option {
+ return func(opts *options) error {
+ if limit < 1 {
+ return fmt.Errorf("error reporting limit must be at least 1: %d", limit)
+ }
+ opts.errorReportingLimit = limit
+ return nil
+ }
+}
+
+// ExpressionSizeCodePointLimit is an option which limits the maximum code point count of an
+// expression.
+func ExpressionSizeCodePointLimit(expressionSizeCodePointLimit int) Option {
+ return func(opts *options) error {
+ if expressionSizeCodePointLimit < -1 {
+ return fmt.Errorf("expression size code point limit must be greater than or equal to -1: %d", expressionSizeCodePointLimit)
+ }
+ opts.expressionSizeCodePointLimit = expressionSizeCodePointLimit
+ return nil
+ }
+}
+
+// Macros adds the given macros to the parser.
+func Macros(macros ...Macro) Option {
+ return func(opts *options) error {
+ for _, m := range macros {
+ if m != nil {
+ if opts.macros == nil {
+ opts.macros = make(map[string]Macro)
+ }
+ opts.macros[m.MacroKey()] = m
+ }
+ }
+ return nil
+ }
+}
+
+// PopulateMacroCalls ensures that the original call signatures replaced by expanded macros
+// are preserved in the `SourceInfo` of parse result.
+func PopulateMacroCalls(populateMacroCalls bool) Option {
+ return func(opts *options) error {
+ opts.populateMacroCalls = populateMacroCalls
+ return nil
+ }
+}
+
+// EnableOptionalSyntax enables syntax for optional field and index selection.
+func EnableOptionalSyntax(optionalSyntax bool) Option {
+ return func(opts *options) error {
+ opts.enableOptionalSyntax = optionalSyntax
+ return nil
+ }
+}
+
+// EnableVariadicOperatorASTs enables a compact representation of chained like-kind commutative
+// operators. e.g. `a || b || c || d` -> `call(op='||', args=[a, b, c, d])`
+//
+// The benefit of enabling variadic operators ASTs is a more compact representation of deeply nested
+// logic graphs.
+func EnableVariadicOperatorASTs(varArgASTs bool) Option {
+ return func(opts *options) error {
+ opts.enableVariadicOperatorASTs = varArgASTs
+ return nil
+ }
+} diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/parser.go b/hack/tools/vendor/github.com/google/cel-go/parser/parser.go new file mode 100644 index 0000000000..5cbb176727 --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/parser.go @@ -0,0 +1,1012 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package parser declares an expression parser with support for macro
+// expansion.
+package parser
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ antlr "github.com/antlr4-go/antlr/v4"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/runes"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/parser/gen"
+)
+
+// Parser encapsulates the context necessary to perform parsing for different expressions.
+type Parser struct {
+ options
+}
+
+// NewParser builds and returns a new Parser using the provided options.
+func NewParser(opts ...Option) (*Parser, error) {
+ p := &Parser{}
+ for _, opt := range opts {
+ if err := opt(&p.options); err != nil {
+ return nil, err
+ }
+ }
+ if p.errorReportingLimit == 0 {
+ p.errorReportingLimit = 100
+ }
+ if p.maxRecursionDepth == 0 {
+ p.maxRecursionDepth = 250
+ }
+ if p.maxRecursionDepth == -1 {
+ p.maxRecursionDepth = int((^uint(0)) >> 1)
+ }
+ if p.errorRecoveryTokenLookaheadLimit == 0 {
+ p.errorRecoveryTokenLookaheadLimit = 256
+ }
+ if p.errorRecoveryLimit == 0 {
+ p.errorRecoveryLimit = 30
+ }
+ if p.errorRecoveryLimit == -1 {
+ p.errorRecoveryLimit = int((^uint(0)) >> 1)
+ }
+ if p.expressionSizeCodePointLimit == 0 {
+ p.expressionSizeCodePointLimit = 100_000
+ }
+ if p.expressionSizeCodePointLimit == -1 {
+ p.expressionSizeCodePointLimit = int((^uint(0)) >> 1)
+ }
+ // Bool is false by default, so populateMacroCalls will be false by default
+ return p, nil
+}
+
+// mustNewParser does the work of NewParser and panics if an error occurs.
+//
+// This function is only intended for internal use and is for backwards compatibility in Parse and
+// ParseWithMacros, where we know the options will not result in an error.
+func mustNewParser(opts ...Option) *Parser {
+ p, err := NewParser(opts...)
+ if err != nil {
+ panic(err)
+ }
+ return p
+}
+
+// Parse parses the expression represented by source and returns the result.
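Taken together, the options above and the Parser type compose as follows. A usage sketch, again assuming common.NewTextSource from the vendored common package:

```go
// Construct a Parser with explicit limits and macro support, then parse a
// source string and inspect the root of the resulting AST.
package main

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/parser"
)

func main() {
	p, err := parser.NewParser(
		parser.Macros(parser.AllMacros...),
		parser.MaxRecursionDepth(32),
		parser.ErrorRecoveryLimit(4),
	)
	if err != nil {
		panic(err)
	}
	parsed, errs := p.Parse(common.NewTextSource(`has(msg.field) && [1, 2].all(x, x > 0)`))
	if len(errs.GetErrors()) > 0 {
		fmt.Println(errs.ToDisplayString())
		return
	}
	fmt.Println(parsed.Expr().AsCall().FunctionName()) // _&&_
}
```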
+func (p *Parser) Parse(source common.Source) (*ast.AST, *common.Errors) {
+ errs := common.NewErrors(source)
+ fac := ast.NewExprFactory()
+ impl := parser{
+ errors: &parseErrors{errs},
+ exprFactory: fac,
+ helper: newParserHelper(source, fac),
+ macros: p.macros,
+ maxRecursionDepth: p.maxRecursionDepth,
+ errorReportingLimit: p.errorReportingLimit,
+ errorRecoveryLimit: p.errorRecoveryLimit,
+ errorRecoveryLookaheadTokenLimit: p.errorRecoveryTokenLookaheadLimit,
+ populateMacroCalls: p.populateMacroCalls,
+ enableOptionalSyntax: p.enableOptionalSyntax,
+ enableVariadicOperatorASTs: p.enableVariadicOperatorASTs,
+ }
+ buf, ok := source.(runes.Buffer)
+ if !ok {
+ buf = runes.NewBuffer(source.Content())
+ }
+ var out ast.Expr
+ if buf.Len() > p.expressionSizeCodePointLimit {
+ out = impl.reportError(common.NoLocation,
+ "expression code point size exceeds limit: size: %d, limit %d",
+ buf.Len(), p.expressionSizeCodePointLimit)
+ } else {
+ out = impl.parse(buf, source.Description())
+ }
+ return ast.NewAST(out, impl.helper.getSourceInfo()), errs
+}
+
+// reservedIds are not legal to use as variables. We exclude them post-parse, as they *are* valid
+// field names for protos, and it would complicate the grammar to distinguish the cases.
+var reservedIds = map[string]struct{}{
+ "as": {},
+ "break": {},
+ "const": {},
+ "continue": {},
+ "else": {},
+ "false": {},
+ "for": {},
+ "function": {},
+ "if": {},
+ "import": {},
+ "in": {},
+ "let": {},
+ "loop": {},
+ "package": {},
+ "namespace": {},
+ "null": {},
+ "return": {},
+ "true": {},
+ "var": {},
+ "void": {},
+ "while": {},
+}
+
+// Parse converts a source input into a parsed expression.
+// This function calls ParseWithMacros with AllMacros.
+//
+// Deprecated: Use NewParser().Parse() instead.
+func Parse(source common.Source) (*ast.AST, *common.Errors) {
+ return mustNewParser(Macros(AllMacros...)).Parse(source)
+}
+
+type recursionError struct {
+ message string
+}
+
+// Error implements error.
+func (re *recursionError) Error() string {
+ return re.message
+}
+
+var _ error = &recursionError{}
+
+type recursionListener struct {
+ maxDepth int
+ ruleTypeDepth map[int]*int
+}
+
+func (rl *recursionListener) VisitTerminal(node antlr.TerminalNode) {}
+
+func (rl *recursionListener) VisitErrorNode(node antlr.ErrorNode) {}
+
+func (rl *recursionListener) EnterEveryRule(ctx antlr.ParserRuleContext) {
+ if ctx == nil {
+ return
+ }
+ ruleIndex := ctx.GetRuleIndex()
+ depth, found := rl.ruleTypeDepth[ruleIndex]
+ if !found {
+ var counter = 1
+ rl.ruleTypeDepth[ruleIndex] = &counter
+ depth = &counter
+ } else {
+ *depth++
+ }
+ if *depth > rl.maxDepth {
+ panic(&recursionError{
+ message: fmt.Sprintf("expression recursion limit exceeded: %d", rl.maxDepth),
+ })
+ }
+}
+
+func (rl *recursionListener) ExitEveryRule(ctx antlr.ParserRuleContext) {
+ if ctx == nil {
+ return
+ }
+ ruleIndex := ctx.GetRuleIndex()
+ if depth, found := rl.ruleTypeDepth[ruleIndex]; found && *depth > 0 {
+ *depth--
+ }
+}
+
+var _ antlr.ParseTreeListener = &recursionListener{}
+
+type tooManyErrors struct {
+ errorReportingLimit int
+}
+
+func (t *tooManyErrors) Error() string {
+ return fmt.Sprintf("More than %d syntax errors", t.errorReportingLimit)
+}
+
+var _ error = &tooManyErrors{}
+
+type recoveryLimitError struct {
+ message string
+}
+
+// Error implements error.
+func (rl *recoveryLimitError) Error() string { + return rl.message +} + +type lookaheadLimitError struct { + message string +} + +func (ll *lookaheadLimitError) Error() string { + return ll.message +} + +var _ error = &recoveryLimitError{} + +type recoveryLimitErrorStrategy struct { + *antlr.DefaultErrorStrategy + errorRecoveryLimit int + errorRecoveryTokenLookaheadLimit int + recoveryAttempts int +} + +type lookaheadConsumer struct { + antlr.Parser + errorRecoveryTokenLookaheadLimit int + lookaheadAttempts int +} + +func (lc *lookaheadConsumer) Consume() antlr.Token { + if lc.lookaheadAttempts >= lc.errorRecoveryTokenLookaheadLimit { + panic(&lookaheadLimitError{ + message: fmt.Sprintf("error recovery token lookahead limit exceeded: %d", lc.errorRecoveryTokenLookaheadLimit), + }) + } + lc.lookaheadAttempts++ + return lc.Parser.Consume() +} + +func (rl *recoveryLimitErrorStrategy) Recover(recognizer antlr.Parser, e antlr.RecognitionException) { + rl.checkAttempts(recognizer) + lc := &lookaheadConsumer{Parser: recognizer, errorRecoveryTokenLookaheadLimit: rl.errorRecoveryTokenLookaheadLimit} + rl.DefaultErrorStrategy.Recover(lc, e) +} + +func (rl *recoveryLimitErrorStrategy) RecoverInline(recognizer antlr.Parser) antlr.Token { + rl.checkAttempts(recognizer) + lc := &lookaheadConsumer{Parser: recognizer, errorRecoveryTokenLookaheadLimit: rl.errorRecoveryTokenLookaheadLimit} + return rl.DefaultErrorStrategy.RecoverInline(lc) +} + +func (rl *recoveryLimitErrorStrategy) checkAttempts(recognizer antlr.Parser) { + if rl.recoveryAttempts == rl.errorRecoveryLimit { + rl.recoveryAttempts++ + msg := fmt.Sprintf("error recovery attempt limit exceeded: %d", rl.errorRecoveryLimit) + recognizer.NotifyErrorListeners(msg, nil, nil) + panic(&recoveryLimitError{ + message: msg, + }) + } + rl.recoveryAttempts++ +} + +var _ antlr.ErrorStrategy = &recoveryLimitErrorStrategy{} + +type parser struct { + gen.BaseCELVisitor + errors *parseErrors + exprFactory ast.ExprFactory + helper *parserHelper + macros map[string]Macro + recursionDepth int + errorReports int + maxRecursionDepth int + errorReportingLimit int + errorRecoveryLimit int + errorRecoveryLookaheadTokenLimit int + populateMacroCalls bool + enableOptionalSyntax bool + enableVariadicOperatorASTs bool +} + +var _ gen.CELVisitor = (*parser)(nil) + +func (p *parser) parse(expr runes.Buffer, desc string) ast.Expr { + lexer := gen.NewCELLexer(newCharStream(expr, desc)) + lexer.RemoveErrorListeners() + lexer.AddErrorListener(p) + + prsr := gen.NewCELParser(antlr.NewCommonTokenStream(lexer, 0)) + prsr.RemoveErrorListeners() + + prsrListener := &recursionListener{ + maxDepth: p.maxRecursionDepth, + ruleTypeDepth: map[int]*int{}, + } + + prsr.AddErrorListener(p) + prsr.AddParseListener(prsrListener) + + prsr.SetErrorHandler(&recoveryLimitErrorStrategy{ + DefaultErrorStrategy: antlr.NewDefaultErrorStrategy(), + errorRecoveryLimit: p.errorRecoveryLimit, + errorRecoveryTokenLookaheadLimit: p.errorRecoveryLookaheadTokenLimit, + }) + + defer func() { + if val := recover(); val != nil { + switch err := val.(type) { + case *lookaheadLimitError: + p.errors.internalError(err.Error()) + case *recursionError: + p.errors.internalError(err.Error()) + case *tooManyErrors: + // do nothing + case *recoveryLimitError: + // do nothing, listeners already notified and error reported. + default: + panic(val) + } + } + }() + + return p.Visit(prsr.Start_()).(ast.Expr) +} + +// Visitor implementations. 
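The recursionListener and panic/recover wiring in parse() above surface as ordinary parse errors. A hedged sketch of that behavior (common.NewTextSource assumed from the vendored common package), using a deliberately tiny MaxRecursionDepth:

```go
// Deep nesting against a tiny MaxRecursionDepth should come back as a
// reported parse error (something like "expression recursion limit
// exceeded: 2") rather than an unbounded descent or a panic.
package main

import (
	"fmt"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/parser"
)

func main() {
	p, err := parser.NewParser(parser.MaxRecursionDepth(2))
	if err != nil {
		panic(err)
	}
	_, errs := p.Parse(common.NewTextSource(`((((1))))`))
	fmt.Println(len(errs.GetErrors()) > 0) // true
}
```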
+func (p *parser) Visit(tree antlr.ParseTree) any { + t := unnest(tree) + switch tree := t.(type) { + case *gen.StartContext: + return p.VisitStart(tree) + case *gen.ExprContext: + p.checkAndIncrementRecursionDepth() + out := p.VisitExpr(tree) + p.decrementRecursionDepth() + return out + case *gen.ConditionalAndContext: + return p.VisitConditionalAnd(tree) + case *gen.ConditionalOrContext: + return p.VisitConditionalOr(tree) + case *gen.RelationContext: + p.checkAndIncrementRecursionDepth() + out := p.VisitRelation(tree) + p.decrementRecursionDepth() + return out + case *gen.CalcContext: + p.checkAndIncrementRecursionDepth() + out := p.VisitCalc(tree) + p.decrementRecursionDepth() + return out + case *gen.LogicalNotContext: + return p.VisitLogicalNot(tree) + case *gen.IdentOrGlobalCallContext: + return p.VisitIdentOrGlobalCall(tree) + case *gen.SelectContext: + p.checkAndIncrementRecursionDepth() + out := p.VisitSelect(tree) + p.decrementRecursionDepth() + return out + case *gen.MemberCallContext: + p.checkAndIncrementRecursionDepth() + out := p.VisitMemberCall(tree) + p.decrementRecursionDepth() + return out + case *gen.MapInitializerListContext: + return p.VisitMapInitializerList(tree) + case *gen.NegateContext: + return p.VisitNegate(tree) + case *gen.IndexContext: + p.checkAndIncrementRecursionDepth() + out := p.VisitIndex(tree) + p.decrementRecursionDepth() + return out + case *gen.UnaryContext: + return p.VisitUnary(tree) + case *gen.CreateListContext: + return p.VisitCreateList(tree) + case *gen.CreateMessageContext: + return p.VisitCreateMessage(tree) + case *gen.CreateStructContext: + return p.VisitCreateStruct(tree) + case *gen.IntContext: + return p.VisitInt(tree) + case *gen.UintContext: + return p.VisitUint(tree) + case *gen.DoubleContext: + return p.VisitDouble(tree) + case *gen.StringContext: + return p.VisitString(tree) + case *gen.BytesContext: + return p.VisitBytes(tree) + case *gen.BoolFalseContext: + return p.VisitBoolFalse(tree) + case *gen.BoolTrueContext: + return p.VisitBoolTrue(tree) + case *gen.NullContext: + return p.VisitNull(tree) + } + + // Report at least one error if the parser reaches an unknown parse element. + // Typically, this happens if the parser has already encountered a syntax error elsewhere. + if p.errors.errorCount() == 0 { + txt := "<>" + if t != nil { + txt = fmt.Sprintf("<<%T>>", t) + } + return p.reportError(common.NoLocation, "unknown parse element encountered: %s", txt) + } + return p.helper.newExpr(common.NoLocation) + +} + +// Visit a parse tree produced by CELParser#start. +func (p *parser) VisitStart(ctx *gen.StartContext) any { + return p.Visit(ctx.Expr()) +} + +// Visit a parse tree produced by CELParser#expr. +func (p *parser) VisitExpr(ctx *gen.ExprContext) any { + result := p.Visit(ctx.GetE()).(ast.Expr) + if ctx.GetOp() == nil { + return result + } + opID := p.helper.id(ctx.GetOp()) + ifTrue := p.Visit(ctx.GetE1()).(ast.Expr) + ifFalse := p.Visit(ctx.GetE2()).(ast.Expr) + return p.globalCallOrMacro(opID, operators.Conditional, result, ifTrue, ifFalse) +} + +// Visit a parse tree produced by CELParser#conditionalOr. 
+func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any { + result := p.Visit(ctx.GetE()).(ast.Expr) + l := p.newLogicManager(operators.LogicalOr, result) + rest := ctx.GetE1() + for i, op := range ctx.GetOps() { + if i >= len(rest) { + return p.reportError(ctx, "unexpected character, wanted '||'") + } + next := p.Visit(rest[i]).(ast.Expr) + opID := p.helper.id(op) + l.addTerm(opID, next) + } + return l.toExpr() +} + +// Visit a parse tree produced by CELParser#conditionalAnd. +func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any { + result := p.Visit(ctx.GetE()).(ast.Expr) + l := p.newLogicManager(operators.LogicalAnd, result) + rest := ctx.GetE1() + for i, op := range ctx.GetOps() { + if i >= len(rest) { + return p.reportError(ctx, "unexpected character, wanted '&&'") + } + next := p.Visit(rest[i]).(ast.Expr) + opID := p.helper.id(op) + l.addTerm(opID, next) + } + return l.toExpr() +} + +// Visit a parse tree produced by CELParser#relation. +func (p *parser) VisitRelation(ctx *gen.RelationContext) any { + opText := "" + if ctx.GetOp() != nil { + opText = ctx.GetOp().GetText() + } + if op, found := operators.Find(opText); found { + lhs := p.Visit(ctx.Relation(0)).(ast.Expr) + opID := p.helper.id(ctx.GetOp()) + rhs := p.Visit(ctx.Relation(1)).(ast.Expr) + return p.globalCallOrMacro(opID, op, lhs, rhs) + } + return p.reportError(ctx, "operator not found") +} + +// Visit a parse tree produced by CELParser#calc. +func (p *parser) VisitCalc(ctx *gen.CalcContext) any { + opText := "" + if ctx.GetOp() != nil { + opText = ctx.GetOp().GetText() + } + if op, found := operators.Find(opText); found { + lhs := p.Visit(ctx.Calc(0)).(ast.Expr) + opID := p.helper.id(ctx.GetOp()) + rhs := p.Visit(ctx.Calc(1)).(ast.Expr) + return p.globalCallOrMacro(opID, op, lhs, rhs) + } + return p.reportError(ctx, "operator not found") +} + +func (p *parser) VisitUnary(ctx *gen.UnaryContext) any { + return p.helper.newLiteralString(ctx, "<>") +} + +// Visit a parse tree produced by CELParser#LogicalNot. +func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) any { + if len(ctx.GetOps())%2 == 0 { + return p.Visit(ctx.Member()) + } + opID := p.helper.id(ctx.GetOps()[0]) + target := p.Visit(ctx.Member()).(ast.Expr) + return p.globalCallOrMacro(opID, operators.LogicalNot, target) +} + +func (p *parser) VisitNegate(ctx *gen.NegateContext) any { + if len(ctx.GetOps())%2 == 0 { + return p.Visit(ctx.Member()) + } + opID := p.helper.id(ctx.GetOps()[0]) + target := p.Visit(ctx.Member()).(ast.Expr) + return p.globalCallOrMacro(opID, operators.Negate, target) +} + +// VisitSelect visits a parse tree produced by CELParser#Select. +func (p *parser) VisitSelect(ctx *gen.SelectContext) any { + operand := p.Visit(ctx.Member()).(ast.Expr) + // Handle the error case where no valid identifier is specified. + if ctx.GetId() == nil || ctx.GetOp() == nil { + return p.helper.newExpr(ctx) + } + id := ctx.GetId().GetText() + if ctx.GetOpt() != nil { + if !p.enableOptionalSyntax { + return p.reportError(ctx.GetOp(), "unsupported syntax '.?'") + } + return p.helper.newGlobalCall( + ctx.GetOp(), + operators.OptSelect, + operand, + p.helper.newLiteralString(ctx.GetId(), id)) + } + return p.helper.newSelect(ctx.GetOp(), operand, id) +} + +// VisitMemberCall visits a parse tree produced by CELParser#MemberCall. +func (p *parser) VisitMemberCall(ctx *gen.MemberCallContext) any { + operand := p.Visit(ctx.Member()).(ast.Expr) + // Handle the error case where no valid identifier is specified. 
+ if ctx.GetId() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ id := ctx.GetId().GetText()
+ opID := p.helper.id(ctx.GetOpen())
+ return p.receiverCallOrMacro(opID, id, operand, p.visitExprList(ctx.GetArgs())...)
+}
+
+// Visit a parse tree produced by CELParser#Index.
+func (p *parser) VisitIndex(ctx *gen.IndexContext) any {
+ target := p.Visit(ctx.Member()).(ast.Expr)
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetOp() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ opID := p.helper.id(ctx.GetOp())
+ index := p.Visit(ctx.GetIndex()).(ast.Expr)
+ operator := operators.Index
+ if ctx.GetOpt() != nil {
+ if !p.enableOptionalSyntax {
+ return p.reportError(ctx.GetOp(), "unsupported syntax '[?'")
+ }
+ operator = operators.OptIndex
+ }
+ return p.globalCallOrMacro(opID, operator, target, index)
+}
+
+// Visit a parse tree produced by CELParser#CreateMessage.
+func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) any {
+ messageName := ""
+ for _, id := range ctx.GetIds() {
+ if len(messageName) != 0 {
+ messageName += "."
+ }
+ messageName += id.GetText()
+ }
+ if ctx.GetLeadingDot() != nil {
+ messageName = "." + messageName
+ }
+ objID := p.helper.id(ctx.GetOp())
+ entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]ast.EntryExpr)
+ return p.helper.newObject(objID, messageName, entries...)
+}
+
+// Visit a parse tree of field initializers.
+func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) any {
+ if ctx == nil || ctx.GetFields() == nil {
+ // This is the result of a syntax error handled elsewhere, return empty.
+ return []ast.EntryExpr{}
+ }
+
+ result := make([]ast.EntryExpr, len(ctx.GetFields()))
+ cols := ctx.GetCols()
+ vals := ctx.GetValues()
+ for i, f := range ctx.GetFields() {
+ if i >= len(cols) || i >= len(vals) {
+ // This is the result of a syntax error detected elsewhere.
+ return []ast.EntryExpr{}
+ }
+ initID := p.helper.id(cols[i])
+ optField := f.(*gen.OptFieldContext)
+ optional := optField.GetOpt() != nil
+ if !p.enableOptionalSyntax && optional {
+ p.reportError(optField, "unsupported syntax '?'")
+ continue
+ }
+ // The field may be empty due to a prior error.
+ id := optField.IDENTIFIER()
+ if id == nil {
+ return []ast.EntryExpr{}
+ }
+ fieldName := id.GetText()
+ value := p.Visit(vals[i]).(ast.Expr)
+ field := p.helper.newObjectField(initID, fieldName, value, optional)
+ result[i] = field
+ }
+ return result
+}
+
+// Visit a parse tree produced by CELParser#IdentOrGlobalCall.
+func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) any {
+ identName := ""
+ if ctx.GetLeadingDot() != nil {
+ identName = "."
+ }
+ // Handle the error case where no valid identifier is specified.
+ if ctx.GetId() == nil {
+ return p.helper.newExpr(ctx)
+ }
+ // Handle reserved identifiers.
+ id := ctx.GetId().GetText()
+ if _, ok := reservedIds[id]; ok {
+ return p.reportError(ctx, "reserved identifier: %s", id)
+ }
+ identName += id
+ if ctx.GetOp() != nil {
+ opID := p.helper.id(ctx.GetOp())
+ return p.globalCallOrMacro(opID, identName, p.visitExprList(ctx.GetArgs())...)
+ }
+ return p.helper.newIdent(ctx.GetId(), identName)
+}
+
+// Visit a parse tree produced by CELParser#CreateList.
+func (p *parser) VisitCreateList(ctx *gen.CreateListContext) any {
+ listID := p.helper.id(ctx.GetOp())
+ elems, optionals := p.visitListInit(ctx.GetElems())
+ return p.helper.newList(listID, elems, optionals...)
+}
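Several of the visitors above reject the '.?', '[?', and '?field' forms unless optional syntax is enabled. A short sketch of how that switch is surfaced, assuming the EnableOptionalSyntax option exported by this package (the expression is hypothetical):

```go
package main

import (
	"log"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/parser"
)

func main() {
	p, err := parser.NewParser(
		parser.Macros(parser.AllMacros...),
		parser.EnableOptionalSyntax(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	// With the flag on, `msg.?field` lowers to optSelect and `list[?0]` to optIndex.
	parsed, iss := p.Parse(common.NewTextSource(`msg.?field`))
	if len(iss.GetErrors()) != 0 {
		log.Fatal(iss.ToDisplayString())
	}
	_ = parsed
}
```

+// Visit a parse tree produced by CELParser#CreateStruct.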
+func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) any {
+ structID := p.helper.id(ctx.GetOp())
+ entries := []ast.EntryExpr{}
+ if ctx.GetEntries() != nil {
+ entries = p.Visit(ctx.GetEntries()).([]ast.EntryExpr)
+ }
+ return p.helper.newMap(structID, entries...)
+}
+
+// Visit a parse tree produced by CELParser#mapInitializerList.
+func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) any {
+ if ctx == nil || ctx.GetKeys() == nil {
+ // This is the result of a syntax error handled elsewhere, return empty.
+ return []ast.EntryExpr{}
+ }
+
+ result := make([]ast.EntryExpr, len(ctx.GetCols()))
+ keys := ctx.GetKeys()
+ vals := ctx.GetValues()
+ for i, col := range ctx.GetCols() {
+ colID := p.helper.id(col)
+ if i >= len(keys) || i >= len(vals) {
+ // This is the result of a syntax error detected elsewhere.
+ return []ast.EntryExpr{}
+ }
+ optKey := keys[i]
+ optional := optKey.GetOpt() != nil
+ if !p.enableOptionalSyntax && optional {
+ p.reportError(optKey, "unsupported syntax '?'")
+ continue
+ }
+ key := p.Visit(optKey.GetE()).(ast.Expr)
+ value := p.Visit(vals[i]).(ast.Expr)
+ entry := p.helper.newMapEntry(colID, key, value, optional)
+ result[i] = entry
+ }
+ return result
+}
+
+// Visit a parse tree produced by CELParser#Int.
+func (p *parser) VisitInt(ctx *gen.IntContext) any {
+ text := ctx.GetTok().GetText()
+ base := 10
+ if strings.HasPrefix(text, "0x") {
+ base = 16
+ text = text[2:]
+ }
+ if ctx.GetSign() != nil {
+ text = ctx.GetSign().GetText() + text
+ }
+ i, err := strconv.ParseInt(text, base, 64)
+ if err != nil {
+ return p.reportError(ctx, "invalid int literal")
+ }
+ return p.helper.newLiteralInt(ctx, i)
+}
+
+// Visit a parse tree produced by CELParser#Uint.
+func (p *parser) VisitUint(ctx *gen.UintContext) any {
+ text := ctx.GetTok().GetText()
+ // trim the 'u' designator included in the uint literal.
+ text = text[:len(text)-1]
+ base := 10
+ if strings.HasPrefix(text, "0x") {
+ base = 16
+ text = text[2:]
+ }
+ i, err := strconv.ParseUint(text, base, 64)
+ if err != nil {
+ return p.reportError(ctx, "invalid uint literal")
+ }
+ return p.helper.newLiteralUint(ctx, i)
+}
+
+// Visit a parse tree produced by CELParser#Double.
+func (p *parser) VisitDouble(ctx *gen.DoubleContext) any {
+ txt := ctx.GetTok().GetText()
+ if ctx.GetSign() != nil {
+ txt = ctx.GetSign().GetText() + txt
+ }
+ f, err := strconv.ParseFloat(txt, 64)
+ if err != nil {
+ return p.reportError(ctx, "invalid double literal")
+ }
+ return p.helper.newLiteralDouble(ctx, f)
+}
+
+// Visit a parse tree produced by CELParser#String.
+func (p *parser) VisitString(ctx *gen.StringContext) any {
+ s := p.unquote(ctx, ctx.GetText(), false)
+ return p.helper.newLiteralString(ctx, s)
+}
+
+// Visit a parse tree produced by CELParser#Bytes.
+func (p *parser) VisitBytes(ctx *gen.BytesContext) any {
+ b := []byte(p.unquote(ctx, ctx.GetTok().GetText()[1:], true))
+ return p.helper.newLiteralBytes(ctx, b)
+}
+
+// Visit a parse tree produced by CELParser#BoolTrue.
+func (p *parser) VisitBoolTrue(ctx *gen.BoolTrueContext) any {
+ return p.helper.newLiteralBool(ctx, true)
+}
+
+// Visit a parse tree produced by CELParser#BoolFalse.
+func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) any {
+ return p.helper.newLiteralBool(ctx, false)
+}
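The numeric literal visitors above lean on Go's strconv package: a 0x prefix switches the base to 16, a trailing 'u' marks a uint and is trimmed first, and any sign token is re-attached after the prefix is stripped. The same decoding, sketched standalone:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	i, _ := strconv.ParseInt("-1A", 16, 64)  // CEL source -0x1A: sign re-attached after trimming "0x"
	u, _ := strconv.ParseUint("42", 10, 64)  // CEL source 42u: 'u' designator trimmed before parsing
	f, _ := strconv.ParseFloat("-1.5e3", 64) // CEL source -1.5e3
	fmt.Println(i, u, f)                     // -26 42 -1500
}
```

+// Visit a parse tree produced by CELParser#Null.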
+func (p *parser) VisitNull(ctx *gen.NullContext) any { + return p.helper.exprFactory.NewLiteral(p.helper.newID(ctx), types.NullValue) +} + +func (p *parser) visitExprList(ctx gen.IExprListContext) []ast.Expr { + if ctx == nil { + return []ast.Expr{} + } + return p.visitSlice(ctx.GetE()) +} + +func (p *parser) visitListInit(ctx gen.IListInitContext) ([]ast.Expr, []int32) { + if ctx == nil { + return []ast.Expr{}, []int32{} + } + elements := ctx.GetElems() + result := make([]ast.Expr, len(elements)) + optionals := []int32{} + for i, e := range elements { + ex := p.Visit(e.GetE()).(ast.Expr) + if ex == nil { + return []ast.Expr{}, []int32{} + } + result[i] = ex + if e.GetOpt() != nil { + if !p.enableOptionalSyntax { + p.reportError(e.GetOpt(), "unsupported syntax '?'") + continue + } + optionals = append(optionals, int32(i)) + } + } + return result, optionals +} + +func (p *parser) visitSlice(expressions []gen.IExprContext) []ast.Expr { + if expressions == nil { + return []ast.Expr{} + } + result := make([]ast.Expr, len(expressions)) + for i, e := range expressions { + ex := p.Visit(e).(ast.Expr) + result[i] = ex + } + return result +} + +func (p *parser) unquote(ctx any, value string, isBytes bool) string { + text, err := unescape(value, isBytes) + if err != nil { + p.reportError(ctx, "%s", err.Error()) + return value + } + return text +} + +func (p *parser) newLogicManager(function string, term ast.Expr) *logicManager { + if p.enableVariadicOperatorASTs { + return newVariadicLogicManager(p.exprFactory, function, term) + } + return newBalancingLogicManager(p.exprFactory, function, term) +} + +func (p *parser) reportError(ctx any, format string, args ...any) ast.Expr { + var location common.Location + err := p.helper.newExpr(ctx) + switch c := ctx.(type) { + case common.Location: + location = c + case antlr.Token, antlr.ParserRuleContext: + location = p.helper.getLocation(err.ID()) + } + // Provide arguments to the report error. + p.errors.reportErrorAtID(err.ID(), location, format, args...) + return err +} + +// ANTLR Parse listener implementations +func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) { + offset := p.helper.sourceInfo.ComputeOffset(int32(line), int32(column)) + l := p.helper.getLocationByOffset(offset) + // Hack to keep existing error messages consistent with previous versions of CEL when a reserved word + // is used as an identifier. This behavior needs to be overhauled to provide consistent, normalized error + // messages out of ANTLR to prevent future breaking changes related to error message content. + if strings.Contains(msg, "no viable alternative") { + msg = reservedIdentifier.ReplaceAllString(msg, mismatchedReservedIdentifier) + } + // Ensure that no more than 100 syntax errors are reported as this will halt attempts to recover from a + // seriously broken expression. 
+ if p.errorReports < p.errorReportingLimit { + p.errorReports++ + p.errors.syntaxError(l, msg) + } else { + tme := &tooManyErrors{errorReportingLimit: p.errorReportingLimit} + p.errors.syntaxError(l, tme.Error()) + panic(tme) + } +} + +func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs *antlr.ATNConfigSet) { + // Intentional +} + +func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs *antlr.ATNConfigSet) { + // Intentional +} + +func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs *antlr.ATNConfigSet) { + // Intentional +} + +func (p *parser) globalCallOrMacro(exprID int64, function string, args ...ast.Expr) ast.Expr { + if expr, found := p.expandMacro(exprID, function, nil, args...); found { + return expr + } + return p.helper.newGlobalCall(exprID, function, args...) +} + +func (p *parser) receiverCallOrMacro(exprID int64, function string, target ast.Expr, args ...ast.Expr) ast.Expr { + if expr, found := p.expandMacro(exprID, function, target, args...); found { + return expr + } + return p.helper.newReceiverCall(exprID, function, target, args...) +} + +func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, args ...ast.Expr) (ast.Expr, bool) { + macro, found := p.macros[makeMacroKey(function, len(args), target != nil)] + if !found { + macro, found = p.macros[makeVarArgMacroKey(function, target != nil)] + if !found { + return nil, false + } + } + eh := exprHelperPool.Get().(*exprHelper) + defer exprHelperPool.Put(eh) + eh.parserHelper = p.helper + eh.id = exprID + expr, err := macro.Expander()(eh, target, args) + // An error indicates that the macro was matched, but the arguments were not well-formed. + if err != nil { + loc := err.Location + if loc == nil { + loc = p.helper.getLocation(exprID) + } + p.helper.deleteID(exprID) + return p.reportError(loc, err.Message), true + } + // A nil value from the macro indicates that the macro implementation decided that + // an expansion should not be performed. + if expr == nil { + return nil, false + } + if p.populateMacroCalls { + p.helper.addMacroCall(expr.ID(), function, target, args...) + } + p.helper.deleteID(exprID) + return expr, true +} + +func (p *parser) checkAndIncrementRecursionDepth() { + p.recursionDepth++ + if p.recursionDepth > p.maxRecursionDepth { + panic(&recursionError{message: "max recursion depth exceeded"}) + } +} + +func (p *parser) decrementRecursionDepth() { + p.recursionDepth-- +} + +// unnest traverses down the left-hand side of the parse graph until it encounters the first compound +// parse node or the first leaf in the parse graph. +func unnest(tree antlr.ParseTree) antlr.ParseTree { + for tree != nil { + switch t := tree.(type) { + case *gen.ExprContext: + // conditionalOr op='?' 
conditionalOr : expr + if t.GetOp() != nil { + return t + } + // conditionalOr + tree = t.GetE() + case *gen.ConditionalOrContext: + // conditionalAnd (ops=|| conditionalAnd)* + if t.GetOps() != nil && len(t.GetOps()) > 0 { + return t + } + // conditionalAnd + tree = t.GetE() + case *gen.ConditionalAndContext: + // relation (ops=&& relation)* + if t.GetOps() != nil && len(t.GetOps()) > 0 { + return t + } + // relation + tree = t.GetE() + case *gen.RelationContext: + // relation op relation + if t.GetOp() != nil { + return t + } + // calc + tree = t.Calc() + case *gen.CalcContext: + // calc op calc + if t.GetOp() != nil { + return t + } + // unary + tree = t.Unary() + case *gen.MemberExprContext: + // member expands to one of: primary, select, index, or create message + tree = t.Member() + case *gen.PrimaryExprContext: + // primary expands to one of identifier, nested, create list, create struct, literal + tree = t.Primary() + case *gen.NestedContext: + // contains a nested 'expr' + tree = t.GetE() + case *gen.ConstantLiteralContext: + // expands to a primitive literal + tree = t.Literal() + default: + return t + } + } + return tree +} + +var ( + reservedIdentifier = regexp.MustCompile("no viable alternative at input '.(true|false|null)'") + mismatchedReservedIdentifier = "mismatched input '$1' expecting IDENTIFIER" +) diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/unescape.go b/hack/tools/vendor/github.com/google/cel-go/parser/unescape.go new file mode 100644 index 0000000000..27c57a9f3a --- /dev/null +++ b/hack/tools/vendor/github.com/google/cel-go/parser/unescape.go @@ -0,0 +1,237 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// Unescape takes a quoted string, unquotes, and unescapes it. +// +// This function performs escaping compatible with GoogleSQL. +func unescape(value string, isBytes bool) (string, error) { + // All strings normalize newlines to the \n representation. + value = newlineNormalizer.Replace(value) + n := len(value) + + // Nothing to unescape / decode. + if n < 2 { + return value, fmt.Errorf("unable to unescape string") + } + + // Raw string preceded by the 'r|R' prefix. + isRawLiteral := false + if value[0] == 'r' || value[0] == 'R' { + value = value[1:] + n = len(value) + isRawLiteral = true + } + + // Quoted string of some form, must have same first and last char. + if value[0] != value[n-1] || (value[0] != '"' && value[0] != '\'') { + return value, fmt.Errorf("unable to unescape string") + } + + // Normalize the multi-line CEL string representation to a standard + // Go quoted string. 
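+ // For example, '''hello''' and """hello""" both normalize to the quoted
+ // form "hello" here before the escape pass below runs.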
+ if n >= 6 { + if strings.HasPrefix(value, "'''") { + if !strings.HasSuffix(value, "'''") { + return value, fmt.Errorf("unable to unescape string") + } + value = "\"" + value[3:n-3] + "\"" + } else if strings.HasPrefix(value, `"""`) { + if !strings.HasSuffix(value, `"""`) { + return value, fmt.Errorf("unable to unescape string") + } + value = "\"" + value[3:n-3] + "\"" + } + n = len(value) + } + value = value[1 : n-1] + // If there is nothing to escape, then return. + if isRawLiteral || !strings.ContainsRune(value, '\\') { + return value, nil + } + + // Otherwise the string contains escape characters. + // The following logic is adapted from `strconv/quote.go` + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*n/2) + for len(value) > 0 { + c, encode, rest, err := unescapeChar(value, isBytes) + if err != nil { + return "", err + } + value = rest + if c < utf8.RuneSelf || !encode { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + } + return string(buf), nil +} + +// unescapeChar takes a string input and returns the following info: +// +// value - the escaped unicode rune at the front of the string. +// encode - the value should be unicode-encoded +// tail - the remainder of the input string. +// err - error value, if the character could not be unescaped. +// +// When encode is true the return value may still fit within a single byte, +// but unicode encoding is attempted which is more expensive than when the +// value is known to self-represent as a single byte. +// +// If isBytes is set, unescape as a bytes literal so octal and hex escapes +// represent byte values, not unicode code points. +func unescapeChar(s string, isBytes bool) (value rune, encode bool, tail string, err error) { + // 1. Character is not an escape sequence. + switch c := s[0]; { + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // 2. Last character is the start of an escape sequence. + if len(s) <= 1 { + err = fmt.Errorf("unable to unescape string, found '\\' as last character") + return + } + + c := s[1] + s = s[2:] + // 3. Common escape sequences shared with Google SQL + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case '\\': + value = '\\' + case '\'': + value = '\'' + case '"': + value = '"' + case '`': + value = '`' + case '?': + value = '?' + + // 4. Unicode escape sequences, reproduced from `strconv/quote.go` + case 'x', 'X', 'u', 'U': + n := 0 + encode = true + switch c { + case 'x', 'X': + n = 2 + encode = !isBytes + case 'u': + n = 4 + if isBytes { + err = fmt.Errorf("unable to unescape string") + return + } + case 'U': + n = 8 + if isBytes { + err = fmt.Errorf("unable to unescape string") + return + } + } + var v rune + if len(s) < n { + err = fmt.Errorf("unable to unescape string") + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = fmt.Errorf("unable to unescape string") + return + } + v = v<<4 | x + } + s = s[n:] + if !isBytes && v > utf8.MaxRune { + err = fmt.Errorf("unable to unescape string") + return + } + value = v + + // 5. 
Octal escape sequences, must be three digits \[0-3][0-7][0-7]
+ case '0', '1', '2', '3':
+ if len(s) < 2 {
+ err = fmt.Errorf("unable to unescape octal sequence in string")
+ return
+ }
+ v := rune(c - '0')
+ for j := 0; j < 2; j++ {
+ x := s[j]
+ if x < '0' || x > '7' {
+ err = fmt.Errorf("unable to unescape octal sequence in string")
+ return
+ }
+ v = v*8 + rune(x-'0')
+ }
+ if !isBytes && v > utf8.MaxRune {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ value = v
+ s = s[2:]
+ encode = !isBytes
+
+ // Unknown escape sequence.
+ default:
+ err = fmt.Errorf("unable to unescape string")
+ }
+
+ tail = s
+ return
+}
+
+func unhex(b byte) (rune, bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return 0, false
+}
+
+var (
+ newlineNormalizer = strings.NewReplacer("\r\n", "\n", "\r", "\n")
+)
diff --git a/hack/tools/vendor/github.com/google/cel-go/parser/unparser.go b/hack/tools/vendor/github.com/google/cel-go/parser/unparser.go
new file mode 100644
index 0000000000..91cf729447
--- /dev/null
+++ b/hack/tools/vendor/github.com/google/cel-go/parser/unparser.go
@@ -0,0 +1,629 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.

+package parser
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/types"
+)
+
+// Unparse takes an input expression and source position information and generates a human-readable
+// expression.
+//
+// Note, unparsing an AST will often generate the same expression as was originally parsed, but some
+// formatting may be lost in translation, notably:
+//
+// - All quoted literals are double quoted.
+// - Byte literals are represented as octal escapes (same as Google SQL).
+// - Floating point values are converted to the smallest number of digits needed to represent the value.
+// - Spacing around punctuation marks may be lost.
+// - Parentheses will only be applied when they affect operator precedence.
+//
+// This function optionally takes in one or more UnparserOption to alter the unparsing behavior, such as
+// performing word wrapping on expressions.
+func Unparse(expr ast.Expr, info *ast.SourceInfo, opts ...UnparserOption) (string, error) {
+ unparserOpts := &unparserOption{
+ wrapOnColumn: defaultWrapOnColumn,
+ wrapAfterColumnLimit: defaultWrapAfterColumnLimit,
+ operatorsToWrapOn: defaultOperatorsToWrapOn,
+ }
+
+ var err error
+ for _, opt := range opts {
+ unparserOpts, err = opt(unparserOpts)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ un := &unparser{
+ info: info,
+ options: unparserOpts,
+ }
+ err = un.visit(expr)
+ if err != nil {
+ return "", err
+ }
+ return un.str.String(), nil
+}
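A round-trip sketch of the behavior documented above, pairing Unparse with this package's Parse helper (the expression is illustrative; the output comment follows the precedence rules described in the doc comment):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/cel-go/common"
	"github.com/google/cel-go/parser"
)

func main() {
	parsed, iss := parser.Parse(common.NewTextSource(`a && (b || c)`))
	if len(iss.GetErrors()) != 0 {
		log.Fatal(iss.ToDisplayString())
	}
	out, err := parser.Unparse(parsed.Expr(), parsed.SourceInfo())
	if err != nil {
		log.Fatal(err)
	}
	// Prints: a && (b || c) -- parentheses survive only where precedence requires them.
	fmt.Println(out)
}
```

+// unparser visits an expression to reconstruct a human-readable string from an AST.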
+type unparser struct { + str strings.Builder + info *ast.SourceInfo + options *unparserOption + lastWrappedIndex int +} + +func (un *unparser) visit(expr ast.Expr) error { + if expr == nil { + return errors.New("unsupported expression") + } + visited, err := un.visitMaybeMacroCall(expr) + if visited || err != nil { + return err + } + switch expr.Kind() { + case ast.CallKind: + return un.visitCall(expr) + case ast.LiteralKind: + return un.visitConst(expr) + case ast.IdentKind: + return un.visitIdent(expr) + case ast.ListKind: + return un.visitList(expr) + case ast.MapKind: + return un.visitStructMap(expr) + case ast.SelectKind: + return un.visitSelect(expr) + case ast.StructKind: + return un.visitStructMsg(expr) + default: + return fmt.Errorf("unsupported expression: %v", expr) + } +} + +func (un *unparser) visitCall(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + switch fun { + // ternary operator + case operators.Conditional: + return un.visitCallConditional(expr) + // optional select operator + case operators.OptSelect: + return un.visitOptSelect(expr) + // index operator + case operators.Index: + return un.visitCallIndex(expr) + // optional index operator + case operators.OptIndex: + return un.visitCallOptIndex(expr) + // unary operators + case operators.LogicalNot, operators.Negate: + return un.visitCallUnary(expr) + // binary operators + case operators.Add, + operators.Divide, + operators.Equals, + operators.Greater, + operators.GreaterEquals, + operators.In, + operators.Less, + operators.LessEquals, + operators.LogicalAnd, + operators.LogicalOr, + operators.Modulo, + operators.Multiply, + operators.NotEquals, + operators.OldIn, + operators.Subtract: + return un.visitCallBinary(expr) + // standard function calls. + default: + return un.visitCallFunc(expr) + } +} + +func (un *unparser) visitCallBinary(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + args := c.Args() + lhs := args[0] + // add parens if the current operator is lower precedence than the lhs expr operator. + lhsParen := isComplexOperatorWithRespectTo(fun, lhs) + rhs := args[1] + // add parens if the current operator is lower precedence than the rhs expr operator, + // or the same precedence and the operator is left recursive. + rhsParen := isComplexOperatorWithRespectTo(fun, rhs) + if !rhsParen && isLeftRecursive(fun) { + rhsParen = isSamePrecedence(fun, rhs) + } + err := un.visitMaybeNested(lhs, lhsParen) + if err != nil { + return err + } + unmangled, found := operators.FindReverseBinaryOperator(fun) + if !found { + return fmt.Errorf("cannot unmangle operator: %s", fun) + } + + un.writeOperatorWithWrapping(fun, unmangled) + return un.visitMaybeNested(rhs, rhsParen) +} + +func (un *unparser) visitCallConditional(expr ast.Expr) error { + c := expr.AsCall() + args := c.Args() + // add parens if operand is a conditional itself. + nested := isSamePrecedence(operators.Conditional, args[0]) || + isComplexOperator(args[0]) + err := un.visitMaybeNested(args[0], nested) + if err != nil { + return err + } + un.writeOperatorWithWrapping(operators.Conditional, "?") + + // add parens if operand is a conditional itself. + nested = isSamePrecedence(operators.Conditional, args[1]) || + isComplexOperator(args[1]) + err = un.visitMaybeNested(args[1], nested) + if err != nil { + return err + } + + un.str.WriteString(" : ") + // add parens if operand is a conditional itself. 
+ nested = isSamePrecedence(operators.Conditional, args[2]) || + isComplexOperator(args[2]) + + return un.visitMaybeNested(args[2], nested) +} + +func (un *unparser) visitCallFunc(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + args := c.Args() + if c.IsMemberFunction() { + nested := isBinaryOrTernaryOperator(c.Target()) + err := un.visitMaybeNested(c.Target(), nested) + if err != nil { + return err + } + un.str.WriteString(".") + } + un.str.WriteString(fun) + un.str.WriteString("(") + for i, arg := range args { + err := un.visit(arg) + if err != nil { + return err + } + if i < len(args)-1 { + un.str.WriteString(", ") + } + } + un.str.WriteString(")") + return nil +} + +func (un *unparser) visitCallIndex(expr ast.Expr) error { + return un.visitCallIndexInternal(expr, "[") +} + +func (un *unparser) visitCallOptIndex(expr ast.Expr) error { + return un.visitCallIndexInternal(expr, "[?") +} + +func (un *unparser) visitCallIndexInternal(expr ast.Expr, op string) error { + c := expr.AsCall() + args := c.Args() + nested := isBinaryOrTernaryOperator(args[0]) + err := un.visitMaybeNested(args[0], nested) + if err != nil { + return err + } + un.str.WriteString(op) + err = un.visit(args[1]) + if err != nil { + return err + } + un.str.WriteString("]") + return nil +} + +func (un *unparser) visitCallUnary(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + args := c.Args() + unmangled, found := operators.FindReverse(fun) + if !found { + return fmt.Errorf("cannot unmangle operator: %s", fun) + } + un.str.WriteString(unmangled) + nested := isComplexOperator(args[0]) + return un.visitMaybeNested(args[0], nested) +} + +func (un *unparser) visitConst(expr ast.Expr) error { + val := expr.AsLiteral() + switch val := val.(type) { + case types.Bool: + un.str.WriteString(strconv.FormatBool(bool(val))) + case types.Bytes: + // bytes constants are surrounded with b"" + un.str.WriteString(`b"`) + un.str.WriteString(bytesToOctets([]byte(val))) + un.str.WriteString(`"`) + case types.Double: + // represent the float using the minimum required digits + d := strconv.FormatFloat(float64(val), 'g', -1, 64) + un.str.WriteString(d) + if !strings.Contains(d, ".") { + un.str.WriteString(".0") + } + case types.Int: + i := strconv.FormatInt(int64(val), 10) + un.str.WriteString(i) + case types.Null: + un.str.WriteString("null") + case types.String: + // strings will be double quoted with quotes escaped. + un.str.WriteString(strconv.Quote(string(val))) + case types.Uint: + // uint literals have a 'u' suffix. 
+ ui := strconv.FormatUint(uint64(val), 10) + un.str.WriteString(ui) + un.str.WriteString("u") + default: + return fmt.Errorf("unsupported constant: %v", expr) + } + return nil +} + +func (un *unparser) visitIdent(expr ast.Expr) error { + un.str.WriteString(expr.AsIdent()) + return nil +} + +func (un *unparser) visitList(expr ast.Expr) error { + l := expr.AsList() + elems := l.Elements() + optIndices := make(map[int]bool, len(elems)) + for _, idx := range l.OptionalIndices() { + optIndices[int(idx)] = true + } + un.str.WriteString("[") + for i, elem := range elems { + if optIndices[i] { + un.str.WriteString("?") + } + err := un.visit(elem) + if err != nil { + return err + } + if i < len(elems)-1 { + un.str.WriteString(", ") + } + } + un.str.WriteString("]") + return nil +} + +func (un *unparser) visitOptSelect(expr ast.Expr) error { + c := expr.AsCall() + args := c.Args() + operand := args[0] + field := args[1].AsLiteral().(types.String) + return un.visitSelectInternal(operand, false, ".?", string(field)) +} + +func (un *unparser) visitSelect(expr ast.Expr) error { + sel := expr.AsSelect() + return un.visitSelectInternal(sel.Operand(), sel.IsTestOnly(), ".", sel.FieldName()) +} + +func (un *unparser) visitSelectInternal(operand ast.Expr, testOnly bool, op string, field string) error { + // handle the case when the select expression was generated by the has() macro. + if testOnly { + un.str.WriteString("has(") + } + nested := !testOnly && isBinaryOrTernaryOperator(operand) + err := un.visitMaybeNested(operand, nested) + if err != nil { + return err + } + un.str.WriteString(op) + un.str.WriteString(field) + if testOnly { + un.str.WriteString(")") + } + return nil +} + +func (un *unparser) visitStructMsg(expr ast.Expr) error { + m := expr.AsStruct() + fields := m.Fields() + un.str.WriteString(m.TypeName()) + un.str.WriteString("{") + for i, f := range fields { + field := f.AsStructField() + f := field.Name() + if field.IsOptional() { + un.str.WriteString("?") + } + un.str.WriteString(f) + un.str.WriteString(": ") + v := field.Value() + err := un.visit(v) + if err != nil { + return err + } + if i < len(fields)-1 { + un.str.WriteString(", ") + } + } + un.str.WriteString("}") + return nil +} + +func (un *unparser) visitStructMap(expr ast.Expr) error { + m := expr.AsMap() + entries := m.Entries() + un.str.WriteString("{") + for i, e := range entries { + entry := e.AsMapEntry() + k := entry.Key() + if entry.IsOptional() { + un.str.WriteString("?") + } + err := un.visit(k) + if err != nil { + return err + } + un.str.WriteString(": ") + v := entry.Value() + err = un.visit(v) + if err != nil { + return err + } + if i < len(entries)-1 { + un.str.WriteString(", ") + } + } + un.str.WriteString("}") + return nil +} + +func (un *unparser) visitMaybeMacroCall(expr ast.Expr) (bool, error) { + call, found := un.info.GetMacroCall(expr.ID()) + if !found { + return false, nil + } + return true, un.visit(call) +} + +func (un *unparser) visitMaybeNested(expr ast.Expr, nested bool) error { + if nested { + un.str.WriteString("(") + } + err := un.visit(expr) + if err != nil { + return err + } + if nested { + un.str.WriteString(")") + } + return nil +} + +// isLeftRecursive indicates whether the parser resolves the call in a left-recursive manner as +// this can have an effect of how parentheses affect the order of operations in the AST. 
+func isLeftRecursive(op string) bool { + return op != operators.LogicalAnd && op != operators.LogicalOr +} + +// isSamePrecedence indicates whether the precedence of the input operator is the same as the +// precedence of the (possible) operation represented in the input Expr. +// +// If the expr is not a Call, the result is false. +func isSamePrecedence(op string, expr ast.Expr) bool { + if expr.Kind() != ast.CallKind { + return false + } + c := expr.AsCall() + other := c.FunctionName() + return operators.Precedence(op) == operators.Precedence(other) +} + +// isLowerPrecedence indicates whether the precedence of the input operator is lower precedence +// than the (possible) operation represented in the input Expr. +// +// If the expr is not a Call, the result is false. +func isLowerPrecedence(op string, expr ast.Expr) bool { + c := expr.AsCall() + other := c.FunctionName() + return operators.Precedence(op) < operators.Precedence(other) +} + +// Indicates whether the expr is a complex operator, i.e., a call expression +// with 2 or more arguments. +func isComplexOperator(expr ast.Expr) bool { + if expr.Kind() == ast.CallKind && len(expr.AsCall().Args()) >= 2 { + return true + } + return false +} + +// Indicates whether it is a complex operation compared to another. +// expr is *not* considered complex if it is not a call expression or has +// less than two arguments, or if it has a higher precedence than op. +func isComplexOperatorWithRespectTo(op string, expr ast.Expr) bool { + if expr.Kind() != ast.CallKind || len(expr.AsCall().Args()) < 2 { + return false + } + return isLowerPrecedence(op, expr) +} + +// Indicate whether this is a binary or ternary operator. +func isBinaryOrTernaryOperator(expr ast.Expr) bool { + if expr.Kind() != ast.CallKind || len(expr.AsCall().Args()) < 2 { + return false + } + _, isBinaryOp := operators.FindReverseBinaryOperator(expr.AsCall().FunctionName()) + return isBinaryOp || isSamePrecedence(operators.Conditional, expr) +} + +// bytesToOctets converts byte sequences to a string using a three digit octal encoded value +// per byte. +func bytesToOctets(byteVal []byte) string { + var b strings.Builder + for _, c := range byteVal { + fmt.Fprintf(&b, "\\%03o", c) + } + return b.String() +} + +// writeOperatorWithWrapping outputs the operator and inserts a newline for operators configured +// in the unparser options. 
+func (un *unparser) writeOperatorWithWrapping(fun string, unmangled string) bool { + _, wrapOperatorExists := un.options.operatorsToWrapOn[fun] + lineLength := un.str.Len() - un.lastWrappedIndex + len(fun) + + if wrapOperatorExists && lineLength >= un.options.wrapOnColumn { + un.lastWrappedIndex = un.str.Len() + // wrapAfterColumnLimit flag dictates whether the newline is placed + // before or after the operator + if un.options.wrapAfterColumnLimit { + // Input: a && b + // Output: a &&\nb + un.str.WriteString(" ") + un.str.WriteString(unmangled) + un.str.WriteString("\n") + } else { + // Input: a && b + // Output: a\n&& b + un.str.WriteString("\n") + un.str.WriteString(unmangled) + un.str.WriteString(" ") + } + return true + } + un.str.WriteString(" ") + un.str.WriteString(unmangled) + un.str.WriteString(" ") + return false +} + +// Defined defaults for the unparser options +var ( + defaultWrapOnColumn = 80 + defaultWrapAfterColumnLimit = true + defaultOperatorsToWrapOn = map[string]bool{ + operators.LogicalAnd: true, + operators.LogicalOr: true, + } +) + +// UnparserOption is a functional option for configuring the output formatting +// of the Unparse function. +type UnparserOption func(*unparserOption) (*unparserOption, error) + +// Internal representation of the UnparserOption type +type unparserOption struct { + wrapOnColumn int + operatorsToWrapOn map[string]bool + wrapAfterColumnLimit bool +} + +// WrapOnColumn wraps the output expression when its string length exceeds a specified limit +// for operators set by WrapOnOperators function or by default, "&&" and "||" will be wrapped. +// +// Example usage: +// +// Unparse(expr, sourceInfo, WrapOnColumn(40), WrapOnOperators(Operators.LogicalAnd)) +// +// This will insert a newline immediately after the logical AND operator for the below example input: +// +// Input: +// 'my-principal-group' in request.auth.claims && request.auth.claims.iat > now - duration('5m') +// +// Output: +// 'my-principal-group' in request.auth.claims && +// request.auth.claims.iat > now - duration('5m') +func WrapOnColumn(col int) UnparserOption { + return func(opt *unparserOption) (*unparserOption, error) { + if col < 1 { + return nil, fmt.Errorf("Invalid unparser option. Wrap column value must be greater than or equal to 1. Got %v instead", col) + } + opt.wrapOnColumn = col + return opt, nil + } +} + +// WrapOnOperators specifies which operators to perform word wrapping on an output expression when its string length +// exceeds the column limit set by WrapOnColumn function. +// +// Word wrapping is supported on non-unary symbolic operators. Refer to operators.go for the full list +// +// This will replace any previously supplied operators instead of merging them. +func WrapOnOperators(symbols ...string) UnparserOption { + return func(opt *unparserOption) (*unparserOption, error) { + opt.operatorsToWrapOn = make(map[string]bool) + for _, symbol := range symbols { + _, found := operators.FindReverse(symbol) + if !found { + return nil, fmt.Errorf("Invalid unparser option. Unsupported operator: %s", symbol) + } + arity := operators.Arity(symbol) + if arity < 2 { + return nil, fmt.Errorf("Invalid unparser option. Unary operators are unsupported: %s", symbol) + } + + opt.operatorsToWrapOn[symbol] = true + } + + return opt, nil + } +} + +// WrapAfterColumnLimit dictates whether to insert a newline before or after the specified operator +// when word wrapping is performed. 
+// +// Example usage: +// +// Unparse(expr, sourceInfo, WrapOnColumn(40), WrapOnOperators(Operators.LogicalAnd), WrapAfterColumnLimit(false)) +// +// This will insert a newline immediately before the logical AND operator for the below example input, ensuring +// that the length of a line never exceeds the specified column limit: +// +// Input: +// 'my-principal-group' in request.auth.claims && request.auth.claims.iat > now - duration('5m') +// +// Output: +// 'my-principal-group' in request.auth.claims +// && request.auth.claims.iat > now - duration('5m') +func WrapAfterColumnLimit(wrapAfter bool) UnparserOption { + return func(opt *unparserOption) (*unparserOption, error) { + opt.wrapAfterColumnLimit = wrapAfter + return opt, nil + } +} diff --git a/hack/tools/vendor/github.com/google/uuid/CHANGELOG.md b/hack/tools/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 0000000000..7ec5ac7ea9 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,41 @@ +# Changelog + +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + +## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/hack/tools/vendor/github.com/google/uuid/CONTRIBUTING.md b/hack/tools/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 0000000000..a502fdc515 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. 
+ +### Releasing + +Commits that would precipitate a SemVer change, as described in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/hack/tools/vendor/github.com/google/uuid/CONTRIBUTORS b/hack/tools/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 0000000000..b4bb97f6bc --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/hack/tools/vendor/golang.org/x/xerrors/LICENSE b/hack/tools/vendor/github.com/google/uuid/LICENSE similarity index 96% rename from hack/tools/vendor/golang.org/x/xerrors/LICENSE rename to hack/tools/vendor/github.com/google/uuid/LICENSE index e4a47e17f1..5dc68268d9 100644 --- a/hack/tools/vendor/golang.org/x/xerrors/LICENSE +++ b/hack/tools/vendor/github.com/google/uuid/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019 The Go Authors. All rights reserved. +Copyright (c) 2009,2014 Google Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/hack/tools/vendor/github.com/google/uuid/README.md b/hack/tools/vendor/github.com/google/uuid/README.md new file mode 100644 index 0000000000..3e9a61889d --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/README.md @@ -0,0 +1,21 @@ +# uuid +The uuid package generates and inspects UUIDs based on +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +```sh +go get github.com/google/uuid +``` + +###### Documentation +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/hack/tools/vendor/github.com/google/uuid/dce.go b/hack/tools/vendor/github.com/google/uuid/dce.go new file mode 100644 index 0000000000..fa820b9d30 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. 
+// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/hack/tools/vendor/github.com/google/uuid/doc.go b/hack/tools/vendor/github.com/google/uuid/doc.go new file mode 100644 index 0000000000..5b8a4b9af8 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/hack/tools/vendor/github.com/google/uuid/hash.go b/hack/tools/vendor/github.com/google/uuid/hash.go new file mode 100644 index 0000000000..dc60082d3b --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. 
The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/hack/tools/vendor/github.com/google/uuid/marshal.go b/hack/tools/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 0000000000..14bd34072b --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/hack/tools/vendor/github.com/google/uuid/node.go b/hack/tools/vendor/github.com/google/uuid/node.go new file mode 100644 index 0000000000..d651a2b061 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". 
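+// For example, SetNodeInterface("") either adopts the first usable hardware
+// address or falls back to a randomly generated node ID, so it reports true.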
+func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/hack/tools/vendor/github.com/google/uuid/node_js.go b/hack/tools/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 0000000000..b2a0bc8711 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This removes the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/hack/tools/vendor/github.com/google/uuid/node_net.go b/hack/tools/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 0000000000..0cbbcddbd6 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. 
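+// On a typical Linux host, for example, getHardwareInterface("") might yield
+// ("eth0", <6-byte MAC>), while getHardwareInterface("lo") yields ("", nil)
+// because the loopback interface carries no hardware address (names illustrative only).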
+func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/hack/tools/vendor/github.com/google/uuid/null.go b/hack/tools/vendor/github.com/google/uuid/null.go new file mode 100644 index 0000000000..d7fcbf2865 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. +// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/hack/tools/vendor/github.com/google/uuid/sql.go b/hack/tools/vendor/github.com/google/uuid/sql.go new file mode 100644 index 0000000000..2e02ec06c0 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/hack/tools/vendor/github.com/google/uuid/time.go b/hack/tools/vendor/github.com/google/uuid/time.go new file mode 100644 index 0000000000..c351129279 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/time.go @@ -0,0 +1,134 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. 
+// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. +func (uuid UUID) Time() Time { + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/hack/tools/vendor/github.com/google/uuid/util.go b/hack/tools/vendor/github.com/google/uuid/util.go new file mode 100644 index 0000000000..5ea6c73780 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. 
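+// For example, xvalues['7'] == 7, xvalues['b'] == xvalues['B'] == 11, and
+// xvalues['g'] == 255 because 'g' is not a hexadecimal digit.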
+var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/hack/tools/vendor/github.com/google/uuid/uuid.go b/hack/tools/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 0000000000..5232b48678 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,365 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "sync" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + +// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. 
{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. 
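+// A common use is initializing package-level variables that must not fail,
+// for example:
+//
+//	var defaultUUID = Must(NewRandom()) // defaultUUID is an illustrative name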
+func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. +func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. 
The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. +func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/hack/tools/vendor/github.com/google/uuid/version1.go b/hack/tools/vendor/github.com/google/uuid/version1.go new file mode 100644 index 0000000000..463109629e --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/hack/tools/vendor/github.com/google/uuid/version4.go b/hack/tools/vendor/github.com/google/uuid/version4.go new file mode 100644 index 0000000000..7697802e4d --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. 
New is equivalent to
+// the expression
+//
+//	uuid.Must(uuid.NewRandom())
+func New() UUID {
+	return Must(NewRandom())
+}
+
+// NewString creates a new random UUID and returns it as a string or panics.
+// NewString is equivalent to the expression
+//
+//	uuid.New().String()
+func NewString() string {
+	return Must(NewRandom()).String()
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// Uses the randomness pool if it was enabled with EnableRandPool.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, which
+// means the probability is about 0.00000000006 (6 × 10−11),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() (UUID, error) {
+	if !poolEnabled {
+		return NewRandomFromReader(rander)
+	}
+	return newRandomFromPool()
+}
+
+// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
+func NewRandomFromReader(r io.Reader) (UUID, error) {
+	var uuid UUID
+	_, err := io.ReadFull(r, uuid[:])
+	if err != nil {
+		return Nil, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
+
+func newRandomFromPool() (UUID, error) {
+	var uuid UUID
+	poolMu.Lock()
+	if poolPos == randPoolSize {
+		_, err := io.ReadFull(rander, pool[:])
+		if err != nil {
+			poolMu.Unlock()
+			return Nil, err
+		}
+		poolPos = 0
+	}
+	copy(uuid[:], pool[poolPos:(poolPos+16)])
+	poolPos += 16
+	poolMu.Unlock()
+
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+} diff --git a/hack/tools/vendor/github.com/google/uuid/version6.go b/hack/tools/vendor/github.com/google/uuid/version6.go new file mode 100644 index 0000000000..339a959a7a --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 falls back to random NodeID bits automatically. If the clock
+// sequence has not been set by SetClockSequence then it will be set
+// automatically. If GetTime fails to return the current time, NewV6 returns
+// Nil and an error.
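+//
+// A minimal usage sketch (error handling abbreviated for illustration):
+//
+//	u, err := NewV6()
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = u.Time() // v6 UUIDs stay time-ordered and expose their timestamp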
+func NewV6() (UUID, error) {
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	/*
+	    0                   1                   2                   3
+	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                           time_high                           |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |           time_mid            |      time_low_and_version     |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |clk_seq_hi_res |  clk_seq_low  |         node (0-1)            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                         node (2-5)                            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+
+	binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+
+	uuid[6] = 0x60 | (uuid[6] & 0x0F)
+	uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	copy(uuid[10:], nodeID[:])
+	nodeMu.Unlock()
+
+	return uuid, nil
+} diff --git a/hack/tools/vendor/github.com/google/uuid/version7.go b/hack/tools/vendor/github.com/google/uuid/version7.go new file mode 100644 index 0000000000..3167b643d4 --- /dev/null +++ b/hack/tools/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well-known Unix Epoch timestamp source: the number of
+// milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded. It also
+// offers improved entropy characteristics over versions 1 and 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID versions 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+	uuid, err := NewRandom()
+	if err != nil {
+		return uuid, err
+	}
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
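+//
+// For example, tests can pin the random bits while the timestamp bits still
+// come from the clock (illustrative only; production code should prefer NewV7):
+//
+//	u, err := NewV7FromReader(bytes.NewReader(make([]byte, 16)))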
+func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a (12 bit seq) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t, s := getV7Time() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE new file mode 100644 index 0000000000..364516251b --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015, Gengo, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel new file mode 100644 index 0000000000..b8fbb2b77c --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "httprule", + srcs = [ + "compile.go", + "parse.go", + "types.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule", + deps = ["//utilities"], +) + +go_test( + name = "httprule_test", + size = "small", + srcs = [ + "compile_test.go", + "parse_test.go", + "types_test.go", + ], + embed = [":httprule"], + deps = [ + "//utilities", + "@org_golang_google_grpc//grpclog", + ], +) + +alias( + name = "go_default_library", + actual = ":httprule", + visibility = ["//:__subpackages__"], +) diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go new file mode 100644 index 0000000000..3cd9372959 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go @@ -0,0 +1,121 @@ +package httprule + +import ( + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" +) + +const ( + opcodeVersion = 1 +) + +// Template is a compiled representation of path templates. +type Template struct { + // Version is the version number of the format. + Version int + // OpCodes is a sequence of operations. + OpCodes []int + // Pool is a constant pool + Pool []string + // Verb is a VERB part in the template. + Verb string + // Fields is a list of field paths bound in this template. + Fields []string + // Original template (example: /v1/a_bit_of_everything) + Template string +} + +// Compiler compiles utilities representation of path templates into marshallable operations. +// They can be unmarshalled by runtime.NewPattern. +type Compiler interface { + Compile() Template +} + +type op struct { + // code is the opcode of the operation + code utilities.OpCode + + // str is a string operand of the code. + // num is ignored if str is not empty. + str string + + // num is a numeric operand of the code. + num int +} + +func (w wildcard) compile() []op { + return []op{ + {code: utilities.OpPush}, + } +} + +func (w deepWildcard) compile() []op { + return []op{ + {code: utilities.OpPushM}, + } +} + +func (l literal) compile() []op { + return []op{ + { + code: utilities.OpLitPush, + str: string(l), + }, + } +} + +func (v variable) compile() []op { + var ops []op + for _, s := range v.segments { + ops = append(ops, s.compile()...) 
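+		// Each nested segment contributes its own opcodes; the OpConcatN and
+		// OpCapture appended below fold them back into one bound value.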
+ } + ops = append(ops, op{ + code: utilities.OpConcatN, + num: len(v.segments), + }, op{ + code: utilities.OpCapture, + str: v.path, + }) + + return ops +} + +func (t template) Compile() Template { + var rawOps []op + for _, s := range t.segments { + rawOps = append(rawOps, s.compile()...) + } + + var ( + ops []int + pool []string + fields []string + ) + consts := make(map[string]int) + for _, op := range rawOps { + ops = append(ops, int(op.code)) + if op.str == "" { + ops = append(ops, op.num) + } else { + // eof segment literal represents the "/" path pattern + if op.str == eof { + op.str = "" + } + if _, ok := consts[op.str]; !ok { + consts[op.str] = len(pool) + pool = append(pool, op.str) + } + ops = append(ops, consts[op.str]) + } + if op.code == utilities.OpCapture { + fields = append(fields, op.str) + } + } + return Template{ + Version: opcodeVersion, + OpCodes: ops, + Pool: pool, + Verb: t.verb, + Fields: fields, + Template: t.template, + } +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go new file mode 100644 index 0000000000..c056bd3058 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go @@ -0,0 +1,11 @@ +//go:build gofuzz +// +build gofuzz + +package httprule + +func Fuzz(data []byte) int { + if _, err := Parse(string(data)); err != nil { + return 0 + } + return 0 +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go new file mode 100644 index 0000000000..65ffcf5cf8 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go @@ -0,0 +1,368 @@ +package httprule + +import ( + "errors" + "fmt" + "strings" +) + +// InvalidTemplateError indicates that the path template is not valid. 
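+//
+// For example, Parse("v1/users") (no leading slash) reports:
+//
+//	no leading /: v1/users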
+type InvalidTemplateError struct { + tmpl string + msg string +} + +func (e InvalidTemplateError) Error() string { + return fmt.Sprintf("%s: %s", e.msg, e.tmpl) +} + +// Parse parses the string representation of path template +func Parse(tmpl string) (Compiler, error) { + if !strings.HasPrefix(tmpl, "/") { + return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"} + } + tokens, verb := tokenize(tmpl[1:]) + + p := parser{tokens: tokens} + segs, err := p.topLevelSegments() + if err != nil { + return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()} + } + + return template{ + segments: segs, + verb: verb, + template: tmpl, + }, nil +} + +func tokenize(path string) (tokens []string, verb string) { + if path == "" { + return []string{eof}, "" + } + + const ( + init = iota + field + nested + ) + st := init + for path != "" { + var idx int + switch st { + case init: + idx = strings.IndexAny(path, "/{") + case field: + idx = strings.IndexAny(path, ".=}") + case nested: + idx = strings.IndexAny(path, "/}") + } + if idx < 0 { + tokens = append(tokens, path) + break + } + switch r := path[idx]; r { + case '/', '.': + case '{': + st = field + case '=': + st = nested + case '}': + st = init + } + if idx == 0 { + tokens = append(tokens, path[idx:idx+1]) + } else { + tokens = append(tokens, path[:idx], path[idx:idx+1]) + } + path = path[idx+1:] + } + + l := len(tokens) + // See + // https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ; + // although normal and backwards-compat logic here is to use the last index + // of a colon, if the final segment is a variable followed by a colon, the + // part following the colon must be a verb. Hence if the previous token is + // an end var marker, we switch the index we're looking for to Index instead + // of LastIndex, so that we correctly grab the remaining part of the path as + // the verb. + var penultimateTokenIsEndVar bool + switch l { + case 0, 1: + // Not enough to be variable so skip this logic and don't result in an + // invalid index + default: + penultimateTokenIsEndVar = tokens[l-2] == "}" + } + t := tokens[l-1] + var idx int + if penultimateTokenIsEndVar { + idx = strings.Index(t, ":") + } else { + idx = strings.LastIndex(t, ":") + } + if idx == 0 { + tokens, verb = tokens[:l-1], t[1:] + } else if idx > 0 { + tokens[l-1], verb = t[:idx], t[idx+1:] + } + tokens = append(tokens, eof) + return tokens, verb +} + +// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto. +type parser struct { + tokens []string + accepted []string +} + +// topLevelSegments is the target of this parser. 
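+// For a template such as "/v1/{name=projects/*}:get", tokenize has already
+// produced the token stream and split off the "get" verb by the time this
+// method runs.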
+func (p *parser) topLevelSegments() ([]segment, error) { + if _, err := p.accept(typeEOF); err == nil { + p.tokens = p.tokens[:0] + return []segment{literal(eof)}, nil + } + segs, err := p.segments() + if err != nil { + return nil, err + } + if _, err := p.accept(typeEOF); err != nil { + return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, "")) + } + return segs, nil +} + +func (p *parser) segments() ([]segment, error) { + s, err := p.segment() + if err != nil { + return nil, err + } + + segs := []segment{s} + for { + if _, err := p.accept("/"); err != nil { + return segs, nil + } + s, err := p.segment() + if err != nil { + return segs, err + } + segs = append(segs, s) + } +} + +func (p *parser) segment() (segment, error) { + if _, err := p.accept("*"); err == nil { + return wildcard{}, nil + } + if _, err := p.accept("**"); err == nil { + return deepWildcard{}, nil + } + if l, err := p.literal(); err == nil { + return l, nil + } + + v, err := p.variable() + if err != nil { + return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err) + } + return v, nil +} + +func (p *parser) literal() (segment, error) { + lit, err := p.accept(typeLiteral) + if err != nil { + return nil, err + } + return literal(lit), nil +} + +func (p *parser) variable() (segment, error) { + if _, err := p.accept("{"); err != nil { + return nil, err + } + + path, err := p.fieldPath() + if err != nil { + return nil, err + } + + var segs []segment + if _, err := p.accept("="); err == nil { + segs, err = p.segments() + if err != nil { + return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err) + } + } else { + segs = []segment{wildcard{}} + } + + if _, err := p.accept("}"); err != nil { + return nil, fmt.Errorf("unterminated variable segment: %s", path) + } + return variable{ + path: path, + segments: segs, + }, nil +} + +func (p *parser) fieldPath() (string, error) { + c, err := p.accept(typeIdent) + if err != nil { + return "", err + } + components := []string{c} + for { + if _, err := p.accept("."); err != nil { + return strings.Join(components, "."), nil + } + c, err := p.accept(typeIdent) + if err != nil { + return "", fmt.Errorf("invalid field path component: %w", err) + } + components = append(components, c) + } +} + +// A termType is a type of terminal symbols. +type termType string + +// These constants define some of valid values of termType. +// They improve readability of parse functions. +// +// You can also use "/", "*", "**", "." or "=" as valid values. +const ( + typeIdent = termType("ident") + typeLiteral = termType("literal") + typeEOF = termType("$") +) + +// eof is the terminal symbol which always appears at the end of token sequence. +const eof = "\u0000" + +// accept tries to accept a token in "p". +// This function consumes a token and returns it if it matches to the specified "term". +// If it doesn't match, the function does not consume any tokens and return an error. 
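+//
+// For example, accept("/") consumes the next token only when it is "/", and
+// accept(typeIdent) consumes it only when it is a valid identifier.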
+func (p *parser) accept(term termType) (string, error) { + t := p.tokens[0] + switch term { + case "/", "*", "**", ".", "=", "{", "}": + if t != string(term) && t != "/" { + return "", fmt.Errorf("expected %q but got %q", term, t) + } + case typeEOF: + if t != eof { + return "", fmt.Errorf("expected EOF but got %q", t) + } + case typeIdent: + if err := expectIdent(t); err != nil { + return "", err + } + case typeLiteral: + if err := expectPChars(t); err != nil { + return "", err + } + default: + return "", fmt.Errorf("unknown termType %q", term) + } + p.tokens = p.tokens[1:] + p.accepted = append(p.accepted, t) + return t, nil +} + +// expectPChars determines if "t" consists of only pchars defined in RFC3986. +// +// https://www.ietf.org/rfc/rfc3986.txt, P.49 +// +// pchar = unreserved / pct-encoded / sub-delims / ":" / "@" +// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" +// sub-delims = "!" / "$" / "&" / "'" / "(" / ")" +// / "*" / "+" / "," / ";" / "=" +// pct-encoded = "%" HEXDIG HEXDIG +func expectPChars(t string) error { + const ( + init = iota + pct1 + pct2 + ) + st := init + for _, r := range t { + if st != init { + if !isHexDigit(r) { + return fmt.Errorf("invalid hexdigit: %c(%U)", r, r) + } + switch st { + case pct1: + st = pct2 + case pct2: + st = init + } + continue + } + + // unreserved + switch { + case 'A' <= r && r <= 'Z': + continue + case 'a' <= r && r <= 'z': + continue + case '0' <= r && r <= '9': + continue + } + switch r { + case '-', '.', '_', '~': + // unreserved + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': + // sub-delims + case ':', '@': + // rest of pchar + case '%': + // pct-encoded + st = pct1 + default: + return fmt.Errorf("invalid character in path segment: %q(%U)", r, r) + } + } + if st != init { + return fmt.Errorf("invalid percent-encoding in %q", t) + } + return nil +} + +// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*). 
+func expectIdent(ident string) error { + if ident == "" { + return errors.New("empty identifier") + } + for pos, r := range ident { + switch { + case '0' <= r && r <= '9': + if pos == 0 { + return fmt.Errorf("identifier starting with digit: %s", ident) + } + continue + case 'A' <= r && r <= 'Z': + continue + case 'a' <= r && r <= 'z': + continue + case r == '_': + continue + default: + return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident) + } + } + return nil +} + +func isHexDigit(r rune) bool { + switch { + case '0' <= r && r <= '9': + return true + case 'A' <= r && r <= 'F': + return true + case 'a' <= r && r <= 'f': + return true + } + return false +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go new file mode 100644 index 0000000000..5a814a0004 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go @@ -0,0 +1,60 @@ +package httprule + +import ( + "fmt" + "strings" +) + +type template struct { + segments []segment + verb string + template string +} + +type segment interface { + fmt.Stringer + compile() (ops []op) +} + +type wildcard struct{} + +type deepWildcard struct{} + +type literal string + +type variable struct { + path string + segments []segment +} + +func (wildcard) String() string { + return "*" +} + +func (deepWildcard) String() string { + return "**" +} + +func (l literal) String() string { + return string(l) +} + +func (v variable) String() string { + var segs []string + for _, s := range v.segments { + segs = append(segs, s.String()) + } + return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/")) +} + +func (t template) String() string { + var segs []string + for _, s := range t.segments { + segs = append(segs, s.String()) + } + str := strings.Join(segs, "/") + if t.verb != "" { + str = fmt.Sprintf("%s:%s", str, t.verb) + } + return "/" + str +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel new file mode 100644 index 0000000000..78d7c9f5c8 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -0,0 +1,97 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "runtime", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime", + deps = [ + "//internal/httprule", + "//utilities", + "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//grpclog", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//reflect/protoregistry", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/fieldmaskpb", + 
"@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +go_test( + name = "runtime_test", + size = "small", + srcs = [ + "context_test.go", + "convert_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_internal_test.go", + "mux_test.go", + "pattern_test.go", + "query_fuzz_test.go", + "query_test.go", + ], + embed = [":runtime"], + deps = [ + "//runtime/internal/examplepb", + "//utilities", + "@com_github_google_go_cmp//cmp", + "@com_github_google_go_cmp//cmp/cmpopts", + "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_genproto_googleapis_rpc//errdetails", + "@org_golang_google_genproto_googleapis_rpc//status", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/fieldmaskpb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +alias( + name = "go_default_library", + actual = ":runtime", + visibility = ["//visibility:public"], +) diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go new file mode 100644 index 0000000000..5dd4e44786 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -0,0 +1,406 @@ +package runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/textproto" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. +const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is prepended to permanent HTTP header keys (as specified +// by the IANA) when added to the gRPC context. +const MetadataPrefix = "grpcgateway-" + +// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to +// HTTP headers in a response handled by grpc-gateway +const MetadataTrailerPrefix = "Grpc-Trailer-" + +const metadataGrpcTimeout = "Grpc-Timeout" +const metadataHeaderBinarySuffix = "-Bin" + +const xForwardedFor = "X-Forwarded-For" +const xForwardedHost = "X-Forwarded-Host" + +// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound +// header isn't present. If the value is 0 the sent `context` will not have a timeout. +var DefaultContextTimeout = 0 * time.Second + +// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed. +// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context. 
+var malformedHTTPHeaders = map[string]struct{}{ + "connection": {}, +} + +type ( + rpcMethodKey struct{} + httpPathPatternKey struct{} + + AnnotateContextOption func(ctx context.Context) context.Context +) + +func WithHTTPPathPattern(pattern string) AnnotateContextOption { + return func(ctx context.Context) context.Context { + return withHTTPPathPattern(ctx, pattern) + } +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +/* +AnnotateContext adds context information such as metadata from the request. + +At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", +except that the forwarded destination is not another HTTP service but rather +a gRPC service. +*/ +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewOutgoingContext(ctx, md), nil +} + +// AnnotateIncomingContext adds context information such as metadata from the request. +// Attach metadata as incoming context. +func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewIncomingContext(ctx, md), nil +} + +func isValidGRPCMetadataKey(key string) bool { + // Must be a valid gRPC "Header-Name" as defined here: + // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md + // This means 0-9 a-z _ - . + // Only lowercase letters are valid in the wire protocol, but the client library will normalize + // uppercase ASCII to lowercase, so uppercase ASCII is also acceptable. + bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode. + for _, ch := range bytes { + validLowercaseLetter := ch >= 'a' && ch <= 'z' + validUppercaseLetter := ch >= 'A' && ch <= 'Z' + validDigit := ch >= '0' && ch <= '9' + validOther := ch == '.' || ch == '-' || ch == '_' + if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther { + return false + } + } + return true +} + +func isValidGRPCMetadataTextValue(textValue string) bool { + // Must be a valid gRPC "ASCII-Value" as defined here: + // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md + // This means printable ASCII (including/plus spaces); 0x20 to 0x7E inclusive. + bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode. 
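+	// Printable ASCII spans 0x20 (space) through 0x7E ('~'); any byte outside
+	// that range makes the value unusable as gRPC metadata.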
+ for _, ch := range bytes { + if ch < 0x20 || ch > 0x7E { + return false + } + } + return true +} + +func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) { + ctx = withRPCMethod(ctx, rpcMethodName) + for _, o := range options { + ctx = o(ctx) + } + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error + timeout, err = timeoutDecode(tm) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } + var pairs []string + for key, vals := range req.Header { + key = textproto.CanonicalMIMEHeaderKey(key) + switch key { + case xForwardedFor, xForwardedHost: + // Handled separately below + continue + } + + for _, val := range vals { + // For backwards-compatibility, pass through 'authorization' header with no prefix. + if key == "Authorization" { + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { + if !isValidGRPCMetadataKey(h) { + grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h) + continue + } + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. + if strings.HasSuffix(key, metadataHeaderBinarySuffix) { + b, err := decodeBinHeader(val) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) + } + + val = string(b) + } else if !isValidGRPCMetadataTextValue(val) { + grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h) + continue + } + pairs = append(pairs, h, val) + } + } + } + if host := req.Header.Get(xForwardedHost); host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), host) + } else if req.Host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) + } + + xff := req.Header.Values(xForwardedFor) + if addr := req.RemoteAddr; addr != "" { + if remoteIP, _, err := net.SplitHostPort(addr); err == nil { + xff = append(xff, remoteIP) + } + } + if len(xff) > 0 { + pairs = append(pairs, strings.ToLower(xForwardedFor), strings.Join(xff, ", ")) + } + + if timeout != 0 { + ctx, _ = context.WithTimeout(ctx, timeout) + } + if len(pairs) == 0 { + return ctx, nil, nil + } + md := metadata.Pairs(pairs...) + for _, mda := range mux.metadataAnnotators { + md = metadata.Join(md, mda(ctx, req)) + } + return ctx, md, nil +} + +// ServerMetadata consists of metadata sent from gRPC server. +type ServerMetadata struct { + HeaderMD metadata.MD + TrailerMD metadata.MD +} + +type serverMetadataKey struct{} + +// NewServerMetadataContext creates a new context with ServerMetadata +func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + if ctx == nil { + ctx = context.Background() + } + return context.WithValue(ctx, serverMetadataKey{}, md) +} + +// ServerMetadataFromContext returns the ServerMetadata in ctx +func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + if ctx == nil { + return md, false + } + md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) + return +} + +// ServerTransportStream implements grpc.ServerTransportStream. +// It should only be used by the generated files to support grpc.SendHeader +// outside of gRPC server use. 
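+//
+// A hedged usage sketch (mirroring what generated gateway code does; the
+// surrounding wiring is assumed, not shown in this file):
+//
+//	stream := &runtime.ServerTransportStream{}
+//	ctx = grpc.NewContextWithServerTransportStream(ctx, stream)
+//	// ... invoke the method handler; grpc.SendHeader now has a target ...
+//	hdr := stream.Header() // headers accumulated by the handler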
+type ServerTransportStream struct { + mu sync.Mutex + header metadata.MD + trailer metadata.MD +} + +// Method returns the method for the stream. +func (s *ServerTransportStream) Method() string { + return "" +} + +// Header returns the header metadata of the stream. +func (s *ServerTransportStream) Header() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.header.Copy() +} + +// SetHeader sets the header metadata. +func (s *ServerTransportStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.header = metadata.Join(s.header, md) + s.mu.Unlock() + return nil +} + +// SendHeader sets the header metadata. +func (s *ServerTransportStream) SendHeader(md metadata.MD) error { + return s.SetHeader(md) +} + +// Trailer returns the cached trailer metadata. +func (s *ServerTransportStream) Trailer() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.trailer.Copy() +} + +// SetTrailer sets the trailer metadata. +func (s *ServerTransportStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.mu.Unlock() + return nil +} + +func timeoutDecode(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("timeout string is too short: %q", s) + } + d, ok := timeoutUnitToDuration(s[size-1]) + if !ok { + return 0, fmt.Errorf("timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { + switch u { + case 'H': + return time.Hour, true + case 'M': + return time.Minute, true + case 'S': + return time.Second, true + case 'm': + return time.Millisecond, true + case 'u': + return time.Microsecond, true + case 'n': + return time.Nanosecond, true + default: + return + } +} + +// isPermanentHTTPHeader checks whether hdr belongs to the list of +// permanent request headers maintained by IANA. +// http://www.iana.org/assignments/message-headers/message-headers.xml +func isPermanentHTTPHeader(hdr string) bool { + switch hdr { + case + "Accept", + "Accept-Charset", + "Accept-Language", + "Accept-Ranges", + "Authorization", + "Cache-Control", + "Content-Type", + "Cookie", + "Date", + "Expect", + "From", + "Host", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Schedule-Tag-Match", + "If-Unmodified-Since", + "Max-Forwards", + "Origin", + "Pragma", + "Referer", + "User-Agent", + "Via", + "Warning": + return true + } + return false +} + +// isMalformedHTTPHeader checks whether header belongs to the list of +// "malformed headers" and would be rejected by the gRPC server. +func isMalformedHTTPHeader(header string) bool { + _, isMalformed := malformedHTTPHeaders[strings.ToLower(header)] + return isMalformed +} + +// RPCMethod returns the method string for the server context. The returned +// string is in the format of "/package.service/method". +func RPCMethod(ctx context.Context) (string, bool) { + m := ctx.Value(rpcMethodKey{}) + if m == nil { + return "", false + } + ms, ok := m.(string) + if !ok { + return "", false + } + return ms, true +} + +func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context { + return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName) +} + +// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists. 
+// The format of the returned string is defined by the google.api.http path template type. +func HTTPPathPattern(ctx context.Context) (string, bool) { + m := ctx.Value(httpPathPatternKey{}) + if m == nil { + return "", false + } + ms, ok := m.(string) + if !ok { + return "", false + } + return ms, true +} + +func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context { + return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern) +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go new file mode 100644 index 0000000000..d7b15fcfb3 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -0,0 +1,318 @@ +package runtime + +import ( + "encoding/base64" + "fmt" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +// String just returns the given string. +// It is just for compatibility to other types. +func String(val string) (string, error) { + return val, nil +} + +// StringSlice converts 'val' where individual strings are separated by +// 'sep' into a string slice. +func StringSlice(val, sep string) ([]string, error) { + return strings.Split(val, sep), nil +} + +// Bool converts the given string representation of a boolean value into bool. +func Bool(val string) (bool, error) { + return strconv.ParseBool(val) +} + +// BoolSlice converts 'val' where individual booleans are separated by +// 'sep' into a bool slice. +func BoolSlice(val, sep string) ([]bool, error) { + s := strings.Split(val, sep) + values := make([]bool, len(s)) + for i, v := range s { + value, err := Bool(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Float64 converts the given string representation into representation of a floating point number into float64. +func Float64(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +// Float64Slice converts 'val' where individual floating point numbers are separated by +// 'sep' into a float64 slice. +func Float64Slice(val, sep string) ([]float64, error) { + s := strings.Split(val, sep) + values := make([]float64, len(s)) + for i, v := range s { + value, err := Float64(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Float32 converts the given string representation of a floating point number into float32. +func Float32(val string) (float32, error) { + f, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +// Float32Slice converts 'val' where individual floating point numbers are separated by +// 'sep' into a float32 slice. +func Float32Slice(val, sep string) ([]float32, error) { + s := strings.Split(val, sep) + values := make([]float32, len(s)) + for i, v := range s { + value, err := Float32(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Int64 converts the given string representation of an integer into int64. +func Int64(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +// Int64Slice converts 'val' where individual integers are separated by +// 'sep' into a int64 slice. 
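+//
+// Illustrative example:
+//
+//	vals, err := runtime.Int64Slice("1,2,3", ",") // []int64{1, 2, 3}, nil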
+func Int64Slice(val, sep string) ([]int64, error) { + s := strings.Split(val, sep) + values := make([]int64, len(s)) + for i, v := range s { + value, err := Int64(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Int32 converts the given string representation of an integer into int32. +func Int32(val string) (int32, error) { + i, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// Int32Slice converts 'val' where individual integers are separated by +// 'sep' into a int32 slice. +func Int32Slice(val, sep string) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Int32(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Uint64 converts the given string representation of an integer into uint64. +func Uint64(val string) (uint64, error) { + return strconv.ParseUint(val, 0, 64) +} + +// Uint64Slice converts 'val' where individual integers are separated by +// 'sep' into a uint64 slice. +func Uint64Slice(val, sep string) ([]uint64, error) { + s := strings.Split(val, sep) + values := make([]uint64, len(s)) + for i, v := range s { + value, err := Uint64(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Uint32 converts the given string representation of an integer into uint32. +func Uint32(val string) (uint32, error) { + i, err := strconv.ParseUint(val, 0, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// Uint32Slice converts 'val' where individual integers are separated by +// 'sep' into a uint32 slice. +func Uint32Slice(val, sep string) ([]uint32, error) { + s := strings.Split(val, sep) + values := make([]uint32, len(s)) + for i, v := range s { + value, err := Uint32(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Bytes converts the given string representation of a byte sequence into a slice of bytes +// A bytes sequence is encoded in URL-safe base64 without padding +func Bytes(val string) ([]byte, error) { + b, err := base64.StdEncoding.DecodeString(val) + if err != nil { + b, err = base64.URLEncoding.DecodeString(val) + if err != nil { + return nil, err + } + } + return b, nil +} + +// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe +// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +func BytesSlice(val, sep string) ([][]byte, error) { + s := strings.Split(val, sep) + values := make([][]byte, len(s)) + for i, v := range s { + value, err := Bytes(v) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp. +func Timestamp(val string) (*timestamppb.Timestamp, error) { + var r timestamppb.Timestamp + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} + if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil { + return nil, err + } + return &r, nil +} + +// Duration converts the given string into a timestamp.Duration. 
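+//
+// The input uses the protobuf JSON encoding of a duration, i.e. decimal
+// seconds with an "s" suffix. Illustrative example:
+//
+//	d, err := runtime.Duration("1.5s") // 1.5s as a *durationpb.Duration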
+func Duration(val string) (*durationpb.Duration, error) { + var r durationpb.Duration + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} + if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil { + return nil, err + } + return &r, nil +} + +// Enum converts the given string into an int32 that should be type casted into the +// correct enum proto type. +func Enum(val string, enumValMap map[string]int32) (int32, error) { + e, ok := enumValMap[val] + if ok { + return e, nil + } + + i, err := Int32(val) + if err != nil { + return 0, fmt.Errorf("%s is not valid", val) + } + for _, v := range enumValMap { + if v == i { + return i, nil + } + } + return 0, fmt.Errorf("%s is not valid", val) +} + +// EnumSlice converts 'val' where individual enums are separated by 'sep' +// into a int32 slice. Each individual int32 should be type casted into the +// correct enum proto type. +func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Enum(v, enumValMap) + if err != nil { + return nil, err + } + values[i] = value + } + return values, nil +} + +// Support for google.protobuf.wrappers on top of primitive types + +// StringValue well-known type support as wrapper around string type +func StringValue(val string) (*wrapperspb.StringValue, error) { + return wrapperspb.String(val), nil +} + +// FloatValue well-known type support as wrapper around float32 type +func FloatValue(val string) (*wrapperspb.FloatValue, error) { + parsedVal, err := Float32(val) + return wrapperspb.Float(parsedVal), err +} + +// DoubleValue well-known type support as wrapper around float64 type +func DoubleValue(val string) (*wrapperspb.DoubleValue, error) { + parsedVal, err := Float64(val) + return wrapperspb.Double(parsedVal), err +} + +// BoolValue well-known type support as wrapper around bool type +func BoolValue(val string) (*wrapperspb.BoolValue, error) { + parsedVal, err := Bool(val) + return wrapperspb.Bool(parsedVal), err +} + +// Int32Value well-known type support as wrapper around int32 type +func Int32Value(val string) (*wrapperspb.Int32Value, error) { + parsedVal, err := Int32(val) + return wrapperspb.Int32(parsedVal), err +} + +// UInt32Value well-known type support as wrapper around uint32 type +func UInt32Value(val string) (*wrapperspb.UInt32Value, error) { + parsedVal, err := Uint32(val) + return wrapperspb.UInt32(parsedVal), err +} + +// Int64Value well-known type support as wrapper around int64 type +func Int64Value(val string) (*wrapperspb.Int64Value, error) { + parsedVal, err := Int64(val) + return wrapperspb.Int64(parsedVal), err +} + +// UInt64Value well-known type support as wrapper around uint64 type +func UInt64Value(val string) (*wrapperspb.UInt64Value, error) { + parsedVal, err := Uint64(val) + return wrapperspb.UInt64(parsedVal), err +} + +// BytesValue well-known type support as wrapper around bytes[] type +func BytesValue(val string) (*wrapperspb.BytesValue, error) { + parsedVal, err := Bytes(val) + return wrapperspb.Bytes(parsedVal), err +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go new file mode 100644 index 0000000000..b6e5ddf7a9 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go @@ -0,0 +1,5 @@ +/* +Package runtime contains runtime helper functions used by 
+servers which protoc-gen-grpc-gateway generates. +*/ +package runtime diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go new file mode 100644 index 0000000000..5682998699 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -0,0 +1,181 @@ +package runtime + +import ( + "context" + "errors" + "io" + "net/http" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// ErrorHandlerFunc is the signature used to configure error handling. +type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +// StreamErrorHandlerFunc is the signature used to configure stream error handling. +type StreamErrorHandlerFunc func(context.Context, error) *status.Status + +// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors. +type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int) + +// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error +// passed to the DefaultRoutingErrorHandler. +type HTTPStatusError struct { + HTTPStatus int + Err error +} + +func (e *HTTPStatusError) Error() string { + return e.Err.Error() +} + +// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. +// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +func HTTPStatusFromCode(code codes.Code) int { + switch code { + case codes.OK: + return http.StatusOK + case codes.Canceled: + return 499 + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: + return http.StatusBadRequest + case codes.DeadlineExceeded: + return http.StatusGatewayTimeout + case codes.NotFound: + return http.StatusNotFound + case codes.AlreadyExists: + return http.StatusConflict + case codes.PermissionDenied: + return http.StatusForbidden + case codes.Unauthenticated: + return http.StatusUnauthorized + case codes.ResourceExhausted: + return http.StatusTooManyRequests + case codes.FailedPrecondition: + // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status. + return http.StatusBadRequest + case codes.Aborted: + return http.StatusConflict + case codes.OutOfRange: + return http.StatusBadRequest + case codes.Unimplemented: + return http.StatusNotImplemented + case codes.Internal: + return http.StatusInternalServerError + case codes.Unavailable: + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError + default: + grpclog.Warningf("Unknown gRPC error code: %v", code) + return http.StatusInternalServerError + } +} + +// HTTPError uses the mux-configured error handler. +func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + mux.errorHandler(ctx, mux, marshaler, w, r, err) +} + +// DefaultHTTPErrorHandler is the default error handler. +// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. +// If "err" is an HTTPStatusError, the function replies with the status code provided by that struct.
This is +// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler +// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode +// are insufficient for. +// Otherwise, it replies with http.StatusInternalServerError. +// +// The response body written by this function is a Status message marshaled by the Marshaler. +func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + // return Internal when Marshal fails + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + + var customStatus *HTTPStatusError + if errors.As(err, &customStatus) { + err = customStatus.Err + } + + s := status.Convert(err) + pb := s.Proto() + + w.Header().Del("Trailer") + w.Header().Del("Transfer-Encoding") + + contentType := marshaler.ContentType(pb) + w.Header().Set("Content-Type", contentType) + + if s.Code() == codes.Unauthenticated { + w.Header().Set("WWW-Authenticate", s.Message()) + } + + buf, merr := marshaler.Marshal(pb) + if merr != nil { + grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Error("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. + doForwardTrailers := requestAcceptsTrailers(r) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, mux, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + + st := HTTPStatusFromCode(s.Code()) + if customStatus != nil { + st = customStatus.HTTPStatus + } + + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + + if doForwardTrailers { + handleForwardResponseTrailer(w, mux, md) + } +} + +func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status { + return status.Convert(err) +} + +// DefaultRoutingErrorHandler is our default handler for routing errors.
+// By default http error codes mapped on the following error codes: +// +// NotFound -> grpc.NotFound +// StatusBadRequest -> grpc.InvalidArgument +// MethodNotAllowed -> grpc.Unimplemented +// Other -> grpc.Internal, method is not expecting to be called for anything else +func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) { + sterr := status.Error(codes.Internal, "Unexpected routing error") + switch httpStatus { + case http.StatusBadRequest: + sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus)) + case http.StatusMethodNotAllowed: + sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus)) + case http.StatusNotFound: + sterr = status.Error(codes.NotFound, http.StatusText(httpStatus)) + } + mux.errorHandler(ctx, mux, marshaler, w, r, sterr) +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go new file mode 100644 index 0000000000..9005d6a0bf --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -0,0 +1,168 @@ +package runtime + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "sort" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor { + fd := fields.ByName(protoreflect.Name(name)) + if fd != nil { + return fd + } + + return fields.ByJSONName(name) +} + +// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. +func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) { + fm := &field_mask.FieldMask{} + var root interface{} + + if err := json.NewDecoder(r).Decode(&root); err != nil { + if errors.Is(err, io.EOF) { + return fm, nil + } + return nil, err + } + + queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}} + for len(queue) > 0 { + // dequeue an item + item := queue[0] + queue = queue[1:] + + m, ok := item.node.(map[string]interface{}) + switch { + case ok && len(m) > 0: + // if the item is an object, then enqueue all of its children + for k, v := range m { + if item.msg == nil { + return nil, errors.New("JSON structure did not match request type") + } + + fd := getFieldByName(item.msg.Descriptor().Fields(), k) + if fd == nil { + return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName()) + } + + if isDynamicProtoMessage(fd.Message()) { + for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) { + newPath := p + if item.path != "" { + newPath = item.path + "." + newPath + } + queue = append(queue, fieldMaskPathItem{path: newPath}) + } + continue + } + + if isProtobufAnyMessage(fd.Message()) && !fd.IsList() { + _, hasTypeField := v.(map[string]interface{})["@type"] + if hasTypeField { + queue = append(queue, fieldMaskPathItem{path: k}) + continue + } else { + return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName()) + } + + } + + child := fieldMaskPathItem{ + node: v, + } + if item.path == "" { + child.path = string(fd.FullName().Name()) + } else { + child.path = item.path + "." 
+ string(fd.FullName().Name()) + } + + switch { + case fd.IsList(), fd.IsMap(): + // As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86 + // Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop. + fm.Paths = append(fm.Paths, child.path) + case fd.Message() != nil: + child.msg = item.msg.Get(fd).Message() + fallthrough + default: + queue = append(queue, child) + } + } + case ok && len(m) == 0: + fallthrough + case len(item.path) > 0: + // otherwise, it's a leaf node so print its path + fm.Paths = append(fm.Paths, item.path) + } + } + + // Sort for deterministic output in the presence + // of repeated fields. + sort.Strings(fm.Paths) + + return fm, nil +} + +func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Any") +} + +func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value") +} + +// buildPathsBlindly does not attempt to match proto field names to the +// json value keys. Instead it relies completely on the structure of +// the unmarshalled json contained within in. +// Returns a slice containing all subpaths with the root at the +// passed in name and json value. +func buildPathsBlindly(name string, in interface{}) []string { + m, ok := in.(map[string]interface{}) + if !ok { + return []string{name} + } + + var paths []string + queue := []fieldMaskPathItem{{path: name, node: m}} + for len(queue) > 0 { + cur := queue[0] + queue = queue[1:] + + m, ok := cur.node.(map[string]interface{}) + if !ok { + // This should never happen since we should always check that we only add + // nodes of type map[string]interface{} to the queue. + continue + } + for k, v := range m { + if mi, ok := v.(map[string]interface{}); ok { + queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi}) + } else { + // This is not a struct, so there are no more levels to descend. + curPath := cur.path + "." + k + paths = append(paths, curPath) + } + } + } + return paths +} + +// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +type fieldMaskPathItem struct { + // the list of prior fields leading up to node connected by dots + path string + + // a generic decoded json object the current item to inspect for further path extraction + node interface{} + + // parent message + msg protoreflect.Message +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go new file mode 100644 index 0000000000..de1eef1f4f --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -0,0 +1,235 @@ +package runtime + +import ( + "context" + "errors" + "io" + "net/http" + "net/textproto" + "strconv" + "strings" + + "google.golang.org/genproto/googleapis/api/httpbody" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// ForwardResponseStream forwards the stream from gRPC server to REST client. 
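+//
+// Messages are written as newline-delimited JSON unless the marshaler
+// provides its own delimiter: each chunk is wrapped as {"result": ...} and a
+// terminal error as {"error": ...} (see errorChunk below). A hedged example
+// of two streamed chunks on the wire:
+//
+//	{"result":{"name":"a"}}
+//	{"result":{"name":"b"}}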
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + rc := http.NewResponseController(w) + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Error("Failed to extract ServerMetadata from context") + http.Error(w, "unexpected error", http.StatusInternalServerError) + return + } + handleForwardResponseServerMetadata(w, mux, md) + + w.Header().Set("Transfer-Encoding", "chunked") + if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + var delimiter []byte + if d, ok := marshaler.(Delimited); ok { + delimiter = d.Delimiter() + } else { + delimiter = []byte("\n") + } + + var wroteHeader bool + for { + resp, err := recv() + if errors.Is(err, io.EOF) { + return + } + if err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + + if !wroteHeader { + w.Header().Set("Content-Type", marshaler.ContentType(resp)) + } + + var buf []byte + httpBody, isHTTPBody := resp.(*httpbody.HttpBody) + switch { + case resp == nil: + buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) + case isHTTPBody: + buf = httpBody.GetData() + default: + result := map[string]interface{}{"result": resp} + if rb, ok := resp.(responseBody); ok { + result["result"] = rb.XXX_ResponseBody() + } + + buf, err = marshaler.Marshal(result) + } + + if err != nil { + grpclog.Errorf("Failed to marshal response chunk: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to send response chunk: %v", err) + return + } + wroteHeader = true + if _, err := w.Write(delimiter); err != nil { + grpclog.Errorf("Failed to send delimiter chunk: %v", err) + return + } + err = rc.Flush() + if err != nil { + if errors.Is(err, http.ErrNotSupported) { + grpclog.Errorf("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + grpclog.Errorf("Failed to flush response to client: %v", err) + return + } + } +} + +func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.HeaderMD { + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +func handleForwardResponseTrailerHeader(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k := range md.TrailerMD { + if h, ok := mux.outgoingTrailerMatcher(k); ok { + w.Header().Add("Trailer", textproto.CanonicalMIMEHeaderKey(h)) + } + } +} + +func handleForwardResponseTrailer(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.TrailerMD { + if h, ok := mux.outgoingTrailerMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +// responseBody interface contains method for getting field for marshaling to the response body +// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` +type responseBody interface { + XXX_ResponseBody() interface{} +} 
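+
+// Both ForwardResponseMessage below and ForwardResponseStream above honor the
+// forward-response options registered on the ServeMux. A hedged sketch of
+// registering one at construction time (WithForwardResponseOption is defined
+// in mux.go of this package):
+//
+//	mux := runtime.NewServeMux(
+//		runtime.WithForwardResponseOption(
+//			func(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
+//				w.Header().Set("Cache-Control", "no-store") // runs before every response
+//				return nil
+//			},
+//		),
+//	)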
+ +// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. +func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Error("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. + doForwardTrailers := requestAcceptsTrailers(req) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, mux, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + + contentType := marshaler.ContentType(resp) + w.Header().Set("Content-Type", contentType) + + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + var buf []byte + var err error + if rb, ok := resp.(responseBody); ok { + buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) + } else { + buf, err = marshaler.Marshal(resp) + } + if err != nil { + grpclog.Errorf("Marshal error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + if !doForwardTrailers { + w.Header().Set("Content-Length", strconv.Itoa(len(buf))) + } + + if _, err = w.Write(buf); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + + if doForwardTrailers { + handleForwardResponseTrailer(w, mux, md) + } +} + +func requestAcceptsTrailers(req *http.Request) bool { + te := req.Header.Get("TE") + return strings.Contains(strings.ToLower(te), "trailers") +} + +func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { + if len(opts) == 0 { + return nil + } + for _, opt := range opts { + if err := opt(ctx, w, resp); err != nil { + grpclog.Errorf("Error handling ForwardResponseOptions: %v", err) + return err + } + } + return nil +} + +func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + if !wroteHeader { + w.Header().Set("Content-Type", marshaler.ContentType(msg)) + w.WriteHeader(HTTPStatusFromCode(st.Code())) + } + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } + if _, err := w.Write(delimiter); err != nil { + grpclog.Errorf("Failed to send delimiter chunk: %v", err) + return + } +} + +func errorChunk(st *status.Status) map[string]proto.Message { + return map[string]proto.Message{"error": st.Proto()} +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go new file mode 100644 index 0000000000..6de2e220c7 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go 
@@ -0,0 +1,32 @@ +package runtime + +import ( + "google.golang.org/genproto/googleapis/api/httpbody" +) + +// HTTPBodyMarshaler is a Marshaler which supports marshaling of a +// google.api.HttpBody message as the full response body if it is +// the actual message used as the response. If not, then this will +// simply fall back to the Marshaler specified as its default Marshaler. +type HTTPBodyMarshaler struct { + Marshaler +} + +// ContentType returns its specified content type in case v is a +// google.api.HttpBody message, otherwise it will fall back to the default Marshaler's +// content type. +func (h *HTTPBodyMarshaler) ContentType(v interface{}) string { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetContentType() + } + return h.Marshaler.ContentType(v) +} + +// Marshal marshals "v" by returning the body bytes if v is a +// google.api.HttpBody message, otherwise it falls back to the default Marshaler. +func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetData(), nil + } + return h.Marshaler.Marshal(v) +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go new file mode 100644 index 0000000000..fe52081ab9 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go @@ -0,0 +1,50 @@ +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON +// with the standard "encoding/json" package of Go. +// Although it is generally faster for simple proto messages than JSONPb, +// it does not support advanced features of protobuf, e.g. map, oneof, .... +// +// The NewEncoder and NewDecoder methods return *json.Encoder and +// *json.Decoder respectively. +type JSONBuiltin struct{} + +// ContentType always returns "application/json". +func (*JSONBuiltin) ContentType(_ interface{}) string { + return "application/json" +} + +// Marshal marshals "v" into JSON. +func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func (j *JSONBuiltin) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return json.MarshalIndent(v, prefix, indent) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder { + return json.NewDecoder(r) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte { + return []byte("\n") +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go new file mode 100644 index 0000000000..8376d1e0ef --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -0,0 +1,349 @@ +package runtime + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strconv" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +// JSONPb is a Marshaler which marshals/unmarshals into/from JSON +// with the "google.golang.org/protobuf/encoding/protojson" marshaler. +// It supports the full functionality of protobuf unlike JSONBuiltin. +// +// The NewDecoder method returns a DecoderWrapper, so the underlying +// *json.Decoder methods can be used. +type JSONPb struct { + protojson.MarshalOptions + protojson.UnmarshalOptions +} + +// ContentType always returns "application/json". +func (*JSONPb) ContentType(_ interface{}) string { + return "application/json" +} + +// Marshal marshals "v" into JSON. +func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + if err := j.marshalTo(&buf, v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + buf, err := j.marshalNonProtoField(v) + if err != nil { + return err + } + if j.Indent != "" { + b := &bytes.Buffer{} + if err := json.Indent(b, buf, "", j.Indent); err != nil { + return err + } + buf = b.Bytes() + } + _, err = w.Write(buf) + return err + } + + b, err := j.MarshalOptions.Marshal(p) + if err != nil { + return err + } + + _, err = w.Write(b) + return err +} + +var ( + // protoMessageType is stored to prevent constant lookup of the same type at runtime. + protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalNonProto marshals a non-message field of a protobuf message. +// This function does not correctly marshal arbitrary data structures into JSON, +// it is only capable of marshaling non-message field values of protobuf, +// i.e. primitive types, enums; pointers to primitives or enums; maps from +// integer/string types to primitives/enums/pointers to messages. 
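+//
+// For instance (illustrative), a []*durationpb.Duration field is marshaled
+// element by element through protojson, whereas a map[string]string value
+// falls through to encoding/json.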
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return []byte("null"), nil + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Slice { + if rv.IsNil() { + if j.EmitUnpopulated { + return []byte("[]"), nil + } + return []byte("null"), nil + } + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer + if err := buf.WriteByte('['); err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + if err := buf.WriteByte(','); err != nil { + return nil, err + } + } + if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + if err := buf.WriteByte(']'); err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + + if rv.Type().Elem().Implements(typeProtoEnum) { + var buf bytes.Buffer + if err := buf.WriteByte('['); err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + if err := buf.WriteByte(','); err != nil { + return nil, err + } + } + var err error + if j.UseEnumNumbers { + _, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10)) + } else { + _, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"") + } + if err != nil { + return nil, err + } + } + if err := buf.WriteByte(']'); err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + } + + if rv.Kind() == reflect.Map { + m := make(map[string]*json.RawMessage) + for _, k := range rv.MapKeys() { + buf, err := j.Marshal(rv.MapIndex(k).Interface()) + if err != nil { + return nil, err + } + m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) + } + return json.Marshal(m) + } + if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers { + return json.Marshal(enum.String()) + } + return json.Marshal(rv.Interface()) +} + +// Unmarshal unmarshals JSON "data" into "v" +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { + return unmarshalJSONPb(data, j.UnmarshalOptions, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONPb) NewDecoder(r io.Reader) Decoder { + d := json.NewDecoder(r) + return DecoderWrapper{ + Decoder: d, + UnmarshalOptions: j.UnmarshalOptions, + } +} + +// DecoderWrapper is a wrapper around a *json.Decoder that adds +// support for protos to the Decode method. +type DecoderWrapper struct { + *json.Decoder + protojson.UnmarshalOptions +} + +// Decode wraps the embedded decoder's Decode method to support +// protos using a jsonpb.Unmarshaler. +func (d DecoderWrapper) Decode(v interface{}) error { + return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". 
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder { + return EncoderFunc(func(v interface{}) error { + if err := j.marshalTo(w, v); err != nil { + return err + } + // mimic json.Encoder by adding a newline (makes output + // easier to read when it contains multiple encoded items) + _, err := w.Write(j.Delimiter()) + return err + }) +} + +func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error { + d := json.NewDecoder(bytes.NewReader(data)) + return decodeJSONPb(d, unmarshaler, v) +} + +func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + return decodeNonProtoField(d, unmarshaler, v) + } + + // Decode into bytes for marshalling + var b json.RawMessage + if err := d.Decode(&b); err != nil { + return err + } + + return unmarshaler.Unmarshal([]byte(b), p) +} + +func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", v) + } + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + if rv.Type().ConvertibleTo(typeProtoMessage) { + // Decode into bytes for marshalling + var b json.RawMessage + if err := d.Decode(&b); err != nil { + return err + } + + return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message)) + } + rv = rv.Elem() + } + if rv.Kind() == reflect.Map { + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + conv, ok := convFromType[rv.Type().Key().Kind()] + if !ok { + return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) + } + + m := make(map[string]*json.RawMessage) + if err := d.Decode(&m); err != nil { + return err + } + for k, v := range m { + result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + bk := result[0] + bv := reflect.New(rv.Type().Elem()) + if v == nil { + null := json.RawMessage("null") + v = &null + } + if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil { + return err + } + rv.SetMapIndex(bk, bv.Elem()) + } + return nil + } + if rv.Kind() == reflect.Slice { + if rv.Type().Elem().Kind() == reflect.Uint8 { + var sl []byte + if err := d.Decode(&sl); err != nil { + return err + } + if sl != nil { + rv.SetBytes(sl) + } + return nil + } + + var sl []json.RawMessage + if err := d.Decode(&sl); err != nil { + return err + } + if sl != nil { + rv.Set(reflect.MakeSlice(rv.Type(), 0, 0)) + } + for _, item := range sl { + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil { + return err + } + rv.Set(reflect.Append(rv, bv.Elem())) + } + return nil + } + if _, ok := rv.Interface().(protoEnum); ok { + var repr interface{} + if err := d.Decode(&repr); err != nil { + return err + } + switch v := repr.(type) { + case string: + // TODO(yugui) Should use proto.StructProperties? 
+ return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) + case float64: + rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type())) + return nil + default: + return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) + } + } + return d.Decode(v) +} + +type protoEnum interface { + fmt.Stringer + EnumDescriptor() ([]byte, []int) +} + +var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem() + +var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() + +// Delimiter for newline encoded JSON streams. +func (j *JSONPb) Delimiter() []byte { + return []byte("\n") +} + +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(String), + reflect.Bool: reflect.ValueOf(Bool), + reflect.Float64: reflect.ValueOf(Float64), + reflect.Float32: reflect.ValueOf(Float32), + reflect.Int64: reflect.ValueOf(Int64), + reflect.Int32: reflect.ValueOf(Int32), + reflect.Uint64: reflect.ValueOf(Uint64), + reflect.Uint32: reflect.ValueOf(Uint32), + reflect.Slice: reflect.ValueOf(Bytes), + } +) diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go new file mode 100644 index 0000000000..398c780dc2 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go @@ -0,0 +1,60 @@ +package runtime + +import ( + "errors" + "io" + + "google.golang.org/protobuf/proto" +) + +// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes +type ProtoMarshaller struct{} + +// ContentType always returns "application/octet-stream". +func (*ProtoMarshaller) ContentType(_ interface{}) string { + return "application/octet-stream" +} + +// Marshal marshals "value" into Proto +func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) { + message, ok := value.(proto.Message) + if !ok { + return nil, errors.New("unable to marshal non proto field") + } + return proto.Marshal(message) +} + +// Unmarshal unmarshals proto "data" into "value" +func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error { + message, ok := value.(proto.Message) + if !ok { + return errors.New("unable to unmarshal non proto field") + } + return proto.Unmarshal(data, message) +} + +// NewDecoder returns a Decoder which reads proto stream from "reader". +func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder { + return DecoderFunc(func(value interface{}) error { + buffer, err := io.ReadAll(reader) + if err != nil { + return err + } + return marshaller.Unmarshal(buffer, value) + }) +} + +// NewEncoder returns an Encoder which writes proto stream into "writer". +func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder { + return EncoderFunc(func(value interface{}) error { + buffer, err := marshaller.Marshal(value) + if err != nil { + return err + } + if _, err := writer.Write(buffer); err != nil { + return err + } + + return nil + }) +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go new file mode 100644 index 0000000000..2c0d25ff49 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -0,0 +1,50 @@ +package runtime + +import ( + "io" +) + +// Marshaler defines a conversion between byte sequence and gRPC payloads / fields. 
+type Marshaler interface { + // Marshal marshals "v" into byte sequence. + Marshal(v interface{}) ([]byte, error) + // Unmarshal unmarshals "data" into "v". + // "v" must be a pointer value. + Unmarshal(data []byte, v interface{}) error + // NewDecoder returns a Decoder which reads byte sequence from "r". + NewDecoder(r io.Reader) Decoder + // NewEncoder returns an Encoder which writes bytes sequence into "w". + NewEncoder(w io.Writer) Encoder + // ContentType returns the Content-Type which this marshaler is responsible for. + // The parameter describes the type which is being marshalled, which can sometimes + // affect the content type returned. + ContentType(v interface{}) string +} + +// Decoder decodes a byte sequence +type Decoder interface { + Decode(v interface{}) error +} + +// Encoder encodes gRPC payloads / fields into byte sequence. +type Encoder interface { + Encode(v interface{}) error +} + +// DecoderFunc adapts an decoder function into Decoder. +type DecoderFunc func(v interface{}) error + +// Decode delegates invocations to the underlying function itself. +func (f DecoderFunc) Decode(v interface{}) error { return f(v) } + +// EncoderFunc adapts an encoder function into Encoder +type EncoderFunc func(v interface{}) error + +// Encode delegates invocations to the underlying function itself. +func (f EncoderFunc) Encode(v interface{}) error { return f(v) } + +// Delimited defines the streaming delimiter. +type Delimited interface { + // Delimiter returns the record separator for the stream. + Delimiter() []byte +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go new file mode 100644 index 0000000000..0b051e6e89 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -0,0 +1,109 @@ +package runtime + +import ( + "errors" + "mime" + "net/http" + + "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/encoding/protojson" +) + +// MIMEWildcard is the fallback MIME type used for requests which do not match +// a registered MIME type. +const MIMEWildcard = "*" + +var ( + acceptHeader = http.CanonicalHeaderKey("Accept") + contentTypeHeader = http.CanonicalHeaderKey("Content-Type") + + defaultMarshaler = &HTTPBodyMarshaler{ + Marshaler: &JSONPb{ + MarshalOptions: protojson.MarshalOptions{ + EmitUnpopulated: true, + }, + UnmarshalOptions: protojson.UnmarshalOptions{ + DiscardUnknown: true, + }, + }, + } +) + +// MarshalerForRequest returns the inbound/outbound marshalers for this request. +// It checks the registry on the ServeMux for the MIME type set by the Content-Type header. +// If it isn't set (or the request Content-Type is empty), checks for "*". +// If there are multiple Content-Type headers set, choose the first one that it can +// exactly match in the registry. +// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler. 
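+//
+// For example (illustrative), with only the default registry in place, a
+// request carrying Content-Type: application/x-protobuf and Accept:
+// application/json resolves both the inbound and outbound marshaler to the
+// "*" wildcard entry, since neither MIME type is registered explicitly.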
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) { + for _, acceptVal := range r.Header[acceptHeader] { + if m, ok := mux.marshalers.mimeMap[acceptVal]; ok { + outbound = m + break + } + } + + for _, contentTypeVal := range r.Header[contentTypeHeader] { + contentType, _, err := mime.ParseMediaType(contentTypeVal) + if err != nil { + grpclog.Errorf("Failed to parse Content-Type %s: %v", contentTypeVal, err) + continue + } + if m, ok := mux.marshalers.mimeMap[contentType]; ok { + inbound = m + break + } + } + + if inbound == nil { + inbound = mux.marshalers.mimeMap[MIMEWildcard] + } + if outbound == nil { + outbound = inbound + } + + return inbound, outbound +} + +// marshalerRegistry is a mapping from MIME types to Marshalers. +type marshalerRegistry struct { + mimeMap map[string]Marshaler +} + +// add adds a marshaler for a case-sensitive MIME type string ("*" to match any +// MIME type). +func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { + if len(mime) == 0 { + return errors.New("empty MIME type") + } + + m.mimeMap[mime] = marshaler + + return nil +} + +// makeMarshalerMIMERegistry returns a new registry of marshalers. +// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. +// +// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler +// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with a "application/json" Content-Type. +// "*" can be used to match any Content-Type. +// This can be attached to a ServerMux with the marshaler option. +func makeMarshalerMIMERegistry() marshalerRegistry { + return marshalerRegistry{ + mimeMap: map[string]Marshaler{ + MIMEWildcard: defaultMarshaler, + }, + } +} + +// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound +// Marshalers to a MIME type in mux. +func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption { + return func(mux *ServeMux) { + if err := mux.marshalers.add(mime, marshaler); err != nil { + panic(err) + } + } +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go new file mode 100644 index 0000000000..ed9a7e4387 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -0,0 +1,486 @@ +package runtime + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/textproto" + "regexp" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// UnescapingMode defines the behavior of ServeMux when unescaping path parameters. +type UnescapingMode int + +const ( + // UnescapingModeLegacy is the default V2 behavior, which escapes the entire + // path string before doing any routing. + UnescapingModeLegacy UnescapingMode = iota + + // UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570 + // reserved characters. + UnescapingModeAllExceptReserved + + // UnescapingModeAllExceptSlash unescapes URL path parameters except path + // separators, which will be left as "%2F". + UnescapingModeAllExceptSlash + + // UnescapingModeAllCharacters unescapes all URL path parameters. 
+ UnescapingModeAllCharacters + + // UnescapingModeDefault is the default escaping type. + // TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's + // reference implementation + UnescapingModeDefault = UnescapingModeLegacy +) + +var encodedPathSplitter = regexp.MustCompile("(/|%2F)") + +// A HandlerFunc handles a specific pair of path pattern and HTTP method. +type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) + +// ServeMux is a request multiplexer for grpc-gateway. +// It matches http requests to patterns and invokes the corresponding handler. +type ServeMux struct { + // handlers maps HTTP method to a list of handlers. + handlers map[string][]handler + forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + marshalers marshalerRegistry + incomingHeaderMatcher HeaderMatcherFunc + outgoingHeaderMatcher HeaderMatcherFunc + outgoingTrailerMatcher HeaderMatcherFunc + metadataAnnotators []func(context.Context, *http.Request) metadata.MD + errorHandler ErrorHandlerFunc + streamErrorHandler StreamErrorHandlerFunc + routingErrorHandler RoutingErrorHandlerFunc + disablePathLengthFallback bool + unescapingMode UnescapingMode +} + +// ServeMuxOption is an option that can be given to a ServeMux on construction. +type ServeMuxOption func(*ServeMux) + +// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. +// +// forwardResponseOption is an option that will be called on the relevant context.Context, +// http.ResponseWriter, and proto.Message before every forwarded response. +// +// The message may be nil in the case where just a header is being sent. +func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) + } +} + +// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode +// for more information. +func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.unescapingMode = mode + } +} + +// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. +// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be +// done with careful consideration. +func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption { + return func(serveMux *ServeMux) { + currentQueryParser = queryParameterParser + } +} + +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +// keys (as specified by the IANA, e.g: Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix. If you want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function. +// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'. +// Other headers are not added to the gRPC metadata. 
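+//
+// Illustrative mappings (editor's sketch, derived from the implementation
+// below):
+//
+//	DefaultHeaderMatcher("Authorization")     // "grpcgateway-Authorization", true
+//	DefaultHeaderMatcher("Grpc-Metadata-Foo") // "Foo", true
+//	DefaultHeaderMatcher("X-Custom-Header")   // "", false (not forwarded)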
+func DefaultHeaderMatcher(key string) (string, bool) { + switch key = textproto.CanonicalMIMEHeaderKey(key); { + case isPermanentHTTPHeader(key): + return MetadataPrefix + key, true + case strings.HasPrefix(key, MetadataHeaderPrefix): + return key[len(MetadataHeaderPrefix):], true + } + return "", false +} + +func defaultOutgoingHeaderMatcher(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true +} + +func defaultOutgoingTrailerMatcher(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataTrailerPrefix, key), true +} + +// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. +// +// This matcher will be called with each header in http.Request. If matcher returns true, that header will be +// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return the modified header. +func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + for _, header := range fn.matchedMalformedHeaders() { + grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header) + } + + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } +} + +// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server. +func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string { + if fn == nil { + return nil + } + headers := make([]string, 0) + for header := range malformedHTTPHeaders { + out, accept := fn(header) + if accept && isMalformedHTTPHeader(out) { + headers = append(headers, out) + } + } + return headers +} + +// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. +// +// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be +// passed to http response returned from gateway. To transform the header before passing to response, +// matcher should return the modified header. +func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.outgoingHeaderMatcher = fn + } +} + +// WithOutgoingTrailerMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. +// +// This matcher will be called with each header in response trailer metadata. If matcher returns true, that header will be +// passed to http response returned from gateway. To transform the header before passing to response, +// matcher should return the modified header. +func WithOutgoingTrailerMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.outgoingTrailerMatcher = fn + } +} + +// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used by services that need to read from http.Request and modify gRPC context. A common use case +// is reading token from cookie and adding it in gRPC context. +func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator) + } +} + +// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler. +// +// This can be used to configure a custom error response. 
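+//
+// A minimal sketch (editor's illustration; the handler has the v2
+// ErrorHandlerFunc signature):
+//
+//	mux := runtime.NewServeMux(
+//		runtime.WithErrorHandler(func(ctx context.Context, mux *runtime.ServeMux,
+//			m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+//			w.Header().Set("Content-Type", m.ContentType(err))
+//			w.WriteHeader(http.StatusInternalServerError)
+//			_ = m.NewEncoder(w).Encode(map[string]string{"error": err.Error()})
+//		}),
+//	)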
+func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.errorHandler = fn + } +} + +// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream +// error handler, which allows for customizing the error trailer for server-streaming +// calls. +// +// For stream errors that occur before any response has been written, the mux's +// ErrorHandler will be invoked. However, once data has been written, the errors must +// be handled differently: they must be included in the response body. The response body's +// final message will include the error details returned by the stream error handler. +func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.streamErrorHandler = fn + } +} + +// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors. +// +// Method called for errors which can happen before gRPC route selected or executed. +// The following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest +func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.routingErrorHandler = fn + } +} + +// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. +func WithDisablePathLengthFallback() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.disablePathLengthFallback = true + } +} + +// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath. +// When called the handler will forward the request to the upstream grpc service health check (defined in the +// gRPC Health Checking Protocol). +// +// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how +// to setup the protocol in the grpc server. +// +// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest. +func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption { + return func(s *ServeMux) { + // error can be ignored since pattern is definitely valid + _ = s.HandlePath( + http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string, + ) { + _, outboundMarshaler := MarshalerForRequest(s, r) + + resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{ + Service: r.URL.Query().Get("service"), + }) + if err != nil { + s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + return + } + + w.Header().Set("Content-Type", "application/json") + + if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { + switch resp.GetStatus() { + case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN: + err = status.Error(codes.Unavailable, resp.String()) + case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN: + err = status.Error(codes.NotFound, resp.String()) + } + + s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + return + } + + _ = outboundMarshaler.NewEncoder(w).Encode(resp) + }) + } +} + +// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux. +// +// See WithHealthEndpointAt for the general implementation. 
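+//
+// Wiring sketch (editor's illustration; "conn" is an assumed *grpc.ClientConn
+// to the upstream gRPC server):
+//
+//	mux := runtime.NewServeMux(
+//		runtime.WithHealthzEndpoint(grpc_health_v1.NewHealthClient(conn)),
+//	)
+//	// GET /healthz?service=my.package.Service forwards a HealthCheckRequest
+//	// with Service set to "my.package.Service".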
+func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption { + return WithHealthEndpointAt(healthCheckClient, "/healthz") +} + +// NewServeMux returns a new ServeMux whose internal mapping is empty. +func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + marshalers: makeMarshalerMIMERegistry(), + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, + } + + for _, opt := range opts { + opt(serveMux) + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = defaultOutgoingHeaderMatcher + } + if serveMux.outgoingTrailerMatcher == nil { + serveMux.outgoingTrailerMatcher = defaultOutgoingTrailerMatcher + } + + return serveMux +} + +// Handle associates "h" to the pair of HTTP method and path pattern. +func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) +} + +// HandlePath allows users to configure custom path handlers. +// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/ +func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error { + compiler, err := httprule.Parse(pathPattern) + if err != nil { + return fmt.Errorf("parsing path pattern: %w", err) + } + tp := compiler.Compile() + pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb) + if err != nil { + return fmt.Errorf("creating new pattern: %w", err) + } + s.Handle(meth, pattern, h) + return nil +} + +// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path. +func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + path := r.URL.Path + if !strings.HasPrefix(path, "/") { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest) + return + } + + // TODO(v3): remove UnescapingModeLegacy + if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" { + path = r.URL.RawPath + } + + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { + if err := r.ParseForm(); err != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) + return + } + r.Method = strings.ToUpper(override) + } + + var pathComponents []string + // since in UnescapeModeLegacy, the URL will already have been fully unescaped, if we also split on "%2F" + // in this escaping mode we would be double unescaping but in UnescapingModeAllCharacters, we still do as the + // path is the RawPath (i.e. unescaped). 
That does mean that the behavior of this function will change its default + // behavior when the UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved + if s.unescapingMode == UnescapingModeAllCharacters { + pathComponents = encodedPathSplitter.Split(path[1:], -1) + } else { + pathComponents = strings.Split(path[1:], "/") + } + + lastPathComponent := pathComponents[len(pathComponents)-1] + + for _, h := range s.handlers[r.Method] { + // If the pattern has a verb, explicitly look for a suffix in the last + // component that matches a colon plus the verb. This allows us to + // handle some cases that otherwise can't be correctly handled by the + // former LastIndex case, such as when the verb literal itself contains + // a colon. This should work for all cases that have run through the + // parser because we know what verb we're looking for, however, there + // are still some cases that the parser itself cannot disambiguate. See + // the comment there if interested. + + var verb string + patVerb := h.pat.Verb() + + idx := -1 + if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) { + idx = len(lastPathComponent) - len(patVerb) - 1 + } + if idx == 0 { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound) + return + } + + comps := make([]string, len(pathComponents)) + copy(comps, pathComponents) + + if idx > 0 { + comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:] + } + + pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode) + if err != nil { + var mse MalformedSequenceError + if ok := errors.As(err, &mse); ok { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{ + HTTPStatus: http.StatusBadRequest, + Err: mse, + }) + } + continue + } + h.h(w, r, pathParams) + return + } + + // if no handler has found for the request, lookup for other methods + // to handle POST -> GET fallback if the request is subject to path + // length fallback. + // Note we are not eagerly checking the request here as we want to return the + // right HTTP status code, and we need to process the fallback candidates in + // order to do that. + for m, handlers := range s.handlers { + if m == r.Method { + continue + } + for _, h := range handlers { + var verb string + patVerb := h.pat.Verb() + + idx := -1 + if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) { + idx = len(lastPathComponent) - len(patVerb) - 1 + } + + comps := make([]string, len(pathComponents)) + copy(comps, pathComponents) + + if idx > 0 { + comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:] + } + + pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode) + if err != nil { + var mse MalformedSequenceError + if ok := errors.As(err, &mse); ok { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{ + HTTPStatus: http.StatusBadRequest, + Err: mse, + }) + } + continue + } + + // X-HTTP-Method-Override is optional. Always allow fallback to POST. + // Also, only consider POST -> GET fallbacks, and avoid falling back to + // potentially dangerous operations like DELETE. 
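+			// (editor's note) isPathLengthFallback, defined at the bottom of
+			// this file, additionally requires a POST with Content-Type
+			// "application/x-www-form-urlencoded".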
+ if s.isPathLengthFallback(r) && m == http.MethodGet { + if err := r.ParseForm(); err != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) + return + } + h.h(w, r, pathParams) + return + } + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed) + return + } + } + + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound) +} + +// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. +func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { + return s.forwardResponseOptions +} + +func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { + return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" +} + +type handler struct { + pat Pattern + h HandlerFunc +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go new file mode 100644 index 0000000000..e54507145b --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go @@ -0,0 +1,381 @@ +package runtime + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc/grpclog" +) + +var ( + // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. + ErrNotMatch = errors.New("not match to the path pattern") + // ErrInvalidPattern indicates that the given definition of Pattern is not valid. + ErrInvalidPattern = errors.New("invalid pattern") +) + +type MalformedSequenceError string + +func (e MalformedSequenceError) Error() string { + return "malformed path escape " + strconv.Quote(string(e)) +} + +type op struct { + code utilities.OpCode + operand int +} + +// Pattern is a template pattern of http request paths defined in +// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto +type Pattern struct { + // ops is a list of operations + ops []op + // pool is a constant pool indexed by the operands or vars. + pool []string + // vars is a list of variables names to be bound by this pattern + vars []string + // stacksize is the max depth of the stack + stacksize int + // tailLen is the length of the fixed-size segments after a deep wildcard + tailLen int + // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. + verb string +} + +// NewPattern returns a new Pattern from the given definition values. +// "ops" is a sequence of op codes. "pool" is a constant pool. +// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. +// "version" must be 1 for now. +// It returns an error if the given definition is invalid. 
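+//
+// A hedged example (editor's sketch): a pattern matching only the literal
+// path "/healthz" can be built from a single OpLitPush referencing pool[0]:
+//
+//	p := MustPattern(NewPattern(1, []int{int(utilities.OpLitPush), 0}, []string{"healthz"}, ""))
+//	// p.String() == "/healthz"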
+func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { + if version != 1 { + grpclog.Errorf("unsupported version: %d", version) + return Pattern{}, ErrInvalidPattern + } + + l := len(ops) + if l%2 != 0 { + grpclog.Errorf("odd number of ops codes: %d", l) + return Pattern{}, ErrInvalidPattern + } + + var ( + typedOps []op + stack, maxstack int + tailLen int + pushMSeen bool + vars []string + ) + for i := 0; i < l; i += 2 { + op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpPushM: + if pushMSeen { + grpclog.Error("pushM appears twice") + return Pattern{}, ErrInvalidPattern + } + pushMSeen = true + stack++ + case utilities.OpLitPush: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Errorf("negative literal index: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpConcatN: + if op.operand <= 0 { + grpclog.Errorf("negative concat size: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + stack -= op.operand + if stack < 0 { + grpclog.Error("stack underflow") + return Pattern{}, ErrInvalidPattern + } + stack++ + case utilities.OpCapture: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Errorf("variable name index out of bound: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + v := pool[op.operand] + op.operand = len(vars) + vars = append(vars, v) + stack-- + if stack < 0 { + grpclog.Error("stack underflow") + return Pattern{}, ErrInvalidPattern + } + default: + grpclog.Errorf("invalid opcode: %d", op.code) + return Pattern{}, ErrInvalidPattern + } + + if maxstack < stack { + maxstack = stack + } + typedOps = append(typedOps, op) + } + return Pattern{ + ops: typedOps, + pool: pool, + vars: vars, + stacksize: maxstack, + tailLen: tailLen, + verb: verb, + }, nil +} + +// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. +func MustPattern(p Pattern, err error) Pattern { + if err != nil { + grpclog.Fatalf("Pattern initialization failed: %v", err) + } + return p +} + +// MatchAndEscape examines components to determine if they match to a Pattern. +// MatchAndEscape will return an error if no Patterns matched or if a pattern +// matched but contained malformed escape sequences. If successful, the function +// returns a mapping from field paths to their captured values. +func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) { + if p.verb != verb { + if p.verb != "" { + return nil, ErrNotMatch + } + if len(components) == 0 { + components = []string{":" + verb} + } else { + components = append([]string{}, components...) 
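+			// (editor's note) copy first so that appending the ":verb" suffix
+			// below does not mutate the caller's slice.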
+ components[len(components)-1] += ":" + verb + } + } + + var pos int + stack := make([]string, 0, p.stacksize) + captured := make([]string, len(p.vars)) + l := len(components) + for _, op := range p.ops { + var err error + + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush, utilities.OpLitPush: + if pos >= l { + return nil, ErrNotMatch + } + c := components[pos] + if op.code == utilities.OpLitPush { + if lit := p.pool[op.operand]; c != lit { + return nil, ErrNotMatch + } + } else if op.code == utilities.OpPush { + if c, err = unescape(c, unescapingMode, false); err != nil { + return nil, err + } + } + stack = append(stack, c) + pos++ + case utilities.OpPushM: + end := len(components) + if end < pos+p.tailLen { + return nil, ErrNotMatch + } + end -= p.tailLen + c := strings.Join(components[pos:end], "/") + if c, err = unescape(c, unescapingMode, true); err != nil { + return nil, err + } + stack = append(stack, c) + pos = end + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + captured[op.operand] = stack[n] + stack = stack[:n] + } + } + if pos < l { + return nil, ErrNotMatch + } + bindings := make(map[string]string) + for i, val := range captured { + bindings[p.vars[i]] = val + } + return bindings, nil +} + +// MatchAndEscape examines components to determine if they match to a Pattern. +// It will never perform per-component unescaping (see: UnescapingModeLegacy). +// MatchAndEscape will return an error if no Patterns matched. If successful, +// the function returns a mapping from field paths to their captured values. +// +// Deprecated: Use MatchAndEscape. +func (p Pattern) Match(components []string, verb string) (map[string]string, error) { + return p.MatchAndEscape(components, verb, UnescapingModeDefault) +} + +// Verb returns the verb part of the Pattern. +func (p Pattern) Verb() string { return p.verb } + +func (p Pattern) String() string { + var stack []string + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + stack = append(stack, "*") + case utilities.OpLitPush: + stack = append(stack, p.pool[op.operand]) + case utilities.OpPushM: + stack = append(stack, "**") + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) + } + } + segs := strings.Join(stack, "/") + if p.verb != "" { + return fmt.Sprintf("/%s:%s", segs, p.verb) + } + return "/" + segs +} + +/* + * The following code is adopted and modified from Go's standard library + * and carries the attached license. + * + * Copyright 2009 The Go Authors. All rights reserved. + * Use of this source code is governed by a BSD-style + * license that can be found in the LICENSE file. 
+ */ + +// ishex returns whether or not the given byte is a valid hex character +func ishex(c byte) bool { + switch { + case '0' <= c && c <= '9': + return true + case 'a' <= c && c <= 'f': + return true + case 'A' <= c && c <= 'F': + return true + } + return false +} + +func isRFC6570Reserved(c byte) bool { + switch c { + case '!', '#', '$', '&', '\'', '(', ')', '*', + '+', ',', '/', ':', ';', '=', '?', '@', '[', ']': + return true + default: + return false + } +} + +// unhex converts a hex point to the bit representation +func unhex(c byte) byte { + switch { + case '0' <= c && c <= '9': + return c - '0' + case 'a' <= c && c <= 'f': + return c - 'a' + 10 + case 'A' <= c && c <= 'F': + return c - 'A' + 10 + } + return 0 +} + +// shouldUnescapeWithMode returns true if the character is escapable with the +// given mode +func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool { + switch mode { + case UnescapingModeAllExceptReserved: + if isRFC6570Reserved(c) { + return false + } + case UnescapingModeAllExceptSlash: + if c == '/' { + return false + } + case UnescapingModeAllCharacters: + return true + } + return true +} + +// unescape unescapes a path string using the provided mode +func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) { + // TODO(v3): remove UnescapingModeLegacy + if mode == UnescapingModeLegacy { + return s, nil + } + + if !multisegment { + mode = UnescapingModeAllCharacters + } + + // Count %, check that they're well-formed. + n := 0 + for i := 0; i < len(s); { + if s[i] == '%' { + n++ + if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) { + s = s[i:] + if len(s) > 3 { + s = s[:3] + } + + return "", MalformedSequenceError(s) + } + i += 3 + } else { + i++ + } + } + + if n == 0 { + return s, nil + } + + var t strings.Builder + t.Grow(len(s)) + for i := 0; i < len(s); i++ { + switch s[i] { + case '%': + c := unhex(s[i+1])<<4 | unhex(s[i+2]) + if shouldUnescapeWithMode(c, mode) { + t.WriteByte(c) + i += 2 + continue + } + fallthrough + default: + t.WriteByte(s[i]) + } + } + + return t.String(), nil +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go new file mode 100644 index 0000000000..d549407f20 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -0,0 +1,80 @@ +package runtime + +import ( + "google.golang.org/protobuf/proto" +) + +// StringP returns a pointer to a string whose pointee is same as the given string value. +func StringP(val string) (*string, error) { + return proto.String(val), nil +} + +// BoolP parses the given string representation of a boolean value, +// and returns a pointer to a bool whose value is same as the parsed value. +func BoolP(val string) (*bool, error) { + b, err := Bool(val) + if err != nil { + return nil, err + } + return proto.Bool(b), nil +} + +// Float64P parses the given string representation of a floating point number, +// and returns a pointer to a float64 whose value is same as the parsed number. +func Float64P(val string) (*float64, error) { + f, err := Float64(val) + if err != nil { + return nil, err + } + return proto.Float64(f), nil +} + +// Float32P parses the given string representation of a floating point number, +// and returns a pointer to a float32 whose value is same as the parsed number. 
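+// For example (editor's sketch):
+//
+//	f, err := Float32P("1.5") // err == nil, *f == 1.5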
+func Float32P(val string) (*float32, error) { + f, err := Float32(val) + if err != nil { + return nil, err + } + return proto.Float32(f), nil +} + +// Int64P parses the given string representation of an integer +// and returns a pointer to a int64 whose value is same as the parsed integer. +func Int64P(val string) (*int64, error) { + i, err := Int64(val) + if err != nil { + return nil, err + } + return proto.Int64(i), nil +} + +// Int32P parses the given string representation of an integer +// and returns a pointer to a int32 whose value is same as the parsed integer. +func Int32P(val string) (*int32, error) { + i, err := Int32(val) + if err != nil { + return nil, err + } + return proto.Int32(i), err +} + +// Uint64P parses the given string representation of an integer +// and returns a pointer to a uint64 whose value is same as the parsed integer. +func Uint64P(val string) (*uint64, error) { + i, err := Uint64(val) + if err != nil { + return nil, err + } + return proto.Uint64(i), err +} + +// Uint32P parses the given string representation of an integer +// and returns a pointer to a uint32 whose value is same as the parsed integer. +func Uint32P(val string) (*uint32, error) { + i, err := Uint32(val) + if err != nil { + return nil, err + } + return proto.Uint32(i), err +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go new file mode 100644 index 0000000000..fe634174b8 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -0,0 +1,372 @@ +package runtime + +import ( + "errors" + "fmt" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/known/durationpb" + field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" + "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`) + +var currentQueryParser QueryParameterParser = &DefaultQueryParser{} + +// QueryParameterParser defines interface for all query parameter parsers +type QueryParameterParser interface { + Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error +} + +// PopulateQueryParameters parses query parameters +// into "msg" using current query parser +func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + return currentQueryParser.Parse(msg, values, filter) +} + +// DefaultQueryParser is a QueryParameterParser which implements the default +// query parameters parsing behavior. +// +// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context. +type DefaultQueryParser struct{} + +// Parse populates "values" into "msg". +// A value is ignored if its key starts with one of the elements in "filter". +func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + for key, values := range values { + if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 { + key = match[1] + values = append([]string{match[2]}, values...) 
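+			// (editor's note) e.g. the query "attrs[color]=red" arrives as key
+			// "attrs[color]" and is rewritten to key "attrs" with values
+			// ["color", "red"], which populateMapField later consumes as a
+			// single key/value pair.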
+ } + + msgValue := msg.ProtoReflect() + fieldPath := normalizeFieldPath(msgValue, strings.Split(key, ".")) + if filter.HasCommonPrefix(fieldPath) { + continue + } + if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil { + return err + } + } + return nil +} + +// PopulateFieldFromPath sets a value in a nested Protobuf structure. +func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { + fieldPath := strings.Split(fieldPathString, ".") + return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value}) +} + +func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string { + newFieldPath := make([]string, 0, len(fieldPath)) + for i, fieldName := range fieldPath { + fields := msgValue.Descriptor().Fields() + fieldDesc := fields.ByTextName(fieldName) + if fieldDesc == nil { + fieldDesc = fields.ByJSONName(fieldName) + } + if fieldDesc == nil { + // return initial field path values if no matching message field was found + return fieldPath + } + + newFieldPath = append(newFieldPath, string(fieldDesc.Name())) + + // If this is the last element, we're done + if i == len(fieldPath)-1 { + break + } + + // Only singular message fields are allowed + if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated { + return fieldPath + } + + // Get the nested message + msgValue = msgValue.Get(fieldDesc).Message() + } + + return newFieldPath +} + +func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error { + if len(fieldPath) < 1 { + return errors.New("no field path") + } + if len(values) < 1 { + return errors.New("no value provided") + } + + var fieldDescriptor protoreflect.FieldDescriptor + for i, fieldName := range fieldPath { + fields := msgValue.Descriptor().Fields() + + // Get field by name + fieldDescriptor = fields.ByName(protoreflect.Name(fieldName)) + if fieldDescriptor == nil { + fieldDescriptor = fields.ByJSONName(fieldName) + if fieldDescriptor == nil { + // We're not returning an error here because this could just be + // an extra query parameter that isn't part of the request. 
+ grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, ".")) + return nil + } + } + + // If this is the last element, we're done + if i == len(fieldPath)-1 { + break + } + + // Only singular message fields are allowed + if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated { + return fmt.Errorf("invalid path: %q is not a message", fieldName) + } + + // Get the nested message + msgValue = msgValue.Mutable(fieldDescriptor).Message() + } + + // Check if oneof already set + if of := fieldDescriptor.ContainingOneof(); of != nil { + if f := msgValue.WhichOneof(of); f != nil { + return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) + } + } + + switch { + case fieldDescriptor.IsList(): + return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values) + case fieldDescriptor.IsMap(): + return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values) + } + + if len(values) > 1 { + return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", ")) + } + + return populateField(fieldDescriptor, msgValue, values[0]) +} + +func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error { + v, err := parseField(fieldDescriptor, value) + if err != nil { + return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err) + } + + msgValue.Set(fieldDescriptor, v) + return nil +} + +func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error { + for _, value := range values { + v, err := parseField(fieldDescriptor, value) + if err != nil { + return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err) + } + list.Append(v) + } + + return nil +} + +func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error { + if len(values) != 2 { + return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName()) + } + + key, err := parseField(fieldDescriptor.MapKey(), values[0]) + if err != nil { + return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err) + } + + value, err := parseField(fieldDescriptor.MapValue(), values[1]) + if err != nil { + return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err) + } + + mp.Set(key.MapKey(), value) + + return nil +} + +func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) { + switch fieldDescriptor.Kind() { + case protoreflect.BoolKind: + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBool(v), nil + case protoreflect.EnumKind: + enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName()) + if err != nil { + if errors.Is(err, protoregistry.NotFound) { + return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName()) + } + return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err) + } + // Look for enum by name + v := enum.Descriptor().Values().ByName(protoreflect.Name(value)) + if v == nil { + i, err := strconv.Atoi(value) + if err != nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + // Look for enum by number + if v = 
enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + } + return protoreflect.ValueOfEnum(v.Number()), nil + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt32(int32(v)), nil + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt64(v), nil + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint32(uint32(v)), nil + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint64(v), nil + case protoreflect.FloatKind: + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat32(float32(v)), nil + case protoreflect.DoubleKind: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat64(v), nil + case protoreflect.StringKind: + return protoreflect.ValueOfString(value), nil + case protoreflect.BytesKind: + v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBytes(v), nil + case protoreflect.MessageKind, protoreflect.GroupKind: + return parseMessage(fieldDescriptor.Message(), value) + default: + panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind())) + } +} + +func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) { + var msg proto.Message + switch msgDescriptor.FullName() { + case "google.protobuf.Timestamp": + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return protoreflect.Value{}, err + } + msg = timestamppb.New(t) + case "google.protobuf.Duration": + d, err := time.ParseDuration(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = durationpb.New(d) + case "google.protobuf.DoubleValue": + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Double(v) + case "google.protobuf.FloatValue": + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Float(float32(v)) + case "google.protobuf.Int64Value": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Int64(v) + case "google.protobuf.Int32Value": + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Int32(int32(v)) + case "google.protobuf.UInt64Value": + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.UInt64(v) + case "google.protobuf.UInt32Value": + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.UInt32(uint32(v)) + case "google.protobuf.BoolValue": + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = 
wrapperspb.Bool(v) + case "google.protobuf.StringValue": + msg = wrapperspb.String(value) + case "google.protobuf.BytesValue": + v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Bytes(v) + case "google.protobuf.FieldMask": + fm := &field_mask.FieldMask{} + fm.Paths = append(fm.Paths, strings.Split(value, ",")...) + msg = fm + case "google.protobuf.Value": + var v structpb.Value + if err := protojson.Unmarshal([]byte(value), &v); err != nil { + return protoreflect.Value{}, err + } + msg = &v + case "google.protobuf.Struct": + var v structpb.Struct + if err := protojson.Unmarshal([]byte(value), &v); err != nil { + return protoreflect.Value{}, err + } + msg = &v + default: + return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName())) + } + + return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil +} diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel new file mode 100644 index 0000000000..b894094657 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "utilities", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "string_array_flag.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities", +) + +go_test( + name = "utilities_test", + size = "small", + srcs = [ + "string_array_flag_test.go", + "trie_test.go", + ], + deps = [":utilities"], +) + +alias( + name = "go_default_library", + actual = ":utilities", + visibility = ["//visibility:public"], +) diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go new file mode 100644 index 0000000000..cf79a4d588 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go @@ -0,0 +1,2 @@ +// Package utilities provides members for internal use in grpc-gateway. +package utilities diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go new file mode 100644 index 0000000000..dfe7de4864 --- /dev/null +++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -0,0 +1,22 @@ +package utilities + +// An OpCode is a opcode of compiled path patterns. +type OpCode int + +// These constants are the valid values of OpCode. +const ( + // OpNop does nothing + OpNop = OpCode(iota) + // OpPush pushes a component to stack + OpPush + // OpLitPush pushes a component to stack if it matches to the literal + OpLitPush + // OpPushM concatenates the remaining components and pushes it to stack + OpPushM + // OpConcatN pops N items from stack, concatenates them and pushes it back to stack + OpConcatN + // OpCapture pops an item and binds it to the variable + OpCapture + // OpEnd is the least positive invalid opcode. 
+	OpEnd
+)
diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
new file mode 100644
index 0000000000..01d26edae3
--- /dev/null
+++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
@@ -0,0 +1,19 @@
+package utilities
+
+import (
+	"bytes"
+	"io"
+)
+
+// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
+// at the start of the stream.
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+	b, err := io.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return func() io.Reader {
+		return bytes.NewReader(b)
+	}, nil
+}
diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
new file mode 100644
index 0000000000..d224ab776c
--- /dev/null
+++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
@@ -0,0 +1,33 @@
+package utilities
+
+import (
+	"flag"
+	"strings"
+)
+
+// flagInterface is a cut-down interface to `flag`.
+type flagInterface interface {
+	Var(value flag.Value, name string, usage string)
+}
+
+// StringArrayFlag defines a flag with the specified name and usage string.
+// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
+func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
+	value := &StringArrayFlags{}
+	f.Var(value, name, usage)
+	return value
+}
+
+// StringArrayFlags is a wrapper of `[]string` to provide an interface for `flag.Var`.
+type StringArrayFlags []string
+
+// String returns a string representation of `StringArrayFlags`.
+func (i *StringArrayFlags) String() string {
+	return strings.Join(*i, ",")
+}
+
+// Set appends a value to `StringArrayFlags`.
+func (i *StringArrayFlags) Set(value string) error {
+	*i = append(*i, value)
+	return nil
+}
diff --git a/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
new file mode 100644
index 0000000000..dd99b0ed25
--- /dev/null
+++ b/hack/tools/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
@@ -0,0 +1,174 @@
+package utilities
+
+import (
+	"sort"
+)
+
+// DoubleArray is a double-array implementation of a trie over sequences of strings.
+type DoubleArray struct {
+	// Encoding keeps an encoding from string to int.
+	Encoding map[string]int
+	// Base is the base array of the double array.
+	Base []int
+	// Check is the check array of the double array.
+	Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
+func NewDoubleArray(seqs [][]string) *DoubleArray { + da := &DoubleArray{Encoding: make(map[string]int)} + if len(seqs) == 0 { + return da + } + + encoded := registerTokens(da, seqs) + sort.Sort(byLex(encoded)) + + root := node{row: -1, col: -1, left: 0, right: len(encoded)} + addSeqs(da, encoded, 0, root) + + for i := len(da.Base); i > 0; i-- { + if da.Check[i-1] != 0 { + da.Base = da.Base[:i] + da.Check = da.Check[:i] + break + } + } + return da +} + +func registerTokens(da *DoubleArray, seqs [][]string) [][]int { + var result [][]int + for _, seq := range seqs { + encoded := make([]int, 0, len(seq)) + for _, token := range seq { + if _, ok := da.Encoding[token]; !ok { + da.Encoding[token] = len(da.Encoding) + } + encoded = append(encoded, da.Encoding[token]) + } + result = append(result, encoded) + } + for i := range result { + result[i] = append(result[i], len(da.Encoding)) + } + return result +} + +type node struct { + row, col int + left, right int +} + +func (n node) value(seqs [][]int) int { + return seqs[n.row][n.col] +} + +func (n node) children(seqs [][]int) []*node { + var result []*node + lastVal := int(-1) + last := new(node) + for i := n.left; i < n.right; i++ { + if lastVal == seqs[i][n.col+1] { + continue + } + last.right = i + last = &node{ + row: i, + col: n.col + 1, + left: i, + } + result = append(result, last) + } + last.right = n.right + return result +} + +func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { + ensureSize(da, pos) + + children := n.children(seqs) + var i int + for i = 1; ; i++ { + ok := func() bool { + for _, child := range children { + code := child.value(seqs) + j := i + code + ensureSize(da, j) + if da.Check[j] != 0 { + return false + } + } + return true + }() + if ok { + break + } + } + da.Base[pos] = i + for _, child := range children { + code := child.value(seqs) + j := i + code + da.Check[j] = pos + 1 + } + terminator := len(da.Encoding) + for _, child := range children { + code := child.value(seqs) + if code == terminator { + continue + } + j := i + code + addSeqs(da, seqs, j, *child) + } +} + +func ensureSize(da *DoubleArray, i int) { + for i >= len(da.Base) { + da.Base = append(da.Base, make([]int, len(da.Base)+1)...) + da.Check = append(da.Check, make([]int, len(da.Check)+1)...) + } +} + +type byLex [][]int + +func (l byLex) Len() int { return len(l) } +func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l byLex) Less(i, j int) bool { + si := l[i] + sj := l[j] + var k int + for k = 0; k < len(si) && k < len(sj); k++ { + if si[k] < sj[k] { + return true + } + if si[k] > sj[k] { + return false + } + } + return k < len(sj) +} + +// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. 
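+// For example (editor's sketch):
+//
+//	da := NewDoubleArray([][]string{{"a", "b"}, {"x"}})
+//	da.HasCommonPrefix([]string{"a", "b", "c"}) // true: {"a", "b"} is a stored prefix
+//	da.HasCommonPrefix([]string{"a"})           // false: no stored sequence prefixes {"a"}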
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool { + if len(da.Base) == 0 { + return false + } + + var i int + for _, t := range seq { + code, ok := da.Encoding[t] + if !ok { + break + } + j := da.Base[i] + code + if len(da.Check) <= j || da.Check[j] != i+1 { + break + } + i = j + } + j := da.Base[i] + len(da.Encoding) + if len(da.Check) <= j || da.Check[j] != i+1 { + return false + } + return true +} diff --git a/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/.gitignore b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/CHANGELOG.md b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/CHANGELOG.md new file mode 100644 index 0000000000..556f1a67b1 --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/CHANGELOG.md @@ -0,0 +1,27 @@ +# UNRELEASED + +# 2.0.0 (December 15th, 2022) + +* Update API to use generics [[GH-43](https://github.com/hashicorp/go-immutable-radix/pull/43)) + +# 1.3.0 (September 17th, 2020) + +FEATURES + +* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)] + +# 1.2.0 (March 18th, 2020) + +FEATURES + +* Adds a `Clone` method to `Txn` allowing transactions to be split either into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)] + +# 1.1.0 (May 22nd, 2019) + +FEATURES + +* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)] + +# 1.0.0 (August 30th, 2018) + +* go mod adopted diff --git a/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/LICENSE b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/LICENSE new file mode 100644 index 0000000000..f4f97ee585 --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/LICENSE @@ -0,0 +1,365 @@ +Copyright (c) 2015 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/README.md b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/README.md new file mode 100644 index 0000000000..e17ccf4d11 --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/README.md @@ -0,0 +1,73 @@ +go-immutable-radix [![Run CI Tests](https://github.com/hashicorp/go-immutable-radix/actions/workflows/ci.yaml/badge.svg)](https://github.com/hashicorp/go-immutable-radix/actions/workflows/ci.yaml) +========= + +Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +A tree supports using a transaction to batch multiple updates (insert, delete) +in a more efficient manner than performing each operation one at a time. + +For a mutable variant, see [go-radix](https://github.com/armon/go-radix). + +V2 +== + +The v2 of go-immutable-radix introduces generics to improve compile-time type +safety for users of the package. The module name for v2 is +`github.com/hashicorp/go-immutable-radix/v2`. + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). + +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := iradix.New[int]() +r, _, _ = r.Insert([]byte("foo"), 1) +r, _, _ = r.Insert([]byte("bar"), 2) +r, _, _ = r.Insert([]byte("foobar"), 2) + +// Find the longest prefix match +m, _, _ := r.Root().LongestPrefix([]byte("foozip")) +if string(m) != "foo" { + panic("should be foo") +} +``` + +Here is an example of performing a range scan of the keys. 
+ +```go +// Create a tree +r := iradix.New[int]() +r, _, _ = r.Insert([]byte("001"), 1) +r, _, _ = r.Insert([]byte("002"), 2) +r, _, _ = r.Insert([]byte("005"), 5) +r, _, _ = r.Insert([]byte("010"), 10) +r, _, _ = r.Insert([]byte("100"), 10) + +// Range scan over the keys that sort lexicographically between [003, 050) +it := r.Root().Iterator() +it.SeekLowerBound([]byte("003")) +for key, _, ok := it.Next(); ok; key, _, ok = it.Next() { + if string(key) >= "050" { + break + } + fmt.Println(string(key)) +} +// Output: +// 005 +// 010 +``` + diff --git a/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/edges.go b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/edges.go new file mode 100644 index 0000000000..2e452f3e6f --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/edges.go @@ -0,0 +1,21 @@ +package iradix + +import "sort" + +type edges[T any] []edge[T] + +func (e edges[T]) Len() int { + return len(e) +} + +func (e edges[T]) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges[T]) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges[T]) Sort() { + sort.Sort(e) +} diff --git a/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/iradix.go b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/iradix.go new file mode 100644 index 0000000000..8774020bcc --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/iradix.go @@ -0,0 +1,679 @@ +package iradix + +import ( + "bytes" + "strings" + + "github.com/hashicorp/golang-lru/v2/simplelru" +) + +const ( + // defaultModifiedCache is the default size of the modified node + // cache used per transaction. This is used to cache the updates + // to the nodes near the root, while the leaves do not need to be + // cached. This is important for very large transactions to prevent + // the modified cache from growing to be enormous. This is also used + // to set the max size of the mutation notify maps since those should + // also be bounded in a similar way. + defaultModifiedCache = 8192 +) + +// Tree implements an immutable radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over a standard +// hash map is prefix-based lookups and ordered iteration. The immutability +// means that it is safe to concurrently read from a Tree without any +// coordination. +type Tree[T any] struct { + root *Node[T] + size int +} + +// New returns an empty Tree +func New[T any]() *Tree[T] { + t := &Tree[T]{ + root: &Node[T]{ + mutateCh: make(chan struct{}), + }, + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree[T]) Len() int { + return t.size +} + +// Txn is a transaction on the tree. This transaction is applied +// atomically and returns a new tree when committed. A transaction +// is not thread safe, and should only be used by a single goroutine. +type Txn[T any] struct { + // root is the modified root for the transaction. + root *Node[T] + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node[T] + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. 
This will only hold
+	// up to defaultModifiedCache number of entries.
+	writable *simplelru.LRU[*Node[T], any]
+
+	// trackChannels is used to hold channels that need to be notified to
+	// signal mutation of the tree. This will only hold up to
+	// defaultModifiedCache number of entries, after which we will set the
+	// trackOverflow flag, which will cause us to use a more expensive
+	// algorithm to perform the notifications. Mutation tracking is only
+	// performed if trackMutate is true.
+	trackChannels map[chan struct{}]struct{}
+	trackOverflow bool
+	trackMutate   bool
+}
+
+// Txn starts a new transaction that can be used to mutate the tree
+func (t *Tree[T]) Txn() *Txn[T] {
+	txn := &Txn[T]{
+		root: t.root,
+		snap: t.root,
+		size: t.size,
+	}
+	return txn
+}
+
+// Clone makes an independent copy of the transaction. The new transaction
+// does not track any nodes and has TrackMutate turned off. The cloned
+// transaction will contain any uncommitted writes in the original
+// transaction but further mutations to either will be independent and result
+// in different radix trees on Commit. A cloned transaction may be passed to
+// another goroutine and mutated there independently however each transaction
+// may only be mutated in a single thread.
+func (t *Txn[T]) Clone() *Txn[T] {
+	// reset the writable node cache to avoid leaking future writes into the clone
+	t.writable = nil
+
+	txn := &Txn[T]{
+		root: t.root,
+		snap: t.snap,
+		size: t.size,
+	}
+	return txn
+}
+
+// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
+// then notifications will be issued for affected internal nodes and leaves when
+// the transaction is committed.
+func (t *Txn[T]) TrackMutate(track bool) {
+	t.trackMutate = track
+}
+
+// trackChannel safely attempts to track the given mutation channel, setting the
+// overflow flag if we can no longer track any more. This limits the amount of
+// state that will accumulate during a transaction and we have a slower algorithm
+// to switch to if we overflow.
+func (t *Txn[T]) trackChannel(ch chan struct{}) {
+	// In overflow, make sure we don't store any more objects.
+	if t.trackOverflow {
+		return
+	}
+
+	// If this would overflow the state we reject it and set the flag (since
+	// we aren't tracking everything that's required any longer).
+	if len(t.trackChannels) >= defaultModifiedCache {
+		// Mark that we are in the overflow state
+		t.trackOverflow = true
+
+		// Clear the map so that the channels can be garbage collected. It is
+		// safe to do this since we have already overflowed and will be using
+		// the slow notify algorithm.
+		t.trackChannels = nil
+		return
+	}
+
+	// Create the map on the fly when we need it.
+	if t.trackChannels == nil {
+		t.trackChannels = make(map[chan struct{}]struct{})
+	}
+
+	// Otherwise we are good to track it.
+	t.trackChannels[ch] = struct{}{}
+}
+
+// writeNode returns a node to be modified, if the current node has already been
+// modified during the course of the transaction, it is used in-place. Set
+// forLeafUpdate to true if you are getting a write node to update the leaf,
+// which will set leaf mutation tracking appropriately as well.
+func (t *Txn[T]) writeNode(n *Node[T], forLeafUpdate bool) *Node[T] {
+	// Ensure the writable set exists.
+	if t.writable == nil {
+		lru, err := simplelru.NewLRU[*Node[T], any](defaultModifiedCache, nil)
+		if err != nil {
+			panic(err)
+		}
+		t.writable = lru
+	}
+
+	// If this node has already been modified, we can continue to use it
+	// during this transaction. 
We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. + nc := &Node[T]{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge[T], len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. + t.writable.Add(nc, nil) + return nc +} + +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn[T]) trackChannelsAndCount(n *Node[T]) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn[T]) mergeChild(n *Node[T]) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. 
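+	// The child's prefix is appended to ours, and we adopt its leaf and
+	// edges, collapsing the single-edge chain into one node.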
+ n.prefix = concat(n.prefix, child.prefix) + n.leaf = child.leaf + if len(child.edges) != 0 { + n.edges = make([]edge[T], len(child.edges)) + copy(n.edges, child.edges) + } else { + n.edges = nil + } +} + +// insert does a recursive insertion +func (t *Txn[T]) insert(n *Node[T], k, search []byte, v T) (*Node[T], T, bool) { + var zero T + + // Handle key exhaustion + if len(search) == 0 { + var oldVal T + didUpdate := false + if n.isLeaf() { + oldVal = n.leaf.val + didUpdate = true + } + + nc := t.writeNode(n, true) + nc.leaf = &leafNode[T]{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + return nc, oldVal, didUpdate + } + + // Look for the edge + idx, child := n.getEdge(search[0]) + + // No edge, create one + if child == nil { + e := edge[T]{ + label: search[0], + node: &Node[T]{ + mutateCh: make(chan struct{}), + leaf: &leafNode[T]{ + mutateCh: make(chan struct{}), + key: k, + val: v, + }, + prefix: search, + }, + } + nc := t.writeNode(n, false) + nc.addEdge(e) + return nc, zero, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, child.prefix) + if commonPrefix == len(child.prefix) { + search = search[commonPrefix:] + newChild, oldVal, didUpdate := t.insert(child, k, search, v) + if newChild != nil { + nc := t.writeNode(n, false) + nc.edges[idx].node = newChild + return nc, oldVal, didUpdate + } + return nil, oldVal, didUpdate + } + + // Split the node + nc := t.writeNode(n, false) + splitNode := &Node[T]{ + mutateCh: make(chan struct{}), + prefix: search[:commonPrefix], + } + nc.replaceEdge(edge[T]{ + label: search[0], + node: splitNode, + }) + + // Restore the existing child node + modChild := t.writeNode(child, false) + splitNode.addEdge(edge[T]{ + label: modChild.prefix[commonPrefix], + node: modChild, + }) + modChild.prefix = modChild.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode[T]{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + + // If the new key is a subset, add to to this node + search = search[commonPrefix:] + if len(search) == 0 { + splitNode.leaf = leaf + return nc, zero, false + } + + // Create a new edge for the node + splitNode.addEdge(edge[T]{ + label: search[0], + node: &Node[T]{ + mutateCh: make(chan struct{}), + leaf: leaf, + prefix: search, + }, + }) + return nc, zero, false +} + +// delete does a recursive deletion +func (t *Txn[T]) delete(n *Node[T], search []byte) (*Node[T], *leafNode[T]) { + // Check for key exhaustion + if len(search) == 0 { + if !n.isLeaf() { + return nil, nil + } + // Copy the pointer in case we are in a transaction that already + // modified this node since the node will be reused. Any changes + // made to the node will not affect returning the original leaf + // value. + oldLeaf := n.leaf + + // Remove the leaf node + nc := t.writeNode(n, true) + nc.leaf = nil + + // Check if this node should be merged + if n != t.root && len(nc.edges) == 1 { + t.mergeChild(nc) + } + return nc, oldLeaf + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + if child == nil || !bytes.HasPrefix(search, child.prefix) { + return nil, nil + } + + // Consume the search prefix + search = search[len(child.prefix):] + newChild, leaf := t.delete(child, search) + if newChild == nil { + return nil, nil + } + + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. 
This is pretty subtle, + // so be careful if you change any of the logic here. + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, leaf +} + +// delete does a recursive deletion +func (t *Txn[T]) deletePrefix(n *Node[T], search []byte) (*Node[T], int) { + // Check for key exhaustion + if len(search) == 0 { + nc := t.writeNode(n, true) + if n.isLeaf() { + nc.leaf = nil + } + nc.edges = nil + return nc, t.trackChannelsAndCount(n) + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + // We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix + // Need to do both so that we can delete prefixes that don't correspond to any node in the tree + if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) { + return nil, 0 + } + + // Consume the search prefix + if len(child.prefix) > len(search) { + search = []byte("") + } else { + search = search[len(child.prefix):] + } + newChild, numDeletions := t.deletePrefix(child, search) + if newChild == nil { + return nil, 0 + } + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. This is pretty subtle, + // so be careful if you change any of the logic here. + + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, numDeletions +} + +// Insert is used to add or update a given key. The return provides +// the previous value and a bool indicating if any was set. +func (t *Txn[T]) Insert(k []byte, v T) (T, bool) { + newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v) + if newRoot != nil { + t.root = newRoot + } + if !didUpdate { + t.size++ + } + return oldVal, didUpdate +} + +// Delete is used to delete a given key. Returns the old value if any, +// and a bool indicating if the key was set. +func (t *Txn[T]) Delete(k []byte) (T, bool) { + var zero T + newRoot, leaf := t.delete(t.root, k) + if newRoot != nil { + t.root = newRoot + } + if leaf != nil { + t.size-- + return leaf.val, true + } + return zero, false +} + +// DeletePrefix is used to delete an entire subtree that matches the prefix +// This will delete all nodes under that prefix +func (t *Txn[T]) DeletePrefix(prefix []byte) bool { + newRoot, numDeletions := t.deletePrefix(t.root, prefix) + if newRoot != nil { + t.root = newRoot + t.size = t.size - numDeletions + return true + } + return false + +} + +// Root returns the current root of the radix tree within this +// transaction. The root is not safe across insert and delete operations, +// but can be used to read the current state during a transaction. 
+func (t *Txn[T]) Root() *Node[T] { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn[T]) Get(k []byte) (T, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn[T]) GetWatch(k []byte) (<-chan struct{}, T, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn[T]) Commit() *Tree[T] { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn[T]) CommitOnly() *Tree[T] { + nt := &Tree[T]{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn[T]) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. + if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. + rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn[T]) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. 
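+// The receiver is not modified; readers of the old tree are unaffected.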
+func (t *Tree[T]) Insert(k []byte, v T) (*Tree[T], T, bool) { + txn := t.Txn() + old, ok := txn.Insert(k, v) + return txn.Commit(), old, ok +} + +// Delete is used to delete a given key. Returns the new tree, +// old value if any, and a bool indicating if the key was set. +func (t *Tree[T]) Delete(k []byte) (*Tree[T], T, bool) { + txn := t.Txn() + old, ok := txn.Delete(k) + return txn.Commit(), old, ok +} + +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree[T]) DeletePrefix(k []byte) (*Tree[T], bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + +// Root returns the root node of the tree which can be used for richer +// query operations. +func (t *Tree[T]) Root() *Node[T] { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree[T]) Get(k []byte) (T, bool) { + return t.root.Get(k) +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 []byte) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// concat two byte slices, returning a third new copy +func concat(a, b []byte) []byte { + c := make([]byte, len(a)+len(b)) + copy(c, a) + copy(c[len(a):], b) + return c +} diff --git a/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/iter.go b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/iter.go new file mode 100644 index 0000000000..ffd2721c1a --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/iter.go @@ -0,0 +1,205 @@ +package iradix + +import ( + "bytes" +) + +// Iterator is used to iterate over a set of nodes +// in pre-order +type Iterator[T any] struct { + node *Node[T] + stack []edges[T] +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (i *Iterator[T]) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + // Wipe the stack + i.stack = nil + n := i.node + watch = n.mutateCh + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + i.node = n + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + i.node = nil + return + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + i.node = n + return + } else { + i.node = nil + return + } + } +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (i *Iterator[T]) SeekPrefix(prefix []byte) { + i.SeekPrefixWatch(prefix) +} + +func (i *Iterator[T]) recurseMin(n *Node[T]) *Node[T] { + // Traverse to the minimum child + if n.leaf != nil { + return n + } + nEdges := len(n.edges) + if nEdges > 1 { + // Add all the other edges to the stack (the min node will be added as + // we recurse) + i.stack = append(i.stack, n.edges[1:]) + } + if nEdges > 0 { + return i.recurseMin(n.edges[0].node) + } + // Shouldn't be possible + return nil +} + +// SeekLowerBound is used to seek the iterator to the smallest key that is +// greater or equal to the given key. 
There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. +func (i *Iterator[T]) SeekLowerBound(key []byte) { + // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we + // go because we need only a subset of edges of many nodes in the path to the + // leaf with the lower bound. Note that the iterator will still recurse into + // children that we don't traverse on the way to the reverse lower bound as it + // walks the stack. + i.stack = []edges[T]{} + // i.node starts off in the common case as pointing to the root node of the + // tree. By the time we return we have either found a lower bound and setup + // the stack to traverse all larger keys, or we have not and the stack and + // node should both be nil to prevent the iterator from assuming it is just + // iterating the whole tree from the root node. Either way this needs to end + // up as nil so just set it here. + n := i.node + i.node = nil + search := key + + found := func(n *Node[T]) { + i.stack = append( + i.stack, + edges[T]{edge[T]{node: n}}, + ) + } + + findMin := func(n *Node[T]) { + n = i.recurseMin(n) + if n != nil { + found(n) + return + } + } + + for { + // Compare current prefix with the search key's same-length prefix. + var prefixCmp int + if len(n.prefix) < len(search) { + prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) + } else { + prefixCmp = bytes.Compare(n.prefix, search) + } + + if prefixCmp > 0 { + // Prefix is larger, that means the lower bound is greater than the search + // and from now on we need to follow the minimum path to the smallest + // leaf under this subtree. + findMin(n) + return + } + + if prefixCmp < 0 { + // Prefix is smaller than search prefix, that means there is no lower + // bound + i.node = nil + return + } + + // Prefix is equal, we are still heading for an exact match. If this is a + // leaf and an exact match we're done. + if n.leaf != nil && bytes.Equal(n.leaf.key, key) { + found(n) + return + } + + // Consume the search prefix if the current node has one. Note that this is + // safe because if n.prefix is longer than the search slice prefixCmp would + // have been > 0 above and the method would have already returned. + search = search[len(n.prefix):] + + if len(search) == 0 { + // We've exhausted the search key, but the current node is not an exact + // match or not a leaf. That means that the leaf value if it exists, and + // all child nodes must be strictly greater, the smallest key in this + // subtree must be the lower bound. + findMin(n) + return + } + + // Otherwise, take the lower bound next edge. + idx, lbNode := n.getLowerBoundEdge(search[0]) + if lbNode == nil { + return + } + + // Create stack edges for the all strictly higher edges in this node. 
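+		// These entries are popped only after the subtree under the lower
+		// bound edge has been exhausted, preserving ascending key order.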
+ if idx+1 < len(n.edges) { + i.stack = append(i.stack, n.edges[idx+1:]) + } + + // Recurse + n = lbNode + } +} + +// Next returns the next node in order +func (i *Iterator[T]) Next() ([]byte, T, bool) { + var zero T + // Initialize our stack if needed + if i.stack == nil && i.node != nil { + i.stack = []edges[T]{{edge[T]{node: i.node}}} + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack + n := len(i.stack) + last := i.stack[n-1] + elem := last[0].node + + // Update the stack + if len(last) > 1 { + i.stack[n-1] = last[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier + if len(elem.edges) > 0 { + i.stack = append(i.stack, elem.edges) + } + + // Return the leaf values if any + if elem.leaf != nil { + return elem.leaf.key, elem.leaf.val, true + } + } + return nil, zero, false +} diff --git a/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/node.go b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/node.go new file mode 100644 index 0000000000..1be963922f --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/node.go @@ -0,0 +1,326 @@ +package iradix + +import ( + "bytes" + "sort" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn[T any] func(k []byte, v T) bool + +// leafNode is used to represent a value +type leafNode[T any] struct { + mutateCh chan struct{} + key []byte + val T +} + +// edge is used to represent an edge node +type edge[T any] struct { + label byte + node *Node[T] +} + +// Node is an immutable node in the radix tree +type Node[T any] struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + + // leaf is used to store possible leaf + leaf *leafNode[T] + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. 
+	// We avoid a fully materialized slice to save memory,
+	// since in most cases we expect to be sparse
+	edges edges[T]
+}
+
+func (n *Node[T]) isLeaf() bool {
+	return n.leaf != nil
+}
+
+func (n *Node[T]) addEdge(e edge[T]) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= e.label
+	})
+	n.edges = append(n.edges, e)
+	if idx != num {
+		copy(n.edges[idx+1:], n.edges[idx:num])
+		n.edges[idx] = e
+	}
+}
+
+func (n *Node[T]) replaceEdge(e edge[T]) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= e.label
+	})
+	if idx < num && n.edges[idx].label == e.label {
+		n.edges[idx].node = e.node
+		return
+	}
+	panic("replacing missing edge")
+}
+
+func (n *Node[T]) getEdge(label byte) (int, *Node[T]) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		return idx, n.edges[idx].node
+	}
+	return -1, nil
+}
+
+func (n *Node[T]) getLowerBoundEdge(label byte) (int, *Node[T]) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	// we want lower bound behavior so return even if it's not an exact match
+	if idx < num {
+		return idx, n.edges[idx].node
+	}
+	return -1, nil
+}
+
+func (n *Node[T]) delEdge(label byte) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		copy(n.edges[idx:], n.edges[idx+1:])
+		n.edges[len(n.edges)-1] = edge[T]{}
+		n.edges = n.edges[:len(n.edges)-1]
+	}
+}
+
+func (n *Node[T]) GetWatch(k []byte) (<-chan struct{}, T, bool) {
+	search := k
+	watch := n.mutateCh
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				return n.leaf.mutateCh, n.leaf.val, true
+			}
+			break
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Update to the finest granularity as the search makes progress
+		watch = n.mutateCh
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	var zero T
+	return watch, zero, false
+}
+
+func (n *Node[T]) Get(k []byte) (T, bool) {
+	_, val, ok := n.GetWatch(k)
+	return val, ok
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
+func (n *Node[T]) LongestPrefix(k []byte) ([]byte, T, bool) {
+	var last *leafNode[T]
+	search := k
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	var zero T
+	return nil, zero, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node[T]) Minimum() ([]byte, T, bool) {
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	var zero T
+	return nil, zero, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node[T]) Maximum() ([]byte, T, bool) {
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node // edges are sorted by label, so the last edge leads toward the maximum key
+ continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } else { + break + } + } + var zero T + return nil, zero, false +} + +// Iterator is used to return an iterator at +// the given node to walk the tree +func (n *Node[T]) Iterator() *Iterator[T] { + return &Iterator[T]{node: n} +} + +// ReverseIterator is used to return an iterator at +// the given node to walk the tree backwards +func (n *Node[T]) ReverseIterator() *ReverseIterator[T] { + return NewReverseIterator(n) +} + +// Iterator is used to return an iterator at +// the given node to walk the tree +func (n *Node[T]) PathIterator(path []byte) *PathIterator[T] { + return &PathIterator[T]{node: n, path: path} +} + +// rawIterator is used to return a raw iterator at the given node to walk the +// tree. +func (n *Node[T]) rawIterator() *rawIterator[T] { + iter := &rawIterator[T]{node: n} + iter.Next() + return iter +} + +// Walk is used to walk the tree +func (n *Node[T]) Walk(fn WalkFn[T]) { + recursiveWalk(n, fn) +} + +// WalkBackwards is used to walk the tree in reverse order +func (n *Node[T]) WalkBackwards(fn WalkFn[T]) { + reverseRecursiveWalk(n, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (n *Node[T]) WalkPrefix(prefix []byte, fn WalkFn[T]) { + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix. +func (n *Node[T]) WalkPath(path []byte, fn WalkFn[T]) { + i := n.PathIterator(path) + + for path, val, ok := i.Next(); ok; path, val, ok = i.Next() { + if fn(path, val) { + return + } + } +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively. Returns true if the walk should be aborted +func recursiveWalk[T any](n *Node[T], fn WalkFn[T]) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// reverseRecursiveWalk is used to do a reverse pre-order +// walk of a node recursively. Returns true if the walk +// should be aborted +func reverseRecursiveWalk[T any](n *Node[T], fn WalkFn[T]) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children in reverse order + for i := len(n.edges) - 1; i >= 0; i-- { + e := n.edges[i] + if reverseRecursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/path_iter.go b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/path_iter.go new file mode 100644 index 0000000000..21942afc8a --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/path_iter.go @@ -0,0 +1,59 @@ +package iradix + +import "bytes" + +// PathIterator is used to iterate over a set of nodes from the root +// down to a specified path. 
This will iterate over the same values that +// the Node.WalkPath method will. +type PathIterator[T any] struct { + node *Node[T] + path []byte + done bool +} + +// Next returns the next node in order +func (i *PathIterator[T]) Next() ([]byte, T, bool) { + // This is mostly just an asynchronous implementation of the WalkPath + // method on the node. + var zero T + var leaf *leafNode[T] + + for leaf == nil && i.node != nil { + // visit the leaf values if any + if i.node.leaf != nil { + leaf = i.node.leaf + } + + i.iterate() + } + + if leaf != nil { + return leaf.key, leaf.val, true + } + + return nil, zero, false +} + +func (i *PathIterator[T]) iterate() { + // Check for key exhaustion + if len(i.path) == 0 { + i.node = nil + return + } + + // Look for an edge + _, i.node = i.node.getEdge(i.path[0]) + if i.node == nil { + return + } + + // Consume the search prefix + if bytes.HasPrefix(i.path, i.node.prefix) { + i.path = i.path[len(i.node.prefix):] + } else { + // there are no more nodes to iterate through so + // nil out the node to prevent returning results + // for subsequent calls to Next() + i.node = nil + } +} diff --git a/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/raw_iter.go b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/raw_iter.go new file mode 100644 index 0000000000..dd84f089d7 --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator[T any] struct { + // node is the starting node in the tree for the iterator. + node *Node[T] + + // stack keeps track of edges in the frontier. + stack []rawStackEntry[T] + + // pos is the current position of the iterator. + pos *Node[T] + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry[T any] struct { + path string + edges edges[T] +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator[T]) Front() *Node[T] { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator[T]) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator[T]) Next() { + // Initialize our stack if needed. + if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry[T]{ + { + edges: edges[T]{ + edge[T]{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. 
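+		// Each stack entry carries the cumulative path so that the effective
+		// key of a node is known even when the node is not a leaf.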
+ if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry[T]{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/reverse_iter.go b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/reverse_iter.go new file mode 100644 index 0000000000..2a06cde7cb --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/go-immutable-radix/v2/reverse_iter.go @@ -0,0 +1,240 @@ +package iradix + +import ( + "bytes" +) + +// ReverseIterator is used to iterate over a set of nodes +// in reverse in-order +type ReverseIterator[T any] struct { + i *Iterator[T] + + // expandedParents stores the set of parent nodes whose relevant children have + // already been pushed into the stack. This can happen during seek or during + // iteration. + // + // Unlike forward iteration we need to recurse into children before we can + // output the value stored in an internal leaf since all children are greater. + // We use this to track whether we have already ensured all the children are + // in the stack. + expandedParents map[*Node[T]]struct{} +} + +// NewReverseIterator returns a new ReverseIterator at a node +func NewReverseIterator[T any](n *Node[T]) *ReverseIterator[T] { + return &ReverseIterator[T]{ + i: &Iterator[T]{node: n}, + } +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (ri *ReverseIterator[T]) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + return ri.i.SeekPrefixWatch(prefix) +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (ri *ReverseIterator[T]) SeekPrefix(prefix []byte) { + ri.i.SeekPrefixWatch(prefix) +} + +// SeekReverseLowerBound is used to seek the iterator to the largest key that is +// lower or equal to the given key. There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. +func (ri *ReverseIterator[T]) SeekReverseLowerBound(key []byte) { + // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we + // go because we need only a subset of edges of many nodes in the path to the + // leaf with the lower bound. Note that the iterator will still recurse into + // children that we don't traverse on the way to the reverse lower bound as it + // walks the stack. + ri.i.stack = []edges[T]{} + // ri.i.node starts off in the common case as pointing to the root node of the + // tree. By the time we return we have either found a lower bound and setup + // the stack to traverse all larger keys, or we have not and the stack and + // node should both be nil to prevent the iterator from assuming it is just + // iterating the whole tree from the root node. Either way this needs to end + // up as nil so just set it here. + n := ri.i.node + ri.i.node = nil + search := key + + if ri.expandedParents == nil { + ri.expandedParents = make(map[*Node[T]]struct{}) + } + + found := func(n *Node[T]) { + ri.i.stack = append(ri.i.stack, edges[T]{edge[T]{node: n}}) + // We need to mark this node as expanded in advance too otherwise the + // iterator will attempt to walk all of its children even though they are + // greater than the lower bound we have found. We've expanded it in the + // sense that all of its children that we want to walk are already in the + // stack (i.e. none of them). 
+ ri.expandedParents[n] = struct{}{} + } + + for { + // Compare current prefix with the search key's same-length prefix. + var prefixCmp int + if len(n.prefix) < len(search) { + prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) + } else { + prefixCmp = bytes.Compare(n.prefix, search) + } + + if prefixCmp < 0 { + // Prefix is smaller than search prefix, that means there is no exact + // match for the search key. But we are looking in reverse, so the reverse + // lower bound will be the largest leaf under this subtree, since it is + // the value that would come right before the current search key if it + // were in the tree. So we need to follow the maximum path in this subtree + // to find it. Note that this is exactly what the iterator will already do + // if it finds a node in the stack that has _not_ been marked as expanded + // so in this one case we don't call `found` and instead let the iterator + // do the expansion and recursion through all the children. + ri.i.stack = append(ri.i.stack, edges[T]{edge[T]{node: n}}) + return + } + + if prefixCmp > 0 { + // Prefix is larger than search prefix, or there is no prefix but we've + // also exhausted the search key. Either way, that means there is no + // reverse lower bound since nothing comes before our current search + // prefix. + return + } + + // If this is a leaf, something needs to happen! Note that if it's a leaf + // and prefixCmp was zero (which it must be to get here) then the leaf value + // is either an exact match for the search, or it's lower. It can't be + // greater. + if n.isLeaf() { + + // Firstly, if it's an exact match, we're done! + if bytes.Equal(n.leaf.key, key) { + found(n) + return + } + + // It's not so this node's leaf value must be lower and could still be a + // valid contender for reverse lower bound. + + // If it has no children then we are also done. + if len(n.edges) == 0 { + // This leaf is the lower bound. + found(n) + return + } + + // Finally, this leaf is internal (has children) so we'll keep searching, + // but we need to add it to the iterator's stack since it has a leaf value + // that needs to be iterated over. It needs to be added to the stack + // before its children below as it comes first. + ri.i.stack = append(ri.i.stack, edges[T]{edge[T]{node: n}}) + // We also need to mark it as expanded since we'll be adding any of its + // relevant children below and so don't want the iterator to re-add them + // on its way back up the stack. + ri.expandedParents[n] = struct{}{} + } + + // Consume the search prefix. Note that this is safe because if n.prefix is + // longer than the search slice prefixCmp would have been > 0 above and the + // method would have already returned. + search = search[len(n.prefix):] + + if len(search) == 0 { + // We've exhausted the search key but we are not at a leaf. That means all + // children are greater than the search key so a reverse lower bound + // doesn't exist in this subtree. Note that there might still be one in + // the whole radix tree by following a different path somewhere further + // up. If that's the case then the iterator's stack will contain all the + // smaller nodes already and Previous will walk through them correctly. + return + } + + // Otherwise, take the lower bound next edge. + idx, lbNode := n.getLowerBoundEdge(search[0]) + + // From here, we need to update the stack with all values lower than + // the lower bound edge. 
+		// Since getLowerBoundEdge() returns -1 when the search prefix is larger
+		// than all edges, we need to place idx at the last edge index so they can
+		// all be placed in the stack, since they come before our search prefix.
+		if idx == -1 {
+			idx = len(n.edges)
+		}
+
+		// Create stack edges for all the strictly lower edges in this node.
+		if len(n.edges[:idx]) > 0 {
+			ri.i.stack = append(ri.i.stack, n.edges[:idx])
+		}
+
+		// Exit if there's no lower bound edge. The stack will have the previous
+		// nodes already.
+		if lbNode == nil {
+			return
+		}
+
+		// Recurse
+		n = lbNode
+	}
+}
+
+// Previous returns the previous node in reverse order
+func (ri *ReverseIterator[T]) Previous() ([]byte, T, bool) {
+	// Initialize our stack if needed
+	if ri.i.stack == nil && ri.i.node != nil {
+		ri.i.stack = []edges[T]{
+			{
+				edge[T]{node: ri.i.node},
+			},
+		}
+	}
+
+	if ri.expandedParents == nil {
+		ri.expandedParents = make(map[*Node[T]]struct{})
+	}
+
+	for len(ri.i.stack) > 0 {
+		// Inspect the last element of the stack
+		n := len(ri.i.stack)
+		last := ri.i.stack[n-1]
+		m := len(last)
+		elem := last[m-1].node
+
+		_, alreadyExpanded := ri.expandedParents[elem]
+
+		// If this is an internal node and we've not seen it already, we need to
+		// leave it in the stack so we can return its possible leaf value _after_
+		// we've recursed through all its children.
+		if len(elem.edges) > 0 && !alreadyExpanded {
+			// Record that we've seen this node!
+			ri.expandedParents[elem] = struct{}{}
+			// Push child edges onto the stack and skip the rest of the loop to
+			// recurse into the largest one.
+			ri.i.stack = append(ri.i.stack, elem.edges)
+			continue
+		}
+
+		// Remove the node from the stack
+		if m > 1 {
+			ri.i.stack[n-1] = last[:m-1]
+		} else {
+			ri.i.stack = ri.i.stack[:n-1]
+		}
+		// We don't need this state any more as it's no longer in the stack, so we
+		// won't visit it again
+		if alreadyExpanded {
+			delete(ri.expandedParents, elem)
+		}
+
+		// If this is a leaf, return it
+		if elem.leaf != nil {
+			return elem.leaf.key, elem.leaf.val, true
+		}
+
+		// It's not a leaf, so keep walking the stack to find the previous leaf.
+	}
+	var zero T
+	return nil, zero, false
+}
diff --git a/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/LICENSE b/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/LICENSE
new file mode 100644
index 0000000000..0e5d580e0e
--- /dev/null
+++ b/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/LICENSE
@@ -0,0 +1,364 @@
+Copyright (c) 2014 HashiCorp, Inc.
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6.
"Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. 
under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. 
Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. 
This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go b/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go new file mode 100644 index 0000000000..5cd74a0343 --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go @@ -0,0 +1,142 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE_list file. + +package internal + +import "time" + +// Entry is an LRU Entry +type Entry[K comparable, V any] struct { + // Next and previous pointers in the doubly-linked list of elements. + // To simplify the implementation, internally a list l is implemented + // as a ring, such that &l.root is both the next element of the last + // list element (l.Back()) and the previous element of the first list + // element (l.Front()). + next, prev *Entry[K, V] + + // The list to which this element belongs. + list *LruList[K, V] + + // The LRU Key of this element. + Key K + + // The Value stored with this element. + Value V + + // The time this element would be cleaned up, optional + ExpiresAt time.Time + + // The expiry bucket item was put in, optional + ExpireBucket uint8 +} + +// PrevEntry returns the previous list element or nil. +func (e *Entry[K, V]) PrevEntry() *Entry[K, V] { + if p := e.prev; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +// LruList represents a doubly linked list. +// The zero Value for LruList is an empty list ready to use. +type LruList[K comparable, V any] struct { + root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list Length excluding (this) sentinel element +} + +// Init initializes or clears list l. +func (l *LruList[K, V]) Init() *LruList[K, V] { + l.root.next = &l.root + l.root.prev = &l.root + l.len = 0 + return l +} + +// NewList returns an initialized list. +func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() } + +// Length returns the number of elements of list l. +// The complexity is O(1). +func (l *LruList[K, V]) Length() int { return l.len } + +// Back returns the last element of list l or nil if the list is empty. +func (l *LruList[K, V]) Back() *Entry[K, V] { + if l.len == 0 { + return nil + } + return l.root.prev +} + +// lazyInit lazily initializes a zero List Value. +func (l *LruList[K, V]) lazyInit() { + if l.root.next == nil { + l.Init() + } +} + +// insert inserts e after at, increments l.len, and returns e. +func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] { + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e + e.list = l + l.len++ + return e +} + +// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at). 
+func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] { + return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at) +} + +// Remove removes e from its list, decrements l.len +func (l *LruList[K, V]) Remove(e *Entry[K, V]) V { + e.prev.next = e.next + e.next.prev = e.prev + e.next = nil // avoid memory leaks + e.prev = nil // avoid memory leaks + e.list = nil + l.len-- + + return e.Value +} + +// move moves e to next to at. +func (l *LruList[K, V]) move(e, at *Entry[K, V]) { + if e == at { + return + } + e.prev.next = e.next + e.next.prev = e.prev + + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e +} + +// PushFront inserts a new element e with value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, time.Time{}, &l.root) +} + +// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, expiresAt, &l.root) +} + +// MoveToFront moves element e to the front of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) { + if e.list != l || l.root.next == e { + return + } + // see comment in List.Remove about initialization of l + l.move(e, &l.root) +} diff --git a/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list b/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list new file mode 100644 index 0000000000..c4764e6b2f --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/simplelru/LICENSE_list @@ -0,0 +1,29 @@ +This license applies to simplelru/list.go + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go b/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go new file mode 100644 index 0000000000..f69792388c --- /dev/null +++ b/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go @@ -0,0 +1,177 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package simplelru + +import ( + "errors" + + "github.com/hashicorp/golang-lru/v2/internal" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback[K comparable, V any] func(key K, value V) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU[K comparable, V any] struct { + size int + evictList *internal.LruList[K, V] + items map[K]*internal.Entry[K, V] + onEvict EvictCallback[K, V] +} + +// NewLRU constructs an LRU of the given size +func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, V], error) { + if size <= 0 { + return nil, errors.New("must provide a positive size") + } + + c := &LRU[K, V]{ + size: size, + evictList: internal.NewList[K, V](), + items: make(map[K]*internal.Entry[K, V]), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU[K, V]) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value = value + return false + } + + // Add new item + ent := c.evictList.PushFront(key, value) + c.items[key] = ent + + evict := c.evictList.Length() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU[K, V]) Get(key K) (value V, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU[K, V]) Contains(key K) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU[K, V]) Peek(key K) (value V, ok bool) { + var ent *internal.Entry[K, V] + if ent, ok = c.items[key]; ok { + return ent.Value, true + } + return +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU[K, V]) Remove(key K) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + c.removeElement(ent) + return ent.Key, ent.Value, true + } + return +} + +// GetOldest returns the oldest entry +func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) { + if ent := c.evictList.Back(); ent != nil { + return ent.Key, ent.Value, true + } + return +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. 
+func (c *LRU[K, V]) Keys() []K {
+	keys := make([]K, c.evictList.Length())
+	i := 0
+	for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
+		keys[i] = ent.Key
+		i++
+	}
+	return keys
+}
+
+// Values returns a slice of the values in the cache, from oldest to newest.
+func (c *LRU[K, V]) Values() []V {
+	values := make([]V, len(c.items))
+	i := 0
+	for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
+		values[i] = ent.Value
+		i++
+	}
+	return values
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU[K, V]) Len() int {
+	return c.evictList.Length()
+}
+
+// Resize changes the cache size.
+func (c *LRU[K, V]) Resize(size int) (evicted int) {
+	diff := c.Len() - size
+	if diff < 0 {
+		diff = 0
+	}
+	for i := 0; i < diff; i++ {
+		c.removeOldest()
+	}
+	c.size = size
+	return diff
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU[K, V]) removeOldest() {
+	if ent := c.evictList.Back(); ent != nil {
+		c.removeElement(ent)
+	}
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) {
+	c.evictList.Remove(e)
+	delete(c.items, e.Key)
+	if c.onEvict != nil {
+		c.onEvict(e.Key, e.Value)
+	}
+}
diff --git a/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go b/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go
new file mode 100644
index 0000000000..043b8bcc3f
--- /dev/null
+++ b/hack/tools/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru_interface.go
@@ -0,0 +1,46 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package simplelru provides a simple LRU implementation based on the built-in container/list.
+package simplelru
+
+// LRUCache is the interface for a simple LRU cache.
+type LRUCache[K comparable, V any] interface {
+	// Adds a value to the cache, returns true if an eviction occurred and
+	// updates the "recently used"-ness of the key.
+	Add(key K, value V) bool
+
+	// Returns key's value from the cache and
+	// updates the "recently used"-ness of the key. #value, isFound
+	Get(key K) (value V, ok bool)
+
+	// Checks if a key exists in the cache without updating the recent-ness.
+	Contains(key K) (ok bool)
+
+	// Returns key's value without updating the "recently used"-ness of the key.
+	Peek(key K) (value V, ok bool)
+
+	// Removes a key from the cache.
+	Remove(key K) bool
+
+	// Removes the oldest entry from the cache.
+	RemoveOldest() (K, V, bool)
+
+	// Returns the oldest entry from the cache. #key, value, isFound
+	GetOldest() (K, V, bool)
+
+	// Returns a slice of the keys in the cache, from oldest to newest.
+	Keys() []K
+
+	// Values returns a slice of the values in the cache, from oldest to newest.
+	Values() []V
+
+	// Returns the number of items in the cache.
+	Len() int
+
+	// Clears all cache entries.
+	Purge()
+
+	// Resizes the cache, returning the number evicted.
+	Resize(int) int
+}
diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/.travis.yml b/hack/tools/vendor/github.com/jessevdk/go-flags/.travis.yml
deleted file mode 100644
index 2fc5e5f5b5..0000000000
--- a/hack/tools/vendor/github.com/jessevdk/go-flags/.travis.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-language: go
-
-os:
-  - linux
-  - osx
-
-go:
-  - 1.16.x
-
-install:
-  # go-flags
-  - go build -v ./...
-
-  # linting
-  - go get -v golang.org/x/lint/golint
-
-  # code coverage
-  - go get golang.org/x/tools/cmd/cover
-  - go get github.com/onsi/ginkgo/ginkgo
-  - go get github.com/modocache/gover
-  - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then go get github.com/mattn/goveralls; fi
-
-script:
-  # go-flags
-  - $(exit $(gofmt -l . | wc -l))
-  - go test -v ./...
-
-  # linting
-  - go tool vet -all=true -v=true . || true
-  - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/golint ./...
-
-  # code coverage
-  - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/ginkgo -r -cover
-  - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/gover
-  - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/goveralls -coverprofile=gover.coverprofile -service=travis-ci -repotoken $COVERALLS_TOKEN; fi
-
-env:
-  # coveralls.io
-  secure: "RCYbiB4P0RjQRIoUx/vG/AjP3mmYCbzOmr86DCww1Z88yNcy3hYr3Cq8rpPtYU5v0g7wTpu4adaKIcqRE9xknYGbqj3YWZiCoBP1/n4Z+9sHW3Dsd9D/GRGeHUus0laJUGARjWoCTvoEtOgTdGQDoX7mH+pUUY0FBltNYUdOiiU="
diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/README.md b/hack/tools/vendor/github.com/jessevdk/go-flags/README.md
index f22650b20a..759eeb0d4e 100644
--- a/hack/tools/vendor/github.com/jessevdk/go-flags/README.md
+++ b/hack/tools/vendor/github.com/jessevdk/go-flags/README.md
@@ -1,7 +1,7 @@
 go-flags: a go library for parsing command line arguments
 =========================================================
 
-[![GoDoc](https://godoc.org/github.com/jessevdk/go-flags?status.png)](https://godoc.org/github.com/jessevdk/go-flags) [![Build Status](https://travis-ci.org/jessevdk/go-flags.svg?branch=master)](https://travis-ci.org/jessevdk/go-flags) [![Coverage Status](https://img.shields.io/coveralls/jessevdk/go-flags.svg)](https://coveralls.io/r/jessevdk/go-flags?branch=master)
+[![GoDoc](https://godoc.org/github.com/jessevdk/go-flags?status.png)](https://godoc.org/github.com/jessevdk/go-flags)
 
 This library provides similar functionality to the builtin flag library of
 go, but provides much more functionality and nicer formatting. From the
@@ -78,6 +78,9 @@ var opts struct {
 
 	// Example of a map
 	IntMap map[string]int `long:"intmap" description:"A map from string to int"`
+
+	// Example of an env variable
+	Thresholds []int `long:"thresholds" default:"1" default:"2" env:"THRESHOLD_VALUES" env-delim:","`
 }
 
 // Callback which will invoke callto: to call a number.
diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/command.go b/hack/tools/vendor/github.com/jessevdk/go-flags/command.go
index 879465d7a5..ac4f1e392b 100644
--- a/hack/tools/vendor/github.com/jessevdk/go-flags/command.go
+++ b/hack/tools/vendor/github.com/jessevdk/go-flags/command.go
@@ -30,6 +30,12 @@ type Command struct {
 	// Whether positional arguments are required
 	ArgsRequired bool
 
+	// Whether to pass all arguments after the first non-option as remaining
+	// command line arguments. This is equivalent to strict POSIX processing.
+	// This is the command-local version of the PassAfterNonOption Parser flag.
+	// It cannot be turned off when the PassAfterNonOption Parser flag is set.
+ PassAfterNonOption bool + commands []*Command hasBuiltinHelpGroup bool args []*Arg @@ -244,6 +250,7 @@ func (c *Command) scanSubcommandHandler(parentg *Group) scanHandler { longDescription := mtag.Get("long-description") subcommandsOptional := mtag.Get("subcommands-optional") aliases := mtag.GetMany("alias") + passAfterNonOption := mtag.Get("pass-after-non-option") subc, err := c.AddCommand(subcommand, shortDescription, longDescription, ptrval.Interface()) @@ -261,6 +268,10 @@ func (c *Command) scanSubcommandHandler(parentg *Group) scanHandler { subc.Aliases = aliases } + if len(passAfterNonOption) > 0 { + subc.PassAfterNonOption = true + } + return true, nil } diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/convert.go b/hack/tools/vendor/github.com/jessevdk/go-flags/convert.go index cda29b2f04..b27f698dc4 100644 --- a/hack/tools/vendor/github.com/jessevdk/go-flags/convert.go +++ b/hack/tools/vendor/github.com/jessevdk/go-flags/convert.go @@ -53,7 +53,7 @@ func getBase(options multiTag, base int) (int, error) { func convertMarshal(val reflect.Value) (bool, string, error) { // Check first for the Marshaler interface - if val.Type().NumMethod() > 0 && val.CanInterface() { + if val.IsValid() && val.Type().NumMethod() > 0 && val.CanInterface() { if marshaler, ok := val.Interface().(Marshaler); ok { ret, err := marshaler.MarshalFlag() return true, ret, err @@ -68,6 +68,10 @@ func convertToString(val reflect.Value, options multiTag) (string, error) { return ret, err } + if !val.IsValid() { + return "", nil + } + tp := val.Type() // Support for time.Duration @@ -220,7 +224,7 @@ func convert(val string, retval reflect.Value, options multiTag) error { retval.SetBool(b) } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - base, err := getBase(options, 10) + base, err := getBase(options, 0) if err != nil { return err @@ -234,7 +238,7 @@ func convert(val string, retval reflect.Value, options multiTag) error { retval.SetInt(parsed) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - base, err := getBase(options, 10) + base, err := getBase(options, 0) if err != nil { return err @@ -267,7 +271,12 @@ func convert(val string, retval reflect.Value, options multiTag) error { retval.Set(reflect.Append(retval, elemval)) case reflect.Map: - parts := strings.SplitN(val, ":", 2) + keyValueDelimiter := options.Get("key-value-delimiter") + if keyValueDelimiter == "" { + keyValueDelimiter = ":" + } + + parts := strings.SplitN(val, keyValueDelimiter, 2) key := parts[0] var value string diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/flags.go b/hack/tools/vendor/github.com/jessevdk/go-flags/flags.go index ac2157dd60..a6acf1be1f 100644 --- a/hack/tools/vendor/github.com/jessevdk/go-flags/flags.go +++ b/hack/tools/vendor/github.com/jessevdk/go-flags/flags.go @@ -8,46 +8,45 @@ The flags package is similar in functionality to the go built-in flag package but provides more options and uses reflection to provide a convenient and succinct way of specifying command line options. - -Supported features +# Supported features The following features are supported in go-flags: - Options with short names (-v) - Options with long names (--verbose) - Options with and without arguments (bool v.s. 
other type) - Options with optional arguments and default values - Option default values from ENVIRONMENT_VARIABLES, including slice and map values - Multiple option groups each containing a set of options - Generate and print well-formatted help message - Passing remaining command line arguments after -- (optional) - Ignoring unknown command line options (optional) - Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification - Supports multiple short options -aux - Supports all primitive go types (string, int{8..64}, uint{8..64}, float) - Supports same option multiple times (can store in slice or last option counts) - Supports maps - Supports function callbacks - Supports namespaces for (nested) option groups + Options with short names (-v) + Options with long names (--verbose) + Options with and without arguments (bool v.s. other type) + Options with optional arguments and default values + Option default values from ENVIRONMENT_VARIABLES, including slice and map values + Multiple option groups each containing a set of options + Generate and print well-formatted help message + Passing remaining command line arguments after -- (optional) + Ignoring unknown command line options (optional) + Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification + Supports multiple short options -aux + Supports all primitive go types (string, int{8..64}, uint{8..64}, float) + Supports same option multiple times (can store in slice or last option counts) + Supports maps + Supports function callbacks + Supports namespaces for (nested) option groups Additional features specific to Windows: - Options with short names (/v) - Options with long names (/verbose) - Windows-style options with arguments use a colon as the delimiter - Modify generated help message with Windows-style / options - Windows style options can be disabled at build time using the "forceposix" - build tag + Options with short names (/v) + Options with long names (/verbose) + Windows-style options with arguments use a colon as the delimiter + Modify generated help message with Windows-style / options + Windows style options can be disabled at build time using the "forceposix" + build tag -Basic usage +# Basic usage The flags package uses structs, reflection and struct field tags to allow users to specify command line options. This results in very simple and concise specification of your application options. For example: - type Options struct { - Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` - } + type Options struct { + Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"` + } This specifies one option with a short name -v and a long name --verbose. When either -v or --verbose is found on the command line, a 'true' value @@ -60,9 +59,9 @@ whenever the option is encountered, a value is appended to the slice. Map options from string to primitive type are also supported. On the command line, you specify the value for such an option as key:value. For example - type Options struct { - AuthorInfo string[string] `short:"a"` - } + type Options struct { + AuthorInfo string[string] `short:"a"` + } Then, the AuthorInfo map can be filled with something like -a name:Jesse -a "surname:van den Kieboom". @@ -71,96 +70,94 @@ Finally, for full control over the conversion between command line argument values and options, user defined types can choose to implement the Marshaler and Unmarshaler interfaces. 
- -Available field tags +# Available field tags The following is a list of tags for struct fields supported by go-flags: - short: the short name of the option (single character) - long: the long name of the option - required: if non empty, makes the option required to appear on the command - line. If a required option is not present, the parser will - return ErrRequired (optional) - description: the description of the option (optional) - long-description: the long description of the option. Currently only - displayed in generated man pages (optional) - no-flag: if non-empty, this field is ignored as an option (optional) - - optional: if non-empty, makes the argument of the option optional. When an - argument is optional it can only be specified using - --option=argument (optional) - optional-value: the value of an optional option when the option occurs - without an argument. This tag can be specified multiple - times in the case of maps or slices (optional) - default: the default value of an option. This tag can be specified - multiple times in the case of slices or maps (optional) - default-mask: when specified, this value will be displayed in the help - instead of the actual default value. This is useful - mostly for hiding otherwise sensitive information from - showing up in the help. If default-mask takes the special - value "-", then no default value will be shown at all - (optional) - env: the default value of the option is overridden from the - specified environment variable, if one has been defined. - (optional) - env-delim: the 'env' default value from environment is split into - multiple values with the given delimiter string, use with - slices and maps (optional) - value-name: the name of the argument value (to be shown in the help) - (optional) - choice: limits the values for an option to a set of values. - Repeat this tag once for each allowable value. - e.g. `long:"animal" choice:"cat" choice:"dog"` - hidden: if non-empty, the option is not visible in the help or man page. - - base: a base (radix) used to convert strings to integer values, the - default base is 10 (i.e. decimal) (optional) - - ini-name: the explicit ini option name (optional) - no-ini: if non-empty this field is ignored as an ini option - (optional) - - group: when specified on a struct field, makes the struct - field a separate group with the given name (optional) - namespace: when specified on a group struct field, the namespace - gets prepended to every option's long name and - subgroup's namespace of this group, separated by - the parser's namespace delimiter (optional) - env-namespace: when specified on a group struct field, the env-namespace - gets prepended to every option's env key and - subgroup's env-namespace of this group, separated by - the parser's env-namespace delimiter (optional) - command: when specified on a struct field, makes the struct - field a (sub)command with the given name (optional) - subcommands-optional: when specified on a command struct field, makes - any subcommands of that command optional (optional) - alias: when specified on a command struct field, adds the - specified name as an alias for the command. Can be - be specified multiple times to add more than one - alias (optional) - positional-args: when specified on a field with a struct type, - uses the fields of that struct to parse remaining - positional command line arguments into (in order - of the fields). If a field has a slice type, - then all remaining arguments will be added to it. 
- Positional arguments are optional by default, - unless the "required" tag is specified together - with the "positional-args" tag. The "required" tag - can also be set on the individual rest argument - fields, to require only the first N positional - arguments. If the "required" tag is set on the - rest arguments slice, then its value determines - the minimum amount of rest arguments that needs to - be provided (e.g. `required:"2"`) (optional) - positional-arg-name: used on a field in a positional argument struct; name - of the positional argument placeholder to be shown in - the help (optional) + short: the short name of the option (single character) + long: the long name of the option + required: if non empty, makes the option required to appear on the command + line. If a required option is not present, the parser will + return ErrRequired (optional) + description: the description of the option (optional) + long-description: the long description of the option. Currently only + displayed in generated man pages (optional) + no-flag: if non-empty, this field is ignored as an option (optional) + + optional: if non-empty, makes the argument of the option optional. When an + argument is optional it can only be specified using + --option=argument (optional) + optional-value: the value of an optional option when the option occurs + without an argument. This tag can be specified multiple + times in the case of maps or slices (optional) + default: the default value of an option. This tag can be specified + multiple times in the case of slices or maps (optional) + default-mask: when specified, this value will be displayed in the help + instead of the actual default value. This is useful + mostly for hiding otherwise sensitive information from + showing up in the help. If default-mask takes the special + value "-", then no default value will be shown at all + (optional) + env: the default value of the option is overridden from the + specified environment variable, if one has been defined. + (optional) + env-delim: the 'env' default value from environment is split into + multiple values with the given delimiter string, use with + slices and maps (optional) + value-name: the name of the argument value (to be shown in the help) + (optional) + choice: limits the values for an option to a set of values. + Repeat this tag once for each allowable value. + e.g. `long:"animal" choice:"cat" choice:"dog"` + hidden: if non-empty, the option is not visible in the help or man page. + + base: a base (radix) used to convert strings to integer values, the + default base is 10 (i.e. 
decimal) (optional) + + ini-name: the explicit ini option name (optional) + no-ini: if non-empty this field is ignored as an ini option + (optional) + + group: when specified on a struct field, makes the struct + field a separate group with the given name (optional) + namespace: when specified on a group struct field, the namespace + gets prepended to every option's long name and + subgroup's namespace of this group, separated by + the parser's namespace delimiter (optional) + env-namespace: when specified on a group struct field, the env-namespace + gets prepended to every option's env key and + subgroup's env-namespace of this group, separated by + the parser's env-namespace delimiter (optional) + command: when specified on a struct field, makes the struct + field a (sub)command with the given name (optional) + subcommands-optional: when specified on a command struct field, makes + any subcommands of that command optional (optional) + alias: when specified on a command struct field, adds the + specified name as an alias for the command. Can be + be specified multiple times to add more than one + alias (optional) + positional-args: when specified on a field with a struct type, + uses the fields of that struct to parse remaining + positional command line arguments into (in order + of the fields). If a field has a slice type, + then all remaining arguments will be added to it. + Positional arguments are optional by default, + unless the "required" tag is specified together + with the "positional-args" tag. The "required" tag + can also be set on the individual rest argument + fields, to require only the first N positional + arguments. If the "required" tag is set on the + rest arguments slice, then its value determines + the minimum amount of rest arguments that needs to + be provided (e.g. `required:"2"`) (optional) + positional-arg-name: used on a field in a positional argument struct; name + of the positional argument placeholder to be shown in + the help (optional) Either the `short:` tag or the `long:` must be specified to make the field eligible as an option. - -Option groups +# Option groups Option groups are a simple way to semantically separate your options. All options in a particular group are shown together in the help under the name @@ -169,14 +166,12 @@ precisely and emphasize the options affiliation to their group. There are currently three ways to specify option groups. - 1. Use NewNamedParser specifying the various option groups. - 2. Use AddGroup to add a group to an existing parser. - 3. Add a struct field to the top-level options annotated with the - group:"group-name" tag. + 1. Use NewNamedParser specifying the various option groups. + 2. Use AddGroup to add a group to an existing parser. + 3. Add a struct field to the top-level options annotated with the + group:"group-name" tag. - - -Commands +# Commands The flags package also has basic support for commands. Commands are often used in monolithic applications that support various commands or actions. @@ -186,9 +181,9 @@ application. There are currently two ways to specify a command. - 1. Use AddCommand on an existing parser. - 2. Add a struct field to your options struct annotated with the - command:"command-name" tag. + 1. Use AddCommand on an existing parser. + 2. Add a struct field to your options struct annotated with the + command:"command-name" tag. The most common, idiomatic way to implement commands is to define a global parser instance and implement each command in a separate file. 
These @@ -204,15 +199,14 @@ command has been specified on the command line, in addition to the options of all the parent commands. I.e. considering a -v flag on the parser and an add command, the following are equivalent: - ./app -v add - ./app add -v + ./app -v add + ./app add -v However, if the -v flag is defined on the add command, then the first of the two examples above would fail since the -v flag is not defined before the add command. - -Completion +# Completion go-flags has builtin support to provide bash completion of flags, commands and argument values. To use completion, the binary which uses go-flags @@ -226,7 +220,7 @@ by replacing the argument parsing routine with the completion routine which outputs completions for the passed arguments. The basic invocation to complete a set of arguments is therefore: - GO_FLAGS_COMPLETION=1 ./completion-example arg1 arg2 arg3 + GO_FLAGS_COMPLETION=1 ./completion-example arg1 arg2 arg3 where `completion-example` is the binary, `arg1` and `arg2` are the current arguments, and `arg3` (the last argument) is the argument @@ -237,20 +231,20 @@ are more than 1 completion items. To use this with bash completion, a simple file can be written which calls the binary which supports go-flags completion: - _completion_example() { - # All arguments except the first one - args=("${COMP_WORDS[@]:1:$COMP_CWORD}") + _completion_example() { + # All arguments except the first one + args=("${COMP_WORDS[@]:1:$COMP_CWORD}") - # Only split on newlines - local IFS=$'\n' + # Only split on newlines + local IFS=$'\n' - # Call completion (note that the first element of COMP_WORDS is - # the executable itself) - COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}")) - return 0 - } + # Call completion (note that the first element of COMP_WORDS is + # the executable itself) + COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}")) + return 0 + } - complete -F _completion_example completion-example + complete -F _completion_example completion-example Completion requires the parser option PassDoubleDash and is therefore enforced if the environment variable GO_FLAGS_COMPLETION is set. 
diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/help.go b/hack/tools/vendor/github.com/jessevdk/go-flags/help.go index 068fce1525..8fd324443a 100644 --- a/hack/tools/vendor/github.com/jessevdk/go-flags/help.go +++ b/hack/tools/vendor/github.com/jessevdk/go-flags/help.go @@ -72,15 +72,15 @@ func (p *Parser) getAlignmentInfo() alignmentInfo { var prevcmd *Command p.eachActiveGroup(func(c *Command, grp *Group) { - if !grp.showInHelp() { - return - } if c != prevcmd { for _, arg := range c.args { ret.updateLen(arg.Name, c != p.Command) } + prevcmd = c + } + if !grp.showInHelp() { + return } - for _, info := range grp.options { if !info.showInHelp() { continue @@ -334,7 +334,11 @@ func (p *Parser) WriteHelp(writer io.Writer) { } if !allcmd.ArgsRequired { - fmt.Fprintf(wr, "[%s]", name) + if arg.Required > 0 { + fmt.Fprintf(wr, "%s", name) + } else { + fmt.Fprintf(wr, "[%s]", name) + } } else { fmt.Fprintf(wr, "%s", name) } diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/ini.go b/hack/tools/vendor/github.com/jessevdk/go-flags/ini.go index 60b36c79c4..565595e5f1 100644 --- a/hack/tools/vendor/github.com/jessevdk/go-flags/ini.go +++ b/hack/tools/vendor/github.com/jessevdk/go-flags/ini.go @@ -113,18 +113,18 @@ func (i *IniParser) ParseFile(filename string) error { // // The format of the ini file is as follows: // -// [Option group name] -// option = value +// [Option group name] +// option = value // // Each section in the ini file represents an option group or command in the // flags parser. The default flags parser option group (i.e. when using // flags.Parse) is named 'Application Options'. The ini option name is matched // in the following order: // -// 1. Compared to the ini-name tag on the option struct field (if present) -// 2. Compared to the struct field name -// 3. Compared to the option long name (if present) -// 4. Compared to the option short name (if present) +// 1. Compared to the ini-name tag on the option struct field (if present) +// 2. Compared to the struct field name +// 3. Compared to the option long name (if present) +// 4. Compared to the option short name (if present) // // Sections for nested groups and commands can be addressed using a dot `.' // namespacing notation (i.e [subcommand.Options]). Group section names are @@ -584,7 +584,7 @@ func (i *IniParser) parse(ini *ini) error { if i.ParseAsDefaults { err = opt.setDefault(pval) } else { - err = opt.set(pval) + err = opt.Set(pval) } if err != nil { diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/option.go b/hack/tools/vendor/github.com/jessevdk/go-flags/option.go index f6d6941813..257996a792 100644 --- a/hack/tools/vendor/github.com/jessevdk/go-flags/option.go +++ b/hack/tools/vendor/github.com/jessevdk/go-flags/option.go @@ -239,7 +239,7 @@ func (option *Option) IsSetDefault() bool { // Set the value of an option to the specified value. An error will be returned // if the specified value could not be converted to the corresponding option // value type. 
-func (option *Option) set(value *string) error { +func (option *Option) Set(value *string) error { kind := option.value.Type().Kind() if (kind == reflect.Map || kind == reflect.Slice) && option.clearReferenceBeforeSet { @@ -287,7 +287,7 @@ func (option *Option) setDefault(value *string) error { return nil } - if err := option.set(value); err != nil { + if err := option.Set(value); err != nil { return err } diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/optstyle_other.go b/hack/tools/vendor/github.com/jessevdk/go-flags/optstyle_other.go index 56dfdae128..f84b69746d 100644 --- a/hack/tools/vendor/github.com/jessevdk/go-flags/optstyle_other.go +++ b/hack/tools/vendor/github.com/jessevdk/go-flags/optstyle_other.go @@ -1,3 +1,4 @@ +//go:build !windows || forceposix // +build !windows forceposix package flags diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/optstyle_windows.go b/hack/tools/vendor/github.com/jessevdk/go-flags/optstyle_windows.go index f3f28aeeff..e80290420a 100644 --- a/hack/tools/vendor/github.com/jessevdk/go-flags/optstyle_windows.go +++ b/hack/tools/vendor/github.com/jessevdk/go-flags/optstyle_windows.go @@ -1,3 +1,4 @@ +//go:build !forceposix // +build !forceposix package flags diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/parser.go b/hack/tools/vendor/github.com/jessevdk/go-flags/parser.go index 3fc3f7ba1c..939dd7bf49 100644 --- a/hack/tools/vendor/github.com/jessevdk/go-flags/parser.go +++ b/hack/tools/vendor/github.com/jessevdk/go-flags/parser.go @@ -113,6 +113,10 @@ const ( // POSIX processing. PassAfterNonOption + // AllowBoolValues allows a user to assign true/false to a boolean value + // rather than raising an error stating it cannot have an argument. + AllowBoolValues + // Default is a convenient default set of options which should cover // most of the uses of the flags package. 
Default = HelpFlag | PrintErrors | PassDoubleDash @@ -252,7 +256,7 @@ func (p *Parser) ParseArgs(args []string) ([]string, error) { } if !argumentIsOption(arg) { - if (p.Options&PassAfterNonOption) != None && s.lookup.commands[arg] == nil { + if ((p.Options&PassAfterNonOption) != None || s.command.PassAfterNonOption) && s.lookup.commands[arg] == nil { // If PassAfterNonOption is set then all remaining arguments // are considered positional if err = s.addArgs(s.arg); err != nil { @@ -521,11 +525,10 @@ func (p *parseState) estimateCommand() error { func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg bool, argument *string) (err error) { if !option.canArgument() { - if argument != nil { + if argument != nil && (p.Options&AllowBoolValues) == None { return newErrorf(ErrNoArgumentForBool, "bool flag `%s' cannot have an argument", option) } - - err = option.set(nil) + err = option.Set(argument) } else if argument != nil || (canarg && !s.eof()) { var arg string @@ -546,13 +549,13 @@ func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg } if err == nil { - err = option.set(&arg) + err = option.Set(&arg) } } else if option.OptionalArgument { option.empty() for _, v := range option.OptionalValue { - err = option.set(&v) + err = option.Set(&v) if err != nil { break diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/termsize.go b/hack/tools/vendor/github.com/jessevdk/go-flags/termsize.go index 829e477add..7bcf66feeb 100644 --- a/hack/tools/vendor/github.com/jessevdk/go-flags/termsize.go +++ b/hack/tools/vendor/github.com/jessevdk/go-flags/termsize.go @@ -1,4 +1,5 @@ -// +build !windows,!plan9,!appengine,!wasm +//go:build !windows && !plan9 && !appengine && !wasm && !aix +// +build !windows,!plan9,!appengine,!wasm,!aix package flags diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go b/hack/tools/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go index c1ff18673b..d839220aa5 100644 --- a/hack/tools/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go +++ b/hack/tools/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go @@ -1,4 +1,5 @@ -// +build plan9 appengine wasm +//go:build plan9 || appengine || wasm || aix +// +build plan9 appengine wasm aix package flags diff --git a/hack/tools/vendor/github.com/jessevdk/go-flags/termsize_windows.go b/hack/tools/vendor/github.com/jessevdk/go-flags/termsize_windows.go index 5c0fa6ba27..189a1b3faf 100644 --- a/hack/tools/vendor/github.com/jessevdk/go-flags/termsize_windows.go +++ b/hack/tools/vendor/github.com/jessevdk/go-flags/termsize_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package flags diff --git a/hack/tools/vendor/github.com/jjti/go-spancheck/.gitignore b/hack/tools/vendor/github.com/jjti/go-spancheck/.gitignore index 1f83be414c..04b66d911b 100644 --- a/hack/tools/vendor/github.com/jjti/go-spancheck/.gitignore +++ b/hack/tools/vendor/github.com/jjti/go-spancheck/.gitignore @@ -17,3 +17,5 @@ # Dependency directories (remove the comment below to include it) # vendor/ src/ + +.vscode \ No newline at end of file diff --git a/hack/tools/vendor/github.com/jjti/go-spancheck/.golangci.yml b/hack/tools/vendor/github.com/jjti/go-spancheck/.golangci.yml index 15d8513d68..5d6ab12875 100644 --- a/hack/tools/vendor/github.com/jjti/go-spancheck/.golangci.yml +++ b/hack/tools/vendor/github.com/jjti/go-spancheck/.golangci.yml @@ -17,7 +17,6 @@ linters: - errcheck - errname - errorlint - - exhaustive # checks exhaustiveness of enum switch 
statements - exportloopref # checks for pointers to enclosing loop variables - gci - gochecknoinits # checks that no init functions are present in Go code @@ -59,12 +58,6 @@ linters-settings: - standard # Standard section: captures all standard packages. - default # Default section: contains all imports that could not be matched to another section type. - prefix(github.com/jjti) - exhaustive: - # Program elements to check for exhaustiveness. - # Default: [ switch ] - check: - - switch - - map gocritic: settings: captLocal: diff --git a/hack/tools/vendor/github.com/jjti/go-spancheck/go.work b/hack/tools/vendor/github.com/jjti/go-spancheck/go.work index 7d0a87b9e1..ff04ca17e2 100644 --- a/hack/tools/vendor/github.com/jjti/go-spancheck/go.work +++ b/hack/tools/vendor/github.com/jjti/go-spancheck/go.work @@ -1,4 +1,4 @@ -go 1.20 +go 1.22.1 use ( . diff --git a/hack/tools/vendor/github.com/jjti/go-spancheck/go.work.sum b/hack/tools/vendor/github.com/jjti/go-spancheck/go.work.sum index 04eadf2c51..c96d590d61 100644 --- a/hack/tools/vendor/github.com/jjti/go-spancheck/go.work.sum +++ b/hack/tools/vendor/github.com/jjti/go-spancheck/go.work.sum @@ -1,4 +1,11 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= diff --git a/hack/tools/vendor/github.com/jjti/go-spancheck/spancheck.go b/hack/tools/vendor/github.com/jjti/go-spancheck/spancheck.go index 8fc7945c6b..49e5817285 100644 --- a/hack/tools/vendor/github.com/jjti/go-spancheck/spancheck.go +++ b/hack/tools/vendor/github.com/jjti/go-spancheck/spancheck.go @@ -309,6 +309,11 @@ outer: } seen[b] = true + // Skip successors that are not nested within this current block. + if _, ok := nestedBlockTypes[b.Kind]; !ok { + continue + } + // Prune the search if the block uses v. if blockUses(pass, b) { continue @@ -330,6 +335,21 @@ outer: return search(defBlock.Succs) } +var nestedBlockTypes = map[cfg.BlockKind]struct{}{ + cfg.KindBody: {}, + cfg.KindForBody: {}, + cfg.KindForLoop: {}, + cfg.KindIfElse: {}, + cfg.KindIfThen: {}, + cfg.KindLabel: {}, + cfg.KindRangeBody: {}, + cfg.KindRangeLoop: {}, + cfg.KindSelectCaseBody: {}, + cfg.KindSelectAfterCase: {}, + cfg.KindSwitchCaseBody: {}, + cfg.KindSwitchNextCase: {}, +} + // usesCall reports whether stmts contain a use of the selName call on variable v. func usesCall( pass *analysis.Pass, @@ -340,10 +360,12 @@ func usesCall( startSpanMatchers []spanStartMatcher, depth int, ) bool { - if depth > 1 { // for perf reasons, do not dive too deep thru func literals, just one level deep check. + if depth > 1 { // for perf reasons, do not dive too deep thru func literals, just two levels deep. 
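The spancheck changes in this hunk prune CFG successors that are not nested blocks and, further down, teach `usesCall` to walk the CFG blocks of a deferred function literal. A sketch of the instrumentation pattern the new `*ast.DeferStmt` branch is meant to recognize; the OpenTelemetry tracer setup is assumed for illustration and is not from the linter's test suite:

```go
package demo

import (
	"context"

	"go.opentelemetry.io/otel"
)

var tracer = otel.Tracer("demo")

func process(ctx context.Context) error {
	ctx, span := tracer.Start(ctx, "process")
	defer func() {
		// span.End() sits inside a deferred literal rather than a plain
		// `defer span.End()`; the new DeferStmt case walks this literal's
		// CFG blocks so the span is still credited as ended.
		span.End()
	}()

	return work(ctx)
}

func work(ctx context.Context) error { return nil }
```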
return false } + cfgs := pass.ResultOf[ctrlflow.Analyzer].(*ctrlflow.CFGs) + found, reAssigned := false, false for _, subStmt := range stmts { stack := []ast.Node{} @@ -351,7 +373,6 @@ func usesCall( switch n := n.(type) { case *ast.FuncLit: if len(stack) > 0 { - cfgs := pass.ResultOf[ctrlflow.Analyzer].(*ctrlflow.CFGs) g := cfgs.FuncLit(n) if g != nil && len(g.Blocks) > 0 { return usesCall(pass, g.Blocks[0].Nodes, sv, selName, ignoreCheckSig, startSpanMatchers, depth+1) @@ -367,6 +388,32 @@ func usesCall( return false } } + case *ast.DeferStmt: + if n.Call == nil { + break + } + + f, ok := n.Call.Fun.(*ast.FuncLit) + if !ok { + break + } + + if g := cfgs.FuncLit(f); g != nil && len(g.Blocks) > 0 { + for _, b := range g.Blocks { + if usesCall( + pass, + b.Nodes, + sv, + selName, + ignoreCheckSig, + startSpanMatchers, + depth+1, + ) { + found = true + return false + } + } + } case nil: if len(stack) > 0 { stack = stack[:len(stack)-1] // pop diff --git a/hack/tools/vendor/github.com/julz/importas/Makefile b/hack/tools/vendor/github.com/julz/importas/Makefile new file mode 100644 index 0000000000..e9838b43bd --- /dev/null +++ b/hack/tools/vendor/github.com/julz/importas/Makefile @@ -0,0 +1,17 @@ +# default task since it's first +.PHONY: all +all: build test + +BINARY = importas +$(BINARY): *.go go.mod go.sum + go build -o $(BINARY) + +.PHONY: build +build: $(BINARY) ## Build binary + +.PHONY: test +test: build ## Unit test + go test -v ./... + +install: ## Install binary + go install diff --git a/hack/tools/vendor/github.com/julz/importas/analyzer.go b/hack/tools/vendor/github.com/julz/importas/analyzer.go index f196534784..25bc09b82f 100644 --- a/hack/tools/vendor/github.com/julz/importas/analyzer.go +++ b/hack/tools/vendor/github.com/julz/importas/analyzer.go @@ -13,7 +13,7 @@ import ( ) var config = &Config{ - RequiredAlias: make(map[string]string), + RequiredAlias: make([][]string, 0), } var Analyzer = &analysis.Analyzer{ @@ -129,11 +129,19 @@ func findEdits(node ast.Node, uses map[*ast.Ident]types.Object, importPath, orig // skip identifiers pointing to a different import statement. continue } + pos := use.Pos() + end := use.End() + replacement := packageReplacement + + if packageReplacement == "." 
{ + replacement = "" + end = end + 1 + } result = append(result, analysis.TextEdit{ - Pos: use.Pos(), - End: use.End(), - NewText: []byte(packageReplacement), + Pos: pos, + End: end, + NewText: []byte(replacement), }) } diff --git a/hack/tools/vendor/github.com/julz/importas/config.go b/hack/tools/vendor/github.com/julz/importas/config.go index 8c9c76d916..58be86c75f 100644 --- a/hack/tools/vendor/github.com/julz/importas/config.go +++ b/hack/tools/vendor/github.com/julz/importas/config.go @@ -4,18 +4,26 @@ import ( "errors" "fmt" "regexp" + "sync" ) type Config struct { - RequiredAlias map[string]string + RequiredAlias aliasList Rules []*Rule DisallowUnaliased bool DisallowExtraAliases bool + muRules sync.Mutex } func (c *Config) CompileRegexp() error { + c.muRules.Lock() + defer c.muRules.Unlock() + if c.Rules != nil { + return nil + } rules := make([]*Rule, 0, len(c.RequiredAlias)) - for path, alias := range c.RequiredAlias { + for _, aliases := range c.RequiredAlias { + path, alias := aliases[0], aliases[1] reg, err := regexp.Compile(fmt.Sprintf("^%s$", path)) if err != nil { return err @@ -26,13 +34,15 @@ func (c *Config) CompileRegexp() error { Alias: alias, }) } - c.Rules = rules return nil } func (c *Config) findRule(path string) *Rule { - for _, rule := range c.Rules { + c.muRules.Lock() + rules := c.Rules + c.muRules.Unlock() + for _, rule := range rules { if rule.Regexp.MatchString(path) { return rule } diff --git a/hack/tools/vendor/github.com/julz/importas/flags.go b/hack/tools/vendor/github.com/julz/importas/flags.go index f8107104ad..cc3f1f3aae 100644 --- a/hack/tools/vendor/github.com/julz/importas/flags.go +++ b/hack/tools/vendor/github.com/julz/importas/flags.go @@ -7,26 +7,27 @@ import ( "strings" ) +var errWrongAlias = errors.New("import flag must be of form path:alias") + func flags(config *Config) flag.FlagSet { fs := flag.FlagSet{} - fs.Var(stringMap(config.RequiredAlias), "alias", "required import alias in form path:alias") + fs.Var(&config.RequiredAlias, "alias", "required import alias in form path:alias") fs.BoolVar(&config.DisallowUnaliased, "no-unaliased", false, "do not allow unaliased imports of aliased packages") fs.BoolVar(&config.DisallowExtraAliases, "no-extra-aliases", false, "do not allow non-required aliases") return fs } -type stringMap map[string]string +type aliasList [][]string -func (v stringMap) Set(val string) error { - spl := strings.SplitN(val, ":", 2) - if len(spl) != 2 { - return errors.New("import flag must be of form path:alias") +func (v *aliasList) Set(val string) error { + lastColon := strings.LastIndex(val, ":") + if lastColon <= 1 { + return errWrongAlias } - - v[spl[0]] = spl[1] + *v = append(*v, []string{val[:lastColon], val[lastColon+1:]}) return nil } -func (v stringMap) String() string { - return fmt.Sprintf("%v", (map[string]string)(v)) +func (v *aliasList) String() string { + return fmt.Sprintf("%v", ([][]string)(*v)) } diff --git a/hack/tools/vendor/github.com/ldez/exptostd/.gitignore b/hack/tools/vendor/github.com/ldez/exptostd/.gitignore new file mode 100644 index 0000000000..ec3a603988 --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/exptostd/.gitignore @@ -0,0 +1,2 @@ +/exptostd +.idea diff --git a/hack/tools/vendor/github.com/ldez/exptostd/.golangci.yml b/hack/tools/vendor/github.com/ldez/exptostd/.golangci.yml new file mode 100644 index 0000000000..7c0a9a019c --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/exptostd/.golangci.yml @@ -0,0 +1,80 @@ +linters: + enable-all: true + disable: + - exportloopref # 
deprecated + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - cyclop # duplicate of gocyclo + - lll + - dupl + - nlreturn + - exhaustive + - exhaustruct + - testpackage + - tparallel + - paralleltest + - prealloc + - varnamelen + - nilnil + - errchkjson + - nonamedreturns + +linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + gocyclo: + min-complexity: 20 + goconst: + min-len: 5 + min-occurrences: 3 + misspell: + locale: US + funlen: + lines: -1 + statements: 40 + godox: + keywords: + - FIXME + gofumpt: + extra-rules: true + depguard: + rules: + main: + deny: + - pkg: "github.com/instana/testify" + desc: not allowed + - pkg: "github.com/pkg/errors" + desc: Should be replaced by standard lib errors package + wsl: + force-case-trailing-whitespace: 1 + allow-trailing-comment: true + gocritic: + enabled-tags: + - diagnostic + - style + - performance + disabled-checks: + - sloppyReassign + - rangeValCopy + - octalLiteral + - paramTypeCombine # already handle by gofumpt.extra-rules + settings: + hugeParam: + sizeThreshold: 100 + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 0 + +output: + show-stats: true + sort-results: true + sort-order: + - linter + - file + +run: + timeout: 5m diff --git a/hack/tools/vendor/github.com/ldez/exptostd/LICENSE b/hack/tools/vendor/github.com/ldez/exptostd/LICENSE new file mode 100644 index 0000000000..c1bf0c3288 --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/exptostd/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2024 Fernandez Ludovic + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/hack/tools/vendor/github.com/ldez/exptostd/Makefile b/hack/tools/vendor/github.com/ldez/exptostd/Makefile new file mode 100644 index 0000000000..ad72751490 --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/exptostd/Makefile @@ -0,0 +1,15 @@ +.PHONY: clean check test build + +default: clean check test build + +clean: + rm -rf dist/ cover.out + +test: clean + go test -v -cover ./... + +check: + golangci-lint run + +build: + go build -ldflags "-s -w" -trimpath ./cmd/exptostd/ diff --git a/hack/tools/vendor/github.com/ldez/exptostd/exptostd.go b/hack/tools/vendor/github.com/ldez/exptostd/exptostd.go new file mode 100644 index 0000000000..6dc61cb27e --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/exptostd/exptostd.go @@ -0,0 +1,337 @@ +// Package exptostd It is an analyzer that detects functions from golang.org/x/exp/ that can be replaced by std functions. +package exptostd + +import ( + "bytes" + "fmt" + "go/ast" + "go/build" + "go/printer" + "go/token" + "go/types" + "os" + "slices" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const ( + go123 = 123 + go121 = 121 + goDevel = 666 +) + +// Result is step analysis results. 
+type Result struct { + shouldKeepImport bool + Diagnostics []analysis.Diagnostic +} + +type stdReplacement struct { + MinGo int + Text string + Suggested func(callExpr *ast.CallExpr) (analysis.SuggestedFix, error) +} + +type analyzer struct { + mapsPkgReplacements map[string]stdReplacement + slicesPkgReplacements map[string]stdReplacement + + skipGoVersionDetection bool + goVersion int +} + +// NewAnalyzer create a new Analyzer. +func NewAnalyzer() *analysis.Analyzer { + _, skip := os.LookupEnv("EXPTOSTD_SKIP_GO_VERSION_CHECK") + + l := &analyzer{ + skipGoVersionDetection: skip, + mapsPkgReplacements: map[string]stdReplacement{ + "Keys": {MinGo: go123, Text: "slices.Collect(maps.Keys())", Suggested: suggestedFixForKeysOrValues}, + "Values": {MinGo: go123, Text: "slices.Collect(maps.Values())", Suggested: suggestedFixForKeysOrValues}, + "Equal": {MinGo: go121, Text: "maps.Equal()"}, + "EqualFunc": {MinGo: go121, Text: "maps.EqualFunc()"}, + "Clone": {MinGo: go121, Text: "maps.Clone()"}, + "Copy": {MinGo: go121, Text: "maps.Copy()"}, + "DeleteFunc": {MinGo: go121, Text: "maps.DeleteFunc()"}, + "Clear": {MinGo: go121, Text: "clear()", Suggested: suggestedFixForClear}, + }, + slicesPkgReplacements: map[string]stdReplacement{ + "Equal": {MinGo: go121, Text: "slices.Equal()"}, + "EqualFunc": {MinGo: go121, Text: "slices.EqualFunc()"}, + "Compare": {MinGo: go121, Text: "slices.Compare()"}, + "CompareFunc": {MinGo: go121, Text: "slices.CompareFunc()"}, + "Index": {MinGo: go121, Text: "slices.Index()"}, + "IndexFunc": {MinGo: go121, Text: "slices.IndexFunc()"}, + "Contains": {MinGo: go121, Text: "slices.Contains()"}, + "ContainsFunc": {MinGo: go121, Text: "slices.ContainsFunc()"}, + "Insert": {MinGo: go121, Text: "slices.Insert()"}, + "Delete": {MinGo: go121, Text: "slices.Delete()"}, + "DeleteFunc": {MinGo: go121, Text: "slices.DeleteFunc()"}, + "Replace": {MinGo: go121, Text: "slices.Replace()"}, + "Clone": {MinGo: go121, Text: "slices.Clone()"}, + "Compact": {MinGo: go121, Text: "slices.Compact()"}, + "CompactFunc": {MinGo: go121, Text: "slices.CompactFunc()"}, + "Grow": {MinGo: go121, Text: "slices.Grow()"}, + "Clip": {MinGo: go121, Text: "slices.Clip()"}, + "Reverse": {MinGo: go121, Text: "slices.Reverse()"}, + + "Sort": {MinGo: go121, Text: "slices.Sort()"}, + "SortFunc": {MinGo: go121, Text: "slices.SortFunc()"}, + "SortStableFunc": {MinGo: go121, Text: "slices.SortStableFunc()"}, + "IsSorted": {MinGo: go121, Text: "slices.IsSorted()"}, + "IsSortedFunc": {MinGo: go121, Text: "slices.IsSortedFunc()"}, + "Min": {MinGo: go121, Text: "slices.Min()"}, + "MinFunc": {MinGo: go121, Text: "slices.MinFunc()"}, + "Max": {MinGo: go121, Text: "slices.Max()"}, + "MaxFunc": {MinGo: go121, Text: "slices.MaxFunc()"}, + "BinarySearch": {MinGo: go121, Text: "slices.BinarySearch()"}, + "BinarySearchFunc": {MinGo: go121, Text: "slices.BinarySearchFunc()"}, + }, + } + + return &analysis.Analyzer{ + Name: "exptostd", + Doc: "Detects functions from golang.org/x/exp/ that can be replaced by std functions.", + Run: l.run, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + } +} + +func (a *analyzer) run(pass *analysis.Pass) (any, error) { + insp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if !ok { + return nil, nil + } + + a.goVersion = getGoVersion(pass) + + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + (*ast.ImportSpec)(nil), + } + + imports := map[string]*ast.ImportSpec{} + + var shouldKeepExpMaps bool + + var resultExpSlices Result + + insp.Preorder(nodeFilter, func(n ast.Node) { + if 
importSpec, ok := n.(*ast.ImportSpec); ok { + // skip aliases + if importSpec.Name == nil || importSpec.Name.Name == "" { + imports[trimImportPath(importSpec)] = importSpec + } + + return + } + + callExpr, ok := n.(*ast.CallExpr) + if !ok { + return + } + + selExpr, ok := callExpr.Fun.(*ast.SelectorExpr) + if !ok { + return + } + + ident, ok := selExpr.X.(*ast.Ident) + if !ok { + return + } + + switch ident.Name { + case "maps": + diagnostic, usage := a.detectPackageUsage(pass, a.mapsPkgReplacements, selExpr, ident, callExpr, "golang.org/x/exp/maps") + if usage { + pass.Report(diagnostic) + } + + shouldKeepExpMaps = shouldKeepExpMaps || !usage + + case "slices": + diagnostic, usage := a.detectPackageUsage(pass, a.slicesPkgReplacements, selExpr, ident, callExpr, "golang.org/x/exp/slices") + + if usage { + resultExpSlices.Diagnostics = append(resultExpSlices.Diagnostics, diagnostic) + } + + resultExpSlices.shouldKeepImport = resultExpSlices.shouldKeepImport || !usage + } + }) + + a.suggestReplaceImport(pass, imports, shouldKeepExpMaps, "golang.org/x/exp/maps") + + if resultExpSlices.shouldKeepImport { + for _, diagnostic := range resultExpSlices.Diagnostics { + pass.Report(diagnostic) + } + } else { + a.suggestReplaceImport(pass, imports, resultExpSlices.shouldKeepImport, "golang.org/x/exp/slices") + } + + return nil, nil +} + +func (a *analyzer) detectPackageUsage(pass *analysis.Pass, + replacements map[string]stdReplacement, + selExpr *ast.SelectorExpr, ident *ast.Ident, callExpr *ast.CallExpr, + importPath string, +) (analysis.Diagnostic, bool) { + rp, ok := replacements[selExpr.Sel.Name] + if !ok { + return analysis.Diagnostic{}, false + } + + if !a.skipGoVersionDetection && rp.MinGo > a.goVersion { + return analysis.Diagnostic{}, false + } + + obj := pass.TypesInfo.Uses[ident] + if obj == nil { + return analysis.Diagnostic{}, false + } + + pkg, ok := obj.(*types.PkgName) + if !ok { + return analysis.Diagnostic{}, false + } + + if pkg.Imported().Path() != importPath { + return analysis.Diagnostic{}, false + } + + diagnostic := analysis.Diagnostic{ + Pos: callExpr.Pos(), + Message: fmt.Sprintf("%s.%s() can be replaced by %s", importPath, selExpr.Sel.Name, rp.Text), + } + + if rp.Suggested != nil { + fix, err := rp.Suggested(callExpr) + if err != nil { + diagnostic.Message = fmt.Sprintf("Suggested fix error: %v", err) + } else { + diagnostic.SuggestedFixes = append(diagnostic.SuggestedFixes, fix) + } + } + + return diagnostic, true +} + +func (a *analyzer) suggestReplaceImport(pass *analysis.Pass, imports map[string]*ast.ImportSpec, shouldKeep bool, importPath string) { + imp, ok := imports[importPath] + if !ok || shouldKeep { + return + } + + src := trimImportPath(imp) + + index := strings.LastIndex(src, "/") + + pass.Report(analysis.Diagnostic{ + Pos: imp.Pos(), + End: imp.End(), + Message: fmt.Sprintf("Import statement '%s' can be replaced by '%s'", src, src[index+1:]), + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: imp.Path.Pos(), + End: imp.Path.End(), + NewText: []byte(string(imp.Path.Value[0]) + src[index+1:] + string(imp.Path.Value[0])), + }}, + }}, + }) +} + +func suggestedFixForClear(callExpr *ast.CallExpr) (analysis.SuggestedFix, error) { + s := &ast.CallExpr{ + Fun: ast.NewIdent("clear"), + Args: callExpr.Args, + Ellipsis: callExpr.Ellipsis, + } + + buf := bytes.NewBuffer(nil) + + err := printer.Fprint(buf, token.NewFileSet(), s) + if err != nil { + return analysis.SuggestedFix{}, fmt.Errorf("print suggested fix: %w", err) + } + + return 
analysis.SuggestedFix{ + TextEdits: []analysis.TextEdit{{ + Pos: callExpr.Pos(), + End: callExpr.End(), + NewText: buf.Bytes(), + }}, + }, nil +} + +func suggestedFixForKeysOrValues(callExpr *ast.CallExpr) (analysis.SuggestedFix, error) { + s := &ast.CallExpr{ + Fun: &ast.SelectorExpr{ + X: &ast.Ident{Name: "slices"}, + Sel: &ast.Ident{Name: "Collect"}, + }, + Args: []ast.Expr{callExpr}, + } + + buf := bytes.NewBuffer(nil) + + err := printer.Fprint(buf, token.NewFileSet(), s) + if err != nil { + return analysis.SuggestedFix{}, fmt.Errorf("print suggested fix: %w", err) + } + + return analysis.SuggestedFix{ + TextEdits: []analysis.TextEdit{{ + Pos: callExpr.Pos(), + End: callExpr.End(), + NewText: buf.Bytes(), + }}, + }, nil +} + +func getGoVersion(pass *analysis.Pass) int { + // Prior to go1.22, versions.FileVersion returns only the toolchain version, + // which is of no use to us, + // so disable this analyzer on earlier versions. + if !slices.Contains(build.Default.ReleaseTags, "go1.22") { + return 0 // false + } + + pkgVersion := pass.Pkg.GoVersion() + if pkgVersion == "" { + // Empty means Go devel. + return goDevel // true + } + + raw := strings.TrimPrefix(pkgVersion, "go") + + // prerelease version (go1.24rc1) + idx := strings.IndexFunc(raw, func(r rune) bool { + return (r < '0' || r > '9') && r != '.' + }) + + if idx != -1 { + raw = raw[:idx] + } + + vParts := strings.Split(raw, ".") + + v, err := strconv.Atoi(strings.Join(vParts[:2], "")) + if err != nil { + v = 116 + } + + return v +} + +func trimImportPath(spec *ast.ImportSpec) string { + return spec.Path.Value[1 : len(spec.Path.Value)-1] +} diff --git a/hack/tools/vendor/github.com/ldez/exptostd/readme.md b/hack/tools/vendor/github.com/ldez/exptostd/readme.md new file mode 100644 index 0000000000..663120dadd --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/exptostd/readme.md @@ -0,0 +1,112 @@ +# ExpToStd + +Detects functions from golang.org/x/exp/ that can be replaced by std functions. + +[![Sponsor](https://img.shields.io/badge/Sponsor%20me-%E2%9D%A4%EF%B8%8F-pink)](https://github.com/sponsors/ldez) + +Actual detections: + +- `golang.org/x/exp/maps`: + - `Keys` + - `Values` + - `Equal` + - `EqualFunc` + - `Clone` + - `Copy` + - `DeleteFunc` + - `Clear` + +- `golang.org/x/exp/slices`: + - `Equal` + - `EqualFunc` + - `Compare` + - `CompareFunc` + - `Index` + - `IndexFunc` + - `Contains` + - `ContainsFunc` + - `Insert` + - `Delete` + - `DeleteFunc` + - `Replace` + - `Clone` + - `Compact` + - `CompactFunc` + - `Grow` + - `Clip` + - `Reverse` + - `Sort` + - `SortFunc` + - `SortStableFunc` + - `IsSorted` + - `IsSortedFunc` + - `Min` + - `MinFunc` + - `Max` + - `MaxFunc` + - `BinarySearch` + - `BinarySearchFunc` + +## Usages + +### Inside golangci-lint + +Recommended. + +```yaml +linters: + enable: + - exptostd +``` + +### As a CLI + +```bash +go install github.com/ldez/exptostd/cmd/exptostd@latest +``` + +```bash +./exptostd ./... 
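# Hypothetical output for a package that still calls golang.org/x/exp/maps.Keys
# (positions are illustrative; the message text follows the analyzer's
# diagnostic format shown above):
#
#   main.go:14:10: golang.org/x/exp/maps.Keys() can be replaced by slices.Collect(maps.Keys())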
+``` + +## Examples + +```go +package foo + +import ( + "fmt" + + "golang.org/x/exp/maps" +) + +func foo(m map[string]string) { + clone := maps.Clone(m) + + fmt.Println(clone) +} +``` + +It can be replaced by: + +```go +package foo + +import ( + "fmt" + "maps" +) + +func foo(m map[string]string) { + clone := maps.Clone(m) + + fmt.Println(clone) +} + +``` + +## References + +- https://tip.golang.org/doc/go1.21#maps +- https://tip.golang.org/doc/go1.21#slices +- https://tip.golang.org/doc/go1.23#iterators diff --git a/hack/tools/vendor/github.com/ldez/gomoddirectives/.golangci.yml b/hack/tools/vendor/github.com/ldez/gomoddirectives/.golangci.yml index 034745570a..7f25666569 100644 --- a/hack/tools/vendor/github.com/ldez/gomoddirectives/.golangci.yml +++ b/hack/tools/vendor/github.com/ldez/gomoddirectives/.golangci.yml @@ -1,9 +1,31 @@ -run: - timeout: 2m +linters: + enable-all: true + disable: + - exportloopref # deprecated + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - cyclop # duplicate of gocyclo + - lll + - dupl + - prealloc + - bodyclose + - wsl + - nlreturn + - mnd + - testpackage + - paralleltest + - tparallel + - err113 + - wrapcheck + - exhaustive + - exhaustruct + - varnamelen linters-settings: govet: enable-all: true + disable: + - fieldalignment gocyclo: min-complexity: 12 goconst: @@ -49,39 +71,6 @@ linters-settings: rules: json: pascal -linters: - enable-all: true - disable: - - deadcode # deprecated - - exhaustivestruct # deprecated - - golint # deprecated - - ifshort # deprecated - - interfacer # deprecated - - maligned # deprecated - - nosnakecase # deprecated - - scopelint # deprecated - - structcheck # deprecated - - varcheck # deprecated - - sqlclosecheck # not relevant (SQL) - - rowserrcheck # not relevant (SQL) - - execinquery # not relevant (SQL) - - cyclop # duplicate of gocyclo - - lll - - dupl - - prealloc - - bodyclose - - wsl - - nlreturn - - gomnd - - testpackage - - paralleltest - - tparallel - - goerr113 - - wrapcheck - - exhaustive - - exhaustruct - - varnamelen - issues: exclude-use-default: false max-issues-per-linter: 0 @@ -92,11 +81,20 @@ issues: exclude-rules: - path: "(.+)_test.go" linters: - - funlen - - goconst + - funlen + - goconst + - maintidx - path: cmd/gomoddirectives/gomoddirectives.go + linters: + - forbidigo text: 'use of `fmt.Println` forbidden' output: show-stats: true sort-results: true + sort-order: + - linter + - file + +run: + timeout: 2m diff --git a/hack/tools/vendor/github.com/ldez/gomoddirectives/LICENSE b/hack/tools/vendor/github.com/ldez/gomoddirectives/LICENSE index caed523b49..c1bf0c3288 100644 --- a/hack/tools/vendor/github.com/ldez/gomoddirectives/LICENSE +++ b/hack/tools/vendor/github.com/ldez/gomoddirectives/LICENSE @@ -175,7 +175,7 @@ END OF TERMS AND CONDITIONS - Copyright 2021 Fernandez Ludovic + Copyright 2024 Fernandez Ludovic Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/hack/tools/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go b/hack/tools/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go index 2a4c904746..22d01d627e 100644 --- a/hack/tools/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go +++ b/hack/tools/vendor/github.com/ldez/gomoddirectives/gomoddirectives.go @@ -1,17 +1,25 @@ -// Package gomoddirectives a linter that handle `replace`, `retract`, `exclude` directives into `go.mod`. +// Package gomoddirectives a linter that handle directives into `go.mod`. 
package gomoddirectives import ( "fmt" "go/token" + "regexp" "strings" + "github.com/ldez/grignotin/gomod" "golang.org/x/mod/modfile" + "golang.org/x/tools/go/analysis" ) const ( reasonRetract = "a comment is mandatory to explain why the version has been retracted" reasonExclude = "exclude directive is not allowed" + reasonToolchain = "toolchain directive is not allowed" + reasonToolchainPattern = "toolchain directive (%s) doesn't match the pattern '%s'" + reasonTool = "tool directive is not allowed" + reasonGoDebug = "godebug directive is not allowed" + reasonGoVersion = "go directive (%s) doesn't match the pattern '%s'" reasonReplaceLocal = "local replacement are not allowed" reasonReplace = "replacement are not allowed" reasonReplaceIdentical = "the original module and the replacement are identical" @@ -44,6 +52,36 @@ type Options struct { ReplaceAllowLocal bool ExcludeForbidden bool RetractAllowNoExplanation bool + ToolchainForbidden bool + ToolchainPattern *regexp.Regexp + ToolForbidden bool + GoDebugForbidden bool + GoVersionPattern *regexp.Regexp +} + +// AnalyzePass analyzes a pass. +func AnalyzePass(pass *analysis.Pass, opts Options) ([]Result, error) { + info, err := gomod.GetModuleInfo() + if err != nil { + return nil, fmt.Errorf("get information about modules: %w", err) + } + + goMod := info[0].GoMod + if pass.Module != nil && pass.Module.Path != "" { + for _, m := range info { + if m.Path == pass.Module.Path { + goMod = m.GoMod + break + } + } + } + + f, err := parseGoMod(goMod) + if err != nil { + return nil, fmt.Errorf("parse %s: %w", goMod, err) + } + + return AnalyzeFile(f, opts), nil } // Analyze analyzes a project. @@ -58,58 +96,135 @@ func Analyze(opts Options) ([]Result, error) { // AnalyzeFile analyzes a mod file. func AnalyzeFile(file *modfile.File, opts Options) []Result { + checks := []func(file *modfile.File, opts Options) []Result{ + checkRetractDirectives, + checkExcludeDirectives, + checkToolDirectives, + checkReplaceDirectives, + checkToolchainDirective, + checkGoDebugDirectives, + checkGoVersionDirectives, + } + var results []Result + for _, check := range checks { + results = append(results, check(file, opts)...) 
+ } - if !opts.RetractAllowNoExplanation { - for _, r := range file.Retract { - if r.Rationale != "" { - continue - } + return results +} - results = append(results, NewResult(file, r.Syntax, reasonRetract)) - } +func checkGoVersionDirectives(file *modfile.File, opts Options) []Result { + if file == nil || file.Go == nil || opts.GoVersionPattern == nil || opts.GoVersionPattern.MatchString(file.Go.Version) { + return nil } - if opts.ExcludeForbidden { - for _, e := range file.Exclude { - results = append(results, NewResult(file, e.Syntax, reasonExclude)) + return []Result{NewResult(file, file.Go.Syntax, fmt.Sprintf(reasonGoVersion, file.Go.Version, opts.GoVersionPattern.String()))} +} + +func checkToolchainDirective(file *modfile.File, opts Options) []Result { + if file.Toolchain == nil { + return nil + } + + if opts.ToolchainForbidden { + return []Result{NewResult(file, file.Toolchain.Syntax, reasonToolchain)} + } + + if opts.ToolchainPattern == nil { + return nil + } + + if !opts.ToolchainPattern.MatchString(file.Toolchain.Name) { + return []Result{NewResult(file, file.Toolchain.Syntax, fmt.Sprintf(reasonToolchainPattern, file.Toolchain.Name, opts.ToolchainPattern.String()))} + } + + return nil +} + +func checkRetractDirectives(file *modfile.File, opts Options) []Result { + if opts.RetractAllowNoExplanation { + return nil + } + + var results []Result + + for _, retract := range file.Retract { + if retract.Rationale != "" { + continue } + + results = append(results, NewResult(file, retract.Syntax, reasonRetract)) + } + + return results +} + +func checkExcludeDirectives(file *modfile.File, opts Options) []Result { + if !opts.ExcludeForbidden { + return nil + } + + var results []Result + + for _, exclude := range file.Exclude { + results = append(results, NewResult(file, exclude.Syntax, reasonExclude)) } + return results +} + +func checkToolDirectives(file *modfile.File, opts Options) []Result { + if !opts.ToolForbidden { + return nil + } + + var results []Result + + for _, tool := range file.Tool { + results = append(results, NewResult(file, tool.Syntax, reasonTool)) + } + + return results +} + +func checkReplaceDirectives(file *modfile.File, opts Options) []Result { + var results []Result + uniqReplace := map[string]struct{}{} - for _, r := range file.Replace { - reason := check(opts, r) + for _, replace := range file.Replace { + reason := checkReplaceDirective(opts, replace) if reason != "" { - results = append(results, NewResult(file, r.Syntax, reason)) + results = append(results, NewResult(file, replace.Syntax, reason)) continue } - if r.Old.Path == r.New.Path && r.Old.Version == r.New.Version { - results = append(results, NewResult(file, r.Syntax, reasonReplaceIdentical)) + if replace.Old.Path == replace.New.Path && replace.Old.Version == replace.New.Version { + results = append(results, NewResult(file, replace.Syntax, reasonReplaceIdentical)) continue } - if _, ok := uniqReplace[r.Old.Path+r.Old.Version]; ok { - results = append(results, NewResult(file, r.Syntax, reasonReplaceDuplicate)) + if _, ok := uniqReplace[replace.Old.Path+replace.Old.Version]; ok { + results = append(results, NewResult(file, replace.Syntax, reasonReplaceDuplicate)) } - uniqReplace[r.Old.Path+r.Old.Version] = struct{}{} + uniqReplace[replace.Old.Path+replace.Old.Version] = struct{}{} } return results } -func check(o Options, r *modfile.Replace) string { +func checkReplaceDirective(opts Options, r *modfile.Replace) string { if isLocal(r) { - if o.ReplaceAllowLocal { + if opts.ReplaceAllowLocal { return "" } 
return fmt.Sprintf("%s: %s", reasonReplaceLocal, r.Old.Path) } - for _, v := range o.ReplaceAllowList { + for _, v := range opts.ReplaceAllowList { if r.Old.Path == v { return "" } @@ -118,6 +233,20 @@ func check(o Options, r *modfile.Replace) string { return fmt.Sprintf("%s: %s", reasonReplace, r.Old.Path) } +func checkGoDebugDirectives(file *modfile.File, opts Options) []Result { + if !opts.GoDebugForbidden { + return nil + } + + var results []Result + + for _, goDebug := range file.Godebug { + results = append(results, NewResult(file, goDebug.Syntax, reasonGoDebug)) + } + + return results +} + // Filesystem paths found in "replace" directives are represented by a path with an empty version. // https://github.com/golang/mod/blob/bc388b264a244501debfb9caea700c6dcaff10e2/module/module.go#L122-L124 func isLocal(r *modfile.Replace) bool { diff --git a/hack/tools/vendor/github.com/ldez/gomoddirectives/module.go b/hack/tools/vendor/github.com/ldez/gomoddirectives/module.go index 4cb3653794..53cf1f59e1 100644 --- a/hack/tools/vendor/github.com/ldez/gomoddirectives/module.go +++ b/hack/tools/vendor/github.com/ldez/gomoddirectives/module.go @@ -1,45 +1,32 @@ package gomoddirectives import ( - "bytes" - "encoding/json" "errors" "fmt" "os" - "os/exec" + "path/filepath" + "github.com/ldez/grignotin/gomod" "golang.org/x/mod/modfile" ) -type modInfo struct { - Path string `json:"Path"` - Dir string `json:"Dir"` - GoMod string `json:"GoMod"` - GoVersion string `json:"GoVersion"` - Main bool `json:"Main"` -} - // GetModuleFile gets module file. +// It's better to use [GetGoModFile] instead of this function. func GetModuleFile() (*modfile.File, error) { - // https://github.com/golang/go/issues/44753#issuecomment-790089020 - cmd := exec.Command("go", "list", "-m", "-json") - - raw, err := cmd.Output() - if err != nil { - return nil, fmt.Errorf("command go list: %w: %s", err, string(raw)) - } - - var v modInfo - err = json.NewDecoder(bytes.NewBuffer(raw)).Decode(&v) + info, err := gomod.GetModuleInfo() if err != nil { - return nil, fmt.Errorf("unmarshaling error: %w: %s", err, string(raw)) + return nil, err } - if v.GoMod == "" { + if info[0].GoMod == "" { return nil, errors.New("working directory is not part of a module") } - raw, err = os.ReadFile(v.GoMod) + return parseGoMod(info[0].GoMod) +} + +func parseGoMod(goMod string) (*modfile.File, error) { + raw, err := os.ReadFile(filepath.Clean(goMod)) if err != nil { return nil, fmt.Errorf("reading go.mod file: %w", err) } diff --git a/hack/tools/vendor/github.com/ldez/gomoddirectives/readme.md b/hack/tools/vendor/github.com/ldez/gomoddirectives/readme.md index 510c8502e2..04738bd81c 100644 --- a/hack/tools/vendor/github.com/ldez/gomoddirectives/readme.md +++ b/hack/tools/vendor/github.com/ldez/gomoddirectives/readme.md @@ -1,16 +1,192 @@ # gomoddirectives +A linter that handle directives into `go.mod`. + [![Sponsor](https://img.shields.io/badge/Sponsor%20me-%E2%9D%A4%EF%B8%8F-pink)](https://github.com/sponsors/ldez) [![Build Status](https://github.com/ldez/gomoddirectives/workflows/Main/badge.svg?branch=master)](https://github.com/ldez/gomoddirectives/actions) -A linter that handle [`replace`](https://golang.org/ref/mod#go-mod-file-replace), [`retract`](https://golang.org/ref/mod#go-mod-file-retract), [`exclude`](https://golang.org/ref/mod#go-mod-file-exclude) directives into `go.mod`. +## Usage + +### Inside golangci-lint + +Recommended. + +```yml +linters-settings: + gomoddirectives: + # Allow local `replace` directives. 
+ # Default: false + replace-local: true + + # List of allowed `replace` directives. + # Default: [] + replace-allow-list: + - launchpad.net/gocheck + # Allow to not explain why the version has been retracted in the `retract` directives. + # Default: false + retract-allow-no-explanation: true + + # Forbid the use of the `exclude` directives. + # Default: false + exclude-forbidden: true + + # Forbid the use of the `toolchain` directive. + # Default: false + toolchain-forbidden: true + + # Defines a pattern to validate `toolchain` directive. + # Default: '' (no match) + toolchain-pattern: 'go1\.22\.\d+$' + + # Forbid the use of the `tool` directives. + # Default: false + tool-forbidden: true + + # Forbid the use of the `godebug` directive. + # Default: false + go-debug-forbidden: true + + # Defines a pattern to validate `go` minimum version directive. + # Default: '' (no match) + go-version-pattern: '1\.\d+(\.0)?$' +``` + +### As a CLI + +``` +gomoddirectives [flags] + +Flags: + -exclude + Forbid the use of exclude directives + -godebug + Forbid the use of godebug directives + -goversion string + Pattern to validate go min version directive + -h Show this help. + -list value + List of allowed replace directives + -local + Allow local replace directives + -retract-no-explanation + Allow to use retract directives without explanation + -tool + Forbid the use of tool directives + -toolchain + Forbid the use of toolchain directive + -toolchain-pattern string + Pattern to validate toolchain directive +``` + +## Details + +### [`retract`](https://golang.org/ref/mod#go-mod-file-retract) directives + +- Force explanation for `retract` directives. + +```go +module example.com/foo + +go 1.22 + +require ( + github.com/ldez/grignotin v0.4.1 +) + +retract ( + v1.0.0 // Explanation +) +``` + +### [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives + +- Ban all `replace` directives. +- Allow only local `replace` directives. +- Allow only some `replace` directives. +- Detect duplicated `replace` directives. +- Detect identical `replace` directives. + +```go +module example.com/foo + +go 1.22 + +require ( + github.com/ldez/grignotin v0.4.1 +) + +replace github.com/ldez/grignotin => ../grignotin/ +``` + +### [`exclude`](https://golang.org/ref/mod#go-mod-file-exclude) directives + +- Ban all `exclude` directives. + +```go +module example.com/foo + +go 1.22 + +require ( + github.com/ldez/grignotin v0.4.1 +) + +exclude ( + golang.org/x/crypto v1.4.5 + golang.org/x/text v1.6.7 +) +``` + +### [`tool`](https://golang.org/ref/mod#go-mod-file-tool) directives + +- Ban all `tool` directives. + +```go +module example.com/foo + +go 1.24 + +tool ( + example.com/module/cmd/a + example.com/module/cmd/b +) +``` + +### [`toolchain`](https://golang.org/ref/mod#go-mod-file-toolchain) directive + +- Ban `toolchain` directive. +- Use a regular expression to constraint the Go minimum version. + +```go +module example.com/foo + +go 1.22 + +toolchain go1.23.3 +``` + +### [`godebug`](https://go.dev/ref/mod#go-mod-file-godebug) directives + +- Ban `godebug` directive. + +```go +module example.com/foo + +go 1.22 + +godebug default=go1.21 +godebug ( + panicnil=1 + asynctimerchan=0 +) +``` + +### [`go`](https://go.dev/ref/mod#go-mod-file-go) directive + +- Use a regular expression to constraint the Go minimum version. 
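The same checks are reachable from code: `AnalyzePass` resolves the right `go.mod` through grignotin's `gomod.GetModuleInfo` (a `go list -m -json` wrapper), while `Analyze` works from the current module. A minimal sketch of the library API, assuming it runs from within a Go module; the patterns shown are examples only:

```go
package main

import (
	"fmt"
	"log"
	"regexp"

	"github.com/ldez/gomoddirectives"
)

func main() {
	results, err := gomoddirectives.Analyze(gomoddirectives.Options{
		ExcludeForbidden:   true,
		ToolchainForbidden: true,
		GoDebugForbidden:   true,
		GoVersionPattern:   regexp.MustCompile(`1\.\d+(\.0)?$`),
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, r := range results {
		fmt.Println(r) // one entry per offending directive
	}
}
```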
-Features: +```go +module example.com/foo -- ban all [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives -- allow only local [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives -- allow only some [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives -- force explanation for [`retract`](https://golang.org/ref/mod#go-mod-file-retract) directives -- ban all [`exclude`](https://golang.org/ref/mod#go-mod-file-exclude) directives -- detect duplicated [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives -- detect identical [`replace`](https://golang.org/ref/mod#go-mod-file-replace) directives +go 1.22.0 +``` diff --git a/hack/tools/vendor/github.com/ldez/grignotin/gomod/gomod.go b/hack/tools/vendor/github.com/ldez/grignotin/gomod/gomod.go new file mode 100644 index 0000000000..ad0090cc30 --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/grignotin/gomod/gomod.go @@ -0,0 +1,99 @@ +// Package gomod A function to get information about module (go list). +package gomod + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "golang.org/x/mod/modfile" +) + +// ModInfo Module information. +// +//nolint:tagliatelle // temporary: the next version of golangci-lint will allow configuration by package. +type ModInfo struct { + Path string `json:"Path"` + Dir string `json:"Dir"` + GoMod string `json:"GoMod"` + GoVersion string `json:"GoVersion"` + Main bool `json:"Main"` +} + +// GetModuleInfo gets modules information from `go list`. +func GetModuleInfo() ([]ModInfo, error) { + // https://github.com/golang/go/issues/44753#issuecomment-790089020 + cmd := exec.Command("go", "list", "-m", "-json") + + out, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("command %q: %w: %s", strings.Join(cmd.Args, " "), err, string(out)) + } + + var infos []ModInfo + + for dec := json.NewDecoder(bytes.NewBuffer(out)); dec.More(); { + var v ModInfo + if err := dec.Decode(&v); err != nil { + return nil, fmt.Errorf("unmarshaling error: %w: %s", err, string(out)) + } + + if v.GoMod == "" { + return nil, errors.New("working directory is not part of a module") + } + + if !v.Main || v.Dir == "" { + continue + } + + infos = append(infos, v) + } + + if len(infos) == 0 { + return nil, errors.New("go.mod file not found") + } + + return infos, nil +} + +type goEnv struct { + GOMOD string `json:"GOMOD"` //nolint:tagliatelle // Based on en var name. +} + +// GetGoModPath extracts go.mod path from "go env". +func GetGoModPath() (string, error) { + cmd := exec.Command("go", "env", "-json", "GOMOD") + + out, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("command %q: %w: %s", strings.Join(cmd.Args, " "), err, string(out)) + } + + v := &goEnv{} + err = json.NewDecoder(bytes.NewBuffer(out)).Decode(v) + if err != nil { + return "", err + } + + return v.GOMOD, nil +} + +// GetModulePath extracts module path from go.mod. 
+func GetModulePath() (string, error) { + p, err := GetGoModPath() + if err != nil { + return "", err + } + + b, err := os.ReadFile(filepath.Clean(p)) + if err != nil { + return "", fmt.Errorf("reading go.mod: %w", err) + } + + return modfile.ModulePath(b), nil +} diff --git a/hack/tools/vendor/github.com/ldez/tagliatelle/.golangci.yml b/hack/tools/vendor/github.com/ldez/tagliatelle/.golangci.yml index ec5c5c7661..01c76dca99 100644 --- a/hack/tools/vendor/github.com/ldez/tagliatelle/.golangci.yml +++ b/hack/tools/vendor/github.com/ldez/tagliatelle/.golangci.yml @@ -1,7 +1,28 @@ -run: - timeout: 5m - skip-files: [ ] - skip-dirs: [ ] +linters: + enable-all: true + disable: + - exportloopref # deprecated + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - cyclop # duplicate of gocyclo + - lll + - dupl + - wsl + - nlreturn + - mnd + - err113 + - wrapcheck + - exhaustive + - exhaustruct + - testpackage + - tparallel + - paralleltest + - prealloc + - forcetypeassert + - varnamelen + - nilnil + - errchkjson + - nonamedreturns linters-settings: govet: @@ -9,7 +30,7 @@ linters-settings: disable: - fieldalignment gocyclo: - min-complexity: 15 + min-complexity: 20 goconst: min-len: 5 min-occurrences: 3 @@ -24,11 +45,13 @@ linters-settings: gofumpt: extra-rules: true depguard: - list-type: denylist - include-go-root: false - packages: - - github.com/sirupsen/logrus - - github.com/pkg/errors + rules: + main: + deny: + - pkg: "github.com/instana/testify" + desc: not allowed + - pkg: "github.com/pkg/errors" + desc: Should be replaced by standard lib errors package gocritic: enabled-tags: - diagnostic @@ -43,46 +66,12 @@ linters-settings: hugeParam: sizeThreshold: 100 -linters: - enable-all: true - disable: - - deadcode # deprecated - - exhaustivestruct # deprecated - - golint # deprecated - - ifshort # deprecated - - interfacer # deprecated - - maligned # deprecated - - nosnakecase # deprecated - - scopelint # deprecated - - structcheck # deprecated - - varcheck # deprecated - - sqlclosecheck # not relevant (SQL) - - rowserrcheck # not relevant (SQL) - - execinquery # not relevant (SQL) - - cyclop # duplicate of gocyclo - - lll - - dupl - - wsl - - nlreturn - - gomnd - - goerr113 - - wrapcheck - - exhaustive - - exhaustruct - - testpackage - - tparallel - - paralleltest - - prealloc - - ifshort - - forcetypeassert - - varnamelen - - nilnil - - errchkjson - - nonamedreturns - issues: exclude-use-default: false - max-per-linter: 0 + max-issues-per-linter: 0 max-same-issues: 0 exclude: - 'package-comments: should have a package comment' + +run: + timeout: 5m diff --git a/hack/tools/vendor/github.com/ldez/tagliatelle/converter.go b/hack/tools/vendor/github.com/ldez/tagliatelle/converter.go new file mode 100644 index 0000000000..6005f5b755 --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/tagliatelle/converter.go @@ -0,0 +1,116 @@ +package tagliatelle + +import ( + "fmt" + "strings" + + "github.com/ettle/strcase" +) + +// https://github.com/dominikh/go-tools/blob/v0.5.1/config/config.go#L167-L175 +// +//nolint:gochecknoglobals // For now I'll accept this, but I think will refactor to use a structure. +var staticcheckInitialisms = map[string]bool{ + "AMQP": true, + "DB": true, + "GID": true, + "LHS": false, + "RHS": false, + "RTP": true, + "SIP": true, + "TS": true, +} + +// Converter is the signature of a case converter. +type Converter func(s string) string + +// ConverterCallback allows to abstract `getSimpleConverter` and `ruleToConverter`. 
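tagliatelle's new converter.go delegates each case name to a github.com/ettle/strcase helper (see `getSimpleConverter` just below). A quick illustration of those converters; the expected outputs reflect strcase's documented behavior rather than anything tested in this repo:

```go
package main

import (
	"fmt"

	"github.com/ettle/strcase"
)

func main() {
	fmt.Println(strcase.ToCamel("ListenAddress"))   // listenAddress
	fmt.Println(strcase.ToPascal("listen_address")) // ListenAddress
	fmt.Println(strcase.ToKebab("ListenAddress"))   // listen-address
	fmt.Println(strcase.ToSnake("ListenAddress"))   // listen_address
	fmt.Println(strcase.ToSNAKE("ListenAddress"))   // LISTEN_ADDRESS
}
```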
+type ConverterCallback func() (Converter, error) + +func getSimpleConverter(c string) (Converter, error) { + switch c { + case "camel": + return strcase.ToCamel, nil + case "pascal": + return strcase.ToPascal, nil + case "kebab": + return strcase.ToKebab, nil + case "snake": + return strcase.ToSnake, nil + case "goCamel": + return strcase.ToGoCamel, nil + case "goPascal": + return strcase.ToGoPascal, nil + case "goKebab": + return strcase.ToGoKebab, nil + case "goSnake": + return strcase.ToGoSnake, nil + case "upperSnake": + return strcase.ToSNAKE, nil + case "header": + return toHeader, nil + case "upper": + return strings.ToUpper, nil + case "lower": + return strings.ToLower, nil + default: + return nil, fmt.Errorf("unsupported case: %s", c) + } +} + +func toHeader(s string) string { + return strcase.ToCase(s, strcase.TitleCase, '-') +} + +func ruleToConverter(rule ExtendedRule) (Converter, error) { + if rule.ExtraInitialisms { + for k, v := range staticcheckInitialisms { + if _, found := rule.InitialismOverrides[k]; found { + continue + } + + rule.InitialismOverrides[k] = v + } + } + + caser := strcase.NewCaser(strings.HasPrefix(rule.Case, "go"), rule.InitialismOverrides, nil) + + switch strings.ToLower(strings.TrimPrefix(rule.Case, "go")) { + case "camel": + return caser.ToCamel, nil + + case "pascal": + return caser.ToPascal, nil + + case "kebab": + return caser.ToKebab, nil + + case "snake": + return caser.ToSnake, nil + + case "uppersnake": + return caser.ToSNAKE, nil + + case "header": + return toHeaderCase(caser), nil + + case "upper": + return func(s string) string { + return caser.ToCase(s, strcase.UpperCase, 0) + }, nil + + case "lower": + return func(s string) string { + return caser.ToCase(s, strcase.LowerCase, 0) + }, nil + + default: + return nil, fmt.Errorf("unsupported case: %s", rule.Case) + } +} + +func toHeaderCase(caser *strcase.Caser) Converter { + return func(s string) string { + return caser.ToCase(s, strcase.TitleCase, '-') + } +} diff --git a/hack/tools/vendor/github.com/ldez/tagliatelle/readme.md b/hack/tools/vendor/github.com/ldez/tagliatelle/readme.md index 55a544db81..52d10304b1 100644 --- a/hack/tools/vendor/github.com/ldez/tagliatelle/readme.md +++ b/hack/tools/vendor/github.com/ldez/tagliatelle/readme.md @@ -97,15 +97,14 @@ type Foo struct { } ``` -## What this tool is about +## What this linter is about -This tool is about validating tags according to rules you define. -The tool also allows to fix tags according to the rules you defined. +This linter is about validating tags according to rules you define. +The linter also allows you to fix tags according to the rules you defined. -This tool is not intended to validate the fact a tag in valid or not. -To do that, you can use `go vet`, or use [golangci-lint](https://golangci-lint.run) ["go vet"](https://golangci-lint.run/usage/linters/#govet) linter. +This linter is not intended to validate whether a tag is valid or not. -## How to use the tool +## How to use the linter ### As a golangci-lint linter @@ -114,17 +113,149 @@ Define the rules, you want via your [golangci-lint](https://golangci-lint.run) c ```yaml linters-settings: tagliatelle: - # Check the struck tag name case. + # Checks the struct tag name case. case: - # Use the struct field name to check the name of the struct tag. + # Defines the association between tag name and case. + # Any struct tag name can be used.
+ # Supported string cases: + # - `camel` + # - `pascal` + # - `kebab` + # - `snake` + # - `upperSnake` + # - `goCamel` + # - `goPascal` + # - `goKebab` + # - `goSnake` + # - `upper` + # - `lower` + # - `header` + rules: + json: camel + yaml: camel + xml: camel + toml: camel + bson: camel + avro: snake + mapstructure: kebab + env: upperSnake + envconfig: upperSnake + whatever: snake + # Defines the association between tag name and case. + # Important: the `extended-rules` overrides `rules`. + # Default: empty + extended-rules: + json: + # Supported string cases: + # - `camel` + # - `pascal` + # - `kebab` + # - `snake` + # - `upperSnake` + # - `goCamel` + # - `goPascal` + # - `goKebab` + # - `goSnake` + # - `upper` + # - `lower` + # - `header` + # + # Required + case: camel + # Adds 'AMQP', 'DB', 'GID', 'RTP', 'SIP', 'TS' to initialisms, + # and removes 'LHS', 'RHS' from initialisms. + # Default: false + extra-initialisms: true + # Defines initialism additions and overrides. + # Default: empty + initialism-overrides: + DB: true # add a new initialism + LHS: false # disable a default initialism. + # ... + # Uses the struct field name to check the name of the struct tag. + # Default: false + use-field-name: true + # The field names to ignore. + # Default: [] + ignored-fields: + - Bar + - Foo + # Overrides the default/root configuration. + # Default: [] + overrides: + - + # The package path (uses `/` only as a separator). + # Required + pkg: foo/bar + # Default: empty or the same as the default/root configuration. + rules: + json: snake + xml: pascal + # Default: empty or the same as the default/root configuration. + extended-rules: + # same options as the base `extended-rules`. + # Default: false (WARNING: it doesn't follow the default/root configuration) + use-field-name: true + # The field names to ignore. + # Default: [] or the same as the default/root configuration. + ignored-fields: + - Bar + - Foo + # Ignore the package (takes precedence over all other configurations). + # Default: false + ignore: true + +``` + +#### Examples + +Overrides case rules for the package `foo/bar`: + +```yaml +linters-settings: + tagliatelle: + case: rules: - # Any struct tag type can be used. - # Support string case: `camel`, `pascal`, `kebab`, `snake`, `upperSnake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower`, `header`. json: camel yaml: camel xml: camel + overrides: + - pkg: foo/bar + rules: + json: snake + xml: pascal +``` + +Ignore fields inside the package `foo/bar`: + +```yaml +linters-settings: + tagliatelle: + case: + rules: + json: camel + yaml: camel + xml: camel + overrides: + - pkg: foo/bar + ignored-fields: + - Bar + - Foo +``` + +Ignore the package `foo/bar`: + +```yaml +linters-settings: + tagliatelle: + case: + rules: + json: camel + yaml: camel + xml: camel + overrides: + - pkg: foo/bar + ignore: true ``` More information here https://golangci-lint.run/usage/linters/#tagliatelle @@ -149,13 +280,14 @@ Here are the default rules for the well known and used tags, when using tagliate - `bson`: `camel` - `avro`: `snake` - `header`: `header` +- `env`: `upperSnake` - `envconfig`: `upperSnake` ### Custom Rules -The tool is not limited to the tags used in example, you can use it to validate any tag. +The linter is not limited to the tags used in the examples, **you can use it to validate any tag**. -You can add your own tag, for example `whatever` and tells the tool you want to use `kebab`. +You can add your own tag, for example `whatever`, and tell the linter you want to use `kebab`.
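For instance, assuming the default `json: camel` rule plus a custom `whatever: kebab` rule, the linter would treat a struct roughly like this (the type and its tags are illustrative):

```go
package example

type User struct {
	// `json` follows the `camel` rule; `whatever` follows the custom `kebab` rule.
	FullName string `json:"fullName" whatever:"full-name"` // ok
	HostIP   string `json:"host_ip" whatever:"hostIP"`     // flagged on both tags
}
```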
This option is only available via [golangci-lint](https://golangci-lint.run). @@ -164,14 +296,15 @@ linters-settings: tagliatelle: # Check the struck tag name case. case: - # Use the struct field name to check the name of the struct tag. - # Default: false - use-field-name: true rules: # Any struct tag type can be used. # Support string case: `camel`, `pascal`, `kebab`, `snake`, `goCamel`, `goPascal`, `goKebab`, `goSnake`, `upper`, `lower` - json: camel - yaml: camel - xml: camel + json: camel + yaml: camel + xml: camel + toml: camel whatever: kebab + # Use the struct field name to check the name of the struct tag. + # Default: false + use-field-name: true ``` diff --git a/hack/tools/vendor/github.com/ldez/tagliatelle/tagliatelle.go b/hack/tools/vendor/github.com/ldez/tagliatelle/tagliatelle.go index 22c5feb3d8..99c7da2d04 100644 --- a/hack/tools/vendor/github.com/ldez/tagliatelle/tagliatelle.go +++ b/hack/tools/vendor/github.com/ldez/tagliatelle/tagliatelle.go @@ -6,10 +6,14 @@ import ( "errors" "fmt" "go/ast" + "maps" + "path" + "path/filepath" "reflect" + "slices" "strings" - "github.com/ettle/strcase" + iradix "github.com/hashicorp/go-immutable-radix/v2" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" "golang.org/x/tools/go/ast/inspector" @@ -17,8 +21,30 @@ import ( // Config the tagliatelle configuration. type Config struct { - Rules map[string]string - UseFieldName bool + Base + Overrides []Overrides +} + +// Overrides applies configuration overrides by package. +type Overrides struct { + Base + Package string +} + +// Base shared configuration between rules. +type Base struct { + Rules map[string]string + ExtendedRules map[string]ExtendedRule + UseFieldName bool + IgnoredFields []string + Ignore bool +} + +// ExtendedRule allows to customize rules. +type ExtendedRule struct { + Case string + ExtraInitialisms bool + InitialismOverrides map[string]bool } // New creates an analyzer. 
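The restructured configuration above embeds the shared `Base` settings and adds per-package `Overrides`. A hedged sketch of assembling it programmatically, using the types and `New` constructor from this file (the rule values are illustrative):

```go
package main

import "github.com/ldez/tagliatelle"

func main() {
	cfg := tagliatelle.Config{
		Base: tagliatelle.Base{
			Rules:        map[string]string{"json": "camel", "yaml": "camel"},
			UseFieldName: true,
		},
		Overrides: []tagliatelle.Overrides{{
			Package: "foo/bar",
			Base: tagliatelle.Base{
				// Overrides the root `json` rule for foo/bar only.
				Rules: map[string]string{"json": "snake"},
			},
		}},
	}

	_ = tagliatelle.New(cfg) // returns a *analysis.Analyzer ready to run.
}
```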
@@ -26,20 +52,18 @@ func New(config Config) *analysis.Analyzer { return &analysis.Analyzer{ Name: "tagliatelle", Doc: "Checks the struct tags.", - Run: func(pass *analysis.Pass) (interface{}, error) { - if len(config.Rules) == 0 { + Run: func(pass *analysis.Pass) (any, error) { + if len(config.Rules) == 0 && len(config.ExtendedRules) == 0 && len(config.Overrides) == 0 { return nil, nil } return run(pass, config) }, - Requires: []*analysis.Analyzer{ - inspect.Analyzer, - }, + Requires: []*analysis.Analyzer{inspect.Analyzer}, } } -func run(pass *analysis.Pass, config Config) (interface{}, error) { +func run(pass *analysis.Pass, config Config) (any, error) { isp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) if !ok { return nil, errors.New("missing inspect analyser") @@ -49,6 +73,16 @@ func run(pass *analysis.Pass, config Config) (interface{}, error) { (*ast.StructType)(nil), } + cfg := config.Base + if pass.Module != nil { + radixTree := createRadixTree(config, pass.Module.Path) + _, cfg, _ = radixTree.Root().LongestPrefix([]byte(pass.Pkg.Path())) + } + + if cfg.Ignore { + return nil, nil + } + isp.Preorder(nodeFilter, func(n ast.Node) { node, ok := n.(*ast.StructType) if !ok { @@ -56,14 +90,14 @@ func run(pass *analysis.Pass, config Config) (interface{}, error) { } for _, field := range node.Fields.List { - analyze(pass, config, node, field) + analyze(pass, cfg, node, field) } }) return nil, nil } -func analyze(pass *analysis.Pass, config Config, n *ast.StructType, field *ast.Field) { +func analyze(pass *analysis.Pass, config Base, n *ast.StructType, field *ast.Field) { if n.Fields == nil || n.Fields.NumFields() < 1 { // skip empty structs return @@ -80,49 +114,74 @@ func analyze(pass *analysis.Pass, config Config, n *ast.StructType, field *ast.F return } + cleanRules(config) + + if slices.Contains(config.IgnoredFields, fieldName) { + return + } + + for key, extRule := range config.ExtendedRules { + report(pass, config, key, extRule.Case, fieldName, n, field, func() (Converter, error) { + return ruleToConverter(extRule) + }) + } + for key, convName := range config.Rules { - if convName == "" { - continue - } + report(pass, config, key, convName, fieldName, n, field, func() (Converter, error) { + return getSimpleConverter(convName) + }) + } +} - value, flags, ok := lookupTagValue(field.Tag, key) - if !ok { - // skip when no struct tag for the key - continue - } +func report(pass *analysis.Pass, config Base, key, convName, fieldName string, n *ast.StructType, field *ast.Field, fn ConverterCallback) { + if convName == "" { + return + } - if value == "-" { - // skip when skipped :) - continue - } + value, flags, ok := lookupTagValue(field.Tag, key) + if !ok { + // skip when no struct tag for the key + return + } - // TODO(ldez): need to be rethink. - // This is an exception because of a bug. - // https://github.com/ldez/tagliatelle/issues/8 - // For now, tagliatelle should try to remain neutral in terms of format. - if hasTagFlag(flags, "inline") { - // skip for inline children (no name to lint) - continue - } + if value == "-" { + // skip when skipped :) + return + } - if value == "" { - value = fieldName - } + // TODO(ldez): need to be rethink. + // tagliatelle should try to remain neutral in terms of format. 
+ if key == "xml" && strings.ContainsAny(value, ">:") { + // ignore XML names than contains path + return + } - converter, err := getConverter(convName) - if err != nil { - pass.Reportf(n.Pos(), "%s(%s): %v", key, convName, err) - continue - } + // TODO(ldez): need to be rethink. + // This is an exception because of a bug. + // https://github.com/ldez/tagliatelle/issues/8 + // For now, tagliatelle should try to remain neutral in terms of format. + if hasTagFlag(flags, "inline") { + // skip for inline children (no name to lint) + return + } - expected := value - if config.UseFieldName { - expected = fieldName - } + if value == "" { + value = fieldName + } - if value != converter(expected) { - pass.Reportf(field.Tag.Pos(), "%s(%s): got '%s' want '%s'", key, convName, value, converter(expected)) - } + converter, err := fn() + if err != nil { + pass.Reportf(n.Pos(), "%s(%s): %v", key, convName, err) + return + } + + expected := value + if config.UseFieldName { + expected = fieldName + } + + if value != converter(expected) { + pass.Reportf(field.Tag.Pos(), "%s(%s): got '%s' want '%s'", key, convName, value, converter(expected)) } } @@ -182,37 +241,62 @@ func hasTagFlag(flags []string, query string) bool { return false } -func getConverter(c string) (func(s string) string, error) { - switch c { - case "camel": - return strcase.ToCamel, nil - case "pascal": - return strcase.ToPascal, nil - case "kebab": - return strcase.ToKebab, nil - case "snake": - return strcase.ToSnake, nil - case "goCamel": - return strcase.ToGoCamel, nil - case "goPascal": - return strcase.ToGoPascal, nil - case "goKebab": - return strcase.ToGoKebab, nil - case "goSnake": - return strcase.ToGoSnake, nil - case "header": - return toHeader, nil - case "upper": - return strings.ToUpper, nil - case "upperSnake": - return strcase.ToSNAKE, nil - case "lower": - return strings.ToLower, nil - default: - return nil, fmt.Errorf("unsupported case: %s", c) +func createRadixTree(config Config, modPath string) *iradix.Tree[Base] { + r := iradix.New[Base]() + + defaultRule := Base{ + Rules: maps.Clone(config.Rules), + ExtendedRules: maps.Clone(config.ExtendedRules), + UseFieldName: config.UseFieldName, + Ignore: config.Ignore, } + + defaultRule.IgnoredFields = append(defaultRule.IgnoredFields, config.IgnoredFields...) + + r, _, _ = r.Insert([]byte(""), defaultRule) + + for _, override := range config.Overrides { + c := Base{ + UseFieldName: override.UseFieldName, + Ignore: override.Ignore, + } + + // If there is an override the base configuration is ignored. + if len(override.IgnoredFields) == 0 { + c.IgnoredFields = append(c.IgnoredFields, config.IgnoredFields...) + } else { + c.IgnoredFields = append(c.IgnoredFields, override.IgnoredFields...) + } + + // Copy the rules from the base. + c.Rules = maps.Clone(config.Rules) + + // Overrides the rule from the base. + for k, v := range override.Rules { + c.Rules[k] = v + } + + // Copy the extended rules from the base. + c.ExtendedRules = maps.Clone(config.ExtendedRules) + + // Overrides the extended rule from the base. 
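The override resolution built here hinges on longest-prefix matching of package paths. A standalone sketch of that lookup, using the same `go-immutable-radix/v2` calls as the vendored code (the module path and values are made up for illustration):

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix/v2"
)

func main() {
	r := iradix.New[string]()
	r, _, _ = r.Insert([]byte("example.com/mod"), "root config")
	r, _, _ = r.Insert([]byte("example.com/mod/foo/bar"), "override for foo/bar")

	// The deepest configured prefix wins, so sub-packages of foo/bar
	// inherit the override rather than the root configuration.
	_, v, _ := r.Root().LongestPrefix([]byte("example.com/mod/foo/bar/baz"))
	fmt.Println(v) // override for foo/bar
}
```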
+ for k, v := range override.ExtendedRules { + c.ExtendedRules[k] = v + } + + key := path.Join(modPath, override.Package) + if filepath.Base(modPath) == override.Package { + key = modPath + } + + r, _, _ = r.Insert([]byte(key), c) + } + + return r } -func toHeader(s string) string { - return strcase.ToCase(s, strcase.TitleCase, '-') +func cleanRules(config Base) { + for k := range config.ExtendedRules { + delete(config.Rules, k) + } } diff --git a/hack/tools/vendor/github.com/ldez/usetesting/.gitignore b/hack/tools/vendor/github.com/ldez/usetesting/.gitignore new file mode 100644 index 0000000000..0907a9069e --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/usetesting/.gitignore @@ -0,0 +1,2 @@ +/usetesting +.idea diff --git a/hack/tools/vendor/github.com/ldez/usetesting/.golangci.yml b/hack/tools/vendor/github.com/ldez/usetesting/.golangci.yml new file mode 100644 index 0000000000..597647d242 --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/usetesting/.golangci.yml @@ -0,0 +1,83 @@ +linters: + enable-all: true + disable: + - exportloopref # deprecated + - sqlclosecheck # not relevant (SQL) + - rowserrcheck # not relevant (SQL) + - cyclop # duplicate of gocyclo + - lll + - dupl + - nlreturn + - exhaustive + - exhaustruct + - testpackage + - tparallel + - paralleltest + - prealloc + - varnamelen + - nilnil + - errchkjson + - nonamedreturns + +linters-settings: + govet: + enable-all: true + disable: + - fieldalignment + mnd: + ignored-numbers: + - "124" + gocyclo: + min-complexity: 20 + goconst: + min-len: 5 + min-occurrences: 3 + misspell: + locale: US + funlen: + lines: -1 + statements: 40 + godox: + keywords: + - FIXME + gofumpt: + extra-rules: true + depguard: + rules: + main: + deny: + - pkg: "github.com/instana/testify" + desc: not allowed + - pkg: "github.com/pkg/errors" + desc: Should be replaced by standard lib errors package + wsl: + force-case-trailing-whitespace: 1 + allow-trailing-comment: true + gocritic: + enabled-tags: + - diagnostic + - style + - performance + disabled-checks: + - sloppyReassign + - rangeValCopy + - octalLiteral + - paramTypeCombine # already handle by gofumpt.extra-rules + settings: + hugeParam: + sizeThreshold: 100 + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 0 + +output: + show-stats: true + sort-results: true + sort-order: + - linter + - file + +run: + timeout: 5m diff --git a/hack/tools/vendor/github.com/ldez/usetesting/LICENSE b/hack/tools/vendor/github.com/ldez/usetesting/LICENSE new file mode 100644 index 0000000000..c1bf0c3288 --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/usetesting/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2024 Fernandez Ludovic + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/hack/tools/vendor/github.com/ldez/usetesting/Makefile b/hack/tools/vendor/github.com/ldez/usetesting/Makefile new file mode 100644 index 0000000000..b8eca65980 --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/usetesting/Makefile @@ -0,0 +1,15 @@ +.PHONY: clean check test build + +default: clean check test build + +clean: + rm -rf dist/ cover.out + +test: clean + go test -v -cover ./... 
+ +check: + golangci-lint run + +build: + go build -ldflags "-s -w" -trimpath ./cmd/usetesting/ diff --git a/hack/tools/vendor/github.com/ldez/usetesting/readme.md b/hack/tools/vendor/github.com/ldez/usetesting/readme.md new file mode 100644 index 0000000000..e21ba06e63 --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/usetesting/readme.md @@ -0,0 +1,209 @@ +# UseTesting + +Detects when some calls can be replaced by methods from the testing package. + +[![Sponsor](https://img.shields.io/badge/Sponsor%20me-%E2%9D%A4%EF%B8%8F-pink)](https://github.com/sponsors/ldez) + +## Usages + +### Inside golangci-lint + +Recommended. + +```yml +linters-settings: + usetesting: + # Enable/disable `os.CreateTemp("", ...)` detections. + # Default: true + os-create-temp: false + + # Enable/disable `os.MkdirTemp()` detections. + # Default: true + os-mkdir-temp: false + + # Enable/disable `os.Setenv()` detections. + # Default: false + os-setenv: true + + # Enable/disable `os.TempDir()` detections. + # Default: false + os-temp-dir: true + + # Enable/disable `os.Chdir()` detections. + # Disabled if Go < 1.24. + # Default: true + os-chdir: false + + # Enable/disable `context.Background()` detections. + # Disabled if Go < 1.24. + # Default: true + context-background: false + + # Enable/disable `context.TODO()` detections. + # Disabled if Go < 1.24. + # Default: true + context-todo: false +``` + +### As a CLI + +```shell +go install github.com/ldez/usetesting/cmd/usetesting@latest +``` + +``` +usetesting: Reports uses of functions with replacement inside the testing package. + +Usage: usetesting [-flag] [package] + +Flags: + -contextbackground + Enable/disable context.Background() detections (default true) + -contexttodo + Enable/disable context.TODO() detections (default true) + -oschdir + Enable/disable os.Chdir() detections (default true) + -osmkdirtemp + Enable/disable os.MkdirTemp() detections (default true) + -ossetenv + Enable/disable os.Setenv() detections (default false) + -ostempdir + Enable/disable os.TempDir() detections (default false) + -oscreatetemp + Enable/disable os.CreateTemp("", ...) detections (default true) +... +``` + +## Examples + +### `os.MkdirTemp` + +```go +func TestExample(t *testing.T) { + os.MkdirTemp("a", "b") + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + t.TempDir() + // ... +} +``` + +### `os.TempDir` + +```go +func TestExample(t *testing.T) { + os.TempDir() + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + t.TempDir() + // ... +} +``` + +### `os.CreateTemp` + +```go +func TestExample(t *testing.T) { + os.CreateTemp("", "x") + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + os.CreateTemp(t.TempDir(), "x") + // ... +} +``` + +### `os.Setenv` + +```go +func TestExample(t *testing.T) { + os.Setenv("A", "b") + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + t.Setenv("A", "b") + // ... +} +``` + +### `os.Chdir` (Go >= 1.24) + +```go +func TestExample(t *testing.T) { + os.Chdir("x") + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + t.Chdir("x") + // ... +} +``` + +### `context.Background` (Go >= 1.24) + +```go +func TestExample(t *testing.T) { + ctx := context.Background() + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + ctx := t.Context() + // ... 
+} +``` + +### `context.TODO` (Go >= 1.24) + +```go +func TestExample(t *testing.T) { + ctx := context.TODO() + // ... +} +``` + +It can be replaced by: + +```go +func TestExample(t *testing.T) { + ctx := t.Context() + // ... +} +``` + +## References + +- https://tip.golang.org/doc/go1.15#testingpkgtesting (`TempDir`) +- https://tip.golang.org/doc/go1.17#testingpkgtesting (`SetEnv`) +- https://tip.golang.org/doc/go1.24#testingpkgtesting (`Chdir`, `Context`) diff --git a/hack/tools/vendor/github.com/ldez/usetesting/report.go b/hack/tools/vendor/github.com/ldez/usetesting/report.go new file mode 100644 index 0000000000..3c90b6baec --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/usetesting/report.go @@ -0,0 +1,200 @@ +package usetesting + +import ( + "bytes" + "fmt" + "go/ast" + "go/printer" + "go/token" + "slices" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// because [os.CreateTemp] takes 2 args. +const nbArgCreateTemp = 2 + +func (a *analyzer) reportCallExpr(pass *analysis.Pass, ce *ast.CallExpr, fnInfo *FuncInfo) bool { + if !a.osCreateTemp { + return false + } + + if len(ce.Args) != nbArgCreateTemp { + return false + } + + switch fun := ce.Fun.(type) { + case *ast.SelectorExpr: + if fun.Sel == nil || fun.Sel.Name != createTempName { + return false + } + + expr, ok := fun.X.(*ast.Ident) + if !ok { + return false + } + + if expr.Name == osPkgName && isFirstArgEmptyString(ce) { + pass.Report(diagnosticOSCreateTemp(ce, fnInfo)) + + return true + } + + case *ast.Ident: + if fun.Name != createTempName { + return false + } + + pkgName := getPkgNameFromType(pass, fun) + + if pkgName == osPkgName && isFirstArgEmptyString(ce) { + pass.Report(diagnosticOSCreateTemp(ce, fnInfo)) + + return true + } + } + + return false +} + +func diagnosticOSCreateTemp(ce *ast.CallExpr, fnInfo *FuncInfo) analysis.Diagnostic { + diagnostic := analysis.Diagnostic{ + Pos: ce.Pos(), + Message: fmt.Sprintf( + `%s.%s("", ...) could be replaced by %[1]s.%[2]s(%s.%s(), ...) in %s`, + osPkgName, createTempName, fnInfo.ArgName, tempDirName, fnInfo.Name, + ), + } + + // Skip `` arg names. + if !strings.Contains(fnInfo.ArgName, "<") { + g := &ast.CallExpr{ + Fun: ce.Fun, + Args: []ast.Expr{ + &ast.CallExpr{ + Fun: &ast.SelectorExpr{ + X: &ast.Ident{Name: fnInfo.ArgName}, + Sel: &ast.Ident{Name: tempDirName}, + }, + }, + ce.Args[1], + }, + } + + buf := bytes.NewBuffer(nil) + + err := printer.Fprint(buf, token.NewFileSet(), g) + if err != nil { + diagnostic.Message = fmt.Sprintf("Suggested fix error: %v", err) + return diagnostic + } + + diagnostic.SuggestedFixes = append(diagnostic.SuggestedFixes, analysis.SuggestedFix{ + TextEdits: []analysis.TextEdit{{ + Pos: ce.Pos(), + End: ce.End(), + NewText: buf.Bytes(), + }}, + }) + } + + return diagnostic +} + +func (a *analyzer) reportSelector(pass *analysis.Pass, se *ast.SelectorExpr, fnInfo *FuncInfo) bool { + if se.Sel == nil || !se.Sel.IsExported() { + return false + } + + ident, ok := se.X.(*ast.Ident) + if !ok { + return false + } + + return a.report(pass, se, ident.Name, se.Sel.Name, fnInfo) +} + +func (a *analyzer) reportIdent(pass *analysis.Pass, ident *ast.Ident, fnInfo *FuncInfo) bool { + if !ident.IsExported() { + return false + } + + if !slices.Contains(a.fieldNames, ident.Name) { + return false + } + + pkgName := getPkgNameFromType(pass, ident) + + return a.report(pass, ident, pkgName, ident.Name, fnInfo) +} + +//nolint:gocyclo // The complexity is expected by the number of cases to check. 
+func (a *analyzer) report(pass *analysis.Pass, rg analysis.Range, origPkgName, origName string, fnInfo *FuncInfo) bool { + switch { + case a.osMkdirTemp && origPkgName == osPkgName && origName == mkdirTempName: + report(pass, rg, origPkgName, origName, tempDirName, fnInfo) + + case a.osTempDir && origPkgName == osPkgName && origName == tempDirName: + report(pass, rg, origPkgName, origName, tempDirName, fnInfo) + + case a.osSetenv && origPkgName == osPkgName && origName == setenvName: + report(pass, rg, origPkgName, origName, setenvName, fnInfo) + + case a.geGo124 && a.osChdir && origPkgName == osPkgName && origName == chdirName: + report(pass, rg, origPkgName, origName, chdirName, fnInfo) + + case a.geGo124 && a.contextBackground && origPkgName == contextPkgName && origName == backgroundName: + report(pass, rg, origPkgName, origName, contextName, fnInfo) + + case a.geGo124 && a.contextTodo && origPkgName == contextPkgName && origName == todoName: + report(pass, rg, origPkgName, origName, contextName, fnInfo) + + default: + return false + } + + return true +} + +func report(pass *analysis.Pass, rg analysis.Range, origPkgName, origName, expectName string, fnInfo *FuncInfo) { + diagnostic := analysis.Diagnostic{ + Pos: rg.Pos(), + Message: fmt.Sprintf("%s.%s() could be replaced by %s.%s() in %s", + origPkgName, origName, fnInfo.ArgName, expectName, fnInfo.Name, + ), + } + + // Skip `` arg names. + // Only applies on `context.XXX` because the nb of return parameters is the same as the replacement. + if !strings.Contains(fnInfo.ArgName, "<") && origPkgName == contextPkgName { + diagnostic.SuggestedFixes = append(diagnostic.SuggestedFixes, analysis.SuggestedFix{ + TextEdits: []analysis.TextEdit{{ + Pos: rg.Pos(), + End: rg.End(), + NewText: []byte(fmt.Sprintf("%s.%s", fnInfo.ArgName, expectName)), + }}, + }) + } + + pass.Report(diagnostic) +} + +func isFirstArgEmptyString(ce *ast.CallExpr) bool { + bl, ok := ce.Args[0].(*ast.BasicLit) + if !ok { + return false + } + + return bl.Kind == token.STRING && bl.Value == `""` +} + +func getPkgNameFromType(pass *analysis.Pass, ident *ast.Ident) string { + o := pass.TypesInfo.ObjectOf(ident) + + if o == nil || o.Pkg() == nil { + return "" + } + + return o.Pkg().Name() +} diff --git a/hack/tools/vendor/github.com/ldez/usetesting/usetesting.go b/hack/tools/vendor/github.com/ldez/usetesting/usetesting.go new file mode 100644 index 0000000000..7258278329 --- /dev/null +++ b/hack/tools/vendor/github.com/ldez/usetesting/usetesting.go @@ -0,0 +1,268 @@ +// Package usetesting It is an analyzer that detects when some calls can be replaced by methods from the testing package. +package usetesting + +import ( + "go/ast" + "go/build" + "os" + "slices" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" +) + +const ( + chdirName = "Chdir" + mkdirTempName = "MkdirTemp" + createTempName = "CreateTemp" + setenvName = "Setenv" + tempDirName = "TempDir" + backgroundName = "Background" + todoName = "TODO" + contextName = "Context" +) + +const ( + osPkgName = "os" + contextPkgName = "context" + testingPkgName = "testing" +) + +// FuncInfo information about the test function. +type FuncInfo struct { + Name string + ArgName string +} + +// analyzer is the UseTesting linter. 
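For orientation, the analyzer only inspects functions whose first parameter marks them as test code; `checkTestFunctionSignature` later in this file accepts `*testing.T`, `*testing.B`, and `testing.TB`. A rough sketch of what is and is not matched (the function names are illustrative):

```go
package example

import "testing"

func TestX(t *testing.T)      { _ = t }  // matched: first param is *testing.T
func BenchmarkY(b *testing.B) { _ = b }  // matched: *testing.B
func helper(tb testing.TB)    { _ = tb } // matched: testing.TB (arg name defaults to "tb")
func plain(n int)             { _ = n }  // ignored: no testing argument
```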
+type analyzer struct { + contextBackground bool + contextTodo bool + osChdir bool + osMkdirTemp bool + osTempDir bool + osSetenv bool + osCreateTemp bool + + fieldNames []string + + skipGoVersionDetection bool + geGo124 bool +} + +// NewAnalyzer create a new UseTesting. +func NewAnalyzer() *analysis.Analyzer { + _, skip := os.LookupEnv("USETESTING_SKIP_GO_VERSION_CHECK") // TODO should be removed when go1.25 will be released. + + l := &analyzer{ + fieldNames: []string{ + chdirName, + mkdirTempName, + tempDirName, + setenvName, + backgroundName, + todoName, + createTempName, + }, + skipGoVersionDetection: skip, + } + + a := &analysis.Analyzer{ + Name: "usetesting", + Doc: "Reports uses of functions with replacement inside the testing package.", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: l.run, + } + + a.Flags.BoolVar(&l.contextBackground, "contextbackground", true, "Enable/disable context.Background() detections") + a.Flags.BoolVar(&l.contextTodo, "contexttodo", true, "Enable/disable context.TODO() detections") + a.Flags.BoolVar(&l.osChdir, "oschdir", true, "Enable/disable os.Chdir() detections") + a.Flags.BoolVar(&l.osMkdirTemp, "osmkdirtemp", true, "Enable/disable os.MkdirTemp() detections") + a.Flags.BoolVar(&l.osSetenv, "ossetenv", false, "Enable/disable os.Setenv() detections") + a.Flags.BoolVar(&l.osTempDir, "ostempdir", false, "Enable/disable os.TempDir() detections") + a.Flags.BoolVar(&l.osCreateTemp, "oscreatetemp", true, `Enable/disable os.CreateTemp("", ...) detections`) + + return a +} + +func (a *analyzer) run(pass *analysis.Pass) (any, error) { + if !a.contextBackground && !a.contextTodo && !a.osChdir && !a.osMkdirTemp && !a.osSetenv && !a.osTempDir && !a.osCreateTemp { + return nil, nil + } + + a.geGo124 = a.isGoSupported(pass) + + insp, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if !ok { + return nil, nil + } + + nodeFilter := []ast.Node{ + (*ast.FuncDecl)(nil), + (*ast.FuncLit)(nil), + } + + insp.WithStack(nodeFilter, func(node ast.Node, push bool, stack []ast.Node) (proceed bool) { + if !push { + return false + } + + switch fn := node.(type) { + case *ast.FuncDecl: + a.checkFunc(pass, fn.Type, fn.Body, fn.Name.Name) + + case *ast.FuncLit: + if hasParentFunc(stack) { + return true + } + + a.checkFunc(pass, fn.Type, fn.Body, "anonymous function") + } + + return true + }) + + return nil, nil +} + +func (a *analyzer) checkFunc(pass *analysis.Pass, ft *ast.FuncType, block *ast.BlockStmt, fnName string) { + if len(ft.Params.List) < 1 { + return + } + + fnInfo := checkTestFunctionSignature(ft.Params.List[0], fnName) + if fnInfo == nil { + return + } + + ast.Inspect(block, func(n ast.Node) bool { + switch v := n.(type) { + case *ast.SelectorExpr: + return !a.reportSelector(pass, v, fnInfo) + + case *ast.Ident: + return !a.reportIdent(pass, v, fnInfo) + + case *ast.CallExpr: + return !a.reportCallExpr(pass, v, fnInfo) + } + + return true + }) +} + +func (a *analyzer) isGoSupported(pass *analysis.Pass) bool { + if a.skipGoVersionDetection { + return true + } + + // Prior to go1.22, versions.FileVersion returns only the toolchain version, + // which is of no use to us, + // so disable this analyzer on earlier versions. + if !slices.Contains(build.Default.ReleaseTags, "go1.22") { + return false + } + + pkgVersion := pass.Pkg.GoVersion() + if pkgVersion == "" { + // Empty means Go devel. 
+ return true + } + + raw := strings.TrimPrefix(pkgVersion, "go") + + // prerelease version (go1.24rc1) + idx := strings.IndexFunc(raw, func(r rune) bool { + return (r < '0' || r > '9') && r != '.' + }) + + if idx != -1 { + raw = raw[:idx] + } + + vParts := strings.Split(raw, ".") + + v, err := strconv.Atoi(strings.Join(vParts[:2], "")) + if err != nil { + v = 116 + } + + return v >= 124 +} + +func hasParentFunc(stack []ast.Node) bool { + // -2 because the last parent is the node. + const skipSelf = 2 + + // skip 0 because it's always [*ast.File]. + for i := len(stack) - skipSelf; i > 0; i-- { + s := stack[i] + + switch fn := s.(type) { + case *ast.FuncDecl: + if len(fn.Type.Params.List) < 1 { + continue + } + + if checkTestFunctionSignature(fn.Type.Params.List[0], fn.Name.Name) != nil { + return true + } + + case *ast.FuncLit: + if len(fn.Type.Params.List) < 1 { + continue + } + + if checkTestFunctionSignature(fn.Type.Params.List[0], "anonymous function") != nil { + return true + } + } + } + + return false +} + +func checkTestFunctionSignature(arg *ast.Field, fnName string) *FuncInfo { + switch at := arg.Type.(type) { + case *ast.StarExpr: + if se, ok := at.X.(*ast.SelectorExpr); ok { + return createFuncInfo(arg, "", se, testingPkgName, fnName, "T", "B") + } + + case *ast.SelectorExpr: + return createFuncInfo(arg, "tb", at, testingPkgName, fnName, "TB") + } + + return nil +} + +func createFuncInfo(arg *ast.Field, defaultName string, se *ast.SelectorExpr, pkgName, fnName string, selectorNames ...string) *FuncInfo { + ok := checkSelectorName(se, pkgName, selectorNames...) + if !ok { + return nil + } + + return &FuncInfo{ + Name: fnName, + ArgName: getTestArgName(arg, defaultName), + } +} + +func checkSelectorName(se *ast.SelectorExpr, pkgName string, selectorNames ...string) bool { + if ident, ok := se.X.(*ast.Ident); ok { + return pkgName == ident.Name && slices.Contains(selectorNames, se.Sel.Name) + } + + return false +} + +func getTestArgName(arg *ast.Field, defaultName string) string { + if len(arg.Names) > 0 && arg.Names[0].Name != "_" { + return arg.Names[0].Name + } + + return defaultName +} diff --git a/hack/tools/vendor/github.com/mgechev/revive/lint/linter.go b/hack/tools/vendor/github.com/mgechev/revive/lint/linter.go index 56036e83d8..b777f9251d 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/lint/linter.go +++ b/hack/tools/vendor/github.com/mgechev/revive/lint/linter.go @@ -109,7 +109,7 @@ func (l *Linter) Lint(packages [][]string, ruleSet []Rule, config Config) (<-cha fmt.Fprintln(os.Stderr, err) os.Exit(1) } - defer wg.Done() + wg.Done() }(packages[n], perPkgVersions[n]) } @@ -173,6 +173,10 @@ func detectGoMod(dir string) (rootDir string, ver *goversion.Version, err error) return "", nil, fmt.Errorf("failed to parse %q, got %v", modFileName, err) } + if modAst.Go == nil { + return "", nil, fmt.Errorf("%q does not specify a Go version", modFileName) + } + ver, err = goversion.NewVersion(modAst.Go.Version) return filepath.Dir(modFileName), ver, err } @@ -180,7 +184,9 @@ func detectGoMod(dir string) (rootDir string, ver *goversion.Version, err error) func retrieveModFile(dir string) (string, error) { const lookingForFile = "go.mod" for { - if dir == "." || dir == "/" { + // filepath.Dir returns 'C:\' on Windows, and '/' on Unix + isRootDir := (dir == filepath.VolumeName(dir)+string(filepath.Separator)) + if dir == "." 
|| isRootDir { return "", fmt.Errorf("did not found %q file", lookingForFile) } diff --git a/hack/tools/vendor/github.com/mgechev/revive/lint/package.go b/hack/tools/vendor/github.com/mgechev/revive/lint/package.go index 2ab035f169..4a633f35ab 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/lint/package.go +++ b/hack/tools/vendor/github.com/mgechev/revive/lint/package.go @@ -189,7 +189,7 @@ func (p *Package) lint(rules []Rule, config Config, failures chan Failure) { wg.Add(1) go (func(file *File) { file.lint(rules, config, failures) - defer wg.Done() + wg.Done() })(file) } wg.Wait() diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/add-constant.go b/hack/tools/vendor/github.com/mgechev/revive/rule/add_constant.go similarity index 98% rename from hack/tools/vendor/github.com/mgechev/revive/rule/add-constant.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/add_constant.go index 233f1d8489..399382c8bc 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/add-constant.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/add_constant.go @@ -36,12 +36,13 @@ type AddConstantRule struct { allowList allowList ignoreFunctions []*regexp.Regexp strLitLimit int - sync.Mutex + + configureOnce sync.Once } // Apply applies the rule to given file. func (r *AddConstantRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure @@ -201,9 +202,6 @@ func (w *lintAddConstantRule) isStructTag(n *ast.BasicLit) bool { } func (r *AddConstantRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.allowList == nil { r.strLitLimit = defaultStrLitLimit r.allowList = newAllowList() diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/argument-limit.go b/hack/tools/vendor/github.com/mgechev/revive/rule/argument_limit.go similarity index 94% rename from hack/tools/vendor/github.com/mgechev/revive/rule/argument-limit.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/argument_limit.go index b6ce0e81a7..b4d56de0ee 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/argument-limit.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/argument_limit.go @@ -11,18 +11,13 @@ import ( // ArgumentsLimitRule lints given else constructs. type ArgumentsLimitRule struct { max int - sync.Mutex + + configureOnce sync.Once } const defaultArgumentsLimit = 8 func (r *ArgumentsLimitRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.max != 0 { - return - } - if len(arguments) < 1 { r.max = defaultArgumentsLimit return @@ -37,7 +32,7 @@ func (r *ArgumentsLimitRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. 
func (r *ArgumentsLimitRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure onFailure := func(failure lint.Failure) { diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/banned-characters.go b/hack/tools/vendor/github.com/mgechev/revive/rule/banned_characters.go similarity index 94% rename from hack/tools/vendor/github.com/mgechev/revive/rule/banned-characters.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/banned_characters.go index 12997bae11..926b32c214 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/banned-characters.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/banned_characters.go @@ -12,15 +12,14 @@ import ( // BannedCharsRule checks if a file contains banned characters. type BannedCharsRule struct { bannedCharList []string - sync.Mutex + + configureOnce sync.Once } const bannedCharsRuleName = "banned-characters" func (r *BannedCharsRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.bannedCharList == nil && len(arguments) > 0 { + if len(arguments) > 0 { checkNumberOfArguments(1, arguments, bannedCharsRuleName) r.bannedCharList = r.getBannedCharsList(arguments) } @@ -28,7 +27,7 @@ func (r *BannedCharsRule) configure(arguments lint.Arguments) { // Apply applied the rule to the given file. func (r *BannedCharsRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure onFailure := func(failure lint.Failure) { diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/bare-return.go b/hack/tools/vendor/github.com/mgechev/revive/rule/bare_return.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/bare-return.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/bare_return.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/blank-imports.go b/hack/tools/vendor/github.com/mgechev/revive/rule/blank_imports.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/blank-imports.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/blank_imports.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/bool-literal-in-expr.go b/hack/tools/vendor/github.com/mgechev/revive/rule/bool_literal_in_expr.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/bool-literal-in-expr.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/bool_literal_in_expr.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/call-to-gc.go b/hack/tools/vendor/github.com/mgechev/revive/rule/call_to_gc.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/call-to-gc.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/call_to_gc.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go b/hack/tools/vendor/github.com/mgechev/revive/rule/cognitive_complexity.go similarity index 97% rename from hack/tools/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/cognitive_complexity.go index 83640fd3d1..ecde3882e1 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/cognitive-complexity.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/cognitive_complexity.go @@ -13,18 +13,13 @@ import ( // 
CognitiveComplexityRule lints given else constructs. type CognitiveComplexityRule struct { maxComplexity int - sync.Mutex + + configureOnce sync.Once } const defaultMaxCognitiveComplexity = 7 func (r *CognitiveComplexityRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.maxComplexity != 0 { - return // already configured - } - if len(arguments) < 1 { r.maxComplexity = defaultMaxCognitiveComplexity return @@ -40,7 +35,7 @@ func (r *CognitiveComplexityRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. func (r *CognitiveComplexityRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/comment-spacings.go b/hack/tools/vendor/github.com/mgechev/revive/rule/comment_spacings.go similarity index 93% rename from hack/tools/vendor/github.com/mgechev/revive/rule/comment-spacings.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/comment_spacings.go index f721513017..7bdc0e71d0 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/comment-spacings.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/comment_spacings.go @@ -12,16 +12,11 @@ import ( // the comment symbol( // ) and the start of the comment text type CommentSpacingsRule struct { allowList []string - sync.Mutex + + configureOnce sync.Once } func (r *CommentSpacingsRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.allowList != nil { - return // already configured - } - r.allowList = []string{} for _, arg := range arguments { allow, ok := arg.(string) // Alt. non panicking version @@ -34,7 +29,7 @@ func (r *CommentSpacingsRule) configure(arguments lint.Arguments) { // Apply the rule. func (r *CommentSpacingsRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configure(args) + r.configureOnce.Do(func() { r.configure(args) }) var failures []lint.Failure diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/comments-density.go b/hack/tools/vendor/github.com/mgechev/revive/rule/comments_density.go similarity index 93% rename from hack/tools/vendor/github.com/mgechev/revive/rule/comments-density.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/comments_density.go index c5298ea07d..f2382b1f02 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/comments-density.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/comments_density.go @@ -12,22 +12,13 @@ import ( // CommentsDensityRule lints given else constructs. type CommentsDensityRule struct { minimumCommentsDensity int64 - configured bool - sync.Mutex + + configureOnce sync.Once } const defaultMinimumCommentsPercentage = 0 func (r *CommentsDensityRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.configured { - return - } - - r.configured = true - if len(arguments) < 1 { r.minimumCommentsDensity = defaultMinimumCommentsPercentage return @@ -42,7 +33,7 @@ func (r *CommentsDensityRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. 
func (r *CommentsDensityRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) commentsLines := countDocLines(file.AST.Comments) statementsCount := countStatements(file.AST) diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/confusing-naming.go b/hack/tools/vendor/github.com/mgechev/revive/rule/confusing_naming.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/confusing-naming.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/confusing_naming.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/confusing-results.go b/hack/tools/vendor/github.com/mgechev/revive/rule/confusing_results.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/confusing-results.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/confusing_results.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go b/hack/tools/vendor/github.com/mgechev/revive/rule/constant_logical_expr.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/constant-logical-expr.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/constant_logical_expr.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/context-as-argument.go b/hack/tools/vendor/github.com/mgechev/revive/rule/context_as_argument.go similarity index 92% rename from hack/tools/vendor/github.com/mgechev/revive/rule/context-as-argument.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/context_as_argument.go index e0c8cfa5e9..8bc5f8b614 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/context-as-argument.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/context_as_argument.go @@ -12,32 +12,31 @@ import ( // ContextAsArgumentRule lints given else constructs. type ContextAsArgumentRule struct { allowTypesLUT map[string]struct{} - sync.Mutex + + configureOnce sync.Once } // Apply applies the rule to given file. func (r *ContextAsArgumentRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.Lock() - if r.allowTypesLUT == nil { - r.allowTypesLUT = getAllowTypesFromArguments(args) - } - r.Unlock() + r.configureOnce.Do(func() { r.configure(args) }) var failures []lint.Failure - r.Lock() walker := lintContextArguments{ allowTypesLUT: r.allowTypesLUT, onFailure: func(failure lint.Failure) { failures = append(failures, failure) }, } - r.Unlock() ast.Walk(walker, file.AST) return failures } +func (r *ContextAsArgumentRule) configure(arguments lint.Arguments) { + r.allowTypesLUT = getAllowTypesFromArguments(arguments) +} + // Name returns the rule name. func (*ContextAsArgumentRule) Name() string { return "context-as-argument" diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/context-keys-type.go b/hack/tools/vendor/github.com/mgechev/revive/rule/context_keys_type.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/context-keys-type.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/context_keys_type.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/cyclomatic.go b/hack/tools/vendor/github.com/mgechev/revive/rule/cyclomatic.go index 10413de24c..c1a2de97ac 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/cyclomatic.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/cyclomatic.go @@ -14,18 +14,13 @@ import ( // CyclomaticRule lints given else constructs. 
type CyclomaticRule struct { maxComplexity int - sync.Mutex + + configureOnce sync.Once } const defaultMaxCyclomaticComplexity = 10 func (r *CyclomaticRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.maxComplexity != 0 { - return // already configured - } - if len(arguments) < 1 { r.maxComplexity = defaultMaxCyclomaticComplexity return @@ -40,7 +35,7 @@ func (r *CyclomaticRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. func (r *CyclomaticRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure fileAst := file.AST diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/deep-exit.go b/hack/tools/vendor/github.com/mgechev/revive/rule/deep_exit.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/deep-exit.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/deep_exit.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/defer.go b/hack/tools/vendor/github.com/mgechev/revive/rule/defer.go index 3c31d507bf..f7c716eb69 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/defer.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/defer.go @@ -11,22 +11,17 @@ import ( // DeferRule lints unused params in functions. type DeferRule struct { allow map[string]bool - sync.Mutex + + configureOnce sync.Once } func (r *DeferRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.allow != nil { - return // already configured - } - r.allow = r.allowFromArgs(arguments) } // Apply applies the rule to given file. func (r *DeferRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure onFailure := func(failure lint.Failure) { diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/dot-imports.go b/hack/tools/vendor/github.com/mgechev/revive/rule/dot_imports.go similarity index 95% rename from hack/tools/vendor/github.com/mgechev/revive/rule/dot-imports.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/dot_imports.go index df0b2a7f4f..f6c7fbcfba 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/dot-imports.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/dot_imports.go @@ -10,13 +10,14 @@ import ( // DotImportsRule lints given else constructs. type DotImportsRule struct { - sync.Mutex allowedPackages allowPackages + + configureOnce sync.Once } // Apply applies the rule to given file. 
func (r *DotImportsRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure @@ -41,13 +42,6 @@ func (*DotImportsRule) Name() string { } func (r *DotImportsRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.allowedPackages != nil { - return - } - r.allowedPackages = make(allowPackages) if len(arguments) == 0 { return diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/duplicated-imports.go b/hack/tools/vendor/github.com/mgechev/revive/rule/duplicated_imports.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/duplicated-imports.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/duplicated_imports.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/early-return.go b/hack/tools/vendor/github.com/mgechev/revive/rule/early_return.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/early-return.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/early_return.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/empty-block.go b/hack/tools/vendor/github.com/mgechev/revive/rule/empty_block.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/empty-block.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/empty_block.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/empty-lines.go b/hack/tools/vendor/github.com/mgechev/revive/rule/empty_lines.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/empty-lines.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/empty_lines.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/enforce-map-style.go b/hack/tools/vendor/github.com/mgechev/revive/rule/enforce_map_style.go similarity index 96% rename from hack/tools/vendor/github.com/mgechev/revive/rule/enforce-map-style.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/enforce_map_style.go index c698c40ed8..7ddf31e35d 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/enforce-map-style.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/enforce_map_style.go @@ -39,20 +39,12 @@ func mapStyleFromString(s string) (enforceMapStyleType, error) { // EnforceMapStyleRule implements a rule to enforce `make(map[type]type)` over `map[type]type{}`. type EnforceMapStyleRule struct { - configured bool enforceMapStyle enforceMapStyleType - sync.Mutex + + configureOnce sync.Once } func (r *EnforceMapStyleRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.configured { - return - } - r.configured = true - if len(arguments) < 1 { r.enforceMapStyle = enforceMapStyleTypeAny return @@ -72,7 +64,7 @@ func (r *EnforceMapStyleRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. 
func (r *EnforceMapStyleRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) if r.enforceMapStyle == enforceMapStyleTypeAny { // this linter is not configured diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/enforce-repeated-arg-type-style.go b/hack/tools/vendor/github.com/mgechev/revive/rule/enforce_repeated_arg_type_style.go similarity index 97% rename from hack/tools/vendor/github.com/mgechev/revive/rule/enforce-repeated-arg-type-style.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/enforce_repeated_arg_type_style.go index a435ee186c..3f9712aef0 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/enforce-repeated-arg-type-style.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/enforce_repeated_arg_type_style.go @@ -41,22 +41,13 @@ func repeatedArgTypeStyleFromString(s string) enforceRepeatedArgTypeStyleType { // EnforceRepeatedArgTypeStyleRule implements a rule to enforce repeated argument type style. type EnforceRepeatedArgTypeStyleRule struct { - configured bool funcArgStyle enforceRepeatedArgTypeStyleType funcRetValStyle enforceRepeatedArgTypeStyleType - sync.Mutex + configureOnce sync.Once } func (r *EnforceRepeatedArgTypeStyleRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.configured { - return - } - r.configured = true - r.funcArgStyle = enforceRepeatedArgTypeStyleTypeAny r.funcRetValStyle = enforceRepeatedArgTypeStyleTypeAny @@ -94,7 +85,7 @@ func (r *EnforceRepeatedArgTypeStyleRule) configure(arguments lint.Arguments) { // Apply applies the rule to a given file. func (r *EnforceRepeatedArgTypeStyleRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) if r.funcArgStyle == enforceRepeatedArgTypeStyleTypeAny && r.funcRetValStyle == enforceRepeatedArgTypeStyleTypeAny { // This linter is not configured, return no failures. diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/enforce-slice-style.go b/hack/tools/vendor/github.com/mgechev/revive/rule/enforce_slice_style.go similarity index 97% rename from hack/tools/vendor/github.com/mgechev/revive/rule/enforce-slice-style.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/enforce_slice_style.go index 14be258935..7170379d93 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/enforce-slice-style.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/enforce_slice_style.go @@ -43,20 +43,12 @@ func sliceStyleFromString(s string) (enforceSliceStyleType, error) { // EnforceSliceStyleRule implements a rule to enforce `make([]type)` over `[]type{}`. type EnforceSliceStyleRule struct { - configured bool enforceSliceStyle enforceSliceStyleType - sync.Mutex + + configureOnce sync.Once } func (r *EnforceSliceStyleRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.configured { - return - } - r.configured = true - if len(arguments) < 1 { r.enforceSliceStyle = enforceSliceStyleTypeAny return @@ -76,7 +68,7 @@ func (r *EnforceSliceStyleRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. 
func (r *EnforceSliceStyleRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) if r.enforceSliceStyle == enforceSliceStyleTypeAny { // this linter is not configured diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/error-naming.go b/hack/tools/vendor/github.com/mgechev/revive/rule/error_naming.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/error-naming.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/error_naming.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/error-return.go b/hack/tools/vendor/github.com/mgechev/revive/rule/error_return.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/error-return.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/error_return.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/error-strings.go b/hack/tools/vendor/github.com/mgechev/revive/rule/error_strings.go similarity index 97% rename from hack/tools/vendor/github.com/mgechev/revive/rule/error-strings.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/error_strings.go index 81ebda5401..97a0f4d065 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/error-strings.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/error_strings.go @@ -15,17 +15,11 @@ import ( // ErrorStringsRule lints given else constructs. type ErrorStringsRule struct { errorFunctions map[string]map[string]struct{} - sync.Mutex + + configureOnce sync.Once } func (r *ErrorStringsRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.errorFunctions != nil { - return - } - r.errorFunctions = map[string]map[string]struct{}{ "fmt": { "Errorf": {}, @@ -60,7 +54,7 @@ func (r *ErrorStringsRule) configure(arguments lint.Arguments) { func (r *ErrorStringsRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { var failures []lint.Failure - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) fileAst := file.AST walker := lintErrorStrings{ diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/exported.go b/hack/tools/vendor/github.com/mgechev/revive/rule/exported.go index e3972d40e5..7ee27b309f 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/exported.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/exported.go @@ -55,20 +55,13 @@ func (dc *disabledChecks) isDisabled(checkName string) bool { // ExportedRule lints given else constructs. type ExportedRule struct { - configured bool stuttersMsg string disabledChecks disabledChecks - sync.Mutex + + configureOnce sync.Once } func (r *ExportedRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.configured { - return - } - r.configured = true - r.disabledChecks = disabledChecks{PrivateReceivers: true, PublicInterfaces: true} r.stuttersMsg = "stutters" for _, flag := range arguments { @@ -104,7 +97,7 @@ func (r *ExportedRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. 
func (r *ExportedRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configure(args) + r.configureOnce.Do(func() { r.configure(args) }) var failures []lint.Failure if file.IsTest() { diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/file-header.go b/hack/tools/vendor/github.com/mgechev/revive/rule/file_header.go similarity index 92% rename from hack/tools/vendor/github.com/mgechev/revive/rule/file-header.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/file_header.go index 0dcb57746e..52513d8e8d 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/file-header.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/file_header.go @@ -11,7 +11,8 @@ import ( // FileHeaderRule lints given else constructs. type FileHeaderRule struct { header string - sync.Mutex + + configureOnce sync.Once } var ( @@ -20,12 +21,6 @@ var ( ) func (r *FileHeaderRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.header != "" { - return // already configured - } - if len(arguments) < 1 { return } @@ -39,7 +34,7 @@ func (r *FileHeaderRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. func (r *FileHeaderRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) if r.header == "" { return nil diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/file-length-limit.go b/hack/tools/vendor/github.com/mgechev/revive/rule/file_length_limit.go similarity index 96% rename from hack/tools/vendor/github.com/mgechev/revive/rule/file-length-limit.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/file_length_limit.go index c5a5641f41..0fe075c56e 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/file-length-limit.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/file_length_limit.go @@ -20,12 +20,13 @@ type FileLengthLimitRule struct { skipComments bool // skipBlankLines indicates whether to skip blank lines when counting lines. skipBlankLines bool - sync.Mutex + + configureOnce sync.Once } // Apply applies the rule to given file. func (r *FileLengthLimitRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) if r.max <= 0 { // when max is negative or 0 the rule is disabled @@ -75,13 +76,6 @@ func (r *FileLengthLimitRule) Apply(file *lint.File, arguments lint.Arguments) [ } func (r *FileLengthLimitRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.max != 0 { - return // already configured - } - if len(arguments) < 1 { return // use default } diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/filename-format.go b/hack/tools/vendor/github.com/mgechev/revive/rule/filename_format.go similarity index 94% rename from hack/tools/vendor/github.com/mgechev/revive/rule/filename-format.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/filename_format.go index 49fdf9c3e8..9d8047829e 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/filename-format.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/filename_format.go @@ -13,12 +13,13 @@ import ( // FilenameFormatRule lints source filenames according to a set of regular expressions given as arguments type FilenameFormatRule struct { format *regexp.Regexp - sync.Mutex + + configureOnce sync.Once } // Apply applies the rule to the given file. 
func (r *FilenameFormatRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) filename := filepath.Base(file.Name) if r.format.MatchString(filename) { @@ -55,13 +56,6 @@ func (*FilenameFormatRule) Name() string { var defaultFormat = regexp.MustCompile("^[_A-Za-z0-9][_A-Za-z0-9-]*.go$") func (r *FilenameFormatRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.format != nil { - return - } - argsCount := len(arguments) if argsCount == 0 { r.format = defaultFormat diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/flag-param.go b/hack/tools/vendor/github.com/mgechev/revive/rule/flag_param.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/flag-param.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/flag_param.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/function-length.go b/hack/tools/vendor/github.com/mgechev/revive/rule/function_length.go similarity index 96% rename from hack/tools/vendor/github.com/mgechev/revive/rule/function-length.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/function_length.go index 30402313d1..c58cd4c0f4 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/function-length.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/function_length.go @@ -11,20 +11,13 @@ import ( // FunctionLength lint. type FunctionLength struct { - maxStmt int - maxLines int - configured bool - sync.Mutex + maxStmt int + maxLines int + + configureOnce sync.Once } func (r *FunctionLength) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.configured { - return - } - - r.configured = true maxStmt, maxLines := r.parseArguments(arguments) r.maxStmt = int(maxStmt) r.maxLines = int(maxLines) @@ -32,7 +25,7 @@ func (r *FunctionLength) configure(arguments lint.Arguments) { // Apply applies the rule to given file. func (r *FunctionLength) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/function-result-limit.go b/hack/tools/vendor/github.com/mgechev/revive/rule/function_result_limit.go similarity index 94% rename from hack/tools/vendor/github.com/mgechev/revive/rule/function-result-limit.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/function_result_limit.go index 23474b5ee4..5b72f01ab8 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/function-result-limit.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/function_result_limit.go @@ -11,18 +11,13 @@ import ( // FunctionResultsLimitRule lints given else constructs. type FunctionResultsLimitRule struct { max int - sync.Mutex + + configureOnce sync.Once } const defaultResultsLimit = 3 func (r *FunctionResultsLimitRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.max != 0 { - return // already configured - } - if len(arguments) < 1 { r.max = defaultResultsLimit return @@ -41,7 +36,7 @@ func (r *FunctionResultsLimitRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. 
func (r *FunctionResultsLimitRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/get-return.go b/hack/tools/vendor/github.com/mgechev/revive/rule/get_return.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/get-return.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/get_return.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/identical-branches.go b/hack/tools/vendor/github.com/mgechev/revive/rule/identical_branches.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/identical-branches.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/identical_branches.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/if-return.go b/hack/tools/vendor/github.com/mgechev/revive/rule/if_return.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/if-return.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/if_return.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/import-alias-naming.go b/hack/tools/vendor/github.com/mgechev/revive/rule/import_alias_naming.go similarity index 96% rename from hack/tools/vendor/github.com/mgechev/revive/rule/import-alias-naming.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/import_alias_naming.go index 48d22566a1..043bf0d76e 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/import-alias-naming.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/import_alias_naming.go @@ -10,10 +10,10 @@ import ( // ImportAliasNamingRule lints import alias naming. type ImportAliasNamingRule struct { - configured bool allowRegexp *regexp.Regexp denyRegexp *regexp.Regexp - sync.Mutex + + configureOnce sync.Once } const defaultImportAliasNamingAllowRule = "^[a-z][a-z0-9]{0,}$" @@ -21,12 +21,6 @@ const defaultImportAliasNamingAllowRule = "^[a-z][a-z0-9]{0,}$" var defaultImportAliasNamingAllowRegexp = regexp.MustCompile(defaultImportAliasNamingAllowRule) func (r *ImportAliasNamingRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.configured { - return - } - if len(arguments) == 0 { r.allowRegexp = defaultImportAliasNamingAllowRegexp return @@ -57,7 +51,7 @@ func (r *ImportAliasNamingRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. 
func (r *ImportAliasNamingRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/import-shadowing.go b/hack/tools/vendor/github.com/mgechev/revive/rule/import_shadowing.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/import-shadowing.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/import_shadowing.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/imports-blocklist.go b/hack/tools/vendor/github.com/mgechev/revive/rule/imports_blocklist.go similarity index 63% rename from hack/tools/vendor/github.com/mgechev/revive/rule/imports-blocklist.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/imports_blocklist.go index 431066403a..18d77ca1c5 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/imports-blocklist.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/imports_blocklist.go @@ -11,29 +11,24 @@ import ( // ImportsBlocklistRule lints given else constructs. type ImportsBlocklistRule struct { blocklist []*regexp.Regexp - sync.Mutex + + configureOnce sync.Once } var replaceImportRegexp = regexp.MustCompile(`/?\*\*/?`) func (r *ImportsBlocklistRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.blocklist == nil { - r.blocklist = make([]*regexp.Regexp, 0) - - for _, arg := range arguments { - argStr, ok := arg.(string) - if !ok { - panic(fmt.Sprintf("Invalid argument to the imports-blocklist rule. Expecting a string, got %T", arg)) - } - regStr, err := regexp.Compile(fmt.Sprintf(`(?m)"%s"$`, replaceImportRegexp.ReplaceAllString(argStr, `(\W|\w)*`))) - if err != nil { - panic(fmt.Sprintf("Invalid argument to the imports-blocklist rule. Expecting %q to be a valid regular expression, got: %v", argStr, err)) - } - r.blocklist = append(r.blocklist, regStr) + r.blocklist = []*regexp.Regexp{} + for _, arg := range arguments { + argStr, ok := arg.(string) + if !ok { + panic(fmt.Sprintf("Invalid argument to the imports-blocklist rule. Expecting a string, got %T", arg)) + } + regStr, err := regexp.Compile(fmt.Sprintf(`(?m)"%s"$`, replaceImportRegexp.ReplaceAllString(argStr, `(\W|\w)*`))) + if err != nil { + panic(fmt.Sprintf("Invalid argument to the imports-blocklist rule. Expecting %q to be a valid regular expression, got: %v", argStr, err)) } + r.blocklist = append(r.blocklist, regStr) } } @@ -48,7 +43,7 @@ func (r *ImportsBlocklistRule) isBlocklisted(path string) bool { // Apply applies the rule to given file. 
func (r *ImportsBlocklistRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/increment-decrement.go b/hack/tools/vendor/github.com/mgechev/revive/rule/increment_decrement.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/increment-decrement.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/increment_decrement.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/indent-error-flow.go b/hack/tools/vendor/github.com/mgechev/revive/rule/indent_error_flow.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/indent-error-flow.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/indent_error_flow.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/line-length-limit.go b/hack/tools/vendor/github.com/mgechev/revive/rule/line_length_limit.go similarity index 94% rename from hack/tools/vendor/github.com/mgechev/revive/rule/line-length-limit.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/line_length_limit.go index a154b7aece..415761e1e9 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/line-length-limit.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/line_length_limit.go @@ -15,18 +15,13 @@ import ( // LineLengthLimitRule lints given else constructs. type LineLengthLimitRule struct { max int - sync.Mutex + + configureOnce sync.Once } const defaultLineLengthLimit = 80 func (r *LineLengthLimitRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.max != 0 { - return // already configured - } - if len(arguments) < 1 { r.max = defaultLineLengthLimit return @@ -42,7 +37,7 @@ func (r *LineLengthLimitRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. func (r *LineLengthLimitRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/max-control-nesting.go b/hack/tools/vendor/github.com/mgechev/revive/rule/max_control_nesting.go similarity index 96% rename from hack/tools/vendor/github.com/mgechev/revive/rule/max-control-nesting.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/max_control_nesting.go index 5dbb1eefa4..b2c5af70e6 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/max-control-nesting.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/max_control_nesting.go @@ -11,14 +11,15 @@ import ( // MaxControlNestingRule lints given else constructs. type MaxControlNestingRule struct { max int64 - sync.Mutex + + configureOnce sync.Once } const defaultMaxControlNesting = 5 // Apply applies the rule to given file. 
func (r *MaxControlNestingRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure @@ -107,12 +108,6 @@ func (w *lintMaxControlNesting) walkControlledBlock(b ast.Node) { } func (r *MaxControlNestingRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if !(r.max < 1) { - return // max already configured - } - if len(arguments) < 1 { r.max = defaultMaxControlNesting return
diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/max-public-structs.go b/hack/tools/vendor/github.com/mgechev/revive/rule/max_public_structs.go similarity index 87% rename from hack/tools/vendor/github.com/mgechev/revive/rule/max-public-structs.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/max_public_structs.go index 70840e734e..d6f91e3752 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/max-public-structs.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/max_public_structs.go @@ -1,6 +1,7 @@ package rule import ( + "fmt" "go/ast" "strings" "sync" @@ -11,18 +12,13 @@ import ( // MaxPublicStructsRule lints given else constructs. type MaxPublicStructsRule struct { max int64 - sync.Mutex + + configureOnce sync.Once } const defaultMaxPublicStructs = 5 func (r *MaxPublicStructsRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.max == 0 { - return // already configured - } - if len(arguments) < 1 { r.max = defaultMaxPublicStructs return @@ -39,10 +35,14 @@ func (r *MaxPublicStructsRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. func (r *MaxPublicStructsRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure + if r.max < 1 { + return failures + } + fileAst := file.AST walker := &lintMaxPublicStructs{ @@ -56,7 +56,7 @@ func (r *MaxPublicStructsRule) Apply(file *lint.File, arguments lint.Arguments) if walker.current > r.max { walker.onFailure(lint.Failure{ - Failure: "you have exceeded the maximum number of public struct declarations", + Failure: fmt.Sprintf("you have exceeded the maximum number (%d) of public struct declarations", r.max), Confidence: 1, Node: fileAst, Category: "style",
diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/modifies-param.go b/hack/tools/vendor/github.com/mgechev/revive/rule/modifies_param.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/modifies-param.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/modifies_param.go
diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go b/hack/tools/vendor/github.com/mgechev/revive/rule/modifies_value_receiver.go similarity index 65% rename from hack/tools/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/modifies_value_receiver.go index e9e64b9a6a..2f92991f53 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/modifies-value-receiver.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/modifies_value_receiver.go @@ -2,6 +2,7 @@ package rule import ( "go/ast" + "go/token" "strings" "github.com/mgechev/revive/lint" @@ -60,14 +61,14 @@ func (w lintModifiesValRecRule) Visit(node ast.Node) ast.Visitor { return nil // skip, anonymous receiver } - fselect := func(n ast.Node) bool { + receiverAssignmentFinder := func(n ast.Node) bool { // look for assignments with the receiver in the right hand - asgmt, ok := n.(*ast.AssignStmt) + assignment, ok := n.(*ast.AssignStmt) if !ok { return false } - for _, exp := range asgmt.Lhs { + for _, exp := range assignment.Lhs { switch e := exp.(type) { case *ast.IndexExpr: // receiver...[] = ... continue @@ -92,7 +93,15 @@ func (w lintModifiesValRecRule) Visit(node ast.Node) ast.Visitor { return false } - assignmentsToReceiver := pick(n.Body, fselect) + assignmentsToReceiver := pick(n.Body, receiverAssignmentFinder) + if len(assignmentsToReceiver) == 0 { + return nil // receiver is not modified + } + + methodReturnsReceiver := len(w.findReturnReceiverStatements(receiverName, n.Body)) > 0 + if methodReturnsReceiver { + return nil // modification seems legit (see issue #1066) + } for _, assignment := range assignmentsToReceiver { w.onFailure(lint.Failure{ @@ -127,3 +136,44 @@ func (lintModifiesValRecRule) getNameFromExpr(ie ast.Expr) string { return ident.Name } + +func (w lintModifiesValRecRule) findReturnReceiverStatements(receiverName string, target ast.Node) []ast.Node { + finder := func(n ast.Node) bool { + // look for returns with the receiver as value + returnStatement, ok := n.(*ast.ReturnStmt) + if !ok { + return false + } + + for _, exp := range returnStatement.Results { + switch e := exp.(type) { + case *ast.SelectorExpr: // receiver.field = ... + name := w.getNameFromExpr(e.X) + if name == "" || name != receiverName { + continue + } + case *ast.Ident: // receiver := ... + if e.Name != receiverName { + continue + } + case *ast.UnaryExpr: + if e.Op != token.AND { + continue + } + name := w.getNameFromExpr(e.X) + if name == "" || name != receiverName { + continue + } + + default: + continue + } + + return true + } + + return false + } + + return pick(target, finder) +}
diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/nested-structs.go b/hack/tools/vendor/github.com/mgechev/revive/rule/nested_structs.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/nested-structs.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/nested_structs.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/optimize-operands-order.go b/hack/tools/vendor/github.com/mgechev/revive/rule/optimize_operands_order.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/optimize-operands-order.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/optimize_operands_order.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/package-comments.go b/hack/tools/vendor/github.com/mgechev/revive/rule/package_comments.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/package-comments.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/package_comments.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/range-val-address.go b/hack/tools/vendor/github.com/mgechev/revive/rule/range_val_address.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/range-val-address.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/range_val_address.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/range-val-in-closure.go b/hack/tools/vendor/github.com/mgechev/revive/rule/range_val_in_closure.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/range-val-in-closure.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/range_val_in_closure.go
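A note on the recurring change across the revive rules above: each one replaces a sync.Mutex plus an "already configured" sentinel check with a sync.Once that wraps configure(). A minimal sketch of the pattern follows; exampleRule and the []any argument type are hypothetical stand-ins for a concrete rule and lint.Arguments, not part of this diff.

// Sketch of the sync.Once configuration pattern adopted above.
// exampleRule and []any are illustrative stand-ins, not from this diff.
package rule

import "sync"

type exampleRule struct {
	max int

	configureOnce sync.Once
}

const defaultMax = 5

func (r *exampleRule) configure(arguments []any) {
	// No lock or "already configured" sentinel is needed here:
	// sync.Once guarantees this body runs at most once, and callers
	// that race with the first call block until it completes.
	if len(arguments) < 1 {
		r.max = defaultMax
		return
	}
	if v, ok := arguments[0].(int64); ok {
		r.max = int(v)
	}
}

func (r *exampleRule) Apply(arguments []any) {
	r.configureOnce.Do(func() { r.configure(arguments) })
	// ... lint using r.max ...
}

As with the old sentinel checks, configuration stays first-call-wins (only the first Apply call's arguments are honored); the Once simply drops the per-call locking and the extra sentinel field.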
diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/receiver-naming.go b/hack/tools/vendor/github.com/mgechev/revive/rule/receiver_naming.go similarity index 96% rename from hack/tools/vendor/github.com/mgechev/revive/rule/receiver-naming.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/receiver_naming.go index afcd99b8fd..c83bacc2fb 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/receiver-naming.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/receiver_naming.go @@ -12,18 +12,13 @@ import ( // ReceiverNamingRule lints given else constructs. type ReceiverNamingRule struct { receiverNameMaxLength int - sync.Mutex + + configureOnce sync.Once } const defaultReceiverNameMaxLength = -1 // thus will not check func (r *ReceiverNamingRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.receiverNameMaxLength != 0 { - return - } - r.receiverNameMaxLength = defaultReceiverNameMaxLength if len(arguments) < 1 { return @@ -50,7 +45,7 @@ func (r *ReceiverNamingRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. func (r *ReceiverNamingRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configure(args) + r.configureOnce.Do(func() { r.configure(args) }) var failures []lint.Failure diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go b/hack/tools/vendor/github.com/mgechev/revive/rule/redefines_builtin_id.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/redefines-builtin-id.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/redefines_builtin_id.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/redundant-import-alias.go b/hack/tools/vendor/github.com/mgechev/revive/rule/redundant_import_alias.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/redundant-import-alias.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/redundant_import_alias.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/string-format.go b/hack/tools/vendor/github.com/mgechev/revive/rule/string_format.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/string-format.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/string_format.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/string-of-int.go b/hack/tools/vendor/github.com/mgechev/revive/rule/string_of_int.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/string-of-int.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/string_of_int.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/struct-tag.go b/hack/tools/vendor/github.com/mgechev/revive/rule/struct_tag.go similarity index 98% rename from hack/tools/vendor/github.com/mgechev/revive/rule/struct-tag.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/struct_tag.go index ec3f0c7cf3..4dd9278277 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/struct-tag.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/struct_tag.go @@ -14,15 +14,12 @@ import ( // StructTagRule lints struct tags. 
type StructTagRule struct { userDefined map[string][]string // map: key -> []option - sync.Mutex + + configureOnce sync.Once } func (r *StructTagRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - mustConfigure := r.userDefined == nil && len(arguments) > 0 - if !mustConfigure { + if len(arguments) == 0 { return } @@ -47,7 +44,7 @@ func (r *StructTagRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. func (r *StructTagRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configure(args) + r.configureOnce.Do(func() { r.configure(args) }) var failures []lint.Failure onFailure := func(failure lint.Failure) { diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/superfluous-else.go b/hack/tools/vendor/github.com/mgechev/revive/rule/superfluous_else.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/superfluous-else.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/superfluous_else.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/time-equal.go b/hack/tools/vendor/github.com/mgechev/revive/rule/time_equal.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/time-equal.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/time_equal.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/time-naming.go b/hack/tools/vendor/github.com/mgechev/revive/rule/time_naming.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/time-naming.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/time_naming.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/unchecked-type-assertion.go b/hack/tools/vendor/github.com/mgechev/revive/rule/unchecked_type_assertion.go similarity index 91% rename from hack/tools/vendor/github.com/mgechev/revive/rule/unchecked-type-assertion.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/unchecked_type_assertion.go index eea344060a..34d854e8f5 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/unchecked-type-assertion.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/unchecked_type_assertion.go @@ -15,21 +15,16 @@ const ( // UncheckedTypeAssertionRule lints missing or ignored `ok`-value in dynamic type casts. type UncheckedTypeAssertionRule struct { - sync.Mutex acceptIgnoredAssertionResult bool - configured bool -} -func (u *UncheckedTypeAssertionRule) configure(arguments lint.Arguments) { - u.Lock() - defer u.Unlock() + configureOnce sync.Once +} - if len(arguments) == 0 || u.configured { +func (r *UncheckedTypeAssertionRule) configure(arguments lint.Arguments) { + if len(arguments) == 0 { return } - u.configured = true - args, ok := arguments[0].(map[string]any) if !ok { panic("Unable to get arguments. Expected object of key-value-pairs.") @@ -38,7 +33,7 @@ func (u *UncheckedTypeAssertionRule) configure(arguments lint.Arguments) { for k, v := range args { switch k { case "acceptIgnoredAssertionResult": - u.acceptIgnoredAssertionResult, ok = v.(bool) + r.acceptIgnoredAssertionResult, ok = v.(bool) if !ok { panic(fmt.Sprintf("Unable to parse argument '%s'. Expected boolean.", k)) } @@ -49,8 +44,8 @@ func (u *UncheckedTypeAssertionRule) configure(arguments lint.Arguments) { } // Apply applies the rule to given file. 
-func (u *UncheckedTypeAssertionRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - u.configure(args) +func (r *UncheckedTypeAssertionRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { + r.configureOnce.Do(func() { r.configure(args) }) var failures []lint.Failure @@ -58,7 +53,7 @@ func (u *UncheckedTypeAssertionRule) Apply(file *lint.File, args lint.Arguments) onFailure: func(failure lint.Failure) { failures = append(failures, failure) }, - acceptIgnoredTypeAssertionResult: u.acceptIgnoredAssertionResult, + acceptIgnoredTypeAssertionResult: r.acceptIgnoredAssertionResult, } ast.Walk(walker, file.AST) diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go b/hack/tools/vendor/github.com/mgechev/revive/rule/unconditional_recursion.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/unconditional-recursion.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/unconditional_recursion.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/unexported-naming.go b/hack/tools/vendor/github.com/mgechev/revive/rule/unexported_naming.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/unexported-naming.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/unexported_naming.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/unexported-return.go b/hack/tools/vendor/github.com/mgechev/revive/rule/unexported_return.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/unexported-return.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/unexported_return.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/unhandled-error.go b/hack/tools/vendor/github.com/mgechev/revive/rule/unhandled_error.go similarity index 96% rename from hack/tools/vendor/github.com/mgechev/revive/rule/unhandled-error.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/unhandled_error.go index 95ba56180e..4fad8ccfcc 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/unhandled-error.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/unhandled_error.go @@ -14,17 +14,11 @@ import ( // UnhandledErrorRule lints given else constructs. type UnhandledErrorRule struct { ignoreList []*regexp.Regexp - sync.Mutex + + configureOnce sync.Once } func (r *UnhandledErrorRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.ignoreList != nil { - return // already configured - } - for _, arg := range arguments { argStr, ok := arg.(string) if !ok { @@ -47,7 +41,7 @@ func (r *UnhandledErrorRule) configure(arguments lint.Arguments) { // Apply applies the rule to given file. 
func (r *UnhandledErrorRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configure(args) + r.configureOnce.Do(func() { r.configure(args) }) var failures []lint.Failure diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/unnecessary-stmt.go b/hack/tools/vendor/github.com/mgechev/revive/rule/unnecessary_stmt.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/unnecessary-stmt.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/unnecessary_stmt.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/unreachable-code.go b/hack/tools/vendor/github.com/mgechev/revive/rule/unreachable_code.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/unreachable-code.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/unreachable_code.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/unused-param.go b/hack/tools/vendor/github.com/mgechev/revive/rule/unused_param.go similarity index 96% rename from hack/tools/vendor/github.com/mgechev/revive/rule/unused-param.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/unused_param.go index 4b04ee916b..a8514ac2df 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/unused-param.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/unused_param.go @@ -11,22 +11,14 @@ import ( // UnusedParamRule lints unused params in functions. type UnusedParamRule struct { - configured bool // regex to check if some name is valid for unused parameter, "^_$" by default allowRegex *regexp.Regexp failureMsg string - sync.Mutex + + configureOnce sync.Once } func (r *UnusedParamRule) configure(args lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.configured { - return - } - r.configured = true - // while by default args is an array, i think it's good to provide structures inside it by default, not arrays or primitives // it's more compatible to JSON nature of configurations var allowedRegexStr string @@ -58,7 +50,7 @@ func (r *UnusedParamRule) configure(args lint.Arguments) { // Apply applies the rule to given file. func (r *UnusedParamRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configure(args) + r.configureOnce.Do(func() { r.configure(args) }) var failures []lint.Failure onFailure := func(failure lint.Failure) { diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/unused-receiver.go b/hack/tools/vendor/github.com/mgechev/revive/rule/unused_receiver.go similarity index 96% rename from hack/tools/vendor/github.com/mgechev/revive/rule/unused-receiver.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/unused_receiver.go index 715dba3383..131aae5fb7 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/unused-receiver.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/unused_receiver.go @@ -11,22 +11,14 @@ import ( // UnusedReceiverRule lints unused params in functions. 
type UnusedReceiverRule struct { - configured bool // regex to check if some name is valid for unused parameter, "^_$" by default allowRegex *regexp.Regexp failureMsg string - sync.Mutex + + configureOnce sync.Once } func (r *UnusedReceiverRule) configure(args lint.Arguments) { - r.Lock() - defer r.Unlock() - - if r.configured { - return - } - r.configured = true - // while by default args is an array, i think it's good to provide structures inside it by default, not arrays or primitives // it's more compatible to JSON nature of configurations var allowedRegexStr string @@ -57,7 +49,7 @@ func (r *UnusedReceiverRule) configure(args lint.Arguments) { // Apply applies the rule to given file. func (r *UnusedReceiverRule) Apply(file *lint.File, args lint.Arguments) []lint.Failure { - r.configure(args) + r.configureOnce.Do(func() { r.configure(args) }) var failures []lint.Failure onFailure := func(failure lint.Failure) { diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/use-any.go b/hack/tools/vendor/github.com/mgechev/revive/rule/use_any.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/use-any.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/use_any.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/useless-break.go b/hack/tools/vendor/github.com/mgechev/revive/rule/useless_break.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/useless-break.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/useless_break.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/var-declarations.go b/hack/tools/vendor/github.com/mgechev/revive/rule/var_declarations.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/var-declarations.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/var_declarations.go diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/var-naming.go b/hack/tools/vendor/github.com/mgechev/revive/rule/var_naming.go similarity index 98% rename from hack/tools/vendor/github.com/mgechev/revive/rule/var-naming.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/var_naming.go index 5a4d0dc24b..2c2198dbda 100644 --- a/hack/tools/vendor/github.com/mgechev/revive/rule/var-naming.go +++ b/hack/tools/vendor/github.com/mgechev/revive/rule/var_naming.go @@ -18,22 +18,15 @@ var upperCaseConstRE = regexp.MustCompile(`^_?[A-Z][A-Z\d]*(_[A-Z\d]+)*$`) // VarNamingRule lints given else constructs. type VarNamingRule struct { - configured bool allowList []string blockList []string allowUpperCaseConst bool // if true - allows to use UPPER_SOME_NAMES for constants skipPackageNameChecks bool - sync.Mutex + + configureOnce sync.Once } func (r *VarNamingRule) configure(arguments lint.Arguments) { - r.Lock() - defer r.Unlock() - if r.configured { - return - } - - r.configured = true if len(arguments) >= 1 { r.allowList = getList(arguments[0], "allowlist") } @@ -83,7 +76,7 @@ func (r *VarNamingRule) applyPackageCheckRules(walker *lintNames) { // Apply applies the rule to given file. 
func (r *VarNamingRule) Apply(file *lint.File, arguments lint.Arguments) []lint.Failure { - r.configure(arguments) + r.configureOnce.Do(func() { r.configure(arguments) }) var failures []lint.Failure
diff --git a/hack/tools/vendor/github.com/mgechev/revive/rule/waitgroup-by-value.go b/hack/tools/vendor/github.com/mgechev/revive/rule/waitgroup_by_value.go similarity index 100% rename from hack/tools/vendor/github.com/mgechev/revive/rule/waitgroup-by-value.go rename to hack/tools/vendor/github.com/mgechev/revive/rule/waitgroup_by_value.go
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/.golangci.yml b/hack/tools/vendor/github.com/mikefarah/yq/v4/.golangci.yml index 1476b6cc02..c108f703d6 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/.golangci.yml +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/.golangci.yml @@ -10,7 +10,9 @@ linters: - gofmt - goimports - gosec - - megacheck + - gosimple + - staticcheck + - unused - misspell - nakedret - nolintlint @@ -31,13 +33,6 @@ linters-settings: desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" issues: exclude-rules: - - linters: - - gosec - text: "Implicit memory aliasing in for loop." - path: _test\.go - - linters: - - revive - text: "unexported-return" - linters: - revive text: "var-naming"
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/.goreleaser.yaml b/hack/tools/vendor/github.com/mikefarah/yq/v4/.goreleaser.yaml new file mode 100644 index 0000000000..5ce0161730 --- /dev/null +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/.goreleaser.yaml @@ -0,0 +1,46 @@ +version: 2 + +dist: build + +builds: + - id: yq + + binary: yq_{{ .Os }}_{{ .Arch }} + + ldflags: + - -s -w + + env: + - CGO_ENABLED=0 + + targets: + - darwin_amd64 + - darwin_arm64 + - freebsd_386 + - freebsd_amd64 + - freebsd_arm + - linux_386 + - linux_amd64 + - linux_arm + - linux_arm64 + - linux_mips + - linux_mips64 + - linux_mips64le + - linux_mipsle + - linux_ppc64 + - linux_ppc64le + - linux_riscv64 + - linux_s390x + - netbsd_386 + - netbsd_amd64 + - netbsd_arm + - openbsd_386 + - openbsd_amd64 + - windows_386 + - windows_amd64 + + no_unique_dist_dir: true + +release: + disable: true + skip_upload: true
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/Dockerfile b/hack/tools/vendor/github.com/mikefarah/yq/v4/Dockerfile index ad8080a4dd..4204252fec 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/Dockerfile +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.4 as builder +FROM golang:1.23.3 as builder WORKDIR /go/src/mikefarah/yq
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/Dockerfile.dev b/hack/tools/vendor/github.com/mikefarah/yq/v4/Dockerfile.dev index 9805cc0b93..5dc23d941b 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/Dockerfile.dev +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/Dockerfile.dev @@ -1,4 +1,4 @@ -FROM golang:1.22.4 +FROM golang:1.23.3 RUN apt-get update && \ apt-get install -y npm && \
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/Makefile b/hack/tools/vendor/github.com/mikefarah/yq/v4/Makefile index 42c11fb2cd..e50cdd4870 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/Makefile +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/Makefile @@ -2,7 +2,7 @@ MAKEFLAGS += --warn-undefined-variables SHELL := /bin/bash .SHELLFLAGS := -o pipefail -euc .DEFAULT_GOAL := install -ENGINE := $(shell { command -v podman || command -v docker; } 2>/dev/null) +ENGINE := $(shell { (podman version > /dev/null 2>&1 && command -v podman) || command -v docker; } 2>/dev/null) include Makefile.variables
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/README.md b/hack/tools/vendor/github.com/mikefarah/yq/v4/README.md index f339b6cf44..582ab1556a 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/README.md +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/README.md @@ -241,6 +241,14 @@ As these are supported by the community :heart: - however, they may be out of da _Please note that the Debian package (previously supported by @rmescandon) is no longer maintained. Please use an alternative installation method._ +### X-CMD +Checkout `yq` on x-cmd: https://x-cmd.com/mod/yq + +- Instant Results: See the output of your yq filter in real-time. +- Error Handling: Encounter a syntax error? It will display the error message and the results of the closest valid filter + +Thanks @edwinjhlee! + ### Nix ``` @@ -308,6 +316,13 @@ apk add yq Supported by Tuan Hoang (https://pkgs.alpinelinux.org/packages?name=yq-go) +### Flox: + +Flox can be used to install yq on Linux, MacOS, and Windows through WSL. + +``` +flox install yq +``` ## Features - [Detailed documentation with many examples](https://mikefarah.gitbook.io/yq/)
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/constant.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/constant.go index afc691236e..762897d0e3 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/constant.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/constant.go @@ -12,9 +12,6 @@ var outputFormat = "" var inputFormat = "" var exitStatus = false -var forceColor = false -var forceNoColor = false -var colorsEnabled = false var indent = 2 var noDocSeparators = false var nullInput = false @@ -23,6 +20,10 @@ var verbose = false var version = false var prettyPrint = false +var forceColor = false +var forceNoColor = false +var colorsEnabled = false + // can be either "" (off), "extract" or "process" var frontMatter = ""
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/evaluate_all_command.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/evaluate_all_command.go index c273cd6c55..ea82abef03 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/evaluate_all_command.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/evaluate_all_command.go @@ -12,7 +12,7 @@ func createEvaluateAllCommand() *cobra.Command { Use: "eval-all [expression] [yaml_file1]...", Aliases: []string{"ea"}, Short: "Loads _all_ yaml documents of _all_ yaml files and runs expression once", - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ValidArgsFunction: func(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return nil, cobra.ShellCompDirectiveNoFileComp }
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/evaluate_sequence_command.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/evaluate_sequence_command.go index e5641065a2..e722415e49 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/evaluate_sequence_command.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/evaluate_sequence_command.go @@ -13,7 +13,7 @@ func createEvaluateSequenceCommand() *cobra.Command { Use: "eval [expression] [yaml_file1]...", Aliases: []string{"e"}, Short: "(default) Apply the expression to each document in each yaml file in sequence", - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ValidArgsFunction: func(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) { if len(args) == 0 { return nil, cobra.ShellCompDirectiveNoFileComp }
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/root.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/root.go index 071c679ea7..80ebe8e122 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/root.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/root.go @@ -66,14 +66,19 @@ yq -P -oy sample.json return evaluateSequence(cmd, args) }, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { cmd.SetOut(cmd.OutOrStdout()) level := logging.WARNING stringFormat := `[%{level}] %{color}%{time:15:04:05}%{color:reset} %{message}` - if verbose { + if verbose && forceNoColor { + level = logging.DEBUG + stringFormat = `[%{level:5.5s}] %{time:15:04:05} %{shortfile:-33s} %{shortfunc:-25s} %{message}` + } else if verbose { level = logging.DEBUG stringFormat = `[%{level:5.5s}] %{color}%{time:15:04:05}%{color:bold} %{shortfile:-33s} %{shortfunc:-25s}%{color:reset} %{message}` + } else if forceNoColor { + stringFormat = `[%{level}] %{time:15:04:05} %{message}` } var format = logging.MustStringFormatter(stringFormat) @@ -85,6 +90,10 @@ yq -P -oy sample.json logging.SetBackend(backend) yqlib.InitExpressionParser() + // when NO_COLOR environment variable presents and not an empty string the coloured output should be disabled; + // refer to no-color.org + forceNoColor = os.Getenv("NO_COLOR") != "" + return nil }, } @@ -177,7 +186,7 @@ yq -P -oy sample.json rootCmd.PersistentFlags().BoolVarP(&exitStatus, "exit-status", "e", false, "set exit status if there are no matches or null or false is returned") rootCmd.PersistentFlags().BoolVarP(&forceColor, "colors", "C", false, "force print with colors") - rootCmd.PersistentFlags().BoolVarP(&forceNoColor, "no-colors", "M", false, "force print with no colors") + rootCmd.PersistentFlags().BoolVarP(&forceNoColor, "no-colors", "M", forceNoColor, "force print with no colors") rootCmd.PersistentFlags().StringVarP(&frontMatter, "front-matter", "f", "", "(extract|process) first input as yaml front-matter. Extract will pull out the yaml content, process will run the expression against the yaml content, leaving the remaining data intact") if err = rootCmd.RegisterFlagCompletionFunc("front-matter", cobra.FixedCompletions([]string{"extract", "process"}, cobra.ShellCompDirectiveNoFileComp)); err != nil { panic(err)
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/version.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/version.go index 979eba701f..4a5c4cd7fb 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/version.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/cmd/version.go @@ -11,7 +11,7 @@ var ( GitDescribe string // Version is main version number that is being run at the moment. - Version = "v4.44.2" + Version = "v4.44.5" // VersionPrerelease is a pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/color_print.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/color_print.go index 1128e8513f..51c0a45dc2 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/color_print.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/color_print.go @@ -56,6 +56,12 @@ func colorizeAndPrint(yamlBytes []byte, writer io.Writer) error { Suffix: format(color.Reset), } } + p.Comment = func() *printer.Property { + return &printer.Property{ + Prefix: format(color.FgHiBlack), + Suffix: format(color.Reset), + } + } _, err := writer.Write([]byte(p.PrintTokens(tokens) + "\n")) return err }
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/decoder_base64.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/decoder_base64.go index 50ed350735..b5e9681e9b 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/decoder_base64.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/decoder_base64.go @@ -8,20 +8,20 @@ import ( ) type base64Padder struct { - count uint64 + count int io.Reader } func (c *base64Padder) pad(buf []byte) (int, error) { - pad := strings.Repeat("=", int(4-c.count%4)) + pad := strings.Repeat("=", (4 - c.count%4)) n, err := strings.NewReader(pad).Read(buf) - c.count += uint64(n) + c.count += n return n, err } func (c *base64Padder) Read(buf []byte) (int, error) { n, err := c.Reader.Read(buf) - c.count += uint64(n) + c.count += n if err == io.EOF && c.count%4 != 0 { return c.pad(buf)
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/decoder_csv_object.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/decoder_csv_object.go index 6273fcbb92..21eb136987 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/decoder_csv_object.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/decoder_csv_object.go @@ -31,7 +31,7 @@ func (dec *csvObjectDecoder) convertToNode(content string) *CandidateNode { node, err := parseSnippet(content) // if we're not auto-parsing, then we wont put in parsed objects or arrays // but we still parse scalars - if err != nil || (!dec.prefs.AutoParse && node.Kind != ScalarNode) { + if err != nil || (!dec.prefs.AutoParse && (node.Kind != ScalarNode || node.Value != content)) { return createScalarNode(content, content) } return node
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/decoder_toml.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/decoder_toml.go index d8a6264dd7..ca193cf19b 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/decoder_toml.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/decoder_toml.go @@ -286,7 +286,7 @@ func (dec *tomlDecoder) processTable(currentNode *toml.Node) (bool, error) { } runAgainstCurrentExp, err = dec.decodeKeyValuesIntoMap(tableNodeValue, tableValue) - if err != nil && !errors.Is(io.EOF, err) { + if err != nil && !errors.Is(err, io.EOF) { return false, err } } @@ -343,7 +343,7 @@ func (dec *tomlDecoder) processArrayTable(currentNode *toml.Node) (bool, error) tableValue := dec.parser.Expression() runAgainstCurrentExp, err := dec.decodeKeyValuesIntoMap(tableNodeValue, tableValue) log.Debugf("table node err: %w", err) - if err != nil && !errors.Is(io.EOF, err) { + if err != nil && !errors.Is(err, io.EOF) { return false, err } c := Context{}
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/encoder_yaml.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/encoder_yaml.go index 5cce044c17..f198c05257 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/encoder_yaml.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/encoder_yaml.go @@ -8,7 +8,8 @@ import ( "regexp" "strings" - yaml "gopkg.in/yaml.v3" + "github.com/fatih/color" + "gopkg.in/yaml.v3" ) type yamlEncoder struct { @@ -54,6 +55,9 @@ func (ye *yamlEncoder) PrintLeadingContent(writer io.Writer, content string) err if len(readline) > 0 && readline != "\n" && readline[0] != '%' && !commentLineRegEx.MatchString(readline) { readline = "# " + readline } + if ye.prefs.ColorsEnabled && strings.TrimSpace(readline) != "" { + readline = format(color.FgHiBlack) + readline + format(color.Reset) + } if err := writeString(writer, readline); err != nil { return err }
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/format.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/format.go index 49c47dfd29..144aac08d0 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/format.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/format.go @@ -2,6 +2,7 @@ package yqlib import ( "fmt" + "path/filepath" "strings" ) @@ -107,12 +108,11 @@ func (f *Format) GetConfiguredEncoder() Encoder { } func FormatStringFromFilename(filename string) string { - if filename != "" { - GetLogger().Debugf("checking file extension '%s' for auto format detection", filename) - nPos := strings.LastIndex(filename, ".") - if nPos > -1 { - format := filename[nPos+1:] + GetLogger().Debugf("checking filename '%s' for auto format detection", filename) + ext := filepath.Ext(filename) + if ext != "" && ext[0] == '.' { + format := strings.ToLower(ext[1:]) GetLogger().Debugf("detected format '%s'", format) return format }
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/lexer.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/lexer.go index 904ce7564f..cc6841b3e5 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/lexer.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/lexer.go @@ -126,7 +126,7 @@ func handleToken(tokens []*token, index int, postProcessedTokens []*token) (toke if tokenIsOpType(currentToken, createMapOpType) { log.Debugf("tokenIsOpType: createMapOpType") // check the previous token is '[', means we are slice, but dont have a first number - if tokens[index-1].TokenType == traverseArrayCollect { + if index > 0 && tokens[index-1].TokenType == traverseArrayCollect { log.Debugf("previous token is : traverseArrayOpType") // need to put the number 0 before this token, as that is implied postProcessedTokens = append(postProcessedTokens, &token{TokenType: operationToken, Operation: createValueOperation(0, "0")})
diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/lexer_participle.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/lexer_participle.go index 1dc61741d6..870b39cf16 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/lexer_participle.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/lexer_participle.go @@ -311,7 +311,7 @@ func opTokenWithPrefs(opType *operationType, assignOpType *operationType, prefer } func expressionOpToken(expression string) yqAction { - return func(rawToken lexer.Token) (*token, error) { + return func(_ lexer.Token) (*token, error) { prefs := expressionOpPreferences{expression: expression} expressionOp := &Operation{OperationType: expressionOpType,
Preferences: prefs} return &token{TokenType: operationToken, Operation: expressionOp}, nil @@ -517,15 +517,15 @@ func parentWithLevel() yqAction { prefs := parentOpPreferences{Level: level} op := &Operation{OperationType: getParentOpType, Value: getParentOpType.Type, StringValue: value, Preferences: prefs} - return &token{TokenType: operationToken, Operation: op}, nil + return &token{TokenType: operationToken, Operation: op, CheckForPostTraverse: true}, nil } } func parentWithDefaultLevel() yqAction { - return func(rawToken lexer.Token) (*token, error) { + return func(_ lexer.Token) (*token, error) { prefs := parentOpPreferences{Level: 1} op := &Operation{OperationType: getParentOpType, Value: getParentOpType.Type, StringValue: getParentOpType.Type, Preferences: prefs} - return &token{TokenType: operationToken, Operation: op}, nil + return &token{TokenType: operationToken, Operation: op, CheckForPostTraverse: true}, nil } } diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/lib.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/lib.go index d0c81cbc0e..155619307b 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/lib.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/lib.go @@ -95,10 +95,20 @@ func parseSnippet(value string) (*CandidateNode, error) { if err != nil { return nil, err } + + if result.Kind == ScalarNode { + result.LineComment = result.LeadingContent + } else { + result.HeadComment = result.LeadingContent + } + result.LeadingContent = "" + if result.Tag == "!!str" { // use the original string value, as // decoding drops new lines - return createScalarNode(value, value), nil + newNode := createScalarNode(value, value) + newNode.LineComment = result.LineComment + return newNode, nil } result.Line = 0 result.Column = 0 diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_assign.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_assign.go index 834b9f9e10..c9ed2f5b6f 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_assign.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_assign.go @@ -7,7 +7,7 @@ type assignPreferences struct { } func assignUpdateFunc(prefs assignPreferences) crossFunctionCalculation { - return func(d *dataTreeNavigator, context Context, lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, error) { + return func(_ *dataTreeNavigator, _ Context, lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, error) { if !prefs.OnlyWriteNull || lhs.Tag == "!!null" { lhs.UpdateFrom(rhs, prefs) } diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_compare.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_compare.go index 44febf224c..76a571fb17 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_compare.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_compare.go @@ -18,7 +18,7 @@ func compareOperator(d *dataTreeNavigator, context Context, expressionNode *Expr } func compare(prefs compareTypePref) func(d *dataTreeNavigator, context Context, lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, error) { - return func(d *dataTreeNavigator, context Context, lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, error) { + return func(_ *dataTreeNavigator, context Context, lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, error) { log.Debugf("compare cross function") if lhs == nil && rhs == nil { owner := &CandidateNode{} diff 
--git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_create_map.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_create_map.go index a7182fc8a5..632473422c 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_create_map.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_create_map.go @@ -53,7 +53,7 @@ func sequenceFor(d *dataTreeNavigator, context Context, matchingNode *CandidateN log.Debugf("**********sequenceFor %v", NodeToString(matchingNode)) mapPairs, err := crossFunction(d, context.ChildContext(matches), expressionNode, - func(d *dataTreeNavigator, context Context, lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, error) { + func(_ *dataTreeNavigator, _ Context, lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, error) { node := &CandidateNode{Kind: MappingNode, Tag: "!!map"} log.Debugf("**********adding key %v and value %v", NodeToString(lhs), NodeToString(rhs)) diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_equals.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_equals.go index 087f595706..4bcf28dd36 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_equals.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_equals.go @@ -6,7 +6,7 @@ func equalsOperator(d *dataTreeNavigator, context Context, expressionNode *Expre } func isEquals(flip bool) func(d *dataTreeNavigator, context Context, lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, error) { - return func(d *dataTreeNavigator, context Context, lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, error) { + return func(_ *dataTreeNavigator, _ Context, lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, error) { value := false log.Debugf("isEquals cross function") if lhs == nil && rhs == nil { diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_error.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_error.go index 1a2283de87..d6a2b67fed 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_error.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_error.go @@ -1,7 +1,7 @@ package yqlib import ( - "fmt" + "errors" ) func errorOperator(d *dataTreeNavigator, context Context, expressionNode *ExpressionNode) (Context, error) { @@ -16,5 +16,5 @@ func errorOperator(d *dataTreeNavigator, context Context, expressionNode *Expres if rhs.MatchingNodes.Len() > 0 { errorMessage = rhs.MatchingNodes.Front().Value.(*CandidateNode).Value } - return Context{}, fmt.Errorf(errorMessage) + return Context{}, errors.New(errorMessage) } diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_sort.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_sort.go index 106b17ff49..942ef46a8c 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_sort.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_sort.go @@ -69,6 +69,7 @@ func (a sortableNodeArray) Less(i, j int) bool { rhsContext := a[j].CompareContext rhsEl := rhsContext.MatchingNodes.Front() + for lhsEl := lhsContext.MatchingNodes.Front(); lhsEl != nil && rhsEl != nil; lhsEl = lhsEl.Next() { lhs := lhsEl.Value.(*CandidateNode) rhs := rhsEl.Value.(*CandidateNode) @@ -83,7 +84,7 @@ func (a sortableNodeArray) Less(i, j int) bool { rhsEl = rhsEl.Next() } - return false + return lhsContext.MatchingNodes.Len() < rhsContext.MatchingNodes.Len() 
} func (a sortableNodeArray) compare(lhs *CandidateNode, rhs *CandidateNode, dateTimeLayout string) int { diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_strings.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_strings.go index 133b55f97a..c9bb9e4a6a 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_strings.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_strings.go @@ -102,7 +102,8 @@ func interpolate(d *dataTreeNavigator, context Context, str string) (string, err } } if inExpression { - return "", fmt.Errorf("unclosed interpolation string \\(") + log.Warning("unclosed interpolation string, skipping interpolation") + return str, nil } return sb.String(), nil } diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_subtract.go b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_subtract.go index f872e7c350..ab23bcee76 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_subtract.go +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/pkg/yqlib/operator_subtract.go @@ -23,7 +23,7 @@ func subtractOperator(d *dataTreeNavigator, context Context, expressionNode *Exp return crossFunction(d, context.ReadOnlyClone(), expressionNode, subtract, false) } -func subtractArray(lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, error) { +func subtractArray(lhs *CandidateNode, rhs *CandidateNode) []*CandidateNode { newLHSArray := make([]*CandidateNode, 0) for lindex := 0; lindex < len(lhs.Content); lindex = lindex + 1 { @@ -37,9 +37,7 @@ func subtractArray(lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, erro newLHSArray = append(newLHSArray, lhs.Content[lindex]) } } - // removing children from LHS, parent hasn't changed - lhs.Content = newLHSArray - return lhs, nil + return newLHSArray } func subtract(_ *dataTreeNavigator, context Context, lhs *CandidateNode, rhs *CandidateNode) (*CandidateNode, error) { @@ -56,7 +54,7 @@ func subtract(_ *dataTreeNavigator, context Context, lhs *CandidateNode, rhs *Ca if rhs.Kind != SequenceNode { return nil, fmt.Errorf("%v (%v) cannot be subtracted from %v", rhs.Tag, rhs.GetNicePath(), lhs.Tag) } - return subtractArray(lhs, rhs) + target.Content = subtractArray(lhs, rhs) case ScalarNode: if rhs.Kind != ScalarNode { return nil, fmt.Errorf("%v (%v) cannot be subtracted from %v", rhs.Tag, rhs.GetNicePath(), lhs.Tag) diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/project-words.txt b/hack/tools/vendor/github.com/mikefarah/yq/v4/project-words.txt index 39080c113e..808da2146f 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/project-words.txt +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/project-words.txt @@ -16,6 +16,7 @@ axbxcxdxe axbxcxdxexxx bananabanana barp +nbaz bitnami blarp blddir @@ -89,6 +90,8 @@ goccy gofmt gogo golangci +goreleaser +GORELEASER GOMODCACHE GOPATH gosec diff --git a/hack/tools/vendor/github.com/mikefarah/yq/v4/release_notes.txt b/hack/tools/vendor/github.com/mikefarah/yq/v4/release_notes.txt index 44c637516e..a8700f1783 100644 --- a/hack/tools/vendor/github.com/mikefarah/yq/v4/release_notes.txt +++ b/hack/tools/vendor/github.com/mikefarah/yq/v4/release_notes.txt @@ -1,3 +1,19 @@ +4.44.4: +- Format comments with a gray foreground (Thanks @gabe565) +- Fixed handling of nulls with sort_by expressions #2164 +- Force no color output when NO_COLOR env presents (Thanks @narqo) +- Fixed array subtraction update bug #2159 +- Fixed index out of range error +- Can 
traverse straight from parent operator (parent.blah) +- Bumped dependencies + +4.44.3: + - Fixed upper-case file extension detection, Thanks @ryenus (#2121) + - Log printing follow no-colors flag #2082 + - Skip and warn when interpolating strings and theres a unclosed bracket #2083 + - Fixed CSV content starting with # issue #2076 + - Bumped dependencies + 4.44.2: - Handle numbers with underscores #2039 - Unique now works on maps and arrays #2068 diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/README.md b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/README.md index 536a65e7be..83c436359f 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/README.md +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/README.md @@ -180,7 +180,7 @@ var _ = Describe("checking something", Focus, func() { These container, or the `Focus` spec, must not be part of the final source code, and should only be used locally by the developer. -***This rule is disabled by default***. Use the `--forbid-focus-container=true` command line flag to enable it. +***This rule is disabled by default***. Use the `--forbid-focus-container` command line flag to enable it. ### Comparing values from different types [BUG] @@ -189,7 +189,7 @@ The `Equal` and the `BeIdentical` matchers also check the type, not only the val The following code will fail in runtime: ```go x := 5 // x is int -Expect(x).Should(Eqaul(uint(5)) // x and uint(5) are with different +Expect(x).Should(Equal(uint(5)) // x and uint(5) are with different ``` When using negative checks, it's even worse, because we get a false positive: ``` @@ -202,7 +202,7 @@ using casting, or use the `BeEquivalentTo` matcher. The linter can't guess what is the best solution in each case, and so it won't auto-fix this warning. -To suppress this warning entirely, use the `--suppress-type-compare-assertion=true` command line parameter. +To suppress this warning entirely, use the `--suppress-type-compare-assertion` command line parameter. To suppress a specific file or line, use the `// ginkgo-linter:ignore-type-compare-warning` comment (see [below](#suppress-warning-from-the-code)) @@ -234,7 +234,7 @@ flag **is** set. ***Note***: This rule work with best-effort approach. It can't find many cases, like const defined not in the same package, or when using variables. -The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Constanly` functions, or +The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Consistently` functions, or using the `WithTimeout` or , `Within` methods (timeout), and `WithPolling` or `ProbeEvery` methods (polling). This rule checks if the async (`Eventually` or `Consistently`) timeout duration, is not shorter than the polling interval. @@ -249,7 +249,7 @@ This will probably happen when using the old format: Eventually(aFunc, 500 * time.Millisecond /*timeout*/, 10 * time.Second /*polling*/).Should(Succeed()) ``` -### Correct usage of the `Succeed()` matcher [Bug] +### Prevent Wrong Actual Values with the Succeed() matcher [Bug] The `Succeed()` matcher only accepts a single error value. this rule validates that. For example: @@ -271,8 +271,10 @@ a Gomega object as their first parameter, and returns nothing, e.g. this is a va }).WithTimeout(10 * time.Millisecond).WithPolling(time.Millisecond).Should(Succeed()) ``` +***Note***: This rule **does not** support auto-fix. 
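For context, the valid async pattern the README hunk above refers to — polling a function that takes a Gomega object and asserting through it — looks like this in a complete spec. This is a minimal sketch, not code from the patch; `fetchValue` is a hypothetical helper added here only so the example compiles:

```go
package mypackage_test

import (
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// fetchValue is a hypothetical stand-in for whatever the test polls.
func fetchValue() (string, error) { return "ready", nil }

var _ = Describe("async assertions with Succeed", func() {
	It("asserts through the Gomega parameter", func() {
		// The polled function receives a Gomega object and performs its
		// assertions through it, so Eventually(...).Should(Succeed())
		// only ever sees error values — the shape this rule validates.
		Eventually(func(g Gomega) {
			v, err := fetchValue()
			g.Expect(err).ToNot(HaveOccurred())
			g.Expect(v).To(Equal("ready"))
		}).WithTimeout(10 * time.Millisecond).WithPolling(time.Millisecond).Should(Succeed())
	})
})
```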
+
 ### Avoid Spec Pollution: Don't Initialize Variables in Container Nodes [BUG/STYLE]:
 
-***Note***: Only applied when the `--forbid-spec-pollution=true` flag is set (disabled by default).
+***Note***: Only applied when the `--forbid-spec-pollution` flag is set (disabled by default).
 
 According to [ginkgo documentation](https://onsi.github.io/ginkgo/#avoid-spec-pollution-dont-initialize-variables-in-container-nodes),
 no variable should be assigned within a container node (`Describe`, `Context`, `When` or their `F`, `P` or `X` forms)
 
@@ -449,13 +451,13 @@ Expect("abc").ShouldNot(BeEmpty()) // => Expect("abc").ToNot(BeEmpty())
 ```
 This rule support auto fixing.
 
-***This rule is disabled by default***. Use the `--force-expect-to=true` command line flag to enable it.
+***This rule is disabled by default***. Use the `--force-expect-to` command line flag to enable it.
 
 ### Async timing interval: multiple timeout or polling intervals [STYLE]
 
 ***Note***: Only applied when the `suppress-async-assertion` flag is **not set** *and* the `validate-async-intervals` flag **is** set.
 
-The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Constanly` functions, or
+The timeout and polling intervals may be passed as optional arguments to the `Eventually` or `Consistently` functions, or
 using the `WithTimeout` or , `Within` methods (timeout), and `WithPolling` or `ProbeEvery` methods (polling).
 
 The linter checks that there is up to one polling argument and up to one timeout argument.
 
@@ -473,7 +475,7 @@ Eventually(aFunc, time.Second*10, time.Millisecond * 500).WithPolling(time.Milli
 
 ***Note***: Only applied when the `suppress-async-assertion` flag is **not set** *and* the `validate-async-intervals` flag **is** set.
 
-gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Constantly):
+gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Consistently):
 * a `time.Duration` value
 * any kind of numeric value (int(8/16/32/64), uint(8/16/32/64) or float(32/64), as the number of seconds.
 * duration string like `"12s"`
@@ -520,18 +522,20 @@ will trigger a warning with a suggestion to replace the mather to
 ```go
 Expect(myErrorFunc()).To(Succeed())
 ```
-***This rule is disabled by default***. Use the `--force-succeed=true` command line flag to enable it.
+***This rule is disabled by default***. Use the `--force-succeed` command line flag to enable it.
+
+***Note***: This rule **does** support auto-fix, when the `--fix` command line parameter is used.
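To make the force-succeed suggestion concrete, here is a before/after sketch in the README's own fragment style. It is an illustration, not text from the patch; `doSomething` is a hypothetical `func() error`:

```go
// Flagged when --force-succeed is enabled: an error-returning function
// asserted with the HaveOccurred matcher.
Expect(doSomething()).ToNot(HaveOccurred())

// The suggested (auto-fixable) replacement uses the Succeed matcher,
// which reads as "the call succeeded".
Expect(doSomething()).To(Succeed())
```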
## Suppress the linter ### Suppress warning from command line -* Use the `--suppress-len-assertion=true` flag to suppress the wrong length and cap assertions warning -* Use the `--suppress-nil-assertion=true` flag to suppress the wrong nil assertion warning -* Use the `--suppress-err-assertion=true` flag to suppress the wrong error assertion warning -* Use the `--suppress-compare-assertion=true` flag to suppress the wrong comparison assertion warning -* Use the `--suppress-async-assertion=true` flag to suppress the function call in async assertion warning -* Use the `--forbid-focus-container=true` flag to activate the focused container assertion (deactivated by default) -* Use the `--suppress-type-compare-assertion=true` to suppress the type compare assertion warning -* Use the `--allow-havelen-0=true` flag to avoid warnings about `HaveLen(0)`; Note: this parameter is only supported from +* Use the `--suppress-len-assertion` flag to suppress the wrong length and cap assertions warning +* Use the `--suppress-nil-assertion` flag to suppress the wrong nil assertion warning +* Use the `--suppress-err-assertion` flag to suppress the wrong error assertion warning +* Use the `--suppress-compare-assertion` flag to suppress the wrong comparison assertion warning +* Use the `--suppress-async-assertion` flag to suppress the function call in async assertion warning +* Use the `--forbid-focus-container` flag to activate the focused container assertion (deactivated by default) +* Use the `--suppress-type-compare-assertion` to suppress the type compare assertion warning +* Use the `--allow-havelen-0` flag to avoid warnings about `HaveLen(0)`; Note: this parameter is only supported from command line, and not from a comment. ### Suppress warning from the code @@ -555,7 +559,7 @@ To suppress the wrong async assertion warning, add a comment with (only) `ginkgo-linter:ignore-async-assert-warning`. -To supress the focus container warning, add a comment with (only) +To suppress the focus container warning, add a comment with (only) `ginkgo-linter:ignore-focus-container-warning` @@ -568,10 +572,10 @@ Notice that this comment will not work for an anonymous variable container like // ginkgo-linter:ignore-focus-container-warning (not working!!) var _ = FDescribe(...) ``` -In this case, use the file comment (see bellow). +In this case, use the file comment (see below). There are two options to use these comments: -1. If the comment is at the top of the file, supress the warning for the whole file; e.g.: +1. 
If the comment is at the top of the file, suppress the warning for the whole file; e.g.: ```go package mypackage diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go index dbc39aba5e..ac762cd9b6 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/analyzer.go @@ -37,21 +37,19 @@ func NewAnalyzer() *analysis.Analyzer { a := NewAnalyzerWithConfig(config) - var ignored bool a.Flags.Init("ginkgolinter", flag.ExitOnError) - a.Flags.Var(&config.SuppressLen, "suppress-len-assertion", "Suppress warning for wrong length assertions") - a.Flags.Var(&config.SuppressNil, "suppress-nil-assertion", "Suppress warning for wrong nil assertions") - a.Flags.Var(&config.SuppressErr, "suppress-err-assertion", "Suppress warning for wrong error assertions") - a.Flags.Var(&config.SuppressCompare, "suppress-compare-assertion", "Suppress warning for wrong comparison assertions") - a.Flags.Var(&config.SuppressAsync, "suppress-async-assertion", "Suppress warning for function call in async assertion, like Eventually") - a.Flags.Var(&config.ValidateAsyncIntervals, "validate-async-intervals", "best effort validation of async intervals (timeout and polling); ignored the suppress-async-assertion flag is true") - a.Flags.Var(&config.SuppressTypeCompare, "suppress-type-compare-assertion", "Suppress warning for comparing values from different types, like int32 and uint32") - a.Flags.Var(&config.AllowHaveLen0, "allow-havelen-0", "Do not warn for HaveLen(0); default = false") - a.Flags.Var(&config.ForceExpectTo, "force-expect-to", "force using `Expect` with `To`, `ToNot` or `NotTo`. reject using `Expect` with `Should` or `ShouldNot`; default = false (not forced)") - a.Flags.BoolVar(&ignored, "suppress-focus-container", true, "Suppress warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt. 
Deprecated and ignored: use --forbid-focus-container instead") - a.Flags.Var(&config.ForbidFocus, "forbid-focus-container", "trigger a warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt; default = false.") - a.Flags.Var(&config.ForbidSpecPollution, "forbid-spec-pollution", "trigger a warning for variable assignments in ginkgo containers like Describe, Context and When, instead of in BeforeEach(); default = false.") - a.Flags.Var(&config.ForceSucceedForFuncs, "force-succeed", "force using the Succeed matcher for error functions, and the HaveOccurred matcher for non-function error values") + a.Flags.BoolVar(&config.SuppressLen, "suppress-len-assertion", config.SuppressLen, "Suppress warning for wrong length assertions") + a.Flags.BoolVar(&config.SuppressNil, "suppress-nil-assertion", config.SuppressNil, "Suppress warning for wrong nil assertions") + a.Flags.BoolVar(&config.SuppressErr, "suppress-err-assertion", config.SuppressErr, "Suppress warning for wrong error assertions") + a.Flags.BoolVar(&config.SuppressCompare, "suppress-compare-assertion", config.SuppressCompare, "Suppress warning for wrong comparison assertions") + a.Flags.BoolVar(&config.SuppressAsync, "suppress-async-assertion", config.SuppressAsync, "Suppress warning for function call in async assertion, like Eventually") + a.Flags.BoolVar(&config.ValidateAsyncIntervals, "validate-async-intervals", config.ValidateAsyncIntervals, "best effort validation of async intervals (timeout and polling); ignored the suppress-async-assertion flag is true") + a.Flags.BoolVar(&config.SuppressTypeCompare, "suppress-type-compare-assertion", config.SuppressTypeCompare, "Suppress warning for comparing values from different types, like int32 and uint32") + a.Flags.BoolVar(&config.AllowHaveLen0, "allow-havelen-0", config.AllowHaveLen0, "Do not warn for HaveLen(0); default = false") + a.Flags.BoolVar(&config.ForceExpectTo, "force-expect-to", config.ForceExpectTo, "force using `Expect` with `To`, `ToNot` or `NotTo`. 
reject using `Expect` with `Should` or `ShouldNot`; default = false (not forced)") + a.Flags.BoolVar(&config.ForbidFocus, "forbid-focus-container", config.ForbidFocus, "trigger a warning for ginkgo focus containers like FDescribe, FContext, FWhen or FIt; default = false.") + a.Flags.BoolVar(&config.ForbidSpecPollution, "forbid-spec-pollution", config.ForbidSpecPollution, "trigger a warning for variable assignments in ginkgo containers like Describe, Context and When, instead of in BeforeEach(); default = false.") + a.Flags.BoolVar(&config.ForceSucceedForFuncs, "force-succeed", config.ForceSucceedForFuncs, "force using the Succeed matcher for error functions, and the HaveOccurred matcher for non-function error values") return a } diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/doc.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/doc.go index c07b6a3164..2a935e9b34 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/doc.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/doc.go @@ -94,7 +94,7 @@ For example: Eventually(func() bool { return true }, time.Second*10, 500*time.Millisecond).ProbeEvery(time.Millisecond * 500).Should(BeTrue()) * async timing interval: non-time.Duration intervals [Style] -gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Constantly): +gomega supports a few formats for timeout and polling intervals, when using the old format (the last two parameters of Eventually and Consistently): * time.Duration * any kind of numeric value, as number of seconds * duration string like "12s" diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go index 8e3df5d3fb..5bd6dd6e7e 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actual.go @@ -21,13 +21,8 @@ type Actual struct { actualOffset int } -func New(origExpr, cloneExpr *ast.CallExpr, orig *ast.CallExpr, clone *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Handler, timePkg string) (*Actual, bool) { - funcName, ok := handler.GetActualFuncName(orig) - if !ok { - return nil, false - } - - arg, actualOffset := getActualArgPayload(orig, clone, pass, funcName) +func New(origExpr, cloneExpr *ast.CallExpr, orig *ast.CallExpr, clone *ast.CallExpr, pass *analysis.Pass, timePkg string, info *gomegahandler.GomegaBasicInfo) (*Actual, bool) { + arg, actualOffset := getActualArgPayload(orig, clone, pass, info) if arg == nil { return nil, false } @@ -45,7 +40,7 @@ func New(origExpr, cloneExpr *ast.CallExpr, orig *ast.CallExpr, clone *ast.CallE isTuple = tpl.Len() > 1 } - isAsyncExpr := gomegainfo.IsAsyncActualMethod(funcName) + isAsyncExpr := gomegainfo.IsAsyncActualMethod(info.MethodName) var asyncArg *AsyncArg if isAsyncExpr { diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go index 9d251c4680..5b6cfbbc44 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go @@ -8,6 +8,7 @@ import ( "golang.org/x/tools/go/analysis" 
"github.com/nunnatsa/ginkgolinter/internal/expression/value" + "github.com/nunnatsa/ginkgolinter/internal/gomegahandler" "github.com/nunnatsa/ginkgolinter/internal/gomegainfo" "github.com/nunnatsa/ginkgolinter/internal/reverseassertion" ) @@ -28,6 +29,7 @@ const ( ErrFuncActualArgType GomegaParamArgType MultiRetsArgType + ErrorMethodArgType ErrorTypeArgType @@ -39,15 +41,17 @@ func (a ArgType) Is(val ArgType) bool { return a&val != 0 } -func getActualArgPayload(origActualExpr, actualExprClone *ast.CallExpr, pass *analysis.Pass, actualMethodName string) (ArgPayload, int) { - origArgExpr, argExprClone, actualOffset, isGomegaExpr := getActualArg(origActualExpr, actualExprClone, actualMethodName, pass) +func getActualArgPayload(origActualExpr, actualExprClone *ast.CallExpr, pass *analysis.Pass, info *gomegahandler.GomegaBasicInfo) (ArgPayload, int) { + origArgExpr, argExprClone, actualOffset, isGomegaExpr := getActualArg(origActualExpr, actualExprClone, info.MethodName, pass) if !isGomegaExpr { return nil, 0 } var arg ArgPayload - if value.IsExprError(pass, origArgExpr) { + if info.HasErrorMethod { + arg = &ErrorMethodPayload{} + } else if value.IsExprError(pass, origArgExpr) { arg = newErrPayload(origArgExpr, argExprClone, pass) } else { switch expr := origArgExpr.(type) { @@ -56,12 +60,6 @@ func getActualArgPayload(origActualExpr, actualExprClone *ast.CallExpr, pass *an case *ast.BinaryExpr: arg = parseBinaryExpr(expr, argExprClone.(*ast.BinaryExpr), pass) - - default: - t := pass.TypesInfo.TypeOf(origArgExpr) - if sig, ok := t.(*gotypes.Signature); ok { - arg = getAsyncFuncArg(sig) - } } } @@ -70,6 +68,14 @@ func getActualArgPayload(origActualExpr, actualExprClone *ast.CallExpr, pass *an return arg, actualOffset } + t := pass.TypesInfo.TypeOf(origArgExpr) + if sig, ok := t.(*gotypes.Signature); ok { + arg = getAsyncFuncArg(sig) + if arg != nil { + return arg, actualOffset + } + } + return newRegularArgPayload(origArgExpr, argExprClone, pass), actualOffset } @@ -181,6 +187,12 @@ func (*ErrPayload) ArgType() ArgType { return ErrActualArgType | ErrorTypeArgType } +type ErrorMethodPayload struct{} + +func (ErrorMethodPayload) ArgType() ArgType { + return ErrorMethodArgType | ErrorTypeArgType +} + func parseBinaryExpr(origActualExpr, argExprClone *ast.BinaryExpr, pass *analysis.Pass) ArgPayload { left, right, op := origActualExpr.X, origActualExpr.Y, origActualExpr.Op replace := false diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go index 976e726fcf..6e8e0db6ac 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/expression.go @@ -27,7 +27,8 @@ type GomegaExpression struct { origAssertionFuncName string actualFuncName string - isAsync bool + isAsync bool + isUsingGomegaVar bool actual *actual.Actual matcher *matcher.Matcher @@ -36,8 +37,8 @@ type GomegaExpression struct { } func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Handler, timePkg string) (*GomegaExpression, bool) { - actualMethodName, ok := handler.GetActualFuncName(origExpr) - if !ok || !gomegainfo.IsActualMethod(actualMethodName) { + info, ok := handler.GetGomegaBasicInfo(origExpr) + if !ok || !gomegainfo.IsActualMethod(info.MethodName) { return nil, false } @@ -45,7 +46,7 @@ func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Hand 
if !ok || !gomegainfo.IsAssertionFunc(origSel.Sel.Name) { return &GomegaExpression{ orig: origExpr, - actualFuncName: actualMethodName, + actualFuncName: info.MethodName, }, true } @@ -62,7 +63,7 @@ func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Hand return nil, false } - actl, ok := actual.New(origExpr, exprClone, origActual, actualClone, pass, handler, timePkg) + actl, ok := actual.New(origExpr, exprClone, origActual, actualClone, pass, timePkg, info) if !ok { return nil, false } @@ -87,9 +88,10 @@ func New(origExpr *ast.CallExpr, pass *analysis.Pass, handler gomegahandler.Hand assertionFuncName: origSel.Sel.Name, origAssertionFuncName: origSel.Sel.Name, - actualFuncName: actualMethodName, + actualFuncName: info.MethodName, - isAsync: actl.IsAsync(), + isAsync: actl.IsAsync(), + isUsingGomegaVar: info.UseGomegaVar, actual: actl, matcher: mtchr, @@ -133,6 +135,10 @@ func (e *GomegaExpression) IsAsync() bool { return e.isAsync } +func (e *GomegaExpression) IsUsingGomegaVar() bool { + return e.isUsingGomegaVar +} + func (e *GomegaExpression) ReverseAssertionFuncLogic() { assertionFunc := e.clone.Fun.(*ast.SelectorExpr).Sel newName := reverseassertion.ChangeAssertionLogic(assertionFunc.Name) diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go index 0969b95519..7a983cc9e8 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/matcher/matcher.go @@ -41,13 +41,13 @@ func New(origMatcher, matcherClone *ast.CallExpr, pass *analysis.Pass, handler g reverse := false var assertFuncName string for { - ok := false - assertFuncName, ok = handler.GetActualFuncName(origMatcher) + info, ok := handler.GetGomegaBasicInfo(origMatcher) if !ok { return nil, false } - if assertFuncName != "Not" { + if info.MethodName != "Not" { + assertFuncName = info.MethodName break } diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go index dda0dd73ba..ba74722d27 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/value/value.go @@ -194,6 +194,10 @@ func IsExprError(pass *analysis.Pass, expr ast.Expr) bool { return interfaces.ImplementsError(actualArgType) case *gotypes.Pointer: + if interfaces.ImplementsError(t) { + return true + } + if tt, ok := t.Elem().(*gotypes.Named); ok { return interfaces.ImplementsError(tt) } diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go index 4b6de57679..322bbc4533 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgohandler/handling.go @@ -20,11 +20,11 @@ const ( func handleGinkgoSpecs(expr ast.Expr, config types.Config, pass *analysis.Pass, ginkgoHndlr Handler) bool { goDeeper := false if exp, ok := expr.(*ast.CallExpr); ok { - if bool(config.ForbidFocus) && checkFocusContainer(pass, ginkgoHndlr, exp) { + if config.ForbidFocus && checkFocusContainer(pass, ginkgoHndlr, exp) { goDeeper = true } - if 
bool(config.ForbidSpecPollution) && checkAssignmentsInContainer(pass, ginkgoHndlr, exp) { + if config.ForbidSpecPollution && checkAssignmentsInContainer(pass, ginkgoHndlr, exp) { goDeeper = true } } diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go index bd3b93992f..8ab87c76e9 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/dothandler.go @@ -14,24 +14,34 @@ type dotHandler struct { pass *analysis.Pass } -// GetActualFuncName returns the name of the gomega function, e.g. `Expect` -func (h dotHandler) GetActualFuncName(expr *ast.CallExpr) (string, bool) { - switch actualFunc := expr.Fun.(type) { - case *ast.Ident: - return actualFunc.Name, true - case *ast.SelectorExpr: - if h.isGomegaVar(actualFunc.X) { - return actualFunc.Sel.Name, true - } +// GetGomegaBasicInfo returns the name of the gomega function, e.g. `Expect` + some additional info +func (h dotHandler) GetGomegaBasicInfo(expr *ast.CallExpr) (*GomegaBasicInfo, bool) { + info := &GomegaBasicInfo{} + for { + switch actualFunc := expr.Fun.(type) { + case *ast.Ident: + info.MethodName = actualFunc.Name + return info, true + case *ast.SelectorExpr: + if h.isGomegaVar(actualFunc.X) { + info.UseGomegaVar = true + info.MethodName = actualFunc.Sel.Name + return info, true + } - if x, ok := actualFunc.X.(*ast.CallExpr); ok { - return h.GetActualFuncName(x) - } + if actualFunc.Sel.Name == "Error" { + info.HasErrorMethod = true + } - case *ast.CallExpr: - return h.GetActualFuncName(actualFunc) + if x, ok := actualFunc.X.(*ast.CallExpr); ok { + expr = x + } else { + return nil, false + } + default: + return nil, false + } } - return "", false } // ReplaceFunction replaces the function with another one, for fix suggestions diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go index 4dba604a4f..881ec87896 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/handler.go @@ -14,7 +14,7 @@ const ( // in imported with "." name, custom name or without any name. type Handler interface { // GetActualFuncName returns the name of the gomega function, e.g. 
`Expect` - GetActualFuncName(*ast.CallExpr) (string, bool) + GetGomegaBasicInfo(*ast.CallExpr) (*GomegaBasicInfo, bool) // ReplaceFunction replaces the function with another one, for fix suggestions ReplaceFunction(*ast.CallExpr, *ast.Ident) @@ -25,6 +25,12 @@ type Handler interface { GetNewWrapperMatcher(name string, existing *ast.CallExpr) *ast.CallExpr } +type GomegaBasicInfo struct { + MethodName string + UseGomegaVar bool + HasErrorMethod bool +} + // GetGomegaHandler returns a gomegar handler according to the way gomega was imported in the specific file func GetGomegaHandler(file *ast.File, pass *analysis.Pass) Handler { for _, imp := range file.Imports { diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go index 712442426d..61c471f4c2 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegahandler/namedhandler.go @@ -18,28 +18,39 @@ type nameHandler struct { pass *analysis.Pass } -// GetActualFuncName returns the name of the gomega function, e.g. `Expect` -func (g nameHandler) GetActualFuncName(expr *ast.CallExpr) (string, bool) { - selector, ok := expr.Fun.(*ast.SelectorExpr) - if !ok { - return "", false - } +// GetGomegaBasicInfo returns the name of the gomega function, e.g. `Expect` + some additional info +func (g nameHandler) GetGomegaBasicInfo(expr *ast.CallExpr) (*GomegaBasicInfo, bool) { + info := &GomegaBasicInfo{} + for { + selector, ok := expr.Fun.(*ast.SelectorExpr) + if !ok { + return nil, false + } - switch x := selector.X.(type) { - case *ast.Ident: - if x.Name != g.name { - if !g.isGomegaVar(x) { - return "", false - } + if selector.Sel.Name == "Error" { + info.HasErrorMethod = true } - return selector.Sel.Name, true + switch x := selector.X.(type) { + case *ast.Ident: + if x.Name != g.name { + if !g.isGomegaVar(x) { + return nil, false + } + info.UseGomegaVar = true + } - case *ast.CallExpr: - return g.GetActualFuncName(x) - } + info.MethodName = selector.Sel.Name + + return info, true + + case *ast.CallExpr: + expr = x - return "", false + default: + return nil, false + } + } } // ReplaceFunction replaces the function with another one, for fix suggestions diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go index e4eda7f6cf..307cd2d125 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asyncfunccallrule.go @@ -19,11 +19,11 @@ const valueInEventually = "use a function call in %[1]s. 
This actually checks no type AsyncFuncCallRule struct{} func (r AsyncFuncCallRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { - if bool(config.SuppressAsync) || !gexp.IsAsync() { + if config.SuppressAsync || !gexp.IsAsync() { return false } - if asyncArg := gexp.GetAsyncActualArg(); asyncRules != nil { + if asyncArg := gexp.GetAsyncActualArg(); asyncArg != nil { return !asyncArg.IsValid() } diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go index 45953ec01e..ca5c326195 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/asynctimeintervalsrule.go @@ -20,7 +20,7 @@ const ( type AsyncTimeIntervalsRule struct{} func (r AsyncTimeIntervalsRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { - return !bool(config.SuppressAsync) && bool(config.ValidateAsyncIntervals) && gexp.IsAsync() + return !config.SuppressAsync && config.ValidateAsyncIntervals && gexp.IsAsync() } func (r AsyncTimeIntervalsRule) Apply(gexp *expression.GomegaExpression, config types.Config, reportBuilder *reports.Builder) bool { diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go index 81d703bb89..4b6eafdda0 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equaldifferenttypesrule.go @@ -14,7 +14,7 @@ const compareDifferentTypes = "use %[1]s with different types: Comparing %[2]s w type EqualDifferentTypesRule struct{} func (r EqualDifferentTypesRule) isApplied(config types.Config) bool { - return !bool(config.SuppressTypeCompare) + return !config.SuppressTypeCompare } func (r EqualDifferentTypesRule) Apply(gexp *expression.GomegaExpression, config types.Config, reportBuilder *reports.Builder) bool { diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go index 5b28e7d9b7..f27dfb0d88 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/equalnilrule.go @@ -12,7 +12,7 @@ import ( type EqualNilRule struct{} func (r EqualNilRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { - return !bool(config.SuppressNil) && + return !config.SuppressNil && gexp.MatcherTypeIs(matcher.EqualValueMatcherType) } diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/errorequalnilrule.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/errorequalnilrule.go index 7aaf7631b0..81932cc2c5 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/errorequalnilrule.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/errorequalnilrule.go @@ -12,8 +12,15 @@ import ( type ErrorEqualNilRule struct{} func (ErrorEqualNilRule) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { - return !bool(config.SuppressErr) && - gexp.ActualArgTypeIs(actual.ErrorTypeArgType) && + if config.SuppressErr { + return false 
+ } + + if !gexp.IsAsync() && gexp.ActualArgTypeIs(actual.FuncSigArgType) { + return false + } + + return gexp.ActualArgTypeIs(actual.ErrorTypeArgType) && gexp.MatcherTypeIs(matcher.BeNilMatcherType|matcher.EqualNilMatcherType) } diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go index 20bcb72117..159fb615a0 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/havelen0.go @@ -10,7 +10,7 @@ import ( type HaveLen0 struct{} func (r *HaveLen0) isApplied(gexp *expression.GomegaExpression, config types.Config) bool { - return gexp.MatcherTypeIs(matcher.HaveLenZeroMatcherType) && !bool(config.AllowHaveLen0) + return gexp.MatcherTypeIs(matcher.HaveLenZeroMatcherType) && !config.AllowHaveLen0 } func (r *HaveLen0) Apply(gexp *expression.GomegaExpression, config types.Config, reportBuilder *reports.Builder) bool { diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go index 437d3ee23f..317e22ed3d 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/haveoccurredrule.go @@ -24,7 +24,7 @@ func (r HaveOccurredRule) Apply(gexp *expression.GomegaExpression, config types. return true } - if bool(config.ForceSucceedForFuncs) && gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { + if config.ForceSucceedForFuncs && gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { gexp.ReverseAssertionFuncLogic() gexp.SetMatcherSucceed() reportBuilder.AddIssue(true, "prefer using the Succeed matcher for error function, instead of HaveOccurred") diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go index fc3cd49e53..6677dce3bb 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/nilcomparerule.go @@ -42,9 +42,9 @@ func (r NilCompareRule) isApplied(gexp *expression.GomegaExpression, config type return false, false } - isErr := actl.IsError() && !bool(config.SuppressErr) + isErr := actl.IsError() && !config.SuppressErr - if !isErr && bool(config.SuppressNil) { + if !isErr && config.SuppressNil { return isErr, false } diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go index 6a5167a8a0..45a8d948b4 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/rules/succeedrule.go @@ -28,7 +28,7 @@ func (r SucceedRule) Apply(gexp *expression.GomegaExpression, config types.Confi return true } - if bool(config.ForceSucceedForFuncs) && !gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { + if config.ForceSucceedForFuncs && !gexp.GetActualArg().(*actual.ErrPayload).IsFunc() { gexp.ReverseAssertionFuncLogic() gexp.SetMatcherHaveOccurred() diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go deleted file mode 
100644 index be510c4e95..0000000000 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/types/boolean.go +++ /dev/null @@ -1,32 +0,0 @@ -package types - -import ( - "errors" - "strings" -) - -// Boolean is a bool, implementing the flag.Value interface, to be used as a flag var. -type Boolean bool - -func (b *Boolean) Set(value string) error { - if b == nil { - return errors.New("trying to set nil parameter") - } - switch strings.ToLower(value) { - case "true": - *b = true - case "false": - *b = false - default: - return errors.New(value + " is not a Boolean value") - - } - return nil -} - -func (b Boolean) String() string { - if b { - return "true" - } - return "false" -} diff --git a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/types/config.go b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/types/config.go index 0aadd3416f..81a9ebe327 100644 --- a/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/types/config.go +++ b/hack/tools/vendor/github.com/nunnatsa/ginkgolinter/types/config.go @@ -17,22 +17,22 @@ const ( ) type Config struct { - SuppressLen Boolean - SuppressNil Boolean - SuppressErr Boolean - SuppressCompare Boolean - SuppressAsync Boolean - ForbidFocus Boolean - SuppressTypeCompare Boolean - AllowHaveLen0 Boolean - ForceExpectTo Boolean - ValidateAsyncIntervals Boolean - ForbidSpecPollution Boolean - ForceSucceedForFuncs Boolean + SuppressLen bool + SuppressNil bool + SuppressErr bool + SuppressCompare bool + SuppressAsync bool + ForbidFocus bool + SuppressTypeCompare bool + AllowHaveLen0 bool + ForceExpectTo bool + ValidateAsyncIntervals bool + ForbidSpecPollution bool + ForceSucceedForFuncs bool } func (s *Config) AllTrue() bool { - return bool(s.SuppressLen && s.SuppressNil && s.SuppressErr && s.SuppressCompare && s.SuppressAsync && !s.ForbidFocus) + return s.SuppressLen && s.SuppressNil && s.SuppressErr && s.SuppressCompare && s.SuppressAsync && !s.ForbidFocus } func (s *Config) Clone() Config { diff --git a/hack/tools/vendor/github.com/openshift/api/tools/.custom-gcl.yml b/hack/tools/vendor/github.com/openshift/api/tools/.custom-gcl.yml new file mode 100644 index 0000000000..c3ad614f2e --- /dev/null +++ b/hack/tools/vendor/github.com/openshift/api/tools/.custom-gcl.yml @@ -0,0 +1,6 @@ +version: v1.63.4 +name: golangci-kal +destination: ./bin +plugins: +- module: 'github.com/JoelSpeed/kal' + version: v0.0.0-20250121143106-304c215e474e diff --git a/hack/tools/vendor/github.com/openshift/api/tools/Makefile b/hack/tools/vendor/github.com/openshift/api/tools/Makefile index efdf722112..245513262a 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/Makefile +++ b/hack/tools/vendor/github.com/openshift/api/tools/Makefile @@ -35,7 +35,9 @@ publish-kubebuilder-tools: @# Make sure to update the `-version` to the full semver (x.y.z) of the Kube version that the release is based on (check o/k). @# The payload should be any CI build (not nightly/EC) published in the CI registry that matches the defined version. 
@# Pull secret should be the standard pull secret used for spinning up OpenShift clusters, if you do not have one, follow https://docs.google.com/document/d/1ez2jrjiIQJChobfSu2ISJ5uM_-3HGouamL_xIa2_1W4/edit#heading=h.me93l6citmsq - go run ./publish-kubebuilder-tools/main.go -version v1.31.1 -output-dir /tmp/kubebuilder-tools -payload=registry.ci.openshift.org/ocp/release:4.18.0-0.ci-2024-10-03-015546 -pull-secret $(PULL_SECRET) -index-file ../envtest-releases.yaml + @# This also expects you to be already authenticated with the OpenShift GCE devel project on Google Cloud, if you are not, + @# follow the 'Create the Service Account and grant permissions' and 'Generate an access key' @# steps at https://docs.google.com/document/d/1qm37EKkjgoPtjW4909UClzvsjQO5VSpPUvFO_hW_PEg + go run ./publish-kubebuilder-tools/main.go -version v1.32.1 -output-dir /tmp/kubebuilder-tools -payload=registry.ci.openshift.org/ocp/release:4.19.0-0.ci-2025-01-30-154351 -pull-secret $(PULL_SECRET) -index-file ../envtest-releases.yaml ############################################# # @@ -46,9 +48,6 @@ publish-kubebuilder-tools: .PHONY:codegen codegen: $(OUTPUT_DIR)/codegen -.PHONY:prerelease-lifecycle-gen -prerelease-lifecycle-gen: $(OUTPUT_DIR)/prerelease-lifecycle-gen - .PHONY:controller-gen controller-gen: $(OUTPUT_DIR)/controller-gen @@ -58,9 +57,18 @@ deepcopy-gen: $(OUTPUT_DIR)/deepcopy-gen .PHONY:go-to-protobuf go-to-protobuf: $(OUTPUT_DIR)/go-to-protobuf +.PHONY:golangci-kal +golangci-kal: $(OUTPUT_DIR)/golangci-kal + +.PHONY:kal +kal: $(OUTPUT_DIR)/kal + .PHONY:openapi-gen openapi-gen: $(OUTPUT_DIR)/openapi-gen +.PHONY:prerelease-lifecycle-gen +prerelease-lifecycle-gen: $(OUTPUT_DIR)/prerelease-lifecycle-gen + .PHONY:protoc-gen-gogo protoc-gen-gogo: $(OUTPUT_DIR)/protoc-gen-gogo @@ -99,6 +107,18 @@ $(OUTPUT_DIR)/deepcopy-gen: $(OUTPUT_DIR)/vendor-version $(OUTPUT_DIR)/go-to-protobuf: $(OUTPUT_DIR)/vendor-version go build -mod=vendor -o $(OUTPUT_DIR)/go-to-protobuf ./vendor/k8s.io/code-generator/cmd/go-to-protobuf +$(OUTPUT_DIR)/golangci-lint: $(OUTPUT_DIR)/vendor-version + go build -mod=vendor -o $(OUTPUT_DIR)/golangci-lint ./vendor/github.com/golangci/golangci-lint/cmd/golangci-lint + +# The golangci-lint custom command relies on finding a module, this uses a workaround to init the go.mod in the kal directory. 
+$(OUTPUT_DIR)/golangci-kal: $(OUTPUT_DIR)/vendor-version $(OUTPUT_DIR)/golangci-lint + GOFLAGS=-mod=readonly $(OUTPUT_DIR)/golangci-lint custom + @ mv bin/golangci-kal $(OUTPUT_DIR)/golangci-kal + @ rmdir bin + +$(OUTPUT_DIR)/kal: $(OUTPUT_DIR)/vendor-version + go build -mod=vendor -o $(OUTPUT_DIR)/kal ./vendor/github.com/JoelSpeed/kal/cmd/kal + $(OUTPUT_DIR)/openapi-gen: $(OUTPUT_DIR)/vendor-version go build -mod=vendor -o $(OUTPUT_DIR)/openapi-gen ./vendor/k8s.io/code-generator/cmd/openapi-gen diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/cmd/featuregate-test-analyzer.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/cmd/featuregate-test-analyzer.go index 9c1ac1afc9..6593ae39f6 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/cmd/featuregate-test-analyzer.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/cmd/featuregate-test-analyzer.go @@ -582,13 +582,22 @@ func listTestResultForVariant(featureGate string, jobVariant JobVariant) (*Testing Host: "sippy.dptools.openshift.org", Path: "api/tests", } - - latestRelease, err := getLatestRelease() - if err != nil { - return nil, fmt.Errorf("couldn't fetch latest release version: %w", err) + var release string + // if it's not the main branch, then use the ENV var to determine the release version + currentRelease := os.Getenv("PULL_BASE_REF") + if strings.Contains(currentRelease, "release-") { + // example: release-4.18, release-4.17 + release = strings.TrimPrefix(currentRelease, "release-") + } else { + // means it's the main branch + var err error + release, err = getLatestRelease() + if err != nil { + return nil, fmt.Errorf("couldn't fetch latest release version: %w", err) + } } queryParams := currURL.Query() - queryParams.Add("release", latestRelease) + queryParams.Add("release", release) queryParams.Add("period", "default") filterJSON, err := json.Marshal(currQuery) if err != nil { diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/cmd/root.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/cmd/root.go index 84b1d0638f..8375b5f8df 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/cmd/root.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/cmd/root.go @@ -80,6 +80,7 @@ func init() { // Subsequent groups will continue to generate. func executeGenerators(genCtx generation.Context, generators ...generation.Generator) error { errs := []error{} + resultsByGroup := map[string][]generation.Result{} for _, group := range genCtx.APIGroups { klog.Infof("Running generators for %s", group.Name) @@ -90,15 +91,19 @@ func executeGenerators(genCtx generation.Context, generators ...generation.Gener g = g.ApplyConfig(group.Config) } - if err := g.GenGroup(group); err != nil { + results, err := g.GenGroup(group) + if err != nil { errs = append(errs, fmt.Errorf("error running generator %s on group %s: %w", gen.Name(), group.Name, err)) - - // Don't run any later generators for this group. - break } + + resultsByGroup[group.Name] = append(resultsByGroup[group.Name], results...)
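The featuregate-test-analyzer hunk above derives the Sippy release from Prow's PULL_BASE_REF instead of always querying for the latest release. A standalone sketch of just that branch-to-release mapping (the helper name and fallback wiring are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// releaseFromBranch is a hypothetical helper mirroring the hunk above: on a
// release branch, Prow sets PULL_BASE_REF to e.g. "release-4.18" and the
// release is derived from it; otherwise the caller falls back to
// getLatestRelease().
func releaseFromBranch() (release string, ok bool) {
	ref := os.Getenv("PULL_BASE_REF")
	if strings.Contains(ref, "release-") {
		return strings.TrimPrefix(ref, "release-"), true // "release-4.18" -> "4.18"
	}
	return "", false // main branch (or unset): query Sippy for the latest release
}

func main() {
	if release, ok := releaseFromBranch(); ok {
		fmt.Println("querying sippy for release", release)
	} else {
		fmt.Println("falling back to latest release lookup")
	}
}
```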
} } + if err := utils.PrintResults(resultsByGroup); err != nil { + errs = append(errs, err) + } + if len(errs) > 0 { return utils.NewAggregatePrinter(kerrors.NewAggregate(errs)) } diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/compatibility/generator.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/compatibility/generator.go index 1b63767387..e1438b01e5 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/compatibility/generator.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/compatibility/generator.go @@ -52,10 +52,10 @@ func (g *generator) Name() string { } // GenGroup runs the compatibility generator against the given group context. -func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { +func (g *generator) GenGroup(groupCtx generation.APIGroupContext) ([]generation.Result, error) { if g.disabled { klog.V(2).Infof("Skipping compatibility generation for %s", groupCtx.Name) - return nil + return nil, nil } for _, version := range groupCtx.Versions { @@ -67,9 +67,9 @@ func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { klog.V(1).Infof("%s compatibility level comments for %s/%s", action, groupCtx.Name, version.Name) if err := insertCompatibilityLevelComments(version.Path, g.verify); err != nil { - return fmt.Errorf("could not insert compatibility level comments for %s/%s: %w", groupCtx.Name, version.Name, err) + return nil, fmt.Errorf("could not insert compatibility level comments for %s/%s: %w", groupCtx.Name, version.Name, err) } } - return nil + return nil, nil } diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/deepcopy/generator.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/deepcopy/generator.go index 22d59381d7..111c00aa53 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/deepcopy/generator.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/deepcopy/generator.go @@ -78,10 +78,10 @@ func (g *generator) Name() string { } // GenGroup runs the deepcopy generator against the given group context. -func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { +func (g *generator) GenGroup(groupCtx generation.APIGroupContext) ([]generation.Result, error) { if g.disabled { klog.V(2).Infof("Skipping deepcopy generation for %s", groupCtx.Name) - return nil + return nil, nil } // If there is no header file, create an empty file and pass that through. 
@@ -89,7 +89,7 @@ func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { if headerFilePath == "" { tmpFile, err := os.CreateTemp("", "deepcopy-header-*.txt") if err != nil { - return fmt.Errorf("failed to create temporary file: %w", err) + return nil, fmt.Errorf("failed to create temporary file: %w", err) } tmpFile.Close() @@ -107,9 +107,9 @@ func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { klog.V(1).Infof("%s deepcopy functions for for %s/%s", action, groupCtx.Name, version.Name) if err := generateDeepcopyFunctions(version.Path, version.PackagePath, g.outputBaseFileName, headerFilePath, g.verify); err != nil { - return fmt.Errorf("could not generate deepcopy functions for %s/%s: %w", groupCtx.Name, version.Name, err) + return nil, fmt.Errorf("could not generate deepcopy functions for %s/%s: %w", groupCtx.Name, version.Name, err) } } - return nil + return nil, nil } diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/emptypartialschemas/content_creator.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/emptypartialschemas/content_creator.go index d6e62eb1e0..0144aa3640 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/emptypartialschemas/content_creator.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/emptypartialschemas/content_creator.go @@ -2,6 +2,10 @@ package emptypartialschemas import ( "fmt" + "os" + "path/filepath" + "strings" + "github.com/google/go-cmp/cmp" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/equality" @@ -9,9 +13,6 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - "os" - "path/filepath" - "strings" ) func createFeatureGatedCRDManifests(allCRDInfo map[string]*CRDInfo, outputDir string, verify bool) error { @@ -128,12 +129,9 @@ func ensureNoExtraFields(minimalCRD, existingCRD *apiextensionsv1.CustomResource func minimalCRDFor(crdInfo *CRDInfo, featureGate string) *apiextensionsv1.CustomResourceDefinition { ret := &apiextensionsv1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ - Name: crdInfo.CRDName, - Annotations: map[string]string{ - // notice that this produces a "" featuregate to mean ungated - fmt.Sprintf("feature-gate.release.openshift.io/%s", featureGate): "true", - }, - Labels: map[string]string{}, + Name: crdInfo.CRDName, + Annotations: map[string]string{}, + Labels: map[string]string{}, }, Spec: apiextensionsv1.CustomResourceDefinitionSpec{ Group: crdInfo.GroupName, @@ -160,6 +158,12 @@ func minimalCRDFor(crdInfo *CRDInfo, featureGate string) *apiextensionsv1.Custom PreserveUnknownFields: false, }, } + + for _, fg := range strings.Split(featureGate, "+") { + // notice that this produces a "" featuregate to mean ungated + ret.Annotations[fmt.Sprintf("feature-gate.release.openshift.io/%s", fg)] = "true" + } + if len(crdInfo.FilenameRunLevel) > 0 { ret.Annotations["api.openshift.io/filename-cvo-runlevel"] = crdInfo.FilenameRunLevel } diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/emptypartialschemas/generator.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/emptypartialschemas/generator.go index f19c7b424d..d964a3d289 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/emptypartialschemas/generator.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/emptypartialschemas/generator.go @@ -2,12 +2,13 @@ package emptypartialschemas 
import ( "fmt" + "os" + "path/filepath" + "github.com/openshift/api/tools/codegen/pkg/generation" "github.com/openshift/api/tools/codegen/pkg/utils" "k8s.io/gengo/args" "k8s.io/klog/v2" - "os" - "path/filepath" ) // Options contains the configuration required for the compatibility generator. @@ -68,10 +69,10 @@ func (g *generator) Name() string { } // GenGroup runs the empty-partial-schemas generator against the given group context. -func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { +func (g *generator) GenGroup(groupCtx generation.APIGroupContext) ([]generation.Result, error) { if g.disabled { klog.V(2).Infof("Skipping %q generation for %s", g.Name(), groupCtx.Name) - return nil + return nil, nil } for _, version := range groupCtx.Versions { @@ -83,11 +84,11 @@ func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { klog.Infof("%s %q functions for for %s/%s", action, g.Name(), groupCtx.Name, version.Name) if err := g.generatePartialSchemaFiles(version.Path, version.PackagePath, g.verify); err != nil { - return fmt.Errorf("could not generate %v functions for %s/%s: %w", g.Name(), groupCtx.Name, version.Name, err) + return nil, fmt.Errorf("could not generate %v functions for %s/%s: %w", g.Name(), groupCtx.Name, version.Name, err) } } - return nil + return nil, nil } // generatePartialSchemaFiles generates the DeepCopy functions for the given API package paths. diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/emptypartialschemas/tags.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/emptypartialschemas/tags.go index bb153f285f..6efea84983 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/emptypartialschemas/tags.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/emptypartialschemas/tags.go @@ -2,10 +2,11 @@ package emptypartialschemas import ( "fmt" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/gengo/types" "strconv" "strings" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/gengo/types" ) // known tags that are handled by empty partial schema generation. @@ -83,6 +84,17 @@ func extractFeatureGatesFromValidationMarker(comments []string, tagName string) } } + for _, tagVal := range tagVals { + requiredFeatureGates := extractNamedValuesFromSingleLine(tagVal)["requiredFeatureGate"] + + // Use + as the separator between required feature gates in file names. + requiredFeatureGates = strings.Replace(requiredFeatureGates, ";", "+", -1) + + if len(requiredFeatureGates) > 0 { + ret = append(ret, requiredFeatureGates) + } + } + return ret } diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/generation/generator.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/generation/generator.go index b5d5d8f505..15622aabaa 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/generation/generator.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/generation/generator.go @@ -6,7 +6,7 @@ type Generator interface { Name() string // GenGroup runs the generator against the given APIGroupContext. - GenGroup(APIGroupContext) error + GenGroup(APIGroupContext) ([]Result, error) // ApplyConfig creates a new generator instance with the given configuration. 
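The interface change just above ripples through every generator in this diff: GenGroup now returns ([]Result, error) instead of a bare error. A minimal conforming implementation, assuming the interface has only the three methods visible in the hunk (Name, GenGroup, ApplyConfig):

```go
package main

import "github.com/openshift/api/tools/codegen/pkg/generation"

// noopGenerator is an illustrative stand-in, not a generator from this repo.
type noopGenerator struct{}

// Compile-time conformance check, under the assumption stated above.
var _ generation.Generator = noopGenerator{}

func (noopGenerator) Name() string { return "noop" }

// GenGroup returns (nil, nil), the new spelling of the old "return nil":
// nothing for PrintResults to show, nothing for the error aggregate.
func (noopGenerator) GenGroup(generation.APIGroupContext) ([]generation.Result, error) {
	return nil, nil
}

func (g noopGenerator) ApplyConfig(*generation.Config) generation.Generator { return g }

func main() {}
```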
ApplyConfig(*Config) Generator diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/generation/result.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/generation/result.go new file mode 100644 index 0000000000..2145d0f4d9 --- /dev/null +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/generation/result.go @@ -0,0 +1,24 @@ +package generation + +type Result struct { + // Generator is the name of the generator that produced this result. + Generator string + + // Group is the name of the API group that was processed. + Group string + + // Version is the version of the API group that was processed. + Version string + + // Manifest is the path to the file that was processed. + Manifest string + + // Info contains informational messages from the generator. + Info []string + + // Warnings contains warning messages from the generator. + Warnings []string + + // Errors contains error messages from the generator. + Errors []error +} diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/manifestmerge/filters.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/manifestmerge/filters.go index 3d68d6ce6d..b50c6e8d26 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/manifestmerge/filters.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/manifestmerge/filters.go @@ -2,15 +2,16 @@ package manifestmerge import ( "fmt" + "os" + "path" + "path/filepath" + "strings" + "github.com/openshift/api/tools/codegen/pkg/utils" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" - "os" - "path" - "path/filepath" kyaml "sigs.k8s.io/yaml" - "strings" ) func AllKnownFeatureSets(payloadFeatureGatePath string) (sets.String, error) { @@ -149,7 +150,7 @@ func (f *ForFeatureGates) UseManifest(data []byte) (bool, error) { return true, nil } - return manifestFeatureGates.HasAny(f.allowedFeatureGates.UnsortedList()...), nil + return f.allowedFeatureGates.HasAll(manifestFeatureGates.UnsortedList()...), nil } func (f *ForFeatureGates) String() string { diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/manifestmerge/generator.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/manifestmerge/generator.go index 5b798140b4..082994f3f8 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/manifestmerge/generator.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/manifestmerge/generator.go @@ -5,14 +5,17 @@ import ( _ "embed" "encoding/json" "fmt" - "github.com/google/go-cmp/cmp" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/util/sets" "math/rand" "os" "strings" "sync" + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/util/sets" + + "path/filepath" + "github.com/openshift/api/tools/codegen/pkg/generation" "github.com/openshift/api/tools/codegen/pkg/utils" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -25,7 +28,6 @@ import ( "k8s.io/kube-openapi/pkg/spec3" "k8s.io/kube-openapi/pkg/validation/spec" "k8s.io/utils/pointer" - "path/filepath" kyaml "sigs.k8s.io/yaml" ) @@ -104,10 +106,10 @@ func (g *generator) Name() string { } // GenGroup runs the schemapatch generator against the given group context.
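The UseManifest change in filters.go above inverts the filter's semantics: previously a manifest was used if any of its feature gates was in the allowed set; now every gate the manifest requires must be allowed. A toy illustration with assumed gate names:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	manifestGates := sets.NewString("FeatureA", "FeatureB") // gates the manifest requires
	allowedGates := sets.NewString("FeatureA")              // gates enabled for this feature set

	// Old behaviour: one overlapping gate was enough.
	fmt.Println(manifestGates.HasAny(allowedGates.UnsortedList()...)) // true

	// New behaviour: the allowed set must cover every required gate.
	fmt.Println(allowedGates.HasAll(manifestGates.UnsortedList()...)) // false
}
```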
-func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { +func (g *generator) GenGroup(groupCtx generation.APIGroupContext) ([]generation.Result, error) { if g.disabled { klog.V(2).Infof("Skipping %q for %s", g.Name(), groupCtx.Name) - return nil + return nil, nil } versionPaths := allVersionPaths(groupCtx.Versions) @@ -128,10 +130,10 @@ func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { } if len(errs) > 0 { - return kerrors.NewAggregate(errs) + return nil, kerrors.NewAggregate(errs) } - return nil + return nil, nil } // genGroupVersion runs the schemapatch generator against a particular version of the API group. diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/schemacheck/generator.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/schemacheck/generator.go index 198e7179c3..317eec7a87 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/schemacheck/generator.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/schemacheck/generator.go @@ -1,6 +1,7 @@ package schemacheck import ( + "errors" "fmt" "os" "path/filepath" @@ -79,10 +80,10 @@ func (g *generator) Name() string { } // GenGroup runs the schemacheck generator against the given group context. -func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { +func (g *generator) GenGroup(groupCtx generation.APIGroupContext) ([]generation.Result, error) { if g.disabled { klog.V(2).Infof("Skipping API schema check for %s", groupCtx.Name) - return nil + return nil, nil } errs := []error{} @@ -92,73 +93,85 @@ func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { comparatorOptions.DisabledComparators = g.disabledComparators if err := comparatorOptions.Validate(); err != nil { - return fmt.Errorf("could not validate comparator options: %w", err) + return nil, fmt.Errorf("could not validate comparator options: %w", err) } comparatorConfig, err := comparatorOptions.Complete() if err != nil { - return fmt.Errorf("could not complete comparator options: %w", err) + return nil, fmt.Errorf("could not complete comparator options: %w", err) } + var results []generation.Result + for _, version := range groupCtx.Versions { klog.V(1).Infof("Verifying API schema for for %s/%s", groupCtx.Name, version.Name) - if err := g.genGroupVersion(groupCtx.Name, version, comparatorConfig); err != nil { + r, err := g.genGroupVersion(groupCtx.Name, version, comparatorConfig) + if err != nil { errs = append(errs, fmt.Errorf("could not run schemacheck generator for group/version %s/%s: %w", groupCtx.Name, version.Name, err)) } + + results = append(results, r...) } if len(errs) > 0 { - return kerrors.NewAggregate(errs) + return results, kerrors.NewAggregate(errs) } - return nil + return results, nil } // genGroupVersion runs the schemacheck generator against a particular version of the API group. 
-func (g *generator) genGroupVersion(group string, version generation.APIVersionContext, comparatorConfig *options.ComparatorConfig) error { +func (g *generator) genGroupVersion(group string, version generation.APIVersionContext, comparatorConfig *options.ComparatorConfig) ([]generation.Result, error) { contexts, err := loadSchemaCheckGenerationContextsForVersion(version, g.comparisonBase) if err != nil { - return fmt.Errorf("could not load schema check generation contexts for group/version %s/%s: %w", group, version.Name, err) + return nil, fmt.Errorf("could not load schema check generation contexts for group/version %s/%s: %w", group, version.Name, err) } if len(contexts) == 0 { klog.V(1).Infof("No CRD manifests found for %s/%s", group, version.Name) - return nil + return nil, nil } var manifestErrs []error + var results []generation.Result for _, context := range contexts { klog.V(1).Infof("Verifying schema for %s\n", context.manifestName) comparisonResults, errs := comparatorConfig.ComparatorRegistry.Compare(context.oldCRD, context.manifestCRD, comparatorConfig.ComparatorNames...) - if len(errs) > 0 { - return fmt.Errorf("could not compare manifests for %s: %w", context.manifestName, kerrors.NewAggregate(errs)) + + result := generation.Result{ + Generator: g.Name(), + Group: group, + Version: version.Name, + Manifest: context.manifestName, + Errors: errs, } + manifestErrs = append(manifestErrs, errs...) + for _, comparisonResult := range comparisonResults { for _, msg := range comparisonResult.Errors { - manifestErrs = append(manifestErrs, fmt.Errorf("error in %s: %s: %v", context.manifestName, comparisonResult.Name, msg)) + err := fmt.Errorf("%s: %w", comparisonResult.Name, errors.New(msg)) + manifestErrs = append(manifestErrs, err) + result.Errors = append(result.Errors, err) } } - for _, comparisonResult := range comparisonResults { - for _, msg := range comparisonResult.Warnings { - klog.Warningf("warning in %s: %s: %v", context.manifestName, comparisonResult.Name, msg) + for _, warning := range comparisonResult.Warnings { + result.Warnings = append(result.Warnings, fmt.Sprintf("%s: %s", comparisonResult.Name, warning)) } } for _, comparisonResult := range comparisonResults { - for _, msg := range comparisonResult.Infos { - klog.Infof("info in %s: %s: %v", context.manifestName, comparisonResult.Name, msg) + for _, info := range comparisonResult.Infos { + result.Info = append(result.Info, fmt.Sprintf("%s: %s", comparisonResult.Name, info)) } } - } - if len(manifestErrs) > 0 { - return kerrors.NewAggregate(manifestErrs) + results = append(results, result) } - return nil + return results, kerrors.NewAggregate(manifestErrs) } // schemaCheckGenerationContext contains the context required to verify the schema for a particular diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/schemapatch/generator.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/schemapatch/generator.go index 32801ba6ff..27d9a954e7 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/schemapatch/generator.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/schemapatch/generator.go @@ -84,10 +84,10 @@ func (g *generator) Name() string { } // GenGroup runs the schemapatch generator against the given group context. 
-func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { +func (g *generator) GenGroup(groupCtx generation.APIGroupContext) ([]generation.Result, error) { if g.disabled { klog.V(2).Infof("Skipping API schema generation for %s", groupCtx.Name) - return nil + return nil, nil } versionPaths := allVersionPaths(groupCtx.Versions) @@ -97,7 +97,7 @@ func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { for _, version := range groupCtx.Versions { versionRequired, err := shouldProcessGroupVersion(version, g.requiredFeatureSets) if err != nil { - return fmt.Errorf("could not determine if version %s is required: %w", version.Name, err) + return nil, fmt.Errorf("could not determine if version %s is required: %w", version.Name, err) } if !versionRequired { @@ -117,10 +117,10 @@ func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { } if len(errs) > 0 { - return kerrors.NewAggregate(errs) + return nil, kerrors.NewAggregate(errs) } - return nil + return nil, nil } // genGroupVersion runs the schemapatch generator against a particular version of the API group. diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/swaggerdocs/generator.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/swaggerdocs/generator.go index 379bf9bf7c..9829eff607 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/swaggerdocs/generator.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/swaggerdocs/generator.go @@ -79,19 +79,19 @@ func (g *generator) Name() string { } // GenGroup runs the schemapatch generator against the given group context. -func (g *generator) GenGroup(groupCtx generation.APIGroupContext) error { +func (g *generator) GenGroup(groupCtx generation.APIGroupContext) ([]generation.Result, error) { if g.disabled { klog.V(2).Infof("Skipping swaggerdocs generation for %s", groupCtx.Name) - return nil + return nil, nil } for _, version := range groupCtx.Versions { if err := g.generateGroupVersion(groupCtx.Name, version); err != nil { - return fmt.Errorf("error generating swagger docs for %s/%s: %w", groupCtx.Name, version.Name, err) + return nil, fmt.Errorf("error generating swagger docs for %s/%s: %w", groupCtx.Name, version.Name, err) } } - return nil + return nil, nil } // generateGroupVersion generates swagger docs for the group version. diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/utils/aggregate.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/utils/aggregate.go index eb7693788d..b60abbdda6 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/utils/aggregate.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/utils/aggregate.go @@ -52,9 +52,12 @@ func handleError(buf *bytes.Buffer, err error, indent int) { // printError prints the error after indenting it by the given amount. 
func printError(buf *bytes.Buffer, err error, indent int) { + buf.WriteString(colourRed) + for i := 0; i < indent; i++ { buf.WriteString("\t") } + buf.WriteString(err.Error()) - buf.WriteString("\n") + buf.WriteString("\n" + colourReset) } diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/utils/diff.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/utils/diff.go index 91a6a3c26c..4e686b2928 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/utils/diff.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/utils/diff.go @@ -11,10 +11,12 @@ import ( ) const ( - colourReset = "\x1b[0m" - colourRed = "\x1b[31m" - colourGreen = "\x1b[32m" - colourCyan = "\x1b[36m" + colourReset = "\x1b[0m" + colourRed = "\x1b[91m" + colourGreen = "\x1b[92m" + colourYellow = "\x1b[93m" + colourMagenta = "\x1b[95m" + colourCyan = "\x1b[96m" ) // Diff returns a string containing the diff between the two strings. diff --git a/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/utils/results_printer.go b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/utils/results_printer.go new file mode 100644 index 0000000000..a65b3157ba --- /dev/null +++ b/hack/tools/vendor/github.com/openshift/api/tools/codegen/pkg/utils/results_printer.go @@ -0,0 +1,83 @@ +package utils + +import ( + "bytes" + "fmt" + "os" + "strings" + + "github.com/openshift/api/tools/codegen/pkg/generation" + kerrors "k8s.io/apimachinery/pkg/util/errors" +) + +func PrintResults(resultsByGroup map[string][]generation.Result) error { + errs := []error{} + + for _, results := range resultsByGroup { + resultsByVersion := map[string][]generation.Result{} + for _, result := range results { + resultsByVersion[result.Version] = append(resultsByVersion[result.Version], result) + } + + for _, results := range resultsByVersion { + resultsByManifest := map[string][]generation.Result{} + for _, result := range results { + resultsByManifest[result.Manifest] = append(resultsByManifest[result.Manifest], result) + } + + for _, results := range resultsByManifest { + if err := printResults(results); err != nil { + return err + } + } + } + } + + return kerrors.NewAggregate(errs) +} + +func printResults(results []generation.Result) error { + for _, result := range results { + if len(result.Errors) == 0 && len(result.Warnings) == 0 && len(result.Info) == 0 { + continue + } + + buf := bytes.NewBuffer(nil) + + buf.WriteString("----- " + colourCyan + result.Group + colourReset + " ----- " + colourCyan + result.Version + colourReset + " -----") + if result.Manifest != "" { + buf.WriteString(" " + colourCyan + result.Manifest + colourReset + " -----") + } + buf.WriteString("\n") + + buf.WriteString("----- " + colourMagenta + strings.ToUpper(result.Generator) + colourReset + " -----\n") + + for _, err := range result.Errors { + indentString(buf, err.Error(), 0, "ERROR: ", colourRed) + } + + for _, warn := range result.Warnings { + indentString(buf, warn, 0, "WARN: ", colourYellow) + } + + for _, info := range result.Info { + indentString(buf, info, 0, "INFO: ", colourGreen) + } + + if _, err := fmt.Fprint(os.Stdout, buf.String()); err != nil { + return err + } + + } + return nil +} + +// indentString prints the string after indenting it by the given amount. 
+func indentString(buf *bytes.Buffer, msg string, indent int, prefix, colour string) { + for i := 0; i < indent; i++ { + buf.WriteString("\t") + } + + buf.WriteString(colour + prefix + colourReset) + buf.WriteString(msg + "\n") +} diff --git a/hack/tools/vendor/github.com/openshift/api/tools/tools.go b/hack/tools/vendor/github.com/openshift/api/tools/tools.go index 9aed19f7ce..aefdfead9f 100644 --- a/hack/tools/vendor/github.com/openshift/api/tools/tools.go +++ b/hack/tools/vendor/github.com/openshift/api/tools/tools.go @@ -4,9 +4,12 @@ package tools import ( + _ "github.com/JoelSpeed/kal" + _ "github.com/JoelSpeed/kal/cmd/kal" _ "github.com/gogo/protobuf/gogoproto" _ "github.com/gogo/protobuf/proto" _ "github.com/gogo/protobuf/sortkeys" + _ "github.com/golangci/golangci-lint/cmd/golangci-lint" _ "github.com/mikefarah/yq/v4" _ "github.com/vmware-archive/yaml-patch/cmd/yaml-patch" _ "k8s.io/code-generator" diff --git a/hack/tools/vendor/github.com/openshift/crd-schema-checker/pkg/defaultcomparators/interfaces.go b/hack/tools/vendor/github.com/openshift/crd-schema-checker/pkg/defaultcomparators/interfaces.go index af4f57fb76..5fbd6ae6e0 100644 --- a/hack/tools/vendor/github.com/openshift/crd-schema-checker/pkg/defaultcomparators/interfaces.go +++ b/hack/tools/vendor/github.com/openshift/crd-schema-checker/pkg/defaultcomparators/interfaces.go @@ -20,6 +20,7 @@ func NewDefaultComparators() manifestcomparators.CRDComparatorRegistry { must(ret.AddComparator(manifestcomparators.ListsMustHaveSSATags())) must(ret.AddComparator(manifestcomparators.ConditionsMustHaveProperSSATags())) must(ret.AddComparator(manifestcomparators.NoNewRequiredFields())) + must(ret.AddComparator(manifestcomparators.MustNotExceedCostBudget())) /* other useful comparators diff --git a/hack/tools/vendor/github.com/openshift/crd-schema-checker/pkg/manifestcomparators/comp_must_not_exceed_cost_budget.go b/hack/tools/vendor/github.com/openshift/crd-schema-checker/pkg/manifestcomparators/comp_must_not_exceed_cost_budget.go new file mode 100644 index 0000000000..d1db0ff9de --- /dev/null +++ b/hack/tools/vendor/github.com/openshift/crd-schema-checker/pkg/manifestcomparators/comp_must_not_exceed_cost_budget.go @@ -0,0 +1,327 @@ +package manifestcomparators + +import ( + "errors" + "fmt" + "math" + "strings" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsvalidation "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation" + structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema" + "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel" + "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model" + "k8s.io/apimachinery/pkg/util/validation/field" + celconfig "k8s.io/apiserver/pkg/apis/cel" + apiservercel "k8s.io/apiserver/pkg/cel" + "k8s.io/apiserver/pkg/cel/environment" +) + +type mustNotExceedCostBudget struct{} + +func MustNotExceedCostBudget() CRDComparator { + return mustNotExceedCostBudget{} +} + +func (mustNotExceedCostBudget) Name() string { + return "MustNotExceedCostBudget" +} + +func (mustNotExceedCostBudget) WhyItMatters() string { + return "" +} + +func (b mustNotExceedCostBudget) Validate(crd *apiextensionsv1.CustomResourceDefinition) (ComparisonResults, error) { + errsToReport := []string{} + warnings := []string{} + infos := []string{} + + for _, newVersion := range crd.Spec.Versions { + schema := &apiextensions.JSONSchemaProps{} + if err := 
apiextensionsv1.Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(newVersion.Schema.OpenAPIV3Schema, schema, nil); err != nil { + errsToReport = append(errsToReport, err.Error()) + continue + } + + rootCELContext := apiextensionsvalidation.RootCELContext(schema) + + SchemaHas(newVersion.Schema.OpenAPIV3Schema, field.NewPath("^"), field.NewPath("^"), nil, + func(s *apiextensionsv1.JSONSchemaProps, fldPath, simpleLocation *field.Path, ancestry []*apiextensionsv1.JSONSchemaProps) bool { + schema := &apiextensions.JSONSchemaProps{} + if err := apiextensionsv1.Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(s, schema, nil); err != nil { + errsToReport = append(errsToReport, err.Error()) + return false + } + + schemaInfos, schemaWarnings, err := inspectSchema(schema, simpleLocation, len(ancestry) == 0) + if err != nil { + errsToReport = append(errsToReport, err.Error()) + } + infos = append(infos, schemaInfos...) + warnings = append(warnings, schemaWarnings...) + + celContext, err := extractCELContext(append(ancestry, s), fldPath) + if err != nil { + errsToReport = append(errsToReport, err.Error()) + return false + } + + typeInfo, err := celContext.TypeInfo() + if err != nil { + errsToReport = append(errsToReport, err.Error()) + return false + } + + if typeInfo == nil { + // No validations to check. + return false + } + + compResults, err := cel.Compile( + typeInfo.Schema, + typeInfo.DeclType, + celconfig.PerCallLimit, + environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true), + cel.NewExpressionsEnvLoader(), + ) + if err != nil { + fieldErr := field.InternalError(fldPath, fmt.Errorf("failed to compile x-kubernetes-validations rules: %w", err)) + errsToReport = append(errsToReport, fieldErr.Error()) + return false + } + + for i, cr := range compResults { + if celContext.MaxCardinality == nil { + unboundedParents, err := getUnboundedParentFields(ancestry, fldPath) + if err != nil { + errsToReport = append(errsToReport, err.Error()) + } + warnings = append(warnings, fmt.Sprintf("%s: Field has unbounded cardinality. At least one, variable parent field does not have a maxItems or maxProperties constraint: %s. Falling back to CEL calculated worst case of %d executions.", simpleLocation.String(), strings.Join(unboundedParents, ","), cr.MaxCardinality)) + } else { + msg := fmt.Sprintf("%s: Field has a maximum cardinality of %d.", simpleLocation.String(), *celContext.MaxCardinality) + if *celContext.MaxCardinality > 1 { + msg += " This is the calculated, worst case number of times the rule will be evaluated." + } + + infos = append(infos, msg) + } + + expressionCost := getExpressionCost(cr, celContext) + + if expressionCost > apiextensionsvalidation.StaticEstimatedCostLimit { + costErrorMsg := getCostErrorMessage("estimated rule cost", expressionCost, apiextensionsvalidation.StaticEstimatedCostLimit) + errsToReport = append(errsToReport, field.Forbidden(fldPath, costErrorMsg).Error()) + } + if rootCELContext.TotalCost != nil { + rootCELContext.TotalCost.ObserveExpressionCost(fldPath, expressionCost) + } + + if cr.Error != nil { + if cr.Error.Type == apiservercel.ErrorTypeRequired { + errsToReport = append(errsToReport, field.Required(fldPath, cr.Error.Detail).Error()) + } else { + errsToReport = append(errsToReport, field.Invalid(fldPath, schema.XValidations[i], cr.Error.Detail).Error()) + } + } else { + infos = append(infos, fmt.Sprintf("%s: Rule %d raw cost is %d. Estimated total cost of %d. The maximum allowable value is %d. 
Rule is %.2f%% of allowed budget.", simpleLocation.String(), i, cr.MaxCost, expressionCost, apiextensionsvalidation.StaticEstimatedCostLimit, float64(expressionCost*100)/apiextensionsvalidation.StaticEstimatedCostLimit)) + } + + if cr.MessageExpressionError != nil { + errsToReport = append(errsToReport, field.Invalid(fldPath, schema.XValidations[i], cr.MessageExpressionError.Detail).Error()) + } else if cr.MessageExpression != nil { + if cr.MessageExpressionMaxCost > apiextensionsvalidation.StaticEstimatedCostLimit { + costErrorMsg := getCostErrorMessage("estimated messageExpression cost", cr.MessageExpressionMaxCost, apiextensionsvalidation.StaticEstimatedCostLimit) + errsToReport = append(errsToReport, field.Forbidden(fldPath, costErrorMsg).Error()) + } + if celContext.TotalCost != nil { + celContext.TotalCost.ObserveExpressionCost(fldPath, cr.MessageExpressionMaxCost) + } + } + } + + return false + }) + + if rootCELContext != nil && rootCELContext.TotalCost != nil && rootCELContext.TotalCost.Total > apiextensionsvalidation.StaticEstimatedCRDCostLimit { + costErrorMsg := getCostErrorMessage("total CRD cost", rootCELContext.TotalCost.Total, apiextensionsvalidation.StaticEstimatedCRDCostLimit) + errsToReport = append(errsToReport, field.Forbidden(field.NewPath("^"), costErrorMsg).Error()) + } + } + + return ComparisonResults{ + Name: b.Name(), + WhyItMatters: b.WhyItMatters(), + + Errors: errsToReport, + Warnings: warnings, + Infos: infos, + }, nil +} + +func (b mustNotExceedCostBudget) Compare(existingCRD, newCRD *apiextensionsv1.CustomResourceDefinition) (ComparisonResults, error) { + return RatchetCompare(b, existingCRD, newCRD) +} + +// multiplyWithOverflowGuard returns the product of baseCost and cardinality unless that product +// would exceed math.MaxUint, in which case math.MaxUint is returned. +func multiplyWithOverflowGuard(baseCost, cardinality uint64) uint64 { + if baseCost == 0 { + // an empty rule can return 0, so guard for that here + return 0 + } else if math.MaxUint/baseCost < cardinality { + return math.MaxUint + } + return baseCost * cardinality +} + +// unbounded uses nil to represent an unbounded cardinality value. +var unbounded *uint64 = nil //nolint:revive // Using as a named variable to provide the meaning of nil in this context. 
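Because the comparator multiplies a rule's base cost by a worst-case cardinality, the product can overflow uint64, so multiplyWithOverflowGuard above saturates at math.MaxUint rather than wrapping. A quick standalone check of the guard (values are arbitrary):

```go
package main

import (
	"fmt"
	"math"
)

// guardedMul repeats the logic of multiplyWithOverflowGuard above: saturate
// at math.MaxUint instead of wrapping around.
func guardedMul(baseCost, cardinality uint64) uint64 {
	if baseCost == 0 {
		return 0
	} else if math.MaxUint/baseCost < cardinality {
		return math.MaxUint
	}
	return baseCost * cardinality
}

func main() {
	fmt.Println(guardedMul(10, 3))                 // 30
	fmt.Println(guardedMul(2, math.MaxUint64/2+1)) // saturates at 18446744073709551615
}
```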
+ +func getExpressionCost(cr cel.CompilationResult, cardinalityCost *apiextensionsvalidation.CELSchemaContext) uint64 { + if cardinalityCost.MaxCardinality != unbounded { + return multiplyWithOverflowGuard(cr.MaxCost, *cardinalityCost.MaxCardinality) + } + return multiplyWithOverflowGuard(cr.MaxCost, cr.MaxCardinality) +} + +func getCostErrorMessage(costName string, expressionCost, costLimit uint64) string { + exceedFactor := float64(expressionCost) / float64(costLimit) + var factor string + if exceedFactor > 100.0 { + // if exceedFactor is greater than 2 orders of magnitude, the rule is likely O(n^2) or worse + // and will probably never validate without some set limits + // also in such cases the cost estimation is generally large enough to not add any value + factor = "more than 100x" + } else if exceedFactor < 1.5 { + factor = fmt.Sprintf("%fx", exceedFactor) // avoid reporting "exceeds budget by a factor of 1.0x" + } else { + factor = fmt.Sprintf("%.1fx", exceedFactor) + } + return fmt.Sprintf("%s exceeds budget by factor of %s (try simplifying the rule(s), or adding maxItems, maxProperties, and maxLength where arrays, maps, and strings are declared)", costName, factor) +} + +// extractCELContext takes a series of schemas and returns the CEL context of the last schema in the series. +// This is used so that the calculated maximum cardinality of the field is correct. +func extractCELContext(schemas []*apiextensionsv1.JSONSchemaProps, fldPath *field.Path) (*apiextensionsvalidation.CELSchemaContext, error) { + var celContext *apiextensionsvalidation.CELSchemaContext + + for _, s := range schemas { + schema := &apiextensions.JSONSchemaProps{} + if err := apiextensionsv1.Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(s, schema, nil); err != nil { + return nil, fmt.Errorf("failed to convert schema: %w", err) + } + + if celContext == nil { + celContext = apiextensionsvalidation.RootCELContext(schema) + continue + } + + celContext = celContext.ChildPropertyContext(schema, s.ID) + } + + return celContext, nil +} + +// getUnboundedParentFields returns a list of field paths that have unbounded cardinality in the ancestry path. +// This aims to help users identify where the unbounded cardinality is coming from. +func getUnboundedParentFields(ancestry []*apiextensionsv1.JSONSchemaProps, fldPath *field.Path) ([]string, error) { + cleanPathParts := getCleanPathParts(fldPath) + var path *field.Path + + if len(ancestry)+1 != len(cleanPathParts) { + // Ancestry does not include the field itself; the last part of cleanPathParts is the field itself. + return nil, errors.New("ancestry and field path do not match") + } + + unboundedParents := []string{} + for i, s := range ancestry { + schema := &apiextensions.JSONSchemaProps{} + if err := apiextensionsv1.Convert_v1_JSONSchemaProps_To_apiextensions_JSONSchemaProps(s, schema, nil); err != nil { + continue + } + + if path == nil { + path = field.NewPath(cleanPathParts[i]) + } else if cleanPathParts[i] == "items" { + path = path.Index(-1) + } else { + path = path.Child(cleanPathParts[i]) + } + + if isUnboundedCardinality(schema) { + // Replace the -1 index with * that we use as a placeholder.
+ unboundedParents = append(unboundedParents, strings.Replace(path.String(), "-1", "*", -1)) + } + } + return unboundedParents, nil +} + +func getCleanPathParts(fldPath *field.Path) []string { + cleanPathParts := []string{} + for _, part := range strings.Split(fldPath.String(), ".") { + if strings.HasPrefix(part, "properties[") { + part = strings.TrimPrefix(strings.TrimSuffix(part, "]"), "properties[") + } + cleanPathParts = append(cleanPathParts, part) + } + return cleanPathParts +} + +func isUnboundedCardinality(schema *apiextensions.JSONSchemaProps) bool { + switch schema.Type { + case "object": + return schema.AdditionalProperties != nil && schema.MaxProperties == nil + case "array": + return schema.MaxItems == nil + default: + return false + } +} + +func inspectSchema(schema *apiextensions.JSONSchemaProps, simpleLocation *field.Path, isRoot bool) ([]string, []string, error) { + if schema.XValidations == nil { + return nil, nil, nil + } + + typeInfo, err := getDeclType(schema, isRoot) + if err != nil { + return nil, nil, err + } + + var infos, warnings []string + + switch schema.Type { + case "string": + switch { + case len(schema.Enum) > 0: + // Enums estimated lengths are based on the longest item in the list. + // Not a concern for cost estimation generally. + case schema.MaxLength == nil: + warnings = append(warnings, fmt.Sprintf("%s: String has unbounded maxLength. It will be considered to have length %d. Consider adding a maxLength constraint to reduce the raw rule cost.", simpleLocation.String(), typeInfo.MaxElements)) + default: + infos = append(infos, fmt.Sprintf("%s: String has maxLength of %d.", simpleLocation.String(), *schema.MaxLength)) + } + case "array": + switch { + case schema.MaxItems == nil: + warnings = append(warnings, fmt.Sprintf("%s: Array has unbounded maxItems. It will be considered to have %d items. 
Consider adding a maxItems constraint to reduce the raw rule cost.", simpleLocation.String(), typeInfo.MaxElements)) + default: + infos = append(infos, fmt.Sprintf("%s: Array has maxItems of %d.", simpleLocation.String(), *schema.MaxItems)) + } + } + + return infos, warnings, nil +} + +func getDeclType(schema *apiextensions.JSONSchemaProps, isRoot bool) (*apiservercel.DeclType, error) { + structural, err := structuralschema.NewStructural(schema) + if err != nil { + return nil, err + } + declType := model.SchemaDeclType(structural, isRoot) + if declType == nil { + return nil, fmt.Errorf("unable to convert structural schema to CEL declarations") + } + return declType, nil +} diff --git a/hack/tools/vendor/github.com/openshift/crd-schema-checker/pkg/manifestcomparators/simple_tester.go b/hack/tools/vendor/github.com/openshift/crd-schema-checker/pkg/manifestcomparators/simple_tester.go index 0101974fbc..5b7fe97406 100644 --- a/hack/tools/vendor/github.com/openshift/crd-schema-checker/pkg/manifestcomparators/simple_tester.go +++ b/hack/tools/vendor/github.com/openshift/crd-schema-checker/pkg/manifestcomparators/simple_tester.go @@ -6,6 +6,7 @@ import ( "os" "path/filepath" "reflect" + "sort" "testing" "github.com/openshift/crd-schema-checker/pkg/resourceread" @@ -248,6 +249,14 @@ func (tc *ComparatorTest) Test(t *testing.T, actualResults []ComparisonResults, if err != nil { t.Error(err) } + + sort.Strings(expected.Errors) + sort.Strings(actual.Errors) + sort.Strings(expected.Warnings) + sort.Strings(actual.Warnings) + sort.Strings(expected.Infos) + sort.Strings(actual.Infos) + noErrorsAsExpected := len(expected.Errors) == 0 && len(actual.Errors) == 0 if !noErrorsAsExpected && !reflect.DeepEqual(expected.Errors, actual.Errors) { t.Errorf("mismatched errors for expectedResults[%v]: expected\n%v\n, got\n%v\n", expected.Name, string(expectedBytes), string(actualBytes)) diff --git a/hack/tools/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go b/hack/tools/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go index cf481708ab..c639af6f37 100644 --- a/hack/tools/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go +++ b/hack/tools/vendor/github.com/polyfloyd/go-errorlint/errorlint/allowed.go @@ -53,6 +53,7 @@ func setDefaultAllowedErrors() { {Err: "io.EOF", Fun: "(*io.SectionReader).Read"}, {Err: "io.EOF", Fun: "(*io.SectionReader).ReadAt"}, {Err: "io.ErrClosedPipe", Fun: "(*io.PipeWriter).Write"}, + {Err: "io.EOF", Fun: "io.ReadAtLeast"}, {Err: "io.ErrShortBuffer", Fun: "io.ReadAtLeast"}, {Err: "io.ErrUnexpectedEOF", Fun: "io.ReadAtLeast"}, {Err: "io.EOF", Fun: "io.ReadFull"}, @@ -84,6 +85,7 @@ func setDefaultAllowedErrors() { {Err: "context.Canceled", Fun: "(context.Context).Err"}, // pkg/encoding/json {Err: "io.EOF", Fun: "(*encoding/json.Decoder).Decode"}, + {Err: "io.EOF", Fun: "(*encoding/json.Decoder).Token"}, // pkg/encoding/csv {Err: "io.EOF", Fun: "(*encoding/csv.Reader).Read"}, // pkg/mime/multipart diff --git a/hack/tools/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go b/hack/tools/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go index 9ac465c650..ed3dd0dc65 100644 --- a/hack/tools/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go +++ b/hack/tools/vendor/github.com/polyfloyd/go-errorlint/errorlint/lint.go @@ -82,7 +82,7 @@ func LintFmtErrorfCalls(fset *token.FileSet, info types.Info, multipleWraps bool argIndex++ } - if verb.format == "w" { + if verb.format == "w" || verb.format == "T" { continue } if argIndex-1 >= len(args) { diff 
--git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go new file mode 100644 index 0000000000..f4c92913a5 --- /dev/null +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go @@ -0,0 +1,40 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package collectors provides implementations of prometheus.Collector to +// conveniently collect process and Go-related metrics. +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. +func NewBuildInfoCollector() prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewBuildInfoCollector() +} diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go new file mode 100644 index 0000000000..d5a7279fb9 --- /dev/null +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go @@ -0,0 +1,119 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collectors + +import ( + "database/sql" + + "github.com/prometheus/client_golang/prometheus" +) + +type dbStatsCollector struct { + db *sql.DB + + maxOpenConnections *prometheus.Desc + + openConnections *prometheus.Desc + inUseConnections *prometheus.Desc + idleConnections *prometheus.Desc + + waitCount *prometheus.Desc + waitDuration *prometheus.Desc + maxIdleClosed *prometheus.Desc + maxIdleTimeClosed *prometheus.Desc + maxLifetimeClosed *prometheus.Desc +} + +// NewDBStatsCollector returns a collector that exports metrics about the given *sql.DB. +// See https://golang.org/pkg/database/sql/#DBStats for more information on stats. +func NewDBStatsCollector(db *sql.DB, dbName string) prometheus.Collector { + fqName := func(name string) string { + return "go_sql_" + name + } + return &dbStatsCollector{ + db: db, + maxOpenConnections: prometheus.NewDesc( + fqName("max_open_connections"), + "Maximum number of open connections to the database.", + nil, prometheus.Labels{"db_name": dbName}, + ), + openConnections: prometheus.NewDesc( + fqName("open_connections"), + "The number of established connections both in use and idle.", + nil, prometheus.Labels{"db_name": dbName}, + ), + inUseConnections: prometheus.NewDesc( + fqName("in_use_connections"), + "The number of connections currently in use.", + nil, prometheus.Labels{"db_name": dbName}, + ), + idleConnections: prometheus.NewDesc( + fqName("idle_connections"), + "The number of idle connections.", + nil, prometheus.Labels{"db_name": dbName}, + ), + waitCount: prometheus.NewDesc( + fqName("wait_count_total"), + "The total number of connections waited for.", + nil, prometheus.Labels{"db_name": dbName}, + ), + waitDuration: prometheus.NewDesc( + fqName("wait_duration_seconds_total"), + "The total time blocked waiting for a new connection.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxIdleClosed: prometheus.NewDesc( + fqName("max_idle_closed_total"), + "The total number of connections closed due to SetMaxIdleConns.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxIdleTimeClosed: prometheus.NewDesc( + fqName("max_idle_time_closed_total"), + "The total number of connections closed due to SetConnMaxIdleTime.", + nil, prometheus.Labels{"db_name": dbName}, + ), + maxLifetimeClosed: prometheus.NewDesc( + fqName("max_lifetime_closed_total"), + "The total number of connections closed due to SetConnMaxLifetime.", + nil, prometheus.Labels{"db_name": dbName}, + ), + } +} + +// Describe implements Collector. +func (c *dbStatsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.maxOpenConnections + ch <- c.openConnections + ch <- c.inUseConnections + ch <- c.idleConnections + ch <- c.waitCount + ch <- c.waitDuration + ch <- c.maxIdleClosed + ch <- c.maxLifetimeClosed + ch <- c.maxIdleTimeClosed +} + +// Collect implements Collector. 
+func (c *dbStatsCollector) Collect(ch chan<- prometheus.Metric) { + stats := c.db.Stats() + ch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections)) + ch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections)) + ch <- prometheus.MustNewConstMetric(c.inUseConnections, prometheus.GaugeValue, float64(stats.InUse)) + ch <- prometheus.MustNewConstMetric(c.idleConnections, prometheus.GaugeValue, float64(stats.Idle)) + ch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount)) + ch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, stats.WaitDuration.Seconds()) + ch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed)) + ch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed)) + ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed)) +} diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go new file mode 100644 index 0000000000..b22d862fbc --- /dev/null +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go @@ -0,0 +1,57 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewExpvarCollector returns a newly allocated expvar Collector. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototyping, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. 
+// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*prometheus.Desc) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewExpvarCollector(exports) +} diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go new file mode 100644 index 0000000000..effc57840a --- /dev/null +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go @@ -0,0 +1,49 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.17 +// +build !go1.17 + +package collectors + +import "github.com/prometheus/client_golang/prometheus" + +// NewGoCollector returns a collector that exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This requires to “stop the world”, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world is the more relevant the more +// frequently metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. 
+//
+// NOTE: The problem is solved in Go 1.15, see
+// https://github.com/golang/go/issues/19812 for the related Go issue.
+func NewGoCollector() prometheus.Collector {
+	return prometheus.NewGoCollector()
+}
diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go
new file mode 100644
index 0000000000..bcfa4fa10e
--- /dev/null
+++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go
@@ -0,0 +1,165 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package collectors
+
+import (
+	"regexp"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/internal"
+)
+
+var (
+	// MetricsAll allows all the metrics to be collected from Go runtime.
+	MetricsAll = GoRuntimeMetricsRule{regexp.MustCompile("/.*")}
+	// MetricsGC allows only GC metrics to be collected from Go runtime.
+	// e.g. go_gc_cycles_automatic_gc_cycles_total
+	// NOTE: This does not include the new class of "/cpu/classes/gc/..." metrics.
+	// Use a custom metric rule to access those.
+	MetricsGC = GoRuntimeMetricsRule{regexp.MustCompile(`^/gc/.*`)}
+	// MetricsMemory allows only memory metrics to be collected from Go runtime.
+	// e.g. go_memory_classes_heap_free_bytes
+	MetricsMemory = GoRuntimeMetricsRule{regexp.MustCompile(`^/memory/.*`)}
+	// MetricsScheduler allows only scheduler metrics to be collected from Go runtime.
+	// e.g. go_sched_goroutines_goroutines
+	MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)}
+)
+
+// WithGoCollectorMemStatsMetricsDisabled disables metrics that are gathered in the
+// runtime.MemStats structure, such as:
+//
+//	go_memstats_alloc_bytes
+//	go_memstats_alloc_bytes_total
+//	go_memstats_sys_bytes
+//	go_memstats_lookups_total
+//	go_memstats_mallocs_total
+//	go_memstats_frees_total
+//	go_memstats_heap_alloc_bytes
+//	go_memstats_heap_sys_bytes
+//	go_memstats_heap_idle_bytes
+//	go_memstats_heap_inuse_bytes
+//	go_memstats_heap_released_bytes
+//	go_memstats_heap_objects
+//	go_memstats_stack_inuse_bytes
+//	go_memstats_stack_sys_bytes
+//	go_memstats_mspan_inuse_bytes
+//	go_memstats_mspan_sys_bytes
+//	go_memstats_mcache_inuse_bytes
+//	go_memstats_mcache_sys_bytes
+//	go_memstats_buck_hash_sys_bytes
+//	go_memstats_gc_sys_bytes
+//	go_memstats_other_sys_bytes
+//	go_memstats_next_gc_bytes
+//
+// i.e. the metrics known from client_golang before v1.12.0.
+//
+// NOTE(bwplotka): The above represents runtime.MemStats statistics, but they are
+// actually implemented using the new runtime/metrics package (except the skipped
+// go_memstats_gc_cpu_fraction
+// -- see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 for explanation).
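+//
+// An illustrative sketch (not from the upstream file) that drops the
+// MemStats-style metrics and enables only the GC group instead:
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(collectors.NewGoCollector(
+//		collectors.WithGoCollectorMemStatsMetricsDisabled(),
+//		collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsGC),
+//	))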
+//
+// Some users might want to disable this at the collector level (although you can use scrape relabelling on Prometheus),
+// because similar metrics can now be obtained using WithGoCollectorRuntimeMetrics. Note that the semantics of the new
+// metrics might be different, plus the names can change over time with different Go versions.
+//
+// NOTE(bwplotka): Changing metric names can be tedious at times as the alerts, recording rules and dashboards have to be adjusted.
+// The old metrics are also very useful, with many guides and books written about how to interpret them.
+//
+// As a result, our recommendation would be to stick with MemStats-like metrics and enable other runtime/metrics if you are interested
+// in advanced insights Go provides. See ExampleGoCollector_WithAdvancedGoMetrics.
+func WithGoCollectorMemStatsMetricsDisabled() func(options *internal.GoCollectorOptions) {
+	return func(o *internal.GoCollectorOptions) {
+		o.DisableMemStatsLikeMetrics = true
+	}
+}
+
+// GoRuntimeMetricsRule allows enabling and configuring a particular group of runtime/metrics.
+// TODO(bwplotka): Consider adding ability to adjust buckets.
+type GoRuntimeMetricsRule struct {
+	// Matcher is an RE2 expression that will match the runtime/metrics from https://golang.org/src/runtime/metrics/description.go
+	// Use `regexp.MustCompile` or `regexp.Compile` to create this field.
+	Matcher *regexp.Regexp
+}
+
+// WithGoCollectorRuntimeMetrics allows enabling and configuring a particular group of runtime/metrics.
+// See the list of metrics at https://golang.org/src/runtime/metrics/description.go (pick the Go version you use there!).
+// You can use this option in a repeated manner, which will add new rules. The order of rules is important: the last rule
+// that matches a particular metric is applied.
+func WithGoCollectorRuntimeMetrics(rules ...GoRuntimeMetricsRule) func(options *internal.GoCollectorOptions) {
+	rs := make([]internal.GoCollectorRule, len(rules))
+	for i, r := range rules {
+		rs[i] = internal.GoCollectorRule{
+			Matcher: r.Matcher,
+		}
+	}
+
+	return func(o *internal.GoCollectorOptions) {
+		o.RuntimeMetricRules = append(o.RuntimeMetricRules, rs...)
+	}
+}
+
+// WithoutGoCollectorRuntimeMetrics allows disabling a group of runtime/metrics that you might have added in WithGoCollectorRuntimeMetrics.
+// It behaves similarly to WithGoCollectorRuntimeMetrics, just with deny-list semantics.
+func WithoutGoCollectorRuntimeMetrics(matchers ...*regexp.Regexp) func(options *internal.GoCollectorOptions) {
+	rs := make([]internal.GoCollectorRule, len(matchers))
+	for i, m := range matchers {
+		rs[i] = internal.GoCollectorRule{
+			Matcher: m,
+			Deny:    true,
+		}
+	}
+
+	return func(o *internal.GoCollectorOptions) {
+		o.RuntimeMetricRules = append(o.RuntimeMetricRules, rs...)
+	}
+}
+
+// GoCollectionOption represents a Go collection option flag.
+// Deprecated.
+type GoCollectionOption uint32
+
+const (
+	// GoRuntimeMemStatsCollection represents the metrics represented by the runtime.MemStats structure.
+	//
+	// Deprecated: Use the WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector.
+	GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota
+	// GoRuntimeMetricsCollection is the new set of metrics represented by the runtime/metrics package.
+	//
+	// Deprecated: Use the WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")})
+	// function to enable those metrics in the collector.
+	GoRuntimeMetricsCollection
+)
+
+// WithGoCollections allows enabling different collections for the Go collector on top of base metrics.
+//
+// Deprecated: Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics.
+func WithGoCollections(flags GoCollectionOption) func(options *internal.GoCollectorOptions) {
+	return func(options *internal.GoCollectorOptions) {
+		if flags&GoRuntimeMemStatsCollection == 0 {
+			WithGoCollectorMemStatsMetricsDisabled()(options)
+		}
+
+		if flags&GoRuntimeMetricsCollection != 0 {
+			WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")})(options)
+		}
+	}
+}
+
+// NewGoCollector returns a collector that exports metrics about the current Go
+// process using debug.GCStats (base metrics) and runtime/metrics (both in MemStats style and new ones).
+func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) prometheus.Collector {
+	//nolint:staticcheck // Ignore SA1019 until v2.
+	return prometheus.NewGoCollector(opts...)
+}
diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go
new file mode 100644
index 0000000000..24558f50a7
--- /dev/null
+++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go
@@ -0,0 +1,56 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collectors
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+	// PidFn returns the PID of the process the collector collects metrics
+	// for. It is called upon each collection. By default, the PID of the
+	// current process is used, as determined at construction time by
+	// calling os.Getpid().
+	PidFn func() (int, error)
+	// If non-empty, each of the collected metrics is prefixed by the
+	// provided string and an underscore ("_").
+	Namespace string
+	// If true, any error encountered during collection is reported as an
+	// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+	// and the collected metrics will be incomplete. (Possibly, no metrics
+	// will be collected at all.) While that's usually not desired, it is
+	// appropriate for the common "mix-in" of process metrics, where process
+	// metrics are nice to have, but failing to collect them should not
+	// disrupt the collection of the remaining metrics.
+	ReportErrors bool
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including CPU, memory and file descriptor usage as well as
+// the process start time. The detailed behavior is defined by the provided
+// ProcessCollectorOpts.
The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. +// +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. +func NewProcessCollector(opts ProcessCollectorOpts) prometheus.Collector { + //nolint:staticcheck // Ignore SA1019 until v2. + return prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{ + PidFn: opts.PidFn, + Namespace: opts.Namespace, + ReportErrors: opts.ReportErrors, + }) +} diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go new file mode 100644 index 0000000000..9819917b83 --- /dev/null +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -0,0 +1,374 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + if r.observeWriteHeader != nil && !r.wroteHeader { + // Only call observeWriteHeader for the 1st time. It's a bug if + // WriteHeader is called more than once, but we want to protect + // against it here. Note that we still delegate the WriteHeader + // to the original ResponseWriter to not mask the bug from it. + r.observeWriteHeader(code) + } + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type ( + closeNotifierDelegator struct{ *responseWriterDelegator } + flusherDelegator struct{ *responseWriterDelegator } + hijackerDelegator struct{ *responseWriterDelegator } + readerFromDelegator struct{ *responseWriterDelegator } + pusherDelegator struct{ *responseWriterDelegator } +) + +func (d closeNotifierDelegator) CloseNotify() <-chan bool { + //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. 
+ return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (d flusherDelegator) Flush() { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + d.ResponseWriter.(http.Flusher).Flush() +} + +func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} + +func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} + +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. + pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, readerFromDelegator{d}, hijackerDelegator{d}} + } + 
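+	// The indices used above and below follow the flag bitmask: for example,
+	// a ResponseWriter that is both an http.Flusher (2) and an http.Hijacker
+	// (4) yields id = 6, which selects the combined Flusher+Hijacker delegator.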
pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 + return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} + } + 
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 0000000000..09b8d2fbea --- /dev/null +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,408 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. 
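+//
+// A typical use is a one-liner (an illustrative sketch; the listen address
+// is arbitrary):
+//
+//	http.Handle("/metrics", promhttp.Handler())
+//	log.Fatal(http.ListenAndServe(":8080", nil))
+//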
+// With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or that log
+// errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
+//
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
+package promhttp
+
+import (
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/prometheus/common/expfmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	contentTypeHeader     = "Content-Type"
+	contentEncodingHeader = "Content-Encoding"
+	acceptEncodingHeader  = "Accept-Encoding"
+	processStartTimeHeader = "Process-Start-Time-Unix"
+)
+
+var gzipPool = sync.Pool{
+	New: func() interface{} {
+		return gzip.NewWriter(nil)
+	},
+}
+
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
+//
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls to the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
+//
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
+func Handler() http.Handler {
+	return InstrumentMetricHandler(
+		prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+	)
+}
+
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as is used by the Handler function.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+	return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts)
+}
+
+// HandlerForTransactional is like HandlerFor, but it uses a transactional
+// gatherer, which can safely change the returned *dto.MetricFamily in place
+// before a call to `Gather` and after the call to that `Gather`'s `done`
+// function.
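+//
+// An illustrative sketch for a custom registry with non-default HandlerOpts
+// (myCollector stands in for any collector):
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(myCollector)
+//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
+//		ErrorHandling: promhttp.ContinueOnError,
+//	}))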
+func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler { + var ( + inFlightSem chan struct{} + errCnt = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_errors_total", + Help: "Total number of internal errors encountered by the promhttp metric handler.", + }, + []string{"cause"}, + ) + ) + + if opts.MaxRequestsInFlight > 0 { + inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) + } + if opts.Registry != nil { + // Initialize all possibilities that can occur below. + errCnt.WithLabelValues("gathering") + errCnt.WithLabelValues("encoding") + if err := opts.Registry.Register(errCnt); err != nil { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { + errCnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + } + + h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { + if !opts.ProcessStartTime.IsZero() { + rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10)) + } + if inFlightSem != nil { + select { + case inFlightSem <- struct{}{}: // All good, carry on. + defer func() { <-inFlightSem }() + default: + http.Error(rsp, fmt.Sprintf( + "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, + ), http.StatusServiceUnavailable) + return + } + } + mfs, done, err := reg.Gather() + defer done() + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error gathering metrics:", err) + } + errCnt.WithLabelValues("gathering").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + if len(mfs) == 0 { + // Still report the error if no metrics have been gathered. + httpError(rsp, err) + return + } + case HTTPErrorOnError: + httpError(rsp, err) + return + } + } + + var contentType expfmt.Format + if opts.EnableOpenMetrics { + contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) + } else { + contentType = expfmt.Negotiate(req.Header) + } + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if !opts.DisableCompression && gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + + // handleError handles the error according to opts.ErrorHandling + // and returns true if we have to abort after the handling. + handleError := func(err error) bool { + if err == nil { + return false + } + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding and sending metric family:", err) + } + errCnt.WithLabelValues("encoding").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case HTTPErrorOnError: + // We cannot really send an HTTP error at this + // point because we most likely have written + // something to rsp already. But at least we can + // stop sending. + return true + } + // Do nothing in all other cases, including ContinueOnError. + return false + } + + for _, mf := range mfs { + if handleError(enc.Encode(mf)) { + return + } + } + if closer, ok := enc.(expfmt.Closer); ok { + // This in particular takes care of the final "# EOF\n" line for OpenMetrics. 
+ if handleError(closer.Close()) { + return + } + } + }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + cnt.WithLabelValues("503") + if err := reg.Register(cnt); err != nil { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", + }) + if err := reg.Register(gge); err != nil { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. Note that HTTP + // errors cannot be served anymore once the beginning of a regular + // payload has been sent. Thus, in the (unlikely) case that encoding the + // payload into the negotiated wire format fails, serving the response + // will simply be aborted. Set an ErrorLog in HandlerOpts to detect + // those errors. + HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. 
In this case, it is highly
+	// recommended to provide other means of detecting errors: By setting an
+	// ErrorLog in HandlerOpts, the errors are logged. By providing a
+	// Registry in HandlerOpts, the exposed metrics include an error counter
+	// "promhttp_metric_handler_errors_total", which can be used for
+	// alerts.
+	ContinueOnError
+	// Panic upon the first error encountered (useful for "crash only" apps).
+	PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy for custom loggers to implement, if they don't do so already anyway.
+type Logger interface {
+	Println(v ...interface{})
+}
+
+// HandlerOpts specifies options for how to serve metrics via an http.Handler.
+// The zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+	// ErrorLog specifies an optional Logger for errors collecting and
+	// serving metrics. If nil, errors are not logged at all. Note that the
+	// type of a reported error is often prometheus.MultiError, which
+	// formats into a multi-line error string. If you want to avoid the
+	// latter, create a Logger implementation that detects a
+	// prometheus.MultiError and formats the contained errors into one line.
+	ErrorLog Logger
+	// ErrorHandling defines how errors are handled. Note that errors are
+	// logged regardless of the configured ErrorHandling, provided ErrorLog
+	// is not nil.
+	ErrorHandling HandlerErrorHandling
+	// If Registry is not nil, it is used to register a metric
+	// "promhttp_metric_handler_errors_total", partitioned by "cause". A
+	// failed registration causes a panic. Note that this error counter is
+	// different from the instrumentation you get from the various
+	// InstrumentHandler... helpers. It counts errors that don't necessarily
+	// result in a non-2xx HTTP status code. There are two typical cases:
+	// (1) Encoding errors that only happen after streaming of the HTTP body
+	// has already started (and the status code 200 has been sent). This
+	// should only happen with custom collectors. (2) Collection errors with
+	// no effect on the HTTP status code because ErrorHandling is set to
+	// ContinueOnError.
+	Registry prometheus.Registerer
+	// If DisableCompression is true, the handler will never compress the
+	// response, even if requested by the client.
+	DisableCompression bool
+	// The number of concurrent HTTP requests is limited to
+	// MaxRequestsInFlight. Additional requests are responded to with 503
+	// Service Unavailable and a suitable message in the body. If
+	// MaxRequestsInFlight is 0 or negative, no limit is applied.
+	MaxRequestsInFlight int
+	// If handling a request takes longer than Timeout, it is responded to
+	// with 503 Service Unavailable and a suitable message. No timeout is
+	// applied if Timeout is 0 or negative. Note that with the current
+	// implementation, reaching the timeout simply ends the HTTP request as
+	// described above (and even that only if sending of the body hasn't
+	// started yet), while the bulk work of gathering all the metrics keeps
+	// running in the background (with the eventual result being thrown
+	// away). Until the implementation is improved, it is recommended to
+	// implement a separate timeout in potentially slow Collectors.
+	Timeout time.Duration
+	// If true, the experimental OpenMetrics encoding is added to the
+	// possible options during content negotiation. Note that Prometheus
+	// 2.5.0+ will negotiate OpenMetrics as the first priority. OpenMetrics is
+	// the only way to transmit exemplars. However, the move to OpenMetrics
+	// is not completely transparent. Most notably, the values of "quantile"
+	// labels of Summaries and "le" labels of Histograms are formatted with
+	// a trailing ".0" if they would otherwise look like integer numbers
+	// (which changes the identity of the resulting series on the Prometheus
+	// server).
+	EnableOpenMetrics bool
+	// ProcessStartTime allows setting the process start time value that will be
+	// exposed with the "Process-Start-Time-Unix" response header along with the
+	// metrics payload. This allows callers to perform efficient transformations
+	// to cumulative counters (e.g. OpenTelemetry) or, more generally, _created
+	// timestamp estimation per scrape target.
+	// NOTE: This feature is experimental and not covered by the OpenMetrics or
+	// Prometheus exposition format.
+	ProcessStartTime time.Time
+}
+
+// gzipAccepted returns whether the client will accept gzip-encoded content.
+func gzipAccepted(header http.Header) bool {
+	a := header.Get(acceptEncodingHeader)
+	parts := strings.Split(a, ",")
+	for _, part := range parts {
+		part = strings.TrimSpace(part)
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+			return true
+		}
+	}
+	return false
+}
+
+// httpError removes any content-encoding header and then calls http.Error with
+// the provided error and http.StatusInternalServerError. Error contents are
+// supposed to be uncompressed plain text. Same as with a plain http.Error, this
+// must not be called if the header or any payload has already been sent.
+func httpError(rsp http.ResponseWriter, err error) {
+	rsp.Header().Del(contentEncodingHeader)
+	http.Error(
+		rsp,
+		"An error has occurred while serving metrics:\n\n"+err.Error(),
+		http.StatusInternalServerError,
+	)
+}
diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 0000000000..d3482c40ca
--- /dev/null
+++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,249 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/http/httptrace"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
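+//
+// An illustrative sketch (not part of the upstream file): a pass-through
+// round tripper built from an ordinary function:
+//
+//	rt := promhttp.RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+//		// Delegate to the default transport; instrumentation could be added here.
+//		return http.DefaultTransport.RoundTrip(r)
+//	})
+//	client := &http.Client{Transport: rt}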
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+	return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+	return func(r *http.Request) (*http.Response, error) {
+		gauge.Inc()
+		defer gauge.Dec()
+		return next.RoundTrip(r)
+	}
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. For the "method" label a predefined default label value set
+// is used to filter given values. Values besides predefined values will count
+// as the `unknown` method. `WithExtraMethods` can be used to add more
+// methods to the set. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// Use with WithExemplarFromContext to instrument the exemplars on the request counter.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
+	rtOpts := defaultOptions()
+	for _, o := range opts {
+		o.apply(rtOpts)
+	}
+
+	// Curry the counter with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
+
+	return func(r *http.Request) (*http.Response, error) {
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+			for label, resolve := range rtOpts.extraLabelsFromCtx {
+				l[label] = resolve(resp.Request.Context())
+			}
+			addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
+		}
+		return resp, err
+	}
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec. The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. For the "method" label a predefined
+// default label value set is used to filter given values. Values besides
+// predefined values will count as the `unknown` method. `WithExtraMethods`
+// can be used to add more methods to the set. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
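+//
+// A client-side sketch (illustrative; the metric name and buckets are
+// arbitrary):
+//
+//	durs := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+//		Name:    "client_request_duration_seconds",
+//		Help:    "Duration of outgoing HTTP requests.",
+//		Buckets: prometheus.DefBuckets,
+//	}, []string{"method"})
+//	prometheus.MustRegister(durs)
+//	client := &http.Client{
+//		Transport: promhttp.InstrumentRoundTripperDuration(durs, http.DefaultTransport),
+//	}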
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
+	rtOpts := defaultOptions()
+	for _, o := range opts {
+		o.apply(rtOpts)
+	}
+
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
+
+	return func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+			for label, resolve := range rtOpts.extraLabelsFromCtx {
+				l[label] = resolve(resp.Request.Context())
+			}
+			observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
+		}
+		return resp, err
+	}
+}
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately bucketed Histograms, or to implement custom
+// instance labels on a per-function basis.
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+ are those times
+// guaranteed to never be negative. (Earlier Go versions do not use a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
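+//
+// A minimal sketch (illustrative; dnsLatency stands in for a registered
+// HistogramVec with an "event" label):
+//
+//	trace := &promhttp.InstrumentTrace{
+//		DNSStart: func(t float64) { dnsLatency.WithLabelValues("dns_start").Observe(t) },
+//		DNSDone:  func(t float64) { dnsLatency.WithLabelValues("dns_done").Observe(t) },
+//	}
+//	client := &http.Client{
+//		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
+//	}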
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) + + return next.RoundTrip(r) + } +} diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go new file mode 100644 index 0000000000..356edb7868 --- /dev/null +++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -0,0 +1,576 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. 
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver], +// which falls back to [prometheus.Observer.Observe] if no labels are provided. +func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) { + if labels == nil { + obs.Observe(val) + return + } + obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels) +} + +// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar], +// which falls back to [prometheus.Counter.Add] if no labels are provided. +func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) { + if labels == nil { + obs.Add(val) + return + } + obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels) +} + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have valid metric and label names and must have zero, +// one, or two non-const non-curried labels. For those, the only allowed label +// names are "code" and "method". The function panics otherwise. For the "method" +// label a predefined default label value set is used to filter given values. +// Values besides predefined values will count as `unknown` method. +// `WithExtraMethods` can be used to add more methods to the set. The Observe +// method of the Observer in the ObserverVec is called with the request duration +// in seconds. Partitioning happens by HTTP status code and/or HTTP method if +// the respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { + hOpts := defaultOptions() + for _, o := range opts { + o.apply(hOpts) + } + + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) + + if code { + return func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) + } + } + + return func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) 
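+		// The status passed above is 0 on purpose: this branch runs only when
+		// the ObserverVec has no "code" label, so the status code is never
+		// used and no delegator is needed to capture it.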
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
+	}
+}
+
+// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
+// to observe the request result with the provided CounterVec. The CounterVec
+// must have valid metric and label names and must have zero, one, or two
+// non-const non-curried labels. For those, the only allowed label names are
+// "code" and "method". The function panics otherwise. For the "method"
+// label a predefined default label value set is used to filter given values.
+// Values besides predefined values will count as the `unknown` method.
+// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the
+// CounterVec happens by HTTP status code and/or HTTP method if the respective
+// instance label names are present in the CounterVec. For unpartitioned
+// counting, use a CounterVec with zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
+	hOpts := defaultOptions()
+	for _, o := range opts {
+		o.apply(hOpts)
+	}
+
+	// Curry the counter with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
+
+	if code {
+		return func(w http.ResponseWriter, r *http.Request) {
+			d := newDelegator(w, nil)
+			next.ServeHTTP(d, r)
+
+			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
+		}
+	}
+
+	return func(w http.ResponseWriter, r *http.Request) {
+		next.ServeHTTP(w, r)
+
+		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
+	}
+}
+
+// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
+// http.Handler to observe with the provided ObserverVec the request duration
+// until the response headers are written. The ObserverVec must have valid
+// metric and label names and must have zero, one, or two non-const non-curried
+// labels. For those, the only allowed label names are "code" and "method". The
+// function panics otherwise. For the "method" label a predefined default label
+// value set is used to filter given values. Values besides predefined values
+// will count as the `unknown` method. `WithExtraMethods` can be used to add more
+// methods to the set. The Observe method of the Observer in the
+// ObserverVec is called with the request duration in seconds. Partitioning
+// happens by HTTP status code and/or HTTP method if the respective instance
+// label names are present in the ObserverVec. For unpartitioned observations,
+// use an ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler panics before calling WriteHeader, no value is
+// reported.
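+//
+// A server-side chaining sketch (illustrative; ttwh and reqs stand in for a
+// registered ObserverVec and CounterVec, and myHandler for any http.Handler):
+//
+//	chain := promhttp.InstrumentHandlerCounter(reqs,
+//		promhttp.InstrumentHandlerTimeToWriteHeader(ttwh, myHandler))
+//	http.Handle("/", chain)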
+// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { + hOpts := defaultOptions() + for _, o := range opts { + o.apply(hOpts) + } + + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) + + return func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + l := labels(code, method, r.Method, status, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context())) + }) + next.ServeHTTP(d, r) + } +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have valid metric and label names and must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. For the "method" +// label a predefined default label value set is used to filter given values. +// Values besides predefined values will count as `unknown` method. +// `WithExtraMethods` can be used to add more methods to the set. The Observe +// method of the Observer in the ObserverVec is called with the request size in +// bytes. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { + hOpts := defaultOptions() + for _, o := range opts { + o.apply(hOpts) + } + + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) + + if code { + return func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) + } + } + + return func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + + l := labels(code, method, r.Method, 0, hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context())) + } +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. 
The +// ObserverVec must have valid metric and label names and must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. For the "method" +// label a predefined default label value set is used to filter given values. +// Values besides predefined values will count as `unknown` method. +// `WithExtraMethods` can be used to add more methods to the set. The Observe +// method of the Observer in the ObserverVec is called with the response size in +// bytes. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler { + hOpts := defaultOptions() + for _, o := range opts { + o.apply(hOpts) + } + + // Curry the observer with dynamic labels before checking the remaining labels. + code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels())) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...) + for label, resolve := range hOpts.extraLabelsFromCtx { + l[label] = resolve(r.Context()) + } + observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context())) + }) +} + +// checkLabels returns whether the provided Collector has a non-const, +// non-curried label named "code" and/or "method". It panics if the provided +// Collector does not have a Desc or has more than one Desc or its Desc is +// invalid. It also panics if the Collector has any non-const, non-curried +// labels that are not named "code" or "method". +func checkLabels(c prometheus.Collector) (code, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + m prometheus.Metric + pm dto.Metric + lvs []string + ) + + // Get the Desc from the Collector. + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + // Make sure the Collector has a valid Desc by registering it with a + // temporary registry. + prometheus.NewRegistry().MustRegister(c) + + // Create a ConstMetric with the Desc. Since we don't know how many + // variable labels there are, try for as long as it needs. + for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { + m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) + } + + // Write out the metric into a proto message and look at the labels. + // If the value is not the magicString, it is a constLabel, which doesn't interest us. + // If the label is curried, it doesn't interest us. + // In all other cases, only "code" or "method" is allowed. 
+ if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString || isLabelCurried(c, name) { + continue + } + switch name { + case "code": + code = true + case "method": + method = true + default: + panic("metric partitioned with non-supported labels") + } + } + return +} + +func isLabelCurried(c prometheus.Collector, label string) bool { + // This is even hackier than the label test above. + // We essentially try to curry again and see if it works. + // But for that, we need to type-convert to the two + // types we use here, ObserverVec or *CounterVec. + switch v := c.(type) { + case *prometheus.CounterVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + case prometheus.ObserverVec: + if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { + return false + } + default: + panic("unsupported metric vec type") + } + return true +} + +func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { + labels := prometheus.Labels{} + + if !(code || method) { + return labels + } + + if code { + labels["code"] = sanitizeCode(status) + } + if method { + labels["method"] = sanitizeMethod(reqMethod, extraMethods...) + } + + return labels +} + +func computeApproximateRequestSize(r *http.Request) int { + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. + + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + return s +} + +// If the wrapped http.Handler has a known method, it will be sanitized and returned. +// Otherwise, "unknown" will be returned. The known method list can be extended +// as needed by using extraMethods parameter. +func sanitizeMethod(m string, extraMethods ...string) string { + // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for + // the methods chosen as default. + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + case "TRACE", "trace": + return "trace" + case "PATCH", "patch": + return "patch" + default: + for _, method := range extraMethods { + if strings.EqualFold(m, method) { + return strings.ToLower(m) + } + } + return "unknown" + } +} + +// If the wrapped http.Handler has not set a status code, i.e. the value is +// currently 0, sanitizeCode will return 200, for consistency with behavior in +// the stdlib. 
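+//
+// Codes from 100 to 599 are returned in string form; anything outside that
+// range is reported as "unknown".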
+func sanitizeCode(s int) string {
+	// See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
+	switch s {
+	case 100:
+		return "100"
+	case 101:
+		return "101"
+
+	case 200, 0:
+		return "200"
+	case 201:
+		return "201"
+	case 202:
+		return "202"
+	case 203:
+		return "203"
+	case 204:
+		return "204"
+	case 205:
+		return "205"
+	case 206:
+		return "206"
+
+	case 300:
+		return "300"
+	case 301:
+		return "301"
+	case 302:
+		return "302"
+	case 304:
+		return "304"
+	case 305:
+		return "305"
+	case 307:
+		return "307"
+
+	case 400:
+		return "400"
+	case 401:
+		return "401"
+	case 402:
+		return "402"
+	case 403:
+		return "403"
+	case 404:
+		return "404"
+	case 405:
+		return "405"
+	case 406:
+		return "406"
+	case 407:
+		return "407"
+	case 408:
+		return "408"
+	case 409:
+		return "409"
+	case 410:
+		return "410"
+	case 411:
+		return "411"
+	case 412:
+		return "412"
+	case 413:
+		return "413"
+	case 414:
+		return "414"
+	case 415:
+		return "415"
+	case 416:
+		return "416"
+	case 417:
+		return "417"
+	case 418:
+		return "418"
+
+	case 500:
+		return "500"
+	case 501:
+		return "501"
+	case 502:
+		return "502"
+	case 503:
+		return "503"
+	case 504:
+		return "504"
+	case 505:
+		return "505"
+
+	case 428:
+		return "428"
+	case 429:
+		return "429"
+	case 431:
+		return "431"
+	case 511:
+		return "511"
+
+	default:
+		if s >= 100 && s <= 599 {
+			return strconv.Itoa(s)
+		}
+		return "unknown"
+	}
+}
diff --git a/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
new file mode 100644
index 0000000000..5d4383aa14
--- /dev/null
+++ b/hack/tools/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
@@ -0,0 +1,84 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"context"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// Option is used to configure either a handler (middleware) or a round tripper.
+type Option interface {
+	apply(*options)
+}
+
+// LabelValueFromCtx is used to compute a label value from the request context.
+// Context can be filled with values from request through middleware.
+type LabelValueFromCtx func(ctx context.Context) string
+
+// options stores options for both a handler and a round tripper.
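+// An options value is created by defaultOptions and then mutated by the
+// Option values passed to the instrumentation middleware.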
+type options struct {
+	extraMethods       []string
+	getExemplarFn      func(requestCtx context.Context) prometheus.Labels
+	extraLabelsFromCtx map[string]LabelValueFromCtx
+}
+
+func defaultOptions() *options {
+	return &options{
+		getExemplarFn:      func(ctx context.Context) prometheus.Labels { return nil },
+		extraLabelsFromCtx: map[string]LabelValueFromCtx{},
+	}
+}
+
+func (o *options) emptyDynamicLabels() prometheus.Labels {
+	labels := prometheus.Labels{}
+
+	for label := range o.extraLabelsFromCtx {
+		labels[label] = ""
+	}
+
+	return labels
+}
+
+type optionApplyFunc func(*options)
+
+func (o optionApplyFunc) apply(opt *options) { o(opt) }
+
+// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
+// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
+//
+// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage.
+func WithExtraMethods(methods ...string) Option {
+	return optionApplyFunc(func(o *options) {
+		o.extraMethods = methods
+	})
+}
+
+// WithExemplarFromContext allows injecting a function that extracts an exemplar
+// from the request context and attaches it to counter and histogram metrics.
+// If the function returns nil labels or the metric does not support exemplars,
+// no exemplar will be added (noop), but the metric will continue to observe/increment.
+func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
+	return optionApplyFunc(func(o *options) {
+		o.getExemplarFn = getExemplarFn
+	})
+}
+
+// WithLabelFromCtx registers a label for dynamic resolution with access to context.
+// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage.
+func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
+	return optionApplyFunc(func(o *options) {
+		o.extraLabelsFromCtx[name] = valueFn
+	})
+}
diff --git a/hack/tools/vendor/github.com/raeperd/recvcheck/.gitignore b/hack/tools/vendor/github.com/raeperd/recvcheck/.gitignore
index 035dc07e37..4212673324 100644
--- a/hack/tools/vendor/github.com/raeperd/recvcheck/.gitignore
+++ b/hack/tools/vendor/github.com/raeperd/recvcheck/.gitignore
@@ -1,2 +1,3 @@
+.idea/
 coverage.txt
-cmd/recvcheck/recvcheck
+/recvcheck
diff --git a/hack/tools/vendor/github.com/raeperd/recvcheck/.golangci.yml b/hack/tools/vendor/github.com/raeperd/recvcheck/.golangci.yml
new file mode 100644
index 0000000000..18692d50b8
--- /dev/null
+++ b/hack/tools/vendor/github.com/raeperd/recvcheck/.golangci.yml
@@ -0,0 +1,10 @@
+linters:
+  enable:
+    - recvcheck
+
+output:
+  show-stats: true
+  sort-results: true
+  sort-order:
+    - linter
+    - file
diff --git a/hack/tools/vendor/github.com/raeperd/recvcheck/Makefile b/hack/tools/vendor/github.com/raeperd/recvcheck/Makefile
index 45ca47d9b6..d78605a3bd 100644
--- a/hack/tools/vendor/github.com/raeperd/recvcheck/Makefile
+++ b/hack/tools/vendor/github.com/raeperd/recvcheck/Makefile
@@ -1,12 +1,14 @@
-all: build test lint
+.PHONY: clean lint test build
 
-download:
-	go mod download
+default: clean lint test build
 
-build: download
-	go build -C cmd/recvcheck
+clean:
+	rm -rf coverage.txt
 
-test:
+build:
+	go build -ldflags "-s -w" -trimpath ./cmd/recvcheck/
+
+test: clean
 	go test -race -coverprofile=coverage.txt .
 
 lint:
diff --git a/hack/tools/vendor/github.com/raeperd/recvcheck/README.md b/hack/tools/vendor/github.com/raeperd/recvcheck/README.md
index db84fe38e2..067aa3c580 100644
--- a/hack/tools/vendor/github.com/raeperd/recvcheck/README.md
+++ b/hack/tools/vendor/github.com/raeperd/recvcheck/README.md
@@ -1,12 +1,12 @@
 # recvcheck
-[![.github/workflows/build.yaml](https://github.com/raeperd/recvcheck/actions/workflows/build.yaml/badge.svg)](https://github.com/raeperd/recvcheck/actions/workflows/build.yaml) [![Go Report Card](https://goreportcard.com/badge/github.com/raeperd/recvcheck)](https://goreportcard.com/report/github.com/raeperd/recvcheck) [![codecov](https://codecov.io/gh/raeperd/recvcheck/graph/badge.svg?token=fPYgEHlq1e)](https://codecov.io/gh/raeperd/recvcheck)
+[![.github/workflows/build.yaml](https://github.com/raeperd/recvcheck/actions/workflows/build.yaml/badge.svg)](https://github.com/raeperd/recvcheck/actions/workflows/build.yaml) [![Go Report Card](https://goreportcard.com/badge/github.com/raeperd/recvcheck)](https://goreportcard.com/report/github.com/raeperd/recvcheck)
 
 Golang linter for check receiver type in method
-## Motivtation
+## Motivation
 From [Go Wiki: Go Code Review Comments - The Go Programming Language](https://go.dev/wiki/CodeReviewComments#receiver-type)
 > Don’t mix receiver types. Choose either pointers or struct types for all available method
 
-Following code from [Dave Chenney](https://dave.cheney.net/2015/11/18/wednesday-pop-quiz-spot-the-race) causes data race. Could you find it?
+Following code from [Dave Cheney](https://dave.cheney.net/2015/11/18/wednesday-pop-quiz-spot-the-race) causes data race. Could you find it?
 This linter does it for you.
 
 ```go
diff --git a/hack/tools/vendor/github.com/raeperd/recvcheck/analyzer.go b/hack/tools/vendor/github.com/raeperd/recvcheck/analyzer.go
index e80dfc5776..11fb38e72e 100644
--- a/hack/tools/vendor/github.com/raeperd/recvcheck/analyzer.go
+++ b/hack/tools/vendor/github.com/raeperd/recvcheck/analyzer.go
@@ -8,14 +8,58 @@ import (
 	"golang.org/x/tools/go/ast/inspector"
 )
 
-var Analyzer = &analysis.Analyzer{
-	Name:     "recvcheck",
-	Doc:      "checks for receiver type consistency",
-	Run:      run,
-	Requires: []*analysis.Analyzer{inspect.Analyzer},
+// NewAnalyzer returns a new analyzer to check for receiver type consistency.
+func NewAnalyzer(s Settings) *analysis.Analyzer {
+	a := &analyzer{
+		excluded: map[string]struct{}{},
+	}
+
+	if !s.DisableBuiltin {
+		// Default excludes for Marshal/Encode methods https://github.com/raeperd/recvcheck/issues/7
+		a.excluded = map[string]struct{}{
+			"*.MarshalText":   {},
+			"*.MarshalJSON":   {},
+			"*.MarshalYAML":   {},
+			"*.MarshalXML":    {},
+			"*.MarshalBinary": {},
+			"*.GobEncode":     {},
+		}
+	}
+
+	for _, exclusion := range s.Exclusions {
+		a.excluded[exclusion] = struct{}{}
+	}
+
+	return &analysis.Analyzer{
+		Name:     "recvcheck",
+		Doc:      "checks for receiver type consistency",
+		Run:      a.run,
+		Requires: []*analysis.Analyzer{inspect.Analyzer},
+	}
 }
 
-func run(pass *analysis.Pass) (any, error) {
+// Settings is the configuration for the analyzer.
+type Settings struct {
+	// DisableBuiltin if true, disables the built-in method excludes.
+	// Built-in excluded methods:
+	// - "MarshalText"
+	// - "MarshalJSON"
+	// - "MarshalYAML"
+	// - "MarshalXML"
+	// - "MarshalBinary"
+	// - "GobEncode"
+	DisableBuiltin bool
+
+	// Exclusions format is `struct_name.method_name` (ex: `Foo.MethodName`).
+	// A wildcard `*` can be used as a struct name (ex: `*.MethodName`).
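+	// Matching is exact and case-sensitive; the receiver's pointer star is
+	// not part of the name (use `Foo.Method`, not `*Foo.Method`).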
+ Exclusions []string +} + +type analyzer struct { + excluded map[string]struct{} +} + +func (r *analyzer) run(pass *analysis.Pass) (any, error) { inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) structs := map[string]*structType{} @@ -25,45 +69,67 @@ func run(pass *analysis.Pass) (any, error) { return } - var recv *ast.Ident - var isStar bool - switch recvType := funcDecl.Recv.List[0].Type.(type) { - case *ast.StarExpr: - isStar = true - if recv, ok = recvType.X.(*ast.Ident); !ok { - return - } - case *ast.Ident: - recv = recvType - default: + recv, isStar := recvTypeIdent(funcDecl.Recv.List[0].Type) + if recv == nil { return } - var st *structType - st, ok = structs[recv.Name] + if r.isExcluded(recv, funcDecl) { + return + } + + st, ok := structs[recv.Name] if !ok { - structs[recv.Name] = &structType{recv: recv.Name} + structs[recv.Name] = &structType{} st = structs[recv.Name] } if isStar { - st.numStarMethod++ + st.starUsed = true } else { - st.numTypeMethod++ + st.typeUsed = true } }) - for _, st := range structs { - if st.numStarMethod > 0 && st.numTypeMethod > 0 { - pass.Reportf(pass.Pkg.Scope().Lookup(st.recv).Pos(), "the methods of %q use pointer receiver and non-pointer receiver.", st.recv) + for recv, st := range structs { + if st.starUsed && st.typeUsed { + pass.Reportf(pass.Pkg.Scope().Lookup(recv).Pos(), "the methods of %q use pointer receiver and non-pointer receiver.", recv) } } return nil, nil } +func (r *analyzer) isExcluded(recv *ast.Ident, f *ast.FuncDecl) bool { + if f.Name == nil || f.Name.Name == "" { + return true + } + + _, found := r.excluded[recv.Name+"."+f.Name.Name] + if found { + return true + } + + _, found = r.excluded["*."+f.Name.Name] + + return found +} + type structType struct { - recv string - numStarMethod int - numTypeMethod int + starUsed bool + typeUsed bool +} + +func recvTypeIdent(r ast.Expr) (*ast.Ident, bool) { + switch n := r.(type) { + case *ast.StarExpr: + if i, ok := n.X.(*ast.Ident); ok { + return i, true + } + + case *ast.Ident: + return n, false + } + + return nil, false } diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore deleted file mode 100644 index 3c0af38259..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.vscode -.idea -*.swp -cmd/jv/jv diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md deleted file mode 100644 index b0d05054ca..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/README.md +++ /dev/null @@ -1,220 +0,0 @@ -# jsonschema v5.3.1 - -[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) -[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5) -[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema/v5)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema/v5) -[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=master)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml) -[![codecov](https://codecov.io/gh/santhosh-tekuri/jsonschema/branch/master/graph/badge.svg?token=JMVj1pFT2l)](https://codecov.io/gh/santhosh-tekuri/jsonschema) - -Package 
jsonschema provides json-schema compilation and validation. - -[Benchmarks](https://dev.to/vearutop/benchmarking-correctness-and-performance-of-go-json-schema-validators-3247) - -### Features: - - implements - [draft 2020-12](https://json-schema.org/specification-links.html#2020-12), - [draft 2019-09](https://json-schema.org/specification-links.html#draft-2019-09-formerly-known-as-draft-8), - [draft-7](https://json-schema.org/specification-links.html#draft-7), - [draft-6](https://json-schema.org/specification-links.html#draft-6), - [draft-4](https://json-schema.org/specification-links.html#draft-4) - - fully compliant with [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite), (excluding some optional) - - list of optional tests that are excluded can be found in schema_test.go(variable [skipTests](https://github.com/santhosh-tekuri/jsonschema/blob/master/schema_test.go#L24)) - - validates schemas against meta-schema - - full support of remote references - - support of recursive references between schemas - - detects infinite loop in schemas - - thread safe validation - - rich, intuitive hierarchial error messages with json-pointers to exact location - - supports output formats flag, basic and detailed - - supports enabling format and content Assertions in draft2019-09 or above - - change `Compiler.AssertFormat`, `Compiler.AssertContent` to `true` - - compiled schema can be introspected. easier to develop tools like generating go structs given schema - - supports user-defined keywords via [extensions](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-Extension) - - implements following formats (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedFormat)) - - date-time, date, time, duration, period (supports leap-second) - - uuid, hostname, email - - ip-address, ipv4, ipv6 - - uri, uriref, uri-template(limited validation) - - json-pointer, relative-json-pointer - - regex, format - - implements following contentEncoding (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent)) - - base64 - - implements following contentMediaType (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedContent)) - - application/json - - can load from files/http/https/[string](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-FromString)/[]byte/io.Reader (supports [user-defined](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5/#example-package-UserDefinedLoader)) - - -see examples in [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v5) - -The schema is compiled against the version specified in `$schema` property. -If "$schema" property is missing, it uses latest draft which currently implemented -by this library. - -You can force to use specific version, when `$schema` is missing, as follows: - -```go -compiler := jsonschema.NewCompiler() -compiler.Draft = jsonschema.Draft4 -``` - -This package supports loading json-schema from filePath and fileURL. - -To load json-schema from HTTPURL, add following import: - -```go -import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" -``` - -## Rich Errors - -The ValidationError returned by Validate method contains detailed context to understand why and where the error is. 
- -schema.json: -```json -{ - "$ref": "t.json#/definitions/employee" -} -``` - -t.json: -```json -{ - "definitions": { - "employee": { - "type": "string" - } - } -} -``` - -doc.json: -```json -1 -``` - -assuming `err` is the ValidationError returned when `doc.json` validated with `schema.json`, -```go -fmt.Printf("%#v\n", err) // using %#v prints errors hierarchy -``` -Prints: -``` -[I#] [S#] doesn't validate with file:///Users/santhosh/jsonschema/schema.json# - [I#] [S#/$ref] doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee' - [I#] [S#/definitions/employee/type] expected string, but got number -``` - -Here `I` stands for instance document and `S` stands for schema document. -The json-fragments that caused error in instance and schema documents are represented using json-pointer notation. -Nested causes are printed with indent. - -To output `err` in `flag` output format: -```go -b, _ := json.MarshalIndent(err.FlagOutput(), "", " ") -fmt.Println(string(b)) -``` -Prints: -```json -{ - "valid": false -} -``` -To output `err` in `basic` output format: -```go -b, _ := json.MarshalIndent(err.BasicOutput(), "", " ") -fmt.Println(string(b)) -``` -Prints: -```json -{ - "valid": false, - "errors": [ - { - "keywordLocation": "", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#", - "instanceLocation": "", - "error": "doesn't validate with file:///Users/santhosh/jsonschema/schema.json#" - }, - { - "keywordLocation": "/$ref", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref", - "instanceLocation": "", - "error": "doesn't validate with 'file:///Users/santhosh/jsonschema/t.json#/definitions/employee'" - }, - { - "keywordLocation": "/$ref/type", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type", - "instanceLocation": "", - "error": "expected string, but got number" - } - ] -} -``` -To output `err` in `detailed` output format: -```go -b, _ := json.MarshalIndent(err.DetailedOutput(), "", " ") -fmt.Println(string(b)) -``` -Prints: -```json -{ - "valid": false, - "keywordLocation": "", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#", - "instanceLocation": "", - "errors": [ - { - "valid": false, - "keywordLocation": "/$ref", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/schema.json#/$ref", - "instanceLocation": "", - "errors": [ - { - "valid": false, - "keywordLocation": "/$ref/type", - "absoluteKeywordLocation": "file:///Users/santhosh/jsonschema/t.json#/definitions/employee/type", - "instanceLocation": "", - "error": "expected string, but got number" - } - ] - } - ] -} -``` - -## CLI - -to install `go install github.com/santhosh-tekuri/jsonschema/cmd/jv@latest` - -```bash -jv [-draft INT] [-output FORMAT] [-assertformat] [-assertcontent] []... - -assertcontent - enable content assertions with draft >= 2019 - -assertformat - enable format assertions with draft >= 2019 - -draft int - draft used when '$schema' attribute is missing. valid values 4, 5, 7, 2019, 2020 (default 2020) - -output string - output format. valid values flag, basic, detailed -``` - -if no `` arguments are passed, it simply validates the ``. -if `$schema` attribute is missing in schema, it uses latest version. this can be overridden by passing `-draft` flag - -exit-code is 1, if there are any validation errors - -`jv` can also validate yaml files. It also accepts schema from yaml files. 
- -## Validating YAML Documents - -since yaml supports non-string keys, such yaml documents are rendered as invalid json documents. - -most yaml parser use `map[interface{}]interface{}` for object, -whereas json parser uses `map[string]interface{}`. - -so we need to manually convert them to `map[string]interface{}`. -below code shows such conversion by `toStringKeys` function. - -https://play.golang.org/p/Hhax3MrtD8r - -NOTE: if you are using `gopkg.in/yaml.v3`, then you do not need such conversion. since this library -returns `map[string]interface{}` if all keys are strings. \ No newline at end of file diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go deleted file mode 100644 index fdb68e6480..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/compiler.go +++ /dev/null @@ -1,812 +0,0 @@ -package jsonschema - -import ( - "encoding/json" - "fmt" - "io" - "math/big" - "regexp" - "strconv" - "strings" -) - -// A Compiler represents a json-schema compiler. -type Compiler struct { - // Draft represents the draft used when '$schema' attribute is missing. - // - // This defaults to latest supported draft (currently 2020-12). - Draft *Draft - resources map[string]*resource - - // Extensions is used to register extensions. - extensions map[string]extension - - // ExtractAnnotations tells whether schema annotations has to be extracted - // in compiled Schema or not. - ExtractAnnotations bool - - // LoadURL loads the document at given absolute URL. - // - // If nil, package global LoadURL is used. - LoadURL func(s string) (io.ReadCloser, error) - - // Formats can be registered by adding to this map. Key is format name, - // value is function that knows how to validate that format. - Formats map[string]func(interface{}) bool - - // AssertFormat for specifications >= draft2019-09. - AssertFormat bool - - // Decoders can be registered by adding to this map. Key is encoding name, - // value is function that knows how to decode string in that format. - Decoders map[string]func(string) ([]byte, error) - - // MediaTypes can be registered by adding to this map. Key is mediaType name, - // value is function that knows how to validate that mediaType. - MediaTypes map[string]func([]byte) error - - // AssertContent for specifications >= draft2019-09. - AssertContent bool -} - -// Compile parses json-schema at given url returns, if successful, -// a Schema object that can be used to match against json. -// -// Returned error can be *SchemaError -func Compile(url string) (*Schema, error) { - return NewCompiler().Compile(url) -} - -// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. -// It simplifies safe initialization of global variables holding compiled Schemas. -func MustCompile(url string) *Schema { - return NewCompiler().MustCompile(url) -} - -// CompileString parses and compiles the given schema with given base url. -func CompileString(url, schema string) (*Schema, error) { - c := NewCompiler() - if err := c.AddResource(url, strings.NewReader(schema)); err != nil { - return nil, err - } - return c.Compile(url) -} - -// MustCompileString is like CompileString but panics on error. -// It simplified safe initialization of global variables holding compiled Schema. 
-func MustCompileString(url, schema string) *Schema { - c := NewCompiler() - if err := c.AddResource(url, strings.NewReader(schema)); err != nil { - panic(err) - } - return c.MustCompile(url) -} - -// NewCompiler returns a json-schema Compiler object. -// if '$schema' attribute is missing, it is treated as draft7. to change this -// behavior change Compiler.Draft value -func NewCompiler() *Compiler { - return &Compiler{ - Draft: latest, - resources: make(map[string]*resource), - Formats: make(map[string]func(interface{}) bool), - Decoders: make(map[string]func(string) ([]byte, error)), - MediaTypes: make(map[string]func([]byte) error), - extensions: make(map[string]extension), - } -} - -// AddResource adds in-memory resource to the compiler. -// -// Note that url must not have fragment -func (c *Compiler) AddResource(url string, r io.Reader) error { - res, err := newResource(url, r) - if err != nil { - return err - } - c.resources[res.url] = res - return nil -} - -// MustCompile is like Compile but panics if the url cannot be compiled to *Schema. -// It simplifies safe initialization of global variables holding compiled Schemas. -func (c *Compiler) MustCompile(url string) *Schema { - s, err := c.Compile(url) - if err != nil { - panic(fmt.Sprintf("jsonschema: %#v", err)) - } - return s -} - -// Compile parses json-schema at given url returns, if successful, -// a Schema object that can be used to match against json. -// -// error returned will be of type *SchemaError -func (c *Compiler) Compile(url string) (*Schema, error) { - // make url absolute - u, err := toAbs(url) - if err != nil { - return nil, &SchemaError{url, err} - } - url = u - - sch, err := c.compileURL(url, nil, "#") - if err != nil { - err = &SchemaError{url, err} - } - return sch, err -} - -func (c *Compiler) findResource(url string) (*resource, error) { - if _, ok := c.resources[url]; !ok { - // load resource - var rdr io.Reader - if sch, ok := vocabSchemas[url]; ok { - rdr = strings.NewReader(sch) - } else { - loadURL := LoadURL - if c.LoadURL != nil { - loadURL = c.LoadURL - } - r, err := loadURL(url) - if err != nil { - return nil, err - } - defer r.Close() - rdr = r - } - if err := c.AddResource(url, rdr); err != nil { - return nil, err - } - } - - r := c.resources[url] - if r.draft != nil { - return r, nil - } - - // set draft - r.draft = c.Draft - if m, ok := r.doc.(map[string]interface{}); ok { - if sch, ok := m["$schema"]; ok { - sch, ok := sch.(string) - if !ok { - return nil, fmt.Errorf("jsonschema: invalid $schema in %s", url) - } - if !isURI(sch) { - return nil, fmt.Errorf("jsonschema: $schema must be uri in %s", url) - } - r.draft = findDraft(sch) - if r.draft == nil { - sch, _ := split(sch) - if sch == url { - return nil, fmt.Errorf("jsonschema: unsupported draft in %s", url) - } - mr, err := c.findResource(sch) - if err != nil { - return nil, err - } - r.draft = mr.draft - } - } - } - - id, err := r.draft.resolveID(r.url, r.doc) - if err != nil { - return nil, err - } - if id != "" { - r.url = id - } - - if err := r.fillSubschemas(c, r); err != nil { - return nil, err - } - - return r, nil -} - -func (c *Compiler) compileURL(url string, stack []schemaRef, ptr string) (*Schema, error) { - // if url points to a draft, return Draft.meta - if d := findDraft(url); d != nil && d.meta != nil { - return d.meta, nil - } - - b, f := split(url) - r, err := c.findResource(b) - if err != nil { - return nil, err - } - return c.compileRef(r, stack, ptr, r, f) -} - -func (c *Compiler) compileRef(r *resource, stack 
[]schemaRef, refPtr string, res *resource, ref string) (*Schema, error) { - base := r.baseURL(res.floc) - ref, err := resolveURL(base, ref) - if err != nil { - return nil, err - } - - u, f := split(ref) - sr := r.findResource(u) - if sr == nil { - // external resource - return c.compileURL(ref, stack, refPtr) - } - - // ensure root resource is always compiled first. - // this is required to get schema.meta from root resource - if r.schema == nil { - r.schema = newSchema(r.url, r.floc, r.draft, r.doc) - if _, err := c.compile(r, nil, schemaRef{"#", r.schema, false}, r); err != nil { - return nil, err - } - } - - sr, err = r.resolveFragment(c, sr, f) - if err != nil { - return nil, err - } - if sr == nil { - return nil, fmt.Errorf("jsonschema: %s not found", ref) - } - - if sr.schema != nil { - if err := checkLoop(stack, schemaRef{refPtr, sr.schema, false}); err != nil { - return nil, err - } - return sr.schema, nil - } - - sr.schema = newSchema(r.url, sr.floc, r.draft, sr.doc) - return c.compile(r, stack, schemaRef{refPtr, sr.schema, false}, sr) -} - -func (c *Compiler) compileDynamicAnchors(r *resource, res *resource) error { - if r.draft.version < 2020 { - return nil - } - - rr := r.listResources(res) - rr = append(rr, res) - for _, sr := range rr { - if m, ok := sr.doc.(map[string]interface{}); ok { - if _, ok := m["$dynamicAnchor"]; ok { - sch, err := c.compileRef(r, nil, "IGNORED", r, sr.floc) - if err != nil { - return err - } - res.schema.dynamicAnchors = append(res.schema.dynamicAnchors, sch) - } - } - } - return nil -} - -func (c *Compiler) compile(r *resource, stack []schemaRef, sref schemaRef, res *resource) (*Schema, error) { - if err := c.compileDynamicAnchors(r, res); err != nil { - return nil, err - } - - switch v := res.doc.(type) { - case bool: - res.schema.Always = &v - return res.schema, nil - default: - return res.schema, c.compileMap(r, stack, sref, res) - } -} - -func (c *Compiler) compileMap(r *resource, stack []schemaRef, sref schemaRef, res *resource) error { - m := res.doc.(map[string]interface{}) - - if err := checkLoop(stack, sref); err != nil { - return err - } - stack = append(stack, sref) - - var s = res.schema - var err error - - if r == res { // root schema - if sch, ok := m["$schema"]; ok { - sch := sch.(string) - if d := findDraft(sch); d != nil { - s.meta = d.meta - } else { - if s.meta, err = c.compileRef(r, stack, "$schema", res, sch); err != nil { - return err - } - } - } - } - - if ref, ok := m["$ref"]; ok { - s.Ref, err = c.compileRef(r, stack, "$ref", res, ref.(string)) - if err != nil { - return err - } - if r.draft.version < 2019 { - // All other properties in a "$ref" object MUST be ignored - return nil - } - } - - if r.draft.version >= 2019 { - if r == res { // root schema - if vocab, ok := m["$vocabulary"]; ok { - for url, reqd := range vocab.(map[string]interface{}) { - if reqd, ok := reqd.(bool); ok && !reqd { - continue - } - if !r.draft.isVocab(url) { - return fmt.Errorf("jsonschema: unsupported vocab %q in %s", url, res) - } - s.vocab = append(s.vocab, url) - } - } else { - s.vocab = r.draft.defaultVocab - } - } - - if ref, ok := m["$recursiveRef"]; ok { - s.RecursiveRef, err = c.compileRef(r, stack, "$recursiveRef", res, ref.(string)) - if err != nil { - return err - } - } - } - if r.draft.version >= 2020 { - if dref, ok := m["$dynamicRef"]; ok { - s.DynamicRef, err = c.compileRef(r, stack, "$dynamicRef", res, dref.(string)) - if err != nil { - return err - } - if dref, ok := dref.(string); ok { - _, frag := split(dref) - if frag != "#" 
&& !strings.HasPrefix(frag, "#/") { - // frag is anchor - s.dynamicRefAnchor = frag[1:] - } - } - } - } - - loadInt := func(pname string) int { - if num, ok := m[pname]; ok { - i, _ := num.(json.Number).Float64() - return int(i) - } - return -1 - } - - loadRat := func(pname string) *big.Rat { - if num, ok := m[pname]; ok { - r, _ := new(big.Rat).SetString(string(num.(json.Number))) - return r - } - return nil - } - - if r.draft.version < 2019 || r.schema.meta.hasVocab("validation") { - if t, ok := m["type"]; ok { - switch t := t.(type) { - case string: - s.Types = []string{t} - case []interface{}: - s.Types = toStrings(t) - } - } - - if e, ok := m["enum"]; ok { - s.Enum = e.([]interface{}) - allPrimitives := true - for _, item := range s.Enum { - switch jsonType(item) { - case "object", "array": - allPrimitives = false - break - } - } - s.enumError = "enum failed" - if allPrimitives { - if len(s.Enum) == 1 { - s.enumError = fmt.Sprintf("value must be %#v", s.Enum[0]) - } else { - strEnum := make([]string, len(s.Enum)) - for i, item := range s.Enum { - strEnum[i] = fmt.Sprintf("%#v", item) - } - s.enumError = fmt.Sprintf("value must be one of %s", strings.Join(strEnum, ", ")) - } - } - } - - s.Minimum = loadRat("minimum") - if exclusive, ok := m["exclusiveMinimum"]; ok { - if exclusive, ok := exclusive.(bool); ok { - if exclusive { - s.Minimum, s.ExclusiveMinimum = nil, s.Minimum - } - } else { - s.ExclusiveMinimum = loadRat("exclusiveMinimum") - } - } - - s.Maximum = loadRat("maximum") - if exclusive, ok := m["exclusiveMaximum"]; ok { - if exclusive, ok := exclusive.(bool); ok { - if exclusive { - s.Maximum, s.ExclusiveMaximum = nil, s.Maximum - } - } else { - s.ExclusiveMaximum = loadRat("exclusiveMaximum") - } - } - - s.MultipleOf = loadRat("multipleOf") - - s.MinProperties, s.MaxProperties = loadInt("minProperties"), loadInt("maxProperties") - - if req, ok := m["required"]; ok { - s.Required = toStrings(req.([]interface{})) - } - - s.MinItems, s.MaxItems = loadInt("minItems"), loadInt("maxItems") - - if unique, ok := m["uniqueItems"]; ok { - s.UniqueItems = unique.(bool) - } - - s.MinLength, s.MaxLength = loadInt("minLength"), loadInt("maxLength") - - if pattern, ok := m["pattern"]; ok { - s.Pattern = regexp.MustCompile(pattern.(string)) - } - - if r.draft.version >= 2019 { - s.MinContains, s.MaxContains = loadInt("minContains"), loadInt("maxContains") - if s.MinContains == -1 { - s.MinContains = 1 - } - - if deps, ok := m["dependentRequired"]; ok { - deps := deps.(map[string]interface{}) - s.DependentRequired = make(map[string][]string, len(deps)) - for pname, pvalue := range deps { - s.DependentRequired[pname] = toStrings(pvalue.([]interface{})) - } - } - } - } - - compile := func(stack []schemaRef, ptr string) (*Schema, error) { - return c.compileRef(r, stack, ptr, res, r.url+res.floc+"/"+ptr) - } - - loadSchema := func(pname string, stack []schemaRef) (*Schema, error) { - if _, ok := m[pname]; ok { - return compile(stack, escape(pname)) - } - return nil, nil - } - - loadSchemas := func(pname string, stack []schemaRef) ([]*Schema, error) { - if pvalue, ok := m[pname]; ok { - pvalue := pvalue.([]interface{}) - schemas := make([]*Schema, len(pvalue)) - for i := range pvalue { - sch, err := compile(stack, escape(pname)+"/"+strconv.Itoa(i)) - if err != nil { - return nil, err - } - schemas[i] = sch - } - return schemas, nil - } - return nil, nil - } - - if r.draft.version < 2019 || r.schema.meta.hasVocab("applicator") { - if s.Not, err = loadSchema("not", stack); err != nil { - return 
err - } - if s.AllOf, err = loadSchemas("allOf", stack); err != nil { - return err - } - if s.AnyOf, err = loadSchemas("anyOf", stack); err != nil { - return err - } - if s.OneOf, err = loadSchemas("oneOf", stack); err != nil { - return err - } - - if props, ok := m["properties"]; ok { - props := props.(map[string]interface{}) - s.Properties = make(map[string]*Schema, len(props)) - for pname := range props { - s.Properties[pname], err = compile(nil, "properties/"+escape(pname)) - if err != nil { - return err - } - } - } - - if regexProps, ok := m["regexProperties"]; ok { - s.RegexProperties = regexProps.(bool) - } - - if patternProps, ok := m["patternProperties"]; ok { - patternProps := patternProps.(map[string]interface{}) - s.PatternProperties = make(map[*regexp.Regexp]*Schema, len(patternProps)) - for pattern := range patternProps { - s.PatternProperties[regexp.MustCompile(pattern)], err = compile(nil, "patternProperties/"+escape(pattern)) - if err != nil { - return err - } - } - } - - if additionalProps, ok := m["additionalProperties"]; ok { - switch additionalProps := additionalProps.(type) { - case bool: - s.AdditionalProperties = additionalProps - case map[string]interface{}: - s.AdditionalProperties, err = compile(nil, "additionalProperties") - if err != nil { - return err - } - } - } - - if deps, ok := m["dependencies"]; ok { - deps := deps.(map[string]interface{}) - s.Dependencies = make(map[string]interface{}, len(deps)) - for pname, pvalue := range deps { - switch pvalue := pvalue.(type) { - case []interface{}: - s.Dependencies[pname] = toStrings(pvalue) - default: - s.Dependencies[pname], err = compile(stack, "dependencies/"+escape(pname)) - if err != nil { - return err - } - } - } - } - - if r.draft.version >= 6 { - if s.PropertyNames, err = loadSchema("propertyNames", nil); err != nil { - return err - } - if s.Contains, err = loadSchema("contains", nil); err != nil { - return err - } - } - - if r.draft.version >= 7 { - if m["if"] != nil { - if s.If, err = loadSchema("if", stack); err != nil { - return err - } - if s.Then, err = loadSchema("then", stack); err != nil { - return err - } - if s.Else, err = loadSchema("else", stack); err != nil { - return err - } - } - } - if r.draft.version >= 2019 { - if deps, ok := m["dependentSchemas"]; ok { - deps := deps.(map[string]interface{}) - s.DependentSchemas = make(map[string]*Schema, len(deps)) - for pname := range deps { - s.DependentSchemas[pname], err = compile(stack, "dependentSchemas/"+escape(pname)) - if err != nil { - return err - } - } - } - } - - if r.draft.version >= 2020 { - if s.PrefixItems, err = loadSchemas("prefixItems", nil); err != nil { - return err - } - if s.Items2020, err = loadSchema("items", nil); err != nil { - return err - } - } else { - if items, ok := m["items"]; ok { - switch items.(type) { - case []interface{}: - s.Items, err = loadSchemas("items", nil) - if err != nil { - return err - } - if additionalItems, ok := m["additionalItems"]; ok { - switch additionalItems := additionalItems.(type) { - case bool: - s.AdditionalItems = additionalItems - case map[string]interface{}: - s.AdditionalItems, err = compile(nil, "additionalItems") - if err != nil { - return err - } - } - } - default: - s.Items, err = compile(nil, "items") - if err != nil { - return err - } - } - } - } - - } - - // unevaluatedXXX keywords were in "applicator" vocab in 2019, but moved to new vocab "unevaluated" in 2020 - if (r.draft.version == 2019 && r.schema.meta.hasVocab("applicator")) || (r.draft.version >= 2020 && 
r.schema.meta.hasVocab("unevaluated")) { - if s.UnevaluatedProperties, err = loadSchema("unevaluatedProperties", nil); err != nil { - return err - } - if s.UnevaluatedItems, err = loadSchema("unevaluatedItems", nil); err != nil { - return err - } - if r.draft.version >= 2020 { - // any item in an array that passes validation of the contains schema is considered "evaluated" - s.ContainsEval = true - } - } - - if format, ok := m["format"]; ok { - s.Format = format.(string) - if r.draft.version < 2019 || c.AssertFormat || r.schema.meta.hasVocab("format-assertion") { - if format, ok := c.Formats[s.Format]; ok { - s.format = format - } else { - s.format, _ = Formats[s.Format] - } - } - } - - if c.ExtractAnnotations { - if title, ok := m["title"]; ok { - s.Title = title.(string) - } - if description, ok := m["description"]; ok { - s.Description = description.(string) - } - s.Default = m["default"] - } - - if r.draft.version >= 6 { - if c, ok := m["const"]; ok { - s.Constant = []interface{}{c} - } - } - - if r.draft.version >= 7 { - if encoding, ok := m["contentEncoding"]; ok { - s.ContentEncoding = encoding.(string) - if decoder, ok := c.Decoders[s.ContentEncoding]; ok { - s.decoder = decoder - } else { - s.decoder, _ = Decoders[s.ContentEncoding] - } - } - if mediaType, ok := m["contentMediaType"]; ok { - s.ContentMediaType = mediaType.(string) - if mediaType, ok := c.MediaTypes[s.ContentMediaType]; ok { - s.mediaType = mediaType - } else { - s.mediaType, _ = MediaTypes[s.ContentMediaType] - } - if s.ContentSchema, err = loadSchema("contentSchema", stack); err != nil { - return err - } - } - if c.ExtractAnnotations { - if comment, ok := m["$comment"]; ok { - s.Comment = comment.(string) - } - if readOnly, ok := m["readOnly"]; ok { - s.ReadOnly = readOnly.(bool) - } - if writeOnly, ok := m["writeOnly"]; ok { - s.WriteOnly = writeOnly.(bool) - } - if examples, ok := m["examples"]; ok { - s.Examples = examples.([]interface{}) - } - } - } - - if r.draft.version >= 2019 { - if !c.AssertContent { - s.decoder = nil - s.mediaType = nil - s.ContentSchema = nil - } - if c.ExtractAnnotations { - if deprecated, ok := m["deprecated"]; ok { - s.Deprecated = deprecated.(bool) - } - } - } - - for name, ext := range c.extensions { - es, err := ext.compiler.Compile(CompilerContext{c, r, stack, res}, m) - if err != nil { - return err - } - if es != nil { - if s.Extensions == nil { - s.Extensions = make(map[string]ExtSchema) - } - s.Extensions[name] = es - } - } - - return nil -} - -func (c *Compiler) validateSchema(r *resource, v interface{}, vloc string) error { - validate := func(meta *Schema) error { - if meta == nil { - return nil - } - return meta.validateValue(v, vloc) - } - - if err := validate(r.draft.meta); err != nil { - return err - } - for _, ext := range c.extensions { - if err := validate(ext.meta); err != nil { - return err - } - } - return nil -} - -func toStrings(arr []interface{}) []string { - s := make([]string, len(arr)) - for i, v := range arr { - s[i] = v.(string) - } - return s -} - -// SchemaRef captures schema and the path referring to it. 
-type schemaRef struct { - path string // relative-json-pointer to schema - schema *Schema // target schema - discard bool // true when scope left -} - -func (sr schemaRef) String() string { - return fmt.Sprintf("(%s)%v", sr.path, sr.schema) -} - -func checkLoop(stack []schemaRef, sref schemaRef) error { - for _, ref := range stack { - if ref.schema == sref.schema { - return infiniteLoopError(stack, sref) - } - } - return nil -} - -func keywordLocation(stack []schemaRef, path string) string { - var loc string - for _, ref := range stack[1:] { - loc += "/" + ref.path - } - if path != "" { - loc = loc + "/" + path - } - return loc -} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go deleted file mode 100644 index 7570b8b5a9..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/content.go +++ /dev/null @@ -1,29 +0,0 @@ -package jsonschema - -import ( - "encoding/base64" - "encoding/json" -) - -// Decoders is a registry of functions, which know how to decode -// string encoded in specific format. -// -// New Decoders can be registered by adding to this map. Key is encoding name, -// value is function that knows how to decode string in that format. -var Decoders = map[string]func(string) ([]byte, error){ - "base64": base64.StdEncoding.DecodeString, -} - -// MediaTypes is a registry of functions, which know how to validate -// whether the bytes represent data of that mediaType. -// -// New mediaTypes can be registered by adding to this map. Key is mediaType name, -// value is function that knows how to validate that mediaType. -var MediaTypes = map[string]func([]byte) error{ - "application/json": validateJSON, -} - -func validateJSON(b []byte) error { - var v interface{} - return json.Unmarshal(b, &v) -} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go deleted file mode 100644 index a124262a51..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/doc.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Package jsonschema provides json-schema compilation and validation. - -Features: - - implements draft 2020-12, 2019-09, draft-7, draft-6, draft-4 - - fully compliant with JSON-Schema-Test-Suite, (excluding some optional) - - list of optional tests that are excluded can be found in schema_test.go(variable skipTests) - - validates schemas against meta-schema - - full support of remote references - - support of recursive references between schemas - - detects infinite loop in schemas - - thread safe validation - - rich, intuitive hierarchial error messages with json-pointers to exact location - - supports output formats flag, basic and detailed - - supports enabling format and content Assertions in draft2019-09 or above - - change Compiler.AssertFormat, Compiler.AssertContent to true - - compiled schema can be introspected. 
easier to develop tools like generating go structs given schema - - supports user-defined keywords via extensions - - implements following formats (supports user-defined) - - date-time, date, time, duration (supports leap-second) - - uuid, hostname, email - - ip-address, ipv4, ipv6 - - uri, uriref, uri-template(limited validation) - - json-pointer, relative-json-pointer - - regex, format - - implements following contentEncoding (supports user-defined) - - base64 - - implements following contentMediaType (supports user-defined) - - application/json - - can load from files/http/https/string/[]byte/io.Reader (supports user-defined) - -The schema is compiled against the version specified in "$schema" property. -If "$schema" property is missing, it uses latest draft which currently implemented -by this library. - -You can force to use specific draft, when "$schema" is missing, as follows: - - compiler := jsonschema.NewCompiler() - compiler.Draft = jsonschema.Draft4 - -This package supports loading json-schema from filePath and fileURL. - -To load json-schema from HTTPURL, add following import: - - import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" - -you can validate yaml documents. see https://play.golang.org/p/sJy1qY7dXgA -*/ -package jsonschema diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go deleted file mode 100644 index 154fa5837d..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/draft.go +++ /dev/null @@ -1,1454 +0,0 @@ -package jsonschema - -import ( - "fmt" - "strconv" - "strings" -) - -// A Draft represents json-schema draft -type Draft struct { - version int - meta *Schema - id string // property name used to represent schema id. 
- boolSchema bool // is boolean valid schema - vocab []string // built-in vocab - defaultVocab []string // vocabs when $vocabulary is not used - subschemas map[string]position -} - -func (d *Draft) URL() string { - switch d.version { - case 2020: - return "https://json-schema.org/draft/2020-12/schema" - case 2019: - return "https://json-schema.org/draft/2019-09/schema" - case 7: - return "https://json-schema.org/draft-07/schema" - case 6: - return "https://json-schema.org/draft-06/schema" - case 4: - return "https://json-schema.org/draft-04/schema" - } - return "" -} - -func (d *Draft) String() string { - return fmt.Sprintf("Draft%d", d.version) -} - -func (d *Draft) loadMeta(url, schema string) { - c := NewCompiler() - c.AssertFormat = true - if err := c.AddResource(url, strings.NewReader(schema)); err != nil { - panic(err) - } - d.meta = c.MustCompile(url) - d.meta.meta = d.meta -} - -func (d *Draft) getID(sch interface{}) string { - m, ok := sch.(map[string]interface{}) - if !ok { - return "" - } - if _, ok := m["$ref"]; ok && d.version <= 7 { - // $ref prevents a sibling id from changing the base uri - return "" - } - v, ok := m[d.id] - if !ok { - return "" - } - id, ok := v.(string) - if !ok { - return "" - } - return id -} - -func (d *Draft) resolveID(base string, sch interface{}) (string, error) { - id, _ := split(d.getID(sch)) // strip fragment - if id == "" { - return "", nil - } - url, err := resolveURL(base, id) - url, _ = split(url) // strip fragment - return url, err -} - -func (d *Draft) anchors(sch interface{}) []string { - m, ok := sch.(map[string]interface{}) - if !ok { - return nil - } - - var anchors []string - - // before draft2019, anchor is specified in id - _, f := split(d.getID(m)) - if f != "#" { - anchors = append(anchors, f[1:]) - } - - if v, ok := m["$anchor"]; ok && d.version >= 2019 { - anchors = append(anchors, v.(string)) - } - if v, ok := m["$dynamicAnchor"]; ok && d.version >= 2020 { - anchors = append(anchors, v.(string)) - } - return anchors -} - -// listSubschemas collects subschemas in r into rr. -func (d *Draft) listSubschemas(r *resource, base string, rr map[string]*resource) error { - add := func(loc string, sch interface{}) error { - url, err := d.resolveID(base, sch) - if err != nil { - return err - } - floc := r.floc + "/" + loc - sr := &resource{url: url, floc: floc, doc: sch} - rr[floc] = sr - - base := base - if url != "" { - base = url - } - return d.listSubschemas(sr, base, rr) - } - - sch, ok := r.doc.(map[string]interface{}) - if !ok { - return nil - } - for kw, pos := range d.subschemas { - v, ok := sch[kw] - if !ok { - continue - } - if pos&self != 0 { - switch v := v.(type) { - case map[string]interface{}: - if err := add(kw, v); err != nil { - return err - } - case bool: - if d.boolSchema { - if err := add(kw, v); err != nil { - return err - } - } - } - } - if pos&item != 0 { - if v, ok := v.([]interface{}); ok { - for i, item := range v { - if err := add(kw+"/"+strconv.Itoa(i), item); err != nil { - return err - } - } - } - } - if pos&prop != 0 { - if v, ok := v.(map[string]interface{}); ok { - for pname, pval := range v { - if err := add(kw+"/"+escape(pname), pval); err != nil { - return err - } - } - } - } - } - return nil -} - -// isVocab tells whether url is built-in vocab. 
-func (d *Draft) isVocab(url string) bool { - for _, v := range d.vocab { - if url == v { - return true - } - } - return false -} - -type position uint - -const ( - self position = 1 << iota - prop - item -) - -// supported drafts -var ( - Draft4 = &Draft{version: 4, id: "id", boolSchema: false} - Draft6 = &Draft{version: 6, id: "$id", boolSchema: true} - Draft7 = &Draft{version: 7, id: "$id", boolSchema: true} - Draft2019 = &Draft{ - version: 2019, - id: "$id", - boolSchema: true, - vocab: []string{ - "https://json-schema.org/draft/2019-09/vocab/core", - "https://json-schema.org/draft/2019-09/vocab/applicator", - "https://json-schema.org/draft/2019-09/vocab/validation", - "https://json-schema.org/draft/2019-09/vocab/meta-data", - "https://json-schema.org/draft/2019-09/vocab/format", - "https://json-schema.org/draft/2019-09/vocab/content", - }, - defaultVocab: []string{ - "https://json-schema.org/draft/2019-09/vocab/core", - "https://json-schema.org/draft/2019-09/vocab/applicator", - "https://json-schema.org/draft/2019-09/vocab/validation", - }, - } - Draft2020 = &Draft{ - version: 2020, - id: "$id", - boolSchema: true, - vocab: []string{ - "https://json-schema.org/draft/2020-12/vocab/core", - "https://json-schema.org/draft/2020-12/vocab/applicator", - "https://json-schema.org/draft/2020-12/vocab/unevaluated", - "https://json-schema.org/draft/2020-12/vocab/validation", - "https://json-schema.org/draft/2020-12/vocab/meta-data", - "https://json-schema.org/draft/2020-12/vocab/format-annotation", - "https://json-schema.org/draft/2020-12/vocab/format-assertion", - "https://json-schema.org/draft/2020-12/vocab/content", - }, - defaultVocab: []string{ - "https://json-schema.org/draft/2020-12/vocab/core", - "https://json-schema.org/draft/2020-12/vocab/applicator", - "https://json-schema.org/draft/2020-12/vocab/unevaluated", - "https://json-schema.org/draft/2020-12/vocab/validation", - }, - } - - latest = Draft2020 -) - -func findDraft(url string) *Draft { - if strings.HasPrefix(url, "http://") { - url = "https://" + strings.TrimPrefix(url, "http://") - } - if strings.HasSuffix(url, "#") || strings.HasSuffix(url, "#/") { - url = url[:strings.IndexByte(url, '#')] - } - switch url { - case "https://json-schema.org/schema": - return latest - case "https://json-schema.org/draft/2020-12/schema": - return Draft2020 - case "https://json-schema.org/draft/2019-09/schema": - return Draft2019 - case "https://json-schema.org/draft-07/schema": - return Draft7 - case "https://json-schema.org/draft-06/schema": - return Draft6 - case "https://json-schema.org/draft-04/schema": - return Draft4 - } - return nil -} - -func init() { - subschemas := map[string]position{ - // type agnostic - "definitions": prop, - "not": self, - "allOf": item, - "anyOf": item, - "oneOf": item, - // object - "properties": prop, - "additionalProperties": self, - "patternProperties": prop, - // array - "items": self | item, - "additionalItems": self, - "dependencies": prop, - } - Draft4.subschemas = clone(subschemas) - - subschemas["propertyNames"] = self - subschemas["contains"] = self - Draft6.subschemas = clone(subschemas) - - subschemas["if"] = self - subschemas["then"] = self - subschemas["else"] = self - Draft7.subschemas = clone(subschemas) - - subschemas["$defs"] = prop - subschemas["dependentSchemas"] = prop - subschemas["unevaluatedProperties"] = self - subschemas["unevaluatedItems"] = self - subschemas["contentSchema"] = self - Draft2019.subschemas = clone(subschemas) - - subschemas["prefixItems"] = item - Draft2020.subschemas = 
clone(subschemas) - - Draft4.loadMeta("http://json-schema.org/draft-04/schema", `{ - "$schema": "http://json-schema.org/draft-04/schema#", - "description": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] - }, - "simpleTypes": { - "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "minItems": 1, - "uniqueItems": true - } - }, - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uriref" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "minimum": 0, - "exclusiveMinimum": true - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "boolean", - "default": false - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "boolean", - "default": false - }, - "maxLength": { "$ref": "#/definitions/positiveInteger" }, - "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/positiveInteger" }, - "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxProperties": { "$ref": "#/definitions/positiveInteger" }, - "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "regexProperties": true, - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "regexProperties": { "type": "boolean" }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" }, - "format": { "type": "string" }, - "$ref": { "type": "string" } - }, - "dependencies": { - "exclusiveMaximum": [ "maximum" ], - "exclusiveMinimum": [ "minimum" ] - }, - "default": {} - }`) - Draft6.loadMeta("http://json-schema.org/draft-06/schema", `{ - "$schema": "http://json-schema.org/draft-06/schema#", - "$id": "http://json-schema.org/draft-06/schema#", - "title": "Core schema meta-schema", - "definitions": { - 
"schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "allOf": [ - { "$ref": "#/definitions/nonNegativeInteger" }, - { "default": 0 } - ] - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - }, - "type": ["object", "boolean"], - "properties": { - "$id": { - "type": "string", - "format": "uri-reference" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "$ref": { - "type": "string", - "format": "uri-reference" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, - "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { "$ref": "#" }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, - "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "contains": { "$ref": "#" }, - "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, - "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { "$ref": "#" }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "regexProperties": true, - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "propertyNames": { "$ref": "#" }, - "const": {}, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "format": { "type": "string" }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "default": {} - }`) - Draft7.loadMeta("http://json-schema.org/draft-07/schema", `{ - "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "http://json-schema.org/draft-07/schema#", - "title": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "allOf": [ - { "$ref": "#/definitions/nonNegativeInteger" }, - { "default": 0 } - ] - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - 
"number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - }, - "type": ["object", "boolean"], - "properties": { - "$id": { - "type": "string", - "format": "uri-reference" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "$ref": { - "type": "string", - "format": "uri-reference" - }, - "$comment": { - "type": "string" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": true, - "readOnly": { - "type": "boolean", - "default": false - }, - "writeOnly": { - "type": "boolean", - "default": false - }, - "examples": { - "type": "array", - "items": true - }, - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, - "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { "$ref": "#" }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": true - }, - "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, - "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "contains": { "$ref": "#" }, - "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, - "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { "$ref": "#" }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "propertyNames": { "format": "regex" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "propertyNames": { "$ref": "#" }, - "const": true, - "enum": { - "type": "array", - "items": true, - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "format": { "type": "string" }, - "contentMediaType": { "type": "string" }, - "contentEncoding": { "type": "string" }, - "if": { "$ref": "#" }, - "then": { "$ref": "#" }, - "else": { "$ref": "#" }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "default": true - }`) - Draft2019.loadMeta("https://json-schema.org/draft/2019-09/schema", `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/schema", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/core": true, - "https://json-schema.org/draft/2019-09/vocab/applicator": true, - "https://json-schema.org/draft/2019-09/vocab/validation": true, - "https://json-schema.org/draft/2019-09/vocab/meta-data": true, - "https://json-schema.org/draft/2019-09/vocab/format": 
false, - "https://json-schema.org/draft/2019-09/vocab/content": true - }, - "$recursiveAnchor": true, - - "title": "Core and Validation specifications meta-schema", - "allOf": [ - {"$ref": "meta/core"}, - {"$ref": "meta/applicator"}, - {"$ref": "meta/validation"}, - {"$ref": "meta/meta-data"}, - {"$ref": "meta/format"}, - {"$ref": "meta/content"} - ], - "type": ["object", "boolean"], - "properties": { - "definitions": { - "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", - "type": "object", - "additionalProperties": { "$recursiveRef": "#" }, - "default": {} - }, - "dependencies": { - "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$recursiveRef": "#" }, - { "$ref": "meta/validation#/$defs/stringArray" } - ] - } - } - } - }`) - Draft2020.loadMeta("https://json-schema.org/draft/2020-12/schema", `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/schema", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/core": true, - "https://json-schema.org/draft/2020-12/vocab/applicator": true, - "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, - "https://json-schema.org/draft/2020-12/vocab/validation": true, - "https://json-schema.org/draft/2020-12/vocab/meta-data": true, - "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, - "https://json-schema.org/draft/2020-12/vocab/content": true - }, - "$dynamicAnchor": "meta", - - "title": "Core and Validation specifications meta-schema", - "allOf": [ - {"$ref": "meta/core"}, - {"$ref": "meta/applicator"}, - {"$ref": "meta/unevaluated"}, - {"$ref": "meta/validation"}, - {"$ref": "meta/meta-data"}, - {"$ref": "meta/format-annotation"}, - {"$ref": "meta/content"} - ], - "type": ["object", "boolean"], - "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", - "properties": { - "definitions": { - "$comment": "\"definitions\" has been replaced by \"$defs\".", - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" }, - "deprecated": true, - "default": {} - }, - "dependencies": { - "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$dynamicRef": "#meta" }, - { "$ref": "meta/validation#/$defs/stringArray" } - ] - }, - "deprecated": true, - "default": {} - }, - "$recursiveAnchor": { - "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", - "$ref": "meta/core#/$defs/anchorString", - "deprecated": true - }, - "$recursiveRef": { - "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", - "$ref": "meta/core#/$defs/uriReferenceString", - "deprecated": true - } - } - }`) -} - -var vocabSchemas = map[string]string{ - "https://json-schema.org/draft/2019-09/meta/core": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/core", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/core": true - }, - "$recursiveAnchor": true, - - 
"title": "Core vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "$id": { - "type": "string", - "format": "uri-reference", - "$comment": "Non-empty fragments not allowed.", - "pattern": "^[^#]*#?$" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "$anchor": { - "type": "string", - "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" - }, - "$ref": { - "type": "string", - "format": "uri-reference" - }, - "$recursiveRef": { - "type": "string", - "format": "uri-reference" - }, - "$recursiveAnchor": { - "type": "boolean", - "default": false - }, - "$vocabulary": { - "type": "object", - "propertyNames": { - "type": "string", - "format": "uri" - }, - "additionalProperties": { - "type": "boolean" - } - }, - "$comment": { - "type": "string" - }, - "$defs": { - "type": "object", - "additionalProperties": { "$recursiveRef": "#" }, - "default": {} - } - } - }`, - "https://json-schema.org/draft/2019-09/meta/applicator": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/applicator", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/applicator": true - }, - "$recursiveAnchor": true, - - "title": "Applicator vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "additionalItems": { "$recursiveRef": "#" }, - "unevaluatedItems": { "$recursiveRef": "#" }, - "items": { - "anyOf": [ - { "$recursiveRef": "#" }, - { "$ref": "#/$defs/schemaArray" } - ] - }, - "contains": { "$recursiveRef": "#" }, - "additionalProperties": { "$recursiveRef": "#" }, - "unevaluatedProperties": { "$recursiveRef": "#" }, - "properties": { - "type": "object", - "additionalProperties": { "$recursiveRef": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$recursiveRef": "#" }, - "propertyNames": { "format": "regex" }, - "default": {} - }, - "dependentSchemas": { - "type": "object", - "additionalProperties": { - "$recursiveRef": "#" - } - }, - "propertyNames": { "$recursiveRef": "#" }, - "if": { "$recursiveRef": "#" }, - "then": { "$recursiveRef": "#" }, - "else": { "$recursiveRef": "#" }, - "allOf": { "$ref": "#/$defs/schemaArray" }, - "anyOf": { "$ref": "#/$defs/schemaArray" }, - "oneOf": { "$ref": "#/$defs/schemaArray" }, - "not": { "$recursiveRef": "#" } - }, - "$defs": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$recursiveRef": "#" } - } - } - }`, - "https://json-schema.org/draft/2019-09/meta/validation": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/validation", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/validation": true - }, - "$recursiveAnchor": true, - - "title": "Validation vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, - "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, - "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, - "minContains": { - 
"$ref": "#/$defs/nonNegativeInteger", - "default": 1 - }, - "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, - "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/$defs/stringArray" }, - "dependentRequired": { - "type": "object", - "additionalProperties": { - "$ref": "#/$defs/stringArray" - } - }, - "const": true, - "enum": { - "type": "array", - "items": true - }, - "type": { - "anyOf": [ - { "$ref": "#/$defs/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/$defs/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - } - }, - "$defs": { - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "$ref": "#/$defs/nonNegativeInteger", - "default": 0 - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - } - }`, - "https://json-schema.org/draft/2019-09/meta/meta-data": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/meta-data": true - }, - "$recursiveAnchor": true, - - "title": "Meta-data vocabulary meta-schema", - - "type": ["object", "boolean"], - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": true, - "deprecated": { - "type": "boolean", - "default": false - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "writeOnly": { - "type": "boolean", - "default": false - }, - "examples": { - "type": "array", - "items": true - } - } - }`, - "https://json-schema.org/draft/2019-09/meta/format": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/format", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/format": true - }, - "$recursiveAnchor": true, - - "title": "Format vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "format": { "type": "string" } - } - }`, - "https://json-schema.org/draft/2019-09/meta/content": `{ - "$schema": "https://json-schema.org/draft/2019-09/schema", - "$id": "https://json-schema.org/draft/2019-09/meta/content", - "$vocabulary": { - "https://json-schema.org/draft/2019-09/vocab/content": true - }, - "$recursiveAnchor": true, - - "title": "Content vocabulary meta-schema", - - "type": ["object", "boolean"], - "properties": { - "contentMediaType": { "type": "string" }, - "contentEncoding": { "type": "string" }, - "contentSchema": { "$recursiveRef": "#" } - } - }`, - "https://json-schema.org/draft/2020-12/meta/core": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/core", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/core": true - }, - "$dynamicAnchor": "meta", - - "title": "Core vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "$id": { - "$ref": "#/$defs/uriReferenceString", - "$comment": "Non-empty fragments not allowed.", - "pattern": "^[^#]*#?$" - }, - "$schema": { "$ref": "#/$defs/uriString" }, - "$ref": { "$ref": "#/$defs/uriReferenceString" }, - "$anchor": { "$ref": "#/$defs/anchorString" }, - "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, - "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, - "$vocabulary": { - 
"type": "object", - "propertyNames": { "$ref": "#/$defs/uriString" }, - "additionalProperties": { - "type": "boolean" - } - }, - "$comment": { - "type": "string" - }, - "$defs": { - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" } - } - }, - "$defs": { - "anchorString": { - "type": "string", - "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" - }, - "uriString": { - "type": "string", - "format": "uri" - }, - "uriReferenceString": { - "type": "string", - "format": "uri-reference" - } - } - }`, - "https://json-schema.org/draft/2020-12/meta/applicator": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/applicator", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/applicator": true - }, - "$dynamicAnchor": "meta", - - "title": "Applicator vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "prefixItems": { "$ref": "#/$defs/schemaArray" }, - "items": { "$dynamicRef": "#meta" }, - "contains": { "$dynamicRef": "#meta" }, - "additionalProperties": { "$dynamicRef": "#meta" }, - "properties": { - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" }, - "propertyNames": { "format": "regex" }, - "default": {} - }, - "dependentSchemas": { - "type": "object", - "additionalProperties": { "$dynamicRef": "#meta" }, - "default": {} - }, - "propertyNames": { "$dynamicRef": "#meta" }, - "if": { "$dynamicRef": "#meta" }, - "then": { "$dynamicRef": "#meta" }, - "else": { "$dynamicRef": "#meta" }, - "allOf": { "$ref": "#/$defs/schemaArray" }, - "anyOf": { "$ref": "#/$defs/schemaArray" }, - "oneOf": { "$ref": "#/$defs/schemaArray" }, - "not": { "$dynamicRef": "#meta" } - }, - "$defs": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$dynamicRef": "#meta" } - } - } - }`, - "https://json-schema.org/draft/2020-12/meta/unevaluated": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/unevaluated": true - }, - "$dynamicAnchor": "meta", - - "title": "Unevaluated applicator vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "unevaluatedItems": { "$dynamicRef": "#meta" }, - "unevaluatedProperties": { "$dynamicRef": "#meta" } - } - }`, - "https://json-schema.org/draft/2020-12/meta/validation": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/validation", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/validation": true - }, - "$dynamicAnchor": "meta", - - "title": "Validation vocabulary meta-schema", - "type": ["object", "boolean"], - "properties": { - "type": { - "anyOf": [ - { "$ref": "#/$defs/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/$defs/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "const": true, - "enum": { - "type": "array", - "items": true - }, - "multipleOf": { - "type": "number", - "exclusiveMinimum": 0 - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "number" - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "number" - }, - "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, - "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "pattern": { - "type": "string", - 
"format": "regex" - }, - "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, - "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, - "minContains": { - "$ref": "#/$defs/nonNegativeInteger", - "default": 1 - }, - "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, - "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, - "required": { "$ref": "#/$defs/stringArray" }, - "dependentRequired": { - "type": "object", - "additionalProperties": { - "$ref": "#/$defs/stringArray" - } - } - }, - "$defs": { - "nonNegativeInteger": { - "type": "integer", - "minimum": 0 - }, - "nonNegativeIntegerDefault0": { - "$ref": "#/$defs/nonNegativeInteger", - "default": 0 - }, - "simpleTypes": { - "enum": [ - "array", - "boolean", - "integer", - "null", - "number", - "object", - "string" - ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "uniqueItems": true, - "default": [] - } - } - }`, - "https://json-schema.org/draft/2020-12/meta/meta-data": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/meta-data": true - }, - "$dynamicAnchor": "meta", - - "title": "Meta-data vocabulary meta-schema", - - "type": ["object", "boolean"], - "properties": { - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": true, - "deprecated": { - "type": "boolean", - "default": false - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "writeOnly": { - "type": "boolean", - "default": false - }, - "examples": { - "type": "array", - "items": true - } - } - }`, - "https://json-schema.org/draft/2020-12/meta/format-annotation": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/format-annotation": true - }, - "$dynamicAnchor": "meta", - - "title": "Format vocabulary meta-schema for annotation results", - "type": ["object", "boolean"], - "properties": { - "format": { "type": "string" } - } - }`, - "https://json-schema.org/draft/2020-12/meta/format-assertion": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/format-assertion": true - }, - "$dynamicAnchor": "meta", - - "title": "Format vocabulary meta-schema for assertion results", - "type": ["object", "boolean"], - "properties": { - "format": { "type": "string" } - } - }`, - "https://json-schema.org/draft/2020-12/meta/content": `{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "$id": "https://json-schema.org/draft/2020-12/meta/content", - "$vocabulary": { - "https://json-schema.org/draft/2020-12/vocab/content": true - }, - "$dynamicAnchor": "meta", - - "title": "Content vocabulary meta-schema", - - "type": ["object", "boolean"], - "properties": { - "contentEncoding": { "type": "string" }, - "contentMediaType": { "type": "string" }, - "contentSchema": { "$dynamicRef": "#meta" } - } - }`, -} - -func clone(m map[string]position) map[string]position { - mm := make(map[string]position) - for k, v := range m { - mm[k] = v - } - return mm -} diff --git 
a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go deleted file mode 100644 index deaded89f7..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/errors.go +++ /dev/null @@ -1,129 +0,0 @@ -package jsonschema - -import ( - "fmt" - "strings" -) - -// InvalidJSONTypeError is the error type returned by ValidateInterface. -// this tells that specified go object is not valid jsonType. -type InvalidJSONTypeError string - -func (e InvalidJSONTypeError) Error() string { - return fmt.Sprintf("jsonschema: invalid jsonType: %s", string(e)) -} - -// InfiniteLoopError is returned by Compile/Validate. -// this gives url#keywordLocation that lead to infinity loop. -type InfiniteLoopError string - -func (e InfiniteLoopError) Error() string { - return "jsonschema: infinite loop " + string(e) -} - -func infiniteLoopError(stack []schemaRef, sref schemaRef) InfiniteLoopError { - var path string - for _, ref := range stack { - if path == "" { - path += ref.schema.Location - } else { - path += "/" + ref.path - } - } - return InfiniteLoopError(path + "/" + sref.path) -} - -// SchemaError is the error type returned by Compile. -type SchemaError struct { - // SchemaURL is the url to json-schema that filed to compile. - // This is helpful, if your schema refers to external schemas - SchemaURL string - - // Err is the error that occurred during compilation. - // It could be ValidationError, because compilation validates - // given schema against the json meta-schema - Err error -} - -func (se *SchemaError) Unwrap() error { - return se.Err -} - -func (se *SchemaError) Error() string { - s := fmt.Sprintf("jsonschema %s compilation failed", se.SchemaURL) - if se.Err != nil { - return fmt.Sprintf("%s: %v", s, strings.TrimPrefix(se.Err.Error(), "jsonschema: ")) - } - return s -} - -func (se *SchemaError) GoString() string { - if _, ok := se.Err.(*ValidationError); ok { - return fmt.Sprintf("jsonschema %s compilation failed\n%#v", se.SchemaURL, se.Err) - } - return se.Error() -} - -// ValidationError is the error type returned by Validate. 
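errors.go, removed here, separates compile-time failures (*SchemaError, which records the failing schema URL and unwraps to its cause) from validation-time failures (*ValidationError, a tree of causes). A handling sketch against that API:

    package main

    import (
        "errors"
        "fmt"
        "strings"

        "github.com/santhosh-tekuri/jsonschema/v5"
    )

    func main() {
        c := jsonschema.NewCompiler()
        if err := c.AddResource("s.json", strings.NewReader(`{"type": "integer"}`)); err != nil {
            panic(err)
        }
        sch, err := c.Compile("s.json")
        if err != nil {
            // Compilation failures are *SchemaError; the wrapped error may
            // itself be a *ValidationError against the draft's meta-schema.
            var se *jsonschema.SchemaError
            if errors.As(err, &se) {
                fmt.Printf("compiling %s failed: %v\n", se.SchemaURL, se.Err)
            }
            return
        }
        if err := sch.Validate("not a number"); err != nil {
            var ve *jsonschema.ValidationError
            if errors.As(err, &ve) {
                fmt.Printf("%#v\n", ve) // %#v uses GoString to print the cause hierarchy
            }
        }
    }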
-type ValidationError struct { - KeywordLocation string // validation path of validating keyword or schema - AbsoluteKeywordLocation string // absolute location of validating keyword or schema - InstanceLocation string // location of the json value within the instance being validated - Message string // describes error - Causes []*ValidationError // nested validation errors -} - -func (ve *ValidationError) add(causes ...error) error { - for _, cause := range causes { - ve.Causes = append(ve.Causes, cause.(*ValidationError)) - } - return ve -} - -func (ve *ValidationError) causes(err error) error { - if err := err.(*ValidationError); err.Message == "" { - ve.Causes = err.Causes - } else { - ve.add(err) - } - return ve -} - -func (ve *ValidationError) Error() string { - leaf := ve - for len(leaf.Causes) > 0 { - leaf = leaf.Causes[0] - } - u, _ := split(ve.AbsoluteKeywordLocation) - return fmt.Sprintf("jsonschema: %s does not validate with %s: %s", quote(leaf.InstanceLocation), u+"#"+leaf.KeywordLocation, leaf.Message) -} - -func (ve *ValidationError) GoString() string { - sloc := ve.AbsoluteKeywordLocation - sloc = sloc[strings.IndexByte(sloc, '#')+1:] - msg := fmt.Sprintf("[I#%s] [S#%s] %s", ve.InstanceLocation, sloc, ve.Message) - for _, c := range ve.Causes { - for _, line := range strings.Split(c.GoString(), "\n") { - msg += "\n " + line - } - } - return msg -} - -func joinPtr(ptr1, ptr2 string) string { - if len(ptr1) == 0 { - return ptr2 - } - if len(ptr2) == 0 { - return ptr1 - } - return ptr1 + "/" + ptr2 -} - -// quote returns single-quoted string -func quote(s string) string { - s = fmt.Sprintf("%q", s) - s = strings.ReplaceAll(s, `\"`, `"`) - s = strings.ReplaceAll(s, `'`, `\'`) - return "'" + s[1:len(s)-1] + "'" -} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go deleted file mode 100644 index 452ba118c5..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/extension.go +++ /dev/null @@ -1,116 +0,0 @@ -package jsonschema - -// ExtCompiler compiles custom keyword(s) into ExtSchema. -type ExtCompiler interface { - // Compile compiles the custom keywords in schema m and returns its compiled representation. - // if the schema m does not contain the keywords defined by this extension, - // compiled representation nil should be returned. - Compile(ctx CompilerContext, m map[string]interface{}) (ExtSchema, error) -} - -// ExtSchema is schema representation of custom keyword(s) -type ExtSchema interface { - // Validate validates the json value v with this ExtSchema. - // Returned error must be *ValidationError. - Validate(ctx ValidationContext, v interface{}) error -} - -type extension struct { - meta *Schema - compiler ExtCompiler -} - -// RegisterExtension registers custom keyword(s) into this compiler. -// -// name is extension name, used only to avoid name collisions. -// meta captures the metaschema for the new keywords. -// This is used to validate the schema before calling ext.Compile. -func (c *Compiler) RegisterExtension(name string, meta *Schema, ext ExtCompiler) { - c.extensions[name] = extension{meta, ext} -} - -// CompilerContext --- - -// CompilerContext provides additional context required in compiling for extension. -type CompilerContext struct { - c *Compiler - r *resource - stack []schemaRef - res *resource -} - -// Compile compiles given value at ptr into *Schema. 
This is useful in implementing -// keyword like allOf/not/patternProperties. -// -// schPath is the relative-json-pointer to the schema to be compiled from parent schema. -// -// applicableOnSameInstance tells whether current schema and the given schema -// are applied on same instance value. this is used to detect infinite loop in schema. -func (ctx CompilerContext) Compile(schPath string, applicableOnSameInstance bool) (*Schema, error) { - var stack []schemaRef - if applicableOnSameInstance { - stack = ctx.stack - } - return ctx.c.compileRef(ctx.r, stack, schPath, ctx.res, ctx.r.url+ctx.res.floc+"/"+schPath) -} - -// CompileRef compiles the schema referenced by ref uri -// -// refPath is the relative-json-pointer to ref. -// -// applicableOnSameInstance tells whether current schema and the given schema -// are applied on same instance value. this is used to detect infinite loop in schema. -func (ctx CompilerContext) CompileRef(ref string, refPath string, applicableOnSameInstance bool) (*Schema, error) { - var stack []schemaRef - if applicableOnSameInstance { - stack = ctx.stack - } - return ctx.c.compileRef(ctx.r, stack, refPath, ctx.res, ref) -} - -// ValidationContext --- - -// ValidationContext provides additional context required in validating for extension. -type ValidationContext struct { - result validationResult - validate func(sch *Schema, schPath string, v interface{}, vpath string) error - validateInplace func(sch *Schema, schPath string) error - validationError func(keywordPath string, format string, a ...interface{}) *ValidationError -} - -// EvaluatedProp marks given property of object as evaluated. -func (ctx ValidationContext) EvaluatedProp(prop string) { - delete(ctx.result.unevalProps, prop) -} - -// EvaluatedItem marks given index of array as evaluated. -func (ctx ValidationContext) EvaluatedItem(index int) { - delete(ctx.result.unevalItems, index) -} - -// Validate validates schema s with value v. Extension must use this method instead of -// *Schema.ValidateInterface method. This will be useful in implementing keywords like -// allOf/oneOf -// -// spath is relative-json-pointer to s -// vpath is relative-json-pointer to v. -func (ctx ValidationContext) Validate(s *Schema, spath string, v interface{}, vpath string) error { - if vpath == "" { - return ctx.validateInplace(s, spath) - } - return ctx.validate(s, spath, v, vpath) -} - -// Error used to construct validation error by extensions. -// -// keywordPath is relative-json-pointer to keyword. -func (ctx ValidationContext) Error(keywordPath string, format string, a ...interface{}) *ValidationError { - return ctx.validationError(keywordPath, format, a...) -} - -// Group is used by extensions to group multiple errors as causes to parent error. -// This is useful in implementing keywords like allOf where each schema specified -// in allOf can result a validationError. -func (ValidationError) Group(parent *ValidationError, causes ...error) error { - return parent.add(causes...) -} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go deleted file mode 100644 index 05686073f0..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/format.go +++ /dev/null @@ -1,567 +0,0 @@ -package jsonschema - -import ( - "errors" - "net" - "net/mail" - "net/url" - "regexp" - "strconv" - "strings" - "time" -) - -// Formats is a registry of functions, which know how to validate -// a specific format. 
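extension.go, whose removal ends here, is the user-defined-keyword hook advertised in the package documentation. Below is a sketch of a hypothetical "isEven" keyword: the keyword name and its meta-schema are invented for illustration, while RegisterExtension, ExtCompiler, ExtSchema, and ValidationContext.Error are used as vendored above. Numbers are handled as json.Number, matching the UseNumber decoding in the removed resource.go.

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"

        "github.com/santhosh-tekuri/jsonschema/v5"
    )

    // evenSchema is the compiled form of the hypothetical "isEven" keyword.
    type evenSchema bool

    func (s evenSchema) Validate(ctx jsonschema.ValidationContext, v interface{}) error {
        n, ok := v.(json.Number)
        if !ok || !bool(s) {
            return nil // the keyword constrains numbers only
        }
        if i, err := n.Int64(); err == nil && i%2 != 0 {
            return ctx.Error("isEven", "%v is not even", n)
        }
        return nil
    }

    type evenCompiler struct{}

    func (evenCompiler) Compile(ctx jsonschema.CompilerContext, m map[string]interface{}) (jsonschema.ExtSchema, error) {
        if v, ok := m["isEven"]; ok {
            return evenSchema(v.(bool)), nil
        }
        return nil, nil // keyword absent: return nil per the ExtCompiler contract
    }

    func main() {
        // Meta-schema that validates the keyword itself before Compile runs.
        mc := jsonschema.NewCompiler()
        if err := mc.AddResource("isEven-meta.json", strings.NewReader(
            `{"properties": {"isEven": {"type": "boolean"}}}`)); err != nil {
            panic(err)
        }
        meta := mc.MustCompile("isEven-meta.json")

        c := jsonschema.NewCompiler()
        c.RegisterExtension("isEven", meta, evenCompiler{})
        if err := c.AddResource("s.json", strings.NewReader(`{"isEven": true}`)); err != nil {
            panic(err)
        }
        sch := c.MustCompile("s.json")
        fmt.Println(sch.Validate(json.Number("3"))) // validation error: 3 is not even
    }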
-// -// New Formats can be registered by adding to this map. Key is format name, -// value is function that knows how to validate that format. -var Formats = map[string]func(interface{}) bool{ - "date-time": isDateTime, - "date": isDate, - "time": isTime, - "duration": isDuration, - "period": isPeriod, - "hostname": isHostname, - "email": isEmail, - "ip-address": isIPV4, - "ipv4": isIPV4, - "ipv6": isIPV6, - "uri": isURI, - "iri": isURI, - "uri-reference": isURIReference, - "uriref": isURIReference, - "iri-reference": isURIReference, - "uri-template": isURITemplate, - "regex": isRegex, - "json-pointer": isJSONPointer, - "relative-json-pointer": isRelativeJSONPointer, - "uuid": isUUID, -} - -// isDateTime tells whether given string is a valid date representation -// as defined by RFC 3339, section 5.6. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details -func isDateTime(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if len(s) < 20 { // yyyy-mm-ddThh:mm:ssZ - return false - } - if s[10] != 'T' && s[10] != 't' { - return false - } - return isDate(s[:10]) && isTime(s[11:]) -} - -// isDate tells whether given string is a valid full-date production -// as defined by RFC 3339, section 5.6. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details -func isDate(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - _, err := time.Parse("2006-01-02", s) - return err == nil -} - -// isTime tells whether given string is a valid full-time production -// as defined by RFC 3339, section 5.6. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6, for details -func isTime(v interface{}) bool { - str, ok := v.(string) - if !ok { - return true - } - - // golang time package does not support leap seconds. - // so we are parsing it manually here. - - // hh:mm:ss - // 01234567 - if len(str) < 9 || str[2] != ':' || str[5] != ':' { - return false - } - isInRange := func(str string, min, max int) (int, bool) { - n, err := strconv.Atoi(str) - if err != nil { - return 0, false - } - if n < min || n > max { - return 0, false - } - return n, true - } - var h, m, s int - if h, ok = isInRange(str[0:2], 0, 23); !ok { - return false - } - if m, ok = isInRange(str[3:5], 0, 59); !ok { - return false - } - if s, ok = isInRange(str[6:8], 0, 60); !ok { - return false - } - str = str[8:] - - // parse secfrac if present - if str[0] == '.' 
{ - // dot following more than one digit - str = str[1:] - var numDigits int - for str != "" { - if str[0] < '0' || str[0] > '9' { - break - } - numDigits++ - str = str[1:] - } - if numDigits == 0 { - return false - } - } - - if len(str) == 0 { - return false - } - - if str[0] == 'z' || str[0] == 'Z' { - if len(str) != 1 { - return false - } - } else { - // time-numoffset - // +hh:mm - // 012345 - if len(str) != 6 || str[3] != ':' { - return false - } - - var sign int - if str[0] == '+' { - sign = -1 - } else if str[0] == '-' { - sign = +1 - } else { - return false - } - - var zh, zm int - if zh, ok = isInRange(str[1:3], 0, 23); !ok { - return false - } - if zm, ok = isInRange(str[4:6], 0, 59); !ok { - return false - } - - // apply timezone offset - hm := (h*60 + m) + sign*(zh*60+zm) - if hm < 0 { - hm += 24 * 60 - } - h, m = hm/60, hm%60 - } - - // check leapsecond - if s == 60 { // leap second - if h != 23 || m != 59 { - return false - } - } - - return true -} - -// isDuration tells whether given string is a valid duration format -// from the ISO 8601 ABNF as given in Appendix A of RFC 3339. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details -func isDuration(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if len(s) == 0 || s[0] != 'P' { - return false - } - s = s[1:] - parseUnits := func() (units string, ok bool) { - for len(s) > 0 && s[0] != 'T' { - digits := false - for { - if len(s) == 0 { - break - } - if s[0] < '0' || s[0] > '9' { - break - } - digits = true - s = s[1:] - } - if !digits || len(s) == 0 { - return units, false - } - units += s[:1] - s = s[1:] - } - return units, true - } - units, ok := parseUnits() - if !ok { - return false - } - if units == "W" { - return len(s) == 0 // P_W - } - if len(units) > 0 { - if strings.Index("YMD", units) == -1 { - return false - } - if len(s) == 0 { - return true // "P" dur-date - } - } - if len(s) == 0 || s[0] != 'T' { - return false - } - s = s[1:] - units, ok = parseUnits() - return ok && len(s) == 0 && len(units) > 0 && strings.Index("HMS", units) != -1 -} - -// isPeriod tells whether given string is a valid period format -// from the ISO 8601 ABNF as given in Appendix A of RFC 3339. -// -// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A, for details -func isPeriod(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - slash := strings.IndexByte(s, '/') - if slash == -1 { - return false - } - start, end := s[:slash], s[slash+1:] - if isDateTime(start) { - return isDateTime(end) || isDuration(end) - } - return isDuration(start) && isDateTime(end) -} - -// isHostname tells whether given string is a valid representation -// for an Internet host name, as defined by RFC 1034 section 3.1 and -// RFC 1123 section 2.1. -// -// See https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names, for details. 
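The Formats registry removed above is a plain map from format name to func(interface{}) bool, and the built-in checkers deliberately accept non-string values so a format only constrains strings. A registration sketch; the "even-length" format is invented for illustration, and AssertFormat is the same compiler field loadMeta sets earlier in this diff:

    package main

    import (
        "fmt"
        "strings"

        "github.com/santhosh-tekuri/jsonschema/v5"
    )

    func main() {
        // Like the built-ins, pass anything that is not a string.
        jsonschema.Formats["even-length"] = func(v interface{}) bool {
            s, ok := v.(string)
            if !ok {
                return true
            }
            return len(s)%2 == 0
        }

        c := jsonschema.NewCompiler()
        c.AssertFormat = true // under the newer drafts, "format" asserts only when asked
        if err := c.AddResource("s.json", strings.NewReader(
            `{"type": "string", "format": "even-length"}`)); err != nil {
            panic(err)
        }
        sch := c.MustCompile("s.json")
        fmt.Println(sch.Validate("odd"))  // fails: length 3
        fmt.Println(sch.Validate("four")) // <nil>
    }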
-func isHostname(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - // entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters - s = strings.TrimSuffix(s, ".") - if len(s) > 253 { - return false - } - - // Hostnames are composed of series of labels concatenated with dots, as are all domain names - for _, label := range strings.Split(s, ".") { - // Each label must be from 1 to 63 characters long - if labelLen := len(label); labelLen < 1 || labelLen > 63 { - return false - } - - // labels must not start with a hyphen - // RFC 1123 section 2.1: restriction on the first character - // is relaxed to allow either a letter or a digit - if first := s[0]; first == '-' { - return false - } - - // must not end with a hyphen - if label[len(label)-1] == '-' { - return false - } - - // labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner), - // the digits '0' through '9', and the hyphen ('-') - for _, c := range label { - if valid := (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || (c == '-'); !valid { - return false - } - } - } - - return true -} - -// isEmail tells whether given string is a valid Internet email address -// as defined by RFC 5322, section 3.4.1. -// -// See https://en.wikipedia.org/wiki/Email_address, for details. -func isEmail(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - // entire email address to be no more than 254 characters long - if len(s) > 254 { - return false - } - - // email address is generally recognized as having two parts joined with an at-sign - at := strings.LastIndexByte(s, '@') - if at == -1 { - return false - } - local := s[0:at] - domain := s[at+1:] - - // local part may be up to 64 characters long - if len(local) > 64 { - return false - } - - // domain if enclosed in brackets, must match an IP address - if len(domain) >= 2 && domain[0] == '[' && domain[len(domain)-1] == ']' { - ip := domain[1 : len(domain)-1] - if strings.HasPrefix(ip, "IPv6:") { - return isIPV6(strings.TrimPrefix(ip, "IPv6:")) - } - return isIPV4(ip) - } - - // domain must match the requirements for a hostname - if !isHostname(domain) { - return false - } - - _, err := mail.ParseAddress(s) - return err == nil -} - -// isIPV4 tells whether given string is a valid representation of an IPv4 address -// according to the "dotted-quad" ABNF syntax as defined in RFC 2673, section 3.2. -func isIPV4(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - groups := strings.Split(s, ".") - if len(groups) != 4 { - return false - } - for _, group := range groups { - n, err := strconv.Atoi(group) - if err != nil { - return false - } - if n < 0 || n > 255 { - return false - } - if n != 0 && group[0] == '0' { - return false // leading zeroes should be rejected, as they are treated as octals - } - } - return true -} - -// isIPV6 tells whether given string is a valid representation of an IPv6 address -// as defined in RFC 2373, section 2.2. -func isIPV6(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if !strings.Contains(s, ":") { - return false - } - return net.ParseIP(s) != nil -} - -// isURI tells whether given string is valid URI, according to RFC 3986. 
-func isURI(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - u, err := urlParse(s) - return err == nil && u.IsAbs() -} - -func urlParse(s string) (*url.URL, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - - // if hostname is ipv6, validate it - hostname := u.Hostname() - if strings.IndexByte(hostname, ':') != -1 { - if strings.IndexByte(u.Host, '[') == -1 || strings.IndexByte(u.Host, ']') == -1 { - return nil, errors.New("ipv6 address is not enclosed in brackets") - } - if !isIPV6(hostname) { - return nil, errors.New("invalid ipv6 address") - } - } - return u, nil -} - -// isURIReference tells whether given string is a valid URI Reference -// (either a URI or a relative-reference), according to RFC 3986. -func isURIReference(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - _, err := urlParse(s) - return err == nil && !strings.Contains(s, `\`) -} - -// isURITemplate tells whether given string is a valid URI Template -// according to RFC6570. -// -// Current implementation does minimal validation. -func isURITemplate(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - u, err := urlParse(s) - if err != nil { - return false - } - for _, item := range strings.Split(u.RawPath, "/") { - depth := 0 - for _, ch := range item { - switch ch { - case '{': - depth++ - if depth != 1 { - return false - } - case '}': - depth-- - if depth != 0 { - return false - } - } - } - if depth != 0 { - return false - } - } - return true -} - -// isRegex tells whether given string is a valid regular expression, -// according to the ECMA 262 regular expression dialect. -// -// The implementation uses go-lang regexp package. -func isRegex(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - _, err := regexp.Compile(s) - return err == nil -} - -// isJSONPointer tells whether given string is a valid JSON Pointer. -// -// Note: It returns false for JSON Pointer URI fragments. -func isJSONPointer(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if s != "" && !strings.HasPrefix(s, "/") { - return false - } - for _, item := range strings.Split(s, "/") { - for i := 0; i < len(item); i++ { - if item[i] == '~' { - if i == len(item)-1 { - return false - } - switch item[i+1] { - case '0', '1': - // valid - default: - return false - } - } - } - } - return true -} - -// isRelativeJSONPointer tells whether given string is a valid Relative JSON Pointer. -// -// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 -func isRelativeJSONPointer(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - if s == "" { - return false - } - if s[0] == '0' { - s = s[1:] - } else if s[0] >= '0' && s[0] <= '9' { - for s != "" && s[0] >= '0' && s[0] <= '9' { - s = s[1:] - } - } else { - return false - } - return s == "#" || isJSONPointer(s) -} - -// isUUID tells whether given string is a valid uuid format -// as specified in RFC4122. 
-// -// see https://datatracker.ietf.org/doc/html/rfc4122#page-4, for details -func isUUID(v interface{}) bool { - s, ok := v.(string) - if !ok { - return true - } - parseHex := func(n int) bool { - for n > 0 { - if len(s) == 0 { - return false - } - hex := (s[0] >= '0' && s[0] <= '9') || (s[0] >= 'a' && s[0] <= 'f') || (s[0] >= 'A' && s[0] <= 'F') - if !hex { - return false - } - s = s[1:] - n-- - } - return true - } - groups := []int{8, 4, 4, 4, 12} - for i, numDigits := range groups { - if !parseHex(numDigits) { - return false - } - if i == len(groups)-1 { - break - } - if len(s) == 0 || s[0] != '-' { - return false - } - s = s[1:] - } - return len(s) == 0 -} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go deleted file mode 100644 index 4198cfe37c..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/httploader/httploader.go +++ /dev/null @@ -1,38 +0,0 @@ -// Package httploader implements loader.Loader for http/https url. -// -// The package is typically only imported for the side effect of -// registering its Loaders. -// -// To use httploader, link this package into your program: -// -// import _ "github.com/santhosh-tekuri/jsonschema/v5/httploader" -package httploader - -import ( - "fmt" - "io" - "net/http" - - "github.com/santhosh-tekuri/jsonschema/v5" -) - -// Client is the default HTTP Client used to Get the resource. -var Client = http.DefaultClient - -// Load loads resource from given http(s) url. -func Load(url string) (io.ReadCloser, error) { - resp, err := Client.Get(url) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK { - _ = resp.Body.Close() - return nil, fmt.Errorf("%s returned status code %d", url, resp.StatusCode) - } - return resp.Body, nil -} - -func init() { - jsonschema.Loaders["http"] = Load - jsonschema.Loaders["https"] = Load -} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go deleted file mode 100644 index c94195c335..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/loader.go +++ /dev/null @@ -1,60 +0,0 @@ -package jsonschema - -import ( - "fmt" - "io" - "net/url" - "os" - "path/filepath" - "runtime" - "strings" -) - -func loadFileURL(s string) (io.ReadCloser, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - f := u.Path - if runtime.GOOS == "windows" { - f = strings.TrimPrefix(f, "/") - f = filepath.FromSlash(f) - } - return os.Open(f) -} - -// Loaders is a registry of functions, which know how to load -// absolute url of specific schema. -// -// New loaders can be registered by adding to this map. Key is schema, -// value is function that knows how to load url of that schema -var Loaders = map[string]func(url string) (io.ReadCloser, error){ - "file": loadFileURL, -} - -// LoaderNotFoundError is the error type returned by Load function. -// It tells that no Loader is registered for that URL Scheme. -type LoaderNotFoundError string - -func (e LoaderNotFoundError) Error() string { - return fmt.Sprintf("jsonschema: no Loader found for %s", string(e)) -} - -// LoadURL loads document at given absolute URL. The default implementation -// uses Loaders registry to lookup by schema and uses that loader. -// -// Users can change this variable, if they would like to take complete -// responsibility of loading given URL. 
Used by Compiler if its LoadURL -// field is nil. -var LoadURL = func(s string) (io.ReadCloser, error) { - u, err := url.Parse(s) - if err != nil { - return nil, err - } - loader, ok := Loaders[u.Scheme] - if !ok { - return nil, LoaderNotFoundError(s) - - } - return loader(s) -} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go deleted file mode 100644 index d65ae2a929..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/output.go +++ /dev/null @@ -1,77 +0,0 @@ -package jsonschema - -// Flag is output format with simple boolean property valid. -type Flag struct { - Valid bool `json:"valid"` -} - -// FlagOutput returns output in flag format -func (ve *ValidationError) FlagOutput() Flag { - return Flag{} -} - -// Basic --- - -// Basic is output format with flat list of output units. -type Basic struct { - Valid bool `json:"valid"` - Errors []BasicError `json:"errors"` -} - -// BasicError is output unit in basic format. -type BasicError struct { - KeywordLocation string `json:"keywordLocation"` - AbsoluteKeywordLocation string `json:"absoluteKeywordLocation"` - InstanceLocation string `json:"instanceLocation"` - Error string `json:"error"` -} - -// BasicOutput returns output in basic format -func (ve *ValidationError) BasicOutput() Basic { - var errors []BasicError - var flatten func(*ValidationError) - flatten = func(ve *ValidationError) { - errors = append(errors, BasicError{ - KeywordLocation: ve.KeywordLocation, - AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation, - InstanceLocation: ve.InstanceLocation, - Error: ve.Message, - }) - for _, cause := range ve.Causes { - flatten(cause) - } - } - flatten(ve) - return Basic{Errors: errors} -} - -// Detailed --- - -// Detailed is output format based on structure of schema. -type Detailed struct { - Valid bool `json:"valid"` - KeywordLocation string `json:"keywordLocation"` - AbsoluteKeywordLocation string `json:"absoluteKeywordLocation"` - InstanceLocation string `json:"instanceLocation"` - Error string `json:"error,omitempty"` - Errors []Detailed `json:"errors,omitempty"` -} - -// DetailedOutput returns output in detailed format -func (ve *ValidationError) DetailedOutput() Detailed { - var errors []Detailed - for _, cause := range ve.Causes { - errors = append(errors, cause.DetailedOutput()) - } - var message = ve.Message - if len(ve.Causes) > 0 { - message = "" - } - return Detailed{ - KeywordLocation: ve.KeywordLocation, - AbsoluteKeywordLocation: ve.AbsoluteKeywordLocation, - InstanceLocation: ve.InstanceLocation, - Error: message, - Errors: errors, - } -} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go deleted file mode 100644 index 18349daac7..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/resource.go +++ /dev/null @@ -1,280 +0,0 @@ -package jsonschema - -import ( - "encoding/json" - "fmt" - "io" - "net/url" - "path/filepath" - "runtime" - "strconv" - "strings" -) - -type resource struct { - url string // base url of resource. can be empty - floc string // fragment with json-pointer from root resource - doc interface{} - draft *Draft - subresources map[string]*resource // key is floc. 
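loader.go keys its Loaders registry by URL scheme, which is how the user-defined sources mentioned in the package documentation plug in. A sketch registering a hypothetical in-memory "mem" scheme (the scheme name and store are invented):

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "strings"

        "github.com/santhosh-tekuri/jsonschema/v5"
    )

    // memSchemas is a hypothetical in-memory schema store.
    var memSchemas = map[string]string{
        "mem://example/schema.json": `{"type": "integer"}`,
    }

    func main() {
        jsonschema.Loaders["mem"] = func(url string) (io.ReadCloser, error) {
            s, ok := memSchemas[url]
            if !ok {
                return nil, fmt.Errorf("mem loader: %s not found", url)
            }
            return io.NopCloser(strings.NewReader(s)), nil
        }

        sch := jsonschema.NewCompiler().MustCompile("mem://example/schema.json")
        fmt.Println(sch.Validate(json.Number("7"))) // <nil>
    }

The adjacent output.go removal covers the Flag, Basic, and Detailed output formats: a caught *ValidationError converts via FlagOutput, BasicOutput, or DetailedOutput, each of which marshals directly to JSON.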
only applicable for root resource - schema *Schema -} - -func (r *resource) String() string { - return r.url + r.floc -} - -func newResource(url string, r io.Reader) (*resource, error) { - if strings.IndexByte(url, '#') != -1 { - panic(fmt.Sprintf("BUG: newResource(%q)", url)) - } - doc, err := unmarshal(r) - if err != nil { - return nil, fmt.Errorf("jsonschema: invalid json %s: %v", url, err) - } - url, err = toAbs(url) - if err != nil { - return nil, err - } - return &resource{ - url: url, - floc: "#", - doc: doc, - }, nil -} - -// fillSubschemas fills subschemas in res into r.subresources -func (r *resource) fillSubschemas(c *Compiler, res *resource) error { - if err := c.validateSchema(r, res.doc, res.floc[1:]); err != nil { - return err - } - - if r.subresources == nil { - r.subresources = make(map[string]*resource) - } - if err := r.draft.listSubschemas(res, r.baseURL(res.floc), r.subresources); err != nil { - return err - } - - // ensure subresource.url uniqueness - url2floc := make(map[string]string) - for _, sr := range r.subresources { - if sr.url != "" { - if floc, ok := url2floc[sr.url]; ok { - return fmt.Errorf("jsonschema: %q and %q in %s have same canonical-uri", floc[1:], sr.floc[1:], r.url) - } - url2floc[sr.url] = sr.floc - } - } - - return nil -} - -// listResources lists all subresources in res -func (r *resource) listResources(res *resource) []*resource { - var result []*resource - prefix := res.floc + "/" - for _, sr := range r.subresources { - if strings.HasPrefix(sr.floc, prefix) { - result = append(result, sr) - } - } - return result -} - -func (r *resource) findResource(url string) *resource { - if r.url == url { - return r - } - for _, res := range r.subresources { - if res.url == url { - return res - } - } - return nil -} - -// resolve fragment f with sr as base -func (r *resource) resolveFragment(c *Compiler, sr *resource, f string) (*resource, error) { - if f == "#" || f == "#/" { - return sr, nil - } - - // resolve by anchor - if !strings.HasPrefix(f, "#/") { - // check in given resource - for _, anchor := range r.draft.anchors(sr.doc) { - if anchor == f[1:] { - return sr, nil - } - } - - // check in subresources that has same base url - prefix := sr.floc + "/" - for _, res := range r.subresources { - if strings.HasPrefix(res.floc, prefix) && r.baseURL(res.floc) == sr.url { - for _, anchor := range r.draft.anchors(res.doc) { - if anchor == f[1:] { - return res, nil - } - } - } - } - return nil, nil - } - - // resolve by ptr - floc := sr.floc + f[1:] - if res, ok := r.subresources[floc]; ok { - return res, nil - } - - // non-standrad location - doc := r.doc - for _, item := range strings.Split(floc[2:], "/") { - item = strings.Replace(item, "~1", "/", -1) - item = strings.Replace(item, "~0", "~", -1) - item, err := url.PathUnescape(item) - if err != nil { - return nil, err - } - switch d := doc.(type) { - case map[string]interface{}: - if _, ok := d[item]; !ok { - return nil, nil - } - doc = d[item] - case []interface{}: - index, err := strconv.Atoi(item) - if err != nil { - return nil, err - } - if index < 0 || index >= len(d) { - return nil, nil - } - doc = d[index] - default: - return nil, nil - } - } - - id, err := r.draft.resolveID(r.baseURL(floc), doc) - if err != nil { - return nil, err - } - res := &resource{url: id, floc: floc, doc: doc} - r.subresources[floc] = res - if err := r.fillSubschemas(c, res); err != nil { - return nil, err - } - return res, nil -} - -func (r *resource) baseURL(floc string) string { - for { - if sr, ok := 
r.subresources[floc]; ok { - if sr.url != "" { - return sr.url - } - } - slash := strings.LastIndexByte(floc, '/') - if slash == -1 { - break - } - floc = floc[:slash] - } - return r.url -} - -// url helpers --- - -func toAbs(s string) (string, error) { - // if windows absolute file path, convert to file url - // because: net/url parses driver name as scheme - if runtime.GOOS == "windows" && len(s) >= 3 && s[1:3] == `:\` { - s = "file:///" + filepath.ToSlash(s) - } - - u, err := url.Parse(s) - if err != nil { - return "", err - } - if u.IsAbs() { - return s, nil - } - - // s is filepath - if s, err = filepath.Abs(s); err != nil { - return "", err - } - if runtime.GOOS == "windows" { - s = "file:///" + filepath.ToSlash(s) - } else { - s = "file://" + s - } - u, err = url.Parse(s) // to fix spaces in filepath - return u.String(), err -} - -func resolveURL(base, ref string) (string, error) { - if ref == "" { - return base, nil - } - if strings.HasPrefix(ref, "urn:") { - return ref, nil - } - - refURL, err := url.Parse(ref) - if err != nil { - return "", err - } - if refURL.IsAbs() { - return ref, nil - } - - if strings.HasPrefix(base, "urn:") { - base, _ = split(base) - return base + ref, nil - } - - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - return baseURL.ResolveReference(refURL).String(), nil -} - -func split(uri string) (string, string) { - hash := strings.IndexByte(uri, '#') - if hash == -1 { - return uri, "#" - } - f := uri[hash:] - if f == "#/" { - f = "#" - } - return uri[0:hash], f -} - -func (s *Schema) url() string { - u, _ := split(s.Location) - return u -} - -func (s *Schema) loc() string { - _, f := split(s.Location) - return f[1:] -} - -func unmarshal(r io.Reader) (interface{}, error) { - decoder := json.NewDecoder(r) - decoder.UseNumber() - var doc interface{} - if err := decoder.Decode(&doc); err != nil { - return nil, err - } - if t, _ := decoder.Token(); t != nil { - return nil, fmt.Errorf("invalid character %v after top-level value", t) - } - return doc, nil -} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go deleted file mode 100644 index 688f0a6fee..0000000000 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/schema.go +++ /dev/null @@ -1,900 +0,0 @@ -package jsonschema - -import ( - "bytes" - "encoding/json" - "fmt" - "hash/maphash" - "math/big" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "unicode/utf8" -) - -// A Schema represents compiled version of json-schema. -type Schema struct { - Location string // absolute location - - Draft *Draft // draft used by schema. - meta *Schema - vocab []string - dynamicAnchors []*Schema - - // type agnostic validations - Format string - format func(interface{}) bool - Always *bool // always pass/fail. used when booleans are used as schemas in draft-07. - Ref *Schema - RecursiveAnchor bool - RecursiveRef *Schema - DynamicAnchor string - DynamicRef *Schema - dynamicRefAnchor string - Types []string // allowed types. - Constant []interface{} // first element in slice is constant value. note: slice is used to capture nil constant. - Enum []interface{} // allowed values. - enumError string // error message for enum fail. captured here to avoid constructing error message every time. - Not *Schema - AllOf []*Schema - AnyOf []*Schema - OneOf []*Schema - If *Schema - Then *Schema // nil, when If is nil. - Else *Schema // nil, when If is nil. 
- - // object validations - MinProperties int // -1 if not specified. - MaxProperties int // -1 if not specified. - Required []string // list of required properties. - Properties map[string]*Schema - PropertyNames *Schema - RegexProperties bool // property names must be valid regex. used only in draft4 as workaround in metaschema. - PatternProperties map[*regexp.Regexp]*Schema - AdditionalProperties interface{} // nil or bool or *Schema. - Dependencies map[string]interface{} // map value is *Schema or []string. - DependentRequired map[string][]string - DependentSchemas map[string]*Schema - UnevaluatedProperties *Schema - - // array validations - MinItems int // -1 if not specified. - MaxItems int // -1 if not specified. - UniqueItems bool - Items interface{} // nil or *Schema or []*Schema - AdditionalItems interface{} // nil or bool or *Schema. - PrefixItems []*Schema - Items2020 *Schema // items keyword reintroduced in draft 2020-12 - Contains *Schema - ContainsEval bool // whether any item in an array that passes validation of the contains schema is considered "evaluated" - MinContains int // 1 if not specified - MaxContains int // -1 if not specified - UnevaluatedItems *Schema - - // string validations - MinLength int // -1 if not specified. - MaxLength int // -1 if not specified. - Pattern *regexp.Regexp - ContentEncoding string - decoder func(string) ([]byte, error) - ContentMediaType string - mediaType func([]byte) error - ContentSchema *Schema - - // number validators - Minimum *big.Rat - ExclusiveMinimum *big.Rat - Maximum *big.Rat - ExclusiveMaximum *big.Rat - MultipleOf *big.Rat - - // annotations. captured only when Compiler.ExtractAnnotations is true. - Title string - Description string - Default interface{} - Comment string - ReadOnly bool - WriteOnly bool - Examples []interface{} - Deprecated bool - - // user defined extensions - Extensions map[string]ExtSchema -} - -func (s *Schema) String() string { - return s.Location -} - -func newSchema(url, floc string, draft *Draft, doc interface{}) *Schema { - // fill with default values - s := &Schema{ - Location: url + floc, - Draft: draft, - MinProperties: -1, - MaxProperties: -1, - MinItems: -1, - MaxItems: -1, - MinContains: 1, - MaxContains: -1, - MinLength: -1, - MaxLength: -1, - } - - if doc, ok := doc.(map[string]interface{}); ok { - if ra, ok := doc["$recursiveAnchor"]; ok { - if ra, ok := ra.(bool); ok { - s.RecursiveAnchor = ra - } - } - if da, ok := doc["$dynamicAnchor"]; ok { - if da, ok := da.(string); ok { - s.DynamicAnchor = da - } - } - } - return s -} - -func (s *Schema) hasVocab(name string) bool { - if s == nil { // during bootstrap - return true - } - if name == "core" { - return true - } - for _, url := range s.vocab { - if url == "https://json-schema.org/draft/2019-09/vocab/"+name { - return true - } - if url == "https://json-schema.org/draft/2020-12/vocab/"+name { - return true - } - } - return false -} - -// Validate validates given doc, against the json-schema s. -// -// the v must be the raw json value. for number precision -// unmarshal with json.UseNumber(). -// -// returns *ValidationError if v does not confirm with schema s. -// returns InfiniteLoopError if it detects loop during validation. -// returns InvalidJSONTypeError if it detects any non json value in v. 
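The v6 module that replaces this file keeps the same entry point shape. As a minimal sketch of the new call pattern (the schema path and instance document here are illustrative, not taken from this repository), assuming jsonschema v6's own `UnmarshalJSON` helper, which decodes numbers as `json.Number` and so satisfies the precision requirement described in the comment above:

```go
package main

import (
	"fmt"
	"strings"

	jsonschema "github.com/santhosh-tekuri/jsonschema/v6"
)

func main() {
	// Decode the instance with the library's helper so numbers stay json.Number.
	inst, err := jsonschema.UnmarshalJSON(strings.NewReader(`{"port": 8080}`))
	if err != nil {
		panic(err)
	}

	c := jsonschema.NewCompiler()
	sch, err := c.Compile("schema.json") // hypothetical local schema file
	if err != nil {
		panic(err)
	}

	if err := sch.Validate(inst); err != nil {
		fmt.Println(err) // a *jsonschema.ValidationError on failure
	}
}
```

The v5 implementation being deleted continues below.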
-func (s *Schema) Validate(v interface{}) (err error) { - return s.validateValue(v, "") -} - -func (s *Schema) validateValue(v interface{}, vloc string) (err error) { - defer func() { - if r := recover(); r != nil { - switch r := r.(type) { - case InfiniteLoopError, InvalidJSONTypeError: - err = r.(error) - default: - panic(r) - } - } - }() - if _, err := s.validate(nil, 0, "", v, vloc); err != nil { - ve := ValidationError{ - KeywordLocation: "", - AbsoluteKeywordLocation: s.Location, - InstanceLocation: vloc, - Message: fmt.Sprintf("doesn't validate with %s", s.Location), - } - return ve.causes(err) - } - return nil -} - -// validate validates given value v with this schema. -func (s *Schema) validate(scope []schemaRef, vscope int, spath string, v interface{}, vloc string) (result validationResult, err error) { - validationError := func(keywordPath string, format string, a ...interface{}) *ValidationError { - return &ValidationError{ - KeywordLocation: keywordLocation(scope, keywordPath), - AbsoluteKeywordLocation: joinPtr(s.Location, keywordPath), - InstanceLocation: vloc, - Message: fmt.Sprintf(format, a...), - } - } - - sref := schemaRef{spath, s, false} - if err := checkLoop(scope[len(scope)-vscope:], sref); err != nil { - panic(err) - } - scope = append(scope, sref) - vscope++ - - // populate result - switch v := v.(type) { - case map[string]interface{}: - result.unevalProps = make(map[string]struct{}) - for pname := range v { - result.unevalProps[pname] = struct{}{} - } - case []interface{}: - result.unevalItems = make(map[int]struct{}) - for i := range v { - result.unevalItems[i] = struct{}{} - } - } - - validate := func(sch *Schema, schPath string, v interface{}, vpath string) error { - vloc := vloc - if vpath != "" { - vloc += "/" + vpath - } - _, err := sch.validate(scope, 0, schPath, v, vloc) - return err - } - - validateInplace := func(sch *Schema, schPath string) error { - vr, err := sch.validate(scope, vscope, schPath, v, vloc) - if err == nil { - // update result - for pname := range result.unevalProps { - if _, ok := vr.unevalProps[pname]; !ok { - delete(result.unevalProps, pname) - } - } - for i := range result.unevalItems { - if _, ok := vr.unevalItems[i]; !ok { - delete(result.unevalItems, i) - } - } - } - return err - } - - if s.Always != nil { - if !*s.Always { - return result, validationError("", "not allowed") - } - return result, nil - } - - if len(s.Types) > 0 { - vType := jsonType(v) - matched := false - for _, t := range s.Types { - if vType == t { - matched = true - break - } else if t == "integer" && vType == "number" { - num, _ := new(big.Rat).SetString(fmt.Sprint(v)) - if num.IsInt() { - matched = true - break - } - } - } - if !matched { - return result, validationError("type", "expected %s, but got %s", strings.Join(s.Types, " or "), vType) - } - } - - var errors []error - - if len(s.Constant) > 0 { - if !equals(v, s.Constant[0]) { - switch jsonType(s.Constant[0]) { - case "object", "array": - errors = append(errors, validationError("const", "const failed")) - default: - errors = append(errors, validationError("const", "value must be %#v", s.Constant[0])) - } - } - } - - if len(s.Enum) > 0 { - matched := false - for _, item := range s.Enum { - if equals(v, item) { - matched = true - break - } - } - if !matched { - errors = append(errors, validationError("enum", s.enumError)) - } - } - - if s.format != nil && !s.format(v) { - var val = v - if v, ok := v.(string); ok { - val = quote(v) - } - errors = append(errors, validationError("format", "%v is not valid 
%s", val, quote(s.Format))) - } - - switch v := v.(type) { - case map[string]interface{}: - if s.MinProperties != -1 && len(v) < s.MinProperties { - errors = append(errors, validationError("minProperties", "minimum %d properties allowed, but found %d properties", s.MinProperties, len(v))) - } - if s.MaxProperties != -1 && len(v) > s.MaxProperties { - errors = append(errors, validationError("maxProperties", "maximum %d properties allowed, but found %d properties", s.MaxProperties, len(v))) - } - if len(s.Required) > 0 { - var missing []string - for _, pname := range s.Required { - if _, ok := v[pname]; !ok { - missing = append(missing, quote(pname)) - } - } - if len(missing) > 0 { - errors = append(errors, validationError("required", "missing properties: %s", strings.Join(missing, ", "))) - } - } - - for pname, sch := range s.Properties { - if pvalue, ok := v[pname]; ok { - delete(result.unevalProps, pname) - if err := validate(sch, "properties/"+escape(pname), pvalue, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - - if s.PropertyNames != nil { - for pname := range v { - if err := validate(s.PropertyNames, "propertyNames", pname, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - - if s.RegexProperties { - for pname := range v { - if !isRegex(pname) { - errors = append(errors, validationError("", "patternProperty %s is not valid regex", quote(pname))) - } - } - } - for pattern, sch := range s.PatternProperties { - for pname, pvalue := range v { - if pattern.MatchString(pname) { - delete(result.unevalProps, pname) - if err := validate(sch, "patternProperties/"+escape(pattern.String()), pvalue, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - } - if s.AdditionalProperties != nil { - if allowed, ok := s.AdditionalProperties.(bool); ok { - if !allowed && len(result.unevalProps) > 0 { - errors = append(errors, validationError("additionalProperties", "additionalProperties %s not allowed", result.unevalPnames())) - } - } else { - schema := s.AdditionalProperties.(*Schema) - for pname := range result.unevalProps { - if pvalue, ok := v[pname]; ok { - if err := validate(schema, "additionalProperties", pvalue, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - } - result.unevalProps = nil - } - for dname, dvalue := range s.Dependencies { - if _, ok := v[dname]; ok { - switch dvalue := dvalue.(type) { - case *Schema: - if err := validateInplace(dvalue, "dependencies/"+escape(dname)); err != nil { - errors = append(errors, err) - } - case []string: - for i, pname := range dvalue { - if _, ok := v[pname]; !ok { - errors = append(errors, validationError("dependencies/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname))) - } - } - } - } - } - for dname, dvalue := range s.DependentRequired { - if _, ok := v[dname]; ok { - for i, pname := range dvalue { - if _, ok := v[pname]; !ok { - errors = append(errors, validationError("dependentRequired/"+escape(dname)+"/"+strconv.Itoa(i), "property %s is required, if %s property exists", quote(pname), quote(dname))) - } - } - } - } - for dname, sch := range s.DependentSchemas { - if _, ok := v[dname]; ok { - if err := validateInplace(sch, "dependentSchemas/"+escape(dname)); err != nil { - errors = append(errors, err) - } - } - } - - case []interface{}: - if s.MinItems != -1 && len(v) < s.MinItems { - errors = append(errors, validationError("minItems", "minimum %d items required, but found %d items", s.MinItems, 
len(v))) - } - if s.MaxItems != -1 && len(v) > s.MaxItems { - errors = append(errors, validationError("maxItems", "maximum %d items required, but found %d items", s.MaxItems, len(v))) - } - if s.UniqueItems { - if len(v) <= 20 { - outer1: - for i := 1; i < len(v); i++ { - for j := 0; j < i; j++ { - if equals(v[i], v[j]) { - errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i)) - break outer1 - } - } - } - } else { - m := make(map[uint64][]int) - var h maphash.Hash - outer2: - for i, item := range v { - h.Reset() - hash(item, &h) - k := h.Sum64() - if err != nil { - panic(err) - } - arr, ok := m[k] - if ok { - for _, j := range arr { - if equals(v[j], item) { - errors = append(errors, validationError("uniqueItems", "items at index %d and %d are equal", j, i)) - break outer2 - } - } - } - arr = append(arr, i) - m[k] = arr - } - } - } - - // items + additionalItems - switch items := s.Items.(type) { - case *Schema: - for i, item := range v { - if err := validate(items, "items", item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } - result.unevalItems = nil - case []*Schema: - for i, item := range v { - if i < len(items) { - delete(result.unevalItems, i) - if err := validate(items[i], "items/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } else if sch, ok := s.AdditionalItems.(*Schema); ok { - delete(result.unevalItems, i) - if err := validate(sch, "additionalItems", item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } else { - break - } - } - if additionalItems, ok := s.AdditionalItems.(bool); ok { - if additionalItems { - result.unevalItems = nil - } else if len(v) > len(items) { - errors = append(errors, validationError("additionalItems", "only %d items are allowed, but found %d items", len(items), len(v))) - } - } - } - - // prefixItems + items - for i, item := range v { - if i < len(s.PrefixItems) { - delete(result.unevalItems, i) - if err := validate(s.PrefixItems[i], "prefixItems/"+strconv.Itoa(i), item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } else if s.Items2020 != nil { - delete(result.unevalItems, i) - if err := validate(s.Items2020, "items", item, strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } else { - break - } - } - - // contains + minContains + maxContains - if s.Contains != nil && (s.MinContains != -1 || s.MaxContains != -1) { - matched := 0 - var causes []error - for i, item := range v { - if err := validate(s.Contains, "contains", item, strconv.Itoa(i)); err != nil { - causes = append(causes, err) - } else { - matched++ - if s.ContainsEval { - delete(result.unevalItems, i) - } - } - } - if s.MinContains != -1 && matched < s.MinContains { - errors = append(errors, validationError("minContains", "valid must be >= %d, but got %d", s.MinContains, matched).add(causes...)) - } - if s.MaxContains != -1 && matched > s.MaxContains { - errors = append(errors, validationError("maxContains", "valid must be <= %d, but got %d", s.MaxContains, matched)) - } - } - - case string: - // minLength + maxLength - if s.MinLength != -1 || s.MaxLength != -1 { - length := utf8.RuneCount([]byte(v)) - if s.MinLength != -1 && length < s.MinLength { - errors = append(errors, validationError("minLength", "length must be >= %d, but got %d", s.MinLength, length)) - } - if s.MaxLength != -1 && length > s.MaxLength { - errors = append(errors, validationError("maxLength", "length must be <= %d, but got %d", s.MaxLength, 
length)) - } - } - - if s.Pattern != nil && !s.Pattern.MatchString(v) { - errors = append(errors, validationError("pattern", "does not match pattern %s", quote(s.Pattern.String()))) - } - - // contentEncoding + contentMediaType - if s.decoder != nil || s.mediaType != nil { - decoded := s.ContentEncoding == "" - var content []byte - if s.decoder != nil { - b, err := s.decoder(v) - if err != nil { - errors = append(errors, validationError("contentEncoding", "value is not %s encoded", s.ContentEncoding)) - } else { - content, decoded = b, true - } - } - if decoded && s.mediaType != nil { - if s.decoder == nil { - content = []byte(v) - } - if err := s.mediaType(content); err != nil { - errors = append(errors, validationError("contentMediaType", "value is not of mediatype %s", quote(s.ContentMediaType))) - } - } - if decoded && s.ContentSchema != nil { - contentJSON, err := unmarshal(bytes.NewReader(content)) - if err != nil { - errors = append(errors, validationError("contentSchema", "value is not valid json")) - } else { - err := validate(s.ContentSchema, "contentSchema", contentJSON, "") - if err != nil { - errors = append(errors, err) - } - } - } - } - - case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64: - // lazy convert to *big.Rat to avoid allocation - var numVal *big.Rat - num := func() *big.Rat { - if numVal == nil { - numVal, _ = new(big.Rat).SetString(fmt.Sprint(v)) - } - return numVal - } - f64 := func(r *big.Rat) float64 { - f, _ := r.Float64() - return f - } - if s.Minimum != nil && num().Cmp(s.Minimum) < 0 { - errors = append(errors, validationError("minimum", "must be >= %v but found %v", f64(s.Minimum), v)) - } - if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 { - errors = append(errors, validationError("exclusiveMinimum", "must be > %v but found %v", f64(s.ExclusiveMinimum), v)) - } - if s.Maximum != nil && num().Cmp(s.Maximum) > 0 { - errors = append(errors, validationError("maximum", "must be <= %v but found %v", f64(s.Maximum), v)) - } - if s.ExclusiveMaximum != nil && num().Cmp(s.ExclusiveMaximum) >= 0 { - errors = append(errors, validationError("exclusiveMaximum", "must be < %v but found %v", f64(s.ExclusiveMaximum), v)) - } - if s.MultipleOf != nil { - if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() { - errors = append(errors, validationError("multipleOf", "%v not multipleOf %v", v, f64(s.MultipleOf))) - } - } - } - - // $ref + $recursiveRef + $dynamicRef - validateRef := func(sch *Schema, refPath string) error { - if sch != nil { - if err := validateInplace(sch, refPath); err != nil { - var url = sch.Location - if s.url() == sch.url() { - url = sch.loc() - } - return validationError(refPath, "doesn't validate with %s", quote(url)).causes(err) - } - } - return nil - } - if err := validateRef(s.Ref, "$ref"); err != nil { - errors = append(errors, err) - } - if s.RecursiveRef != nil { - sch := s.RecursiveRef - if sch.RecursiveAnchor { - // recursiveRef based on scope - for _, e := range scope { - if e.schema.RecursiveAnchor { - sch = e.schema - break - } - } - } - if err := validateRef(sch, "$recursiveRef"); err != nil { - errors = append(errors, err) - } - } - if s.DynamicRef != nil { - sch := s.DynamicRef - if s.dynamicRefAnchor != "" && sch.DynamicAnchor == s.dynamicRefAnchor { - // dynamicRef based on scope - for i := len(scope) - 1; i >= 0; i-- { - sr := scope[i] - if sr.discard { - break - } - for _, da := range sr.schema.dynamicAnchors { - if da.DynamicAnchor == s.DynamicRef.DynamicAnchor && da != 
s.DynamicRef { - sch = da - break - } - } - } - } - if err := validateRef(sch, "$dynamicRef"); err != nil { - errors = append(errors, err) - } - } - - if s.Not != nil && validateInplace(s.Not, "not") == nil { - errors = append(errors, validationError("not", "not failed")) - } - - for i, sch := range s.AllOf { - schPath := "allOf/" + strconv.Itoa(i) - if err := validateInplace(sch, schPath); err != nil { - errors = append(errors, validationError(schPath, "allOf failed").add(err)) - } - } - - if len(s.AnyOf) > 0 { - matched := false - var causes []error - for i, sch := range s.AnyOf { - if err := validateInplace(sch, "anyOf/"+strconv.Itoa(i)); err == nil { - matched = true - } else { - causes = append(causes, err) - } - } - if !matched { - errors = append(errors, validationError("anyOf", "anyOf failed").add(causes...)) - } - } - - if len(s.OneOf) > 0 { - matched := -1 - var causes []error - for i, sch := range s.OneOf { - if err := validateInplace(sch, "oneOf/"+strconv.Itoa(i)); err == nil { - if matched == -1 { - matched = i - } else { - errors = append(errors, validationError("oneOf", "valid against schemas at indexes %d and %d", matched, i)) - break - } - } else { - causes = append(causes, err) - } - } - if matched == -1 { - errors = append(errors, validationError("oneOf", "oneOf failed").add(causes...)) - } - } - - // if + then + else - if s.If != nil { - err := validateInplace(s.If, "if") - // "if" leaves dynamic scope - scope[len(scope)-1].discard = true - if err == nil { - if s.Then != nil { - if err := validateInplace(s.Then, "then"); err != nil { - errors = append(errors, validationError("then", "if-then failed").add(err)) - } - } - } else { - if s.Else != nil { - if err := validateInplace(s.Else, "else"); err != nil { - errors = append(errors, validationError("else", "if-else failed").add(err)) - } - } - } - // restore dynamic scope - scope[len(scope)-1].discard = false - } - - for _, ext := range s.Extensions { - if err := ext.Validate(ValidationContext{result, validate, validateInplace, validationError}, v); err != nil { - errors = append(errors, err) - } - } - - // unevaluatedProperties + unevaluatedItems - switch v := v.(type) { - case map[string]interface{}: - if s.UnevaluatedProperties != nil { - for pname := range result.unevalProps { - if pvalue, ok := v[pname]; ok { - if err := validate(s.UnevaluatedProperties, "unevaluatedProperties", pvalue, escape(pname)); err != nil { - errors = append(errors, err) - } - } - } - result.unevalProps = nil - } - case []interface{}: - if s.UnevaluatedItems != nil { - for i := range result.unevalItems { - if err := validate(s.UnevaluatedItems, "unevaluatedItems", v[i], strconv.Itoa(i)); err != nil { - errors = append(errors, err) - } - } - result.unevalItems = nil - } - } - - switch len(errors) { - case 0: - return result, nil - case 1: - return result, errors[0] - default: - return result, validationError("", "").add(errors...) // empty message, used just for wrapping - } -} - -type validationResult struct { - unevalProps map[string]struct{} - unevalItems map[int]struct{} -} - -func (vr validationResult) unevalPnames() string { - pnames := make([]string, 0, len(vr.unevalProps)) - for pname := range vr.unevalProps { - pnames = append(pnames, quote(pname)) - } - return strings.Join(pnames, ", ") -} - -// jsonType returns the json type of given value v. 
-// -// It panics if the given value is not valid json value -func jsonType(v interface{}) string { - switch v.(type) { - case nil: - return "null" - case bool: - return "boolean" - case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64: - return "number" - case string: - return "string" - case []interface{}: - return "array" - case map[string]interface{}: - return "object" - } - panic(InvalidJSONTypeError(fmt.Sprintf("%T", v))) -} - -// equals tells if given two json values are equal or not. -func equals(v1, v2 interface{}) bool { - v1Type := jsonType(v1) - if v1Type != jsonType(v2) { - return false - } - switch v1Type { - case "array": - arr1, arr2 := v1.([]interface{}), v2.([]interface{}) - if len(arr1) != len(arr2) { - return false - } - for i := range arr1 { - if !equals(arr1[i], arr2[i]) { - return false - } - } - return true - case "object": - obj1, obj2 := v1.(map[string]interface{}), v2.(map[string]interface{}) - if len(obj1) != len(obj2) { - return false - } - for k, v1 := range obj1 { - if v2, ok := obj2[k]; ok { - if !equals(v1, v2) { - return false - } - } else { - return false - } - } - return true - case "number": - num1, _ := new(big.Rat).SetString(fmt.Sprint(v1)) - num2, _ := new(big.Rat).SetString(fmt.Sprint(v2)) - return num1.Cmp(num2) == 0 - default: - return v1 == v2 - } -} - -func hash(v interface{}, h *maphash.Hash) { - switch v := v.(type) { - case nil: - h.WriteByte(0) - case bool: - h.WriteByte(1) - if v { - h.WriteByte(1) - } else { - h.WriteByte(0) - } - case json.Number, float32, float64, int, int8, int32, int64, uint, uint8, uint32, uint64: - h.WriteByte(2) - num, _ := new(big.Rat).SetString(fmt.Sprint(v)) - h.Write(num.Num().Bytes()) - h.Write(num.Denom().Bytes()) - case string: - h.WriteByte(3) - h.WriteString(v) - case []interface{}: - h.WriteByte(4) - for _, item := range v { - hash(item, h) - } - case map[string]interface{}: - h.WriteByte(5) - props := make([]string, 0, len(v)) - for prop := range v { - props = append(props, prop) - } - sort.Slice(props, func(i, j int) bool { - return props[i] < props[j] - }) - for _, prop := range props { - hash(prop, h) - hash(v[prop], h) - } - default: - panic(InvalidJSONTypeError(fmt.Sprintf("%T", v))) - } -} - -// escape converts given token to valid json-pointer token -func escape(token string) string { - token = strings.ReplaceAll(token, "~", "~0") - token = strings.ReplaceAll(token, "/", "~1") - return url.PathEscape(token) -} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules similarity index 91% rename from hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules rename to hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules index 314da31c5e..d14f5ea70f 100644 --- a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/.gitmodules +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules @@ -1,3 +1,4 @@ [submodule "testdata/JSON-Schema-Test-Suite"] path = testdata/JSON-Schema-Test-Suite url = https://github.com/json-schema-org/JSON-Schema-Test-Suite.git + branch = main diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml new file mode 100644 index 0000000000..b3cd1749a3 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml @@ -0,0 +1,5 @@ +linters: + enable: + - nakedret + - 
errname + - godot diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml new file mode 100644 index 0000000000..695b502ed9 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml @@ -0,0 +1,7 @@ +- id: jsonschema-validate + name: Validate JSON against JSON Schema + description: ensure json files follow specified JSON Schema + entry: jv + language: golang + additional_dependencies: + - ./cmd/jv diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE similarity index 100% rename from hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v5/LICENSE rename to hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md new file mode 100644 index 0000000000..0831d7f580 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md @@ -0,0 +1,86 @@ +# jsonschema v6.0.0 + +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v6) +[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema/v6)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema/v6) +[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=boon)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml) +[![codecov](https://codecov.io/gh/santhosh-tekuri/jsonschema/branch/boon/graph/badge.svg?token=JMVj1pFT2l)](https://codecov.io/gh/santhosh-tekuri/jsonschema/tree/boon) + +see [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v6) for examples + +## Library Features + +- [x] pass [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite) excluding optional(compare with other impls at [bowtie](https://bowtie-json-schema.github.io/bowtie/#)) + - [x] [![draft-04](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft4.json)](https://bowtie.report/#/dialects/draft4) + - [x] [![draft-06](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft6.json)](https://bowtie.report/#/dialects/draft6) + - [x] [![draft-07](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft7.json)](https://bowtie.report/#/dialects/draft7) + - [x] [![draft/2019-09](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft2019-09.json)](https://bowtie.report/#/dialects/draft2019-09) + - [x] [![draft/2020-12](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft2020-12.json)](https://bowtie.report/#/dialects/draft2020-12) +- [x] detect infinite loop traps + - [x] `$schema` cycle + - [x] validation cycle +- [x] custom `$schema` url +- [x] vocabulary based validation +- [x] custom regex engine +- [x] format assertions + - [x] flag to enable in draft >= 2019-09 + - [x] custom format registration + - [x] built-in formats + - [x] regex, uuid + - [x] ipv4, ipv6 + - [x] hostname, email + - 
[x] date, time, date-time, duration
+    - [x] json-pointer, relative-json-pointer
+    - [x] uri, uri-reference, uri-template
+    - [x] iri, iri-reference
+    - [x] period, semver
+- [x] content assertions
+  - [x] flag to enable in draft >= 7
+  - [x] contentEncoding
+    - [x] base64
+    - [x] custom
+  - [x] contentMediaType
+    - [x] application/json
+    - [x] custom
+  - [x] contentSchema
+- [x] errors
+  - [x] introspectable
+  - [x] hierarchy
+    - [x] alternative display with `#`
+  - [x] output
+    - [x] flag
+    - [x] basic
+    - [x] detailed
+- [x] custom vocabulary
+  - enable via `$vocabulary` for draft >= 2019-09
+  - enable via flag for draft <= 7
+- [x] mixed dialect support
+
+## CLI
+
+to install: `go install github.com/santhosh-tekuri/jsonschema/cmd/jv@latest`
+
+```
+Usage: jv [OPTIONS] SCHEMA [INSTANCE...]
+
+Options:
+  -c, --assert-content    Enable content assertions with draft >= 7
+  -f, --assert-format     Enable format assertions with draft >= 2019
+      --cacert pem-file   Use the specified pem-file to verify the peer. The file may contain multiple CA certificates
+  -d, --draft version     Draft version used when '$schema' is missing. Valid values 4, 6, 7, 2019, 2020 (default 2020)
+  -h, --help              Print help information
+  -k, --insecure          Use insecure TLS connection
+  -o, --output format     Output format. Valid values simple, alt, flag, basic, detailed (default "simple")
+  -q, --quiet             Do not print errors
+  -v, --version           Print build information
+```
+
+- [x] exit code `1` for validation errors, `2` for usage errors
+- [x] validate both schema and multiple instances
+- [x] support both json and yaml files
+- [x] support standard input, use `-`
+- [x] quiet mode with parsable output
+- [x] http(s) url support
+  - [x] custom certs for validation, use `--cacert`
+  - [x] flag to skip certificate verification, use `--insecure`
+
diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go
new file mode 100644
index 0000000000..4da7361038
--- /dev/null
+++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go
@@ -0,0 +1,332 @@
+package jsonschema
+
+import (
+	"fmt"
+	"regexp"
+	"slices"
+)
+
+// Compiler compiles json schema into *Schema.
+type Compiler struct {
+	schemas       map[urlPtr]*Schema
+	roots         *roots
+	formats       map[string]*Format
+	decoders      map[string]*Decoder
+	mediaTypes    map[string]*MediaType
+	assertFormat  bool
+	assertContent bool
+}
+
+// NewCompiler creates a Compiler object.
+func NewCompiler() *Compiler {
+	return &Compiler{
+		schemas:       map[urlPtr]*Schema{},
+		roots:         newRoots(),
+		formats:       map[string]*Format{},
+		decoders:      map[string]*Decoder{},
+		mediaTypes:    map[string]*MediaType{},
+		assertFormat:  false,
+		assertContent: false,
+	}
+}
+
+// DefaultDraft overrides the draft used to
+// compile schemas without `$schema` field.
+//
+// By default, this library uses the latest
+// draft supported.
+//
+// The use of this option is HIGHLY encouraged
+// to ensure continued correct operation of your
+// schema. The current default value will not stay
+// the same over time.
+func (c *Compiler) DefaultDraft(d *Draft) {
+	c.roots.defaultDraft = d
+}
+
+// AssertFormat always enables format assertions.
+//
+// Default Behavior:
+// for draft-07: enabled.
+// for draft/2019-09: disabled unless metaschema says `format` vocabulary is required.
+// for draft/2020-12: disabled unless metaschema says `format-assertion` vocabulary is required.
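As a short sketch of how a caller would opt in to both assertion families governed by these defaults (the helper package and function names are hypothetical, not part of this vendored library); the implementations of both switches follow below:

```go
package jsonschemautil // hypothetical helper package

import jsonschema "github.com/santhosh-tekuri/jsonschema/v6"

// NewStrictCompiler returns a compiler with both assertion families enabled.
func NewStrictCompiler() *jsonschema.Compiler {
	c := jsonschema.NewCompiler()
	c.AssertFormat()  // enforce `format` even where the draft treats it as an annotation
	c.AssertContent() // enforce contentEncoding/contentMediaType/contentSchema
	return c
}
```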
+func (c *Compiler) AssertFormat() { + c.assertFormat = true +} + +// AssertContent enables content assertions. +// +// Content assertions include keywords: +// - contentEncoding +// - contentMediaType +// - contentSchema +// +// Default behavior is always disabled. +func (c *Compiler) AssertContent() { + c.assertContent = true +} + +// RegisterFormat registers custom format. +// +// NOTE: +// - "regex" format can not be overridden +// - format assertions are disabled for draft >= 2019-09 +// see [Compiler.AssertFormat] +func (c *Compiler) RegisterFormat(f *Format) { + if f.Name != "regex" { + c.formats[f.Name] = f + } +} + +// RegisterContentEncoding registers custom contentEncoding. +// +// NOTE: content assertions are disabled by default. +// see [Compiler.AssertContent]. +func (c *Compiler) RegisterContentEncoding(d *Decoder) { + c.decoders[d.Name] = d +} + +// RegisterContentMediaType registers custom contentMediaType. +// +// NOTE: content assertions are disabled by default. +// see [Compiler.AssertContent]. +func (c *Compiler) RegisterContentMediaType(mt *MediaType) { + c.mediaTypes[mt.Name] = mt +} + +// RegisterVocabulary registers custom vocabulary. +// +// NOTE: +// - vocabularies are disabled for draft >= 2019-09 +// see [Compiler.AssertVocabs] +func (c *Compiler) RegisterVocabulary(vocab *Vocabulary) { + c.roots.vocabularies[vocab.URL] = vocab +} + +// AssertVocabs always enables user-defined vocabularies assertions. +// +// Default Behavior: +// for draft-07: enabled. +// for draft/2019-09: disabled unless metaschema enables a vocabulary. +// for draft/2020-12: disabled unless metaschema enables a vocabulary. +func (c *Compiler) AssertVocabs() { + c.roots.assertVocabs = true +} + +// AddResource adds schema resource which gets used later in reference +// resolution. +// +// The argument url can be file path or url. Any fragment in url is ignored. +// The argument doc must be valid json value. +func (c *Compiler) AddResource(url string, doc any) error { + uf, err := absolute(url) + if err != nil { + return err + } + if isMeta(string(uf.url)) { + return &ResourceExistsError{string(uf.url)} + } + if !c.roots.loader.add(uf.url, doc) { + return &ResourceExistsError{string(uf.url)} + } + return nil +} + +// UseLoader overrides the default [URLLoader] used +// to load schema resources. +func (c *Compiler) UseLoader(loader URLLoader) { + c.roots.loader.loader = loader +} + +// UseRegexpEngine changes the regexp-engine used. +// By default it uses regexp package from go standard +// library. +// +// NOTE: must be called before compiling any schemas. +func (c *Compiler) UseRegexpEngine(engine RegexpEngine) { + if engine == nil { + engine = goRegexpCompile + } + c.roots.regexpEngine = engine +} + +func (c *Compiler) enqueue(q *queue, up urlPtr) *Schema { + if sch, ok := c.schemas[up]; ok { + // already got compiled + return sch + } + if sch := q.get(up); sch != nil { + return sch + } + sch := newSchema(up) + q.append(sch) + return sch +} + +// MustCompile is like [Compile] but panics if compilation fails. +// It simplifies safe initialization of global variables holding +// compiled schema. +func (c *Compiler) MustCompile(loc string) *Schema { + sch, err := c.Compile(loc) + if err != nil { + panic(fmt.Sprintf("jsonschema: Compile(%q): %v", loc, err)) + } + return sch +} + +// Compile compiles json-schema at given loc. 
+func (c *Compiler) Compile(loc string) (*Schema, error) { + uf, err := absolute(loc) + if err != nil { + return nil, err + } + up, err := c.roots.resolveFragment(*uf) + if err != nil { + return nil, err + } + return c.doCompile(up) +} + +func (c *Compiler) doCompile(up urlPtr) (*Schema, error) { + q := &queue{} + compiled := 0 + + c.enqueue(q, up) + for q.len() > compiled { + sch := q.at(compiled) + if err := c.roots.ensureSubschema(sch.up); err != nil { + return nil, err + } + r := c.roots.roots[sch.up.url] + v, err := sch.up.lookup(r.doc) + if err != nil { + return nil, err + } + if err := c.compileValue(v, sch, r, q); err != nil { + return nil, err + } + compiled++ + } + for _, sch := range *q { + c.schemas[sch.up] = sch + } + return c.schemas[up], nil +} + +func (c *Compiler) compileValue(v any, sch *Schema, r *root, q *queue) error { + res := r.resource(sch.up.ptr) + sch.DraftVersion = res.dialect.draft.version + + base := urlPtr{sch.up.url, res.ptr} + sch.resource = c.enqueue(q, base) + + // if resource, enqueue dynamic anchors for compilation + if sch.DraftVersion >= 2020 && sch.up == sch.resource.up { + res := r.resource(sch.up.ptr) + for anchor, anchorPtr := range res.anchors { + if slices.Contains(res.dynamicAnchors, anchor) { + up := urlPtr{sch.up.url, anchorPtr} + danchorSch := c.enqueue(q, up) + if sch.dynamicAnchors == nil { + sch.dynamicAnchors = map[string]*Schema{} + } + sch.dynamicAnchors[string(anchor)] = danchorSch + } + } + } + + switch v := v.(type) { + case bool: + sch.Bool = &v + case map[string]any: + if err := c.compileObject(v, sch, r, q); err != nil { + return err + } + } + + sch.allPropsEvaluated = sch.AdditionalProperties != nil + if sch.DraftVersion < 2020 { + sch.allItemsEvaluated = sch.AdditionalItems != nil + switch items := sch.Items.(type) { + case *Schema: + sch.allItemsEvaluated = true + case []*Schema: + sch.numItemsEvaluated = len(items) + } + } else { + sch.allItemsEvaluated = sch.Items2020 != nil + sch.numItemsEvaluated = len(sch.PrefixItems) + } + + return nil +} + +func (c *Compiler) compileObject(obj map[string]any, sch *Schema, r *root, q *queue) error { + if len(obj) == 0 { + b := true + sch.Bool = &b + return nil + } + oc := objCompiler{ + c: c, + obj: obj, + up: sch.up, + r: r, + res: r.resource(sch.up.ptr), + q: q, + } + return oc.compile(sch) +} + +// queue -- + +type queue []*Schema + +func (q *queue) append(sch *Schema) { + *q = append(*q, sch) +} + +func (q *queue) at(i int) *Schema { + return (*q)[i] +} + +func (q *queue) len() int { + return len(*q) +} + +func (q *queue) get(up urlPtr) *Schema { + i := slices.IndexFunc(*q, func(sch *Schema) bool { return sch.up == up }) + if i != -1 { + return (*q)[i] + } + return nil +} + +// regexp -- + +// Regexp is the representation of compiled regular expression. +type Regexp interface { + fmt.Stringer + + // MatchString reports whether the string s contains + // any match of the regular expression. + MatchString(string) bool +} + +// RegexpEngine parses a regular expression and returns, +// if successful, a Regexp object that can be used to +// match against text. 
+type RegexpEngine func(string) (Regexp, error)
+
+func (re RegexpEngine) validate(v any) error {
+	s, ok := v.(string)
+	if !ok {
+		return nil
+	}
+	_, err := re(s)
+	return err
+}
+
+func goRegexpCompile(s string) (Regexp, error) {
+	return regexp.Compile(s)
+}
diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go
new file mode 100644
index 0000000000..8d62e58b09
--- /dev/null
+++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go
@@ -0,0 +1,51 @@
+package jsonschema
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+)
+
+// Decoder specifies how to decode specific contentEncoding.
+type Decoder struct {
+	// Name of contentEncoding.
+	Name string
+	// Decode given string to byte array.
+	Decode func(string) ([]byte, error)
+}
+
+var decoders = map[string]*Decoder{
+	"base64": {
+		Name: "base64",
+		Decode: func(s string) ([]byte, error) {
+			return base64.StdEncoding.DecodeString(s)
+		},
+	},
+}
+
+// MediaType specifies how to validate bytes against specific contentMediaType.
+type MediaType struct {
+	// Name of contentMediaType.
+	Name string
+
+	// Validate checks whether bytes conform to this mediatype.
+	Validate func([]byte) error
+
+	// UnmarshalJSON unmarshals bytes into json value.
+	// This must be nil if this mediatype is not compatible
+	// with json.
+	UnmarshalJSON func([]byte) (any, error)
+}
+
+var mediaTypes = map[string]*MediaType{
+	"application/json": {
+		Name: "application/json",
+		Validate: func(b []byte) error {
+			var v any
+			return json.Unmarshal(b, &v)
+		},
+		UnmarshalJSON: func(b []byte) (any, error) {
+			return UnmarshalJSON(bytes.NewReader(b))
+		},
+	},
+}
diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go
new file mode 100644
index 0000000000..fd09bae8d3
--- /dev/null
+++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go
@@ -0,0 +1,360 @@
+package jsonschema
+
+import (
+	"fmt"
+	"slices"
+	"strings"
+)
+
+// A Draft represents a json-schema specification.
+type Draft struct {
+	version       int
+	url           string
+	sch           *Schema
+	id            string             // property name used to represent id
+	subschemas    []SchemaPath       // locations of subschemas
+	vocabPrefix   string             // prefix used for vocabulary
+	allVocabs     map[string]*Schema // names of supported vocabs with its schemas
+	defaultVocabs []string           // names of default vocabs
+}
+
+// String returns the specification url.
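Mirroring the built-in `application/json` entry above, a custom media type can be wired in through `RegisterContentMediaType`. A sketch with a hypothetical `text/plain` handler (the wrapper function is illustrative, not part of the library):

```go
package jsonschemautil // hypothetical helper package

import jsonschema "github.com/santhosh-tekuri/jsonschema/v6"

// RegisterPlainText adds a text/plain media type, shaped like the
// built-in application/json entry in content.go.
func RegisterPlainText(c *jsonschema.Compiler) {
	c.RegisterContentMediaType(&jsonschema.MediaType{
		Name:     "text/plain",
		Validate: func(b []byte) error { return nil }, // any byte sequence is valid plain text
		// UnmarshalJSON stays nil: not json-compatible, so contentSchema will not apply.
		UnmarshalJSON: nil,
	})
}
```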
+func (d *Draft) String() string {
+	return d.url
+}
+
+var (
+	Draft4 = &Draft{
+		version: 4,
+		url:     "http://json-schema.org/draft-04/schema",
+		id:      "id",
+		subschemas: []SchemaPath{
+			// type agnostic
+			schemaPath("definitions/*"),
+			schemaPath("not"),
+			schemaPath("allOf/[]"),
+			schemaPath("anyOf/[]"),
+			schemaPath("oneOf/[]"),
+			// object
+			schemaPath("properties/*"),
+			schemaPath("additionalProperties"),
+			schemaPath("patternProperties/*"),
+			// array
+			schemaPath("items"),
+			schemaPath("items/[]"),
+			schemaPath("additionalItems"),
+			schemaPath("dependencies/*"),
+		},
+		vocabPrefix:   "",
+		allVocabs:     map[string]*Schema{},
+		defaultVocabs: []string{},
+	}
+
+	Draft6 = &Draft{
+		version: 6,
+		url:     "http://json-schema.org/draft-06/schema",
+		id:      "$id",
+		subschemas: joinSubschemas(Draft4.subschemas,
+			schemaPath("propertyNames"),
+			schemaPath("contains"),
+		),
+		vocabPrefix:   "",
+		allVocabs:     map[string]*Schema{},
+		defaultVocabs: []string{},
+	}
+
+	Draft7 = &Draft{
+		version: 7,
+		url:     "http://json-schema.org/draft-07/schema",
+		id:      "$id",
+		subschemas: joinSubschemas(Draft6.subschemas,
+			schemaPath("if"),
+			schemaPath("then"),
+			schemaPath("else"),
+		),
+		vocabPrefix:   "",
+		allVocabs:     map[string]*Schema{},
+		defaultVocabs: []string{},
+	}
+
+	Draft2019 = &Draft{
+		version: 2019,
+		url:     "https://json-schema.org/draft/2019-09/schema",
+		id:      "$id",
+		subschemas: joinSubschemas(Draft7.subschemas,
+			schemaPath("$defs/*"),
+			schemaPath("dependentSchemas/*"),
+			schemaPath("unevaluatedProperties"),
+			schemaPath("unevaluatedItems"),
+			schemaPath("contentSchema"),
+		),
+		vocabPrefix: "https://json-schema.org/draft/2019-09/vocab/",
+		allVocabs: map[string]*Schema{
+			"core":       nil,
+			"applicator": nil,
+			"validation": nil,
+			"meta-data":  nil,
+			"format":     nil,
+			"content":    nil,
+		},
+		defaultVocabs: []string{"core", "applicator", "validation"},
+	}
+
+	Draft2020 = &Draft{
+		version: 2020,
+		url:     "https://json-schema.org/draft/2020-12/schema",
+		id:      "$id",
+		subschemas: joinSubschemas(Draft2019.subschemas,
+			schemaPath("prefixItems/[]"),
+		),
+		vocabPrefix: "https://json-schema.org/draft/2020-12/vocab/",
+		allVocabs: map[string]*Schema{
+			"core":              nil,
+			"applicator":        nil,
+			"unevaluated":       nil,
+			"validation":        nil,
+			"meta-data":         nil,
+			"format-annotation": nil,
+			"format-assertion":  nil,
+			"content":           nil,
+		},
+		defaultVocabs: []string{"core", "applicator", "unevaluated", "validation"},
+	}
+
+	draftLatest = Draft2020
+)
+
+func init() {
+	c := NewCompiler()
+	c.AssertFormat()
+	for _, d := range []*Draft{Draft4, Draft6, Draft7, Draft2019, Draft2020} {
+		d.sch = c.MustCompile(d.url)
+		for name := range d.allVocabs {
+			d.allVocabs[name] = c.MustCompile(strings.TrimSuffix(d.url, "schema") + "meta/" + name)
+		}
+	}
+}
+
+func draftFromURL(url string) *Draft {
+	u, frag := split(url)
+	if frag != "" {
+		return nil
+	}
+	u, ok := strings.CutPrefix(u, "http://")
+	if !ok {
+		u, _ = strings.CutPrefix(u, "https://")
+	}
+	switch u {
+	case "json-schema.org/schema":
+		return draftLatest
+	case "json-schema.org/draft/2020-12/schema":
+		return Draft2020
+	case "json-schema.org/draft/2019-09/schema":
+		return Draft2019
+	case "json-schema.org/draft-07/schema":
+		return Draft7
+	case "json-schema.org/draft-06/schema":
+		return Draft6
+	case "json-schema.org/draft-04/schema":
+		return Draft4
+	default:
+		return nil
+	}
+}
+
+func (d *Draft) getID(obj map[string]any) string {
+	if d.version < 2019 {
+		if _, ok := obj["$ref"]; ok {
+			// All other properties in a "$ref" object MUST be ignored
+			return ""
+		}
+	}
+
+	
id, ok := strVal(obj, d.id) + if !ok { + return "" + } + id, _ = split(id) // ignore fragment + return id +} + +func (d *Draft) getVocabs(url url, doc any, vocabularies map[string]*Vocabulary) ([]string, error) { + if d.version < 2019 { + return nil, nil + } + obj, ok := doc.(map[string]any) + if !ok { + return nil, nil + } + v, ok := obj["$vocabulary"] + if !ok { + return nil, nil + } + obj, ok = v.(map[string]any) + if !ok { + return nil, nil + } + + var vocabs []string + for vocab, reqd := range obj { + if reqd, ok := reqd.(bool); !ok || !reqd { + continue + } + name, ok := strings.CutPrefix(vocab, d.vocabPrefix) + if ok { + if _, ok := d.allVocabs[name]; ok { + if !slices.Contains(vocabs, name) { + vocabs = append(vocabs, name) + continue + } + } + } + if _, ok := vocabularies[vocab]; !ok { + return nil, &UnsupportedVocabularyError{url.String(), vocab} + } + if !slices.Contains(vocabs, vocab) { + vocabs = append(vocabs, vocab) + } + } + if !slices.Contains(vocabs, "core") { + vocabs = append(vocabs, "core") + } + return vocabs, nil +} + +// -- + +type dialect struct { + draft *Draft + vocabs []string // nil means use draft.defaultVocabs +} + +func (d *dialect) hasVocab(name string) bool { + if name == "core" || d.draft.version < 2019 { + return true + } + if d.vocabs != nil { + return slices.Contains(d.vocabs, name) + } + return slices.Contains(d.draft.defaultVocabs, name) +} + +func (d *dialect) activeVocabs(assertVocabs bool, vocabularies map[string]*Vocabulary) []string { + if len(vocabularies) == 0 { + return d.vocabs + } + if d.draft.version < 2019 { + assertVocabs = true + } + if !assertVocabs { + return d.vocabs + } + var vocabs []string + if d.vocabs == nil { + vocabs = slices.Clone(d.draft.defaultVocabs) + } else { + vocabs = slices.Clone(d.vocabs) + } + for vocab := range vocabularies { + if !slices.Contains(vocabs, vocab) { + vocabs = append(vocabs, vocab) + } + } + return vocabs +} + +func (d *dialect) getSchema(assertVocabs bool, vocabularies map[string]*Vocabulary) *Schema { + vocabs := d.activeVocabs(assertVocabs, vocabularies) + if vocabs == nil { + return d.draft.sch + } + + var allOf []*Schema + for _, vocab := range vocabs { + sch := d.draft.allVocabs[vocab] + if sch == nil { + if v, ok := vocabularies[vocab]; ok { + sch = v.Schema + } + } + if sch != nil { + allOf = append(allOf, sch) + } + } + if !slices.Contains(vocabs, "core") { + sch := d.draft.allVocabs["core"] + if sch == nil { + sch = d.draft.sch + } + allOf = append(allOf, sch) + } + sch := &Schema{ + Location: "urn:mem:metaschema", + up: urlPtr{url("urn:mem:metaschema"), ""}, + DraftVersion: d.draft.version, + AllOf: allOf, + } + sch.resource = sch + if sch.DraftVersion >= 2020 { + sch.DynamicAnchor = "meta" + sch.dynamicAnchors = map[string]*Schema{ + "meta": sch, + } + } + return sch +} + +// -- + +type ParseIDError struct { + URL string +} + +func (e *ParseIDError) Error() string { + return fmt.Sprintf("error in parsing id at %q", e.URL) +} + +// -- + +type ParseAnchorError struct { + URL string +} + +func (e *ParseAnchorError) Error() string { + return fmt.Sprintf("error in parsing anchor at %q", e.URL) +} + +// -- + +type DuplicateIDError struct { + ID string + URL string + Ptr1 string + Ptr2 string +} + +func (e *DuplicateIDError) Error() string { + return fmt.Sprintf("duplicate id %q in %q at %q and %q", e.ID, e.URL, e.Ptr1, e.Ptr2) +} + +// -- + +type DuplicateAnchorError struct { + Anchor string + URL string + Ptr1 string + Ptr2 string +} + +func (e *DuplicateAnchorError) Error() string { + return 
fmt.Sprintf("duplicate anchor %q in %q at %q and %q", e.Anchor, e.URL, e.Ptr1, e.Ptr2)
+}
+
+// --
+
+func joinSubschemas(a1 []SchemaPath, a2 ...SchemaPath) []SchemaPath {
+	var a []SchemaPath
+	a = append(a, a1...)
+	a = append(a, a2...)
+	return a
+}
diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go
new file mode 100644
index 0000000000..b78b22e2a5
--- /dev/null
+++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go
@@ -0,0 +1,708 @@
+package jsonschema
+
+import (
+	"net/netip"
+	gourl "net/url"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Format defines a specific format.
+type Format struct {
+	// Name of format.
+	Name string
+
+	// Validate checks if given value is of this format.
+	Validate func(v any) error
+}
+
+var formats = map[string]*Format{
+	"json-pointer":          {"json-pointer", validateJSONPointer},
+	"relative-json-pointer": {"relative-json-pointer", validateRelativeJSONPointer},
+	"uuid":                  {"uuid", validateUUID},
+	"duration":              {"duration", validateDuration},
+	"period":                {"period", validatePeriod},
+	"ipv4":                  {"ipv4", validateIPV4},
+	"ipv6":                  {"ipv6", validateIPV6},
+	"hostname":              {"hostname", validateHostname},
+	"email":                 {"email", validateEmail},
+	"date":                  {"date", validateDate},
+	"time":                  {"time", validateTime},
+	"date-time":             {"date-time", validateDateTime},
+	"uri":                   {"uri", validateURI},
+	"iri":                   {"iri", validateURI},
+	"uri-reference":         {"uri-reference", validateURIReference},
+	"iri-reference":         {"iri-reference", validateURIReference},
+	"uri-template":          {"uri-template", validateURITemplate},
+	"semver":                {"semver", validateSemver},
+}
+
+// see https://www.rfc-editor.org/rfc/rfc6901#section-3
+func validateJSONPointer(v any) error {
+	s, ok := v.(string)
+	if !ok {
+		return nil
+	}
+	if s == "" {
+		return nil
+	}
+	if !strings.HasPrefix(s, "/") {
+		return LocalizableError("not starting with /")
+	}
+	for _, tok := range strings.Split(s, "/")[1:] {
+		escape := false
+		for _, ch := range tok {
+			if escape {
+				escape = false
+				if ch != '0' && ch != '1' {
+					return LocalizableError("~ must be followed by 0 or 1")
+				}
+				continue
+			}
+			if ch == '~' {
+				escape = true
+				continue
+			}
+			switch {
+			case ch >= '\x00' && ch <= '\x2E':
+			case ch >= '\x30' && ch <= '\x7D':
+			case ch >= '\x7F' && ch <= '\U0010FFFF':
+			default:
+				return LocalizableError("invalid character %q", ch)
+			}
+		}
+		if escape {
+			return LocalizableError("~ must be followed by 0 or 1")
+		}
+	}
+	return nil
+}
+
+// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
+func validateRelativeJSONPointer(v any) error {
+	s, ok := v.(string)
+	if !ok {
+		return nil
+	}
+
+	// start with non-negative-integer
+	numDigits := 0
+	for _, ch := range s {
+		if ch >= '0' && ch <= '9' {
+			numDigits++
+		} else {
+			break
+		}
+	}
+	if numDigits == 0 {
+		return LocalizableError("must start with non-negative integer")
+	}
+	if numDigits > 1 && strings.HasPrefix(s, "0") {
+		return LocalizableError("starts with zero")
+	}
+	s = s[numDigits:]
+
+	// followed by either json-pointer or '#'
+	if s == "#" {
+		return nil
+	}
+	return validateJSONPointer(s)
+}
+
+// see https://datatracker.ietf.org/doc/html/rfc4122#page-4
+func validateUUID(v any) error {
+	s, ok := v.(string)
+	if !ok {
+		return nil
+	}
+
+	hexGroups := []int{8, 4, 4, 4, 12}
+	groups := strings.Split(s, "-")
+	if len(groups) != len(hexGroups) {
+		return LocalizableError("must have %d elements", len(hexGroups))
+	}
+	for i, group :=
range groups { + if len(group) != hexGroups[i] { + return LocalizableError("element %d must be %d characters long", i+1, hexGroups[i]) + } + for _, ch := range group { + switch { + case ch >= '0' && ch <= '9': + case ch >= 'a' && ch <= 'f': + case ch >= 'A' && ch <= 'F': + default: + return LocalizableError("non-hex character %q", ch) + } + } + } + return nil +} + +// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A +func validateDuration(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // must start with 'P' + s, ok = strings.CutPrefix(s, "P") + if !ok { + return LocalizableError("must start with P") + } + if s == "" { + return LocalizableError("nothing after P") + } + + // dur-week + if s, ok := strings.CutSuffix(s, "W"); ok { + if s == "" { + return LocalizableError("no number in week") + } + for _, ch := range s { + if ch < '0' || ch > '9' { + return LocalizableError("invalid week") + } + } + return nil + } + + allUnits := []string{"YMD", "HMS"} + for i, s := range strings.Split(s, "T") { + if i != 0 && s == "" { + return LocalizableError("no time elements") + } + if i >= len(allUnits) { + return LocalizableError("more than one T") + } + units := allUnits[i] + for s != "" { + digitCount := 0 + for _, ch := range s { + if ch >= '0' && ch <= '9' { + digitCount++ + } else { + break + } + } + if digitCount == 0 { + return LocalizableError("missing number") + } + s = s[digitCount:] + if s == "" { + return LocalizableError("missing unit") + } + unit := s[0] + j := strings.IndexByte(units, unit) + if j == -1 { + if strings.IndexByte(allUnits[i], unit) != -1 { + return LocalizableError("unit %q out of order", unit) + } + return LocalizableError("invalid unit %q", unit) + } + units = units[j+1:] + s = s[1:] + } + } + + return nil +} + +func validateIPV4(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + groups := strings.Split(s, ".") + if len(groups) != 4 { + return LocalizableError("expected four decimals") + } + for _, group := range groups { + if len(group) > 1 && group[0] == '0' { + return LocalizableError("leading zeros") + } + n, err := strconv.Atoi(group) + if err != nil { + return err + } + if n < 0 || n > 255 { + return LocalizableError("decimal must be between 0 and 255") + } + } + return nil +} + +func validateIPV6(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if !strings.Contains(s, ":") { + return LocalizableError("missing colon") + } + addr, err := netip.ParseAddr(s) + if err != nil { + return err + } + if addr.Zone() != "" { + return LocalizableError("zone id is not a part of ipv6 address") + } + return nil +} + +// see https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names +func validateHostname(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters + s = strings.TrimSuffix(s, ".") + if len(s) > 253 { + return LocalizableError("more than 253 characters long") + } + + // Hostnames are composed of series of labels concatenated with dots, as are all domain names + for _, label := range strings.Split(s, ".") { + // Each label must be from 1 to 63 characters long + if len(label) < 1 || len(label) > 63 { + return LocalizableError("label must be 1 to 63 characters long") + } + + // labels must not start or end with a hyphen + if strings.HasPrefix(label, "-") { + return LocalizableError("label starts with hyphen") + } + if strings.HasSuffix(label, "-") { + return 
LocalizableError("label ends with hyphen") + } + + // labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner), + // the digits '0' through '9', and the hyphen ('-') + for _, ch := range label { + switch { + case ch >= 'a' && ch <= 'z': + case ch >= 'A' && ch <= 'Z': + case ch >= '0' && ch <= '9': + case ch == '-': + default: + return LocalizableError("invalid character %q", ch) + } + } + } + return nil +} + +// see https://en.wikipedia.org/wiki/Email_address +func validateEmail(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + // entire email address to be no more than 254 characters long + if len(s) > 254 { + return LocalizableError("more than 255 characters long") + } + + // email address is generally recognized as having two parts joined with an at-sign + at := strings.LastIndexByte(s, '@') + if at == -1 { + return LocalizableError("missing @") + } + local, domain := s[:at], s[at+1:] + + // local part may be up to 64 characters long + if len(local) > 64 { + return LocalizableError("local part more than 64 characters long") + } + + if len(local) > 1 && strings.HasPrefix(local, `"`) && strings.HasPrefix(local, `"`) { + // quoted + local := local[1 : len(local)-1] + if strings.IndexByte(local, '\\') != -1 || strings.IndexByte(local, '"') != -1 { + return LocalizableError("backslash and quote are not allowed within quoted local part") + } + } else { + // unquoted + if strings.HasPrefix(local, ".") { + return LocalizableError("starts with dot") + } + if strings.HasSuffix(local, ".") { + return LocalizableError("ends with dot") + } + + // consecutive dots not allowed + if strings.Contains(local, "..") { + return LocalizableError("consecutive dots") + } + + // check allowed chars + for _, ch := range local { + switch { + case ch >= 'a' && ch <= 'z': + case ch >= 'A' && ch <= 'Z': + case ch >= '0' && ch <= '9': + case strings.ContainsRune(".!#$%&'*+-/=?^_`{|}~", ch): + default: + return LocalizableError("invalid character %q", ch) + } + } + } + + // domain if enclosed in brackets, must match an IP address + if strings.HasPrefix(domain, "[") && strings.HasSuffix(domain, "]") { + domain = domain[1 : len(domain)-1] + if rem, ok := strings.CutPrefix(domain, "IPv6:"); ok { + if err := validateIPV6(rem); err != nil { + return LocalizableError("invalid ipv6 address: %v", err) + } + return nil + } + if err := validateIPV4(domain); err != nil { + return LocalizableError("invalid ipv4 address: %v", err) + } + return nil + } + + // domain must match the requirements for a hostname + if err := validateHostname(domain); err != nil { + return LocalizableError("invalid domain: %v", err) + } + + return nil +} + +// see see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6 +func validateDate(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + _, err := time.Parse("2006-01-02", s) + return err +} + +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6 +// NOTE: golang time package does not support leap seconds. 
+func validateTime(v any) error {
+ str, ok := v.(string)
+ if !ok {
+ return nil
+ }
+
+ // min: hh:mm:ssZ
+ if len(str) < 9 {
+ return LocalizableError("less than 9 characters long")
+ }
+ if str[2] != ':' || str[5] != ':' {
+ return LocalizableError("missing colon in correct place")
+ }
+
+ // parse hh:mm:ss
+ var hms []int
+ for _, tok := range strings.SplitN(str[:8], ":", 3) {
+ i, err := strconv.Atoi(tok)
+ if err != nil {
+ return LocalizableError("invalid hour/min/sec")
+ }
+ if i < 0 {
+ return LocalizableError("negative hour/min/sec")
+ }
+ hms = append(hms, i)
+ }
+ if len(hms) != 3 {
+ return LocalizableError("missing hour/min/sec")
+ }
+ h, m, s := hms[0], hms[1], hms[2]
+ if h > 23 || m > 59 || s > 60 {
+ return LocalizableError("hour/min/sec out of range")
+ }
+ str = str[8:]
+
+ // parse sec-frac if present
+ if rem, ok := strings.CutPrefix(str, "."); ok {
+ numDigits := 0
+ for _, ch := range rem {
+ if ch >= '0' && ch <= '9' {
+ numDigits++
+ } else {
+ break
+ }
+ }
+ if numDigits == 0 {
+ return LocalizableError("no digits in second fraction")
+ }
+ str = rem[numDigits:]
+ }
+
+ if str != "z" && str != "Z" {
+ // parse time-numoffset
+ if len(str) != 6 {
+ return LocalizableError("offset must be 6 characters long")
+ }
+ var sign int
+ switch str[0] {
+ case '+':
+ sign = -1
+ case '-':
+ sign = +1
+ default:
+ return LocalizableError("offset must begin with plus/minus")
+ }
+ str = str[1:]
+ if str[2] != ':' {
+ return LocalizableError("missing colon in offset in correct place")
+ }
+
+ var zhm []int
+ for _, tok := range strings.SplitN(str, ":", 2) {
+ i, err := strconv.Atoi(tok)
+ if err != nil {
+ return LocalizableError("invalid hour/min in offset")
+ }
+ if i < 0 {
+ return LocalizableError("negative hour/min in offset")
+ }
+ zhm = append(zhm, i)
+ }
+ zh, zm := zhm[0], zhm[1]
+ if zh > 23 || zm > 59 {
+ return LocalizableError("hour/min in offset out of range")
+ }
+
+ // apply timezone
+ hm := (h*60 + m) + sign*(zh*60+zm)
+ if hm < 0 {
+ hm += 24 * 60
+ }
+ h, m = hm/60, hm%60
+ }
+
+ // check leap second
+ if s >= 60 && (h != 23 || m != 59) {
+ return LocalizableError("invalid leap second")
+ }
+
+ return nil
+}
+
+// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6
+func validateDateTime(v any) error {
+ s, ok := v.(string)
+ if !ok {
+ return nil
+ }
+
+ // min: yyyy-mm-ddThh:mm:ssZ
+ if len(s) < 20 {
+ return LocalizableError("less than 20 characters long")
+ }
+
+ if s[10] != 't' && s[10] != 'T' {
+ return LocalizableError("11th character must be t or T")
+ }
+ if err := validateDate(s[:10]); err != nil {
+ return LocalizableError("invalid date element: %v", err)
+ }
+ if err := validateTime(s[11:]); err != nil {
+ return LocalizableError("invalid time element: %v", err)
+ }
+ return nil
+}
+
+func parseURL(s string) (*gourl.URL, error) {
+ u, err := gourl.Parse(s)
+ if err != nil {
+ return nil, err
+ }
+
+ // gourl does not validate ipv6 host address
+ hostName := u.Hostname()
+ if strings.Contains(hostName, ":") {
+ if !strings.Contains(u.Host, "[") || !strings.Contains(u.Host, "]") {
+ return nil, LocalizableError("ipv6 address not enclosed in brackets")
+ }
+ if err := validateIPV6(hostName); err != nil {
+ return nil, LocalizableError("invalid ipv6 address: %v", err)
+ }
+ }
+
+ return u, nil
+}
+
+func validateURI(v any) error {
+ s, ok := v.(string)
+ if !ok {
+ return nil
+ }
+ u, err := parseURL(s)
+ if err != nil {
+ return err
+ }
+ if !u.IsAbs() {
+ return LocalizableError("relative url")
+ }
+ return nil
+}
+
+func
validateURIReference(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if strings.Contains(s, `\`) { + return LocalizableError(`contains \`) + } + _, err := parseURL(s) + return err +} + +func validateURITemplate(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + u, err := parseURL(s) + if err != nil { + return err + } + for _, tok := range strings.Split(u.RawPath, "/") { + tok, err = decode(tok) + if err != nil { + return LocalizableError("percent decode failed: %v", err) + } + want := true + for _, ch := range tok { + var got bool + switch ch { + case '{': + got = true + case '}': + got = false + default: + continue + } + if got != want { + return LocalizableError("nested curly braces") + } + want = !want + } + if !want { + return LocalizableError("no matching closing brace") + } + } + return nil +} + +func validatePeriod(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + slash := strings.IndexByte(s, '/') + if slash == -1 { + return LocalizableError("missing slash") + } + + start, end := s[:slash], s[slash+1:] + if strings.HasPrefix(start, "P") { + if err := validateDuration(start); err != nil { + return LocalizableError("invalid start duration: %v", err) + } + if err := validateDateTime(end); err != nil { + return LocalizableError("invalid end date-time: %v", err) + } + } else { + if err := validateDateTime(start); err != nil { + return LocalizableError("invalid start date-time: %v", err) + } + if strings.HasPrefix(end, "P") { + if err := validateDuration(end); err != nil { + return LocalizableError("invalid end duration: %v", err) + } + } else if err := validateDateTime(end); err != nil { + return LocalizableError("invalid end date-time: %v", err) + } + } + + return nil +} + +// see https://semver.org/#backusnaur-form-grammar-for-valid-semver-versions +func validateSemver(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // build -- + if i := strings.IndexByte(s, '+'); i != -1 { + build := s[i+1:] + if build == "" { + return LocalizableError("build is empty") + } + for _, buildID := range strings.Split(build, ".") { + if buildID == "" { + return LocalizableError("build identifier is empty") + } + for _, ch := range buildID { + switch { + case ch >= '0' && ch <= '9': + case (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '-': + default: + return LocalizableError("invalid character %q in build identifier", ch) + } + } + } + s = s[:i] + } + + // pre-release -- + if i := strings.IndexByte(s, '-'); i != -1 { + preRelease := s[i+1:] + for _, preReleaseID := range strings.Split(preRelease, ".") { + if preReleaseID == "" { + return LocalizableError("pre-release identifier is empty") + } + allDigits := true + for _, ch := range preReleaseID { + switch { + case ch >= '0' && ch <= '9': + case (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '-': + allDigits = false + default: + return LocalizableError("invalid character %q in pre-release identifier", ch) + } + } + if allDigits && len(preReleaseID) > 1 && preReleaseID[0] == '0' { + return LocalizableError("pre-release numeric identifier starts with zero") + } + } + s = s[:i] + } + + // versionCore -- + versions := strings.Split(s, ".") + if len(versions) != 3 { + return LocalizableError("versionCore must have 3 numbers separated by dot") + } + names := []string{"major", "minor", "patch"} + for i, version := range versions { + if version == "" { + return LocalizableError("%s is empty", names[i]) + } + if len(version) > 1 && version[0] == '0' { + return 
LocalizableError("%s starts with zero", names[i]) + } + for _, ch := range version { + if ch < '0' || ch > '9' { + return LocalizableError("%s contains non-digit", names[i]) + } + } + } + + return nil +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work new file mode 100644 index 0000000000..13df855d52 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work @@ -0,0 +1,8 @@ +go 1.21.1 + +use ( + . + ./cmd/jv +) + +replace github.com/santhosh-tekuri/jsonschema/v6 v6.0.0 => ./ diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go new file mode 100644 index 0000000000..7da112ac89 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go @@ -0,0 +1,651 @@ +package kind + +import ( + "fmt" + "math/big" + "strings" + + "golang.org/x/text/message" +) + +// -- + +type InvalidJsonValue struct { + Value any +} + +func (*InvalidJsonValue) KeywordPath() []string { + return nil +} + +func (k *InvalidJsonValue) LocalizedString(p *message.Printer) string { + return p.Sprintf("invalid jsonType %T", k.Value) +} + +// -- + +type Schema struct { + Location string +} + +func (*Schema) KeywordPath() []string { + return nil +} + +func (k *Schema) LocalizedString(p *message.Printer) string { + return p.Sprintf("jsonschema validation failed with %s", quote(k.Location)) +} + +// -- + +type Group struct{} + +func (*Group) KeywordPath() []string { + return nil +} + +func (*Group) LocalizedString(p *message.Printer) string { + return p.Sprintf("validation failed") +} + +// -- + +type Not struct{} + +func (*Not) KeywordPath() []string { + return nil +} + +func (*Not) LocalizedString(p *message.Printer) string { + return p.Sprintf("not failed") +} + +// -- + +type AllOf struct{} + +func (*AllOf) KeywordPath() []string { + return []string{"allOf"} +} + +func (*AllOf) LocalizedString(p *message.Printer) string { + return p.Sprintf("allOf failed") +} + +// -- + +type AnyOf struct{} + +func (*AnyOf) KeywordPath() []string { + return []string{"anyOf"} +} + +func (*AnyOf) LocalizedString(p *message.Printer) string { + return p.Sprintf("anyOf failed") +} + +// -- + +type OneOf struct { + // Subschemas gives indexes of Subschemas that have matched. + // Value nil, means none of the subschemas matched. 
+ Subschemas []int +} + +func (*OneOf) KeywordPath() []string { + return []string{"oneOf"} +} + +func (k *OneOf) LocalizedString(p *message.Printer) string { + if len(k.Subschemas) == 0 { + return p.Sprintf("oneOf failed, none matched") + } + return p.Sprintf("oneOf failed, subschemas %d, %d matched", k.Subschemas[0], k.Subschemas[1]) +} + +//-- + +type FalseSchema struct{} + +func (*FalseSchema) KeywordPath() []string { + return nil +} + +func (*FalseSchema) LocalizedString(p *message.Printer) string { + return p.Sprintf("false schema") +} + +// -- + +type RefCycle struct { + URL string + KeywordLocation1 string + KeywordLocation2 string +} + +func (*RefCycle) KeywordPath() []string { + return nil +} + +func (k *RefCycle) LocalizedString(p *message.Printer) string { + return p.Sprintf("both %s and %s resolve to %q causing reference cycle", k.KeywordLocation1, k.KeywordLocation2, k.URL) +} + +// -- + +type Type struct { + Got string + Want []string +} + +func (*Type) KeywordPath() []string { + return []string{"type"} +} + +func (k *Type) LocalizedString(p *message.Printer) string { + want := strings.Join(k.Want, " or ") + return p.Sprintf("got %s, want %s", k.Got, want) +} + +// -- + +type Enum struct { + Got any + Want []any +} + +// KeywordPath implements jsonschema.ErrorKind. +func (*Enum) KeywordPath() []string { + return []string{"enum"} +} + +func (k *Enum) LocalizedString(p *message.Printer) string { + allPrimitive := true +loop: + for _, item := range k.Want { + switch item.(type) { + case []any, map[string]any: + allPrimitive = false + break loop + } + } + if allPrimitive { + if len(k.Want) == 1 { + return p.Sprintf("value must be %s", display(k.Want[0])) + } + var want []string + for _, v := range k.Want { + want = append(want, display(v)) + } + return p.Sprintf("value must be one of %s", strings.Join(want, ", ")) + } + return p.Sprintf("enum failed") +} + +// -- + +type Const struct { + Got any + Want any +} + +func (*Const) KeywordPath() []string { + return []string{"const"} +} + +func (k *Const) LocalizedString(p *message.Printer) string { + switch want := k.Want.(type) { + case []any, map[string]any: + return p.Sprintf("const failed") + default: + return p.Sprintf("value must be %s", display(want)) + } +} + +// -- + +type Format struct { + Got any + Want string + Err error +} + +func (*Format) KeywordPath() []string { + return []string{"format"} +} + +func (k *Format) LocalizedString(p *message.Printer) string { + return p.Sprintf("%s is not valid %s: %v", display(k.Got), k.Want, localizedError(k.Err, p)) +} + +// -- + +type Reference struct { + Keyword string + URL string +} + +func (k *Reference) KeywordPath() []string { + return []string{k.Keyword} +} + +func (*Reference) LocalizedString(p *message.Printer) string { + return p.Sprintf("validation failed") +} + +// -- + +type MinProperties struct { + Got, Want int +} + +func (*MinProperties) KeywordPath() []string { + return []string{"minProperties"} +} + +func (k *MinProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("minProperties: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MaxProperties struct { + Got, Want int +} + +func (*MaxProperties) KeywordPath() []string { + return []string{"maxProperties"} +} + +func (k *MaxProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("maxProperties: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MinItems struct { + Got, Want int +} + +func (*MinItems) KeywordPath() []string { + return []string{"minItems"} +} + +func (k 
*MinItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("minItems: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MaxItems struct { + Got, Want int +} + +func (*MaxItems) KeywordPath() []string { + return []string{"maxItems"} +} + +func (k *MaxItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("maxItems: got %d, want %d", k.Got, k.Want) +} + +// -- + +type AdditionalItems struct { + Count int +} + +func (*AdditionalItems) KeywordPath() []string { + return []string{"additionalItems"} +} + +func (k *AdditionalItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("last %d additionalItem(s) not allowed", k.Count) +} + +// -- + +type Required struct { + Missing []string +} + +func (*Required) KeywordPath() []string { + return []string{"required"} +} + +func (k *Required) LocalizedString(p *message.Printer) string { + if len(k.Missing) == 1 { + return p.Sprintf("missing property %s", quote(k.Missing[0])) + } + return p.Sprintf("missing properties %s", joinQuoted(k.Missing, ", ")) +} + +// -- + +type Dependency struct { + Prop string // dependency of prop that failed + Missing []string // missing props +} + +func (k *Dependency) KeywordPath() []string { + return []string{"dependency", k.Prop} +} + +func (k *Dependency) LocalizedString(p *message.Printer) string { + return p.Sprintf("properties %s required, if %s exists", joinQuoted(k.Missing, ", "), quote(k.Prop)) +} + +// -- + +type DependentRequired struct { + Prop string // dependency of prop that failed + Missing []string // missing props +} + +func (k *DependentRequired) KeywordPath() []string { + return []string{"dependentRequired", k.Prop} +} + +func (k *DependentRequired) LocalizedString(p *message.Printer) string { + return p.Sprintf("properties %s required, if %s exists", joinQuoted(k.Missing, ", "), quote(k.Prop)) +} + +// -- + +type AdditionalProperties struct { + Properties []string +} + +func (*AdditionalProperties) KeywordPath() []string { + return []string{"additionalProperties"} +} + +func (k *AdditionalProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("additional properties %s not allowed", joinQuoted(k.Properties, ", ")) +} + +// -- + +type PropertyNames struct { + Property string +} + +func (*PropertyNames) KeywordPath() []string { + return []string{"propertyNames"} +} + +func (k *PropertyNames) LocalizedString(p *message.Printer) string { + return p.Sprintf("invalid propertyName %s", quote(k.Property)) +} + +// -- + +type UniqueItems struct { + Duplicates [2]int +} + +func (*UniqueItems) KeywordPath() []string { + return []string{"uniqueItems"} +} + +func (k *UniqueItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("items at %d and %d are equal", k.Duplicates[0], k.Duplicates[1]) +} + +// -- + +type Contains struct{} + +func (*Contains) KeywordPath() []string { + return []string{"contains"} +} + +func (*Contains) LocalizedString(p *message.Printer) string { + return p.Sprintf("no items match contains schema") +} + +// -- + +type MinContains struct { + Got []int + Want int +} + +func (*MinContains) KeywordPath() []string { + return []string{"minContains"} +} + +func (k *MinContains) LocalizedString(p *message.Printer) string { + if len(k.Got) == 0 { + return p.Sprintf("min %d items required to match contains schema, but none matched", k.Want) + } else { + got := fmt.Sprintf("%v", k.Got) + return p.Sprintf("min %d items required to match contains schema, but matched %d items at %v", k.Want, len(k.Got), got[1:len(got)-1]) 
+ }
+}
+
+// --
+
+type MaxContains struct {
+ Got []int
+ Want int
+}
+
+func (*MaxContains) KeywordPath() []string {
+ return []string{"maxContains"}
+}
+
+func (k *MaxContains) LocalizedString(p *message.Printer) string {
+ got := fmt.Sprintf("%v", k.Got)
+ return p.Sprintf("max %d items required to match contains schema, but matched %d items at %v", k.Want, len(k.Got), got[1:len(got)-1])
+}
+
+// --
+
+type MinLength struct {
+ Got, Want int
+}
+
+func (*MinLength) KeywordPath() []string {
+ return []string{"minLength"}
+}
+
+func (k *MinLength) LocalizedString(p *message.Printer) string {
+ return p.Sprintf("minLength: got %d, want %d", k.Got, k.Want)
+}
+
+// --
+
+type MaxLength struct {
+ Got, Want int
+}
+
+func (*MaxLength) KeywordPath() []string {
+ return []string{"maxLength"}
+}
+
+func (k *MaxLength) LocalizedString(p *message.Printer) string {
+ return p.Sprintf("maxLength: got %d, want %d", k.Got, k.Want)
+}
+
+// --
+
+type Pattern struct {
+ Got string
+ Want string
+}
+
+func (*Pattern) KeywordPath() []string {
+ return []string{"pattern"}
+}
+
+func (k *Pattern) LocalizedString(p *message.Printer) string {
+ return p.Sprintf("%s does not match pattern %s", quote(k.Got), quote(k.Want))
+}
+
+// --
+
+type ContentEncoding struct {
+ Want string
+ Err error
+}
+
+func (*ContentEncoding) KeywordPath() []string {
+ return []string{"contentEncoding"}
+}
+
+func (k *ContentEncoding) LocalizedString(p *message.Printer) string {
+ return p.Sprintf("value is not %s encoded: %v", quote(k.Want), localizedError(k.Err, p))
+}
+
+// --
+
+type ContentMediaType struct {
+ Got []byte
+ Want string
+ Err error
+}
+
+func (*ContentMediaType) KeywordPath() []string {
+ return []string{"contentMediaType"}
+}
+
+func (k *ContentMediaType) LocalizedString(p *message.Printer) string {
+ return p.Sprintf("value is not of mediatype %s: %v", quote(k.Want), k.Err)
+}
+
+// --
+
+type ContentSchema struct{}
+
+func (*ContentSchema) KeywordPath() []string {
+ return []string{"contentSchema"}
+}
+
+func (*ContentSchema) LocalizedString(p *message.Printer) string {
+ return p.Sprintf("contentSchema failed")
+}
+
+// --
+
+type Minimum struct {
+ Got *big.Rat
+ Want *big.Rat
+}
+
+func (*Minimum) KeywordPath() []string {
+ return []string{"minimum"}
+}
+
+func (k *Minimum) LocalizedString(p *message.Printer) string {
+ got, _ := k.Got.Float64()
+ want, _ := k.Want.Float64()
+ return p.Sprintf("minimum: got %v, want %v", got, want)
+}
+
+// --
+
+type Maximum struct {
+ Got *big.Rat
+ Want *big.Rat
+}
+
+func (*Maximum) KeywordPath() []string {
+ return []string{"maximum"}
+}
+
+func (k *Maximum) LocalizedString(p *message.Printer) string {
+ got, _ := k.Got.Float64()
+ want, _ := k.Want.Float64()
+ return p.Sprintf("maximum: got %v, want %v", got, want)
+}
+
+// --
+
+type ExclusiveMinimum struct {
+ Got *big.Rat
+ Want *big.Rat
+}
+
+func (*ExclusiveMinimum) KeywordPath() []string {
+ return []string{"exclusiveMinimum"}
+}
+
+func (k *ExclusiveMinimum) LocalizedString(p *message.Printer) string {
+ got, _ := k.Got.Float64()
+ want, _ := k.Want.Float64()
+ return p.Sprintf("exclusiveMinimum: got %v, want %v", got, want)
+}
+
+// --
+
+type ExclusiveMaximum struct {
+ Got *big.Rat
+ Want *big.Rat
+}
+
+func (*ExclusiveMaximum) KeywordPath() []string {
+ return []string{"exclusiveMaximum"}
+}
+
+func (k *ExclusiveMaximum) LocalizedString(p *message.Printer) string {
+ got, _ := k.Got.Float64()
+ want, _ := k.Want.Float64()
+ return p.Sprintf("exclusiveMaximum: got %v, want %v", got, want)
+}
+
+// --
+
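+// Illustrative usage sketch (assumes the golang.org/x/text/language package
+// in addition to the imports above):
+//
+//	p := message.NewPrinter(language.English)
+//	k := &MultipleOf{Got: big.NewRat(7, 1), Want: big.NewRat(2, 1)}
+//	_ = k.LocalizedString(p) // "multipleOf: got 7, want 2"
+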
+type MultipleOf struct { + Got *big.Rat + Want *big.Rat +} + +func (*MultipleOf) KeywordPath() []string { + return []string{"multipleOf"} +} + +func (k *MultipleOf) LocalizedString(p *message.Printer) string { + got, _ := k.Got.Float64() + want, _ := k.Want.Float64() + return p.Sprintf("multipleOf: got %v, want %v", got, want) +} + +// -- + +func quote(s string) string { + s = fmt.Sprintf("%q", s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `'`, `\'`) + return "'" + s[1:len(s)-1] + "'" +} + +func joinQuoted(arr []string, sep string) string { + var sb strings.Builder + for _, s := range arr { + if sb.Len() > 0 { + sb.WriteString(sep) + } + sb.WriteString(quote(s)) + } + return sb.String() +} + +// to be used only for primitive. +func display(v any) string { + switch v := v.(type) { + case string: + return quote(v) + case []any, map[string]any: + return "value" + default: + return fmt.Sprintf("%v", v) + } +} + +func localizedError(err error, p *message.Printer) string { + if err, ok := err.(interface{ LocalizedError(*message.Printer) string }); ok { + return err.LocalizedError(p) + } + return err.Error() +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go new file mode 100644 index 0000000000..ce0170e20a --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go @@ -0,0 +1,266 @@ +package jsonschema + +import ( + "embed" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + gourl "net/url" + "os" + "path/filepath" + "runtime" + "strings" +) + +// URLLoader knows how to load json from given url. +type URLLoader interface { + // Load loads json from given absolute url. + Load(url string) (any, error) +} + +// -- + +// FileLoader loads json file url. +type FileLoader struct{} + +func (l FileLoader) Load(url string) (any, error) { + path, err := l.ToFile(url) + if err != nil { + return nil, err + } + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return UnmarshalJSON(f) +} + +// ToFile is helper method to convert file url to file path. +func (l FileLoader) ToFile(url string) (string, error) { + u, err := gourl.Parse(url) + if err != nil { + return "", err + } + if u.Scheme != "file" { + return "", fmt.Errorf("invalid file url: %s", u) + } + path := u.Path + if runtime.GOOS == "windows" { + path = strings.TrimPrefix(path, "/") + path = filepath.FromSlash(path) + } + return path, nil +} + +// -- + +// SchemeURLLoader delegates to other [URLLoaders] +// based on url scheme. 
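+//
+// A typical composition (illustrative; httpLoader stands for any
+// user-supplied URLLoader handling http/https):
+//
+//	loader := SchemeURLLoader{
+//		"file":  FileLoader{},
+//		"http":  httpLoader,
+//		"https": httpLoader,
+//	}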
+type SchemeURLLoader map[string]URLLoader
+
+func (l SchemeURLLoader) Load(url string) (any, error) {
+ u, err := gourl.Parse(url)
+ if err != nil {
+ return nil, err
+ }
+ ll, ok := l[u.Scheme]
+ if !ok {
+ return nil, &UnsupportedURLSchemeError{u.String()}
+ }
+ return ll.Load(url)
+}
+
+// --
+
+//go:embed metaschemas
+var metaFS embed.FS
+
+func openMeta(url string) (fs.File, error) {
+ u, meta := strings.CutPrefix(url, "http://json-schema.org/")
+ if !meta {
+ u, meta = strings.CutPrefix(url, "https://json-schema.org/")
+ }
+ if meta {
+ if u == "schema" {
+ return openMeta(draftLatest.url)
+ }
+ f, err := metaFS.Open("metaschemas/" + u)
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return f, err
+ }
+ return nil, nil
+}
+
+func isMeta(url string) bool {
+ f, err := openMeta(url)
+ if err != nil {
+ return true
+ }
+ if f != nil {
+ f.Close()
+ return true
+ }
+ return false
+}
+
+func loadMeta(url string) (any, error) {
+ f, err := openMeta(url)
+ if err != nil {
+ return nil, err
+ }
+ if f == nil {
+ return nil, nil
+ }
+ defer f.Close()
+ return UnmarshalJSON(f)
+}
+
+// --
+
+type defaultLoader struct {
+ docs map[url]any // docs loaded so far
+ loader URLLoader
+}
+
+func (l *defaultLoader) add(url url, doc any) bool {
+ if _, ok := l.docs[url]; ok {
+ return false
+ }
+ l.docs[url] = doc
+ return true
+}
+
+func (l *defaultLoader) load(url url) (any, error) {
+ if doc, ok := l.docs[url]; ok {
+ return doc, nil
+ }
+ doc, err := loadMeta(url.String())
+ if err != nil {
+ return nil, err
+ }
+ if doc != nil {
+ l.add(url, doc)
+ return doc, nil
+ }
+ if l.loader == nil {
+ return nil, &LoadURLError{url.String(), errors.New("no URLLoader set")}
+ }
+ doc, err = l.loader.Load(url.String())
+ if err != nil {
+ return nil, &LoadURLError{URL: url.String(), Err: err}
+ }
+ l.add(url, doc)
+ return doc, nil
+}
+
+func (l *defaultLoader) getDraft(up urlPtr, doc any, defaultDraft *Draft, cycle map[url]struct{}) (*Draft, error) {
+ obj, ok := doc.(map[string]any)
+ if !ok {
+ return defaultDraft, nil
+ }
+ sch, ok := strVal(obj, "$schema")
+ if !ok {
+ return defaultDraft, nil
+ }
+ if draft := draftFromURL(sch); draft != nil {
+ return draft, nil
+ }
+ sch, _ = split(sch)
+ if _, err := gourl.Parse(sch); err != nil {
+ return nil, &InvalidMetaSchemaURLError{up.String(), err}
+ }
+ schUrl := url(sch)
+ if up.ptr.isEmpty() && schUrl == up.url {
+ return nil, &UnsupportedDraftError{schUrl.String()}
+ }
+ if _, ok := cycle[schUrl]; ok {
+ return nil, &MetaSchemaCycleError{schUrl.String()}
+ }
+ cycle[schUrl] = struct{}{}
+ doc, err := l.load(schUrl)
+ if err != nil {
+ return nil, err
+ }
+ return l.getDraft(urlPtr{schUrl, ""}, doc, defaultDraft, cycle)
+}
+
+func (l *defaultLoader) getMetaVocabs(doc any, draft *Draft, vocabularies map[string]*Vocabulary) ([]string, error) {
+ obj, ok := doc.(map[string]any)
+ if !ok {
+ return nil, nil
+ }
+ sch, ok := strVal(obj, "$schema")
+ if !ok {
+ return nil, nil
+ }
+ if draft := draftFromURL(sch); draft != nil {
+ return nil, nil
+ }
+ sch, _ = split(sch)
+ if _, err := gourl.Parse(sch); err != nil {
+ return nil, &ParseURLError{sch, err}
+ }
+ schUrl := url(sch)
+ doc, err := l.load(schUrl)
+ if err != nil {
+ return nil, err
+ }
+ return draft.getVocabs(schUrl, doc, vocabularies)
+}
+
+// --
+
+type LoadURLError struct {
+ URL string
+ Err error
+}
+
+func (e *LoadURLError) Error() string {
+ return fmt.Sprintf("failed loading %q: %v", e.URL, e.Err)
+}
+
+// --
+
+type
UnsupportedURLSchemeError struct { + url string +} + +func (e *UnsupportedURLSchemeError) Error() string { + return fmt.Sprintf("no URLLoader registered for %q", e.url) +} + +// -- + +type ResourceExistsError struct { + url string +} + +func (e *ResourceExistsError) Error() string { + return fmt.Sprintf("resource for %q already exists", e.url) +} + +// -- + +// UnmarshalJSON unmarshals into [any] without losing +// number precision using [json.Number]. +func UnmarshalJSON(r io.Reader) (any, error) { + decoder := json.NewDecoder(r) + decoder.UseNumber() + var doc any + if err := decoder.Decode(&doc); err != nil { + return nil, err + } + if _, err := decoder.Token(); err == nil || err != io.EOF { + return nil, fmt.Errorf("invalid character after top-level value") + } + return doc, nil +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema new file mode 100644 index 0000000000..b2a7ff0f54 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema @@ -0,0 +1,151 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uriref" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { 
+ "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" }, + "format": { "type": "string" }, + "$ref": { "type": "string" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema new file mode 100644 index 0000000000..fa22ad1b06 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema @@ -0,0 +1,150 @@ +{ + "$schema": "http://json-schema.org/draft-06/schema#", + "$id": "http://json-schema.org/draft-06/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + 
"type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": {}, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": {} +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema new file mode 100644 index 0000000000..326759a622 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema @@ -0,0 +1,172 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": 
{} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true, + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "if": { "$ref": "#" }, + "then": { "$ref": "#" }, + "else": { "$ref": "#" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator new file mode 100644 index 0000000000..857d2d4955 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator @@ -0,0 +1,55 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/applicator": true + }, + "$recursiveAnchor": true, + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "additionalItems": { "$recursiveRef": "#" }, + "unevaluatedItems": { "$recursiveRef": "#" }, + "items": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "#/$defs/schemaArray" } + ] + }, + "contains": { "$recursiveRef": "#" }, + "additionalProperties": { "$recursiveRef": "#" }, + "unevaluatedProperties": { "$recursiveRef": "#" }, + "properties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { + "$recursiveRef": "#" + } + }, + "propertyNames": { "$recursiveRef": "#" }, + "if": { "$recursiveRef": "#" }, + "then": { "$recursiveRef": "#" }, + "else": { "$recursiveRef": "#" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$recursiveRef": "#" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$recursiveRef": "#" } + } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content new file mode 100644 index 0000000000..fa5d20b8d6 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/content", + "$vocabulary": { + 
"https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + "title": "Content vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "contentSchema": { "$recursiveRef": "#" } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core new file mode 100644 index 0000000000..bf5731985d --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core @@ -0,0 +1,56 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true + }, + "$recursiveAnchor": true, + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$anchor": { + "type": "string", + "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveRef": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveAnchor": { + "type": "boolean", + "default": false + }, + "$vocabulary": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri" + }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format new file mode 100644 index 0000000000..fe553c2397 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/format", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/format": true + }, + "$recursiveAnchor": true, + "title": "Format vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data new file mode 100644 index 0000000000..5c95715c4a --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data @@ -0,0 +1,35 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/meta-data": true + }, + "$recursiveAnchor": true, + "title": "Meta-data vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + 
"default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation new file mode 100644 index 0000000000..f3525e0760 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation @@ -0,0 +1,97 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/validation": true + }, + "$recursiveAnchor": true, + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema new file mode 100644 index 0000000000..f433389be6 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema @@ -0,0 +1,41 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/schema", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true, + "https://json-schema.org/draft/2019-09/vocab/applicator": true, + "https://json-schema.org/draft/2019-09/vocab/validation": true, + "https://json-schema.org/draft/2019-09/vocab/meta-data": true, + "https://json-schema.org/draft/2019-09/vocab/format": false, + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + "title": 
"Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "properties": { + "definitions": { + "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + } + } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator new file mode 100644 index 0000000000..0ef24edc81 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator @@ -0,0 +1,47 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/applicator": true + }, + "$dynamicAnchor": "meta", + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "prefixItems": { "$ref": "#/$defs/schemaArray" }, + "items": { "$dynamicRef": "#meta" }, + "contains": { "$dynamicRef": "#meta" }, + "additionalProperties": { "$dynamicRef": "#meta" }, + "properties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "propertyNames": { "$dynamicRef": "#meta" }, + "if": { "$dynamicRef": "#meta" }, + "then": { "$dynamicRef": "#meta" }, + "else": { "$dynamicRef": "#meta" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$dynamicRef": "#meta" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$dynamicRef": "#meta" } + } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content new file mode 100644 index 0000000000..0330ff0a81 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + "title": "Content vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "contentEncoding": { "type": "string" }, + "contentMediaType": { "type": 
"string" }, + "contentSchema": { "$dynamicRef": "#meta" } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core new file mode 100644 index 0000000000..c4de7005ab --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core @@ -0,0 +1,50 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true + }, + "$dynamicAnchor": "meta", + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "$ref": "#/$defs/uriReferenceString", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { "$ref": "#/$defs/uriString" }, + "$ref": { "$ref": "#/$defs/uriReferenceString" }, + "$anchor": { "$ref": "#/$defs/anchorString" }, + "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, + "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, + "$vocabulary": { + "type": "object", + "propertyNames": { "$ref": "#/$defs/uriString" }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" } + } + }, + "$defs": { + "anchorString": { + "type": "string", + "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" + }, + "uriString": { + "type": "string", + "format": "uri" + }, + "uriReferenceString": { + "type": "string", + "format": "uri-reference" + } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation new file mode 100644 index 0000000000..0aa07d1c15 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true + }, + "$dynamicAnchor": "meta", + "title": "Format vocabulary meta-schema for annotation results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion new file mode 100644 index 0000000000..38613bff6e --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-assertion": true + }, + "$dynamicAnchor": "meta", + "title": "Format vocabulary meta-schema for assertion results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data 
b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data new file mode 100644 index 0000000000..30e2837140 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data @@ -0,0 +1,35 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/meta-data": true + }, + "$dynamicAnchor": "meta", + "title": "Meta-data vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated new file mode 100644 index 0000000000..e9e093d12a --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true + }, + "$dynamicAnchor": "meta", + "title": "Unevaluated applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "unevaluatedItems": { "$dynamicRef": "#meta" }, + "unevaluatedProperties": { "$dynamicRef": "#meta" } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation new file mode 100644 index 0000000000..4e016ed2b7 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation @@ -0,0 +1,97 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/validation": true + }, + "$dynamicAnchor": "meta", + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + 
"$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema new file mode 100644 index 0000000000..364f8ada6c --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema @@ -0,0 +1,57 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go new file mode 100644 index 0000000000..f1494b13a8 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go @@ -0,0 +1,549 @@ +package 
jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" + "strconv" +) + +type objCompiler struct { + c *Compiler + obj map[string]any + up urlPtr + r *root + res *resource + q *queue +} + +func (c *objCompiler) compile(s *Schema) error { + // id -- + if id := c.res.dialect.draft.getID(c.obj); id != "" { + s.ID = id + } + + // anchor -- + if s.DraftVersion < 2019 { + // anchor is specified in id + id := c.string(c.res.dialect.draft.id) + if id != "" { + _, f := split(id) + if f != "" { + var err error + s.Anchor, err = decode(f) + if err != nil { + return &ParseAnchorError{URL: s.Location} + } + } + } + } else { + s.Anchor = c.string("$anchor") + } + + if err := c.compileDraft4(s); err != nil { + return err + } + if s.DraftVersion >= 6 { + if err := c.compileDraft6(s); err != nil { + return err + } + } + if s.DraftVersion >= 7 { + if err := c.compileDraft7(s); err != nil { + return err + } + } + if s.DraftVersion >= 2019 { + if err := c.compileDraft2019(s); err != nil { + return err + } + } + if s.DraftVersion >= 2020 { + if err := c.compileDraft2020(s); err != nil { + return err + } + } + + // vocabularies + vocabs := c.res.dialect.activeVocabs(c.c.roots.assertVocabs, c.c.roots.vocabularies) + for _, vocab := range vocabs { + v := c.c.roots.vocabularies[vocab] + if v == nil { + continue + } + ext, err := v.Compile(&CompilerContext{c}, c.obj) + if err != nil { + return err + } + if ext != nil { + s.Extensions = append(s.Extensions, ext) + } + } + + return nil +} + +func (c *objCompiler) compileDraft4(s *Schema) error { + var err error + + if c.hasVocab("core") { + if s.Ref, err = c.enqueueRef("$ref"); err != nil { + return err + } + if s.DraftVersion < 2019 && s.Ref != nil { + // All other properties in a "$ref" object MUST be ignored + return nil + } + } + + if c.hasVocab("applicator") { + s.AllOf = c.enqueueArr("allOf") + s.AnyOf = c.enqueueArr("anyOf") + s.OneOf = c.enqueueArr("oneOf") + s.Not = c.enqueueProp("not") + + if s.DraftVersion < 2020 { + if items, ok := c.obj["items"]; ok { + if _, ok := items.([]any); ok { + s.Items = c.enqueueArr("items") + s.AdditionalItems = c.enqueueAdditional("additionalItems") + } else { + s.Items = c.enqueueProp("items") + } + } + } + + s.Properties = c.enqueueMap("properties") + if m := c.enqueueMap("patternProperties"); m != nil { + s.PatternProperties = map[Regexp]*Schema{} + for pname, sch := range m { + re, err := c.c.roots.regexpEngine(pname) + if err != nil { + return &InvalidRegexError{c.up.format("patternProperties"), pname, err} + } + s.PatternProperties[re] = sch + } + } + s.AdditionalProperties = c.enqueueAdditional("additionalProperties") + + if m := c.objVal("dependencies"); m != nil { + s.Dependencies = map[string]any{} + for pname, pvalue := range m { + if arr, ok := pvalue.([]any); ok { + s.Dependencies[pname] = toStrings(arr) + } else { + ptr := c.up.ptr.append2("dependencies", pname) + s.Dependencies[pname] = c.enqueuePtr(ptr) + } + } + } + } + + if c.hasVocab("validation") { + if t, ok := c.obj["type"]; ok { + s.Types = newTypes(t) + } + if arr := c.arrVal("enum"); arr != nil { + s.Enum = newEnum(arr) + } + s.MultipleOf = c.numVal("multipleOf") + s.Maximum = c.numVal("maximum") + if c.boolean("exclusiveMaximum") { + s.ExclusiveMaximum = s.Maximum + s.Maximum = nil + } else { + s.ExclusiveMaximum = c.numVal("exclusiveMaximum") + } + s.Minimum = c.numVal("minimum") + if c.boolean("exclusiveMinimum") { + s.ExclusiveMinimum = s.Minimum + s.Minimum = nil + } else { + s.ExclusiveMinimum = c.numVal("exclusiveMinimum") + } + + 
s.MinLength = c.intVal("minLength") + s.MaxLength = c.intVal("maxLength") + if pat := c.strVal("pattern"); pat != nil { + s.Pattern, err = c.c.roots.regexpEngine(*pat) + if err != nil { + return &InvalidRegexError{c.up.format("pattern"), *pat, err} + } + } + + s.MinItems = c.intVal("minItems") + s.MaxItems = c.intVal("maxItems") + s.UniqueItems = c.boolean("uniqueItems") + + s.MaxProperties = c.intVal("maxProperties") + s.MinProperties = c.intVal("minProperties") + if arr := c.arrVal("required"); arr != nil { + s.Required = toStrings(arr) + } + } + + // format -- + if c.assertFormat(s.DraftVersion) { + if f := c.strVal("format"); f != nil { + if *f == "regex" { + s.Format = &Format{ + Name: "regex", + Validate: c.c.roots.regexpEngine.validate, + } + } else { + s.Format = c.c.formats[*f] + if s.Format == nil { + s.Format = formats[*f] + } + } + } + } + + // annotations -- + s.Title = c.string("title") + s.Description = c.string("description") + if v, ok := c.obj["default"]; ok { + s.Default = &v + } + + return nil +} + +func (c *objCompiler) compileDraft6(s *Schema) error { + if c.hasVocab("applicator") { + s.Contains = c.enqueueProp("contains") + s.PropertyNames = c.enqueueProp("propertyNames") + } + if c.hasVocab("validation") { + if v, ok := c.obj["const"]; ok { + s.Const = &v + } + } + return nil +} + +func (c *objCompiler) compileDraft7(s *Schema) error { + if c.hasVocab("applicator") { + s.If = c.enqueueProp("if") + if s.If != nil { + b := c.boolVal("if") + if b == nil || *b { + s.Then = c.enqueueProp("then") + } + if b == nil || !*b { + s.Else = c.enqueueProp("else") + } + } + } + + if c.c.assertContent { + if ce := c.strVal("contentEncoding"); ce != nil { + s.ContentEncoding = c.c.decoders[*ce] + if s.ContentEncoding == nil { + s.ContentEncoding = decoders[*ce] + } + } + if cm := c.strVal("contentMediaType"); cm != nil { + s.ContentMediaType = c.c.mediaTypes[*cm] + if s.ContentMediaType == nil { + s.ContentMediaType = mediaTypes[*cm] + } + } + } + + // annotations -- + s.Comment = c.string("$comment") + s.ReadOnly = c.boolean("readOnly") + s.WriteOnly = c.boolean("writeOnly") + if arr, ok := c.obj["examples"].([]any); ok { + s.Examples = arr + } + + return nil +} + +func (c *objCompiler) compileDraft2019(s *Schema) error { + var err error + + if c.hasVocab("core") { + if s.RecursiveRef, err = c.enqueueRef("$recursiveRef"); err != nil { + return err + } + s.RecursiveAnchor = c.boolean("$recursiveAnchor") + } + + if c.hasVocab("validation") { + if s.Contains != nil { + s.MinContains = c.intVal("minContains") + s.MaxContains = c.intVal("maxContains") + } + if m := c.objVal("dependentRequired"); m != nil { + s.DependentRequired = map[string][]string{} + for pname, pvalue := range m { + if arr, ok := pvalue.([]any); ok { + s.DependentRequired[pname] = toStrings(arr) + } + } + } + } + + if c.hasVocab("applicator") { + s.DependentSchemas = c.enqueueMap("dependentSchemas") + } + + var unevaluated bool + if s.DraftVersion == 2019 { + unevaluated = c.hasVocab("applicator") + } else { + unevaluated = c.hasVocab("unevaluated") + } + if unevaluated { + s.UnevaluatedItems = c.enqueueProp("unevaluatedItems") + s.UnevaluatedProperties = c.enqueueProp("unevaluatedProperties") + } + + if c.c.assertContent { + if s.ContentMediaType != nil && s.ContentMediaType.UnmarshalJSON != nil { + s.ContentSchema = c.enqueueProp("contentSchema") + } + } + + // annotations -- + s.Deprecated = c.boolean("deprecated") + + return nil +} + +func (c *objCompiler) compileDraft2020(s *Schema) error { + if 
c.hasVocab("core") { + sch, err := c.enqueueRef("$dynamicRef") + if err != nil { + return err + } + if sch != nil { + dref := c.strVal("$dynamicRef") + _, frag, err := splitFragment(*dref) + if err != nil { + return err + } + var anch string + if anchor, ok := frag.convert().(anchor); ok { + anch = string(anchor) + } + s.DynamicRef = &DynamicRef{sch, anch} + } + s.DynamicAnchor = c.string("$dynamicAnchor") + } + + if c.hasVocab("applicator") { + s.PrefixItems = c.enqueueArr("prefixItems") + s.Items2020 = c.enqueueProp("items") + } + + return nil +} + +// enqueue helpers -- + +func (c *objCompiler) enqueuePtr(ptr jsonPointer) *Schema { + up := urlPtr{c.up.url, ptr} + return c.c.enqueue(c.q, up) +} + +func (c *objCompiler) enqueueRef(pname string) (*Schema, error) { + ref := c.strVal(pname) + if ref == nil { + return nil, nil + } + baseURL := c.res.id + // baseURL := c.r.baseURL(c.up.ptr) + uf, err := baseURL.join(*ref) + if err != nil { + return nil, err + } + + up, err := c.r.resolve(*uf) + if err != nil { + return nil, err + } + if up != nil { + // local ref + return c.enqueuePtr(up.ptr), nil + } + + // remote ref + up_, err := c.c.roots.resolveFragment(*uf) + if err != nil { + return nil, err + } + return c.c.enqueue(c.q, up_), nil +} + +func (c *objCompiler) enqueueProp(pname string) *Schema { + if _, ok := c.obj[pname]; !ok { + return nil + } + ptr := c.up.ptr.append(pname) + return c.enqueuePtr(ptr) +} + +func (c *objCompiler) enqueueArr(pname string) []*Schema { + arr := c.arrVal(pname) + if arr == nil { + return nil + } + sch := make([]*Schema, len(arr)) + for i := range arr { + ptr := c.up.ptr.append2(pname, strconv.Itoa(i)) + sch[i] = c.enqueuePtr(ptr) + } + return sch +} + +func (c *objCompiler) enqueueMap(pname string) map[string]*Schema { + obj := c.objVal(pname) + if obj == nil { + return nil + } + sch := make(map[string]*Schema) + for k := range obj { + ptr := c.up.ptr.append2(pname, k) + sch[k] = c.enqueuePtr(ptr) + } + return sch +} + +func (c *objCompiler) enqueueAdditional(pname string) any { + if b := c.boolVal(pname); b != nil { + return *b + } + if sch := c.enqueueProp(pname); sch != nil { + return sch + } + return nil +} + +// -- + +func (c *objCompiler) hasVocab(name string) bool { + return c.res.dialect.hasVocab(name) +} + +func (c *objCompiler) assertFormat(draftVersion int) bool { + if c.c.assertFormat || draftVersion < 2019 { + return true + } + if draftVersion == 2019 { + return c.hasVocab("format") + } else { + return c.hasVocab("format-assertion") + } +} + +// value helpers -- + +func (c *objCompiler) boolVal(pname string) *bool { + v, ok := c.obj[pname] + if !ok { + return nil + } + b, ok := v.(bool) + if !ok { + return nil + } + return &b +} + +func (c *objCompiler) boolean(pname string) bool { + b := c.boolVal(pname) + return b != nil && *b +} + +func (c *objCompiler) strVal(pname string) *string { + v, ok := c.obj[pname] + if !ok { + return nil + } + s, ok := v.(string) + if !ok { + return nil + } + return &s +} + +func (c *objCompiler) string(pname string) string { + if s := c.strVal(pname); s != nil { + return *s + } + return "" +} + +func (c *objCompiler) numVal(pname string) *big.Rat { + v, ok := c.obj[pname] + if !ok { + return nil + } + switch v.(type) { + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + if n, ok := new(big.Rat).SetString(fmt.Sprint(v)); ok { + return n + } + } + return nil +} + +func (c *objCompiler) intVal(pname string) *int { + if n := c.numVal(pname); n != nil && n.IsInt() 
{ + n := int(n.Num().Int64()) + return &n + } + return nil +} + +func (c *objCompiler) objVal(pname string) map[string]any { + v, ok := c.obj[pname] + if !ok { + return nil + } + obj, ok := v.(map[string]any) + if !ok { + return nil + } + return obj +} + +func (c *objCompiler) arrVal(pname string) []any { + v, ok := c.obj[pname] + if !ok { + return nil + } + arr, ok := v.([]any) + if !ok { + return nil + } + return arr +} + +// -- + +type InvalidRegexError struct { + URL string + Regex string + Err error +} + +func (e *InvalidRegexError) Error() string { + return fmt.Sprintf("invalid regex %q at %q: %v", e.Regex, e.URL, e.Err) +} + +// -- + +func toStrings(arr []any) []string { + var strings []string + for _, item := range arr { + if s, ok := item.(string); ok { + strings = append(strings, s) + } + } + return strings +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go new file mode 100644 index 0000000000..4995d7b8bd --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go @@ -0,0 +1,212 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/language" + "golang.org/x/text/message" +) + +var defaultPrinter = message.NewPrinter(language.English) + +// format --- + +func (e *ValidationError) schemaURL() string { + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + return ref.URL + } else { + return e.SchemaURL + } +} + +func (e *ValidationError) absoluteKeywordLocation() string { + var schemaURL string + var keywordPath []string + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + schemaURL = ref.URL + keywordPath = nil + } else { + schemaURL = e.SchemaURL + keywordPath = e.ErrorKind.KeywordPath() + } + return fmt.Sprintf("%s%s", schemaURL, encode(jsonPtr(keywordPath))) +} + +func (e *ValidationError) skip() bool { + if len(e.Causes) == 1 { + _, ok := e.ErrorKind.(*kind.Reference) + return ok + } + return false +} + +func (e *ValidationError) display(sb *strings.Builder, verbose bool, indent int, absKwLoc string, p *message.Printer) { + if !e.skip() { + if indent > 0 { + sb.WriteByte('\n') + for i := 0; i < indent-1; i++ { + sb.WriteString(" ") + } + sb.WriteString("- ") + } + indent = indent + 1 + + prevAbsKwLoc := absKwLoc + absKwLoc = e.absoluteKeywordLocation() + + if _, ok := e.ErrorKind.(*kind.Schema); ok { + sb.WriteString(e.ErrorKind.LocalizedString(p)) + } else { + sb.WriteString(p.Sprintf("at %s", quote(jsonPtr(e.InstanceLocation)))) + if verbose { + schLoc := absKwLoc + if prevAbsKwLoc != "" { + pu, _ := split(prevAbsKwLoc) + u, f := split(absKwLoc) + if u == pu { + schLoc = fmt.Sprintf("S#%s", f) + } + } + fmt.Fprintf(sb, " [%s]", schLoc) + } + fmt.Fprintf(sb, ": %s", e.ErrorKind.LocalizedString(p)) + } + } + for _, cause := range e.Causes { + cause.display(sb, verbose, indent, absKwLoc, p) + } +} + +func (e *ValidationError) Error() string { + return e.LocalizedError(defaultPrinter) +} + +func (e *ValidationError) LocalizedError(p *message.Printer) string { + var sb strings.Builder + e.display(&sb, false, 0, "", p) + return sb.String() +} + +func (e *ValidationError) GoString() string { + return e.LocalizedGoString(defaultPrinter) +} + +func (e *ValidationError) LocalizedGoString(p *message.Printer) string { + var sb strings.Builder + e.display(&sb, true, 0, "", p) + return sb.String() +} + +func jsonPtr(tokens []string) string { + var sb strings.Builder 
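+ // descriptive note: each token is escaped per RFC 6901 ("~" -> "~0", "/" -> "~1") by escape() before being joined with '/'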
+ for _, tok := range tokens { + sb.WriteByte('/') + sb.WriteString(escape(tok)) + } + return sb.String() +} + +// -- + +// FlagOutput is the output format with a simple boolean property, valid. +type FlagOutput struct { + Valid bool `json:"valid"` +} + +// FlagOutput returns the `Flag` output format, merely the boolean result. +func (e *ValidationError) FlagOutput() *FlagOutput { + return &FlagOutput{Valid: false} +} + +// -- + +type OutputUnit struct { + Valid bool `json:"valid"` + KeywordLocation string `json:"keywordLocation"` + AbsoluteKeywordLocation string `json:"absoluteKeywordLocation,omitempty"` + InstanceLocation string `json:"instanceLocation"` + Error *OutputError `json:"error,omitempty"` + Errors []OutputUnit `json:"errors,omitempty"` +} + +type OutputError struct { + Kind ErrorKind + p *message.Printer +} + +func (k OutputError) MarshalJSON() ([]byte, error) { + return json.Marshal(k.Kind.LocalizedString(k.p)) +} + +// BasicOutput returns the `Basic` structure, a flat list of output units. +func (e *ValidationError) BasicOutput() *OutputUnit { + return e.LocalizedBasicOutput(defaultPrinter) +} + +func (e *ValidationError) LocalizedBasicOutput(p *message.Printer) *OutputUnit { + out := e.output(true, false, "", "", p) + return &out +} + +// DetailedOutput returns the `Detailed` structure, hierarchical like the schema. +func (e *ValidationError) DetailedOutput() *OutputUnit { + return e.LocalizedDetailedOutput(defaultPrinter) +} + +func (e *ValidationError) LocalizedDetailedOutput(p *message.Printer) *OutputUnit { + out := e.output(false, false, "", "", p) + return &out +} + +func (e *ValidationError) output(flatten, inRef bool, schemaURL, kwLoc string, p *message.Printer) OutputUnit { + if !inRef { + if _, ok := e.ErrorKind.(*kind.Reference); ok { + inRef = true + } + } + if schemaURL != "" { + kwLoc += e.SchemaURL[len(schemaURL):] + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + kwLoc += jsonPtr(ref.KeywordPath()) + } + } + schemaURL = e.schemaURL() + + keywordLocation := kwLoc + if _, ok := e.ErrorKind.(*kind.Reference); !ok { + keywordLocation += jsonPtr(e.ErrorKind.KeywordPath()) + } + + out := OutputUnit{ + Valid: false, + InstanceLocation: jsonPtr(e.InstanceLocation), + KeywordLocation: keywordLocation, + } + if inRef { + out.AbsoluteKeywordLocation = e.absoluteKeywordLocation() + } + for _, cause := range e.Causes { + causeOut := cause.output(flatten, inRef, schemaURL, kwLoc, p) + if cause.skip() { + causeOut = causeOut.Errors[0] + } + if flatten { + errors := causeOut.Errors + causeOut.Errors = nil + causeOut.Error = &OutputError{cause.ErrorKind, p} + out.Errors = append(out.Errors, causeOut) + if len(errors) > 0 { + out.Errors = append(out.Errors, errors...) + } + } else { + out.Errors = append(out.Errors, causeOut) + } + } + if len(out.Errors) == 0 { + out.Error = &OutputError{e.ErrorKind, p} + } + return out +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go new file mode 100644 index 0000000000..576a2a47f7 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go @@ -0,0 +1,142 @@ +package jsonschema + +import ( + "strconv" + "strings" +) + +// Position locates the possible subschema tokens within a json value.
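+// Implementations cover four position kinds: Prop (a named property), Item (an array index), AllProp ("*"), and AllItem ("[]"). +// Illustrative note: schemaPath("properties/*") builds a SchemaPath that collects every subschema under the "properties" keyword.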
+type Position interface { + collect(v any, ptr jsonPointer) map[jsonPointer]any +} + +// -- + +type AllProp struct{} + +func (AllProp) collect(v any, ptr jsonPointer) map[jsonPointer]any { + obj, ok := v.(map[string]any) + if !ok { + return nil + } + m := map[jsonPointer]any{} + for pname, pvalue := range obj { + m[ptr.append(pname)] = pvalue + } + return m +} + +// -- + +type AllItem struct{} + +func (AllItem) collect(v any, ptr jsonPointer) map[jsonPointer]any { + arr, ok := v.([]any) + if !ok { + return nil + } + m := map[jsonPointer]any{} + for i, item := range arr { + m[ptr.append(strconv.Itoa(i))] = item + } + return m +} + +// -- + +type Prop string + +func (p Prop) collect(v any, ptr jsonPointer) map[jsonPointer]any { + obj, ok := v.(map[string]any) + if !ok { + return nil + } + pvalue, ok := obj[string(p)] + if !ok { + return nil + } + return map[jsonPointer]any{ + ptr.append(string(p)): pvalue, + } +} + +// -- + +type Item int + +func (i Item) collect(v any, ptr jsonPointer) map[jsonPointer]any { + arr, ok := v.([]any) + if !ok { + return nil + } + if i < 0 || int(i) >= len(arr) { + return nil + } + return map[jsonPointer]any{ + ptr.append(strconv.Itoa(int(i))): arr[int(i)], + } +} + +// -- + +// SchemaPath tells where to look for subschema inside keyword. +type SchemaPath []Position + +func schemaPath(path string) SchemaPath { + var sp SchemaPath + for _, tok := range strings.Split(path, "/") { + var pos Position + switch tok { + case "*": + pos = AllProp{} + case "[]": + pos = AllItem{} + default: + if i, err := strconv.Atoi(tok); err == nil { + pos = Item(i) + } else { + pos = Prop(tok) + } + } + sp = append(sp, pos) + } + return sp +} + +func (sp SchemaPath) collect(v any, ptr jsonPointer) map[jsonPointer]any { + if len(sp) == 0 { + return map[jsonPointer]any{ + ptr: v, + } + } + p, sp := sp[0], sp[1:] + m := p.collect(v, ptr) + mm := map[jsonPointer]any{} + for ptr, v := range m { + m = sp.collect(v, ptr) + for k, v := range m { + mm[k] = v + } + } + return mm +} + +func (sp SchemaPath) String() string { + var sb strings.Builder + for _, pos := range sp { + if sb.Len() != 0 { + sb.WriteByte('/') + } + switch pos := pos.(type) { + case AllProp: + sb.WriteString("*") + case AllItem: + sb.WriteString("[]") + case Prop: + sb.WriteString(string(pos)) + case Item: + sb.WriteString(strconv.Itoa(int(pos))) + } + } + return sb.String() +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go new file mode 100644 index 0000000000..860690102d --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go @@ -0,0 +1,202 @@ +package jsonschema + +import ( + "fmt" + "slices" + "strings" +) + +type root struct { + url url + doc any + resources map[jsonPointer]*resource + subschemasProcessed map[jsonPointer]struct{} +} + +func (r *root) rootResource() *resource { + return r.resources[""] +} + +func (r *root) resource(ptr jsonPointer) *resource { + for { + if res, ok := r.resources[ptr]; ok { + return res + } + slash := strings.LastIndexByte(string(ptr), '/') + if slash == -1 { + break + } + ptr = ptr[:slash] + } + return r.rootResource() +} + +func (r *root) resolveFragmentIn(frag fragment, res *resource) (urlPtr, error) { + var ptr jsonPointer + switch f := frag.convert().(type) { + case jsonPointer: + ptr = res.ptr.concat(f) + case anchor: + aptr, ok := res.anchors[f] + if !ok { + return urlPtr{}, &AnchorNotFoundError{ + URL: r.url.String(), + Reference: 
(&urlFrag{res.id, frag}).String(), + } + } + ptr = aptr + } + return urlPtr{r.url, ptr}, nil +} + +func (r *root) resolveFragment(frag fragment) (urlPtr, error) { + return r.resolveFragmentIn(frag, r.rootResource()) +} + +// resolves urlFrag to urlPtr from root. +// returns nil if it is external. +func (r *root) resolve(uf urlFrag) (*urlPtr, error) { + var res *resource + if uf.url == r.url { + res = r.rootResource() + } else { + // look for resource with id==uf.url + for _, v := range r.resources { + if v.id == uf.url { + res = v + break + } + } + if res == nil { + return nil, nil // external url + } + } + up, err := r.resolveFragmentIn(uf.frag, res) + return &up, err +} + +func (r *root) collectAnchors(sch any, schPtr jsonPointer, res *resource) error { + obj, ok := sch.(map[string]any) + if !ok { + return nil + } + + addAnchor := func(anchor anchor) error { + ptr1, ok := res.anchors[anchor] + if ok { + if ptr1 == schPtr { + // anchor with same root_ptr already exists + return nil + } + return &DuplicateAnchorError{ + string(anchor), r.url.String(), string(ptr1), string(schPtr), + } + } + res.anchors[anchor] = schPtr + return nil + } + + if res.dialect.draft.version < 2019 { + if _, ok := obj["$ref"]; ok { + // All other properties in a "$ref" object MUST be ignored + return nil + } + // anchor is specified in id + if id, ok := strVal(obj, res.dialect.draft.id); ok { + _, frag, err := splitFragment(id) + if err != nil { + loc := urlPtr{r.url, schPtr} + return &ParseAnchorError{loc.String()} + } + if anchor, ok := frag.convert().(anchor); ok { + if err := addAnchor(anchor); err != nil { + return err + } + } + } + } + if res.dialect.draft.version >= 2019 { + if s, ok := strVal(obj, "$anchor"); ok { + if err := addAnchor(anchor(s)); err != nil { + return err + } + } + } + if res.dialect.draft.version >= 2020 { + if s, ok := strVal(obj, "$dynamicAnchor"); ok { + if err := addAnchor(anchor(s)); err != nil { + return err + } + res.dynamicAnchors = append(res.dynamicAnchors, anchor(s)) + } + } + + return nil +} + +func (r *root) clone() *root { + processed := map[jsonPointer]struct{}{} + for k := range r.subschemasProcessed { + processed[k] = struct{}{} + } + resources := map[jsonPointer]*resource{} + for k, v := range r.resources { + resources[k] = v.clone() + } + return &root{ + url: r.url, + doc: r.doc, + resources: resources, + subschemasProcessed: processed, + } +} + +// -- + +type resource struct { + ptr jsonPointer + id url + dialect dialect + anchors map[anchor]jsonPointer + dynamicAnchors []anchor +} + +func newResource(ptr jsonPointer, id url) *resource { + return &resource{ptr: ptr, id: id, anchors: make(map[anchor]jsonPointer)} +} + +func (res *resource) clone() *resource { + anchors := map[anchor]jsonPointer{} + for k, v := range res.anchors { + anchors[k] = v + } + return &resource{ + ptr: res.ptr, + id: res.id, + dialect: res.dialect, + anchors: anchors, + dynamicAnchors: slices.Clone(res.dynamicAnchors), + } +} + +// -- + +type UnsupportedVocabularyError struct { + URL string + Vocabulary string +} + +func (e *UnsupportedVocabularyError) Error() string { + return fmt.Sprintf("unsupported vocabulary %q in %q", e.Vocabulary, e.URL) +} + +// -- + +type AnchorNotFoundError struct { + URL string + Reference string +} + +func (e *AnchorNotFoundError) Error() string { + return fmt.Sprintf("anchor in %q not found in schema %q", e.Reference, e.URL) +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go
b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go new file mode 100644 index 0000000000..b9b79baa3a --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go @@ -0,0 +1,289 @@ +package jsonschema + +import ( + "fmt" + "strings" +) + +type roots struct { + defaultDraft *Draft + roots map[url]*root + loader defaultLoader + regexpEngine RegexpEngine + vocabularies map[string]*Vocabulary + assertVocabs bool +} + +func newRoots() *roots { + return &roots{ + defaultDraft: draftLatest, + roots: map[url]*root{}, + loader: defaultLoader{ + docs: map[url]any{}, + loader: FileLoader{}, + }, + regexpEngine: goRegexpCompile, + vocabularies: map[string]*Vocabulary{}, + } +} + +func (rr *roots) orLoad(u url) (*root, error) { + if r, ok := rr.roots[u]; ok { + return r, nil + } + doc, err := rr.loader.load(u) + if err != nil { + return nil, err + } + return rr.addRoot(u, doc) +} + +func (rr *roots) addRoot(u url, doc any) (*root, error) { + r := &root{ + url: u, + doc: doc, + resources: map[jsonPointer]*resource{}, + subschemasProcessed: map[jsonPointer]struct{}{}, + } + if err := rr.collectResources(r, doc, u, "", dialect{rr.defaultDraft, nil}); err != nil { + return nil, err + } + if !strings.HasPrefix(u.String(), "http://json-schema.org/") && + !strings.HasPrefix(u.String(), "https://json-schema.org/") { + if err := rr.validate(r, doc, ""); err != nil { + return nil, err + } + } + + rr.roots[u] = r + return r, nil +} + +func (rr *roots) resolveFragment(uf urlFrag) (urlPtr, error) { + r, err := rr.orLoad(uf.url) + if err != nil { + return urlPtr{}, err + } + return r.resolveFragment(uf.frag) +} + +func (rr *roots) collectResources(r *root, sch any, base url, schPtr jsonPointer, fallback dialect) error { + if _, ok := r.subschemasProcessed[schPtr]; ok { + return nil + } + if err := rr._collectResources(r, sch, base, schPtr, fallback); err != nil { + return err + } + r.subschemasProcessed[schPtr] = struct{}{} + return nil +} + +func (rr *roots) _collectResources(r *root, sch any, base url, schPtr jsonPointer, fallback dialect) error { + if _, ok := sch.(bool); ok { + if schPtr.isEmpty() { + // root resource + res := newResource(schPtr, base) + res.dialect = fallback + r.resources[schPtr] = res + } + return nil + } + obj, ok := sch.(map[string]any) + if !ok { + return nil + } + + hasSchema := false + if sch, ok := obj["$schema"]; ok { + if _, ok := sch.(string); ok { + hasSchema = true + } + } + + draft, err := rr.loader.getDraft(urlPtr{r.url, schPtr}, sch, fallback.draft, map[url]struct{}{}) + if err != nil { + return err + } + id := draft.getID(obj) + if id == "" && !schPtr.isEmpty() { + // ignore $schema + draft = fallback.draft + hasSchema = false + id = draft.getID(obj) + } + + var res *resource + if id != "" { + uf, err := base.join(id) + if err != nil { + loc := urlPtr{r.url, schPtr} + return &ParseIDError{loc.String()} + } + base = uf.url + res = newResource(schPtr, base) + } else if schPtr.isEmpty() { + // root resource + res = newResource(schPtr, base) + } + + if res != nil { + found := false + for _, res := range r.resources { + if res.id == base { + found = true + if res.ptr != schPtr { + return &DuplicateIDError{base.String(), r.url.String(), string(schPtr), string(res.ptr)} + } + } + } + if !found { + if hasSchema { + vocabs, err := rr.loader.getMetaVocabs(sch, draft, rr.vocabularies) + if err != nil { + return err + } + res.dialect = dialect{draft, vocabs} + } else { + res.dialect = fallback + } + r.resources[schPtr] = res + } + } + + var 
baseRes *resource + for _, res := range r.resources { + if res.id == base { + baseRes = res + break + } + } + if baseRes == nil { + panic("baseRes is nil") + } + + // found base resource + if err := r.collectAnchors(sch, schPtr, baseRes); err != nil { + return err + } + + // process subschemas + subschemas := map[jsonPointer]any{} + for _, sp := range draft.subschemas { + ss := sp.collect(obj, schPtr) + for k, v := range ss { + subschemas[k] = v + } + } + for _, vocab := range baseRes.dialect.activeVocabs(true, rr.vocabularies) { + if v := rr.vocabularies[vocab]; v != nil { + for _, sp := range v.Subschemas { + ss := sp.collect(obj, schPtr) + for k, v := range ss { + subschemas[k] = v + } + } + } + } + for ptr, v := range subschemas { + if err := rr.collectResources(r, v, base, ptr, baseRes.dialect); err != nil { + return err + } + } + + return nil +} + +func (rr *roots) ensureSubschema(up urlPtr) error { + r, err := rr.orLoad(up.url) + if err != nil { + return err + } + if _, ok := r.subschemasProcessed[up.ptr]; ok { + return nil + } + v, err := up.lookup(r.doc) + if err != nil { + return err + } + rClone := r.clone() + if err := rr.addSubschema(rClone, up.ptr); err != nil { + return err + } + if err := rr.validate(rClone, v, up.ptr); err != nil { + return err + } + rr.roots[r.url] = rClone + return nil +} + +func (rr *roots) addSubschema(r *root, ptr jsonPointer) error { + v, err := (&urlPtr{r.url, ptr}).lookup(r.doc) + if err != nil { + return err + } + base := r.resource(ptr) + baseURL := base.id + if err := rr.collectResources(r, v, baseURL, ptr, base.dialect); err != nil { + return err + } + + // collect anchors + if _, ok := r.resources[ptr]; !ok { + res := r.resource(ptr) + if err := r.collectAnchors(v, ptr, res); err != nil { + return err + } + } + return nil +} + +func (rr *roots) validate(r *root, v any, ptr jsonPointer) error { + dialect := r.resource(ptr).dialect + meta := dialect.getSchema(rr.assertVocabs, rr.vocabularies) + if err := meta.validate(v, rr.regexpEngine, meta, r.resources, rr.assertVocabs, rr.vocabularies); err != nil { + up := urlPtr{r.url, ptr} + return &SchemaValidationError{URL: up.String(), Err: err} + } + return nil +} + +// -- + +type InvalidMetaSchemaURLError struct { + URL string + Err error +} + +func (e *InvalidMetaSchemaURLError) Error() string { + return fmt.Sprintf("invalid $schema in %q: %v", e.URL, e.Err) +} + +// -- + +type UnsupportedDraftError struct { + URL string +} + +func (e *UnsupportedDraftError) Error() string { + return fmt.Sprintf("draft %q is not supported", e.URL) +} + +// -- + +type MetaSchemaCycleError struct { + URL string +} + +func (e *MetaSchemaCycleError) Error() string { + return fmt.Sprintf("cycle in resolving $schema in %q", e.URL) +} + +// -- + +type MetaSchemaMismatchError struct { + URL string +} + +func (e *MetaSchemaMismatchError) Error() string { + return fmt.Sprintf("$schema in %q does not match with $schema in root", e.URL) +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go new file mode 100644 index 0000000000..a970311fb3 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go @@ -0,0 +1,248 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" +) + +// Schema is the representation of a compiled +// jsonschema.
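+// A minimal usage sketch (illustrative; NewCompiler and Compile live in compiler.go, which is not shown here): +// c := NewCompiler() +// sch, err := c.Compile("./schema.json") // compile, resolving $id/$ref across resources +// if err == nil { err = sch.Validate(inst) } // Validate is defined in validator.go +// Error handling is elided for brevity; loaders, formats, and vocabularies are configured on the Compiler.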
+type Schema struct { + up urlPtr + resource *Schema + dynamicAnchors map[string]*Schema + allPropsEvaluated bool + allItemsEvaluated bool + numItemsEvaluated int + + DraftVersion int + Location string + + // type agnostic -- + Bool *bool // boolean schema + ID string + Ref *Schema + Anchor string + RecursiveRef *Schema + RecursiveAnchor bool + DynamicRef *DynamicRef + DynamicAnchor string // "" if not specified + Types *Types + Enum *Enum + Const *any + Not *Schema + AllOf []*Schema + AnyOf []*Schema + OneOf []*Schema + If *Schema + Then *Schema + Else *Schema + Format *Format + + // object -- + MaxProperties *int + MinProperties *int + Required []string + PropertyNames *Schema + Properties map[string]*Schema + PatternProperties map[Regexp]*Schema + AdditionalProperties any // nil or bool or *Schema + Dependencies map[string]any // value is []string or *Schema + DependentRequired map[string][]string + DependentSchemas map[string]*Schema + UnevaluatedProperties *Schema + + // array -- + MinItems *int + MaxItems *int + UniqueItems bool + Contains *Schema + MinContains *int + MaxContains *int + Items any // nil or []*Schema or *Schema + AdditionalItems any // nil or bool or *Schema + PrefixItems []*Schema + Items2020 *Schema + UnevaluatedItems *Schema + + // string -- + MinLength *int + MaxLength *int + Pattern Regexp + ContentEncoding *Decoder + ContentMediaType *MediaType + ContentSchema *Schema + + // number -- + Maximum *big.Rat + Minimum *big.Rat + ExclusiveMaximum *big.Rat + ExclusiveMinimum *big.Rat + MultipleOf *big.Rat + + Extensions []SchemaExt + + // annotations -- + Title string + Description string + Default *any + Comment string + ReadOnly bool + WriteOnly bool + Examples []any + Deprecated bool +} + +// -- + +type jsonType int + +const ( + invalidType jsonType = 0 + nullType jsonType = 1 << iota + booleanType + numberType + integerType + stringType + arrayType + objectType +) + +func typeOf(v any) jsonType { + switch v.(type) { + case nil: + return nullType + case bool: + return booleanType + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + return numberType + case string: + return stringType + case []any: + return arrayType + case map[string]any: + return objectType + default: + return invalidType + } +} + +func typeFromString(s string) jsonType { + switch s { + case "null": + return nullType + case "boolean": + return booleanType + case "number": + return numberType + case "integer": + return integerType + case "string": + return stringType + case "array": + return arrayType + case "object": + return objectType + } + return invalidType +} + +func (jt jsonType) String() string { + switch jt { + case nullType: + return "null" + case booleanType: + return "boolean" + case numberType: + return "number" + case integerType: + return "integer" + case stringType: + return "string" + case arrayType: + return "array" + case objectType: + return "object" + } + return "" +} + +// -- + +// Types encapsulates list of json value types. 
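+// Types is a bit set: each jsonType occupies a distinct bit, so a schema with "type": ["string", "null"] is stored as stringType|nullType and contains() reduces to a single mask test (illustrative note).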
+type Types int + +func newTypes(v any) *Types { + var types Types + switch v := v.(type) { + case string: + types.add(typeFromString(v)) + case []any: + for _, item := range v { + if s, ok := item.(string); ok { + types.add(typeFromString(s)) + } + } + } + if types.IsEmpty() { + return nil + } + return &types +} + +func (tt Types) IsEmpty() bool { + return tt == 0 +} + +func (tt *Types) add(t jsonType) { + *tt = Types(int(*tt) | int(t)) +} + +func (tt Types) contains(t jsonType) bool { + return int(tt)&int(t) != 0 +} + +func (tt Types) ToStrings() []string { + types := []jsonType{ + nullType, booleanType, numberType, integerType, + stringType, arrayType, objectType, + } + var arr []string + for _, t := range types { + if tt.contains(t) { + arr = append(arr, t.String()) + } + } + return arr +} + +func (tt Types) String() string { + return fmt.Sprintf("%v", tt.ToStrings()) +} + +// -- + +type Enum struct { + Values []any + types Types +} + +func newEnum(arr []any) *Enum { + var types Types + for _, item := range arr { + types.add(typeOf(item)) + } + return &Enum{arr, types} +} + +// -- + +type DynamicRef struct { + Ref *Schema + Anchor string // "" if not specified +} + +func newSchema(up urlPtr) *Schema { + return &Schema{up: up, Location: up.String()} +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go new file mode 100644 index 0000000000..c6f8e77526 --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go @@ -0,0 +1,464 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "hash/maphash" + "math/big" + gourl "net/url" + "path/filepath" + "runtime" + "slices" + "strconv" + "strings" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/message" +) + +// -- + +type url (string) + +func (u url) String() string { + return string(u) +} + +func (u url) join(ref string) (*urlFrag, error) { + base, err := gourl.Parse(string(u)) + if err != nil { + return nil, &ParseURLError{URL: u.String(), Err: err} + } + + ref, frag, err := splitFragment(ref) + if err != nil { + return nil, err + } + refURL, err := gourl.Parse(ref) + if err != nil { + return nil, &ParseURLError{URL: ref, Err: err} + } + resolved := base.ResolveReference(refURL) + + // see https://github.com/golang/go/issues/66084 (net/url: ResolveReference ignores Opaque value) + if !refURL.IsAbs() && base.Opaque != "" { + resolved.Opaque = base.Opaque + } + + return &urlFrag{url: url(resolved.String()), frag: frag}, nil +} + +// -- + +type jsonPointer string + +func escape(tok string) string { + tok = strings.ReplaceAll(tok, "~", "~0") + tok = strings.ReplaceAll(tok, "/", "~1") + return tok +} + +func unescape(tok string) (string, bool) { + tilde := strings.IndexByte(tok, '~') + if tilde == -1 { + return tok, true + } + sb := new(strings.Builder) + for { + sb.WriteString(tok[:tilde]) + tok = tok[tilde+1:] + if tok == "" { + return "", false + } + switch tok[0] { + case '0': + sb.WriteByte('~') + case '1': + sb.WriteByte('/') + default: + return "", false + } + tok = tok[1:] + tilde = strings.IndexByte(tok, '~') + if tilde == -1 { + sb.WriteString(tok) + break + } + } + return sb.String(), true +} + +func (ptr jsonPointer) isEmpty() bool { + return string(ptr) == "" +} + +func (ptr jsonPointer) concat(next jsonPointer) jsonPointer { + return jsonPointer(fmt.Sprintf("%s%s", ptr, next)) +} + +func (ptr jsonPointer) append(tok string) jsonPointer { + return jsonPointer(fmt.Sprintf("%s/%s", 
ptr, escape(tok))) +} + +func (ptr jsonPointer) append2(tok1, tok2 string) jsonPointer { + return jsonPointer(fmt.Sprintf("%s/%s/%s", ptr, escape(tok1), escape(tok2))) +} + +// -- + +type anchor string + +// -- + +type fragment string + +func decode(frag string) (string, error) { + return gourl.PathUnescape(frag) +} + +// avoids escaping /. +func encode(frag string) string { + var sb strings.Builder + for i, tok := range strings.Split(frag, "/") { + if i > 0 { + sb.WriteByte('/') + } + sb.WriteString(gourl.PathEscape(tok)) + } + return sb.String() +} + +func splitFragment(str string) (string, fragment, error) { + u, f := split(str) + f, err := decode(f) + if err != nil { + return "", fragment(""), &ParseURLError{URL: str, Err: err} + } + return u, fragment(f), nil +} + +func split(str string) (string, string) { + hash := strings.IndexByte(str, '#') + if hash == -1 { + return str, "" + } + return str[:hash], str[hash+1:] +} + +func (frag fragment) convert() any { + str := string(frag) + if str == "" || strings.HasPrefix(str, "/") { + return jsonPointer(str) + } + return anchor(str) +} + +// -- + +type urlFrag struct { + url url + frag fragment +} + +func startsWithWindowsDrive(s string) bool { + if s != "" && strings.HasPrefix(s[1:], `:\`) { + return (s[0] >= 'a' && s[0] <= 'z') || (s[0] >= 'A' && s[0] <= 'Z') + } + return false +} + +func absolute(input string) (*urlFrag, error) { + u, frag, err := splitFragment(input) + if err != nil { + return nil, err + } + + // if windows absolute file path, convert to file url + // because: net/url parses driver name as scheme + if runtime.GOOS == "windows" && startsWithWindowsDrive(u) { + u = "file:///" + filepath.ToSlash(u) + } + + gourl, err := gourl.Parse(u) + if err != nil { + return nil, &ParseURLError{URL: input, Err: err} + } + if gourl.IsAbs() { + return &urlFrag{url(u), frag}, nil + } + + // avoid filesystem api in wasm + if runtime.GOOS != "js" { + abs, err := filepath.Abs(u) + if err != nil { + return nil, &ParseURLError{URL: input, Err: err} + } + u = abs + } + if !strings.HasPrefix(u, "/") { + u = "/" + u + } + u = "file://" + filepath.ToSlash(u) + + _, err = gourl.Parse(u) + if err != nil { + return nil, &ParseURLError{URL: input, Err: err} + } + return &urlFrag{url: url(u), frag: frag}, nil +} + +func (uf *urlFrag) String() string { + return fmt.Sprintf("%s#%s", uf.url, encode(string(uf.frag))) +} + +// -- + +type urlPtr struct { + url url + ptr jsonPointer +} + +func (up *urlPtr) lookup(v any) (any, error) { + for _, tok := range strings.Split(string(up.ptr), "/")[1:] { + tok, ok := unescape(tok) + if !ok { + return nil, &InvalidJsonPointerError{up.String()} + } + switch val := v.(type) { + case map[string]any: + if pvalue, ok := val[tok]; ok { + v = pvalue + continue + } + case []any: + if index, err := strconv.Atoi(tok); err == nil { + if index >= 0 && index < len(val) { + v = val[index] + continue + } + } + } + return nil, &JSONPointerNotFoundError{up.String()} + } + return v, nil +} + +func (up *urlPtr) format(tok string) string { + return fmt.Sprintf("%s#%s/%s", up.url, encode(string(up.ptr)), encode(escape(tok))) +} + +func (up *urlPtr) String() string { + return fmt.Sprintf("%s#%s", up.url, encode(string(up.ptr))) +} + +// -- + +func minInt(i, j int) int { + if i < j { + return i + } + return j +} + +func strVal(obj map[string]any, prop string) (string, bool) { + v, ok := obj[prop] + if !ok { + return "", false + } + s, ok := v.(string) + return s, ok +} + +func isInteger(num any) bool { + rat, ok := 
new(big.Rat).SetString(fmt.Sprint(num)) + return ok && rat.IsInt() +} + +// quote returns single-quoted string. +// used for embedding quoted strings in json. +func quote(s string) string { + s = fmt.Sprintf("%q", s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `'`, `\'`) + return "'" + s[1:len(s)-1] + "'" +} + +func equals(v1, v2 any) (bool, ErrorKind) { + switch v1 := v1.(type) { + case map[string]any: + v2, ok := v2.(map[string]any) + if !ok || len(v1) != len(v2) { + return false, nil + } + for k, val1 := range v1 { + val2, ok := v2[k] + if !ok { + return false, nil + } + if ok, k := equals(val1, val2); !ok || k != nil { + return ok, k + } + } + return true, nil + case []any: + v2, ok := v2.([]any) + if !ok || len(v1) != len(v2) { + return false, nil + } + for i := range v1 { + if ok, k := equals(v1[i], v2[i]); !ok || k != nil { + return ok, k + } + } + return true, nil + case nil: + return v2 == nil, nil + case bool: + v2, ok := v2.(bool) + return ok && v1 == v2, nil + case string: + v2, ok := v2.(string) + return ok && v1 == v2, nil + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + num1, ok1 := new(big.Rat).SetString(fmt.Sprint(v1)) + num2, ok2 := new(big.Rat).SetString(fmt.Sprint(v2)) + return ok1 && ok2 && num1.Cmp(num2) == 0, nil + default: + return false, &kind.InvalidJsonValue{Value: v1} + } +} + +func duplicates(arr []any) (int, int, ErrorKind) { + if len(arr) <= 20 { + for i := 1; i < len(arr); i++ { + for j := 0; j < i; j++ { + if ok, k := equals(arr[i], arr[j]); ok || k != nil { + return j, i, k + } + } + } + return -1, -1, nil + } + + m := make(map[uint64][]int) + h := new(maphash.Hash) + for i, item := range arr { + h.Reset() + writeHash(item, h) + hash := h.Sum64() + indexes, ok := m[hash] + if ok { + for _, j := range indexes { + if ok, k := equals(item, arr[j]); ok || k != nil { + return j, i, k + } + } + } + indexes = append(indexes, i) + m[hash] = indexes + } + return -1, -1, nil +} + +func writeHash(v any, h *maphash.Hash) ErrorKind { + switch v := v.(type) { + case map[string]any: + _ = h.WriteByte(0) + props := make([]string, 0, len(v)) + for prop := range v { + props = append(props, prop) + } + slices.Sort(props) + for _, prop := range props { + writeHash(prop, h) + writeHash(v[prop], h) + } + case []any: + _ = h.WriteByte(1) + for _, item := range v { + writeHash(item, h) + } + case nil: + _ = h.WriteByte(2) + case bool: + _ = h.WriteByte(3) + if v { + _ = h.WriteByte(1) + } else { + _ = h.WriteByte(0) + } + case string: + _ = h.WriteByte(4) + _, _ = h.WriteString(v) + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + _ = h.WriteByte(5) + num, _ := new(big.Rat).SetString(fmt.Sprint(v)) + _, _ = h.Write(num.Num().Bytes()) + _, _ = h.Write(num.Denom().Bytes()) + default: + return &kind.InvalidJsonValue{Value: v} + } + return nil +} + +// -- + +type ParseURLError struct { + URL string + Err error +} + +func (e *ParseURLError) Error() string { + return fmt.Sprintf("error in parsing %q: %v", e.URL, e.Err) +} + +// -- + +type InvalidJsonPointerError struct { + URL string +} + +func (e *InvalidJsonPointerError) Error() string { + return fmt.Sprintf("invalid json-pointer %q", e.URL) +} + +// -- + +type JSONPointerNotFoundError struct { + URL string +} + +func (e *JSONPointerNotFoundError) Error() string { + return fmt.Sprintf("json-pointer in %q not found", e.URL) +} + +// -- + +type SchemaValidationError struct { + URL string + Err 
error +} + +func (e *SchemaValidationError) Error() string { + return fmt.Sprintf("%q is not valid against metaschema: %v", e.URL, e.Err) +} + +// -- + +// LocalizableError is an error whose message is localizable. +func LocalizableError(format string, args ...any) error { + return &localizableError{format, args} +} + +type localizableError struct { + msg string + args []any +} + +func (e *localizableError) Error() string { + return fmt.Sprintf(e.msg, e.args...) +} + +func (e *localizableError) LocalizedError(p *message.Printer) string { + return p.Sprintf(e.msg, e.args...) +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go new file mode 100644 index 0000000000..e2ace37a9f --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go @@ -0,0 +1,975 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" + "slices" + "strconv" + "unicode/utf8" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/message" +) + +func (sch *Schema) Validate(v any) error { + return sch.validate(v, nil, nil, nil, false, nil) +} + +func (sch *Schema) validate(v any, regexpEngine RegexpEngine, meta *Schema, resources map[jsonPointer]*resource, assertVocabs bool, vocabularies map[string]*Vocabulary) error { + vd := validator{ + v: v, + vloc: make([]string, 0, 8), + sch: sch, + scp: &scope{sch, "", 0, nil}, + uneval: unevalFrom(v, sch, false), + errors: nil, + boolResult: false, + regexpEngine: regexpEngine, + meta: meta, + resources: resources, + assertVocabs: assertVocabs, + vocabularies: vocabularies, + } + if _, err := vd.validate(); err != nil { + verr := err.(*ValidationError) + var causes []*ValidationError + if _, ok := verr.ErrorKind.(*kind.Group); ok { + causes = verr.Causes + } else { + causes = []*ValidationError{verr} + } + return &ValidationError{ + SchemaURL: sch.Location, + InstanceLocation: nil, + ErrorKind: &kind.Schema{Location: sch.Location}, + Causes: causes, + } + } + + return nil +} + +type validator struct { + v any + vloc []string + sch *Schema + scp *scope + uneval *uneval + errors []*ValidationError + boolResult bool // only whether the value is valid is of interest (not the actual error) + regexpEngine RegexpEngine + + // meta validation + meta *Schema // set only when validating with metaschema + resources map[jsonPointer]*resource // resources which should be validated with their dialect + assertVocabs bool + vocabularies map[string]*Vocabulary +} + +func (vd *validator) validate() (*uneval, error) { + s := vd.sch + v := vd.v + + // boolean -- + if s.Bool != nil { + if *s.Bool { + return vd.uneval, nil + } else { + return nil, vd.error(&kind.FalseSchema{}) + } + } + + // check cycle -- + if scp := vd.scp.checkCycle(); scp != nil { + return nil, vd.error(&kind.RefCycle{ + URL: s.Location, + KeywordLocation1: vd.scp.kwLoc(), + KeywordLocation2: scp.kwLoc(), + }) + } + + t := typeOf(v) + if t == invalidType { + return nil, vd.error(&kind.InvalidJsonValue{Value: v}) + } + + // type -- + if s.Types != nil && !s.Types.IsEmpty() { + matched := s.Types.contains(t) || (s.Types.contains(integerType) && t == numberType && isInteger(v)) + if !matched { + return nil, vd.error(&kind.Type{Got: t.String(), Want: s.Types.ToStrings()}) + } + } + + // const -- + if s.Const != nil { + ok, k := equals(v, *s.Const) + if k != nil { + return nil, vd.error(k) + } else if !ok { + return nil, vd.error(&kind.Const{Got: v, Want: *s.Const}) + } + } +
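+ // note: like const above, a failed enum check returns immediately rather than accumulating into vd.errors (illustrative comment)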
+ // enum -- + if s.Enum != nil { + matched := s.Enum.types.contains(typeOf(v)) + if matched { + matched = false + for _, item := range s.Enum.Values { + ok, k := equals(v, item) + if k != nil { + return nil, vd.error(k) + } else if ok { + matched = true + break + } + } + } + if !matched { + return nil, vd.error(&kind.Enum{Got: v, Want: s.Enum.Values}) + } + } + + // format -- + if s.Format != nil { + var err error + if s.Format.Name == "regex" && vd.regexpEngine != nil { + err = vd.regexpEngine.validate(v) + } else { + err = s.Format.Validate(v) + } + if err != nil { + return nil, vd.error(&kind.Format{Got: v, Want: s.Format.Name, Err: err}) + } + } + + // $ref -- + if s.Ref != nil { + err := vd.validateRef(s.Ref, "$ref") + if s.DraftVersion < 2019 { + return vd.uneval, err + } + if err != nil { + vd.addErr(err) + } + } + + // type specific validations -- + switch v := v.(type) { + case map[string]any: + vd.objValidate(v) + case []any: + vd.arrValidate(v) + case string: + vd.strValidate(v) + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + vd.numValidate(v) + } + + if len(vd.errors) == 0 || !vd.boolResult { + if s.DraftVersion >= 2019 { + vd.validateRefs() + } + vd.condValidate() + + for _, ext := range s.Extensions { + ext.Validate(&ValidatorContext{vd}, v) + } + + if s.DraftVersion >= 2019 { + vd.unevalValidate() + } + } + + switch len(vd.errors) { + case 0: + return vd.uneval, nil + case 1: + return nil, vd.errors[0] + default: + verr := vd.error(&kind.Group{}) + verr.Causes = vd.errors + return nil, verr + } +} + +func (vd *validator) objValidate(obj map[string]any) { + s := vd.sch + + // minProperties -- + if s.MinProperties != nil { + if len(obj) < *s.MinProperties { + vd.addError(&kind.MinProperties{Got: len(obj), Want: *s.MinProperties}) + } + } + + // maxProperties -- + if s.MaxProperties != nil { + if len(obj) > *s.MaxProperties { + vd.addError(&kind.MaxProperties{Got: len(obj), Want: *s.MaxProperties}) + } + } + + // required -- + if len(s.Required) > 0 { + if missing := vd.findMissing(obj, s.Required); missing != nil { + vd.addError(&kind.Required{Missing: missing}) + } + } + + if vd.boolResult && len(vd.errors) > 0 { + return + } + + // dependencies -- + for pname, dep := range s.Dependencies { + if _, ok := obj[pname]; ok { + switch dep := dep.(type) { + case []string: + if missing := vd.findMissing(obj, dep); missing != nil { + vd.addError(&kind.Dependency{Prop: pname, Missing: missing}) + } + case *Schema: + vd.addErr(vd.validateSelf(dep, "", false)) + } + } + } + + var additionalPros []string + for pname, pvalue := range obj { + if vd.boolResult && len(vd.errors) > 0 { + return + } + evaluated := false + + // properties -- + if sch, ok := s.Properties[pname]; ok { + evaluated = true + vd.addErr(vd.validateVal(sch, pvalue, pname)) + } + + // patternProperties -- + for regex, sch := range s.PatternProperties { + if regex.MatchString(pname) { + evaluated = true + vd.addErr(vd.validateVal(sch, pvalue, pname)) + } + } + + if !evaluated && s.AdditionalProperties != nil { + evaluated = true + switch additional := s.AdditionalProperties.(type) { + case bool: + if !additional { + additionalPros = append(additionalPros, pname) + } + case *Schema: + vd.addErr(vd.validateVal(additional, pvalue, pname)) + } + } + + if evaluated { + delete(vd.uneval.props, pname) + } + } + if len(additionalPros) > 0 { + vd.addError(&kind.AdditionalProperties{Properties: additionalPros}) + } + + if s.DraftVersion == 4 { + return + } + + // 
propertyNames -- + if s.PropertyNames != nil { + for pname := range obj { + sch, meta, resources := s.PropertyNames, vd.meta, vd.resources + res := vd.metaResource(sch) + if res != nil { + meta = res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + sch = meta + } + if err := sch.validate(pname, vd.regexpEngine, meta, resources, vd.assertVocabs, vd.vocabularies); err != nil { + verr := err.(*ValidationError) + verr.SchemaURL = s.PropertyNames.Location + verr.ErrorKind = &kind.PropertyNames{Property: pname} + vd.addErr(verr) + } + } + } + + if s.DraftVersion == 6 { + return + } + + // dependentSchemas -- + for pname, sch := range s.DependentSchemas { + if _, ok := obj[pname]; ok { + vd.addErr(vd.validateSelf(sch, "", false)) + } + } + + // dependentRequired -- + for pname, reqd := range s.DependentRequired { + if _, ok := obj[pname]; ok { + if missing := vd.findMissing(obj, reqd); missing != nil { + vd.addError(&kind.DependentRequired{Prop: pname, Missing: missing}) + } + } + } +} + +func (vd *validator) arrValidate(arr []any) { + s := vd.sch + + // minItems -- + if s.MinItems != nil { + if len(arr) < *s.MinItems { + vd.addError(&kind.MinItems{Got: len(arr), Want: *s.MinItems}) + } + } + + // maxItems -- + if s.MaxItems != nil { + if len(arr) > *s.MaxItems { + vd.addError(&kind.MaxItems{Got: len(arr), Want: *s.MaxItems}) + } + } + + // uniqueItems -- + if s.UniqueItems && len(arr) > 1 { + i, j, k := duplicates(arr) + if k != nil { + vd.addError(k) + } else if i != -1 { + vd.addError(&kind.UniqueItems{Duplicates: [2]int{i, j}}) + } + } + + if s.DraftVersion < 2020 { + evaluated := 0 + + // items -- + switch items := s.Items.(type) { + case *Schema: + for i, item := range arr { + vd.addErr(vd.validateVal(items, item, strconv.Itoa(i))) + } + evaluated = len(arr) + case []*Schema: + min := minInt(len(arr), len(items)) + for i, item := range arr[:min] { + vd.addErr(vd.validateVal(items[i], item, strconv.Itoa(i))) + } + evaluated = min + } + + // additionalItems -- + if s.AdditionalItems != nil { + switch additional := s.AdditionalItems.(type) { + case bool: + if !additional && evaluated != len(arr) { + vd.addError(&kind.AdditionalItems{Count: len(arr) - evaluated}) + } + case *Schema: + for i, item := range arr[evaluated:] { + vd.addErr(vd.validateVal(additional, item, strconv.Itoa(i))) + } + } + } + } else { + evaluated := minInt(len(s.PrefixItems), len(arr)) + + // prefixItems -- + for i, item := range arr[:evaluated] { + vd.addErr(vd.validateVal(s.PrefixItems[i], item, strconv.Itoa(i))) + } + + // items2020 -- + if s.Items2020 != nil { + for i, item := range arr[evaluated:] { + vd.addErr(vd.validateVal(s.Items2020, item, strconv.Itoa(i))) + } + } + } + + // contains -- + if s.Contains != nil { + var errors []*ValidationError + var matched []int + + for i, item := range arr { + if err := vd.validateVal(s.Contains, item, strconv.Itoa(i)); err != nil { + errors = append(errors, err.(*ValidationError)) + } else { + matched = append(matched, i) + if s.DraftVersion >= 2020 { + delete(vd.uneval.items, i) + } + } + } + + // minContains -- + if s.MinContains != nil { + if len(matched) < *s.MinContains { + vd.addErrors(errors, &kind.MinContains{Got: matched, Want: *s.MinContains}) + } + } else if len(matched) == 0 { + vd.addErrors(errors, &kind.Contains{}) + } + + // maxContains -- + if s.MaxContains != nil { + if len(matched) > *s.MaxContains { + vd.addError(&kind.MaxContains{Got: matched, Want: *s.MaxContains}) + } + } + } +} + +func (vd *validator) strValidate(str string) { + s := vd.sch + + 
strLen := -1 + if s.MinLength != nil || s.MaxLength != nil { + strLen = utf8.RuneCount([]byte(str)) + } + + // minLength -- + if s.MinLength != nil { + if strLen < *s.MinLength { + vd.addError(&kind.MinLength{Got: strLen, Want: *s.MinLength}) + } + } + + // maxLength -- + if s.MaxLength != nil { + if strLen > *s.MaxLength { + vd.addError(&kind.MaxLength{Got: strLen, Want: *s.MaxLength}) + } + } + + // pattern -- + if s.Pattern != nil { + if !s.Pattern.MatchString(str) { + vd.addError(&kind.Pattern{Got: str, Want: s.Pattern.String()}) + } + } + + if s.DraftVersion == 6 { + return + } + + var err error + + // contentEncoding -- + decoded := []byte(str) + if s.ContentEncoding != nil { + decoded, err = s.ContentEncoding.Decode(str) + if err != nil { + decoded = nil + vd.addError(&kind.ContentEncoding{Want: s.ContentEncoding.Name, Err: err}) + } + } + + var deserialized *any + if decoded != nil && s.ContentMediaType != nil { + if s.ContentSchema == nil { + err = s.ContentMediaType.Validate(decoded) + } else { + var value any + value, err = s.ContentMediaType.UnmarshalJSON(decoded) + if err == nil { + deserialized = &value + } + } + if err != nil { + vd.addError(&kind.ContentMediaType{ + Got: decoded, + Want: s.ContentMediaType.Name, + Err: err, + }) + } + } + + if deserialized != nil && s.ContentSchema != nil { + sch, meta, resources := s.ContentSchema, vd.meta, vd.resources + res := vd.metaResource(sch) + if res != nil { + meta = res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + sch = meta + } + if err = sch.validate(*deserialized, vd.regexpEngine, meta, resources, vd.assertVocabs, vd.vocabularies); err != nil { + verr := err.(*ValidationError) + verr.SchemaURL = s.Location + verr.ErrorKind = &kind.ContentSchema{} + vd.addErr(verr) + } + } +} + +func (vd *validator) numValidate(v any) { + s := vd.sch + + var numVal *big.Rat + num := func() *big.Rat { + if numVal == nil { + numVal, _ = new(big.Rat).SetString(fmt.Sprintf("%v", v)) + } + return numVal + } + + // minimum -- + if s.Minimum != nil && num().Cmp(s.Minimum) < 0 { + vd.addError(&kind.Minimum{Got: num(), Want: s.Minimum}) + } + + // maximum -- + if s.Maximum != nil && num().Cmp(s.Maximum) > 0 { + vd.addError(&kind.Maximum{Got: num(), Want: s.Maximum}) + } + + // exclusiveMinimum + if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 { + vd.addError(&kind.ExclusiveMinimum{Got: num(), Want: s.ExclusiveMinimum}) + } + + // exclusiveMaximum + if s.ExclusiveMaximum != nil && num().Cmp(s.ExclusiveMaximum) >= 0 { + vd.addError(&kind.ExclusiveMaximum{Got: num(), Want: s.ExclusiveMaximum}) + } + + // multipleOf + if s.MultipleOf != nil { + if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() { + vd.addError(&kind.MultipleOf{Got: num(), Want: s.MultipleOf}) + } + } +} + +func (vd *validator) condValidate() { + s := vd.sch + + // not -- + if s.Not != nil { + if vd.validateSelf(s.Not, "", true) == nil { + vd.addError(&kind.Not{}) + } + } + + // allOf -- + if len(s.AllOf) > 0 { + var errors []*ValidationError + for _, sch := range s.AllOf { + if err := vd.validateSelf(sch, "", false); err != nil { + errors = append(errors, err.(*ValidationError)) + if vd.boolResult { + break + } + } + } + if len(errors) != 0 { + vd.addErrors(errors, &kind.AllOf{}) + } + } + + // anyOf + if len(s.AnyOf) > 0 { + var matched bool + var errors []*ValidationError + for _, sch := range s.AnyOf { + if err := vd.validateSelf(sch, "", false); err != nil { + errors = append(errors, err.(*ValidationError)) + } else { + matched = true + // for uneval, 
all schemas must be evaluated + if vd.uneval.isEmpty() { + break + } + } + } + if !matched { + vd.addErrors(errors, &kind.AnyOf{}) + } + } + + // oneOf + if len(s.OneOf) > 0 { + var matched = -1 + var errors []*ValidationError + for i, sch := range s.OneOf { + if err := vd.validateSelf(sch, "", matched != -1); err != nil { + if matched == -1 { + errors = append(errors, err.(*ValidationError)) + } + } else { + if matched == -1 { + matched = i + } else { + vd.addError(&kind.OneOf{Subschemas: []int{matched, i}}) + break + } + } + } + if matched == -1 { + vd.addErrors(errors, &kind.OneOf{Subschemas: nil}) + } + } + + // if, then, else -- + if s.If != nil { + if vd.validateSelf(s.If, "", true) == nil { + if s.Then != nil { + vd.addErr(vd.validateSelf(s.Then, "", false)) + } + } else if s.Else != nil { + vd.addErr(vd.validateSelf(s.Else, "", false)) + } + } +} + +func (vd *validator) unevalValidate() { + s := vd.sch + + // unevaluatedProperties + if obj, ok := vd.v.(map[string]any); ok && s.UnevaluatedProperties != nil { + for pname := range vd.uneval.props { + if pvalue, ok := obj[pname]; ok { + vd.addErr(vd.validateVal(s.UnevaluatedProperties, pvalue, pname)) + } + } + vd.uneval.props = nil + } + + // unevaluatedItems + if arr, ok := vd.v.([]any); ok && s.UnevaluatedItems != nil { + for i := range vd.uneval.items { + vd.addErr(vd.validateVal(s.UnevaluatedItems, arr[i], strconv.Itoa(i))) + } + vd.uneval.items = nil + } +} + +// validation helpers -- + +func (vd *validator) validateSelf(sch *Schema, refKw string, boolResult bool) error { + scp := vd.scp.child(sch, refKw, vd.scp.vid) + uneval := unevalFrom(vd.v, sch, !vd.uneval.isEmpty()) + subvd := validator{ + v: vd.v, + vloc: vd.vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult || boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + uneval, err := subvd.validate() + if err == nil { + vd.uneval.merge(uneval) + } + return err +} + +func (vd *validator) validateVal(sch *Schema, v any, vtok string) error { + vloc := append(vd.vloc, vtok) + scp := vd.scp.child(sch, "", vd.scp.vid+1) + uneval := unevalFrom(v, sch, false) + subvd := validator{ + v: v, + vloc: vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + _, err := subvd.validate() + return err +} + +func (vd *validator) validateValue(sch *Schema, v any, vpath []string) error { + vloc := append(vd.vloc, vpath...) 
+ scp := vd.scp.child(sch, "", vd.scp.vid+1) + uneval := unevalFrom(v, sch, false) + subvd := validator{ + v: v, + vloc: vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + _, err := subvd.validate() + return err +} + +func (vd *validator) metaResource(sch *Schema) *resource { + if sch != vd.meta { + return nil + } + ptr := "" + for _, tok := range vd.instanceLocation() { + ptr += "/" + ptr += escape(tok) + } + return vd.resources[jsonPointer(ptr)] +} + +func (vd *validator) handleMeta() { + res := vd.metaResource(vd.sch) + if res == nil { + return + } + sch := res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + vd.meta = sch + vd.sch = sch +} + +// reference validation -- + +func (vd *validator) validateRef(sch *Schema, kw string) error { + err := vd.validateSelf(sch, kw, false) + if err != nil { + refErr := vd.error(&kind.Reference{Keyword: kw, URL: sch.Location}) + verr := err.(*ValidationError) + if _, ok := verr.ErrorKind.(*kind.Group); ok { + refErr.Causes = verr.Causes + } else { + refErr.Causes = append(refErr.Causes, verr) + } + return refErr + } + return nil +} + +func (vd *validator) resolveRecursiveAnchor(fallback *Schema) *Schema { + sch := fallback + scp := vd.scp + for scp != nil { + if scp.sch.resource.RecursiveAnchor { + sch = scp.sch + } + scp = scp.parent + } + return sch +} + +func (vd *validator) resolveDynamicAnchor(name string, fallback *Schema) *Schema { + sch := fallback + scp := vd.scp + for scp != nil { + if dsch, ok := scp.sch.resource.dynamicAnchors[name]; ok { + sch = dsch + } + scp = scp.parent + } + return sch +} + +func (vd *validator) validateRefs() { + // $recursiveRef -- + if sch := vd.sch.RecursiveRef; sch != nil { + if sch.RecursiveAnchor { + sch = vd.resolveRecursiveAnchor(sch) + } + vd.addErr(vd.validateRef(sch, "$recursiveRef")) + } + + // $dynamicRef -- + if dref := vd.sch.DynamicRef; dref != nil { + sch := dref.Ref // initial target + if dref.Anchor != "" { + // $dynamicRef includes anchor + if sch.DynamicAnchor == dref.Anchor { + // initial target has matching $dynamicAnchor + sch = vd.resolveDynamicAnchor(dref.Anchor, sch) + } + } + vd.addErr(vd.validateRef(sch, "$dynamicRef")) + } +} + +// error helpers -- + +func (vd *validator) instanceLocation() []string { + return slices.Clone(vd.vloc) +} + +func (vd *validator) error(kind ErrorKind) *ValidationError { + if vd.boolResult { + return &ValidationError{} + } + return &ValidationError{ + SchemaURL: vd.sch.Location, + InstanceLocation: vd.instanceLocation(), + ErrorKind: kind, + Causes: nil, + } +} + +func (vd *validator) addErr(err error) { + if err != nil { + vd.errors = append(vd.errors, err.(*ValidationError)) + } +} + +func (vd *validator) addError(kind ErrorKind) { + vd.errors = append(vd.errors, vd.error(kind)) +} + +func (vd *validator) addErrors(errors []*ValidationError, kind ErrorKind) { + err := vd.error(kind) + err.Causes = errors + vd.errors = append(vd.errors, err) +} + +func (vd *validator) findMissing(obj map[string]any, reqd []string) []string { + var missing []string + for _, pname := range reqd { + if _, ok := obj[pname]; !ok { + if vd.boolResult { + return []string{} // non-nil + } + missing = append(missing, pname) + } + } + return missing +} + +// -- + +type scope struct { + sch *Schema + + // if empty, compute from self.sch and self.parent.sch. 
+ // non-empty only when there is a jump, i.e. $ref, $XXXRef + refKeyword string + + // unique id of the value being validated + // if two scopes validate the same value, they will have + // the same vid + vid int + + parent *scope +} + +func (sc *scope) child(sch *Schema, refKeyword string, vid int) *scope { + return &scope{sch, refKeyword, vid, sc} +} + +func (sc *scope) checkCycle() *scope { + scp := sc.parent + for scp != nil { + if scp.vid != sc.vid { + break + } + if scp.sch == sc.sch { + return scp + } + scp = scp.parent + } + return nil +} + +func (sc *scope) kwLoc() string { + var loc string + for sc.parent != nil { + if sc.refKeyword != "" { + loc = fmt.Sprintf("/%s%s", escape(sc.refKeyword), loc) + } else { + cur := sc.sch.Location + parent := sc.parent.sch.Location + loc = fmt.Sprintf("%s%s", cur[len(parent):], loc) + } + sc = sc.parent + } + return loc +} + +// -- + +type uneval struct { + props map[string]struct{} + items map[int]struct{} +} + +func unevalFrom(v any, sch *Schema, callerNeeds bool) *uneval { + uneval := &uneval{} + switch v := v.(type) { + case map[string]any: + if !sch.allPropsEvaluated && (callerNeeds || sch.UnevaluatedProperties != nil) { + uneval.props = map[string]struct{}{} + for k := range v { + uneval.props[k] = struct{}{} + } + } + case []any: + if !sch.allItemsEvaluated && (callerNeeds || sch.UnevaluatedItems != nil) && sch.numItemsEvaluated < len(v) { + uneval.items = map[int]struct{}{} + for i := sch.numItemsEvaluated; i < len(v); i++ { + uneval.items[i] = struct{}{} + } + } + } + return uneval +} + +func (ue *uneval) merge(other *uneval) { + for k := range ue.props { + if _, ok := other.props[k]; !ok { + delete(ue.props, k) + } + } + for i := range ue.items { + if _, ok := other.items[i]; !ok { + delete(ue.items, i) + } + } +} + +func (ue *uneval) isEmpty() bool { + return len(ue.props) == 0 && len(ue.items) == 0 +} + +// -- + +type ValidationError struct { + // absolute, dereferenced schema location. + SchemaURL string + + // location of the JSON value within the instance being validated. + InstanceLocation []string + + // kind of error + ErrorKind ErrorKind + + // holds nested errors + Causes []*ValidationError +} + +type ErrorKind interface { + KeywordPath() []string + LocalizedString(*message.Printer) string +} diff --git a/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go new file mode 100644 index 0000000000..18ace91e8b --- /dev/null +++ b/hack/tools/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go @@ -0,0 +1,106 @@ +package jsonschema + +// CompilerContext provides helpers for +// compiling a [Vocabulary]. +type CompilerContext struct { + c *objCompiler +} + +func (ctx *CompilerContext) Enqueue(schPath []string) *Schema { + ptr := ctx.c.up.ptr + for _, tok := range schPath { + ptr = ptr.append(tok) + } + return ctx.c.enqueuePtr(ptr) +} + +// Vocabulary defines a set of keywords, their syntax and +// their semantics. +type Vocabulary struct { + // URL identifier for this Vocabulary. + URL string + + // Schema that is used to validate the keywords that are introduced by this + // vocabulary. + Schema *Schema + + // Subschemas lists the possible locations of subschemas introduced by + // this vocabulary. + Subschemas []SchemaPath + + // Compile compiles the keywords (introduced by this vocabulary) in obj into [SchemaExt]. + // If obj does not contain any keywords introduced by this vocabulary, a nil SchemaExt must + // be returned.
+ Compile func(ctx *CompilerContext, obj map[string]any) (SchemaExt, error) +} + +// -- + +// SchemaExt is the compiled form of a vocabulary. +type SchemaExt interface { + // Validate validates v against this extension; errors, if any, are reported + // to ctx. + Validate(ctx *ValidatorContext, v any) +} + +// ValidatorContext provides helpers for +// validating with [SchemaExt]. +type ValidatorContext struct { + vd *validator +} + +// Validate validates v with sch. vpath gives path of v from current context value. +func (ctx *ValidatorContext) Validate(sch *Schema, v any, vpath []string) error { + switch len(vpath) { + case 0: + return ctx.vd.validateSelf(sch, "", false) + case 1: + return ctx.vd.validateVal(sch, v, vpath[0]) + default: + return ctx.vd.validateValue(sch, v, vpath) + } +} + +// EvaluatedProp marks the given property of the current object as evaluated. +func (ctx *ValidatorContext) EvaluatedProp(pname string) { + delete(ctx.vd.uneval.props, pname) +} + +// EvaluatedItem marks the item at the given index of the current array as evaluated. +func (ctx *ValidatorContext) EvaluatedItem(index int) { + delete(ctx.vd.uneval.items, index) +} + +// AddError reports a validation error of the given kind. +func (ctx *ValidatorContext) AddError(k ErrorKind) { + ctx.vd.addError(k) +} + +// AddErrors reports validation errors of the given kind. +func (ctx *ValidatorContext) AddErrors(errors []*ValidationError, k ErrorKind) { + ctx.vd.addErrors(errors, k) +} + +// AddErr reports the given err. This is typically used to report +// the error created by subschema validation. +// +// NOTE that err must be of type *ValidationError. +func (ctx *ValidatorContext) AddErr(err error) { + ctx.vd.addErr(err) +} + +func (ctx *ValidatorContext) Equals(v1, v2 any) (bool, error) { + b, k := equals(v1, v2) + if k != nil { + return false, ctx.vd.error(k) + } + return b, nil +} + +func (ctx *ValidatorContext) Duplicates(arr []any) (int, int, error) { + i, j, k := duplicates(arr) + if k != nil { + return -1, -1, ctx.vd.error(k) + } + return i, j, nil +} diff --git a/hack/tools/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go b/hack/tools/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go index 09f7c82405..c9e2dabd11 100644 --- a/hack/tools/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go +++ b/hack/tools/vendor/github.com/sashamelentyev/usestdlibvars/pkg/analyzer/analyzer.go @@ -2,6 +2,7 @@ package analyzer import ( "flag" + "fmt" "go/ast" "go/token" "strings" @@ -364,7 +365,7 @@ func checkHTTPMethod(pass *analysis.Pass, basicLit *ast.BasicLit) { key := strings.ToUpper(currentVal) if newVal, ok := mapping.HTTPMethod[key]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -372,7 +373,7 @@ func checkHTTPStatusCode(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.HTTPStatusCode[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -380,7 +381,7 @@ func checkTimeWeekday(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.TimeWeekday[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -388,7 +389,7 @@ func checkTimeMonth(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.TimeMonth[currentVal]; ok { - report(pass,
basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -396,7 +397,7 @@ func checkTimeLayout(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.TimeLayout[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -404,7 +405,7 @@ func checkCryptoHash(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.CryptoHash[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -412,7 +413,7 @@ func checkRPCDefaultPath(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.RPCDefaultPath[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -422,7 +423,7 @@ func checkSQLIsolationLevel(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.SQLIsolationLevel[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -430,7 +431,7 @@ func checkTLSSignatureScheme(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.TLSSignatureScheme[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -438,7 +439,7 @@ func checkConstantKind(pass *analysis.Pass, basicLit *ast.BasicLit) { currentVal := getBasicLitValue(basicLit) if newVal, ok := mapping.ConstantKind[currentVal]; ok { - report(pass, basicLit.Pos(), currentVal, newVal) + report(pass, basicLit, currentVal, newVal) } } @@ -514,6 +515,16 @@ func getBasicLitValue(basicLit *ast.BasicLit) string { return val.String() } -func report(pass *analysis.Pass, pos token.Pos, currentVal, newVal string) { - pass.Reportf(pos, "%q can be replaced by %s", currentVal, newVal) +func report(pass *analysis.Pass, rg analysis.Range, currentVal, newVal string) { + pass.Report(analysis.Diagnostic{ + Pos: rg.Pos(), + Message: fmt.Sprintf("%q can be replaced by %s", currentVal, newVal), + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: rg.Pos(), + End: rg.End(), + NewText: []byte(newVal), + }}, + }}, + }) } diff --git a/hack/tools/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go b/hack/tools/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go index 374bb0d242..ce57a4c210 100644 --- a/hack/tools/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go +++ b/hack/tools/vendor/github.com/stbenjam/no-sprintf-host-port/pkg/analyzer/analyzer.go @@ -29,7 +29,7 @@ func run(pass *analysis.Pass) (interface{}, error) { callExpr := node.(*ast.CallExpr) if p, f, ok := getCallExprFunction(callExpr); ok && p == "fmt" && f == "Sprintf" { if err := checkForHostPortConstruction(callExpr); err != nil { - pass.Reportf(node.Pos(), err.Error()) + pass.Reportf(node.Pos(), "%s", err.Error()) } } }) @@ -52,7 +52,7 @@ func getCallExprFunction(callExpr *ast.CallExpr) (pkg string, fn string, result // getStringLiteral returns the value at a position if it's a string literal. 
func getStringLiteral(args []ast.Expr, pos int) (string, bool) { - if len(args) < pos + 1 { + if len(args) < pos+1 { return "", false } @@ -72,9 +72,9 @@ func getStringLiteral(args []ast.Expr, pos int) (string, bool) { // essentially scheme://%s:, or scheme://user:pass@%s:. // // Matching requirements: -// - Scheme as per RFC3986 is ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) -// - A format string substitution in the host portion, preceded by an optional username/password@ -// - A colon indicating a port will be specified +// - Scheme as per RFC3986 is ALPHA *( ALPHA / DIGIT / "+" / "-" / "." ) +// - A format string substitution in the host portion, preceded by an optional username/password@ +// - A colon indicating a port will be specified func checkForHostPortConstruction(sprintf *ast.CallExpr) error { fs, ok := getStringLiteral(sprintf.Args, 0) if !ok { @@ -93,4 +93,4 @@ func checkForHostPortConstruction(sprintf *ast.CallExpr) error { } return nil -} \ No newline at end of file +} diff --git a/hack/tools/vendor/github.com/stoewer/go-strcase/.gitignore b/hack/tools/vendor/github.com/stoewer/go-strcase/.gitignore new file mode 100644 index 0000000000..db5247b944 --- /dev/null +++ b/hack/tools/vendor/github.com/stoewer/go-strcase/.gitignore @@ -0,0 +1,17 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +vendor +doc + +# Temporary files +*~ +*.swp + +# Editor and IDE config +.idea +*.iml +.vscode diff --git a/hack/tools/vendor/github.com/stoewer/go-strcase/.golangci.yml b/hack/tools/vendor/github.com/stoewer/go-strcase/.golangci.yml new file mode 100644 index 0000000000..7f98d55c42 --- /dev/null +++ b/hack/tools/vendor/github.com/stoewer/go-strcase/.golangci.yml @@ -0,0 +1,26 @@ +run: + deadline: 10m + +linters: + enable: + - dupl + - goconst + - gocyclo + - godox + - gosec + - interfacer + - lll + - maligned + - misspell + - prealloc + - stylecheck + - unconvert + - unparam + - errcheck + - golint + - gofmt + disable: [] + fast: false + +issues: + exclude-use-default: false diff --git a/hack/tools/vendor/github.com/stoewer/go-strcase/LICENSE b/hack/tools/vendor/github.com/stoewer/go-strcase/LICENSE new file mode 100644 index 0000000000..a105a3819a --- /dev/null +++ b/hack/tools/vendor/github.com/stoewer/go-strcase/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017, Adrian Stoewer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
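For context on what no-sprintf-host-port reports above, here is a minimal sketch of the flagged construction and the usual fix (illustrative only; this file is not part of the vendored diff, and the names are invented):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	host, port := "::1", "8080"

	// Flagged by the analyzer: a URL scheme, a format substitution in the
	// host position, and a colon before the port. With an IPv6 host this
	// yields the invalid URL "https://::1:8080".
	bad := fmt.Sprintf("https://%s:%s", host, port)

	// The conventional fix: net.JoinHostPort brackets IPv6 literals,
	// producing "https://[::1]:8080".
	good := "https://" + net.JoinHostPort(host, port)

	fmt.Println(bad, good)
}
```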
diff --git a/hack/tools/vendor/github.com/stoewer/go-strcase/README.md b/hack/tools/vendor/github.com/stoewer/go-strcase/README.md new file mode 100644 index 0000000000..84a640e714 --- /dev/null +++ b/hack/tools/vendor/github.com/stoewer/go-strcase/README.md @@ -0,0 +1,50 @@ +[![GH Actions](https://github.com/stoewer/go-strcase/actions/workflows/lint-test.yml/badge.svg?branch=master)](https://github.com/stoewer/go-strcase/actions) +[![codecov](https://codecov.io/github/stoewer/go-strcase/branch/master/graph/badge.svg?token=c0UokYnop5)](https://codecov.io/github/stoewer/go-strcase) +[![GoDoc](https://godoc.org/github.com/stoewer/go-strcase?status.svg)](https://pkg.go.dev/github.com/stoewer/go-strcase) +--- + +Go strcase +========== + +The package `strcase` converts between different kinds of naming formats such as camel case +(`CamelCase`), snake case (`snake_case`) or kebab case (`kebab-case`). +The package is designed to work only with strings consisting of standard ASCII letters. +Unicode is currently not supported. + +Versioning and stability +------------------------ + +Although the master branch is supposed to remain always backward compatible, the repository +contains version tags in order to support vendoring tools. +The tag names follow semantic versioning conventions and have the following format `v1.0.0`. +This package supports Go modules introduced with version 1.11. + +Example +------- + +```go +import "github.com/stoewer/go-strcase" + +var snake = strcase.SnakeCase("CamelCase") +``` + +Dependencies +------------ + +### Build dependencies + +* none + +### Test dependencies + +* `github.com/stretchr/testify` + +Run linters and unit tests +-------------------------- + +To run the static code analysis, linters and tests use the following commands: + +``` +golangci-lint run --config .golangci.yml ./... +go test ./... +``` diff --git a/hack/tools/vendor/github.com/stoewer/go-strcase/camel.go b/hack/tools/vendor/github.com/stoewer/go-strcase/camel.go new file mode 100644 index 0000000000..ff9e66e0ce --- /dev/null +++ b/hack/tools/vendor/github.com/stoewer/go-strcase/camel.go @@ -0,0 +1,40 @@ +// Copyright (c) 2017, A. Stoewer +// All rights reserved. + +package strcase + +import ( + "strings" +) + +// UpperCamelCase converts a string into camel case starting with an upper case letter. +func UpperCamelCase(s string) string { + return camelCase(s, true) +} + +// LowerCamelCase converts a string into camel case starting with a lower case letter. +func LowerCamelCase(s string) string { + return camelCase(s, false) +} + +func camelCase(s string, upper bool) string { + s = strings.TrimSpace(s) + buffer := make([]rune, 0, len(s)) + + stringIter(s, func(prev, curr, next rune) { + if !isDelimiter(curr) { + if isDelimiter(prev) || (upper && prev == 0) { + buffer = append(buffer, toUpper(curr)) + } else if isLower(prev) { + buffer = append(buffer, curr) + } else if isUpper(prev) && isUpper(curr) && isLower(next) { + // Assume a case like "R" for "XRequestId" + buffer = append(buffer, curr) + } else { + buffer = append(buffer, toLower(curr)) + } + } + }) + + return string(buffer) +} diff --git a/hack/tools/vendor/github.com/stoewer/go-strcase/doc.go b/hack/tools/vendor/github.com/stoewer/go-strcase/doc.go new file mode 100644 index 0000000000..3e441ca3ef --- /dev/null +++ b/hack/tools/vendor/github.com/stoewer/go-strcase/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) 2017, A. Stoewer +// All rights reserved.
+ +// Package strcase converts between different kinds of naming formats such as camel case +// (CamelCase), snake case (snake_case) or kebab case (kebab-case). The package is designed +// to work only with strings consisting of standard ASCII letters. Unicode is currently not +// supported. +package strcase diff --git a/hack/tools/vendor/github.com/stoewer/go-strcase/helper.go b/hack/tools/vendor/github.com/stoewer/go-strcase/helper.go new file mode 100644 index 0000000000..ecad589143 --- /dev/null +++ b/hack/tools/vendor/github.com/stoewer/go-strcase/helper.go @@ -0,0 +1,71 @@ +// Copyright (c) 2017, A. Stoewer +// All rights reserved. + +package strcase + +// isLower checks if a character is lower case. More precisely it evaluates if it is +// in the range of ASCII characters 'a' to 'z'. +func isLower(ch rune) bool { + return ch >= 'a' && ch <= 'z' +} + +// toLower converts a character in the range of ASCII characters 'A' to 'Z' to its lower +// case counterpart. Other characters remain the same. +func toLower(ch rune) rune { + if ch >= 'A' && ch <= 'Z' { + return ch + 32 + } + return ch +} + +// isUpper checks if a character is upper case. More precisely it evaluates if it is +// in the range of ASCII characters 'A' to 'Z'. +func isUpper(ch rune) bool { + return ch >= 'A' && ch <= 'Z' +} + +// toUpper converts a character in the range of ASCII characters 'a' to 'z' to its upper +// case counterpart. Other characters remain the same. +func toUpper(ch rune) rune { + if ch >= 'a' && ch <= 'z' { + return ch - 32 + } + return ch +} + +// isSpace checks if a character is some kind of whitespace. +func isSpace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// isDelimiter checks if a character is some kind of whitespace or '_' or '-'. +func isDelimiter(ch rune) bool { + return ch == '-' || ch == '_' || isSpace(ch) +} + +// iterFunc is a callback that is called for a specific position in a string. Its arguments are the +// rune at the respective string position as well as the previous and the next rune. If curr is at the +// first position of the string prev is zero. If curr is at the end of the string next is zero. +type iterFunc func(prev, curr, next rune) + +// stringIter iterates over a string, invoking the callback for every single rune in the string. +func stringIter(s string, callback iterFunc) { + var prev rune + var curr rune + for _, next := range s { + if curr == 0 { + prev = curr + curr = next + continue + } + + callback(prev, curr, next) + + prev = curr + curr = next + } + + if len(s) > 0 { + callback(prev, curr, 0) + } +} diff --git a/hack/tools/vendor/github.com/stoewer/go-strcase/kebab.go b/hack/tools/vendor/github.com/stoewer/go-strcase/kebab.go new file mode 100644 index 0000000000..e9a6487579 --- /dev/null +++ b/hack/tools/vendor/github.com/stoewer/go-strcase/kebab.go @@ -0,0 +1,14 @@ +// Copyright (c) 2017, A. Stoewer +// All rights reserved. + +package strcase + +// KebabCase converts a string into kebab case. +func KebabCase(s string) string { + return delimiterCase(s, '-', false) +} + +// UpperKebabCase converts a string into kebab case with capital letters. +func UpperKebabCase(s string) string { + return delimiterCase(s, '-', true) +} diff --git a/hack/tools/vendor/github.com/stoewer/go-strcase/snake.go b/hack/tools/vendor/github.com/stoewer/go-strcase/snake.go new file mode 100644 index 0000000000..1b216e20cf --- /dev/null +++ b/hack/tools/vendor/github.com/stoewer/go-strcase/snake.go @@ -0,0 +1,58 @@ +// Copyright (c) 2017, A.
Stoewer +// All rights reserved. + +package strcase + +import ( + "strings" +) + +// SnakeCase converts a string into snake case. +func SnakeCase(s string) string { + return delimiterCase(s, '_', false) +} + +// UpperSnakeCase converts a string into snake case with capital letters. +func UpperSnakeCase(s string) string { + return delimiterCase(s, '_', true) +} + +// delimiterCase converts a string into snake_case or kebab-case depending on the delimiter passed +// as second argument. When upperCase is true the result will be UPPER_SNAKE_CASE or UPPER-KEBAB-CASE. +func delimiterCase(s string, delimiter rune, upperCase bool) string { + s = strings.TrimSpace(s) + buffer := make([]rune, 0, len(s)+3) + + adjustCase := toLower + if upperCase { + adjustCase = toUpper + } + + var prev rune + var curr rune + for _, next := range s { + if isDelimiter(curr) { + if !isDelimiter(prev) { + buffer = append(buffer, delimiter) + } + } else if isUpper(curr) { + if isLower(prev) || (isUpper(prev) && isLower(next)) { + buffer = append(buffer, delimiter) + } + buffer = append(buffer, adjustCase(curr)) + } else if curr != 0 { + buffer = append(buffer, adjustCase(curr)) + } + prev = curr + curr = next + } + + if len(s) > 0 { + if isUpper(curr) && isLower(prev) && prev != 0 { + buffer = append(buffer, delimiter) + } + buffer = append(buffer, adjustCase(curr)) + } + + return string(buffer) +} diff --git a/hack/tools/vendor/github.com/tetafro/godot/.goreleaser.yml b/hack/tools/vendor/github.com/tetafro/godot/.goreleaser.yml index c0fc2b6b1f..2f0c2466a5 100644 --- a/hack/tools/vendor/github.com/tetafro/godot/.goreleaser.yml +++ b/hack/tools/vendor/github.com/tetafro/godot/.goreleaser.yml @@ -1,3 +1,5 @@ +version: 2 + builds: - dir: ./cmd/godot checksum: diff --git a/hack/tools/vendor/github.com/tetafro/godot/getters.go b/hack/tools/vendor/github.com/tetafro/godot/getters.go index de3d06e105..eff836b405 100644 --- a/hack/tools/vendor/github.com/tetafro/godot/getters.go +++ b/hack/tools/vendor/github.com/tetafro/godot/getters.go @@ -6,6 +6,7 @@ import ( "go/ast" "go/token" "os" + "path/filepath" "regexp" "strings" ) @@ -36,36 +37,22 @@ func newParsedFile(file *ast.File, fset *token.FileSet) (*parsedFile, error) { file: file, } - var err error - // Read original file. This is necessary for making a replacements for // inline comments. I couldn't find a better way to get original line // with code and comment without reading the file. Function `Format` // from "go/format" won't help here if the original file is not gofmt-ed. - pf.lines, err = readFile(file, fset) - if err != nil { - return nil, fmt.Errorf("read file: %w", err) - } - // Dirty hack. For some cases Go generates temporary files during - // compilation process if there is a cgo block in the source file. Some of - // these temporary files are just copies of original source files but with - // new generated comments at the top. Because of them the content differs - // from AST. For some reason it differs only in golangci-lint. I failed to - // find out the exact description of the process, so let's just skip files - // generated by cgo. - if isCgoGenerated(pf.lines) { - return nil, errUnsuitableInput + filename := getFilename(fset, file) + + if !strings.HasSuffix(filename, ".go") { + return nil, errEmptyInput } - // Check consistency to avoid checking slice indexes in each function. - // Note that `PositionFor` is used with `adjusted=false` to skip `//line` - // directives that can set references to other files (e.g. 
templates) - // instead of the real ones, and break consistency here. - // Issue: https://github.com/tetafro/godot/issues/32 - lastComment := pf.file.Comments[len(pf.file.Comments)-1] - if p := pf.fset.PositionFor(lastComment.End(), false); len(pf.lines) < p.Line { - return nil, fmt.Errorf("inconsistency between file and AST: %s", p.Filename) + var err error + + pf.lines, err = readFile(filename) + if err != nil { + return nil, fmt.Errorf("read file: %w", err) + } return &pf, nil @@ -244,12 +231,12 @@ func getText(comment *ast.CommentGroup, exclude []*regexp.Regexp) (s string) { } // readFile reads file and returns its lines as strings. -func readFile(file *ast.File, fset *token.FileSet) ([]string, error) { - fname := fset.File(file.Package) - f, err := os.ReadFile(fname.Name()) +func readFile(filename string) ([]string, error) { + f, err := os.ReadFile(filepath.Clean(filename)) if err != nil { return nil, err //nolint:wrapcheck } + return strings.Split(string(f), "\n"), nil } @@ -275,11 +262,11 @@ func matchAny(s string, rr []*regexp.Regexp) bool { return false } -func isCgoGenerated(lines []string) bool { - for i := range lines { - if strings.Contains(lines[i], "Code generated by cmd/cgo") { - return true - } +func getFilename(fset *token.FileSet, file *ast.File) string { + filename := fset.PositionFor(file.Pos(), true).Filename + if !strings.HasSuffix(filename, ".go") { + return fset.PositionFor(file.Pos(), false).Filename } - return false + + return filename } diff --git a/hack/tools/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go b/hack/tools/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go index 21bb485b4e..ae860d728c 100644 --- a/hack/tools/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go +++ b/hack/tools/vendor/github.com/timakin/bodyclose/passes/bodyclose/bodyclose.go @@ -23,7 +23,7 @@ var Analyzer = &analysis.Analyzer{ } const ( - Doc = "bodyclose checks whether HTTP response body is closed successfully" + Doc = "checks whether HTTP response body is closed successfully" nethttpPath = "net/http" closeMethod = "Close" @@ -114,6 +114,18 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { if len(*call.Referrers()) == 0 { return true } + + if instr, ok := b.Instrs[i].(*ssa.Call); ok { + // The response body from httptest.ResponseRecorder does not need to be closed: its Close is a no-op.
+ if callee := instr.Call.StaticCallee(); callee != nil && callee.Name() == "Result" { + if callee.Pkg != nil && callee.Pkg.Pkg.Name() == "httptest" { + if recv := callee.Signature.Recv(); recv != nil && recv.Type().String() == "*net/http/httptest.ResponseRecorder" { + return false + } + } + } + } + cRefs := *call.Referrers() for _, cRef := range cRefs { val, ok := r.getResVal(cRef) @@ -149,6 +161,22 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { return r.calledInFunc(f, called) } + // Case when calling Close() from struct field or method + if s, ok := aref.(*ssa.Store); ok { + if f, ok := s.Addr.(*ssa.FieldAddr); ok { + for _, bRef := range f.Block().Instrs { + bOp, ok := r.getBodyOp(bRef) + if !ok { + continue + } + for _, ccall := range *bOp.Referrers() { + if r.isCloseCall(ccall) { + return false + } + } + } + } + } } case *ssa.Call, *ssa.Defer: // Indirect function call // Hacky way to extract CommonCall @@ -195,6 +223,34 @@ func (r *runner) isopen(b *ssa.BasicBlock, i int) bool { } } } + case *ssa.Phi: // Called in the higher-level block + if resRef.Referrers() == nil { + return true + } + + bRefs := *resRef.Referrers() + + for _, bRef := range bRefs { + switch instr := bRef.(type) { + case *ssa.FieldAddr: + bRefs := *instr.Referrers() + for _, bRef := range bRefs { + bOp, ok := r.getBodyOp(bRef) + if !ok { + continue + } + if len(*bOp.Referrers()) == 0 { + return true + } + ccalls := *bOp.Referrers() + for _, ccall := range ccalls { + if r.isCloseCall(ccall) { + return false + } + } + } + } + } } } } @@ -207,7 +263,9 @@ func (r *runner) getReqCall(instr ssa.Instruction) (*ssa.Call, bool) { if !ok { return nil, false } - if !strings.Contains(call.Type().String(), r.resTyp.String()) { + callType := call.Type().String() + if !strings.Contains(callType, r.resTyp.String()) || + strings.Contains(callType, "net/http.ResponseController") { return nil, false } return call, true diff --git a/hack/tools/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go b/hack/tools/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go index 2c1b2d20be..127f7efd84 100644 --- a/hack/tools/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go +++ b/hack/tools/vendor/github.com/tomarrell/wrapcheck/v2/wrapcheck/wrapcheck.go @@ -44,6 +44,10 @@ type WrapcheckConfig struct { // list to your config. IgnoreSigs []string `mapstructure:"ignoreSigs" yaml:"ignoreSigs"` + // ExtraIgnoreSigs defines an additional list of signatures to ignore, on + // top of IgnoreSigs. + ExtraIgnoreSigs []string `mapstructure:"extraIgnoreSigs" yaml:"extraIgnoreSigs"` + // IgnoreSigRegexps defines a list of regular expressions which if matched // to the signature of the function call returning the error, will be ignored. 
This // allows you to specify functions that wrapcheck will not report as @@ -276,7 +280,7 @@ func reportUnwrapped( // Check for ignored signatures fnSig := pass.TypesInfo.ObjectOf(sel.Sel).String() - if contains(cfg.IgnoreSigs, fnSig) { + if contains(cfg.IgnoreSigs, fnSig) || contains(cfg.ExtraIgnoreSigs, fnSig) { return } else if containsMatch(regexpsSig, fnSig) { return diff --git a/hack/tools/vendor/github.com/ultraware/funlen/.golangci.yml b/hack/tools/vendor/github.com/ultraware/funlen/.golangci.yml new file mode 100644 index 0000000000..600bef78e2 --- /dev/null +++ b/hack/tools/vendor/github.com/ultraware/funlen/.golangci.yml @@ -0,0 +1,2 @@ +run: + timeout: 5m diff --git a/hack/tools/vendor/github.com/ultraware/funlen/README.md b/hack/tools/vendor/github.com/ultraware/funlen/README.md index af2187694e..8bbe7eab68 100644 --- a/hack/tools/vendor/github.com/ultraware/funlen/README.md +++ b/hack/tools/vendor/github.com/ultraware/funlen/README.md @@ -16,6 +16,26 @@ The default values are used internally, but might to be adjusted for your specif Funlen is included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable funlen. +## Configuration + +Available configuration options: + +```yaml +linters-settings: + funlen: + # Checks the number of lines in a function. + # If lower than 0, disable the check. + # Default: 60 + lines: 60 + # Checks the number of statements in a function. + # If lower than 0, disable the check. + # Default: 40 + statements: 60 + # Ignore comments when counting lines. + # Default false + ignore-comments: false +``` + # Exclude for tests golangci-lint offers a way to exclude linters in certain cases. More info can be found here: https://golangci-lint.run/usage/configuration/#issues-configuration. diff --git a/hack/tools/vendor/github.com/ultraware/funlen/funlen.go b/hack/tools/vendor/github.com/ultraware/funlen/funlen.go new file mode 100644 index 0000000000..9838f7cc46 --- /dev/null +++ b/hack/tools/vendor/github.com/ultraware/funlen/funlen.go @@ -0,0 +1,115 @@ +package funlen + +import ( + "go/ast" + "go/token" + "reflect" + + "golang.org/x/tools/go/analysis" +) + +const ( + defaultLineLimit = 60 + defaultStmtLimit = 40 +) + +func NewAnalyzer(lineLimit int, stmtLimit int, ignoreComments bool) *analysis.Analyzer { + if lineLimit == 0 { + lineLimit = defaultLineLimit + } + + if stmtLimit == 0 { + stmtLimit = defaultStmtLimit + } + + return &analysis.Analyzer{ + Name: "funlen", + Doc: "Checks for long functions.", + URL: "https://github.com/ultraware/funlen", + Run: func(pass *analysis.Pass) (any, error) { + run(pass, lineLimit, stmtLimit, ignoreComments) + return nil, nil + }, + } +} + +func run(pass *analysis.Pass, lineLimit int, stmtLimit int, ignoreComments bool) { + for _, file := range pass.Files { + cmap := ast.NewCommentMap(pass.Fset, file, file.Comments) + + for _, f := range file.Decls { + decl, ok := f.(*ast.FuncDecl) + if !ok || decl.Body == nil { // decl.Body can be nil for e.g. 
cgo + continue + } + + if stmtLimit > 0 { + if stmts := parseStmts(decl.Body.List); stmts > stmtLimit { + pass.Reportf(decl.Name.Pos(), "Function '%s' has too many statements (%d > %d)", decl.Name.Name, stmts, stmtLimit) + continue + } + } + + if lineLimit > 0 { + if lines := getLines(pass.Fset, decl, cmap.Filter(decl), ignoreComments); lines > lineLimit { + pass.Reportf(decl.Name.Pos(), "Function '%s' is too long (%d > %d)", decl.Name.Name, lines, lineLimit) + } + } + } + } +} + +func getLines(fset *token.FileSet, f *ast.FuncDecl, cmap ast.CommentMap, ignoreComments bool) int { + lineCount := fset.Position(f.End()).Line - fset.Position(f.Pos()).Line - 1 + + if !ignoreComments { + return lineCount + } + + var commentCount int + + for _, c := range cmap.Comments() { + // If the CommentGroup's lines are inside the function + // count how many comments are in the CommentGroup + if (fset.Position(c.Pos()).Line > fset.Position(f.Pos()).Line) && + (fset.Position(c.End()).Line < fset.Position(f.End()).Line) { + commentCount += len(c.List) + } + } + + return lineCount - commentCount +} + +func parseStmts(s []ast.Stmt) (total int) { + for _, v := range s { + total++ + switch stmt := v.(type) { + case *ast.BlockStmt: + total += parseStmts(stmt.List) - 1 + case *ast.ForStmt, *ast.RangeStmt, *ast.IfStmt, + *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: + total += parseBodyListStmts(stmt) + case *ast.CaseClause: + total += parseStmts(stmt.Body) + case *ast.AssignStmt: + total += checkInlineFunc(stmt.Rhs[0]) + case *ast.GoStmt: + total += checkInlineFunc(stmt.Call.Fun) + case *ast.DeferStmt: + total += checkInlineFunc(stmt.Call.Fun) + } + } + return +} + +func checkInlineFunc(stmt ast.Expr) int { + if block, ok := stmt.(*ast.FuncLit); ok { + return parseStmts(block.Body.List) + } + return 0 +} + +func parseBodyListStmts(t any) int { + i := reflect.ValueOf(t).Elem().FieldByName(`Body`).Elem().FieldByName(`List`).Interface() + return parseStmts(i.([]ast.Stmt)) +} diff --git a/hack/tools/vendor/github.com/ultraware/funlen/main.go b/hack/tools/vendor/github.com/ultraware/funlen/main.go deleted file mode 100644 index b68ddb926f..0000000000 --- a/hack/tools/vendor/github.com/ultraware/funlen/main.go +++ /dev/null @@ -1,124 +0,0 @@ -package funlen - -import ( - "fmt" - "go/ast" - "go/token" - "reflect" -) - -const ( - defaultLineLimit = 60 - defaultStmtLimit = 40 -) - -// Run runs this linter on the provided code -func Run(file *ast.File, fset *token.FileSet, lineLimit int, stmtLimit int, ignoreComments bool) []Message { - if lineLimit == 0 { - lineLimit = defaultLineLimit - } - if stmtLimit == 0 { - stmtLimit = defaultStmtLimit - } - - cmap := ast.NewCommentMap(fset, file, file.Comments) - - var msgs []Message - for _, f := range file.Decls { - decl, ok := f.(*ast.FuncDecl) - if !ok || decl.Body == nil { // decl.Body can be nil for e.g. 
cgo - continue - } - - if stmtLimit > 0 { - if stmts := parseStmts(decl.Body.List); stmts > stmtLimit { - msgs = append(msgs, makeStmtMessage(fset, decl.Name, stmts, stmtLimit)) - continue - } - } - - if lineLimit > 0 { - if lines := getLines(fset, decl, cmap.Filter(decl), ignoreComments); lines > lineLimit { - msgs = append(msgs, makeLineMessage(fset, decl.Name, lines, lineLimit)) - } - } - } - - return msgs -} - -// Message contains a message -type Message struct { - Pos token.Position - Message string -} - -func makeLineMessage(fset *token.FileSet, funcInfo *ast.Ident, lines, lineLimit int) Message { - return Message{ - fset.Position(funcInfo.Pos()), - fmt.Sprintf("Function '%s' is too long (%d > %d)\n", funcInfo.Name, lines, lineLimit), - } -} - -func makeStmtMessage(fset *token.FileSet, funcInfo *ast.Ident, stmts, stmtLimit int) Message { - return Message{ - fset.Position(funcInfo.Pos()), - fmt.Sprintf("Function '%s' has too many statements (%d > %d)\n", funcInfo.Name, stmts, stmtLimit), - } -} - -func getLines(fset *token.FileSet, f *ast.FuncDecl, cmap ast.CommentMap, ignoreComments bool) int { // nolint: interfacer - var lineCount int - var commentCount int - - lineCount = fset.Position(f.End()).Line - fset.Position(f.Pos()).Line - 1 - - if !ignoreComments { - return lineCount - } - - for _, c := range cmap.Comments() { - // If the CommenGroup's lines are inside the function - // count how many comments are in the CommentGroup - if (fset.Position(c.Pos()).Line > fset.Position(f.Pos()).Line) && - (fset.Position(c.End()).Line < fset.Position(f.End()).Line) { - commentCount += len(c.List) - } - } - - return lineCount - commentCount -} - -func parseStmts(s []ast.Stmt) (total int) { - for _, v := range s { - total++ - switch stmt := v.(type) { - case *ast.BlockStmt: - total += parseStmts(stmt.List) - 1 - case *ast.ForStmt, *ast.RangeStmt, *ast.IfStmt, - *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt: - total += parseBodyListStmts(stmt) - case *ast.CaseClause: - total += parseStmts(stmt.Body) - case *ast.AssignStmt: - total += checkInlineFunc(stmt.Rhs[0]) - case *ast.GoStmt: - total += checkInlineFunc(stmt.Call.Fun) - case *ast.DeferStmt: - total += checkInlineFunc(stmt.Call.Fun) - } - } - return -} - -func checkInlineFunc(stmt ast.Expr) int { - if block, ok := stmt.(*ast.FuncLit); ok { - return parseStmts(block.Body.List) - } - return 0 -} - -func parseBodyListStmts(t interface{}) int { - i := reflect.ValueOf(t).Elem().FieldByName(`Body`).Elem().FieldByName(`List`).Interface() - return parseStmts(i.([]ast.Stmt)) -} diff --git a/hack/tools/vendor/github.com/ultraware/whitespace/README.md b/hack/tools/vendor/github.com/ultraware/whitespace/README.md index f99ecce36c..660c13d78c 100644 --- a/hack/tools/vendor/github.com/ultraware/whitespace/README.md +++ b/hack/tools/vendor/github.com/ultraware/whitespace/README.md @@ -4,6 +4,6 @@ Whitespace is a linter that checks for unnecessary newlines at the start and end ## Installation guide -To install as a standalone linter, run `go install github.com/ultraware/whitespace`. +To install as a standalone linter, run `go install github.com/ultraware/whitespace/cmd/whitespace@latest`. Whitespace is also included in [golangci-lint](https://github.com/golangci/golangci-lint/). Install it and enable whitespace. 
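The two diagnostics the whitespace linter emits, which the refactor below converts to analysis.Diagnostic values with suggested fixes, are easiest to picture with a small sketch (illustrative only; not part of the vendored code):

```go
package example

func leading() int {

	// The blank line after the opening brace above is reported as
	// "unnecessary leading newline"; the suggested fix removes it.
	return 1
}

func trailing() int {
	// The blank line before the closing brace below is reported as
	// "unnecessary trailing newline".
	return 2

}
```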
diff --git a/hack/tools/vendor/github.com/ultraware/whitespace/whitespace.go b/hack/tools/vendor/github.com/ultraware/whitespace/whitespace.go index 350e9b7e4e..44e68124c3 100644 --- a/hack/tools/vendor/github.com/ultraware/whitespace/whitespace.go +++ b/hack/tools/vendor/github.com/ultraware/whitespace/whitespace.go @@ -9,53 +9,8 @@ import ( "golang.org/x/tools/go/analysis" ) -// MessageType describes what should happen to fix the warning. -type MessageType uint8 - -// List of MessageTypes. -const ( - MessageTypeRemove MessageType = iota + 1 - MessageTypeAdd -) - -// RunningMode describes the mode the linter is run in. This can be either -// native or golangci-lint. -type RunningMode uint8 - -const ( - RunningModeNative RunningMode = iota - RunningModeGolangCI -) - -// Message contains a message and diagnostic information. -type Message struct { - // Diagnostic is what position the diagnostic should be put at. This isn't - // always the same as the fix start, f.ex. when we fix trailing newlines we - // put the diagnostic at the right bracket but we fix between the end of the - // last statement and the bracket. - Diagnostic token.Pos - - // FixStart is the span start of the fix. - FixStart token.Pos - - // FixEnd is the span end of the fix. - FixEnd token.Pos - - // LineNumbers represent the actual line numbers in the file. This is set - // when finding the diagnostic to make it easier to suggest fixes in - // golangci-lint. - LineNumbers []int - - // MessageType represents the type of message it is. - MessageType MessageType - - // Message is the diagnostic to show. - Message string -} - // Settings contains settings for edge-cases. type Settings struct { - Mode RunningMode MultiIf bool MultiFunc bool } @@ -86,47 +41,24 @@ func flags(settings *Settings) flag.FlagSet { return *flags } -func Run(pass *analysis.Pass, settings *Settings) []Message { - messages := []Message{} - +func Run(pass *analysis.Pass, settings *Settings) { for _, file := range pass.Files { filename := pass.Fset.Position(file.Pos()).Filename + if !strings.HasSuffix(filename, ".go") { continue } fileMessages := runFile(file, pass.Fset, *settings) - if settings.Mode == RunningModeGolangCI { - messages = append(messages, fileMessages...) 
- continue - } - for _, message := range fileMessages { - pass.Report(analysis.Diagnostic{ - Pos: message.Diagnostic, - Category: "whitespace", - Message: message.Message, - SuggestedFixes: []analysis.SuggestedFix{ - { - TextEdits: []analysis.TextEdit{ - { - Pos: message.FixStart, - End: message.FixEnd, - NewText: []byte("\n"), - }, - }, - }, - }, - }) + pass.Report(message) } } - - return messages } -func runFile(file *ast.File, fset *token.FileSet, settings Settings) []Message { - var messages []Message +func runFile(file *ast.File, fset *token.FileSet, settings Settings) []analysis.Diagnostic { + var messages []analysis.Diagnostic for _, f := range file.Decls { decl, ok := f.(*ast.FuncDecl) @@ -146,7 +78,7 @@ func runFile(file *ast.File, fset *token.FileSet, settings Settings) []Message { type visitor struct { comments []*ast.CommentGroup fset *token.FileSet - messages []Message + messages []analysis.Diagnostic wantNewline map[*ast.BlockStmt]bool settings Settings } @@ -180,13 +112,16 @@ func (v *visitor) Visit(node ast.Node) ast.Visitor { startMsg := checkStart(v.fset, opening, first) if wantNewline && startMsg == nil && len(stmt.List) >= 1 { - v.messages = append(v.messages, Message{ - Diagnostic: opening, - FixStart: stmt.List[0].Pos(), - FixEnd: stmt.List[0].Pos(), - LineNumbers: []int{v.fset.PositionFor(stmt.List[0].Pos(), false).Line}, - MessageType: MessageTypeAdd, - Message: "multi-line statement should be followed by a newline", + v.messages = append(v.messages, analysis.Diagnostic{ + Pos: opening, + Message: "multi-line statement should be followed by a newline", + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: stmt.List[0].Pos(), + End: stmt.List[0].Pos(), + NewText: []byte("\n"), + }}, + }}, }) } else if !wantNewline && startMsg != nil { v.messages = append(v.messages, *startMsg) @@ -209,7 +144,7 @@ func checkMultiLine(v *visitor, body *ast.BlockStmt, stmtStart ast.Node) { } func posLine(fset *token.FileSet, pos token.Pos) int { - return fset.PositionFor(pos, false).Line + return fset.Position(pos).Line } func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, stmt *ast.BlockStmt) (token.Pos, ast.Node, ast.Node) { @@ -256,52 +191,46 @@ func firstAndLast(comments []*ast.CommentGroup, fset *token.FileSet, stmt *ast.B return openingPos, first, last } -func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *Message { +func checkStart(fset *token.FileSet, start token.Pos, first ast.Node) *analysis.Diagnostic { if first == nil { return nil } if posLine(fset, start)+1 < posLine(fset, first.Pos()) { - return &Message{ - Diagnostic: start, - FixStart: start, - FixEnd: first.Pos(), - LineNumbers: linesBetween(fset, start, first.Pos()), - MessageType: MessageTypeRemove, - Message: "unnecessary leading newline", + return &analysis.Diagnostic{ + Pos: start, + Message: "unnecessary leading newline", + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: start, + End: first.Pos(), + NewText: []byte("\n"), + }}, + }}, } } return nil } -func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *Message { +func checkEnd(fset *token.FileSet, end token.Pos, last ast.Node) *analysis.Diagnostic { if last == nil { return nil } if posLine(fset, end)-1 > posLine(fset, last.End()) { - return &Message{ - Diagnostic: end, - FixStart: last.End(), - FixEnd: end, - LineNumbers: linesBetween(fset, last.End(), end), - MessageType: MessageTypeRemove, - Message: "unnecessary trailing newline", + return 
&analysis.Diagnostic{ + Pos: end, + Message: "unnecessary trailing newline", + SuggestedFixes: []analysis.SuggestedFix{{ + TextEdits: []analysis.TextEdit{{ + Pos: last.End(), + End: end, + NewText: []byte("\n"), + }}, + }}, } } return nil } - -func linesBetween(fset *token.FileSet, a, b token.Pos) []int { - lines := []int{} - aPosition := fset.PositionFor(a, false) - bPosition := fset.PositionFor(b, false) - - for i := aPosition.Line + 1; i < bPosition.Line; i++ { - lines = append(lines, i) - } - - return lines -} diff --git a/hack/tools/vendor/github.com/uudashr/gocognit/README.md b/hack/tools/vendor/github.com/uudashr/gocognit/README.md index c15f61bcfc..415e81e739 100644 --- a/hack/tools/vendor/github.com/uudashr/gocognit/README.md +++ b/hack/tools/vendor/github.com/uudashr/gocognit/README.md @@ -147,14 +147,14 @@ The following structures receive a nesting increment commensurate with their nes ## Installation -``` -$ go install github.com/uudashr/gocognit/cmd/gocognit@latest +```shell +go install github.com/uudashr/gocognit/cmd/gocognit@latest ``` or -``` -$ go get github.com/uudashr/gocognit/cmd/gocognit +```shell +go get github.com/uudashr/gocognit/cmd/gocognit ``` ## Usage @@ -169,14 +169,17 @@ Usage: Flags: - -over N show functions with complexity > N only - and return exit code 1 if the output is non-empty - -top N show the top N most complex functions only - -avg show the average complexity over all functions, - not depending on whether -over or -top are set - -json encode the output as JSON - -f format string the format to use - (default "{{.PkgName}}.{{.FuncName}}:{{.Complexity}}:{{.Pos}}") + -over N show functions with complexity > N only + and return exit code 1 if the output is non-empty + -top N show the top N most complex functions only + -avg show the average complexity over all functions, + not depending on whether -over or -top are set + -test indicates whether test files should be included + -json encode the output as JSON + -d enable diagnostic output + -f format string the format to use + (default "{{.Complexity}} {{.PkgName}} {{.FuncName}} {{.Pos}}") + -ignore expr ignore files matching the given regexp The (default) output fields for each line are: @@ -191,10 +194,24 @@ or equal to The struct being passed to the template is: type Stat struct { - PkgName string - FuncName string - Complexity int - Pos token.Position + PkgName string + FuncName string + Complexity int + Pos token.Position + Diagnostics []Diagnostics + } + + type Diagnostic struct { + Inc string + Nesting int + Text string + Pos DiagnosticPosition + } + + type DiagnosticPosition struct { + Offset int + Line int + Column int } ``` @@ -223,6 +240,76 @@ func IgnoreMe() { } ``` +## Diagnostic +To understand how the complexity are calculated, we can enable the diagnostic by using `-d` flag. + +Example: +```shell +$ gocognit -json -d . +``` + +It will show the diagnostic output in JSON format +
+ +JSON Output + +```json +[ + { + "PkgName": "prime", + "FuncName": "SumOfPrimes", + "Complexity": 7, + "Pos": { + "Filename": "prime.go", + "Offset": 15, + "Line": 3, + "Column": 1 + }, + "Diagnostics": [ + { + "Inc": 1, + "Text": "for", + "Pos": { + "Offset": 69, + "Line": 7, + "Column": 2 + } + }, + { + "Inc": 2, + "Nesting": 1, + "Text": "for", + "Pos": { + "Offset": 104, + "Line": 8, + "Column": 3 + } + }, + { + "Inc": 3, + "Nesting": 2, + "Text": "if", + "Pos": { + "Offset": 152, + "Line": 9, + "Column": 4 + } + }, + { + "Inc": 1, + "Text": "continue", + "Pos": { + "Offset": 190, + "Line": 10, + "Column": 5 + } + } + ] + } +] +``` +
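Aside: the gocognit changes in this diff expose `ScanComplexity` and `ComplexityStatsWithDiagnostic`. A hedged sketch of driving the same diagnostics programmatically, using only the signatures added here; the input path `prime.go` is illustrative:

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"

	"github.com/uudashr/gocognit"
)

func main() {
	fset := token.NewFileSet()
	// ParseComments is needed so the //gocognit:ignore directive in
	// fn.Doc can be honored.
	f, err := parser.ParseFile(fset, "prime.go", nil, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}

	// Passing true enables the per-increment diagnostics collected by
	// the new diagnosticsEnabled path in complexityVisitor.
	stats := gocognit.ComplexityStatsWithDiagnostic(f, fset, nil, true)
	for _, s := range stats {
		fmt.Printf("%d %s %s %s\n", s.Complexity, s.PkgName, s.FuncName, s.Pos)
		for _, d := range s.Diagnostics {
			// d renders as "+N" or "+N (nesting=M)" via its String method.
			fmt.Printf("  %s %s at %s\n", d, d.Text, d.Pos)
		}
	}
}
```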
+ ## Related project - [Gocyclo](https://github.com/fzipp/gocyclo) where the code are based on. - [Cognitive Complexity: A new way of measuring understandability](https://www.sonarsource.com/docs/CognitiveComplexity.pdf) white paper by G. Ann Campbell. diff --git a/hack/tools/vendor/github.com/uudashr/gocognit/doc.go b/hack/tools/vendor/github.com/uudashr/gocognit/doc.go index ae3d0a2264..797b192282 100644 --- a/hack/tools/vendor/github.com/uudashr/gocognit/doc.go +++ b/hack/tools/vendor/github.com/uudashr/gocognit/doc.go @@ -1,2 +1,3 @@ -// Package gocognit defines Analyzer other utilities to checks and calculate the complexity of function based on "cognitive complexity" methods. +// Package gocognit defines Analyzer other utilities to checks and calculate +// the complexity of function based on "cognitive complexity" methods. package gocognit diff --git a/hack/tools/vendor/github.com/uudashr/gocognit/gocognit.go b/hack/tools/vendor/github.com/uudashr/gocognit/gocognit.go index 2bba2eb4f0..e51ee2a042 100644 --- a/hack/tools/vendor/github.com/uudashr/gocognit/gocognit.go +++ b/hack/tools/vendor/github.com/uudashr/gocognit/gocognit.go @@ -4,6 +4,7 @@ import ( "fmt" "go/ast" "go/token" + "strconv" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/inspect" @@ -12,10 +13,58 @@ import ( // Stat is statistic of the complexity. type Stat struct { - PkgName string - FuncName string - Complexity int - Pos token.Position + PkgName string + FuncName string + Complexity int + Pos token.Position + Diagnostics []Diagnostic `json:",omitempty"` +} + +// Diagnostic contains information how the complexity increase. +type Diagnostic struct { + Inc int + Nesting int `json:",omitempty"` + Text string + Pos DiagnosticPosition +} + +// DiagnosticPosition is the position of the diagnostic. +type DiagnosticPosition struct { + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (byte count) +} + +func (pos DiagnosticPosition) isValid() bool { + return pos.Line > 0 +} + +func (pos DiagnosticPosition) String() string { + var s string + if pos.isValid() { + if s != "" { + s += ":" + } + + s += strconv.Itoa(pos.Line) + if pos.Column != 0 { + s += fmt.Sprintf(":%d", pos.Column) + } + } + + if s == "" { + s = "-" + } + + return s +} + +func (d Diagnostic) String() string { + if d.Nesting == 0 { + return fmt.Sprintf("+%d", d.Inc) + } + + return fmt.Sprintf("+%d (nesting=%d)", d.Inc, d.Nesting) } func (s Stat) String() string { @@ -24,6 +73,11 @@ func (s Stat) String() string { // ComplexityStats builds the complexity statistics. func ComplexityStats(f *ast.File, fset *token.FileSet, stats []Stat) []Stat { + return ComplexityStatsWithDiagnostic(f, fset, stats, false) +} + +// ComplexityStatsWithDiagnostic builds the complexity statistics with diagnostic. 
+func ComplexityStatsWithDiagnostic(f *ast.File, fset *token.FileSet, stats []Stat, enableDiagnostics bool) []Stat { for _, decl := range f.Decls { if fn, ok := decl.(*ast.FuncDecl); ok { d := parseDirective(fn.Doc) @@ -31,17 +85,43 @@ func ComplexityStats(f *ast.File, fset *token.FileSet, stats []Stat) []Stat { continue } + res := ScanComplexity(fn, enableDiagnostics) + stats = append(stats, Stat{ - PkgName: f.Name.Name, - FuncName: funcName(fn), - Complexity: Complexity(fn), - Pos: fset.Position(fn.Pos()), + PkgName: f.Name.Name, + FuncName: funcName(fn), + Complexity: res.Complexity, + Diagnostics: generateDiagnostics(fset, res.Diagnostics), + Pos: fset.Position(fn.Pos()), }) } } + return stats } +func generateDiagnostics(fset *token.FileSet, diags []diagnostic) []Diagnostic { + out := make([]Diagnostic, 0, len(diags)) + + for _, diag := range diags { + pos := fset.Position(diag.Pos) + diagPos := DiagnosticPosition{ + Offset: pos.Offset, + Line: pos.Line, + Column: pos.Column, + } + + out = append(out, Diagnostic{ + Inc: diag.Inc, + Nesting: diag.Nesting, + Text: diag.Text, + Pos: diagPos, + }) + } + + return out +} + type directive struct { Ignore bool } @@ -66,20 +146,46 @@ func funcName(fn *ast.FuncDecl) string { if fn.Recv != nil { if fn.Recv.NumFields() > 0 { typ := fn.Recv.List[0].Type + return fmt.Sprintf("(%s).%s", recvString(typ), fn.Name) } } + return fn.Name.Name } // Complexity calculates the cognitive complexity of a function. func Complexity(fn *ast.FuncDecl) int { + res := ScanComplexity(fn, false) + + return res.Complexity +} + +// ScanComplexity scans the function declaration. +func ScanComplexity(fn *ast.FuncDecl, includeDiagnostics bool) ScanResult { v := complexityVisitor{ - name: fn.Name, + name: fn.Name, + diagnosticsEnabled: includeDiagnostics, } ast.Walk(&v, fn) - return v.complexity + + return ScanResult{ + Diagnostics: v.diagnostics, + Complexity: v.complexity, + } +} + +type ScanResult struct { + Diagnostics []diagnostic + Complexity int +} + +type diagnostic struct { + Inc int + Nesting int + Text string + Pos token.Pos } type complexityVisitor struct { @@ -88,6 +194,9 @@ type complexityVisitor struct { nesting int elseNodes map[ast.Node]bool calculatedExprs map[ast.Expr]bool + + diagnosticsEnabled bool + diagnostics []diagnostic } func (v *complexityVisitor) incNesting() { @@ -98,12 +207,33 @@ func (v *complexityVisitor) decNesting() { v.nesting-- } -func (v *complexityVisitor) incComplexity() { +func (v *complexityVisitor) incComplexity(text string, pos token.Pos) { v.complexity++ + + if !v.diagnosticsEnabled { + return + } + + v.diagnostics = append(v.diagnostics, diagnostic{ + Inc: 1, + Text: text, + Pos: pos, + }) } -func (v *complexityVisitor) nestIncComplexity() { +func (v *complexityVisitor) nestIncComplexity(text string, pos token.Pos) { v.complexity += (v.nesting + 1) + + if !v.diagnosticsEnabled { + return + } + + v.diagnostics = append(v.diagnostics, diagnostic{ + Inc: v.nesting + 1, + Nesting: v.nesting, + Text: text, + Pos: pos, + }) } func (v *complexityVisitor) markAsElseNode(n ast.Node) { @@ -162,11 +292,12 @@ func (v *complexityVisitor) Visit(n ast.Node) ast.Visitor { case *ast.CallExpr: return v.visitCallExpr(n) } + return v } func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { - v.incIfComplexity(n) + v.incIfComplexity(n, "if", n.Pos()) if n := n.Init; n != nil { ast.Walk(v, n) @@ -174,17 +305,12 @@ func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { ast.Walk(v, n.Cond) - pure := !v.markedAsElseNode(n) // 
pure `if` statement, not an `else if` - if pure { - v.incNesting() - ast.Walk(v, n.Body) - v.decNesting() - } else { - ast.Walk(v, n.Body) - } + v.incNesting() + ast.Walk(v, n.Body) + v.decNesting() if _, ok := n.Else.(*ast.BlockStmt); ok { - v.incComplexity() + v.incComplexity("else", n.Else.Pos()) ast.Walk(v, n.Else) } else if _, ok := n.Else.(*ast.IfStmt); ok { @@ -196,7 +322,7 @@ func (v *complexityVisitor) visitIfStmt(n *ast.IfStmt) ast.Visitor { } func (v *complexityVisitor) visitSwitchStmt(n *ast.SwitchStmt) ast.Visitor { - v.nestIncComplexity() + v.nestIncComplexity("switch", n.Pos()) if n := n.Init; n != nil { ast.Walk(v, n) @@ -209,11 +335,12 @@ func (v *complexityVisitor) visitSwitchStmt(n *ast.SwitchStmt) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() + return nil } func (v *complexityVisitor) visitTypeSwitchStmt(n *ast.TypeSwitchStmt) ast.Visitor { - v.nestIncComplexity() + v.nestIncComplexity("switch", n.Pos()) if n := n.Init; n != nil { ast.Walk(v, n) @@ -226,20 +353,22 @@ func (v *complexityVisitor) visitTypeSwitchStmt(n *ast.TypeSwitchStmt) ast.Visit v.incNesting() ast.Walk(v, n.Body) v.decNesting() + return nil } func (v *complexityVisitor) visitSelectStmt(n *ast.SelectStmt) ast.Visitor { - v.nestIncComplexity() + v.nestIncComplexity("select", n.Pos()) v.incNesting() ast.Walk(v, n.Body) v.decNesting() + return nil } func (v *complexityVisitor) visitForStmt(n *ast.ForStmt) ast.Visitor { - v.nestIncComplexity() + v.nestIncComplexity("for", n.Pos()) if n := n.Init; n != nil { ast.Walk(v, n) @@ -256,11 +385,12 @@ func (v *complexityVisitor) visitForStmt(n *ast.ForStmt) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() + return nil } func (v *complexityVisitor) visitRangeStmt(n *ast.RangeStmt) ast.Visitor { - v.nestIncComplexity() + v.nestIncComplexity("for", n.Pos()) if n := n.Key; n != nil { ast.Walk(v, n) @@ -275,6 +405,7 @@ func (v *complexityVisitor) visitRangeStmt(n *ast.RangeStmt) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() + return nil } @@ -284,13 +415,15 @@ func (v *complexityVisitor) visitFuncLit(n *ast.FuncLit) ast.Visitor { v.incNesting() ast.Walk(v, n.Body) v.decNesting() + return nil } func (v *complexityVisitor) visitBranchStmt(n *ast.BranchStmt) ast.Visitor { if n.Label != nil { - v.incComplexity() + v.incComplexity(n.Tok.String(), n.Pos()) } + return v } @@ -301,11 +434,12 @@ func (v *complexityVisitor) visitBinaryExpr(n *ast.BinaryExpr) ast.Visitor { var lastOp token.Token for _, op := range ops { if lastOp != op { - v.incComplexity() + v.incComplexity(op.String(), n.OpPos) lastOp = op } } } + return v } @@ -314,34 +448,38 @@ func (v *complexityVisitor) visitCallExpr(n *ast.CallExpr) ast.Visitor { obj, name := callIdent.Obj, callIdent.Name if obj == v.name.Obj && name == v.name.Name { // called by same function directly (direct recursion) - v.incComplexity() + v.incComplexity(name, n.Pos()) } } + return v } func (v *complexityVisitor) collectBinaryOps(exp ast.Expr) []token.Token { v.markCalculated(exp) + if exp, ok := exp.(*ast.BinaryExpr); ok { return mergeBinaryOps(v.collectBinaryOps(exp.X), exp.Op, v.collectBinaryOps(exp.Y)) } return nil } -func (v *complexityVisitor) incIfComplexity(n *ast.IfStmt) { +func (v *complexityVisitor) incIfComplexity(n *ast.IfStmt, text string, pos token.Pos) { if v.markedAsElseNode(n) { - v.incComplexity() + v.incComplexity(text, pos) } else { - v.nestIncComplexity() + v.nestIncComplexity(text, pos) } } func mergeBinaryOps(x []token.Token, op token.Token, y []token.Token) 
[]token.Token { var out []token.Token out = append(out, x...) + if isBinaryLogicalOp(op) { out = append(out, op) } + out = append(out, y...) return out } diff --git a/hack/tools/vendor/github.com/uudashr/gocognit/recv.go b/hack/tools/vendor/github.com/uudashr/gocognit/recv.go index 2f20d843a4..eaf3c9762d 100644 --- a/hack/tools/vendor/github.com/uudashr/gocognit/recv.go +++ b/hack/tools/vendor/github.com/uudashr/gocognit/recv.go @@ -20,5 +20,6 @@ func recvString(recv ast.Expr) string { case *ast.IndexListExpr: return recvString(t.X) } + return "BADRECV" } diff --git a/hack/tools/vendor/github.com/uudashr/gocognit/recv_pre118.go b/hack/tools/vendor/github.com/uudashr/gocognit/recv_pre118.go index 9e0ebfd822..a47ba1bd5a 100644 --- a/hack/tools/vendor/github.com/uudashr/gocognit/recv_pre118.go +++ b/hack/tools/vendor/github.com/uudashr/gocognit/recv_pre118.go @@ -16,5 +16,6 @@ func recvString(recv ast.Expr) string { case *ast.StarExpr: return "*" + recvString(t.X) } + return "BADRECV" } diff --git a/hack/tools/vendor/github.com/uudashr/iface/opaque/opaque.go b/hack/tools/vendor/github.com/uudashr/iface/opaque/opaque.go index fda0f001ba..59e406322f 100644 --- a/hack/tools/vendor/github.com/uudashr/iface/opaque/opaque.go +++ b/hack/tools/vendor/github.com/uudashr/iface/opaque/opaque.go @@ -53,6 +53,11 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { return } + if funcDecl.Body == nil { + // skip functions without body + return + } + if funcDecl.Type.Results == nil { // skip functions without return values return @@ -133,7 +138,7 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { typ := pass.TypesInfo.TypeOf(res) switch typ := typ.(type) { case *types.Tuple: - for i := 0; i < typ.Len(); i++ { + for i := range typ.Len() { v := typ.At(i) vTyp := v.Type() retStmtTypes[i][vTyp] = struct{}{} @@ -269,12 +274,28 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { stmtTypName = removePkgPrefix(stmtTypName) } - pass.Reportf(result.Pos(), - "%s function return %s interface at the %s result, abstract a single concrete implementation of %s", + msg := fmt.Sprintf("%s function return %s interface at the %s result, abstract a single concrete implementation of %s", funcDecl.Name.Name, retTypeName, positionStr(currentIdx), stmtTypName) + + pass.Report(analysis.Diagnostic{ + Pos: result.Pos(), + Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Replace the interface return type with the concrete type", + TextEdits: []analysis.TextEdit{ + { + Pos: result.Pos(), + End: result.End(), + NewText: []byte(stmtTypName), + }, + }, + }, + }, + }) } }) diff --git a/hack/tools/vendor/github.com/uudashr/iface/unused/unused.go b/hack/tools/vendor/github.com/uudashr/iface/unused/unused.go index c2efbf52c8..9c301ae673 100644 --- a/hack/tools/vendor/github.com/uudashr/iface/unused/unused.go +++ b/hack/tools/vendor/github.com/uudashr/iface/unused/unused.go @@ -48,7 +48,8 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) // Collect all interface type declarations - ifaceDecls := make(map[string]token.Pos) + ifaceDecls := make(map[string]*ast.TypeSpec) + genDecls := make(map[string]*ast.GenDecl) // ifaceName -> GenDecl nodeFilter := []ast.Node{ (*ast.GenDecl)(nil), @@ -80,7 +81,7 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { _, ok = ts.Type.(*ast.InterfaceType) if !ok { - return + continue } if r.debug { @@ -93,7 +94,8 @@ func (r *runner) run(pass 
*analysis.Pass) (interface{}, error) { continue } - ifaceDecls[ts.Name.Name] = ts.Pos() + ifaceDecls[ts.Name.Name] = ts + genDecls[ts.Name.Name] = decl } }) @@ -117,21 +119,51 @@ func (r *runner) run(pass *analysis.Pass) (interface{}, error) { return } - pos := ifaceDecls[ident.Name] - if pos == ident.Pos() { + ts, ok := ifaceDecls[ident.Name] + if !ok { + return + } + + if ts.Pos() == ident.Pos() { // The identifier is the interface type declaration return } delete(ifaceDecls, ident.Name) + delete(genDecls, ident.Name) }) if r.debug { fmt.Printf("Package %s %s\n", pass.Pkg.Path(), pass.Pkg.Name()) } - for name, pos := range ifaceDecls { - pass.Reportf(pos, "interface %s is declared but not used within the package", name) + for name, ts := range ifaceDecls { + decl := genDecls[name] + + var node ast.Node + if len(decl.Specs) == 1 { + node = decl + } else { + node = ts + } + + msg := fmt.Sprintf("interface %s is declared but not used within the package", name) + pass.Report(analysis.Diagnostic{ + Pos: ts.Pos(), + Message: msg, + SuggestedFixes: []analysis.SuggestedFix{ + { + Message: "Remove the unused interface declaration", + TextEdits: []analysis.TextEdit{ + { + Pos: node.Pos(), + End: node.End(), + NewText: []byte{}, + }, + }, + }, + }, + }) } return nil, nil diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go new file mode 100644 index 0000000000..6aae83bfd2 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// DefaultClient is the default Client and is used by Get, Head, Post and PostForm. +// Please be careful of initialization order - for example, if you change +// the global propagator, the DefaultClient might still be using the old one. 
+var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} + +// Get is a convenient replacement for http.Get that adds a span around the request. +func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Head is a convenient replacement for http.Head that adds a span around the request. +func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Post is a convenient replacement for http.Post that adds a span around the request. +func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", contentType) + return DefaultClient.Do(req) +} + +// PostForm is a convenient replacement for http.PostForm that adds a span around the request. +func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) { + return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go new file mode 100644 index 0000000000..5d6e6156b7 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Attribute keys that can be added to a span. +const ( + ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read + ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded) + WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written + WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) +) + +// Client HTTP metrics. +const ( + clientRequestSize = "http.client.request.size" // Outgoing request bytes total + clientResponseSize = "http.client.response.size" // Outgoing response bytes total + clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds +) + +// Filter is a predicate used to determine whether a given http.request should +// be traced. A Filter must return true if the request should be traced. 
+type Filter func(*http.Request) bool + +func newTracer(tp trace.TracerProvider) trace.Tracer { + return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version())) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go new file mode 100644 index 0000000000..a01bfafbe0 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "net/http" + "net/http/httptrace" + + "go.opentelemetry.io/otel/attribute" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +// ScopeName is the instrumentation scope name. +const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +// config represents the configuration options available for the http.Handler +// and http.Transport types. +type config struct { + ServerName string + Tracer trace.Tracer + Meter metric.Meter + Propagators propagation.TextMapPropagator + SpanStartOptions []trace.SpanStartOption + PublicEndpoint bool + PublicEndpointFn func(*http.Request) bool + ReadEvent bool + WriteEvent bool + Filters []Filter + SpanNameFormatter func(string, *http.Request) string + ClientTrace func(context.Context) *httptrace.ClientTrace + + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + MetricAttributesFn func(*http.Request) []attribute.KeyValue +} + +// Option interface used for setting optional config properties. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +// newConfig creates a new config struct and applies opts to it. +func newConfig(opts ...Option) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), + MeterProvider: otel.GetMeterProvider(), + } + for _, opt := range opts { + opt.apply(c) + } + + // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context. + if c.TracerProvider != nil { + c.Tracer = newTracer(c.TracerProvider) + } + + c.Meter = c.MeterProvider.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version()), + ) + + return c +} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. +// If none is specified, the global provider is used. +func WithTracerProvider(provider trace.TracerProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.TracerProvider = provider + } + }) +} + +// WithMeterProvider specifies a meter provider to use for creating a meter. +// If none is specified, the global provider is used. +func WithMeterProvider(provider metric.MeterProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.MeterProvider = provider + } + }) +} + +// WithPublicEndpoint configures the Handler to link the span with an incoming +// span context. If this option is not provided, then the association is a child +// association instead of a link. 
+func WithPublicEndpoint() Option { + return optionFunc(func(c *config) { + c.PublicEndpoint = true + }) +} + +// WithPublicEndpointFn runs with every request, and allows conditionally +// configuring the Handler to link the span with an incoming span context. If +// this option is not provided or returns false, then the association is a +// child association instead of a link. +// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. +func WithPublicEndpointFn(fn func(*http.Request) bool) Option { + return optionFunc(func(c *config) { + c.PublicEndpointFn = fn + }) +} + +// WithPropagators configures specific propagators. If this +// option isn't specified, then the global TextMapPropagator is used. +func WithPropagators(ps propagation.TextMapPropagator) Option { + return optionFunc(func(c *config) { + if ps != nil { + c.Propagators = ps + } + }) +} + +// WithSpanOptions configures an additional set of +// trace.SpanOptions, which are applied to each new span. +func WithSpanOptions(opts ...trace.SpanStartOption) Option { + return optionFunc(func(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, opts...) + }) +} + +// WithFilter adds a filter to the list of filters used by the handler. +// If any filter indicates to exclude a request then the request will not be +// traced. All filters must allow a request to be traced for a Span to be created. +// If no filters are provided then all requests are traced. +// Filters will be invoked for each processed request, it is advised to make them +// simple and fast. +func WithFilter(f Filter) Option { + return optionFunc(func(c *config) { + c.Filters = append(c.Filters, f) + }) +} + +type event int + +// Different types of events that can be recorded, see WithMessageEvents. +const ( + ReadEvents event = iota + WriteEvents +) + +// WithMessageEvents configures the Handler to record the specified events +// (span.AddEvent) on spans. By default only summary attributes are added at the +// end of the request. +// +// Valid events are: +// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read +// using the ReadBytesKey +// - WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write +// using the WriteBytesKey +func WithMessageEvents(events ...event) Option { + return optionFunc(func(c *config) { + for _, e := range events { + switch e { + case ReadEvents: + c.ReadEvent = true + case WriteEvents: + c.WriteEvent = true + } + } + }) +} + +// WithSpanNameFormatter takes a function that will be called on every +// request and the returned string will become the Span Name. +func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option { + return optionFunc(func(c *config) { + c.SpanNameFormatter = f + }) +} + +// WithClientTrace takes a function that returns client trace instance that will be +// applied to the requests sent through the otelhttp Transport. +func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option { + return optionFunc(func(c *config) { + c.ClientTrace = f + }) +} + +// WithServerName returns an Option that sets the name of the (virtual) server +// handling requests. +func WithServerName(server string) Option { + return optionFunc(func(c *config) { + c.ServerName = server + }) +} + +// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. +// These attributes will be included in metrics for every request. 
+func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { + return optionFunc(func(c *config) { + c.MetricAttributesFn = metricAttributesFn + }) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go new file mode 100644 index 0000000000..56b24b982a --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -0,0 +1,7 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package otelhttp provides an http.Handler and functions that are intended +// to be used to add tracing by wrapping existing handlers (with Handler) and +// routes WithRouteTag. +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go new file mode 100644 index 0000000000..33580a35b7 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -0,0 +1,217 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "net/http" + "time" + + "github.com/felixge/httpsnoop" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +// middleware is an http middleware which wraps the next handler in a span. +type middleware struct { + operation string + server string + + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + + semconv semconv.HTTPServer +} + +func defaultHandlerFormatter(operation string, _ *http.Request) string { + return operation +} + +// NewHandler wraps the passed handler in a span named after the operation and +// enriches it with metrics. +func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler { + return NewMiddleware(operation, opts...)(handler) +} + +// NewMiddleware returns a tracing and metrics instrumentation middleware. +// The handler returned by the middleware wraps a handler +// in a span named after the operation and enriches it with metrics. +func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { + h := middleware{ + operation: operation, + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)), + WithSpanNameFormatter(defaultHandlerFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) 
+ h.configure(c) + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + h.serveHTTP(w, r, next) + }) + } +} + +func (h *middleware) configure(c *config) { + h.tracer = c.Tracer + h.propagators = c.Propagators + h.spanStartOptions = c.SpanStartOptions + h.readEvent = c.ReadEvent + h.writeEvent = c.WriteEvent + h.filters = c.Filters + h.spanNameFormatter = c.SpanNameFormatter + h.publicEndpoint = c.PublicEndpoint + h.publicEndpointFn = c.PublicEndpointFn + h.server = c.ServerName + h.semconv = semconv.NewHTTPServer(c.Meter) +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} + +// serveHTTP sets up tracing and calls the given next http.Handler with the span +// context injected into the request context. +func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { + requestStartTime := time.Now() + for _, f := range h.filters { + if !f(r) { + // Simply pass through to the handler if a filter rejects the request + next.ServeHTTP(w, r) + return + } + } + + ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) + opts := []trace.SpanStartOption{ + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), + } + + opts = append(opts, h.spanStartOptions...) + if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. + if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { + opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s})) + } + } + + tracer := h.tracer + + if tracer == nil { + if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) + defer span.End() + + readRecordFunc := func(int64) {} + if h.readEvent { + readRecordFunc = func(n int64) { + span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n))) + } + } + + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, readRecordFunc) + if r.Body != nil && r.Body != http.NoBody { + r.Body = bw + } + + writeRecordFunc := func(int64) {} + if h.writeEvent { + writeRecordFunc = func(n int64) { + span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n))) + } + } + + rww := request.NewRespWriterWrapper(w, writeRecordFunc) + + // Wrap w to use our ResponseWriter methods while also exposing + // other interfaces that w may implement (http.CloseNotifier, + // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom). 
+ + w = httpsnoop.Wrap(w, httpsnoop.Hooks{ + Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc { + return rww.Header + }, + Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return rww.Write + }, + WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + return rww.WriteHeader + }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return rww.Flush + }, + }) + + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } + + next.ServeHTTP(w, r.WithContext(ctx)) + + statusCode := rww.StatusCode() + bytesWritten := rww.BytesWritten() + span.SetStatus(h.semconv.Status(statusCode)) + span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: statusCode, + ReadBytes: bw.BytesRead(), + ReadError: bw.Error(), + WriteBytes: bytesWritten, + WriteError: rww.Error(), + })...) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + + h.semconv.RecordMetrics(ctx, semconv.MetricData{ + ServerName: h.server, + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + RequestSize: bw.BytesRead(), + ResponseSize: bytesWritten, + ElapsedTime: elapsedTime, + }) +} + +// WithRouteTag annotates spans and metrics with the provided route name +// with HTTP route attribute. +func WithRouteTag(route string, h http.Handler) http.Handler { + attr := semconv.NewHTTPServer(nil).Route(route) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + span := trace.SpanFromContext(r.Context()) + span.SetAttributes(attr) + + labeler, _ := LabelerFromContext(r.Context()) + labeler.Add(attr) + + h.ServeHTTP(w, r) + }) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go new file mode 100644 index 0000000000..a945f55661 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "io" + "sync" +) + +var _ io.ReadCloser = &BodyWrapper{} + +// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. +type BodyWrapper struct { + io.ReadCloser + OnRead func(n int64) // must not be nil + + mu sync.Mutex + read int64 + err error +} + +// NewBodyWrapper creates a new BodyWrapper. +// +// The onRead attribute is a callback that will be called every time the data +// is read, with the number of bytes being read. +func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper { + return &BodyWrapper{ + ReadCloser: body, + OnRead: onRead, + } +} + +// Read reads the data from the io.ReadCloser, and stores the number of bytes +// read and the error. +func (w *BodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + + w.updateReadData(n1, err) + w.OnRead(n1) + return n, err +} + +func (w *BodyWrapper) updateReadData(n int64, err error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.read += n + if err != nil { + w.err = err + } +} + +// Closes closes the io.ReadCloser. 
+func (w *BodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +// BytesRead returns the number of bytes read up to this point. +func (w *BodyWrapper) BytesRead() int64 { + w.mu.Lock() + defer w.mu.Unlock() + + return w.read +} + +// Error returns the last error. +func (w *BodyWrapper) Error() error { + w.mu.Lock() + defer w.mu.Unlock() + + return w.err +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go new file mode 100644 index 0000000000..aea171fb26 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "net/http" + "sync" +) + +var _ http.ResponseWriter = &RespWriterWrapper{} + +// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc) +// that may be useful when using it in real life situations. +type RespWriterWrapper struct { + http.ResponseWriter + OnWrite func(n int64) // must not be nil + + mu sync.RWMutex + written int64 + statusCode int + err error + wroteHeader bool +} + +// NewRespWriterWrapper creates a new RespWriterWrapper. +// +// The onWrite attribute is a callback that will be called every time the data +// is written, with the number of bytes that were written. +func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper { + return &RespWriterWrapper{ + ResponseWriter: w, + OnWrite: onWrite, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } +} + +// Write writes the bytes array into the [ResponseWriter], and tracks the +// number of bytes written and last error. +func (w *RespWriterWrapper) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(http.StatusOK) + + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.OnWrite(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. +func (w *RespWriterWrapper) WriteHeader(statusCode int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(statusCode) +} + +// writeHeader persists the status code for span attribution, and propagates +// the call to the underlying ResponseWriter. +// It does not acquire a lock, and therefore assumes that is being handled by a +// parent method. +func (w *RespWriterWrapper) writeHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} + +// Flush implements [http.Flusher]. 
+func (w *RespWriterWrapper) Flush() { + w.WriteHeader(http.StatusOK) + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// BytesWritten returns the number of bytes written. +func (w *RespWriterWrapper) BytesWritten() int64 { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.written +} + +// BytesWritten returns the HTTP status code that was sent. +func (w *RespWriterWrapper) StatusCode() int { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.statusCode +} + +// Error returns the last error. +func (w *RespWriterWrapper) Error() error { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.err +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go new file mode 100644 index 0000000000..9cae4cab86 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -0,0 +1,165 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "context" + "fmt" + "net/http" + "os" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" +) + +type ResponseTelemetry struct { + StatusCode int + ReadBytes int64 + ReadError error + WriteBytes int64 + WriteError error +} + +type HTTPServer struct { + duplicate bool + + // Old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram +} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.RequestTraceAttrs(server, req), newHTTPServer{}.RequestTraceAttrs(server, req)...) + } + return oldHTTPServer{}.RequestTraceAttrs(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. +func (s HTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + if s.duplicate { + return append(oldHTTPServer{}.ResponseTraceAttrs(resp), newHTTPServer{}.ResponseTraceAttrs(resp)...) + } + return oldHTTPServer{}.ResponseTraceAttrs(resp) +} + +// Route returns the attribute for the route. 
+// Route returns the attribute for the route.
+func (s HTTPServer) Route(route string) attribute.KeyValue {
+	return oldHTTPServer{}.Route(route)
+}
+
+// Status returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func (s HTTPServer) Status(code int) (codes.Code, string) {
+	if code < 100 || code >= 600 {
+		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	if code >= 500 {
+		return codes.Error, ""
+	}
+	return codes.Unset, ""
+}
+
+type MetricData struct {
+	ServerName           string
+	Req                  *http.Request
+	StatusCode           int
+	AdditionalAttributes []attribute.KeyValue
+
+	RequestSize  int64
+	ResponseSize int64
+	ElapsedTime  float64
+}
+
+func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) {
+	if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
+		// This will happen if an HTTPServer{} is used instead of NewHTTPServer.
+		return
+	}
+
+	attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
+	o := metric.WithAttributeSet(attribute.NewSet(attributes...))
+	addOpts := []metric.AddOption{o} // Allocate vararg slice once.
+	s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
+	s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
+	s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
+
+	// TODO: Duplicate Metrics
+}
+
+func NewHTTPServer(meter metric.Meter) HTTPServer {
+	env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
+	duplicate := env == "http/dup"
+	server := HTTPServer{
+		duplicate: duplicate,
+	}
+	server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter)
+	return server
+}
+
+type HTTPClient struct {
+	duplicate bool
+}
+
+func NewHTTPClient() HTTPClient {
+	env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
+	return HTTPClient{duplicate: env == "http/dup"}
+}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
+func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+	if c.duplicate {
+		return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...)
+	}
+	return oldHTTPClient{}.RequestTraceAttrs(req)
+}
+
+// ResponseTraceAttrs returns trace attributes for an HTTP response received by a client.
+func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+	if c.duplicate {
+		return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...)
+	}
+
+	return oldHTTPClient{}.ResponseTraceAttrs(resp)
+}
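Both constructors read OTEL_SEMCONV_STABILITY_OPT_IN exactly once, at construction time. A short sketch of the opt-in behavior ("http/dup" is the only value recognized above; req is assumed to be in scope):

	os.Setenv("OTEL_SEMCONV_STABILITY_OPT_IN", "http/dup")
	c := semconv.NewHTTPClient() // duplicate == true
	attrs := c.RequestTraceAttrs(req) // semconv v1.20.0 attributes with the v1.26.0 ones appended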
+func (c HTTPClient) Status(code int) (codes.Code, string) {
+	if code < 100 || code >= 600 {
+		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	if code >= 400 {
+		return codes.Error, ""
+	}
+	return codes.Unset, ""
+}
+
+func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
+	if c.duplicate {
+		return newHTTPClient{}.ErrorType(err)
+	}
+
+	return attribute.KeyValue{}
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
new file mode 100644
index 0000000000..745b8c67bc
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go
@@ -0,0 +1,348 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+type newHTTPServer struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+func (n newHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue {
+	count := 3 // ServerAddress, Method, Scheme
+
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
+		host, p = splitHostPort(server)
+		if p < 0 {
+			_, p = splitHostPort(req.Host)
+		}
+	}
+
+	hostPort := requiredHTTPPort(req.TLS != nil, p)
+	if hostPort > 0 {
+		count++
+	}
+
+	method, methodOriginal := n.method(req.Method)
+	if methodOriginal != (attribute.KeyValue{}) {
+		count++
+	}
+
+	scheme := n.scheme(req.TLS != nil)
+
+	if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+		// file-path that would be interpreted with a sock family.
+		count++
+		if peerPort > 0 {
+			count++
+		}
+	}
+
+	useragent := req.UserAgent()
+	if useragent != "" {
+		count++
+	}
+
+	clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+	if clientIP != "" {
+		count++
+	}
+
+	if req.URL != nil && req.URL.Path != "" {
+		count++
+	}
+
+	protoName, protoVersion := netProtocol(req.Proto)
+	if protoName != "" && protoName != "http" {
+		count++
+	}
+	if protoVersion != "" {
+		count++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, count)
+	attrs = append(attrs,
+		semconvNew.ServerAddress(host),
+		method,
+		scheme,
+	)
+
+	if hostPort > 0 {
+		attrs = append(attrs, semconvNew.ServerPort(hostPort))
+	}
+	if methodOriginal != (attribute.KeyValue{}) {
+		attrs = append(attrs, methodOriginal)
+	}
+
+	if peer, peerPort := splitHostPort(req.RemoteAddr); peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+		// file-path that would be interpreted with a sock family.
+		attrs = append(attrs, semconvNew.NetworkPeerAddress(peer))
+		if peerPort > 0 {
+			attrs = append(attrs, semconvNew.NetworkPeerPort(peerPort))
+		}
+	}
+
+	if useragent := req.UserAgent(); useragent != "" {
+		attrs = append(attrs, semconvNew.UserAgentOriginal(useragent))
+	}
+
+	if clientIP != "" {
+		attrs = append(attrs, semconvNew.ClientAddress(clientIP))
+	}
+
+	if req.URL != nil && req.URL.Path != "" {
+		attrs = append(attrs, semconvNew.URLPath(req.URL.Path))
+	}
+
+	if protoName != "" && protoName != "http" {
+		attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
+	}
+	if protoVersion != "" {
+		attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
+	}
+
+	return attrs
+}
+
+func (n newHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) {
+	if method == "" {
+		return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
+	}
+	if attr, ok := methodLookup[method]; ok {
+		return attr, attribute.KeyValue{}
+	}
+
+	orig := semconvNew.HTTPRequestMethodOriginal(method)
+	if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
+		return attr, orig
+	}
+	return semconvNew.HTTPRequestMethodGet, orig
+}
+
+func (n newHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
+	if https {
+		return semconvNew.URLScheme("https")
+	}
+	return semconvNew.URLScheme("http")
+}
+
+// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response.
+//
+// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted.
+func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue {
+	var count int
+
+	if resp.ReadBytes > 0 {
+		count++
+	}
+	if resp.WriteBytes > 0 {
+		count++
+	}
+	if resp.StatusCode > 0 {
+		count++
+	}
+
+	attributes := make([]attribute.KeyValue, 0, count)
+
+	if resp.ReadBytes > 0 {
+		attributes = append(attributes,
+			semconvNew.HTTPRequestBodySize(int(resp.ReadBytes)),
+		)
+	}
+	if resp.WriteBytes > 0 {
+		attributes = append(attributes,
+			semconvNew.HTTPResponseBodySize(int(resp.WriteBytes)),
+		)
+	}
+	if resp.StatusCode > 0 {
+		attributes = append(attributes,
+			semconvNew.HTTPResponseStatusCode(resp.StatusCode),
+		)
+	}
+
+	return attributes
+}
+
+// Route returns the attribute for the route.
+func (n newHTTPServer) Route(route string) attribute.KeyValue {
+	return semconvNew.HTTPRoute(route)
+}
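A quick sketch of the method normalization implemented above, which the server and client paths share: a known method in a non-standard casing maps to the well-known attribute while the original spelling is preserved, and an unknown method falls back to GET:

	attr, orig := newHTTPServer{}.method("get")
	// attr: http.request.method = "GET"
	// orig: http.request.method_original = "get"
	attr, orig = newHTTPServer{}.method("PURGE")
	// attr: http.request.method = "GET" (fallback)
	// orig: http.request.method_original = "PURGE"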
+type newHTTPClient struct{}
+
+// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
+func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
+	/*
+	   below attributes are returned:
+	   - http.request.method
+	   - http.request.method.original
+	   - url.full
+	   - server.address
+	   - server.port
+	   - network.protocol.name
+	   - network.protocol.version
+	*/
+	numOfAttributes := 3 // Method, URL, and server address are always returned.
+
+	var urlHost string
+	if req.URL != nil {
+		urlHost = req.URL.Host
+	}
+	var requestHost string
+	var requestPort int
+	for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
+		requestHost, requestPort = splitHostPort(hostport)
+		if requestHost != "" || requestPort > 0 {
+			break
+		}
+	}
+
+	eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
+	if eligiblePort > 0 {
+		numOfAttributes++
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		numOfAttributes++
+	}
+
+	protoName, protoVersion := netProtocol(req.Proto)
+	if protoName != "" && protoName != "http" {
+		numOfAttributes++
+	}
+	if protoVersion != "" {
+		numOfAttributes++
+	}
+
+	method, originalMethod := n.method(req.Method)
+	if originalMethod != (attribute.KeyValue{}) {
+		numOfAttributes++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, numOfAttributes)
+
+	attrs = append(attrs, method)
+	if originalMethod != (attribute.KeyValue{}) {
+		attrs = append(attrs, originalMethod)
+	}
+
+	var u string
+	if req.URL != nil {
+		// Remove any username/password info that may be in the URL.
+		userinfo := req.URL.User
+		req.URL.User = nil
+		u = req.URL.String()
+		// Restore any username/password info that was removed.
+		req.URL.User = userinfo
+	}
+	attrs = append(attrs, semconvNew.URLFull(u))
+
+	attrs = append(attrs, semconvNew.ServerAddress(requestHost))
+	if eligiblePort > 0 {
+		attrs = append(attrs, semconvNew.ServerPort(eligiblePort))
+	}
+
+	if protoName != "" && protoName != "http" {
+		attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
+	}
+	if protoVersion != "" {
+		attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
+	}
+
+	return attrs
+}
+
+// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client.
+func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
+	/*
+	   below attributes are returned:
+	   - http.response.status_code
+	   - error.type
+	*/
+	var count int
+	if resp.StatusCode > 0 {
+		count++
+	}
+
+	if isErrorStatusCode(resp.StatusCode) {
+		count++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, count)
+	if resp.StatusCode > 0 {
+		attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode))
+	}
+
+	if isErrorStatusCode(resp.StatusCode) {
+		errorType := strconv.Itoa(resp.StatusCode)
+		attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType))
+	}
+	return attrs
+}
+
+func (n newHTTPClient) ErrorType(err error) attribute.KeyValue {
+	t := reflect.TypeOf(err)
+	var value string
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconvNew.ErrorTypeOther + } + + return semconvNew.ErrorTypeKey.String(value) +} + +func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go new file mode 100644 index 0000000000..e6e14924f5 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -0,0 +1,98 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "net" + "net/http" + "strconv" + "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +// splitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func splitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport. + return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) // nolint: gosec // Byte size checked 16 above. 
+} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} + +var methodLookup = map[string]attribute.KeyValue{ + http.MethodConnect: semconvNew.HTTPRequestMethodConnect, + http.MethodDelete: semconvNew.HTTPRequestMethodDelete, + http.MethodGet: semconvNew.HTTPRequestMethodGet, + http.MethodHead: semconvNew.HTTPRequestMethodHead, + http.MethodOptions: semconvNew.HTTPRequestMethodOptions, + http.MethodPatch: semconvNew.HTTPRequestMethodPatch, + http.MethodPost: semconvNew.HTTPRequestMethodPost, + http.MethodPut: semconvNew.HTTPRequestMethodPut, + http.MethodTrace: semconvNew.HTTPRequestMethodTrace, +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go new file mode 100644 index 0000000000..c999b05e67 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -0,0 +1,192 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +import ( + "errors" + "io" + "net/http" + "slices" + "strings" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +type oldHTTPServer struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +func (o oldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPServerRequest(server, req) +} + +// ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. +// +// If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
+func (o oldHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.KeyValue { + attributes := []attribute.KeyValue{} + + if resp.ReadBytes > 0 { + attributes = append(attributes, semconv.HTTPRequestContentLength(int(resp.ReadBytes))) + } + if resp.ReadError != nil && !errors.Is(resp.ReadError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.read_error", resp.ReadError.Error())) + } + if resp.WriteBytes > 0 { + attributes = append(attributes, semconv.HTTPResponseContentLength(int(resp.WriteBytes))) + } + if resp.StatusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(resp.StatusCode)) + } + if resp.WriteError != nil && !errors.Is(resp.WriteError, io.EOF) { + // This is not in the semantic conventions, but is historically provided + attributes = append(attributes, attribute.String("http.write_error", resp.WriteError.Error())) + } + + return attributes +} + +// Route returns the attribute for the route. +func (o oldHTTPServer) Route(route string) attribute.KeyValue { + return semconv.HTTPRoute(route) +} + +// HTTPStatusCode returns the attribute for the HTTP status code. +// This is a temporary function needed by metrics. This will be removed when MetricsRequest is added. +func HTTPStatusCode(status int) attribute.KeyValue { + return semconv.HTTPStatusCode(status) +} + +// Server HTTP metrics. +const ( + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + var err error + requestBytesCounter, err := meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + serverLatencyMeasure, err := meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, serverLatencyMeasure +} + +func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + n := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + o.methodMetric(req.Method), + o.scheme(req.TLS != nil), + semconv.NetHostName(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.NetHostPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} + +func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconv.HTTPSchemeHTTPS + } + return semconv.HTTPSchemeHTTP +} + +type oldHTTPClient struct{} + +func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go new file mode 100644 index 0000000000..7aa5f99e81 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +// Generate semconvutil package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go new file mode 100644 index 0000000000..a73bb06e90 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -0,0 +1,575 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/semconvutil/httpconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// HTTPClientResponse returns trace attributes for an HTTP response received by a
+// client from a server. It will return the following attributes if the related
+// values are defined in resp: "http.status_code",
+// "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the request
+// contained in resp. For example:
+//
+//	append(HTTPClientResponse(resp), ClientRequest(resp.Request)...)
+func HTTPClientResponse(resp *http.Response) []attribute.KeyValue {
+	return hc.ClientResponse(resp)
+}
+
+// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.url", "http.method",
+// "net.peer.name". The following attributes are returned if the related values
+// are defined in req: "net.peer.port", "user_agent.original",
+// "http.request_content_length".
+func HTTPClientRequest(req *http.Request) []attribute.KeyValue {
+	return hc.ClientRequest(req)
+}
+
+// HTTPClientRequestMetrics returns metric attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.method", "net.peer.name".
+// The following attributes are returned if the
+// related values are defined in req: "net.peer.port".
+func HTTPClientRequestMetrics(req *http.Request) []attribute.KeyValue {
+	return hc.ClientRequestMetrics(req)
+}
+
+// HTTPClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func HTTPClientStatus(code int) (codes.Code, string) {
+	return hc.ClientStatus(code)
+}
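As the HTTPClientResponse documentation above notes, a complete client-span attribute set is assembled by combining response and request attributes. A minimal sketch (resp is an *http.Response with a non-nil Request, and span is assumed to be in scope):

	attrs := append(semconvutil.HTTPClientResponse(resp), semconvutil.HTTPClientRequest(resp.Request)...)
	span.SetAttributes(attrs...)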
+// HTTPServerRequest returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.target", "net.host.name". The following attributes are returned if
+// the related values are defined in req: "net.host.port", "net.sock.peer.addr",
+// "net.sock.peer.port", "user_agent.original", "http.client_ip".
+func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue {
+	return hc.ServerRequest(server, req)
+}
+
+// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "net.host.name". The following attributes are returned if the related
+// values are defined in req: "net.host.port".
+func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+	return hc.ServerRequestMetrics(server, req)
+}
+
+// HTTPServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func HTTPServerStatus(code int) (codes.Code, string) {
+	return hc.ServerStatus(code)
+}
+
+// httpConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type httpConv struct {
+	NetConv *netConv
+
+	HTTPClientIPKey              attribute.Key
+	HTTPMethodKey                attribute.Key
+	HTTPRequestContentLengthKey  attribute.Key
+	HTTPResponseContentLengthKey attribute.Key
+	HTTPRouteKey                 attribute.Key
+	HTTPSchemeHTTP               attribute.KeyValue
+	HTTPSchemeHTTPS              attribute.KeyValue
+	HTTPStatusCodeKey            attribute.Key
+	HTTPTargetKey                attribute.Key
+	HTTPURLKey                   attribute.Key
+	UserAgentOriginalKey         attribute.Key
+}
+
+var hc = &httpConv{
+	NetConv: nc,
+
+	HTTPClientIPKey:              semconv.HTTPClientIPKey,
+	HTTPMethodKey:                semconv.HTTPMethodKey,
+	HTTPRequestContentLengthKey:  semconv.HTTPRequestContentLengthKey,
+	HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+	HTTPRouteKey:                 semconv.HTTPRouteKey,
+	HTTPSchemeHTTP:               semconv.HTTPSchemeHTTP,
+	HTTPSchemeHTTPS:              semconv.HTTPSchemeHTTPS,
+	HTTPStatusCodeKey:            semconv.HTTPStatusCodeKey,
+	HTTPTargetKey:                semconv.HTTPTargetKey,
+	HTTPURLKey:                   semconv.HTTPURLKey,
+	UserAgentOriginalKey:         semconv.UserAgentOriginalKey,
+}
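hc delegates all host parsing to splitHostPort (defined alongside netConv in netconv.go later in this diff); a few illustrative inputs and the host/port pairs it yields:

	splitHostPort("example.com:8080") // "example.com", 8080
	splitHostPort("[::1]:443")        // "::1", 443
	splitHostPort("example.com")      // "example.com", -1 (no port)
	splitHostPort(":8080")            // "", 8080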
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. The following attributes are returned if the related values
+// are defined in resp: "http.status_code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the request
+// contained in resp. For example:
+//
+//	append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.status_code                int
+	http.response_content_length    int
+	*/
+	var n int
+	if resp.StatusCode > 0 {
+		n++
+	}
+	if resp.ContentLength > 0 {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	if resp.StatusCode > 0 {
+		attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
+	}
+	if resp.ContentLength > 0 {
+		attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
+	}
+	return attrs
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.method",
+// "net.peer.name". The following attributes are returned if the related values
+// are defined in req: "net.peer.port", "user_agent.original",
+// "http.request_content_length".
+func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.method                     string
+	user_agent.original             string
+	http.url                        string
+	net.peer.name                   string
+	net.peer.port                   int
+	http.request_content_length     int
+	*/
+
+	/* The following semantic conventions are not returned:
+	http.status_code                This requires the response. See ClientResponse.
+	http.response_content_length    This requires the response. See ClientResponse.
+	net.sock.family                 This requires the socket used.
+	net.sock.peer.addr              This requires the socket used.
+	net.sock.peer.name              This requires the socket used.
+	net.sock.peer.port              This requires the socket used.
+	http.resend_count               This is something outside of a single request.
+	net.protocol.name               The value in the Request is ignored, and the go client will always use "http".
+	net.protocol.version            The value in the Request is ignored, and the go client will always use 1.1 or 2.0.
+	*/
+	n := 3 // Method, URL, and peer name are always returned.
+	var h string
+	if req.URL != nil {
+		h = req.URL.Host
+	}
+	peer, p := firstHostPort(h, req.Header.Get("Host"))
+	port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+	if port > 0 {
+		n++
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		n++
+	}
+	if req.ContentLength > 0 {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.method(req.Method))
+
+	var u string
+	if req.URL != nil {
+		// Remove any username/password info that may be in the URL.
+		userinfo := req.URL.User
+		req.URL.User = nil
+		u = req.URL.String()
+		// Restore any username/password info that was removed.
+		req.URL.User = userinfo
+	}
+	attrs = append(attrs, c.HTTPURLKey.String(u))
+
+	attrs = append(attrs, c.NetConv.PeerName(peer))
+	if port > 0 {
+		attrs = append(attrs, c.NetConv.PeerPort(port))
+	}
+
+	if useragent != "" {
+		attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+	}
+
+	if l := req.ContentLength; l > 0 {
+		attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+	}
+
+	return attrs
+}
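A small sketch of what ClientRequest yields for a typical request (illustrative values; the userinfo stripping and default-port elision follow from the code above):

	req, _ := http.NewRequest(http.MethodGet, "https://user:pass@example.com/path", nil)
	attrs := hc.ClientRequest(req)
	// http.method = "GET"
	// http.url = "https://example.com/path" (userinfo removed)
	// net.peer.name = "example.com" (the https default port 443 is elided)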
+// ClientRequestMetrics returns metric attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.method", "net.peer.name".
+// The following attributes are returned if the related values
+// are defined in req: "net.peer.port".
+func (c *httpConv) ClientRequestMetrics(req *http.Request) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.method       string
+	net.peer.name     string
+	net.peer.port     int
+	*/
+
+	n := 2 // method, peer name.
+	var h string
+	if req.URL != nil {
+		h = req.URL.Host
+	}
+	peer, p := firstHostPort(h, req.Header.Get("Host"))
+	port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+	if port > 0 {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	attrs = append(attrs, c.method(req.Method), c.NetConv.PeerName(peer))
+
+	if port > 0 {
+		attrs = append(attrs, c.NetConv.PeerPort(port))
+	}
+
+	return attrs
+}
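Both the client and server paths elide scheme-default ports through requiredHTTPPort, defined earlier in this package; for example:

	requiredHTTPPort(true, 443)  // -1: default https port, not recorded
	requiredHTTPPort(true, 8443) // 8443: non-default, recorded
	requiredHTTPPort(false, 80)  // -1: default http port, not recorded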
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.target", "net.host.name". The following attributes are returned if
+// the related values are defined in req: "net.host.port", "net.sock.peer.addr",
+// "net.sock.peer.port", "user_agent.original", "http.client_ip",
+// "net.protocol.name", "net.protocol.version".
+func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.method             string
+	http.scheme             string
+	net.host.name           string
+	net.host.port           int
+	net.sock.peer.addr      string
+	net.sock.peer.port      int
+	user_agent.original     string
+	http.client_ip          string
+	net.protocol.name       string Note: not set if the value is "http".
+	net.protocol.version    string
+	http.target             string Note: doesn't include the query parameter.
+	*/
+
+	/* The following semantic conventions are not returned:
+	http.status_code                This requires the response.
+	http.request_content_length     This requires the len() of body, which can mutate it.
+	http.response_content_length    This requires the response.
+	http.route                      This is not available.
+	net.sock.peer.name              This would require a DNS lookup.
+	net.sock.host.addr              The request doesn't have access to the underlying socket.
+	net.sock.host.port              The request doesn't have access to the underlying socket.
+
+	*/
+	n := 4 // Method, scheme, proto, and host name.
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
+		host, p = splitHostPort(server)
+		if p < 0 {
+			_, p = splitHostPort(req.Host)
+		}
+	}
+	hostPort := requiredHTTPPort(req.TLS != nil, p)
+	if hostPort > 0 {
+		n++
+	}
+	peer, peerPort := splitHostPort(req.RemoteAddr)
+	if peer != "" {
+		n++
+		if peerPort > 0 {
+			n++
+		}
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		n++
+	}
+
+	clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+	if clientIP != "" {
+		n++
+	}
+
+	var target string
+	if req.URL != nil {
+		target = req.URL.Path
+		if target != "" {
+			n++
+		}
+	}
+	protoName, protoVersion := netProtocol(req.Proto)
+	if protoName != "" && protoName != "http" {
+		n++
+	}
+	if protoVersion != "" {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.method(req.Method))
+	attrs = append(attrs, c.scheme(req.TLS != nil))
+	attrs = append(attrs, c.NetConv.HostName(host))
+
+	if hostPort > 0 {
+		attrs = append(attrs, c.NetConv.HostPort(hostPort))
+	}
+
+	if peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
+		// file-path that would be interpreted with a sock family.
+		attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
+		if peerPort > 0 {
+			attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
+		}
+	}
+
+	if useragent != "" {
+		attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+	}
+
+	if clientIP != "" {
+		attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
+	}
+
+	if target != "" {
+		attrs = append(attrs, c.HTTPTargetKey.String(target))
+	}
+
+	if protoName != "" && protoName != "http" {
+		attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
+	}
+	if protoVersion != "" {
+		attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
+	}
+
+	return attrs
+}
+
+// ServerRequestMetrics returns metric attributes for an HTTP request received
+// by a server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "net.host.name". The following attributes are returned if the related
+// values are defined in req: "net.host.port".
+func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.scheme             string
+	http.route              string
+	http.method             string
+	http.status_code        int
+	net.host.name           string
+	net.host.port           int
+	net.protocol.name       string Note: not set if the value is "http".
+	net.protocol.version    string
+	*/
+
+	n := 3 // Method, scheme, and host name.
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.methodMetric(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + if protoName != "" { + attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) + } + + return attrs +} + +func (c *httpConv) method(method string) attribute.KeyValue { + if method == "" { + return c.HTTPMethodKey.String(http.MethodGet) + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return c.HTTPSchemeHTTPS + } + return c.HTTPSchemeHTTP +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +// Return the request host and port from the first non-empty source. +func firstHostPort(source ...string) (host string, port int) { + for _, hostport := range source { + host, port = splitHostPort(hostport) + if host != "" || port > 0 { + break + } + } + return +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func (c *httpConv) ClientStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func (c *httpConv) ServerStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go new file mode 100644 index 0000000000..b80a1db61f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -0,0 +1,205 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/semconvutil/netconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +import ( + "net" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" +) + +// NetTransport returns a trace attribute describing the transport protocol of the +// passed network. See the net.Dial for information about acceptable network +// values. +func NetTransport(network string) attribute.KeyValue { + return nc.Transport(network) +} + +// netConv are the network semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type netConv struct { + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetProtocolName attribute.Key + NetProtocolVersion attribute.Key + NetSockFamilyKey attribute.Key + NetSockPeerAddrKey attribute.Key + NetSockPeerPortKey attribute.Key + NetSockHostAddrKey attribute.Key + NetSockHostPortKey attribute.Key + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportInProc attribute.KeyValue +} + +var nc = &netConv{ + NetHostNameKey: semconv.NetHostNameKey, + NetHostPortKey: semconv.NetHostPortKey, + NetPeerNameKey: semconv.NetPeerNameKey, + NetPeerPortKey: semconv.NetPeerPortKey, + NetProtocolName: semconv.NetProtocolNameKey, + NetProtocolVersion: semconv.NetProtocolVersionKey, + NetSockFamilyKey: semconv.NetSockFamilyKey, + NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, + NetSockPeerPortKey: semconv.NetSockPeerPortKey, + NetSockHostAddrKey: semconv.NetSockHostAddrKey, + NetSockHostPortKey: semconv.NetSockHostPortKey, + NetTransportOther: semconv.NetTransportOther, + NetTransportTCP: semconv.NetTransportTCP, + NetTransportUDP: semconv.NetTransportUDP, + NetTransportInProc: semconv.NetTransportInProc, +} + +func (c *netConv) Transport(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return c.NetTransportTCP + case "udp", "udp4", "udp6": + return c.NetTransportUDP + case "unix", "unixgram", "unixpacket": + return c.NetTransportInProc + default: + // "ip:*", "ip4:*", and "ip6:*" all are considered other. + return c.NetTransportOther + } +} + +// Host returns attributes for a network host address. +func (c *netConv) Host(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.HostName(h)) + if p > 0 { + attrs = append(attrs, c.HostPort(p)) + } + return attrs +} + +func (c *netConv) HostName(name string) attribute.KeyValue { + return c.NetHostNameKey.String(name) +} + +func (c *netConv) HostPort(port int) attribute.KeyValue { + return c.NetHostPortKey.Int(port) +} + +func family(network, address string) string { + switch network { + case "unix", "unixgram", "unixpacket": + return "unix" + default: + if ip := net.ParseIP(address); ip != nil { + if ip.To4() == nil { + return "inet6" + } + return "inet" + } + } + return "" +} + +// Peer returns attributes for a network peer address. 
+func (c *netConv) Peer(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.PeerName(h)) + if p > 0 { + attrs = append(attrs, c.PeerPort(p)) + } + return attrs +} + +func (c *netConv) PeerName(name string) attribute.KeyValue { + return c.NetPeerNameKey.String(name) +} + +func (c *netConv) PeerPort(port int) attribute.KeyValue { + return c.NetPeerPortKey.Int(port) +} + +func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue { + return c.NetSockPeerAddrKey.String(addr) +} + +func (c *netConv) SockPeerPort(port int) attribute.KeyValue { + return c.NetSockPeerPortKey.Int(port) +} + +// splitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func splitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport. + return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. +} + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go new file mode 100644 index 0000000000..ea504e396f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" +) + +// Labeler is used to allow instrumented HTTP handlers to add custom attributes to +// the metrics recorded by the net/http instrumentation. +type Labeler struct { + mu sync.Mutex + attributes []attribute.KeyValue +} + +// Add attributes to a Labeler. +func (l *Labeler) Add(ls ...attribute.KeyValue) { + l.mu.Lock() + defer l.mu.Unlock() + l.attributes = append(l.attributes, ls...) +} + +// Get returns a copy of the attributes added to the Labeler. +func (l *Labeler) Get() []attribute.KeyValue { + l.mu.Lock() + defer l.mu.Unlock() + ret := make([]attribute.KeyValue, len(l.attributes)) + copy(ret, l.attributes) + return ret +} + +type labelerContextKeyType int + +const lablelerContextKey labelerContextKeyType = 0 + +// ContextWithLabeler returns a new context with the provided Labeler instance. +// Attributes added to the specified labeler will be injected into metrics +// emitted by the instrumentation. 
Only one labeller can be injected into the +// context. Injecting it multiple times will override the previous calls. +func ContextWithLabeler(parent context.Context, l *Labeler) context.Context { + return context.WithValue(parent, lablelerContextKey, l) +} + +// LabelerFromContext retrieves a Labeler instance from the provided context if +// one is available. If no Labeler was found in the provided context a new, empty +// Labeler is returned and the second return value is false. In this case it is +// safe to use the Labeler but any attributes added to it will not be used. +func LabelerFromContext(ctx context.Context) (*Labeler, bool) { + l, ok := ctx.Value(lablelerContextKey).(*Labeler) + if !ok { + l = &Labeler{} + } + return l, ok +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go new file mode 100644 index 0000000000..b4119d3438 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -0,0 +1,295 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + "net/http/httptrace" + "sync/atomic" + "time" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + + "go.opentelemetry.io/otel/trace" +) + +// Transport implements the http.RoundTripper interface and wraps +// outbound HTTP(S) requests with a span and enriches it with metrics. +type Transport struct { + rt http.RoundTripper + + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + metricAttributesFn func(*http.Request) []attribute.KeyValue + + semconv semconv.HTTPClient + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram +} + +var _ http.RoundTripper = &Transport{} + +// NewTransport wraps the provided http.RoundTripper with one that +// starts a span, injects the span context into the outbound request headers, +// and enriches it with metrics. +// +// If the provided http.RoundTripper is nil, http.DefaultTransport will be used +// as the base http.RoundTripper. +func NewTransport(base http.RoundTripper, opts ...Option) *Transport { + if base == nil { + base = http.DefaultTransport + } + + t := Transport{ + rt: base, + semconv: semconv.NewHTTPClient(), + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)), + WithSpanNameFormatter(defaultTransportFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) 
+ t.applyConfig(c) + t.createMeasures() + + return &t +} + +func (t *Transport) applyConfig(c *config) { + t.tracer = c.Tracer + t.meter = c.Meter + t.propagators = c.Propagators + t.spanStartOptions = c.SpanStartOptions + t.filters = c.Filters + t.spanNameFormatter = c.SpanNameFormatter + t.clientTrace = c.ClientTrace + t.metricAttributesFn = c.MetricAttributesFn +} + +func (t *Transport) createMeasures() { + var err error + t.requestBytesCounter, err = t.meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + t.responseBytesCounter, err = t.meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + t.latencyMeasure, err = t.meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) +} + +func defaultTransportFormatter(_ string, r *http.Request) string { + return "HTTP " + r.Method +} + +// RoundTrip creates a Span and propagates its context via the provided request's headers +// before handing the request to the configured base RoundTripper. The created span will +// end when the response body is closed or when a read from the body returns io.EOF. +func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { + requestStartTime := time.Now() + for _, f := range t.filters { + if !f(r) { + // Simply pass through to the base RoundTripper if a filter rejects the request + return t.rt.RoundTrip(r) + } + } + + tracer := t.tracer + + if tracer == nil { + if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options + + ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...) + + if t.clientTrace != nil { + ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) + } + + labeler, found := LabelerFromContext(ctx) + if !found { + ctx = ContextWithLabeler(ctx, labeler) + } + + r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. + + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, func(int64) {}) + if r.Body != nil && r.Body != http.NoBody { + r.Body = bw + } + + span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) + t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) + + res, err := t.rt.RoundTrip(r) + if err != nil { + // set error type attribute if the error is part of the predefined + // error types. + // otherwise, record it as an exception + if errType := t.semconv.ErrorType(err); errType.Valid() { + span.SetAttributes(errType) + } else { + span.RecordError(err) + } + + span.SetStatus(codes.Error, err.Error()) + span.End() + return res, err + } + + // metrics + metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) 
+ if res.StatusCode > 0 { + metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) + } + o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + + t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) + // For handling response bytes we leverage a callback when the client reads the http response + readRecordFunc := func(n int64) { + t.responseBytesCounter.Add(ctx, n, o) + } + + // traces + span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) + span.SetStatus(t.semconv.Status(res.StatusCode)) + + res.Body = newWrappedBody(span, readRecordFunc, res.Body) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + + t.latencyMeasure.Record(ctx, elapsedTime, o) + + return res, err +} + +func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if t.metricAttributesFn != nil { + attributeForRequest = t.metricAttributesFn(r) + } + return attributeForRequest +} + +// newWrappedBody returns a new and appropriately scoped *wrappedBody as an +// io.ReadCloser. If the passed body implements io.Writer, the returned value +// will implement io.ReadWriteCloser. +func newWrappedBody(span trace.Span, record func(n int64), body io.ReadCloser) io.ReadCloser { + // The successful protocol switch responses will have a body that + // implement an io.ReadWriteCloser. Ensure this interface type continues + // to be satisfied if that is the case. + if _, ok := body.(io.ReadWriteCloser); ok { + return &wrappedBody{span: span, record: record, body: body} + } + + // Remove the implementation of the io.ReadWriteCloser and only implement + // the io.ReadCloser. + return struct{ io.ReadCloser }{&wrappedBody{span: span, record: record, body: body}} +} + +// wrappedBody is the response body type returned by the transport +// instrumentation to complete a span. Errors encountered when using the +// response body are recorded in span tracking the response. +// +// The span tracking the response is ended when this body is closed. +// +// If the response body implements the io.Writer interface (i.e. for +// successful protocol switches), the wrapped body also will. +type wrappedBody struct { + span trace.Span + recorded atomic.Bool + record func(n int64) + body io.ReadCloser + read atomic.Int64 +} + +var _ io.ReadWriteCloser = &wrappedBody{} + +func (wb *wrappedBody) Write(p []byte) (int, error) { + // This will not panic given the guard in newWrappedBody. + n, err := wb.body.(io.Writer).Write(p) + if err != nil { + wb.span.RecordError(err) + wb.span.SetStatus(codes.Error, err.Error()) + } + return n, err +} + +func (wb *wrappedBody) Read(b []byte) (int, error) { + n, err := wb.body.Read(b) + // Record the number of bytes read + wb.read.Add(int64(n)) + + switch err { + case nil: + // nothing to do here but fall through to the return + case io.EOF: + wb.recordBytesRead() + wb.span.End() + default: + wb.span.RecordError(err) + wb.span.SetStatus(codes.Error, err.Error()) + } + return n, err +} + +// recordBytesRead is a function that ensures the number of bytes read is recorded once and only once. +func (wb *wrappedBody) recordBytesRead() { + // note: it is more performant (and equally correct) to use atomic.Bool over sync.Once here. In the event that + // two goroutines are racing to call this method, the number of bytes read will no longer increase. 
Using + // CompareAndSwap allows later goroutines to return quickly and not block waiting for the race winner to finish + // calling wb.record(wb.read.Load()). + if wb.recorded.CompareAndSwap(false, true) { + // Record the total number of bytes read + wb.record(wb.read.Load()) + } +} + +func (wb *wrappedBody) Close() error { + wb.recordBytesRead() + wb.span.End() + if wb.body != nil { + return wb.body.Close() + } + return nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go new file mode 100644 index 0000000000..502c1bdafc --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +// Version is the current release version of the otelhttp instrumentation. +func Version() string { + return "0.54.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +// +// Deprecated: Use [Version] instead. +func SemVersion() string { + return Version() +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/.codespellignore b/hack/tools/vendor/go.opentelemetry.io/otel/.codespellignore new file mode 100644 index 0000000000..6bf3abc41e --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/.codespellignore @@ -0,0 +1,9 @@ +ot +fo +te +collison +consequentially +ans +nam +valu +thirdparty diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/.codespellrc b/hack/tools/vendor/go.opentelemetry.io/otel/.codespellrc new file mode 100644 index 0000000000..e2cb3ea944 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/.codespellrc @@ -0,0 +1,10 @@ +# https://github.com/codespell-project/codespell +[codespell] +builtin = clear,rare,informal +check-filenames = +check-hidden = +ignore-words = .codespellignore +interactive = 1 +skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools +uri-ignore-words-list = * +write = diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/.gitattributes b/hack/tools/vendor/go.opentelemetry.io/otel/.gitattributes new file mode 100644 index 0000000000..314766e91b --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/.gitattributes @@ -0,0 +1,3 @@ +* text=auto eol=lf +*.{cmd,[cC][mM][dD]} text eol=crlf +*.{bat,[bB][aA][tT]} text eol=crlf diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/.gitignore b/hack/tools/vendor/go.opentelemetry.io/otel/.gitignore new file mode 100644 index 0000000000..895c7664be --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/.gitignore @@ -0,0 +1,22 @@ +.DS_Store +Thumbs.db + +.tools/ +venv/ +.idea/ +.vscode/ +*.iml +*.so +coverage.* +go.work +go.work.sum + +gen/ + +/example/dice/dice +/example/namedtracer/namedtracer +/example/otel-collector/otel-collector +/example/opencensus/opencensus +/example/passthrough/passthrough +/example/prometheus/prometheus +/example/zipkin/zipkin diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/.golangci.yml b/hack/tools/vendor/go.opentelemetry.io/otel/.golangci.yml new file mode 100644 index 0000000000..d9abe194d9 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -0,0 +1,304 @@ +# See https://github.com/golangci/golangci-lint#config-file 
+run:
+  issues-exit-code: 1 #Default
+  tests: true #Default
+
+linters:
+  # Disable everything by default so upgrades do not include new "default
+  # enabled" linters.
+  disable-all: true
+  # Specifically enable linters we want to use.
+  enable:
+    - asasalint
+    - bodyclose
+    - depguard
+    - errcheck
+    - errorlint
+    - godot
+    - gofumpt
+    - goimports
+    - gosec
+    - gosimple
+    - govet
+    - ineffassign
+    - misspell
+    - revive
+    - staticcheck
+    - tenv
+    - typecheck
+    - unconvert
+    - unused
+    - unparam
+
+issues:
+  # Maximum issues count per one linter.
+  # Set to 0 to disable.
+  # Default: 50
+  # Setting to unlimited so the linter is only run once to debug all issues.
+  max-issues-per-linter: 0
+  # Maximum count of issues with the same text.
+  # Set to 0 to disable.
+  # Default: 3
+  # Setting to unlimited so the linter is only run once to debug all issues.
+  max-same-issues: 0
+  # Excluding configuration per-path, per-linter, per-text and per-source.
+  exclude-rules:
+    # TODO: Having appropriate comments for exported objects helps development,
+    # even for objects in internal packages. Appropriate comments for all
+    # exported objects should be added and this exclusion removed.
+    - path: '.*internal/.*'
+      text: "exported (method|function|type|const) (.+) should have comment or be unexported"
+      linters:
+        - revive
+    # Yes, they are, but it's okay in a test.
+    - path: _test\.go
+      text: "exported func.*returns unexported type.*which can be annoying to use"
+      linters:
+        - revive
+    # Example test functions should be treated like main.
+    - path: example.*_test\.go
+      text: "calls to (.+) only in main[(][)] or init[(][)] functions"
+      linters:
+        - revive
+    # It's okay to not run gosec in a test.
+    - path: _test\.go
+      linters:
+        - gosec
+    # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand),
+    # as we commonly use it in tests and examples.
+    - text: "G404:"
+      linters:
+        - gosec
+    # Ignoring gosec G402: TLS MinVersion too low,
+    # as the https://pkg.go.dev/crypto/tls#Config handles the MinVersion default well.
+    - text: "G402: TLS MinVersion too low."
+      linters:
+        - gosec
+  include:
+    # revive exported should have comment or be unexported.
+    - EXC0012
+    # revive package comment should be of the form ...
+    - EXC0013
+
+linters-settings:
+  depguard:
+    rules:
+      non-tests:
+        files:
+          - "!$test"
+          - "!**/*test/*.go"
+          - "!**/internal/matchers/*.go"
+        deny:
+          - pkg: "testing"
+          - pkg: "github.com/stretchr/testify"
+          - pkg: "crypto/md5"
+          - pkg: "crypto/sha1"
+          - pkg: "crypto/**/pkix"
+      otlp-internal:
+        files:
+          - "!**/exporters/otlp/internal/**/*.go"
+        deny:
+          - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
+            desc: Do not use cross-module internal packages.
+      otlptrace-internal:
+        files:
+          - "!**/exporters/otlp/otlptrace/*.go"
+          - "!**/exporters/otlp/otlptrace/internal/**.go"
+        deny:
+          - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
+            desc: Do not use cross-module internal packages.
+      otlpmetric-internal:
+        files:
+          - "!**/exporters/otlp/otlpmetric/internal/*.go"
+          - "!**/exporters/otlp/otlpmetric/internal/**/*.go"
+        deny:
+          - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
+            desc: Do not use cross-module internal packages.
+ otel-internal: + files: + - "**/sdk/*.go" + - "**/sdk/**/*.go" + - "**/exporters/*.go" + - "**/exporters/**/*.go" + - "**/schema/*.go" + - "**/schema/**/*.go" + - "**/metric/*.go" + - "**/metric/**/*.go" + - "**/bridge/*.go" + - "**/bridge/**/*.go" + - "**/example/*.go" + - "**/example/**/*.go" + - "**/trace/*.go" + - "**/trace/**/*.go" + - "**/log/*.go" + - "**/log/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/internal$" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/attribute" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/internaltest" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/matchers" + desc: Do not use cross-module internal packages. + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io + misspell: + locale: US + ignore-words: + - cancelled + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. + # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) re-enable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/.lycheeignore b/hack/tools/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 0000000000..40d62fa2eb --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,6 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/hack/tools/vendor/go.opentelemetry.io/otel/.markdownlint.yaml new file mode 100644 index 0000000000..3202496c35 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/.markdownlint.yaml @@ -0,0 +1,29 @@ +# Default state for all rules +default: true + +# ul-style +MD004: false + +# hard-tabs +MD010: false + +# line-length +MD013: false + +# no-duplicate-header +MD024: + siblings_only: true + +#single-title +MD025: false + +# ol-prefix +MD029: + style: ordered + +# no-inline-html +MD033: false + +# fenced-code-language +MD040: false + diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/hack/tools/vendor/go.opentelemetry.io/otel/CHANGELOG.md new file mode 100644 index 0000000000..6107c17b89 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -0,0 +1,3162 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + + + + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. 
+ +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. 
(#5612)
+- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612)
+- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650)
+- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5705)
+- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5705)
+
+### Removed
+
+- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+
+## [1.28.0/0.50.0/0.4.0] 2024-07-02
+
+### Added
+
+- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`.
+  This method is used to check if an `Instrument` instance is a zero-value. (#5431)
+- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468)
+- The `go.opentelemetry.io/otel/semconv/v1.26.0` package.
+  The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476)
+- The `AssertRecordEqual` method to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499)
+- The `WithHeaders` option to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom HTTP headers while exporting spans. (#5530)
+
+### Changed
+
+- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for an empty span context. (#5457)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490)
+  - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes.
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`.
(#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490)
+- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice, which makes zero-allocation log processing possible using `SimpleProcessor`. (#5493)
+- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497)
+- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520)
+- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545)
+
+### Fixed
+
+- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376)
+- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434)
+- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435)
+- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456)
+- Fix counting the number of dropped attributes of a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464)
+- Fix panic in baggage creation when a member contains the `0x80` char in its key or value. (#5494)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508)
+- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514)
+- Fix stale timestamps reported by the last-value aggregation. (#5517)
+- Indicate that the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
+- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/otel/attribute` by reducing the number of allocations. (#5549)
+- Replace invalid percent-encoded octet sequences with the replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)
+
+## [1.27.0/0.49.0/0.3.0] 2024-05-21
+
+### Added
+
+- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263)
+- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276)
+- Add metrics in the `otel-collector` example. (#5283)
+- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric`. (#5304)
+  - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`.
+  - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument.
+- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349)
+
+### Changed
+
+- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189)
+- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189)
+- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when the `WithoutTimestamps` option is set. (#5241)
+- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print the `AttributeValueLengthLimit` and `AttributeCountLimit` fields now; instead, it prints the `DroppedAttributes` field. (#5272)
+- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286)
+- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305)
+- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either a non-empty `TraceState` or attributes are provided. (#5315)
+- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374)
+
+### Fixed
+
+- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306)
+- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311)
+- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365)
+- Fix the wrong package name in the error message when parsing an endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371)
+- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375)
+
+## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
+
+### Added
+
+- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134)
+- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194)
+- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`.
+  This new module contains the Go implementation of the OpenTelemetry Logs SDK.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
+  This new module contains an OTLP exporter that transmits log telemetry using HTTP.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
+  This new module contains an exporter that prints log records to STDOUT.
+  This module is unstable and breaking changes may be introduced.
+  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- The `go.opentelemetry.io/otel/semconv/v1.25.0` package.
+  The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254)
+
+### Changed
+
+- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
+- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
+- The `otel-collector` example now uses Docker Compose to bring up services instead of Kubernetes. (#5244)
+
+### Fixed
+
+- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159)
+
+## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
+
+### Added
+
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906)
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906)
+- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032)
+- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`.
+  This method is used to notify users if a log record will be emitted or not. (#5071)
+- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`.
+  This value represents an unset severity level. (#5072)
+- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076)
+- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
+  This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes.
+  At that point, users will be required to migrate their code, and this package will be deprecated and then removed. (#5085)
+- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100)
+- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108)
+- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116)
+- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117)
+- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111)
+- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528)
+
+### Changed
+
+- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151)
+  Because of that, `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError).
+  Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext), which was used before, is now deprecated.
+
+### Fixed
+
+- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027)
+- Prevent default `ErrorHandler` self-delegation. (#5137)
+- Update all dependencies to address [GO-2024-2687]. (#5139)
+
+### Removed
+
+- Drop support for [Go 1.20]. (#4967)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734)
+
+## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23
+
+This release is the last to support [Go 1.20].
+The next release will require at least [Go 1.21].
+
+### Added
+
+- Support [Go 1.22]. (#4890)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900)
+- The `go.opentelemetry.io/otel/log` module is added.
+  This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
+  This module is in an alpha state; it is subject to breaking changes.
+  See our [versioning policy](./VERSIONING.md) for more info. (#4961)
+- Add ARM64 platform to the compatibility testing suite. (#4994)
+
+### Fixed
+
+- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945)
+- Fix negative buckets in output of exponential histograms. (#4956)
+
+## [1.23.1] 2024-02-07
+
+### Fixed
+
+- Register all callbacks passed during observable instrument creation instead of just the last one multiple times in `go.opentelemetry.io/otel/sdk/metric`. (#4888)
+
+## [1.23.0] 2024-02-06
+
+This release contains the first stable, `v1`, release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808)
+- Experimental exemplar exporting is added to the metric SDK.
+  See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871)
+- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
+  This error is returned when a merge of two `Resource`s with different (non-empty) schema URLs is attempted. (#4876)
+
+### Changed
+
+- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict.
+  Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned.
+  It is up to the user to decide if they want to use the returned `Resource` or not.
+  It may have desired attributes overwritten or include stale semantic conventions. (#4876)
+
+### Fixed
+
+- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449)
+- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
+- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4827)
+
+## [1.23.0-rc.1] 2024-01-18
+
+This is a release candidate for the v1.23.0 release.
+That release is expected to include the `v1` release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+## [1.22.0/0.45.0] 2024-01-17
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
+  The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
+- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
+  The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
+- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
+  The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
+- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
+  The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
+- Experimental cardinality limiting is added to the metric SDK.
+  See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
+- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
+
+### Changed
+
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
+- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow the `v1.24.0` version of the OpenTelemetry specification. (#4754)
+- Record synchronous measurements when the passed context is canceled instead of dropping them in `go.opentelemetry.io/otel/sdk/metric`.
+  If you do not want to make a measurement when the context is cancelled, you need to handle it yourself (e.g. `if ctx.Err() != nil`). (#4671)
+- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
+- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
+- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
+- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
+- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
+- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes.
(#4818)
+- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
+
+### Fixed
+
+- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate the member value before percent-decoding. (#4755)
+- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
+- Fix the observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
+- Fix the baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
+- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
+- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
+
+## [1.21.0/0.44.0] 2023-11-16
+
+### Removed
+
+- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
+- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
+- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
+- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
+
+### Fixed
+
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
+
+## [1.20.0/0.43.0] 2023-11-10
+
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
+
+### Added
+
+- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge and replaces `opencensus.NewTracer`. (#4567)
+- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
+- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
+- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
+- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
+- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments (a usage sketch follows this list). (#4603)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
+- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622)
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
+- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
+- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
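Editor's note: a minimal usage sketch for `WithExplicitBucketBoundaries` above; it is not part of the upstream changelog. It assumes an existing `metric.Meter` named `meter`; the instrument name, unit, and boundaries are illustrative.

	// Create a histogram whose default explicit buckets are set at creation time.
	hist, err := meter.Float64Histogram(
		"request.duration",
		metric.WithUnit("s"),
		metric.WithExplicitBucketBoundaries(0.01, 0.1, 1, 10),
	)
	if err != nil {
		// handle the instrument creation error
	}
	_ = hist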
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
+- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
+- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
+  Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
+- Deprecate the `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
+
+### Changed
+
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the `metric.Producer` interface. (#4583)
+- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
+  This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
+  This extends the `Tracer` interface and is a breaking change for any existing implementation.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
+  This extends the `Span` interface and is a breaking change for any existing implementation.
+  Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`.
(#4679)
+
+### Fixed
+
+- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage`, as they were rendered as whitespace. (#4667)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, as they were rendered as whitespace. (#4699)
+- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
+
+## [1.19.0/0.42.0/0.0.7] 2023-09-28
+
+This release contains the first stable release of the OpenTelemetry Go [metric SDK].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
+- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
+- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+### Removed
+
+- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
+
+## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
+
+This is a release candidate for the v1.19.0/v0.42.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Changed
+
+- Allow '/' characters in metric instrument names.
(#4501)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+## [1.18.0/0.41.0/0.0.6] 2023-09-12
+
+This release drops the compatibility guarantee of [Go 1.19].
+
+### Added
+
+- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the Prometheus exporter's manual reader. (#4473)
+- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
+
+### Changed
+
+- Use a `TestingT` interface instead of the `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
+  The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
+
+### Removed
+
+- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
+- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
+- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
+
+## [1.17.0/0.40.0/0.0.5] 2023-08-28
+
+### Added
+
+- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Add support for exponential histogram aggregations.
+  A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
+- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable the addition of `_total` suffixes. (#4306)
+- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
+- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
+  The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
+- Accept 201 to 299 HTTP statuses as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
+- Document that the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381)
+- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present, in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374)
+- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`.
+
+### Changed
+
+- Starting from `v1.21.0` of semantic conventions, the `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
+- Log duplicate instrument conflicts at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
+- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
+- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
+- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
+- Count the `Collect` time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
+- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
+- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
+- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments they created. (#4333)
+- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user-provided context does not contain a deadline. (#4356, #4377)
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
+- Increase the instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
+
+### Removed
+
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/sdk/metric`.
+  Use the added `WithProducer` option instead. (#4346)
+- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric`.
+  Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
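+
+A sketch of the replacement pattern for the removed `Reader.RegisterProducer`: external producers are now supplied at reader construction with `WithProducer`. The OpenCensus bridge producer is used here purely as an example of an external `Producer`:
+
+```go
+package app
+
+import (
+    "go.opentelemetry.io/otel/bridge/opencensus"
+    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// newReader wires an external metric Producer into the reader at
+// construction time instead of calling the removed RegisterProducer.
+func newReader() sdkmetric.Reader {
+    return sdkmetric.NewManualReader(
+        sdkmetric.WithProducer(opencensus.NewMetricProducer()),
+    )
+}
+```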
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation`, the pipeline will use the default aggregation. (#4350)
+- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
+- Fix possible panic, deadlock, and race condition in the batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
+- Improve context cancellation handling in the batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
+- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
+- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
+- Fix a resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
+- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
+  OpenTelemetry dropped support for the Jaeger exporter in July 2023.
+  Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
+  or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423)
+- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
+  Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
+
+## [1.16.0/0.39.0] 2023-05-18
+
+This release contains the first stable release of the OpenTelemetry Go [metric API].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
+  The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
+- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
+  The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
+- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
+- The OTLP metrics exporter now supports the Exponential Histogram data type. (#4222)
+- Fix serialization of `time.Time` zero values in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
+
+### Changed
+
+- Use `strings.Cut()` instead of `strings.SplitN()` for better readability and memory use. (#4049)
+- `MeterProvider` returns noop meters once it has been shut down. (#4154)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
+  Use `go.opentelemetry.io/otel/metric` instead. (#4055)
+
+### Fixed
+
+- Fix the build for BSD-based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
+
+## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
+
+This is a release candidate for the v1.16.0/v0.39.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Support a global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
+  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for the global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
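+
+A sketch of the global `MeterProvider` helpers described above (the no-op provider and scope name are placeholders for a real SDK provider and instrumentation scope):
+
+```go
+package main
+
+import (
+    "go.opentelemetry.io/otel"
+    "go.opentelemetry.io/otel/metric/noop"
+)
+
+func main() {
+    // Install a MeterProvider globally.
+    otel.SetMeterProvider(noop.NewMeterProvider())
+
+    // Retrieve the global provider anywhere in the program.
+    mp := otel.GetMeterProvider()
+
+    // Convenience helper, equivalent to otel.GetMeterProvider().Meter(...).
+    meter := otel.Meter("example.com/instrumentation")
+    _, _ = mp, meter
+}
+```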
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
+  This stages the metric API to be released as a stable module. (#4038)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/metric/global` package is removed.
+  Use `go.opentelemetry.io/otel` instead. (#4039)
+
+## [1.15.1/0.38.1] 2023-05-02
+
+### Fixed
+
+- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
+
+## [1.15.0/0.38.0] 2023-04-27
+
+### Added
+
+- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916)
+- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949)
+- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970)
+- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971)
+  - The `AddConfig` used to hold configuration for addition measurements
+    - `NewAddConfig` used to create a new `AddConfig`
+    - `AddOption` used to configure an `AddConfig`
+  - The `RecordConfig` used to hold configuration for recorded measurements
+    - `NewRecordConfig` used to create a new `RecordConfig`
+    - `RecordOption` used to configure a `RecordConfig`
+  - The `ObserveConfig` used to hold configuration for observed measurements
+    - `NewObserveConfig` used to create a new `ObserveConfig`
+    - `ObserveOption` used to configure an `ObserveConfig`
+- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`.
+  They return an option used during a measurement that defines the attribute `Set` associated with the measurement. (#3971)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
+
+### Changed
+
+- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870)
+- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`.
+  This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916)
+- Move the no-op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941)
+  - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider`
+- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966)
+- Wrap the `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to improve the error message when encountering generic gRPC errors. (#3974)
+- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+  - The `Int64Counter.Add` method now accepts `...AddOption`
+  - The `Float64Counter.Add` method now accepts `...AddOption`
+  - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
+  - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
+  - The `Int64Histogram.Record` method now accepts `...RecordOption`
+  - The `Float64Histogram.Record` method now accepts `...RecordOption`
+  - The `Int64Observer.Observe` method now accepts `...ObserveOption`
+  - The `Float64Observer.Observe` method now accepts `...ObserveOption`
+- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+  - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
+  - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
+- Move the global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
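+
+A sketch of the option-based measurement API described above, using the current `go.opentelemetry.io/otel/metric` package where these options now live; the instrument and attribute names are illustrative:
+
+```go
+package app
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel/attribute"
+    "go.opentelemetry.io/otel/metric"
+)
+
+// recordRequest passes attributes through a WithAttributes option rather
+// than as variadic attribute.KeyValue arguments.
+func recordRequest(ctx context.Context, meter metric.Meter) error {
+    counter, err := meter.Int64Counter("app.requests")
+    if err != nil {
+        return err
+    }
+    counter.Add(ctx, 1, metric.WithAttributes(attribute.String("route", "/home")))
+    return nil
+}
+```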
+
+### Fixed
+
+- `TracerProvider` allows calling `Tracer()` while it's shutting down.
+  It used to deadlock. (#3924)
+- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
+- Fix a data race in the `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
+- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
+  Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
+
+## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812)
+- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
+- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  Both the `DataPoint` and `HistogramDataPoint` types from that package have a new `Exemplars` field containing the sampled exemplars for their timeseries. (#3849)
+- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
+- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
+- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)
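+
+A sketch of the `WithHostID` option added above (the additional detector option is included only for context):
+
+```go
+package app
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel/sdk/resource"
+)
+
+// newResource builds a resource whose attributes include host.id,
+// detected via the WithHostID option.
+func newResource(ctx context.Context) (*resource.Resource, error) {
+    return resource.New(ctx,
+        resource.WithHostID(),
+        resource.WithTelemetrySDK(),
+    )
+}
+```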
+
+### Changed
+
+- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
+- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
+- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in the OpenTracing bridge. (#3833)
+- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
+- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
+- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
+- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
+- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- The internal logging changes the verbosity level of info to `V(4)` and the verbosity level of debug to `V(8)`. (#3900)
+
+### Fixed
+
+- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown. (#3845)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
+- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
+- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+  Use the added `float64` instrument configuration instead. (#3895)
+- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+  Use the added `int64` instrument configuration instead. (#3895)
+- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`; use `NewMeterProvider().Meter("")` instead. (#3893)
+
+## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+This release drops the compatibility guarantee of [Go 1.18].
+
+### Added
+
+- Support a global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
+  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for the global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Dropped compatibility testing for [Go 1.18].
+  The project no longer guarantees support for this version of Go. (#3813)
+
+### Fixed
+
+- Handle empty environment variables as if they were not set. (#3764)
+- Clarify that the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
+- Fix race conditions in `go.opentelemetry.io/otel/exporters/prometheus` that could cause a panic. (#3899)
+- Fix sending nil `scopeInfo` to the metrics channel in `go.opentelemetry.io/otel/exporters/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
+  Use `go.opentelemetry.io/otel` instead. (#3818)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
+
+## [1.14.0/0.37.0/0.0.4] 2023-02-27
+
+This release is the last to support [Go 1.18].
+The next release will require at least [Go 1.19].
+
+### Added
+
+- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
+- Support [Go 1.20]. (#3693)
+- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
+  The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
+  - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeNameKey` -> `OTelScopeNameKey`
+    - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
+    - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
+    - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
+    - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
+    - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
+    - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
+    - `OtelStatusCodeError` -> `OTelStatusCodeError`
+  - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+    - `OtelScopeName` -> `OTelScopeName`
+    - `OtelScopeVersion` -> `OTelScopeVersion`
+    - `OtelLibraryName` -> `OTelLibraryName`
+    - `OtelLibraryVersion` -> `OTelLibraryVersion`
+    - `OtelStatusDescription` -> `OTelStatusDescription`
+- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
+  See the [README](./bridge/opentracing/README.md) for more information. (#3570)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
+- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
+  - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
+  - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout within which an export is attempted.
+
+### Changed
+
+- Fall back to `TextMapCarrier` when the carrier is not `HTTPHeaders` in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
+- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
+  This change is made to enable memory reuse by SDK users. (#3732)
+- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
+
+### Fixed
+
+- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
+- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
+- Remove the use of the deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
+- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
+- Data race issue in the OTLP exporter retry mechanism. (#3755, #3756)
+- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
+- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
+  Use the equivalent unit string instead. (#3776)
+  - Use `"1"` instead of `unit.Dimensionless`
+  - Use `"By"` instead of `unit.Bytes`
+  - Use `"ms"` instead of `unit.Milliseconds`
+
+## [1.13.0/0.36.0] 2023-02-07
+
+### Added
+
+- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
+  These functions ensure semantic convention type correctness. (#3675)
+
+### Fixed
+
+- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687)
+  - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
+  - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
+
+## [1.12.0/0.35.0] 2023-01-28
+
+### Added
+
+- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `int64` Observer callbacks during their creation. (#3507)
+- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `float64` Observer callbacks during their creation. (#3507)
+- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are used to enable external metric Producers. (#3524)
+- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
+  This new named function type is registered with a `Meter`. (#3564)
+- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
+  The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
+  - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+  - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+  - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+  The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+  The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+  The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+  These instruments are used as replacements for the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
+  - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+  - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+  - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+  - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+  - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+  - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+  - `Float64Counter` replaces the `syncfloat64.Counter`
+  - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+  - `Float64Histogram` replaces the `syncfloat64.Histogram`
+  - `Int64Counter` replaces the `syncint64.Counter`
+  - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+  - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+  This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+  This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+  The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
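+
+A hedged illustration of the `httpconv` helpers referenced above, as published in the `v1.17.0` generation of these packages (the server name is a placeholder):
+
+```go
+package app
+
+import (
+    "net/http"
+
+    "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
+    "go.opentelemetry.io/otel/trace"
+)
+
+// annotateServerSpan derives HTTP semantic convention attributes from an
+// incoming request and records them on the server span.
+func annotateServerSpan(span trace.Span, req *http.Request) {
+    span.SetAttributes(httpconv.ServerRequest("my-service", req)...)
+}
+```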
+
+### Changed
+
+- The Jaeger and Zipkin exporters use `github.com/go-logr/logr` as the logging interface and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+  - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+  - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+  - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+  - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+  This `Registration` can be used to unregister callbacks. (#3522)
+- The global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- The global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` in `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+  This fixes random sampling when using ID generators like `xray.IDGenerator` and increases parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+  Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+  - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+  - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+  - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+  - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+  - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+  - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed.
+  - The named `Callback` replaces the inline function parameter. (#3564)
+  - `Callback` is required to return an error. (#3576)
+  - `Callback` accepts the added `Observer` parameter.
+    This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584)
+  - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+  This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+  Instead, it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
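+
+A sketch of the revised `RegisterCallback` flow described above, written against the current `go.opentelemetry.io/otel/metric` API shape; the instrument name and depth function are illustrative:
+
+```go
+package app
+
+import (
+    "context"
+
+    "go.opentelemetry.io/otel/metric"
+)
+
+// observeQueueDepth registers a named Callback that reports values through
+// the Observer parameter instead of calling Observe on the instrument.
+func observeQueueDepth(meter metric.Meter, depth func() int64) (metric.Registration, error) {
+    gauge, err := meter.Int64ObservableGauge("app.queue.depth")
+    if err != nil {
+        return nil, err
+    }
+    // The returned Registration can later be used to unregister the callback.
+    return meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
+        o.ObserveInt64(gauge, depth())
+        return nil
+    }, gauge)
+}
+```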
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+  Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+  Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+  Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
+  - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Int64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Int64Histogram`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
+  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+  - The `Counter` method is replaced by `Meter.Float64Counter`
+  - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
+  - The `Histogram` method is replaced by `Meter.Float64Histogram`
+
+## [1.11.2/0.34.0] 2022-12-05
+
+### Added
+
+- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
+  This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387)
+- Add Instrumentation Scope and Version as an info metric and label in the Prometheus exporter.
+  This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+  - `OTEL_EXPORTER_OTLP_INSECURE`
+  - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+  - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+  - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+  - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Instrument` and `InstrumentKind` types are added to `go.opentelemetry.io/otel/sdk/metric`.
+  These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The `AssertHasAttributes` function allows instrument authors to test that datapoints returned have appropriate attributes. (#3487)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+  Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+  The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove the comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Re-enabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevent panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use `otlpmetric[http|grpc].New` directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+  Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
+  By default, it will register with the default Prometheus registerer.
+  A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
+- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
+  It will return an error if the exporter fails to register with Prometheus. (#3239)
+
+### Fixed
+
+- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
+- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
+  This fixes the implementation to be compliant with the W3C specification. (#3226)
+- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
+- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
+  Instead, the exporter is defined as an "unchecked" collector for Prometheus.
+  This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
+  This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
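+
+A sketch of registering the exporter with a non-default Prometheus registerer, per the `WithRegisterer` option and error-returning `New` described above:
+
+```go
+package app
+
+import (
+    "github.com/prometheus/client_golang/prometheus"
+
+    otelprom "go.opentelemetry.io/otel/exporters/prometheus"
+    sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// newMeterProvider registers the exporter with a caller-supplied registry.
+// New returns an error if registration with Prometheus fails.
+func newMeterProvider(reg *prometheus.Registry) (*sdkmetric.MeterProvider, error) {
+    exporter, err := otelprom.New(otelprom.WithRegisterer(reg))
+    if err != nil {
+        return nil, err
+    }
+    // The exporter embeds a manual reader, so it is passed as a Reader.
+    return sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter)), nil
+}
+```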
+
+## [1.11.0/0.32.3] 2022-10-12
+
+### Added
+
+- Add a default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`). (#3261)
+
+### Changed
+
+- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
+- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
+  This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
+
+## [0.32.2] Metric SDK (Alpha) - 2022-10-11
+
+### Added
+
+- Added an example of using metric views to customize instruments. (#3177)
+- Add a default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`). (#3261)
+
+### Changed
+
+- Flush pending measurements with the `PeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` is called. (#3220)
+- Update histogram default bounds to match the requirements of the latest specification. (#3222)
+- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
+
+### Fixed
+
+- Use the default view if an instrument does not match any registered view of a reader. (#3224, #3237)
+- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
+- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
+- Log a warning when a conflicting instrument (e.g. description, unit, data type) is created instead of returning an error. (#3251)
+- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
+
+## [0.32.1] Metric SDK (Alpha) - 2022-09-22
+
+### Changed
+
+- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
+  Invalid characters are replaced with `_`. (#3212)
+
+### Added
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
+- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
+
+### Fixed
+
+- Updated go.mods to point to valid versions of the SDK. (#3216)
+- Set the `MeterProvider` resource on all exported metric data. (#3218)
+
+## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
+
+### Changed
+
+- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
+  Please see the package documentation for how the new SDK is initialized and configured. (#3175)
+- Update the minimum supported Go version to go1.18. This removes support for go1.17. (#3179)
+
+### Removed
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
+  A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
+  A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175)
+- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
+
+## [1.10.0] - 2022-09-09
+
+### Added
+
+- Support Go 1.19. (#3077)
+  Include compatibility testing and document support. (#3077)
+- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.18.0` to `v0.19.0`. (#3107)
+
+### Changed
+
+- Fix misidentification of OpenTelemetry `SpanKind` in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
+- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
+- All exporters will be shut down even if one reports an error. (#3091)
+- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
+
+## [1.9.0/0.0.3] - 2022-08-01
+
+### Added
+
+- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
+- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
+  The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
+- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+  The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
+- Add the `http.method` attribute to the HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
+
+### Fixed
+
+- Invalid warning for context setup being deferred in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
+
+## [1.8.0/0.31.0] - 2022-07-08
+
+### Added
+
+- Add support for the `opentracing.TextMap` format in the `Inject` and `Extract` methods
+  of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
+
+### Changed
+
+- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
+- In the `go.opentelemetry.io/otel/sdk/instrumentation` package, rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
+- Move the metric no-op implementation from `nonrecording` to the `metric` package. (#2866)
+
+### Removed
+
+- Support for go1.16. Support is now only for go1.17 and go1.18. (#2917)
+
+### Deprecated
+
+- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
+  Use the equivalent `Scope` struct instead. (#2977)
+- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
+  Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
+
+## [1.7.0/0.30.0] - 2022-04-28
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
+  The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
+- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
+  The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
+- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
+  The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
+- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
+
+### Fixed
+
+- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
+- Remove the import of the `testing` package in non-test builds of the `go.opentelemetry.io/otel` package. (#2786)
+
+### Changed
+
+- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
+- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
+  The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
+- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
+  Consequently, the `Record` type from the same package also has had the embedded method renamed. (#2790)
+
+### Deprecated
+
+- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.Attribute` method instead. (#2790)
+- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
+- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+  Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
+
+### Removed
+
+- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+
+## [0.29.0] - 2022-04-11
+
+### Added
+
+- The metrics global package was added back into several test files. (#2764)
+- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
+  This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
+
+### Removed
+
+- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
+
+### Changed
+
+- Don't panic anymore when setting a global `MeterProvider` to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
+
+## [1.6.3] - 2022-04-07
+
+### Fixed
+
+- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
+
+## [1.6.2] - 2022-04-06
+
+### Changed
+
+- Don't panic anymore when setting a global `TracerProvider` or `TextMapPropagator` to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
+  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
+
+## [1.6.1] - 2022-03-28
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
+  Instead of using `"https://opentelemetry.io/schemas/v<version>"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/<version>"`. (#2743, #2744)
+
+### Security
+
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
+  This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
+
+## [1.6.0/0.28.0] - 2022-03-23
+
+### ⚠️ Notice ⚠️
+
+This update is a breaking change of the unstable Metrics API.
+Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified.
+
+### Added
+
+- Add metrics exponential histogram support.
+  New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
+- Add Go 1.18 to our compatibility tests. (#2679)
+- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
+- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
+
+### Changed
+
+- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
+  High-level changes include:
+
+  - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
+    These `InstrumentProvider`s are managed with a `Meter`.
+  - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
+  - Asynchronous callbacks can now be registered with a `Meter`.
+
+  Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
+
+### Fixed
+
+- Fall back to general attribute limits when span-specific ones are not set in the environment. (#2675, #2677)
+
+## [1.5.0] - 2022-03-16
+
+### Added
+
+- Log the exporters' configuration in the `TracerProvider` log message. (#2578)
+- Added support to configure the span limits with environment variables.
+  The following environment variables are supported. (#2606, #2637)
+  - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
+  - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_EVENT_COUNT_LIMIT`
+  - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
+  - `OTEL_SPAN_LINK_COUNT_LIMIT`
+  - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
+
+  If the provided environment variables are invalid (negative), the default values will be used.
+- Rename the `gc` runtime name to `go`. (#2560)
+- Add resource container ID detection. (#2418)
+- Add a span attribute value length limit.
+  The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
+  The default limit for this resource is "unlimited". (#2637)
+- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
+  This option replaces the `WithSpanLimits` option.
+  Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
+  Setting a limit to zero will effectively disable the related resource it limits, and setting it to a negative value will mean that resource is unlimited.
+  Consequently, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
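+
+A sketch of the new limit field and option described above (the chosen limit value is arbitrary):
+
+```go
+package app
+
+import (
+    sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// newTracerProvider caps attribute value length using the new
+// AttributeValueLengthLimit field together with WithRawSpanLimits.
+func newTracerProvider() *sdktrace.TracerProvider {
+    limits := sdktrace.NewSpanLimits() // start from defaults
+    limits.AttributeValueLengthLimit = 256
+    return sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
+}
+```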
+
+### Changed
+
+- Drop oldest tracestate `Member` when capacity is reached. (#2592)
+- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
+- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
+- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
+- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
+- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
+
+### Fixed
+
+- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
+- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
+- Unlimited span limits are now supported (negative values). (#2636, #2637)
+
+### Deprecated
+
+- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
+  Use `WithRawSpanLimits` instead.
+  That option allows setting unlimited and zero limits; this option does not.
+  This option will be kept until the next major version incremented release. (#2637)
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use the `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify the Zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s and `Tracer`s for debugging.
+  To enable, use a logger with Verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span processor with environment variables.
+  The following environment variables are used. (#2515)
+  - `OTEL_BSP_SCHEDULE_DELAY`
+  - `OTEL_BSP_EXPORT_TIMEOUT`
+  - `OTEL_BSP_MAX_QUEUE_SIZE`
+  - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
+
+### Changed
+
+- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode urlescaped values. (#2529)
+- Baggage members are now only validated once, when calling `NewMember`, and not also when adding them to the baggage itself (see the sketch after this list). (#2522)
+- The order in which attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to comply with the OpenTelemetry specification.
+  Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
+  This drop order still only applies to attributes with unique keys not already contained in the span.
+  If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
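+
+A minimal sketch of the validate-once behavior noted above; the key and value are arbitrary examples:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+func main() {
+	// NewMember validates the key and value once, here.
+	m, err := baggage.NewMember("user.id", "42")
+	if err != nil {
+		panic(err)
+	}
+	// Adding the member to a Baggage does not re-validate it.
+	b, err := baggage.New(m)
+	if err != nil {
+		panic(err)
+	}
+	ctx := baggage.ContextWithBaggage(context.Background(), b)
+	fmt.Println(baggage.FromContext(ctx).Member("user.id").Value()) // 42
+}
+```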
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+  - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+  - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+  - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger.
+  This can be used by the SDK and API to provide users with feedback about the internal state.
+  To enable verbose logs, configure the logger to print V(1) logs. For debugging information, configure it to print V(5) logs. (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff), and TLS handshakes, and to handle errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff), and TLS handshakes, and to handle errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification (see the sketch after this list).
+  Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
+  When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- Basic metric controller updated to use sync.Map to avoid blocking calls. (#2381)
+- The `go.opentelemetry.io/otel/exporter/jaeger` exporter correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
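+
+A minimal sketch of the endpoint handling described above; the collector host and paths are placeholders:
+
+```go
+package main
+
+import (
+	"context"
+	"os"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+)
+
+func main() {
+	// A path on the universal endpoint ("/otlp") is treated as a base path,
+	// so traces are sent to /otlp/v1/traces.
+	os.Setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "https://collector.example:4318/otlp")
+
+	// A per-signal endpoint would be used exactly as given, with no path appended:
+	// os.Setenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT", "https://collector.example:4318/custom/traces")
+
+	exp, err := otlptracehttp.New(context.Background())
+	if err != nil {
+		panic(err)
+	}
+	defer func() { _ = exp.Shutdown(context.Background()) }()
+}
+```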
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+  - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`.
+  - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+  - The no-op implementations of sync and async instruments are no longer exported; the new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
+
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection (see the sketch after this list). (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
+  The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
+  The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
+  The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+  - When upgrading from the `semconv/v1.4.0` package note the following name changes:
+    - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+    - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+    - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+    - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+    - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+    - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
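+
+A minimal sketch of reusing an existing gRPC connection via `WithGRPCConn`, as added above; the target address and the insecure credentials are illustrative only:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+func main() {
+	ctx := context.Background()
+
+	// An application-managed connection, perhaps shared with other clients.
+	conn, err := grpc.DialContext(ctx, "collector.example:4317",
+		grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		panic(err)
+	}
+
+	// The exporter reuses conn instead of dialing its own connection.
+	exp, err := otlptracegrpc.New(ctx, otlptracegrpc.WithGRPCConn(conn))
+	if err != nil {
+		panic(err)
+	}
+	defer func() { _ = exp.Shutdown(ctx) }()
+}
+```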
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- The JSON stdout exporter no longer crashes due to a concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- `NoopMeterProvider` is now private and `NewNoopMeterProvider` must be used to obtain a `noopMeterProvider`. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project.
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).
+
+### Added
+
+- The OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer was created with the `WithSchemaURL` option. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from the `go.opentelemetry.io/otel/attribute` package.
+  Use the typed functions and methods added to the package instead. (#2235)
+  - The `Key.Array` method is removed.
+  - The `Array` function is removed.
+  - The `Any` function is removed.
+  - The `ArrayValue` function is removed.
+  - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler` (see the sketch at the end of this section). (#2149)
+- Added the `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when a panic is handled in `span.End`. (#2163)
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions (see the sketch after this list). (#2162)
+  - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
+- Added the `go.opentelemetry.io/otel/example/fib` example package.
+  Included is an example application that computes Fibonacci numbers. (#2203)
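+
+A minimal sketch of the typed slice attributes named above; the tracer name and attribute keys are arbitrary:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+)
+
+func main() {
+	_, span := otel.Tracer("example").Start(context.Background(), "operation")
+	defer span.End()
+
+	// Typed slice constructors replace the deprecated Array function.
+	span.SetAttributes(
+		attribute.StringSlice("rpc.args", []string{"a", "b"}),
+		attribute.IntSlice("retry.delays_ms", []int{100, 200, 400}),
+	)
+}
+```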
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+  - ValueRecorder becomes Histogram
+  - ValueObserver becomes Gauge
+  - SumObserver becomes CounterObserver
+  - UpDownSumObserver becomes UpDownCounterObserver
+  The API exported from this project is still considered experimental. (#2202)
+- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer; the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, and pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON objects directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
+  All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
+  The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type are deprecated.
+  Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
+  Use the typed functions instead. (#2181)
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
+
+### Removed
+
+- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+
+### Fixed
+
+- The `fromEnv` detector no longer throws an error when the `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in resources.go. (#2201)
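+
+A minimal sketch of the `ErrorHandlerFunc` and `WithStackTrace` additions from the 1.0.0-RC3 section above; the logging destination and error are arbitrary:
+
+```go
+package main
+
+import (
+	"context"
+	"errors"
+	"log"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	// A plain function can now serve as the global ErrorHandler.
+	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
+		log.Printf("otel error: %v", err)
+	}))
+
+	_, span := otel.Tracer("example").Start(context.Background(), "operation")
+	defer span.End()
+
+	// WithStackTrace attaches a stack trace to the recorded error event.
+	span.RecordError(errors.New("boom"), trace.WithStackTrace(true))
+}
+```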
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added API `LinkFromContext` to return a Link that encapsulates the SpanContext from the provided context along with provided attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
+  This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
+  For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used with the default SDK as a `SpanProcessor` during testing (see the sketch at the end of this section). (#2132)
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
+  Use the `trace.ParseTraceState` function instead. (#2122)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
+  The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+  The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using `WithNewRoot`, don't use the parent context for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
+  This fix will, by default, set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)
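+
+A minimal sketch of testing with the `SpanRecorder` added in the 1.0.0-RC2 section above; the test, tracer, and span names are arbitrary:
+
+```go
+package example
+
+import (
+	"context"
+	"testing"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/sdk/trace/tracetest"
+)
+
+func TestSpanIsRecorded(t *testing.T) {
+	// The recorder is a SpanProcessor that keeps started and ended spans in memory.
+	sr := tracetest.NewSpanRecorder()
+	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))
+
+	_, span := tp.Tracer("test").Start(context.Background(), "operation")
+	span.End()
+
+	if got := len(sr.Ended()); got != 1 {
+		t.Fatalf("expected 1 ended span, got %d", got)
+	}
+}
+```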
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
+while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
+with major version 0.
+
+### Added
+
+- Adds the `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
+  - The following status codes are defined as transient errors:
+    | gRPC Status Code | Description        |
+    | ---------------- | ------------------ |
+    | 1                | Cancelled          |
+    | 4                | Deadline Exceeded  |
+    | 8                | Resource Exhausted |
+    | 10               | Aborted            |
+    | 11               | Out of Range       |
+    | 14               | Unavailable        |
+    | 15               | Data Loss          |
+- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package (see the sketch at the end of this section).
+  It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
+- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
+  This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data.
+  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install a `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
+- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
+  These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install a `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several builtin resource detectors now correctly populate the schema URL. (#1938)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data.
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
+  `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against the last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- BatchSpanProcessor now reports export failures when calling `ForceFlush()` method. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
+  This method returns the status of a span using the new `Status` type. (#1874)
+- Updated `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
+  This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate Semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `span.End()` to only accept Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
+  This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
+  The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` package is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` package is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` package is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead.
(#1993)
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported the types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`. Use the corresponding `With*()` options to use them individually. (#1810)
+- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` interface in the `go.opentelemetry.io/otel/sdk/trace` package.
+  The `Tracer` method is not required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
+  The `IsRecording` method returns whether the span is recording or not.
+  A read-only span value does not need to know if updates to it will be recorded or not.
+  By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
+  The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
+  When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
+  Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of its own.
+  The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
+  - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
+  Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
+  These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
+- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
+- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
+
+### Fixed
+
+- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
+- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
+- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
+- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
+- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
+- Add logic to determine if the channel is closed, fixing a Jaeger exporter test panic from closing an already closed channel. (#1870, #1973)
+- Avoid transport security when OTLP endpoint is a Unix socket. (#2001)
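+
+A minimal sketch of decoding a `tracestate` header with the `ParseTraceState` function added in this release; the header value is an arbitrary example:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	// Parse a W3C tracestate header value into a TraceState.
+	ts, err := trace.ParseTraceState("vendora=opaque,vendorb=other")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(ts.Len())          // 2
+	fmt.Println(ts.Get("vendora")) // opaque
+}
+```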
+
+### Security
+
+## [0.20.0] - 2021-04-23
+
+### Added
+
+- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install the exporter in tracing and metrics pipelines. (#1373)
+- Adds semantic conventions for exceptions. (#1492)
+- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`.
+  These environment variables can be used to override the Jaeger agent hostname and port. (#1752)
+- Option `ExportTimeout` was added to batch span processor. (#1755)
+- `trace.TraceFlags` is now a defined type over `byte`, and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
+- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
+- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
+- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
+- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
+- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788)
+- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
+  - `process.pid`
+  - `process.executable.name`
+  - `process.executable.path`
+  - `process.command_args`
+  - `process.owner`
+  - `process.runtime.name`
+  - `process.runtime.version`
+  - `process.runtime.description`
+- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811)
+  - `OTEL_EXPORTER_OTLP_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
+  - `OTEL_EXPORTER_OTLP_HEADERS`
+  - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
+  - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
+  - `OTEL_EXPORTER_OTLP_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
+  - `OTEL_EXPORTER_OTLP_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
+  - `OTEL_EXPORTER_OTLP_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
+  - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
+- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821)
+- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
+
+### Fixed
+
+- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
+- The Jaeger exporter now correctly sets tags for the Span status code and message.
+  This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
+- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag.
+  Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
+- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
+- Fixed typo for default service name in Jaeger Exporter. (#1797)
+- Fix flaky reconnection of the OTLP client connection. (#1527, #1814)
+- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
+  Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
+
+### Changed
+
+- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
+- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
+- Migrate from using the internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
+- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
+- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
+  The Span's SpanContext can now self-identify as being remote or not.
+  This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
+- Improve OTLP/gRPC exporter connection errors. (#1737)
+- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
+  The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
+- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
+  This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
+  to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
+- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
+- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
+  It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
+- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
+- Modify Zipkin Exporter default service name to use the default resource's service name instead of an empty value. (#1777)
+- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
+- The prometheus.InstallNewPipeline example is moved from comment to example test. (#1796)
+- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter.
(#1800)
+- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
+  This enables the caller to shut down and flush using the related `TracerProvider` methods. (#1822)
+- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
+- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
+  The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
+- The Jaeger exporter no longer batches exported spans itself; instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
+
+### Removed
+
+- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`.
+  These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
+- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
+  This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
+  To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
+- Removed setting the error status while recording an error with a Span from the oteltest package. (#1729)
+- The concept of a remote and local Span stored in a context is unified to just the current Span.
+  Because of this, `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
+  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
+  If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
+- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
+  This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
+- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package.
+  The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
+- Remove the `WithDisabled` option from the Jaeger exporter.
+  To disable the exporter, unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
+- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
+  These functions for retrieving specific environment variable values are redundant with other internal functions and
+  are not intended for end-user use. (#1824)
+- Removed the Jaeger exporter `WithSDKOptions` `Option`.
+  This option was used to set SDK options for the exporter creation convenience functions.
+  These functions are provided as a way to easily set up or install the exporter with what are deemed reasonable SDK settings for common use cases.
+  If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
+- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
+  The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter `Option` type is removed.
+  The type is no longer used by the exporter to configure anything.
+  All the previous configurations these options provided were duplicates of SDK configuration.
+  They have been removed in favor of using the SDK configuration, and this focuses the exporter configuration on only the endpoints it will send telemetry to. (#1830)
+
+## [0.19.0] - 2021-03-18
+
+### Added
+
+- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586)
+- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
+- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702)
+- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and an `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
+- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
+
+### Changed
+
+- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
+  - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known (see the sketch at the end of this section).
+- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
+- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
+- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
+- `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across the project. (#1656)
+- Added non-empty string check for trace `Attribute` keys. (#1659)
+- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
+- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
+- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
+- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
+
+### Removed
+
+- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549)
+- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
+- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
+  These are now returned as a `SpanProcessor` interface from their respective constructors. (#1638)
+- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
+- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
+- Removed `jaeger.WithProcess` configuration option. (#1673)
+- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
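+
+A minimal sketch of constructing the now-immutable `SpanContext` via `SpanContextConfig`, as described in the Changed section above; the hex IDs are arbitrary examples:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	traceID, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
+	spanID, _ := trace.SpanIDFromHex("00f067aa0ba902b7")
+
+	// With no exported fields, a SpanContext is built from a config struct.
+	sc := trace.NewSpanContext(trace.SpanContextConfig{
+		TraceID:    traceID,
+		SpanID:     spanID,
+		TraceFlags: trace.FlagsSampled,
+		Remote:     true, // extracted from remote context data
+	})
+	fmt.Println(sc.IsValid(), sc.IsRemote())
+}
+```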
+
+### Fixed
+
+- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626)
+- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
+- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
+- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
+- Synchronization issues in global trace delegate implementation. (#1686)
+- Reduced excess memory usage by global `TracerProvider`. (#1687)
+
+## [0.18.0] - 2021-03-03
+
+### Added
+
+- Added `resource.Default()` for use with meter and tracer providers. (#1507)
+- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
+- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface (see the sketch at the end of this section). (#1544)
+- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558)
+- Compatibility testing suite in the CI system for the following systems. (#1567)
+  | OS      | Go Version | Architecture |
+  | ------- | ---------- | ------------ |
+  | Ubuntu  | 1.15       | amd64        |
+  | Ubuntu  | 1.14       | amd64        |
+  | Ubuntu  | 1.15       | 386          |
+  | Ubuntu  | 1.14       | 386          |
+  | MacOS   | 1.15       | amd64        |
+  | MacOS   | 1.14       | amd64        |
+  | Windows | 1.15       | amd64        |
+  | Windows | 1.14       | amd64        |
+  | Windows | 1.15       | 386          |
+  | Windows | 1.14       | 386          |
+
+### Changed
+
+- Replaced interface `oteltest.SpanRecorder` with its existing implementation
+  `StandardSpanRecorder`. (#1542)
+- Default span limit values to 128. (#1535)
+- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
+- Renamed the `otel/label` package to `otel/attribute`. (#1541)
+- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
+- Parallelize the CI linting and testing. (#1567)
+- Stagger timestamps in exact aggregator tests. (#1569)
+- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
+- Prevent end-users from implementing some interfaces (#1575)
+
+  ```
+  "otel/exporters/otlp/otlphttp".Option
+  "otel/exporters/stdout".Option
+  "otel/oteltest".Option
+  "otel/trace".TracerOption
+  "otel/trace".SpanOption
+  "otel/trace".EventOption
+  "otel/trace".LifeCycleOption
+  "otel/trace".InstrumentationOption
+  "otel/sdk/resource".Option
+  "otel/sdk/trace".ParentBasedSamplerOption
+  "otel/sdk/trace".ReadOnlySpan
+  "otel/sdk/trace".ReadWriteSpan
+  ```
+
+### Removed
+
+- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
+- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
+- Removed the `test-386` make target.
+  This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
+
+### Fixed
+
+- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572)
+- Windows build of Jaeger tests now compiles with OS-specific functions (#1576). (#1577)
+- The sequential timing check of timestamps of `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` is now set up explicitly to be sequential (#1578). (#1579)
+- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
+- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
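+
+A minimal sketch of the `HeaderCarrier` adapter mentioned above, which lets an `http.Header` act as a `TextMapCarrier`; the request URL is a placeholder:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+func main() {
+	prop := propagation.TraceContext{}
+	req, _ := http.NewRequest(http.MethodGet, "https://service.example/api", nil)
+
+	// HeaderCarrier adapts http.Header to the TextMapCarrier interface,
+	// including the Keys() method added in this release.
+	carrier := propagation.HeaderCarrier(req.Header)
+
+	// With no span in the context, Inject is a no-op; with an active span
+	// it would set the "traceparent" header on the request.
+	prop.Inject(context.Background(), carrier)
+	fmt.Println(carrier.Keys())
+}
+```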
+
+## [0.17.0] - 2021-02-12
+
+### Changed
+
+- Rename project default branch from `master` to `main`. (#1505)
+- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
+- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
+- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric. (#1528)
+- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
+
+### Fixed
+
+- Fixed otlpgrpc reconnection issue.
+- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513)
+- The otel-collector example now uses the default OTLP receiver port of the collector.
+
+## [0.16.0] - 2021-01-13
+
+### Added
+
+- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
+- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
+- Added documentation about the project's versioning policy. (#1388)
+- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
+- Added codeql workflow to GitHub Actions. (#1428)
+- Added Gosec workflow to GitHub Actions. (#1429)
+- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports binary protobuf payloads. (#1420)
+- Add an OpenCensus exporter bridge. (#1444)
+
+### Changed
+
+- Rename `internal/testing` to `internal/internaltest`. (#1449)
+- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
+- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
+- Improve span duration accuracy. (#1360)
+- Migrated CI/CD from CircleCI to GitHub Actions. (#1382)
+- Remove duplicate checkout from GitHub Actions workflow. (#1407)
+- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind`. (#1412)
+- Metric `exact` aggregator includes per-point timestamps. (#1412)
+- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments. (#1412)
+- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
+- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
+- Unify the endpoint API related to the OTel exporter. (#1401)
+- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435)
+- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
+- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
+- `SamplingResult` is now passed a `Tracestate` from the parent `SpanContext`. (#1432)
+- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
+- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`.
(#1447)
+- Metric Push and Pull Controller components are combined into a single "basic" Controller:
+  - `WithExporter()` and `Start()` to configure Push behavior
+  - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
+  - `Start()` and `Stop()` accept Context. (#1378)
+- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
+
+### Removed
+
+- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
+- Remove Metric export functionality related to quantiles and summary data points: this is not specified. (#1412)
+- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released. (#1412)
+
+### Fixed
+
+- `BatchSpanProcessor.Shutdown()` will now shut down the underlying `export.SpanExporter`. (#1443)
+
+## [0.15.0] - 2020-12-10
+
+### Added
+
+- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)
+
+### Changed
+
+- The Zipkin exporter now uses the Span status code to determine. (#1328)
+- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
+- Move the OpenCensus example into `example` directory. (#1359)
+- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
+- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4. (#1374)
+- Bump `github.com/golangci/golangci-lint` in `/internal/tools`. (#1375)
+
+### Fixed
+
+- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
+
+## [0.14.0] - 2020-11-19
+
+### Added
+
+- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
+- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
+- `SpanContextFromContext` returns `SpanContext` from context (see the sketch at the end of this section). (#1255)
+- `TraceState` has been added to `SpanContext`. (#1340)
+- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323)
+- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
+- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
+- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
+  - `ID` has been renamed to `TraceID`.
+  - `IDFromHex` has been renamed to `TraceIDFromHex`.
+  - `EmptySpanContext` is removed.
+- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
+- OTLP Exporter updates:
+  - supports OTLP v0.6.0 (#1230, #1354)
+  - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
+- The Sampler is now called on local child spans. (#1233)
+- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity.
(#1240) +- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`. + This matches the returned type and fixes misuse of the term metric. (#1240) +- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241) +- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/oteltest` as part of #964. (#1252) +- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321) +- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316) +- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316) +- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254) +- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254) +- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330) +- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330) +- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267) +- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276) +- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and + `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235) +- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210) +- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310) +- Updated span collection limits for attribute, event and link counts to 1000. (#1318) +- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338) + +### Removed + +- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, and `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243) +- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy. + It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254) +- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`. + `Tracer` and `Span` from the same module should be used in their place instead. (#1306) +- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350) +- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314) + +### Fixed + +- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244) +- The `go.opentelemetry.io/otel/api/global` package's global `TextMapPropagator` now delegates functionality to a globally set delegate for all previously returned propagators. (#1258) +- Fix condition in `label.Any`.
(#1299) +- Fix global `TracerProvider` to pass options to its configured provider. (#1329) +- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309) + +## [0.13.0] - 2020-10-08 + +### Added + +- OTLP Metric exporter supports Histogram aggregation. (#1209) +- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214) +- A Baggage API to implement the OpenTelemetry specification. (#1217) +- Add a `Shutdown` method to `sdk/trace/provider` that shuts down processors in the order they were registered. (#1227) + +### Changed + +- Set default propagator to no-op propagator. (#1184) +- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325) +- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212) +- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification. + They now are `Unset`, `Error`, and `Ok`. + They no longer track the gRPC codes (see the sketch below). (#1214) +- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214) +- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325) +- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264) + +### Fixed + +- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226) + +### Removed + +- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212) +- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification. + The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212) +- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216) +- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217) +- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219) +- Nested array/slice support has been removed. (#1226) + +## [0.12.0] - 2020-09-24 + +### Added + +- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108) +- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s. + This addition was made to conform with our project option conventions. (#1155) +- Instrumentation library information was added to the Zipkin exporter. (#1119) +- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166) +- More semantic conventions for k8s as resource attributes. (#1167)
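+
+For illustration, the status code change in [0.13.0] above (#1214) means a span's status is set with one of the three specification codes. A minimal sketch, assuming the current `go.opentelemetry.io/otel` package layout rather than the `api/`-prefixed paths of that release:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/codes"
+)
+
+func main() {
+	// The global provider is a no-op unless an SDK is installed,
+	// which is enough to demonstrate the API shape.
+	tracer := otel.Tracer("example")
+	_, span := tracer.Start(context.Background(), "operation")
+	defer span.End()
+
+	// codes defines only Unset, Error, and Ok; it no longer
+	// mirrors the gRPC status codes.
+	span.SetStatus(codes.Error, "operation failed")
+}
+```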
+ +### Changed + +- Add reconnecting udp connection type to Jaeger exporter. + This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record. + It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063) +- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`. + This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108) +- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`. + This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108) +- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109) +- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package. + This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118) +- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119) +- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115) +- Move `tools` package under `internal`. (#1141) +- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142) + The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged. +- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153) +- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155) +- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161) +- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to + recommend the use of `newConfig()` instead of `configure()`. (#1163) +- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163) +- Ensure exported interface types include parameter names and update the + Style Guide to reflect this styling rule. (#1172) +- Don't consider unset environment variable for resource detection to be an error. (#1170) +- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and + `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`. +- ValueObserver instruments use LastValue aggregator by default. (#1165) +- OTLP Metric exporter supports LastValue aggregation. (#1165) +- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185) +- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) +- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) +- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package.
(#1190) +- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) +- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) +- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) +- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) +- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) +- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) +- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190) +- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190) +- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192) +- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201) +- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195) +- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203) + +### Removed + +- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the + `go.opentelemetry.io/contrib/propagators/` module. (#1191) +- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194) + +### Fixed + +- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171) +- Fix missing shutdown processor in otel-collector example. (#1186) +- Fix missing shutdown processor in basic and namedtracer examples. (#1197) + +## [0.11.0] - 2020-08-24 + +### Added + +- Support for exporting array-valued attributes via OTLP. (#992) +- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994) +- Support for filtering metric label sets. (#1047) +- A dimensionality-reducing metric Processor. (#1057) +- Integration tests for more OTel Collector Attribute types. (#1062) +- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078) + +### Changed + +- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049) +- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049) +- Rename `api/testharness` to `api/apitest`.
(#1049) +- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049) +- Change Metric Processor to merge multiple observations. (#1024) +- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module. + This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038) +- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016) +- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042) +- Replace `WithSyncer` with `WithBatcher` in examples. (#1044) +- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046) +- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060) +- Unify Callback Function Naming. + Rename `*Callback` with `*Func`. (#1061) +- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064) +- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface. + This interface still supports the export of `SpanData`, but only as a slice. + Implementations are also now required to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078) +- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error. + If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action (see the sketch at the end of this section). (#1078) +- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`. + This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078) + +### Removed + +- Duplicate, unused API sampler interface. (#999) + Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead. +- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository. + This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027) +- The `WithSpan` method of the `Tracer` interface. + The functionality this method provided was limited compared to what a user can provide themselves. + It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043) +- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions. + These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077) +- The `oterror` package. (#1026) +- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032) + +### Fixed + +- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031) +- Correct instrumentation version tag in Jaeger exporter. (#1037) +- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043) +- Move internally generated protobuf code from the `go.opentelemetry.io/otel` module to the OTLP exporter to reduce dependency overhead. (#1050) +- The `otel-collector` example referenced outdated collector processors. (#1006)
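+
+The `NewBatchSpanProcessor` change above (#1078) removes an error return from the setup path. A minimal construction sketch, assuming the current package layout and the `stdouttrace` exporter (the import paths of this release differed):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+	exp, err := stdouttrace.New()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// NewBatchSpanProcessor returns a processor directly; a nil
+	// exporter yields a no-op processor rather than an error.
+	bsp := sdktrace.NewBatchSpanProcessor(exp)
+	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
+	defer func() {
+		// Shutdown flushes the processor and shuts down the exporter.
+		if err := tp.Shutdown(context.Background()); err != nil {
+			log.Print(err)
+		}
+	}()
+
+	_, span := tp.Tracer("example").Start(context.Background(), "work")
+	span.End()
+}
+```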
+ +## [0.10.0] - 2020-07-29 + +This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages. + +### Added + +- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern. + These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944) +- Add propagator option for gRPC instrumentation. (#986) +- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987) + +### Changed + +- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function. + This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944) +- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`. + This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963) +- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962) +- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968) + - `value.Bool` was replaced with `kv.BoolValue`. + - `value.Int64` was replaced with `kv.Int64Value`. + - `value.Uint64` was replaced with `kv.Uint64Value`. + - `value.Float64` was replaced with `kv.Float64Value`. + - `value.Int32` was replaced with `kv.Int32Value`. + - `value.Uint32` was replaced with `kv.Uint32Value`. + - `value.Float32` was replaced with `kv.Float32Value`. + - `value.String` was replaced with `kv.StringValue`. + - `value.Int` was replaced with `kv.IntValue`. + - `value.Uint` was replaced with `kv.UintValue`. + - `value.Array` was replaced with `kv.ArrayValue`. +- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972) +- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979) +- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980) +- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985) +- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989) + +### Removed + +- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970) + +### Fixed + +- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953) +- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1.
(#957) +- Use `global.Handle` for span export errors in the OTLP exporter. (#946) +- Correct Go language formatting in the README documentation. (#961) +- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977) +- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983) +- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984) + +## [0.9.0] - 2020-07-20 + +### Added + +- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939) +- A Detector to automatically detect resources from an environment variable. (#939) +- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938) +- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`. + References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942) + +### Changed + +- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948) + +### Removed + +- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943) + +## [0.8.0] - 2020-07-09 + +### Added + +- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject. + Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882) +- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882) +- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882) +- Add `peer.service` semantic attribute. (#898) +- Add database-specific semantic attributes. (#899) +- Add semantic convention for `faas.coldstart` and `container.id`. (#909) +- Add HTTP content size semantic conventions. (#905) +- Include `http.request_content_length` in HTTP request basic attributes. (#905) +- Add semantic conventions for operating system process resource attribute keys. (#919) +- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931) + +### Changed + +- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879) +- Use lowercase header names for B3 Multiple Headers. (#881) +- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`. + This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings. + If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882) +- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header. + Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid. + This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882) +- Extend semantic conventions for RPC. (#900) +- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`.
(#920) + - `"api/standard".FaaSName` -> `FaaSNameKey` + - `"api/standard".FaaSID` -> `FaaSIDKey` + - `"api/standard".FaaSVersion` -> `FaaSVersionKey` + - `"api/standard".FaaSInstance` -> `FaaSInstanceKey` + +### Removed + +- The `FlagsUnused` trace flag is removed. + The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882) +- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed. + If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882) + +### Fixed + +- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881) +- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882) +- The B3 propagator now propagates the debug flag. + This removes the behavior of changing the debug flag into a set sampling bit. + Instead, this now follows the B3 specification and omits the `X-B3-Sampled` header. (#882) +- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampled` header when injecting. (#882) +- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883) +- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885) +- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896) +- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908) +- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912) +- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) +- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) +- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) +- Update otel-collector example to use the v0.5.0 collector. (#915) +- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) +- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) +- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. + This is in accordance with OpenTelemetry semantic conventions. (#922) +- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923) +- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925) +- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926) +- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930) + +## [0.7.0] - 2020-06-26 + +This release implements the v0.5.0 version of the OpenTelemetry specification. + +### Added + +- The othttp instrumentation now includes default metrics. (#861) +- This CHANGELOG file to track all changes in the project going forward. +- Support for array type attributes. (#798) +- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844) +- Timestamps are now passed to exporters for each export.
(#835) +- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s. + This replaces the prior `Record` `struct` use for this purpose. (#835) +- New dependabot integration to automate package upgrades. (#814) +- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument. + This instrumentation version is passed on to exporters. (#811) (#805) (#802) +- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811) +- Environment variables for Jaeger exporter are supported. (#796) +- New `aggregation.Kind` in the export metric API. (#808) +- New example that uses OTLP and the collector. (#790) +- Handle errors in the span `SetName` during span initialization. (#791) +- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777) +- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778) +- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`. + There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler` (see the sketch after the Changed list below). (#778) +- Options to specify propagators for httptrace and grpctrace instrumentation. (#784) +- The required `application/json` header for the Zipkin exporter is included in all exports. (#774) +- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769) + +### Changed + +- Rename `Integrator` to `Processor` in the metric SDK. (#863) +- Rename `AggregationSelector` to `AggregatorSelector`. (#859) +- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858) +- Rename `simple` integrator to `basic` integrator. (#857) +- Merge otlp collector examples. (#841) +- Change the metric SDK to support cumulative, delta, and pass-through exporters directly. + With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840) +- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812) +- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other. + All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`. + Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812) +- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812) +- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810) +- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808) +- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806) +- Update error handling for any one off error handlers, replacing, instead, with the `global.Handle` function. (#791) +- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779) +- Make the argument order of the Histogram and DDSketch `New()` functions consistent. (#781)
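+
+The global error `Handler` added above (#778) centralizes error delivery. A sketch of the registration pattern, assuming the current `go.opentelemetry.io/otel` layout (this release used `global.Handle` and the `oterror` package instead):
+
+```go
+package main
+
+import (
+	"errors"
+	"log"
+
+	"go.opentelemetry.io/otel"
+)
+
+func main() {
+	// Register a handler once; until then, a default handler is used.
+	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
+		log.Printf("otel error: %v", err)
+	}))
+
+	// Library and SDK code route errors through the global handler.
+	otel.Handle(errors.New("example export failure"))
+}
+```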
+ +### Removed + +- `Uint64NumberKind` and related functions from the API. (#864) +- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803) +- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775) + +### Fixed + +- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866) +- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824) +- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867) +- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853) +- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854) +- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2. (#848) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp. (#817) +- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools. (#828) +- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus. (#838) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger. (#829) +- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3. (#815) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin. (#823) +- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools. (#830) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus. (#822) +- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin. (#820) +- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger. (#831) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0. (#836) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger. (#837) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp. (#839) +- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger. (#843) +- Set span status from HTTP status code in the othttp instrumentation. (#832) +- Fixed typo in push controller comment. (#834) +- The `Aggregator` testing has been updated and cleaned. (#812) +- `metric.Number(0)` expressions are replaced by `0` where possible. (#812) +- Fixed `global` `handler_test.go` test failure. (#804) +- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766) +- Fixed OTLP example's accidental early close of exporter. (#807) +- Ensure zipkin exporter reads and closes response body. (#788) +- Update instrumentation to use `api/standard` keys instead of custom keys. (#782) +- Clean up tools and RELEASING documentation. (#762) + +## [0.6.0] - 2020-05-21 + +### Added + +- Support for `Resource`s in the prometheus exporter. (#757) +- New pull controller. (#751) +- New `UpDownSumObserver` instrument. (#750) +- OpenTelemetry collector demo. (#711) +- New `SumObserver` instrument. (#747) +- New `UpDownCounter` instrument. (#745) +- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742) +- New `api/standard` package to implement semantic conventions and standard key-value generation. (#731) + +### Changed + +- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761) +- Use `[]float64` for histogram boundaries, not `[]metric.Number`.
(#758) +- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756) +- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754) +- The prometheus exporter now uses the new pull controller. (#751) +- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752) +- Support use of synchronous instruments in asynchronous callbacks. (#725) +- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739) +- Rename `Observer` instrument to `ValueObserver`. (#734) +- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738) +- Replace `Measure` instrument by `ValueRecorder` instrument. (#732) +- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727) + +### Fixed + +- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755) +- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743) +- Fix `string` case in `kv` `Infer` function. (#746) +- Fix panic in grpctrace client interceptors. (#740) +- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737) +- Rewrite span batch process queue batching logic. (#719) +- Remove the push controller named Meter map. (#738) +- Fix Histogram aggregator initial state (fix #735). (#736) +- Ensure golang alpine image is running `golang-1.14` for examples. (#733) +- Added test for grpctrace `UnaryInterceptorClient`. (#695) +- Rearrange `api/metric` code layout. (#724) + +## [0.5.0] - 2020-05-13 + +### Added + +- Batch `Observer` callback support. (#717) +- Alias `api` types to root package of project. (#696) +- Create basic `othttp.Transport` for simple client instrumentation. (#678) +- `SetAttribute(string, interface{})` to the trace API. (#674) +- Jaeger exporter option that allows user to specify custom http client. (#671) +- `Stringer` and `Infer` methods to `key`s. (#662) + +### Changed + +- Rename `NewKey` in the `kv` package to just `Key`. (#721) +- Move `core` and `key` to `kv` package. (#720) +- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709) +- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710) +- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710) +- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710) +- Move `Number` from `core` to `api/metric` package. (#706) +- Move `SpanContext` from `core` to `trace` package. (#692) +- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification (sketched below). (#681) + +### Fixed + +- Update tooling to run generators in all submodules. (#705) +- gRPC interceptor regexp to match methods without a service name. (#683) +- Use a `const` for padding 64-bit B3 trace IDs. (#701) +- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700) +- Left-pad 64-bit B3 trace IDs with zero. (#698) +- Propagate at least the first W3C tracestate header. (#694) +- Remove internal `StateLocker` implementation. (#688) +- Increase instance size CI system uses. (#690) +- Add a `key` benchmark and use reflection in `key.Infer()`. (#679) +- Fix internal `global` test by using `global.Meter` with `RecordBatch()`.
(#680) +- Reimplement histogram using mutex instead of `StateLocker`. (#669) +- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667) +- Update documentation to not include any references to `WithKeys`. (#672) +- Correct misspelling. (#668) +- Fix clobbering of the span context if extraction fails. (#656) +- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670) + +## [0.4.3] - 2020-04-24 + +### Added + +- `Dockerfile` and `docker-compose.yml` to run example code. (#635) +- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621) +- New `api/label` package, providing common label set implementation. (#651) +- Support for JSON marshaling of `Resources`. (#654) +- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642) +- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627) +- `WithSpanFormatter` option to the othttp plugin. (#617) +- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612) +- The prometheus exporter now supports exporting histograms. (#601) +- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613) +- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613) +- An `Equal` method to the `Resource` to test the equivalence of resources. (#613) +- An iterable structure (`AttributeIterator`) for `Resource` attributes. + +### Changed + +- The Zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644) +- Pass `Resources` through the metrics export pipeline. (#659) + +### Removed + +- `WithKeys` option from the metric API. (#639) + +### Fixed + +- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658) +- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653) +- Use type names for return values in jaeger exporter. (#648) +- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650) +- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647) +- Do not cache `reflect.ValueOf()` in metric Labels. (#649) +- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626) +- Add error wrapping to the prometheus exporter. (#631) +- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623) +- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614) +- Update `Resource` internal representation to uniquely and reliably identify resources. (#613) +- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622) +- Ensure spans created by httptrace client tracer reflect operation structure. (#618) +- Create a new recorder rather than reusing one when there are multiple observations in the same epoch for asynchronous instruments. (#610) +- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611) + +## [0.4.2] - 2020-03-31 + +### Fixed + +- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607) +- Fix time conversion from internal to OTLP in OTLP exporter. (#606) + +## [0.4.1] - 2020-03-31 + +### Fixed + +- Update `tag.sh` to create signed tags. (#604)
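+
+The header rename in [0.5.0] above (#681) follows the W3C requirement that `traceparent` be lowercase. A sketch of injection with the W3C propagator, assuming the current package layout:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	// Build a sampled span context by hand so Inject has data to write.
+	tid, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
+	sid, _ := trace.SpanIDFromHex("00f067aa0ba902b7")
+	sc := trace.NewSpanContext(trace.SpanContextConfig{
+		TraceID:    tid,
+		SpanID:     sid,
+		TraceFlags: trace.FlagsSampled,
+	})
+	ctx := trace.ContextWithSpanContext(context.Background(), sc)
+
+	header := make(http.Header)
+	propagation.TraceContext{}.Inject(ctx, propagation.HeaderCarrier(header))
+	// Prints: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
+	fmt.Println(header.Get("traceparent"))
+}
+```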
+ +## [0.4.0] - 2020-03-30 + +### Added + +- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580) +- Script to verify examples after a new release. (#579) + +### Removed + +- The dogstatsd exporter due to lack of support. + This additionally removes support for statsd. (#591) +- `LabelSet` from the metric API. + This is replaced by a `[]core.KeyValue` slice. (#595) +- `Labels` from the metric API's `Meter` interface. (#595) + +### Changed + +- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574) +- Renamed `internal/metric.Meter` to `MeterImpl`. (#580) +- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580) + +### Fixed + +- Corrected missing return in mock span. (#582) +- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596) +- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588) +- Update pre-release script to be compatible between GNU and BSD based systems. (#592) +- Add a `RecordBatch` benchmark. (#594) +- Moved span transforms of the OTLP exporter to the internal package. (#593) +- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569) +- Removed unneeded allocation on empty labels in OLTP exporter. (#597) +- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599) +- Update project documentation godoc.org links to pkg.go.dev. (#602) + +## [0.3.0] - 2020-03-21 + +This is the first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality. +There is still a possibility of breaking changes. + +### Added + +- Add `Observer` metric instrument. (#474) +- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494) +- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459) +- The zipkin trace exporter. (#495) +- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545) +- Add `StatusMessage` field to the trace `Span`. (#524) +- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525) +- The `Resource` type was added to the SDK. (#528) +- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538) +- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction. + Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560) +- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560) +- Scripts to better automate the release process. (#576) + +### Changed + +- Default to using `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506) +- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511) +- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511) +- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524) +- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification.
(#531) +- Rename metric API `Options` to `Config`. (#541) +- Rename metric `Counter` aggregator to be `Sum`. (#541) +- Unify metric options into `Option` from instrument specific options. (#541) +- The trace API's `TraceProvider` now supports `Resource`s. (#545) +- Correct error in zipkin module name. (#548) +- The jaeger trace exporter now supports `Resource`s. (#551) +- Metric SDK now supports `Resource`s. + The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552) +- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557) +- The stdout trace exporter now supports `Resource`s. (#558) +- The metric `Descriptor` is now included at the API instead of the SDK. (#560) +- Replace `Ordered` with an iterator in `export.Labels`. (#567) + +### Removed + +- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452) +- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560) +- `GetDescriptor` from the metric SDK. (#575) +- The `Gauge` instrument from the metric API. (#537) + +### Fixed + +- Make histogram aggregator checkpoint consistent. (#438) +- Update README with import instructions and how to build and test. (#505) +- The default label encoding was updated to be unique. (#508) +- Use `NewRoot` in the othttp plugin for public endpoints. (#513) +- Fix data race in `BatchedSpanProcessor`. (#518) +- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521) +- Use a variable-size array to represent ordered labels in maps. (#523) +- Update the OTLP protobuf and update changed import path. (#532) +- Use `StateLocker` implementation in `MinMaxSumCount`. (#546) +- Eliminate goroutine leak in histogram stress test. (#547) +- Update OTLP exporter with latest protobuf. (#550) +- Add filters to the othttp plugin. (#556) +- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565) +- Encode labels once during checkpoint. + The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter. + This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572) +- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573) + +## [0.2.3] - 2020-03-04 + +### Added + +- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans (sketched below). (#473) +- Configurable push frequency for exporters setup pipeline. (#504) + +### Changed + +- Rename the `exporter` directory to `exporters`. + The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`. + This resulted in all subsequent releases not becoming the default latest. + A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages. + Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags. + Consequentially, this action also renames *all* exporter packages. (#502) + +### Removed + +- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
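+
+The `RecordError` helper added in [0.2.3] above (#473) attaches an error to a span as an event. A sketch using today's signature, which takes only the error plus options (the signature in this release differed):
+
+```go
+package main
+
+import (
+	"context"
+	"errors"
+
+	"go.opentelemetry.io/otel"
+)
+
+func main() {
+	_, span := otel.Tracer("example").Start(context.Background(), "operation")
+	defer span.End()
+
+	// RecordError records the error as an exception event on the span;
+	// it does not change the span status.
+	span.RecordError(errors.New("boom"))
+}
+```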
+ +## [0.2.2] - 2020-02-27 + +### Added + +- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467) +- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467) +- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467) +- `Config` and configuring `Option` to the propagator API. (#467) +- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467) +- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467) +- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467) +- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467) +- Histogram aggregator. (#433) +- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456) +- `AlwaysParentSample` sampler to the trace API. (#455) +- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451) + +### Changed + +- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481) +- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481) +- Move correlation context propagation to correlation package. (#479) +- Do not default to putting remote span context into links. (#480) +- `Tracer.WithSpan` updated to accept `StartOptions`. (#472) +- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432) +- Renamed the `export` package to `metric` to match directory structure. (#432) +- Rename the `api/distributedcontext` package to `api/correlation`. (#444) +- Rename the `api/propagators` package to `api/propagation`. (#444) +- Move the propagators from the `propagators` package into the `trace` API package. (#444) +- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462) +- Moved all dependencies of tools package to a tools directory. (#466) + +### Removed + +- Binary propagators. (#467) +- NOOP propagator. (#467) + +### Fixed + +- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492) +- Fix a possible nil-dereference crash. (#478) +- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483) +- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484) +- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482) +- Initialize `onError` based on `Config` in prometheus exporter. (#486) +- Correct module name in prometheus exporter README. (#475) +- Removed tracer name prefix from span names. (#430) +- Fix `aggregator_test.go` import package comment. (#431) +- Improved detail in stdout exporter. (#436) +- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442) +- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442) +- Reword function documentation in gRPC plugin. (#446) +- Send the `span.kind` tag to Jaeger from the jaeger exporter.
(#441) +- Fix `metadataSupplier` in the jaeger exporter to overwrite an existing header instead of appending to it. (#441) +- Upgraded to Go 1.13 in CI. (#465) +- Correct opentelemetry.io URL in trace SDK documentation. (#464) +- Refactored reference counting logic in SDK determination of stale records. (#468) +- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469) + +## [0.2.1.1] - 2020-01-13 + +### Fixed + +- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428) + +## [0.2.1] - 2020-01-08 + +### Added + +- Global meter forwarding implementation. + This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392) +- Global trace forwarding implementation. + This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406) +- Standardize export pipeline creation in all exporters. (#395) +- Testing, organization, and comments for 64-bit field alignment. (#418) +- Script to tag all modules in the project. (#414) + +### Changed + +- Renamed `propagation` package to `propagators`. (#362) +- Renamed `B3Propagator` propagator to `B3`. (#362) +- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362) +- Renamed `BinaryPropagator` propagator to `Binary`. (#362) +- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362) +- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362) +- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362) +- Renamed `SpanOption` to `StartOption` in the trace API. (#369) +- Renamed `StartOptions` to `StartConfig` in the trace API. (#369) +- Renamed `EndOptions` to `EndConfig` in the trace API. (#369) +- `Number` now has a pointer receiver for its methods. (#375) +- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379) +- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379) +- Renamed `Message` in Event to `Name` in the trace API. (#389) +- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385) +- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400) +- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400) +- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400) +- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400) +- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400) +- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400) +- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400) +- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400) +- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400) +- Renamed the `File` option in the stdout exporter to `Writer`. (#404) +- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case. + +### Fixed + +- Aggregator import path corrected. (#421) +- Correct links in README. (#368) +- The README was updated to match latest code changes in its examples. (#374) +- Don't capitalize error statements. (#375) +- Fix ignored errors. (#375) +- Fix ambiguous variable naming. (#375) +- Removed unnecessary type casting. (#375) +- Use named parameters. (#375) +- Updated release schedule.
(#378) +- Correct http-stackdriver example module name. (#394) +- Removed the `http.request` span in `httptrace` package. (#397) +- Add comments in the metrics SDK. (#399) +- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403) +- Add documentation of compatible exporters in the README. (#405) +- Typo fix. (#408) +- Simplify span check logic in SDK tracer implementation. (#419) + +## [0.2.0] - 2019-12-03 + +### Added + +- Unary gRPC tracing example. (#351) +- Prometheus exporter. (#334) +- Dogstatsd metrics exporter. (#326) + +### Changed + +- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352) +- Rename `GetMeter` to `Meter`. (#357) +- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) +- Rename `HTTPB3Propagator` to `B3Propagator`. (#355) +- Move `/global` package to `/api/global`. (#356) +- Rename `GetTracer` to `Tracer`. (#347) + +### Removed + +- `SetAttribute` from the `Span` interface in the trace API. (#361) +- `AddLink` from the `Span` interface in the trace API. (#349) +- `Link` from the `Span` interface in the trace API. (#349) + +### Fixed + +- Exclude example directories from coverage report. (#365) +- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360) +- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project's specified minimum version and this environment variable is not needed for that version of Go. (#359) +- Run the race checker for all tests. (#354) +- Redundant commands in the Makefile are removed. (#354) +- Split the `generate` and `lint` targets of the Makefile. (#354) +- Rename `circle-ci` target to the more generic `ci` in the Makefile. (#354) +- Add example Prometheus binary to gitignore. (#358) +- Support negative numbers with the `MaxSumCount`. (#335) +- Resolve race conditions in `push_test.go` identified in #339. (#340) +- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336) +- Trace benchmark now tests both `AlwaysSample` and `NeverSample`. + Previously it was testing `AlwaysSample` twice. (#325) +- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325) +- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325) +- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint. + This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly. + This was corrected. (#333) + +## [0.1.2] - 2019-11-18 + +### Fixed + +- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328) +- Removed unnecessary unslicing of parameters that are already a slice. (#324) + +## [0.1.1] - 2019-11-18 + +This release contains a Metrics SDK with a stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch. + +### Added + +- Metrics stdout export pipeline. (#265) +- Array aggregation for raw measure metrics. (#282) +- The `core.Value` type now has a `MarshalJSON` method. (#281) + +### Removed + +- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314) +- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292) + +### Changed + +- Allocation in LabelSet construction to reduce GC overhead.
(#318) +- `trace.WithAttributes` to append values instead of replacing. (#315) +- Use a formula for tolerance in sampling tests. (#298) +- Move export types into trace and metric-specific sub-directories. (#289) +- `SpanKind` back to being based on an `int` type. (#288) + +### Fixed + +- URL to OpenTelemetry website in README. (#323) +- Name of othttp default tracer. (#321) +- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294) +- CI modules cache to correctly restore/save from/to the cache. (#316) +- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293) +- README now reflects the new code structure introduced with these changes. (#291) +- Make the basic example work. (#279) + +## [0.1.0] - 2019-11-04 + +This is the first release of the OpenTelemetry Go library. +It contains the API and SDK for trace and meter. + +### Added + +- Initial OpenTelemetry trace and metric API prototypes. +- Initial OpenTelemetry trace, metric, and export SDK packages. +- A wireframe bridge to support compatibility with OpenTracing. +- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup. +- Exporters for Jaeger, Stackdriver, and stdout. +- Propagators for binary, B3, and trace-context protocols. +- Project information and guidelines in the form of a README and CONTRIBUTING. +- Tools to build the project and a Makefile to automate the process. +- Apache-2.0 license. +- CircleCI build CI manifest files. +- CODEOWNERS file to track owners of this project. + +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 +[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 +[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 +[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 +[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0 +[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0 +[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1 +[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0 +[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1 +[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0 +[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 +[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 +[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 +[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 +[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 +[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0 +[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0 +[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 +[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 +[1.15.0/0.38.0]:
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 +[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 +[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 +[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 +[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 +[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 +[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 +[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 +[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 +[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 +[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 +[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 +[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 +[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 +[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 +[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 +[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 +[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 +[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 +[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 +[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 +[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 +[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 +[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 +[0.14.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 +[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 +[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 +[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 +[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 +[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 +[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 +[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 +[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 +[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 +[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 +[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 +[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 +[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 +[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 +[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 +[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 +[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 +[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 +[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 +[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 +[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 +[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + + +[Go 1.23]: https://go.dev/doc/go1.23 +[Go 1.22]: https://go.dev/doc/go1.22 +[Go 1.21]: https://go.dev/doc/go1.21 +[Go 1.20]: https://go.dev/doc/go1.20 +[Go 1.19]: https://go.dev/doc/go1.19 +[Go 1.18]: https://go.dev/doc/go1.18 + +[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric +[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric +[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace + +[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687 diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/CODEOWNERS b/hack/tools/vendor/go.opentelemetry.io/otel/CODEOWNERS new file mode 100644 index 0000000000..5904bb7070 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -0,0 +1,17 @@ +##################################################### +# +# List of approvers for this repository +# +##################################################### +# +# Learn about membership in OpenTelemetry community: +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md +# +# +# Learn about CODEOWNERS file format: +# https://help.github.com/en/articles/about-code-owners +# + +* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu + +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/hack/tools/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md new file mode 100644 index 0000000000..b7402576f9 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -0,0 +1,658 @@ +# Contributing to opentelemetry-go + +The Go special interest group (SIG) meets regularly. 
See the +OpenTelemetry +[community](https://github.com/open-telemetry/community#golang-sdk) +repo for information on this and other language SIGs. + +See the [public meeting +notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit) +for a summary description of past meetings. To request edit access, +join the meeting or get in touch on +[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). + +## Development + +You can view and edit the source code by cloning this repository: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go.git +``` + +Run `make test` to run the tests instead of `go test`. + +There are some generated files checked into the repo. To make sure +that the generated files are up-to-date, run `make` (or `make +precommit` - the `precommit` target is the default). + +The `precommit` target also fixes the formatting of the code and +checks the status of the go module files. + +Additionally, there is a `codespell` target that checks for common +typos in the code. It is not run by default, but you can run it +manually with `make codespell`. It will set up a virtual environment +in `venv` and install `codespell` there. + +If after running `make precommit` the output of `git status` contains +`nothing to commit, working tree clean`, then everything +is up-to-date and properly formatted. + +## Pull Requests + +### How to Send Pull Requests + +Everyone is welcome to contribute code to `opentelemetry-go` via +GitHub pull requests (PRs). + +To create a new PR, fork the project in GitHub and clone the upstream +repo: + +```sh +go get -d go.opentelemetry.io/otel +``` + +(This may print a warning about "build constraints exclude all Go +files"; just ignore it.) + +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You +can alternatively use `git` directly with: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go +``` + +(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name - +that name is a kind of a redirector to GitHub that `go get` can +understand, but `git` does not.) + +This would put the project in the `opentelemetry-go` directory in the +current working directory. + +Enter the newly created directory and add your fork as a new remote: + +```sh +git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go +``` + +Check out a new branch, make modifications, run linters and tests, update +`CHANGELOG.md`, and push the branch to your fork: + +```sh +git checkout -b <YOUR_BRANCH_NAME> +# edit files +# update changelog +make precommit +git add -p +git commit +git push <YOUR_FORK> +``` + +Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull +request ID to the entry you added to `CHANGELOG.md`. + +Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request. +Rewriting Git history makes it difficult to keep track of iterations during code review. +All pull requests are squashed to a single commit upon merge to `main`. + +### How to Receive Comments + +* If the PR is not ready for review, please put `[WIP]` in the title, + tag it as `work-in-progress`, or mark it as + [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). +* Make sure the CLA is signed and CI is clear. + +### How to Get PRs Merged + +A PR is considered **ready to merge** when: + +* It has received two qualified approvals[^1]. + + This is not enforced through automation, but needs to be validated by the + maintainer merging.
+ * The qualified approvals need to be from [Approver]s/[Maintainer]s + affiliated with different companies. Two qualified approvals from + [Approver]s or [Maintainer]s affiliated with the same company count as a + single qualified approval. + * PRs introducing changes that have already been discussed and consensus + reached only need one qualified approval. The discussion and resolution + need to be linked to the PR. + * Trivial changes[^2] only need one qualified approval. + +* All feedback has been addressed. + * All PR comments and suggestions are resolved. + * All GitHub Pull Request reviews with a status of "Request changes" have + been addressed. Another review by the objecting reviewer with a different + status can be submitted to clear the original review, or the review can be + dismissed by a [Maintainer] when the issues from the original review have + been addressed. + * Any comments or reviews that cannot be resolved between the PR author and + reviewers can be submitted to the community [Approver]s and [Maintainer]s + during the weekly SIG meeting. If consensus is reached among the + [Approver]s and [Maintainer]s during the SIG meeting, the objections to the + PR may be dismissed or resolved or the PR closed by a [Maintainer]. + * Any substantive changes to the PR require existing Approval reviews be + cleared unless the approver explicitly states that their approval persists + across changes. This includes changes resulting from other feedback. + [Approver]s and [Maintainer]s can help in clearing reviews and they should + be consulted if there are any questions. + +* The PR branch is up to date with the base branch it is merging into. + * To ensure this does not block the PR, it should be configured to allow + maintainers to update it. + +* It has been open for review for at least one working day. This gives people + reasonable time to review. + * Trivial changes[^2] do not have to wait for one day and may be merged with + a single [Maintainer]'s approval. + +* All required GitHub workflows have succeeded. +* An urgent fix can be an exception to these criteria as long as it has been + actively communicated among [Maintainer]s. + +Any [Maintainer] can merge the PR once the above criteria have been met. + +[^1]: A qualified approval is a GitHub Pull Request review with "Approve" + status from an OpenTelemetry Go [Approver] or [Maintainer]. +[^2]: Trivial changes include: typo corrections, cosmetic non-substantive + changes, documentation corrections or updates, dependency updates, etc. + +## Design Choices + +As with other OpenTelemetry clients, opentelemetry-go follows the +[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel). + +It's especially valuable to read through the [library +guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). + +### Focus on Capabilities, Not Structure Compliance + +OpenTelemetry is an evolving specification, one where the desires and +use cases are clear, but the methods to satisfy those use cases are +not. + +As such, contributions should provide functionality and behavior that +conforms to the specification, but the interface and structure are +flexible. + +It is preferable to have contributions follow the idioms of the +language rather than conform to specific API names or argument +patterns in the spec. + +For a deeper discussion, see +[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
+ +## Documentation + +Each (non-internal, non-test) package must be documented using +[Go Doc Comments](https://go.dev/doc/comment), +preferably in a `doc.go` file. + +Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples) +instead of putting code snippets in Go doc comments. +In some cases, you can even create [Testable Examples](https://go.dev/blog/examples). + +You can install and run a "local Go Doc site" in the following way: + + ```sh + go install golang.org/x/pkgsite/cmd/pkgsite@latest + pkgsite + ``` + +[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) +is an example of a very well-documented package. + +### README files + +Each (non-internal, non-test, non-documentation) package must contain a +`README.md` file containing at least a title, and a `pkg.go.dev` badge. + +The README should not be a repetition of Go doc comments. + +You can verify the presence of all README files with the `make verify-readmes` +command. + +## Style Guide + +One of the primary goals of this project is that it is actually used by +developers. With this goal in mind, the project strives to build +user-friendly and idiomatic Go code adhering to the Go community's best +practices. + +For a non-comprehensive but foundational overview of these best practices, +the [Effective Go](https://golang.org/doc/effective_go.html) documentation +is an excellent starting place. + +As a convenience for developers building this project, the `make precommit` +target will format, lint, validate, and in some cases fix the changes you plan +to submit. This check must pass for your changes to be merged. + +In addition to idiomatic Go, the project has adopted certain standards for +implementations of common patterns. These standards should be followed as a +default, and if they are not followed, documentation needs to be included as +to the reasons why. + +### Configuration + +When creating an instantiation function for a complex `type T struct`, it is +useful to allow a variable number of options to be applied. However, the strong +type system of Go restricts the function design options. There are a few ways +to solve this problem, but we have landed on the following design. + +#### `config` + +Configuration should be held in a `struct` named `config`, or prefixed with +the specific type name this configuration applies to if there are multiple +`config` types in the package. This type must contain configuration options. + +```go +// config contains configuration options for a thing. +type config struct { + // options ... +} +``` + +In general the `config` type will not need to be used externally to the +package and should be unexported. If, however, it is expected that the user +will likely want to build custom options for the configuration, the `config` +should be exported. Please include in the documentation for the `config` +how the user can extend the configuration. + +It is important that internal `config` types are not shared across package boundaries, +meaning a `config` from one package should not be directly used by another. The +one exception is the API packages. The configs from the base API, e.g. +`go.opentelemetry.io/otel/trace.TracerConfig` and +`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed +by the SDK; therefore, it is expected that these are exported. + +When a config is exported we want to maintain forward and backward +compatibility; to achieve this, no fields should be exported but should +instead be accessed by methods.
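+ +For instance, here is a minimal sketch of an exported config accessed only through methods (loosely modeled on `go.opentelemetry.io/otel/trace.TracerConfig`; the field and method below are illustrative, not a prescribed API): + +```go +// TracerConfig is an exported configuration with no exported fields. +type TracerConfig struct { + instrumentationVersion string +} + +// InstrumentationVersion returns the version of the library providing +// instrumentation, as stored in this illustrative configuration. +func (t TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} +``` + +Because the field stays unexported, it can later be renamed or restructured without breaking users of the accessor.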
+ +Optionally, it is common to include a `newConfig` function (with the same +naming scheme). This function wraps any default settings and loops over +all options to create a configured `config`. + +```go +// newConfig returns an appropriately configured config. +func newConfig(options ...Option) config { + // Set default values for config. + config := config{/* […] */} + for _, option := range options { + config = option.apply(config) + } + // Perform any validation here. + return config +} +``` + +If validation of the `config` options is also performed, this function can +additionally return an error that is expected to be handled by the +instantiation function or propagated to the user. + +Given the design goal of not having the user need to work with the `config`, +the `newConfig` function should also be unexported. + +#### `Option` + +To set the value of the options a `config` contains, a corresponding +`Option` interface type should be used. + +```go +type Option interface { + apply(config) config +} +``` + +Having `apply` unexported makes sure that it will not be used externally. +Moreover, the interface becomes sealed so the user cannot easily implement +the interface on their own. + +The `apply` method should return a modified version of the passed config. +This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap. + +The name of the interface should be prefixed in the same way the +corresponding `config` is (if at all). + +#### Options + +All user configurable options for a `config` must have a related unexported +implementation of the `Option` interface and an exported configuration +function that wraps this implementation. + +The wrapping function name should be prefixed with `With*` (or, in the +special case of boolean options, `Without*`) and should have the following +function signature. + +```go +func With*(…) Option { … } +``` + +##### `bool` Options + +```go +type defaultFalseOption bool + +func (o defaultFalseOption) apply(c config) config { + c.Bool = bool(o) + return c +} + +// WithOption sets a T to have an option included. +func WithOption() Option { + return defaultFalseOption(true) +} +``` + +```go +type defaultTrueOption bool + +func (o defaultTrueOption) apply(c config) config { + c.Bool = bool(o) + return c +} + +// WithoutOption sets a T to have the Bool option excluded. +func WithoutOption() Option { + return defaultTrueOption(false) +} +``` + +##### Declared Type Options + +```go +type myTypeOption struct { + MyType MyType +} + +func (o myTypeOption) apply(c config) config { + c.MyType = o.MyType + return c +} + +// WithMyType sets T to include MyType. +func WithMyType(t MyType) Option { + return myTypeOption{t} +} +``` + +##### Functional Options + +```go +type optionFunc func(config) config + +func (fn optionFunc) apply(c config) config { + return fn(c) +} + +// WithMyType sets t as MyType. +func WithMyType(t MyType) Option { + return optionFunc(func(c config) config { + c.MyType = t + return c + }) +} +``` + +#### Instantiation + +Use this configuration pattern to configure instantiation with a `NewT` +function. + +```go +func NewT(options ...Option) T {…} +``` + +Any required parameters can be declared before the variadic `options`. + +#### Dealing with Overlap + +Sometimes there are multiple complex `struct` types that share common +configuration and also have distinct configuration. To avoid repeated +portions of `config`s, a common `config` can be used with the union of +options being handled with the `Option` interface.
+ +For example: + +```go +// config holds options for all animals. +type config struct { + Weight float64 + Color string + MaxAltitude float64 +} + +// DogOption applies Dog-specific options. +type DogOption interface { + applyDog(config) config +} + +// BirdOption applies Bird-specific options. +type BirdOption interface { + applyBird(config) config +} + +// Option applies options for all animals. +type Option interface { + BirdOption + DogOption +} + +type weightOption float64 + +func (o weightOption) applyDog(c config) config { + c.Weight = float64(o) + return c +} + +func (o weightOption) applyBird(c config) config { + c.Weight = float64(o) + return c +} + +func WithWeight(w float64) Option { return weightOption(w) } + +type furColorOption string + +func (o furColorOption) applyDog(c config) config { + c.Color = string(o) + return c +} + +func WithFurColor(c string) DogOption { return furColorOption(c) } + +type maxAltitudeOption float64 + +func (o maxAltitudeOption) applyBird(c config) config { + c.MaxAltitude = float64(o) + return c +} + +func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } + +func NewDog(name string, o ...DogOption) Dog {…} +func NewBird(name string, o ...BirdOption) Bird {…} +``` + +### Interfaces + +To allow other developers to better comprehend the code, it is important +to ensure it is sufficiently documented. One simple measure that contributes +to this aim is self-documentation by naming method parameters. Therefore, +where appropriate, methods of every exported interface type should have +their parameters appropriately named. + +#### Interface Stability + +All exported stable interfaces that include the following warning in their +documentation are allowed to be extended with additional methods. + +> Warning: methods may be added to this interface in minor releases. + +These interfaces are defined by the OpenTelemetry specification and will be +updated as the specification evolves. + +Otherwise, stable interfaces MUST NOT be modified. + +#### How to Change Specification Interfaces + +When an API change must be made, we will update the SDK with the new method one +release before the API change. This will allow the SDK one version before the +API change to work seamlessly with the new API. + +If an incompatible version of the SDK is used with the new API, the application +will fail to compile. + +#### How Not to Change Specification Interfaces + +We have explored using a v2 of the API to change interfaces and found that there +was no way to introduce a v2 and have it work seamlessly with the v1 of the API. +Problems happened with libraries that upgraded to v2 when an application did not; +they would not produce any telemetry. + +More detail on the approaches considered and their limitations can be found in +the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920) +issue. + +#### How to Change Other Interfaces + +If new functionality is needed for an interface that cannot be changed, it MUST +be added by including an additional interface. That added interface can be a +simple interface for the specific functionality that you want to add or it can +be a super-set of the original interface.
For example, if you wanted to add a +`Close` method to the `Exporter` interface: + +```go +type Exporter interface { + Export() +} +``` + +A new interface, `Closer`, can be added: + +```go +type Closer interface { + Close() +} +``` + +Code that is passed the `Exporter` interface can now check to see if the passed +value also satisfies the new interface. For example: + +```go +func caller(e Exporter) { + /* ... */ + if c, ok := e.(Closer); ok { + c.Close() + } + /* ... */ +} +``` + +Alternatively, a new type that is the super-set of an `Exporter` can be created. + +```go +type ClosingExporter struct { + Exporter +} + +func (e ClosingExporter) Close() { /* ... */ } +``` + +This new type can be used similarly to the simple interface above in that a +passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type +and the `Close` method called. + +This super-set approach can be useful if there is explicit behavior that needs +to be coupled with the original type and passed as a unified type to a new +function, but, because of this coupling, it also limits the applicability of +the added functionality. If there exist other interfaces where this +functionality should be added, each one will need its own super-set +interface and will duplicate the pattern. For this reason, the simple targeted +interface that defines the specific functionality should be preferred. + +See also: +[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces). + +### Testing + +The tests should never leak goroutines. + +Use the term `ConcurrentSafe` in the test name when it aims to verify the +absence of race conditions. + +### Internal packages + +The use of internal packages should be scoped to a single module. A sub-module +should never import from a parent internal package. This creates a coupling +between the two modules where a user can upgrade the parent without the child, +and if the internal package API has changed, the upgrade will fail[^3]. + +There are two known exceptions to this rule: + +- `go.opentelemetry.io/otel/internal/global` + - This package manages global state for all of opentelemetry-go. It needs to + be a single package in order to ensure the uniqueness of the global state. +- `go.opentelemetry.io/otel/internal/baggage` + - This package provides values in a `context.Context` that need to be + recognized by `go.opentelemetry.io/otel/baggage` and + `go.opentelemetry.io/otel/bridge/opentracing` but remain private. + +If you have duplicate code in multiple modules, make that code into a Go +template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl] +to render the templates in the desired locations. See [#4404] for an example of +this. + +[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548 + +### Ignoring context cancellation + +OpenTelemetry API implementations need to ignore the cancellation of the context that is +passed when recording a value (e.g. starting a span, recording a measurement, emitting a log). +Recording methods should not return an error describing the cancellation state of the context +when they complete, nor should they abort any work. + +This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for +the method. In that case, the context cancellation can be used for the timeout with the +restriction that this behavior is documented for the method. Otherwise, timeouts +are expected to be handled by the user calling the API, not the implementation.
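+ +As a minimal sketch of this rule (the `counter` type below is hypothetical and used only for illustration, not part of the API), a recording method proceeds and reports nothing about cancellation even when the passed context is already canceled: + +```go +package example + +import ( + "context" + "sync/atomic" +) + +// counter is a hypothetical instrument used only for illustration. +type counter struct { + value atomic.Int64 +} + +// Add records incr. It deliberately does not inspect ctx.Err() or +// ctx.Done(): recording must not be aborted by context cancellation. +func (c *counter) Add(ctx context.Context, incr int64) { + _ = ctx // cancellation intentionally ignored when recording + c.value.Add(incr) +} +```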
+ +Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method +of a provider. It is assumed the context passed from a user is not used for this purpose. + +Outside of the direct recording of telemetry from the API (e.g. exporting telemetry, +force flushing telemetry, shutting down a signal provider) the context cancellation +should be honored. This means all work done on behalf of the user provided context +should be canceled. + +## Approvers and Maintainers + +### Approvers + +- [Chester Cheung](https://github.com/hanyuancheung), Tencent + +### Maintainers + +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Damien Mathieu](https://github.com/dmathieu), Elastic +- [David Ashpole](https://github.com/dashpole), Google +- [Robert Pająk](https://github.com/pellared), Splunk +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics +- [Tyler Yahn](https://github.com/MrAlias), Splunk + +### Emeritus + +- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep +- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Evan Torrie](https://github.com/evantorrie), Yahoo + +### Become an Approver or a Maintainer + +See the [community membership document in OpenTelemetry community +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). + +[Approver]: #approvers +[Maintainer]: #maintainers +[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl +[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404 diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/LICENSE b/hack/tools/vendor/go.opentelemetry.io/otel/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/Makefile b/hack/tools/vendor/go.opentelemetry.io/otel/Makefile new file mode 100644 index 0000000000..070b1e57df --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/Makefile @@ -0,0 +1,298 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +TOOLS_MOD_DIR := ./internal/tools + +ALL_DOCS := $(shell find . -name '*.md' -type f | sort) +ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) +OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +ALL_COVERAGE_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + +GO = go +TIMEOUT = 60 + +.DEFAULT_GOAL := precommit + +.PHONY: precommit ci +precommit: generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default +ci: generate license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage + +# Tools + +TOOLS = $(CURDIR)/.tools + +$(TOOLS): + @mkdir -p $@ +$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ $(PACKAGE) + +MULTIMOD = $(TOOLS)/multimod +$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod + +SEMCONVGEN = $(TOOLS)/semconvgen +$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen + +CROSSLINK = $(TOOLS)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +GOLANGCI_LINT = $(TOOLS)/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint + +MISSPELL = $(TOOLS)/misspell +$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell + +GOCOVMERGE = $(TOOLS)/gocovmerge +$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge + +STRINGER = $(TOOLS)/stringer +$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer + +PORTO = $(TOOLS)/porto +$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + +GOJQ = $(TOOLS)/gojq +$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq + +GOTMPL = $(TOOLS)/gotmpl +$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl + +GORELEASE = $(TOOLS)/gorelease +$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease + +GOVULNCHECK = $(TOOLS)/govulncheck +$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck + +.PHONY: tools +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) + +# Virtualized python tools via docker + +# The directory where the virtual environment is created. +VENVDIR := venv + +# The directory where the python tools are installed. +PYTOOLS := $(VENVDIR)/bin + +# The pip executable in the virtual environment. +PIP := $(PYTOOLS)/pip + +# The directory in the docker image where the current directory is mounted. +WORKDIR := /workdir + +# The python image to use for the virtual environment. +PYTHONIMAGE := python:3.11.3-slim-bullseye + +# Run the python image with the current directory mounted. +DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) + +# Create a virtual environment for Python tools. +$(PYTOOLS): +# The `--upgrade` flag is needed to ensure that the virtual environment is +# created with the latest pip version. + @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" + +# Install python packages into the virtual environment. +$(PYTOOLS)/%: $(PYTOOLS) + @$(DOCKERPY) $(PIP) install -r requirements.txt + +CODESPELL = $(PYTOOLS)/codespell +$(CODESPELL): PACKAGE=codespell + +# Generate + +.PHONY: generate +generate: go-generate vanity-import-fix + +.PHONY: go-generate +go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) +go-generate/%: DIR=$* +go-generate/%: $(STRINGER) $(GOTMPL) + @echo "$(GO) generate $(DIR)/..." \ + && cd $(DIR) \ + && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... 
+ +.PHONY: vanity-import-fix +vanity-import-fix: $(PORTO) + @$(PORTO) --include-internal -w . + +# Generate go.work file for local development. +.PHONY: go-work +go-work: $(CROSSLINK) + $(CROSSLINK) work --root=$(shell pwd) + +# Build + +.PHONY: build + +build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) +build/%: DIR=$* +build/%: + @echo "$(GO) build $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) build ./... + +build-tests/%: DIR=$* +build-tests/%: + @echo "$(GO) build tests $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null + +# Tests + +TEST_TARGETS := test-default test-bench test-short test-verbose test-race +.PHONY: $(TEST_TARGETS) test +test-default test-race: ARGS=-race +test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. +test-short: ARGS=-short +test-verbose: ARGS=-v -race +$(TEST_TARGETS): test +test: $(OTEL_GO_MOD_DIRS:%=test/%) +test/%: DIR=$* +test/%: + @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) + +COVERAGE_MODE = atomic +COVERAGE_PROFILE = coverage.out +.PHONY: test-coverage +test-coverage: $(GOCOVMERGE) + @set -e; \ + printf "" > coverage.txt; \ + for dir in $(ALL_COVERAGE_MOD_DIRS); do \ + echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) list ./... \ + | grep -v third_party \ + | grep -v 'semconv/v.*' \ + | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ + $(GO) tool cover -html=coverage.out -o coverage.html); \ + done; \ + $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt + +.PHONY: benchmark +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) +benchmark/%: + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ + && cd $* \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. + +.PHONY: golangci-lint golangci-lint-fix +golangci-lint-fix: ARGS=--fix +golangci-lint-fix: golangci-lint +golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) +golangci-lint/%: DIR=$* +golangci-lint/%: $(GOLANGCI_LINT) + @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ + && cd $(DIR) \ + && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) + +.PHONY: crosslink +crosslink: $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune + +.PHONY: go-mod-tidy +go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) +go-mod-tidy/%: DIR=$* +go-mod-tidy/%: crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ + && $(GO) mod tidy -compat=1.21 + +.PHONY: lint-modules +lint-modules: go-mod-tidy + +.PHONY: lint +lint: misspell lint-modules golangci-lint govulncheck + +.PHONY: vanity-import-check +vanity-import-check: $(PORTO) + @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) + +.PHONY: misspell +misspell: $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + +.PHONY: govulncheck +govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) +govulncheck/%: DIR=$* +govulncheck/%: $(GOVULNCHECK) + @echo "govulncheck ./... in $(DIR)" \ + && cd $(DIR) \ + && $(GOVULNCHECK) ./... + +.PHONY: codespell +codespell: $(CODESPELL) + @$(DOCKERPY) $(CODESPELL) + +.PHONY: license-check +license-check: + @licRes=$$(for f in $$(find . 
-type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ + awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: check-clean-work-tree +check-clean-work-tree: + @if ! git diff --quiet; then \ + echo; \ + echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ + echo; \ + git status; \ + exit 1; \ + fi + +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) + [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + +.PHONY: gorelease +gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) +gorelease/%: DIR=$* +gorelease/%:| $(GORELEASE) + @echo "gorelease in $(DIR):" \ + && cd $(DIR) \ + && $(GORELEASE) \ + || echo "" + +.PHONY: verify-mods +verify-mods: $(MULTIMOD) + $(MULTIMOD) verify + +.PHONY: prerelease +prerelease: verify-mods + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) prerelease -m ${MODSET} + +COMMIT ?= "HEAD" +.PHONY: add-tags +add-tags: verify-mods + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} + +.PHONY: lint-markdown +lint-markdown: + docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md + +.PHONY: verify-readmes +verify-readmes: + ./verify_readmes.sh diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/README.md new file mode 100644 index 0000000000..657df34710 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/README.md @@ -0,0 +1,118 @@ +# OpenTelemetry-Go + +[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) +[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) + +OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). +It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. 
+ +## Project Status + +| Signal | Status | +|---------|--------------------| +| Traces | Stable | +| Metrics | Stable | +| Logs | Beta[^1] | + +Progress and status specific to this repository are tracked in our +[project boards](https://github.com/open-telemetry/opentelemetry-go/projects) +and +[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). + +Project versioning information and stability guarantees can be found in the +[versioning documentation](VERSIONING.md). + +[^1]: https://github.com/orgs/open-telemetry/projects/43 + +### Compatibility + +OpenTelemetry-Go ensures compatibility with the current supported versions of +the [Go language](https://golang.org/doc/devel/release#policy): + +> Each major Go release is supported until there are two newer major releases. +> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. + +For versions of Go that are no longer supported upstream, opentelemetry-go will +stop ensuring compatibility with these versions in the following manner: + +- A minor release of opentelemetry-go will be made to add support for the new + supported release of Go. +- The following minor release of opentelemetry-go will remove compatibility + testing for the oldest (now archived upstream) version of Go. This and + future releases of opentelemetry-go may include features only supported by + the currently supported versions of Go. + +Currently, this project supports the following environments. + +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.21 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Ubuntu | 1.21 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| Linux | 1.21 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS 13 | 1.21 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| macOS | 1.21 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.21 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | +| Windows | 1.21 | 386 | + +While this project should work for other systems, no compatibility guarantees +are made for those systems currently. + +## Getting Started + +You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/). + +OpenTelemetry's goal is to provide a single set of APIs to capture distributed +traces and metrics from your application and send them to an observability +platform. This project allows you to do just that for applications written in +Go. There are two steps to this process: instrument your application, and +configure an exporter. + +### Instrumentation + +To start capturing distributed traces and metric events from your application, +it first needs to be instrumented. The easiest way to do this is by using an +instrumentation library for your code. Be sure to check out [the officially +supported instrumentation +libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). + +If you need to extend the telemetry an instrumentation library provides, or want +to build your own instrumentation for your application directly, you will need +to use the +[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) +package. The included [examples](./example/) are a good way to see some +practical uses of this process.
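+ +As a small, hedged sketch of manual instrumentation (the tracer and span names are illustrative, and a TracerProvider is assumed to be registered elsewhere; until one is, the global tracer is a no-op): + +```go +package main + +import ( + "context" + + "go.opentelemetry.io/otel" +) + +func main() { + // Acquire a tracer from the globally registered TracerProvider. + tracer := otel.Tracer("example.com/hypothetical/app") + + // Start a span, do the work, and end the span when done. + ctx, span := tracer.Start(context.Background(), "do-work") + defer span.End() + + doWork(ctx) +} + +func doWork(ctx context.Context) { /* application logic */ } +```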
+
+### Export
+
+Now that your application is instrumented to collect telemetry, it needs an
+export pipeline to send that telemetry to an observability platform.
+
+All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
+
+| Exporter                              | Logs | Metrics | Traces |
+|---------------------------------------|:----:|:-------:|:------:|
+| [OTLP](./exporters/otlp/)             |  ✓   |    ✓    |   ✓    |
+| [Prometheus](./exporters/prometheus/) |      |    ✓    |        |
+| [stdout](./exporters/stdout/)         |  ✓   |    ✓    |   ✓    |
+| [Zipkin](./exporters/zipkin/)         |      |         |   ✓    |
+
+## Contributing
+
+See the [contributing documentation](CONTRIBUTING.md).
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/RELEASING.md b/hack/tools/vendor/go.opentelemetry.io/otel/RELEASING.md
new file mode 100644
index 0000000000..59992984d4
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -0,0 +1,146 @@
+# Release Process
+
+## Semantic Convention Generation
+
+New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
+The `semconv-generate` make target is used for this.
+
+1. Check out a local copy of the [OpenTelemetry Semantic Conventions] at the desired release tag.
+2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
+3. Run the `make semconv-generate ...` target from this repository.
+
+For example,
+
+```sh
+export TAG="v1.21.0" # Change to the release version you are generating.
+export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
+docker pull otel/semconvgen:latest
+make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
+```
+
+This should create a new sub-package of [`semconv`](./semconv).
+Ensure things look correct before submitting a pull request to include the addition.
+
+## Breaking changes validation
+
+You can run `make gorelease`, which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
+
+You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+
+## Verify changes for contrib repository
+
+If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository.
+
+Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes.
+
+## Pre-Release
+
+First, decide which module sets will be released and update their versions
+in `versions.yaml`. Commit this change to a new branch.
+
+Update go.mod for submodules to depend on the new release, which will happen in the next step.
+
+1. Run the `prerelease` make target. It creates a branch
+   `prerelease_<module set>_<new tag>` that will contain all release changes.
+
+   ```
+   make prerelease MODSET=<module set>
+   ```
+
+2. Verify the changes.
+
+   ```
+   git diff ...prerelease_<module set>_<new tag>
+   ```
+
+   This should have changed the version for all modules to be `<new tag>`.
+   If these changes look correct, merge them into your pre-release branch:
+
+   ```
+   git merge prerelease_<module set>_<new tag>
+   ```
+
+3. Update the [Changelog](./CHANGELOG.md).
+   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+     To verify this, you can look directly at the commits since the `<last tag>`.
+
+     ```
+     git --no-pager log --pretty=oneline "<last tag>..HEAD"
+     ```
+
+   - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+   - Make sure the new section is under the comment for the released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future.
+   - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+   Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged, it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. For each module set that will be released, run the `add-tags` make target
+   using the `<commit hash>` of the commit on the main branch for the merged Pull Request.
+
+   ```
+   make add-tags MODSET=<module set> COMMIT=<commit hash>
+   ```
+
+   It should only be necessary to provide an explicit `COMMIT` value if the
+   current `HEAD` of your working directory is not the correct commit.
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+   Make sure you push all sub-modules as well.
+
+   ```
+   git push upstream <new tag>
+   git push upstream <submodules-path/new tag>
+   ...
+   ```
+
+## Release
+
+Finally, create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+## Verify Examples
+
+After releasing, verify that the examples build outside of the repository.
+
+```
+./verify_examples.sh
+```
+
+The script copies the examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
+This ensures they build with the published release, not the local copy.
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified, be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
+ +[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions +[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/ +[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go + +### Demo Repository + +Bump the dependencies in the following Go services: + +- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) +- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) +- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/VERSIONING.md b/hack/tools/vendor/go.opentelemetry.io/otel/VERSIONING.md new file mode 100644 index 0000000000..412f1e362b --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/VERSIONING.md @@ -0,0 +1,224 @@ +# Versioning + +This document describes the versioning policy for this repository. This policy +is designed so the following goals can be achieved. + +**Users are provided a codebase of value that is stable and secure.** + +## Policy + +* Versioning of this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver + 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. + * New methods may be added to exported API interfaces. All exported + interfaces that fall within this exception will include the following + paragraph in their public documentation. + + > Warning: methods may be added to this interface in minor releases. + + * If a module is version `v2` or higher, the major version of the module + must be included as a `/vN` at the end of the module paths used in + `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require + go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path + (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the + paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + `@v2.0.1` in that example. One way to think about it is that the module + name now includes the `/v2`, so include `/v2` whenever you are using the + module name). + * If a module is version `v0` or `v1`, do not include the major version in + either the module path or the import path. + * Modules will be used to encapsulate signals and components. + * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API will be versioned + with a major version greater than `v0`. + * The decision to make a module stable will be made on a case-by-case + basis by the maintainers of this project. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. 
+  * All stable modules that use the same major version number will use the
+    same entire version number.
+    * Stable modules may be released with an incremented minor or patch
+      version even though that module has not been changed, but rather so
+      that it will remain at the same version as other stable modules that
+      did undergo change.
+    * When an experimental module becomes stable, a new stable module version
+      will be released and will include this now stable module. The new
+      stable module version will be an increment of the minor version number
+      and will be applied to all existing stable modules as well as the newly
+      stable module being released.
+* Versioning of the associated [contrib
+  repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of
+  this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+  * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
+    * If a module is version `v2` or higher, the
+      major version of the module must be included as a `/vN` at the end of the
+      module paths used in `go.mod` files (e.g., `module
+      go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
+      go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
+      package import path (e.g., `import
+      "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
+      the paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
+      is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+      it is that the module name now includes the `/v2`, so include `/v2`
+      whenever you are using the module name).
+    * If a module is version `v0` or `v1`, do not include the major version
+      in either the module path or the import path.
+  * In addition to public APIs, telemetry produced by stable instrumentation
+    will remain stable and backwards compatible. This is to avoid breaking
+    alerts and dashboards.
+  * Modules will be used to encapsulate instrumentation, detectors, exporters,
+    propagators, and any other independent sets of related components.
+  * Experimental modules still under active development will be versioned at
+    `v0` to imply the stability guarantee defined by
+    [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+    > Major version zero (0.y.z) is for initial development. Anything MAY
+    > change at any time. The public API SHOULD NOT be considered stable.
+
+  * Mature modules for which we guarantee a stable public API and telemetry will
+    be versioned with a major version greater than `v0`.
+  * Experimental modules will start their versioning at `v0.0.0` and will
+    increment their minor version when backwards incompatible changes are
+    released and increment their patch version when backwards compatible
+    changes are released.
+  * Stable contrib modules cannot depend on experimental modules from this
+    project.
+  * All stable contrib modules of the same major version with this project
+    will use the same entire version as this project.
+    * Stable modules may be released with an incremented minor or patch
+      version even though that module's code has not been changed. Instead,
+      the only change that will have been included is to have updated that
+      module's dependency on this project's stable APIs.
+    * When an experimental module in contrib becomes stable, a new stable
+      module version will be released and will include this now stable
+      module. The new stable module version will be an increment of the minor
+      version number and will be applied to all existing stable contrib
+      modules, this project's modules, and the newly stable module being
+      released.
+  * Contrib modules will be kept up to date with this project's releases.
+    * Due to the dependency contrib modules will implicitly have on this
+      project's modules, the release of stable contrib modules to match the
+      released version number will be staggered after this project's release.
+      There is no explicit time guarantee for how long after this project's
+      release the contrib release will be. Effort should be made to keep them
+      as close in time as possible.
+    * No additional stable release in this project can be made until the
+      contrib repository has a matching stable release.
+    * No release can be made in the contrib repository after this project's
+      stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
+
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy, the following
+example is provided. This project is simplified to include only the following
+modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+These modules have been developed to a point where the `otel/trace`,
+`otel/baggage`, and `otel/sdk/trace` modules should be considered for a stable
+release. The `otel/metric` and `otel/sdk/metric` modules are still under
+active development, and the `otel` module depends on both `otel/trace` and
+`otel/metric`.
+
+The `otel` package is refactored to remove its dependencies on `otel/metric` so
+it can be released as stable as well. With that done, the following release
+candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
+
+A few minor issues are discovered in the `otel/trace` package. These issues are
+resolved with some minor, but backwards incompatible, changes and are released
+as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our
+versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are
+released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic
+versioning definition of
+precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
+will correctly be interpreted as the successor to the previous release
+candidates.
+
+Active development of this project continues. The `otel/metric` module now has
+backwards incompatible changes to its API that need to be released, and the
+`otel/baggage` module has a minor bug fix that needs to be released. The
+following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison, and
+the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
+bumped its version. This bump of the `otel/sdk/metric` package makes sense
+given their coupling, though it is not explicitly required by our versioning
+policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
+point where they should be evaluated for stability. The `otel` module is
+reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be ready for a stable release.
+They are then released as version `v1.1.0` (the minor version is incremented to
+indicate the addition of a new signal).
+
+* `otel`: `v1.1.0`
+* `otel/trace`: `v1.1.0`
+* `otel/metric`: `v1.1.0`
+* `otel/baggage`: `v1.1.0`
+* `otel/sdk/trace`: `v1.1.0`
+* `otel/sdk/metric`: `v1.1.0`
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/attribute/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/README.md
new file mode 100644
index 0000000000..5b3da8f14c
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/README.md
@@ -0,0 +1,3 @@
+# Attribute
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute)
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/attribute/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/doc.go
new file mode 100644
index 0000000000..eef51ebc2a
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -0,0 +1,5 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package attribute provides key and value attributes.
+package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/encoder.go
new file mode 100644
index 0000000000..318e42fcab
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -0,0 +1,135 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"bytes"
+	"sync"
+	"sync/atomic"
+)
+
+type (
+	// Encoder is a mechanism for serializing an attribute set into a specific
+	// string representation that supports caching, to avoid repeated
+	// serialization. An example could be an exporter encoding the attribute
+	// set into a wire representation.
+	Encoder interface {
+		// Encode returns the serialized encoding of the attribute set using
+		// its Iterator. This result may be cached by an attribute.Set.
+		Encode(iterator Iterator) string
+
+		// ID returns a value that is unique for each class of attribute
+		// encoder. Attribute encoders allocate these using `NewEncoderID`.
+		ID() EncoderID
+	}
+
+	// EncoderID is used to identify distinct Encoder
+	// implementations, for caching encoded results.
+	EncoderID struct {
+		value uint64
+	}
+
+	// defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
+	// allocations used in encoding attributes. This implementation encodes a
+	// comma-separated list of key=value, with '\'-escaping of '=', ',', and
+	// '\'.
+	defaultAttrEncoder struct {
+		// pool is a pool of attribute set builders. The buffers in this pool
+		// grow to a size such that most attribute encodings will not allocate
+		// new memory.
+		pool sync.Pool // *bytes.Buffer
+	}
+)
+
+// escapeChar is used to ensure uniqueness of the attribute encoding where
+// keys or values contain either '=' or ','. Since there is no parser needed
+// for this encoding and its only requirement is to be unique, this choice is
+// arbitrary. Users will see these in some exporters (e.g., stdout), so the
+// backslash ('\') is used as a conventional choice.
+const escapeChar = '\\'
+
+var (
+	_ Encoder = &defaultAttrEncoder{}
+
+	// encoderIDCounter is for generating IDs for other attribute encoders.
+	encoderIDCounter uint64
+
+	defaultEncoderOnce     sync.Once
+	defaultEncoderID       = NewEncoderID()
+	defaultEncoderInstance *defaultAttrEncoder
+)
+
+// NewEncoderID returns a unique attribute encoder ID. It should be called
+// once per type of attribute encoder, preferably in init() or in a var
+// definition.
+func NewEncoderID() EncoderID {
+	return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
+}
+
+// DefaultEncoder returns an attribute encoder that encodes attributes in such
+// a way that each escaped attribute's key is followed by an equal sign and
+// then by an escaped attribute's value. All key-value pairs are separated by
+// a comma.
+//
+// Escaping is done by prepending a backslash before either a backslash, equal
+// sign or a comma.
+func DefaultEncoder() Encoder {
+	defaultEncoderOnce.Do(func() {
+		defaultEncoderInstance = &defaultAttrEncoder{
+			pool: sync.Pool{
+				New: func() interface{} {
+					return &bytes.Buffer{}
+				},
+			},
+		}
+	})
+	return defaultEncoderInstance
+}
+
+// Encode is a part of an implementation of the Encoder interface.
+func (d *defaultAttrEncoder) Encode(iter Iterator) string {
+	buf := d.pool.Get().(*bytes.Buffer)
+	defer d.pool.Put(buf)
+	buf.Reset()
+
+	for iter.Next() {
+		i, keyValue := iter.IndexedAttribute()
+		if i > 0 {
+			_, _ = buf.WriteRune(',')
+		}
+		copyAndEscape(buf, string(keyValue.Key))
+
+		_, _ = buf.WriteRune('=')
+
+		if keyValue.Value.Type() == STRING {
+			copyAndEscape(buf, keyValue.Value.AsString())
+		} else {
+			_, _ = buf.WriteString(keyValue.Value.Emit())
+		}
+	}
+	return buf.String()
+}
+
+// ID is a part of an implementation of the Encoder interface.
+func (*defaultAttrEncoder) ID() EncoderID {
+	return defaultEncoderID
+}
+
+// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
+// making the default encoding unique.
+func copyAndEscape(buf *bytes.Buffer, val string) {
+	for _, ch := range val {
+		switch ch {
+		case '=', ',', escapeChar:
+			_, _ = buf.WriteRune(escapeChar)
+		}
+		_, _ = buf.WriteRune(ch)
+	}
+}
+
+// Valid returns true if this encoder ID was allocated by
+// `NewEncoderID`. Invalid encoder IDs will not be cached.
+func (id EncoderID) Valid() bool { + return id.value != 0 +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/attribute/filter.go b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/filter.go new file mode 100644 index 0000000000..be9cd922d8 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/filter.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Filter supports removing certain attributes from attribute sets. When +// the filter returns true, the attribute will be kept in the filtered +// attribute set. When the filter returns false, the attribute is excluded +// from the filtered attribute set, and the attribute instead appears in +// the removed list of excluded attributes. +type Filter func(KeyValue) bool + +// NewAllowKeysFilter returns a Filter that only allows attributes with one of +// the provided keys. +// +// If keys is empty a deny-all filter is returned. +func NewAllowKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return false } + } + + allowed := make(map[Key]struct{}) + for _, k := range keys { + allowed[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := allowed[kv.Key] + return ok + } +} + +// NewDenyKeysFilter returns a Filter that only allows attributes +// that do not have one of the provided keys. +// +// If keys is empty an allow-all filter is returned. +func NewDenyKeysFilter(keys ...Key) Filter { + if len(keys) <= 0 { + return func(kv KeyValue) bool { return true } + } + + forbid := make(map[Key]struct{}) + for _, k := range keys { + forbid[k] = struct{}{} + } + return func(kv KeyValue) bool { + _, ok := forbid[kv.Key] + return !ok + } +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/iterator.go new file mode 100644 index 0000000000..f2ba89ce4b --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -0,0 +1,150 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Iterator allows iterating over the set of attributes in order, sorted by +// key. +type Iterator struct { + storage *Set + idx int +} + +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. +type MergeIterator struct { + one oneIterator + two oneIterator + current KeyValue +} + +type oneIterator struct { + iter Iterator + done bool + attr KeyValue +} + +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. +func (i *Iterator) Next() bool { + i.idx++ + return i.idx < i.Len() +} + +// Label returns current KeyValue. Must be called only after Next returns +// true. +// +// Deprecated: Use Attribute instead. +func (i *Iterator) Label() KeyValue { + return i.Attribute() +} + +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. +func (i *Iterator) Attribute() KeyValue { + kv, _ := i.storage.Get(i.idx) + return kv +} + +// IndexedLabel returns current index and attribute. Must be called only +// after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. 
+func (i *Iterator) IndexedLabel() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. +func (i *Iterator) Len() int { + return i.storage.Len() +} + +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. +func (i *Iterator) ToSlice() []KeyValue { + l := i.Len() + if l == 0 { + return nil + } + i.idx = -1 + slice := make([]KeyValue, 0, l) + for i.Next() { + slice = append(slice, i.Attribute()) + } + return slice +} + +// NewMergeIterator returns a MergeIterator for merging two attribute sets. +// Duplicates are resolved by taking the value from the first set. +func NewMergeIterator(s1, s2 *Set) MergeIterator { + mi := MergeIterator{ + one: makeOne(s1.Iter()), + two: makeOne(s2.Iter()), + } + return mi +} + +func makeOne(iter Iterator) oneIterator { + oi := oneIterator{ + iter: iter, + } + oi.advance() + return oi +} + +func (oi *oneIterator) advance() { + if oi.done = !oi.iter.Next(); !oi.done { + oi.attr = oi.iter.Attribute() + } +} + +// Next returns true if there is another attribute available. +func (m *MergeIterator) Next() bool { + if m.one.done && m.two.done { + return false + } + if m.one.done { + m.current = m.two.attr + m.two.advance() + return true + } + if m.two.done { + m.current = m.one.attr + m.one.advance() + return true + } + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins + m.one.advance() + m.two.advance() + return true + } + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr + m.one.advance() + return true + } + m.current = m.two.attr + m.two.advance() + return true +} + +// Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. +func (m *MergeIterator) Label() KeyValue { + return m.current +} + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/attribute/key.go b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/key.go new file mode 100644 index 0000000000..d9a22c6502 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/key.go @@ -0,0 +1,123 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +// Key represents the key part in key-value pairs. It's a string. The +// allowed character set in the key depends on the use of the key. +type Key string + +// Bool creates a KeyValue instance with a BOOL Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- Bool(name, value). +func (k Key) Bool(v bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolValue(v), + } +} + +// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. +// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- BoolSlice(name, value). +func (k Key) BoolSlice(v []bool) KeyValue { + return KeyValue{ + Key: k, + Value: BoolSliceValue(v), + } +} + +// Int creates a KeyValue instance with an INT64 Value. 
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int(name, value).
+func (k Key) Int(v int) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: IntValue(v),
+	}
+}
+
+// IntSlice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- IntSlice(name, value).
+func (k Key) IntSlice(v []int) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: IntSliceValue(v),
+	}
+}
+
+// Int64 creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64(name, value).
+func (k Key) Int64(v int64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Int64Value(v),
+	}
+}
+
+// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64Slice(name, value).
func (k Key) Int64Slice(v []int64) KeyValue {
	return KeyValue{
		Key:   k,
		Value: Int64SliceValue(v),
	}
}
+
+// Float64 creates a KeyValue instance with a FLOAT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64(name, value).
+func (k Key) Float64(v float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64Value(v),
+	}
+}
+
+// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64Slice(name, value).
+func (k Key) Float64Slice(v []float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64SliceValue(v),
+	}
+}
+
+// String creates a KeyValue instance with a STRING Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- String(name, value).
+func (k Key) String(v string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringValue(v),
+	}
+}
+
+// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- StringSlice(name, value).
+func (k Key) StringSlice(v []string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringSliceValue(v),
+	}
+}
+
+// Defined returns true for non-empty keys.
+func (k Key) Defined() bool {
+	return len(k) != 0
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/attribute/kv.go b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/kv.go
new file mode 100644
index 0000000000..3028f9a40f
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"fmt"
+)
+
+// KeyValue holds a key and value pair.
+type KeyValue struct {
+	Key   Key
+	Value Value
+}
+
+// Valid reports whether kv is a valid OpenTelemetry attribute.
+func (kv KeyValue) Valid() bool {
+	return kv.Key.Defined() && kv.Value.Type() != INVALID
+}
+
+// Bool creates a KeyValue with a BOOL Value type.
+func Bool(k string, v bool) KeyValue {
+	return Key(k).Bool(v)
+}
+
+// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
+func BoolSlice(k string, v []bool) KeyValue {
+	return Key(k).BoolSlice(v)
+}
+
+// Int creates a KeyValue with an INT64 Value type.
+func Int(k string, v int) KeyValue {
+	return Key(k).Int(v)
+}
+
+// IntSlice creates a KeyValue with an INT64SLICE Value type.
+func IntSlice(k string, v []int) KeyValue {
+	return Key(k).IntSlice(v)
+}
+
+// Int64 creates a KeyValue with an INT64 Value type.
+func Int64(k string, v int64) KeyValue {
+	return Key(k).Int64(v)
+}
+
+// Int64Slice creates a KeyValue with an INT64SLICE Value type.
+func Int64Slice(k string, v []int64) KeyValue {
+	return Key(k).Int64Slice(v)
+}
+
+// Float64 creates a KeyValue with a FLOAT64 Value type.
+func Float64(k string, v float64) KeyValue {
+	return Key(k).Float64(v)
+}
+
+// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
+func Float64Slice(k string, v []float64) KeyValue {
+	return Key(k).Float64Slice(v)
+}
+
+// String creates a KeyValue with a STRING Value type.
+func String(k, v string) KeyValue {
+	return Key(k).String(v)
+}
+
+// StringSlice creates a KeyValue with a STRINGSLICE Value type.
+func StringSlice(k string, v []string) KeyValue {
+	return Key(k).StringSlice(v)
+}
+
+// Stringer creates a new key-value pair with a passed name and a string
+// value generated by the passed Stringer interface.
+func Stringer(k string, v fmt.Stringer) KeyValue {
+	return Key(k).String(v.String())
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/attribute/set.go b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/set.go
new file mode 100644
index 0000000000..bff9c7fdbb
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -0,0 +1,431 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"cmp"
+	"encoding/json"
+	"reflect"
+	"slices"
+	"sort"
+)
+
+type (
+	// Set is the representation for a distinct attribute set. It manages an
+	// immutable set of attributes, with an internal cache for storing
+	// attribute encodings.
+	//
+	// This type will remain comparable for backwards compatibility. The
+	// equivalence of Sets across versions is not guaranteed to be stable.
+	// Prior versions may find two Sets to be equal or not when compared
+	// directly (i.e. ==), but subsequent versions may not. Users should use
+	// the Equals method to ensure stable equivalence checking.
+	//
+	// Users should also use the Distinct returned from Equivalent as a map key
+	// instead of a Set directly. In addition to that type providing guarantees
+	// on stable equivalence, it may also provide performance improvements.
+	Set struct {
+		equivalent Distinct
+	}
+
+	// Distinct is a unique identifier of a Set.
+	//
+	// Distinct is designed to ensure equivalence stability: comparisons
+	// will return the same value across versions. For this reason, Distinct
+	// should always be used as a map key instead of a Set.
+	Distinct struct {
+		iface interface{}
+	}
+
+	// Sortable implements sort.Interface, used for sorting KeyValue.
+	//
+	// Deprecated: This type is no longer used. It was added as a performance
+	// optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no
+	// longer supported by the module).
+	Sortable []KeyValue
+)
+
+var (
+	// keyValueType is used in computeDistinctReflect.
+	keyValueType = reflect.TypeOf(KeyValue{})
+
+	// emptySet is returned for empty attribute sets.
+	emptySet = &Set{
+		equivalent: Distinct{
+			iface: [0]KeyValue{},
+		},
+	}
+)
+
+// EmptySet returns a reference to a Set with no elements.
+//
+// This is a convenience provided for optimized calling utility.
+func EmptySet() *Set { + return emptySet +} + +// reflectValue abbreviates reflect.ValueOf(d). +func (d Distinct) reflectValue() reflect.Value { + return reflect.ValueOf(d.iface) +} + +// Valid returns true if this value refers to a valid Set. +func (d Distinct) Valid() bool { + return d.iface != nil +} + +// Len returns the number of attributes in this set. +func (l *Set) Len() int { + if l == nil || !l.equivalent.Valid() { + return 0 + } + return l.equivalent.reflectValue().Len() +} + +// Get returns the KeyValue at ordered position idx in this set. +func (l *Set) Get(idx int) (KeyValue, bool) { + if l == nil || !l.equivalent.Valid() { + return KeyValue{}, false + } + value := l.equivalent.reflectValue() + + if idx >= 0 && idx < value.Len() { + // Note: The Go compiler successfully avoids an allocation for + // the interface{} conversion here: + return value.Index(idx).Interface().(KeyValue), true + } + + return KeyValue{}, false +} + +// Value returns the value of a specified key in this set. +func (l *Set) Value(k Key) (Value, bool) { + if l == nil || !l.equivalent.Valid() { + return Value{}, false + } + rValue := l.equivalent.reflectValue() + vlen := rValue.Len() + + idx := sort.Search(vlen, func(idx int) bool { + return rValue.Index(idx).Interface().(KeyValue).Key >= k + }) + if idx >= vlen { + return Value{}, false + } + keyValue := rValue.Index(idx).Interface().(KeyValue) + if k == keyValue.Key { + return keyValue.Value, true + } + return Value{}, false +} + +// HasValue tests whether a key is defined in this set. +func (l *Set) HasValue(k Key) bool { + if l == nil { + return false + } + _, ok := l.Value(k) + return ok +} + +// Iter returns an iterator for visiting the attributes in this set. +func (l *Set) Iter() Iterator { + return Iterator{ + storage: l, + idx: -1, + } +} + +// ToSlice returns the set of attributes belonging to this set, sorted, where +// keys appear no more than once. +func (l *Set) ToSlice() []KeyValue { + iter := l.Iter() + return iter.ToSlice() +} + +// Equivalent returns a value that may be used as a map key. The Distinct type +// guarantees that the result will equal the equivalent. Distinct value of any +// attribute set with the same elements as this, where sets are made unique by +// choosing the last value in the input for any given key. +func (l *Set) Equivalent() Distinct { + if l == nil || !l.equivalent.Valid() { + return emptySet.equivalent + } + return l.equivalent +} + +// Equals returns true if the argument set is equivalent to this set. +func (l *Set) Equals(o *Set) bool { + return l.Equivalent() == o.Equivalent() +} + +// Encoded returns the encoded form of this set, according to encoder. +func (l *Set) Encoded(encoder Encoder) string { + if l == nil || encoder == nil { + return "" + } + + return encoder.Encode(l.Iter()) +} + +func empty() Set { + return Set{ + equivalent: emptySet.equivalent, + } +} + +// NewSet returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// Except for empty sets, this method adds an additional allocation compared +// with calls that include a Sortable. +func NewSet(kvs ...KeyValue) Set { + s, _ := NewSetWithFiltered(kvs, nil) + return s +} + +// NewSetWithSortable returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Sortable option as a memory optimization. +// +// Deprecated: Use [NewSet] instead. 
+func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set { + s, _ := NewSetWithFiltered(kvs, nil) + return s +} + +// NewSetWithFiltered returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. +// +// This call includes a Filter to include/exclude attribute keys from the +// return value. Excluded keys are returned as a slice of attribute values. +func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + // Check for empty set. + if len(kvs) == 0 { + return empty(), nil + } + + // Stable sort so the following de-duplication can implement + // last-value-wins semantics. + slices.SortStableFunc(kvs, func(a, b KeyValue) int { + return cmp.Compare(a.Key, b.Key) + }) + + position := len(kvs) - 1 + offset := position - 1 + + // The requirements stated above require that the stable + // result be placed in the end of the input slice, while + // overwritten values are swapped to the beginning. + // + // De-duplicate with last-value-wins semantics. Preserve + // duplicate values at the beginning of the input slice. + for ; offset >= 0; offset-- { + if kvs[offset].Key == kvs[position].Key { + continue + } + position-- + kvs[offset], kvs[position] = kvs[position], kvs[offset] + } + kvs = kvs[position:] + + if filter != nil { + if div := filteredToFront(kvs, filter); div != 0 { + return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + } + } + return Set{equivalent: computeDistinct(kvs)}, nil +} + +// NewSetWithSortableFiltered returns a new Set. +// +// Duplicate keys are eliminated by taking the last value. This +// re-orders the input slice so that unique last-values are contiguous +// at the end of the slice. +// +// This ensures the following: +// +// - Last-value-wins semantics +// - Caller sees the reordering, but doesn't lose values +// - Repeated call preserve last-value wins. +// +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: +// +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. +// +// The result maintains a cache of encoded attributes, by attribute.EncoderID. +// This value should not be copied after its first use. +// +// The second []KeyValue return value is a list of attributes that were +// excluded by the Filter (if non-nil). +// +// Deprecated: Use [NewSetWithFiltered] instead. +func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) { + return NewSetWithFiltered(kvs, filter) +} + +// filteredToFront filters slice in-place using keep function. All KeyValues that need to +// be removed are moved to the front. All KeyValues that need to be kept are +// moved (in-order) to the back. The index for the first KeyValue to be kept is +// returned. +func filteredToFront(slice []KeyValue, keep Filter) int { + n := len(slice) + j := n + for i := n - 1; i >= 0; i-- { + if keep(slice[i]) { + j-- + slice[i], slice[j] = slice[j], slice[i] + } + } + return j +} + +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. +func (l *Set) Filter(re Filter) (Set, []KeyValue) { + if re == nil { + return *l, nil + } + + // Iterate in reverse to the first attribute that will be filtered out. + n := l.Len() + first := n - 1 + for ; first >= 0; first-- { + kv, _ := l.Get(first) + if !re(kv) { + break + } + } + + // No attributes will be dropped, return the immutable Set l and nil. 
+ if first < 0 { + return *l, nil + } + + // Copy now that we know we need to return a modified set. + // + // Do not do this in-place on the underlying storage of *Set l. Sets are + // immutable and filtering should not change this. + slice := l.ToSlice() + + // Don't re-iterate the slice if only slice[0] is filtered. + if first == 0 { + // It is safe to assume len(slice) >= 1 given we found at least one + // attribute above that needs to be filtered out. + return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + } + + // Move the filtered slice[first] to the front (preserving order). + kv := slice[first] + copy(slice[1:first+1], slice[:first]) + slice[0] = kv + + // Do not re-evaluate re(slice[first+1:]). + div := filteredToFront(slice[1:first+1], re) + 1 + return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] +} + +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. +func computeDistinct(kvs []KeyValue) Distinct { + iface := computeDistinctFixed(kvs) + if iface == nil { + iface = computeDistinctReflect(kvs) + } + return Distinct{ + iface: iface, + } +} + +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. +func computeDistinctFixed(kvs []KeyValue) interface{} { + switch len(kvs) { + case 1: + ptr := new([1]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 2: + ptr := new([2]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 3: + ptr := new([3]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 4: + ptr := new([4]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 5: + ptr := new([5]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 6: + ptr := new([6]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 7: + ptr := new([7]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 8: + ptr := new([8]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 9: + ptr := new([9]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + case 10: + ptr := new([10]KeyValue) + copy((*ptr)[:], kvs) + return *ptr + default: + return nil + } +} + +// computeDistinctReflect computes a Distinct using reflection, works for any +// size input. +func computeDistinctReflect(kvs []KeyValue) interface{} { + at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() + for i, keyValue := range kvs { + *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue + } + return at.Interface() +} + +// MarshalJSON returns the JSON encoding of the Set. +func (l *Set) MarshalJSON() ([]byte, error) { + return json.Marshal(l.equivalent.iface) +} + +// MarshalLog is the marshaling function used by the logging system to represent this Set. +func (l Set) MarshalLog() interface{} { + kvs := make(map[string]string) + for _, kv := range l.ToSlice() { + kvs[string(kv.Key)] = kv.Value.Emit() + } + return kvs +} + +// Len implements sort.Interface. +func (l *Sortable) Len() int { + return len(*l) +} + +// Swap implements sort.Interface. +func (l *Sortable) Swap(i, j int) { + (*l)[i], (*l)[j] = (*l)[j], (*l)[i] +} + +// Less implements sort.Interface. 
+func (l *Sortable) Less(i, j int) bool { + return (*l)[i].Key < (*l)[j].Key +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/type_string.go new file mode 100644 index 0000000000..e584b24776 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/type_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=Type"; DO NOT EDIT. + +package attribute + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[INVALID-0] + _ = x[BOOL-1] + _ = x[INT64-2] + _ = x[FLOAT64-3] + _ = x[STRING-4] + _ = x[BOOLSLICE-5] + _ = x[INT64SLICE-6] + _ = x[FLOAT64SLICE-7] + _ = x[STRINGSLICE-8] +} + +const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" + +var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/attribute/value.go b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/value.go new file mode 100644 index 0000000000..9ea0ecbbd2 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -0,0 +1,271 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + + "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" +) + +//go:generate stringer -type=Type + +// Type describes the type of the data Value holds. +type Type int // nolint: revive // redefines builtin Type. + +// Value represents the value part in key-value pairs. +type Value struct { + vtype Type + numeric uint64 + stringly string + slice interface{} +} + +const ( + // INVALID is used for a Value with no value set. + INVALID Type = iota + // BOOL is a boolean Type Value. + BOOL + // INT64 is a 64-bit signed integral Type Value. + INT64 + // FLOAT64 is a 64-bit floating point Type Value. + FLOAT64 + // STRING is a string Type Value. + STRING + // BOOLSLICE is a slice of booleans Type Value. + BOOLSLICE + // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. + INT64SLICE + // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. + FLOAT64SLICE + // STRINGSLICE is a slice of strings Type Value. + STRINGSLICE +) + +// BoolValue creates a BOOL Value. +func BoolValue(v bool) Value { + return Value{ + vtype: BOOL, + numeric: internal.BoolToRaw(v), + } +} + +// BoolSliceValue creates a BOOLSLICE Value. +func BoolSliceValue(v []bool) Value { + return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} +} + +// IntValue creates an INT64 Value. +func IntValue(v int) Value { + return Int64Value(int64(v)) +} + +// IntSliceValue creates an INTSLICE Value. +func IntSliceValue(v []int) Value { + var int64Val int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) + for i, val := range v { + cp.Elem().Index(i).SetInt(int64(val)) + } + return Value{ + vtype: INT64SLICE, + slice: cp.Elem().Interface(), + } +} + +// Int64Value creates an INT64 Value. 
+func Int64Value(v int64) Value { + return Value{ + vtype: INT64, + numeric: internal.Int64ToRaw(v), + } +} + +// Int64SliceValue creates an INT64SLICE Value. +func Int64SliceValue(v []int64) Value { + return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} +} + +// Float64Value creates a FLOAT64 Value. +func Float64Value(v float64) Value { + return Value{ + vtype: FLOAT64, + numeric: internal.Float64ToRaw(v), + } +} + +// Float64SliceValue creates a FLOAT64SLICE Value. +func Float64SliceValue(v []float64) Value { + return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} +} + +// StringValue creates a STRING Value. +func StringValue(v string) Value { + return Value{ + vtype: STRING, + stringly: v, + } +} + +// StringSliceValue creates a STRINGSLICE Value. +func StringSliceValue(v []string) Value { + return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} +} + +// Type returns a type of the Value. +func (v Value) Type() Type { + return v.vtype +} + +// AsBool returns the bool value. Make sure that the Value's type is +// BOOL. +func (v Value) AsBool() bool { + return internal.RawToBool(v.numeric) +} + +// AsBoolSlice returns the []bool value. Make sure that the Value's type is +// BOOLSLICE. +func (v Value) AsBoolSlice() []bool { + if v.vtype != BOOLSLICE { + return nil + } + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsBoolSlice(v.slice) +} + +// AsInt64 returns the int64 value. Make sure that the Value's type is +// INT64. +func (v Value) AsInt64() int64 { + return internal.RawToInt64(v.numeric) +} + +// AsInt64Slice returns the []int64 value. Make sure that the Value's type is +// INT64SLICE. +func (v Value) AsInt64Slice() []int64 { + if v.vtype != INT64SLICE { + return nil + } + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsInt64Slice(v.slice) +} + +// AsFloat64 returns the float64 value. Make sure that the Value's +// type is FLOAT64. +func (v Value) AsFloat64() float64 { + return internal.RawToFloat64(v.numeric) +} + +// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is +// FLOAT64SLICE. +func (v Value) AsFloat64Slice() []float64 { + if v.vtype != FLOAT64SLICE { + return nil + } + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsFloat64Slice(v.slice) +} + +// AsString returns the string value. Make sure that the Value's type +// is STRING. +func (v Value) AsString() string { + return v.stringly +} + +// AsStringSlice returns the []string value. Make sure that the Value's type is +// STRINGSLICE. +func (v Value) AsStringSlice() []string { + if v.vtype != STRINGSLICE { + return nil + } + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsStringSlice(v.slice) +} + +type unknownValueType struct{} + +// AsInterface returns Value's data as interface{}. +func (v Value) AsInterface() interface{} { + switch v.Type() { + case BOOL: + return v.AsBool() + case BOOLSLICE: + return v.asBoolSlice() + case INT64: + return v.AsInt64() + case INT64SLICE: + return v.asInt64Slice() + case FLOAT64: + return v.AsFloat64() + case FLOAT64SLICE: + return v.asFloat64Slice() + case STRING: + return v.stringly + case STRINGSLICE: + return v.asStringSlice() + } + return unknownValueType{} +} + +// Emit returns a string representation of Value's data. 
+func (v Value) Emit() string { + switch v.Type() { + case BOOLSLICE: + return fmt.Sprint(v.asBoolSlice()) + case BOOL: + return strconv.FormatBool(v.AsBool()) + case INT64SLICE: + j, err := json.Marshal(v.asInt64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asInt64Slice()) + } + return string(j) + case INT64: + return strconv.FormatInt(v.AsInt64(), 10) + case FLOAT64SLICE: + j, err := json.Marshal(v.asFloat64Slice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asFloat64Slice()) + } + return string(j) + case FLOAT64: + return fmt.Sprint(v.AsFloat64()) + case STRINGSLICE: + j, err := json.Marshal(v.asStringSlice()) + if err != nil { + return fmt.Sprintf("invalid: %v", v.asStringSlice()) + } + return string(j) + case STRING: + return v.stringly + default: + return "unknown" + } +} + +// MarshalJSON returns the JSON encoding of the Value. +func (v Value) MarshalJSON() ([]byte, error) { + var jsonVal struct { + Type string + Value interface{} + } + jsonVal.Type = v.Type().String() + jsonVal.Value = v.AsInterface() + return json.Marshal(jsonVal) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/baggage/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/baggage/README.md new file mode 100644 index 0000000000..7d798435e1 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/baggage/README.md @@ -0,0 +1,3 @@ +# Baggage + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/hack/tools/vendor/go.opentelemetry.io/otel/baggage/baggage.go new file mode 100644 index 0000000000..b3569e95e5 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -0,0 +1,1018 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "errors" + "fmt" + "net/url" + "strings" + "unicode/utf8" + + "go.opentelemetry.io/otel/internal/baggage" +) + +const ( + maxMembers = 180 + maxBytesPerMembers = 4096 + maxBytesPerBaggageString = 8192 + + listDelimiter = "," + keyValueDelimiter = "=" + propertyDelimiter = ";" +) + +var ( + errInvalidKey = errors.New("invalid key") + errInvalidValue = errors.New("invalid value") + errInvalidProperty = errors.New("invalid baggage list-member property") + errInvalidMember = errors.New("invalid baggage list-member") + errMemberNumber = errors.New("too many list-members in baggage-string") + errMemberBytes = errors.New("list-member too large") + errBaggageBytes = errors.New("baggage-string too large") +) + +// Property is an additional metadata entry for a baggage list-member. +type Property struct { + key, value string + + // hasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + hasValue bool +} + +// NewKeyProperty returns a new Property for key. +// +// The passed key must be valid, non-empty UTF-8 string. +// If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. 
+func NewKeyProperty(key string) (Property, error) { + if !validateBaggageName(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + p := Property{key: key} + return p, nil +} + +// NewKeyValueProperty returns a new Property for key with value. +// +// The passed key must be compliant with the W3C Baggage specification. +// The passed value must be percent-encoded as defined in the W3C Baggage specification. +// +// Notice: Consider using [NewKeyValuePropertyRaw] instead +// that does not require percent-encoding of the value. +func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + if !validateValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + decodedValue, err := url.PathUnescape(value) + if err != nil { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + return NewKeyValuePropertyRaw(key, decodedValue) +} + +// NewKeyValuePropertyRaw returns a new Property for key with value. +// +// The passed key must be a valid, non-empty UTF-8 string. +// The passed value must be a valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric values are strongly recommended to be used as Property keys. +func NewKeyValuePropertyRaw(key, value string) (Property, error) { + if !validateBaggageName(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + + p := Property{ + key: key, + value: value, + hasValue: true, + } + return p, nil +} + +func newInvalidProperty() Property { + return Property{} +} + +// parseProperty attempts to decode a Property from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +func parseProperty(property string) (Property, error) { + if property == "" { + return newInvalidProperty(), nil + } + + p, ok := parsePropertyInternal(property) + if !ok { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) + } + + return p, nil +} + +// validate ensures p conforms to the W3C Baggage specification, returning an +// error otherwise. +func (p Property) validate() error { + errFunc := func(err error) error { + return fmt.Errorf("invalid property: %w", err) + } + + if !validateBaggageName(p.key) { + return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) + } + if !p.hasValue && p.value != "" { + return errFunc(errors.New("inconsistent value")) + } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } + return nil +} + +// Key returns the Property key. +func (p Property) Key() string { + return p.key +} + +// Value returns the Property value. Additionally, a boolean value is returned +// indicating whether the Property has a value set; this distinguishes an +// empty value from an unset one.
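// A hedged usage sketch of the Property constructors above: the Raw variant
// takes the value verbatim, and Property.String percent-encodes it on output.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	p, err := baggage.NewKeyValuePropertyRaw("tier", "gold silver")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.String()) // tier=gold%20silver
}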
+func (p Property) Value() (string, bool) { + return p.value, p.hasValue +} + +// String encodes Property into a header string compliant with the W3C Baggage +// specification. +// It returns an empty string if the key is invalid per the W3C Baggage +// specification. This can happen for a UTF-8 key that contains characters +// outside the allowed token set. +func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + + if p.hasValue { + return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) + } + return p.key +} + +type properties []Property + +func fromInternalProperties(iProps []baggage.Property) properties { + if len(iProps) == 0 { + return nil + } + + props := make(properties, len(iProps)) + for i, p := range iProps { + props[i] = Property{ + key: p.Key, + value: p.Value, + hasValue: p.HasValue, + } + } + return props +} + +func (p properties) asInternal() []baggage.Property { + if len(p) == 0 { + return nil + } + + iProps := make([]baggage.Property, len(p)) + for i, prop := range p { + iProps[i] = baggage.Property{ + Key: prop.key, + Value: prop.value, + HasValue: prop.hasValue, + } + } + return iProps +} + +func (p properties) Copy() properties { + if len(p) == 0 { + return nil + } + + props := make(properties, len(p)) + copy(props, p) + return props +} + +// validate ensures each Property in p conforms to the W3C Baggage +// specification, returning an error otherwise. +func (p properties) validate() error { + for _, prop := range p { + if err := prop.validate(); err != nil { + return err + } + } + return nil +} + +// String encodes properties into a header string compliant with the W3C Baggage +// specification. +func (p properties) String() string { + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignored empty properties. + if s != "" { + props = append(props, s) + } + } + return strings.Join(props, propertyDelimiter) +} + +// Member is a list-member of a baggage-string as defined by the W3C Baggage +// specification. +type Member struct { + key, value string + properties properties + + // hasData indicates whether the created member contains data or not. + // Members that do not contain data are invalid with no other check + // required. + hasData bool +} + +// NewMember returns a new Member from the passed arguments. +// +// The passed key must be compliant with the W3C Baggage specification. +// The passed value must be percent-encoded as defined in the W3C Baggage specification. +// +// Notice: Consider using [NewMemberRaw] instead +// that does not require percent-encoding of the value. +func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + if !validateValue(value) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + decodedValue, err := url.PathUnescape(value) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + return NewMemberRaw(key, decodedValue, props...) +} + +// NewMemberRaw returns a new Member from the passed arguments. +// +// The passed key must be a valid, non-empty UTF-8 string. +// The passed value must be a valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key.
+// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric values are strongly recommended to be used as baggage keys. +func NewMemberRaw(key, value string, props ...Property) (Member, error) { + m := Member{ + key: key, + value: value, + properties: properties(props).Copy(), + hasData: true, + } + if err := m.validate(); err != nil { + return newInvalidMember(), err + } + return m, nil +} + +func newInvalidMember() Member { + return Member{} +} + +// parseMember attempts to decode a Member from the passed string. It returns +// an error if the input is invalid according to the W3C Baggage +// specification. +func parseMember(member string) (Member, error) { + if n := len(member); n > maxBytesPerMembers { + return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) + } + + var props properties + keyValue, properties, found := strings.Cut(member, propertyDelimiter) + if found { + // Parse the member properties. + for _, pStr := range strings.Split(properties, propertyDelimiter) { + p, err := parseProperty(pStr) + if err != nil { + return newInvalidMember(), err + } + props = append(props, p) + } + } + // Parse the member key/value pair. + + // Take into account a value can contain equal signs (=). + k, v, found := strings.Cut(keyValue, keyValueDelimiter) + if !found { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) + } + // "Leading and trailing whitespaces are allowed but MUST be trimmed + // when converting the header into a data structure." + key := strings.TrimSpace(k) + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) + } + + // Decode a percent-encoded value. + unescapeVal, err := url.PathUnescape(rawVal) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) + } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) + return Member{key: key, value: value, properties: props, hasData: true}, nil +} + +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(cap) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + +// validate ensures m is a valid baggage list-member, returning an error +// otherwise. The key must be a valid, non-empty UTF-8 string. +func (m Member) validate() error { + if !m.hasData { + return fmt.Errorf("%w: %q", errInvalidMember, m) + } + + if !validateBaggageName(m.key) { + return fmt.Errorf("%w: %q", errInvalidKey, m.key) + } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } + return m.properties.validate() +} + +// Key returns the Member key. +func (m Member) Key() string { return m.key } + +// Value returns the Member value.
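// A sketch of the two Member constructors above (illustrative, not part of the
// vendored file): NewMember expects a percent-encoded value and decodes it,
// while NewMemberRaw stores the value as-is.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	m1, _ := baggage.NewMember("k", "a%20b") // value decoded to "a b"
	m2, _ := baggage.NewMemberRaw("k", "a b")
	fmt.Println(m1.Value() == m2.Value()) // true
}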
+func (m Member) Value() string { return m.value } + +// Properties returns a copy of the Member properties. +func (m Member) Properties() []Property { return m.properties.Copy() } + +// String encodes Member into a header string compliant with the W3C Baggage +// specification. +// It returns an empty string if the key is invalid per the W3C Baggage +// specification. This can happen for a UTF-8 key that contains characters +// outside the allowed token set. +func (m Member) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + + s := m.key + keyValueDelimiter + valueEscape(m.value) + if len(m.properties) > 0 { + s += propertyDelimiter + m.properties.String() + } + return s +} + +// Baggage is a list of baggage members representing the baggage-string as +// defined by the W3C Baggage specification. +type Baggage struct { //nolint:golint + list baggage.List +} + +// New returns a new valid Baggage. It returns an error if the resulting +// Baggage would exceed the limits set by the W3C Baggage specification. +// +// It expects all the provided members to have already been validated. +func New(members ...Member) (Baggage, error) { + if len(members) == 0 { + return Baggage{}, nil + } + + b := make(baggage.List) + for _, m := range members { + if !m.hasData { + return Baggage{}, errInvalidMember + } + + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // Check member numbers after deduplication. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + bag := Baggage{b} + if n := len(bag.String()); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + return bag, nil +} + +// Parse attempts to decode a baggage-string from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +// +// If there are duplicate list-members contained in baggage, the last one +// defined (reading left-to-right) will be the only one kept. This diverges +// from the W3C Baggage specification which allows duplicate list-members, but +// conforms to the OpenTelemetry Baggage specification. +func Parse(bStr string) (Baggage, error) { + if bStr == "" { + return Baggage{}, nil + } + + if n := len(bStr); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + b := make(baggage.List) + for _, memberStr := range strings.Split(bStr, listDelimiter) { + m, err := parseMember(memberStr) + if err != nil { + return Baggage{}, err + } + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // OpenTelemetry does not allow for duplicate list-members, but the W3C + // specification does. Now that we have deduplicated, ensure the baggage + // does not exceed list-member limits. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + return Baggage{b}, nil +} + +// Member returns the baggage list-member identified by key. +// +// If there is no list-member matching the passed key the returned Member will +// be a zero-value Member. +// The returned member is not validated, as we assume the validation happened +// when it was added to the Baggage.
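// A sketch of the Baggage entry points above: Parse decodes a W3C baggage
// header, applying the last-one-wins deduplication described in the Parse
// doc comment.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	b, err := baggage.Parse("userId=alice,serverNode=DF%2028;prop=1,userId=bob")
	if err != nil {
		panic(err)
	}
	fmt.Println(b.Len())                    // 2
	fmt.Println(b.Member("userId").Value()) // bob (duplicate resolved last-one-wins)
}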
+func (b Baggage) Member(key string) Member { + v, ok := b.list[key] + if !ok { + // We do not need to worry about distinguishing between the situation + // where a zero-valued Member is included in the Baggage because a + // zero-valued Member is invalid according to the W3C Baggage + // specification (it has an empty key). + return newInvalidMember() + } + + return Member{ + key: key, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + } +} + +// Members returns all the baggage list-members. +// The order of the returned list-members is not significant. +// +// The returned members are not validated, as we assume the validation happened +// when they were added to the Baggage. +func (b Baggage) Members() []Member { + if len(b.list) == 0 { + return nil + } + + members := make([]Member, 0, len(b.list)) + for k, v := range b.list { + members = append(members, Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + }) + } + return members +} + +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is +// replaced. +// +// If member is invalid according to the W3C Baggage specification, an error +// is returned with the original Baggage. +func (b Baggage) SetMember(member Member) (Baggage, error) { + if !member.hasData { + return b, errInvalidMember + } + + n := len(b.list) + if _, ok := b.list[member.key]; !ok { + n++ + } + list := make(baggage.List, n) + + for k, v := range b.list { + // Do not copy if we are just going to overwrite. + if k == member.key { + continue + } + list[k] = v + } + + list[member.key] = baggage.Item{ + Value: member.value, + Properties: member.properties.asInternal(), + } + + return Baggage{list: list}, nil +} + +// DeleteMember returns a copy of the Baggage with the list-member identified +// by key removed. +func (b Baggage) DeleteMember(key string) Baggage { + n := len(b.list) + if _, ok := b.list[key]; ok { + n-- + } + list := make(baggage.List, n) + + for k, v := range b.list { + if k == key { + continue + } + list[k] = v + } + + return Baggage{list: list} +} + +// Len returns the number of list-members in the Baggage. +func (b Baggage) Len() int { + return len(b.list) +} + +// String encodes Baggage into a header string compliant with the W3C Baggage +// specification. +// It would ignore members where the member key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. +func (b Baggage) String() string { + members := make([]string, 0, len(b.list)) + for k, v := range b.list { + s := Member{ + key: k, + value: v.Value, + properties: fromInternalProperties(v.Properties), + }.String() + + // Ignored empty members. + if s != "" { + members = append(members, s) + } + } + return strings.Join(members, listDelimiter) +} + +// parsePropertyInternal attempts to decode a Property from the passed string. +// It follows the spec at https://www.w3.org/TR/baggage/#definition. +func parsePropertyInternal(s string) (p Property, ok bool) { + // For the entire function we will use " key = value " as an example. + // Attempting to parse the key. + // First skip spaces at the beginning "< >key = value " (they could be empty). + index := skipSpace(s, 0) + + // Parse the key: " = value ". 
+ keyStart := index + keyEnd := index + for _, c := range s[keyStart:] { + if !validateKeyChar(c) { + break + } + keyEnd++ + } + + // If we couldn't find any valid key character, + // it means the key is either empty or invalid. + if keyStart == keyEnd { + return + } + + // Skip spaces after the key: " key< >= value ". + index = skipSpace(s, keyEnd) + + if index == len(s) { + // A key can have no value, like: " key ". + ok = true + p.key = s[keyStart:keyEnd] + return + } + + // If we have not reached the end and we can't find the '=' delimiter, + // it means the property is invalid. + if s[index] != keyValueDelimiter[0] { + return + } + + // Attempting to parse the value. + // Match: " key =< >value ". + index = skipSpace(s, index+1) + + // Match the value string: " key = ". + // A valid property can be: " key =". + // Therefore, we don't have to check if the value is empty. + valueStart := index + valueEnd := index + for _, c := range s[valueStart:] { + if !validateValueChar(c) { + break + } + valueEnd++ + } + + // Skip all trailing whitespaces: " key = value< >". + index = skipSpace(s, valueEnd) + + // If after looking for the value and skipping whitespaces + // we have not reached the end, it means the property is + // invalid, something like: " key = value value1". + if index != len(s) { + return + } + + // Decode a percent-encoded value. + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) + if err != nil { + return + } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) + + ok = true + p.key = s[keyStart:keyEnd] + p.hasValue = true + + p.value = value + return +} + +func skipSpace(s string, offset int) int { + i := offset + for ; i < len(s); i++ { + c := s[i] + if c != ' ' && c != '\t' { + break + } + } + return i +} + +var safeKeyCharset = [utf8.RuneSelf]bool{ + // 0x23 to 0x27 + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + + // 0x30 to 0x39 + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + + // 0x41 to 0x5a + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'V': true, + 'W': true, + 'X': true, + 'Y': true, + 'Z': true, + + // 0x5e to 0x7a + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + + // remainder + '!': true, + '*': true, + '+': true, + '-': true, + '.': true, + '|': true, + '~': true, +} + +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. 
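// A sketch of the two validation layers above (illustrative assumptions, not
// part of the vendored file): validateBaggageName accepts any non-empty UTF-8
// name, while the stricter W3C token check (validateKey) only gates header
// encoding. A non-token UTF-8 key is therefore accepted by NewMemberRaw but
// renders as an empty string.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	m, err := baggage.NewMemberRaw("café", "v")
	fmt.Println(err)        // <nil> — a valid UTF-8 baggage name
	fmt.Println(m.String()) // "" — "café" is not a valid W3C token key
}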
+func validateKey(s string) bool { + if len(s) == 0 { + return false + } + + for _, c := range s { + if !validateKeyChar(c) { + return false + } + } + + return true +} + +func validateKeyChar(c int32) bool { + return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] +} + +// validateValue checks if the string is a valid W3C Baggage value. +func validateValue(s string) bool { + for _, c := range s { + if !validateValueChar(c) { + return false + } + } + + return true +} + +var safeValueCharset = [utf8.RuneSelf]bool{ + '!': true, // 0x21 + + // 0x23 to 0x2b + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '(': true, + ')': true, + '*': true, + '+': true, + + // 0x2d to 0x3a + '-': true, + '.': true, + '/': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + ':': true, + + // 0x3c to 0x5b + '<': true, // 0x3C + '=': true, // 0x3D + '>': true, // 0x3E + '?': true, // 0x3F + '@': true, // 0x40 + 'A': true, // 0x41 + 'B': true, // 0x42 + 'C': true, // 0x43 + 'D': true, // 0x44 + 'E': true, // 0x45 + 'F': true, // 0x46 + 'G': true, // 0x47 + 'H': true, // 0x48 + 'I': true, // 0x49 + 'J': true, // 0x4A + 'K': true, // 0x4B + 'L': true, // 0x4C + 'M': true, // 0x4D + 'N': true, // 0x4E + 'O': true, // 0x4F + 'P': true, // 0x50 + 'Q': true, // 0x51 + 'R': true, // 0x52 + 'S': true, // 0x53 + 'T': true, // 0x54 + 'U': true, // 0x55 + 'V': true, // 0x56 + 'W': true, // 0x57 + 'X': true, // 0x58 + 'Y': true, // 0x59 + 'Z': true, // 0x5A + '[': true, // 0x5B + + // 0x5d to 0x7e + ']': true, // 0x5D + '^': true, // 0x5E + '_': true, // 0x5F + '`': true, // 0x60 + 'a': true, // 0x61 + 'b': true, // 0x62 + 'c': true, // 0x63 + 'd': true, // 0x64 + 'e': true, // 0x65 + 'f': true, // 0x66 + 'g': true, // 0x67 + 'h': true, // 0x68 + 'i': true, // 0x69 + 'j': true, // 0x6A + 'k': true, // 0x6B + 'l': true, // 0x6C + 'm': true, // 0x6D + 'n': true, // 0x6E + 'o': true, // 0x6F + 'p': true, // 0x70 + 'q': true, // 0x71 + 'r': true, // 0x72 + 's': true, // 0x73 + 't': true, // 0x74 + 'u': true, // 0x75 + 'v': true, // 0x76 + 'w': true, // 0x77 + 'x': true, // 0x78 + 'y': true, // 0x79 + 'z': true, // 0x7A + '{': true, // 0x7B + '|': true, // 0x7C + '}': true, // 0x7D + '~': true, // 0x7E +} + +func validateValueChar(c int32) bool { + return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c] +} + +// valueEscape escapes the string so it can be safely placed inside a baggage value, +// replacing special characters with %XX sequences as needed. +// +// The implementation is based on: +// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285. +func valueEscape(s string) string { + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return s + } + + var buf [64]byte + var t []byte + + required := len(s) + 2*hexCount + if required <= len(buf) { + t = buf[:required] + } else { + t = make([]byte, required) + } + + j := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(s[i]) { + const upperhex = "0123456789ABCDEF" + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + } else { + t[j] = c + j++ + } + } + + return string(t) +} + +// shouldEscape returns true if the specified byte should be escaped when +// appearing in a baggage value string. 
+func shouldEscape(c byte) bool { + if c == '%' { + // The percent character must be encoded so that percent-encoding can work. + return true + } + return !validateValueChar(int32(c)) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/baggage/context.go b/hack/tools/vendor/go.opentelemetry.io/otel/baggage/context.go new file mode 100644 index 0000000000..a572461a05 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/baggage/context.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "context" + + "go.opentelemetry.io/otel/internal/baggage" +) + +// ContextWithBaggage returns a copy of parent with baggage. +func ContextWithBaggage(parent context.Context, b Baggage) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, b.list) +} + +// ContextWithoutBaggage returns a copy of parent with no baggage. +func ContextWithoutBaggage(parent context.Context) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, nil) +} + +// FromContext returns the baggage contained in ctx. +func FromContext(ctx context.Context) Baggage { + // Delegate so any hooks for the OpenTracing bridge are handled. + return Baggage{list: baggage.ListFromContext(ctx)} +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/baggage/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/baggage/doc.go new file mode 100644 index 0000000000..b51d87cab7 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package baggage provides functionality for storing and retrieving +baggage items in Go context. For propagating the baggage, see the +go.opentelemetry.io/otel/propagation package. +*/ +package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/codes/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/codes/README.md new file mode 100644 index 0000000000..24c52b387d --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/codes/README.md @@ -0,0 +1,3 @@ +# Codes + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/codes/codes.go b/hack/tools/vendor/go.opentelemetry.io/otel/codes/codes.go new file mode 100644 index 0000000000..2acbac3546 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -0,0 +1,105 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package codes // import "go.opentelemetry.io/otel/codes" + +import ( + "encoding/json" + "fmt" + "strconv" +) + +const ( + // Unset is the default status code. + Unset Code = 0 + + // Error indicates the operation contains an error. + // + // NOTE: The error code in OTLP is 2. + // The value of this enum is only relevant to the internals + // of the Go SDK. + Error Code = 1 + + // Ok indicates the operation has been validated by an application + // developer or operator to have completed successfully, or to contain no error. + // + // NOTE: The Ok code in OTLP is 1. + // The value of this enum is only relevant to the internals + // of the Go SDK.
+ Ok Code = 2 + + maxCode = 3 +) + +// Code is a 32-bit representation of a status state. +type Code uint32 + +var codeToStr = map[Code]string{ + Unset: "Unset", + Error: "Error", + Ok: "Ok", +} + +var strToCode = map[string]Code{ + `"Unset"`: Unset, + `"Error"`: Error, + `"Ok"`: Ok, +} + +// String returns the Code as a string. +func (c Code) String() string { + return codeToStr[c] +} + +// UnmarshalJSON unmarshals b into the Code. +// +// This is based on the functionality in the gRPC codes package: +// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + var x interface{} + if err := json.Unmarshal(b, &x); err != nil { + return err + } + switch x.(type) { + case string: + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + case float64: + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + default: + return fmt.Errorf("invalid code: %q", string(b)) + } +} + +// MarshalJSON returns the JSON encoding of c. +func (c *Code) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte("null"), nil + } + str, ok := codeToStr[*c] + if !ok { + return nil, fmt.Errorf("invalid code: %d", *c) + } + return []byte(fmt.Sprintf("%q", str)), nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/codes/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/codes/doc.go new file mode 100644 index 0000000000..ee8db448b8 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package codes defines the canonical error codes used by OpenTelemetry. + +It conforms to [the OpenTelemetry +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). +*/ +package codes // import "go.opentelemetry.io/otel/codes" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/doc.go new file mode 100644 index 0000000000..921f85961a --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/doc.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otel provides global access to the OpenTelemetry API. The subpackages of +the otel package provide an implementation of the OpenTelemetry API. + +The provided API is used to instrument code and measure data about that code's +performance and operation. The measured data, by default, is not processed or +transmitted anywhere. An implementation of the OpenTelemetry SDK, like the +default SDK implementation (go.opentelemetry.io/otel/sdk), and associated +exporters are used to process and transport this data. + +To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/. + +To read more about tracing, see go.opentelemetry.io/otel/trace.
+ +To read more about metrics, see go.opentelemetry.io/otel/metric. + +To read more about logs, see go.opentelemetry.io/otel/log. + +To read more about propagation, see go.opentelemetry.io/otel/propagation and +go.opentelemetry.io/otel/baggage. +*/ +package otel // import "go.opentelemetry.io/otel" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/error_handler.go b/hack/tools/vendor/go.opentelemetry.io/otel/error_handler.go new file mode 100644 index 0000000000..67414c71e0 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/error_handler.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. +func (f ErrorHandlerFunc) Handle(err error) { + f(err) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
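// A standalone sketch exercising two of the pieces vendored above (an
// illustrative example, not part of the diff): the ErrorHandlerFunc adapter
// from error_handler.go and the JSON round-trip logic from codes.go;
// otel.SetErrorHandler is part of the same vendored module.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
)

func main() {
	// Route all irremediable OpenTelemetry errors to the standard logger.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel: %v", err)
	}))

	// Both the string and numeric JSON encodings unmarshal to the same Code.
	var c codes.Code
	_ = json.Unmarshal([]byte(`"Error"`), &c)
	fmt.Println(c) // Error
	_ = json.Unmarshal([]byte(`2`), &c)
	fmt.Println(c) // Ok
}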
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md new file mode 100644 index 0000000000..50802d5aee --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md @@ -0,0 +1,3 @@ +# OTLP Trace Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go new file mode 100644 index 0000000000..3c1a625c06 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + +import ( + "context" + + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +// Client manages connections to the collector, handles the +// transformation of data into wire format, and the transmission of that +// data to the collector. +type Client interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Start should establish connection(s) to endpoint(s). It is + // called just once by the exporter, so the implementation + // does not need to worry about idempotence and locking. + Start(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Stop should close the connections. The function is called + // only once by the exporter, so the implementation does not + // need to worry about idempotence, but it may be called + // concurrently with UploadTraces, so proper + // locking is required. The function serves as a + // synchronization point - after the function returns, the + // process of closing connections is assumed to be finished. + Stop(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // UploadTraces should transform the passed traces to the wire + // format and send it to the collector. May be called + // concurrently. + UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go new file mode 100644 index 0000000000..09ad5eadb6 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otlptrace contains abstractions for OTLP span exporters. +See the official OTLP span exporter implementations: + - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc], + - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp]. 
+*/ +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go new file mode 100644 index 0000000000..3f0a518ae0 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -0,0 +1,105 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + tracesdk "go.opentelemetry.io/otel/sdk/trace" +) + +var errAlreadyStarted = errors.New("already started") + +// Exporter exports trace data in the OTLP wire format. +type Exporter struct { + client Client + + mu sync.RWMutex + started bool + + startOnce sync.Once + stopOnce sync.Once +} + +// ExportSpans exports a batch of spans. +func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error { + protoSpans := tracetransform.Spans(ss) + if len(protoSpans) == 0 { + return nil + } + + err := e.client.UploadTraces(ctx, protoSpans) + if err != nil { + return fmt.Errorf("traces export: %w", err) + } + return nil +} + +// Start establishes a connection to the receiving endpoint. +func (e *Exporter) Start(ctx context.Context) error { + err := errAlreadyStarted + e.startOnce.Do(func() { + e.mu.Lock() + e.started = true + e.mu.Unlock() + err = e.client.Start(ctx) + }) + + return err +} + +// Shutdown flushes all exports and closes all connections to the receiving endpoint. +func (e *Exporter) Shutdown(ctx context.Context) error { + e.mu.RLock() + started := e.started + e.mu.RUnlock() + + if !started { + return nil + } + + var err error + + e.stopOnce.Do(func() { + err = e.client.Stop(ctx) + e.mu.Lock() + e.started = false + e.mu.Unlock() + }) + + return err +} + +var _ tracesdk.SpanExporter = (*Exporter)(nil) + +// New constructs a new Exporter and starts it. +func New(ctx context.Context, client Client) (*Exporter, error) { + exp := NewUnstarted(client) + if err := exp.Start(ctx); err != nil { + return nil, err + } + return exp, nil +} + +// NewUnstarted constructs a new Exporter and does not start it. +func NewUnstarted(client Client) *Exporter { + return &Exporter{ + client: client, + } +} + +// MarshalLog is the marshaling function used by the logging system to represent this Exporter. +func (e *Exporter) MarshalLog() interface{} { + return struct { + Type string + Client Client + }{ + Type: "otlptrace", + Client: e.client, + } +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go new file mode 100644 index 0000000000..4571a5ca39 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -0,0 +1,147 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +// KeyValues transforms a slice of attribute KeyValues into OTLP key-values. 
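// A wiring sketch for the Exporter above, assuming the otlptracegrpc client
// that this change also appears to vendor: New starts the client, and the SDK
// batch processor feeds spans into UploadTraces.
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()
	exp, err := otlptrace.New(ctx, otlptracegrpc.NewClient())
	if err != nil {
		log.Fatal(err)
	}
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(ctx) }()
}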
+func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]*commonpb.KeyValue, 0, len(attrs)) + for _, kv := range attrs { + out = append(out, KeyValue(kv)) + } + return out +} + +// Iterator transforms an attribute iterator into OTLP key-values. +func Iterator(iter attribute.Iterator) []*commonpb.KeyValue { + l := iter.Len() + if l == 0 { + return nil + } + + out := make([]*commonpb.KeyValue, 0, l) + for iter.Next() { + out = append(out, KeyValue(iter.Attribute())) + } + return out +} + +// ResourceAttributes transforms a Resource into OTLP key-values. +func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue { + return Iterator(res.Iter()) +} + +// KeyValue transforms an attribute KeyValue into an OTLP key-value. +func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue { + return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)} +} + +// Value transforms an attribute Value into an OTLP AnyValue. +func Value(v attribute.Value) *commonpb.AnyValue { + av := new(commonpb.AnyValue) + switch v.Type() { + case attribute.BOOL: + av.Value = &commonpb.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case attribute.BOOLSLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: boolSliceValues(v.AsBoolSlice()), + }, + } + case attribute.INT64: + av.Value = &commonpb.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case attribute.INT64SLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: int64SliceValues(v.AsInt64Slice()), + }, + } + case attribute.FLOAT64: + av.Value = &commonpb.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case attribute.FLOAT64SLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: float64SliceValues(v.AsFloat64Slice()), + }, + } + case attribute.STRING: + av.Value = &commonpb.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case attribute.STRINGSLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: stringSliceValues(v.AsStringSlice()), + }, + } + default: + av.Value = &commonpb.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} + +func boolSliceValues(vals []bool) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_BoolValue{ + BoolValue: v, + }, + } + } + return converted +} + +func int64SliceValues(vals []int64) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_IntValue{ + IntValue: v, + }, + } + } + return converted +} + +func float64SliceValues(vals []float64) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_DoubleValue{ + DoubleValue: v, + }, + } + } + return converted +} + +func stringSliceValues(vals []string) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_StringValue{ + StringValue: v, + }, + } + } + return converted +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go new file mode 100644 index 0000000000..f6dd3decc9 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/sdk/instrumentation" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope { + if il == (instrumentation.Scope{}) { + return nil + } + return &commonpb.InstrumentationScope{ + Name: il.Name, + Version: il.Version, + } +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go new file mode 100644 index 0000000000..db7b698a56 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/sdk/resource" + resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" +) + +// Resource transforms a Resource into an OTLP Resource. +func Resource(r *resource.Resource) *resourcepb.Resource { + if r == nil { + return nil + } + return &resourcepb.Resource{Attributes: ResourceAttributes(r)} +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go new file mode 100644 index 0000000000..c3c69c5a0d --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/sdk/instrumentation" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP +// ResourceSpans. +func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans { + if len(sdl) == 0 { + return nil + } + + rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans) + + type key struct { + r attribute.Distinct + is instrumentation.Scope + } + ssm := make(map[key]*tracepb.ScopeSpans) + + var resources int + for _, sd := range sdl { + if sd == nil { + continue + } + + rKey := sd.Resource().Equivalent() + k := key{ + r: rKey, + is: sd.InstrumentationScope(), + } + scopeSpan, iOk := ssm[k] + if !iOk { + // Either the resource or instrumentation scope was unknown.
+			scopeSpan = &tracepb.ScopeSpans{
+				Scope:     InstrumentationScope(sd.InstrumentationScope()),
+				Spans:     []*tracepb.Span{},
+				SchemaUrl: sd.InstrumentationScope().SchemaURL,
+			}
+		}
+		scopeSpan.Spans = append(scopeSpan.Spans, span(sd))
+		ssm[k] = scopeSpan
+
+		rs, rOk := rsm[rKey]
+		if !rOk {
+			resources++
+			// The resource was unknown.
+			rs = &tracepb.ResourceSpans{
+				Resource:   Resource(sd.Resource()),
+				ScopeSpans: []*tracepb.ScopeSpans{scopeSpan},
+				SchemaUrl:  sd.Resource().SchemaURL(),
+			}
+			rsm[rKey] = rs
+			continue
+		}
+
+		// The resource has been seen before. Check if the instrumentation
+		// library lookup was unknown because if so we need to add it to the
+		// ResourceSpans. Otherwise, the instrumentation library has already
+		// been seen and the span appended above is already included in the
+		// referenced ScopeSpans.
+		if !iOk {
+			rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan)
+		}
+	}
+
+	// Transform the categorized map into a slice.
+	rss := make([]*tracepb.ResourceSpans, 0, resources)
+	for _, rs := range rsm {
+		rss = append(rss, rs)
+	}
+	return rss
+}
+
+// span transforms a Span into an OTLP span.
+func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
+	if sd == nil {
+		return nil
+	}
+
+	tid := sd.SpanContext().TraceID()
+	sid := sd.SpanContext().SpanID()
+
+	s := &tracepb.Span{
+		TraceId:                tid[:],
+		SpanId:                 sid[:],
+		TraceState:             sd.SpanContext().TraceState().String(),
+		Status:                 status(sd.Status().Code, sd.Status().Description),
+		StartTimeUnixNano:      uint64(sd.StartTime().UnixNano()),
+		EndTimeUnixNano:        uint64(sd.EndTime().UnixNano()),
+		Links:                  links(sd.Links()),
+		Kind:                   spanKind(sd.SpanKind()),
+		Name:                   sd.Name(),
+		Attributes:             KeyValues(sd.Attributes()),
+		Events:                 spanEvents(sd.Events()),
+		DroppedAttributesCount: uint32(sd.DroppedAttributes()),
+		DroppedEventsCount:     uint32(sd.DroppedEvents()),
+		DroppedLinksCount:      uint32(sd.DroppedLinks()),
+	}
+
+	if psid := sd.Parent().SpanID(); psid.IsValid() {
+		s.ParentSpanId = psid[:]
+	}
+	s.Flags = buildSpanFlags(sd.Parent())
+
+	return s
+}
+
+// status transforms a span code and message into an OTLP span status.
+func status(status codes.Code, message string) *tracepb.Status {
+	var c tracepb.Status_StatusCode
+	switch status {
+	case codes.Ok:
+		c = tracepb.Status_STATUS_CODE_OK
+	case codes.Error:
+		c = tracepb.Status_STATUS_CODE_ERROR
+	default:
+		c = tracepb.Status_STATUS_CODE_UNSET
+	}
+	return &tracepb.Status{
+		Code:    c,
+		Message: message,
+	}
+}
+
+// links transforms span Links to OTLP span links.
+func links(links []tracesdk.Link) []*tracepb.Span_Link {
+	if len(links) == 0 {
+		return nil
+	}
+
+	sl := make([]*tracepb.Span_Link, 0, len(links))
+	for _, otLink := range links {
+		// This redefinition is necessary to prevent otLink.*ID[:] copies
+		// being reused -- in short we need a new otLink per iteration.
+		otLink := otLink
+
+		tid := otLink.SpanContext.TraceID()
+		sid := otLink.SpanContext.SpanID()
+
+		flags := buildSpanFlags(otLink.SpanContext)
+
+		sl = append(sl, &tracepb.Span_Link{
+			TraceId:                tid[:],
+			SpanId:                 sid[:],
+			Attributes:             KeyValues(otLink.Attributes),
+			DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
+			Flags:                  flags,
+		})
+	}
+	return sl
+}
+
+func buildSpanFlags(sc trace.SpanContext) uint32 {
+	flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK
+	if sc.IsRemote() {
+		flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
+	}
+
+	return uint32(flags)
+}
+
+// spanEvents transforms span Events to OTLP span events.
+func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { + if len(es) == 0 { + return nil + } + + events := make([]*tracepb.Span_Event, len(es)) + // Transform message events + for i := 0; i < len(es); i++ { + events[i] = &tracepb.Span_Event{ + Name: es[i].Name, + TimeUnixNano: uint64(es[i].Time.UnixNano()), + Attributes: KeyValues(es[i].Attributes), + DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), + } + } + return events +} + +// spanKind transforms a SpanKind to an OTLP span kind. +func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind { + switch kind { + case trace.SpanKindInternal: + return tracepb.Span_SPAN_KIND_INTERNAL + case trace.SpanKindClient: + return tracepb.Span_SPAN_KIND_CLIENT + case trace.SpanKindServer: + return tracepb.Span_SPAN_KIND_SERVER + case trace.SpanKindProducer: + return tracepb.Span_SPAN_KIND_PRODUCER + case trace.SpanKindConsumer: + return tracepb.Span_SPAN_KIND_CONSUMER + default: + return tracepb.Span_SPAN_KIND_UNSPECIFIED + } +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
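The vendored files that follow bring in the public otlptracegrpc exporter package itself. For orientation before the client internals, here is a minimal usage sketch of how this package is typically wired into an SDK tracer provider; the collector address is a placeholder, and the snippet is commentary, not part of the vendored diff:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// New dials the collector and starts the client (see client.Start below).
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"), // placeholder address
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Batch spans and flush them through the exporter on shutdown.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(ctx) }()
	otel.SetTracerProvider(tp)
}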
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md new file mode 100644 index 0000000000..5309bb7cb1 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md @@ -0,0 +1,3 @@ +# OTLP Trace gRPC Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go new file mode 100644 index 0000000000..3993df927d --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -0,0 +1,295 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + +import ( + "context" + "errors" + "sync" + "time" + + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +type client struct { + endpoint string + dialOpts []grpc.DialOption + metadata metadata.MD + exportTimeout time.Duration + requestFunc retry.RequestFunc + + // stopCtx is used as a parent context for all exports. Therefore, when it + // is canceled with the stopFunc all exports are canceled. + stopCtx context.Context + // stopFunc cancels stopCtx, stopping any active exports. + stopFunc context.CancelFunc + + // ourConn keeps track of where conn was created: true if created here on + // Start, or false if passed with an option. This is important on Shutdown + // as the conn should only be closed if created here on start. Otherwise, + // it is up to the processes that passed the conn to close it. + ourConn bool + conn *grpc.ClientConn + tscMu sync.RWMutex + tsc coltracepb.TraceServiceClient +} + +// Compile time check *client implements otlptrace.Client. +var _ otlptrace.Client = (*client)(nil) + +// NewClient creates a new gRPC trace client. +func NewClient(opts ...Option) otlptrace.Client { + return newClient(opts...) +} + +func newClient(opts ...Option) *client { + cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...) + + ctx, cancel := context.WithCancel(context.Background()) + + c := &client{ + endpoint: cfg.Traces.Endpoint, + exportTimeout: cfg.Traces.Timeout, + requestFunc: cfg.RetryConfig.RequestFunc(retryable), + dialOpts: cfg.DialOptions, + stopCtx: ctx, + stopFunc: cancel, + conn: cfg.GRPCConn, + } + + if len(cfg.Traces.Headers) > 0 { + c.metadata = metadata.New(cfg.Traces.Headers) + } + + return c +} + +// Start establishes a gRPC connection to the collector. 
+func (c *client) Start(context.Context) error {
+	if c.conn == nil {
+		// If the caller did not provide a ClientConn when the client was
+		// created, create one using the configuration they did provide.
+		conn, err := grpc.NewClient(c.endpoint, c.dialOpts...)
+		if err != nil {
+			return err
+		}
+		// Keep track that we own the lifecycle of this conn and need to close
+		// it on Shutdown.
+		c.ourConn = true
+		c.conn = conn
+	}
+
+	// The otlptrace.Client interface states this method is called just once,
+	// so no need to check if already started.
+	c.tscMu.Lock()
+	c.tsc = coltracepb.NewTraceServiceClient(c.conn)
+	c.tscMu.Unlock()
+
+	return nil
+}
+
+var errAlreadyStopped = errors.New("the client is already stopped")
+
+// Stop shuts down the client.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+//
+// This method synchronizes with the UploadTraces method of the client. It
+// will wait for any active calls to that method to complete unimpeded, or it
+// will cancel any active calls if ctx expires. If ctx expires, the context
+// error will be forwarded as the returned error. All client held resources
+// will still be released in this situation.
+//
+// If the client has already stopped, an error will be returned describing
+// this.
+func (c *client) Stop(ctx context.Context) error {
+	// Make sure to return context error if the context is done when calling this method.
+	err := ctx.Err()
+
+	// Acquire the c.tscMu lock within the ctx lifetime.
+	acquired := make(chan struct{})
+	go func() {
+		c.tscMu.Lock()
+		close(acquired)
+	}()
+
+	select {
+	case <-ctx.Done():
+		// The Stop timeout is reached. Kill any remaining exports to force
+		// the clear of the lock and save the timeout error to return and
+		// signal the shutdown timed out before cleanly stopping.
+		c.stopFunc()
+		err = ctx.Err()
+
+		// To ensure the client is not left in a dirty state c.tsc needs to be
+		// set to nil. To avoid the race condition when doing this, ensure
+		// that all the exports are killed (initiated by c.stopFunc).
+		<-acquired
+	case <-acquired:
+	}
+	// Hold the tscMu lock for the rest of the function to ensure no new
+	// exports are started.
+	defer c.tscMu.Unlock()
+
+	// The otlptrace.Client interface states this method is called only
+	// once, but there is no guarantee it is called after Start. Ensure the
+	// client is started before doing anything and let the caller know if they
+	// made a mistake.
+	if c.tsc == nil {
+		return errAlreadyStopped
+	}
+
+	// Clear c.tsc to signal the client is stopped.
+	c.tsc = nil
+
+	if c.ourConn {
+		closeErr := c.conn.Close()
+		// A context timeout error takes precedence over this error.
+		if err == nil && closeErr != nil {
+			err = closeErr
+		}
+	}
+	return err
+}
+
+var errShutdown = errors.New("the client is shutdown")
+
+// UploadTraces sends a batch of spans.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
+	// Hold a read lock to ensure a shut down initiated after this starts does
+	// not abandon the export. This read lock acquire has less priority than a
+	// write lock acquire (i.e. Stop), meaning if the client is shutting down
+	// this will come after the shut down.
+	c.tscMu.RLock()
+	defer c.tscMu.RUnlock()
+
+	if c.tsc == nil {
+		return errShutdown
+	}
+
+	ctx, cancel := c.exportContext(ctx)
+	defer cancel()
+
+	return c.requestFunc(ctx, func(iCtx context.Context) error {
+		resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
+			ResourceSpans: protoSpans,
+		})
+		if resp != nil && resp.PartialSuccess != nil {
+			msg := resp.PartialSuccess.GetErrorMessage()
+			n := resp.PartialSuccess.GetRejectedSpans()
+			if n != 0 || msg != "" {
+				err := internal.TracePartialSuccessError(n, msg)
+				otel.Handle(err)
+			}
+		}
+		// nil is converted to OK.
+		if status.Code(err) == codes.OK {
+			// Success.
+			return nil
+		}
+		return err
+	})
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+	var (
+		ctx    context.Context
+		cancel context.CancelFunc
+	)
+
+	if c.exportTimeout > 0 {
+		ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+	} else {
+		ctx, cancel = context.WithCancel(parent)
+	}
+
+	if c.metadata.Len() > 0 {
+		ctx = metadata.NewOutgoingContext(ctx, c.metadata)
+	}
+
+	// Unify the client stopCtx with the parent.
+	go func() {
+		select {
+		case <-ctx.Done():
+		case <-c.stopCtx.Done():
+			// Cancel the export as the shutdown has timed out.
+			cancel()
+		}
+	}()
+
+	return ctx, cancel
+}
+
+// retryable returns if err identifies a request that can be retried and a
+// duration to wait for if an explicit throttle time is included in err.
+func retryable(err error) (bool, time.Duration) {
+	s := status.Convert(err)
+	return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
+	switch s.Code() {
+	case codes.Canceled,
+		codes.DeadlineExceeded,
+		codes.Aborted,
+		codes.OutOfRange,
+		codes.Unavailable,
+		codes.DataLoss:
+		// Additionally handle RetryInfo.
+		_, d := throttleDelay(s)
+		return true, d
+	case codes.ResourceExhausted:
+		// Retry only if the server signals that the recovery from resource exhaustion is possible.
+		return throttleDelay(s)
+	}
+
+	// Not a retry-able error.
+	return false, 0
+}
+
+// throttleDelay returns whether the status carries RetryInfo details and, if
+// so, the explicit throttle delay that should be honored before retrying.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
+	for _, detail := range s.Details() {
+		if t, ok := detail.(*errdetails.RetryInfo); ok {
+			return true, t.RetryDelay.AsDuration()
+		}
+	}
+	return false, 0
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (c *client) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Endpoint string
+	}{
+		Type:     "otlphttpgrpc",
+		Endpoint: c.endpoint,
+	}
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
new file mode 100644
index 0000000000..e783b57ac4
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlptracegrpc provides an OTLP span exporter using gRPC.
+By default, the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a host.
+The value may additionally contain a port, a scheme, and a path.
+The value accepts "http" and "https" scheme.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+The scheme set via OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+overrides this setting.
+OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option. + +[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content +*/ +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go new file mode 100644 index 0000000000..b826b84247 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + +import ( + "context" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" +) + +// New constructs a new Exporter and starts it. +func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) { + return otlptrace.New(ctx, NewClient(opts...)) +} + +// NewUnstarted constructs a new Exporter and does not start it. +func NewUnstarted(opts ...Option) *otlptrace.Exporter { + return otlptrace.NewUnstarted(NewClient(opts...)) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go new file mode 100644 index 0000000000..9513c0a57c --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -0,0 +1,191 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/envconfig/envconfig.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "go.opentelemetry.io/otel/internal/global" +) + +// ConfigFn is the generic function used to set a config. +type ConfigFn func(*EnvOptionsReader) + +// EnvOptionsReader reads the required environment variables. +type EnvOptionsReader struct { + GetEnv func(string) string + ReadFile func(string) ([]byte, error) + Namespace string +} + +// Apply runs every ConfigFn. +func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { + for _, o := range opts { + o(e) + } +} + +// GetEnvValue gets an OTLP environment variable value of the specified key +// using the GetEnv function. +// This function prepends the OTLP specified namespace to all key lookups. +func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { + v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) + return v, v != "" +} + +// WithString retrieves the specified config and passes it to ConfigFn as a string. +func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(v) + } + } +} + +// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. 
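Worth noting alongside these readers: the exporter can be configured entirely through the OTLP environment variables documented in doc.go above, with no options passed to New. A hedged sketch of that flow, with illustrative endpoint and timeout values:

package main

import (
	"context"
	"log"
	"os"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	// Equivalent to passing WithEndpoint, WithInsecure, and WithTimeout.
	os.Setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4317")
	os.Setenv("OTEL_EXPORTER_OTLP_TIMEOUT", "5000") // milliseconds, parsed by WithDuration below

	exp, err := otlptracegrpc.New(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(context.Background()) }()
}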
+func WithBool(n string, fn func(bool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b := strings.ToLower(v) == "true"
+			fn(b)
+		}
+	}
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			d, err := strconv.Atoi(v)
+			if err != nil {
+				global.Error(err, "parse duration", "input", v)
+				return
+			}
+			fn(time.Duration(d) * time.Millisecond)
+		}
+	}
+}
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			fn(stringToHeader(v))
+		}
+	}
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			u, err := url.Parse(v)
+			if err != nil {
+				global.Error(err, "parse url", "input", v)
+				return
+			}
+			fn(u)
+		}
+	}
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+	return func(e *EnvOptionsReader) {
+		if v, ok := e.GetEnvValue(n); ok {
+			b, err := e.ReadFile(v)
+			if err != nil {
+				global.Error(err, "read tls ca cert file", "file", v)
+				return
+			}
+			c, err := createCertPool(b)
+			if err != nil {
+				global.Error(err, "create tls cert pool")
+				return
+			}
+			fn(c)
+		}
+	}
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and passed to fn.
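All of these helpers share one closure pattern: bind an environment key and a setter, then let the reader drive them through Apply. A standalone sketch of that pattern, using simplified illustrative names rather than the internal API:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type reader struct{ getenv func(string) string }

type configFn func(*reader)

func (r *reader) apply(fns ...configFn) {
	for _, fn := range fns {
		fn(r)
	}
}

// withString mirrors the WithString helper above: call set only when the
// variable is present and non-empty.
func withString(key string, set func(string)) configFn {
	return func(r *reader) {
		if v := strings.TrimSpace(r.getenv(key)); v != "" {
			set(v)
		}
	}
}

// withMillis mirrors WithDuration: the raw value is an integer millisecond count.
func withMillis(key string, set func(int)) configFn {
	return func(r *reader) {
		if v := strings.TrimSpace(r.getenv(key)); v != "" {
			if ms, err := strconv.Atoi(v); err == nil {
				set(ms)
			}
		}
	}
}

func main() {
	env := map[string]string{"ENDPOINT": "localhost:4317", "TIMEOUT": "10000"}
	r := &reader{getenv: func(k string) string { return env[k] }}

	var endpoint string
	var timeoutMS int
	r.apply(
		withString("ENDPOINT", func(v string) { endpoint = v }),
		withMillis("TIMEOUT", func(ms int) { timeoutMS = ms }),
	)
	fmt.Println(endpoint, timeoutMS) // localhost:4317 10000
}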
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { + return func(e *EnvOptionsReader) { + vc, okc := e.GetEnvValue(nc) + vk, okk := e.GetEnvValue(nk) + if !okc || !okk { + return + } + cert, err := e.ReadFile(vc) + if err != nil { + global.Error(err, "read tls client cert", "file", vc) + return + } + key, err := e.ReadFile(vk) + if err != nil { + global.Error(err, "read tls client key", "file", vk) + return + } + crt, err := tls.X509KeyPair(cert, key) + if err != nil { + global.Error(err, "create tls client key pair") + return + } + fn(crt) + } +} + +func keyWithNamespace(ns, key string) string { + if ns == "" { + return key + } + return fmt.Sprintf("%s_%s", ns, key) +} + +func stringToHeader(value string) map[string]string { + headersPairs := strings.Split(value, ",") + headers := make(map[string]string) + + for _, header := range headersPairs { + n, v, found := strings.Cut(header, "=") + if !found { + global.Error(errors.New("missing '="), "parse headers", "input", header) + continue + } + name, err := url.PathUnescape(n) + if err != nil { + global.Error(err, "escape header key", "key", n) + continue + } + trimmedName := strings.TrimSpace(name) + value, err := url.PathUnescape(v) + if err != nil { + global.Error(err, "escape header value", "value", v) + continue + } + trimmedValue := strings.TrimSpace(value) + + headers[trimmedName] = trimmedValue + } + + return headers +} + +func createCertPool(certBytes []byte) (*x509.CertPool, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + return cp, nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go new file mode 100644 index 0000000000..97cd6c54f7 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go +//go:generate 
gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go new file mode 100644 index 0000000000..7bb189a94b --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go @@ -0,0 +1,142 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + "os" + "path" + "strings" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" +) + +// DefaultEnvOptionsReader is the default environments reader. +var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ + GetEnv: os.Getenv, + ReadFile: os.ReadFile, + Namespace: "OTEL_EXPORTER_OTLP", +} + +// ApplyGRPCEnvConfigs applies the env configurations for gRPC. +func ApplyGRPCEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyGRPCOption(cfg) + } + return cfg +} + +// ApplyHTTPEnvConfigs applies the env configurations for HTTP. +func ApplyHTTPEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + return cfg +} + +func getOptionsFromEnv() []GenericOption { + opts := []GenericOption{} + + tlsConf := &tls.Config{} + DefaultEnvOptionsReader.Apply( + envconfig.WithURL("ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Traces.Endpoint = u.Host + // For OTLP/HTTP endpoint URLs without a per-signal + // configuration, the passed endpoint is used as a base URL + // and the signals are sent to these paths relative to that. 
+ cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Traces.Endpoint = u.Host + // For endpoint URLs for OTLP/HTTP per-signal variables, the + // URL MUST be used as-is without any modification. The only + // exception is that if an URL contains no path part, the root + // path / MUST be used. + path := u.Path + if path == "" { + path = "/" + } + cfg.Traces.URLPath = path + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), + envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + ) + + return opts +} + +func withEndpointScheme(u *url.URL) GenericOption { + switch strings.ToLower(u.Scheme) { + case "http", "unix": + return WithInsecure() + default: + return WithSecure() + } +} + +func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { + return func(cfg Config) Config { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. + cfg.Traces.Endpoint = path.Join(u.Host, u.Path) + return cfg + } +} + +// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. 
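The two ENDPOINT branches earlier in this file encode different OTLP spec rules: the generic variable is treated as a base URL whose path gets DefaultTracesPath joined onto it, while the per-signal variable's path is used verbatim, with "/" substituted when the path is empty. A small standalone demonstration of the difference (the URLs are placeholders):

package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// Generic OTEL_EXPORTER_OTLP_ENDPOINT: signal path appended to the base path.
	base, _ := url.Parse("https://collector.example:4318/base")
	fmt.Println(path.Join(base.Path, "/v1/traces")) // "/base/v1/traces"

	// Per-signal OTEL_EXPORTER_OTLP_TRACES_ENDPOINT: path taken as-is, "/" if absent.
	perSignal, _ := url.Parse("https://collector.example:4318")
	p := perSignal.Path
	if p == "" {
		p = "/"
	}
	fmt.Println(p) // "/"
}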
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + cp := NoCompression + if v == "gzip" { + cp = GzipCompression + } + + fn(cp) + } + } +} + +// revive:disable-next-line:flag-parameter +func withInsecure(b bool) GenericOption { + if b { + return WithInsecure() + } + return WithSecure() +} + +func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if c.RootCAs != nil || len(c.Certificates) > 0 { + fn(c) + } + } +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go new file mode 100644 index 0000000000..8f84a79963 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -0,0 +1,353 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "path" + "strings" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/encoding/gzip" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + "go.opentelemetry.io/otel/internal/global" +) + +const ( + // DefaultTracesPath is a default URL path for endpoint that + // receives spans. + DefaultTracesPath string = "/v1/traces" + // DefaultTimeout is a default max waiting time for the backend to process + // each span batch. + DefaultTimeout time.Duration = 10 * time.Second +) + +type ( + // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request. + // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client. + HTTPTransportProxyFunc func(*http.Request) (*url.URL, error) + + SignalConfig struct { + Endpoint string + Insecure bool + TLSCfg *tls.Config + Headers map[string]string + Compression Compression + Timeout time.Duration + URLPath string + + // gRPC configurations + GRPCCredentials credentials.TransportCredentials + + Proxy HTTPTransportProxyFunc + } + + Config struct { + // Signal specific configurations + Traces SignalConfig + + RetryConfig retry.Config + + // gRPC configurations + ReconnectionPeriod time.Duration + ServiceConfig string + DialOptions []grpc.DialOption + GRPCConn *grpc.ClientConn + } +) + +// NewHTTPConfig returns a new Config with all settings applied from opts and +// any unset setting using the default HTTP config values. 
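Gzip is the only compression value the env reader recognizes; programmatically the same switch is exposed through the public WithCompressor option, which NewGRPCConfig below translates into a grpc.UseCompressor call option. A sketch, with a placeholder endpoint:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	exp, err := otlptracegrpc.New(context.Background(),
		otlptracegrpc.WithEndpoint("collector.example:4317"), // placeholder
		otlptracegrpc.WithInsecure(),
		// Same effect as OTEL_EXPORTER_OTLP_COMPRESSION=gzip.
		otlptracegrpc.WithCompressor("gzip"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(context.Background()) }()
}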
+func NewHTTPConfig(opts ...HTTPOption) Config {
+	cfg := Config{
+		Traces: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+			URLPath:     DefaultTracesPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+	cfg = ApplyHTTPEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyHTTPOption(cfg)
+	}
+	cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath)
+	return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+	tmp := path.Clean(strings.TrimSpace(urlPath))
+	if tmp == "." {
+		return defaultPath
+	}
+	if !path.IsAbs(tmp) {
+		tmp = fmt.Sprintf("/%s", tmp)
+	}
+	return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+	userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
+	cfg := Config{
+		Traces: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+			URLPath:     DefaultTracesPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+		},
+		RetryConfig: retry.DefaultConfig,
+		DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
+	}
+	cfg = ApplyGRPCEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+
+	if cfg.ServiceConfig != "" {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+	}
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+	if cfg.Traces.GRPCCredentials != nil {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
+	} else if cfg.Traces.Insecure {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	} else {
+		// Default to using the host's root CA.
+		creds := credentials.NewTLS(nil)
+		cfg.Traces.GRPCCredentials = creds
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+	}
+	if cfg.Traces.Compression == GzipCompression {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+	}
+	if cfg.ReconnectionPeriod != 0 {
+		p := grpc.ConnectParams{
+			Backoff:           backoff.DefaultConfig,
+			MinConnectTimeout: cfg.ReconnectionPeriod,
+		}
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+	}
+
+	return cfg
+}
+
+type (
+	// GenericOption applies an option to the HTTP or gRPC driver.
+	GenericOption interface {
+		ApplyHTTPOption(Config) Config
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users implementing the
+		// interface and so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// HTTPOption applies an option to the HTTP driver.
+	HTTPOption interface {
+		ApplyHTTPOption(Config) Config
+
+		// A private method to prevent users implementing the
+		// interface and so future additions to it will not
+		// violate compatibility.
+		private()
+	}
+
+	// GRPCOption applies an option to the gRPC driver.
+	GRPCOption interface {
+		ApplyGRPCOption(Config) Config
+
+		// A private method to prevent users implementing the
+		// interface and so future additions to it will not
+		// violate compatibility.
+ private() + } +) + +// genericOption is an option that applies the same logic +// for both gRPC and HTTP. +type genericOption struct { + fn func(Config) Config +} + +func (g *genericOption) ApplyGRPCOption(cfg Config) Config { + return g.fn(cfg) +} + +func (g *genericOption) ApplyHTTPOption(cfg Config) Config { + return g.fn(cfg) +} + +func (genericOption) private() {} + +func newGenericOption(fn func(cfg Config) Config) GenericOption { + return &genericOption{fn: fn} +} + +// splitOption is an option that applies different logics +// for gRPC and HTTP. +type splitOption struct { + httpFn func(Config) Config + grpcFn func(Config) Config +} + +func (g *splitOption) ApplyGRPCOption(cfg Config) Config { + return g.grpcFn(cfg) +} + +func (g *splitOption) ApplyHTTPOption(cfg Config) Config { + return g.httpFn(cfg) +} + +func (splitOption) private() {} + +func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { + return &splitOption{httpFn: httpFn, grpcFn: grpcFn} +} + +// httpOption is an option that is only applied to the HTTP driver. +type httpOption struct { + fn func(Config) Config +} + +func (h *httpOption) ApplyHTTPOption(cfg Config) Config { + return h.fn(cfg) +} + +func (httpOption) private() {} + +func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { + return &httpOption{fn: fn} +} + +// grpcOption is an option that is only applied to the gRPC driver. +type grpcOption struct { + fn func(Config) Config +} + +func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { + return h.fn(cfg) +} + +func (grpcOption) private() {} + +func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { + return &grpcOption{fn: fn} +} + +// Generic Options + +// WithEndpoint configures the trace host and port only; endpoint should +// resemble "example.com" or "localhost:4317". To configure the scheme and path, +// use WithEndpointURL. +func WithEndpoint(endpoint string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Endpoint = endpoint + return cfg + }) +} + +// WithEndpointURL configures the trace scheme, host, port, and path; the +// provided value should resemble "https://example.com:4318/v1/traces". 
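The practical difference between WithEndpoint above and WithEndpointURL below: the former takes a bare host[:port], while the latter parses a full URL and, for any non-https scheme, also flips the connection to insecure. A usage sketch with placeholder addresses:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()

	// Full URL: the http scheme here implies an insecure connection; the URL
	// path is only meaningful for the OTLP/HTTP exporter sharing this config code.
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpointURL("http://collector.example:4317"))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}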
+func WithEndpointURL(v string) GenericOption { + return newGenericOption(func(cfg Config) Config { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "otlptrace: parse endpoint url", "url", v) + return cfg + } + + cfg.Traces.Endpoint = u.Host + cfg.Traces.URLPath = u.Path + if u.Scheme != "https" { + cfg.Traces.Insecure = true + } + + return cfg + }) +} + +func WithCompression(compression Compression) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Compression = compression + return cfg + }) +} + +func WithURLPath(urlPath string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.URLPath = urlPath + return cfg + }) +} + +func WithRetry(rc retry.Config) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.RetryConfig = rc + return cfg + }) +} + +func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { + return newSplitOption(func(cfg Config) Config { + cfg.Traces.TLSCfg = tlsCfg.Clone() + return cfg + }, func(cfg Config) Config { + cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg) + return cfg + }) +} + +func WithInsecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Insecure = true + return cfg + }) +} + +func WithSecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Insecure = false + return cfg + }) +} + +func WithHeaders(headers map[string]string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Headers = headers + return cfg + }) +} + +func WithTimeout(duration time.Duration) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Timeout = duration + return cfg + }) +} + +func WithProxy(pf HTTPTransportProxyFunc) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Proxy = pf + return cfg + }) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go new file mode 100644 index 0000000000..3d4f699d47 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go @@ -0,0 +1,40 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + +const ( + // DefaultCollectorGRPCPort is the default gRPC port of the collector. + DefaultCollectorGRPCPort uint16 = 4317 + // DefaultCollectorHTTPPort is the default HTTP port of the collector. + DefaultCollectorHTTPPort uint16 = 4318 + // DefaultCollectorHost is the host address the Exporter will attempt + // connect to if no collector address is provided. + DefaultCollectorHost string = "localhost" +) + +// Compression describes the compression used for payloads sent to the +// collector. +type Compression int + +const ( + // NoCompression tells the driver to send payloads without + // compression. + NoCompression Compression = iota + // GzipCompression tells the driver to send payloads after + // compressing them with gzip. + GzipCompression +) + +// Marshaler describes the kind of message format sent to the collector. 
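On gRPC, WithTLSClientConfig above is resolved into transport credentials rather than a plain tls.Config; the public equivalent is the exporter's WithTLSCredentials option. A sketch assuming a CA bundle exists at a placeholder path:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc/credentials"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()

	// Placeholder path; the env-var equivalent is OTEL_EXPORTER_OTLP_CERTIFICATE.
	creds, err := credentials.NewClientTLSFromFile("/etc/ssl/collector-ca.pem", "")
	if err != nil {
		log.Fatal(err)
	}

	exp, err := otlptracegrpc.New(ctx, otlptracegrpc.WithTLSCredentials(creds))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}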
+type Marshaler int + +const ( + // MarshalProto tells the driver to send using the protobuf binary format. + MarshalProto Marshaler = iota + // MarshalJSON tells the driver to send using json format. + MarshalJSON +) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go new file mode 100644 index 0000000000..38b97a0131 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go @@ -0,0 +1,26 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + +import ( + "crypto/tls" + "crypto/x509" + "errors" +) + +// CreateTLSConfig creates a tls.Config from a raw certificate bytes +// to verify a server certificate. +func CreateTLSConfig(certBytes []byte) (*tls.Config, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + + return &tls.Config{ + RootCAs: cp, + }, nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go new file mode 100644 index 0000000000..a12ea4c48e --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go @@ -0,0 +1,56 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/partialsuccess.go + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + +import "fmt" + +// PartialSuccess represents the underlying error for all handling +// OTLP partial success messages. Use `errors.Is(err, +// PartialSuccess{})` to test whether an error passed to the OTel +// error handler belongs to this category. +type PartialSuccess struct { + ErrorMessage string + RejectedItems int64 + RejectedKind string +} + +var _ error = PartialSuccess{} + +// Error implements the error interface. +func (ps PartialSuccess) Error() string { + msg := ps.ErrorMessage + if msg == "" { + msg = "empty message" + } + return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) +} + +// Is supports the errors.Is() interface. +func (ps PartialSuccess) Is(err error) bool { + _, ok := err.(PartialSuccess) + return ok +} + +// TracePartialSuccessError returns an error describing a partial success +// response for the trace signal. +func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "spans", + } +} + +// MetricPartialSuccessError returns an error describing a partial success +// response for the metric signal. 
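The Is method above makes PartialSuccess act as a category sentinel: any PartialSuccess value matches any other under errors.Is, regardless of message or counts, so an OTel error handler can test for the category alone. A standalone sketch of that behavior, mirroring (not importing) the internal type:

package main

import (
	"errors"
	"fmt"
)

type partialSuccess struct {
	msg      string
	rejected int64
	kind     string
}

func (ps partialSuccess) Error() string {
	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", ps.msg, ps.rejected, ps.kind)
}

// Is reports true for any partialSuccess, ignoring field values.
func (ps partialSuccess) Is(err error) bool {
	_, ok := err.(partialSuccess)
	return ok
}

func main() {
	err := fmt.Errorf("export: %w", partialSuccess{msg: "queue full", rejected: 7, kind: "spans"})
	fmt.Println(errors.Is(err, partialSuccess{})) // true
}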
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "metric data points", + } +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go new file mode 100644 index 0000000000..4f2113ae2c --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -0,0 +1,145 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/retry/retry.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package retry provides request retry functionality that can perform +// configurable exponential backoff for transient errors and honor any +// explicit throttle responses received. +package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + +import ( + "context" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" +) + +// DefaultConfig are the recommended defaults to use. +var DefaultConfig = Config{ + Enabled: true, + InitialInterval: 5 * time.Second, + MaxInterval: 30 * time.Second, + MaxElapsedTime: time.Minute, +} + +// Config defines configuration for retrying batches in case of export failure +// using an exponential backoff. +type Config struct { + // Enabled indicates whether to not retry sending batches in case of + // export failure. + Enabled bool + // InitialInterval the time to wait after the first failure before + // retrying. + InitialInterval time.Duration + // MaxInterval is the upper bound on backoff interval. Once this value is + // reached the delay between consecutive retries will always be + // `MaxInterval`. + MaxInterval time.Duration + // MaxElapsedTime is the maximum amount of time (including retries) spent + // trying to send a request/batch. Once this value is reached, the data + // is discarded. + MaxElapsedTime time.Duration +} + +// RequestFunc wraps a request with retry logic. +type RequestFunc func(context.Context, func(context.Context) error) error + +// EvaluateFunc returns if an error is retry-able and if an explicit throttle +// duration should be honored that was included in the error. +// +// The function must return true if the error argument is retry-able, +// otherwise it must return false for the first return parameter. +// +// The function must return a non-zero time.Duration if the error contains +// explicit throttle duration that should be honored, otherwise it must return +// a zero valued time.Duration. +type EvaluateFunc func(error) (bool, time.Duration) + +// RequestFunc returns a RequestFunc using the evaluate function to determine +// if requests can be retried and based on the exponential backoff +// configuration of c. +func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + if !c.Enabled { + return func(ctx context.Context, fn func(context.Context) error) error { + return fn(ctx) + } + } + + return func(ctx context.Context, fn func(context.Context) error) error { + // Do not use NewExponentialBackOff since it calls Reset and the code here + // must call Reset after changing the InitialInterval (this saves an + // unnecessary call to Now). 
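+ // (Reset primes the first interval from InitialInterval and records the
+ // start time against which MaxElapsedTime is measured.)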
+ b := &backoff.ExponentialBackOff{ + InitialInterval: c.InitialInterval, + RandomizationFactor: backoff.DefaultRandomizationFactor, + Multiplier: backoff.DefaultMultiplier, + MaxInterval: c.MaxInterval, + MaxElapsedTime: c.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + b.Reset() + + for { + err := fn(ctx) + if err == nil { + return nil + } + + retryable, throttle := evaluate(err) + if !retryable { + return err + } + + bOff := b.NextBackOff() + if bOff == backoff.Stop { + return fmt.Errorf("max retry time elapsed: %w", err) + } + + // Wait for the greater of the backoff or throttle delay. + var delay time.Duration + if bOff > throttle { + delay = bOff + } else { + elapsed := b.GetElapsedTime() + if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) + } + delay = throttle + } + + if ctxErr := waitFunc(ctx, delay); ctxErr != nil { + return fmt.Errorf("%w: %s", ctxErr, err) + } + } + } +} + +// Allow override for testing. +var waitFunc = wait + +// wait takes the caller's context, and the amount of time to wait. It will +// return nil if the timer fires before or at the same time as the context's +// deadline. This indicates that the call can be retried. +func wait(ctx context.Context, delay time.Duration) error { + timer := time.NewTimer(delay) + defer timer.Stop() + + select { + case <-ctx.Done(): + // Handle the case where the timer and context deadline end + // simultaneously by prioritizing the timer expiration nil value + // response. + select { + case <-timer.C: + default: + return ctx.Err() + } + case <-timer.C: + } + + return nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go new file mode 100644 index 0000000000..bbad0e6d01 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -0,0 +1,208 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + +import ( + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" +) + +// Option applies an option to the gRPC driver. +type Option interface { + applyGRPCOption(otlpconfig.Config) otlpconfig.Config +} + +func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption { + converted := make([]otlpconfig.GRPCOption, len(opts)) + for i, o := range opts { + converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption) + } + return converted +} + +// RetryConfig defines configuration for retrying export of span batches that +// failed to be received by the target endpoint. +// +// This configuration does not define any network retry strategy. That is +// entirely handled by the gRPC ClientConn. +type RetryConfig retry.Config + +type wrappedOption struct { + otlpconfig.GRPCOption +} + +func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config { + return w.ApplyGRPCOption(cfg) +} + +// WithInsecure disables client transport security for the exporter's gRPC +// connection just like grpc.WithInsecure() +// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. 
Note, by +// default, client security is required unless WithInsecure is used. +// +// This option has no effect if WithGRPCConn is used. +func WithInsecure() Option { + return wrappedOption{otlpconfig.WithInsecure()} +} + +// WithEndpoint sets the target endpoint (host and port) the Exporter will +// connect to. The provided endpoint should resemble "example.com:4317" (no +// scheme or path). +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +// will take precedence. +// +// If both this option and WithEndpointURL are used, the last used option will +// take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, "localhost:4317" will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithEndpoint(endpoint string) Option { + return wrappedOption{otlpconfig.WithEndpoint(endpoint)} +} + +// WithEndpointURL sets the target endpoint URL (scheme, host, port, path) +// the Exporter will connect to. The provided endpoint URL should resemble +// "https://example.com:4318/v1/traces". +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +// will take precedence. +// +// If both this option and WithEndpoint are used, the last used option will +// take precedence. +// +// If an invalid URL is provided, the default value will be kept. +// +// By default, if an environment variable is not set, and this option is not +// passed, "https://localhost:4317/v1/traces" will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithEndpointURL(u string) Option { + return wrappedOption{otlpconfig.WithEndpointURL(u)} +} + +// WithReconnectionPeriod set the minimum amount of time between connection +// attempts to the target endpoint. +// +// This option has no effect if WithGRPCConn is used. +func WithReconnectionPeriod(rp time.Duration) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.ReconnectionPeriod = rp + return cfg + })} +} + +func compressorToCompression(compressor string) otlpconfig.Compression { + if compressor == "gzip" { + return otlpconfig.GzipCompression + } + + otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor)) + return otlpconfig.NoCompression +} + +// WithCompressor sets the compressor for the gRPC client to use when sending +// requests. Supported compressor values: "gzip". +func WithCompressor(compressor string) Option { + return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))} +} + +// WithHeaders will send the provided headers with each gRPC requests. +func WithHeaders(headers map[string]string) Option { + return wrappedOption{otlpconfig.WithHeaders(headers)} +} + +// WithTLSCredentials allows the connection to use TLS credentials when +// talking to the server. It takes in grpc.TransportCredentials instead of say +// a Certificate file or a tls.Certificate, because the retrieving of these +// credentials can be done in many ways e.g. plain file, in code tls.Config or +// by certificate rotation, so it is up to the caller to decide what to use. 
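
A hedged sketch of one way to obtain such credentials, loading a CA bundle with the grpc credentials helper; the certificate path is illustrative.

package main

import (
	"log"

	"google.golang.org/grpc/credentials"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	// Build TransportCredentials from a CA certificate file (path is illustrative).
	creds, err := credentials.NewClientTLSFromFile("/etc/otel/ca.pem", "")
	if err != nil {
		log.Fatal(err)
	}
	// Hand them to the exporter option whose implementation follows below.
	_ = otlptracegrpc.WithTLSCredentials(creds)
}
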
+// +// This option has no effect if WithGRPCConn is used. +func WithTLSCredentials(creds credentials.TransportCredentials) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.Traces.GRPCCredentials = creds + return cfg + })} +} + +// WithServiceConfig defines the default gRPC service config used. +// +// This option has no effect if WithGRPCConn is used. +func WithServiceConfig(serviceConfig string) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.ServiceConfig = serviceConfig + return cfg + })} +} + +// WithDialOption sets explicit grpc.DialOptions to use when making a +// connection. The options here are appended to the internal grpc.DialOptions +// used so they will take precedence over any other internal grpc.DialOptions +// they might conflict with. +// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError] +// grpc.DialOptions are ignored. +// +// This option has no effect if WithGRPCConn is used. +func WithDialOption(opts ...grpc.DialOption) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.DialOptions = opts + return cfg + })} +} + +// WithGRPCConn sets conn as the gRPC ClientConn used for all communication. +// +// This option takes precedence over any other option that relates to +// establishing or persisting a gRPC connection to a target endpoint. Any +// other option of those types passed will be ignored. +// +// It is the callers responsibility to close the passed conn. The client +// Shutdown method will not close this connection. +func WithGRPCConn(conn *grpc.ClientConn) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.GRPCConn = conn + return cfg + })} +} + +// WithTimeout sets the max amount of time a client will attempt to export a +// batch of spans. This takes precedence over any retry settings defined with +// WithRetry, once this time limit has been reached the export is abandoned +// and the batch of spans is dropped. +// +// If unset, the default timeout will be set to 10 seconds. +func WithTimeout(duration time.Duration) Option { + return wrappedOption{otlpconfig.WithTimeout(duration)} +} + +// WithRetry sets the retry policy for transient retryable errors that may be +// returned by the target endpoint when exporting a batch of spans. +// +// If the target endpoint responds with not only a retryable error, but +// explicitly returns a backoff time in the response. That time will take +// precedence over these settings. +// +// These settings do not define any network retry strategy. That is entirely +// handled by the gRPC ClientConn. +// +// If unset, the default retry policy will be used. It will retry the export +// 5 seconds after receiving a retryable error and increase exponentially +// after each error for no more than a total time of 1 minute. 
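
Tying the timeout and retry options together, a hedged usage sketch; the endpoint is illustrative and the retry values simply restate the documented defaults.

package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
	ctx := context.Background()
	exp, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint("localhost:4317"),
		otlptracegrpc.WithInsecure(),
		otlptracegrpc.WithTimeout(10*time.Second), // per-export cap; wins over retry
		otlptracegrpc.WithRetry(otlptracegrpc.RetryConfig{
			Enabled:         true,
			InitialInterval: 5 * time.Second,
			MaxInterval:     30 * time.Second,
			MaxElapsedTime:  time.Minute,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
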
+func WithRetry(settings RetryConfig) Option { + return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))} +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go new file mode 100644 index 0000000000..14ad8c33b4 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + +// Version is the current release version of the OpenTelemetry OTLP trace exporter in use. +func Version() string { + return "1.28.0" +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/hack/tools/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh new file mode 100644 index 0000000000..93e80ea306 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +top_dir='.' +if [[ $# -gt 0 ]]; then + top_dir="${1}" +fi + +p=$(pwd) +mod_dirs=() + +# Note `mapfile` does not exist in older bash versions: +# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash + +while IFS= read -r line; do + mod_dirs+=("$line") +done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) + +for mod_dir in "${mod_dirs[@]}"; do + cd "${mod_dir}" + + while IFS= read -r line; do + echo ".${line#${p}}" + done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') + cd "${p}" +done diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/handler.go b/hack/tools/vendor/go.opentelemetry.io/otel/handler.go new file mode 100644 index 0000000000..07623b6791 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/handler.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" +) + +// Compile-time check global.ErrDelegator implements ErrorHandler. +var _ ErrorHandler = (*global.ErrDelegator)(nil) + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } + +// Handle is a convenience function for GetErrorHandler().Handle(err). 
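
A small usage sketch of this global error-handler machinery from the public otel package, which fronts these internal hooks; otel.ErrorHandlerFunc adapts a plain function.

package main

import (
	"errors"
	"log"

	"go.opentelemetry.io/otel"
)

func main() {
	// Route all OTel-internal errors through a custom function.
	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
		log.Printf("otel: %v", err)
	}))
	otel.Handle(errors.New("example export failure")) // logs via the handler above
}
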
+func Handle(err error) { global.GetErrorHandler().Handle(err) } diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go new file mode 100644 index 0000000000..822d847947 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package attribute provide several helper functions for some commonly used +logic of processing attributes. +*/ +package attribute // import "go.opentelemetry.io/otel/internal/attribute" + +import ( + "reflect" +) + +// BoolSliceValue converts a bool slice into an array with same elements as slice. +func BoolSliceValue(v []bool) interface{} { + var zero bool + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() +} + +// Int64SliceValue converts an int64 slice into an array with same elements as slice. +func Int64SliceValue(v []int64) interface{} { + var zero int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() +} + +// Float64SliceValue converts a float64 slice into an array with same elements as slice. +func Float64SliceValue(v []float64) interface{} { + var zero float64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() +} + +// StringSliceValue converts a string slice into an array with same elements as slice. +func StringSliceValue(v []string) interface{} { + var zero string + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem() + reflect.Copy(cp, reflect.ValueOf(v)) + return cp.Interface() +} + +// AsBoolSlice converts a bool array into a slice into with same elements as array. +func AsBoolSlice(v interface{}) []bool { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero bool + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]bool) +} + +// AsInt64Slice converts an int64 array into a slice into with same elements as array. +func AsInt64Slice(v interface{}) []int64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero int64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]int64) +} + +// AsFloat64Slice converts a float64 array into a slice into with same elements as array. +func AsFloat64Slice(v interface{}) []float64 { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero float64 + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]float64) +} + +// AsStringSlice converts a string array into a slice into with same elements as array. 
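
A standalone sketch of the slice-to-array round trip these helpers perform, shown for int64 with the same reflect calls: the fixed-size array value is comparable (so it can sit in attribute sets and map keys), and the reverse copy recovers a slice.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	v := []int64{1, 2, 3}

	// Slice -> array, as done by Int64SliceValue.
	arr := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64(0)))).Elem()
	reflect.Copy(arr, reflect.ValueOf(v))
	stored := arr.Interface() // [3]int64{1, 2, 3}, a comparable value

	// Array -> slice, as done by AsInt64Slice.
	rv := reflect.ValueOf(stored)
	cpy := reflect.New(rv.Type())
	reflect.Copy(cpy.Elem(), rv)
	out := cpy.Elem().Slice(0, rv.Len()).Interface().([]int64)

	fmt.Println(stored, out) // [1 2 3] [1 2 3]
}
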
+func AsStringSlice(v interface{}) []string { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero string + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]string) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go new file mode 100644 index 0000000000..b4f85f44a9 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -0,0 +1,32 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package baggage provides base types and functionality to store and retrieve +baggage in Go context. This package exists because the OpenTracing bridge to +OpenTelemetry needs to synchronize state whenever baggage for a context is +modified and that context contains an OpenTracing span. If it were not for +this need this package would not need to exist and the +`go.opentelemetry.io/otel/baggage` package would be the singular place where +W3C baggage is handled. +*/ +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +// List is the collection of baggage members. The W3C allows for duplicates, +// but OpenTelemetry does not, therefore, this is represented as a map. +type List map[string]Item + +// Item is the value and metadata properties part of a list-member. +type Item struct { + Value string + Properties []Property +} + +// Property is a metadata entry for a list-member. +type Property struct { + Key, Value string + + // HasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + HasValue bool +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal/baggage/context.go new file mode 100644 index 0000000000..3aea9c491f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +import "context" + +type baggageContextKeyType int + +const baggageKey baggageContextKeyType = iota + +// SetHookFunc is a callback called when storing baggage in the context. +type SetHookFunc func(context.Context, List) context.Context + +// GetHookFunc is a callback called when getting baggage from the context. +type GetHookFunc func(context.Context, List) List + +type baggageState struct { + list List + + setHook SetHookFunc + getHook GetHookFunc +} + +// ContextWithSetHook returns a copy of parent with hook configured to be +// invoked every time ContextWithBaggage is called. +// +// Passing nil SetHookFunc creates a context with no set hook to call. +func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.setHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithGetHook returns a copy of parent with hook configured to be +// invoked every time FromContext is called. +// +// Passing nil GetHookFunc creates a context with no get hook to call. 
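
A self-contained sketch of the context-hook pattern implemented here: state stored under a private key carries an optional callback that fires on every store, which is how the OpenTracing bridge can observe baggage writes. All names are local stand-ins, not the vendored API.

package main

import (
	"context"
	"fmt"
)

type hookKeyType int

const hookKey hookKeyType = iota

type hookState struct {
	value   string
	setHook func(context.Context, string) context.Context
}

func withSetHook(parent context.Context, hook func(context.Context, string) context.Context) context.Context {
	s, _ := parent.Value(hookKey).(hookState)
	s.setHook = hook
	return context.WithValue(parent, hookKey, s)
}

func withValue(parent context.Context, v string) context.Context {
	s, _ := parent.Value(hookKey).(hookState)
	s.value = v
	ctx := context.WithValue(parent, hookKey, s)
	if s.setHook != nil {
		ctx = s.setHook(ctx, v) // notify the observer of the store
	}
	return ctx
}

func main() {
	ctx := withSetHook(context.Background(), func(ctx context.Context, v string) context.Context {
		fmt.Println("stored:", v) // e.g. synchronize an OpenTracing span here
		return ctx
	})
	_ = withValue(ctx, "tenant=demo") // prints "stored: tenant=demo"
}
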
+func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.getHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithList returns a copy of parent with baggage. Passing nil list +// returns a context without any baggage. +func ContextWithList(parent context.Context, list List) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.list = list + ctx := context.WithValue(parent, baggageKey, s) + if s.setHook != nil { + ctx = s.setHook(ctx, list) + } + + return ctx +} + +// ListFromContext returns the baggage contained in ctx. +func ListFromContext(ctx context.Context) List { + switch v := ctx.Value(baggageKey).(type) { + case baggageState: + if v.getHook != nil { + return v.getHook(ctx, v.list) + } + return v.list + default: + return nil + } +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal/gen.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal/gen.go new file mode 100644 index 0000000000..4259f0320d --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/internal" + +//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go +//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go +//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go + +//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go +//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go +//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go +//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go +//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go +//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go +//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/handler.go new file mode 100644 index 0000000000..c657ff8e75 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/handler.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "sync/atomic" +) + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. 
+ Handle(error) +} + +type ErrDelegator struct { + delegate atomic.Pointer[ErrorHandler] +} + +// Compile-time check that delegator implements ErrorHandler. +var _ ErrorHandler = (*ErrDelegator)(nil) + +func (d *ErrDelegator) Handle(err error) { + if eh := d.delegate.Load(); eh != nil { + (*eh).Handle(err) + return + } + log.Print(err) +} + +// setDelegate sets the ErrorHandler delegate. +func (d *ErrDelegator) setDelegate(eh ErrorHandler) { + d.delegate.Store(&eh) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/instruments.go new file mode 100644 index 0000000000..3a0cc42f6a --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -0,0 +1,412 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// unwrapper unwraps to return the underlying instrument implementation. +type unwrapper interface { + Unwrap() metric.Observable +} + +type afCounter struct { + embedded.Float64ObservableCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableCounterOption + + delegate atomic.Value // metric.Float64ObservableCounter +} + +var ( + _ unwrapper = (*afCounter)(nil) + _ metric.Float64ObservableCounter = (*afCounter)(nil) +) + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableCounter) + } + return nil +} + +type afUpDownCounter struct { + embedded.Float64ObservableUpDownCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Float64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*afUpDownCounter)(nil) + _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +) + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableUpDownCounter) + } + return nil +} + +type afGauge struct { + embedded.Float64ObservableGauge + metric.Float64Observable + + name string + opts []metric.Float64ObservableGaugeOption + + delegate atomic.Value // metric.Float64ObservableGauge +} + +var ( + _ unwrapper = (*afGauge)(nil) + _ metric.Float64ObservableGauge = (*afGauge)(nil) +) + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableGauge(i.name, i.opts...) 
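+ // A creation failure leaves the delegate unset, so the placeholder
+ // instrument effectively stays a no-op.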
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableGauge) + } + return nil +} + +type aiCounter struct { + embedded.Int64ObservableCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableCounterOption + + delegate atomic.Value // metric.Int64ObservableCounter +} + +var ( + _ unwrapper = (*aiCounter)(nil) + _ metric.Int64ObservableCounter = (*aiCounter)(nil) +) + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableCounter) + } + return nil +} + +type aiUpDownCounter struct { + embedded.Int64ObservableUpDownCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Int64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*aiUpDownCounter)(nil) + _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +) + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableUpDownCounter) + } + return nil +} + +type aiGauge struct { + embedded.Int64ObservableGauge + metric.Int64Observable + + name string + opts []metric.Int64ObservableGaugeOption + + delegate atomic.Value // metric.Int64ObservableGauge +} + +var ( + _ unwrapper = (*aiGauge)(nil) + _ metric.Int64ObservableGauge = (*aiGauge)(nil) +) + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableGauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableGauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + embedded.Float64Counter + + name string + opts []metric.Float64CounterOption + + delegate atomic.Value // metric.Float64Counter +} + +var _ metric.Float64Counter = (*sfCounter)(nil) + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Counter).Add(ctx, incr, opts...) + } +} + +type sfUpDownCounter struct { + embedded.Float64UpDownCounter + + name string + opts []metric.Float64UpDownCounterOption + + delegate atomic.Value // metric.Float64UpDownCounter +} + +var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64UpDownCounter(i.name, i.opts...) 
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) + } +} + +type sfHistogram struct { + embedded.Float64Histogram + + name string + opts []metric.Float64HistogramOption + + delegate atomic.Value // metric.Float64Histogram +} + +var _ metric.Float64Histogram = (*sfHistogram)(nil) + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Float64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Histogram).Record(ctx, x, opts...) + } +} + +type sfGauge struct { + embedded.Float64Gauge + + name string + opts []metric.Float64GaugeOption + + delegate atomic.Value // metric.Float64Gauge +} + +var _ metric.Float64Gauge = (*sfGauge)(nil) + +func (i *sfGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Gauge).Record(ctx, x, opts...) + } +} + +type siCounter struct { + embedded.Int64Counter + + name string + opts []metric.Int64CounterOption + + delegate atomic.Value // metric.Int64Counter +} + +var _ metric.Int64Counter = (*siCounter)(nil) + +func (i *siCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Counter).Add(ctx, x, opts...) + } +} + +type siUpDownCounter struct { + embedded.Int64UpDownCounter + + name string + opts []metric.Int64UpDownCounterOption + + delegate atomic.Value // metric.Int64UpDownCounter +} + +var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) + +func (i *siUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) + } +} + +type siHistogram struct { + embedded.Int64Histogram + + name string + opts []metric.Int64HistogramOption + + delegate atomic.Value // metric.Int64Histogram +} + +var _ metric.Int64Histogram = (*siHistogram)(nil) + +func (i *siHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Int64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Histogram).Record(ctx, x, opts...) 
+ } +} + +type siGauge struct { + embedded.Int64Gauge + + name string + opts []metric.Int64GaugeOption + + delegate atomic.Value // metric.Int64Gauge +} + +var _ metric.Int64Gauge = (*siGauge)(nil) + +func (i *siGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64Gauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Gauge).Record(ctx, x, opts...) + } +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go new file mode 100644 index 0000000000..adbca7d347 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -0,0 +1,62 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "log" + "os" + "sync/atomic" + + "github.com/go-logr/logr" + "github.com/go-logr/stdr" +) + +// globalLogger holds a reference to the [logr.Logger] used within +// go.opentelemetry.io/otel. +// +// The default logger uses stdr which is backed by the standard `log.Logger` +// interface. This logger will only show messages at the Error Level. +var globalLogger = func() *atomic.Pointer[logr.Logger] { + l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) + + p := new(atomic.Pointer[logr.Logger]) + p.Store(&l) + return p +}() + +// SetLogger sets the global Logger to l. +// +// To see Warn messages use a logger with `l.V(1).Enabled() == true` +// To see Info messages use a logger with `l.V(4).Enabled() == true` +// To see Debug messages use a logger with `l.V(8).Enabled() == true`. +func SetLogger(l logr.Logger) { + globalLogger.Store(&l) +} + +// GetLogger returns the global logger. +func GetLogger() logr.Logger { + return *globalLogger.Load() +} + +// Info prints messages about the general state of the API or SDK. +// This should usually be less than 5 messages a minute. +func Info(msg string, keysAndValues ...interface{}) { + GetLogger().V(4).Info(msg, keysAndValues...) +} + +// Error prints messages about exceptional states of the API or SDK. +func Error(err error, msg string, keysAndValues ...interface{}) { + GetLogger().Error(err, msg, keysAndValues...) +} + +// Debug prints messages about all internal changes in the API or SDK. +func Debug(msg string, keysAndValues ...interface{}) { + GetLogger().V(8).Info(msg, keysAndValues...) +} + +// Warn prints messages about warnings in the API or SDK. +// Not an error but is likely more important than an informational event. +func Warn(msg string, keysAndValues ...interface{}) { + GetLogger().V(1).Info(msg, keysAndValues...) 
+} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/meter.go new file mode 100644 index 0000000000..cfd1df9bfa --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -0,0 +1,368 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "container/list" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. +// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + embedded.MeterProvider + + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. + + c := metric.NewMeterConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + embedded.Meter + + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments []delegatedInstrument + + registry list.List + + delegate atomic.Value // metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + meter := provider.Meter(m.name, m.opts...) + m.delegate.Store(meter) + + m.mtx.Lock() + defer m.mtx.Unlock() + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + var n *list.Element + for e := m.registry.Front(); e != nil; e = n { + r := e.Value.(*registration) + r.setDelegate(meter) + n = e.Next() + m.registry.Remove(e) + } + + m.instruments = nil + m.registry.Init() +} + +func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Counter(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64UpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64UpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Histogram(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Gauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +// RegisterCallback captures the function that will be called during Collect. +func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + insts = unwrapInstruments(insts) + return del.RegisterCallback(f, insts...) + } + + m.mtx.Lock() + defer m.mtx.Unlock() + + reg := ®istration{instruments: insts, function: f} + e := m.registry.PushBack(reg) + reg.unreg = func() error { + m.mtx.Lock() + _ = m.registry.Remove(e) + m.mtx.Unlock() + return nil + } + return reg, nil +} + +type wrapped interface { + unwrap() metric.Observable +} + +func unwrapInstruments(instruments []metric.Observable) []metric.Observable { + out := make([]metric.Observable, 0, len(instruments)) + + for _, inst := range instruments { + if in, ok := inst.(wrapped); ok { + out = append(out, in.unwrap()) + } else { + out = append(out, inst) + } + } + + return out +} + +type registration struct { + embedded.Registration + + instruments []metric.Observable + function metric.Callback + + unreg func() error + unregMu sync.Mutex +} + +func (c *registration) setDelegate(m metric.Meter) { + insts := unwrapInstruments(c.instruments) + + c.unregMu.Lock() + defer c.unregMu.Unlock() + + if c.unreg == nil { + // Unregister already called. + return + } + + reg, err := m.RegisterCallback(c.function, insts...) + if err != nil { + GetErrorHandler().Handle(err) + } + + c.unreg = reg.Unregister +} + +func (c *registration) Unregister() error { + c.unregMu.Lock() + defer c.unregMu.Unlock() + if c.unreg == nil { + // Unregister already called. 
+ return nil + } + + var err error + err, c.unreg = c.unreg(), nil + return err +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/propagator.go new file mode 100644 index 0000000000..38560ff991 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/propagator.go @@ -0,0 +1,71 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/propagation" +) + +// textMapPropagator is a default TextMapPropagator that delegates calls to a +// registered delegate if one is set, otherwise it defaults to delegating the +// calls to a the default no-op propagation.TextMapPropagator. +type textMapPropagator struct { + mtx sync.Mutex + once sync.Once + delegate propagation.TextMapPropagator + noop propagation.TextMapPropagator +} + +// Compile-time guarantee that textMapPropagator implements the +// propagation.TextMapPropagator interface. +var _ propagation.TextMapPropagator = (*textMapPropagator)(nil) + +func newTextMapPropagator() *textMapPropagator { + return &textMapPropagator{ + noop: propagation.NewCompositeTextMapPropagator(), + } +} + +// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are +// forwarded to. Delegation can only be performed once, all subsequent calls +// perform no delegation. +func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) { + if delegate == nil { + return + } + + p.mtx.Lock() + p.once.Do(func() { p.delegate = delegate }) + p.mtx.Unlock() +} + +// effectiveDelegate returns the current delegate of p if one is set, +// otherwise the default noop TextMapPropagator is returned. This method +// can be called concurrently. +func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator { + p.mtx.Lock() + defer p.mtx.Unlock() + if p.delegate != nil { + return p.delegate + } + return p.noop +} + +// Inject set cross-cutting concerns from the Context into the carrier. +func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { + p.effectiveDelegate().Inject(ctx, carrier) +} + +// Extract reads cross-cutting concerns from the carrier into a Context. +func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { + return p.effectiveDelegate().Extract(ctx, carrier) +} + +// Fields returns the keys whose values are set with Inject. 
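
A usage sketch of this delegation seen from the public API: a propagator handle obtained before configuration forwards to whatever is installed later. TraceContext and Baggage are the standard W3C propagators.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	p := otel.GetTextMapPropagator() // delegating handle; effectively a no-op for now

	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{}, propagation.Baggage{},
	))

	fmt.Println(p.Fields()) // forwarded: [traceparent tracestate baggage]
}
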
+func (p *textMapPropagator) Fields() []string { + return p.effectiveDelegate().Fields() +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/state.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/state.go new file mode 100644 index 0000000000..204ea142a5 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "errors" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type ( + errorHandlerHolder struct { + eh ErrorHandler + } + + tracerProviderHolder struct { + tp trace.TracerProvider + } + + propagatorsHolder struct { + tm propagation.TextMapPropagator + } + + meterProviderHolder struct { + mp metric.MeterProvider + } +) + +var ( + globalErrorHandler = defaultErrorHandler() + globalTracer = defaultTracerValue() + globalPropagators = defaultPropagatorsValue() + globalMeterProvider = defaultMeterProvider() + + delegateErrorHandlerOnce sync.Once + delegateTraceOnce sync.Once + delegateTextMapPropagatorOnce sync.Once + delegateMeterOnce sync.Once +) + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return globalErrorHandler.Load().(errorHandlerHolder).eh +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + current := GetErrorHandler() + + if _, cOk := current.(*ErrDelegator); cOk { + if _, ehOk := h.(*ErrDelegator); ehOk && current == h { + // Do not assign to the delegate of the default ErrDelegator to be + // itself. + Error( + errors.New("no ErrorHandler delegate configured"), + "ErrorHandler remains its current value.", + ) + return + } + } + + delegateErrorHandlerOnce.Do(func() { + if def, ok := current.(*ErrDelegator); ok { + def.setDelegate(h) + } + }) + globalErrorHandler.Store(errorHandlerHolder{eh: h}) +} + +// TracerProvider is the internal implementation for global.TracerProvider. +func TracerProvider() trace.TracerProvider { + return globalTracer.Load().(tracerProviderHolder).tp +} + +// SetTracerProvider is the internal implementation for global.SetTracerProvider. +func SetTracerProvider(tp trace.TracerProvider) { + current := TracerProvider() + + if _, cOk := current.(*tracerProvider); cOk { + if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { + // Do not assign the default delegating TracerProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in tracer provider"), + "Setting tracer provider to its current value. 
No delegate will be configured", + ) + return + } + } + + delegateTraceOnce.Do(func() { + if def, ok := current.(*tracerProvider); ok { + def.setDelegate(tp) + } + }) + globalTracer.Store(tracerProviderHolder{tp: tp}) +} + +// TextMapPropagator is the internal implementation for global.TextMapPropagator. +func TextMapPropagator() propagation.TextMapPropagator { + return globalPropagators.Load().(propagatorsHolder).tm +} + +// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. +func SetTextMapPropagator(p propagation.TextMapPropagator) { + current := TextMapPropagator() + + if _, cOk := current.(*textMapPropagator); cOk { + if _, pOk := p.(*textMapPropagator); pOk && current == p { + // Do not assign the default delegating TextMapPropagator to + // delegate to itself. + Error( + errors.New("no delegate configured in text map propagator"), + "Setting text map propagator to its current value. No delegate will be configured", + ) + return + } + } + + // For the textMapPropagator already returned by TextMapPropagator + // delegate to p. + delegateTextMapPropagatorOnce.Do(func() { + if def, ok := current.(*textMapPropagator); ok { + def.SetDelegate(p) + } + }) + // Return p when subsequent calls to TextMapPropagator are made. + globalPropagators.Store(propagatorsHolder{tm: p}) +} + +// MeterProvider is the internal implementation for global.MeterProvider. +func MeterProvider() metric.MeterProvider { + return globalMeterProvider.Load().(meterProviderHolder).mp +} + +// SetMeterProvider is the internal implementation for global.SetMeterProvider. +func SetMeterProvider(mp metric.MeterProvider) { + current := MeterProvider() + if _, cOk := current.(*meterProvider); cOk { + if _, mpOk := mp.(*meterProvider); mpOk && current == mp { + // Do not assign the default delegating MeterProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in meter provider"), + "Setting meter provider to its current value. No delegate will be configured", + ) + return + } + } + + delegateMeterOnce.Do(func() { + if def, ok := current.(*meterProvider); ok { + def.setDelegate(mp) + } + }) + globalMeterProvider.Store(meterProviderHolder{mp: mp}) +} + +func defaultErrorHandler() *atomic.Value { + v := &atomic.Value{} + v.Store(errorHandlerHolder{eh: &ErrDelegator{}}) + return v +} + +func defaultTracerValue() *atomic.Value { + v := &atomic.Value{} + v.Store(tracerProviderHolder{tp: &tracerProvider{}}) + return v +} + +func defaultPropagatorsValue() *atomic.Value { + v := &atomic.Value{} + v.Store(propagatorsHolder{tm: newTextMapPropagator()}) + return v +} + +func defaultMeterProvider() *atomic.Value { + v := &atomic.Value{} + v.Store(meterProviderHolder{mp: &meterProvider{}}) + return v +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/trace.go new file mode 100644 index 0000000000..e31f442b48 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package global // import "go.opentelemetry.io/otel/internal/global" + +/* +This file contains the forwarding implementation of the TracerProvider used as +the default global instance. Prior to initialization of an SDK, Tracers +returned by the global TracerProvider will provide no-op functionality. This +means that all Span created prior to initialization are no-op Spans. 
+ +Once an SDK has been initialized, all provided no-op Tracers are swapped for +Tracers provided by the SDK defined TracerProvider. However, any Span started +prior to this initialization does not change its behavior. Meaning, the Span +remains a no-op Span. + +The implementation to track and swap Tracers locks all new Tracer creation +until the swap is complete. This assumes that this operation is not +performance-critical. If that assumption is incorrect, be sure to configure an +SDK prior to any Tracer creation. +*/ + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" +) + +// tracerProvider is a placeholder for a configured SDK TracerProvider. +// +// All TracerProvider functionality is forwarded to a delegate once +// configured. +type tracerProvider struct { + embedded.TracerProvider + + mtx sync.Mutex + tracers map[il]*tracer + delegate trace.TracerProvider +} + +// Compile-time guarantee that tracerProvider implements the TracerProvider +// interface. +var _ trace.TracerProvider = &tracerProvider{} + +// setDelegate configures p to delegate all TracerProvider functionality to +// provider. +// +// All Tracers provided prior to this function call are switched out to be +// Tracers provided by provider. +// +// It is guaranteed by the caller that this happens only once. +func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.tracers) == 0 { + return + } + + for _, t := range p.tracers { + t.setDelegate(provider) + } + + p.tracers = nil +} + +// Tracer implements TracerProvider. +func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Tracer(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. + + c := trace.NewTracerConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + schema: c.SchemaURL(), + } + + if p.tracers == nil { + p.tracers = make(map[il]*tracer) + } + + if val, ok := p.tracers[key]; ok { + return val + } + + t := &tracer{name: name, opts: opts, provider: p} + p.tracers[key] = t + return t +} + +type il struct{ name, version, schema string } + +// tracer is a placeholder for a trace.Tracer. +// +// All Tracer functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopTracer. +type tracer struct { + embedded.Tracer + + name string + opts []trace.TracerOption + provider *tracerProvider + + delegate atomic.Value +} + +// Compile-time guarantee that tracer implements the trace.Tracer interface. +var _ trace.Tracer = &tracer{} + +// setDelegate configures t to delegate all Tracer functionality to Tracers +// created by provider. +// +// All subsequent calls to the Tracer methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (t *tracer) setDelegate(provider trace.TracerProvider) { + t.delegate.Store(provider.Tracer(t.name, t.opts...)) +} + +// Start implements trace.Tracer by forwarding the call to t.delegate if +// set, otherwise it forwards the call to a NoopTracer. 
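+// The delegate is read with an atomic load, so Start is safe to call
+// concurrently with setDelegate; once a delegate Tracer has been stored,
+// every subsequent Start call forwards to it.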
+func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + delegate := t.delegate.Load() + if delegate != nil { + return delegate.(trace.Tracer).Start(ctx, name, opts...) + } + + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} + ctx = trace.ContextWithSpan(ctx, s) + return ctx, s +} + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + embedded.Span + + sc trace.SpanContext + tracer *tracer +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// AddLink does nothing. +func (nonRecordingSpan) AddLink(trace.Link) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go new file mode 100644 index 0000000000..9b1da2c02b --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/internal" + +import ( + "math" + "unsafe" +) + +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. + if b { + return 1 + } + return 0 +} + +func RawToBool(r uint64) bool { + return r != 0 +} + +func Int64ToRaw(i int64) uint64 { + return uint64(i) +} + +func RawToInt64(r uint64) int64 { + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec +} + +func Float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func RawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} + +func RawPtrToFloat64Ptr(r *uint64) *float64 { + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec +} + +func RawPtrToInt64Ptr(r *uint64) *int64 { + // Assumes original was a valid *int64 (overflow not checked). 
+ return (*int64)(unsafe.Pointer(r)) // nolint: gosec +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/internal_logging.go b/hack/tools/vendor/go.opentelemetry.io/otel/internal_logging.go new file mode 100644 index 0000000000..6de7f2e4d8 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "github.com/go-logr/logr" + + "go.opentelemetry.io/otel/internal/global" +) + +// SetLogger configures the logger used internally to opentelemetry. +func SetLogger(logger logr.Logger) { + global.SetLogger(logger) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric.go b/hack/tools/vendor/go.opentelemetry.io/otel/metric.go new file mode 100644 index 0000000000..1e6473b32f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/metric" +) + +// Meter returns a Meter from the global MeterProvider. The name must be the +// name of the library providing instrumentation. This name may be the same as +// the instrumented code only if that code provides built-in instrumentation. +// If the name is empty, then a implementation defined default name will be +// used instead. +// +// If this is called before a global MeterProvider is registered the returned +// Meter will be a No-op implementation of a Meter. When a global MeterProvider +// is registered for the first time, the returned Meter, and all the +// instruments it has created or will create, are recreated automatically from +// the new MeterProvider. +// +// This is short for GetMeterProvider().Meter(name). +func Meter(name string, opts ...metric.MeterOption) metric.Meter { + return GetMeterProvider().Meter(name, opts...) +} + +// GetMeterProvider returns the registered global meter provider. +// +// If no global GetMeterProvider has been registered, a No-op GetMeterProvider +// implementation is returned. When a global GetMeterProvider is registered for +// the first time, the returned GetMeterProvider, and all the Meters it has +// created or will create, are recreated automatically from the new +// GetMeterProvider. +func GetMeterProvider() metric.MeterProvider { + return global.MeterProvider() +} + +// SetMeterProvider registers mp as the global MeterProvider. +func SetMeterProvider(mp metric.MeterProvider) { + global.SetMeterProvider(mp) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/LICENSE b/hack/tools/vendor/go.opentelemetry.io/otel/metric/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
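As context for the global delegation code vendored above (state.go and trace.go), the sketch below shows the intended usage pattern: a Tracer obtained before any SDK is installed is a forwarding placeholder, and SetTracerProvider swaps in the real implementation for it. This is illustrative only and not part of the diff; it assumes the separate go.opentelemetry.io/otel/sdk/trace module, which this vendor update does not include.

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    func main() {
        // Obtained before SDK setup: a placeholder tracer from the global
        // delegating TracerProvider; spans it starts now are no-ops.
        tr := otel.Tracer("example")

        // Installing an SDK provider swaps the placeholder's delegate, so
        // the same tr value produces real spans from here on. Spans started
        // before this point remain no-op.
        tp := sdktrace.NewTracerProvider()
        defer func() { _ = tp.Shutdown(context.Background()) }()
        otel.SetTracerProvider(tp)

        _, span := tr.Start(context.Background(), "operation")
        span.End()
    }
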
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/metric/README.md new file mode 100644 index 0000000000..0cf902e01f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/README.md @@ -0,0 +1,3 @@ +# Metric API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/hack/tools/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go new file mode 100644 index 0000000000..cf23db7780 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -0,0 +1,260 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Float64Observable describes a set of instruments used asynchronously to +// record float64 measurements once per collection cycle. Observations of +// these instruments are only made within a callback. +// +// Warning: Methods may be added to this interface in minor releases. +type Float64Observable interface { + Observable + + float64Observable() +} + +// Float64ObservableCounter is an instrument used to asynchronously record +// increasing float64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for +// unimplemented methods. +type Float64ObservableCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableCounter + + Float64Observable +} + +// Float64ObservableCounterConfig contains options for asynchronous counter +// instruments that record float64 values. +type Float64ObservableCounterConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableCounterConfig returns a new +// [Float64ObservableCounterConfig] with all opts applied. +func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig { + var config Float64ObservableCounterConfig + for _, o := range opts { + config = o.applyFloat64ObservableCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableCounterOption applies options to a +// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableCounterOption. 
+type Float64ObservableCounterOption interface { + applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig +} + +// Float64ObservableUpDownCounter is an instrument used to asynchronously +// record float64 measurements once per collection cycle. Observations are only +// made within a callback for this instrument. The value observed is assumed +// the to be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64ObservableUpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableUpDownCounter + + Float64Observable +} + +// Float64ObservableUpDownCounterConfig contains options for asynchronous +// counter instruments that record float64 values. +type Float64ObservableUpDownCounterConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableUpDownCounterConfig returns a new +// [Float64ObservableUpDownCounterConfig] with all opts applied. +func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { + var config Float64ObservableUpDownCounterConfig + for _, o := range opts { + config = o.applyFloat64ObservableUpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableUpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableUpDownCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableUpDownCounterOption applies options to a +// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableUpDownCounterOption. +type Float64ObservableUpDownCounterOption interface { + applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig +} + +// Float64ObservableGauge is an instrument used to asynchronously record +// instantaneous float64 measurements once per collection cycle. Observations +// are only made within a callback for this instrument. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64ObservableGauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64ObservableGauge + + Float64Observable +} + +// Float64ObservableGaugeConfig contains options for asynchronous counter +// instruments that record float64 values. +type Float64ObservableGaugeConfig struct { + description string + unit string + callbacks []Float64Callback +} + +// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig] +// with all opts applied. 
+func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig { + var config Float64ObservableGaugeConfig + for _, o := range opts { + config = o.applyFloat64ObservableGauge(config) + } + return config +} + +// Description returns the configured description. +func (c Float64ObservableGaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64ObservableGaugeConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback { + return c.callbacks +} + +// Float64ObservableGaugeOption applies options to a +// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and +// [InstrumentOption] for other options that can be used as a +// Float64ObservableGaugeOption. +type Float64ObservableGaugeOption interface { + applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig +} + +// Float64Observer is a recorder of float64 measurements. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Observer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Observer + + // Observe records the float64 value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Observe(value float64, options ...ObserveOption) +} + +// Float64Callback is a function registered with a Meter that makes +// observations for a Float64Observerable instrument it is registered with. +// Calls to the Float64Observer record measurement values for the +// Float64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Float64Callbacks. Meaning, it should not report measurements with the same +// attributes as another Float64Callbacks also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Float64Callback func(context.Context, Float64Observer) error + +// Float64ObservableOption applies options to float64 Observer instruments. +type Float64ObservableOption interface { + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +type float64CallbackOpt struct { + cback Float64Callback +} + +func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +// WithFloat64Callback adds callback to be called for an instrument. 
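+// The callback will be invoked once per collection cycle for each
+// asynchronous instrument created with this option.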
+func WithFloat64Callback(callback Float64Callback) Float64ObservableOption { + return float64CallbackOpt{callback} +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/hack/tools/vendor/go.opentelemetry.io/otel/metric/asyncint64.go new file mode 100644 index 0000000000..c82ba5324e --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -0,0 +1,258 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Int64Observable describes a set of instruments used asynchronously to record +// int64 measurements once per collection cycle. Observations of these +// instruments are only made within a callback. +// +// Warning: Methods may be added to this interface in minor releases. +type Int64Observable interface { + Observable + + int64Observable() +} + +// Int64ObservableCounter is an instrument used to asynchronously record +// increasing int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. The value observed is +// assumed the to be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64ObservableCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64ObservableCounter + + Int64Observable +} + +// Int64ObservableCounterConfig contains options for asynchronous counter +// instruments that record int64 values. +type Int64ObservableCounterConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig] +// with all opts applied. +func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig { + var config Int64ObservableCounterConfig + for _, o := range opts { + config = o.applyInt64ObservableCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64ObservableCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64ObservableCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObservableCounterOption applies options to a +// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and +// [InstrumentOption] for other options that can be used as an +// Int64ObservableCounterOption. +type Int64ObservableCounterOption interface { + applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig +} + +// Int64ObservableUpDownCounter is an instrument used to asynchronously record +// int64 measurements once per collection cycle. Observations are only made +// within a callback for this instrument. The value observed is assumed the to +// be the cumulative sum of the count. +// +// Warning: Methods may be added to this interface in minor releases. 
See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64ObservableUpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64ObservableUpDownCounter + + Int64Observable +} + +// Int64ObservableUpDownCounterConfig contains options for asynchronous counter +// instruments that record int64 values. +type Int64ObservableUpDownCounterConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObservableUpDownCounterConfig returns a new +// [Int64ObservableUpDownCounterConfig] with all opts applied. +func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { + var config Int64ObservableUpDownCounterConfig + for _, o := range opts { + config = o.applyInt64ObservableUpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Int64ObservableUpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64ObservableUpDownCounterConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. +func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObservableUpDownCounterOption applies options to a +// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and +// [InstrumentOption] for other options that can be used as an +// Int64ObservableUpDownCounterOption. +type Int64ObservableUpDownCounterOption interface { + applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig +} + +// Int64ObservableGauge is an instrument used to asynchronously record +// instantaneous int64 measurements once per collection cycle. Observations are +// only made within a callback for this instrument. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64ObservableGauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64ObservableGauge + + Int64Observable +} + +// Int64ObservableGaugeConfig contains options for asynchronous counter +// instruments that record int64 values. +type Int64ObservableGaugeConfig struct { + description string + unit string + callbacks []Int64Callback +} + +// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig] +// with all opts applied. +func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig { + var config Int64ObservableGaugeConfig + for _, o := range opts { + config = o.applyInt64ObservableGauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64ObservableGaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64ObservableGaugeConfig) Unit() string { + return c.unit +} + +// Callbacks returns the configured callbacks. 
+func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback { + return c.callbacks +} + +// Int64ObservableGaugeOption applies options to a +// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and +// [InstrumentOption] for other options that can be used as an +// Int64ObservableGaugeOption. +type Int64ObservableGaugeOption interface { + applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig +} + +// Int64Observer is a recorder of int64 measurements. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Observer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Observer + + // Observe records the int64 value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Observe(value int64, options ...ObserveOption) +} + +// Int64Callback is a function registered with a Meter that makes observations +// for an Int64Observerable instrument it is registered with. Calls to the +// Int64Observer record measurement values for the Int64Observable. +// +// The function needs to complete in a finite amount of time and the deadline +// of the passed context is expected to be honored. +// +// The function needs to make unique observations across all registered +// Int64Callbacks. Meaning, it should not report measurements with the same +// attributes as another Int64Callbacks also registered for the same +// instrument. +// +// The function needs to be concurrent safe. +type Int64Callback func(context.Context, Int64Observer) error + +// Int64ObservableOption applies options to int64 Observer instruments. +type Int64ObservableOption interface { + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption +} + +type int64CallbackOpt struct { + cback Int64Callback +} + +func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + cfg.callbacks = append(cfg.callbacks, o.cback) + return cfg +} + +// WithInt64Callback adds callback to be called for an instrument. +func WithInt64Callback(callback Int64Callback) Int64ObservableOption { + return int64CallbackOpt{callback} +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/config.go b/hack/tools/vendor/go.opentelemetry.io/otel/metric/config.go new file mode 100644 index 0000000000..d9e3b13e4d --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/config.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// MeterConfig contains options for Meters. 
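+// A MeterConfig is created by NewMeterConfig, which starts from the zero
+// value and applies each supplied MeterOption in order.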
+type MeterConfig struct { + instrumentationVersion string + schemaURL string + attrs attribute.Set + + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. +} + +// InstrumentationVersion returns the version of the library providing +// instrumentation. +func (cfg MeterConfig) InstrumentationVersion() string { + return cfg.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { + return cfg.attrs +} + +// SchemaURL is the schema_url of the library providing instrumentation. +func (cfg MeterConfig) SchemaURL() string { + return cfg.schemaURL +} + +// MeterOption is an interface for applying Meter options. +type MeterOption interface { + // applyMeter is used to set a MeterOption value of a MeterConfig. + applyMeter(MeterConfig) MeterConfig +} + +// NewMeterConfig creates a new MeterConfig and applies +// all the given options. +func NewMeterConfig(opts ...MeterOption) MeterConfig { + var config MeterConfig + for _, o := range opts { + config = o.applyMeter(config) + } + return config +} + +type meterOptionFunc func(MeterConfig) MeterConfig + +func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.instrumentationVersion = version + return config + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL. +func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.schemaURL = schemaURL + return config + }) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/metric/doc.go new file mode 100644 index 0000000000..f153745b00 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package metric provides the OpenTelemetry API used to measure metrics about +source code operation. + +This API is separate from its implementation so the instrumentation built from +it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official +OpenTelemetry implementation of this API. + +All measurements made with this package are made via instruments. These +instruments are created by a [Meter] which itself is created by a +[MeterProvider]. Applications need to accept a [MeterProvider] implementation +as a starting point when instrumenting. This can be done directly, or by using +the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an +appropriately named [Meter] from the accepted [MeterProvider], instrumentation +can then be built from the [Meter]'s instruments. + +# Instruments + +Each instrument is designed to make measurements of a particular type. 
Broadly, +all instruments fall into two overlapping logical categories: asynchronous or +synchronous, and int64 or float64. + +All synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are used to measure the operation and performance of source +code during the source code execution. These instruments only make measurements +when the source code they instrument is run. + +All asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) are used to measure metrics outside of the execution +of source code. They are said to make "observations" via a callback function +called once every measurement collection cycle. + +Each instrument is also grouped by the value type it measures. Either int64 or +float64. The value being measured will dictate which instrument in these +categories to use. + +Outside of these two broad categories, instruments are described by the +function they are designed to serve. All Counters ([Int64Counter], +[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are +designed to measure values that never decrease in value, but instead only +incrementally increase in value. UpDownCounters ([Int64UpDownCounter], +[Float64UpDownCounter], [Int64ObservableUpDownCounter], and +[Float64ObservableUpDownCounter]) on the other hand, are designed to measure +values that can increase and decrease. When more information needs to be +conveyed about all the synchronous measurements made during a collection cycle, +a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally, +when just the most recent measurement needs to be conveyed about an +asynchronous measurement, a Gauge ([Int64ObservableGauge] and +[Float64ObservableGauge]) should be used. + +See the [OpenTelemetry documentation] for more information about instruments +and their intended use. + +# Instrument Name + +OpenTelemetry defines an [instrument name syntax] that restricts what +instrument names are allowed. + +Instrument names should ... + + - Not be empty. + - Have an alphabetic character as their first letter. + - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’, + ‘-’, or ‘/’. + - Have a maximum length of 255 letters. + +To ensure compatibility with observability platforms, all instruments created +need to conform to this syntax. Not all implementations of the API will validate +these names, it is the callers responsibility to ensure compliance. + +# Measurements + +Measurements are made by recording values and information about the values with +an instrument. How these measurements are recorded depends on the instrument. + +Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are recorded using the instrument methods directly. All +counter instruments have an Add method that is used to measure an increment +value, and all histogram instruments have a Record method to measure a data +point. + +Asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) record measurements within a callback function. 
The +callback is registered with the Meter which ensures the callback is called once +per collection cycle. A callback can be registered two ways: during the +instrument's creation using an option, or later using the RegisterCallback +method of the [Meter] that created the instrument. + +If the following criteria are met, an option ([WithInt64Callback] or +[WithFloat64Callback]) can be used during the asynchronous instrument's +creation to register a callback ([Int64Callback] or [Float64Callback], +respectively): + + - The measurement process is known when the instrument is created + - Only that instrument will make a measurement within the callback + - The callback never needs to be unregistered + +If the criteria are not met, use the RegisterCallback method of the [Meter] that +created the instrument to register a [Callback]. + +# API Implementations + +This package does not conform to the standard Go versioning policy, all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/metric/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/metric/embedded" + + type MeterProvider struct { + embedded.MeterProvider + // ... + } + +If an author wants the default behavior of their implementations to a panic, +they need to embed the API interface directly. + + import "go.opentelemetry.io/otel/metric" + + type MeterProvider struct { + metric.MeterProvider + // ... + } + +This is not a recommended behavior as it could lead to publishing packages that +contain runtime panics when users update other package that use newer versions +of [go.opentelemetry.io/otel/metric]. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/metric/noop]: + + import "go.opentelemetry.io/otel/metric/noop" + + type MeterProvider struct { + noop.MeterProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API. 
+ +[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax +[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ +[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider +*/ +package metric // import "go.opentelemetry.io/otel/metric" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/metric/embedded/README.md new file mode 100644 index 0000000000..1f6e0efa73 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/embedded/README.md @@ -0,0 +1,3 @@ +# Metric Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/hack/tools/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go new file mode 100644 index 0000000000..1a9dc68093 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -0,0 +1,243 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// metric API]. +// +// Implementers of the [OpenTelemetry metric API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry metric API] is +// extended (which is something that can happen without a major version bump of +// the API package). +// +// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric +package embedded // import "go.opentelemetry.io/otel/metric/embedded" + +// MeterProvider is embedded in +// [go.opentelemetry.io/otel/metric.MeterProvider]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type MeterProvider interface{ meterProvider() } + +// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Meter interface{ meter() } + +// Float64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Float64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Observer] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Observer interface{ float64Observer() } + +// Int64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Int64Observer]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Observer interface{ int64Observer() } + +// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Observer] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Observer interface{ observer() } + +// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Registration] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Registration] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Registration interface{ registration() } + +// Float64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Float64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Counter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Counter interface{ float64Counter() } + +// Float64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Float64Histogram]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Histogram interface{ float64Histogram() } + +// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64Gauge interface{ float64Gauge() } + +// Float64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableCounter interface{ float64ObservableCounter() } + +// Float64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableGauge interface{ float64ObservableGauge() } + +// Float64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// if you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } + +// Float64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Float64UpDownCounter interface{ float64UpDownCounter() } + +// Int64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Int64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Counter interface{ int64Counter() } + +// Int64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Int64Histogram]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Histogram interface{ int64Histogram() } + +// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience +// a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64Gauge interface{ int64Gauge() } + +// Int64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableCounter interface{ int64ObservableCounter() } + +// Int64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Int64ObservableGauge interface{ int64ObservableGauge() } + +// Int64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if +// you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } + +// Int64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64UpDownCounter interface{ int64UpDownCounter() } diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/instrument.go b/hack/tools/vendor/go.opentelemetry.io/otel/metric/instrument.go new file mode 100644 index 0000000000..ea52e40233 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -0,0 +1,368 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// Observable is used as a grouping mechanism for all instruments that are +// updated within a Callback. +type Observable interface { + observable() +} + +// InstrumentOption applies options to all instruments. 
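To make the compile-guard mechanism described in the embedded package comment above concrete, here is a minimal sketch, not part of the vendored code, of how a third-party SDK might embed these types. The `mysdk` package name is an assumption, and the fallback to the noop Meter is purely illustrative:

```go
package mysdk

import (
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/embedded"
	"go.opentelemetry.io/otel/metric/noop"
)

// MeterProvider embeds embedded.MeterProvider, so this type stops compiling
// if metric.MeterProvider gains a method in a future minor release, instead
// of silently becoming an incomplete implementation.
type MeterProvider struct {
	embedded.MeterProvider
}

// Meter satisfies today's metric.MeterProvider; a real implementation would
// return its own Meter rather than a no-op one.
func (MeterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
	return noop.Meter{}
}

var _ metric.MeterProvider = MeterProvider{}
```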
+type InstrumentOption interface { + Int64CounterOption + Int64UpDownCounterOption + Int64HistogramOption + Int64GaugeOption + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption + + Float64CounterOption + Float64UpDownCounterOption + Float64HistogramOption + Float64GaugeOption + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +// HistogramOption applies options to histogram instruments. +type HistogramOption interface { + Int64HistogramOption + Float64HistogramOption +} + +type descOpt string + +func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.description = string(o) + return c +} + +// WithDescription sets the instrument description. 
+func WithDescription(desc string) InstrumentOption { return descOpt(desc) }
+
+type unitOpt string
+
+func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+// WithUnit sets the instrument unit.
+//
+// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
+func WithUnit(u string) InstrumentOption { return unitOpt(u) }
+
+// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
+//
+// This option is considered "advisory", and may be ignored by API implementations.
+func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
+
+type bucketOpt []float64
+
+func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+// AddOption applies options to an addition measurement. See
+// [MeasurementOption] for other options that can be used as an AddOption.
+type AddOption interface {
+	applyAdd(AddConfig) AddConfig
+}
+
+// AddConfig contains options for an addition measurement.
+type AddConfig struct {
+	attrs attribute.Set
+}
+
+// NewAddConfig returns a new [AddConfig] with all opts applied.
+func NewAddConfig(opts []AddOption) AddConfig {
+	config := AddConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyAdd(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c AddConfig) Attributes() attribute.Set {
+	return c.attrs
+}
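The AddConfig/NewAddConfig pairing above is the hook an SDK uses to resolve measurement options. A hedged sketch of the SDK side follows; the `sdkCounter` type and the aggregation step are hypothetical, only the option plumbing is API from this file:

```go
package sdksketch

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/embedded"
)

// sdkCounter is a hypothetical SDK counter implementation.
type sdkCounter struct {
	embedded.Int64Counter
}

func (c *sdkCounter) Add(ctx context.Context, incr int64, opts ...metric.AddOption) {
	cfg := metric.NewAddConfig(opts) // fold every AddOption into one config
	attrs := cfg.Attributes()        // the merged attribute set
	_ = attrs                        // a real SDK would aggregate incr under attrs
}
```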
+
+// RecordOption applies options to a recorded measurement. See
+// [MeasurementOption] for other options that can be used as a RecordOption.
+type RecordOption interface {
+	applyRecord(RecordConfig) RecordConfig
+}
+
+// RecordConfig contains options for a recorded measurement.
+type RecordConfig struct {
+	attrs attribute.Set
+}
+
+// NewRecordConfig returns a new [RecordConfig] with all opts applied.
+func NewRecordConfig(opts []RecordOption) RecordConfig {
+	config := RecordConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyRecord(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c RecordConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// ObserveOption applies options to an observed measurement. See
+// [MeasurementOption] for other options that can be used as an ObserveOption.
+type ObserveOption interface {
+	applyObserve(ObserveConfig) ObserveConfig
+}
+
+// ObserveConfig contains options for an observed measurement.
+type ObserveConfig struct {
+	attrs attribute.Set
+}
+
+// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
+func NewObserveConfig(opts []ObserveOption) ObserveConfig {
+	config := ObserveConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyObserve(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c ObserveConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// MeasurementOption applies options to all instrument measurements.
+type MeasurementOption interface {
+	AddOption
+	RecordOption
+	ObserveOption
+}
+
+type attrOpt struct {
+	set attribute.Set
+}
+
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+	// NewMergeIterator uses the first value for any duplicates.
+	iter := attribute.NewMergeIterator(&b, &a)
+	merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+	for iter.Next() {
+		merged = append(merged, iter.Attribute())
+	}
+	return attribute.NewSet(merged...)
+}
+
+func (o attrOpt) applyAdd(c AddConfig) AddConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+// WithAttributeSet sets the attribute Set a measurement is made with.
+//
+// If multiple WithAttributeSet or WithAttributes options are passed the
+// attributes will be merged together in the order they are passed. Attributes
+// with duplicate keys will use the last value passed.
+func WithAttributeSet(attributes attribute.Set) MeasurementOption {
+	return attrOpt{set: attributes}
+}
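The last-value-wins merge semantics documented for WithAttributeSet can be seen by folding two options into a config. This sketch uses only APIs from this file plus the attribute package; the key names are illustrative:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	first := metric.WithAttributes(attribute.String("k", "v1"), attribute.Int("n", 1))
	second := metric.WithAttributes(attribute.String("k", "v2"))

	cfg := metric.NewAddConfig([]metric.AddOption{first, second})
	// Duplicate keys take the last value passed: k=v2, n=1.
	fmt.Println(cfg.Attributes().Encoded(attribute.DefaultEncoder()))
}
```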
+
+// WithAttributes converts attributes into an attribute Set and sets the Set to
+// be associated with a measurement. This is shorthand for:
+//
+//	cp := make([]attribute.KeyValue, len(attributes))
+//	copy(cp, attributes)
+//	WithAttributeSet(attribute.NewSet(cp...))
+//
+// [attribute.NewSet] may modify the passed attributes so this will make a copy
+// of attributes before creating a set in order to ensure this function is
+// concurrent safe. This makes this option function less optimized in
+// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be
+// preferred for performance sensitive code.
+//
+// See [WithAttributeSet] for information about how multiple WithAttributes are
+// merged.
+func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption {
+	cp := make([]attribute.KeyValue, len(attributes))
+	copy(cp, attributes)
+	return attrOpt{set: attribute.NewSet(cp...)}
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/meter.go b/hack/tools/vendor/go.opentelemetry.io/otel/metric/meter.go
new file mode 100644
index 0000000000..14e08c24a4
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -0,0 +1,278 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// MeterProvider provides access to named Meter instances, for instrumenting
+// an application or package.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type MeterProvider interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.MeterProvider
+
+	// Meter returns a new Meter with the provided name and configuration.
+	//
+	// A Meter should be scoped at most to a single package. The name needs to
+	// be unique so it does not collide with other names used by
+	// an application, nor other applications. To achieve this, the import path
+	// of the instrumentation package is recommended to be used as name.
+	//
+	// If the name is empty, then an implementation defined default name will
+	// be used instead.
+	Meter(name string, opts ...MeterOption) Meter
+}
+
+// Meter provides access to instrument instances for recording metrics.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Meter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Meter
+
+	// Int64Counter returns a new Int64Counter instrument identified by name
+	// and configured with options. The instrument is used to synchronously
+	// record increasing int64 measurements during a computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+
+	// Int64UpDownCounter returns a new Int64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record int64 measurements during a computational
+	// operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+
+	// Int64Histogram returns a new Int64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of int64 measurements during a
+	// computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+
+	// Int64Gauge returns a new Int64Gauge instrument identified by name and
+	// configured with options. The instrument is used to synchronously record
+	// instantaneous int64 measurements during a computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
+
+	// Int64ObservableCounter returns a new Int64ObservableCounter identified
+	// by name and configured with options. The instrument is used to
+	// asynchronously record increasing int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+
+	// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+
+	// Int64ObservableGauge returns a new Int64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
+
+	// Float64Counter returns a new Float64Counter instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record increasing float64 measurements during a
+	// computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+
+	// Float64UpDownCounter returns a new Float64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record float64 measurements during a computational
+	// operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+
+	// Float64Histogram returns a new Float64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of float64 measurements during a
+	// computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+
+	// Float64Gauge returns a new Float64Gauge instrument identified by name and
+	// configured with options. The instrument is used to synchronously record
+	// instantaneous float64 measurements during a computational operation.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
+
+	// Float64ObservableCounter returns a new Float64ObservableCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record increasing float64
+	// measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+
+	// Float64ObservableUpDownCounter returns a new
+	// Float64ObservableUpDownCounter instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// float64 measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+
+	// Float64ObservableGauge returns a new Float64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous float64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	//
+	// The name needs to conform to the OpenTelemetry instrument name syntax.
+	// See the Instrument Name section of the package documentation for more
+	// information.
+	Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
+
+	// RegisterCallback registers f to be called during the collection of a
+	// measurement cycle.
+	//
+	// If Unregister of the returned Registration is called, f needs to be
+	// unregistered and not called during collection.
+	//
+	// The instruments f is registered with are the only instruments that f may
+	// observe values for.
+	//
+	// If no instruments are passed, f should not be registered nor called
+	// during collection.
+	//
+	// The function f needs to be concurrent safe.
+	RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
+}
+
+// Callback is a function registered with a Meter that makes observations for
+// the set of instruments it is registered with. The Observer parameter is used
+// to record measurement observations for these instruments.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Callbacks. That is, it should not report measurements for an instrument with
+// the same attributes as another Callback will report.
+//
+// The function needs to be concurrent safe.
+type Callback func(context.Context, Observer) error
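A sketch of the callback flow RegisterCallback and Callback describe, assuming a Meter is already in hand; the instrument name is illustrative and the heap statistic comes from the standard library:

```go
package example

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel/metric"
)

func registerHeapGauge(meter metric.Meter) (metric.Registration, error) {
	gauge, err := meter.Int64ObservableGauge("process.heap_alloc_bytes")
	if err != nil {
		return nil, err
	}
	// The callback runs once per collection cycle and may only observe
	// the instruments it is registered with (here, gauge).
	return meter.RegisterCallback(
		func(_ context.Context, o metric.Observer) error {
			var ms runtime.MemStats
			runtime.ReadMemStats(&ms)
			o.ObserveInt64(gauge, int64(ms.HeapAlloc))
			return nil
		},
		gauge,
	)
}
```

Callers would typically `defer reg.Unregister()` on the returned Registration, which the interface below guarantees to be idempotent.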
+
+// Observer records measurements for multiple instruments in a Callback.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Observer
+
+	// ObserveFloat64 records the float64 value for obsrv.
+	ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+
+	// ObserveInt64 records the int64 value for obsrv.
+	ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Registration interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Registration
+
+	// Unregister removes the callback registration from a Meter.
+	//
+	// This method needs to be idempotent and concurrent safe.
+	Unregister() error
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/metric/noop/README.md
new file mode 100644
index 0000000000..bb89694356
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/noop/README.md
@@ -0,0 +1,3 @@
+# Metric Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop)
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/hack/tools/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
new file mode 100644
index 0000000000..ca6fcbdc09
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -0,0 +1,281 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry metric API that
+// produces no telemetry and minimizes the computation resources used.
+//
+// Using this package to implement the OpenTelemetry metric API will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry metric API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/metric/noop"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/embedded"
+)
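As the package comment says, installing this provider globally disables metrics. A minimal sketch using the global setter from the root otel package:

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	// Instrumented code keeps calling the metric API as usual, but every
	// instrument handed out by this provider records nothing.
	otel.SetMeterProvider(noop.NewMeterProvider())
}
```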
+var (
+	// Compile-time check this implements the OpenTelemetry API.
+
+	_ metric.MeterProvider                  = MeterProvider{}
+	_ metric.Meter                          = Meter{}
+	_ metric.Observer                       = Observer{}
+	_ metric.Registration                   = Registration{}
+	_ metric.Int64Counter                   = Int64Counter{}
+	_ metric.Float64Counter                 = Float64Counter{}
+	_ metric.Int64UpDownCounter             = Int64UpDownCounter{}
+	_ metric.Float64UpDownCounter           = Float64UpDownCounter{}
+	_ metric.Int64Histogram                 = Int64Histogram{}
+	_ metric.Float64Histogram               = Float64Histogram{}
+	_ metric.Int64Gauge                     = Int64Gauge{}
+	_ metric.Float64Gauge                   = Float64Gauge{}
+	_ metric.Int64ObservableCounter         = Int64ObservableCounter{}
+	_ metric.Float64ObservableCounter       = Float64ObservableCounter{}
+	_ metric.Int64ObservableGauge           = Int64ObservableGauge{}
+	_ metric.Float64ObservableGauge         = Float64ObservableGauge{}
+	_ metric.Int64ObservableUpDownCounter   = Int64ObservableUpDownCounter{}
+	_ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+	_ metric.Int64Observer                  = Int64Observer{}
+	_ metric.Float64Observer                = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+	return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+	return Meter{}
+}
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+	return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+	return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+	return Int64Histogram{}, nil
+}
+
+// Int64Gauge returns a Gauge used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+	return Int64Gauge{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+	return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+	return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+	return Int64ObservableGauge{}, nil
+}
+
+// Float64Counter returns a Counter used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+	return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+	return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+	return Float64Histogram{}, nil
+}
+
+// Float64Gauge returns a Gauge used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+	return Float64Gauge{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+	return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record float64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+	return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+	return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+	return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback, performing no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This will always return nil because the No-Op Meter performs no
+// operation, including holding any record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter } + +// Add performs no operation. +func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {} + +// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record +// float64 measurements. It produces no telemetry. +type Float64UpDownCounter struct{ embedded.Float64UpDownCounter } + +// Add performs no operation. +func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {} + +// Int64Histogram is an OpenTelemetry Histogram used to record int64 +// measurements. It produces no telemetry. +type Int64Histogram struct{ embedded.Int64Histogram } + +// Record performs no operation. +func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Histogram is an OpenTelemetry Histogram used to record float64 +// measurements. It produces no telemetry. +type Float64Histogram struct{ embedded.Float64Histogram } + +// Record performs no operation. +func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 +// measurements. It produces no telemetry. +type Int64Gauge struct{ embedded.Int64Gauge } + +// Record performs no operation. +func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 +// measurements. It produces no telemetry. +type Float64Gauge struct{ embedded.Float64Gauge } + +// Record performs no operation. +func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableCounter struct { + metric.Int64Observable + embedded.Int64ObservableCounter +} + +// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableCounter struct { + metric.Float64Observable + embedded.Float64ObservableCounter +} + +// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableGauge struct { + metric.Int64Observable + embedded.Int64ObservableGauge +} + +// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableGauge struct { + metric.Float64Observable + embedded.Float64ObservableGauge +} + +// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record int64 measurements. It produces no telemetry. +type Int64ObservableUpDownCounter struct { + metric.Int64Observable + embedded.Int64ObservableUpDownCounter +} + +// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record float64 measurements. It produces no telemetry. +type Float64ObservableUpDownCounter struct { + metric.Float64Observable + embedded.Float64ObservableUpDownCounter +} + +// Int64Observer is a recorder of int64 measurements that performs no operation. +type Int64Observer struct{ embedded.Int64Observer } + +// Observe performs no operation. +func (Int64Observer) Observe(int64, ...metric.ObserveOption) {} + +// Float64Observer is a recorder of float64 measurements that performs no +// operation. +type Float64Observer struct{ embedded.Float64Observer } + +// Observe performs no operation. 
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/hack/tools/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go new file mode 100644 index 0000000000..8403a4bad2 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go @@ -0,0 +1,226 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/embedded" +) + +// Float64Counter is an instrument that records increasing float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Counter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Counter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr float64, options ...AddOption) +} + +// Float64CounterConfig contains options for synchronous counter instruments that +// record float64 values. +type Float64CounterConfig struct { + description string + unit string +} + +// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts +// applied. +func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig { + var config Float64CounterConfig + for _, o := range opts { + config = o.applyFloat64Counter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64CounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64CounterConfig) Unit() string { + return c.unit +} + +// Float64CounterOption applies options to a [Float64CounterConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64CounterOption. +type Float64CounterOption interface { + applyFloat64Counter(Float64CounterConfig) Float64CounterConfig +} + +// Float64UpDownCounter is an instrument that records increasing or decreasing +// float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64UpDownCounter interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64UpDownCounter + + // Add records a change to the counter. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Add(ctx context.Context, incr float64, options ...AddOption) +} + +// Float64UpDownCounterConfig contains options for synchronous counter +// instruments that record float64 values. +type Float64UpDownCounterConfig struct { + description string + unit string +} + +// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig] +// with all opts applied. 
+func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig { + var config Float64UpDownCounterConfig + for _, o := range opts { + config = o.applyFloat64UpDownCounter(config) + } + return config +} + +// Description returns the configured description. +func (c Float64UpDownCounterConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64UpDownCounterConfig) Unit() string { + return c.unit +} + +// Float64UpDownCounterOption applies options to a +// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that +// can be used as a Float64UpDownCounterOption. +type Float64UpDownCounterOption interface { + applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig +} + +// Float64Histogram is an instrument that records a distribution of float64 +// values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Histogram interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Histogram + + // Record adds an additional value to the distribution. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, incr float64, options ...RecordOption) +} + +// Float64HistogramConfig contains options for synchronous histogram +// instruments that record float64 values. +type Float64HistogramConfig struct { + description string + unit string + explicitBucketBoundaries []float64 +} + +// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all +// opts applied. +func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig { + var config Float64HistogramConfig + for _, o := range opts { + config = o.applyFloat64Histogram(config) + } + return config +} + +// Description returns the configured description. +func (c Float64HistogramConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Float64HistogramConfig) Unit() string { + return c.unit +} + +// ExplicitBucketBoundaries returns the configured explicit bucket boundaries. +func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + +// Float64HistogramOption applies options to a [Float64HistogramConfig]. See +// [InstrumentOption] for other options that can be used as a +// Float64HistogramOption. +type Float64HistogramOption interface { + applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig +} + +// Float64Gauge is an instrument that records instantaneous float64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Float64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Float64Gauge + + // Record records the instantaneous value. 
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, value float64, options ...RecordOption)
+}
+
+// Float64GaugeConfig contains options for synchronous gauge instruments that
+// record float64 values.
+type Float64GaugeConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts
+// applied.
+func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig {
+	var config Float64GaugeConfig
+	for _, o := range opts {
+		config = o.applyFloat64Gauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64GaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64GaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Float64GaugeOption applies options to a [Float64GaugeConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64GaugeOption.
+type Float64GaugeOption interface {
+	applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/hack/tools/vendor/go.opentelemetry.io/otel/metric/syncint64.go
new file mode 100644
index 0000000000..783fdfba77
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -0,0 +1,226 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Counter is an instrument that records increasing int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Counter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Counter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64CounterConfig contains options for synchronous counter instruments that
+// record int64 values.
+type Int64CounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts
+// applied.
+func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig {
+	var config Int64CounterConfig
+	for _, o := range opts {
+		config = o.applyInt64Counter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64CounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64CounterConfig) Unit() string {
+	return c.unit
+}
+
+// Int64CounterOption applies options to an [Int64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64CounterOption.
+type Int64CounterOption interface {
+	applyInt64Counter(Int64CounterConfig) Int64CounterConfig
+}
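Putting the synchronous pieces together, a hedged end-to-end sketch: obtain a counter from the global provider, then record with attributes. The meter, instrument, and attribute names are illustrative, and in real code the instrument would be created once and reused rather than per call:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func countRequest(ctx context.Context, route string) error {
	meter := otel.Meter("example.com/mypkg") // looks up the global MeterProvider
	requests, err := meter.Int64Counter(
		"mypkg.requests",
		metric.WithDescription("Number of handled requests."),
		metric.WithUnit("{request}"),
	)
	if err != nil {
		return err
	}
	requests.Add(ctx, 1, metric.WithAttributes(attribute.String("route", route)))
	return nil
}
```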
+
+// Int64UpDownCounter is an instrument that records increasing or decreasing
+// int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64UpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64UpDownCounter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64UpDownCounterConfig contains options for synchronous counter
+// instruments that record int64 values.
+type Int64UpDownCounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with
+// all opts applied.
+func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig {
+	var config Int64UpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64UpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64UpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64UpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Int64UpDownCounterOption applies options to an [Int64UpDownCounterConfig].
+// See [InstrumentOption] for other options that can be used as an
+// Int64UpDownCounterOption.
+type Int64UpDownCounterOption interface {
+	applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig
+}
+
+// Int64Histogram is an instrument that records a distribution of int64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Histogram interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Histogram
+
+	// Record adds an additional value to the distribution.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, incr int64, options ...RecordOption)
+}
+
+// Int64HistogramConfig contains options for synchronous histogram instruments
+// that record int64 values.
+type Int64HistogramConfig struct {
+	description              string
+	unit                     string
+	explicitBucketBoundaries []float64
+}
+
+// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
+// applied.
+func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig {
+	var config Int64HistogramConfig
+	for _, o := range opts {
+		config = o.applyInt64Histogram(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64HistogramConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64HistogramConfig) Unit() string {
+	return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { + return c.explicitBucketBoundaries +} + +// Int64HistogramOption applies options to a [Int64HistogramConfig]. See +// [InstrumentOption] for other options that can be used as an +// Int64HistogramOption. +type Int64HistogramOption interface { + applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig +} + +// Int64Gauge is an instrument that records instantaneous int64 values. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Int64Gauge interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Int64Gauge + + // Record records the instantaneous value. + // + // Use the WithAttributeSet (or, if performance is not a concern, + // the WithAttributes) option to include measurement attributes. + Record(ctx context.Context, value int64, options ...RecordOption) +} + +// Int64GaugeConfig contains options for synchronous gauge instruments that +// record int64 values. +type Int64GaugeConfig struct { + description string + unit string +} + +// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts +// applied. +func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig { + var config Int64GaugeConfig + for _, o := range opts { + config = o.applyInt64Gauge(config) + } + return config +} + +// Description returns the configured description. +func (c Int64GaugeConfig) Description() string { + return c.description +} + +// Unit returns the configured unit. +func (c Int64GaugeConfig) Unit() string { + return c.unit +} + +// Int64GaugeOption applies options to a [Int64GaugeConfig]. See +// [InstrumentOption] for other options that can be used as a +// Int64GaugeOption. +type Int64GaugeOption interface { + applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/propagation.go b/hack/tools/vendor/go.opentelemetry.io/otel/propagation.go new file mode 100644 index 0000000000..2fd9497338 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/propagation.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/propagation" +) + +// GetTextMapPropagator returns the global TextMapPropagator. If none has been +// set, a No-Op TextMapPropagator is returned. +func GetTextMapPropagator() propagation.TextMapPropagator { + return global.TextMapPropagator() +} + +// SetTextMapPropagator sets propagator as the global TextMapPropagator. 
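+// It is meant to be called once during application initialization;
+// instrumentation libraries should instead discover the configured
+// propagator with GetTextMapPropagator.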
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
+	global.SetTextMapPropagator(propagator)
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/propagation/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/propagation/README.md
new file mode 100644
index 0000000000..e2959ac747
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/propagation/README.md
@@ -0,0 +1,3 @@
+# Propagation
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/propagation)](https://pkg.go.dev/go.opentelemetry.io/otel/propagation)
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/hack/tools/vendor/go.opentelemetry.io/otel/propagation/baggage.go
new file mode 100644
index 0000000000..552263ba73
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+	bStr := baggage.FromContext(ctx).String()
+	if bStr != "" {
+		carrier.Set(baggageHeader, bStr)
+	}
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+	bStr := carrier.Get(baggageHeader)
+	if bStr == "" {
+		return parent
+	}
+
+	bag, err := baggage.Parse(bStr)
+	if err != nil {
+		return parent
+	}
+	return baggage.ContextWithBaggage(parent, bag)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+	return []string{baggageHeader}
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/propagation/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 0000000000..33a3baf15f
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagators supported by this
+package are the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/) and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/hack/tools/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 0000000000..8c8286aab4
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,142 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
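+// MapCarrier and HeaderCarrier below adapt plain in-memory maps and
+// http.Header to this interface.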
+type TextMapCarrier interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Get returns the value associated with the passed key.
+	Get(key string) string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Set stores the key-value pair.
+	Set(key string, value string)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Keys lists the keys stored in this carrier.
+	Keys() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile-time check that MapCarrier implements the TextMapCarrier interface.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+	return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+	c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+	keys := make([]string, 0, len(c))
+	for k := range c {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
+type HeaderCarrier http.Header
+
+// Get returns the value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+	return http.Header(hc).Get(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+	http.Header(hc).Set(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (hc HeaderCarrier) Keys() []string {
+	keys := make([]string, 0, len(hc))
+	for k := range hc {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Inject sets cross-cutting concerns from the Context into the carrier.
+	Inject(ctx context.Context, carrier TextMapCarrier)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Extract reads cross-cutting concerns from the carrier into a Context.
+	Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Fields returns the keys whose values are set with Inject.
+	Fields() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+	for _, i := range p {
+		i.Inject(ctx, carrier)
+	}
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	for _, i := range p {
+		ctx = i.Extract(ctx, carrier)
+	}
+	return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+	unique := make(map[string]struct{})
+	for _, i := range p {
+		for _, k := range i.Fields() {
+			unique[k] = struct{}{}
+		}
+	}
+
+	fields := make([]string, 0, len(unique))
+	for k := range unique {
+		fields = append(fields, k)
+	}
+	return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagator. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+	return compositeTextMapPropagator(p)
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/hack/tools/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 0000000000..6870e316dc
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"strings"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	supportedVersion  = 0
+	maxVersion        = 254
+	traceparentHeader = "traceparent"
+	tracestateHeader  = "tracestate"
+	delimiter         = "-"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/).
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var (
+	_           TextMapPropagator = TraceContext{}
+	versionPart                   = fmt.Sprintf("%.2X", supportedVersion)
+)
+
+// Inject injects the trace context from ctx into carrier.
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+	sc := trace.SpanContextFromContext(ctx)
+	if !sc.IsValid() {
+		return
+	}
+
+	if ts := sc.TraceState().String(); ts != "" {
+		carrier.Set(tracestateHeader, ts)
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	flags := sc.TraceFlags() & trace.FlagsSampled
+
+	var sb strings.Builder
+	sb.Grow(2 + 32 + 16 + 2 + 3)
+	_, _ = sb.WriteString(versionPart)
+	traceID := sc.TraceID()
+	spanID := sc.SpanID()
+	flagByte := [1]byte{byte(flags)}
+	var buf [32]byte
+	for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+		_ = sb.WriteByte(delimiter[0])
+		n := hex.Encode(buf[:], src)
+		_, _ = sb.Write(buf[:n])
+	}
+	carrier.Set(traceparentHeader, sb.String())
+}
+
+// Extract reads tracecontext from the carrier into a returned Context.
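+// Both the traceparent and tracestate headers are read from the carrier.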
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	sc := tc.extract(carrier)
+	if !sc.IsValid() {
+		return ctx
+	}
+	return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+	h := carrier.Get(traceparentHeader)
+	if h == "" {
+		return trace.SpanContext{}
+	}
+
+	var ver [1]byte
+	if !extractPart(ver[:], &h, 2) {
+		return trace.SpanContext{}
+	}
+	version := int(ver[0])
+	if version > maxVersion {
+		return trace.SpanContext{}
+	}
+
+	var scc trace.SpanContextConfig
+	if !extractPart(scc.TraceID[:], &h, 32) {
+		return trace.SpanContext{}
+	}
+	if !extractPart(scc.SpanID[:], &h, 16) {
+		return trace.SpanContext{}
+	}
+
+	var opts [1]byte
+	if !extractPart(opts[:], &h, 2) {
+		return trace.SpanContext{}
+	}
+	if version == 0 && (h != "" || opts[0] > 2) {
+		// Version 0 does not allow trailing data after the flags field and
+		// supports no flag other than the sampling bit.
+		return trace.SpanContext{}
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
+	// Ignore the error returned here. Failure to parse tracestate MUST NOT
+	// affect the parsing of traceparent according to the W3C tracecontext
+	// specification.
+	scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+	scc.Remote = true
+
+	sc := trace.NewSpanContext(scc)
+	if !sc.IsValid() {
+		return trace.SpanContext{}
+	}
+
+	return sc
+}
+
+// upperHex reports whether v contains any upper-case hexadecimal characters.
+func upperHex(v string) bool {
+	for _, c := range v {
+		if c >= 'A' && c <= 'F' {
+			return true
+		}
+	}
+	return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+	part, left, _ := strings.Cut(*h, delimiter)
+	*h = left
+	// hex.Decode accepts upper-case characters, which traceparent does not,
+	// so exclude them explicitly.
+	if len(part) != n || upperHex(part) {
+		return false
+	}
+	if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+		return false
+	}
+	return true
+}
+
+// Fields returns the keys whose values are set with Inject.
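+// For TraceContext these are always the traceparent and tracestate headers.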
+func (tc TraceContext) Fields() []string { + return []string{traceparentHeader, tracestateHeader} +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/renovate.json b/hack/tools/vendor/go.opentelemetry.io/otel/renovate.json new file mode 100644 index 0000000000..8c5ac55ca9 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/renovate.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:recommended" + ], + "ignorePaths": [], + "labels": ["Skip Changelog", "dependencies"], + "postUpdateOptions" : [ + "gomodTidy" + ], + "packageRules": [ + { + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": true + }, + { + "matchFileNames": ["internal/tools/**"], + "matchManagers": ["gomod"], + "matchDepTypes": ["indirect"], + "enabled": false + } + ] +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/requirements.txt b/hack/tools/vendor/go.opentelemetry.io/otel/requirements.txt new file mode 100644 index 0000000000..ab09daf9d5 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/requirements.txt @@ -0,0 +1 @@ +codespell==2.3.0 diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
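Taken together, the propagation files vendored above define the surface an instrumented application touches: a carrier, the TextMapPropagator interface, and the two W3C implementations. A minimal sketch of how they are typically wired together through the global propagator (the request URL here is illustrative, not taken from this diff):

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Compose the two W3C propagators; Inject and Extract run them in the
	// order given, and Fields returns their de-duplicated header names.
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	))

	// Outgoing side: copy the span context and baggage from the request
	// context into the HTTP headers.
	req, _ := http.NewRequest(http.MethodGet, "http://example.invalid", nil)
	prop := otel.GetTextMapPropagator()
	prop.Inject(req.Context(), propagation.HeaderCarrier(req.Header))

	// Incoming side: recover the remote span context and baggage from the
	// same headers into a new context.
	ctx := prop.Extract(req.Context(), propagation.HeaderCarrier(req.Header))
	_ = ctx
}
```

Because a composite propagator runs its members in order, TraceContext and Baggage each own their headers (traceparent/tracestate and baggage) and do not interfere with one another.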
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/README.md new file mode 100644 index 0000000000..f81b1576ad --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/README.md @@ -0,0 +1,3 @@ +# SDK + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md new file mode 100644 index 0000000000..06e6d86854 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md @@ -0,0 +1,3 @@ +# SDK Instrumentation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go new file mode 100644 index 0000000000..a4faa6a03d --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package instrumentation provides types to represent the code libraries that +// provide OpenTelemetry instrumentation. These types are used in the +// OpenTelemetry signal pipelines to identify the source of telemetry. +// +// See +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md +// and +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md +// for more information. +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go new file mode 100644 index 0000000000..f4d1857c4f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Library represents the instrumentation library. +// Deprecated: please use Scope instead. +type Library = Scope diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go new file mode 100644 index 0000000000..728115045b --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Scope represents the instrumentation scope. +type Scope struct { + // Name is the name of the instrumentation scope. This should be the + // Go package name of that scope. + Name string + // Version is the version of the instrumentation scope. + Version string + // SchemaURL of the telemetry emitted by the scope. 
+ SchemaURL string +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go new file mode 100644 index 0000000000..07923ed8d9 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -0,0 +1,166 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package env // import "go.opentelemetry.io/otel/sdk/internal/env" + +import ( + "os" + "strconv" + + "go.opentelemetry.io/otel/internal/global" +) + +// Environment variable names. +const ( + // BatchSpanProcessorScheduleDelayKey is the delay interval between two + // consecutive exports (i.e. 5000). + BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY" + // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to + // export data (i.e. 3000). + BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT" + // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048). + BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" + // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. + // 512). Note: it must be less than or equal to + // BatchSpanProcessorMaxQueueSize. + BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" + + // AttributeValueLengthKey is the maximum allowed attribute value size. + AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // AttributeCountKey is the maximum allowed span attribute count. + AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT" + + // SpanAttributeValueLengthKey is the maximum allowed attribute value size + // for a span. + SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // SpanAttributeCountKey is the maximum allowed span attribute count for a + // span. + SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT" + + // SpanEventCountKey is the maximum allowed span event count. + SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT" + + // SpanEventAttributeCountKey is the maximum allowed attribute per span + // event count. + SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT" + + // SpanLinkCountKey is the maximum allowed span link count. + SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT" + + // SpanLinkAttributeCountKey is the maximum allowed attribute per span + // link count. + SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT" +) + +// firstInt returns the value of the first matching environment variable from +// keys. If the value is not an integer or no match is found, defaultValue is +// returned. +func firstInt(defaultValue int, keys ...string) int { + for _, key := range keys { + value := os.Getenv(key) + if value == "" { + continue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue + } + + return defaultValue +} + +// IntEnvOr returns the int value of the environment variable with name key if +// it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned. 
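+// For example, with OTEL_BSP_MAX_QUEUE_SIZE=1024 set in the environment,
+// IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, 2048) returns 1024; if the
+// variable is unset, empty, or not an integer, it returns 2048.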
+func IntEnvOr(key string, defaultValue int) int { + value := os.Getenv(key) + if value == "" { + return defaultValue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue +} + +// BatchSpanProcessorScheduleDelay returns the environment variable value for +// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorScheduleDelay(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue) +} + +// BatchSpanProcessorExportTimeout returns the environment variable value for +// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorExportTimeout(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue) +} + +// BatchSpanProcessorMaxQueueSize returns the environment variable value for +// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorMaxQueueSize(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue) +} + +// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for +// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue +// is returned. +func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue) +} + +// SpanAttributeValueLength returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is +// returned or defaultValue if that is not set. +func SpanAttributeValueLength(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey) +} + +// SpanAttributeCount returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or +// defaultValue if that is not set. +func SpanAttributeCount(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey) +} + +// SpanEventCount returns the environment variable value for the +// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanEventCount(defaultValue int) int { + return IntEnvOr(SpanEventCountKey, defaultValue) +} + +// SpanEventAttributeCount returns the environment variable value for the +// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue +// is returned. +func SpanEventAttributeCount(defaultValue int) int { + return IntEnvOr(SpanEventAttributeCountKey, defaultValue) +} + +// SpanLinkCount returns the environment variable value for the +// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanLinkCount(defaultValue int) int { + return IntEnvOr(SpanLinkCountKey, defaultValue) +} + +// SpanLinkAttributeCount returns the environment variable value for the +// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. 
+func SpanLinkAttributeCount(defaultValue int) int {
+	return IntEnvOr(SpanLinkAttributeCountKey, defaultValue)
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
new file mode 100644
index 0000000000..fab61647c2
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
@@ -0,0 +1,46 @@
+# Experimental Features
+
+The SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Resource](#resource)
+
+### Resource
+
+[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental.
+To have experimental semantic conventions added by [resource detectors], set the `OTEL_GO_X_RESOURCE` environment variable.
+The value set must be the case-insensitive string of `"true"` to enable the feature.
+All other values are ignored.
+
+
+
+[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/
+[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector
+
+#### Examples
+
+Enable experimental resource semantic conventions.
+
+```console
+export OTEL_GO_X_RESOURCE=true
+```
+
+Disable experimental resource semantic conventions.
+
+```console
+unset OTEL_GO_X_RESOURCE
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
new file mode 100644
index 0000000000..68d296cbed
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x contains support for OTel SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/internal/x"
+
+import (
+	"os"
+	"strings"
+)
+
+// Resource is an experimental feature flag that defines whether resource
+// detectors should include experimental semantic conventions.
+//
+// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
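+//
+// For example, OTEL_GO_X_RESOURCE=true and OTEL_GO_X_RESOURCE=True both
+// enable the feature, while OTEL_GO_X_RESOURCE=false, an empty value, or an
+// unset variable leave it disabled.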
+var Resource = newFeature("RESOURCE", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled returns if the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/README.md new file mode 100644 index 0000000000..4ad864d716 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/README.md @@ -0,0 +1,3 @@ +# SDK Resource + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go new file mode 100644 index 0000000000..95a61d61d4 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "strings" +) + +// ErrPartialResource is returned by a detector when complete source +// information for a Resource is unavailable or the source information +// contains invalid values that are omitted from the returned Resource. +var ErrPartialResource = errors.New("partial resource") + +// Detector detects OpenTelemetry resource information. +type Detector interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Detect returns an initialized Resource based on gathered information. + // If the source information to construct a Resource contains invalid + // values, a Resource is returned with the valid parts of the source + // information used for initialization along with an appropriately + // wrapped ErrPartialResource error. + Detect(ctx context.Context) (*Resource, error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// Detect returns a new [Resource] merged from all the Resources each of the +// detectors produces. 
Each of the detectors are called sequentially, in the +// order they are passed, merging the produced resource into the previous. +// +// This may return a partial Resource along with an error containing +// [ErrPartialResource] if that error is returned from a detector. It may also +// return a merge-conflicting Resource along with an error containing +// [ErrSchemaURLConflict] if merging Resources from different detectors results +// in a schema URL conflict. It is up to the caller to determine if this +// returned Resource should be used or not. +// +// If one of the detectors returns an error that is not [ErrPartialResource], +// the resource produced by the detector will not be merged and the returned +// error will wrap that detector's error. +func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { + r := new(Resource) + return r, detect(ctx, r, detectors) +} + +// detect runs all detectors using ctx and merges the result into res. This +// assumes res is allocated and not nil, it will panic otherwise. +// +// If the detectors or merging resources produces any errors (i.e. +// [ErrPartialResource] [ErrSchemaURLConflict]), a single error wrapping all of +// these errors will be returned. Otherwise, nil is returned. +func detect(ctx context.Context, res *Resource, detectors []Detector) error { + var ( + r *Resource + errs detectErrs + err error + ) + + for _, detector := range detectors { + if detector == nil { + continue + } + r, err = detector.Detect(ctx) + if err != nil { + errs = append(errs, err) + if !errors.Is(err, ErrPartialResource) { + continue + } + } + r, err = Merge(res, r) + if err != nil { + errs = append(errs, err) + } + *res = *r + } + + if len(errs) == 0 { + return nil + } + if errors.Is(errs, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. + res.schemaURL = "" + } + return errs +} + +type detectErrs []error + +func (e detectErrs) Error() string { + errStr := make([]string, len(e)) + for i, err := range e { + errStr[i] = fmt.Sprintf("* %s", err) + } + + format := "%d errors occurred detecting resource:\n\t%s" + return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) +} + +func (e detectErrs) Unwrap() error { + switch len(e) { + case 0: + return nil + case 1: + return e[0] + } + return e[1:] +} + +func (e detectErrs) Is(target error) bool { + return len(e) != 0 && errors.Is(e[0], target) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go new file mode 100644 index 0000000000..6ac1cdbf7b --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/google/uuid" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type ( + // telemetrySDK is a Detector that provides information about + // the OpenTelemetry SDK used. This Detector is included as a + // builtin. If these resource attributes are not wanted, use + // the WithTelemetrySDK(nil) or WithoutBuiltin() options to + // explicitly disable them. + telemetrySDK struct{} + + // host is a Detector that provides information about the host + // being run on. 
This Detector is included as a builtin. If + // these resource attributes are not wanted, use the + // WithHost(nil) or WithoutBuiltin() options to explicitly + // disable them. + host struct{} + + stringDetector struct { + schemaURL string + K attribute.Key + F func() (string, error) + } + + defaultServiceNameDetector struct{} + + defaultServiceInstanceIDDetector struct{} +) + +var ( + _ Detector = telemetrySDK{} + _ Detector = host{} + _ Detector = stringDetector{} + _ Detector = defaultServiceNameDetector{} + _ Detector = defaultServiceInstanceIDDetector{} +) + +// Detect returns a *Resource that describes the OpenTelemetry SDK used. +func (telemetrySDK) Detect(context.Context) (*Resource, error) { + return NewWithAttributes( + semconv.SchemaURL, + semconv.TelemetrySDKName("opentelemetry"), + semconv.TelemetrySDKLanguageGo, + semconv.TelemetrySDKVersion(sdk.Version()), + ), nil +} + +// Detect returns a *Resource that describes the host being run on. +func (host) Detect(ctx context.Context) (*Resource, error) { + return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) +} + +// StringDetector returns a Detector that will produce a *Resource +// containing the string as a value corresponding to k. The resulting Resource +// will have the specified schemaURL. +func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { + return stringDetector{schemaURL: schemaURL, K: k, F: f} +} + +// Detect returns a *Resource that describes the string as a value +// corresponding to attribute.Key as well as the specific schemaURL. +func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { + value, err := sd.F() + if err != nil { + return nil, fmt.Errorf("%s: %w", string(sd.K), err) + } + a := sd.K.String(value) + if !a.Valid() { + return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) + } + return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil +} + +// Detect implements Detector. +func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceNameKey, + func() (string, error) { + executable, err := os.Executable() + if err != nil { + return "unknown_service:go", nil + } + return "unknown_service:" + filepath.Base(executable), nil + }, + ).Detect(ctx) +} + +// Detect implements Detector. +func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceInstanceIDKey, + func() (string, error) { + version4Uuid, err := uuid.NewRandom() + if err != nil { + return "", err + } + + return version4Uuid.String(), nil + }, + ).Detect(ctx) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/config.go new file mode 100644 index 0000000000..0d6e213d92 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -0,0 +1,195 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// config contains configuration for Resource creation. +type config struct { + // detectors that will be evaluated. + detectors []Detector + // SchemaURL to associate with the Resource. + schemaURL string +} + +// Option is the interface that applies a configuration option. 
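+// Options are passed to resource constructors such as New; in this file each
+// Option either appends detectors to the config or sets its schema URL.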
+type Option interface { + // apply sets the Option value of a config. + apply(config) config +} + +// WithAttributes adds attributes to the configured Resource. +func WithAttributes(attributes ...attribute.KeyValue) Option { + return WithDetectors(detectAttributes{attributes}) +} + +type detectAttributes struct { + attributes []attribute.KeyValue +} + +func (d detectAttributes) Detect(context.Context) (*Resource, error) { + return NewSchemaless(d.attributes...), nil +} + +// WithDetectors adds detectors to be evaluated for the configured resource. +func WithDetectors(detectors ...Detector) Option { + return detectorsOption{detectors: detectors} +} + +type detectorsOption struct { + detectors []Detector +} + +func (o detectorsOption) apply(cfg config) config { + cfg.detectors = append(cfg.detectors, o.detectors...) + return cfg +} + +// WithFromEnv adds attributes from environment variables to the configured resource. +func WithFromEnv() Option { + return WithDetectors(fromEnv{}) +} + +// WithHost adds attributes from the host to the configured resource. +func WithHost() Option { + return WithDetectors(host{}) +} + +// WithHostID adds host ID information to the configured resource. +func WithHostID() Option { + return WithDetectors(hostIDDetector{}) +} + +// WithTelemetrySDK adds TelemetrySDK version info to the configured resource. +func WithTelemetrySDK() Option { + return WithDetectors(telemetrySDK{}) +} + +// WithSchemaURL sets the schema URL for the configured resource. +func WithSchemaURL(schemaURL string) Option { + return schemaURLOption(schemaURL) +} + +type schemaURLOption string + +func (o schemaURLOption) apply(cfg config) config { + cfg.schemaURL = string(o) + return cfg +} + +// WithOS adds all the OS attributes to the configured Resource. +// See individual WithOS* functions to configure specific attributes. +func WithOS() Option { + return WithDetectors( + osTypeDetector{}, + osDescriptionDetector{}, + ) +} + +// WithOSType adds an attribute with the operating system type to the configured Resource. +func WithOSType() Option { + return WithDetectors(osTypeDetector{}) +} + +// WithOSDescription adds an attribute with the operating system description to the +// configured Resource. The formatted string is equivalent to the output of the +// `uname -snrvm` command. +func WithOSDescription() Option { + return WithDetectors(osDescriptionDetector{}) +} + +// WithProcess adds all the Process attributes to the configured Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +// +// This option is equivalent to calling WithProcessPID, +// WithProcessExecutableName, WithProcessExecutablePath, +// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, +// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each +// option function for information about what resource attributes each +// includes. +func WithProcess() Option { + return WithDetectors( + processPIDDetector{}, + processExecutableNameDetector{}, + processExecutablePathDetector{}, + processCommandArgsDetector{}, + processOwnerDetector{}, + processRuntimeNameDetector{}, + processRuntimeVersionDetector{}, + processRuntimeDescriptionDetector{}, + ) +} + +// WithProcessPID adds an attribute with the process identifier (PID) to the +// configured Resource. 
+func WithProcessPID() Option { + return WithDetectors(processPIDDetector{}) +} + +// WithProcessExecutableName adds an attribute with the name of the process +// executable to the configured Resource. +func WithProcessExecutableName() Option { + return WithDetectors(processExecutableNameDetector{}) +} + +// WithProcessExecutablePath adds an attribute with the full path to the process +// executable to the configured Resource. +func WithProcessExecutablePath() Option { + return WithDetectors(processExecutablePathDetector{}) +} + +// WithProcessCommandArgs adds an attribute with all the command arguments (including +// the command/executable itself) as received by the process to the configured +// Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +func WithProcessCommandArgs() Option { + return WithDetectors(processCommandArgsDetector{}) +} + +// WithProcessOwner adds an attribute with the username of the user that owns the process +// to the configured Resource. +func WithProcessOwner() Option { + return WithDetectors(processOwnerDetector{}) +} + +// WithProcessRuntimeName adds an attribute with the name of the runtime of this +// process to the configured Resource. +func WithProcessRuntimeName() Option { + return WithDetectors(processRuntimeNameDetector{}) +} + +// WithProcessRuntimeVersion adds an attribute with the version of the runtime of +// this process to the configured Resource. +func WithProcessRuntimeVersion() Option { + return WithDetectors(processRuntimeVersionDetector{}) +} + +// WithProcessRuntimeDescription adds an attribute with an additional description +// about the runtime of the process to the configured Resource. +func WithProcessRuntimeDescription() Option { + return WithDetectors(processRuntimeDescriptionDetector{}) +} + +// WithContainer adds all the Container attributes to the configured Resource. +// See individual WithContainer* functions to configure specific attributes. +func WithContainer() Option { + return WithDetectors( + cgroupContainerIDDetector{}, + ) +} + +// WithContainerID adds an attribute with the id of the container to the configured Resource. +// Note: WithContainerID will not extract the correct container ID in an ECS environment. +// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). +func WithContainerID() Option { + return WithDetectors(cgroupContainerIDDetector{}) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/container.go new file mode 100644 index 0000000000..5ecd859a52 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "context" + "errors" + "io" + "os" + "regexp" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type containerIDProvider func() (string, error) + +var ( + containerID containerIDProvider = getContainerIDFromCGroup + cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`) +) + +type cgroupContainerIDDetector struct{} + +const cgroupPath = "/proc/self/cgroup" + +// Detect returns a *Resource that describes the id of the container. 
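+// The id is parsed from /proc/self/cgroup (cgroupPath) using
+// cgroupContainerIDRe.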
+// If no container id found, an empty resource will be returned.
+func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ containerID, err := containerID()
+ if err != nil {
+ return nil, err
+ }
+
+ if containerID == "" {
+ return Empty(), nil
+ }
+ return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil
+}
+
+var (
+ defaultOSStat = os.Stat
+ osStat = defaultOSStat
+
+ defaultOSOpen = func(name string) (io.ReadCloser, error) {
+ return os.Open(name)
+ }
+ osOpen = defaultOSOpen
+)
+
+// getContainerIDFromCGroup returns the id of the container from the cgroup file.
+// If no container id found, an empty string will be returned.
+func getContainerIDFromCGroup() (string, error) {
+ if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) {
+ // File does not exist, skip
+ return "", nil
+ }
+
+ file, err := osOpen(cgroupPath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ return getContainerIDFromReader(file), nil
+}
+
+// getContainerIDFromReader returns the id of the container from reader.
+func getContainerIDFromReader(reader io.Reader) string {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if id := getContainerIDFromLine(line); id != "" {
+ return id
+ }
+ }
+ return ""
+}
+
+// getContainerIDFromLine returns the id of the container from one string line.
+func getContainerIDFromLine(line string) string {
+ matches := cgroupContainerIDRe.FindStringSubmatch(line)
+ if len(matches) <= 1 {
+ return ""
+ }
+ return matches[1]
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
new file mode 100644
index 0000000000..64939a2713
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package resource provides detecting and representing resources.
+//
+// The fundamental struct is a Resource which holds identifying information
+// about the entities for which telemetry is exported.
+//
+// To automatically construct Resources from an environment a Detector
+// interface is defined. Implementations of this interface can be passed to
+// the Detect function to generate a Resource from the merged information.
+//
+// To load a user defined Resource from the environment variable
+// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret
+// the value as a list of comma delimited key/value pairs
+// (e.g. `<key1>=<value1>,<key2>=<value2>,...`).
+//
+// While this package provides a stable API,
+// the attributes added by resource detectors may change.
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
new file mode 100644
index 0000000000..813f056242
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "os"
+ "strings"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+)
+
+const (
+ // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
+ resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials + + // svcNameKey is the environment variable name that Service Name information will be read from. + svcNameKey = "OTEL_SERVICE_NAME" +) + +// errMissingValue is returned when a resource value is missing. +var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) + +// fromEnv is a Detector that implements the Detector and collects +// resources from environment. This Detector is included as a +// builtin. +type fromEnv struct{} + +// compile time assertion that FromEnv implements Detector interface. +var _ Detector = fromEnv{} + +// Detect collects resources from environment. +func (fromEnv) Detect(context.Context) (*Resource, error) { + attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) + svcName := strings.TrimSpace(os.Getenv(svcNameKey)) + + if attrs == "" && svcName == "" { + return Empty(), nil + } + + var res *Resource + + if svcName != "" { + res = NewSchemaless(semconv.ServiceName(svcName)) + } + + r2, err := constructOTResources(attrs) + + // Ensure that the resource with the service name from OTEL_SERVICE_NAME + // takes precedence, if it was defined. + res, err2 := Merge(r2, res) + + if err == nil { + err = err2 + } else if err2 != nil { + err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) + } + + return res, err +} + +func constructOTResources(s string) (*Resource, error) { + if s == "" { + return Empty(), nil + } + pairs := strings.Split(s, ",") + var attrs []attribute.KeyValue + var invalid []string + for _, p := range pairs { + k, v, found := strings.Cut(p, "=") + if !found { + invalid = append(invalid, p) + continue + } + key := strings.TrimSpace(k) + val, err := url.PathUnescape(strings.TrimSpace(v)) + if err != nil { + // Retain original value if decoding fails, otherwise it will be + // an empty string. + val = v + otel.Handle(err) + } + attrs = append(attrs, attribute.String(key, val)) + } + var err error + if len(invalid) > 0 { + err = fmt.Errorf("%w: %v", errMissingValue, invalid) + } + return NewSchemaless(attrs...), err +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go new file mode 100644 index 0000000000..2d0f65498a --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -0,0 +1,109 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "strings" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type hostIDProvider func() (string, error) + +var defaultHostIDProvider hostIDProvider = platformHostIDReader.read + +var hostID = defaultHostIDProvider + +type hostIDReader interface { + read() (string, error) +} + +type fileReader func(string) (string, error) + +type commandExecutor func(string, ...string) (string, error) + +// hostIDReaderBSD implements hostIDReader. +type hostIDReaderBSD struct { + execCommand commandExecutor + readFile fileReader +} + +// read attempts to read the machine-id from /etc/hostid. If not found it will +// execute `kenv -q smbios.system.uuid`. If neither location yields an id an +// error will be returned. 
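Back in env.go above: the detector reads OTEL_SERVICE_NAME and OTEL_RESOURCE_ATTRIBUTES, URL-path-unescaping each attribute value, and the service name takes precedence over any `service.name` in the attribute list. A usage sketch; the environment values are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Values are URL-path-unescaped, so %20 becomes a space.
	os.Setenv("OTEL_SERVICE_NAME", "checkout")
	os.Setenv("OTEL_RESOURCE_ATTRIBUTES", "deployment.environment=prod,host.name=web%201")

	res, err := resource.New(context.Background(), resource.WithFromEnv())
	if err != nil {
		fmt.Println(err) // malformed pairs surface as an ErrPartialResource-wrapped error
	}
	fmt.Println(res) // service.name=checkout wins over any service.name in the attr list
}
```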
+func (r *hostIDReaderBSD) read() (string, error) { + if result, err := r.readFile("/etc/hostid"); err == nil { + return strings.TrimSpace(result), nil + } + + if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { + return strings.TrimSpace(result), nil + } + + return "", errors.New("host id not found in: /etc/hostid or kenv") +} + +// hostIDReaderDarwin implements hostIDReader. +type hostIDReaderDarwin struct { + execCommand commandExecutor +} + +// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id +// from the IOPlatformUUID line. If the command fails or the uuid cannot be +// parsed an error will be returned. +func (r *hostIDReaderDarwin) read() (string, error) { + result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice") + if err != nil { + return "", err + } + + lines := strings.Split(result, "\n") + for _, line := range lines { + if strings.Contains(line, "IOPlatformUUID") { + parts := strings.Split(line, " = ") + if len(parts) == 2 { + return strings.Trim(parts[1], "\""), nil + } + break + } + } + + return "", errors.New("could not parse IOPlatformUUID") +} + +type hostIDReaderLinux struct { + readFile fileReader +} + +// read attempts to read the machine-id from /etc/machine-id followed by +// /var/lib/dbus/machine-id. If neither location yields an ID an error will +// be returned. +func (r *hostIDReaderLinux) read() (string, error) { + if result, err := r.readFile("/etc/machine-id"); err == nil { + return strings.TrimSpace(result), nil + } + + if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil { + return strings.TrimSpace(result), nil + } + + return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id") +} + +type hostIDDetector struct{} + +// Detect returns a *Resource containing the platform specific host id. 
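Because each reader takes its file and exec dependencies as struct fields, the fallback order is straightforward to exercise. A test sketch, assuming in-package access to the unexported `hostIDReaderLinux`; the machine-id value is fake:

```go
package resource

import (
	"os"
	"testing"
)

func TestHostIDReaderLinuxFallback(t *testing.T) {
	r := &hostIDReaderLinux{
		readFile: func(path string) (string, error) {
			// Simulate /etc/machine-id being absent and the dbus copy present.
			if path == "/var/lib/dbus/machine-id" {
				return "0123456789abcdef0123456789abcdef\n", nil
			}
			return "", os.ErrNotExist
		},
	}
	id, err := r.read()
	if err != nil || id != "0123456789abcdef0123456789abcdef" {
		t.Fatalf("got %q, %v", id, err) // read trims the trailing newline
	}
}
```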
+func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { + hostID, err := hostID() + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.HostID(hostID), + ), nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go new file mode 100644 index 0000000000..cc8b8938ed --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go @@ -0,0 +1,12 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build dragonfly || freebsd || netbsd || openbsd || solaris +// +build dragonfly freebsd netbsd openbsd solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderBSD{ + execCommand: execCommand, + readFile: readFile, +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go new file mode 100644 index 0000000000..b09fde3b73 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderDarwin{ + execCommand: execCommand, +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go new file mode 100644 index 0000000000..d9e5d1a8ff --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import "os/exec" + +func execCommand(name string, arg ...string) (string, error) { + cmd := exec.Command(name, arg...) 
+ b, err := cmd.Output() + if err != nil { + return "", err + } + + return string(b), nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go new file mode 100644 index 0000000000..f84f173240 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build linux +// +build linux + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderLinux{ + readFile: readFile, +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go new file mode 100644 index 0000000000..6354b35602 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import "os" + +func readFile(filename string) (string, error) { + b, err := os.ReadFile(filename) + if err != nil { + return "", err + } + + return string(b), nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go new file mode 100644 index 0000000000..df12c44c56 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// hostIDReaderUnsupported is a placeholder implementation for operating systems +// for which this project currently doesn't support host.id +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. 
+type hostIDReaderUnsupported struct{} + +func (*hostIDReaderUnsupported) read() (string, error) { + return "", nil +} + +var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go new file mode 100644 index 0000000000..71386e2da4 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build windows +// +build windows + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "golang.org/x/sys/windows/registry" +) + +// implements hostIDReader +type hostIDReaderWindows struct{} + +// read reads MachineGuid from the windows registry key: +// SOFTWARE\Microsoft\Cryptography +func (*hostIDReaderWindows) read() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, + registry.QUERY_VALUE|registry.WOW64_64KEY, + ) + + if err != nil { + return "", err + } + defer k.Close() + + guid, _, err := k.GetStringValue("MachineGuid") + if err != nil { + return "", err + } + + return guid, nil +} + +var platformHostIDReader hostIDReader = &hostIDReaderWindows{} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os.go new file mode 100644 index 0000000000..8a48ab4fa3 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type osDescriptionProvider func() (string, error) + +var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription + +var osDescription = defaultOSDescriptionProvider + +func setDefaultOSDescriptionProvider() { + setOSDescriptionProvider(defaultOSDescriptionProvider) +} + +func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { + osDescription = osDescriptionProvider +} + +type ( + osTypeDetector struct{} + osDescriptionDetector struct{} +) + +// Detect returns a *Resource that describes the operating system type the +// service is running on. +func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { + osType := runtimeOS() + + osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) + + return NewWithAttributes( + semconv.SchemaURL, + osTypeAttribute, + ), nil +} + +// Detect returns a *Resource that describes the operating system the +// service is running on. +func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + description, err := osDescription() + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.OSDescription(description), + ), nil +} + +// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime +// into an OS type attribute with the corresponding value defined by the semantic +// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase +// and used as the value for the returned OS type attribute. 
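For the GOOS-to-semconv mapping documented above, the observable behavior is: known GOOS values resolve to semconv enum members, and anything unmapped becomes a lowercased `os.type` string. A small sketch against the public semconv package:

```go
package main

import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	// Mapped GOOS values use the semconv constants:
	fmt.Printf("%s=%s\n", semconv.OSTypeLinux.Key, semconv.OSTypeLinux.Value.AsString()) // os.type=linux
	// Unmapped values fall back to the lowercased GOOS string:
	kv := semconv.OSTypeKey.String("plan9")
	fmt.Printf("%s=%s\n", kv.Key, kv.Value.AsString()) // os.type=plan9
}
```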
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
+ // the elements in this map are the intersection between
+ // available GOOS values and defined semconv OS types
+ osTypeAttributeMap := map[string]attribute.KeyValue{
+ "aix": semconv.OSTypeAIX,
+ "darwin": semconv.OSTypeDarwin,
+ "dragonfly": semconv.OSTypeDragonflyBSD,
+ "freebsd": semconv.OSTypeFreeBSD,
+ "linux": semconv.OSTypeLinux,
+ "netbsd": semconv.OSTypeNetBSD,
+ "openbsd": semconv.OSTypeOpenBSD,
+ "solaris": semconv.OSTypeSolaris,
+ "windows": semconv.OSTypeWindows,
+ "zos": semconv.OSTypeZOS,
+ }
+
+ var osTypeAttribute attribute.KeyValue
+
+ if attr, ok := osTypeAttributeMap[osType]; ok {
+ osTypeAttribute = attr
+ } else {
+ osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType))
+ }
+
+ return osTypeAttribute
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
new file mode 100644
index 0000000000..ce455dc544
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "os"
+)
+
+type plist struct {
+ XMLName xml.Name `xml:"plist"`
+ Dict dict `xml:"dict"`
+}
+
+type dict struct {
+ Key []string `xml:"key"`
+ String []string `xml:"string"`
+}
+
+// osRelease builds a string describing the operating system release based on the
+// contents of the property list (.plist) system files. If no .plist files are found,
+// or if the required properties to build the release description string are missing,
+// an empty string is returned instead. The generated string resembles the output of
+// the `sw_vers` commandline program, but in a single-line string. For more information
+// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
+func osRelease() string {
+ file, err := getPlistFile()
+ if err != nil {
+ return ""
+ }
+
+ defer file.Close()
+
+ values, err := parsePlistFile(file)
+ if err != nil {
+ return ""
+ }
+
+ return buildOSRelease(values)
+}
+
+// getPlistFile returns a *os.File pointing to one of the well-known .plist files
+// available on macOS. If no file can be opened, it returns an error.
+func getPlistFile() (*os.File, error) {
+ return getFirstAvailableFile([]string{
+ "/System/Library/CoreServices/SystemVersion.plist",
+ "/System/Library/CoreServices/ServerVersion.plist",
+ })
+}
+
+// parsePlistFile process the file pointed by `file` as a .plist file and returns
+// a map with the key-values for each pair of correlated <key> and <string> elements
+// contained in it.
+func parsePlistFile(file io.Reader) (map[string]string, error) {
+ var v plist
+
+ err := xml.NewDecoder(file).Decode(&v)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(v.Dict.Key) != len(v.Dict.String) {
+ return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match")
+ }
+
+ properties := make(map[string]string, len(v.Dict.Key))
+ for i, key := range v.Dict.Key {
+ properties[key] = v.Dict.String[i]
+ }
+
+ return properties, nil
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
+// and `ProductBuildVersion` properties. If some of these properties are not found,
+// it returns an empty string.
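A sketch of the darwin release string this file builds, assuming in-package access; the plist property values are illustrative:

```go
//go:build darwin

package resource

import "fmt"

// Sketch: feed buildOSRelease the properties parsePlistFile would produce
// for an illustrative SystemVersion.plist.
func demoBuildOSRelease() {
	props := map[string]string{
		"ProductName":         "macOS",
		"ProductVersion":      "14.4.1",
		"ProductBuildVersion": "23E224",
	}
	fmt.Println(buildOSRelease(props)) // macOS 14.4.1 (23E224)
}
```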
+func buildOSRelease(properties map[string]string) string { + productName := properties["ProductName"] + productVersion := properties["ProductVersion"] + productBuildVersion := properties["ProductBuildVersion"] + + if productName == "" || productVersion == "" || productBuildVersion == "" { + return "" + } + + return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go new file mode 100644 index 0000000000..f537e5ca5c --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +// osRelease builds a string describing the operating system release based on the +// properties of the os-release file. If no os-release file is found, or if the +// required properties to build the release description string are missing, an empty +// string is returned instead. For more information about os-release files, see: +// https://www.freedesktop.org/software/systemd/man/os-release.html +func osRelease() string { + file, err := getOSReleaseFile() + if err != nil { + return "" + } + + defer file.Close() + + values := parseOSReleaseFile(file) + + return buildOSRelease(values) +} + +// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release +// files, according to their order of preference. If no file can be opened, it +// returns an error. +func getOSReleaseFile() (*os.File, error) { + return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"}) +} + +// parseOSReleaseFile process the file pointed by `file` as an os-release file and +// returns a map with the key-values contained in it. Empty lines or lines starting +// with a '#' character are ignored, as well as lines with the missing key=value +// separator. Values are unquoted and unescaped. +func parseOSReleaseFile(file io.Reader) map[string]string { + values := make(map[string]string) + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + line := scanner.Text() + + if skip(line) { + continue + } + + key, value, ok := parse(line) + if ok { + values[key] = value + } + } + + return values +} + +// skip returns true if the line is blank or starts with a '#' character, and +// therefore should be skipped from processing. +func skip(line string) bool { + line = strings.TrimSpace(line) + + return len(line) == 0 || strings.HasPrefix(line, "#") +} + +// parse attempts to split the provided line on the first '=' character, and then +// sanitize each side of the split before returning them as a key-value pair. +func parse(line string) (string, string, bool) { + k, v, found := strings.Cut(line, "=") + + if !found || len(k) == 0 { + return "", "", false + } + + key := strings.TrimSpace(k) + value := unescape(unquote(strings.TrimSpace(v))) + + return key, value, true +} + +// unquote checks whether the string `s` is quoted with double or single quotes +// and, if so, returns a version of the string without them. Otherwise it returns +// the provided string unchanged. 
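And the unix counterpart: an in-package sketch feeding `parseOSReleaseFile` an illustrative os-release snippet. Note that NAME plus VERSION win over PRETTY_NAME, per the buildOSRelease rationale below:

```go
//go:build linux

package resource

import (
	"fmt"
	"strings"
)

// Sketch: an illustrative /etc/os-release snippet run through the parser.
func demoOSRelease() {
	data := `# comment lines and blanks are skipped
NAME="Ubuntu"
VERSION="22.04.4 LTS (Jammy Jellyfish)"
PRETTY_NAME="Ubuntu 22.04.4 LTS"
`
	values := parseOSReleaseFile(strings.NewReader(data))
	fmt.Println(buildOSRelease(values)) // Ubuntu 22.04.4 LTS (Jammy Jellyfish)
}
```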
+func unquote(s string) string { + if len(s) < 2 { + return s + } + + if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] { + return s[1 : len(s)-1] + } + + return s +} + +// unescape removes the `\` prefix from some characters that are expected +// to have it added in front of them for escaping purposes. +func unescape(s string) string { + return strings.NewReplacer( + `\$`, `$`, + `\"`, `"`, + `\'`, `'`, + `\\`, `\`, + "\\`", "`", + ).Replace(s) +} + +// buildOSRelease builds a string describing the OS release based on the properties +// available on the provided map. It favors a combination of the `NAME` and `VERSION` +// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't +// found), and using `PRETTY_NAME` alone if some of the previous are not present. If +// none of these properties are found, it returns an empty string. +// +// The rationale behind not using `PRETTY_NAME` as first choice was that, for some +// Linux distributions, it doesn't include the same detail that can be found on the +// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with +// other properties can produce "pretty" redundant strings in some cases. +func buildOSRelease(values map[string]string) string { + var osRelease string + + name := values["NAME"] + version := values["VERSION"] + + if version == "" { + version = values["VERSION_ID"] + } + + if name != "" && version != "" { + osRelease = fmt.Sprintf("%s %s", name, version) + } else { + osRelease = values["PRETTY_NAME"] + } + + return osRelease +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go new file mode 100644 index 0000000000..a6ff26a4d2 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +type unameProvider func(buf *unix.Utsname) (err error) + +var defaultUnameProvider unameProvider = unix.Uname + +var currentUnameProvider = defaultUnameProvider + +func setDefaultUnameProvider() { + setUnameProvider(defaultUnameProvider) +} + +func setUnameProvider(unameProvider unameProvider) { + currentUnameProvider = unameProvider +} + +// platformOSDescription returns a human readable OS version information string. +// The final string combines OS release information (where available) and the +// result of the `uname` system call. +func platformOSDescription() (string, error) { + uname, err := uname() + if err != nil { + return "", err + } + + osRelease := osRelease() + if osRelease != "" { + return fmt.Sprintf("%s (%s)", osRelease, uname), nil + } + + return uname, nil +} + +// uname issues a uname(2) system call (or equivalent on systems which doesn't +// have one) and formats the output in a single string, similar to the output +// of the `uname` commandline program. The final string resembles the one +// obtained with a call to `uname -snrvm`. 
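End to end, `WithOSDescription` surfaces the string built by `platformOSDescription` here. A runnable sketch against the public API; the sample output in the comment is illustrative:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res, err := resource.New(context.Background(), resource.WithOSDescription())
	if err != nil {
		fmt.Println(err)
	}
	// On Linux this prints something like:
	// os.description=Ubuntu 24.04 LTS (Linux myhost 6.8.0-31-generic #31-Ubuntu SMP ... x86_64)
	fmt.Println(res)
}
```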
+func uname() (string, error) { + var utsName unix.Utsname + + err := currentUnameProvider(&utsName) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s %s %s %s %s", + unix.ByteSliceToString(utsName.Sysname[:]), + unix.ByteSliceToString(utsName.Nodename[:]), + unix.ByteSliceToString(utsName.Release[:]), + unix.ByteSliceToString(utsName.Version[:]), + unix.ByteSliceToString(utsName.Machine[:]), + ), nil +} + +// getFirstAvailableFile returns an *os.File of the first available +// file from a list of candidate file paths. +func getFirstAvailableFile(candidates []string) (*os.File, error) { + for _, c := range candidates { + file, err := os.Open(c) + if err == nil { + return file, nil + } + } + + return nil, fmt.Errorf("no candidate file available: %v", candidates) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go new file mode 100644 index 0000000000..a77742b077 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// platformOSDescription is a placeholder implementation for OSes +// for which this project currently doesn't support os.description +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. +func platformOSDescription() (string, error) { + return "", nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go new file mode 100644 index 0000000000..5e3d199d78 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "strconv" + + "golang.org/x/sys/windows/registry" +) + +// platformOSDescription returns a human readable OS version information string. +// It does so by querying registry values under the +// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string +// resembles the one displayed by the Version Reporter Applet (winver.exe). 
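On Windows the final string follows the Sprintf layout in the function below; note the trailing space appended to a non-empty DisplayVersion before formatting. An illustrative reproduction of just that formatting, with made-up registry values:

```go
package main

import "fmt"

func main() {
	// Same layout as the vendored Windows implementation; values illustrative.
	fmt.Printf("%s %s(%s) [Version %s.%s.%s.%s]\n",
		"Windows 10 Pro", "22H2 ", "2009", "10", "0", "19045", "4291")
	// Windows 10 Pro 22H2 (2009) [Version 10.0.19045.4291]
}
```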
+func platformOSDescription() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + + if err != nil { + return "", err + } + + defer k.Close() + + var ( + productName = readProductName(k) + displayVersion = readDisplayVersion(k) + releaseID = readReleaseID(k) + currentMajorVersionNumber = readCurrentMajorVersionNumber(k) + currentMinorVersionNumber = readCurrentMinorVersionNumber(k) + currentBuildNumber = readCurrentBuildNumber(k) + ubr = readUBR(k) + ) + + if displayVersion != "" { + displayVersion += " " + } + + return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", + productName, + displayVersion, + releaseID, + currentMajorVersionNumber, + currentMinorVersionNumber, + currentBuildNumber, + ubr, + ), nil +} + +func getStringValue(name string, k registry.Key) string { + value, _, _ := k.GetStringValue(name) + + return value +} + +func getIntegerValue(name string, k registry.Key) uint64 { + value, _, _ := k.GetIntegerValue(name) + + return value +} + +func readProductName(k registry.Key) string { + return getStringValue("ProductName", k) +} + +func readDisplayVersion(k registry.Key) string { + return getStringValue("DisplayVersion", k) +} + +func readReleaseID(k registry.Key) string { + return getStringValue("ReleaseID", k) +} + +func readCurrentMajorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) +} + +func readCurrentMinorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) +} + +func readCurrentBuildNumber(k registry.Key) string { + return getStringValue("CurrentBuildNumber", k) +} + +func readUBR(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("UBR", k), 10) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/process.go new file mode 100644 index 0000000000..085fe68fd7 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -0,0 +1,173 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "os/user" + "path/filepath" + "runtime" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type ( + pidProvider func() int + executablePathProvider func() (string, error) + commandArgsProvider func() []string + ownerProvider func() (*user.User, error) + runtimeNameProvider func() string + runtimeVersionProvider func() string + runtimeOSProvider func() string + runtimeArchProvider func() string +) + +var ( + defaultPidProvider pidProvider = os.Getpid + defaultExecutablePathProvider executablePathProvider = os.Executable + defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } + defaultOwnerProvider ownerProvider = user.Current + defaultRuntimeNameProvider runtimeNameProvider = func() string { + if runtime.Compiler == "gc" { + return "go" + } + return runtime.Compiler + } + defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version + defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } + defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } +) + +var ( + pid = defaultPidProvider + executablePath = defaultExecutablePathProvider + commandArgs = defaultCommandArgsProvider + owner = 
defaultOwnerProvider + runtimeName = defaultRuntimeNameProvider + runtimeVersion = defaultRuntimeVersionProvider + runtimeOS = defaultRuntimeOSProvider + runtimeArch = defaultRuntimeArchProvider +) + +func setDefaultOSProviders() { + setOSProviders( + defaultPidProvider, + defaultExecutablePathProvider, + defaultCommandArgsProvider, + ) +} + +func setOSProviders( + pidProvider pidProvider, + executablePathProvider executablePathProvider, + commandArgsProvider commandArgsProvider, +) { + pid = pidProvider + executablePath = executablePathProvider + commandArgs = commandArgsProvider +} + +func setDefaultRuntimeProviders() { + setRuntimeProviders( + defaultRuntimeNameProvider, + defaultRuntimeVersionProvider, + defaultRuntimeOSProvider, + defaultRuntimeArchProvider, + ) +} + +func setRuntimeProviders( + runtimeNameProvider runtimeNameProvider, + runtimeVersionProvider runtimeVersionProvider, + runtimeOSProvider runtimeOSProvider, + runtimeArchProvider runtimeArchProvider, +) { + runtimeName = runtimeNameProvider + runtimeVersion = runtimeVersionProvider + runtimeOS = runtimeOSProvider + runtimeArch = runtimeArchProvider +} + +func setDefaultUserProviders() { + setUserProviders(defaultOwnerProvider) +} + +func setUserProviders(ownerProvider ownerProvider) { + owner = ownerProvider +} + +type ( + processPIDDetector struct{} + processExecutableNameDetector struct{} + processExecutablePathDetector struct{} + processCommandArgsDetector struct{} + processOwnerDetector struct{} + processRuntimeNameDetector struct{} + processRuntimeVersionDetector struct{} + processRuntimeDescriptionDetector struct{} +) + +// Detect returns a *Resource that describes the process identifier (PID) of the +// executing process. +func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil +} + +// Detect returns a *Resource that describes the name of the process executable. +func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { + executableName := filepath.Base(commandArgs()[0]) + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil +} + +// Detect returns a *Resource that describes the full path of the process executable. +func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { + executablePath, err := executablePath() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil +} + +// Detect returns a *Resource that describes all the command arguments as received +// by the process. +func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil +} + +// Detect returns a *Resource that describes the username of the user that owns the +// process. +func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { + owner, err := owner() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil +} + +// Detect returns a *Resource that describes the name of the compiler used to compile +// this process image. 
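Usage-wise, the per-attribute options let callers avoid `WithProcess()`, which would also capture command-line arguments. A sketch with illustrative output:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Opt into individual process attributes rather than WithProcess(),
	// sidestepping the command-line-arguments caveat documented above.
	res, err := resource.New(context.Background(),
		resource.WithProcessPID(),
		resource.WithProcessRuntimeDescription(),
	)
	if err != nil {
		fmt.Println(err)
	}
	// e.g. process.pid=4242,process.runtime.description=go version go1.22.3 linux/amd64
	fmt.Println(res)
}
```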
+func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil +} + +// Detect returns a *Resource that describes the version of the runtime of this process. +func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil +} + +// Detect returns a *Resource that describes the runtime of this process. +func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + runtimeDescription := fmt.Sprintf( + "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) + + return NewWithAttributes( + semconv.SchemaURL, + semconv.ProcessRuntimeDescription(runtimeDescription), + ), nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go new file mode 100644 index 0000000000..ad4b50df40 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -0,0 +1,294 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" +) + +// Resource describes an entity about which identifying information +// and metadata is exposed. Resource is an immutable object, +// equivalent to a map from key to unique value. +// +// Resources should be passed and stored as pointers +// (`*resource.Resource`). The `nil` value is equivalent to an empty +// Resource. +type Resource struct { + attrs attribute.Set + schemaURL string +} + +var ( + defaultResource *Resource + defaultResourceOnce sync.Once +) + +// ErrSchemaURLConflict is an error returned when two Resources are merged +// together that contain different, non-empty, schema URLs. +var ErrSchemaURLConflict = errors.New("conflicting Schema URL") + +// New returns a [Resource] built using opts. +// +// This may return a partial Resource along with an error containing +// [ErrPartialResource] if options that provide a [Detector] are used and that +// error is returned from one or more of the Detectors. It may also return a +// merge-conflict Resource along with an error containing +// [ErrSchemaURLConflict] if merging Resources from the opts results in a +// schema URL conflict (see [Resource.Merge] for more information). It is up to +// the caller to determine if this returned Resource should be used or not +// based on these errors. +func New(ctx context.Context, opts ...Option) (*Resource, error) { + cfg := config{} + for _, opt := range opts { + cfg = opt.apply(cfg) + } + + r := &Resource{schemaURL: cfg.schemaURL} + return r, detect(ctx, r, cfg.detectors) +} + +// NewWithAttributes creates a resource from attrs and associates the resource with a +// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs +// contains any invalid items those items will be dropped. The attrs are assumed to be +// in a schema identified by schemaURL. +func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource { + resource := NewSchemaless(attrs...) + resource.schemaURL = schemaURL + return resource +} + +// NewSchemaless creates a resource from attrs. 
If attrs contains duplicate keys, +// the last value will be used. If attrs contains any invalid items those items will +// be dropped. The resource will not be associated with a schema URL. If the schema +// of the attrs is known use NewWithAttributes instead. +func NewSchemaless(attrs ...attribute.KeyValue) *Resource { + if len(attrs) == 0 { + return &Resource{} + } + + // Ensure attributes comply with the specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute + s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { + return kv.Valid() + }) + + // If attrs only contains invalid entries do not allocate a new resource. + if s.Len() == 0 { + return &Resource{} + } + + return &Resource{attrs: s} //nolint +} + +// String implements the Stringer interface and provides a +// human-readable form of the resource. +// +// Avoid using this representation as the key in a map of resources, +// use Equivalent() as the key instead. +func (r *Resource) String() string { + if r == nil { + return "" + } + return r.attrs.Encoded(attribute.DefaultEncoder()) +} + +// MarshalLog is the marshaling function used by the logging system to represent this Resource. +func (r *Resource) MarshalLog() interface{} { + return struct { + Attributes attribute.Set + SchemaURL string + }{ + Attributes: r.attrs, + SchemaURL: r.schemaURL, + } +} + +// Attributes returns a copy of attributes from the resource in a sorted order. +// To avoid allocating a new slice, use an iterator. +func (r *Resource) Attributes() []attribute.KeyValue { + if r == nil { + r = Empty() + } + return r.attrs.ToSlice() +} + +// SchemaURL returns the schema URL associated with Resource r. +func (r *Resource) SchemaURL() string { + if r == nil { + return "" + } + return r.schemaURL +} + +// Iter returns an iterator of the Resource attributes. +// This is ideal to use if you do not want a copy of the attributes. +func (r *Resource) Iter() attribute.Iterator { + if r == nil { + r = Empty() + } + return r.attrs.Iter() +} + +// Equal returns true when a Resource is equivalent to this Resource. +func (r *Resource) Equal(eq *Resource) bool { + if r == nil { + r = Empty() + } + if eq == nil { + eq = Empty() + } + return r.Equivalent() == eq.Equivalent() +} + +// Merge creates a new [Resource] by merging a and b. +// +// If there are common keys between a and b, then the value from b will +// overwrite the value from a, even if b's value is empty. +// +// The SchemaURL of the resources will be merged according to the +// [OpenTelemetry specification rules]: +// +// - If a's schema URL is empty then the returned Resource's schema URL will +// be set to the schema URL of b, +// - Else if b's schema URL is empty then the returned Resource's schema URL +// will be set to the schema URL of a, +// - Else if the schema URLs of a and b are the same then that will be the +// schema URL of the returned Resource, +// - Else this is a merging error. If the resources have different, +// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will +// be returned with the merged Resource. The merged Resource will have an +// empty schema URL. It may be the case that some unintended attributes +// have been overwritten or old semantic conventions persisted in the +// returned Resource. It is up to the caller to determine if this returned +// Resource should be used or not. 
+// +// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge +func Merge(a, b *Resource) (*Resource, error) { + if a == nil && b == nil { + return Empty(), nil + } + if a == nil { + return b, nil + } + if b == nil { + return a, nil + } + + // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() + // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) + mi := attribute.NewMergeIterator(b.Set(), a.Set()) + combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for mi.Next() { + combine = append(combine, mi.Attribute()) + } + + switch { + case a.schemaURL == "": + return NewWithAttributes(b.schemaURL, combine...), nil + case b.schemaURL == "": + return NewWithAttributes(a.schemaURL, combine...), nil + case a.schemaURL == b.schemaURL: + return NewWithAttributes(a.schemaURL, combine...), nil + } + // Return the merged resource with an appropriate error. It is up to + // the user to decide if the returned resource can be used or not. + return NewSchemaless(combine...), fmt.Errorf( + "%w: %s and %s", + ErrSchemaURLConflict, + a.schemaURL, + b.schemaURL, + ) +} + +// Empty returns an instance of Resource with no attributes. It is +// equivalent to a `nil` Resource. +func Empty() *Resource { + return &Resource{} +} + +// Default returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. +func Default() *Resource { + defaultResourceOnce.Do(func() { + var err error + defaultDetectors := []Detector{ + defaultServiceNameDetector{}, + fromEnv{}, + telemetrySDK{}, + } + if x.Resource.Enabled() { + defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) + } + defaultResource, err = Detect( + context.Background(), + defaultDetectors..., + ) + if err != nil { + otel.Handle(err) + } + // If Detect did not return a valid resource, fall back to emptyResource. + if defaultResource == nil { + defaultResource = &Resource{} + } + }) + return defaultResource +} + +// Environment returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. +func Environment() *Resource { + detector := &fromEnv{} + resource, err := detector.Detect(context.Background()) + if err != nil { + otel.Handle(err) + } + return resource +} + +// Equivalent returns an object that can be compared for equality +// between two resources. This value is suitable for use as a key in +// a map. +func (r *Resource) Equivalent() attribute.Distinct { + return r.Set().Equivalent() +} + +// Set returns the equivalent *attribute.Set of this resource's attributes. +func (r *Resource) Set() *attribute.Set { + if r == nil { + r = Empty() + } + return &r.attrs +} + +// MarshalJSON encodes the resource attributes as a JSON list of { "Key": +// "...", "Value": ... } pairs in order sorted by key. +func (r *Resource) MarshalJSON() ([]byte, error) { + if r == nil { + r = Empty() + } + return r.attrs.MarshalJSON() +} + +// Len returns the number of unique key-values in this Resource. +func (r *Resource) Len() int { + if r == nil { + return 0 + } + return r.attrs.Len() +} + +// Encoded returns an encoded representation of the resource. 
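The Merge rules above are easiest to see with a concrete case; a sketch where b overrides a on a shared key and contributes no schema URL:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func main() {
	a := resource.NewWithAttributes(semconv.SchemaURL,
		attribute.String("env", "prod"), attribute.String("region", "us-east-1"))
	b := resource.NewSchemaless(attribute.String("env", "staging"))

	merged, err := resource.Merge(a, b)
	fmt.Println(merged, err)
	// b wins on conflicting keys: env=staging,region=us-east-1
	// the schema URL comes from a, since b has none; err is nil here
}
```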
+func (r *Resource) Encoded(enc attribute.Encoder) string { + if r == nil { + return "" + } + return r.attrs.Encoded(enc) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/README.md new file mode 100644 index 0000000000..f2936e1439 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/README.md @@ -0,0 +1,3 @@ +# SDK Trace + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go new file mode 100644 index 0000000000..1d399a75db --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -0,0 +1,409 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/internal/env" + "go.opentelemetry.io/otel/trace" +) + +// Defaults for BatchSpanProcessorOptions. +const ( + DefaultMaxQueueSize = 2048 + DefaultScheduleDelay = 5000 + DefaultExportTimeout = 30000 + DefaultMaxExportBatchSize = 512 +) + +// BatchSpanProcessorOption configures a BatchSpanProcessor. +type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) + +// BatchSpanProcessorOptions is configuration settings for a +// BatchSpanProcessor. +type BatchSpanProcessorOptions struct { + // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the + // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior. + // The default value of MaxQueueSize is 2048. + MaxQueueSize int + + // BatchTimeout is the maximum duration for constructing a batch. Processor + // forcefully sends available spans when timeout is reached. + // The default value of BatchTimeout is 5000 msec. + BatchTimeout time.Duration + + // ExportTimeout specifies the maximum duration for exporting spans. If the timeout + // is reached, the export will be cancelled. + // The default value of ExportTimeout is 30000 msec. + ExportTimeout time.Duration + + // MaxExportBatchSize is the maximum number of spans to process in a single batch. + // If there are more than one batch worth of spans then it processes multiple batches + // of spans one batch after the other without any delay. + // The default value of MaxExportBatchSize is 512. + MaxExportBatchSize int + + // BlockOnQueueFull blocks onEnd() and onStart() method if the queue is full + // AND if BlockOnQueueFull is set to true. + // Blocking option should be used carefully as it can severely affect the performance of an + // application. + BlockOnQueueFull bool +} + +// batchSpanProcessor is a SpanProcessor that batches asynchronously-received +// spans and sends them to a trace.Exporter when complete. 
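Construction-wise, the batch processor is usually wired into a TracerProvider. A sketch assuming the stdouttrace exporter, which is not part of this diff; any SpanExporter works:

```go
package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	exp, err := stdouttrace.New()
	if err != nil {
		panic(err)
	}
	// Options override the env-derived defaults described below.
	bsp := sdktrace.NewBatchSpanProcessor(exp,
		sdktrace.WithMaxQueueSize(4096),
		sdktrace.WithBatchTimeout(2*time.Second),
		sdktrace.WithMaxExportBatchSize(256),
	)
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
	defer func() { _ = tp.Shutdown(context.Background()) }()
	_ = tp.Tracer("demo")
}
```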
+type batchSpanProcessor struct { + e SpanExporter + o BatchSpanProcessorOptions + + queue chan ReadOnlySpan + dropped uint32 + + batch []ReadOnlySpan + batchMutex sync.Mutex + timer *time.Timer + stopWait sync.WaitGroup + stopOnce sync.Once + stopCh chan struct{} + stopped atomic.Bool +} + +var _ SpanProcessor = (*batchSpanProcessor)(nil) + +// NewBatchSpanProcessor creates a new SpanProcessor that will send completed +// span batches to the exporter with the supplied options. +// +// If the exporter is nil, the span processor will perform no action. +func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor { + maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize) + maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) + + if maxExportBatchSize > maxQueueSize { + if DefaultMaxExportBatchSize > maxQueueSize { + maxExportBatchSize = maxQueueSize + } else { + maxExportBatchSize = DefaultMaxExportBatchSize + } + } + + o := BatchSpanProcessorOptions{ + BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond, + ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond, + MaxQueueSize: maxQueueSize, + MaxExportBatchSize: maxExportBatchSize, + } + for _, opt := range options { + opt(&o) + } + bsp := &batchSpanProcessor{ + e: exporter, + o: o, + batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize), + timer: time.NewTimer(o.BatchTimeout), + queue: make(chan ReadOnlySpan, o.MaxQueueSize), + stopCh: make(chan struct{}), + } + + bsp.stopWait.Add(1) + go func() { + defer bsp.stopWait.Done() + bsp.processQueue() + bsp.drainQueue() + }() + + return bsp +} + +// OnStart method does nothing. +func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} + +// OnEnd method enqueues a ReadOnlySpan for later processing. +func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { + // Do not enqueue spans after Shutdown. + if bsp.stopped.Load() { + return + } + + // Do not enqueue spans if we are just going to drop them. + if bsp.e == nil { + return + } + bsp.enqueue(s) +} + +// Shutdown flushes the queue and waits until all spans are processed. +// It only executes once. Subsequent call does nothing. +func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { + var err error + bsp.stopOnce.Do(func() { + bsp.stopped.Store(true) + wait := make(chan struct{}) + go func() { + close(bsp.stopCh) + bsp.stopWait.Wait() + if bsp.e != nil { + if err := bsp.e.Shutdown(ctx); err != nil { + otel.Handle(err) + } + } + close(wait) + }() + // Wait until the wait group is done or the context is cancelled + select { + case <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + }) + return err +} + +type forceFlushSpan struct { + ReadOnlySpan + flushed chan struct{} +} + +func (f forceFlushSpan) SpanContext() trace.SpanContext { + return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) +} + +// ForceFlush exports all ended spans that have not yet been exported. +func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { + // Interrupt if context is already canceled. + if err := ctx.Err(); err != nil { + return err + } + + // Do nothing after Shutdown. 
+ if bsp.stopped.Load() { + return nil + } + + var err error + if bsp.e != nil { + flushCh := make(chan struct{}) + if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) { + select { + case <-bsp.stopCh: + // The batchSpanProcessor is Shutdown. + return nil + case <-flushCh: + // Processed any items in queue prior to ForceFlush being called + case <-ctx.Done(): + return ctx.Err() + } + } + + wait := make(chan error) + go func() { + wait <- bsp.exportSpans(ctx) + close(wait) + }() + // Wait until the export is finished or the context is cancelled/timed out + select { + case err = <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + } + return err +} + +// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the +// maximum queue size allowed for a BatchSpanProcessor. +func WithMaxQueueSize(size int) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.MaxQueueSize = size + } +} + +// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures +// the maximum export batch size allowed for a BatchSpanProcessor. +func WithMaxExportBatchSize(size int) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.MaxExportBatchSize = size + } +} + +// WithBatchTimeout returns a BatchSpanProcessorOption that configures the +// maximum delay allowed for a BatchSpanProcessor before it will export any +// held span (whether the queue is full or not). +func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.BatchTimeout = delay + } +} + +// WithExportTimeout returns a BatchSpanProcessorOption that configures the +// amount of time a BatchSpanProcessor waits for an exporter to export before +// abandoning the export. +func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.ExportTimeout = timeout + } +} + +// WithBlocking returns a BatchSpanProcessorOption that configures a +// BatchSpanProcessor to wait for enqueue operations to succeed instead of +// dropping data when the queue is full. +func WithBlocking() BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.BlockOnQueueFull = true + } +} + +// exportSpans is a subroutine of processing and draining the queue. +func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { + bsp.timer.Reset(bsp.o.BatchTimeout) + + bsp.batchMutex.Lock() + defer bsp.batchMutex.Unlock() + + if bsp.o.ExportTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) + defer cancel() + } + + if l := len(bsp.batch); l > 0 { + global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + err := bsp.e.ExportSpans(ctx, bsp.batch) + + // A new batch is always created after exporting, even if the batch failed to be exported. + // + // It is up to the exporter to implement any type of retry logic if a batch is failing + // to be exported, since it is specific to the protocol and backend being sent to. + bsp.batch = bsp.batch[:0] + + if err != nil { + return err + } + } + return nil +} + +// processQueue removes spans from the `queue` channel until processor +// is shut down. It calls the exporter in batches of up to MaxExportBatchSize +// waiting up to BatchTimeout to form a batch. 
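For lifecycle handling, both ForceFlush and Shutdown honor context deadlines, and Shutdown is idempotent via stopOnce. A small helper sketch that bounds them so a stuck exporter cannot hang process exit:

```go
package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// flushAndStop exports everything queued so far, then drains the queue and
// shuts the exporter down; a second Shutdown call would be a no-op.
func flushAndStop(bsp sdktrace.SpanProcessor) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	if err := bsp.ForceFlush(ctx); err != nil {
		otel.Handle(err)
	}
	if err := bsp.Shutdown(ctx); err != nil {
		otel.Handle(err)
	}
}
```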
+func (bsp *batchSpanProcessor) processQueue() { + defer bsp.timer.Stop() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for { + select { + case <-bsp.stopCh: + return + case <-bsp.timer.C: + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + case sd := <-bsp.queue: + if ffs, ok := sd.(forceFlushSpan); ok { + close(ffs.flushed) + continue + } + bsp.batchMutex.Lock() + bsp.batch = append(bsp.batch, sd) + shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize + bsp.batchMutex.Unlock() + if shouldExport { + if !bsp.timer.Stop() { + <-bsp.timer.C + } + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + } + } + } +} + +// drainQueue awaits the any caller that had added to bsp.stopWait +// to finish the enqueue, then exports the final batch. +func (bsp *batchSpanProcessor) drainQueue() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for { + select { + case sd := <-bsp.queue: + if _, ok := sd.(forceFlushSpan); ok { + // Ignore flush requests as they are not valid spans. + continue + } + + bsp.batchMutex.Lock() + bsp.batch = append(bsp.batch, sd) + shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize + bsp.batchMutex.Unlock() + + if shouldExport { + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + } + default: + // There are no more enqueued spans. Make final export. + if err := bsp.exportSpans(ctx); err != nil { + otel.Handle(err) + } + return + } + } +} + +func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) { + ctx := context.TODO() + if bsp.o.BlockOnQueueFull { + bsp.enqueueBlockOnQueueFull(ctx, sd) + } else { + bsp.enqueueDrop(ctx, sd) + } +} + +func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool { + if !sd.SpanContext().IsSampled() { + return false + } + + select { + case bsp.queue <- sd: + return true + case <-ctx.Done(): + return false + } +} + +func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { + if !sd.SpanContext().IsSampled() { + return false + } + + select { + case bsp.queue <- sd: + return true + default: + atomic.AddUint32(&bsp.dropped, 1) + } + return false +} + +// MarshalLog is the marshaling function used by the logging system to represent this Span Processor. +func (bsp *batchSpanProcessor) MarshalLog() interface{} { + return struct { + Type string + SpanExporter SpanExporter + Config BatchSpanProcessorOptions + }{ + Type: "BatchSpanProcessor", + SpanExporter: bsp.e, + Config: bsp.o, + } +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go new file mode 100644 index 0000000000..1f60524e3e --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package trace contains support for OpenTelemetry distributed tracing. + +The following assumes a basic familiarity with OpenTelemetry concepts. +See https://opentelemetry.io. 
+*/ +package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/event.go new file mode 100644 index 0000000000..60a7ed1349 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/event.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Event is a thing that happened during a Span's lifetime. +type Event struct { + // Name is the name of this event + Name string + + // Attributes describe the aspects of the event. + Attributes []attribute.KeyValue + + // DroppedAttributeCount is the number of attributes that were not + // recorded due to configured limits being reached. + DroppedAttributeCount int + + // Time at which this event was recorded. + Time time.Time +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go new file mode 100644 index 0000000000..821c83faa1 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "slices" + "sync" + + "go.opentelemetry.io/otel/internal/global" +) + +// evictedQueue is a FIFO queue with a configurable capacity. +type evictedQueue[T any] struct { + queue []T + capacity int + droppedCount int + logDropped func() +} + +func newEvictedQueueEvent(capacity int) evictedQueue[Event] { + // Do not pre-allocate queue, do this lazily. + return evictedQueue[Event]{ + capacity: capacity, + logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }), + } +} + +func newEvictedQueueLink(capacity int) evictedQueue[Link] { + // Do not pre-allocate queue, do this lazily. + return evictedQueue[Link]{ + capacity: capacity, + logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }), + } +} + +// add adds value to the evictedQueue eq. If eq is at capacity, the oldest +// queued value will be discarded and the drop count incremented. +func (eq *evictedQueue[T]) add(value T) { + if eq.capacity == 0 { + eq.droppedCount++ + eq.logDropped() + return + } + + if eq.capacity > 0 && len(eq.queue) == eq.capacity { + // Drop first-in while avoiding allocating more capacity to eq.queue. + copy(eq.queue[:eq.capacity-1], eq.queue[1:]) + eq.queue = eq.queue[:eq.capacity-1] + eq.droppedCount++ + eq.logDropped() + } + eq.queue = append(eq.queue, value) +} + +// copy returns a copy of the evictedQueue. 
+func (eq *evictedQueue[T]) copy() []T {
+	return slices.Clone(eq.queue)
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
new file mode 100644
index 0000000000..925bcf9930
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	crand "crypto/rand"
+	"encoding/binary"
+	"math/rand"
+	"sync"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+// IDGenerator allows custom generators for TraceID and SpanID.
+type IDGenerator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// NewIDs returns a new trace and span ID.
+	NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// NewSpanID returns an ID for a new span in the trace with traceID.
+	NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type randomIDGenerator struct {
+	sync.Mutex
+	randSource *rand.Rand
+}
+
+var _ IDGenerator = &randomIDGenerator{}
+
+// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
+func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+	gen.Lock()
+	defer gen.Unlock()
+	sid := trace.SpanID{}
+	for {
+		_, _ = gen.randSource.Read(sid[:])
+		if sid.IsValid() {
+			break
+		}
+	}
+	return sid
+}
+
+// NewIDs returns a non-zero trace ID and a non-zero span ID from a
+// randomly-chosen sequence.
+func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+	gen.Lock()
+	defer gen.Unlock()
+	tid := trace.TraceID{}
+	sid := trace.SpanID{}
+	for {
+		_, _ = gen.randSource.Read(tid[:])
+		if tid.IsValid() {
+			break
+		}
+	}
+	for {
+		_, _ = gen.randSource.Read(sid[:])
+		if sid.IsValid() {
+			break
+		}
+	}
+	return tid, sid
+}
+
+func defaultIDGenerator() IDGenerator {
+	gen := &randomIDGenerator{}
+	var rngSeed int64
+	_ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed)
+	gen.randSource = rand.New(rand.NewSource(rngSeed))
+	return gen
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
new file mode 100644
index 0000000000..c03bdc90f6
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
@@ -0,0 +1,23 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext trace.SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+
+	// DroppedAttributeCount is the number of attributes that were not
+	// recorded due to configured limits being reached.
+ DroppedAttributeCount int +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go new file mode 100644 index 0000000000..14c2e5bebd --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -0,0 +1,493 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" + "go.opentelemetry.io/otel/trace/noop" +) + +const ( + defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" +) + +// tracerProviderConfig. +type tracerProviderConfig struct { + // processors contains collection of SpanProcessors that are processing pipeline + // for spans in the trace signal. + // SpanProcessors registered with a TracerProvider and are called at the start + // and end of a Span's lifecycle, and are called in the order they are + // registered. + processors []SpanProcessor + + // sampler is the default sampler used when creating new spans. + sampler Sampler + + // idGenerator is used to generate all Span and Trace IDs when needed. + idGenerator IDGenerator + + // spanLimits defines the attribute, event, and link limits for spans. + spanLimits SpanLimits + + // resource contains attributes representing an entity that produces telemetry. + resource *resource.Resource +} + +// MarshalLog is the marshaling function used by the logging system to represent this Provider. +func (cfg tracerProviderConfig) MarshalLog() interface{} { + return struct { + SpanProcessors []SpanProcessor + SamplerType string + IDGeneratorType string + SpanLimits SpanLimits + Resource *resource.Resource + }{ + SpanProcessors: cfg.processors, + SamplerType: fmt.Sprintf("%T", cfg.sampler), + IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator), + SpanLimits: cfg.spanLimits, + Resource: cfg.resource, + } +} + +// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to +// instrumentation so it can trace operational flow through a system. +type TracerProvider struct { + embedded.TracerProvider + + mu sync.Mutex + namedTracer map[instrumentation.Scope]*tracer + spanProcessors atomic.Pointer[spanProcessorStates] + + isShutdown atomic.Bool + + // These fields are not protected by the lock mu. They are assumed to be + // immutable after creation of the TracerProvider. + sampler Sampler + idGenerator IDGenerator + spanLimits SpanLimits + resource *resource.Resource +} + +var _ trace.TracerProvider = &TracerProvider{} + +// NewTracerProvider returns a new and configured TracerProvider. +// +// By default the returned TracerProvider is configured with: +// - a ParentBased(AlwaysSample) Sampler +// - a random number IDGenerator +// - the resource.Default() Resource +// - the default SpanLimits. +// +// The passed opts are used to override these default values and configure the +// returned TracerProvider appropriately. 
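+//
+// An illustrative sketch (exp is an assumed SpanExporter, not defined here):
+//
+//	tp := NewTracerProvider(
+//		WithBatcher(exp),
+//		WithSampler(ParentBased(TraceIDRatioBased(0.5))),
+//	)
+//	defer func() { _ = tp.Shutdown(context.Background()) }()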
+func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { + o := tracerProviderConfig{ + spanLimits: NewSpanLimits(), + } + o = applyTracerProviderEnvConfigs(o) + + for _, opt := range opts { + o = opt.apply(o) + } + + o = ensureValidTracerProviderConfig(o) + + tp := &TracerProvider{ + namedTracer: make(map[instrumentation.Scope]*tracer), + sampler: o.sampler, + idGenerator: o.idGenerator, + spanLimits: o.spanLimits, + resource: o.resource, + } + global.Info("TracerProvider created", "config", o) + + spss := make(spanProcessorStates, 0, len(o.processors)) + for _, sp := range o.processors { + spss = append(spss, newSpanProcessorState(sp)) + } + tp.spanProcessors.Store(&spss) + + return tp +} + +// Tracer returns a Tracer with the given name and options. If a Tracer for +// the given name and options does not exist it is created, otherwise the +// existing Tracer is returned. +// +// If name is empty, DefaultTracerName is used instead. +// +// This method is safe to be called concurrently. +func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown(). + if p.isShutdown.Load() { + return noop.NewTracerProvider().Tracer(name, opts...) + } + c := trace.NewTracerConfig(opts...) + if name == "" { + name = defaultTracerName + } + is := instrumentation.Scope{ + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + } + + t, ok := func() (trace.Tracer, bool) { + p.mu.Lock() + defer p.mu.Unlock() + // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran + // after the first check above but before we acquired the mutex. + if p.isShutdown.Load() { + return noop.NewTracerProvider().Tracer(name, opts...), true + } + t, ok := p.namedTracer[is] + if !ok { + t = &tracer{ + provider: p, + instrumentationScope: is, + } + p.namedTracer[is] = t + } + return t, ok + }() + if !ok { + // This code is outside the mutex to not hold the lock while calling third party logging code: + // - That code may do slow things like I/O, which would prolong the duration the lock is held, + // slowing down all tracing consumers. + // - Logging code may be instrumented with tracing and deadlock because it could try + // acquiring the same non-reentrant mutex. + global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + } + return t +} + +// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. +func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { + // This check prevents calls during a shutdown. + if p.isShutdown.Load() { + return + } + p.mu.Lock() + defer p.mu.Unlock() + // This check prevents calls after a shutdown. + if p.isShutdown.Load() { + return + } + + current := p.getSpanProcessors() + newSPS := make(spanProcessorStates, 0, len(current)+1) + newSPS = append(newSPS, current...) + newSPS = append(newSPS, newSpanProcessorState(sp)) + p.spanProcessors.Store(&newSPS) +} + +// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. +func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { + // This check prevents calls during a shutdown. + if p.isShutdown.Load() { + return + } + p.mu.Lock() + defer p.mu.Unlock() + // This check prevents calls after a shutdown. 
+	if p.isShutdown.Load() {
+		return
+	}
+	old := p.getSpanProcessors()
+	if len(old) == 0 {
+		return
+	}
+	spss := make(spanProcessorStates, len(old))
+	copy(spss, old)
+
+	// stop the span processor if it is started and remove it from the list
+	var stopOnce *spanProcessorState
+	var idx int
+	for i, sps := range spss {
+		if sps.sp == sp {
+			stopOnce = sps
+			idx = i
+		}
+	}
+	if stopOnce != nil {
+		stopOnce.state.Do(func() {
+			if err := sp.Shutdown(context.Background()); err != nil {
+				otel.Handle(err)
+			}
+		})
+	}
+	if len(spss) > 1 {
+		copy(spss[idx:], spss[idx+1:])
+	}
+	spss[len(spss)-1] = nil
+	spss = spss[:len(spss)-1]
+
+	p.spanProcessors.Store(&spss)
+}
+
+// ForceFlush immediately exports all spans that have not yet been exported for
+// all the registered span processors.
+func (p *TracerProvider) ForceFlush(ctx context.Context) error {
+	spss := p.getSpanProcessors()
+	if len(spss) == 0 {
+		return nil
+	}
+
+	for _, sps := range spss {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		if err := sps.sp.ForceFlush(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Shutdown shuts down TracerProvider. All registered span processors are shut down
+// in the order they were registered and any held computational resources are released.
+// After Shutdown is called, all methods are no-ops.
+func (p *TracerProvider) Shutdown(ctx context.Context) error {
+	// This check prevents deadlocks in case of recursive shutdown.
+	if p.isShutdown.Load() {
+		return nil
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	// This check prevents calls after a shutdown has already been done concurrently.
+	if !p.isShutdown.CompareAndSwap(false, true) { // did toggle?
+		return nil
+	}
+
+	var retErr error
+	for _, sps := range p.getSpanProcessors() {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		var err error
+		sps.state.Do(func() {
+			err = sps.sp.Shutdown(ctx)
+		})
+		if err != nil {
+			if retErr == nil {
+				retErr = err
+			} else {
+				// Poor man's list of errors
+				retErr = fmt.Errorf("%w; %w", retErr, err)
+			}
+		}
+	}
+	p.spanProcessors.Store(&spanProcessorStates{})
+	return retErr
+}
+
+func (p *TracerProvider) getSpanProcessors() spanProcessorStates {
+	return *(p.spanProcessors.Load())
+}
+
+// TracerProviderOption configures a TracerProvider.
+type TracerProviderOption interface {
+	apply(tracerProviderConfig) tracerProviderConfig
+}
+
+type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig
+
+func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig {
+	return fn(cfg)
+}
+
+// WithSyncer registers the exporter with the TracerProvider using a
+// SimpleSpanProcessor.
+//
+// This is not recommended for production use. The synchronous nature of the
+// SimpleSpanProcessor that will wrap the exporter makes it good for testing,
+// debugging, or showing examples of other features, but it will be slow and
+// have a high computational resource overhead. The WithBatcher option is
+// recommended for production use instead.
+func WithSyncer(e SpanExporter) TracerProviderOption {
+	return WithSpanProcessor(NewSimpleSpanProcessor(e))
+}
+
+// WithBatcher registers the exporter with the TracerProvider using a
+// BatchSpanProcessor configured with the passed opts.
+func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption {
+	return WithSpanProcessor(NewBatchSpanProcessor(e, opts...))
+}
+
+// WithSpanProcessor registers the SpanProcessor with a TracerProvider.
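+//
+// An illustrative sketch (sp is an assumed SpanProcessor, for example one
+// returned by NewBatchSpanProcessor):
+//
+//	tp := NewTracerProvider(WithSpanProcessor(sp))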
+func WithSpanProcessor(sp SpanProcessor) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.processors = append(cfg.processors, sp) + return cfg + }) +} + +// WithResource returns a TracerProviderOption that will configure the +// Resource r as a TracerProvider's Resource. The configured Resource is +// referenced by all the Tracers the TracerProvider creates. It represents the +// entity producing telemetry. +// +// If this option is not used, the TracerProvider will use the +// resource.Default() Resource by default. +func WithResource(r *resource.Resource) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + var err error + cfg.resource, err = resource.Merge(resource.Environment(), r) + if err != nil { + otel.Handle(err) + } + return cfg + }) +} + +// WithIDGenerator returns a TracerProviderOption that will configure the +// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator +// is used by the Tracers the TracerProvider creates to generate new Span and +// Trace IDs. +// +// If this option is not used, the TracerProvider will use a random number +// IDGenerator by default. +func WithIDGenerator(g IDGenerator) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + if g != nil { + cfg.idGenerator = g + } + return cfg + }) +} + +// WithSampler returns a TracerProviderOption that will configure the Sampler +// s as a TracerProvider's Sampler. The configured Sampler is used by the +// Tracers the TracerProvider creates to make their sampling decisions for the +// Spans they create. +// +// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER +// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used +// and the sampler is not configured through environment variables or the environment +// contains invalid/unsupported configuration, the TracerProvider will use a +// ParentBased(AlwaysSample) Sampler by default. +func WithSampler(s Sampler) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + if s != nil { + cfg.sampler = s + } + return cfg + }) +} + +// WithSpanLimits returns a TracerProviderOption that configures a +// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span +// created by a Tracer from the TracerProvider. +// +// If any field of sl is zero or negative it will be replaced with the default +// value for that field. +// +// If this or WithRawSpanLimits are not provided, the TracerProvider will use +// the limits defined by environment variables, or the defaults if unset. +// Refer to the NewSpanLimits documentation for information about this +// relationship. +// +// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited +// and zero limits. This option will be kept until the next major version +// incremented release. 
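+//
+// An illustrative migration sketch toward WithRawSpanLimits, starting from
+// the defaults returned by NewSpanLimits:
+//
+//	limits := NewSpanLimits()
+//	limits.AttributeCountLimit = 64
+//	tp := NewTracerProvider(WithRawSpanLimits(limits))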
+func WithSpanLimits(sl SpanLimits) TracerProviderOption { + if sl.AttributeValueLengthLimit <= 0 { + sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit + } + if sl.AttributeCountLimit <= 0 { + sl.AttributeCountLimit = DefaultAttributeCountLimit + } + if sl.EventCountLimit <= 0 { + sl.EventCountLimit = DefaultEventCountLimit + } + if sl.AttributePerEventCountLimit <= 0 { + sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit + } + if sl.LinkCountLimit <= 0 { + sl.LinkCountLimit = DefaultLinkCountLimit + } + if sl.AttributePerLinkCountLimit <= 0 { + sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit + } + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.spanLimits = sl + return cfg + }) +} + +// WithRawSpanLimits returns a TracerProviderOption that configures a +// TracerProvider to use these limits. These limits bound any Span created by +// a Tracer from the TracerProvider. +// +// The limits will be used as-is. Zero or negative values will not be changed +// to the default value like WithSpanLimits does. Setting a limit to zero will +// effectively disable the related resource it limits and setting to a +// negative value will mean that resource is unlimited. Consequentially, this +// means that the zero-value SpanLimits will disable all span resources. +// Because of this, limits should be constructed using NewSpanLimits and +// updated accordingly. +// +// If this or WithSpanLimits are not provided, the TracerProvider will use the +// limits defined by environment variables, or the defaults if unset. Refer to +// the NewSpanLimits documentation for information about this relationship. +func WithRawSpanLimits(limits SpanLimits) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.spanLimits = limits + return cfg + }) +} + +func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig { + for _, opt := range tracerProviderOptionsFromEnv() { + cfg = opt.apply(cfg) + } + + return cfg +} + +func tracerProviderOptionsFromEnv() []TracerProviderOption { + var opts []TracerProviderOption + + sampler, err := samplerFromEnv() + if err != nil { + otel.Handle(err) + } + + if sampler != nil { + opts = append(opts, WithSampler(sampler)) + } + + return opts +} + +// ensureValidTracerProviderConfig ensures that given TracerProviderConfig is valid. 
+func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig { + if cfg.sampler == nil { + cfg.sampler = ParentBased(AlwaysSample()) + } + if cfg.idGenerator == nil { + cfg.idGenerator = defaultIDGenerator() + } + if cfg.resource == nil { + cfg.resource = resource.Default() + } + return cfg +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go new file mode 100644 index 0000000000..d2d1f72466 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "errors" + "fmt" + "os" + "strconv" + "strings" +) + +const ( + tracesSamplerKey = "OTEL_TRACES_SAMPLER" + tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG" + + samplerAlwaysOn = "always_on" + samplerAlwaysOff = "always_off" + samplerTraceIDRatio = "traceidratio" + samplerParentBasedAlwaysOn = "parentbased_always_on" + samplerParsedBasedAlwaysOff = "parentbased_always_off" + samplerParentBasedTraceIDRatio = "parentbased_traceidratio" +) + +type errUnsupportedSampler string + +func (e errUnsupportedSampler) Error() string { + return fmt.Sprintf("unsupported sampler: %s", string(e)) +} + +var ( + errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0") + errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0") +) + +type samplerArgParseError struct { + parseErr error +} + +func (e samplerArgParseError) Error() string { + return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) +} + +func (e samplerArgParseError) Unwrap() error { + return e.parseErr +} + +func samplerFromEnv() (Sampler, error) { + sampler, ok := os.LookupEnv(tracesSamplerKey) + if !ok { + return nil, nil + } + + sampler = strings.ToLower(strings.TrimSpace(sampler)) + samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey) + samplerArg = strings.TrimSpace(samplerArg) + + switch sampler { + case samplerAlwaysOn: + return AlwaysSample(), nil + case samplerAlwaysOff: + return NeverSample(), nil + case samplerTraceIDRatio: + if !hasSamplerArg { + return TraceIDRatioBased(1.0), nil + } + return parseTraceIDRatio(samplerArg) + case samplerParentBasedAlwaysOn: + return ParentBased(AlwaysSample()), nil + case samplerParsedBasedAlwaysOff: + return ParentBased(NeverSample()), nil + case samplerParentBasedTraceIDRatio: + if !hasSamplerArg { + return ParentBased(TraceIDRatioBased(1.0)), nil + } + ratio, err := parseTraceIDRatio(samplerArg) + return ParentBased(ratio), err + default: + return nil, errUnsupportedSampler(sampler) + } +} + +func parseTraceIDRatio(arg string) (Sampler, error) { + v, err := strconv.ParseFloat(arg, 64) + if err != nil { + return TraceIDRatioBased(1.0), samplerArgParseError{err} + } + if v < 0.0 { + return TraceIDRatioBased(1.0), errNegativeTraceIDRatio + } + if v > 1.0 { + return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio + } + + return TraceIDRatioBased(v), nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go new file mode 100644 index 0000000000..ebb6df6c90 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -0,0 +1,282 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import 
"go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "encoding/binary" + "fmt" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Sampler decides whether a trace should be sampled and exported. +type Sampler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ShouldSample returns a SamplingResult based on a decision made from the + // passed parameters. + ShouldSample(parameters SamplingParameters) SamplingResult + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Description returns information describing the Sampler. + Description() string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext context.Context + TraceID trace.TraceID + Name string + Kind trace.SpanKind + Attributes []attribute.KeyValue + Links []trace.Link +} + +// SamplingDecision indicates whether a span is dropped, recorded and/or sampled. +type SamplingDecision uint8 + +// Valid sampling decisions. +const ( + // Drop will not record the span and all attributes/events will be dropped. + Drop SamplingDecision = iota + + // Record indicates the span's `IsRecording() == true`, but `Sampled` flag + // *must not* be set. + RecordOnly + + // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag + // *must* be set. + RecordAndSample +) + +// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate. +type SamplingResult struct { + Decision SamplingDecision + Attributes []attribute.KeyValue + Tracestate trace.TraceState +} + +type traceIDRatioSampler struct { + traceIDUpperBound uint64 + description string +} + +func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { + psc := trace.SpanContextFromContext(p.ParentContext) + x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 + if x < ts.traceIDUpperBound { + return SamplingResult{ + Decision: RecordAndSample, + Tracestate: psc.TraceState(), + } + } + return SamplingResult{ + Decision: Drop, + Tracestate: psc.TraceState(), + } +} + +func (ts traceIDRatioSampler) Description() string { + return ts.description +} + +// TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will +// always sample. Fractions < 0 are treated as zero. To respect the +// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used +// as a delegate of a `Parent` sampler. +// +//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` +func TraceIDRatioBased(fraction float64) Sampler { + if fraction >= 1 { + return AlwaysSample() + } + + if fraction <= 0 { + fraction = 0 + } + + return &traceIDRatioSampler{ + traceIDUpperBound: uint64(fraction * (1 << 63)), + description: fmt.Sprintf("TraceIDRatioBased{%g}", fraction), + } +} + +type alwaysOnSampler struct{} + +func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: RecordAndSample, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (as alwaysOnSampler) Description() string { + return "AlwaysOnSampler" +} + +// AlwaysSample returns a Sampler that samples every trace. 
+// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. +func AlwaysSample() Sampler { + return alwaysOnSampler{} +} + +type alwaysOffSampler struct{} + +func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: Drop, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (as alwaysOffSampler) Description() string { + return "AlwaysOffSampler" +} + +// NeverSample returns a Sampler that samples no traces. +func NeverSample() Sampler { + return alwaysOffSampler{} +} + +// ParentBased returns a sampler decorator which behaves differently, +// based on the parent of the span. If the span has no parent, +// the decorated sampler is used to make sampling decision. If the span has +// a parent, depending on whether the parent is remote and whether it +// is sampled, one of the following samplers will apply: +// - remoteParentSampled(Sampler) (default: AlwaysOn) +// - remoteParentNotSampled(Sampler) (default: AlwaysOff) +// - localParentSampled(Sampler) (default: AlwaysOn) +// - localParentNotSampled(Sampler) (default: AlwaysOff) +func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { + return parentBased{ + root: root, + config: configureSamplersForParentBased(samplers), + } +} + +type parentBased struct { + root Sampler + config samplerConfig +} + +func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig { + c := samplerConfig{ + remoteParentSampled: AlwaysSample(), + remoteParentNotSampled: NeverSample(), + localParentSampled: AlwaysSample(), + localParentNotSampled: NeverSample(), + } + + for _, so := range samplers { + c = so.apply(c) + } + + return c +} + +// samplerConfig is a group of options for parentBased sampler. +type samplerConfig struct { + remoteParentSampled, remoteParentNotSampled Sampler + localParentSampled, localParentNotSampled Sampler +} + +// ParentBasedSamplerOption configures the sampler for a particular sampling case. +type ParentBasedSamplerOption interface { + apply(samplerConfig) samplerConfig +} + +// WithRemoteParentSampled sets the sampler for the case of sampled remote parent. +func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption { + return remoteParentSampledOption{s} +} + +type remoteParentSampledOption struct { + s Sampler +} + +func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig { + config.remoteParentSampled = o.s + return config +} + +// WithRemoteParentNotSampled sets the sampler for the case of remote parent +// which is not sampled. +func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption { + return remoteParentNotSampledOption{s} +} + +type remoteParentNotSampledOption struct { + s Sampler +} + +func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig { + config.remoteParentNotSampled = o.s + return config +} + +// WithLocalParentSampled sets the sampler for the case of sampled local parent. +func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption { + return localParentSampledOption{s} +} + +type localParentSampledOption struct { + s Sampler +} + +func (o localParentSampledOption) apply(config samplerConfig) samplerConfig { + config.localParentSampled = o.s + return config +} + +// WithLocalParentNotSampled sets the sampler for the case of local parent +// which is not sampled. 
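+//
+// For example, an illustrative sketch composing the parent-based options
+// (all samplers shown exist in this package):
+//
+//	s := ParentBased(
+//		TraceIDRatioBased(0.25),
+//		WithRemoteParentSampled(AlwaysSample()),
+//		WithLocalParentNotSampled(NeverSample()),
+//	)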
+func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption { + return localParentNotSampledOption{s} +} + +type localParentNotSampledOption struct { + s Sampler +} + +func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig { + config.localParentNotSampled = o.s + return config +} + +func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult { + psc := trace.SpanContextFromContext(p.ParentContext) + if psc.IsValid() { + if psc.IsRemote() { + if psc.IsSampled() { + return pb.config.remoteParentSampled.ShouldSample(p) + } + return pb.config.remoteParentNotSampled.ShouldSample(p) + } + + if psc.IsSampled() { + return pb.config.localParentSampled.ShouldSample(p) + } + return pb.config.localParentNotSampled.ShouldSample(p) + } + return pb.root.ShouldSample(p) +} + +func (pb parentBased) Description() string { + return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+ + "remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}", + pb.root.Description(), + pb.config.remoteParentSampled.Description(), + pb.config.remoteParentNotSampled.Description(), + pb.config.localParentSampled.Description(), + pb.config.localParentNotSampled.Description(), + ) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go new file mode 100644 index 0000000000..554111bb4a --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -0,0 +1,121 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" +) + +// simpleSpanProcessor is a SpanProcessor that synchronously sends all +// completed Spans to a trace.Exporter immediately. +type simpleSpanProcessor struct { + exporterMu sync.Mutex + exporter SpanExporter + stopOnce sync.Once +} + +var _ SpanProcessor = (*simpleSpanProcessor)(nil) + +// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously +// send completed spans to the exporter immediately. +// +// This SpanProcessor is not recommended for production use. The synchronous +// nature of this SpanProcessor makes it good for testing, debugging, or showing +// examples of other features, but it will be slow and have a high computation +// resource usage overhead. The BatchSpanProcessor is recommended for production +// use instead. +func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { + ssp := &simpleSpanProcessor{ + exporter: exporter, + } + global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.") + + return ssp +} + +// OnStart does nothing. +func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} + +// OnEnd immediately exports a ReadOnlySpan. +func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { + ssp.exporterMu.Lock() + defer ssp.exporterMu.Unlock() + + if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { + if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { + otel.Handle(err) + } + } +} + +// Shutdown shuts down the exporter this SimpleSpanProcessor exports to. 
+func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	ssp.stopOnce.Do(func() {
+		stopFunc := func(exp SpanExporter) (<-chan error, func()) {
+			done := make(chan error)
+			return done, func() { done <- exp.Shutdown(ctx) }
+		}
+
+		// The exporter field of the simpleSpanProcessor needs to be zeroed to
+		// signal it is shut down, meaning all subsequent calls to OnEnd will
+		// be gracefully ignored. This needs to be done synchronously to avoid
+		// any race condition.
+		//
+		// A closure is used to keep reference to the exporter and then the
+		// field is zeroed. This ensures the simpleSpanProcessor is shut down
+		// before the exporter. This order is important as it avoids a potential
+		// deadlock. If the exporter shut down operation generates a span, that
+		// span would need to be exported. Meaning, OnEnd would be called and
+		// try acquiring the lock that is held here.
+		ssp.exporterMu.Lock()
+		done, shutdown := stopFunc(ssp.exporter)
+		ssp.exporter = nil
+		ssp.exporterMu.Unlock()
+
+		go shutdown()
+
+		// Wait for the exporter to shut down or the deadline to expire.
+		select {
+		case err = <-done:
+		case <-ctx.Done():
+			// It is possible for the exporter to have immediately shut down and
+			// the context to be done simultaneously. In that case this outer
+			// select statement will randomly choose a case. This will result in
+			// a different returned error for similar scenarios. Instead, double
+			// check if the exporter shut down at the same time and return that
+			// error if so. This will ensure consistency as well as ensure
+			// the caller knows the exporter shut down successfully (they can
+			// already determine if the deadline is expired given they passed
+			// the context).
+			select {
+			case err = <-done:
+			default:
+				err = ctx.Err()
+			}
+		}
+	})
+	return err
+}
+
+// ForceFlush does nothing as there is no data to flush.
+func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+	return nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent
+// this Span Processor.
+func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Exporter SpanExporter
+	}{
+		Type:     "SimpleSpanProcessor",
+		Exporter: ssp.exporter,
+	}
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
new file mode 100644
index 0000000000..32f862790c
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -0,0 +1,133 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// snapshot is a record of a span's state at a particular checkpointed time.
+// It is used as a read-only representation of that state.
+type snapshot struct {
+	name                  string
+	spanContext           trace.SpanContext
+	parent                trace.SpanContext
+	spanKind              trace.SpanKind
+	startTime             time.Time
+	endTime               time.Time
+	attributes            []attribute.KeyValue
+	events                []Event
+	links                 []Link
+	status                Status
+	childSpanCount        int
+	droppedAttributeCount int
+	droppedEventCount     int
+	droppedLinkCount      int
+	resource              *resource.Resource
+	instrumentationScope  instrumentation.Scope
+}
+
+var _ ReadOnlySpan = snapshot{}
+
+func (s snapshot) private() {}
+
+// Name returns the name of the span.
+func (s snapshot) Name() string {
+	return s.name
+}
+
+// SpanContext returns the unique SpanContext that identifies the span.
+func (s snapshot) SpanContext() trace.SpanContext {
+	return s.spanContext
+}
+
+// Parent returns the unique SpanContext that identifies the parent of the
+// span if one exists. If the span has no parent the returned SpanContext
+// will be invalid.
+func (s snapshot) Parent() trace.SpanContext {
+	return s.parent
+}
+
+// SpanKind returns the role the span plays in a Trace.
+func (s snapshot) SpanKind() trace.SpanKind {
+	return s.spanKind
+}
+
+// StartTime returns the time the span started recording.
+func (s snapshot) StartTime() time.Time {
+	return s.startTime
+}
+
+// EndTime returns the time the span stopped recording. It will be zero if
+// the span has not ended.
+func (s snapshot) EndTime() time.Time {
+	return s.endTime
+}
+
+// Attributes returns the defining attributes of the span.
+func (s snapshot) Attributes() []attribute.KeyValue {
+	return s.attributes
+}
+
+// Links returns all the links the span has to other spans.
+func (s snapshot) Links() []Link {
+	return s.links
+}
+
+// Events returns all the events that occurred within the span's
+// lifetime.
+func (s snapshot) Events() []Event {
+	return s.events
+}
+
+// Status returns the span's status.
+func (s snapshot) Status() Status {
+	return s.status
+}
+
+// InstrumentationScope returns information about the instrumentation
+// scope that created the span.
+func (s snapshot) InstrumentationScope() instrumentation.Scope {
+	return s.instrumentationScope
+}
+
+// InstrumentationLibrary returns information about the instrumentation
+// library that created the span.
+func (s snapshot) InstrumentationLibrary() instrumentation.Library {
+	return s.instrumentationScope
+}
+
+// Resource returns information about the entity that produced the span.
+func (s snapshot) Resource() *resource.Resource {
+	return s.resource
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s snapshot) DroppedAttributes() int {
+	return s.droppedAttributeCount
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s snapshot) DroppedLinks() int {
+	return s.droppedLinkCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s snapshot) DroppedEvents() int {
+	return s.droppedEventCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s snapshot) ChildSpanCount() int {
+	return s.childSpanCount
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
new file mode 100644
index 0000000000..ac90f1a260
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -0,0 +1,846 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"runtime"
+	rt "runtime/trace"
+	"slices"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// ReadOnlySpan allows reading information from the data structure underlying a
+// trace.Span. It is used in places where reading information from a span is
+// necessary but changing the span isn't necessary or allowed.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadOnlySpan interface {
+	// Name returns the name of the span.
+	Name() string
+	// SpanContext returns the unique SpanContext that identifies the span.
+	SpanContext() trace.SpanContext
+	// Parent returns the unique SpanContext that identifies the parent of the
+	// span if one exists. If the span has no parent the returned SpanContext
+	// will be invalid.
+	Parent() trace.SpanContext
+	// SpanKind returns the role the span plays in a Trace.
+	SpanKind() trace.SpanKind
+	// StartTime returns the time the span started recording.
+	StartTime() time.Time
+	// EndTime returns the time the span stopped recording. It will be zero if
+	// the span has not ended.
+	EndTime() time.Time
+	// Attributes returns the defining attributes of the span.
+	// The order of the returned attributes is not guaranteed to be stable across invocations.
+	Attributes() []attribute.KeyValue
+	// Links returns all the links the span has to other spans.
+	Links() []Link
+	// Events returns all the events that occurred within the span's
+	// lifetime.
+	Events() []Event
+	// Status returns the span's status.
+	Status() Status
+	// InstrumentationScope returns information about the instrumentation
+	// scope that created the span.
+	InstrumentationScope() instrumentation.Scope
+	// InstrumentationLibrary returns information about the instrumentation
+	// library that created the span.
+	// Deprecated: please use InstrumentationScope instead.
+	InstrumentationLibrary() instrumentation.Library
+	// Resource returns information about the entity that produced the span.
+	Resource() *resource.Resource
+	// DroppedAttributes returns the number of attributes dropped by the span
+	// due to limits being reached.
+	DroppedAttributes() int
+	// DroppedLinks returns the number of links dropped by the span due to
+	// limits being reached.
+	DroppedLinks() int
+	// DroppedEvents returns the number of events dropped by the span due to
+	// limits being reached.
+	DroppedEvents() int
+	// ChildSpanCount returns the count of spans that consider the span a
+	// direct parent.
+	ChildSpanCount() int
+
+	// A private method to prevent users from implementing the interface,
+	// so future additions to it will not violate compatibility.
+	private()
+}
+
+// ReadWriteSpan exposes the same methods as trace.Span and in addition allows
+// reading information from the underlying data structure.
+// This interface exposes the union of the methods of trace.Span (which is a
+// "write-only" span) and ReadOnlySpan. New methods for writing or reading span
+// information should be added under trace.Span or ReadOnlySpan, respectively.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadWriteSpan interface {
+	trace.Span
+	ReadOnlySpan
+}
+
+// recordingSpan is an implementation of the OpenTelemetry Span API
+// representing the individual component of a trace that is sampled.
+type recordingSpan struct {
+	embedded.Span
+
+	// mu protects the contents of this span.
+	mu sync.Mutex
+
+	// parent holds the parent span of this span as a trace.SpanContext.
+	parent trace.SpanContext
+
+	// spanKind represents the kind of this span as a trace.SpanKind.
+	spanKind trace.SpanKind
+
+	// name is the name of this span.
+	name string
+
+	// startTime is the time at which this span was started.
+	startTime time.Time
+
+	// endTime is the time at which this span was ended. It contains the zero
+	// value of time.Time until the span is ended.
+	endTime time.Time
+
+	// status is the status of this span.
+	status Status
+
+	// childSpanCount holds the number of child spans created for this span.
+	childSpanCount int
+
+	// spanContext holds the SpanContext of this span.
+	spanContext trace.SpanContext
+
+	// attributes is a collection of user provided key/values. The collection
+	// is constrained by a configurable maximum held by the parent
+	// TracerProvider. Attributes added after this maximum is reached are
+	// dropped. The number of dropped attributes is tracked and reported in
+	// the ReadOnlySpan exported when the span ends.
+	attributes        []attribute.KeyValue
+	droppedAttributes int
+	logDropAttrsOnce  sync.Once
+
+	// events are stored in FIFO queue capped by configured limit.
+	events evictedQueue[Event]
+
+	// links are stored in FIFO queue capped by configured limit.
+	links evictedQueue[Link]
+
+	// executionTracerTaskEnd ends the execution tracer span.
+	executionTracerTaskEnd func()
+
+	// tracer is the SDK tracer that created this span.
+	tracer *tracer
+}
+
+var (
+	_ ReadWriteSpan = (*recordingSpan)(nil)
+	_ runtimeTracer = (*recordingSpan)(nil)
+)
+
+// SpanContext returns the SpanContext of this span.
+func (s *recordingSpan) SpanContext() trace.SpanContext {
+	if s == nil {
+		return trace.SpanContext{}
+	}
+	return s.spanContext
+}
+
+// IsRecording reports whether this span is being recorded. If this span has
+// ended this will return false.
+func (s *recordingSpan) IsRecording() bool {
+	if s == nil {
+		return false
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	return s.endTime.IsZero()
+}
+
+// SetStatus sets the status of the Span in the form of a code and a
+// description, overriding previous values set. The description is only
+// included in the set status when the code is for an error. If this span is
+// not being recorded, then this method does nothing.
+func (s *recordingSpan) SetStatus(code codes.Code, description string) {
+	if !s.IsRecording() {
+		return
+	}
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.status.Code > code {
+		return
+	}
+
+	status := Status{Code: code}
+	if code == codes.Error {
+		status.Description = description
+	}
+
+	s.status = status
+}
+
+// SetAttributes sets attributes of this span.
+//
+// If a key from attributes already exists the value associated with that key
+// will be overwritten with the value contained in attributes.
+//
+// If this span is not being recorded, then this method does nothing.
+//
+// If adding attributes to the span would exceed the maximum amount of
+// attributes the span is configured to have, the last added attributes will
+// be dropped.
+func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
+	if !s.IsRecording() {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	limit := s.tracer.provider.spanLimits.AttributeCountLimit
+	if limit == 0 {
+		// No attributes allowed.
+		s.addDroppedAttr(len(attributes))
+		return
+	}
+
+	// If adding these attributes could exceed the capacity of s perform a
+	// de-duplication and truncation while adding to avoid over allocation.
+	if limit > 0 && len(s.attributes)+len(attributes) > limit {
+		s.addOverCapAttrs(limit, attributes)
+		return
+	}
+
+	// Otherwise, add without deduplication. When attributes are read they
+	// will be deduplicated, optimizing the operation.
+	s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes))
+	for _, a := range attributes {
+		if !a.Valid() {
+			// Drop all invalid attributes.
+			s.addDroppedAttr(1)
+			continue
+		}
+		a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+		s.attributes = append(s.attributes, a)
+	}
+}
+
+// Declared as a var so tests can override.
+var logDropAttrs = func() {
+	global.Warn("limit reached: dropping trace Span attributes")
+}
+
+// addDroppedAttr adds incr to the count of dropped attributes.
+//
+// The first, and only the first, time this method is called a warning will be
+// logged.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) addDroppedAttr(incr int) {
+	s.droppedAttributes += incr
+	s.logDropAttrsOnce.Do(logDropAttrs)
+}
+
+// addOverCapAttrs adds the attributes attrs to the span s while
+// de-duplicating the attributes of s and attrs and dropping attributes that
+// exceed the limit.
+//
+// This method assumes s.mu.Lock is held by the caller.
+//
+// This method should only be called when there is a possibility that adding
+// attrs to s will exceed the limit. Otherwise, attrs should be added to s
+// without checking for duplicates and all retrieval methods of the attributes
+// for s will de-duplicate as needed.
+//
+// This method assumes limit is a value > 0. The argument should be validated
+// by the caller.
+func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
+	// In order to not allocate more capacity to s.attributes than needed,
+	// prune and truncate this addition of attributes while adding.
+
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+	exists := make(map[attribute.Key]int)
+	s.dedupeAttrsFromRecord(&exists)
+
+	// Now that s.attributes is deduplicated, adding unique attributes up to
+	// the capacity of s will not over allocate s.attributes.
+	sum := len(attrs) + len(s.attributes)
+	s.attributes = slices.Grow(s.attributes, min(sum, limit))
+	for _, a := range attrs {
+		if !a.Valid() {
+			// Drop all invalid attributes.
+			s.addDroppedAttr(1)
+			continue
+		}
+
+		if idx, ok := exists[a.Key]; ok {
+			// Perform all updates before dropping, even when at capacity.
+ s.attributes[idx] = a + continue + } + + if len(s.attributes) >= limit { + // Do not just drop all of the remaining attributes, make sure + // updates are checked and performed. + s.addDroppedAttr(1) + } else { + a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) + s.attributes = append(s.attributes, a) + exists[a.Key] = len(s.attributes) - 1 + } + } +} + +// truncateAttr returns a truncated version of attr. Only string and string +// slice attribute values are truncated. String values are truncated to at +// most a length of limit. Each string slice value is truncated in this fashion +// (the slice length itself is unaffected). +// +// No truncation is performed for a negative limit. +func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { + if limit < 0 { + return attr + } + switch attr.Value.Type() { + case attribute.STRING: + if v := attr.Value.AsString(); len(v) > limit { + return attr.Key.String(safeTruncate(v, limit)) + } + case attribute.STRINGSLICE: + v := attr.Value.AsStringSlice() + for i := range v { + if len(v[i]) > limit { + v[i] = safeTruncate(v[i], limit) + } + } + return attr.Key.StringSlice(v) + } + return attr +} + +// safeTruncate truncates the string and guarantees valid UTF-8 is returned. +func safeTruncate(input string, limit int) string { + if trunc, ok := safeTruncateValidUTF8(input, limit); ok { + return trunc + } + trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) + return trunc +} + +// safeTruncateValidUTF8 returns a copy of the input string safely truncated to +// limit. The truncation is ensured to occur at the bounds of complete UTF-8 +// characters. If invalid encoding of UTF-8 is encountered, input is returned +// with false, otherwise, the truncated input will be returned with true. +func safeTruncateValidUTF8(input string, limit int) (string, bool) { + for cnt := 0; cnt <= limit; { + r, size := utf8.DecodeRuneInString(input[cnt:]) + if r == utf8.RuneError { + return input, false + } + + if cnt+size > limit { + return input[:cnt], true + } + cnt += size + } + return input, true +} + +// End ends the span. This method does nothing if the span is already ended or +// is not being recorded. +// +// The only SpanOption currently supported is WithTimestamp which will set the +// end time for a Span's life-cycle. +// +// If this method is called while panicking an error event is added to the +// Span before ending it and the panic is continued. +func (s *recordingSpan) End(options ...trace.SpanEndOption) { + // Do not start by checking if the span is being recorded which requires + // acquiring a lock. Make a minimal check that the span is not nil. + if s == nil { + return + } + + // Store the end time as soon as possible to avoid artificially increasing + // the span's duration in case some operation below takes a while. + et := monotonicEndTime(s.startTime) + + // Do relative expensive check now that we have an end time and see if we + // need to do any more processing. + if !s.IsRecording() { + return + } + + config := trace.NewSpanEndConfig(options...) + if recovered := recover(); recovered != nil { + // Record but don't stop the panic. 
+			defer panic(recovered)
+			opts := []trace.EventOption{
+				trace.WithAttributes(
+					semconv.ExceptionType(typeStr(recovered)),
+					semconv.ExceptionMessage(fmt.Sprint(recovered)),
+				),
+			}
+
+			if config.StackTrace() {
+				opts = append(opts, trace.WithAttributes(
+					semconv.ExceptionStacktrace(recordStackTrace()),
+				))
+			}
+
+			s.addEvent(semconv.ExceptionEventName, opts...)
+		}
+
+		if s.executionTracerTaskEnd != nil {
+			s.executionTracerTaskEnd()
+		}
+
+		s.mu.Lock()
+		// Setting endTime to non-zero marks the span as ended and not recording.
+		if config.Timestamp().IsZero() {
+			s.endTime = et
+		} else {
+			s.endTime = config.Timestamp()
+		}
+		s.mu.Unlock()
+
+		sps := s.tracer.provider.getSpanProcessors()
+		if len(sps) == 0 {
+			return
+		}
+		snap := s.snapshot()
+		for _, sp := range sps {
+			sp.sp.OnEnd(snap)
+		}
+}
+
+// monotonicEndTime returns the end time at present but offset from start,
+// monotonically.
+//
+// The monotonic clock is used in subtractions hence the duration since start
+// added back to start gives end as a monotonic time. See
+// https://golang.org/pkg/time/#hdr-Monotonic_Clocks
+func monotonicEndTime(start time.Time) time.Time {
+	return start.Add(time.Since(start))
+}
+
+// RecordError will record err as a span event for this span. An additional
+// call to SetStatus is required if the Status of the Span should be set to
+// Error; this method does not change the Span status. If this span is not
+// being recorded or err is nil, then this method does nothing.
+func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
+	if s == nil || err == nil || !s.IsRecording() {
+		return
+	}
+
+	opts = append(opts, trace.WithAttributes(
+		semconv.ExceptionType(typeStr(err)),
+		semconv.ExceptionMessage(err.Error()),
+	))
+
+	c := trace.NewEventConfig(opts...)
+	if c.StackTrace() {
+		opts = append(opts, trace.WithAttributes(
+			semconv.ExceptionStacktrace(recordStackTrace()),
+		))
+	}
+
+	s.addEvent(semconv.ExceptionEventName, opts...)
+}
+
+func typeStr(i interface{}) string {
+	t := reflect.TypeOf(i)
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
+		return t.String()
+	}
+	return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func recordStackTrace() string {
+	stackTrace := make([]byte, 2048)
+	n := runtime.Stack(stackTrace, false)
+
+	return string(stackTrace[0:n])
+}
+
+// AddEvent adds an event with the provided name and options. If this span is
+// not being recorded, then this method does nothing.
+func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
+	if !s.IsRecording() {
+		return
+	}
+	s.addEvent(name, o...)
+}
+
+func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
+	c := trace.NewEventConfig(o...)
+	e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
+
+	// Discard attributes over limit.
+	limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit
+	if limit == 0 {
+		// Drop all attributes.
+		e.DroppedAttributeCount = len(e.Attributes)
+		e.Attributes = nil
+	} else if limit > 0 && len(e.Attributes) > limit {
+		// Drop over capacity.
+		e.DroppedAttributeCount = len(e.Attributes) - limit
+		e.Attributes = e.Attributes[:limit]
+	}
+
+	s.mu.Lock()
+	s.events.add(e)
+	s.mu.Unlock()
+}
+
+// SetName sets the name of this span. If this span is not being recorded,
+// then this method does nothing.
+func (s *recordingSpan) SetName(name string) {
+	if !s.IsRecording() {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.name = name
+}
+
+// Name returns the name of this span.
+
+func typeStr(i interface{}) string {
+	t := reflect.TypeOf(i)
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
+		return t.String()
+	}
+	return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func recordStackTrace() string {
+	stackTrace := make([]byte, 2048)
+	n := runtime.Stack(stackTrace, false)
+
+	return string(stackTrace[0:n])
+}
+
+// AddEvent adds an event with the provided name and options. If this span is
+// not being recorded then this method does nothing.
+func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
+	if !s.IsRecording() {
+		return
+	}
+	s.addEvent(name, o...)
+}
+
+func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
+	c := trace.NewEventConfig(o...)
+	e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
+
+	// Discard attributes over limit.
+	limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit
+	if limit == 0 {
+		// Drop all attributes.
+		e.DroppedAttributeCount = len(e.Attributes)
+		e.Attributes = nil
+	} else if limit > 0 && len(e.Attributes) > limit {
+		// Drop over capacity.
+		e.DroppedAttributeCount = len(e.Attributes) - limit
+		e.Attributes = e.Attributes[:limit]
+	}
+
+	s.mu.Lock()
+	s.events.add(e)
+	s.mu.Unlock()
+}
+
+// SetName sets the name of this span. If this span is not being recorded then
+// this method does nothing.
+func (s *recordingSpan) SetName(name string) {
+	if !s.IsRecording() {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.name = name
+}
+
+// Name returns the name of this span.
+func (s *recordingSpan) Name() string {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.name
+}
+
+// Parent returns the SpanContext of this span's parent span.
+func (s *recordingSpan) Parent() trace.SpanContext {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.parent
+}
+
+// SpanKind returns the SpanKind of this span.
+func (s *recordingSpan) SpanKind() trace.SpanKind {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.spanKind
+}
+
+// StartTime returns the time this span started.
+func (s *recordingSpan) StartTime() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.startTime
+}
+
+// EndTime returns the time this span ended. For spans that have not yet
+// ended, the returned value will be the zero value of time.Time.
+func (s *recordingSpan) EndTime() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.endTime
+}
+
+// Attributes returns the attributes of this span.
+//
+// The order of the returned attributes is not guaranteed to be stable.
+func (s *recordingSpan) Attributes() []attribute.KeyValue {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.dedupeAttrs()
+	return s.attributes
+}
+
+// dedupeAttrs deduplicates the attributes of s to fit capacity.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrs() {
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+	exists := make(map[attribute.Key]int)
+	s.dedupeAttrsFromRecord(&exists)
+}
+
+// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
+// using record as the record of unique attribute keys to their index.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) {
+	// Use the fact that slices share the same backing array.
+	unique := s.attributes[:0]
+	for _, a := range s.attributes {
+		if idx, ok := (*record)[a.Key]; ok {
+			unique[idx] = a
+		} else {
+			unique = append(unique, a)
+			(*record)[a.Key] = len(unique) - 1
+		}
+	}
+	// s.attributes have element types of attribute.KeyValue. These types are
+	// not pointers and they themselves do not contain pointer fields,
+	// therefore the duplicate values do not need to be zeroed for them to be
+	// garbage collected.
+	s.attributes = unique
+}
+
+// Links returns the links of this span.
+func (s *recordingSpan) Links() []Link {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if len(s.links.queue) == 0 {
+		return []Link{}
+	}
+	return s.links.copy()
+}
+
+// Events returns the events of this span.
+func (s *recordingSpan) Events() []Event {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if len(s.events.queue) == 0 {
+		return []Event{}
+	}
+	return s.events.copy()
+}
+
+// Status returns the status of this span.
+func (s *recordingSpan) Status() Status {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.status
+}
+
+// InstrumentationScope returns the instrumentation.Scope associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.instrumentationScope
+}
+
+// InstrumentationLibrary returns the instrumentation.Library associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.tracer.instrumentationScope
+}
+
+// Resource returns the Resource associated with the Tracer that created this
+// span.
+func (s *recordingSpan) Resource() *resource.Resource { + s.mu.Lock() + defer s.mu.Unlock() + return s.tracer.provider.resource +} + +func (s *recordingSpan) AddLink(link trace.Link) { + if !s.IsRecording() { + return + } + if !link.SpanContext.IsValid() && len(link.Attributes) == 0 && + link.SpanContext.TraceState().Len() == 0 { + return + } + + l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} + + // Discard attributes over limit. + limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit + if limit == 0 { + // Drop all attributes. + l.DroppedAttributeCount = len(l.Attributes) + l.Attributes = nil + } else if limit > 0 && len(l.Attributes) > limit { + l.DroppedAttributeCount = len(l.Attributes) - limit + l.Attributes = l.Attributes[:limit] + } + + s.mu.Lock() + s.links.add(l) + s.mu.Unlock() +} + +// DroppedAttributes returns the number of attributes dropped by the span +// due to limits being reached. +func (s *recordingSpan) DroppedAttributes() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.droppedAttributes +} + +// DroppedLinks returns the number of links dropped by the span due to limits +// being reached. +func (s *recordingSpan) DroppedLinks() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.links.droppedCount +} + +// DroppedEvents returns the number of events dropped by the span due to +// limits being reached. +func (s *recordingSpan) DroppedEvents() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.events.droppedCount +} + +// ChildSpanCount returns the count of spans that consider the span a +// direct parent. +func (s *recordingSpan) ChildSpanCount() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.childSpanCount +} + +// TracerProvider returns a trace.TracerProvider that can be used to generate +// additional Spans on the same telemetry pipeline as the current Span. +func (s *recordingSpan) TracerProvider() trace.TracerProvider { + return s.tracer.provider +} + +// snapshot creates a read-only copy of the current state of the span. +func (s *recordingSpan) snapshot() ReadOnlySpan { + var sd snapshot + s.mu.Lock() + defer s.mu.Unlock() + + sd.endTime = s.endTime + sd.instrumentationScope = s.tracer.instrumentationScope + sd.name = s.name + sd.parent = s.parent + sd.resource = s.tracer.provider.resource + sd.spanContext = s.spanContext + sd.spanKind = s.spanKind + sd.startTime = s.startTime + sd.status = s.status + sd.childSpanCount = s.childSpanCount + + if len(s.attributes) > 0 { + s.dedupeAttrs() + sd.attributes = s.attributes + } + sd.droppedAttributeCount = s.droppedAttributes + if len(s.events.queue) > 0 { + sd.events = s.events.copy() + sd.droppedEventCount = s.events.droppedCount + } + if len(s.links.queue) > 0 { + sd.links = s.links.copy() + sd.droppedLinkCount = s.links.droppedCount + } + return &sd +} + +func (s *recordingSpan) addChild() { + if !s.IsRecording() { + return + } + s.mu.Lock() + s.childSpanCount++ + s.mu.Unlock() +} + +func (*recordingSpan) private() {} + +// runtimeTrace starts a "runtime/trace".Task for the span and returns a +// context containing the task. +func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context { + if !rt.IsEnabled() { + // Avoid additional overhead if runtime/trace is not enabled. + return ctx + } + nctx, task := rt.NewTask(ctx, s.name) + + s.mu.Lock() + s.executionTracerTaskEnd = task.End + s.mu.Unlock() + + return nctx +} + +// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API +// that wraps a SpanContext. 
It performs no operations other than to return
+// the wrapped SpanContext or TracerProvider that created it.
+type nonRecordingSpan struct {
+	embedded.Span
+
+	// tracer is the SDK tracer that created this span.
+	tracer *tracer
+	sc     trace.SpanContext
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (nonRecordingSpan) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+// TracerProvider returns the trace.TracerProvider that provided the Tracer
+// that created this span.
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
+
+func isRecording(s SamplingResult) bool {
+	return s.Decision == RecordOnly || s.Decision == RecordAndSample
+}
+
+func isSampled(s SamplingResult) bool {
+	return s.Decision == RecordAndSample
+}
+
+// Status is the classified state of a Span.
+type Status struct {
+	// Code is an identifier of a Span's state classification.
+	Code codes.Code
+	// Description is a user hint about why that status was set. It is only
+	// applicable when Code is Error.
+	Description string
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
new file mode 100644
index 0000000000..6bdda3d94a
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "context"
+
+// SpanExporter handles the delivery of spans to external receivers. This is
+// the final component in the trace export pipeline.
+type SpanExporter interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ExportSpans exports a batch of spans.
+	//
+	// This function is called synchronously, so there is no concurrency
+	// safety requirement. However, due to the synchronous calling pattern,
+	// it is critical that all timeouts and cancellations contained in the
+	// passed context must be honored.
+	//
+	// Any retry logic must be contained in this function. The SDK that
+	// calls this function will not implement any retry logic. All errors
+	// returned by this function are considered unrecoverable and will be
+	// reported to a configured error Handler.
+	ExportSpans(ctx context.Context, spans []ReadOnlySpan) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown notifies the exporter of a pending halt to operations. The
+	// exporter is expected to perform any cleanup or synchronization it
+	// requires while honoring all timeouts and cancellations contained in
+	// the passed context.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
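The SpanExporter contract above leaves retry logic and context handling entirely to the implementation. A minimal conforming exporter, sketched for illustration (nameExporter is not part of the vendored tree):

```go
package main

import (
	"context"
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// nameExporter is a toy exporter that prints span names. Per the contract
// above, retries (none here) belong inside ExportSpans, and the passed
// context's deadline must be honored.
type nameExporter struct{}

var _ sdktrace.SpanExporter = nameExporter{}

func (nameExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
	for _, s := range spans {
		if err := ctx.Err(); err != nil {
			return err // honor timeouts and cancellation
		}
		fmt.Println(s.Name())
	}
	return nil
}

// Shutdown has no resources to release in this sketch.
func (nameExporter) Shutdown(context.Context) error { return nil }

func main() {
	// WithSyncer wraps the exporter in a simple (synchronous) span processor.
	tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(nameExporter{}))
	defer func() { _ = tp.Shutdown(context.Background()) }()
	_, span := tp.Tracer("example").Start(context.Background(), "hello")
	span.End()
}
```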
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
new file mode 100644
index 0000000000..bec5e20978
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
@@ -0,0 +1,114 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "go.opentelemetry.io/otel/sdk/internal/env"
+
+const (
+	// DefaultAttributeValueLengthLimit is the default maximum allowed
+	// attribute value length, unlimited.
+	DefaultAttributeValueLengthLimit = -1
+
+	// DefaultAttributeCountLimit is the default maximum number of attributes
+	// a span can have.
+	DefaultAttributeCountLimit = 128
+
+	// DefaultEventCountLimit is the default maximum number of events a span
+	// can have.
+	DefaultEventCountLimit = 128
+
+	// DefaultLinkCountLimit is the default maximum number of links a span can
+	// have.
+	DefaultLinkCountLimit = 128
+
+	// DefaultAttributePerEventCountLimit is the default maximum number of
+	// attributes a span event can have.
+	DefaultAttributePerEventCountLimit = 128
+
+	// DefaultAttributePerLinkCountLimit is the default maximum number of
+	// attributes a span link can have.
+	DefaultAttributePerLinkCountLimit = 128
+)
+
+// SpanLimits represents the limits of a span.
+type SpanLimits struct {
+	// AttributeValueLengthLimit is the maximum allowed attribute value length.
+	//
+	// This limit only applies to string and string slice attribute values.
+	// Any string longer than this value will be truncated to this length.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributeValueLengthLimit int
+
+	// AttributeCountLimit is the maximum allowed span attribute count. Any
+	// attribute added to a span once this limit is reached will be dropped.
+	//
+	// Setting this to zero means no attributes will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributeCountLimit int
+
+	// EventCountLimit is the maximum allowed span event count. Any event
+	// added to a span once this limit is reached will still be added, but
+	// the oldest event will be dropped.
+	//
+	// Setting this to zero means no events will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	EventCountLimit int
+
+	// LinkCountLimit is the maximum allowed span link count. Any link added
+	// to a span once this limit is reached will still be added, but the
+	// oldest link will be dropped.
+	//
+	// Setting this to zero means no links will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	LinkCountLimit int
+
+	// AttributePerEventCountLimit is the maximum number of attributes allowed
+	// per span event. Any attribute added after this limit is reached will be
+	// dropped.
+	//
+	// Setting this to zero means no attributes will be recorded for events.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributePerEventCountLimit int
+
+	// AttributePerLinkCountLimit is the maximum number of attributes allowed
+	// per span link. Any attribute added after this limit is reached will be
+	// dropped.
+	//
+	// Setting this to zero means no attributes will be recorded for links.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributePerLinkCountLimit int
+}
+
+// NewSpanLimits returns a SpanLimits with all limits set to the value their
+// corresponding environment variable holds, or the default if unset.
+//
+// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
+// (default: unlimited)
+//
+// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128)
+//
+// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128)
+//
+// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default:
+// 128)
+//
+// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128)
+//
+// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128)
+func NewSpanLimits() SpanLimits {
+	return SpanLimits{
+		AttributeValueLengthLimit:   env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit),
+		AttributeCountLimit:         env.SpanAttributeCount(DefaultAttributeCountLimit),
+		EventCountLimit:             env.SpanEventCount(DefaultEventCountLimit),
+		LinkCountLimit:              env.SpanLinkCount(DefaultLinkCountLimit),
+		AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit),
+		AttributePerLinkCountLimit:  env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit),
+	}
+}
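NewSpanLimits above seeds limits from the OTEL_* environment variables; explicit limits can also be handed to a provider. A hedged sketch using WithRawSpanLimits, the option in this package that applies limits without further environment overrides:

```go
package main

import sdktrace "go.opentelemetry.io/otel/sdk/trace"

func main() {
	// Start from the environment-aware defaults, then tighten two limits.
	limits := sdktrace.NewSpanLimits()
	limits.AttributeValueLengthLimit = 256 // long string values get truncated
	limits.AttributeCountLimit = 64        // further attributes are dropped

	// WithRawSpanLimits applies the limits exactly as given.
	tp := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
	_ = tp // in real code, install tp as the global TracerProvider
}
```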
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
new file mode 100644
index 0000000000..af7f9177fc
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+)
+
+// SpanProcessor is a processing pipeline for spans in the trace signal.
+// SpanProcessors are registered with a TracerProvider, are called at the
+// start and end of a Span's lifecycle, and are called in the order they are
+// registered.
+type SpanProcessor interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnStart is called when a span is started. It is called synchronously
+	// and should not block.
+	OnStart(parent context.Context, s ReadWriteSpan)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnEnd is called when a span is finished. It is called synchronously and
+	// hence should not block.
+	OnEnd(s ReadOnlySpan)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown is called when the SDK shuts down. Any cleanup or release of
+	// resources held by the processor should be done in this call.
+	//
+	// Calls to OnStart, OnEnd, or ForceFlush after this has been called
+	// should be ignored.
+	//
+	// All timeouts and cancellations contained in ctx must be honored, this
+	// should not block indefinitely.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ForceFlush exports all ended spans to the configured Exporter that have not yet
+	// been exported. It should only be called when absolutely necessary, such as when
+	// using a FaaS provider that may suspend the process after an invocation, but before
+	// the Processor can export the completed spans.
+	ForceFlush(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type spanProcessorState struct {
+	sp    SpanProcessor
+	state sync.Once
+}
+
+func newSpanProcessorState(sp SpanProcessor) *spanProcessorState {
+	return &spanProcessorState{sp: sp}
+}
+
+type spanProcessorStates []*spanProcessorState
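A SpanProcessor sketch for illustration; OnStart and OnEnd run synchronously on the span hot path, so a real processor (like this package's batch processor) hands work off instead of blocking:

```go
package main

import (
	"context"
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// nameLogger is a toy processor that logs span names; it is illustrative
// only and keeps OnStart/OnEnd trivially cheap, per the contract above.
type nameLogger struct{}

func (nameLogger) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) {
	fmt.Println("started:", s.Name())
}
func (nameLogger) OnEnd(s sdktrace.ReadOnlySpan)    { fmt.Println("ended:", s.Name()) }
func (nameLogger) Shutdown(context.Context) error   { return nil }
func (nameLogger) ForceFlush(context.Context) error { return nil }

func main() {
	tp := sdktrace.NewTracerProvider()
	tp.RegisterSpanProcessor(nameLogger{})
	_, span := tp.Tracer("example").Start(context.Background(), "work")
	span.End()
	_ = tp.Shutdown(context.Background())
}
```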
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
new file mode 100644
index 0000000000..43419d3b54
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+type tracer struct {
+	embedded.Tracer
+
+	provider             *TracerProvider
+	instrumentationScope instrumentation.Scope
+}
+
+var _ trace.Tracer = &tracer{}
+
+// Start starts a Span and returns it along with a context containing it.
+//
+// The Span is created with the provided name and as a child of any existing
+// span context found in the passed context. The created Span will be
+// configured appropriately by any SpanOption passed.
+func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) {
+	config := trace.NewSpanStartConfig(options...)
+
+	if ctx == nil {
+		// Prevent trace.ContextWithSpan from panicking.
+		ctx = context.Background()
+	}
+
+	// For local spans created by this SDK, track child span count.
+	if p := trace.SpanFromContext(ctx); p != nil {
+		if sdkSpan, ok := p.(*recordingSpan); ok {
+			sdkSpan.addChild()
+		}
+	}
+
+	s := tr.newSpan(ctx, name, &config)
+	if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
+		sps := tr.provider.getSpanProcessors()
+		for _, sp := range sps {
+			sp.sp.OnStart(ctx, rw)
+		}
+	}
+	if rtt, ok := s.(runtimeTracer); ok {
+		ctx = rtt.runtimeTrace(ctx)
+	}
+
+	return trace.ContextWithSpan(ctx, s), s
+}
+
+type runtimeTracer interface {
+	// runtimeTrace starts a "runtime/trace".Task for the span and
+	// returns a context containing the task.
+	runtimeTrace(ctx context.Context) context.Context
+}
+
+// newSpan returns a new configured span.
+func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {
+	// If told explicitly to make this a new root, use a zero value SpanContext
+	// as a parent, which contains an invalid trace ID and is not remote.
+	var psc trace.SpanContext
+	if config.NewRoot() {
+		ctx = trace.ContextWithSpanContext(ctx, psc)
+	} else {
+		psc = trace.SpanContextFromContext(ctx)
+	}
+
+	// If there is a valid parent trace ID, use it to ensure the continuity of
+	// the trace. Always generate a new span ID so other components can rely
+	// on a unique span ID, even if the Span is non-recording.
+	var tid trace.TraceID
+	var sid trace.SpanID
+	if !psc.TraceID().IsValid() {
+		tid, sid = tr.provider.idGenerator.NewIDs(ctx)
+	} else {
+		tid = psc.TraceID()
+		sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
+	}
+
+	samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
+		ParentContext: ctx,
+		TraceID:       tid,
+		Name:          name,
+		Kind:          config.SpanKind(),
+		Attributes:    config.Attributes(),
+		Links:         config.Links(),
+	})
+
+	scc := trace.SpanContextConfig{
+		TraceID:    tid,
+		SpanID:     sid,
+		TraceState: samplingResult.Tracestate,
+	}
+	if isSampled(samplingResult) {
+		scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
+	} else {
+		scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
+	}
+	sc := trace.NewSpanContext(scc)
+
+	if !isRecording(samplingResult) {
+		return tr.newNonRecordingSpan(sc)
+	}
+	return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+}
+
+// newRecordingSpan returns a new configured recordingSpan.
+func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan {
+	startTime := config.Timestamp()
+	if startTime.IsZero() {
+		startTime = time.Now()
+	}
+
+	s := &recordingSpan{
+		// Do not pre-allocate the attributes slice here! Doing so will
+		// allocate memory that is likely never going to be used, or if used,
+		// will be over-sized. The default Go compiler has been tested to
+		// dynamically allocate needed space very well. Benchmarking has shown
+		// it to be more performant than what we can predetermine here,
+		// especially for the common use case of few to no added
+		// attributes.
+
+		parent:      psc,
+		spanContext: sc,
+		spanKind:    trace.ValidateSpanKind(config.SpanKind()),
+		name:        name,
+		startTime:   startTime,
+		events:      newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit),
+		links:       newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit),
+		tracer:      tr,
+	}
+
+	for _, l := range config.Links() {
+		s.AddLink(l)
+	}
+
+	s.SetAttributes(sr.Attributes...)
+	s.SetAttributes(config.Attributes()...)
+
+	return s
+}
+
+// newNonRecordingSpan returns a new configured nonRecordingSpan.
+func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
+	return nonRecordingSpan{tracer: tr, sc: sc}
+}
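newSpan above consults the provider's Sampler and only allocates a recordingSpan when the decision says to record. A sketch wiring a ParentBased ratio sampler so children follow their parent's decision:

```go
package main

import (
	"context"
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Sample roughly half of trace roots; non-root spans inherit their
	// parent's sampled flag, which is the flow newSpan implements above.
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.5))),
	)
	ctx, parent := tp.Tracer("example").Start(context.Background(), "root")
	_, child := tp.Tracer("example").Start(ctx, "child")
	// Unsampled spans from this SDK are non-recording, so these two values
	// always match: both true or both false.
	fmt.Println(parent.IsRecording(), child.IsRecording())
	child.End()
	parent.End()
}
```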
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
new file mode 100644
index 0000000000..b84dd2c5ee
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+// version is the current release version of the trace SDK in use.
+func version() string {
+	return "1.16.0-rc.1"
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/sdk/version.go b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/version.go
new file mode 100644
index 0000000000..33d065a7cb
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk // import "go.opentelemetry.io/otel/sdk"
+
+// Version is the current release version of the OpenTelemetry SDK in use.
+func Version() string {
+	return "1.28.0"
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md
new file mode 100644
index 0000000000..87b842c5d1
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.17.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0)
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
new file mode 100644
index 0000000000..e087c9c04d
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.17.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
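doc.go above describes the package; in practice the generated constructors (vendored below) build typed attribute.KeyValue pairs. A hedged usage sketch; the "feature_flag" event name follows the spec's feature-flag convention and is an assumption of this sketch:

```go
package main

import (
	"context"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()

	_, span := tp.Tracer("example").Start(context.Background(), "evaluate-flag")
	defer span.End()

	// The constructors produce attributes with the spec-defined keys,
	// e.g. "feature_flag.key", without hand-typing key strings.
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey("logo-color"),
		semconv.FeatureFlagProviderName("Flag Manager"),
		semconv.FeatureFlagVariant("red"),
	))
}
```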
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
new file mode 100644
index 0000000000..c7b804bbe2
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
@@ -0,0 +1,188 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It represents a value that
+	// SHOULD be a semantic identifier for the flag value. If one is
+	// unavailable, a stringified version of the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents a value that
+// SHOULD be a semantic identifier for the flag value. If one is unavailable, a
+// stringified version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or
+	// sent message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It represents a value that MUST be calculated as
+	// two different counters starting from `1`, one for sent messages and one
+	// for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents a value that MUST be calculated as two
+// different counters starting from `1`, one for sent messages and one for
+// received messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It represents a flag that
+	// SHOULD be set to true if the exception event is recorded at a point
+	// where it is known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example above](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents a flag that SHOULD
+// be set to true if the exception event is recorded at a point where it is
+// known that the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
new file mode 100644
index 0000000000..137acc67de
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
+)
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
new file mode 100644
index 0000000000..d318221e59
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// HTTP scheme attributes.
+var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go new file mode 100644 index 0000000000..7e365e82ce --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -0,0 +1,1999 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserUserAgentKey is the attribute Key conforming to the + // "browser.user_agent" semantic conventions. 
It represents the full
+	// user-agent string provided by the browser
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)
+	// AppleWebKit/537.36 (KHTML, '
+	// 'like Gecko) Chrome/95.0.4638.54 Safari/537.36'
+	// Note: The user-agent value SHOULD be provided only from browsers that do
+	// not have a mechanism to retrieve brands and platform individually from
+	// the User-Agent Client Hints API. To retrieve the value, the legacy
+	// `navigator.userAgent` API can be used.
+	BrowserUserAgentKey = attribute.Key("browser.user_agent")
+
+	// BrowserLanguageKey is the attribute Key conforming to the
+	// "browser.language" semantic conventions. It represents the preferred
+	// language of the user using the browser
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'en', 'en-US', 'fr', 'fr-FR'
+	// Note: This value is intended to be taken from the Navigator API
+	// `navigator.language`.
+	BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+	return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+	return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+	return BrowserMobileKey.Bool(val)
+}
+
+// BrowserUserAgent returns an attribute KeyValue conforming to the
+// "browser.user_agent" semantic conventions. It represents the full user-agent
+// string provided by the browser
+func BrowserUserAgent(val string) attribute.KeyValue {
+	return BrowserUserAgentKey.String(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+	return BrowserLanguageKey.String(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region"
+	// semantic conventions. It represents the geographical region the resource
+	// is running in.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for
+	// example [Alibaba Cloud
+	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+	// [Azure
+	// regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+	// [Google Cloud regions](https://cloud.google.com/about/locations), or
+	// [Tencent Cloud
+	// regions](https://intl.cloud.tencent.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the
+	// availability zone where the resource is running. Cloud regions often
+	// have multiple, isolated locations known as zones to increase
+	// availability.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+	// Alibaba Cloud
+	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	CloudProviderAWS = CloudProviderKey.String("aws")
+	// Microsoft Azure
+	CloudProviderAzure = CloudProviderKey.String("azure")
+	// Google Cloud Platform
+	CloudProviderGCP = CloudProviderKey.String("gcp")
+	// IBM Cloud
+	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+	// Alibaba Cloud Elastic Compute Service
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+	// Red Hat OpenShift on Alibaba Cloud
+	CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+	// AWS Elastic Compute Cloud
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Red Hat OpenShift on AWS (ROSA)
+	CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+	// Azure Virtual Machines
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+	// Azure Container Instances
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+	// Azure Kubernetes Service
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+	// Azure Functions
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+	// Azure App Service
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+	// Azure Red Hat OpenShift
+	CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+	// Google Cloud Compute Engine (GCE)
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Red Hat OpenShift on Google Cloud
+	CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift")
+	// Red Hat OpenShift on IBM Cloud
+	CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running in.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone where the resource is running. Cloud regions often
+// have multiple, isolated locations known as zones to increase
+// availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
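The cloud.* keys and constructors above usually describe a Resource rather than individual spans. A hedged sketch; semconv.SchemaURL is assumed to be defined in this package's schema.go, which is not part of this hunk:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

func main() {
	// Resource attributes describe the entity producing telemetry; the
	// typed constructors keep the keys spec-conformant.
	res := resource.NewWithAttributes(
		semconv.SchemaURL, // assumed constant from this semconv package
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSEKS,
		semconv.CloudRegion("us-east-1"),
		semconv.CloudAvailabilityZone("us-east-1c"),
	)
	fmt.Println(res)
}
```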
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+	// AWSECSContainerARNKey is the attribute Key conforming to the
+	// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+	// Resource Name (ARN) of an [ECS container
+	// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+	// AWSECSClusterARNKey is the attribute Key conforming to the
+	// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+	// [ECS
+	// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. 
+func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. 
+func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. 
+func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. 
It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. 
The
+	// following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud
+	// providers/products:
+	//
+	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	// followed by a forward slash followed by the function name (this form
+	// can also be seen in the resource JSON for the function).
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider (see also the `faas.id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSIDKey is the attribute Key conforming to the "faas.id" semantic
+	// conventions. It represents the unique ID of the single function that
+	// this runtime instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+	// Note: On some cloud providers, it may not be possible to determine the
+	// full ID at startup,
+	// so consider setting `faas.id` as a span attribute instead.
+	//
+	// The exact value to use for `faas.id` depends on the cloud provider:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+	// Take care not to use the "invoked ARN" directly but replace any
+	// [alias
+	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+	// with the resolved function version, as the same runtime instance may
+	// be invokable with
+	// multiple different aliases.
+	// * **GCP:** The [URI of the
+	// resource](https://cloud.google.com/iam/docs/full-resource-names)
+	// * **Azure:** The [Fully Qualified Resource
+	// ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+	// of the invoked function,
+	// *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider.
+	FaaSIDKey = attribute.Key("faas.id")
+
+	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
+	// semantic conventions. It represents the immutable version of the
+	// function being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '26', 'pinkfroid-00002'
+	// Note: Depending on the cloud provider and platform, use:
+	//
+	// * **AWS Lambda:** The [function
+	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+	// (an integer represented as a decimal string).
+	// * **Google Cloud Run:** The
+	// [revision](https://cloud.google.com/run/docs/managing/revisions)
+	// (i.e., the function name plus the revision suffix).
+	// * **Google Cloud Functions:** The value of the
+	// [`K_REVISION` environment
+	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+	// * **Azure Functions:** Not applicable. Do not set this attribute.
+	FaaSVersionKey = attribute.Key("faas.version")
+
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a
+	// string, that will be potentially reused for other invocations to the
+	// same function/function version.
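+	//
+	// As a hedged sketch, a Lambda-hosted service might populate the faas
+	// attributes like this (illustrative; the values are hypothetical and
+	// would normally come from the runtime environment):
+	//
+	//	res := resource.NewWithAttributes(
+	//		semconv.SchemaURL,
+	//		semconv.FaaSName("my-function"),
+	//		semconv.FaaSID("arn:aws:lambda:us-west-2:123456789012:function:my-function"),
+	//		semconv.FaaSInstance("2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"),
+	//	)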
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function in MiB. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic +// conventions. It represents the unique ID of the single function that this +// runtime instance executes. +func FaaSID(val string) attribute.KeyValue { + return FaaSIDKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function in MiB. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // Linux systems, the `machine-id` located in `/etc/machine-id` or + // `/var/lib/dbus/machine-id` may be used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'n1-standard-1'
+	HostTypeKey = attribute.Key("host.type")
+
+	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
+	// conventions. It represents the CPU architecture the host system is
+	// running on.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	HostArchKey = attribute.Key("host.arch")
+
+	// HostImageNameKey is the attribute Key conforming to the
+	// "host.image.name" semantic conventions. It represents the name of the VM
+	// image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+
+	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID. For Cloud, this
+	// value is from the provider.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+
+	// HostImageVersionKey is the attribute Key conforming to the
+	// "host.image.version" semantic conventions. It represents the version
+	// string of the VM image as defined in [Version
+	// Attributes](README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+	// AMD64
+	HostArchAMD64 = HostArchKey.String("amd64")
+	// ARM32
+	HostArchARM32 = HostArchKey.String("arm32")
+	// ARM64
+	HostArchARM64 = HostArchKey.String("arm64")
+	// Itanium
+	HostArchIA64 = HostArchKey.String("ia64")
+	// 32-bit PowerPC
+	HostArchPPC32 = HostArchKey.String("ppc32")
+	// 64-bit PowerPC
+	HostArchPPC64 = HostArchKey.String("ppc64")
+	// IBM z/Architecture
+	HostArchS390x = HostArchKey.String("s390x")
+	// 32-bit x86
+	HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized Linux
+// systems, the `machine-id` located in `/etc/machine-id` or
+// `/var/lib/dbus/machine-id` may be used.
+func HostID(val string) attribute.KeyValue {
+	return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+	return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+	return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+	return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+	return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+	return HostImageVersionKey.String(val)
+}
+
+// A Kubernetes Cluster.
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the
+	// "k8s.cluster.name" semantic conventions. It represents the name of the
+	// cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+	// semantic conventions. It represents the name of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'node-1'
+	K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+	// K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+	// semantic conventions. It represents the UID of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+	return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+	return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+	// K8SNamespaceNameKey is the attribute Key conforming to the
+	// "k8s.namespace.name" semantic conventions. It represents the name of the
+	// namespace that the pod is running in.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'default'
+	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+	return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+	// semantic conventions. It represents the UID of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+	// semantic conventions. It represents the name of the Pod.
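+	//
+	// A hedged usage sketch (illustrative; assumes the pod and namespace
+	// names are injected, e.g. via the Kubernetes downward API):
+	//
+	//	res := resource.NewWithAttributes(
+	//		semconv.SchemaURL,
+	//		semconv.K8SNamespaceName("default"),
+	//		semconv.K8SPodName("opentelemetry-pod-autoconf"),
+	//	)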
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-pod-autoconf'
+	K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+	return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+	return K8SPodNameKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+	// K8SContainerNameKey is the attribute Key conforming to the
+	// "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from the Pod specification; it must be unique within a Pod.
+	// The container runtime usually uses a different, globally unique name
+	// (`container.name`).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'redis'
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+	// K8SContainerRestartCountKey is the attribute Key conforming to the
+	// "k8s.container.restart_count" semantic conventions. It represents the
+	// number of times the container was restarted. This attribute can be used
+	// to identify a particular container (running or stopped) within a
+	// container spec.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 0, 2
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod.
+// The container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+	return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+	return K8SContainerRestartCountKey.Int(val)
+}
+
+// A Kubernetes ReplicaSet object.
+const (
+	// K8SReplicaSetUIDKey is the attribute Key conforming to the
+	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+	// ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+	// K8SReplicaSetNameKey is the attribute Key conforming to the
+	// "k8s.replicaset.name" semantic conventions. It represents the name of
+	// the ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
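+// For example (a hedged sketch; the UID below is the documented example
+// value, not a real object):
+//
+//	kv := semconv.K8SReplicaSetUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff")
+//	_ = kv // e.g. appended to a resource or set on a span
+//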
+func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. 
It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. 
It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string representing the
+	// full command. On Windows, can be set to the result of `GetCommandLineW`.
+	// Do not set this if you have to assemble it just for monitoring; use
+	// `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'cmd/otelcol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
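+// A hedged sketch combining the process attributes above (illustrative;
+// assumes the standard library's os package for the live PID value):
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ProcessPID(os.Getpid()),
+//		semconv.ProcessExecutableName("otelcol"), // hypothetical name
+//	)
+//	_ = res
+//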
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// The single (language) runtime instance which is monitored.
+const (
+	// ProcessRuntimeNameKey is the attribute Key conforming to the
+	// "process.runtime.name" semantic conventions. It represents the name of
+	// the runtime of this process. For compiled native binaries, this SHOULD
+	// be the name of the compiler.
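+	//
+	// As a hedged sketch for a Go process (illustrative; uses the standard
+	// library's runtime package for the version value):
+	//
+	//	res := resource.NewWithAttributes(
+	//		semconv.SchemaURL,
+	//		semconv.ProcessRuntimeName("go"),
+	//		semconv.ProcessRuntimeVersion(runtime.Version()),
+	//	)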
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. 
If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. 
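+	//
+	// The Go SDK's default resource already carries the telemetry.sdk.*
+	// attributes, so they rarely need to be set by hand; a hedged sketch
+	// (assumes go.opentelemetry.io/otel/sdk/resource):
+	//
+	//	res := resource.Default()
+	//	_ = res // includes telemetry.sdk.name, .language, and .version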
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. 
It represents the version of
+	// the web engine.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '21.0.0'
+	WebEngineVersionKey = attribute.Key("webengine.version")
+
+	// WebEngineDescriptionKey is the attribute Key conforming to the
+	// "webengine.description" semantic conventions. It represents the
+	// additional description of the web engine (e.g. detailed version and
+	// edition information).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+	// 2.2.2.Final'
+	WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+	return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+	return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+	return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+	// OtelScopeNameKey is the attribute Key conforming to the
+	// "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OtelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OtelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0.0'
+	OtelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OtelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OtelScopeName(val string) attribute.KeyValue {
+	return OtelScopeNameKey.String(val)
+}
+
+// OtelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OtelScopeVersion(val string) attribute.KeyValue {
+	return OtelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+	// OtelLibraryNameKey is the attribute Key conforming to the
+	// "otel.library.name" semantic conventions. It is deprecated; use the
+	// `otel.scope.name` attribute instead.
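+	//
+	// A hedged migration sketch: new code should emit the scope attribute
+	// rather than this deprecated one (illustrative value taken from the
+	// documented example):
+	//
+	//	kv := semconv.OtelScopeName("io.opentelemetry.contrib.mongodb")
+	//	_ = kv // preferred over OtelLibraryName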
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OtelLibraryNameKey = attribute.Key("otel.library.name")
+
+	// OtelLibraryVersionKey is the attribute Key conforming to the
+	// "otel.library.version" semantic conventions. It is deprecated; use the
+	// `otel.scope.version` attribute instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '1.0.0'
+	OtelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OtelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It is deprecated; use the
+// `otel.scope.name` attribute instead.
+func OtelLibraryName(val string) attribute.KeyValue {
+	return OtelLibraryNameKey.String(val)
+}
+
+// OtelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It is deprecated; use the
+// `otel.scope.version` attribute instead.
+func OtelLibraryVersion(val string) attribute.KeyValue {
+	return OtelLibraryVersionKey.String(val)
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
new file mode 100644
index 0000000000..634a1dce07
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.17.0"
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
new file mode 100644
index 0000000000..21497bb6bc
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
@@ -0,0 +1,3364 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
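+	//
+	// A hedged sketch of attaching these attributes to a span event
+	// (illustrative; `span` is assumed to be a started trace.Span from
+	// go.opentelemetry.io/otel/trace):
+	//
+	//	span.AddEvent("exception", trace.WithAttributes(
+	//		semconv.ExceptionType("OSError"),
+	//		semconv.ExceptionMessage("Division by zero"),
+	//	))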
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the
+ // business context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have same `event.name`, yet be
+ // unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions.
+ // It represents the full invoked ARN as provided on the `Context` passed
+ // to the function (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` request, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `faas.id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` request, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+ // which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+ // which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+ // which contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
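A minimal sketch of attaching the CloudEvents attributes above to a processing span; the span is assumed to come from the trace API and the event values are invented.

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateCloudEvent stamps the identifying CloudEvents attributes onto a
// span created around sending or processing an event.
func annotateCloudEvent(span trace.Span) {
	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
	)
}
```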
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
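A minimal sketch showing how the enum-valued conventions above are consumed: they ship as ready-made `attribute.KeyValue` variables rather than constructor functions, so they are set directly; the helper name is invented.

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// markChildOf records opentracing.ref_type=child_of, as the OpenTracing
// shim would for a ChildOf reference.
func markChildOf(span trace.Span) {
	span.SetAttributes(semconv.OpentracingRefTypeChildOf)
}
```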
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target
+ // database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable and not
+ // explicitly disabled via instrumentation configuration.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ // Note: The value may be sanitized to exclude sensitive information.
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented.
If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") +) + +// DBConnectionString returns 
an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target database
+// (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
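A minimal sketch of annotating a database client span with the call-level constructors defined above; the span and connection details are assumptions, not part of this diff.

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateDBSpan sets the generic database client attributes on a span
// wrapping a single query.
func annotateDBSpan(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemPostgreSQL, // enum member, set as-is
		semconv.DBName("customers"),
		semconv.DBUser("readonly_user"),
		semconv.DBOperation("SELECT"),
		semconv.DBStatement("SELECT * FROM orders WHERE id = $1"),
	)
}
```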
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
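A minimal sketch for a Cassandra query span combining the generic and Cassandra-specific keys; values are invented, and the consistency-level variable used here is defined just below in this file.

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateCassandraSpan sets the Cassandra call-level attributes on a span
// wrapping a single statement execution.
func annotateCassandraSpan(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemCassandra,
		semconv.DBCassandraConsistencyLevelLocalQuorum,
		semconv.DBCassandraTable("mykeyspace.mytable"),
		semconv.DBCassandraPageSize(5000),
		semconv.DBCassandraIdempotence(true),
	)
}
```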
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions.
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OtelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OtelStatusCodeKey = attribute.Key("otel.status_code") + + // OtelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
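A minimal sketch for a Redis command span, pairing `DBSystemRedis` with the Redis-specific index attribute above; values are invented.

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateRedisSpan sets the Redis call-level attributes on a span wrapping
// a single command.
func annotateRedisSpan(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemRedis,
		semconv.DBRedisDBIndex(15), // only needed when not the default (0)
		semconv.DBStatement(`SET mykey "WuValue"`),
	)
}
```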
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OtelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OtelStatusCodeOk = OtelStatusCodeKey.String("OK") + // The operation contains an error + OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") +) + +// OtelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OtelStatusDescription(val string) attribute.KeyValue { + return OtelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function execution. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" + // semantic conventions. It represents the execution ID of the current + // function execution. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSExecution returns an attribute KeyValue conforming to the +// "faas.execution" semantic conventions. It represents the execution ID of the +// current function execution. +func FaaSExecution(val string) attribute.KeyValue { + return FaaSExecutionKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. 
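A minimal sketch for the incoming (server) span of an HTTP-triggered function invocation, where `faas.trigger` is required; the execution ID is invented.

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateFaaSSpan marks the span of one function invocation with its
// trigger type and execution ID.
func annotateFaaSSpan(span trace.Span) {
	span.SetAttributes(
		semconv.FaaSTriggerHTTP, // MUST be set on the incoming side
		semconv.FaaSExecution("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
	)
}
```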
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the type
+ // of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
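A minimal sketch for a datasource-triggered invocation, e.g. an object created in a bucket; all values are invented.

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateDocumentTrigger records what was touched, how, when, and where
// for a datasource-triggered function invocation.
func annotateDocumentTrigger(span trace.Span) {
	span.SetAttributes(
		semconv.FaaSTriggerDatasource,
		semconv.FaaSDocumentCollection("myBucketName"),
		semconv.FaaSDocumentOperationInsert,
		semconv.FaaSDocumentName("myFile.txt"),
		semconv.FaaSDocumentTime("2020-01-23T13:47:06Z"),
	)
}
```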
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. 
In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetAppProtocolNameKey is the attribute Key conforming to the + // "net.app.protocol.name" semantic conventions. It represents the + // application layer protocol used. The value SHOULD be normalized to + // lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") + + // NetAppProtocolVersionKey is the attribute Key conforming to the + // "net.app.protocol.version" semantic conventions. It represents the + // version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.app.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client used has a version of `0.27.2`, but sends HTTP version + // `1.1`, this attribute should be set to `1.1`. + NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. 
It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. 
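A minimal sketch of the logical vs. socket-level split these attributes encode, for a client connecting to a logical peer through a proxy; all values are invented.

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateClientSpan records both the logical peer (what the client meant
// to reach) and the immediate socket peer (what it actually connected to).
func annotateClientSpan(span trace.Span) {
	span.SetAttributes(
		semconv.NetPeerName("example.com"), // logical peer
		semconv.NetPeerPort(443),
		semconv.NetSockPeerAddr("10.1.2.80"), // actual socket peer
		semconv.NetSockPeerPort(8443),
	)
}
```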
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.host.port` and if `net.sock.host.addr` is set.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+ // NetHostConnectionSubtypeKey is the attribute Key conforming to the
+ // "net.host.connection.subtype" semantic conventions. It describes more
+ // details regarding the connection.type. It may be the type of cell
+ // technology connection, but it could be used for describing details
+ // about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe.
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetAppProtocolName returns an attribute KeyValue conforming to the +// "net.app.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetAppProtocolName(val string) attribute.KeyValue { + return NetAppProtocolNameKey.String(val) +} + +// NetAppProtocolVersion returns an attribute KeyValue conforming to the +// "net.app.protocol.version" semantic conventions. It represents the version +// of the application layer protocol used. See note below. 
+func NetAppProtocolVersion(val string) attribute.KeyValue { + return NetAppProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. 
+func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
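The `peer.service` and `enduser.*` attributes above typically travel together on spans for authenticated remote calls. A sketch under stated assumptions: `annotateCall` is a hypothetical helper, the values are taken from the Examples in the doc comments, and the import path is assumed:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
	"go.opentelemetry.io/otel/trace"
)

// annotateCall (hypothetical) stamps a span with the remote service identity
// and the authenticated end user making the request.
func annotateCall(span trace.Span) {
	span.SetAttributes(
		semconv.PeerService("AuthTokenCache"), // SHOULD equal the remote service.name
		semconv.EnduserID("username"),
		semconv.EnduserRole("admin"),
		semconv.EnduserScope("read:message, write:files"),
	)
}
```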
+func EnduserID(val string) attribute.KeyValue {
+	return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+	return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+	return EnduserScopeKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+	// conventions. It represents the current "managed" thread ID (as opposed
+	// to OS thread ID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	ThreadIDKey = attribute.Key("thread.id")
+
+	// ThreadNameKey is the attribute Key conforming to the "thread.name"
+	// semantic conventions. It represents the current thread name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'main'
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+	return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+	return ThreadNameKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+	// CodeFunctionKey is the attribute Key conforming to the "code.function"
+	// semantic conventions. It represents the method or function name, or
+	// equivalent (usually rightmost part of the code unit's name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'serveRequest'
+	CodeFunctionKey = attribute.Key("code.function")
+
+	// CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+	// semantic conventions. It represents the "namespace" within which
+	// `code.function` is defined. Usually the qualified class or module name,
+	// such that `code.namespace` + some separator + `code.function` form a
+	// unique identifier for the code unit.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'com.example.MyHTTPService'
+	CodeNamespaceKey = attribute.Key("code.namespace")
+
+	// CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+	// semantic conventions. It represents the source code file name that
+	// identifies the code unit as uniquely as possible (preferably an absolute
+	// file path).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/usr/local/MyApplication/content_root/app/index.php'
+	CodeFilepathKey = attribute.Key("code.filepath")
+
+	// CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+	// semantic conventions. It represents the line number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	CodeLineNumberKey = attribute.Key("code.lineno")
+
+	// CodeColumnKey is the attribute Key conforming to the "code.column"
+	// semantic conventions. It represents the column number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 16
+	CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+	return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+	return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+	return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+	return CodeColumnKey.Int(val)
+}
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+	// HTTPMethodKey is the attribute Key conforming to the "http.method"
+	// semantic conventions. It represents the HTTP request method.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	HTTPMethodKey = attribute.Key("http.method")
+
+	// HTTPStatusCodeKey is the attribute Key conforming to the
+	// "http.status_code" semantic conventions. It represents the [HTTP
+	// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If and only if one was
+	// received/sent.)
+	// Stability: stable
+	// Examples: 200
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+	// HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+	// semantic conventions. It represents the kind of HTTP protocol used.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: If `net.transport` is not specified, it can be assumed to be
+	// `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is
+	// assumed.
+	HTTPFlavorKey = attribute.Key("http.flavor")
+
+	// HTTPUserAgentKey is the attribute Key conforming to the
+	// "http.user_agent" semantic conventions. It represents the value of the
+	// [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+	HTTPUserAgentKey = attribute.Key("http.user_agent")
+
+	// HTTPRequestContentLengthKey is the attribute Key conforming to the
+	// "http.request_content_length" semantic conventions. It represents the
+	// size of the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3495
+	HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+	// HTTPResponseContentLengthKey is the attribute Key conforming to the
+	// "http.response_content_length" semantic conventions. It represents the
+	// size of the response payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3495
+	HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+var (
+	// HTTP/1.0
+	HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+	// HTTP/1.1
+	HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+	// HTTP/2
+	HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+	// HTTP/3
+	HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+	// SPDY protocol
+	HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+	// QUIC protocol
+	HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+	return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+	return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions. It represents the value of the [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
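These common HTTP attributes are usually derived mechanically from the request and response. A minimal sketch assuming a `net/http` server handler and the `v1.17.0` import path; `requestAttrs` is a hypothetical helper and HTTP/1.1 is assumed for the flavor:

```go
package example

import (
	"net/http"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
)

// requestAttrs (hypothetical) derives the common HTTP span attributes
// defined above from a net/http request and the response status code.
func requestAttrs(r *http.Request, status int) []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.HTTPMethod(r.Method),
		semconv.HTTPStatusCode(status),
		semconv.HTTPFlavorHTTP11, // assuming an HTTP/1.1 exchange
		semconv.HTTPUserAgent(r.UserAgent()),
	}
	if r.ContentLength >= 0 { // -1 means unknown; skip rather than record it
		attrs = append(attrs, semconv.HTTPRequestContentLength(int(r.ContentLength)))
	}
	return attrs
}
```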
+func HTTPUserAgent(val string) attribute.KeyValue { + return HTTPUserAgentKey.String(val) +} + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. It represents the full HTTP request URL in the form +// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not +// transmitted over HTTP, but if it is known, it should be included +// nevertheless. +func HTTPURL(val string) attribute.KeyValue { + return HTTPURLKey.String(val) +} + +// HTTPResendCount returns an attribute KeyValue conforming to the +// "http.resend_count" semantic conventions. It represents the ordinal number +// of request resending attempt (for any reason, including redirects). 
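The note on `http.url` above has a practical consequence: any userinfo must be stripped before the URL is recorded. A sketch of one way to enforce that with `net/url`; `safeHTTPURL` is a hypothetical helper and the import path is assumed:

```go
package example

import (
	"net/url"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
)

// safeHTTPURL (hypothetical) enforces the note above: http.url MUST NOT
// carry credentials, so any username:password userinfo is dropped before
// the attribute is built.
func safeHTTPURL(raw string) (attribute.KeyValue, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return attribute.KeyValue{}, err
	}
	u.User = nil // https://user:pass@www.example.com/ becomes https://www.example.com/
	return semconv.HTTPURL(u.String()), nil
}
```

`HTTPResendCount`, by contrast, is simply incremented and re-recorded each time the client resends the request, whatever the cause.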
+func HTTPResendCount(val int) attribute.KeyValue {
+	return HTTPResendCountKey.Int(val)
+}
+
+// Semantic Convention for HTTP Server
+const (
+	// HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+	// semantic conventions. It represents the URI scheme identifying the used
+	// protocol.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'http', 'https'
+	HTTPSchemeKey = attribute.Key("http.scheme")
+
+	// HTTPTargetKey is the attribute Key conforming to the "http.target"
+	// semantic conventions. It represents the full request target as passed in
+	// an HTTP request line or equivalent.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '/path/12314/?q=ddds'
+	HTTPTargetKey = attribute.Key("http.target")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route (path template in
+	// the format used by the respective server framework). See note below
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If and only if it's available)
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: 'http.route' MUST NOT be populated when this is not supported by
+	// the HTTP server framework as the route attribute should have
+	// low-cardinality and the URI path can NOT substitute it.
+	HTTPRouteKey = attribute.Key("http.route")
+
+	// HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+	// semantic conventions. It represents the IP address of the original
+	// client behind all proxies, if known (e.g. from
+	// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '83.164.160.102'
+	// Note: This is not necessarily the same as `net.sock.peer.addr`, which
+	// would
+	// identify the network-level peer, which may be a proxy.
+	//
+	// This attribute should be set when a source of information different
+	// from the one used for `net.sock.peer.addr` is available even if that
+	// other
+	// source just confirms the same value as `net.sock.peer.addr`.
+	// Rationale: For `net.sock.peer.addr`, one typically does not know if it
+	// comes from a proxy, reverse proxy, or the actual client. Setting
+	// `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+	// one is at least somewhat confident that the address is not that of
+	// the closest proxy.
+	HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+	return HTTPSchemeKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in an
+// HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+	return HTTPTargetKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework).
See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
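To make the DynamoDB request attributes above concrete, here is one plausible attribute set for a Query span, assuming the `v1.17.0` import path; the table, index, and projection values are the illustrative ones from the doc comments:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
)

// queryAttrs sketches the attributes a DynamoDB Query span might carry,
// mirroring the request parameters named in the doc comments above.
func queryAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBConsistentRead(false),
		semconv.AWSDynamoDBProjection("Title, Price, Color"),
		semconv.AWSDynamoDBLimit(10),
		semconv.AWSDynamoDBSelect("ALL_ATTRIBUTES"),
	}
}
```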
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It
+	// represents the JSON-serialized value of each item of the
+	// `GlobalSecondaryIndexes` request field
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+	// "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+	// "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+	// AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `LocalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexARN": "string", "IndexName": "string",
+	// "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+	// AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+	// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+	// the value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+	// AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the
+	// number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+	return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
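The "JSON-serialized value of each item" contract above means each index definition becomes one JSON string in a `string[]` attribute. A sketch of that serialization step, assuming the `v1.17.0` import path; the `map[string]any` input is a stand-in, not the AWS SDK's index type:

```go
package example

import (
	"encoding/json"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
)

// gsiAttr (hypothetical) marshals each index definition to a JSON string
// and collects the strings into the string[] attribute defined above.
func gsiAttr(indexes []map[string]any) (attribute.KeyValue, error) {
	vals := make([]string, 0, len(indexes))
	for _, idx := range indexes {
		b, err := json.Marshal(idx)
		if err != nil {
			return attribute.KeyValue{}, err
		}
		vals = append(vals, string(b))
	}
	return semconv.AWSDynamoDBGlobalSecondaryIndexes(vals...), nil
}
```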
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
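The Query and Scan parameters above pair naturally on a span for one worker of a parallel scan. A minimal sketch, assuming the `v1.17.0` import path; the segment numbers echo the Examples and `scanAttrs` is a hypothetical helper:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
)

// scanAttrs sketches the attributes for one worker of a parallel Scan
// (segment 10 of 100, illustrative values).
func scanAttrs(count, scanned int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AWSDynamoDBSegment(10),
		semconv.AWSDynamoDBTotalSegments(100),
		semconv.AWSDynamoDBCount(count),          // items returned
		semconv.AWSDynamoDBScannedCount(scanned), // items evaluated before filtering
	}
}
```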
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+	return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+	// the "aws.dynamodb.attribute_definitions" semantic conventions. It
+	// represents the JSON-serialized value of each item in the
+	// `AttributeDefinitions` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+	// AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+	// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+	// conventions. It represents the JSON-serialized value of each item in
+	// the `GlobalSecondaryIndexUpdates` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+	// GraphqlOperationNameKey is the attribute Key conforming to the
+	// "graphql.operation.name" semantic conventions. It represents the name of
+	// the operation being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'findBookByID'
+	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+	// GraphqlOperationTypeKey is the attribute Key conforming to the
+	// "graphql.operation.type" semantic conventions. It represents the type of
+	// the operation being executed.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'query', 'mutation', 'subscription'
+	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+	// GraphqlDocumentKey is the attribute Key conforming to the
+	// "graphql.document" semantic conventions. It represents the GraphQL
+	// document being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+	// Note: The value may be sanitized to exclude sensitive information.
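The three `graphql.*` attributes just documented usually appear together on an execution span. A sketch assuming the `v1.17.0` import path; the values echo the sanitized Examples above, and the `GraphqlOperationTypeQuery` enum member is declared immediately after this aside:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
)

// graphqlAttrs combines the graphql.* attributes for a query span; note the
// document value keeps the sanitized `?` placeholder rather than a real ID.
func graphqlAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.GraphqlOperationName("findBookByID"),
		semconv.GraphqlOperationTypeQuery, // enum member declared just below
		semconv.GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
	}
}
```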
+ GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. 
It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationKindKey is the attribute Key conforming to the + // "messaging.destination.kind" semantic conventions. It represents the + // kind of message destination + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceKindKey is the attribute Key conforming to the + // "messaging.source.kind" semantic conventions. It represents the kind of + // message source + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingSourceKindKey = attribute.Key("messaging.source.kind") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. 
Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +var ( + // A message received from a queue + MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") + // A message received from a topic + MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. 
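Taken together, the per-message, destination, and general messaging attributes above describe a publish operation end to end. A sketch assuming the `v1.17.0` import path; the values are illustrative and the operation and destination-kind enum members are declared immediately after this aside:

```go
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed version
)

// publishAttrs sketches a publish-side attribute set drawing on the
// messaging attributes defined in this file.
func publishAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.MessagingSystem("kafka"),
		semconv.MessagingOperationPublish, // enum member declared just below
		semconv.MessagingDestinationName("MyTopic"),
		semconv.MessagingDestinationKindTopic,
		semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
	}
}
```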
+ MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) 
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message key. Message keys in Kafka are used for grouping alike messages
+ // to ensure they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaClientIDKey is the attribute Key conforming to the
+ // "messaging.kafka.client_id" semantic conventions. It represents the
+ // client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+ // "messaging.kafka.source.partition" semantic conventions. It represents
+ // the partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions.
It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message key. Message keys in Kafka are used for grouping alike messages to
+// ensure they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`,
+// the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+ return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+ return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources, resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions.
It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqClientIDKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_id" semantic conventions. It represents the
+ // unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group; it is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of message, another way to mark message besides message id.
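Taken together, the RocketMQ attributes above might be assembled as follows. This is a sketch only: the literal values are copied from the Examples lines, the variable name is invented, and the import path assumes the v1.17.0 semconv package:

package example // hypothetical; for illustration only

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed vendored version
)

// rocketmqFifoAttrs combines the required identifiers with the
// conditionally required message group for a FIFO message.
var rocketmqFifoAttrs = []attribute.KeyValue{
	semconv.MessagingSystem("rocketmq"),
	semconv.MessagingRocketmqNamespace("myNamespace"),
	semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
	semconv.MessagingRocketmqClientID("myhost@8742@s8083jm"),
	semconv.MessagingRocketmqMessageTypeFifo,
	semconv.MessagingRocketmqMessageGroup("myMessageGroup"),
}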
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group; it is essential for FIFO messages.
Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. 
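As a hedged illustration of the rpc.* attributes just defined, a server span for a gRPC call might be named `$package.$service/$method` and annotated as follows; the function and tracer names are invented, and the import path assumes the v1.17.0 semconv package:

package example // hypothetical; for illustration only

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed vendored version
	"go.opentelemetry.io/otel/trace"
)

// echoServerSpan mirrors the rpc.service / rpc.method split described above.
func echoServerSpan(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "myservice.EchoService/exampleMethod",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			semconv.RPCSystemGRPC,
			semconv.RPCService("myservice.EchoService"),
			semconv.RPCMethod("exampleMethod"),
		))
	defer span.End()
}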
It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) 
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since protocol allows id to be int, string,
+// `null` or missing (for notifications), value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
new file mode 100644
index 0000000000..82e1f46b4e
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.20.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.20.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.20.0)
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
new file mode 100644
index 0000000000..6685c392b5
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
@@ -0,0 +1,1198 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes HTTP attributes.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It represents the HTTP request method.
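Before the v1.20.0 additions continue, a sketch of how the rpc.grpc.status_code attribute defined in the file above might be derived from a call error; the helper name is hypothetical and the example assumes the google.golang.org/grpc/status package:

package example // hypothetical; for illustration only

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0" // assumed vendored version
	"google.golang.org/grpc/status"
)

// grpcStatusAttr maps a gRPC call error to the numeric status code; a nil
// error yields code 0 (OK).
func grpcStatusAttr(err error) attribute.KeyValue {
	s, _ := status.FromError(err)
	return semconv.RPCGRPCStatusCodeKey.Int(int(s.Code()))
}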
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the [HTTP
+ // response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTP Server spans attributes
+const (
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It represents the URI scheme identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See note below
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework as the route attribute should have low-cardinality and
+ // the URI path can NOT substitute it.
+ // SHOULD include the [application
+ // root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
+ // if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the
+ // business context for the events.
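A minimal sketch tying the four HTTP server attributes above to a span; handler wiring is omitted, all names and values are illustrative, and the import path matches the file header above:

package example // hypothetical; for illustration only

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// serverSpan records method, scheme, and route at start and the status code
// once the response is known.
func serverSpan(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "GET /users/:userID?",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			semconv.HTTPMethod("GET"),
			semconv.HTTPScheme("https"),
			semconv.HTTPRoute("/users/:userID?"),
		))
	defer span.End()
	span.SetAttributes(semconv.HTTPStatusCode(200))
}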
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have same `event.name`, yet be
+ // unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions. It represents the transport protocol used. See
+ // note below.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+
+ // NetProtocolNameKey is the attribute Key conforming to the
+ // "net.protocol.name" semantic conventions. It represents the application
+ // layer protocol used. The value SHOULD be normalized to lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+ // NetProtocolVersionKey is the attribute Key conforming to the
+ // "net.protocol.version" semantic conventions. It represents the version
+ // of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `net.protocol.version` refers to the version of the protocol used
+ // and might be different from the protocol client's version. If the HTTP
+ // client used has a version of `0.27.2`, but sends HTTP version `1.1`,
+ // this attribute should be set to `1.1`.
+ NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions. It represents the remote
+ // socket peer name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If available and different from
+ // `net.peer.name` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions. It represents the remote
+ // socket peer address: IPv4 or IPv6 for internet protocols, path for local
+ // communication,
+ // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '127.0.0.1', '/tmp/mysql.sock'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions. It represents the remote
+ // socket peer port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+ // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If defined for the address + // family and if different than `net.host.port` and if `net.sock.host.addr` + // is set. In other cases, it is still recommended to set this.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe. 
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. It represents the version of +// the application layer protocol used. See note below. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. 
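For context, the logical-peer and socket-level attributes above are typically set side by side on a client span. A sketch, with illustrative values taken from the Examples lines and an invented variable name:

package example // hypothetical; for illustration only

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// clientNetAttrs pairs the logical peer (name/port) with the resolved
// socket-level peer (addr/port).
var clientNetAttrs = []attribute.KeyValue{
	semconv.NetTransportTCP,
	semconv.NetPeerName("example.com"),
	semconv.NetPeerPort(443),
	semconv.NetSockPeerAddr("127.0.0.1"),
	semconv.NetSockPeerPort(16456),
}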
It represents the local socket
+// port number.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+ // NetHostConnectionSubtypeKey is the attribute Key conforming to the
+ // "net.host.connection.subtype" semantic conventions. It represents more
+ // details regarding the connection.type. It may be the type of cell
+ // technology connection, but it could be used for describing details about
+ // a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev.
A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. +func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. 
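A hedged sketch of how the host connection and carrier attributes above compose for a device on a cellular network; every value is copied from the Examples lines and the variable name is invented:

package example // hypothetical; for illustration only

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// cellHostAttrs describes an LTE connection on the "sprint" carrier.
var cellHostAttrs = []attribute.KeyValue{
	semconv.NetHostConnectionTypeCell,
	semconv.NetHostConnectionSubtypeLte,
	semconv.NetHostCarrierName("sprint"),
	semconv.NetHostCarrierMcc("310"),
	semconv.NetHostCarrierMnc("001"),
	semconv.NetHostCarrierIcc("DE"),
}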
It represents the + // size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. 
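The two content-length helpers above are usually applied after the payloads have been transferred; a sketch, with the helper name invented:

package example // hypothetical; for illustration only

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// recordBodySizes attaches the (possibly compressed) request and response
// body sizes in bytes to an existing span.
func recordBodySizes(span trace.Span, reqBytes, respBytes int) {
	span.SetAttributes(
		semconv.HTTPRequestContentLength(reqBytes),
		semconv.HTTPResponseContentLength(respBytes),
	)
}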
It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. 
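Per-message attributes like these are set on each publish/process span or link; a sketch with values copied from the Examples lines and an invented variable name:

package example // hypothetical; for illustration only

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// messageAttrs identifies one message and records its uncompressed size.
var messageAttrs = []attribute.KeyValue{
	semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
	semconv.MessagingMessageConversationID("MyConversationID"),
	semconv.MessagingMessagePayloadSizeBytes(2738),
}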
It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. 
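The name/template pairing described in the notes above might look like this in practice; the concrete destination and the variable name are hypothetical:

package example // hypothetical; for illustration only

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// destinationAttrs records the high-cardinality name alongside its
// low-cardinality template.
var destinationAttrs = []attribute.KeyValue{
	semconv.MessagingDestinationName("/customers/42"),
	semconv.MessagingDestinationTemplate("/customers/{customerID}"),
	semconv.MessagingDestinationTemporary(false),
}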
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+ // MessagingSourceTemporaryKey is the attribute Key conforming to the
+ // "messaging.source.temporary" semantic conventions. It represents a
+ // boolean that is true if the message source is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+ // MessagingSourceAnonymousKey is the attribute Key conforming to the
+ // "messaging.source.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message source is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name
+func MessagingSourceName(val string) attribute.KeyValue {
+ return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+ return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+ return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+ return MessagingSourceAnonymousKey.Bool(val)
+}
+
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the rabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message key. Message keys in Kafka are used for grouping alike messages
+ // to ensure they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique.
If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message key. Message keys in Kafka are used for grouping alike messages to +// ensure they're processed on the same partition. They differ from +// `messaging.message.id` in that they're not unique. If the key is `null`, +// the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaClientID returns an attribute KeyValue conforming to the +// "messaging.kafka.client_id" semantic conventions. It represents the client +// ID for the Consumer or Producer that is handling the message. +func MessagingKafkaClientID(val string) attribute.KeyValue { + return MessagingKafkaClientIDKey.String(val) +} + +// MessagingKafkaDestinationPartition returns an attribute KeyValue +// conforming to the "messaging.kafka.destination.partition" semantic +// conventions. It represents the partition the message is sent to. +func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { + return MessagingKafkaDestinationPartitionKey.Int(val) +} + +// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to +// the "messaging.kafka.source.partition" semantic conventions. It represents +// the partition the message is received from. +func MessagingKafkaSourcePartition(val int) attribute.KeyValue { + return MessagingKafkaSourcePartitionKey.Int(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// Attributes for Apache RocketMQ +const ( + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqClientIDKey is the attribute Key conforming to the + // "messaging.rocketmq.client_id" semantic conventions. It represents the + // unique identifier for each client. 
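On the consumer side, the Kafka helpers above pin a processed record to its origin. A minimal sketch (illustrative only; the group name and the way partition/offset are obtained are assumptions):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateKafkaProcessSpan records which consumer group handled the record
// and where the record came from; "my-group" is a placeholder.
func annotateKafkaProcessSpan(span trace.Span, partition, offset int) {
	span.SetAttributes(
		semconv.MessagingKafkaConsumerGroup("my-group"),
		semconv.MessagingKafkaSourcePartition(partition),
		semconv.MessagingKafkaMessageOffset(offset),
	)
}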
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds at which + // the delay message is expected to be delivered to the consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delay time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for a delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay + // and delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the message group, which is essential for FIFO messages. Messages that + // belong to the same message group are always processed one by one within + // the same consumer group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) + // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of a message besides the topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of a message, another way to mark a message besides the + // message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources; resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. +func MessagingRocketmqClientID(val string) attribute.KeyValue { + return MessagingRocketmqClientIDKey.String(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds at which the delay +// message is expected to be delivered to the consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for a delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the message group, which is essential for FIFO messages. Messages that +// belong to the same message group are always processed one by one within the +// same consumer group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of a message besides the topic.
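The RocketMQ enum members above are themselves attribute.KeyValue values, so they pass straight into SetAttributes alongside the string constructors. A sketch under assumed placeholder names:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateRocketmqSendSpan mixes enum values and constructors; the namespace,
// group, and message group names are placeholders.
func annotateRocketmqSendSpan(span trace.Span) {
	span.SetAttributes(
		semconv.MessagingRocketmqNamespace("myNamespace"),
		semconv.MessagingRocketmqClientGroup("myProducerGroup"),
		semconv.MessagingRocketmqMessageTypeFifo,
		semconv.MessagingRocketmqMessageGroup("myMessageGroup"),
	)
}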
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + UserAgentOriginalKey = attribute.Key("user_agent.original") +) + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go new file mode 100644 index 0000000000..0d1f55a8fe --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.20.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go new file mode 100644 index 0000000000..6377639321 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go @@ -0,0 +1,188 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// This semantic convention defines the attributes used to represent a feature +// flag evaluation as an event. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") + + // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. 
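Since this convention describes an event rather than plain span attributes, the feature-flag keys are typically attached via AddEvent. A sketch (illustrative only; the event name "feature_flag" comes from the feature-flag event convention in the specification, not from this file, and the values are placeholders):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// recordFlagEvaluation emits a feature-flag evaluation as a span event
// carrying the three attributes defined by this convention.
func recordFlagEvaluation(span trace.Span) {
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey("logo-color"),
		semconv.FeatureFlagProviderName("Flag Manager"),
		semconv.FeatureFlagVariant("red"),
	))
}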
+ // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") + + // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It represents the variant, + // which SHOULD be a semantic identifier for a value. If one is unavailable, + // a stringified version of the value can be used. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` may be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) + +// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} + +// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} + +// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It represents the variant, +// which SHOULD be a semantic identifier for a value. If one is unavailable, a +// stringified version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} + +// RPC received/sent message. +const ( + // MessageTypeKey is the attribute Key conforming to the "message.type" + // semantic conventions. It represents whether this is a received or + // sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessageTypeKey = attribute.Key("message.type") + + // MessageIDKey is the attribute Key conforming to the "message.id" + // semantic conventions. It represents the message ID, which MUST be + // calculated as two different counters starting from `1`, one for sent + // messages and one for received messages. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + + // MessageCompressedSizeKey is the attribute Key conforming to the + // "message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // MessageUncompressedSizeKey is the attribute Key conforming to the + // "message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) + +// MessageID returns an attribute KeyValue conforming to the "message.id" +// semantic conventions. It represents the message ID, which MUST be calculated +// as two different counters starting from `1`, one for sent messages and one +// for received messages. +func MessageID(val int) attribute.KeyValue { + return MessageIDKey.Int(val) +} + +// MessageCompressedSize returns an attribute KeyValue conforming to the +// "message.compressed_size" semantic conventions. It represents the compressed +// size of the message in bytes. +func MessageCompressedSize(val int) attribute.KeyValue { + return MessageCompressedSizeKey.Int(val) +} + +// MessageUncompressedSize returns an attribute KeyValue conforming to the +// "message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func MessageUncompressedSize(val int) attribute.KeyValue { + return MessageUncompressedSizeKey.Int(val) +} + +// The attributes used to report a single exception associated with a span. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It represents a boolean that + // SHOULD be set to true if the exception event is recorded at a point + // where it is known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example above](#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It represents a boolean that +// SHOULD be set to true if the exception event is recorded at a point where it +// is known that the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go new file mode 100644 index 0000000000..f40c97825a --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go new file mode 100644 index 0000000000..9c1840631b --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go new file mode 100644 index 0000000000..3d44dae275 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go @@ -0,0 +1,2060 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). 
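Per the note above, the one point where escape is knowable is just before the span ends, which is where ExceptionEscaped is most useful. A minimal sketch using the trace API's RecordError (the helper name is a placeholder):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// endSpanWithError records err as an exception event marked as escaping,
// since the span ends while the error is still logically in flight.
func endSpanWithError(span trace.Span, err error) {
	if err != nil {
		span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
	}
	span.End()
}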
+ // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + CloudResourceIDKey = attribute.Key("cloud.resource_id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. 
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + 
CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// "cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) +// on Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. 
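Because these are resource (not span) attributes, they usually feed into an SDK resource at startup. A sketch assuming the OpenTelemetry Go SDK's resource package is available; the provider, platform, region, and zone values are placeholders:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/sdk/resource"
)

// newCloudResource groups the cloud.* attributes into a schemaless resource.
func newCloudResource() *resource.Resource {
	return resource.NewSchemaless(
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSEKS,
		semconv.CloudRegion("us-east-1"),
		semconv.CloudAvailabilityZone("us-east-1c"),
	)
}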
It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. 
+func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. 
It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// Heroku dyno metadata +const ( + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") +) + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. 
It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
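The container and deployment attributes combine the same way when describing the runtime environment. A sketch with assumed placeholder values, again using the SDK's resource package:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/sdk/resource"
)

// newContainerResource describes a container instance and its deployment
// tier; all values are placeholders.
func newContainerResource() *resource.Resource {
	return resource.NewSchemaless(
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTag("0.1"),
		semconv.DeploymentEnvironment("staging"),
	)
}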
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. 
It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string that will potentially be reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string that will potentially be reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID. For Cloud, this + // value is from the provider.
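
The faas.* helpers above compose naturally into an SDK resource. A minimal sketch, assuming an AWS Lambda environment and the go.opentelemetry.io/otel/sdk/resource package; the faasResource name and the env-var wiring are illustrative, not the SDK's own Lambda detector:

```go
import (
	"os"
	"strconv"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// faasResource builds a resource from the faas.* helpers defined above.
func faasResource() *resource.Resource {
	// AWS reports the limit in MiB; faas.max_memory wants bytes, hence the
	// multiplication by 1,048,576 called out in the attribute docs above.
	memMiB, _ := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.FaaSName(os.Getenv("AWS_LAMBDA_FUNCTION_NAME")),
		semconv.FaaSVersion(os.Getenv("AWS_LAMBDA_FUNCTION_VERSION")),
		semconv.FaaSMaxMemory(memMiB*1024*1024),
	)
}
```
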
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the VM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions.
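
The host.arch enum values above pair naturally with Go's runtime package. A minimal sketch; the hostAttrs helper is illustrative and maps only a few common GOARCH values:

```go
import (
	"os"
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// hostAttrs maps a few common GOARCH values onto the host.arch enum above.
func hostAttrs() []attribute.KeyValue {
	name, _ := os.Hostname()
	attrs := []attribute.KeyValue{semconv.HostName(name)}
	switch runtime.GOARCH {
	case "amd64":
		attrs = append(attrs, semconv.HostArchAMD64)
	case "arm64":
		attrs = append(attrs, semconv.HostArchARM64)
	case "386":
		attrs = append(attrs, semconv.HostArchX86)
	}
	return attrs
}
```
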
It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from the Pod specification, which must be unique within a Pod. The + // container runtime usually uses a different globally unique name (`container.name`).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from the Pod specification, which must be unique within a Pod. The +// container runtime usually uses a different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, as reported, for example, by the `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
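
The k8s.* helpers above are typically fed from the Kubernetes downward API. A minimal sketch; the POD_NAMESPACE/POD_NAME/NODE_NAME variable names are a common convention that the pod spec must wire up via fieldRef, not something the SDK provides:

```go
import (
	"os"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// k8sAttrs reads pod identity from env vars injected via the downward API.
func k8sAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.K8SNamespaceName(os.Getenv("POD_NAMESPACE")),
		semconv.K8SPodName(os.Getenv("POD_NAME")),
		semconv.K8SNodeName(os.Getenv("NODE_NAME")),
	}
}
```
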
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, as reported, for example, by the `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.)
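
The os.type enum values above can be derived from runtime.GOOS. A minimal sketch; the osTypeAttr helper is illustrative and maps only a few platforms:

```go
import (
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// osTypeAttr maps runtime.GOOS onto the os.type enum above; unlisted
// platforms report ok=false and the attribute is simply omitted.
func osTypeAttr() (kv attribute.KeyValue, ok bool) {
	switch runtime.GOOS {
	case "windows":
		return semconv.OSTypeWindows, true
	case "linux":
		return semconv.OSTypeLinux, true
	case "darwin":
		return semconv.OSTypeDarwin, true
	}
	return attribute.KeyValue{}, false
}
```
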
+ // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions.
It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored. +const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment.
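
Most of the process.* attributes above can be filled from the Go standard library alone. A minimal sketch, with the processAttrs name being ours:

```go
import (
	"os"
	"path/filepath"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// processAttrs fills the self-describing process.* attributes from the
// standard library.
func processAttrs() []attribute.KeyValue {
	exe, _ := os.Executable()
	return []attribute.KeyValue{
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessParentPID(os.Getppid()),
		semconv.ProcessExecutableName(filepath.Base(exe)),
		semconv.ProcessExecutablePath(exe),
		semconv.ProcessCommandArgs(os.Args...),
	}
}
```
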
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fall back to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// A service instance. +const ( + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). A zero-length namespace string is assumed equal to an + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance; however, it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK.
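
Stamping service identity onto a resource is the most common use of this package; the schema.go file added later in this diff supplies the matching SchemaURL. A minimal sketch using the example values from the docs above (the serviceResource name is illustrative):

```go
import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
)

// serviceResource stamps service identity onto an SDK resource.
func serviceResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("shoppingcart"),
		semconv.ServiceNamespace("Shop"),
		semconv.ServiceVersion("2.0.0"),
		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
	)
}
```
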
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. +const ( + // OTelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It is deprecated; + // use the `otel.scope.name` attribute instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelLibraryNameKey = attribute.Key("otel.library.name") + + // OTelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. It is + // deprecated; use the `otel.scope.version` attribute instead.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OTelLibraryVersionKey = attribute.Key("otel.library.version") +) + +// OTelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It is deprecated; use +// the `otel.scope.name` attribute instead. +func OTelLibraryName(val string) attribute.KeyValue { + return OTelLibraryNameKey.String(val) +} + +// OTelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It is deprecated; +// use the `otel.scope.version` attribute instead. +func OTelLibraryVersion(val string) attribute.KeyValue { + return OTelLibraryVersionKey.String(val) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go new file mode 100644 index 0000000000..95d0210e38 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/<version> +const SchemaURL = "https://opentelemetry.io/schemas/1.20.0" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go new file mode 100644 index 0000000000..90b1b0452c --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go @@ -0,0 +1,2599 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// The attributes described in this section are rather generic. They may be +// used in any Log Record they apply to. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means that two + // distinguishable log records MUST have different values. + // The id MAY be a [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` response, where applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions.
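
The exception.* attributes above are usually attached as a span event named "exception". A minimal sketch, assuming a trace.Span is in hand (the recordException helper is illustrative; the SDK's own span.RecordError produces a comparable event):

```go
import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// recordException attaches the exception.* attributes to a span event.
func recordException(span trace.Span, err error) {
	span.AddEvent("exception", trace.WithAttributes(
		semconv.ExceptionType(fmt.Sprintf("%T", err)),
		semconv.ExceptionMessage(err.Error()),
	))
}
```
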
It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` response, where applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), + // which uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), + // which identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), + // which contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), which +// uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions.
It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target
+ // database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (Should be collected by default only if
+ // there is sanitization that excludes sensitive information.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only.
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBConnectionString returns an 
attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target database
+// (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // connecting to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
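+ //
+ // A minimal usage sketch, assuming this package is imported as `semconv`
+ // and `span` is an active trace.Span (both names are assumptions for the
+ // example):
+ //
+ //    span.SetAttributes(semconv.DBCassandraPageSize(5000))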
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents whether
+ // or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether or
+// not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions.
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Call-level attributes for Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. 
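+ //
+ // As a sketch, assuming an active trace.Span `span` and this package
+ // imported as `semconv` (example-only assumptions), an operation type is
+ // recorded via one of the predefined enum values below:
+ //
+ //    span.SetAttributes(semconv.DBCosmosDBOperationTypeQuery)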
+ // + // Type: Enum + // RequirementLevel: ConditionallyRequired (when performing one of the + // operations in this list) + // Stability: stable + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as + // default)) + // Stability: stable + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if available) + // Stability: stable + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (if response was received) + // Stability: stable + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (when response was received and + // contained sub-code.) + // Stability: stable + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. 
It represents the RU
+ // consumed for that operation.
+ //
+ // Type: double
+ // RequirementLevel: ConditionallyRequired (when available)
+ // Stability: stable
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions.
It represents the RU
+// consumed for that operation.
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that the corresponding incoming span would have (i.e., this
+ // has nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
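+ //
+ // A minimal sketch, assuming an active trace.Span `span` and this package
+ // imported as `semconv` (example-only assumptions):
+ //
+ //    span.SetAttributes(semconv.FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"))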
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 this corresponds to the bucket name, and
+ // in Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It describes the type of
+ // the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions.
It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to
+// the database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
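+//
+// A minimal sketch, assuming an active trace.Span `span`, this package
+// imported as `semconv`, and a caller-provided `isColdStart bool` (all
+// hypothetical names used only for the example):
+//
+//    span.SetAttributes(semconv.FaaSColdstart(isColdStart))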
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. 
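+ //
+ // A minimal sketch, assuming an active trace.Span `span` and this package
+ // imported as `semconv` (example-only assumptions):
+ //
+ //    span.SetAttributes(semconv.PeerService("AuthTokenCache"))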
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
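+//
+// A minimal sketch, assuming an active trace.Span `span` and this package
+// imported as `semconv` (example-only assumptions):
+//
+//    span.SetAttributes(semconv.EnduserScope("read:message, write:files"))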
+func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} + +// These attributes may be used for any operation to store information about a +// thread that started a span. +const ( + // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic + // conventions. It represents the current "managed" thread ID (as opposed + // to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + + // ThreadNameKey is the attribute Key conforming to the "thread.name" + // semantic conventions. It represents the current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// ThreadID returns an attribute KeyValue conforming to the "thread.id" +// semantic conventions. It represents the current "managed" thread ID (as +// opposed to OS thread ID). +func ThreadID(val int) attribute.KeyValue { + return ThreadIDKey.Int(val) +} + +// ThreadName returns an attribute KeyValue conforming to the "thread.name" +// semantic conventions. It represents the current thread name. +func ThreadName(val string) attribute.KeyValue { + return ThreadNameKey.String(val) +} + +// These attributes allow to report this unit of code and therefore to provide +// more context about the span. +const ( + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeColumnKey is the attribute Key conforming to the "code.column" + // semantic conventions. It represents the column number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. 
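+ //
+ // A minimal sketch recording a code location, assuming an active
+ // trace.Span `span` and this package imported as `semconv` (example-only
+ // assumptions):
+ //
+ //    span.SetAttributes(
+ //        semconv.CodeFunction("serveRequest"),
+ //        semconv.CodeLineNumber(42),
+ //        semconv.CodeColumn(16),
+ //    )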
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. 
It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
+
+// Semantic Convention for HTTP Server
+const (
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+ // an HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/users/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.sock.peer.addr`, which
+ // would identify the network-level peer, which may be a proxy.
+ //
+ // This attribute should be set when a source of information different
+ // from the one used for `net.sock.peer.addr` is available, even if that
+ // other source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in an
+// HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+ return HTTPClientIPKey.String(val)
+}
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
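+ //
+ // A minimal sketch, assuming an active trace.Span `span` and this package
+ // imported as `semconv` (example-only assumptions):
+ //
+ //    span.SetAttributes(semconv.AWSRequestID("79b9da39-b7ae-508a-a6bc-864b2829c622"))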
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
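+ //
+ // A minimal sketch, assuming an active trace.Span `span` and this package
+ // imported as `semconv` (example-only assumptions):
+ //
+ //    span.SetAttributes(semconv.AWSDynamoDBProvisionedWriteCapacity(2.0))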
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
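+//
+// Editor's illustrative sketch (not part of the vendored upstream file):
+// attaching this attribute to a span, where `span` is assumed to be a
+// go.opentelemetry.io/otel/trace.Span obtained from a tracer, and the
+// metrics payload is a placeholder.
+//
+//	// Hypothetical values for illustration only.
+//	span.SetAttributes(
+//		AWSDynamoDBItemCollectionMetrics(`{"Users": []}`),
+//	)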
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the +// number of items in the `TableNames` response parameter.
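+//
+// Editor's sketch (not upstream code): annotating a hypothetical DynamoDB
+// ListTables span; `span` is assumed to be a
+// go.opentelemetry.io/otel/trace.Span, and the values are placeholders.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBExclusiveStartTable("Users"), // hypothetical table name
+//		AWSDynamoDBTableCount(20),
+//	)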
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
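+//
+// Editor's sketch (not upstream code): recording the Scan request and
+// response attributes together; `span` is assumed to be a
+// go.opentelemetry.io/otel/trace.Span, and the numbers are placeholders.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBSegment(0),       // which parallel-scan segment this is
+//		AWSDynamoDBTotalSegments(4), // how many segments the scan was split into
+//		AWSDynamoDBCount(10),
+//		AWSDynamoDBScannedCount(50),
+//	)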
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Attributes that exist for S3 request types. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e.
that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. 
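+//
+// Editor's sketch (not upstream code): attributes for a hypothetical S3
+// copy-object span; `span` is assumed to be a
+// go.opentelemetry.io/otel/trace.Span, and the bucket/key values are
+// placeholders.
+//
+//	span.SetAttributes(
+//		AWSS3Bucket("some-bucket-name"),
+//		AWSS3Key("someFile.yml"),
+//		AWSS3CopySource("source-bucket/someFile.yml"), // bucket/key form
+//	)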
+func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. 
For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. 
It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// Tech-specific attributes for Connect RPC. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If response is not successful + // and if error code available.) 
+ // Stability: stable + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md new file mode 100644 index 0000000000..2de1fc3c6b --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.26.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go new file mode 100644 index 0000000000..d8dc822b26 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go @@ -0,0 +1,8996 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +import "go.opentelemetry.io/otel/attribute" + +// The Android platform on which the Android application is running. +const ( + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It uniquely + // identifies the framework API revision offered by a version + // (`os.version`) of the Android operating system. More information can be + // found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the Android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). +func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. It represents + // the rate-limiting result; it shows whether the lease was acquired or + // contains a rejection reason. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: stable + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming + // to the "aspnetcore.diagnostics.exception.result" semantic conventions. + // It represents the ASP.NET Core exception middleware handling result. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating if the request was handled by the application pipeline. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route.
+ // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") + + // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the + // "aspnetcore.routing.match_status" semantic conventions. It represents + // the match result - success or failure + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'success', 'failure' + AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +var ( + // Exception was handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") + // Exception was not handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") + // Exception handling was skipped because the response had started + AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") + // Exception handling didn't run because the request was aborted + AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") +) + +var ( + // Match succeeded + AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") + // Match failed + AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. 
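+//
+// Editor's sketch (not upstream code): recording the routing outcome on a
+// server span; `span` is assumed to be a go.opentelemetry.io/otel/trace.Span.
+//
+//	span.SetAttributes(
+//		AspnetcoreRoutingMatchStatusSuccess, // predefined enum member
+//		AspnetcoreRoutingIsFallback(false),
+//	)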
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// Generic attributes for AWS services. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes for AWS DynamoDB. +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. 
It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. 
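+//
+// As a minimal usage sketch (an editorial illustration: it assumes a
+// trace.Span value named span and this package imported as semconv, neither
+// of which is defined here), several of the DynamoDB helpers in this file
+// might be recorded together on a span:
+//
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBTableNames("Users"),
+//		semconv.AWSDynamoDBLimit(10),
+//		semconv.AWSDynamoDBScanForward(true),
+//	)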
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. +func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
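+	//
+	// As an illustrative sketch (assuming go.opentelemetry.io/otel/sdk/resource
+	// and this package imported as semconv; the variable name is illustrative
+	// only), ECS resource attributes might be declared as:
+	//
+	//	res := resource.NewSchemaless(
+	//		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+	//		semconv.AWSECSLaunchtypeFargate,
+	//	)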
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. 
It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// the task definition used to create the ECS task. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Attributes for AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Attributes for AWS Logs. +const ( + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). 
+ // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") +) + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// Attributes for AWS Lambda. +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `cloud.resource_id` if an alias is + // involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for AWS S3. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. 
+ // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e. that require the object key as a mandatory parameter. 
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
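+	//
+	// For example (an editorial sketch assuming a trace.Span value named span
+	// and this package imported as semconv), a multipart-upload span might
+	// carry:
+	//
+	//	span.SetAttributes(
+	//		semconv.AWSS3Bucket("some-bucket-name"),
+	//		semconv.AWSS3Key("someFile.yml"),
+	//		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+	//	)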
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions. It represents the client address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.address` SHOULD represent the client address
+	// behind any intermediaries, for example proxies, if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port"
+	// semantic conventions. It represents the client port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.port` SHOULD represent the client port behind
+	// any intermediaries, for example proxies, if it's available.
+	ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the
+	// availability zone in which the resource is running. Cloud regions often
+	// have multiple, isolated locations known as zones to increase
+	// availability.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
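+	//
+	// As a usage sketch for the client attributes defined above (an editorial
+	// illustration assuming a trace.Span value named span and this package
+	// imported as semconv):
+	//
+	//	span.SetAttributes(
+	//		semconv.ClientAddress("10.1.2.80"),
+	//		semconv.ClientPort(65123),
+	//	)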
+	CloudPlatformKey = attribute.Key("cloud.platform")
+
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region"
+	// semantic conventions. It represents the geographical region the resource
+	// is running in.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for
+	// example [Alibaba Cloud
+	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+	// [Azure
+	// regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+	// [Google Cloud regions](https://cloud.google.com/about/locations), or
+	// [Tencent Cloud
+	// regions](https://www.tencentcloud.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudResourceIDKey is the attribute Key conforming to the
+	// "cloud.resource_id" semantic conventions. It represents the cloud
+	// provider-specific native identifier of the monitored cloud resource
+	// (e.g. an
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// on AWS, a [fully qualified resource
+	// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+	// on Azure, a [full resource
+	// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+	// on GCP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+	// '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+	// '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
+	// Note: On some cloud providers, it may not be possible to determine the
+	// full ID at startup,
+	// so it may be necessary to set `cloud.resource_id` as a span attribute
+	// instead.
+	//
+	// The exact value to use for `cloud.resource_id` depends on the cloud
+	// provider.
+	// The following well-known definitions MUST be used if you set this
+	// attribute and they apply:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+	// Take care not to use the "invoked ARN" directly but replace any
+	// [alias
+	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+	// with the resolved function version, as the same runtime instance may
+	// be invokable with
+	// multiple different aliases.
+	// * **GCP:** The [URI of the
+	// resource](https://cloud.google.com/iam/docs/full-resource-names)
+	// * **Azure:** The [Fully Qualified Resource
+	// ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+	// of the invoked function,
+	// *not* the function app, having the form
+	// `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider.
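+	//
+	// As a minimal sketch (assuming go.opentelemetry.io/otel/sdk/resource and
+	// this package imported as semconv), several cloud attributes might be
+	// combined into a resource:
+	//
+	//	res := resource.NewSchemaless(
+	//		semconv.CloudProviderAWS,
+	//		semconv.CloudRegion("us-east-1"),
+	//		semconv.CloudResourceID("arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function"),
+	//	)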
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + 
+	// IBM Cloud
+	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone in which the resource is running. Cloud regions often
+// have multiple, isolated locations known as zones to increase availability.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running in.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP).
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
+
+// Attributes for CloudEvents.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the
+	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com.github.pull_request.opened',
+	// 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+	return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+	return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+	return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+	return CloudeventsEventTypeKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+	// CodeColumnKey is the attribute Key conforming to the "code.column"
+	// semantic conventions. It represents the column number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
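+	//
+	// As an illustrative sketch (assuming a trace.Span value named span and
+	// this package imported as semconv), the code location attributes defined
+	// in this block might be set together:
+	//
+	//	span.SetAttributes(
+	//		semconv.CodeNamespace("com.example.MyHTTPService"),
+	//		semconv.CodeFunction("serveRequest"),
+	//		semconv.CodeLineNumber(42),
+	//		semconv.CodeColumn(16),
+	//	)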
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). 
+func CodeFunction(val string) attribute.KeyValue {
+	return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+	return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+	return CodeStacktraceKey.String(val)
+}
+
+// A container instance.
+const (
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used
+	// to run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol'
+	// Note: If using embedded credentials or sensitive data, it is recommended
+	// to remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) run by the
+	// container.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol, --config, config.yaml'
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full
+	// command run by the container as a single string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol --config config.yaml'
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerCPUStateKey is the attribute Key conforming to the
+	// "container.cpu.state" semantic conventions. It represents the CPU state
+	// for this data point.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'user', 'kernel'
+	ContainerCPUStateKey = attribute.Key("container.cpu.state")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id"
+	// semantic conventions. It represents the container ID. Usually a UUID, as
+	// for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+	// The UUID might be abbreviated.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the runtime
+	// specific image identifier. Usually a hash algorithm followed by a UUID.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+	// Note: Docker defines a sha256 of the image id; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect
+	// [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+	// endpoint.
+	// K8S defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io
+	// /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+	// environments. Consider using `oci.manifest.digest` if it is important to
+	// identify the same image in different environments/runtimes.
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of
+	// the image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
+	// "container.image.repo_digests" semantic conventions. It represents the
+	// repo digests of the container image as provided by the container
+	// runtime.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+	// 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+	// Note:
+	// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+	// and
+	// [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+	// report those under the `RepoDigests` field.
+	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+	// ContainerImageTagsKey is the attribute Key conforming to the
+	// "container.image.tags" semantic conventions. It represents the container
+	// image tags. An example can be found in [Docker Image
+	// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+	// Should be only the `<tag>` section of the full name for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'v1.27.1', '3.5.7-0'
+	ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by container
+	// runtime.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+var (
+	// When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
+	ContainerCPUStateUser = ContainerCPUStateKey.String("user")
+	// When CPU is used by the system (host OS)
+	ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
+	// When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
+	ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+	return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+	return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+	return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+	return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+	return ContainerRuntimeKey.String(val)
+}
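+
+// Example (a minimal sketch, with values taken from the docs above): the
+// container helpers are typically attached as resource attributes, assuming
+// the OpenTelemetry SDK resource package:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ContainerID("a3bf90e006b2"),
+//		semconv.ContainerName("opentelemetry-autoconf"),
+//		semconv.ContainerRuntime("docker"),
+//	)
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+	// DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+	// "db.client.connections.pool.name" semantic conventions. It represents
+	// the name of the connection pool; unique within the instrumented
+	// application. In case the connection pool implementation doesn't provide
+	// a name, instrumentation should use a combination of `server.address` and
+	// `server.port` attributes formatted as `server.address:server.port`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myDataSource'
+	DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+	// DBClientConnectionsStateKey is the attribute Key conforming to the
+	// "db.client.connections.state" semantic conventions. It represents the
+	// state of a connection in the pool.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'idle'
+	DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+	// DBCollectionNameKey is the attribute Key conforming to the
+	// "db.collection.name" semantic conventions. It represents the name of a
+	// collection (table, container) within the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'public.users', 'customers'
+	// Note: If the collection name is parsed from the query, it SHOULD match
+	// the value provided in the query and may be qualified with the schema and
+	// database name.
+	// It is RECOMMENDED to capture the value as provided by the application
+	// without attempting to do any case normalization.
+	DBCollectionNameKey = attribute.Key("db.collection.name")
+
+	// DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+	// semantic conventions. It represents the name of the database, fully
+	// qualified within the server address and port.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'customers', 'test.users'
+	// Note: If a database system has multiple namespace components, they
+	// SHOULD be concatenated (potentially using database system specific
+	// conventions) from most general to most specific namespace component, and
+	// more specific namespaces SHOULD NOT be captured without the more general
+	// namespaces, to ensure that "startswith" queries for the more general
+	// namespaces will be valid.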
+ // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. + DBSystemKey = attribute.Key("db.system") +) + +var ( + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to
+// the "db.client.connections.pool.name" semantic conventions. It represents
+// the name of the connection pool; unique within the instrumented application.
+// In case the connection pool implementation doesn't provide a name,
+// instrumentation should use a combination of `server.address` and
+// `server.port` attributes formatted as `server.address:server.port`.
+func DBClientConnectionsPoolName(val string) attribute.KeyValue {
+	return DBClientConnectionsPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+	return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the
+// "db.namespace" semantic conventions. It represents the name of the database,
+// fully qualified within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+	return DBNamespaceKey.String(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+	return DBOperationNameKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the
+// "db.query.text" semantic conventions. It represents the database query being
+// executed.
+func DBQueryText(val string) attribute.KeyValue {
+	return DBQueryTextKey.String(val)
+}
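+
+// Example (a minimal sketch, with values from the docs above): a database
+// client span might carry the general db attributes like so, assuming a
+// trace.Span named span:
+//
+//	span.SetAttributes(
+//		semconv.DBSystemPostgreSQL,
+//		semconv.DBNamespace("customers"),
+//		semconv.DBOperationName("SELECT"),
+//		semconv.DBQueryText("SELECT * FROM wuser_table where username = ?"),
+//	)
+
+// This group defines attributes for Cassandra.
+const (
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents whether
+	// or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e.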
how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+	// to the "db.cassandra.speculative_execution_count" semantic conventions.
+	// It represents the number of times a query was speculatively executed.
+	// Not set or `0` if the query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+	return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+	return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
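+
+// Example (a minimal sketch): combining the Cassandra-specific helpers with
+// the general db attributes on a span, assuming a trace.Span named span:
+//
+//	span.SetAttributes(
+//		semconv.DBSystemCassandra,
+//		semconv.DBCassandraConsistencyLevelLocalQuorum,
+//		semconv.DBCassandraCoordinatorDC("us-west-2"),
+//		semconv.DBCassandraPageSize(5000),
+//	)
+
+// This group defines attributes for Azure Cosmos DB.
+const (
+	// DBCosmosDBClientIDKey is the attribute Key conforming to the
+	// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+	// Cosmos client instance id.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+	DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+	// DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+	// "db.cosmosdb.connection_mode" semantic conventions. It represents the
+	// Cosmos client connection mode.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+	// DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+	// "db.cosmosdb.operation_type" semantic conventions. It represents the
+	// Cosmos DB operation type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+	// DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+	// consumed for that operation.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 46.18, 1.0
+	DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+	// DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_content_length" semantic conventions. It represents
+	// the request payload size in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+	// DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos
+	// DB status code.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 200, 201
+	DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+	// DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+	// Cosmos DB sub status code.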
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1000, 1002
+	DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+)
+
+var (
+	// Gateway (HTTP) connections mode
+	DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+	// Direct connection
+	DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+	// invalid
+	DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+	// create
+	DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+	// patch
+	DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+	// read
+	DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+	// read_feed
+	DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+	// delete
+	DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+	// replace
+	DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+	// execute
+	DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+	// query
+	DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+	// head
+	DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+	// head_feed
+	DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+	// upsert
+	DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+	// batch
+	DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+	// query_plan
+	DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+	// execute_javascript
+	DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+	return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+// consumed for that operation.
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+	return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+	return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the Cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBSubStatusCodeKey.Int(val)
+}
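+
+// Example (a minimal sketch, with values from the docs above): recording a
+// Cosmos DB point read on a span, assuming a trace.Span named span:
+//
+//	span.SetAttributes(
+//		semconv.DBSystemCosmosDB,
+//		semconv.DBCosmosDBOperationTypeRead,
+//		semconv.DBCosmosDBConnectionModeGateway,
+//		semconv.DBCosmosDBStatusCode(200),
+//		semconv.DBCosmosDBRequestCharge(46.18),
+//	)
+
+// This group defines attributes for Elasticsearch.
+const (
+	// DBElasticsearchClusterNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.cluster.name" semantic conventions. It represents the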
+	// identifier of an Elasticsearch cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.node.name" semantic conventions. It represents the
+	// human-readable identifier of the node/instance to which a request was
+	// routed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'instance-0000000001'
+	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+)
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+	return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a request was
+// routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+	return DBElasticsearchNodeNameKey.String(val)
+}
+
+// Attributes for software deployments.
+const (
+	// DeploymentEnvironmentKey is the attribute Key conforming to the
+	// "deployment.environment" semantic conventions. It represents the name of
+	// the [deployment
+	// environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+	// deployment tier).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'staging', 'production'
+	// Note: `deployment.environment` does not affect the uniqueness
+	// constraints defined through
+	// the `service.namespace`, `service.name` and `service.instance.id`
+	// resource attributes.
+	// This implies that resources carrying the following attribute
+	// combinations MUST be
+	// considered to be identifying the same service:
+	//
+	// * `service.name=frontend`, `deployment.environment=production`
+	// * `service.name=frontend`, `deployment.environment=staging`.
+	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+	return DeploymentEnvironmentKey.String(val)
+}
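+
+// Example (a minimal sketch): `deployment.environment` is usually attached as
+// a resource attribute next to the service identity, assuming the SDK
+// resource package and the ServiceName helper defined elsewhere in this
+// package:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ServiceName("frontend"),
+//		semconv.DeploymentEnvironment("production"),
+//	)
+
+// Attributes that represent an occurrence of a lifecycle transition on the
+// Android platform.
+const (
+	// AndroidStateKey is the attribute Key conforming to the "android.state"
+	// semantic conventions. It is deprecated; use the `device.app.lifecycle`
+	// event definition including `android.state` as a payload field instead.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The Android lifecycle states are defined in [Activity lifecycle
+	// callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+	// from which the `OS identifiers` are derived.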
+ AndroidStateKey = attribute.Key("android.state") +) + +var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) + +// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") + + // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) + +// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} + +// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} + +// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// These attributes may be used for any disk related operation. 
+const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) + +var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) + +// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Back slashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) + +// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} + +// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
+func EnduserID(val string) attribute.KeyValue {
+	return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+	return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+	return EnduserScopeKey.String(val)
+}
+
+// The shared attributes used to report an error.
+const (
+	// ErrorTypeKey is the attribute Key conforming to the "error.type"
+	// semantic conventions. It describes a class of error the operation ended
+	// with.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'timeout', 'java.net.UnknownHostException',
+	// 'server_certificate_invalid', '500'
+	// Note: The `error.type` SHOULD be predictable, and SHOULD have low
+	// cardinality.
+	//
+	// When `error.type` is set to a type (e.g., an exception type), its
+	// canonical class name identifying the type within the artifact SHOULD be
+	// used.
+	//
+	// Instrumentations SHOULD document the list of errors they report.
+	//
+	// The cardinality of `error.type` within one instrumentation library
+	// SHOULD be low.
+	// Telemetry consumers that aggregate data from multiple instrumentation
+	// libraries and applications
+	// should be prepared for `error.type` to have high cardinality at query
+	// time when no
+	// additional filters are applied.
+	//
+	// If the operation has completed successfully, instrumentations SHOULD NOT
+	// set `error.type`.
+	//
+	// If a specific domain defines its own set of error identifiers (such as
+	// HTTP or gRPC status codes),
+	// it's RECOMMENDED to:
+	//
+	// * Use a domain-specific attribute
+	// * Set `error.type` to capture all errors, regardless of whether they are
+	// defined within the domain-specific set or not.
+	ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+	// A fallback error value to be used when the instrumentation doesn't define a custom value
+	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
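+
+// Example (a minimal sketch): per the guidance above, a low-cardinality
+// error.type is set only when the operation fails, assuming a trace.Span
+// named span and an error err:
+//
+//	if err != nil {
+//		// Fall back to the generic value when no better identifier is known.
+//		span.SetAttributes(semconv.ErrorTypeOther)
+//	}
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It identifies the class / type of event.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'browser.mouse.click', 'device.app.lifecycle'
+	// Note: Event names are subject to the same rules as [attribute
+	// names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
+	// Notably, event names are namespaced to avoid collisions and provide a
+	// clean separation of semantics for events in separate domains like
+	// browser, mobile, and kubernetes.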
+	EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be
+	// set to true if the exception event is recorded at a point where it is
+	// known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example for recording span
+	// exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Division by zero', "Can't convert 'int' object to str
+	// implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the
+	// "exception.stacktrace" semantic conventions. It represents a stacktrace
+	// as a string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	// 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	// 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	// 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
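+
+// Example (a minimal sketch): the exception helpers are typically attached to
+// a span event, assuming a trace.Span named span and an error err:
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		semconv.ExceptionType("OSError"),
+//		semconv.ExceptionMessage(err.Error()),
+//		semconv.ExceptionEscaped(true),
+//	))
+
+// FaaS attributes
+const (
+	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+	// semantic conventions. It represents a boolean that is true if the
+	// serverless function is executed for the first time (aka cold-start).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+	// conventions. It represents a string containing the schedule period as
+	// [Cron
+	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0/5 * * * ? *'
+	FaaSCronKey = attribute.Key("faas.cron")
+
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name
+	// of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 corresponds to the bucket name, and in
+	// Cosmos DB to the database name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 is the name of the file, and in Cosmos DB the table name.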
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It describes the
+	// type of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string
+	// containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a
+	// string, that will be potentially reused for other invocations to the
+	// same function/function version.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+	// Note: * **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+
+	// FaaSInvocationIDKey is the attribute Key conforming to the
+	// "faas.invocation_id" semantic conventions. It represents the invocation
+	// ID of the current function invocation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+	// FaaSInvokedNameKey is the attribute Key conforming to the
+	// "faas.invoked_name" semantic conventions. It represents the name of the
+	// invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-function'
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the
+	// invoked function.
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+	// FaaSInvokedProviderKey is the attribute Key conforming to the
+	// "faas.invoked_provider" semantic conventions. It represents the cloud
+	// provider of the invoked function.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+	// invoked function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+	// FaaSInvokedRegionKey is the attribute Key conforming to the
+	// "faas.invoked_region" semantic conventions. It represents the cloud
+	// region of the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'eu-central-1'
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
+	// invoked function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+	// FaaSMaxMemoryKey is the attribute Key conforming to the
+	// "faas.max_memory" semantic conventions. It represents the amount of
+	// memory available to the serverless function converted to Bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 134217728
+	// Note: It's recommended to set this attribute since e.g. too little
+	// memory can easily stop a Java AWS Lambda function from working
+	// correctly. On AWS Lambda, the environment variable
+	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+	// be multiplied by 1,048,576).
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this
+	// runtime instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+	// Note: This is the name of the function as configured/deployed on the
+	// FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The
+	// following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud
+	// providers/products:
+	//
+	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	// followed by a forward slash followed by the function name (this form
+	// can also be seen in the resource JSON for the function).
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider (see also the `cloud.resource_id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+	// conventions. It represents a string containing the function invocation
+	// time in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSTimeKey = attribute.Key("faas.time")
+
+	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+	// semantic conventions. It represents the type of the trigger which caused
+	// this function invocation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSTriggerKey = attribute.Key("faas.trigger")
+
+	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
+	// semantic conventions. It represents the immutable version of the
+	// function being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '26', 'pinkfroid-00002'
+	// Note: Depending on the cloud provider and platform, use:
+	//
+	// * **AWS Lambda:** The [function
+	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+	// (an integer represented as a decimal string).
+	// * **Google Cloud Run (Services):** The
+	// [revision](https://cloud.google.com/run/docs/managing/revisions)
+	// (i.e., the function name plus the revision suffix).
+	// * **Google Cloud Functions:** The value of the
+	// [`K_REVISION` environment
+	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+	// * **Azure Functions:** Not applicable.
Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. 
It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+	return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+	return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+	return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+	return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+	return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+	return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+	return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+	return FaaSVersionKey.String(val)
+}
+
+// Attributes for Feature Flags.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+	// identifier for a value. If one is unavailable, a stringified
+	// version of the value can be used.
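+	// For example, an illustrative sketch (assuming a trace.Span named span
+	// from go.opentelemetry.io/otel/trace): the variant for the value
+	// "#c05543" might be recorded as
+	//
+	//	span.SetAttributes(FeatureFlagVariant("red"))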
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+// identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
+// Describes file attributes.
+const (
+	// FileDirectoryKey is the attribute Key conforming to the "file.directory"
+	// semantic conventions. It represents the directory where the file is
+	// located. It should include the drive letter, when appropriate.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/home/user', 'C:\\Program Files\\MyApp'
+	FileDirectoryKey = attribute.Key("file.directory")
+
+	// FileExtensionKey is the attribute Key conforming to the "file.extension"
+	// semantic conventions. It represents the file extension, excluding the
+	// leading dot.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'png', 'gz'
+	// Note: When the file name has multiple extensions (example.tar.gz), only
+	// the last one should be captured ("gz", not "tar.gz").
+	FileExtensionKey = attribute.Key("file.extension")
+
+	// FileNameKey is the attribute Key conforming to the "file.name" semantic
+	// conventions. It represents the name of the file including the extension,
+	// without the directory.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'example.png'
+	FileNameKey = attribute.Key("file.name")
+
+	// FilePathKey is the attribute Key conforming to the "file.path" semantic
+	// conventions. It represents the full path to the file, including the file
+	// name. It should include the drive letter, when appropriate.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/home/alice/example.png', 'C:\\Program
+	// Files\\MyApp\\myapp.exe'
+	FilePathKey = attribute.Key("file.path")
+
+	// FileSizeKey is the attribute Key conforming to the "file.size" semantic
+	// conventions. It represents the file size in bytes.
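+	// For example, an illustrative sketch (assuming the os package), using
+	// the FileSize helper defined later in this file:
+	//
+	//	info, _ := os.Stat("/home/alice/example.png")
+	//	kv := FileSize(int(info.Size())) // attribute.KeyValue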
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// Attributes for Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Attributes for Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") + + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. 
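+	// For example, an illustrative sketch (assuming encoding/json and a
+	// trace.Span named span), following the JSON formatting RECOMMENDED in
+	// the Note below:
+	//
+	//	b, _ := json.Marshal([]map[string]string{{"role": "user", "content": "Hi"}})
+	//	span.SetAttributes(GenAiPrompt(string(b)))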
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") + + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. 
For example, when using OpenAI client libraries to communicate + // with Mistral, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge. + GenAiSystemKey = attribute.Key("gen_ai.system") + + // GenAiUsageCompletionTokensKey is the attribute Key conforming to the + // "gen_ai.usage.completion_tokens" semantic conventions. It represents the + // number of tokens used in the LLM response (completion). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 180 + GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") + + // GenAiUsagePromptTokensKey is the attribute Key conforming to the + // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the + // number of tokens used in the LLM prompt. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") +) + +var ( + // OpenAI + GenAiSystemOpenai = GenAiSystemKey.String("openai") +) + +// GenAiCompletion returns an attribute KeyValue conforming to the +// "gen_ai.completion" semantic conventions. It represents the full response +// received from the LLM. +func GenAiCompletion(val string) attribute.KeyValue { + return GenAiCompletionKey.String(val) +} + +// GenAiPrompt returns an attribute KeyValue conforming to the +// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to +// an LLM. +func GenAiPrompt(val string) attribute.KeyValue { + return GenAiPromptKey.String(val) +} + +// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the LLM generates for a request. +func GenAiRequestMaxTokens(val int) attribute.KeyValue { + return GenAiRequestMaxTokensKey.Int(val) +} + +// GenAiRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// LLM a request is being made to. +func GenAiRequestModel(val string) attribute.KeyValue { + return GenAiRequestModelKey.String(val) +} + +// GenAiRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the LLM request. +func GenAiRequestTemperature(val float64) attribute.KeyValue { + return GenAiRequestTemperatureKey.Float64(val) +} + +// GenAiRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p +// sampling setting for the LLM request. +func GenAiRequestTopP(val float64) attribute.KeyValue { + return GenAiRequestTopPKey.Float64(val) +} + +// GenAiResponseFinishReasons returns an attribute KeyValue conforming to +// the "gen_ai.response.finish_reasons" semantic conventions. It represents the +// array of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAiResponseFinishReasonsKey.StringSlice(val) +} + +// GenAiResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique +// identifier for the completion. 
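+// For example, an illustrative sketch (assuming a trace.Span named span):
+//
+//	span.SetAttributes(GenAiResponseID("chatcmpl-123"))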
+func GenAiResponseID(val string) attribute.KeyValue {
+	return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+	return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+	return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+	return GenAiUsagePromptTokensKey.Int(val)
+}
+
+// Attributes for GraphQL.
+const (
+	// GraphqlDocumentKey is the attribute Key conforming to the
+	// "graphql.document" semantic conventions. It represents the GraphQL
+	// document being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+	// Note: The value may be sanitized to exclude sensitive information.
+	GraphqlDocumentKey = attribute.Key("graphql.document")
+
+	// GraphqlOperationNameKey is the attribute Key conforming to the
+	// "graphql.operation.name" semantic conventions. It represents the name of
+	// the operation being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'findBookByID'
+	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+	// GraphqlOperationTypeKey is the attribute Key conforming to the
+	// "graphql.operation.type" semantic conventions. It represents the type of
+	// the operation being executed.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'query', 'mutation', 'subscription'
+	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+	// GraphQL query
+	GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+	// GraphQL mutation
+	GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+	// GraphQL subscription
+	GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+	return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+	return GraphqlOperationNameKey.String(val)
+}
+
+// Attributes for the Heroku platform on which the application is
+// running.
+const (
+	// HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+	// semantic conventions.
It represents the unique identifier for the
+	// application
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+	HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+	// HerokuReleaseCommitKey is the attribute Key conforming to the
+	// "heroku.release.commit" semantic conventions. It represents the commit
+	// hash for the current release
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+	HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+	// HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+	// "heroku.release.creation_timestamp" semantic conventions. It represents
+	// the time and date the release was created
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2022-10-23T18:00:42Z'
+	HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+	return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+	return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+	return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
+	// conventions. It represents the CPU architecture the host system is
+	// running on.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	HostArchKey = attribute.Key("host.arch")
+
+	// HostCPUCacheL2SizeKey is the attribute Key conforming to the
+	// "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+	// of level 2 memory cache available to the processor (in Bytes).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 12288000
+	HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+	// HostCPUFamilyKey is the attribute Key conforming to the
+	// "host.cpu.family" semantic conventions. It represents the family or
+	// generation of the CPU.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '6', 'PA-RISC 1.1e'
+	HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+	// HostCPUModelIDKey is the attribute Key conforming to the
+	// "host.cpu.model.id" semantic conventions. It represents the model
+	// identifier. It provides more granular information about the CPU,
+	// distinguishing it from other CPUs within the same family.
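+	// For example, an illustrative sketch: the same attribute can be built
+	// directly from the key, equivalently to the HostCPUModelID helper
+	// defined later in this file:
+	//
+	//	kv := HostCPUModelIDKey.String("9000/778/B180L")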
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '6', '9000/778/B180L'
+	HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+	// HostCPUModelNameKey is the attribute Key conforming to the
+	// "host.cpu.model.name" semantic conventions. It represents the model
+	// designation of the processor.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+	HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+	// HostCPUSteppingKey is the attribute Key conforming to the
+	// "host.cpu.stepping" semantic conventions. It represents the stepping or
+	// core revisions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1', 'r1p1'
+	HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+	// HostCPUVendorIDKey is the attribute Key conforming to the
+	// "host.cpu.vendor.id" semantic conventions. It represents the processor
+	// manufacturer identifier. A maximum 12-character string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'GenuineIntel'
+	// Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+	// ID string in EBX, EDX and ECX registers. Writing these to memory in this
+	// order results in a 12-character string.
+	HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+	// HostIDKey is the attribute Key conforming to the "host.id" semantic
+	// conventions. It represents the unique host ID. For Cloud, this must be
+	// the instance_id assigned by the cloud provider. For non-containerized
+	// systems, this should be the `machine-id`. See the table below for the
+	// sources to use to determine the `machine-id` based on operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+	HostIDKey = attribute.Key("host.id")
+
+	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID or host OS image ID.
+	// For Cloud, this value is from the provider.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+
+	// HostImageNameKey is the attribute Key conforming to the
+	// "host.image.name" semantic conventions. It represents the name of the VM
+	// image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+
+	// HostImageVersionKey is the attribute Key conforming to the
+	// "host.image.version" semantic conventions. It represents the version
+	// string of the VM image or host OS as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+
+	// HostIPKey is the attribute Key conforming to the "host.ip" semantic
+	// conventions. It represents the available IP addresses of the host,
+	// excluding loopback interfaces.
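+	// For example, an illustrative sketch using the variadic HostIP helper
+	// defined later in this file:
+	//
+	//	kv := HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e")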
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
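+// For example, an illustrative sketch (assuming the
+// go.opentelemetry.io/otel/sdk/resource package and this package's
+// SchemaURL constant) describing the host CPU on a resource:
+//
+//	res, _ := resource.Merge(resource.Default(), resource.NewWithAttributes(
+//		SchemaURL,
+//		HostCPUVendorID("GenuineIntel"),
+//		HostCPUModelName("11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"),
+//	))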
+func HostCPUModelName(val string) attribute.KeyValue {
+	return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+	return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+	return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+	return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+	return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+	return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+	return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+	return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+	return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+	return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+	return HostTypeKey.String(val)
+}
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+	// HTTPConnectionStateKey is the attribute Key conforming to the
+	// "http.connection.state" semantic conventions. It represents the state of
+	// the HTTP connection in the HTTP connection pool.
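+	// For example, an illustrative sketch (assuming a trace.Span named
+	// span): the enum variants declared later in this file are complete
+	// KeyValues, e.g.
+	//
+	//	span.SetAttributes(HTTPConnectionStateIdle)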
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'active', 'idle'
+	HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+	// HTTPRequestBodySizeKey is the attribute Key conforming to the
+	// "http.request.body.size" semantic conventions. It represents the size of
+	// the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3495
+	HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+	// HTTPRequestMethodKey is the attribute Key conforming to the
+	// "http.request.method" semantic conventions. It represents the HTTP
+	// request method.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Note: HTTP request method value SHOULD be "known" to the
+	// instrumentation.
+	// By default, this convention defines "known" methods as the ones listed
+	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+	// and the PATCH method defined in
+	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+	//
+	// If the HTTP request method is not known to instrumentation, it MUST set
+	// the `http.request.method` attribute to `_OTHER`.
+	//
+	// If the HTTP instrumentation could end up converting valid HTTP request
+	// methods to `_OTHER`, then it MUST provide a way to override
+	// the list of known HTTP methods. If this override is done via environment
+	// variable, then the environment variable MUST be named
+	// OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+	// list of case-sensitive known HTTP methods
+	// (this list MUST be a full override of the default known methods, it is
+	// not a list of known methods in addition to the defaults).
+	//
+	// HTTP method names are case-sensitive and `http.request.method` attribute
+	// value MUST match a known HTTP method name exactly.
+	// Instrumentations for specific web frameworks that consider HTTP methods
+	// to be case insensitive, SHOULD populate a canonical equivalent.
+	// Tracing instrumentations that do so, MUST also set
+	// `http.request.method_original` to the original value.
+	HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+	// HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+	// "http.request.method_original" semantic conventions. It represents the
+	// original HTTP method sent by the client in the request line.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'GeT', 'ACL', 'foo'
+	HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+	// HTTPRequestResendCountKey is the attribute Key conforming to the
+	// "http.request.resend_count" semantic conventions. It represents the
+	// ordinal number of request resending attempt (for any reason, including
+	// redirects).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 3
+	// Note: The resend count SHOULD be updated each time an HTTP request gets
+	// resent by the client, regardless of what was the cause of the resending
+	// (e.g.
redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the + // "http.request.size" semantic conventions. It represents the total size + // of the request in bytes. This should be the total number of bytes sent + // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and request body if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size + // of the response in bytes. This should be the total number of bytes sent + // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and + // HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. 
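+	// For example, an illustrative sketch (assuming a trace.Span named span
+	// for the matched server request):
+	//
+	//	span.SetAttributes(HTTPRoute("/users/:userID?"))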
+ HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // active state + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of +// the request in bytes. This should be the total number of bytes sent over the +// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. 
It represents the total size of
+// the response in bytes. This should be the total number of bytes sent over
+// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+	return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+	return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
+
+// Java Virtual Machine related attributes.
+const (
+	// JvmBufferPoolNameKey is the attribute Key conforming to the
+	// "jvm.buffer.pool.name" semantic conventions. It represents the name of
+	// the buffer pool.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mapped', 'direct'
+	// Note: Pool names are generally obtained via
+	// [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+	JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+
+	// JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
+	// semantic conventions. It represents the name of the garbage collector
+	// action.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'end of minor GC', 'end of major GC'
+	// Note: Garbage collector action is generally obtained via
+	// [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
+	JvmGcActionKey = attribute.Key("jvm.gc.action")
+
+	// JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
+	// semantic conventions. It represents the name of the garbage collector.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'G1 Young Generation', 'G1 Old Generation'
+	// Note: Garbage collector name is generally obtained via
+	// [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
+	JvmGcNameKey = attribute.Key("jvm.gc.name")
+
+	// JvmMemoryPoolNameKey is the attribute Key conforming to the
+	// "jvm.memory.pool.name" semantic conventions. It represents the name of
+	// the memory pool.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+	// Note: Pool names are generally obtained via
+	// [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+	JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+	// JvmMemoryTypeKey is the attribute Key conforming to the
+	// "jvm.memory.type" semantic conventions. It represents the type of
+	// memory.
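+	// For example, an illustrative sketch (assuming a metric.Int64Counter
+	// named counter from go.opentelemetry.io/otel/metric, a context ctx, and
+	// an int64 value used), attaching the heap variant declared later in
+	// this file:
+	//
+	//	counter.Add(ctx, used, metric.WithAttributes(JvmMemoryTypeHeap))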
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'heap', 'non_heap'
+	JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+
+	// JvmThreadDaemonKey is the attribute Key conforming to the
+	// "jvm.thread.daemon" semantic conventions. It represents whether the
+	// thread is a daemon or not.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
+
+	// JvmThreadStateKey is the attribute Key conforming to the
+	// "jvm.thread.state" semantic conventions. It represents the state of the
+	// thread.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'runnable', 'blocked'
+	JvmThreadStateKey = attribute.Key("jvm.thread.state")
+)
+
+var (
+	// Heap memory
+	JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+	// Non-heap memory
+	JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+var (
+	// A thread that has not yet started is in this state
+	JvmThreadStateNew = JvmThreadStateKey.String("new")
+	// A thread executing in the Java virtual machine is in this state
+	JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
+	// A thread that is blocked waiting for a monitor lock is in this state
+	JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
+	// A thread that is waiting indefinitely for another thread to perform a particular action is in this state
+	JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
+	// A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
+	JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
+	// A thread that has exited is in this state
+	JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+	return JvmBufferPoolNameKey.String(val)
+}
+
+// JvmGcAction returns an attribute KeyValue conforming to the
+// "jvm.gc.action" semantic conventions. It represents the name of the garbage
+// collector action.
+func JvmGcAction(val string) attribute.KeyValue {
+	return JvmGcActionKey.String(val)
+}
+
+// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
+// semantic conventions. It represents the name of the garbage collector.
+func JvmGcName(val string) attribute.KeyValue {
+	return JvmGcNameKey.String(val)
+}
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+	return JvmMemoryPoolNameKey.String(val)
+}
+
+// JvmThreadDaemon returns an attribute KeyValue conforming to the
+// "jvm.thread.daemon" semantic conventions. It represents whether the
+// thread is a daemon or not.
+func JvmThreadDaemon(val bool) attribute.KeyValue {
+	return JvmThreadDaemonKey.Bool(val)
+}
+
+// Kubernetes resource attributes.
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the
+	// "k8s.cluster.name" semantic conventions. It represents the name of the
+	// cluster.
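+	// For example, an illustrative sketch (assuming the
+	// go.opentelemetry.io/otel/sdk/resource package, this package's
+	// SchemaURL constant, and the K8SClusterName helper that accompanies
+	// this key):
+	//
+	//	res := resource.NewWithAttributes(SchemaURL,
+	//		K8SClusterName("opentelemetry-cluster"))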
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key + // conforming to the "k8s.container.status.last_terminated_reason" semantic + // conventions. It represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Evicted', 'Error' + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. 
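// A sketch attaching the Kubernetes attributes above to an SDK resource,
// assuming go.opentelemetry.io/otel/sdk/resource and the package's SchemaURL
// constant; the cluster, namespace, and pod values echo the documented
// examples.
package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func podResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL, // schema the attribute keys below belong to
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName("default"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
	)
}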
+func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
+func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Attributes for a file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. 
+func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// The generic attributes that may be used in any Log Record. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means, that two + // distinguishable log records MUST have different values. + // The id MAY be an [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
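// A sketch bundling the log-stream, log-file, and log-record attributes
// above, using only the attribute package from this module; the paths and the
// ULID echo the documented examples.
package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func tailedFileAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.LogIostreamStdout, // enum member, already a KeyValue
		semconv.LogFileName("audit.log"),
		semconv.LogFilePath("/var/log/mysql/audit.log"),
		semconv.LogRecordUID("01ARZ3NDEKTSV4RRFFQ69G5FAV"),
	}
}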
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to + // the "messaging.destination.partition.id" semantic conventions. It + // represents the identifier of the partition messages are sent to or + // received from, unique within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1' + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. 
It + // represents the name of the original destination the message was + // published to + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. + // If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If + // both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents the messaging + // system as identified by the client instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate + // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on + // the instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are delivered to or processed by a consumer + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") + // One or more messages are settled + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + MessagingSystemServicebus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
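// A sketch of a producer span combining the messaging enums and helpers
// above, assuming the otel trace API; the queue name and message ID echo the
// documented examples.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

func publish(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "publish MyQueue",
		trace.WithSpanKind(trace.SpanKindProducer))
	defer span.End()
	span.SetAttributes(
		semconv.MessagingSystemRabbitmq,
		semconv.MessagingOperationTypePublish,
		semconv.MessagingDestinationName("MyQueue"),
		semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
	)
}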
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. 
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
+
+// This group describes attributes specific to Apache Kafka.
+const (
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka, which are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to ensure
+// they're processed on the same partition.
They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// This group describes attributes specific to RabbitMQ. +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming + // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. + // It represents the rabbitMQ message delivery tag + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 123 + MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic +// conventions. It represents the rabbitMQ message delivery tag +func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitmqMessageDeliveryTagKey.Int(val) +} + +// This group describes attributes specific to RocketMQ. +const ( + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. 
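// A sketch for the Kafka-specific attributes above on a consumer span,
// assuming the otel trace API; the topic, group, and key echo the documented
// examples.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
	"go.opentelemetry.io/otel/trace"
)

func consume(ctx context.Context, offset int) {
	_, span := otel.Tracer("example").Start(ctx, "process my-topic",
		trace.WithSpanKind(trace.SpanKindConsumer))
	defer span.End()
	span.SetAttributes(
		semconv.MessagingSystemKafka,
		semconv.MessagingKafkaConsumerGroup("my-group"),
		semconv.MessagingKafkaMessageKey("myKey"),
		semconv.MessagingKafkaMessageOffset(offset),
	)
}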
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of message, another way to mark message besides message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources, resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within
+// the same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of message, another way to mark message besides message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions.
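// A sketch combining the RocketMQ attributes above; the values echo the
// documented examples, and the enum members are used as-is.
package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func rocketmqAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.MessagingSystemRocketmq,
		semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
		semconv.MessagingRocketmqNamespace("myNamespace"),
		semconv.MessagingRocketmqMessageTag("tagA"),
		semconv.MessagingRocketmqMessageKeys("keyA", "keyB"),
		semconv.MessagingRocketmqConsumptionModelClustering,
	}
}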
It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// This group describes attributes specific to GCP Pub/Sub. +const ( + // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. + // It represents the ack deadline in seconds set for the modify ack + // deadline request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It + // represents the ack id for a given message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack_id' + MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key + // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" + // semantic conventions. It represents the delivery attempt for a given + // message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") +) + +// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic +// conventions. It represents the ack deadline in seconds set for the modify +// ack deadline request. +func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It +// represents the ack id for a given message. +func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageAckIDKey.String(val) +} + +// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. 
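// A sketch for the GCP Pub/Sub attributes above; the ack ID and ordering key
// echo the documented examples.
package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func pubsubAttrs(attempt int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.MessagingSystemGCPPubsub,
		semconv.MessagingGCPPubsubMessageAckID("ack_id"),
		semconv.MessagingGCPPubsubMessageDeliveryAttempt(attempt),
		semconv.MessagingGCPPubsubMessageOrderingKey("ordering_key"),
	}
}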
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
+// This group describes attributes specific to Azure Service Bus.
+const (
+ // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+ // conforming to the "messaging.servicebus.destination.subscription_name"
+ // semantic conventions. It represents the name of the subscription in the
+ // topic messages are received from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mySubscription'
+ MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+ // MessagingServicebusDispositionStatusKey is the attribute Key conforming
+ // to the "messaging.servicebus.disposition_status" semantic conventions.
+ // It describes the [settlement
+ // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServicebusMessageDeliveryCountKey is the attribute Key
+ // conforming to the "messaging.servicebus.message.delivery_count" semantic
+ // conventions. It represents the number of deliveries that have been
+ // attempted for this message.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+ // conforming to the "messaging.servicebus.message.enqueued_time" semantic
+ // conventions. It represents the UTC epoch seconds at which the message
+ // has been accepted and stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1701393730
+ MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+ // Message is completed
+ MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+ // Message is abandoned
+ MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+ // Message is sent to dead letter queue
+ MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+ // Message is deferred
+ MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+ return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
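// A sketch for the Azure Service Bus attributes above, pairing a disposition
// enum member with the per-message helpers; the values echo the documented
// examples.
package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func servicebusSettleAttrs(deliveries int) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.MessagingSystemServicebus,
		semconv.MessagingServicebusDestinationSubscriptionName("mySubscription"),
		semconv.MessagingServicebusMessageDeliveryCount(deliveries),
		semconv.MessagingServicebusDispositionStatusComplete,
	}
}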
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServicebusMessageDeliveryCountKey.Int(val) +} + +// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServicebusMessageEnqueuedTimeKey.Int(val) +} + +// This group describes attributes specific to Azure Event Hubs. +const ( + // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to + // the "messaging.eventhubs.consumer.group" semantic conventions. It + // represents the name of the consumer group the event consumer is + // associated with. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'indexer' + MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") + + // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming + // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. + // It represents the UTC epoch seconds at which the message has been + // accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") +) + +// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming +// to the "messaging.eventhubs.consumer.group" semantic conventions. It +// represents the name of the consumer group the event consumer is associated +// with. +func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { + return MessagingEventhubsConsumerGroupKey.String(val) +} + +// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.eventhubs.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
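// A sketch for the Azure Event Hubs attributes above; the consumer group and
// enqueued time echo the documented examples.
package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
)

func eventhubsAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.MessagingSystemEventhubs,
		semconv.MessagingEventhubsConsumerGroup("indexer"),
		semconv.MessagingEventhubsMessageEnqueuedTime(1701393730),
	}
}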
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection.type. It may be the type of cell
+	// technology connection, but it could be used for describing details
+	// about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+	// NetworkIoDirectionKey is the attribute Key conforming to the
+	// "network.io.direction" semantic conventions. It represents the network
+	// IO operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'transmit'
+	NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+	// NetworkLocalPortKey is the attribute Key conforming to the
+	// "network.local.port" semantic conventions. It represents the local port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkLocalPortKey = attribute.Key("network.local.port")
+
+	// NetworkPeerAddressKey is the attribute Key conforming to the
+	// "network.peer.address" semantic conventions. It represents the peer
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+	// NetworkPeerPortKey is the attribute Key conforming to the
+	// "network.peer.port" semantic conventions. It represents the peer port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 65123
+	NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// application layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // actual version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
+	// B
+	NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+	// LTE
+	NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+	// EHRPD
+	NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+	// HSPAP
+	NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+	// GSM
+	NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+	// TD-SCDMA
+	NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+	// IWLAN
+	NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+	// 5G NR (New Radio)
+	NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+	// 5G NRNSA (New Radio Non-Standalone)
+	NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+	// LTE CA
+	NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+	// wifi
+	NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+	// wired
+	NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+	// cell
+	NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+	// unavailable
+	NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+	// unknown
+	NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+	// transmit
+	NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
+	// receive
+	NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
+)
+
+var (
+	// TCP
+	NetworkTransportTCP = NetworkTransportKey.String("tcp")
+	// UDP
+	NetworkTransportUDP = NetworkTransportKey.String("udp")
+	// Named or anonymous pipe
+	NetworkTransportPipe = NetworkTransportKey.String("pipe")
+	// Unix domain socket
+	NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+	// IPv4
+	NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+	// IPv6
+	NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+	return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+	return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+	return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+	return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port +// number of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of + // the OCI image manifest. For container images specifically is the digest + // by which the container image is known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows [OCI Image Manifest + // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), + // and specifically the [Digest + // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). + // An example can be found in [Example Image + // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). + OciManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OciManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically is the digest by which +// the container image is known. +func OciManifestDigest(val string) attribute.KeyValue { + return OciManifestDigestKey.String(val) +} + +// Attributes used by the OpenTracing Shim layer. +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span. 
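A rough usage sketch for the network attributes above, assuming the imports from the earlier example and an existing `span` of type `trace.Span`; the address, port, and protocol values are placeholders:

```go
// dialAttributes annotates an existing span with connection-level network
// attributes. NetworkTransportTCP is one of the enum vars defined above.
func dialAttributes(span trace.Span) {
	span.SetAttributes(
		semconv.NetworkTransportTCP,
		semconv.NetworkPeerAddress("10.1.2.80"),
		semconv.NetworkPeerPort(65123),
		semconv.NetworkProtocolName("http"),
		semconv.NetworkProtocolVersion("1.1"),
	)
}
```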
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. 
+// It represents the human readable (not
+// intended to be parsed) OS version information, like e.g. reported by `ver`
+// or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+	return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+	return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+	return OSVersionKey.String(val)
+}
+
+// Attributes reserved for OpenTelemetry
+const (
+	// OTelStatusCodeKey is the attribute Key conforming to the
+	// "otel.status_code" semantic conventions. It represents the name of the
+	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
+	// UNSET.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+	// OTelStatusDescriptionKey is the attribute Key conforming to the
+	// "otel.status_description" semantic conventions. It represents the
+	// description of the Status if it has a value, otherwise not set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'resource not found'
+	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+	// The operation has been validated by an Application developer or Operator to have completed successfully
+	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+	// The operation contains an error
+	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+	return OTelStatusDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+	// OTelScopeNameKey is the attribute Key conforming to the
+	// "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OTelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0.0'
+	OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+	return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](/docs/resource/README.md#service) of the remote
+	// service. SHOULD be equal to the actual `service.name` resource attribute
+	// of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
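A brief sketch of `peer.service` on an outgoing call, again assuming the imports from the first example; "AuthTokenCache" mirrors the example given in the comment above and the span name is hypothetical:

```go
// callAuthTokenCache starts a client span that names the remote service.
func callAuthTokenCache(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "GetToken",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(semconv.PeerService("AuthTokenCache")),
	)
	defer span.End()
	// ... perform the remote call ...
}
```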
+
+// An operating system process.
+const (
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'cmd/otecol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string representing the
+	// full command. On Windows, can be set to the result of `GetCommandLineW`.
+	// Do not set this if you have to assemble it just for monitoring; use
+	// `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessContextSwitchTypeKey is the attribute Key conforming to the
+	// "process.context_switch_type" semantic conventions. It specifies whether
+	// the context switches for this data point were voluntary or involuntary.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+	// ProcessCreationTimeKey is the attribute Key conforming to the
+	// "process.creation.time" semantic conventions. It represents the date and
+	// time the process was created, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:25:34.853Z'
+	ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessExitCodeKey is the attribute Key conforming to the
+	// "process.exit.code" semantic conventions. It represents the exit code of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 127
+	ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+	// ProcessExitTimeKey is the attribute Key conforming to the
+	// "process.exit.time" semantic conventions. It represents the date and
+	// time the process exited, in ISO 8601 format.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2023-11-21T09:26:12.315Z'
+	ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+	// ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+	// "process.group_leader.pid" semantic conventions. It represents the PID
+	// of the process's group leader. This is also the process group ID (PGID)
+	// of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 23
+	ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+	// ProcessInteractiveKey is the attribute Key conforming to the
+	// "process.interactive" semantic conventions. It represents whether
+	// the process is connected to an interactive shell.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessInteractiveKey = attribute.Key("process.interactive")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+
+	// ProcessPagingFaultTypeKey is the attribute Key conforming to the
+	// "process.paging.fault_type" semantic conventions. It represents the type
+	// of page fault for this data point. Type `major` is for major/hard page
+	// faults, and `minor` is for minor/soft page faults.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
+
+	// ProcessParentPIDKey is the attribute Key conforming to the
+	// "process.parent_pid" semantic conventions. It represents the parent
+	// Process identifier (PPID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 111
+	ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+	// ProcessPIDKey is the attribute Key conforming to the "process.pid"
+	// semantic conventions. It represents the process identifier (PID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1234
+	ProcessPIDKey = attribute.Key("process.pid")
+
+	// ProcessRealUserIDKey is the attribute Key conforming to the
+	// "process.real_user.id" semantic conventions. It represents the real user
+	// ID (RUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1000
+	ProcessRealUserIDKey = attribute.Key("process.real_user.id")
+
+	// ProcessRealUserNameKey is the attribute Key conforming to the
+	// "process.real_user.name" semantic conventions. It represents the
+	// username of the real user of the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'operator'
+	ProcessRealUserNameKey = attribute.Key("process.real_user.name")
+
+	// ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+	// "process.runtime.description" semantic conventions. It represents an
+	// additional description about the runtime of the process, for example a
+	// specific vendor customization of the runtime environment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+	ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+	// ProcessRuntimeNameKey is the attribute Key conforming to the
+	// "process.runtime.name" semantic conventions. It represents the name of
+	// the runtime of this process. For compiled native binaries, this SHOULD
+	// be the name of the compiler.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'OpenJDK Runtime Environment'
+	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+	// ProcessRuntimeVersionKey is the attribute Key conforming to the
+	// "process.runtime.version" semantic conventions. It represents the
+	// version of the runtime of this process, as returned by the runtime
+	// without modification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.0.2'
+	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+	// ProcessSavedUserIDKey is the attribute Key conforming to the
+	// "process.saved_user.id" semantic conventions. It represents the saved
+	// user ID (SUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1002
+	ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+	// ProcessSavedUserNameKey is the attribute Key conforming to the
+	// "process.saved_user.name" semantic conventions. It represents the
+	// username of the saved user.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'operator'
+	ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+	// ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+	// "process.session_leader.pid" semantic conventions. It represents the PID
+	// of the process's session leader. This is also the session ID (SID) of
+	// the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 14
+	ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+	// ProcessUserIDKey is the attribute Key conforming to the
+	// "process.user.id" semantic conventions. It represents the effective user
+	// ID (EUID) of the process.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1001
+	ProcessUserIDKey = attribute.Key("process.user.id")
+
+	// ProcessUserNameKey is the attribute Key conforming to the
+	// "process.user.name" semantic conventions. It represents the username of
+	// the effective user of the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessUserNameKey = attribute.Key("process.user.name")
+
+	// ProcessVpidKey is the attribute Key conforming to the "process.vpid"
+	// semantic conventions. It represents the virtual process identifier.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 12
+	// Note: The process ID within a PID namespace. This is not necessarily
+	// unique across all processes on the host but it is unique within the
+	// process namespace that the process exists within.
+	ProcessVpidKey = attribute.Key("process.vpid")
+)
+
+var (
+	// voluntary
+	ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+	// involuntary
+	ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+var (
+	// major
+	ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+	// minor
+	ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and
+// time the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+	return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+	return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time
+// the process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+	return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of
+// the process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+	return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+	return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user +// ID (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username +// of the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the +// "process.vpid" semantic conventions. It represents the virtual process +// identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// Attributes for process CPU +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state of + // the process. 
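The process attributes are typically recorded once, on the SDK resource, rather than per span. A minimal sketch under the same import-path assumption; the executable name is a placeholder, and `semconv.SchemaURL` is assumed to be the schema constant exported by the same package:

```go
import (
	"os"
	"runtime"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // path assumed
)

// processResource describes the running process using the helpers above.
func processResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ProcessPID(os.Getpid()),
		semconv.ProcessExecutableName("otelcol"), // placeholder
		semconv.ProcessRuntimeName("go"),
		semconv.ProcessRuntimeVersion(runtime.Version()),
	)
}
```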
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessCPUStateKey = attribute.Key("process.cpu.state") +) + +var ( + // system + ProcessCPUStateSystem = ProcessCPUStateKey.String("system") + // user + ProcessCPUStateUser = ProcessCPUStateKey.String("user") + // wait + ProcessCPUStateWait = ProcessCPUStateKey.String("wait") +) + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. 
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+	// RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+	// semantic conventions. It MUST be calculated as two different counters
+	// starting from `1`, one for sent messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+	// RPCMessageTypeKey is the attribute Key conforming to the
+	// "rpc.message.type" semantic conventions. It represents whether this
+	// is a received or sent message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+	// RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+	// "rpc.message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
+	// semantic conventions. It represents the name of the (logical) method
+	// being called, must be equal to the $method part in the span name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
+	// semantic conventions. It represents the full (logical) name of the
+	// service being called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// class. The `code.namespace` attribute may be used to store the latter
+	// (despite the attribute name, it may include a class name; e.g., class
+	// with method actually executing the call on the server side, RPC client
+	// stub class on the client side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
+	// semantic conventions. It represents a string identifying the remoting
+	// system. See below for a list of well-known identifiers.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // sent + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// doesn't specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. +func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the +// "rpc.message.id" semantic conventions. It represents the mUST be calculated +// as two different counters starting from `1` one for sent messages and one +// for received message. +func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to +// the "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to + // distinguish instances of the same service that exist at the same time + // (e.g. instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 [RFC + // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an + // inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be + // used as source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the + // purposes of identifying a service instance is + // needed. 
+
+// A service instance.
+const (
+	// ServiceInstanceIDKey is the attribute Key conforming to the
+	// "service.instance.id" semantic conventions. It represents the string ID
+	// of the service instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+	// Note: MUST be unique for each instance of the same
+	// `service.namespace,service.name` pair (in other words
+	// `service.namespace,service.name,service.instance.id` triplet MUST be
+	// globally unique). The ID helps to distinguish instances of the same
+	// service that exist at the same time (e.g. instances of a horizontally
+	// scaled service).
+	//
+	// Implementations, such as SDKs, are recommended to generate a random
+	// Version 1 or Version 4 [RFC 4122](https://www.ietf.org/rfc/rfc4122.txt)
+	// UUID, but are free to use an inherent unique ID as the source of this
+	// value if stability is desirable. In that case, the ID SHOULD be used as
+	// source of a UUID Version 5 and SHOULD use the following UUID as the
+	// namespace: `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+	//
+	// UUIDs are typically recommended, as only an opaque value for the
+	// purposes of identifying a service instance is needed. Similar to what
+	// can be seen in the man page for the
+	// [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
+	// file, the underlying data, such as pod name and namespace, should be
+	// treated as confidential, being the user's choice to expose it or not
+	// via another resource attribute.
+	//
+	// For applications running behind an application server (like unicorn),
+	// we do not recommend using one identifier for all processes
+	// participating in the application. Instead, each division (e.g. a worker
+	// thread in unicorn) is recommended to have its own instance.id.
+	//
+	// It's not recommended for a Collector to set `service.instance.id` if it
+	// can't unambiguously determine the service instance that is generating
+	// that telemetry. For instance, creating a UUID based on `pod.name` will
+	// likely be wrong, as the Collector might not know from which container
+	// within that pod the telemetry originated. However, Collectors can set
+	// the `service.instance.id` if they can unambiguously determine the
+	// service instance for that telemetry. This is typically the case for
+	// scraping receivers, as they know the target address and port.
+	ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+	// ServiceNameKey is the attribute Key conforming to the "service.name"
+	// semantic conventions. It represents the logical name of the service.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'shoppingcart'
+	// Note: MUST be the same for all instances of horizontally scaled
+	// services. If the value was not specified, SDKs MUST fallback to
+	// `unknown_service:` concatenated with
+	// [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
+	// `process.executable.name` is not available, the value MUST be set to
+	// `unknown_service`.
+	ServiceNameKey = attribute.Key("service.name")
+
+	// ServiceNamespaceKey is the attribute Key conforming to the
+	// "service.namespace" semantic conventions. It represents a namespace for
+	// `service.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Shop'
+	// Note: A string value having a meaning that helps to distinguish a group
+	// of services, for example the team name that owns a group of services.
+	// `service.name` is expected to be unique within the same namespace. If
+	// `service.namespace` is not specified in the Resource then `service.name`
+	// is expected to be unique for all services that have no explicit
+	// namespace defined (so the empty/unspecified namespace is simply one more
+	// valid namespace). Zero-length namespace string is assumed equal to
+	// unspecified namespace.
+	ServiceNamespaceKey = attribute.Key("service.namespace")
+
+	// ServiceVersionKey is the attribute Key conforming to the
+	// "service.version" semantic conventions. It represents the version string
+	// of the service API or implementation. The format is not defined by these
+	// conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2.0.0', 'a01dbef8a'
+	ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+	return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+	return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+	return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+	return ServiceVersionKey.String(val)
+}
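+
+// Editor's note: an illustrative sketch, not part of the generated API, of
+// describing a service with these attributes via an SDK resource; the values
+// are taken from the Examples above:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.ServiceName("shoppingcart"),
+//		semconv.ServiceNamespace("Shop"),
+//		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+//		semconv.ServiceVersion("2.0.0"),
+//	)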
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+	// SessionIDKey is the attribute Key conforming to the "session.id"
+	// semantic conventions. It represents a unique id to identify a session.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionIDKey = attribute.Key("session.id")
+
+	// SessionPreviousIDKey is the attribute Key conforming to the
+	// "session.previous_id" semantic conventions. It represents the previous
+	// `session.id` for this user, when known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+	return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+	return SessionPreviousIDKey.String(val)
+}
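+
+// Editor's note: an illustrative sketch, not part of the generated API, of
+// linking a new session to its predecessor; both IDs are example values:
+//
+//	span.SetAttributes(
+//		semconv.SessionID("00112233-4455-6677-8899-aabbccddeeff"),
+//		semconv.SessionPreviousID("ffeeddcc-bbaa-9988-7766-554433221100"),
+//	)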
+
+// SignalR attributes
+const (
+	// SignalrConnectionStatusKey is the attribute Key conforming to the
+	// "signalr.connection.status" semantic conventions. It represents the
+	// SignalR HTTP connection closure status.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'app_shutdown', 'timeout'
+	SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+	// SignalrTransportKey is the attribute Key conforming to the
+	// "signalr.transport" semantic conventions. It represents the [SignalR
+	// transport
+	// type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'web_sockets', 'long_polling'
+	SignalrTransportKey = attribute.Key("signalr.transport")
+)
+
+var (
+	// The connection was closed normally
+	SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
+	// The connection was closed due to a timeout
+	SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
+	// The connection was closed because the app is shutting down
+	SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
+)
+
+var (
+	// ServerSentEvents protocol
+	SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
+	// LongPolling protocol
+	SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
+	// WebSockets protocol
+	SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
+)
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+	// SourceAddressKey is the attribute Key conforming to the "source.address"
+	// semantic conventions. It represents the source address - domain name if
+	// available without reverse DNS lookup; otherwise, IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the destination side, and when communicating
+	// through an intermediary, `source.address` SHOULD represent the source
+	// address behind any intermediaries, for example proxies, if it's
+	// available.
+	SourceAddressKey = attribute.Key("source.address")
+
+	// SourcePortKey is the attribute Key conforming to the "source.port"
+	// semantic conventions. It represents the source port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3389, 2888
+	SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func SourceAddress(val string) attribute.KeyValue {
+	return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+	return SourcePortKey.Int(val)
+}
+
+// Describes System attributes
+const (
+	// SystemDeviceKey is the attribute Key conforming to the "system.device"
+	// semantic conventions. It represents the device identifier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '(identifier)'
+	SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier.
+func SystemDevice(val string) attribute.KeyValue {
+	return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU attributes
+const (
+	// SystemCPULogicalNumberKey is the attribute Key conforming to the
+	// "system.cpu.logical_number" semantic conventions. It represents the
+	// logical CPU number [0..n-1].
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1
+	SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+	// SystemCPUStateKey is the attribute Key conforming to the
+	// "system.cpu.state" semantic conventions. It represents the state of the
+	// CPU.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'idle', 'interrupt'
+	SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+	// user
+	SystemCPUStateUser = SystemCPUStateKey.String("user")
+	// system
+	SystemCPUStateSystem = SystemCPUStateKey.String("system")
+	// nice
+	SystemCPUStateNice = SystemCPUStateKey.String("nice")
+	// idle
+	SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+	// iowait
+	SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+	// interrupt
+	SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+	// steal
+	SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1].
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+	return SystemCPULogicalNumberKey.Int(val)
+}
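+
+// Editor's note: an illustrative sketch, not part of the generated API. These
+// attributes are intended for metric instruments; an attribute set could be
+// built like so:
+//
+//	attrs := attribute.NewSet(
+//		semconv.SystemCPULogicalNumber(1),
+//		semconv.SystemCPUStateUser,
+//	)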
+
+// Describes System Memory attributes
+const (
+	// SystemMemoryStateKey is the attribute Key conforming to the
+	// "system.memory.state" semantic conventions. It represents the memory
+	// state.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'free', 'cached'
+	SystemMemoryStateKey = attribute.Key("system.memory.state")
+)
+
+var (
+	// used
+	SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+	// free
+	SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+	// shared
+	SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+	// buffers
+	SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+	// cached
+	SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Describes System Memory Paging attributes
+const (
+	// SystemPagingDirectionKey is the attribute Key conforming to the
+	// "system.paging.direction" semantic conventions. It represents the paging
+	// access direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'in'
+	SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+	// SystemPagingStateKey is the attribute Key conforming to the
+	// "system.paging.state" semantic conventions. It represents the memory
+	// paging state.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'free'
+	SystemPagingStateKey = attribute.Key("system.paging.state")
+
+	// SystemPagingTypeKey is the attribute Key conforming to the
+	// "system.paging.type" semantic conventions. It represents the memory
+	// paging type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'minor'
+	SystemPagingTypeKey = attribute.Key("system.paging.type")
+)
+
+var (
+	// in
+	SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+	// out
+	SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+var (
+	// used
+	SystemPagingStateUsed = SystemPagingStateKey.String("used")
+	// free
+	SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+var (
+	// major
+	SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+	// minor
+	SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
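+
+// Editor's note: an illustrative sketch, not part of the generated API.
+// Memory and paging attribute sets for metric instruments could look like:
+//
+//	memUsed := attribute.NewSet(semconv.SystemMemoryStateUsed)
+//	pageIn := attribute.NewSet(
+//		semconv.SystemPagingDirectionIn,
+//		semconv.SystemPagingTypeMajor,
+//	)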
+
+// Describes Filesystem attributes
+const (
+	// SystemFilesystemModeKey is the attribute Key conforming to the
+	// "system.filesystem.mode" semantic conventions. It represents the
+	// filesystem mode.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'rw, ro'
+	SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+	// SystemFilesystemMountpointKey is the attribute Key conforming to the
+	// "system.filesystem.mountpoint" semantic conventions. It represents the
+	// filesystem mount path.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/mnt/data'
+	SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+	// SystemFilesystemStateKey is the attribute Key conforming to the
+	// "system.filesystem.state" semantic conventions. It represents the
+	// filesystem state.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'used'
+	SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+	// SystemFilesystemTypeKey is the attribute Key conforming to the
+	// "system.filesystem.type" semantic conventions. It represents the
+	// filesystem type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ext4'
+	SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+)
+
+var (
+	// used
+	SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+	// free
+	SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+	// reserved
+	SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+var (
+	// fat32
+	SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+	// exfat
+	SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+	// ntfs
+	SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+	// refs
+	SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+	// hfsplus
+	SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+	// ext4
+	SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+	return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+	return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network attributes
+const (
+	// SystemNetworkStateKey is the attribute Key conforming to the
+	// "system.network.state" semantic conventions. It represents the network
+	// connection state. A stateless protocol MUST NOT set this attribute.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'close_wait'
+	SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+	// close
+	SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+	// close_wait
+	SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+	// closing
+	SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+	// delete
+	SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+	// established
+	SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+	// fin_wait_1
+	SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+	// fin_wait_2
+	SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+	// last_ack
+	SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+	// listen
+	SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+	// syn_recv
+	SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+	// syn_sent
+	SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+	// time_wait
+	SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process attributes
+const (
+	// SystemProcessStatusKey is the attribute Key conforming to the
+	// "system.process.status" semantic conventions. It represents the process
+	// state, e.g., [Linux Process State
+	// Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'running'
+	SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+	// running
+	SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+	// sleeping
+	SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+	// stopped
+	SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+	// defunct
+	SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
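+
+// Editor's note: an illustrative sketch, not part of the generated API, of a
+// filesystem attribute set for metric instruments; the mountpoint is an
+// example value:
+//
+//	attrs := attribute.NewSet(
+//		semconv.SystemFilesystemMountpoint("/mnt/data"),
+//		semconv.SystemFilesystemTypeExt4,
+//		semconv.SystemFilesystemStateUsed,
+//	)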
+
+// Attributes for telemetry SDK.
+const (
+	// TelemetrySDKLanguageKey is the attribute Key conforming to the
+	// "telemetry.sdk.language" semantic conventions. It represents the
+	// language of the telemetry SDK.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+	// TelemetrySDKNameKey is the attribute Key conforming to the
+	// "telemetry.sdk.name" semantic conventions. It represents the name of the
+	// telemetry SDK as defined above.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	// Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+	// to `opentelemetry`.
+	// If another SDK, like a fork or a vendor-provided implementation, is
+	// used, this SDK MUST set the `telemetry.sdk.name` attribute to the
+	// fully-qualified class or module name of this SDK's main entry point
+	// or another suitable identifier depending on the language.
+	// The identifier `opentelemetry` is reserved and MUST NOT be used in this
+	// case.
+	// All custom identifiers SHOULD be stable across different versions of an
+	// implementation.
+	TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+	// TelemetrySDKVersionKey is the attribute Key conforming to the
+	// "telemetry.sdk.version" semantic conventions. It represents the version
+	// string of the telemetry SDK.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '1.2.3'
+	TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+
+	// TelemetryDistroNameKey is the attribute Key conforming to the
+	// "telemetry.distro.name" semantic conventions. It represents the name of
+	// the auto instrumentation agent or distribution, if used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'parts-unlimited-java'
+	// Note: Official auto instrumentation agents and distributions SHOULD set
+	// the `telemetry.distro.name` attribute to
+	// a string starting with `opentelemetry-`, e.g.
+	// `opentelemetry-java-instrumentation`.
+	TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+	// TelemetryDistroVersionKey is the attribute Key conforming to the
+	// "telemetry.distro.version" semantic conventions. It represents the
+	// version string of the auto instrumentation agent or distribution, if
+	// used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.2.3'
+	TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+)
+
+var (
+	// cpp
+	TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+	// dotnet
+	TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+	// erlang
+	TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+	// go
+	TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+	// java
+	TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+	// nodejs
+	TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+	// php
+	TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+	// python
+	TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+	// ruby
+	TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+	// rust
+	TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+	// swift
+	TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+	// webjs
+	TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
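+
+// Editor's note: the telemetry.sdk.* attributes are normally populated by the
+// SDK itself; an illustrative, hypothetical distro resource (not part of the
+// generated API) could look like:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.TelemetryDistroName("parts-unlimited-java"), // example value
+//		semconv.TelemetryDistroVersion("1.2.3"),
+//	)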
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+	return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+	return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+	return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+	return TelemetryDistroVersionKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+	// conventions. It represents the current "managed" thread ID (as opposed
+	// to OS thread ID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	ThreadIDKey = attribute.Key("thread.id")
+
+	// ThreadNameKey is the attribute Key conforming to the "thread.name"
+	// semantic conventions. It represents the current thread name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'main'
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+	return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+	return ThreadNameKey.String(val)
+}
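+
+// Editor's note: an illustrative sketch, not part of the generated API;
+// the values are taken from the Examples above:
+//
+//	span.SetAttributes(
+//		semconv.ThreadID(42),
+//		semconv.ThreadName("main"),
+//	)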
+
+// Semantic convention attributes in the TLS namespace.
+const (
+	// TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+	// semantic conventions. It represents the string indicating the
+	// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+	// used during the current connection.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+	// 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+	// Note: The values allowed for `tls.cipher` MUST be one of the
+	// `Descriptions` of the [registered TLS Cipher
+	// Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+	TLSCipherKey = attribute.Key("tls.cipher")
+
+	// TLSClientCertificateKey is the attribute Key conforming to the
+	// "tls.client.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the client. This is
+	// usually mutually-exclusive of `client.certificate_chain` since this
+	// value also exists in that list.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...'
+	TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+	// TLSClientCertificateChainKey is the attribute Key conforming to the
+	// "tls.client.certificate_chain" semantic conventions. It represents the
+	// array of PEM-encoded certificates that make up the certificate chain
+	// offered by the client. This is usually mutually-exclusive of
+	// `client.certificate` since that value should be the first certificate in
+	// the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...', 'MI...'
+	TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+	// TLSClientHashMd5Key is the attribute Key conforming to the
+	// "tls.client.hash.md5" semantic conventions. It represents the
+	// certificate fingerprint using the MD5 digest of DER-encoded version of
+	// certificate offered by the client. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+	TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+	// TLSClientHashSha1Key is the attribute Key conforming to the
+	// "tls.client.hash.sha1" semantic conventions. It represents the
+	// certificate fingerprint using the SHA1 digest of DER-encoded version of
+	// certificate offered by the client. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+	TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+	// TLSClientHashSha256Key is the attribute Key conforming to the
+	// "tls.client.hash.sha256" semantic conventions. It represents the
+	// certificate fingerprint using the SHA256 digest of DER-encoded version
+	// of certificate offered by the client. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+	TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+	// TLSClientIssuerKey is the attribute Key conforming to the
+	// "tls.client.issuer" semantic conventions. It represents the
+	// distinguished name of
+	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+	// of the issuer of the x.509 certificate presented by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+	// DC=com'
+	TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+	// TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+	// semantic conventions. It represents a hash that identifies clients based
+	// on how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
+	TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+	// TLSClientNotAfterKey is the attribute Key conforming to the
+	// "tls.client.not_after" semantic conventions. It represents the date/time
+	// indicating when client certificate is no longer considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021-01-01T00:00:00.000Z'
+	TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+	// TLSClientNotBeforeKey is the attribute Key conforming to the
+	// "tls.client.not_before" semantic conventions. It represents the
+	// date/time indicating when client certificate is first considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1970-01-01T00:00:00.000Z'
+	TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+	// TLSClientServerNameKey is the attribute Key conforming to the
+	// "tls.client.server_name" semantic conventions. It represents the
+	// hostname the client is attempting to connect to, also called the SNI
+	// (Server Name Indication); this tells the server which hostname the
+	// client requests.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry.io'
+	TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+	// TLSClientSubjectKey is the attribute Key conforming to the
+	// "tls.client.subject" semantic conventions. It represents the
+	// distinguished name of subject of the x.509 certificate presented by the
+	// client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+	TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+	// TLSClientSupportedCiphersKey is the attribute Key conforming to the
+	// "tls.client.supported_ciphers" semantic conventions. It represents the
+	// array of ciphers offered by the client during the client hello.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+	// "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+	TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+	// TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+	// conventions. It represents the string indicating the curve used for the
+	// given cipher, when applicable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'secp256r1'
+	TLSCurveKey = attribute.Key("tls.curve")
+
+	// TLSEstablishedKey is the attribute Key conforming to the
+	// "tls.established" semantic conventions. It represents the boolean flag
+	// indicating if the TLS negotiation was successful and transitioned to an
+	// encrypted tunnel.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: True
+	TLSEstablishedKey = attribute.Key("tls.established")
+
+	// TLSNextProtocolKey is the attribute Key conforming to the
+	// "tls.next_protocol" semantic conventions. It represents the string
+	// indicating the protocol being tunneled. Per the values in the [IANA
+	// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+	// this string should be lower case.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'http/1.1'
+	TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+	// TLSProtocolNameKey is the attribute Key conforming to the
+	// "tls.protocol.name" semantic conventions. It represents the normalized
+	// lowercase protocol name parsed from original string of the negotiated
+	// [SSL/TLS protocol
+	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+	// TLSProtocolVersionKey is the attribute Key conforming to the
+	// "tls.protocol.version" semantic conventions. It represents the numeric
+	// part of the version parsed from the original string of the negotiated
+	// [SSL/TLS protocol
+	// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.2', '3'
+	TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+	// TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+	// semantic conventions. It represents the boolean flag indicating if this
+	// TLS connection was resumed from an existing TLS negotiation.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: True
+	TLSResumedKey = attribute.Key("tls.resumed")
+
+	// TLSServerCertificateKey is the attribute Key conforming to the
+	// "tls.server.certificate" semantic conventions. It represents the
+	// PEM-encoded stand-alone certificate offered by the server. This is
+	// usually mutually-exclusive of `server.certificate_chain` since this
+	// value also exists in that list.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...'
+	TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+	// TLSServerCertificateChainKey is the attribute Key conforming to the
+	// "tls.server.certificate_chain" semantic conventions. It represents the
+	// array of PEM-encoded certificates that make up the certificate chain
+	// offered by the server. This is usually mutually-exclusive of
+	// `server.certificate` since that value should be the first certificate in
+	// the chain.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MII...', 'MI...'
+	TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+	// TLSServerHashMd5Key is the attribute Key conforming to the
+	// "tls.server.hash.md5" semantic conventions. It represents the
+	// certificate fingerprint using the MD5 digest of DER-encoded version of
+	// certificate offered by the server. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+	TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+	// TLSServerHashSha1Key is the attribute Key conforming to the
+	// "tls.server.hash.sha1" semantic conventions. It represents the
+	// certificate fingerprint using the SHA1 digest of DER-encoded version of
+	// certificate offered by the server. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+	TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+	// TLSServerHashSha256Key is the attribute Key conforming to the
+	// "tls.server.hash.sha256" semantic conventions. It represents the
+	// certificate fingerprint using the SHA256 digest of DER-encoded version
+	// of certificate offered by the server. For consistency with other hash
+	// values, this value should be formatted as an uppercase hash.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+	TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+	// TLSServerIssuerKey is the attribute Key conforming to the
+	// "tls.server.issuer" semantic conventions. It represents the
+	// distinguished name of
+	// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+	// of the issuer of the x.509 certificate presented by the server.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+	// DC=com'
+	TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+	// TLSServerJa3sKey is the attribute Key conforming to the
+	// "tls.server.ja3s" semantic conventions. It represents a hash that
+	// identifies servers based on how they perform an SSL/TLS handshake.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'd4e5b18d6b55c71272893221c96ba240'
+	TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+	// TLSServerNotAfterKey is the attribute Key conforming to the
+	// "tls.server.not_after" semantic conventions. It represents the date/time
+	// indicating when server certificate is no longer considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021-01-01T00:00:00.000Z'
+	TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+	// TLSServerNotBeforeKey is the attribute Key conforming to the
+	// "tls.server.not_before" semantic conventions. It represents the
+	// date/time indicating when server certificate is first considered valid.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1970-01-01T00:00:00.000Z'
+	TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+	// TLSServerSubjectKey is the attribute Key conforming to the
+	// "tls.server.subject" semantic conventions. It represents the
+	// distinguished name of subject of the x.509 certificate presented by the
+	// server.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+	TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+	// ssl
+	TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+	// tls
+	TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+	return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+	return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+	return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+	return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+	return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+	return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+	return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+	return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+	return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+	return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the hostname
+// the client is attempting to connect to, also called the SNI (Server Name
+// Indication).
+func TLSClientServerName(val string) attribute.KeyValue {
+	return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+	return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+	return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+	return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+	return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+	return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+func TLSProtocolVersion(val string) attribute.KeyValue {
+	return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+	return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+	return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+	return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+	return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+	return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+	return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+	return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+	return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+	return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+	return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+	return TLSServerSubjectKey.String(val)
+}
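+
+// Editor's note: an illustrative sketch, not part of the generated API, of
+// recording negotiated TLS details on a span; the values are examples:
+//
+//	span.SetAttributes(
+//		semconv.TLSProtocolVersion("1.3"),
+//		semconv.TLSCipher("TLS_AES_128_GCM_SHA256"),
+//		semconv.TLSEstablished(true),
+//		semconv.TLSResumed(false),
+//	)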
+
+// Attributes describing URL.
+const (
+	// URLDomainKey is the attribute Key conforming to the "url.domain"
+	// semantic conventions. It represents the domain extracted from the
+	// `url.full`, such as "opentelemetry.io".
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
+	// '[1080:0:0:0:8:800:200C:417A]'
+	// Note: In some cases a URL may refer to an IP and/or port directly,
+	// without a domain name. In this case, the IP address would go to the
+	// domain field. If the URL contains a [literal IPv6
+	// address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by
+	// `[` and `]`, the `[` and `]` characters should also be captured in the
+	// domain field.
+	URLDomainKey = attribute.Key("url.domain")
+
+	// URLExtensionKey is the attribute Key conforming to the "url.extension"
+	// semantic conventions. It represents the file extension extracted from
+	// the `url.full`, excluding the leading dot.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'png', 'gz'
+	// Note: The file extension is only set if it exists, as not every url has
+	// a file extension. When the file name has multiple extensions
+	// `example.tar.gz`, only the last one should be captured `gz`, not
+	// `tar.gz`.
+	URLExtensionKey = attribute.Key("url.extension")
+
+	// URLFragmentKey is the attribute Key conforming to the "url.fragment"
+	// semantic conventions. It represents the [URI
+	// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'SemConv'
+	URLFragmentKey = attribute.Key("url.fragment")
+
+	// URLFullKey is the attribute Key conforming to the "url.full" semantic
+	// conventions. It represents the absolute URL describing a network
+	// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+	// '//localhost'
+	// Note: For network calls, URL usually has
+	// `scheme://host[:port][path][?query][#fragment]` format, where the
+	// fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+	// included nevertheless.
+	// `url.full` MUST NOT contain credentials passed via URL in form of
+	// `https://username:password@www.example.com/`. In such case username and
+	// password SHOULD be redacted and attribute's value SHOULD be
+	// `https://REDACTED:REDACTED@www.example.com/`.
+	// `url.full` SHOULD capture the absolute URL when it is available (or can
+	// be reconstructed). Sensitive content provided in `url.full` SHOULD be
+	// scrubbed when instrumentations can identify it.
+	URLFullKey = attribute.Key("url.full")
+
+	// URLOriginalKey is the attribute Key conforming to the "url.original"
+	// semantic conventions. It represents the unmodified original URL as seen
+	// in the event source.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+	// 'search?q=OpenTelemetry'
+	// Note: In network monitoring, the observed URL may be a full URL, whereas
+	// in access logs, the URL is often just represented as a path. This field
+	// is meant to represent the URL as it was observed, complete or not.
+	// `url.original` might contain credentials passed via URL in form of
+	// `https://username:password@www.example.com/`. In such case password and
+	// username SHOULD NOT be redacted and attribute's value SHOULD remain the
+	// same.
+	URLOriginalKey = attribute.Key("url.original")
+
+	// URLPathKey is the attribute Key conforming to the "url.path" semantic
+	// conventions. It represents the [URI
+	// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '/search'
+	// Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+	// instrumentations can identify it.
+	URLPathKey = attribute.Key("url.path")
+
+	// URLPortKey is the attribute Key conforming to the "url.port" semantic
+	// conventions. It represents the port extracted from the `url.full`
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 443
+	URLPortKey = attribute.Key("url.port")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI
+	// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'q=OpenTelemetry'
+	// Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+	// instrumentations can identify it.
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLRegisteredDomainKey is the attribute Key conforming to the
+	// "url.registered_domain" semantic conventions. It represents the highest
+	// registered url domain, stripped of the subdomain.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'example.com', 'foo.co.uk'
+	// Note: This value can be determined precisely with the [public suffix
+	// list](http://publicsuffix.org). For example, the registered domain for
+	// `foo.example.com` is `example.com`. Trying to approximate this by simply
+	// taking the last two labels will not work well for TLDs such as `co.uk`.
+	URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme"
+	// semantic conventions. It represents the [URI
+	// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+	// identifying the used protocol.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'https', 'ftp', 'telnet'
+	URLSchemeKey = attribute.Key("url.scheme")
+
+	// URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+	// semantic conventions. It represents the subdomain portion of a fully
+	// qualified domain name, which includes all of the names except the host
+	// name under the registered_domain. In a partially qualified domain, or if
+	// the qualification level of the full name cannot be determined, subdomain
+	// contains all of the names below the registered domain.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'east', 'sub2.sub1'
+	// Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
+	// the domain has multiple levels of subdomain, such as
+	// `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
+	// with no trailing period.
+	URLSubdomainKey = attribute.Key("url.subdomain")
+
+	// URLTemplateKey is the attribute Key conforming to the "url.template"
+	// semantic conventions. It represents the low-cardinality template of an
+	// [absolute path
+	// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/users/{id}', '/users/:id', '/users?id={id}'
+	URLTemplateKey = attribute.Key("url.template")
+
+	// URLTopLevelDomainKey is the attribute Key conforming to the
+	// "url.top_level_domain" semantic conventions. It represents the effective
+	// top level domain (eTLD), also known as the domain suffix, i.e. the last
+	// part of the domain name. For example, the top level domain for
+	// example.com is `com`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com', 'co.uk'
+	// Note: This value can be determined precisely with the [public suffix
+	// list](http://publicsuffix.org).
+	URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the
+// `url.full`, such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+	return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the
+// "url.extension" semantic conventions. It represents the file extension
+// extracted from the `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+	return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+	return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+	return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the
+// "url.original" semantic conventions. It represents the unmodified original
+// URL as seen in the event source.
+func URLOriginal(val string) attribute.KeyValue {
+	return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+	return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port"
+// semantic conventions. It represents the port extracted from the `url.full`
+func URLPort(val int) attribute.KeyValue {
+	return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+	return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered url domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+	return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+	return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the
+// "url.subdomain" semantic conventions. It represents the subdomain portion of
+// a fully qualified domain name, which includes all of the names except the
+// host name under the registered_domain. In a partially qualified domain, or
+// if the qualification level of the full name cannot be determined, subdomain
+// contains all of the names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+	return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the
+// "url.template" semantic conventions. It represents the low-cardinality
+// template of an [absolute path
+// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+func URLTemplate(val string) attribute.KeyValue {
+	return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, i.e. the last part of
+// the domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+	return URLTopLevelDomainKey.String(val)
+}
It represents the [URI +// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component +// identifying the used protocol. +func URLScheme(val string) attribute.KeyValue { + return URLSchemeKey.String(val) +} + +// URLSubdomain returns an attribute KeyValue conforming to the +// "url.subdomain" semantic conventions. It represents the subdomain portion of +// a fully qualified domain name includes all of the names except the host name +// under the registered_domain. In a partially qualified domain, or if the +// qualification level of the full name cannot be determined, subdomain +// contains all of the names below the registered domain. +func URLSubdomain(val string) attribute.KeyValue { + return URLSubdomainKey.String(val) +} + +// URLTemplate returns an attribute KeyValue conforming to the +// "url.template" semantic conventions. It represents the low-cardinality +// template of an [absolute path +// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2). +func URLTemplate(val string) attribute.KeyValue { + return URLTemplateKey.String(val) +} + +// URLTopLevelDomain returns an attribute KeyValue conforming to the +// "url.top_level_domain" semantic conventions. It represents the effective top +// level domain (eTLD), also known as the domain suffix, is the last part of +// the domain name. For example, the top level domain for example.com is `com`. +func URLTopLevelDomain(val string) attribute.KeyValue { + return URLTopLevelDomainKey.String(val) +} + +// Describes user-agent attributes. +const ( + // UserAgentNameKey is the attribute Key conforming to the + // "user_agent.name" semantic conventions. It represents the name of the + // user-agent extracted from original. Usually refers to the browser's + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Safari', 'YourApp' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's name + // from original string. In the case of using a user-agent for non-browser + // products, such as microservices with multiple names/versions inside the + // `user_agent.original`, the most significant name SHOULD be selected. In + // such a scenario it should align with `user_agent.version` + UserAgentNameKey = attribute.Key("user_agent.name") + + // UserAgentOriginalKey is the attribute Key conforming to the + // "user_agent.original" semantic conventions. It represents the value of + // the [HTTP + // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) + // header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU + // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) + // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0 + // grpc-java-okhttp/1.27.2' + UserAgentOriginalKey = attribute.Key("user_agent.original") + + // UserAgentVersionKey is the attribute Key conforming to the + // "user_agent.version" semantic conventions. It represents the version of + // the user-agent extracted from original. Usually refers to the browser's + // version + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.1.2', '1.0.0' + // Note: [Example](https://www.whatsmyua.info) of extracting browser's + // version from original string. 
In the case of using a user-agent for + // non-browser products, such as microservices with multiple names/versions + // inside the `user_agent.original`, the most significant version SHOULD be + // selected. In such a scenario it should align with `user_agent.name` + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// The attributes used to describe the packaged software running the +// application code. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. 
+func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go new file mode 100644 index 0000000000..d031bbea78 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the v1.26.0 +// version of the OpenTelemetry semantic conventions. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go new file mode 100644 index 0000000000..bfaee0d56e --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go new file mode 100644 index 0000000000..fcdb9f4859 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -0,0 +1,1307 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +const ( + + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. It represents the disk bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." 
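+
+	// Editor's note, not part of the generated file: the Name/Unit/Description
+	// triples in this file are intended to be fed to an OpenTelemetry meter
+	// when the matching instrument is created. A minimal sketch for a counter,
+	// assuming a metric.Meter named "meter" from go.opentelemetry.io/otel/metric
+	// and a context.Context named "ctx":
+	//
+	//	cpuTime, err := meter.Float64Counter(
+	//		ContainerCPUTimeName,
+	//		metric.WithUnit(ContainerCPUTimeUnit),
+	//		metric.WithDescription(ContainerCPUTimeDescription),
+	//	)
+	//	if err == nil {
+	//		cpuTime.Add(ctx, 0.5) // seconds of CPU consumed
+	//	}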
+ + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number + // of connections that are currently in state described by the `state` + // attribute. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. 
+ // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionTimeoutsName = "db.client.connection.timeouts" + DBClientConnectionTimeoutsUnit = "{timeout}" + DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionCreateTime is the metric conforming to the + // "db.client.connection.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionCreateTimeName = "db.client.connection.create_time" + DBClientConnectionCreateTimeUnit = "s" + DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionWaitTime is the metric conforming to the + // "db.client.connection.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionWaitTimeName = "db.client.connection.wait_time" + DBClientConnectionWaitTimeUnit = "s" + DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionUseTime is the metric conforming to the + // "db.client.connection.use_time" semantic conventions. It represents the time + // between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionUseTimeName = "db.client.connection.use_time" + DBClientConnectionUseTimeUnit = "s" + DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It represents the + // deprecated, use `db.client.connection.count` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsUsageName = "db.client.connections.usage" + DBClientConnectionsUsageUnit = "{connection}" + DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." + + // DBClientConnectionsIdleMax is the metric conforming to the + // "db.client.connections.idle.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.idle.max` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" + DBClientConnectionsIdleMaxUnit = "{connection}" + DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." + + // DBClientConnectionsIdleMin is the metric conforming to the + // "db.client.connections.idle.min" semantic conventions. It represents the + // deprecated, use `db.client.connection.idle.min` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsIdleMinName = "db.client.connections.idle.min" + DBClientConnectionsIdleMinUnit = "{connection}" + DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." + + // DBClientConnectionsMax is the metric conforming to the + // "db.client.connections.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.max` instead. 
+ // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionsMaxName = "db.client.connections.max" + DBClientConnectionsMaxUnit = "{connection}" + DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." + + // DBClientConnectionsPendingRequests is the metric conforming to the + // "db.client.connections.pending_requests" semantic conventions. It represents + // the deprecated, use `db.client.connection.pending_requests` instead. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" + DBClientConnectionsPendingRequestsUnit = "{request}" + DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." + + // DBClientConnectionsTimeouts is the metric conforming to the + // "db.client.connections.timeouts" semantic conventions. It represents the + // deprecated, use `db.client.connection.timeouts` instead. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" + DBClientConnectionsTimeoutsUnit = "{timeout}" + DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." + + // DBClientConnectionsCreateTime is the metric conforming to the + // "db.client.connections.create_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.create_time` instead. Note: the unit + // also changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsCreateTimeName = "db.client.connections.create_time" + DBClientConnectionsCreateTimeUnit = "ms" + DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." + + // DBClientConnectionsWaitTime is the metric conforming to the + // "db.client.connections.wait_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.wait_time` instead. Note: the unit + // also changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" + DBClientConnectionsWaitTimeUnit = "ms" + DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." + + // DBClientConnectionsUseTime is the metric conforming to the + // "db.client.connections.use_time" semantic conventions. It represents the + // deprecated, use `db.client.connection.use_time` instead. Note: the unit also + // changed from `ms` to `s`. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + DBClientConnectionsUseTimeName = "db.client.connections.use_time" + DBClientConnectionsUseTimeUnit = "ms" + DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." + + // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" + // semantic conventions. It represents the measures the time taken to perform a + // DNS lookup. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DNSLookupDurationName = "dns.lookup.duration" + DNSLookupDurationUnit = "s" + DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." 
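+
+	// Editor's note, not part of the generated file: histogram-shaped
+	// conventions such as dns.lookup.duration record a distribution rather
+	// than a running total. A sketch under the same assumptions as above
+	// (a metric.Meter named "meter", a context.Context named "ctx", and an
+	// elapsed time.Duration named "elapsed"):
+	//
+	//	lookupDur, err := meter.Float64Histogram(
+	//		DNSLookupDurationName,
+	//		metric.WithUnit(DNSLookupDurationUnit),
+	//		metric.WithDescription(DNSLookupDurationDescription),
+	//	)
+	//	if err == nil {
+	//		lookupDur.Record(ctx, elapsed.Seconds())
+	//	}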
+ + // AspnetcoreRoutingMatchAttempts is the metric conforming to the + // "aspnetcore.routing.match_attempts" semantic conventions. It represents the + // number of requests that were attempted to be matched to an endpoint. + // Instrument: counter + // Unit: {match_attempt} + // Stability: Stable + AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" + AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" + AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." + + // AspnetcoreDiagnosticsExceptions is the metric conforming to the + // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the + // number of exceptions caught by exception handling middleware. + // Instrument: counter + // Unit: {exception} + // Stability: Stable + AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" + AspnetcoreDiagnosticsExceptionsUnit = "{exception}" + AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." + + // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the + // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It + // represents the number of requests that are currently active on the server + // that hold a rate limiting lease. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" + AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" + AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." + + // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the + // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It + // represents the duration of rate limiting lease held by requests on the + // server. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" + AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" + AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." + + // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the + // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It + // represents the time the request spent in a queue waiting to acquire a rate + // limiting lease. + // Instrument: histogram + // Unit: s + // Stability: Stable + AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" + AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" + AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the + // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It + // represents the number of requests that are currently queued, waiting to + // acquire a rate limiting lease. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" + AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" + AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." + + // AspnetcoreRateLimitingRequests is the metric conforming to the + // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the + // number of requests that tried to acquire a rate limiting lease. + // Instrument: counter + // Unit: {request} + // Stability: Stable + AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" + AspnetcoreRateLimitingRequestsUnit = "{request}" + AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." + + // KestrelActiveConnections is the metric conforming to the + // "kestrel.active_connections" semantic conventions. It represents the number + // of connections that are currently active on the server. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelActiveConnectionsName = "kestrel.active_connections" + KestrelActiveConnectionsUnit = "{connection}" + KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." + + // KestrelConnectionDuration is the metric conforming to the + // "kestrel.connection.duration" semantic conventions. It represents the + // duration of connections on the server. + // Instrument: histogram + // Unit: s + // Stability: Stable + KestrelConnectionDurationName = "kestrel.connection.duration" + KestrelConnectionDurationUnit = "s" + KestrelConnectionDurationDescription = "The duration of connections on the server." + + // KestrelRejectedConnections is the metric conforming to the + // "kestrel.rejected_connections" semantic conventions. It represents the + // number of connections rejected by the server. + // Instrument: counter + // Unit: {connection} + // Stability: Stable + KestrelRejectedConnectionsName = "kestrel.rejected_connections" + KestrelRejectedConnectionsUnit = "{connection}" + KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." + + // KestrelQueuedConnections is the metric conforming to the + // "kestrel.queued_connections" semantic conventions. It represents the number + // of connections that are currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Stable + KestrelQueuedConnectionsName = "kestrel.queued_connections" + KestrelQueuedConnectionsUnit = "{connection}" + KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." + + // KestrelQueuedRequests is the metric conforming to the + // "kestrel.queued_requests" semantic conventions. It represents the number of + // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are + // currently queued and are waiting to start. + // Instrument: updowncounter + // Unit: {request} + // Stability: Stable + KestrelQueuedRequestsName = "kestrel.queued_requests" + KestrelQueuedRequestsUnit = "{request}" + KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." + + // KestrelUpgradedConnections is the metric conforming to the + // "kestrel.upgraded_connections" semantic conventions. 
It represents the
+ // number of connections that are currently upgraded (WebSockets).
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+ KestrelUpgradedConnectionsUnit = "{connection}"
+ KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+ // KestrelTLSHandshakeDuration is the metric conforming to the
+ // "kestrel.tls_handshake.duration" semantic conventions. It represents the
+ // duration of TLS handshakes on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+ KestrelTLSHandshakeDurationUnit = "s"
+ KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+ // KestrelActiveTLSHandshakes is the metric conforming to the
+ // "kestrel.active_tls_handshakes" semantic conventions. It represents the
+ // number of TLS handshakes that are currently in progress on the server.
+ // Instrument: updowncounter
+ // Unit: {handshake}
+ // Stability: Stable
+ KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+ KestrelActiveTLSHandshakesUnit = "{handshake}"
+ KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+ // SignalrServerConnectionDuration is the metric conforming to the
+ // "signalr.server.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+ SignalrServerConnectionDurationUnit = "s"
+ SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+ // SignalrServerActiveConnections is the metric conforming to the
+ // "signalr.server.active_connections" semantic conventions. It represents the
+ // number of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+ SignalrServerActiveConnectionsUnit = "{connection}"
+ SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+ // semantic conventions. It measures the duration of the
+ // function's logic execution.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInvokeDurationName = "faas.invoke_duration"
+ FaaSInvokeDurationUnit = "s"
+ FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+ // FaaSInitDuration is the metric conforming to the "faas.init_duration"
+ // semantic conventions. It measures the duration of the
+ // function's initialization, such as a cold start.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInitDurationName = "faas.init_duration"
+ FaaSInitDurationUnit = "s"
+ FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+ // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+ // conventions. It represents the number of invocation cold starts.
+ // Instrument: counter + // Unit: {coldstart} + // Stability: Experimental + FaaSColdstartsName = "faas.coldstarts" + FaaSColdstartsUnit = "{coldstart}" + FaaSColdstartsDescription = "Number of invocation cold starts" + + // FaaSErrors is the metric conforming to the "faas.errors" semantic + // conventions. It represents the number of invocation errors. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + FaaSErrorsName = "faas.errors" + FaaSErrorsUnit = "{error}" + FaaSErrorsDescription = "Number of invocation errors" + + // FaaSInvocations is the metric conforming to the "faas.invocations" semantic + // conventions. It represents the number of successful invocations. + // Instrument: counter + // Unit: {invocation} + // Stability: Experimental + FaaSInvocationsName = "faas.invocations" + FaaSInvocationsUnit = "{invocation}" + FaaSInvocationsDescription = "Number of successful invocations" + + // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic + // conventions. It represents the number of invocation timeouts. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + FaaSTimeoutsName = "faas.timeouts" + FaaSTimeoutsUnit = "{timeout}" + FaaSTimeoutsDescription = "Number of invocation timeouts" + + // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic + // conventions. It represents the distribution of max memory usage per + // invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSMemUsageName = "faas.mem_usage" + FaaSMemUsageUnit = "By" + FaaSMemUsageDescription = "Distribution of max memory usage per invocation" + + // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic + // conventions. It represents the distribution of CPU usage per invocation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + FaaSCPUUsageName = "faas.cpu_usage" + FaaSCPUUsageUnit = "s" + FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" + + // FaaSNetIo is the metric conforming to the "faas.net_io" semantic + // conventions. It represents the distribution of net I/O usage per invocation. + // Instrument: histogram + // Unit: By + // Stability: Experimental + FaaSNetIoName = "faas.net_io" + FaaSNetIoUnit = "By" + FaaSNetIoDescription = "Distribution of net I/O usage per invocation" + + // HTTPServerRequestDuration is the metric conforming to the + // "http.server.request.duration" semantic conventions. It represents the + // duration of HTTP server requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPServerRequestDurationName = "http.server.request.duration" + HTTPServerRequestDurationUnit = "s" + HTTPServerRequestDurationDescription = "Duration of HTTP server requests." + + // HTTPServerActiveRequests is the metric conforming to the + // "http.server.active_requests" semantic conventions. It represents the number + // of active HTTP server requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPServerActiveRequestsName = "http.server.active_requests" + HTTPServerActiveRequestsUnit = "{request}" + HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." + + // HTTPServerRequestBodySize is the metric conforming to the + // "http.server.request.body.size" semantic conventions. It represents the size + // of HTTP server request bodies. 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerRequestBodySizeName = "http.server.request.body.size" + HTTPServerRequestBodySizeUnit = "By" + HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." + + // HTTPServerResponseBodySize is the metric conforming to the + // "http.server.response.body.size" semantic conventions. It represents the + // size of HTTP server response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPServerResponseBodySizeName = "http.server.response.body.size" + HTTPServerResponseBodySizeUnit = "By" + HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." + + // HTTPClientRequestDuration is the metric conforming to the + // "http.client.request.duration" semantic conventions. It represents the + // duration of HTTP client requests. + // Instrument: histogram + // Unit: s + // Stability: Stable + HTTPClientRequestDurationName = "http.client.request.duration" + HTTPClientRequestDurationUnit = "s" + HTTPClientRequestDurationDescription = "Duration of HTTP client requests." + + // HTTPClientRequestBodySize is the metric conforming to the + // "http.client.request.body.size" semantic conventions. It represents the size + // of HTTP client request bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientRequestBodySizeName = "http.client.request.body.size" + HTTPClientRequestBodySizeUnit = "By" + HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." + + // HTTPClientResponseBodySize is the metric conforming to the + // "http.client.response.body.size" semantic conventions. It represents the + // size of HTTP client response bodies. + // Instrument: histogram + // Unit: By + // Stability: Experimental + HTTPClientResponseBodySizeName = "http.client.response.body.size" + HTTPClientResponseBodySizeUnit = "By" + HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. + // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." 
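+
+	// Editor's note, not part of the generated file: updowncounter-shaped
+	// conventions such as http.client.active_requests track a value that can
+	// both rise and fall. A sketch under the same assumptions as above:
+	//
+	//	active, err := meter.Int64UpDownCounter(
+	//		HTTPClientActiveRequestsName,
+	//		metric.WithUnit(HTTPClientActiveRequestsUnit),
+	//		metric.WithDescription(HTTPClientActiveRequestsDescription),
+	//	)
+	//	if err == nil {
+	//		active.Add(ctx, 1)  // request started
+	//		active.Add(ctx, -1) // request finished
+	//	}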
+ + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic + // conventions. It represents the measure of initial memory requested. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmMemoryInitName = "jvm.memory.init" + JvmMemoryInitUnit = "By" + JvmMemoryInitDescription = "Measure of initial memory requested." + + // JvmSystemCPUUtilization is the metric conforming to the + // "jvm.system.cpu.utilization" semantic conventions. It represents the recent + // CPU utilization for the whole system as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" + JvmSystemCPUUtilizationUnit = "1" + JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." + + // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" + // semantic conventions. It represents the average CPU load of the whole system + // for the last minute as reported by the JVM. + // Instrument: gauge + // Unit: {run_queue_item} + // Stability: Experimental + JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" + JvmSystemCPULoad1mUnit = "{run_queue_item}" + JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." + + // JvmBufferMemoryUsage is the metric conforming to the + // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of + // memory used by buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" + JvmBufferMemoryUsageUnit = "By" + JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." + + // JvmBufferMemoryLimit is the metric conforming to the + // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of + // total memory capacity of buffers. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" + JvmBufferMemoryLimitUnit = "By" + JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." + + // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic + // conventions. It represents the number of buffers in the pool. + // Instrument: updowncounter + // Unit: {buffer} + // Stability: Experimental + JvmBufferCountName = "jvm.buffer.count" + JvmBufferCountUnit = "{buffer}" + JvmBufferCountDescription = "Number of buffers in the pool." + + // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic + // conventions. It represents the measure of memory used. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryUsedName = "jvm.memory.used" + JvmMemoryUsedUnit = "By" + JvmMemoryUsedDescription = "Measure of memory used." + + // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" + // semantic conventions. It represents the measure of memory committed. + // Instrument: updowncounter + // Unit: By + // Stability: Stable + JvmMemoryCommittedName = "jvm.memory.committed" + JvmMemoryCommittedUnit = "By" + JvmMemoryCommittedDescription = "Measure of memory committed." + + // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic + // conventions. It represents the measure of max obtainable memory. 
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryLimitName = "jvm.memory.limit"
+ JvmMemoryLimitUnit = "By"
+ JvmMemoryLimitDescription = "Measure of max obtainable memory."
+
+ // JvmMemoryUsedAfterLastGc is the metric conforming to the
+ // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+ // measure of memory used, as measured after the most recent garbage collection
+ // event on this pool.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+ JvmMemoryUsedAfterLastGcUnit = "By"
+ JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+ // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+ // conventions. It represents the duration of JVM garbage collection actions.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ JvmGcDurationName = "jvm.gc.duration"
+ JvmGcDurationUnit = "s"
+ JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+ // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+ // conventions. It represents the number of executing platform threads.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Stable
+ JvmThreadCountName = "jvm.thread.count"
+ JvmThreadCountUnit = "{thread}"
+ JvmThreadCountDescription = "Number of executing platform threads."
+
+ // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+ // conventions. It represents the number of classes loaded since JVM start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassLoadedName = "jvm.class.loaded"
+ JvmClassLoadedUnit = "{class}"
+ JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+ // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+ // semantic conventions. It represents the number of classes unloaded since JVM
+ // start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassUnloadedName = "jvm.class.unloaded"
+ JvmClassUnloadedUnit = "{class}"
+ JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+ // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+ // conventions. It represents the number of classes currently loaded.
+ // Instrument: updowncounter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassCountName = "jvm.class.count"
+ JvmClassCountUnit = "{class}"
+ JvmClassCountDescription = "Number of classes currently loaded."
+
+ // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+ // conventions. It represents the number of processors available to the Java
+ // virtual machine.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Stable
+ JvmCPUCountName = "jvm.cpu.count"
+ JvmCPUCountUnit = "{cpu}"
+ JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+ // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+ // conventions. It represents the CPU time used by the process as reported by
+ // the JVM.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Stable
+ JvmCPUTimeName = "jvm.cpu.time"
+ JvmCPUTimeUnit = "s"
+ JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+ // JvmCPURecentUtilization is the metric conforming to the
+ // "jvm.cpu.recent_utilization" semantic conventions.
It represents the recent + // CPU utilization for the process as reported by the JVM. + // Instrument: gauge + // Unit: 1 + // Stability: Stable + JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" + JvmCPURecentUtilizationUnit = "1" + JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." + + // MessagingPublishDuration is the metric conforming to the + // "messaging.publish.duration" semantic conventions. It represents the + // measures the duration of publish operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingPublishDurationName = "messaging.publish.duration" + MessagingPublishDurationUnit = "s" + MessagingPublishDurationDescription = "Measures the duration of publish operation." + + // MessagingReceiveDuration is the metric conforming to the + // "messaging.receive.duration" semantic conventions. It represents the + // measures the duration of receive operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingReceiveDurationName = "messaging.receive.duration" + MessagingReceiveDurationUnit = "s" + MessagingReceiveDurationDescription = "Measures the duration of receive operation." + + // MessagingProcessDuration is the metric conforming to the + // "messaging.process.duration" semantic conventions. It represents the + // measures the duration of process operation. + // Instrument: histogram + // Unit: s + // Stability: Experimental + MessagingProcessDurationName = "messaging.process.duration" + MessagingProcessDurationUnit = "s" + MessagingProcessDurationDescription = "Measures the duration of process operation." + + // MessagingPublishMessages is the metric conforming to the + // "messaging.publish.messages" semantic conventions. It represents the + // measures the number of published messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingPublishMessagesName = "messaging.publish.messages" + MessagingPublishMessagesUnit = "{message}" + MessagingPublishMessagesDescription = "Measures the number of published messages." + + // MessagingReceiveMessages is the metric conforming to the + // "messaging.receive.messages" semantic conventions. It represents the + // measures the number of received messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingReceiveMessagesName = "messaging.receive.messages" + MessagingReceiveMessagesUnit = "{message}" + MessagingReceiveMessagesDescription = "Measures the number of received messages." + + // MessagingProcessMessages is the metric conforming to the + // "messaging.process.messages" semantic conventions. It represents the + // measures the number of processed messages. + // Instrument: counter + // Unit: {message} + // Stability: Experimental + MessagingProcessMessagesName = "messaging.process.messages" + MessagingProcessMessagesUnit = "{message}" + MessagingProcessMessagesDescription = "Measures the number of processed messages." + + // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic + // conventions. It represents the total CPU seconds broken down by different + // states. + // Instrument: counter + // Unit: s + // Stability: Experimental + ProcessCPUTimeName = "process.cpu.time" + ProcessCPUTimeUnit = "s" + ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." + + // ProcessCPUUtilization is the metric conforming to the + // "process.cpu.utilization" semantic conventions. 
It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." + + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. + // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. 
+ // Instrument: counter + // Unit: {fault} + // Stability: Experimental + ProcessPagingFaultsName = "process.paging.faults" + ProcessPagingFaultsUnit = "{fault}" + ProcessPagingFaultsDescription = "Number of page faults the process has made." + + // RPCServerDuration is the metric conforming to the "rpc.server.duration" + // semantic conventions. It represents the measures the duration of inbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCServerDurationName = "rpc.server.duration" + RPCServerDurationUnit = "ms" + RPCServerDurationDescription = "Measures the duration of inbound RPC." + + // RPCServerRequestSize is the metric conforming to the + // "rpc.server.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerRequestSizeName = "rpc.server.request.size" + RPCServerRequestSizeUnit = "By" + RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCServerResponseSize is the metric conforming to the + // "rpc.server.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCServerResponseSizeName = "rpc.server.response.size" + RPCServerResponseSizeUnit = "By" + RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCServerRequestsPerRPC is the metric conforming to the + // "rpc.server.requests_per_rpc" semantic conventions. It represents the + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" + RPCServerRequestsPerRPCUnit = "{count}" + RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCServerResponsesPerRPC is the metric conforming to the + // "rpc.server.responses_per_rpc" semantic conventions. It represents the + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" + RPCServerResponsesPerRPCUnit = "{count}" + RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // RPCClientDuration is the metric conforming to the "rpc.client.duration" + // semantic conventions. It represents the measures the duration of outbound + // RPC. + // Instrument: histogram + // Unit: ms + // Stability: Experimental + RPCClientDurationName = "rpc.client.duration" + RPCClientDurationUnit = "ms" + RPCClientDurationDescription = "Measures the duration of outbound RPC." + + // RPCClientRequestSize is the metric conforming to the + // "rpc.client.request.size" semantic conventions. It represents the measures + // the size of RPC request messages (uncompressed). + // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientRequestSizeName = "rpc.client.request.size" + RPCClientRequestSizeUnit = "By" + RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." + + // RPCClientResponseSize is the metric conforming to the + // "rpc.client.response.size" semantic conventions. It represents the measures + // the size of RPC response messages (uncompressed). 
+ // Instrument: histogram + // Unit: By + // Stability: Experimental + RPCClientResponseSizeName = "rpc.client.response.size" + RPCClientResponseSizeUnit = "By" + RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." + + // RPCClientRequestsPerRPC is the metric conforming to the + // "rpc.client.requests_per_rpc" semantic conventions. It + // measures the number of messages received per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" + RPCClientRequestsPerRPCUnit = "{count}" + RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." + + // RPCClientResponsesPerRPC is the metric conforming to the + // "rpc.client.responses_per_rpc" semantic conventions. It + // measures the number of messages sent per RPC. + // Instrument: histogram + // Unit: {count} + // Stability: Experimental + RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" + RPCClientResponsesPerRPCUnit = "{count}" + RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." + + // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic + // conventions. It represents the seconds each logical CPU spent on each mode. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemCPUTimeName = "system.cpu.time" + SystemCPUTimeUnit = "s" + SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" + + // SystemCPUUtilization is the metric conforming to the + // "system.cpu.utilization" semantic conventions. It represents the difference + // in system.cpu.time since the last measurement, divided by the elapsed time + // and number of logical CPUs. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + SystemCPUUtilizationName = "system.cpu.utilization" + SystemCPUUtilizationUnit = "1" + SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" + + // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" + // semantic conventions. It reports the current frequency of the + // CPU in Hz. + // Instrument: gauge + // Unit: {Hz} + // Stability: Experimental + SystemCPUFrequencyName = "system.cpu.frequency" + SystemCPUFrequencyUnit = "{Hz}" + SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" + + // SystemCPUPhysicalCount is the metric conforming to the + // "system.cpu.physical.count" semantic conventions. It reports + // the number of actual physical processor cores on the hardware. + // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPUPhysicalCountName = "system.cpu.physical.count" + SystemCPUPhysicalCountUnit = "{cpu}" + SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" + + // SystemCPULogicalCount is the metric conforming to the + // "system.cpu.logical.count" semantic conventions. It reports + // the number of logical (virtual) processor cores created by the operating + // system to manage multitasking.
+ // Instrument: updowncounter + // Unit: {cpu} + // Stability: Experimental + SystemCPULogicalCountName = "system.cpu.logical.count" + SystemCPULogicalCountUnit = "{cpu}" + SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" + + // SystemMemoryUsage is the metric conforming to the "system.memory.usage" + // semantic conventions. It reports memory in use by state. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryUsageName = "system.memory.usage" + SystemMemoryUsageUnit = "By" + SystemMemoryUsageDescription = "Reports memory in use by state." + + // SystemMemoryLimit is the metric conforming to the "system.memory.limit" + // semantic conventions. It represents the total memory available in the + // system. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemoryLimitName = "system.memory.limit" + SystemMemoryLimitUnit = "By" + SystemMemoryLimitDescription = "Total memory available in the system." + + // SystemMemoryShared is the metric conforming to the "system.memory.shared" + // semantic conventions. It represents the shared memory used (mostly by + // tmpfs). + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemorySharedName = "system.memory.shared" + SystemMemorySharedUnit = "By" + SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." + + // SystemMemoryUtilization is the metric conforming to the + // "system.memory.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemMemoryUtilizationName = "system.memory.utilization" + SystemMemoryUtilizationUnit = "1" + + // SystemPagingUsage is the metric conforming to the "system.paging.usage" + // semantic conventions. It represents the Unix swap or Windows pagefile usage. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemPagingUsageName = "system.paging.usage" + SystemPagingUsageUnit = "By" + SystemPagingUsageDescription = "Unix swap or windows pagefile usage" + + // SystemPagingUtilization is the metric conforming to the + // "system.paging.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingUtilizationName = "system.paging.utilization" + SystemPagingUtilizationUnit = "1" + + // SystemPagingFaults is the metric conforming to the "system.paging.faults" + // semantic conventions. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingFaultsName = "system.paging.faults" + SystemPagingFaultsUnit = "{fault}" + + // SystemPagingOperations is the metric conforming to the + // "system.paging.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemPagingOperationsName = "system.paging.operations" + SystemPagingOperationsUnit = "{operation}" + + // SystemDiskIo is the metric conforming to the "system.disk.io" semantic + // conventions.
+ // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskIoName = "system.disk.io" + SystemDiskIoUnit = "By" + + // SystemDiskOperations is the metric conforming to the + // "system.disk.operations" semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskOperationsName = "system.disk.operations" + SystemDiskOperationsUnit = "{operation}" + + // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" + // semantic conventions. It represents the time disk spent activated. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskIoTimeName = "system.disk.io_time" + SystemDiskIoTimeUnit = "s" + SystemDiskIoTimeDescription = "Time disk spent activated" + + // SystemDiskOperationTime is the metric conforming to the + // "system.disk.operation_time" semantic conventions. It represents the sum of + // the time each operation took to complete. + // Instrument: counter + // Unit: s + // Stability: Experimental + SystemDiskOperationTimeName = "system.disk.operation_time" + SystemDiskOperationTimeUnit = "s" + SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" + + // SystemDiskMerged is the metric conforming to the "system.disk.merged" + // semantic conventions. + // Instrument: counter + // Unit: {operation} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemDiskMergedName = "system.disk.merged" + SystemDiskMergedUnit = "{operation}" + + // SystemFilesystemUsage is the metric conforming to the + // "system.filesystem.usage" semantic conventions. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUsageName = "system.filesystem.usage" + SystemFilesystemUsageUnit = "By" + + // SystemFilesystemUtilization is the metric conforming to the + // "system.filesystem.utilization" semantic conventions. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemFilesystemUtilizationName = "system.filesystem.utilization" + SystemFilesystemUtilizationUnit = "1" + + // SystemNetworkDropped is the metric conforming to the + // "system.network.dropped" semantic conventions. It represents the count of + // packets that are dropped or discarded even though there was no error. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + SystemNetworkDroppedName = "system.network.dropped" + SystemNetworkDroppedUnit = "{packet}" + SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" + + // SystemNetworkPackets is the metric conforming to the + // "system.network.packets" semantic conventions. + // Instrument: counter + // Unit: {packet} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
+ SystemNetworkPacketsName = "system.network.packets" + SystemNetworkPacketsUnit = "{packet}" + + // SystemNetworkErrors is the metric conforming to the "system.network.errors" + // semantic conventions. It represents the count of network errors detected. + // Instrument: counter + // Unit: {error} + // Stability: Experimental + SystemNetworkErrorsName = "system.network.errors" + SystemNetworkErrorsUnit = "{error}" + SystemNetworkErrorsDescription = "Count of network errors detected" + + // SystemNetworkIo is the metric conforming to the "system.network.io" semantic + // conventions. + // Instrument: counter + // Unit: By + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkIoName = "system.network.io" + SystemNetworkIoUnit = "By" + + // SystemNetworkConnections is the metric conforming to the + // "system.network.connections" semantic conventions. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. + SystemNetworkConnectionsName = "system.network.connections" + SystemNetworkConnectionsUnit = "{connection}" + + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. + // Instrument: updowncounter + // Unit: {process} + // Stability: Experimental + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" + + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total + // number of processes created over uptime of the host. + // Instrument: counter + // Unit: {process} + // Stability: Experimental + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" + + // SystemLinuxMemoryAvailable is the metric conforming to the + // "system.linux.memory.available" semantic conventions. It represents an + // estimate of how much memory is available for starting new applications, + // without causing swapping. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemLinuxMemoryAvailableName = "system.linux.memory.available" + SystemLinuxMemoryAvailableUnit = "By" + SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" +) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go new file mode 100644 index 0000000000..4c87c7adcc --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. 
Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace.go new file mode 100644 index 0000000000..6836c65478 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/trace" +) + +// Tracer creates a named tracer that implements the Tracer interface. +// If the name is an empty string, the provider uses a default name. +// +// This is short for GetTracerProvider().Tracer(name, opts...) +func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return GetTracerProvider().Tracer(name, opts...) +} + +// GetTracerProvider returns the registered global trace provider. +// If none is registered, an instance of NoopTracerProvider is returned. +// +// Use the trace provider to create a named tracer. E.g. +// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// or +// +// tracer := otel.Tracer("example.com/foo") +func GetTracerProvider() trace.TracerProvider { + return global.TracerProvider() +} + +// SetTracerProvider registers `tp` as the global trace provider. +func SetTracerProvider(tp trace.TracerProvider) { + global.SetTracerProvider(tp) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/LICENSE b/hack/tools/vendor/go.opentelemetry.io/otel/trace/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/trace/README.md new file mode 100644 index 0000000000..58ccaba69b --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/README.md @@ -0,0 +1,3 @@ +# Trace API + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/config.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace/config.go new file mode 100644 index 0000000000..273d58e001 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/config.go @@ -0,0 +1,323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// TracerConfig is a group of options for a Tracer. 
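+//
+// For illustration only, a hedged sketch of assembling one with the options
+// defined later in this file (the version and schema URL literals are
+// placeholders):
+//
+//	cfg := NewTracerConfig(
+//		WithInstrumentationVersion("0.1.0"),
+//		WithSchemaURL("https://opentelemetry.io/schemas/1.26.0"),
+//	)
+//	_ = cfg.InstrumentationVersion() // "0.1.0"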
+type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. + schemaURL string + attrs attribute.Set +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. +func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +} + +// NewTracerConfig applies all the options to a returned TracerConfig. +func NewTracerConfig(options ...TracerOption) TracerConfig { + var config TracerConfig + for _, option := range options { + config = option.apply(config) + } + return config +} + +// TracerOption applies an option to a TracerConfig. +type TracerOption interface { + apply(TracerConfig) TracerConfig +} + +type tracerOptionFunc func(TracerConfig) TracerConfig + +func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { + return fn(cfg) +} + +// SpanConfig is a group of options for a Span. +type SpanConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + links []Link + newRoot bool + spanKind SpanKind + stackTrace bool +} + +// Attributes describe the associated qualities of a Span. +func (cfg *SpanConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in a Span life-cycle. +func (cfg *SpanConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *SpanConfig) StackTrace() bool { + return cfg.stackTrace +} + +// Links are the associations a Span has with other Spans. +func (cfg *SpanConfig) Links() []Link { + return cfg.links +} + +// NewRoot identifies a Span as the root Span for a new trace. This is +// commonly used when an existing trace crosses trust boundaries and the +// remote parent span context should be ignored for security. +func (cfg *SpanConfig) NewRoot() bool { + return cfg.newRoot +} + +// SpanKind is the role a Span has in a trace. +func (cfg *SpanConfig) SpanKind() SpanKind { + return cfg.spanKind +} + +// NewSpanStartConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanStart(c) + } + return c +} + +// NewSpanEndConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data), it is left to the SDK to perform this +// action. +func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanEnd(c) + } + return c +} + +// SpanStartOption applies an option to a SpanConfig. These options are applicable +// only when the span is created. +type SpanStartOption interface { + applySpanStart(SpanConfig) SpanConfig +} + +type spanOptionFunc func(SpanConfig) SpanConfig + +func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { + return fn(cfg) +} + +// SpanEndOption applies an option to a SpanConfig. 
These options are +// applicable only when the span is ended. +type SpanEndOption interface { + applySpanEnd(SpanConfig) SpanConfig +} + +// EventConfig is a group of options for an Event. +type EventConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + stackTrace bool +} + +// Attributes describe the associated qualities of an Event. +func (cfg *EventConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in an Event life-cycle. +func (cfg *EventConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *EventConfig) StackTrace() bool { + return cfg.stackTrace +} + +// NewEventConfig applies all the EventOptions to a returned EventConfig. If no +// timestamp option is passed, the returned EventConfig will have a Timestamp +// set to the call time, otherwise no validation is performed on the returned +// EventConfig. +func NewEventConfig(options ...EventOption) EventConfig { + var c EventConfig + for _, option := range options { + c = option.applyEvent(c) + } + if c.timestamp.IsZero() { + c.timestamp = time.Now() + } + return c +} + +// EventOption applies span event options to an EventConfig. +type EventOption interface { + applyEvent(EventConfig) EventConfig +} + +// SpanOption are options that can be used at both the beginning and end of a span. +type SpanOption interface { + SpanStartOption + SpanEndOption +} + +// SpanStartEventOption are options that can be used at the start of a span, or with an event. +type SpanStartEventOption interface { + SpanStartOption + EventOption +} + +// SpanEndEventOption are options that can be used at the end of a span, or with an event. +type SpanEndEventOption interface { + SpanEndOption + EventOption +} + +type attributeOption []attribute.KeyValue + +func (o attributeOption) applySpan(c SpanConfig) SpanConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} +func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o attributeOption) applyEvent(c EventConfig) EventConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} + +var _ SpanStartEventOption = attributeOption{} + +// WithAttributes adds the attributes related to a span life-cycle event. +// These attributes are used to describe the work a Span represents when this +// option is provided to a Span's start or end events. Otherwise, these +// attributes provide additional information about the event being recorded +// (e.g. error, state change, processing progress, system event). +// +// If multiple of these options are passed the attributes of each successive +// option will extend the attributes instead of overwriting. There is no +// guarantee of uniqueness in the resulting attributes. +func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { + return attributeOption(attributes) +} + +// SpanEventOption are options that can be used with an event or a span. 
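+//
+// For illustration, WithTimestamp (defined below) is one such option; a
+// hedged sketch of using it in both positions, assuming an existing span and
+// an imported time package:
+//
+//	span.AddEvent("cache.miss", WithTimestamp(time.Now()))
+//	span.End(WithTimestamp(time.Now()))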
+type SpanEventOption interface { + SpanOption + EventOption +} + +type timestampOption time.Time + +func (o timestampOption) applySpan(c SpanConfig) SpanConfig { + c.timestamp = time.Time(o) + return c +} +func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applyEvent(c EventConfig) EventConfig { + c.timestamp = time.Time(o) + return c +} + +var _ SpanEventOption = timestampOption{} + +// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. +// started, stopped, errored). +func WithTimestamp(t time.Time) SpanEventOption { + return timestampOption(t) +} + +type stackTraceOption bool + +func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c +} + +func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } + +// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). +func WithStackTrace(b bool) SpanEndEventOption { + return stackTraceOption(b) +} + +// WithLinks adds links to a Span. The links are added to the existing Span +// links, i.e. this does not overwrite. Links with invalid span context are ignored. +func WithLinks(links ...Link) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.links = append(cfg.links, links...) + return cfg + }) +} + +// WithNewRoot specifies that the Span should be treated as a root Span. Any +// existing parent span context will be ignored when defining the Span's trace +// identifiers. +func WithNewRoot() SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.newRoot = true + return cfg + }) +} + +// WithSpanKind sets the SpanKind of a Span. +func WithSpanKind(kind SpanKind) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.spanKind = kind + return cfg + }) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.instrumentationVersion = version + return cfg + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL for the Tracer. +func WithSchemaURL(schemaURL string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.schemaURL = schemaURL + return cfg + }) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/context.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace/context.go new file mode 100644 index 0000000000..5650a174b4 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/context.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "context" + +type traceContextKeyType int + +const currentSpanKey traceContextKeyType = iota + +// ContextWithSpan returns a copy of parent with span set as the current Span. 
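+//
+// For illustration, a minimal round-trip sketch, assuming an existing span:
+//
+//	ctx = ContextWithSpan(ctx, span)
+//	_ = SpanFromContext(ctx) // returns span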
+func ContextWithSpan(parent context.Context, span Span) context.Context { + return context.WithValue(parent, currentSpanKey, span) +} + +// ContextWithSpanContext returns a copy of parent with sc as the current +// Span. The Span implementation that wraps sc is non-recording and performs +// no operations other than to return sc as the SpanContext from the +// SpanContext method. +func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { + return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) +} + +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly +// as a remote SpanContext and as the current Span. The Span implementation +// that wraps rsc is non-recording and performs no operations other than to +// return rsc as the SpanContext from the SpanContext method. +func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { + return ContextWithSpanContext(parent, rsc.WithRemote(true)) +} + +// SpanFromContext returns the current Span from ctx. +// +// If no Span is currently set in ctx, an implementation of a Span that +// performs no operations is returned. +func SpanFromContext(ctx context.Context) Span { + if ctx == nil { + return noopSpanInstance + } + if span, ok := ctx.Value(currentSpanKey).(Span); ok { + return span + } + return noopSpanInstance +} + +// SpanContextFromContext returns the current Span's SpanContext. +func SpanContextFromContext(ctx context.Context) SpanContext { + return SpanFromContext(ctx).SpanContext() +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/doc.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace/doc.go new file mode 100644 index 0000000000..d661c5d100 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package trace provides an implementation of the tracing part of the +OpenTelemetry API. + +To participate in distributed traces, a Span needs to be created for the +operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + + func init() { + tracer = otel.Tracer("instrumentation/package/name") + } + + func operation(ctx context.Context) { + var span trace.Span + ctx, span = tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +A Tracer is unique to the instrumentation and is used to create Spans. +Instrumentation should be designed to accept a TracerProvider from which it +can create its own unique Tracer. Alternatively, the registered global +TracerProvider from the go.opentelemetry.io/otel package can be used as +a default. + + const ( + name = "instrumentation/package/name" + version = "0.1.0" + ) + + type Instrumentation struct { + tracer trace.Tracer + } + + func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { + if tp == nil { + tp = otel.GetTracerProvider() + } + return &Instrumentation{ + tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), + } + } + + func operation(ctx context.Context, inst *Instrumentation) { + var span trace.Span + ctx, span = inst.tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author.
They could unknowingly build their implementation in a way that would +result in a runtime panic for their users who update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/trace/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/trace/embedded" + + type TracerProvider struct { + embedded.TracerProvider + // ... + } + +If an author wants the default behavior of their implementations to panic, they +can embed the API interface directly. + + import "go.opentelemetry.io/otel/trace" + + type TracerProvider struct { + trace.TracerProvider + // ... + } + +This option is not recommended. It will lead to publishing packages that +contain runtime panics when users update to newer versions of +[go.opentelemetry.io/otel/trace], which may be done with a transitive +dependency. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/trace/noop]: + + import "go.opentelemetry.io/otel/trace/noop" + + type TracerProvider struct { + noop.TracerProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API. +*/ +package trace // import "go.opentelemetry.io/otel/trace" diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/trace/embedded/README.md new file mode 100644 index 0000000000..7754a239ee --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/embedded/README.md @@ -0,0 +1,3 @@ +# Trace Embedded + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go new file mode 100644 index 0000000000..3e359a00bf --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// trace API]. +// +// Implementers of the [OpenTelemetry trace API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry trace API] is +// extended (which is something that can happen without a major version bump of +// the API package).
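+//
+// For illustration, a hedged sketch of the pattern (the outer struct name is
+// a placeholder):
+//
+//	type TracerProvider struct {
+//		embedded.TracerProvider
+//		// ... implementation
+//	}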
+// +// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace +package embedded // import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider is embedded in +// [go.opentelemetry.io/otel/trace.TracerProvider]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type TracerProvider interface{ tracerProvider() } + +// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Tracer interface{ tracer() } + +// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Span interface{ span() } diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace/nonrecording.go new file mode 100644 index 0000000000..c00221e7be --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + noopSpan + + sc SpanContext +} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/noop.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace/noop.go new file mode 100644 index 0000000000..ca20e9997a --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -0,0 +1,85 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// NewNoopTracerProvider returns an implementation of TracerProvider that +// performs no operations. The Tracer and Spans created from the returned +// TracerProvider also perform no operations. +// +// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] +// instead. 
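+//
+// For illustration, a hedged migration sketch:
+//
+//	tp := noop.NewTracerProvider() // instead of trace.NewNoopTracerProvider()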
+func NewNoopTracerProvider() TracerProvider { + return noopTracerProvider{} +} + +type noopTracerProvider struct{ embedded.TracerProvider } + +var _ TracerProvider = noopTracerProvider{} + +// Tracer returns a noop implementation of Tracer. +func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return noopTracer{} +} + +// noopTracer is an implementation of Tracer that performs no operations. +type noopTracer struct{ embedded.Tracer } + +var _ Tracer = noopTracer{} + +// Start carries forward a non-recording Span, if one is present in the context; otherwise it +// creates a no-op Span. +func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { + span := SpanFromContext(ctx) + if _, ok := span.(nonRecordingSpan); !ok { + // span is likely already a noopSpan, but let's be sure + span = noopSpanInstance + } + return ContextWithSpan(ctx, span), span +} + +// noopSpan is an implementation of Span that performs no operations. +type noopSpan struct{ embedded.Span } + +var noopSpanInstance Span = noopSpan{} + +// SpanContext returns an empty span context. +func (noopSpan) SpanContext() SpanContext { return SpanContext{} } + +// IsRecording always returns false. +func (noopSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (noopSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (noopSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (noopSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (noopSpan) End(...SpanEndOption) {} + +// RecordError does nothing. +func (noopSpan) RecordError(error, ...EventOption) {} + +// AddEvent does nothing. +func (noopSpan) AddEvent(string, ...EventOption) {} + +// AddLink does nothing. +func (noopSpan) AddLink(Link) {} + +// SetName does nothing. +func (noopSpan) SetName(string) {} + +// TracerProvider returns a no-op TracerProvider. +func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/hack/tools/vendor/go.opentelemetry.io/otel/trace/noop/README.md new file mode 100644 index 0000000000..cd382c82a1 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/noop/README.md @@ -0,0 +1,3 @@ +# Trace Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop) diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace/noop/noop.go new file mode 100644 index 0000000000..64a4f1b362 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package noop provides an implementation of the OpenTelemetry trace API that +// produces no telemetry and minimizes used computation resources. +// +// Using this package to implement the OpenTelemetry trace API will effectively +// disable OpenTelemetry. +// +// This implementation can be embedded in other implementations of the +// OpenTelemetry trace API. Doing so will mean the implementation defaults to +// no operation for methods it does not implement.
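+//
+// For illustration, a hedged embedding sketch (the outer type name is a
+// placeholder); methods the outer type does not define fall back to no-ops:
+//
+//	type partialTracerProvider struct {
+//		noop.TracerProvider
+//	}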
+package noop // import "go.opentelemetry.io/otel/trace/noop" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" +) + +var ( + // Compile-time check this implements the OpenTelemetry API. + + _ trace.TracerProvider = TracerProvider{} + _ trace.Tracer = Tracer{} + _ trace.Span = Span{} +) + +// TracerProvider is an OpenTelemetry No-Op TracerProvider. +type TracerProvider struct{ embedded.TracerProvider } + +// NewTracerProvider returns a TracerProvider that does not record any telemetry. +func NewTracerProvider() TracerProvider { + return TracerProvider{} +} + +// Tracer returns an OpenTelemetry Tracer that does not record any telemetry. +func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer { + return Tracer{} +} + +// Tracer is an OpenTelemetry No-Op Tracer. +type Tracer struct{ embedded.Tracer } + +// Start creates a span. The created span will be set in a child context of ctx +// and returned with the span. +// +// If ctx contains a span context, the returned span will also contain that +// span context. If the span context in ctx is for a non-recording span, that +// span instance will be returned directly. +func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { + span := trace.SpanFromContext(ctx) + + // If the parent context contains a non-zero span context, that span + // context needs to be returned as a non-recording span + // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk). + var zeroSC trace.SpanContext + if sc := span.SpanContext(); !sc.Equal(zeroSC) { + if !span.IsRecording() { + // If the span is not recording, return it directly. + return ctx, span + } + // Otherwise, wrap the span context in a non-recording span. + span = Span{sc: sc} + } else { + // No parent, return a No-Op span with an empty span context. + span = noopSpanInstance + } + return trace.ContextWithSpan(ctx, span), span +} + +var noopSpanInstance trace.Span = Span{} + +// Span is an OpenTelemetry No-Op Span. +type Span struct { + embedded.Span + + sc trace.SpanContext +} + +// SpanContext returns the wrapped span context, which is empty unless the +// Span was created from a parent span context. +func (s Span) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (Span) IsRecording() bool { return false } + +// SetStatus does nothing. +func (Span) SetStatus(codes.Code, string) {} + +// SetAttributes does nothing. +func (Span) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (Span) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (Span) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (Span) AddEvent(string, ...trace.EventOption) {} + +// AddLink does nothing. +func (Span) AddLink(trace.Link) {} + +// SetName does nothing. +func (Span) SetName(string) {} + +// TracerProvider returns a No-Op TracerProvider.
+func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} } diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/provider.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 0000000000..ef85cb70c6 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides; it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation; rather, they just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions; + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithInstrumentationAttributes option should be used + // to uniquely identify Tracers that handle the different domains of + // workflow data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently.
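+	//
+	// For illustration, a hedged call sketch (the name and version literals
+	// are placeholders):
+	//
+	//	tracer := tp.Tracer(
+	//		"go.opentelemetry.io/example/instrumentation",
+	//		WithInstrumentationVersion("0.1.0"),
+	//	)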
+ Tracer(name string, options ...TracerOption) Tracer +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/span.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 0000000000..d3aa476ee1 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. + SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. 
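To make the naming contract of the Tracer method concrete, here is a hypothetical caller (not part of this diff; the library path and version are invented, while otel.GetTracerProvider and trace.WithInstrumentationVersion are existing APIs in these modules):

package mylib

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// tracer is scoped to the instrumentation library itself, not to the code
// being instrumented, so its name is the library's Go package path.
var tracer trace.Tracer = otel.GetTracerProvider().Tracer(
	"github.com/example/mylib", // hypothetical library import path
	trace.WithInstrumentationVersion("0.1.0"),
)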
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+//  1. Batch Processing: A batch of operations may contain operations
+//     associated with one or more traces/spans. Since there can only be one
+//     parent SpanContext, a Link is used to keep reference to the
+//     SpanContext of all operations in the batch.
+//  2. Public Endpoint: A SpanContext for an incoming client request on a
+//     public endpoint should be considered untrusted. In such a case, a new
+//     trace with its own identity and sampling decision needs to be created,
+//     but this new trace needs to be related to the original trace in some
+//     form. A Link is used to keep reference to the original SpanContext and
+//     track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+	SpanKindUnspecified SpanKind = 0
+	// SpanKindInternal is a SpanKind for a Span that represents an internal
+	// operation within an application.
+	SpanKindInternal SpanKind = 1
+	// SpanKindServer is a SpanKind for a Span that represents the operation
+	// of handling a request from a client.
+	SpanKindServer SpanKind = 2
+	// SpanKindClient is a SpanKind for a Span that represents the operation
+	// of a client making a request to a server.
+	SpanKindClient SpanKind = 3
+	// SpanKindProducer is a SpanKind for a Span that represents the operation
+	// of a producer sending a message to a message broker. Unlike
+	// SpanKindClient and SpanKindServer, there is often no direct
+	// relationship between this kind of Span and a SpanKindConsumer kind. A
+	// SpanKindProducer Span will end once the message is accepted by the
+	// message broker, which might not overlap with the processing of that
+	// message.
+	SpanKindProducer SpanKind = 4
+	// SpanKindConsumer is a SpanKind for a Span that represents the operation
+	// of a consumer receiving a message from a message broker. Like
+	// SpanKindProducer Spans, there is often no direct relationship between
+	// this Span and the Span that produced the message.
+	SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
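The batch-processing case described in the Link documentation above is easiest to see in code. A sketch under assumed names (item, processBatch, and the enqueue-time context capture are illustrative; trace.WithLinks and trace.LinkFromContext are real APIs in this package):

package example

import (
	"context"

	"go.opentelemetry.io/otel/trace"
)

type item struct {
	ctx context.Context // captured when the item was enqueued
}

func processBatch(tracer trace.Tracer, items []item) {
	links := make([]trace.Link, 0, len(items))
	for _, it := range items {
		links = append(links, trace.LinkFromContext(it.ctx))
	}
	// Supply links at creation so head-sampling decisions can see them,
	// as the AddLink documentation above recommends.
	_, span := tracer.Start(context.Background(), "process-batch",
		trace.WithLinks(links...))
	defer span.End()
}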
+func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/trace.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace/trace.go new file mode 100644 index 0000000000..d49adf671b --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -0,0 +1,323 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "bytes" + "encoding/hex" + "encoding/json" +) + +const ( + // FlagsSampled is a bitmask with the sampled bit set. A SpanContext + // with the sampling bit set means the span is sampled. + FlagsSampled = TraceFlags(0x01) + + errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" + + errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" + errNilTraceID errorConst = "trace-id can't be all zero" + + errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" + errNilSpanID errorConst = "span-id can't be all zero" +) + +type errorConst string + +func (e errorConst) Error() string { + return string(e) +} + +// TraceID is a unique identity of a trace. +// nolint:revive // revive complains about stutter of `trace.TraceID`. +type TraceID [16]byte + +var ( + nilTraceID TraceID + _ json.Marshaler = nilTraceID +) + +// IsValid checks whether the trace TraceID is valid. A valid trace ID does +// not consist of zeros only. +func (t TraceID) IsValid() bool { + return !bytes.Equal(t[:], nilTraceID[:]) +} + +// MarshalJSON implements a custom marshal function to encode TraceID +// as a hex string. +func (t TraceID) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String returns the hex string representation form of a TraceID. +func (t TraceID) String() string { + return hex.EncodeToString(t[:]) +} + +// SpanID is a unique identity of a span in a trace. +type SpanID [8]byte + +var ( + nilSpanID SpanID + _ json.Marshaler = nilSpanID +) + +// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// of zeros only. +func (s SpanID) IsValid() bool { + return !bytes.Equal(s[:], nilSpanID[:]) +} + +// MarshalJSON implements a custom marshal function to encode SpanID +// as a hex string. +func (s SpanID) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// String returns the hex string representation form of a SpanID. +func (s SpanID) String() string { + return hex.EncodeToString(s[:]) +} + +// TraceIDFromHex returns a TraceID from a hex string if it is compliant with +// the W3C trace-context specification. See more at +// https://www.w3.org/TR/trace-context/#trace-id +// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. 
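A quick sketch of the hex parsing implemented next, using the example IDs from the W3C traceparent header documentation (illustrative, not part of the vendored sources):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	tid, err := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	if err != nil {
		panic(err)
	}
	sid, err := trace.SpanIDFromHex("00f067aa0ba902b7")
	if err != nil {
		panic(err)
	}
	fmt.Println(tid.IsValid(), sid.IsValid()) // true true
	fmt.Println(tid.String())                 // 4bf92f3577b34da6a3ce929d0e0e4736

	// Only lowercase [0-9a-f] is accepted; uppercase hex is rejected.
	_, err = trace.TraceIDFromHex("4BF92F3577B34DA6A3CE929D0E0E4736")
	fmt.Println(err != nil) // true
}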
+func TraceIDFromHex(h string) (TraceID, error) { + t := TraceID{} + if len(h) != 32 { + return t, errInvalidTraceIDLength + } + + if err := decodeHex(h, t[:]); err != nil { + return t, err + } + + if !t.IsValid() { + return t, errNilTraceID + } + return t, nil +} + +// SpanIDFromHex returns a SpanID from a hex string if it is compliant +// with the w3c trace-context specification. +// See more at https://www.w3.org/TR/trace-context/#parent-id +func SpanIDFromHex(h string) (SpanID, error) { + s := SpanID{} + if len(h) != 16 { + return s, errInvalidSpanIDLength + } + + if err := decodeHex(h, s[:]); err != nil { + return s, err + } + + if !s.IsValid() { + return s, errNilSpanID + } + return s, nil +} + +func decodeHex(h string, b []byte) error { + for _, r := range h { + switch { + case 'a' <= r && r <= 'f': + continue + case '0' <= r && r <= '9': + continue + default: + return errInvalidHexID + } + } + + decoded, err := hex.DecodeString(h) + if err != nil { + return err + } + + copy(b, decoded) + return nil +} + +// TraceFlags contains flags that can be set on a SpanContext. +type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. + +// IsSampled returns if the sampling bit is set in the TraceFlags. +func (tf TraceFlags) IsSampled() bool { + return tf&FlagsSampled == FlagsSampled +} + +// WithSampled sets the sampling bit in a new copy of the TraceFlags. +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. + if sampled { + return tf | FlagsSampled + } + + return tf &^ FlagsSampled +} + +// MarshalJSON implements a custom marshal function to encode TraceFlags +// as a hex string. +func (tf TraceFlags) MarshalJSON() ([]byte, error) { + return json.Marshal(tf.String()) +} + +// String returns the hex string representation form of TraceFlags. +func (tf TraceFlags) String() string { + return hex.EncodeToString([]byte{byte(tf)}[:]) +} + +// SpanContextConfig contains mutable fields usable for constructing +// an immutable SpanContext. +type SpanContextConfig struct { + TraceID TraceID + SpanID SpanID + TraceFlags TraceFlags + TraceState TraceState + Remote bool +} + +// NewSpanContext constructs a SpanContext using values from the provided +// SpanContextConfig. +func NewSpanContext(config SpanContextConfig) SpanContext { + return SpanContext{ + traceID: config.TraceID, + spanID: config.SpanID, + traceFlags: config.TraceFlags, + traceState: config.TraceState, + remote: config.Remote, + } +} + +// SpanContext contains identifying trace information about a Span. +type SpanContext struct { + traceID TraceID + spanID SpanID + traceFlags TraceFlags + traceState TraceState + remote bool +} + +var _ json.Marshaler = SpanContext{} + +// IsValid returns if the SpanContext is valid. A valid span context has a +// valid TraceID and SpanID. +func (sc SpanContext) IsValid() bool { + return sc.HasTraceID() && sc.HasSpanID() +} + +// IsRemote indicates whether the SpanContext represents a remotely-created Span. +func (sc SpanContext) IsRemote() bool { + return sc.remote +} + +// WithRemote returns a copy of sc with the Remote property set to remote. +func (sc SpanContext) WithRemote(remote bool) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: remote, + } +} + +// TraceID returns the TraceID from the SpanContext. 
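Because SpanContext is immutable, the With* methods above return modified copies. A small sketch (illustrative only):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	tid, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	sid, _ := trace.SpanIDFromHex("00f067aa0ba902b7")

	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    tid,
		SpanID:     sid,
		TraceFlags: trace.FlagsSampled,
		Remote:     true, // e.g. extracted from an incoming request
	})

	local := sc.WithRemote(false)
	fmt.Println(sc.IsRemote())    // true; sc itself is unchanged
	fmt.Println(local.IsRemote()) // false
}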
+func (sc SpanContext) TraceID() TraceID { + return sc.traceID +} + +// HasTraceID checks if the SpanContext has a valid TraceID. +func (sc SpanContext) HasTraceID() bool { + return sc.traceID.IsValid() +} + +// WithTraceID returns a new SpanContext with the TraceID replaced. +func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { + return SpanContext{ + traceID: traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// SpanID returns the SpanID from the SpanContext. +func (sc SpanContext) SpanID() SpanID { + return sc.spanID +} + +// HasSpanID checks if the SpanContext has a valid SpanID. +func (sc SpanContext) HasSpanID() bool { + return sc.spanID.IsValid() +} + +// WithSpanID returns a new SpanContext with the SpanID replaced. +func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: spanID, + traceFlags: sc.traceFlags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceFlags returns the flags from the SpanContext. +func (sc SpanContext) TraceFlags() TraceFlags { + return sc.traceFlags +} + +// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. +func (sc SpanContext) IsSampled() bool { + return sc.traceFlags.IsSampled() +} + +// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. +func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: flags, + traceState: sc.traceState, + remote: sc.remote, + } +} + +// TraceState returns the TraceState from the SpanContext. +func (sc SpanContext) TraceState() TraceState { + return sc.traceState +} + +// WithTraceState returns a new SpanContext with the TraceState replaced. +func (sc SpanContext) WithTraceState(state TraceState) SpanContext { + return SpanContext{ + traceID: sc.traceID, + spanID: sc.spanID, + traceFlags: sc.traceFlags, + traceState: state, + remote: sc.remote, + } +} + +// Equal is a predicate that determines whether two SpanContext values are equal. +func (sc SpanContext) Equal(other SpanContext) bool { + return sc.traceID == other.traceID && + sc.spanID == other.spanID && + sc.traceFlags == other.traceFlags && + sc.traceState.String() == other.traceState.String() && + sc.remote == other.remote +} + +// MarshalJSON implements a custom marshal function to encode a SpanContext. +func (sc SpanContext) MarshalJSON() ([]byte, error) { + return json.Marshal(SpanContextConfig{ + TraceID: sc.traceID, + SpanID: sc.spanID, + TraceFlags: sc.traceFlags, + TraceState: sc.traceState, + Remote: sc.remote, + }) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/tracer.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 0000000000..77952d2a0b --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. 
This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Tracer
+
+	// Start creates a span and a context.Context containing the newly-created span.
+	//
+	// If the context.Context provided in `ctx` contains a Span, then the newly-created
+	// Span will be a child of that span; otherwise it will be a root span. This behavior
+	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+	// newly-created Span to be a root span even if `ctx` contains a Span.
+	//
+	// When creating a Span it is recommended to provide all known span attributes using
+	// the `WithAttributes()` SpanOption, as samplers will only have access to the
+	// attributes provided when a Span is created.
+	//
+	// Any Span that is created MUST also be ended. This is the responsibility of the user.
+	// Implementations of this API may leak memory or other resources if Spans are not ended.
+	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/hack/tools/vendor/go.opentelemetry.io/otel/trace/tracestate.go
new file mode 100644
index 0000000000..dc5e34cad0
--- /dev/null
+++ b/hack/tools/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -0,0 +1,330 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+const (
+	maxListMembers = 32
+
+	listDelimiters  = ","
+	memberDelimiter = "="
+
+	errInvalidKey    errorConst = "invalid tracestate key"
+	errInvalidValue  errorConst = "invalid tracestate value"
+	errInvalidMember errorConst = "invalid tracestate list-member"
+	errMemberNumber  errorConst = "too many list-members in tracestate"
+	errDuplicate     errorConst = "duplicate list-member in tracestate"
+)
+
+type member struct {
+	Key   string
+	Value string
+}
+
+// according to (chr = %x20 / (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E)),
+// meaning (chr = %x20-2B / %x2D-3C / %x3E-7E).
+func checkValueChar(v byte) bool {
+	return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E).
+func checkValueLast(v byte) bool {
+	return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// based on the W3C Trace Context specification
+//
+//	value    = (0*255(chr)) nblk-chr
+//	nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
+//	chr      = %x20 / nblk-chr
+//
+// see https://www.w3.org/TR/trace-context-1/#value
+func checkValue(val string) bool {
+	n := len(val)
+	if n == 0 || n > 256 {
+		return false
+	}
+	for i := 0; i < n-1; i++ {
+		if !checkValueChar(val[i]) {
+			return false
+		}
+	}
+	return checkValueLast(val[n-1])
+}
+
+func checkKeyRemain(key string) bool {
+	// ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+	for _, v := range key {
+		if isAlphaNum(byte(v)) {
+			continue
+		}
+		switch v {
+		case '_', '-', '*', '/':
+			continue
+		}
+		return false
+	}
+	return true
+}
+
+// according to
+//
+//	simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	system-id  = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//
+// param n is the maximum length of the remaining part: 255 for a simple-key,
+// 13 for a system-id.
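The Start contract above ("Any Span that is created MUST also be ended") is the part callers most often get wrong, so a brief caller-side sketch may help; handleOrder, process, and the attribute key are invented names:

package example

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

func handleOrder(ctx context.Context, tracer trace.Tracer, id string) error {
	// Attributes are supplied at creation so samplers can observe them.
	ctx, span := tracer.Start(ctx, "handleOrder",
		trace.WithAttributes(attribute.String("order.id", id)))
	defer span.End() // every started span is ended

	if err := process(ctx, id); err != nil {
		span.RecordError(err)
		span.SetStatus(codes.Error, "processing failed")
		return err
	}
	return nil
}

func process(ctx context.Context, id string) error { return nil } // stub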
+func checkKeyPart(key string, n int) bool {
+	if len(key) == 0 {
+		return false
+	}
+	first := key[0] // key's first char
+	ret := len(key[1:]) <= n
+	ret = ret && first >= 'a' && first <= 'z'
+	return ret && checkKeyRemain(key[1:])
+}
+
+func isAlphaNum(c byte) bool {
+	if c >= 'a' && c <= 'z' {
+		return true
+	}
+	return c >= '0' && c <= '9'
+}
+
+// according to
+//
+//	tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+//
+// param n is the maximum length of the remaining part: exactly 240 here.
+func checkKeyTenant(key string, n int) bool {
+	if len(key) == 0 {
+		return false
+	}
+	return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
+}
+
+// based on the W3C Trace Context specification
+//
+//	key              = simple-key / multi-tenant-key
+//	simple-key       = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	multi-tenant-key = tenant-id "@" system-id
+//	tenant-id        = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	system-id        = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	lcalpha          = %x61-7A ; a-z
+//
+// see https://www.w3.org/TR/trace-context-1/#tracestate-header.
+func checkKey(key string) bool {
+	tenant, system, ok := strings.Cut(key, "@")
+	if !ok {
+		return checkKeyPart(key, 255)
+	}
+	return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13)
+}
+
+func newMember(key, value string) (member, error) {
+	if !checkKey(key) {
+		return member{}, errInvalidKey
+	}
+	if !checkValue(value) {
+		return member{}, errInvalidValue
+	}
+	return member{Key: key, Value: value}, nil
+}
+
+func parseMember(m string) (member, error) {
+	key, val, ok := strings.Cut(m, memberDelimiter)
+	if !ok {
+		return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+	}
+	key = strings.TrimLeft(key, " \t")
+	val = strings.TrimRight(val, " \t")
+	result, e := newMember(key, val)
+	if e != nil {
+		return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+	}
+	return result, nil
+}
+
+// String encodes member into a string compliant with the W3C Trace Context
+// specification.
+func (m member) String() string {
+	return m.Key + "=" + m.Value
+}
+
+// TraceState provides additional vendor-specific trace identification
+// information across different distributed tracing systems. It represents an
+// immutable list consisting of key/value pairs; each pair is referred to as a
+// list-member.
+//
+// TraceState conforms to the W3C Trace Context specification
+// (https://www.w3.org/TR/trace-context-1). All operations that create or copy
+// a TraceState do so by validating all input and will only produce TraceState
+// that conform to the specification. Specifically, this means that all
+// list-member's key/value pairs are valid, no duplicate list-members exist,
+// and the maximum number of list-members (32) is not exceeded.
+type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState`
+	// list is the members in order.
+	list []member
+}
+
+var _ json.Marshaler = TraceState{}
+
+// ParseTraceState attempts to decode a TraceState from the passed
+// string. It returns an error if the input is invalid according to the W3C
+// Trace Context specification.
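A round-trip sketch of the parsing and mutation API defined below (the header value and keys are invented but valid under the grammar above):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	ts, err := trace.ParseTraceState("vendor1=opaque,tenant@system=value")
	if err != nil {
		panic(err)
	}

	// Insert returns an updated copy; the new member moves to the front.
	ts, err = ts.Insert("mykey", "myvalue")
	if err != nil {
		panic(err)
	}

	fmt.Println(ts.Get("vendor1")) // opaque
	fmt.Println(ts.Len())          // 3
	fmt.Println(ts.String())       // mykey=myvalue,vendor1=opaque,tenant@system=value
}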
+func ParseTraceState(ts string) (TraceState, error) {
+	if ts == "" {
+		return TraceState{}, nil
+	}
+
+	wrapErr := func(err error) error {
+		return fmt.Errorf("failed to parse tracestate: %w", err)
+	}
+
+	var members []member
+	found := make(map[string]struct{})
+	for ts != "" {
+		var memberStr string
+		memberStr, ts, _ = strings.Cut(ts, listDelimiters)
+		if len(memberStr) == 0 {
+			continue
+		}
+
+		m, err := parseMember(memberStr)
+		if err != nil {
+			return TraceState{}, wrapErr(err)
+		}
+
+		if _, ok := found[m.Key]; ok {
+			return TraceState{}, wrapErr(errDuplicate)
+		}
+		found[m.Key] = struct{}{}
+
+		members = append(members, m)
+		if n := len(members); n > maxListMembers {
+			return TraceState{}, wrapErr(errMemberNumber)
+		}
+	}
+
+	return TraceState{list: members}, nil
+}
+
+// MarshalJSON marshals the TraceState into JSON.
+func (ts TraceState) MarshalJSON() ([]byte, error) {
+	return json.Marshal(ts.String())
+}
+
+// String encodes the TraceState into a string compliant with the W3C
+// Trace Context specification. The returned string will be invalid if the
+// TraceState contains any invalid members.
+func (ts TraceState) String() string {
+	if len(ts.list) == 0 {
+		return ""
+	}
+	var n int
+	n += len(ts.list)     // member delimiters: '='
+	n += len(ts.list) - 1 // list delimiters: ','
+	for _, mem := range ts.list {
+		n += len(mem.Key)
+		n += len(mem.Value)
+	}
+
+	var sb strings.Builder
+	sb.Grow(n)
+	_, _ = sb.WriteString(ts.list[0].Key)
+	_ = sb.WriteByte('=')
+	_, _ = sb.WriteString(ts.list[0].Value)
+	for i := 1; i < len(ts.list); i++ {
+		_ = sb.WriteByte(listDelimiters[0])
+		_, _ = sb.WriteString(ts.list[i].Key)
+		_ = sb.WriteByte('=')
+		_, _ = sb.WriteString(ts.list[i].Value)
+	}
+	return sb.String()
+}
+
+// Get returns the value paired with key from the corresponding TraceState
+// list-member if it exists, otherwise an empty string is returned.
+func (ts TraceState) Get(key string) string {
+	for _, member := range ts.list {
+		if member.Key == key {
+			return member.Value
+		}
+	}
+
+	return ""
+}
+
+// Walk walks all key/value pairs in the TraceState by calling f.
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+	for _, m := range ts.list {
+		if !f(m.Key, m.Value) {
+			break
+		}
+	}
+}
+
+// Insert adds a new list-member defined by the key/value pair to the
+// TraceState. If a list-member already exists for the given key, that
+// list-member's value is updated. The new or updated list-member is always
+// moved to the beginning of the TraceState as specified by the W3C Trace
+// Context specification.
+//
+// If key or value are invalid according to the W3C Trace Context
+// specification an error is returned with the original TraceState.
+//
+// If adding a new list-member means the TraceState would have more members
+// than are allowed, the new list-member will be inserted and the right-most
+// list-member will be dropped in the returned TraceState.
+func (ts TraceState) Insert(key, value string) (TraceState, error) {
+	m, err := newMember(key, value)
+	if err != nil {
+		return ts, err
+	}
+	n := len(ts.list)
+	found := n
+	for i := range ts.list {
+		if ts.list[i].Key == key {
+			found = i
+		}
+	}
+	cTS := TraceState{}
+	if found == n && n < maxListMembers {
+		cTS.list = make([]member, n+1)
+	} else {
+		cTS.list = make([]member, n)
+	}
+	cTS.list[0] = m
+	// When the number of members exceeds capacity, drop the "right-most".
+ copy(cTS.list[1:], ts.list[0:found]) + if found < n { + copy(cTS.list[1+found:], ts.list[found+1:]) + } + return cTS, nil +} + +// Delete returns a copy of the TraceState with the list-member identified by +// key removed. +func (ts TraceState) Delete(key string) TraceState { + members := make([]member, ts.Len()) + copy(members, ts.list) + for i, member := range ts.list { + if member.Key == key { + members = append(members[:i], members[i+1:]...) + // TraceState should contain no duplicate members. + break + } + } + return TraceState{list: members} +} + +// Len returns the number of list-members in the TraceState. +func (ts TraceState) Len() int { + return len(ts.list) +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/verify_examples.sh b/hack/tools/vendor/go.opentelemetry.io/otel/verify_examples.sh new file mode 100644 index 0000000000..e57bf57fce --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/verify_examples.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +cd $(dirname $0) +TOOLS_DIR=$(pwd)/.tools + +if [ -z "${GOPATH}" ] ; then + printf "GOPATH is not defined.\n" + exit -1 +fi + +if [ ! -d "${GOPATH}" ] ; then + printf "GOPATH ${GOPATH} is invalid \n" + exit -1 +fi + +# Pre-requisites +if ! git diff --quiet; then \ + git status + printf "\n\nError: working tree is not clean\n" + exit -1 +fi + +if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then + printf "$(git log -1)" + printf "\n\nError: HEAD is not pointing to a tagged version" +fi + +make ${TOOLS_DIR}/gojq + +DIR_TMP="${GOPATH}/src/oteltmp/" +rm -rf $DIR_TMP +mkdir -p $DIR_TMP + +printf "Copy examples to ${DIR_TMP}\n" +cp -a ./example ${DIR_TMP} + +# Update go.mod files +printf "Update go.mod: rename module and remove replace\n" + +PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) + +for dir in $PACKAGE_DIRS; do + printf " Update go.mod for $dir\n" + (cd "${DIR_TMP}/${dir}" && \ + # replaces is ("mod1" "mod2" …) + replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ + # strip double quotes + replaces=("${replaces[@]%\"}") && \ + replaces=("${replaces[@]#\"}") && \ + # make an array (-dropreplace=mod1 -dropreplace=mod2 …) + dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ + go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ + go mod tidy) +done +printf "Update done:\n\n" + +# Build directories that contain main package. These directories are different than +# directories that contain go.mod files. +printf "Build examples:\n" +EXAMPLES=$(./get_main_pkgs.sh ./example) +for ex in $EXAMPLES; do + printf " Build $ex in ${DIR_TMP}/${ex}\n" + (cd "${DIR_TMP}/${ex}" && \ + go build .) +done + +# Cleanup +printf "Remove copied files.\n" +rm -rf $DIR_TMP diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/verify_readmes.sh b/hack/tools/vendor/go.opentelemetry.io/otel/verify_readmes.sh new file mode 100644 index 0000000000..1e87855eea --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/verify_readmes.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort) + +missingReadme=false +for dir in $dirs; do + if [ ! 
-f "$dir/README.md" ]; then + echo "couldn't find README.md for $dir" + missingReadme=true + fi +done + +if [ "$missingReadme" = true ] ; then + echo "Error: some READMEs couldn't be found." + exit 1 +fi diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/hack/tools/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh new file mode 100644 index 0000000000..c9b7cdbbfe --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +TARGET="${1:?Must provide target ref}" + +FILE="CHANGELOG.md" +TEMP_DIR=$(mktemp -d) +echo "Temp folder: $TEMP_DIR" + +# Only the latest commit of the feature branch is available +# automatically. To diff with the base branch, we need to +# fetch that too (and we only need its latest commit). +git fetch origin "${TARGET}" --depth=1 + +# Checkout the previous version on the base branch of the changelog to tmpfolder +git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE + +PREVIOUS_FILE="$TEMP_DIR/$FILE" +CURRENT_FILE="$FILE" +PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md" +CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md" + +# Extract released sections from the previous version +awk '/^/ {flag=1} /^/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE" + +# Extract released sections from the current version +awk '/^/ {flag=1} /^/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE" + +# Compare the released sections +if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then + echo "Error: The released sections of the changelog file have been modified." + diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE" + rm -rf "$TEMP_DIR" + false +fi + +rm -rf "$TEMP_DIR" +echo "The released sections remain unchanged." diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/version.go b/hack/tools/vendor/go.opentelemetry.io/otel/version.go new file mode 100644 index 0000000000..f67039ed1f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/version.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otel // import "go.opentelemetry.io/otel" + +// Version is the current release version of OpenTelemetry in use. 
+func Version() string { + return "1.29.0" +} diff --git a/hack/tools/vendor/go.opentelemetry.io/otel/versions.yaml b/hack/tools/vendor/go.opentelemetry.io/otel/versions.yaml new file mode 100644 index 0000000000..3ba611d713 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/otel/versions.yaml @@ -0,0 +1,49 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +module-sets: + stable-v1: + version: v1.29.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/bridge/opentracing/test + - go.opentelemetry.io/otel/example/dice + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk + - go.opentelemetry.io/otel/sdk/metric + - go.opentelemetry.io/otel/trace + experimental-metrics: + version: v0.51.0 + modules: + - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/exporters/prometheus + experimental-logs: + version: v0.5.0 + modules: + - go.opentelemetry.io/otel/log + - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp + - go.opentelemetry.io/otel/exporters/stdout/stdoutlog + experimental-schema: + version: v0.0.8 + modules: + - go.opentelemetry.io/otel/schema +excluded-modules: + - go.opentelemetry.io/otel/internal/tools diff --git a/hack/tools/vendor/go.opentelemetry.io/proto/otlp/LICENSE b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/hack/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go new file mode 100644 index 0000000000..c1af04e84e --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go @@ -0,0 +1,367 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +package v1 + +import ( + v1 "go.opentelemetry.io/proto/otlp/trace/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ExportTraceServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` +} + +func (x *ExportTraceServiceRequest) Reset() { + *x = ExportTraceServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportTraceServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportTraceServiceRequest) ProtoMessage() {} + +func (x *ExportTraceServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportTraceServiceRequest.ProtoReflect.Descriptor instead. 
+func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans {
+	if x != nil {
+		return x.ResourceSpans
+	}
+	return nil
+}
+
+type ExportTraceServiceResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The details of a partially successful export request.
+	//
+	// If the request is only partially accepted
+	// (i.e. when the server accepts only parts of the data and rejects the rest)
+	// the server MUST initialize the `partial_success` field and MUST
+	// set the `rejected_<signal>` with the number of items it rejected.
+	//
+	// Servers MAY also make use of the `partial_success` field to convey
+	// warnings/suggestions to senders even when the request was fully accepted.
+	// In such cases, the `rejected_<signal>` MUST have a value of `0` and
+	// the `error_message` MUST be non-empty.
+	//
+	// A `partial_success` message with an empty value (rejected_<signal> = 0 and
+	// `error_message` = "") is equivalent to it not being set/present. Senders
+	// SHOULD interpret it the same way as in the full success case.
+	PartialSuccess *ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"`
+}
+
+func (x *ExportTraceServiceResponse) Reset() {
+	*x = ExportTraceServiceResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTraceServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceResponse) ProtoMessage() {}
+
+func (x *ExportTraceServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceResponse.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExportTraceServiceResponse) GetPartialSuccess() *ExportTracePartialSuccess {
+	if x != nil {
+		return x.PartialSuccess
+	}
+	return nil
+}
+
+type ExportTracePartialSuccess struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The number of rejected spans.
+	//
+	// A `rejected_<signal>` field holding a `0` value indicates that the
+	// request was fully accepted.
+	RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"`
+	// A developer-facing human-readable message in English. It should be used
+	// either to explain why the server rejected parts of the data during a partial
+	// success or to convey warnings/suggestions during a full success. The message
+	// should offer guidance on how users can address such issues.
+	//
+	// error_message is an optional field. An error_message with an empty value
+	// is equivalent to it not being set.
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ExportTracePartialSuccess) Reset() { + *x = ExportTracePartialSuccess{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportTracePartialSuccess) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportTracePartialSuccess) ProtoMessage() {} + +func (x *ExportTracePartialSuccess) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportTracePartialSuccess.ProtoReflect.Descriptor instead. +func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ExportTracePartialSuccess) GetRejectedSpans() int64 { + if x != nil { + return x.RejectedSpans + } + return 0 +} + +func (x *ExportTracePartialSuccess) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_opentelemetry_proto_collector_trace_v1_trace_service_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = []byte{ + 0x0a, 0x3a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6f, + 0x0a, 0x19, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x0e, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, + 0x88, 0x01, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, + 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 
0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x67, 0x0a, 0x19, 0x45, 0x78, + 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, + 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0d, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x91, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x9c, 0x01, 0x0a, 0x29, 0x69, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, + 0x26, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce sync.Once + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc +) + +func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP() []byte { + 
file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData) + }) + return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData +} + +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = []interface{}{ + (*ExportTraceServiceRequest)(nil), // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest + (*ExportTraceServiceResponse)(nil), // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse + (*ExportTracePartialSuccess)(nil), // 2: opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess + (*v1.ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans +} +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = []int32{ + 3, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans + 2, // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess + 0, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest + 1, // 3: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() } +func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() { + if File_opentelemetry_proto_collector_trace_v1_trace_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTraceServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTraceServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTracePartialSuccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes, + 
DependencyIndexes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes, + }.Build() + File_opentelemetry_proto_collector_trace_v1_trace_service_proto = out.File + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = nil + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = nil + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go new file mode 100644 index 0000000000..bb1bd261ed --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go @@ -0,0 +1,171 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux". +// UnaryRPC :call TraceServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTraceServiceHandlerFromEndpoint instead. 
+func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_TraceService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterTraceServiceHandler(ctx, mux, conn) +} + +// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) +} + +// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "TraceServiceClient" to call the correct interceptors. 
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_TraceService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "traces"}, "")) +) + +var ( + forward_TraceService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/hack/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go new file mode 100644 index 0000000000..dd1b73f1e9 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.21.6 +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type TraceServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) +} + +type traceServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) { + out := new(ExportTraceServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TraceServiceServer is the server API for TraceService service. 
+// All implementations must embed UnimplementedTraceServiceServer +// for forward compatibility +type TraceServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) + mustEmbedUnimplementedTraceServiceServer() +} + +// UnimplementedTraceServiceServer must be embedded to have forward compatible implementations. +type UnimplementedTraceServiceServer struct { +} + +func (UnimplementedTraceServiceServer) Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} +func (UnimplementedTraceServiceServer) mustEmbedUnimplementedTraceServiceServer() {} + +// UnsafeTraceServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to TraceServiceServer will +// result in compilation errors. +type UnsafeTraceServiceServer interface { + mustEmbedUnimplementedTraceServiceServer() +} + +func RegisterTraceServiceServer(s grpc.ServiceRegistrar, srv TraceServiceServer) { + s.RegisterService(&TraceService_ServiceDesc, srv) +} + +func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportTraceServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// TraceService_ServiceDesc is the grpc.ServiceDesc for TraceService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var TraceService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _TraceService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto", +} diff --git a/hack/tools/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go new file mode 100644 index 0000000000..852209b097 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -0,0 +1,630 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/common/v1/common.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +type AnyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value is one of the listed fields. It is valid for all values to be unspecified + // in which case this AnyValue is considered to be "empty". + // + // Types that are assignable to Value: + // *AnyValue_StringValue + // *AnyValue_BoolValue + // *AnyValue_IntValue + // *AnyValue_DoubleValue + // *AnyValue_ArrayValue + // *AnyValue_KvlistValue + // *AnyValue_BytesValue + Value isAnyValue_Value `protobuf_oneof:"value"` +} + +func (x *AnyValue) Reset() { + *x = AnyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnyValue) ProtoMessage() {} + +func (x *AnyValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnyValue.ProtoReflect.Descriptor instead. 
+func (*AnyValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{0} +} + +func (m *AnyValue) GetValue() isAnyValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *AnyValue) GetStringValue() string { + if x, ok := x.GetValue().(*AnyValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *AnyValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*AnyValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *AnyValue) GetIntValue() int64 { + if x, ok := x.GetValue().(*AnyValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (x *AnyValue) GetDoubleValue() float64 { + if x, ok := x.GetValue().(*AnyValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *AnyValue) GetArrayValue() *ArrayValue { + if x, ok := x.GetValue().(*AnyValue_ArrayValue); ok { + return x.ArrayValue + } + return nil +} + +func (x *AnyValue) GetKvlistValue() *KeyValueList { + if x, ok := x.GetValue().(*AnyValue_KvlistValue); ok { + return x.KvlistValue + } + return nil +} + +func (x *AnyValue) GetBytesValue() []byte { + if x, ok := x.GetValue().(*AnyValue_BytesValue); ok { + return x.BytesValue + } + return nil +} + +type isAnyValue_Value interface { + isAnyValue_Value() +} + +type AnyValue_StringValue struct { + StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AnyValue_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type AnyValue_IntValue struct { + IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AnyValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type AnyValue_ArrayValue struct { + ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof"` +} + +type AnyValue_KvlistValue struct { + KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof"` +} + +type AnyValue_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +func (*AnyValue_StringValue) isAnyValue_Value() {} + +func (*AnyValue_BoolValue) isAnyValue_Value() {} + +func (*AnyValue_IntValue) isAnyValue_Value() {} + +func (*AnyValue_DoubleValue) isAnyValue_Value() {} + +func (*AnyValue_ArrayValue) isAnyValue_Value() {} + +func (*AnyValue_KvlistValue) isAnyValue_Value() {} + +func (*AnyValue_BytesValue) isAnyValue_Value() {} + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +// since oneof in AnyValue does not allow repeated fields. +type ArrayValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Array of values. The array may be empty (contain 0 elements). 
+ Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ArrayValue) Reset() { + *x = ArrayValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ArrayValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ArrayValue) ProtoMessage() {} + +func (x *ArrayValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ArrayValue.ProtoReflect.Descriptor instead. +func (*ArrayValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{1} +} + +func (x *ArrayValue) GetValues() []*AnyValue { + if x != nil { + return x.Values + } + return nil +} + +// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +// are semantically equivalent. +type KeyValueList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A collection of key/value pairs of key-value pairs. The list may be empty (may + // contain 0 elements). + // The keys MUST be unique (it is not allowed to have more than one + // value with the same key). + Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *KeyValueList) Reset() { + *x = KeyValueList{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValueList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValueList) ProtoMessage() {} + +func (x *KeyValueList) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValueList.ProtoReflect.Descriptor instead. +func (*KeyValueList) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{2} +} + +func (x *KeyValueList) GetValues() []*KeyValue { + if x != nil { + return x.Values + } + return nil +} + +// KeyValue is a key-value pair that is used to store Span attributes, Link +// attributes, etc. 
+type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{3} +} + +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *KeyValue) GetValue() *AnyValue { + if x != nil { + return x.Value + } + return nil +} + +// InstrumentationScope is a message representing the instrumentation scope information +// such as the fully qualified name and version. +type InstrumentationScope struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An empty instrumentation scope name means the name is unknown. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // Additional attributes that describe the scope. [Optional]. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *InstrumentationScope) Reset() { + *x = InstrumentationScope{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InstrumentationScope) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InstrumentationScope) ProtoMessage() {} + +func (x *InstrumentationScope) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InstrumentationScope.ProtoReflect.Descriptor instead. 
+func (*InstrumentationScope) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{4} +} + +func (x *InstrumentationScope) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *InstrumentationScope) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *InstrumentationScope) GetAttributes() []*KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xe0, 0x02, 0x0a, 0x08, + 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, + 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, + 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, + 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x50, 0x0a, 0x0c, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, + 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d, + 0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3f, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x4f, 0x0a, + 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, + 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x5b, + 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14, + 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, + 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_common_v1_common_proto_rawDescOnce sync.Once + 
file_opentelemetry_proto_common_v1_common_proto_rawDescData = file_opentelemetry_proto_common_v1_common_proto_rawDesc +) + +func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_common_v1_common_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_common_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_common_v1_common_proto_rawDescData) + }) + return file_opentelemetry_proto_common_v1_common_proto_rawDescData +} + +var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{ + (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue + (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue + (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList + (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue + (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope +} +var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ + 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue + 2, // 1: opentelemetry.proto.common.v1.AnyValue.kvlist_value:type_name -> opentelemetry.proto.common.v1.KeyValueList + 0, // 2: opentelemetry.proto.common.v1.ArrayValue.values:type_name -> opentelemetry.proto.common.v1.AnyValue + 3, // 3: opentelemetry.proto.common.v1.KeyValueList.values:type_name -> opentelemetry.proto.common.v1.KeyValue + 0, // 4: opentelemetry.proto.common.v1.KeyValue.value:type_name -> opentelemetry.proto.common.v1.AnyValue + 3, // 5: opentelemetry.proto.common.v1.InstrumentationScope.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_common_v1_common_proto_init() } +func file_opentelemetry_proto_common_v1_common_proto_init() { + if File_opentelemetry_proto_common_v1_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ArrayValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValueList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*InstrumentationScope); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AnyValue_StringValue)(nil), + (*AnyValue_BoolValue)(nil), + (*AnyValue_IntValue)(nil), + (*AnyValue_DoubleValue)(nil), + (*AnyValue_ArrayValue)(nil), + (*AnyValue_KvlistValue)(nil), + (*AnyValue_BytesValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_common_v1_common_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_common_v1_common_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_common_v1_common_proto_msgTypes, + }.Build() + File_opentelemetry_proto_common_v1_common_proto = out.File + file_opentelemetry_proto_common_v1_common_proto_rawDesc = nil + file_opentelemetry_proto_common_v1_common_proto_goTypes = nil + file_opentelemetry_proto_common_v1_common_proto_depIdxs = nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go new file mode 100644 index 0000000000..b7545b03b9 --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -0,0 +1,193 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/resource/v1/resource.proto + +package v1 + +import ( + v1 "go.opentelemetry.io/proto/otlp/common/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Resource information. +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Set of attributes that describe the resource. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. 
+ DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. +func (*Resource) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *Resource) GetAttributes() []*v1.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Resource) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ + 0x0a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, + 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x1f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, + 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, + 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01, + 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 
0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, + 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce sync.Once + file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = file_opentelemetry_proto_resource_v1_resource_proto_rawDesc +) + +func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_resource_v1_resource_proto_rawDescData) + }) + return file_opentelemetry_proto_resource_v1_resource_proto_rawDescData +} + +var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{ + (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource + (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue +} +var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{ + 1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() } +func file_opentelemetry_proto_resource_v1_resource_proto_init() { + if File_opentelemetry_proto_resource_v1_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_resource_v1_resource_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_resource_v1_resource_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_resource_v1_resource_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_resource_v1_resource_proto_msgTypes, + }.Build() + File_opentelemetry_proto_resource_v1_resource_proto = out.File + file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = nil + file_opentelemetry_proto_resource_v1_resource_proto_goTypes = nil + file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = nil +} diff --git a/hack/tools/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go new file mode 100644 index 0000000000..d7099c35bc --- /dev/null +++ b/hack/tools/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -0,0 +1,1281 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed 
under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/trace/v1/trace.proto + +package v1 + +import ( + v11 "go.opentelemetry.io/proto/otlp/common/v1" + v1 "go.opentelemetry.io/proto/otlp/resource/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0 + // Bits 0-7 are used for trace flags. + SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK SpanFlags = 256 + SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK SpanFlags = 512 +) + +// Enum value maps for SpanFlags. 
+var ( + SpanFlags_name = map[int32]string{ + 0: "SPAN_FLAGS_DO_NOT_USE", + 255: "SPAN_FLAGS_TRACE_FLAGS_MASK", + 256: "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK", + 512: "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK", + } + SpanFlags_value = map[string]int32{ + "SPAN_FLAGS_DO_NOT_USE": 0, + "SPAN_FLAGS_TRACE_FLAGS_MASK": 255, + "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK": 256, + "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK": 512, + } +) + +func (x SpanFlags) Enum() *SpanFlags { + p := new(SpanFlags) + *p = x + return p +} + +func (x SpanFlags) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SpanFlags) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0].Descriptor() +} + +func (SpanFlags) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0] +} + +func (x SpanFlags) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SpanFlags.Descriptor instead. +func (SpanFlags) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0} +} + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. Do NOT use as default. + // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SPAN_KIND_SERVER Span_SpanKind = 2 + // Indicates that the span describes a request to some remote service. + Span_SPAN_KIND_CLIENT Span_SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + Span_SPAN_KIND_PRODUCER Span_SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + Span_SPAN_KIND_CONSUMER Span_SpanKind = 5 +) + +// Enum value maps for Span_SpanKind. 
+var ( + Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SPAN_KIND_INTERNAL", + 2: "SPAN_KIND_SERVER", + 3: "SPAN_KIND_CLIENT", + 4: "SPAN_KIND_PRODUCER", + 5: "SPAN_KIND_CONSUMER", + } + Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SPAN_KIND_INTERNAL": 1, + "SPAN_KIND_SERVER": 2, + "SPAN_KIND_CLIENT": 3, + "SPAN_KIND_PRODUCER": 4, + "SPAN_KIND_CONSUMER": 5, + } +) + +func (x Span_SpanKind) Enum() *Span_SpanKind { + p := new(Span_SpanKind) + *p = x + return p +} + +func (x Span_SpanKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Span_SpanKind) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1].Descriptor() +} + +func (Span_SpanKind) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1] +} + +func (x Span_SpanKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Span_SpanKind.Descriptor instead. +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} +} + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type Status_StatusCode int32 + +const ( + // The default status. + Status_STATUS_CODE_UNSET Status_StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + Status_STATUS_CODE_OK Status_StatusCode = 1 + // The Span contains an error. + Status_STATUS_CODE_ERROR Status_StatusCode = 2 +) + +// Enum value maps for Status_StatusCode. +var ( + Status_StatusCode_name = map[int32]string{ + 0: "STATUS_CODE_UNSET", + 1: "STATUS_CODE_OK", + 2: "STATUS_CODE_ERROR", + } + Status_StatusCode_value = map[string]int32{ + "STATUS_CODE_UNSET": 0, + "STATUS_CODE_OK": 1, + "STATUS_CODE_ERROR": 2, + } +) + +func (x Status_StatusCode) Enum() *Status_StatusCode { + p := new(Status_StatusCode) + *p = x + return p +} + +func (x Status_StatusCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Status_StatusCode) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2].Descriptor() +} + +func (Status_StatusCode) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2] +} + +func (x Status_StatusCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status_StatusCode.Descriptor instead. +func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 0} +} + +// TracesData represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type TracesData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An array of ResourceSpans. 
+ // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` +} + +func (x *TracesData) Reset() { + *x = TracesData{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TracesData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TracesData) ProtoMessage() {} + +func (x *TracesData) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TracesData.ProtoReflect.Descriptor instead. +func (*TracesData) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0} +} + +func (x *TracesData) GetResourceSpans() []*ResourceSpans { + if x != nil { + return x.ResourceSpans + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (x *ResourceSpans) Reset() { + *x = ResourceSpans{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceSpans) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceSpans) ProtoMessage() {} + +func (x *ResourceSpans) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceSpans.ProtoReflect.Descriptor instead. 
+func (*ResourceSpans) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{1} +} + +func (x *ResourceSpans) GetResource() *v1.Resource { + if x != nil { + return x.Resource + } + return nil +} + +func (x *ResourceSpans) GetScopeSpans() []*ScopeSpans { + if x != nil { + return x.ScopeSpans + } + return nil +} + +func (x *ResourceSpans) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (x *ScopeSpans) Reset() { + *x = ScopeSpans{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopeSpans) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopeSpans) ProtoMessage() {} + +func (x *ScopeSpans) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopeSpans.ProtoReflect.Descriptor instead. +func (*ScopeSpans) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2} +} + +func (x *ScopeSpans) GetScope() *v11.InstrumentationScope { + if x != nil { + return x.Scope + } + return nil +} + +func (x *ScopeSpans) GetSpans() []*Span { + if x != nil { + return x.Spans + } + return nil +} + +func (x *ScopeSpans) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +// A Span represents a single operation performed by a single component of the system. +// +// The next available field id is 17. +type Span struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. 
An ID with all zeroes OR of length
+	// other than 8 bytes is considered invalid (empty string in OTLP/JSON
+	// is zero-length and thus is also invalid).
+	//
+	// This field is required.
+	SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+	// trace_state conveys information about request position in multiple distributed tracing graphs.
+	// It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+	// See also https://github.com/w3c/distributed-tracing for more details about this field.
+	TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
+	// The `span_id` of this span's parent span. If this is a root span, then this
+	// field must be empty. The ID is an 8-byte array.
+	ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
+	// Flags, a bit field.
+	//
+	// Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+	// Context specification. To read the 8-bit W3C trace flag, use
+	// `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+	//
+	// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+	//
+	// Bits 8 and 9 represent the 3 states of whether a span's parent
+	// is remote. The states are (unknown, is not remote, is remote).
+	// To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+	// To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+	//
+	// When creating span messages, if the message is logically forwarded from another source
+	// with an equivalent flags field (i.e., usually another OTLP span message), the field SHOULD
+	// be copied as-is. If creating from a source that does not have an equivalent flags field
+	// (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+	// be set to zero.
+	// Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+	//
+	// [Optional].
+	Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"`
+	// A description of the span's operation.
+	//
+	// For example, the name can be a qualified method name or a file name
+	// and a line number where the operation is called. A best practice is to use
+	// the same display name at the same call point in an application.
+	// This makes it easier to correlate spans in different traces.
+	//
+	// This field is semantically required to be set to a non-empty string.
+	// An empty value is equivalent to an unknown span name.
+	//
+	// This field is required.
+	Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+	// Distinguishes between spans generated in a particular context. For example,
+	// two spans with the same name may be distinguished using `CLIENT` (caller)
+	// and `SERVER` (callee) to identify queueing latency associated with the span.
+	Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
+	// start_time_unix_nano is the start time of the span. On the client side, this is the time
+	// kept by the local machine where the span execution starts. On the server side, this
+	// is the time when the server's application handler starts running.
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
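+	// For example, time.Unix(0, int64(span.GetStartTimeUnixNano())) recovers a
+	// time.Time from this value (an illustrative note, not part of the upstream
+	// comment; timestamps beyond the int64 range would overflow the conversion).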
+ // + // This field is semantically required and it is expected that end_time >= start_time. + StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // events is a collection of Event items. + Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). 
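+	//
+	// Because the getters generated in this file are nil-safe, a consumer can
+	// read the code without an explicit nil check (an illustrative note, not
+	// part of the upstream comment):
+	//
+	//	code := span.GetStatus().GetCode() // STATUS_CODE_UNSET when status is nil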
+ Status *Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *Span) Reset() { + *x = Span{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span) ProtoMessage() {} + +func (x *Span) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span.ProtoReflect.Descriptor instead. +func (*Span) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3} +} + +func (x *Span) GetTraceId() []byte { + if x != nil { + return x.TraceId + } + return nil +} + +func (x *Span) GetSpanId() []byte { + if x != nil { + return x.SpanId + } + return nil +} + +func (x *Span) GetTraceState() string { + if x != nil { + return x.TraceState + } + return "" +} + +func (x *Span) GetParentSpanId() []byte { + if x != nil { + return x.ParentSpanId + } + return nil +} + +func (x *Span) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + +func (x *Span) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Span) GetKind() Span_SpanKind { + if x != nil { + return x.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (x *Span) GetStartTimeUnixNano() uint64 { + if x != nil { + return x.StartTimeUnixNano + } + return 0 +} + +func (x *Span) GetEndTimeUnixNano() uint64 { + if x != nil { + return x.EndTimeUnixNano + } + return 0 +} + +func (x *Span) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Span) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +func (x *Span) GetEvents() []*Span_Event { + if x != nil { + return x.Events + } + return nil +} + +func (x *Span) GetDroppedEventsCount() uint32 { + if x != nil { + return x.DroppedEventsCount + } + return 0 +} + +func (x *Span) GetLinks() []*Span_Link { + if x != nil { + return x.Links + } + return nil +} + +func (x *Span) GetDroppedLinksCount() uint32 { + if x != nil { + return x.DroppedLinksCount + } + return 0 +} + +func (x *Span) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A developer-facing human readable error message. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // The status code. 
+ Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4} +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetCode() Status_StatusCode { + if x != nil { + return x.Code + } + return Status_STATUS_CODE_UNSET +} + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type Span_Event struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // time_unix_nano is the time the event occurred. + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *Span_Event) Reset() { + *x = Span_Event{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span_Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span_Event) ProtoMessage() {} + +func (x *Span_Event) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span_Event.ProtoReflect.Descriptor instead. 
+func (*Span_Event) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *Span_Event) GetTimeUnixNano() uint64 { + if x != nil { + return x.TimeUnixNano + } + return 0 +} + +func (x *Span_Event) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Span_Event) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Span_Event) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type Span_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The trace_state associated with the link. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. 
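+	//
+	// For example, assuming the SpanFlags mask constants generated in this
+	// package (an illustrative sketch, not part of the upstream comment):
+	//
+	//	flags := link.GetFlags()
+	//	known := flags&uint32(SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0
+	//	remote := known && flags&uint32(SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0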
+ Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"` +} + +func (x *Span_Link) Reset() { + *x = Span_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span_Link) ProtoMessage() {} + +func (x *Span_Link) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span_Link.ProtoReflect.Descriptor instead. +func (*Span_Link) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *Span_Link) GetTraceId() []byte { + if x != nil { + return x.TraceId + } + return nil +} + +func (x *Span_Link) GetSpanId() []byte { + if x != nil { + return x.SpanId + } + return nil +} + +func (x *Span_Link) GetTraceState() string { + if x != nil { + return x.TraceState + } + return "" +} + +func (x *Span_Link) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Span_Link) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +func (x *Span_Link) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + +var File_opentelemetry_proto_trace_v1_trace_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x52, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, + 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 
0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x49, 0x0a, 0x0b, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0a, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, + 0x07, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, + 0x12, 0x49, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x73, + 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, + 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, + 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x55, 0x72, 0x6c, 0x22, 0xc8, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a, + 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, + 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, + 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x70, 0x61, 0x6e, 0x2e, 
0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, + 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, + 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06, + 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, + 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, + 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, + 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, + 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, + 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, + 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, + 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, + 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, + 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x12, 0x0a, 0x04, 
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, + 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xf4, 0x01, 0x0a, + 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, + 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, + 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, + 0x61, 0x67, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, + 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, + 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, + 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, + 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, + 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, + 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f, + 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, + 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22, + 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 
0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x12, + 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b, + 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, + 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x2a, + 0x9c, 0x01, 0x0a, 0x09, 0x53, 0x70, 0x61, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x19, 0x0a, + 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, + 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1b, 0x53, 0x50, 0x41, 0x4e, + 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, 0x41, + 0x47, 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x12, 0x2a, 0x0a, 0x25, 0x53, 0x50, + 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, + 0x5f, 0x48, 0x41, 0x53, 0x5f, 0x49, 0x53, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d, + 0x41, 0x53, 0x4b, 0x10, 0x80, 0x02, 0x12, 0x26, 0x0a, 0x21, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, + 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, 0x5f, 0x49, 0x53, 0x5f, + 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0x80, 0x04, 0x42, 0x77, + 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, + 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x54, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce sync.Once + file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = file_opentelemetry_proto_trace_v1_trace_proto_rawDesc +) + +func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_trace_v1_trace_proto_rawDescData) + }) + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescData +} + +var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var 
file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{ + (SpanFlags)(0), // 0: opentelemetry.proto.trace.v1.SpanFlags + (Span_SpanKind)(0), // 1: opentelemetry.proto.trace.v1.Span.SpanKind + (Status_StatusCode)(0), // 2: opentelemetry.proto.trace.v1.Status.StatusCode + (*TracesData)(nil), // 3: opentelemetry.proto.trace.v1.TracesData + (*ResourceSpans)(nil), // 4: opentelemetry.proto.trace.v1.ResourceSpans + (*ScopeSpans)(nil), // 5: opentelemetry.proto.trace.v1.ScopeSpans + (*Span)(nil), // 6: opentelemetry.proto.trace.v1.Span + (*Status)(nil), // 7: opentelemetry.proto.trace.v1.Status + (*Span_Event)(nil), // 8: opentelemetry.proto.trace.v1.Span.Event + (*Span_Link)(nil), // 9: opentelemetry.proto.trace.v1.Span.Link + (*v1.Resource)(nil), // 10: opentelemetry.proto.resource.v1.Resource + (*v11.InstrumentationScope)(nil), // 11: opentelemetry.proto.common.v1.InstrumentationScope + (*v11.KeyValue)(nil), // 12: opentelemetry.proto.common.v1.KeyValue +} +var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{ + 4, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans + 10, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource + 5, // 2: opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans + 11, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope + 6, // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span + 1, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind + 12, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 8, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event + 9, // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link + 7, // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status + 2, // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode + 12, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 12, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_trace_v1_trace_proto_init() } +func file_opentelemetry_proto_trace_v1_trace_proto_init() { + if File_opentelemetry_proto_trace_v1_trace_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TracesData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceSpans); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopeSpans); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span_Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_trace_v1_trace_proto_rawDesc, + NumEnums: 3, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_trace_v1_trace_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_trace_v1_trace_proto_depIdxs, + EnumInfos: file_opentelemetry_proto_trace_v1_trace_proto_enumTypes, + MessageInfos: file_opentelemetry_proto_trace_v1_trace_proto_msgTypes, + }.Build() + File_opentelemetry_proto_trace_v1_trace_proto = out.File + file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = nil + file_opentelemetry_proto_trace_v1_trace_proto_goTypes = nil + file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = nil +} diff --git a/hack/tools/vendor/go.uber.org/zap/.golangci.yml b/hack/tools/vendor/go.uber.org/zap/.golangci.yml index fbc6df7906..2346df1351 100644 --- a/hack/tools/vendor/go.uber.org/zap/.golangci.yml +++ b/hack/tools/vendor/go.uber.org/zap/.golangci.yml @@ -17,7 +17,7 @@ linters: - unused # Our own extras: - - gofmt + - gofumpt - nolintlint # lints nolint directives - revive diff --git a/hack/tools/vendor/go.uber.org/zap/.readme.tmpl b/hack/tools/vendor/go.uber.org/zap/.readme.tmpl index 92aa65d660..4fea3027af 100644 --- a/hack/tools/vendor/go.uber.org/zap/.readme.tmpl +++ b/hack/tools/vendor/go.uber.org/zap/.readme.tmpl @@ -1,7 +1,15 @@ # :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +
+ Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -92,7 +100,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/hack/tools/vendor/go.uber.org/zap/CHANGELOG.md b/hack/tools/vendor/go.uber.org/zap/CHANGELOG.md index 11b4659761..6d6cd5f4d7 100644 --- a/hack/tools/vendor/go.uber.org/zap/CHANGELOG.md +++ b/hack/tools/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,14 +3,30 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 1.27.0 (20 Feb 2024) +Enhancements: +* [#1378][]: Add `WithLazy` method for `SugaredLogger`. +* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`. +* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`. +* [#1416][]: Add `WithPanicHook` option for testing panic logs. + +Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release. + +[#1378]: https://github.com/uber-go/zap/pull/1378 +[#1399]: https://github.com/uber-go/zap/pull/1399 +[#1406]: https://github.com/uber-go/zap/pull/1406 +[#1416]: https://github.com/uber-go/zap/pull/1416 + ## 1.26.0 (14 Sep 2023) Enhancements: +* [#1297][]: Add Dict as a Field. * [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured context. * [#1350][]: String encoding is much (~50%) faster now. -Thanks to @jquirke, @cdvr1993 for their contributions to this release. +Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release. +[#1297]: https://github.com/uber-go/zap/pull/1297 [#1319]: https://github.com/uber-go/zap/pull/1319 [#1350]: https://github.com/uber-go/zap/pull/1350 @@ -25,7 +41,7 @@ Enhancements: * [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set. * [#1281][]: Add `zap/exp/expfield` package which contains helper methods `Str` and `Strs` for constructing String-like zap.Fields. -* [#1310][]: Reduce stack size on `Any`. +* [#1310][]: Reduce stack size on `Any`. Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions to this release. @@ -352,7 +368,7 @@ to this release. [#675]: https://github.com/uber-go/zap/pull/675 [#704]: https://github.com/uber-go/zap/pull/704 -## v1.9.1 (06 Aug 2018) +## 1.9.1 (06 Aug 2018) Bugfixes: @@ -360,7 +376,7 @@ Bugfixes: [#614]: https://github.com/uber-go/zap/pull/614 -## v1.9.0 (19 Jul 2018) +## 1.9.0 (19 Jul 2018) Enhancements: * [#602][]: Reduce number of allocations when logging with reflection. @@ -373,7 +389,7 @@ Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and [#572]: https://github.com/uber-go/zap/pull/572 [#606]: https://github.com/uber-go/zap/pull/606 -## v1.8.0 (13 Apr 2018) +## 1.8.0 (13 Apr 2018) Enhancements: * [#508][]: Make log level configurable when redirecting the standard @@ -391,14 +407,14 @@ Thanks to @DiSiqueira and @djui for their contributions to this release. [#577]: https://github.com/uber-go/zap/pull/577 [#574]: https://github.com/uber-go/zap/pull/574 -## v1.7.1 (25 Sep 2017) +## 1.7.1 (25 Sep 2017) Bugfixes: * [#504][]: Store strings when using AddByteString with the map encoder. 
[#504]: https://github.com/uber-go/zap/pull/504 -## v1.7.0 (21 Sep 2017) +## 1.7.0 (21 Sep 2017) Enhancements: @@ -407,7 +423,7 @@ Enhancements: [#487]: https://github.com/uber-go/zap/pull/487 -## v1.6.0 (30 Aug 2017) +## 1.6.0 (30 Aug 2017) Enhancements: @@ -418,7 +434,7 @@ Enhancements: [#490]: https://github.com/uber-go/zap/pull/490 [#491]: https://github.com/uber-go/zap/pull/491 -## v1.5.0 (22 Jul 2017) +## 1.5.0 (22 Jul 2017) Enhancements: @@ -436,7 +452,7 @@ Thanks to @richard-tunein and @pavius for their contributions to this release. [#460]: https://github.com/uber-go/zap/pull/460 [#470]: https://github.com/uber-go/zap/pull/470 -## v1.4.1 (08 Jun 2017) +## 1.4.1 (08 Jun 2017) This release fixes two bugs. @@ -448,7 +464,7 @@ Bugfixes: [#435]: https://github.com/uber-go/zap/pull/435 [#444]: https://github.com/uber-go/zap/pull/444 -## v1.4.0 (12 May 2017) +## 1.4.0 (12 May 2017) This release adds a few small features and is fully backward-compatible. @@ -464,7 +480,7 @@ Enhancements: [#425]: https://github.com/uber-go/zap/pull/425 [#431]: https://github.com/uber-go/zap/pull/431 -## v1.3.0 (25 Apr 2017) +## 1.3.0 (25 Apr 2017) This release adds an enhancement to zap's testing helpers as well as the ability to marshal an AtomicLevel. It is fully backward-compatible. @@ -478,7 +494,7 @@ Enhancements: [#415]: https://github.com/uber-go/zap/pull/415 [#416]: https://github.com/uber-go/zap/pull/416 -## v1.2.0 (13 Apr 2017) +## 1.2.0 (13 Apr 2017) This release adds a gRPC compatibility wrapper. It is fully backward-compatible. @@ -489,7 +505,7 @@ Enhancements: [#402]: https://github.com/uber-go/zap/pull/402 -## v1.1.0 (31 Mar 2017) +## 1.1.0 (31 Mar 2017) This release fixes two bugs and adds some enhancements to zap's testing helpers. It is fully backward-compatible. @@ -510,7 +526,7 @@ Thanks to @moitias for contributing to this release. [#396]: https://github.com/uber-go/zap/pull/396 [#386]: https://github.com/uber-go/zap/pull/386 -## v1.0.0 (14 Mar 2017) +## 1.0.0 (14 Mar 2017) This is zap's first stable release. All exported APIs are now final, and no further breaking changes will be made in the 1.x release series. Anyone using a @@ -569,7 +585,7 @@ contributions to this release. [#365]: https://github.com/uber-go/zap/pull/365 [#372]: https://github.com/uber-go/zap/pull/372 -## v1.0.0-rc.3 (7 Mar 2017) +## 1.0.0-rc.3 (7 Mar 2017) This is the third release candidate for zap's stable release. There are no breaking changes. @@ -595,7 +611,7 @@ Thanks to @ansel1 and @suyash for their contributions to this release. [#353]: https://github.com/uber-go/zap/pull/353 [#311]: https://github.com/uber-go/zap/pull/311 -## v1.0.0-rc.2 (21 Feb 2017) +## 1.0.0-rc.2 (21 Feb 2017) This is the second release candidate for zap's stable release. It includes two breaking changes. @@ -641,7 +657,7 @@ Thanks to @skipor and @chapsuk for their contributions to this release. [#326]: https://github.com/uber-go/zap/pull/326 [#300]: https://github.com/uber-go/zap/pull/300 -## v1.0.0-rc.1 (14 Feb 2017) +## 1.0.0-rc.1 (14 Feb 2017) This is the first release candidate for zap's stable release. There are multiple breaking changes and improvements from the pre-release version. Most notably: @@ -661,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably: * Sampling is more accurate, and doesn't depend on the standard library's shared timer heap. 
-## v0.1.0-beta.1 (6 Feb 2017) +## 0.1.0-beta.1 (6 Feb 2017) This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and upgrade at their leisure. Since this is the first tagged release, there are no diff --git a/hack/tools/vendor/go.uber.org/zap/LICENSE.txt b/hack/tools/vendor/go.uber.org/zap/LICENSE similarity index 100% rename from hack/tools/vendor/go.uber.org/zap/LICENSE.txt rename to hack/tools/vendor/go.uber.org/zap/LICENSE diff --git a/hack/tools/vendor/go.uber.org/zap/README.md b/hack/tools/vendor/go.uber.org/zap/README.md index 9de08927be..a17035cb6f 100644 --- a/hack/tools/vendor/go.uber.org/zap/README.md +++ b/hack/tools/vendor/go.uber.org/zap/README.md @@ -1,7 +1,16 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +# :zap: zap + + +
Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -66,41 +75,44 @@ Log a message and 10 fields: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 1744 ns/op | +0% | 5 allocs/op -| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op -| zerolog | 918 ns/op | -47% | 1 allocs/op -| go-kit | 5590 ns/op | +221% | 57 allocs/op -| slog | 5640 ns/op | +223% | 40 allocs/op -| apex/log | 21184 ns/op | +1115% | 63 allocs/op -| logrus | 24338 ns/op | +1296% | 79 allocs/op -| log15 | 26054 ns/op | +1394% | 74 allocs/op +| :zap: zap | 656 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op +| zerolog | 380 ns/op | -42% | 1 allocs/op +| go-kit | 2249 ns/op | +243% | 57 allocs/op +| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op +| slog | 2481 ns/op | +278% | 42 allocs/op +| apex/log | 9591 ns/op | +1362% | 63 allocs/op +| log15 | 11393 ns/op | +1637% | 75 allocs/op +| logrus | 11654 ns/op | +1677% | 79 allocs/op Log a message with a logger that already has 10 fields of context: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 193 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op -| zerolog | 81 ns/op | -58% | 0 allocs/op -| slog | 322 ns/op | +67% | 0 allocs/op -| go-kit | 5377 ns/op | +2686% | 56 allocs/op -| apex/log | 19518 ns/op | +10013% | 53 allocs/op -| log15 | 19812 ns/op | +10165% | 70 allocs/op -| logrus | 21997 ns/op | +11297% | 68 allocs/op +| :zap: zap | 67 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op +| zerolog | 35 ns/op | -48% | 0 allocs/op +| slog | 193 ns/op | +188% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op +| go-kit | 2460 ns/op | +3572% | 56 allocs/op +| log15 | 9038 ns/op | +13390% | 70 allocs/op +| apex/log | 9068 ns/op | +13434% | 53 allocs/op +| logrus | 10521 ns/op | +15603% | 68 allocs/op Log a static string, without any context or `printf`-style templating: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 165 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op -| zerolog | 95 ns/op | -42% | 0 allocs/op -| slog | 296 ns/op | +79% | 0 allocs/op -| go-kit | 415 ns/op | +152% | 9 allocs/op -| standard library | 422 ns/op | +156% | 2 allocs/op -| apex/log | 1601 ns/op | +870% | 5 allocs/op -| logrus | 3017 ns/op | +1728% | 23 allocs/op -| log15 | 3469 ns/op | +2002% | 20 allocs/op +| :zap: zap | 63 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op +| zerolog | 32 ns/op | -49% | 0 allocs/op +| standard library | 124 ns/op | +97% | 1 allocs/op +| slog | 196 ns/op | +211% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op +| go-kit | 213 ns/op | +238% | 9 allocs/op +| apex/log | 771 ns/op | +1124% | 5 allocs/op +| logrus | 1439 ns/op | +2184% | 23 allocs/op +| log15 | 2069 ns/op | +3184% | 20 allocs/op ## Development Status: Stable @@ -120,7 +132,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/hack/tools/vendor/go.uber.org/zap/buffer/buffer.go b/hack/tools/vendor/go.uber.org/zap/buffer/buffer.go index 27fb5cd5da..0b8540c213 100644 --- a/hack/tools/vendor/go.uber.org/zap/buffer/buffer.go +++ b/hack/tools/vendor/go.uber.org/zap/buffer/buffer.go @@ -42,7 +42,7 @@ func (b *Buffer) AppendByte(v byte) { b.bs = append(b.bs, v) } -// AppendBytes writes a single byte to the Buffer. +// AppendBytes writes the given slice of bytes to the Buffer. func (b *Buffer) AppendBytes(v []byte) { b.bs = append(b.bs, v...) } diff --git a/hack/tools/vendor/go.uber.org/zap/field.go b/hack/tools/vendor/go.uber.org/zap/field.go index c8dd3358a9..6743930b82 100644 --- a/hack/tools/vendor/go.uber.org/zap/field.go +++ b/hack/tools/vendor/go.uber.org/zap/field.go @@ -460,6 +460,8 @@ func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { // - https://github.com/uber-go/zap/pull/1304 // - https://github.com/uber-go/zap/pull/1305 // - https://github.com/uber-go/zap/pull/1308 +// +// See https://github.com/golang/go/issues/62077 for upstream issue. type anyFieldC[T any] func(string, T) Field func (f anyFieldC[T]) Any(key string, val any) Field { diff --git a/hack/tools/vendor/go.uber.org/zap/logger.go b/hack/tools/vendor/go.uber.org/zap/logger.go index 6205fe48a6..c4d3003239 100644 --- a/hack/tools/vendor/go.uber.org/zap/logger.go +++ b/hack/tools/vendor/go.uber.org/zap/logger.go @@ -43,6 +43,7 @@ type Logger struct { development bool addCaller bool + onPanic zapcore.CheckWriteHook // default is WriteThenPanic onFatal zapcore.CheckWriteHook // default is WriteThenFatal name string @@ -345,27 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Set up any required terminal behavior. switch ent.Level { case zapcore.PanicLevel: - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) case zapcore.FatalLevel: - onFatal := log.onFatal - // nil or WriteThenNoop will lead to continued execution after - // a Fatal log entry, which is unexpected. For example, - // - // f, err := os.Open(..) - // if err != nil { - // log.Fatal("cannot open", zap.Error(err)) - // } - // fmt.Println(f.Name()) - // - // The f.Name() will panic if we continue execution after the - // log.Fatal. - if onFatal == nil || onFatal == zapcore.WriteThenNoop { - onFatal = zapcore.WriteThenFatal - } - ce = ce.After(ent, onFatal) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal)) case zapcore.DPanicLevel: if log.development { - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) } } @@ -430,3 +416,20 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return ce } + +func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook { + // A nil or WriteThenNoop hook will lead to continued execution after + // a Panic or Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the log.Fatal. 
+ if override == nil || override == zapcore.WriteThenNoop { + return defaultHook + } + return override +} diff --git a/hack/tools/vendor/go.uber.org/zap/options.go b/hack/tools/vendor/go.uber.org/zap/options.go index c4f3bca3d2..43d357ac90 100644 --- a/hack/tools/vendor/go.uber.org/zap/options.go +++ b/hack/tools/vendor/go.uber.org/zap/options.go @@ -132,6 +132,21 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { }) } +// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs. +// Zap will call this hook after writing a log statement with a Panic/DPanic level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a Panic/DPanic log message, but it will not start a panic. +// +// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit)) +// +// This is useful for testing Panic/DPanic log output. +func WithPanicHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onPanic = hook + }) +} + // OnFatal sets the action to take on fatal logs. // // Deprecated: Use [WithFatalHook] instead. diff --git a/hack/tools/vendor/go.uber.org/zap/sugar.go b/hack/tools/vendor/go.uber.org/zap/sugar.go index 00ac5fe3ac..8904cd0871 100644 --- a/hack/tools/vendor/go.uber.org/zap/sugar.go +++ b/hack/tools/vendor/go.uber.org/zap/sugar.go @@ -115,6 +115,21 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } +// WithLazy adds a variadic number of fields to the logging context lazily. +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// Similar to [With], fields added to the child don't affect the parent, +// and vice versa. Also, the keys in key-value pairs should be strings. In development, +// passing a non-string key panics, while in production it logs an error and skips the pair. +// Passing an orphaned key has the same behavior. +func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)} +} + // Level reports the minimum enabled level for this logger. // // For NopLoggers, this is [zapcore.InvalidLevel]. @@ -122,6 +137,12 @@ func (s *SugaredLogger) Level() zapcore.Level { return zapcore.LevelOf(s.base.core) } +// Log logs the provided arguments at provided level. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) { + s.log(lvl, "", args, nil) +} + // Debug logs the provided arguments at [DebugLevel]. // Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Debug(args ...interface{}) { @@ -165,6 +186,12 @@ func (s *SugaredLogger) Fatal(args ...interface{}) { s.log(FatalLevel, "", args, nil) } +// Logf formats the message according to the format specifier +// and logs it at provided level. +func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) { + s.log(lvl, template, args, nil) +} + // Debugf formats the message according to the format specifier // and logs it at [DebugLevel]. 
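Worth noting: the hunks above introduce `zap.WithPanicHook` alongside the existing fatal hook, plus level-parameterized sugared methods (`Log`, `Logf`). A minimal sketch of how they compose, assuming the standard zap constructors (the production config and message strings are illustrative, not from this diff):

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// WithPanicHook overrides the default WriteThenPanic terminal behavior;
	// WriteThenGoexit ends the goroutine without panicking (handy in tests).
	logger, err := zap.NewProduction(zap.WithPanicHook(zapcore.WriteThenGoexit))
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	sugar := logger.Sugar()
	sugar.Log(zap.InfoLevel, "vendor bump applied")     // level picked at call time
	sugar.Logf(zap.WarnLevel, "%d modules updated", 42) // printf-style variant
}
```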
func (s *SugaredLogger) Debugf(template string, args ...interface{}) { @@ -208,6 +235,12 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { s.log(FatalLevel, template, args, nil) } +// Logw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) { + s.log(lvl, msg, nil, keysAndValues) +} + // Debugw logs a message with some additional context. The variadic key-value // pairs are treated as they are in With. // @@ -255,6 +288,12 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { s.log(FatalLevel, msg, nil, keysAndValues) } +// Logln logs a message at provided level. +// Spaces are always added between arguments. +func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) { + s.logln(lvl, args, nil) +} + // Debugln logs a message at [DebugLevel]. // Spaces are always added between arguments. func (s *SugaredLogger) Debugln(args ...interface{}) { diff --git a/hack/tools/vendor/go.uber.org/zap/zapcore/console_encoder.go b/hack/tools/vendor/go.uber.org/zap/zapcore/console_encoder.go index 8ca0bfaf56..cc2b4e07b9 100644 --- a/hack/tools/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/hack/tools/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // If this ever becomes a performance bottleneck, we can implement // ArrayEncoder for our plain-text format. arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { + if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() { c.EncodeTime(ent.Time, arr) } if c.LevelKey != "" && c.EncodeLevel != nil { diff --git a/hack/tools/vendor/go.uber.org/zap/zapcore/encoder.go b/hack/tools/vendor/go.uber.org/zap/zapcore/encoder.go index 5769ff3e4e..0446254156 100644 --- a/hack/tools/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/hack/tools/vendor/go.uber.org/zap/zapcore/encoder.go @@ -37,6 +37,9 @@ const DefaultLineEnding = "\n" const OmitKey = "" // A LevelEncoder serializes a Level to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type LevelEncoder func(Level, PrimitiveArrayEncoder) // LowercaseLevelEncoder serializes a Level to a lowercase string. For example, @@ -90,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error { } // A TimeEncoder serializes a time.Time to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type TimeEncoder func(time.Time, PrimitiveArrayEncoder) // EpochTimeEncoder serializes a time.Time to a floating-point number of seconds @@ -219,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error { } // A DurationEncoder serializes a time.Duration to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) // SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. @@ -262,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { } // A CallerEncoder serializes an EntryCaller to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. 
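The encoder hunks above tighten the documented contract: every Level/Time/Duration/Caller/Name encoder must make exactly one `Append*` call. A conforming custom `TimeEncoder` might look like this sketch (the JSON-core wiring is ordinary zap usage, assumed rather than taken from this diff):

```go
package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// unixMillis makes exactly one Append* call per invocation,
// satisfying the contract documented above.
func unixMillis(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
	enc.AppendInt64(t.UnixMilli())
}

func main() {
	cfg := zap.NewProductionEncoderConfig()
	cfg.EncodeTime = unixMillis
	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zap.InfoLevel)
	zap.New(core).Info("tick") // timestamp field is emitted as unix milliseconds
}
```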
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) // FullCallerEncoder serializes a caller in /full/path/to/package/file:line @@ -292,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error { // A NameEncoder serializes a period-separated logger name to a primitive // type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type NameEncoder func(string, PrimitiveArrayEncoder) // FullNameEncoder serializes the logger name as-is. diff --git a/hack/tools/vendor/go.uber.org/zap/zapcore/field.go b/hack/tools/vendor/go.uber.org/zap/zapcore/field.go index 95bdb0a126..308c9781ed 100644 --- a/hack/tools/vendor/go.uber.org/zap/zapcore/field.go +++ b/hack/tools/vendor/go.uber.org/zap/zapcore/field.go @@ -47,7 +47,7 @@ const ( ByteStringType // Complex128Type indicates that the field carries a complex128. Complex128Type - // Complex64Type indicates that the field carries a complex128. + // Complex64Type indicates that the field carries a complex64. Complex64Type // DurationType indicates that the field carries a time.Duration. DurationType diff --git a/hack/tools/vendor/go.uber.org/zap/zapcore/json_encoder.go b/hack/tools/vendor/go.uber.org/zap/zapcore/json_encoder.go index c8ab86979b..9685169b2e 100644 --- a/hack/tools/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/hack/tools/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -372,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.Level.String()) } } - if final.TimeKey != "" { + if final.TimeKey != "" && !ent.Time.IsZero() { final.AddTime(final.TimeKey, ent.Time) } if ent.LoggerName != "" && final.NameKey != "" { diff --git a/hack/tools/vendor/golang.org/x/exp/slices/cmp.go b/hack/tools/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 0000000000..fbf1934a06 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. +func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/hack/tools/vendor/golang.org/x/exp/slices/slices.go b/hack/tools/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 0000000000..46ceac3439 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,515 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. 
+package slices + +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[S ~[]E, E comparable](s1, s2 S) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using an equality +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { + for i, v1 := range s1 { + if i >= len(s2) { + return +1 + } + v2 := s2[i] + if c := cmpCompare(v1, v2); c != 0 { + return c + } + } + if len(s1) < len(s2) { + return -1 + } + return 0 +} + +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). +func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { + for i, v1 := range s1 { + if i >= len(s2) { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < len(s2) { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[S ~[]E, E comparable](s S, v E) bool { + return Index(s, v) >= 0 +} + +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). 
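Before the `Insert` implementation below, a quick sketch of the lookup and comparison helpers this file has defined so far (the values are hypothetical):

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 2, 4}
	s = slices.Insert(s, 2, 3) // shifts s[2:] up; s is now [1 2 3 4]

	fmt.Println(slices.Equal(s, []int{1, 2, 3, 4})) // true
	fmt.Println(slices.Contains(s, 3))              // true
	fmt.Println(slices.Index(s, 4))                 // 3
	fmt.Println(slices.Compare(s, []int{1, 2, 3}))  // +1: equal prefix, s is longer
}
```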
+func Insert[S ~[]E, E any](s S, i int, v ...E) S { + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) + copy(s2[i:], v) + copy(s2[i+m:], s[i:]) + return s2 + } + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. + // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. + copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s +} + +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j:len(s)] // bounds check + + if i == j { + return s + } + + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s +} + +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// DeleteFunc zeroes the elements between the new length and the original length. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. 
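`Delete` and `Replace` (documented above, with `Replace`'s body following) pair naturally; a small usage sketch with hypothetical data:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []string{"a", "b", "c", "d"}
	s = slices.Delete(s, 1, 3)            // drops s[1:3]; s is now [a d]
	s = slices.Replace(s, 1, 2, "x", "y") // swaps s[1:2] for two values: [a x y]
	fmt.Println(s)
}
```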
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. + s2 := append(s[:i], make(S, tot-i)...) // See Insert + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 + } + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. + copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC + return r + } + + // We are expanding (v is bigger than j-i). + // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. + // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. + y := len(v) - (j - i) // length of y portion + + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. + // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. +// Compact zeroes the elements between the new length and the original length. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. 
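`Compact` and `CompactFunc` (whose body follows) behave like Unix `uniq` on adjacent runs; for example:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

func main() {
	fmt.Println(slices.Compact([]int{1, 1, 2, 2, 2, 3})) // [1 2 3]

	words := []string{"Go", "GO", "go", "rust"}
	// CompactFunc keeps the first element of each run that compares equal.
	dedup := slices.CompactFunc(words, func(a, b string) bool {
		return strings.EqualFold(a, b)
	})
	fmt.Println(dedup) // [Go rust]
}
```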
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. +// The follow-cycles algorithm can be 1-write but it is not very cache friendly. + +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. +func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. +func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. +// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? 
+ panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. +func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/hack/tools/vendor/golang.org/x/exp/slices/sort.go b/hack/tools/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 0000000000..f58bbc7ba4 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,197 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sort[S ~[]E, E constraints.Ordered](x S) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b or when a is not comparable to b in the sense +// of the formal definition of Strict Weak Ordering. +// +// SortFunc requires that cmp is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +// To indicate 'uncomparable', return 0 from the function. +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + n := len(x) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) +} + +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { + for i := len(x) - 1; i > 0; i-- { + if cmpLess(x[i], x[i-1]) { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. +func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { + for i := len(x) - 1; i > 0; i-- { + if cmp(x[i], x[i-1]) < 0 { + return false + } + } + return true +} + +// Min returns the minimal value in x. It panics if x is empty. +// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. +func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. +// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). 
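With `sort.go` now vendored, the ordered sorting entry points declared above can be exercised directly; a brief sketch (inputs are hypothetical):

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []int{3, 1, 2}
	slices.Sort(xs)
	fmt.Println(xs, slices.Min(xs), slices.Max(xs)) // [1 2 3] 1 3

	words := []string{"pear", "fig", "banana"}
	// SortFunc takes a three-way comparator: negative, zero, or positive.
	slices.SortFunc(words, func(a, b string) int { return len(a) - len(b) })
	fmt.Println(words) // [fig pear banana]
}
```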
+func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmpLess(x[h], target) { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } + } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) +} + +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 + } else { + j = h // preserves cmp(x[j], target) >= 0 + } + } + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. 
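`BinarySearch` and `BinarySearchFunc` above return both an index and a found flag; a sketch against a sorted slice (the `user` type and values are made up for illustration):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

type user struct{ name string }

func main() {
	users := []user{{"ana"}, {"bob"}, {"eve"}} // sorted by name
	i, ok := slices.BinarySearchFunc(users, "bob", func(u user, target string) int {
		return strings.Compare(u.name, target)
	})
	fmt.Println(i, ok) // 1 true

	j, ok := slices.BinarySearch([]int{2, 4, 8}, 5)
	fmt.Println(j, ok) // 2 false (5 would be inserted at index 2)
}
```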
+func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/hack/tools/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/hack/tools/vendor/golang.org/x/exp/slices/zsortanyfunc.go new file mode 100644 index 0000000000..06f2c7a248 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownCmpFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { + child++ + } + if !(cmp(data[first+root], data[first+child]) < 0) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownCmpFunc(data, i, hi, first, cmp) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownCmpFunc(data, lo, i, first, cmp) + } +} + +// pdqsortCmpFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortCmpFunc(data, a, b, cmp) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortCmpFunc(data, a, b, cmp) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsCmpFunc(data, a, b, cmp) + limit-- + } + + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) + if hint == decreasingHint { + reverseRangeCmpFunc(data, a, b, cmp) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. 
+ if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortCmpFunc(data, a, b, cmp) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) + a = mid + continue + } + + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortCmpFunc(data, a, mid, limit, cmp) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortCmpFunc(data, mid+1, b, limit, cmp) + b = mid + } + } +} + +// partitionCmpFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for i <= j && (cmp(data[i], data[a]) < 0) {
+		i++
+	}
+	for i <= j && !(cmp(data[j], data[a]) < 0) {
+		j--
+	}
+	if i > j {
+		data[j], data[a] = data[a], data[j]
+		return j, true
+	}
+	data[i], data[j] = data[j], data[i]
+	i++
+	j--
+
+	for {
+		for i <= j && (cmp(data[i], data[a]) < 0) {
+			i++
+		}
+		for i <= j && !(cmp(data[j], data[a]) < 0) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	data[j], data[a] = data[a], data[j]
+	return j, false
+}
+
+// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for {
+		for i <= j && !(cmp(data[a], data[i]) < 0) {
+			i++
+		}
+		for i <= j && (cmp(data[a], data[j]) < 0) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	return i
+}
+
+// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
+	const (
+		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
+		shortestShifting = 50 // don't shift any elements on short arrays
+	)
+	i := a + 1
+	for j := 0; j < maxSteps; j++ {
+		for i < b && !(cmp(data[i], data[i-1]) < 0) {
+			i++
+		}
+
+		if i == b {
+			return true
+		}
+
+		if b-a < shortestShifting {
+			return false
+		}
+
+		data[i], data[i-1] = data[i-1], data[i]
+
+		// Shift the smaller one to the left.
+		if i-a >= 2 {
+			for j := i - 1; j >= 1; j-- {
+				if !(cmp(data[j], data[j-1]) < 0) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+		// Shift the greater one to the right.
+		if b-i >= 2 {
+			for j := i + 1; j < b; j++ {
+				if !(cmp(data[j], data[j-1]) < 0) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+	}
+	return false
+}
+
+// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+	length := b - a
+	if length >= 8 {
+		random := xorshift(length)
+		modulus := nextPowerOfTwo(length)
+
+		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+			other := int(uint(random.Next()) & (modulus - 1))
+			if other >= length {
+				other -= length
+			}
+			data[idx], data[a+other] = data[a+other], data[idx]
+		}
+	}
+}
+
+// choosePivotCmpFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) + } + // Find the median among i, j, k and stores it into j. + j = medianCmpFunc(data, i, j, k, &swaps, cmp) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { + *swaps++ + return b, a + } + return a, b +} + +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) + return b +} + +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) +} + +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortCmpFunc(data, a, b, cmp) + a = b + b += blockSize + } + insertionSortCmpFunc(data, a, n, cmp) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeCmpFunc(data, a, a+blockSize, b, cmp) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeCmpFunc(data, a, m, n, cmp) + } + blockSize *= 2 + } +} + +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
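The SymMerge routine documented above is the merge phase behind `SortStableFunc` (declared earlier in this file's diff, reached via `stableCmpFunc`); a small sketch showing the stability guarantee:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

type rec struct {
	key string
	seq int
}

func main() {
	recs := []rec{{"b", 1}, {"a", 2}, {"b", 3}}
	slices.SortStableFunc(recs, func(x, y rec) int {
		return strings.Compare(x.key, y.key)
	})
	fmt.Println(recs) // [{a 2} {b 1} {b 3}]: equal keys keep their input order
}
```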
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if cmp(data[h], data[a]) < 0 { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(cmp(data[m], data[h]) < 0) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(cmp(data[p-c], data[c]) < 0) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateCmpFunc(data, start, m, end, cmp) + } + if a < start && start < mid { + symMergeCmpFunc(data, a, start, mid, cmp) + } + if mid < end && end < b { + symMergeCmpFunc(data, mid, end, b, cmp) + } +} + +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeCmpFunc(data, m-i, m, j, cmp) + i -= j + } else { + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) + j -= i + } + } + // i == j + swapRangeCmpFunc(data, m-i, m, i, cmp) +} diff --git a/hack/tools/vendor/golang.org/x/exp/slices/zsortordered.go b/hack/tools/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 0000000000..99b47c3986 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { + child++ + } + if !cmpLess(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !cmpLess(data[a-1], data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for i <= j && cmpLess(data[i], data[a]) {
+		i++
+	}
+	for i <= j && !cmpLess(data[j], data[a]) {
+		j--
+	}
+	if i > j {
+		data[j], data[a] = data[a], data[j]
+		return j, true
+	}
+	data[i], data[j] = data[j], data[i]
+	i++
+	j--
+
+	for {
+		for i <= j && cmpLess(data[i], data[a]) {
+			i++
+		}
+		for i <= j && !cmpLess(data[j], data[a]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	data[j], data[a] = data[a], data[j]
+	return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for {
+		for i <= j && !cmpLess(data[a], data[i]) {
+			i++
+		}
+		for i <= j && cmpLess(data[a], data[j]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+	const (
+		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
+		shortestShifting = 50 // don't shift any elements on short arrays
+	)
+	i := a + 1
+	for j := 0; j < maxSteps; j++ {
+		for i < b && !cmpLess(data[i], data[i-1]) {
+			i++
+		}
+
+		if i == b {
+			return true
+		}
+
+		if b-a < shortestShifting {
+			return false
+		}
+
+		data[i], data[i-1] = data[i-1], data[i]
+
+		// Shift the smaller one to the left.
+		if i-a >= 2 {
+			for j := i - 1; j >= 1; j-- {
+				if !cmpLess(data[j], data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+		// Shift the greater one to the right.
+		if b-i >= 2 {
+			for j := i + 1; j < b; j++ {
+				if !cmpLess(data[j], data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+	}
+	return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+	length := b - a
+	if length >= 8 {
+		random := xorshift(length)
+		modulus := nextPowerOfTwo(length)
+
+		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+			other := int(uint(random.Next()) & (modulus - 1))
+			if other >= length {
+				other -= length
+			}
+			data[idx], data[a+other] = data[a+other], data[idx]
+		}
+	}
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if cmpLess(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if cmpLess(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !cmpLess(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !cmpLess(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/hack/tools/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/hack/tools/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 0000000000..dc5225b6d4 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. 
+package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. 
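For a feel of how much history each level retains, a standalone sketch (the 64-bucket count mirrors timeSeriesNumBuckets above, and the resolutions are a subset of timeSeriesResolutions; nothing here is part of the vendored file):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Each level keeps 64 buckets, so a level with resolution r covers
	// roughly 64*r of history before its buckets are recycled.
	resolutions := []time.Duration{
		time.Second, 10 * time.Second, time.Minute, 10 * time.Minute,
	}
	const numBuckets = 64
	for _, r := range resolutions {
		fmt.Printf("resolution %-8v covers about %v\n", r, time.Duration(numBuckets)*r)
	}
}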
+type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. 
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. 
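The approximation mentioned here comes from pro-rating buckets that only partially overlap the requested range, which is what extract (further below) does with an overlap fraction. A minimal standalone sketch of that computation (overlapFraction is an illustrative name, not the vendored code):

package main

import "fmt"

// overlapFraction returns the fraction of the source interval
// [srcStart, srcEnd) that lies inside [dstStart, dstEnd).
func overlapFraction(srcStart, srcEnd, dstStart, dstEnd float64) float64 {
	lo, hi := srcStart, srcEnd
	if dstStart > lo {
		lo = dstStart
	}
	if dstEnd < hi {
		hi = dstEnd
	}
	if hi <= lo {
		return 0
	}
	return (hi - lo) / (srcEnd - srcStart)
}

func main() {
	// A source bucket spanning [0, 10) contributes 40% of its value
	// to a destination interval [6, 20).
	fmt.Println(overlapFraction(0, 10, 6, 20)) // 0.4
}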
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := int(dstStart.Sub(srcStart) / srcInterval) + srcIndex += advance + srcStart = srcStart.Add(time.Duration(advance) * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. 
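Putting the exported pieces together, typical use of this package looks roughly like the sketch below. Note that the package is internal, so it is only importable from within golang.org/x/net itself; the sketch assumes that context:

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/internal/timeseries" // internal: importable only within x/net
)

func main() {
	ts := timeseries.NewTimeSeries(timeseries.NewFloat)

	obs := timeseries.NewFloat()
	*obs.(*timeseries.Float) = 3 // record a measurement of 3
	ts.Add(obs)

	total := ts.Total().(*timeseries.Float)
	fmt.Println("total:", total.Value())

	recent := ts.Recent(time.Minute).(*timeseries.Float)
	fmt.Println("last minute:", recent.Value())
}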
+func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/hack/tools/vendor/golang.org/x/net/trace/events.go b/hack/tools/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 0000000000..c646a6952e --- /dev/null +++ b/hack/tools/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
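For reference, the exported surface defined above (NewEventLog and the EventLog interface) is used along these lines; dial is an illustrative stand-in for real work:

package main

import "golang.org/x/net/trace"

func main() {
	el := trace.NewEventLog("example.Client", "backend-1:443")
	defer el.Finish()

	el.Printf("dialing")
	if err := dial(); err != nil {
		el.Errorf("dial failed: %v", err) // marks the log under the error buckets
	}
}

func dial() error { return nil } // stand-in for real work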
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+		el.events[0].When = el.events[1].When
+		copy(el.events[1:], el.events[2:])
+		el.events[maxEventsPerLog-1] = e
+	}
+	if e.IsErr {
+		el.LastErrorTime = e.When
+	}
+	el.mu.Unlock()
+}
+
+func (el *eventLog) ref() {
+	atomic.AddInt32(&el.refs, 1)
+}
+
+func (el *eventLog) unref() {
+	if atomic.AddInt32(&el.refs, -1) == 0 {
+		freeEventLog(el)
+	}
+}
+
+func (el *eventLog) When() string {
+	return el.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (el *eventLog) ElapsedTime() string {
+	elapsed := time.Since(el.Start)
+	return fmt.Sprintf("%.6f", elapsed.Seconds())
+}
+
+func (el *eventLog) Stack() string {
+	buf := new(bytes.Buffer)
+	tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
+	printStackRecord(tw, el.stack)
+	tw.Flush()
+	return buf.String()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+// Adapted from runtime/pprof/pprof.go.
+func printStackRecord(w io.Writer, stk []uintptr) {
+	for _, pc := range stk {
+		f := runtime.FuncForPC(pc)
+		if f == nil {
+			continue
+		}
+		file, line := f.FileLine(pc)
+		name := f.Name()
+		// Hide runtime.goexit and any runtime functions at the beginning.
+		if strings.HasPrefix(name, "runtime.") {
+			continue
+		}
+		fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
+	}
+}
+
+func (el *eventLog) Events() []logEntry {
+	el.mu.RLock()
+	defer el.mu.RUnlock()
+	return el.events
+}
+
+// freeEventLogs is a freelist of *eventLog
+var freeEventLogs = make(chan *eventLog, 1000)
+
+// newEventLog returns an event log ready to use.
+func newEventLog() *eventLog {
+	select {
+	case el := <-freeEventLogs:
+		return el
+	default:
+		return new(eventLog)
+	}
+}
+
+// freeEventLog adds el to freeEventLogs if there's room.
+// This is non-blocking.
+func freeEventLog(el *eventLog) {
+	el.reset()
+	select {
+	case freeEventLogs <- el:
+	default:
+	}
+}
+
+var eventsTmplCache *template.Template
+var eventsTmplOnce sync.Once
+
+func eventsTmpl() *template.Template {
+	eventsTmplOnce.Do(func() {
+		eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{
+			"elapsed":   elapsed,
+			"trimSpace": strings.TrimSpace,
+		}).Parse(eventsHTML))
+	})
+	return eventsTmplCache
+}
+
+const eventsHTML = `
+<html>
+	<head>
+		<title>events</title>
+	</head>
+	<style type="text/css">
+		body {
+			font-family: sans-serif;
+		}
+		table#req-status td.family {
+			padding-right: 2em;
+		}
+		table#req-status td.active {
+			padding-right: 1em;
+		}
+		table#req-status td.empty {
+			color: #aaa;
+		}
+		table#reqs {
+			margin-top: 1em;
+		}
+		table#reqs tr.first {
+			font-weight: bold;
+		}
+		table#reqs td {
+			font-family: monospace;
+		}
+		table#reqs td.when {
+			text-align: right;
+			white-space: nowrap;
+		}
+		table#reqs td.elapsed {
+			padding: 0 0.5em;
+			text-align: right;
+			white-space: pre;
+			width: 10em;
+		}
+		address {
+			font-size: smaller;
+			margin-top: 5em;
+		}
+	</style>
+	<body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{range $j, $bucket := $.Buckets}}
+		{{$n := index $.Counts $i $j}}
+		<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b={{$j}}">{{end}}
+			[{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+		{{end}}
+
+	</tr>
+	{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr><th>When</th><th>Elapsed</th></tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} + + +` diff --git a/hack/tools/vendor/golang.org/x/net/trace/histogram.go b/hack/tools/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 0000000000..d6c71101e4 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// addMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
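As an aside, the powers-of-two bucketing described at the top of this file (0-1, 2-3, 4-7, ...) can be computed with math/bits instead of the explicit loop in log2. A standalone sketch that mirrors getBucket's behavior for non-negative values (bucketFor is an illustrative name, not the vendored code):

package main

import (
	"fmt"
	"math/bits"
)

// bucketFor returns the histogram bucket index for v:
// floor(log2(v)), clamped to [0, 38).
func bucketFor(v int64) int {
	if v <= 0 {
		return 0
	}
	b := bits.Len64(uint64(v)) - 1 // floor(log2(v))
	if b > 37 {
		b = 37 // bucketCount-1, assuming bucketCount = 38 as above
	}
	return b
}

func main() {
	for _, v := range []int64{0, 1, 2, 3, 4, 7, 8, 1000} {
		fmt.Printf("value %4d -> bucket %d\n", v, bucketFor(v))
	}
}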
+const maxHTMLBarWidth = 350.0
+
+// newData returns data representing h for use in distTmpl.
+func (h *histogram) newData() *data {
+	// Force the allocation of buckets to simplify the rendering implementation
+	h.allocateBuckets()
+	// We scale the bars on the right so that the largest bar is
+	// maxHTMLBarWidth pixels in width.
+	maxBucket := int64(0)
+	for _, n := range h.buckets {
+		if n > maxBucket {
+			maxBucket = n
+		}
+	}
+	total := h.total()
+	barsizeMult := maxHTMLBarWidth / float64(maxBucket)
+	var pctMult float64
+	if total == 0 {
+		pctMult = 1.0
+	} else {
+		pctMult = 100.0 / float64(total)
+	}
+
+	buckets := make([]*bucketData, len(h.buckets))
+	runningTotal := int64(0)
+	for i, n := range h.buckets {
+		if n == 0 {
+			continue
+		}
+		runningTotal += n
+		var upperBound int64
+		if i < bucketCount-1 {
+			upperBound = bucketBoundary(uint8(i + 1))
+		} else {
+			upperBound = math.MaxInt64
+		}
+		buckets[i] = &bucketData{
+			Lower:         bucketBoundary(uint8(i)),
+			Upper:         upperBound,
+			N:             n,
+			Pct:           float64(n) * pctMult,
+			CumulativePct: float64(runningTotal) * pctMult,
+			GraphWidth:    int(float64(n) * barsizeMult),
+		}
+	}
+	return &data{
+		Buckets:           buckets,
+		Count:             total,
+		Median:            h.median(),
+		Mean:              h.average(),
+		StandardDeviation: h.standardDeviation(),
+	}
+}
+
+func (h *histogram) html() template.HTML {
+	buf := new(bytes.Buffer)
+	if err := distTmpl().Execute(buf, h.newData()); err != nil {
+		buf.Reset()
+		log.Printf("net/trace: couldn't execute template: %v", err)
+	}
+	return template.HTML(buf.String())
+}
+
+var distTmplCache *template.Template
+var distTmplOnce sync.Once
+
+func distTmpl() *template.Template {
+	distTmplOnce.Do(func() {
+		// Input: data
+		distTmplCache = template.Must(template.New("distTmpl").Parse(`
+<table>
+<tr>
+	<td style="padding:0.25em">Count: {{.Count}}</td>
+	<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+	<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+	<td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+  <tr>
+    <td style="padding:0 0 0 0.25em">[{{.Lower}},</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+    <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+    <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}}px;"></div></td>
+  </tr>
+{{end}}
+{{end}}
+</table>
+`)) + }) + return distTmplCache +} diff --git a/hack/tools/vendor/golang.org/x/net/trace/trace.go b/hack/tools/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 0000000000..eae2a99f54 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Since(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
+ if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/hack/tools/vendor/golang.org/x/oauth2/.travis.yml b/hack/tools/vendor/golang.org/x/oauth2/.travis.yml new file mode 100644 index 0000000000..fa139db225 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - tip + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/... diff --git a/hack/tools/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/hack/tools/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 0000000000..dfbed62cf5 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/hack/tools/vendor/golang.org/x/oauth2/LICENSE b/hack/tools/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 0000000000..2a7cf70da6 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/hack/tools/vendor/golang.org/x/oauth2/README.md b/hack/tools/vendor/golang.org/x/oauth2/README.md new file mode 100644 index 0000000000..781770c204 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/oauth2/README.md @@ -0,0 +1,40 @@ +# OAuth2 for Go + +[![Go Reference](https://pkg.go.dev/badge/golang.org/x/oauth2.svg)](https://pkg.go.dev/golang.org/x/oauth2) +[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) + +oauth2 package contains a client implementation for OAuth 2.0 spec. + +## Installation + +~~~~ +go get golang.org/x/oauth2 +~~~~ + +Or you can manually git clone the repository to +`$(go env GOPATH)/src/golang.org/x/oauth2`. + +See pkg.go.dev for further documentation and examples. + +* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) +* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) + +## Policy for new endpoints + +We no longer accept new provider-specific packages in this repo if all +they do is add a single endpoint variable. If you just want to add a +single endpoint, add it to the +[pkg.go.dev/golang.org/x/oauth2/endpoints](https://pkg.go.dev/golang.org/x/oauth2/endpoints) +package. + +## Report Issues / Send Patches + +The main issue tracker for the oauth2 repository is located at +https://github.com/golang/oauth2/issues. + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. In particular: + +* Excluding trivial changes, all contributions should be connected to an existing issue. +* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. +* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2). diff --git a/hack/tools/vendor/golang.org/x/oauth2/deviceauth.go b/hack/tools/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 0000000000..e99c92f39c --- /dev/null +++ b/hack/tools/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. 
+ VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. +func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. 
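
Taken together, DeviceAuth and DeviceAccessToken cover both legs of the RFC 8628 flow: the first obtains the codes, the second polls for the token. A minimal sketch of a caller, assuming hypothetical endpoint URLs and client ID, with error handling abbreviated:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	cfg := &oauth2.Config{
		ClientID: "my-client-id", // hypothetical
		Endpoint: oauth2.Endpoint{
			DeviceAuthURL: "https://idp.example.com/device/code",  // hypothetical
			TokenURL:      "https://idp.example.com/oauth2/token", // hypothetical
		},
	}
	ctx := context.Background()

	// Leg 1: request a device code and a user code.
	da, err := cfg.DeviceAuth(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Visit %s and enter code %s\n", da.VerificationURI, da.UserCode)

	// Leg 2: poll the token endpoint until the user approves,
	// the code expires, or the context is canceled.
	tok, err := cfg.DeviceAccessToken(ctx, da)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires:", tok.Expiry)
}
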
+func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. + case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/hack/tools/vendor/golang.org/x/oauth2/internal/doc.go b/hack/tools/vendor/golang.org/x/oauth2/internal/doc.go new file mode 100644 index 0000000000..03265e888a --- /dev/null +++ b/hack/tools/vendor/golang.org/x/oauth2/internal/doc.go @@ -0,0 +1,6 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal diff --git a/hack/tools/vendor/golang.org/x/oauth2/internal/oauth2.go b/hack/tools/vendor/golang.org/x/oauth2/internal/oauth2.go new file mode 100644 index 0000000000..14989beaf4 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" +) + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. 
+func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} diff --git a/hack/tools/vendor/golang.org/x/oauth2/internal/token.go b/hack/tools/vendor/golang.org/x/oauth2/internal/token.go new file mode 100644 index 0000000000..e83ddeef0f --- /dev/null +++ b/hack/tools/vendor/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,352 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token or error in JSON form. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1 +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + // error fields + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + if len(b) == 0 || string(b) == "null" { + return nil + } + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + if i > math.MaxInt32 { + i = math.MaxInt32 + } + *e = expirationTime(i) + return nil +} + +// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. 
+//
+// Deprecated: this function no longer does anything. Caller code that
+// wants to avoid potential extra HTTP requests made during
+// auto-probing of the provider's auth style should set
+// Endpoint.AuthStyle.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
+
+// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type.
+type AuthStyle int
+
+const (
+	AuthStyleUnknown  AuthStyle = 0
+	AuthStyleInParams AuthStyle = 1
+	AuthStyleInHeader AuthStyle = 2
+)
+
+// LazyAuthStyleCache is a backwards compatibility compromise to let Configs
+// have a lazily-initialized AuthStyleCache.
+//
+// The two users of this, oauth2.Config and oauth2/clientcredentials.Config,
+// both would ideally just embed an unexported AuthStyleCache but because both
+// were historically allowed to be copied by value we can't retroactively add an
+// uncopyable Mutex to them.
+//
+// We could use an atomic.Pointer, but that was added recently enough (in Go
+// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03
+// still pass. By using an atomic.Value, it supports both Go 1.17 and
+// copying by value, even if that's not ideal.
+type LazyAuthStyleCache struct {
+	v atomic.Value // of *AuthStyleCache
+}
+
+func (lc *LazyAuthStyleCache) Get() *AuthStyleCache {
+	if c, ok := lc.v.Load().(*AuthStyleCache); ok {
+		return c
+	}
+	c := new(AuthStyleCache)
+	if !lc.v.CompareAndSwap(nil, c) {
+		c = lc.v.Load().(*AuthStyleCache)
+	}
+	return c
+}
+
+// AuthStyleCache is the set of tokenURLs we've successfully used via
+// RetrieveToken and which style auth we ended up using.
+// It's called a cache, but it doesn't (yet?) shrink. It's expected that
+// the set of OAuth2 servers a program contacts over time is fixed and
+// small.
+type AuthStyleCache struct {
+	mu sync.Mutex
+	m  map[string]AuthStyle // keyed by tokenURL
+}
+
+// lookupAuthStyle reports which auth style we last used with tokenURL
+// when calling RetrieveToken and whether we have ever done so.
+func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	style, ok = c.m[tokenURL]
+	return
+}
+
+// setAuthStyle adds an entry to authStyleCache, documented above.
+func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.m == nil {
+		c.m = make(map[string]AuthStyle)
+	}
+	c.m[tokenURL] = v
+}
+
+// newTokenRequest returns a new *http.Request to retrieve a new token
+// from tokenURL using the provided clientID, clientSecret, and POST
+// body parameters.
+//
+// authStyle determines how the clientID & clientSecret are encoded:
+// AuthStyleInParams sends them in the POST body (along with any values
+// in v); AuthStyleInHeader sends them in the Authorization header.
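
As the deprecation note above suggests, a client that already knows which style its provider accepts can pin Endpoint.AuthStyle and skip the first-use probe performed by RetrieveToken below. A sketch using the public API that mirrors these internal types, with hypothetical URLs and credentials:

package main

import "golang.org/x/oauth2"

// newConfig pins the token-endpoint auth style up front.
func newConfig() *oauth2.Config {
	return &oauth2.Config{
		ClientID:     "my-client-id",     // hypothetical
		ClientSecret: "my-client-secret", // hypothetical
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://idp.example.com/oauth2/authorize", // hypothetical
			TokenURL: "https://idp.example.com/oauth2/token",     // hypothetical
			// Send client_id and client_secret via HTTP Basic auth
			// instead of letting the probe try both encodings.
			AuthStyle: oauth2.AuthStyleInHeader,
		},
	}
}
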
+func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) { + if authStyle == AuthStyleInParams { + v = cloneURLValues(v) + if clientID != "" { + v.Set("client_id", clientID) + } + if clientSecret != "" { + v.Set("client_secret", clientSecret) + } + } + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if authStyle == AuthStyleInHeader { + req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) + } + return req, nil +} + +func cloneURLValues(v url.Values) url.Values { + v2 := make(url.Values, len(v)) + for k, vv := range v { + v2[k] = append([]string(nil), vv...) + } + return v2 +} + +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { + needsAuthStyleProbe := authStyle == 0 + if needsAuthStyleProbe { + if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { + authStyle = style + needsAuthStyleProbe = false + } else { + authStyle = AuthStyleInHeader // the first way we'll try + } + } + req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) + if err != nil { + return nil, err + } + token, err := doTokenRoundTrip(ctx, req) + if err != nil && needsAuthStyleProbe { + // If we get an error, assume the server wants the + // clientID & clientSecret in a different form. + // See https://code.google.com/p/goauth2/issues/detail?id=31 for background. + // In summary: + // - Reddit only accepts client secret in the Authorization header + // - Dropbox accepts either it in URL param or Auth header, but not both. + // - Google only accepts URL param (not spec compliant?), not Auth header + // - Stripe only accepts client secret in Auth header with Bearer method, not Basic + // + // We used to maintain a big table in this code of all the sites and which way + // they went, but maintaining it didn't scale & got annoying. + // So just try both ways. + authStyle = AuthStyleInParams // the second way we'll try + req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) + token, err = doTokenRoundTrip(ctx, req) + } + if needsAuthStyleProbe && err == nil { + styleCache.setAuthStyle(tokenURL, authStyle) + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. 
+ if token != nil && token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, err +} + +func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { + r, err := ContextClient(ctx).Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + r.Body.Close() + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + + failureStatus := r.StatusCode < 200 || r.StatusCode > 299 + retrieveError := &RetrieveError{ + Response: r, + Body: body, + // attempt to populate error detail below + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string + vals, err := url.ParseQuery(string(body)) + if err != nil { + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse response: %v", err) + } + retrieveError.ErrorCode = vals.Get("error") + retrieveError.ErrorDescription = vals.Get("error_description") + retrieveError.ErrorURI = vals.Get("error_uri") + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse json: %v", err) + } + retrieveError.ErrorCode = tj.ErrorCode + retrieveError.ErrorDescription = tj.ErrorDescription + retrieveError.ErrorURI = tj.ErrorURI + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || retrieveError.ErrorCode != "" { + return nil, retrieveError + } + if token.AccessToken == "" { + return nil, errors.New("oauth2: server response missing access_token") + } + return token, nil +} + +// mirrors oauth2.RetrieveError +type RetrieveError struct { + Response *http.Response + Body []byte + ErrorCode string + ErrorDescription string + ErrorURI string +} + +func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) +} diff --git a/hack/tools/vendor/golang.org/x/oauth2/internal/transport.go b/hack/tools/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 0000000000..b9db01ddfd --- /dev/null +++ b/hack/tools/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,28 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
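
The internal transport file beginning here supplies the context-key plumbing used throughout this package; its public counterpart is the oauth2.HTTPClient variable declared later in oauth2.go. A sketch of supplying a custom *http.Client through a context, with an arbitrarily chosen timeout:

package main

import (
	"context"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func exchangeWithCustomClient(cfg *oauth2.Config, code string) (*oauth2.Token, error) {
	// ContextClient finds this client in the context and uses it for the
	// token exchange instead of http.DefaultClient.
	hc := &http.Client{Timeout: 10 * time.Second} // timeout chosen arbitrarily
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, hc)
	return cfg.Exchange(ctx, code)
}
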
+ +package internal + +import ( + "context" + "net/http" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +func ContextClient(ctx context.Context) *http.Client { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc + } + } + return http.DefaultClient +} diff --git a/hack/tools/vendor/golang.org/x/oauth2/oauth2.go b/hack/tools/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 0000000000..09f6a49b80 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,421 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests, +// as specified in RFC 6749. +// It can additionally grant authorization with Bearer JWT. +package oauth2 // import "golang.org/x/oauth2" + +import ( + "bytes" + "context" + "errors" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. +var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. +// +// Deprecated: this function no longer does anything. Caller code that +// wants to avoid potential extra HTTP requests made during +// auto-probing of the provider's auth style should set +// Endpoint.AuthStyle. +func RegisterBrokenAuthHeaderProvider(tokenURL string) {} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +// For the client credentials 2-legged OAuth2 flow, see the clientcredentials +// package (https://golang.org/x/oauth2/clientcredentials). +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache +} + +// A TokenSource is anything that can return a token. +type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. + // The returned Token must not be modified. + Token() (*Token, error) +} + +// Endpoint represents an OAuth 2.0 provider's authorization and token +// endpoint URLs. 
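
The TokenSource interface above is the package's main extension point; the simplest implementation is StaticTokenSource, declared further down in this file. A sketch pairing it with NewClient for a long-lived token, with hypothetical token and URL values:

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	// A fixed token that is never refreshed (e.g. a personal access token).
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"}) // hypothetical
	client := oauth2.NewClient(context.Background(), src)

	// Every request now carries "Authorization: Bearer example-token".
	resp, err := client.Get("https://api.example.com/resource") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}
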
+type Endpoint struct { + AuthURL string + DeviceAuthURL string + TokenURL string + + // AuthStyle optionally specifies how the endpoint wants the + // client ID & client secret sent. The zero value means to + // auto-detect. + AuthStyle AuthStyle +} + +// AuthStyle represents how requests for tokens are authenticated +// to the server. +type AuthStyle int + +const ( + // AuthStyleAutoDetect means to auto-detect which authentication + // style the provider wants by trying both ways and caching + // the successful way for the future. + AuthStyleAutoDetect AuthStyle = 0 + + // AuthStyleInParams sends the "client_id" and "client_secret" + // in the POST body as application/x-www-form-urlencoded parameters. + AuthStyleInParams AuthStyle = 1 + + // AuthStyleInHeader sends the client_id and client_password + // using HTTP Basic Authorization. This is an optional style + // described in the OAuth2 RFC 6749 section 2.3.1. + AuthStyleInHeader AuthStyle = 2 +) + +var ( + // AccessTypeOnline and AccessTypeOffline are options passed + // to the Options.AuthCodeURL method. They modify the + // "access_type" field that gets sent in the URL returned by + // AuthCodeURL. + // + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization + // code for a user. + AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") + AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") + + // ApprovalForce forces the users to view the consent dialog + // and confirm the permissions request at the URL returned + // from AuthCodeURL, even if they've already done so. + ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent") +) + +// An AuthCodeOption is passed to Config.AuthCodeURL. +type AuthCodeOption interface { + setValue(url.Values) +} + +type setParam struct{ k, v string } + +func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } + +// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// to a provider's authorization endpoint. +func SetAuthURLParam(key, value string) AuthCodeOption { + return setParam{key, value} +} + +// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page +// that asks for permissions for the required scopes explicitly. +// +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. +// +// Opts may include AccessTypeOnline or AccessTypeOffline, as well +// as ApprovalForce. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. 
+// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) +func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + } + if c.RedirectURL != "" { + v.Set("redirect_uri", c.RedirectURL) + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + if state != "" { + v.Set("state", state) + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + v := url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + return retrieveToken(ctx, c, v) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. +// +// If using PKCE to protect against CSRF attacks, opts should include a +// VerifierOption. +func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { + v := url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + } + if c.RedirectURL != "" { + v.Set("redirect_uri", c.RedirectURL) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveToken(ctx, c, v) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. 
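
AuthCodeURL, Exchange, and Client above chain into the standard three-legged flow. A compressed sketch of a handler pair, assuming hypothetical endpoint URLs and credentials and reducing state handling to a fixed placeholder (real code must use a random per-session value, as the comments above warn):

package main

import (
	"context"
	"fmt"
	"net/http"

	"golang.org/x/oauth2"
)

// All URLs and credentials are hypothetical placeholders.
var conf = &oauth2.Config{
	ClientID:     "my-client-id",
	ClientSecret: "my-client-secret",
	RedirectURL:  "https://app.example.com/callback",
	Scopes:       []string{"profile"},
	Endpoint: oauth2.Endpoint{
		AuthURL:  "https://idp.example.com/oauth2/authorize",
		TokenURL: "https://idp.example.com/oauth2/token",
	},
}

func login(w http.ResponseWriter, r *http.Request) {
	http.Redirect(w, r, conf.AuthCodeURL("state"), http.StatusFound)
}

func callback(w http.ResponseWriter, r *http.Request) {
	// Validate r.FormValue("state") against the stored value before this point.
	tok, err := conf.Exchange(context.Background(), r.FormValue("code"))
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	// Client wraps the token in an auto-refreshing http.Client.
	client := conf.Client(context.Background(), tok)
	resp, err := client.Get("https://api.example.com/me") // hypothetical API
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	resp.Body.Close()
	fmt.Fprintln(w, "authenticated:", resp.Status)
}

func main() {
	http.HandleFunc("/login", login)
	http.HandleFunc("/callback", callback)
	http.ListenAndServe(":8080", nil)
}
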
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token + + expiryDelta time.Duration +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + t.expiryDelta = s.expiryDelta + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. +// The returned client is not valid beyond the lifetime of the context. +// +// Note that if a custom *http.Client is provided via the Context it +// is used only for token acquisition and is not used to configure the +// *http.Client returned from NewClient. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. +func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + return internal.ContextClient(ctx) + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextClient(ctx).Transport, + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. 
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
+	if rt, ok := src.(*reuseTokenSource); ok {
+		if t == nil {
+			// Just use it directly.
+			return rt
+		}
+		src = rt.new
+	}
+	return &reuseTokenSource{
+		t:   t,
+		new: src,
+	}
+}
+
+// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the
+// TokenSource returned by ReuseTokenSource, except the expiry buffer is
+// configurable. The expiration time of a token is calculated as
+// t.Expiry.Add(-earlyExpiry).
+func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
+	if rt, ok := src.(*reuseTokenSource); ok {
+		if t == nil {
+			// Just use it directly, but set the expiryDelta to earlyExpiry,
+			// so the behavior matches what the user expects.
+			rt.expiryDelta = earlyExpiry
+			return rt
+		}
+		src = rt.new
+	}
+	if t != nil {
+		t.expiryDelta = earlyExpiry
+	}
+	return &reuseTokenSource{
+		t:           t,
+		new:         src,
+		expiryDelta: earlyExpiry,
+	}
+}
diff --git a/hack/tools/vendor/golang.org/x/oauth2/pkce.go b/hack/tools/vendor/golang.org/x/oauth2/pkce.go
new file mode 100644
index 0000000000..50593b6dfe
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/oauth2/pkce.go
@@ -0,0 +1,68 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package oauth2
+
+import (
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/base64"
+	"net/url"
+)
+
+const (
+	codeChallengeKey       = "code_challenge"
+	codeChallengeMethodKey = "code_challenge_method"
+	codeVerifierKey        = "code_verifier"
+)
+
+// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness.
+// This follows recommendations in RFC 7636.
+//
+// A fresh verifier should be generated for each authorization.
+// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
+// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange
+// (or Config.DeviceAccessToken).
+func GenerateVerifier() string {
+	// "RECOMMENDED that the output of a suitable random number generator be
+	// used to create a 32-octet sequence. The octet sequence is then
+	// base64url-encoded to produce a 43-octet URL-safe string to use as the
+	// code verifier."
+	// https://datatracker.ietf.org/doc/html/rfc7636#section-4.1
+	data := make([]byte, 32)
+	if _, err := rand.Read(data); err != nil {
+		panic(err)
+	}
+	return base64.RawURLEncoding.EncodeToString(data)
+}
+
+// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be
+// passed to Config.Exchange or Config.DeviceAccessToken only.
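
GenerateVerifier, S256ChallengeOption, and VerifierOption pair up across the two legs of the flow: the S256 challenge accompanies the authorization request, while the verifier itself accompanies the code exchange. A sketch, assuming conf is a configured *oauth2.Config:

package main

import (
	"context"

	"golang.org/x/oauth2"
)

// authURL builds the consent-page URL for a fresh verifier. The caller must
// retain the verifier and present it again in exchange below. The state value
// is a placeholder; real code uses a random per-request value.
func authURL(conf *oauth2.Config) (url, verifier string) {
	verifier = oauth2.GenerateVerifier()
	return conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier)), verifier
}

// exchange trades the callback code for a token, proving possession of the
// same verifier whose challenge was sent with the authorization request.
func exchange(conf *oauth2.Config, code, verifier string) (*oauth2.Token, error) {
	return conf.Exchange(context.Background(), code, oauth2.VerifierOption(verifier))
}
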
+func VerifierOption(verifier string) AuthCodeOption { + return setParam{k: codeVerifierKey, v: verifier} +} + +// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. +// +// Prefer to use S256ChallengeOption where possible. +func S256ChallengeFromVerifier(verifier string) string { + sha := sha256.Sum256([]byte(verifier)) + return base64.RawURLEncoding.EncodeToString(sha[:]) +} + +// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// only. +func S256ChallengeOption(verifier string) AuthCodeOption { + return challengeOption{ + challenge_method: "S256", + challenge: S256ChallengeFromVerifier(verifier), + } +} + +type challengeOption struct{ challenge_method, challenge string } + +func (p challengeOption) setValue(m url.Values) { + m.Set(codeChallengeMethodKey, p.challenge_method) + m.Set(codeChallengeKey, p.challenge) +} diff --git a/hack/tools/vendor/golang.org/x/oauth2/token.go b/hack/tools/vendor/golang.org/x/oauth2/token.go new file mode 100644 index 0000000000..109997d77c --- /dev/null +++ b/hack/tools/vendor/golang.org/x/oauth2/token.go @@ -0,0 +1,212 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// defaultExpiryDelta determines how earlier a token should be considered +// expired than its actual expiration time. It is used to avoid late +// expirations due to client-server time mismatches. +const defaultExpiryDelta = 10 * time.Second + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// Most users of this package should not access fields of Token +// directly. They're exported mostly for use by related packages +// implementing derivative OAuth2 flows. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string `json:"access_token"` + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string `json:"token_type,omitempty"` + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string `json:"refresh_token,omitempty"` + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time `json:"expiry,omitempty"` + + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} + + // expiryDelta is used to calculate when a token is considered + // expired, by subtracting from Expiry. If zero, defaultExpiryDelta + // is used. 
+	expiryDelta time.Duration
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+	if strings.EqualFold(t.TokenType, "bearer") {
+		return "Bearer"
+	}
+	if strings.EqualFold(t.TokenType, "mac") {
+		return "MAC"
+	}
+	if strings.EqualFold(t.TokenType, "basic") {
+		return "Basic"
+	}
+	if t.TokenType != "" {
+		return t.TokenType
+	}
+	return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+	r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+	t2 := new(Token)
+	*t2 = *t
+	t2.raw = extra
+	return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+	if raw, ok := t.raw.(map[string]interface{}); ok {
+		return raw[key]
+	}
+
+	vals, ok := t.raw.(url.Values)
+	if !ok {
+		return nil
+	}
+
+	v := vals.Get(key)
+	switch s := strings.TrimSpace(v); strings.Count(s, ".") {
+	case 0: // Contains no "."; try to parse as int
+		if i, err := strconv.ParseInt(s, 10, 64); err == nil {
+			return i
+		}
+	case 1: // Contains a single "."; try to parse as float
+		if f, err := strconv.ParseFloat(s, 64); err == nil {
+			return f
+		}
+	}
+
+	return v
+}
+
+// timeNow is time.Now but pulled out as a variable for tests.
+var timeNow = time.Now
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+	if t.Expiry.IsZero() {
+		return false
+	}
+
+	expiryDelta := defaultExpiryDelta
+	if t.expiryDelta != 0 {
+		expiryDelta = t.expiryDelta
+	}
+	return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+	return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+	if t == nil {
+		return nil
+	}
+	return &Token{
+		AccessToken:  t.AccessToken,
+		TokenType:    t.TokenType,
+		RefreshToken: t.RefreshToken,
+		Expiry:       t.Expiry,
+		raw:          t.Raw,
+	}
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get())
+	if err != nil {
+		if rErr, ok := err.(*internal.RetrieveError); ok {
+			return nil, (*RetrieveError)(rErr)
+		}
+		return nil, err
+	}
+	return tokenFromInternal(tk), nil
+}
+
+// RetrieveError is the error returned when the token endpoint returns a
+// non-2XX HTTP status code or populates RFC 6749's 'error' parameter.
+// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2
+type RetrieveError struct {
+	Response *http.Response
+	// Body is the body that was consumed by reading Response.Body.
+	// It may be truncated.
+	Body []byte
+	// ErrorCode is RFC 6749's 'error' parameter.
+	ErrorCode string
+	// ErrorDescription is RFC 6749's 'error_description' parameter.
+	ErrorDescription string
+	// ErrorURI is RFC 6749's 'error_uri' parameter.
+	ErrorURI string
+}
+
+func (r *RetrieveError) Error() string {
+	if r.ErrorCode != "" {
+		s := fmt.Sprintf("oauth2: %q", r.ErrorCode)
+		if r.ErrorDescription != "" {
+			s += fmt.Sprintf(" %q", r.ErrorDescription)
+		}
+		if r.ErrorURI != "" {
+			s += fmt.Sprintf(" %q", r.ErrorURI)
+		}
+		return s
+	}
+	return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
diff --git a/hack/tools/vendor/golang.org/x/oauth2/transport.go b/hack/tools/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 0000000000..90657915fb
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,89 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"errors"
+	"log"
+	"net/http"
+	"sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+	// Source supplies the token to add to outgoing requests'
+	// Authorization headers.
+	Source TokenSource
+
+	// Base is the base RoundTripper used to make HTTP requests.
+	// If nil, http.DefaultTransport is used.
+	Base http.RoundTripper
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token from Transport's Source.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	reqBodyClosed := false
+	if req.Body != nil {
+		defer func() {
+			if !reqBodyClosed {
+				req.Body.Close()
+			}
+		}()
+	}
+
+	if t.Source == nil {
+		return nil, errors.New("oauth2: Transport's Source is nil")
+	}
+	token, err := t.Source.Token()
+	if err != nil {
+		return nil, err
+	}
+
+	req2 := cloneRequest(req) // per RoundTripper contract
+	token.SetAuthHeader(req2)
+
+	// req.Body is assumed to be closed by the base RoundTripper.
+	reqBodyClosed = true
+	return t.base().RoundTrip(req2)
+}
+
+var cancelOnce sync.Once
+
+// CancelRequest does nothing. It used to be a legacy cancellation mechanism
+// but now it only logs on first use to warn that it's deprecated.
+//
+// Deprecated: use contexts for cancellation instead.
+func (t *Transport) CancelRequest(req *http.Request) {
+	cancelOnce.Do(func() {
+		log.Printf("deprecated: golang.org/x/oauth2: Transport.CancelRequest no longer does anything; use contexts")
+	})
+}
+
+func (t *Transport) base() http.RoundTripper {
+	if t.Base != nil {
+		return t.Base
+	}
+	return http.DefaultTransport
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+ } + return r2 +} diff --git a/hack/tools/vendor/golang.org/x/sync/singleflight/singleflight.go b/hack/tools/vendor/golang.org/x/sync/singleflight/singleflight.go new file mode 100644 index 0000000000..4051830982 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -0,0 +1,214 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight // import "golang.org/x/sync/singleflight" + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func (p *panicError) Unwrap() error { + err, ok := p.value.(error) + if !ok { + return nil + } + + return err +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. 
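+//
+// An editorial usage sketch (not from the upstream file), where fetch is a
+// hypothetical loader shared by concurrent callers:
+//
+//	var g singleflight.Group
+//	ch := g.DoChan("config", func() (interface{}, error) {
+//		return fetch("config")
+//	})
+//	res := <-ch // res.Val, res.Err, and res.Shared carry the outcome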
+// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + g.mu.Lock() + defer g.mu.Unlock() + c.wg.Done() + if g.m[key] == c { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. + } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. + if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() +} diff --git a/hack/tools/vendor/golang.org/x/sys/windows/registry/key.go b/hack/tools/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 0000000000..fd8632444e --- /dev/null +++ b/hack/tools/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,205 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. 
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. 
+ runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. +type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/hack/tools/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/hack/tools/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 0000000000..bbf86ccf0c --- /dev/null +++ b/hack/tools/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
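+//
+// Editorial sketch (not part of the upstream sources): exercising the key
+// API defined in key.go, assuming a Windows host with sufficient rights:
+//
+//	k, existed, err := registry.CreateKey(registry.CURRENT_USER, `SOFTWARE\Example`, registry.QUERY_VALUE|registry.SET_VALUE)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer k.Close()
+//	if ki, err := k.Stat(); err == nil {
+//		fmt.Println(ki.SubKeyCount, ki.ModTime(), existed)
+//	}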
+ +//go:build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/hack/tools/vendor/golang.org/x/sys/windows/registry/syscall.go b/hack/tools/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 0000000000..f533091c19 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/hack/tools/vendor/golang.org/x/sys/windows/registry/value.go b/hack/tools/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 0000000000..74db26b94d --- /dev/null +++ b/hack/tools/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,386 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. 
If buf is too small to fit the stored value it returns
+// ErrShortBuffer along with the required buffer size n.
+// If no buffer is provided, GetValue returns the value's type and the
+// required buffer size n.
+// If the value does not exist, the error returned is ErrNotExist.
+//
+// GetValue is a low level function. If the value's type is known, use the
+// appropriate Get*Value function instead.
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) {
+ pname, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return 0, 0, err
+ }
+ var pbuf *byte
+ if len(buf) > 0 {
+ pbuf = (*byte)(unsafe.Pointer(&buf[0]))
+ }
+ l := uint32(len(buf))
+ err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l)
+ if err != nil {
+ return int(l), valtype, err
+ }
+ return int(l), valtype, nil
+}
+
+func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
+ p, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return nil, 0, err
+ }
+ var t uint32
+ n := uint32(len(buf))
+ for {
+ err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n)
+ if err == nil {
+ return buf[:n], t, nil
+ }
+ if err != syscall.ERROR_MORE_DATA {
+ return nil, 0, err
+ }
+ if n <= uint32(len(buf)) {
+ return nil, 0, err
+ }
+ buf = make([]byte, n)
+ }
+}
+
+// GetStringValue retrieves the string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringValue returns ErrNotExist.
+// If value is not SZ or EXPAND_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return "", typ, err2
+ }
+ switch typ {
+ case SZ, EXPAND_SZ:
+ default:
+ return "", typ, ErrUnexpectedType
+ }
+ if len(data) == 0 {
+ return "", typ, nil
+ }
+ u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+ return syscall.UTF16ToString(u), typ, nil
+}
+
+// GetMUIStringValue retrieves the localized string value for
+// the specified value name associated with an open key k.
+// If the value name doesn't exist or the localized string value
+// can't be resolved, GetMUIStringValue returns ErrNotExist.
+// GetMUIStringValue panics if the system doesn't support
+// regLoadMUIString; use LoadRegLoadMUIString to check if
+// regLoadMUIString is supported before calling this function.
+func (k Key) GetMUIStringValue(name string) (string, error) {
+ pname, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return "", err
+ }
+
+ buf := make([]uint16, 1024)
+ var buflen uint32
+ var pdir *uint16
+
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path
+
+ // Try to resolve the string value using the system directory as
+ // a DLL search path; this assumes the string value is of the form
+ // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320.
+
+ // This approach works with tzres.dll but may have to be revised
+ // in the future to allow callers to provide custom search paths.
+ + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. 
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. 
+ l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/hack/tools/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/hack/tools/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 0000000000..fc1835d8a2 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, 
buf *byte, buflen *uint32) (regerrno error) {
+ r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
+ r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0)
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
+ r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
+ r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+ n = uint32(r0)
+ if n == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/hack/tools/vendor/golang.org/x/text/feature/plural/common.go b/hack/tools/vendor/golang.org/x/text/feature/plural/common.go
new file mode 100644
index 0000000000..fdcb373fdf
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/text/feature/plural/common.go
@@ -0,0 +1,70 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package plural
+
+// Form defines a plural form.
+//
+// Not all languages support all forms. Also, the meaning of each form varies
+// per language. It is important to note that the name of a form does not
+// necessarily correspond one-to-one with the set of numbers. For instance,
+// for Croatian, One matches not only 1, but also 21, 31, etc.
+//
+// Each language must at least support the form "other".
+type Form byte
+
+const (
+ Other Form = iota
+ Zero
+ One
+ Two
+ Few
+ Many
+)
+
+var countMap = map[string]Form{
+ "other": Other,
+ "zero": Zero,
+ "one": One,
+ "two": Two,
+ "few": Few,
+ "many": Many,
+}
+
+type pluralCheck struct {
+ // category:
+ // 3..7: opID
+ // 0..2: category
+ cat byte
+ setID byte
+}
+
+// opID identifies the type of operand in the plural rule, being i, n or f.
+// (v, w, and t are treated as filters in our implementation.)
+type opID byte
+
+const (
+ opMod opID = 0x1 // is '%' used?
+ opNotEqual opID = 0x2 // using "!=" to compare
+ opI opID = 0 << 2 // integers after taking the absolute value
+ opN opID = 1 << 2 // full number (must be integer)
+ opF opID = 2 << 2 // fraction
+ opV opID = 3 << 2 // number of visible digits
+ opW opID = 4 << 2 // number of visible digits without trailing zeros
+ opBretonM opID = 5 << 2 // hard-wired rule for Breton
+ opItalian800 opID = 6 << 2 // hard-wired rule for Italian
+ opAzerbaijan00s opID = 7 << 2 // hard-wired rule for Azerbaijan
+)
+const (
+ // Use this plural form to indicate the next rule needs to match as well.
+ // The last condition in the list will have the correct plural form.
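+ // (Editorial note: each pluralCheck.cat packs a plural Form in its low
+ // three bits, formMask, and an opID above opShift; a rule whose form
+ // bits equal andNext must match together with the rule that follows it.)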
+ andNext = 0x7 + formMask = 0x7 + + opShift = 3 + + // numN indicates the maximum integer, or maximum mod value, for which we + // have inclusion masks. + numN = 100 + // The common denominator of the modulo that is taken. + maxMod = 100 +) diff --git a/hack/tools/vendor/golang.org/x/text/feature/plural/message.go b/hack/tools/vendor/golang.org/x/text/feature/plural/message.go new file mode 100644 index 0000000000..56d518cc34 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/feature/plural/message.go @@ -0,0 +1,244 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plural + +import ( + "fmt" + "io" + "reflect" + "strconv" + + "golang.org/x/text/internal/catmsg" + "golang.org/x/text/internal/number" + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// TODO: consider deleting this interface. Maybe VisibleDigits is always +// sufficient and practical. + +// Interface is used for types that can determine their own plural form. +type Interface interface { + // PluralForm reports the plural form for the given language of the + // underlying value. It also returns the integer value. If the integer value + // is larger than fits in n, PluralForm may return a value modulo + // 10,000,000. + PluralForm(t language.Tag, scale int) (f Form, n int) +} + +// Selectf returns the first case for which its selector is a match for the +// arg-th substitution argument to a formatting call, formatting it as indicated +// by format. +// +// The cases argument are pairs of selectors and messages. Selectors are of type +// string or Form. Messages are of type string or catalog.Message. A selector +// matches an argument if: +// - it is "other" or Other +// - it matches the plural form of the argument: "zero", "one", "two", "few", +// or "many", or the equivalent Form +// - it is of the form "=x" where x is an integer that matches the value of +// the argument. 
+// - it is of the form "<x" where x is an integer that is larger than the
+// argument.
+//
+// [Editorial placeholder: the remainder of this comment, the Selectf
+// function, the pluralSelect type, and the opening of its Compile method
+// were lost when this patch was extracted; the text resumes inside Compile.]
+ if m.kind > kindDefault {
+ e.EncodeUint(uint64(m.scale))
+ }
+
+ forms := validForms(cardinal, e.Language())
+
+ for i := 0; i < len(m.cases); {
+ if err := compileSelector(e, forms, m.cases[i]); err != nil {
+ return err
+ }
+ if i++; i >= len(m.cases) {
+ return fmt.Errorf("plural: no message defined for selector %v", m.cases[i-1])
+ }
+ var msg catalog.Message
+ switch x := m.cases[i].(type) {
+ case string:
+ msg = catalog.String(x)
+ case catalog.Message:
+ msg = x
+ default:
+ return fmt.Errorf("plural: message of type %T; must be string or catalog.Message", x)
+ }
+ if err := e.EncodeMessage(msg); err != nil {
+ return err
+ }
+ i++
+ }
+ return nil
+}
+
+func compileSelector(e *catmsg.Encoder, valid []Form, selector interface{}) error {
+ form := Other
+ switch x := selector.(type) {
+ case string:
+ if x == "" {
+ return fmt.Errorf("plural: empty selector")
+ }
+ if c := x[0]; c == '=' || c == '<' {
+ val, err := strconv.ParseUint(x[1:], 10, 16)
+ if err != nil {
+ return fmt.Errorf("plural: invalid number in selector %q: %v", selector, err)
+ }
+ e.EncodeUint(uint64(c))
+ e.EncodeUint(val)
+ return nil
+ }
+ var ok bool
+ form, ok = countMap[x]
+ if !ok {
+ return fmt.Errorf("plural: invalid plural form %q", selector)
+ }
+ case Form:
+ form = x
+ default:
+ return fmt.Errorf("plural: selector of type %T; want string or Form", selector)
+ }
+
+ ok := false
+ for _, f := range valid {
+ if f == form {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("plural: form %q not supported for language %q", selector, e.Language())
+ }
+ e.EncodeUint(uint64(form))
+ return nil
+}
+
+func execute(d *catmsg.Decoder) bool {
+ lang := d.Language()
+ argN := int(d.DecodeUint())
+ kind := int(d.DecodeUint())
+ scale := -1 // default
+ if kind > kindDefault {
+ scale = int(d.DecodeUint())
+ }
+ form := Other
+ n := -1
+ if arg := d.Arg(argN); arg == nil {
+ // Default to Other.
+ } else if x, ok := arg.(number.VisibleDigits); ok {
+ d := x.Digits(nil, lang, scale)
+ form, n = cardinal.matchDisplayDigits(lang, &d)
+ } else if x, ok := arg.(Interface); ok {
+ // This covers lists and formatters from the number package.
+ form, n = x.PluralForm(lang, scale)
+ } else {
+ var f number.Formatter
+ switch kind {
+ case kindScale:
+ f.InitDecimal(lang)
+ f.SetScale(scale)
+ case kindScientific:
+ f.InitScientific(lang)
+ f.SetScale(scale)
+ case kindPrecision:
+ f.InitDecimal(lang)
+ f.SetPrecision(scale)
+ case kindDefault:
+ // sensible default
+ f.InitDecimal(lang)
+ if k := reflect.TypeOf(arg).Kind(); reflect.Int <= k && k <= reflect.Uintptr {
+ f.SetScale(0)
+ } else {
+ f.SetScale(2)
+ }
+ }
+ var dec number.Decimal // TODO: buffer in Printer
+ dec.Convert(f.RoundingContext, arg)
+ v := number.FormatDigits(&dec, f.RoundingContext)
+ if !v.NaN && !v.Inf {
+ form, n = cardinal.matchDisplayDigits(d.Language(), &v)
+ }
+ }
+ for !d.Done() {
+ f := d.DecodeUint()
+ if (f == '=' && n == int(d.DecodeUint())) ||
+ (f == '<' && 0 <= n && n < int(d.DecodeUint())) ||
+ form == Form(f) ||
+ Other == Form(f) {
+ return d.ExecuteMessage()
+ }
+ d.SkipMessage()
+ }
+ return false
+}
diff --git a/hack/tools/vendor/golang.org/x/text/feature/plural/plural.go b/hack/tools/vendor/golang.org/x/text/feature/plural/plural.go
new file mode 100644
index 0000000000..e9f2d42e04
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/text/feature/plural/plural.go
@@ -0,0 +1,262 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go gen_common.go
+
+// Package plural provides utilities for handling linguistic plurals in text.
+//
+// The definitions in this package are based on the plural rule handling defined
+// in CLDR. See
+// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules for
+// details.
+package plural
+
+import (
+ "golang.org/x/text/internal/language/compact"
+ "golang.org/x/text/internal/number"
+ "golang.org/x/text/language"
+)
+
+// Rules defines the plural rules for all languages for a certain plural type.
+//
+// This package is UNDER CONSTRUCTION and its API may change.
+type Rules struct {
+ rules []pluralCheck
+ index []byte
+ langToIndex []byte
+ inclusionMasks []uint64
+}
+
+var (
+ // Cardinal defines the plural rules for numbers indicating quantities.
+ Cardinal *Rules = cardinal
+
+ // Ordinal defines the plural rules for numbers indicating position
+ // (first, second, etc.).
+ Ordinal *Rules = ordinal
+
+ ordinal = &Rules{
+ ordinalRules,
+ ordinalIndex,
+ ordinalLangToIndex,
+ ordinalInclusionMasks[:],
+ }
+
+ cardinal = &Rules{
+ cardinalRules,
+ cardinalIndex,
+ cardinalLangToIndex,
+ cardinalInclusionMasks[:],
+ }
+)
+
+// getIntApprox converts the digits in slice digits[start:end] to an integer
+// according to the following rules:
+// - Let i be asInt(digits[start:end]), where out-of-range digits are assumed
+// to be zero.
+// - Result n is big if i / 10^nMod > 1.
+// - Otherwise the result is i % 10^nMod.
+//
+// For example, if digits is {1, 2, 3} and start:end is 0:5, then the result
+// for various values of nMod is:
+// - when nMod == 2, n == big
+// - when nMod == 3, n == big
+// - when nMod == 4, n == big
+// - when nMod == 5, n == 12300
+// - when nMod == 6, n == 12300
+// - when nMod == 7, n == 12300
+func getIntApprox(digits []byte, start, end, nMod, big int) (n int) {
+ // Leading 0 digits just result in 0.
+ p := start
+ if p < 0 {
+ p = 0
+ }
+ // Range only over the part for which we have digits.
+ mid := end
+ if mid >= len(digits) {
+ mid = len(digits)
+ }
+ // Check digits more significant than nMod.
+ if q := end - nMod; q > 0 {
+ if q > mid {
+ q = mid
+ }
+ for ; p < q; p++ {
+ if digits[p] != 0 {
+ return big
+ }
+ }
+ }
+ for ; p < mid; p++ {
+ n = 10*n + int(digits[p])
+ }
+ // Multiply for trailing zeros.
+ for ; p < end; p++ {
+ n *= 10
+ }
+ return n
+}
+
+// MatchDigits computes the plural form for the given language and the given
+// decimal floating point digits. The digits are stored in big-endian order and
+// are of value byte(0) - byte(9). The floating point position is indicated by
+// exp and the number of visible decimals is scale. All leading and trailing
+// zeros may be omitted from digits.
+//
+// The following table contains examples of possible arguments to represent
+// the given numbers.
+//
+// decimal digits exp scale
+// 123 []byte{1, 2, 3} 3 0
+// 123.4 []byte{1, 2, 3, 4} 3 1
+// 123.40 []byte{1, 2, 3, 4} 3 2
+// 100000 []byte{1} 6 0
+// 100000.00 []byte{1} 6 3
+func (p *Rules) MatchDigits(t language.Tag, digits []byte, exp, scale int) Form {
+ index := tagToID(t)
+
+ // Differentiate up to and including mod 1000000 for the integer part.
+ n := getIntApprox(digits, 0, exp, 6, 1000000)
+
+ // Differentiate up to and including mod 100 for the fractional part.
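+ // Worked example (editorial addition, not upstream): for digits
+ // {1, 2, 3, 4} with exp 3 and scale 2 (the number 123.40), the line
+ // above yields n = 123 and the line below yields f = 40.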
+ f := getIntApprox(digits, exp, exp+scale, 2, 100) + + return matchPlural(p, index, n, f, scale) +} + +func (p *Rules) matchDisplayDigits(t language.Tag, d *number.Digits) (Form, int) { + n := getIntApprox(d.Digits, 0, int(d.Exp), 6, 1000000) + return p.MatchDigits(t, d.Digits, int(d.Exp), d.NumFracDigits()), n +} + +func validForms(p *Rules, t language.Tag) (forms []Form) { + offset := p.langToIndex[tagToID(t)] + rules := p.rules[p.index[offset]:p.index[offset+1]] + + forms = append(forms, Other) + last := Other + for _, r := range rules { + if cat := Form(r.cat & formMask); cat != andNext && last != cat { + forms = append(forms, cat) + last = cat + } + } + return forms +} + +func (p *Rules) matchComponents(t language.Tag, n, f, scale int) Form { + return matchPlural(p, tagToID(t), n, f, scale) +} + +// MatchPlural returns the plural form for the given language and plural +// operands (as defined in +// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules): +// +// where +// n absolute value of the source number (integer and decimals) +// input +// i integer digits of n. +// v number of visible fraction digits in n, with trailing zeros. +// w number of visible fraction digits in n, without trailing zeros. +// f visible fractional digits in n, with trailing zeros (f = t * 10^(v-w)) +// t visible fractional digits in n, without trailing zeros. +// +// If any of the operand values is too large to fit in an int, it is okay to +// pass the value modulo 10,000,000. +func (p *Rules) MatchPlural(lang language.Tag, i, v, w, f, t int) Form { + return matchPlural(p, tagToID(lang), i, f, v) +} + +func matchPlural(p *Rules, index compact.ID, n, f, v int) Form { + nMask := p.inclusionMasks[n%maxMod] + // Compute the fMask inline in the rules below, as it is relatively rare. + // fMask := p.inclusionMasks[f%maxMod] + vMask := p.inclusionMasks[v%maxMod] + + // Do the matching + offset := p.langToIndex[index] + rules := p.rules[p.index[offset]:p.index[offset+1]] + for i := 0; i < len(rules); i++ { + rule := rules[i] + setBit := uint64(1 << rule.setID) + var skip bool + switch op := opID(rule.cat >> opShift); op { + case opI: // i = x + skip = n >= numN || nMask&setBit == 0 + + case opI | opNotEqual: // i != x + skip = n < numN && nMask&setBit != 0 + + case opI | opMod: // i % m = x + skip = nMask&setBit == 0 + + case opI | opMod | opNotEqual: // i % m != x + skip = nMask&setBit != 0 + + case opN: // n = x + skip = f != 0 || n >= numN || nMask&setBit == 0 + + case opN | opNotEqual: // n != x + skip = f == 0 && n < numN && nMask&setBit != 0 + + case opN | opMod: // n % m = x + skip = f != 0 || nMask&setBit == 0 + + case opN | opMod | opNotEqual: // n % m != x + skip = f == 0 && nMask&setBit != 0 + + case opF: // f = x + skip = f >= numN || p.inclusionMasks[f%maxMod]&setBit == 0 + + case opF | opNotEqual: // f != x + skip = f < numN && p.inclusionMasks[f%maxMod]&setBit != 0 + + case opF | opMod: // f % m = x + skip = p.inclusionMasks[f%maxMod]&setBit == 0 + + case opF | opMod | opNotEqual: // f % m != x + skip = p.inclusionMasks[f%maxMod]&setBit != 0 + + case opV: // v = x + skip = v < numN && vMask&setBit == 0 + + case opV | opNotEqual: // v != x + skip = v < numN && vMask&setBit != 0 + + case opW: // w == 0 + skip = f != 0 + + case opW | opNotEqual: // w != 0 + skip = f == 0 + + // Hard-wired rules that cannot be handled by our algorithm. 
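+ // (Editorial note: opBretonM matches CLDR Breton "many", n != 0 and
+ // n % 1000000 = 0; opAzerbaijan00s and opItalian800 below are likewise
+ // hard-wired special cases.)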
+ + case opBretonM: + skip = f != 0 || n == 0 || n%1000000 != 0 + + case opAzerbaijan00s: + // 100,200,300,400,500,600,700,800,900 + skip = n == 0 || n >= 1000 || n%100 != 0 + + case opItalian800: + skip = (f != 0 || n >= numN || nMask&setBit == 0) && n != 800 + } + if skip { + // advance over AND entries. + for ; i < len(rules) && rules[i].cat&formMask == andNext; i++ { + } + continue + } + // return if we have a final entry. + if cat := rule.cat & formMask; cat != andNext { + return Form(cat) + } + } + return Other +} + +func tagToID(t language.Tag) compact.ID { + id, _ := compact.RegionalID(compact.Tag(t)) + return id +} diff --git a/hack/tools/vendor/golang.org/x/text/feature/plural/tables.go b/hack/tools/vendor/golang.org/x/text/feature/plural/tables.go new file mode 100644 index 0000000000..b06b9cb4ea --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/feature/plural/tables.go @@ -0,0 +1,552 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package plural + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +var ordinalRules = []pluralCheck{ // 64 elements + 0: {cat: 0x2f, setID: 0x4}, + 1: {cat: 0x3a, setID: 0x5}, + 2: {cat: 0x22, setID: 0x1}, + 3: {cat: 0x22, setID: 0x6}, + 4: {cat: 0x22, setID: 0x7}, + 5: {cat: 0x2f, setID: 0x8}, + 6: {cat: 0x3c, setID: 0x9}, + 7: {cat: 0x2f, setID: 0xa}, + 8: {cat: 0x3c, setID: 0xb}, + 9: {cat: 0x2c, setID: 0xc}, + 10: {cat: 0x24, setID: 0xd}, + 11: {cat: 0x2d, setID: 0xe}, + 12: {cat: 0x2d, setID: 0xf}, + 13: {cat: 0x2f, setID: 0x10}, + 14: {cat: 0x35, setID: 0x3}, + 15: {cat: 0xc5, setID: 0x11}, + 16: {cat: 0x2, setID: 0x1}, + 17: {cat: 0x5, setID: 0x3}, + 18: {cat: 0xd, setID: 0x12}, + 19: {cat: 0x22, setID: 0x1}, + 20: {cat: 0x2f, setID: 0x13}, + 21: {cat: 0x3d, setID: 0x14}, + 22: {cat: 0x2f, setID: 0x15}, + 23: {cat: 0x3a, setID: 0x16}, + 24: {cat: 0x2f, setID: 0x17}, + 25: {cat: 0x3b, setID: 0x18}, + 26: {cat: 0x2f, setID: 0xa}, + 27: {cat: 0x3c, setID: 0xb}, + 28: {cat: 0x22, setID: 0x1}, + 29: {cat: 0x23, setID: 0x19}, + 30: {cat: 0x24, setID: 0x1a}, + 31: {cat: 0x22, setID: 0x1b}, + 32: {cat: 0x23, setID: 0x2}, + 33: {cat: 0x24, setID: 0x1a}, + 34: {cat: 0xf, setID: 0x15}, + 35: {cat: 0x1a, setID: 0x16}, + 36: {cat: 0xf, setID: 0x17}, + 37: {cat: 0x1b, setID: 0x18}, + 38: {cat: 0xf, setID: 0x1c}, + 39: {cat: 0x1d, setID: 0x1d}, + 40: {cat: 0xa, setID: 0x1e}, + 41: {cat: 0xa, setID: 0x1f}, + 42: {cat: 0xc, setID: 0x20}, + 43: {cat: 0xe4, setID: 0x0}, + 44: {cat: 0x5, setID: 0x3}, + 45: {cat: 0xd, setID: 0xe}, + 46: {cat: 0xd, setID: 0x21}, + 47: {cat: 0x22, setID: 0x1}, + 48: {cat: 0x23, setID: 0x19}, + 49: {cat: 0x24, setID: 0x1a}, + 50: {cat: 0x25, setID: 0x22}, + 51: {cat: 0x22, setID: 0x23}, + 52: {cat: 0x23, setID: 0x19}, + 53: {cat: 0x24, setID: 0x1a}, + 54: {cat: 0x25, setID: 0x22}, + 55: {cat: 0x22, setID: 0x24}, + 56: {cat: 0x23, setID: 0x19}, + 57: {cat: 0x24, setID: 0x1a}, + 58: {cat: 0x25, setID: 0x22}, + 59: {cat: 0x21, setID: 0x25}, + 60: {cat: 0x22, setID: 0x1}, + 61: {cat: 0x23, setID: 0x2}, + 62: {cat: 0x24, setID: 0x26}, + 63: {cat: 0x25, setID: 0x27}, +} // Size: 152 bytes + +var ordinalIndex = []uint8{ // 22 elements + 0x00, 0x00, 0x02, 0x03, 0x04, 0x05, 0x07, 0x09, + 0x0b, 0x0f, 0x10, 0x13, 0x16, 0x1c, 0x1f, 0x22, + 0x28, 0x2f, 0x33, 0x37, 0x3b, 0x40, +} // Size: 46 bytes + +var ordinalLangToIndex = []uint8{ // 775 elements + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x12, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, + 0x10, 0x10, 0x10, 0x00, 0x00, 0x05, 0x05, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 40 - 7F + 0x12, 0x12, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, + 0x0e, 0x0e, 0x0e, 0x0e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x14, 0x14, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 80 - BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + // Entry C0 - FF + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 100 - 13F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, + 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 140 - 17F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, + 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 180 - 1BF + 0x00, 0x00, 0x00, 0x00, 0x09, 0x09, 0x09, 0x09, + 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0f, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x0d, 0x0d, 0x02, 0x02, 0x02, + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x13, 
0x13, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 240 - 27F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 280 - 2BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0b, 0x0b, 0x0b, 0x0b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x07, 0x07, 0x02, 0x00, 0x00, 0x00, 0x00, + // Entry 2C0 - 2FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 300 - 33F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x0c, +} // Size: 799 bytes + +var ordinalInclusionMasks = []uint64{ // 100 elements + // Entry 0 - 1F + 0x0000002000010009, 0x00000018482000d3, 0x0000000042840195, 0x000000410a040581, + 0x00000041040c0081, 0x0000009840040041, 0x0000008400045001, 0x0000003850040001, + 0x0000003850060001, 0x0000003800049001, 0x0000000800052001, 0x0000000040660031, + 0x0000000041840331, 0x0000000100040f01, 0x00000001001c0001, 0x0000000040040001, + 0x0000000000045001, 0x0000000070040001, 0x0000000070040001, 0x0000000000049001, + 0x0000000080050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000000010001, 0x0000000040200011, + // Entry 20 - 3F + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, + 0x0000000200050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000080010001, 0x0000000040200011, + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, + 0x0000000200050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + // Entry 40 - 5F + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000080010001, 0x0000000040200011, + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, + 0x0000000080070001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000200010001, 0x0000000040200011, + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 
0x0000000040000001, + // Entry 60 - 7F + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, +} // Size: 824 bytes + +// Slots used for ordinal: 40 of 0xFF rules; 16 of 0xFF indexes; 40 of 64 sets + +var cardinalRules = []pluralCheck{ // 166 elements + 0: {cat: 0x2, setID: 0x3}, + 1: {cat: 0x22, setID: 0x1}, + 2: {cat: 0x2, setID: 0x4}, + 3: {cat: 0x2, setID: 0x4}, + 4: {cat: 0x7, setID: 0x1}, + 5: {cat: 0x62, setID: 0x3}, + 6: {cat: 0x22, setID: 0x4}, + 7: {cat: 0x7, setID: 0x3}, + 8: {cat: 0x42, setID: 0x1}, + 9: {cat: 0x22, setID: 0x4}, + 10: {cat: 0x22, setID: 0x4}, + 11: {cat: 0x22, setID: 0x5}, + 12: {cat: 0x22, setID: 0x1}, + 13: {cat: 0x22, setID: 0x1}, + 14: {cat: 0x7, setID: 0x4}, + 15: {cat: 0x92, setID: 0x3}, + 16: {cat: 0xf, setID: 0x6}, + 17: {cat: 0x1f, setID: 0x7}, + 18: {cat: 0x82, setID: 0x3}, + 19: {cat: 0x92, setID: 0x3}, + 20: {cat: 0xf, setID: 0x6}, + 21: {cat: 0x62, setID: 0x3}, + 22: {cat: 0x4a, setID: 0x6}, + 23: {cat: 0x7, setID: 0x8}, + 24: {cat: 0x62, setID: 0x3}, + 25: {cat: 0x1f, setID: 0x9}, + 26: {cat: 0x62, setID: 0x3}, + 27: {cat: 0x5f, setID: 0x9}, + 28: {cat: 0x72, setID: 0x3}, + 29: {cat: 0x29, setID: 0xa}, + 30: {cat: 0x29, setID: 0xb}, + 31: {cat: 0x4f, setID: 0xb}, + 32: {cat: 0x61, setID: 0x2}, + 33: {cat: 0x2f, setID: 0x6}, + 34: {cat: 0x3a, setID: 0x7}, + 35: {cat: 0x4f, setID: 0x6}, + 36: {cat: 0x5f, setID: 0x7}, + 37: {cat: 0x62, setID: 0x2}, + 38: {cat: 0x4f, setID: 0x6}, + 39: {cat: 0x72, setID: 0x2}, + 40: {cat: 0x21, setID: 0x3}, + 41: {cat: 0x7, setID: 0x4}, + 42: {cat: 0x32, setID: 0x3}, + 43: {cat: 0x21, setID: 0x3}, + 44: {cat: 0x22, setID: 0x1}, + 45: {cat: 0x22, setID: 0x1}, + 46: {cat: 0x23, setID: 0x2}, + 47: {cat: 0x2, setID: 0x3}, + 48: {cat: 0x22, setID: 0x1}, + 49: {cat: 0x24, setID: 0xc}, + 50: {cat: 0x7, setID: 0x1}, + 51: {cat: 0x62, setID: 0x3}, + 52: {cat: 0x74, setID: 0x3}, + 53: {cat: 0x24, setID: 0x3}, + 54: {cat: 0x2f, setID: 0xd}, + 55: {cat: 0x34, setID: 0x1}, + 56: {cat: 0xf, setID: 0x6}, + 57: {cat: 0x1f, setID: 0x7}, + 58: {cat: 0x62, setID: 0x3}, + 59: {cat: 0x4f, setID: 0x6}, + 60: {cat: 0x5a, setID: 0x7}, + 61: {cat: 0xf, setID: 0xe}, + 62: {cat: 0x1f, setID: 0xf}, + 63: {cat: 0x64, setID: 0x3}, + 64: {cat: 0x4f, setID: 0xe}, + 65: {cat: 0x5c, setID: 0xf}, + 66: {cat: 0x22, setID: 0x10}, + 67: {cat: 0x23, setID: 0x11}, + 68: {cat: 0x24, setID: 0x12}, + 69: {cat: 0xf, setID: 0x1}, + 70: {cat: 0x62, setID: 0x3}, + 71: {cat: 0xf, setID: 0x2}, + 72: {cat: 0x63, setID: 0x3}, + 73: {cat: 0xf, setID: 0x13}, + 74: {cat: 0x64, setID: 0x3}, + 75: {cat: 0x74, setID: 0x3}, + 76: {cat: 0xf, setID: 0x1}, + 77: {cat: 0x62, setID: 0x3}, + 78: {cat: 0x4a, setID: 0x1}, + 79: {cat: 0xf, setID: 0x2}, + 80: {cat: 0x63, setID: 0x3}, + 81: {cat: 0x4b, setID: 0x2}, + 82: {cat: 0xf, setID: 0x13}, + 83: {cat: 0x64, setID: 0x3}, + 84: {cat: 0x4c, setID: 0x13}, + 85: {cat: 0x7, setID: 0x1}, + 86: {cat: 0x62, setID: 0x3}, + 87: {cat: 0x7, setID: 0x2}, + 88: {cat: 0x63, setID: 0x3}, + 89: {cat: 0x2f, setID: 0xa}, + 90: {cat: 0x37, setID: 0x14}, + 91: {cat: 0x65, setID: 0x3}, + 92: {cat: 0x7, setID: 0x1}, + 93: {cat: 0x62, setID: 0x3}, + 94: {cat: 0x7, setID: 0x15}, + 95: {cat: 0x64, setID: 0x3}, + 96: {cat: 0x75, setID: 0x3}, + 97: {cat: 0x7, setID: 0x1}, + 98: {cat: 0x62, setID: 0x3}, + 99: {cat: 0xf, setID: 0xe}, + 100: {cat: 0x1f, setID: 0xf}, + 101: {cat: 0x64, setID: 0x3}, + 102: {cat: 0xf, setID: 0x16}, + 103: {cat: 0x17, setID: 0x1}, + 104: {cat: 0x65, setID: 0x3}, + 105: {cat: 0xf, setID: 0x17}, + 106: 
{cat: 0x65, setID: 0x3}, + 107: {cat: 0xf, setID: 0xf}, + 108: {cat: 0x65, setID: 0x3}, + 109: {cat: 0x2f, setID: 0x6}, + 110: {cat: 0x3a, setID: 0x7}, + 111: {cat: 0x2f, setID: 0xe}, + 112: {cat: 0x3c, setID: 0xf}, + 113: {cat: 0x2d, setID: 0xa}, + 114: {cat: 0x2d, setID: 0x17}, + 115: {cat: 0x2d, setID: 0x18}, + 116: {cat: 0x2f, setID: 0x6}, + 117: {cat: 0x3a, setID: 0xb}, + 118: {cat: 0x2f, setID: 0x19}, + 119: {cat: 0x3c, setID: 0xb}, + 120: {cat: 0x55, setID: 0x3}, + 121: {cat: 0x22, setID: 0x1}, + 122: {cat: 0x24, setID: 0x3}, + 123: {cat: 0x2c, setID: 0xc}, + 124: {cat: 0x2d, setID: 0xb}, + 125: {cat: 0xf, setID: 0x6}, + 126: {cat: 0x1f, setID: 0x7}, + 127: {cat: 0x62, setID: 0x3}, + 128: {cat: 0xf, setID: 0xe}, + 129: {cat: 0x1f, setID: 0xf}, + 130: {cat: 0x64, setID: 0x3}, + 131: {cat: 0xf, setID: 0xa}, + 132: {cat: 0x65, setID: 0x3}, + 133: {cat: 0xf, setID: 0x17}, + 134: {cat: 0x65, setID: 0x3}, + 135: {cat: 0xf, setID: 0x18}, + 136: {cat: 0x65, setID: 0x3}, + 137: {cat: 0x2f, setID: 0x6}, + 138: {cat: 0x3a, setID: 0x1a}, + 139: {cat: 0x2f, setID: 0x1b}, + 140: {cat: 0x3b, setID: 0x1c}, + 141: {cat: 0x2f, setID: 0x1d}, + 142: {cat: 0x3c, setID: 0x1e}, + 143: {cat: 0x37, setID: 0x3}, + 144: {cat: 0xa5, setID: 0x0}, + 145: {cat: 0x22, setID: 0x1}, + 146: {cat: 0x23, setID: 0x2}, + 147: {cat: 0x24, setID: 0x1f}, + 148: {cat: 0x25, setID: 0x20}, + 149: {cat: 0xf, setID: 0x6}, + 150: {cat: 0x62, setID: 0x3}, + 151: {cat: 0xf, setID: 0x1b}, + 152: {cat: 0x63, setID: 0x3}, + 153: {cat: 0xf, setID: 0x21}, + 154: {cat: 0x64, setID: 0x3}, + 155: {cat: 0x75, setID: 0x3}, + 156: {cat: 0x21, setID: 0x3}, + 157: {cat: 0x22, setID: 0x1}, + 158: {cat: 0x23, setID: 0x2}, + 159: {cat: 0x2c, setID: 0x22}, + 160: {cat: 0x2d, setID: 0x5}, + 161: {cat: 0x21, setID: 0x3}, + 162: {cat: 0x22, setID: 0x1}, + 163: {cat: 0x23, setID: 0x2}, + 164: {cat: 0x24, setID: 0x23}, + 165: {cat: 0x25, setID: 0x24}, +} // Size: 356 bytes + +var cardinalIndex = []uint8{ // 36 elements + 0x00, 0x00, 0x02, 0x03, 0x04, 0x06, 0x09, 0x0a, + 0x0c, 0x0d, 0x10, 0x14, 0x17, 0x1d, 0x28, 0x2b, + 0x2d, 0x2f, 0x32, 0x38, 0x42, 0x45, 0x4c, 0x55, + 0x5c, 0x61, 0x6d, 0x74, 0x79, 0x7d, 0x89, 0x91, + 0x95, 0x9c, 0xa1, 0xa6, +} // Size: 60 bytes + +var cardinalLangToIndex = []uint8{ // 775 elements + // Entry 0 - 3F + 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x06, 0x06, + 0x01, 0x01, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x01, 0x01, 0x08, 0x08, 0x04, 0x04, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x00, 0x00, 0x1a, 0x1a, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x06, 0x00, 0x00, + // Entry 40 - 7F + 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x1e, 0x1e, + 0x08, 0x08, 0x13, 0x13, 0x13, 0x13, 0x13, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x18, 0x18, 0x00, 0x00, 0x22, 0x22, 0x09, 0x09, + 0x09, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x00, 0x00, 0x16, 0x16, 0x00, + 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 80 - BF + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // 
Entry C0 - FF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + // Entry 100 - 13F + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, + 0x08, 0x08, 0x00, 0x00, 0x01, 0x01, 0x01, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x04, 0x04, 0x0c, 0x0c, + 0x08, 0x08, 0x08, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 140 - 17F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x08, 0x08, 0x04, 0x04, 0x1f, 0x1f, + 0x14, 0x14, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, + 0x01, 0x01, 0x06, 0x00, 0x00, 0x20, 0x20, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x17, 0x17, 0x01, + 0x01, 0x13, 0x13, 0x13, 0x16, 0x16, 0x08, 0x08, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 180 - 1BF + 0x00, 0x04, 0x0a, 0x0a, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x10, 0x17, 0x00, 0x00, 0x00, 0x08, 0x08, + 0x04, 0x08, 0x08, 0x00, 0x00, 0x08, 0x08, 0x02, + 0x02, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, + 0x08, 0x08, 0x00, 0x00, 0x0f, 0x0f, 0x08, 0x10, + // Entry 1C0 - 1FF + 0x10, 0x08, 0x08, 0x0e, 0x0e, 0x08, 0x08, 0x08, + 0x08, 0x00, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x1b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x0d, 0x08, + 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, + 0x00, 0x00, 0x08, 0x08, 0x0b, 0x0b, 0x08, 0x08, + 0x08, 0x08, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x1c, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x00, 0x08, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x08, + 0x06, 0x00, 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x08, 0x19, 0x19, 0x0d, 0x0d, + 0x08, 0x08, 0x03, 0x04, 0x03, 0x04, 0x04, 0x04, + // Entry 240 - 27F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x08, 0x08, 0x00, 0x00, 0x12, + 0x12, 0x12, 0x08, 0x08, 0x1d, 0x1d, 0x1d, 0x1d, + 0x1d, 0x1d, 0x1d, 0x00, 0x00, 0x08, 0x08, 0x00, + 0x00, 0x08, 0x08, 0x00, 0x00, 0x08, 0x08, 0x08, + 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x00, 0x00, + 0x00, 0x00, 0x13, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x05, 0x05, 0x18, 0x18, 0x15, 0x15, 0x10, 0x10, + // Entry 280 - 2BF + 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x13, + 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, + 0x13, 0x13, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x06, + 0x08, 0x08, 0x08, 0x0c, 0x08, 0x00, 0x00, 0x08, + // Entry 2C0 - 2FF + 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x07, + 0x07, 0x08, 0x08, 0x1d, 0x1d, 0x04, 0x04, 0x04, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, + 0x00, 0x00, 0x00, 
0x00, 0x08, 0x00, 0x00, 0x08, + 0x08, 0x08, 0x08, 0x06, 0x08, 0x08, 0x00, 0x00, + 0x08, 0x08, 0x08, 0x00, 0x00, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 300 - 33F + 0x00, 0x00, 0x00, 0x01, 0x01, 0x04, 0x04, +} // Size: 799 bytes + +var cardinalInclusionMasks = []uint64{ // 100 elements + // Entry 0 - 1F + 0x0000000200500419, 0x0000000000512153, 0x000000000a327105, 0x0000000ca23c7101, + 0x00000004a23c7201, 0x0000000482943001, 0x0000001482943201, 0x0000000502943001, + 0x0000000502943001, 0x0000000522943201, 0x0000000540543401, 0x00000000454128e1, + 0x000000005b02e821, 0x000000006304e821, 0x000000006304ea21, 0x0000000042842821, + 0x0000000042842a21, 0x0000000042842821, 0x0000000042842821, 0x0000000062842a21, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000000400421, 0x0000000000400061, + // Entry 20 - 3F + 0x000000000a004021, 0x0000000022004021, 0x0000000022004221, 0x0000000002800021, + 0x0000000002800221, 0x0000000002800021, 0x0000000002800021, 0x0000000022800221, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000000400421, 0x0000000000400061, + 0x000000000a004021, 0x0000000022004021, 0x0000000022004221, 0x0000000002800021, + 0x0000000002800221, 0x0000000002800021, 0x0000000002800021, 0x0000000022800221, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + // Entry 40 - 5F + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000040400421, 0x0000000044400061, + 0x000000005a004021, 0x0000000062004021, 0x0000000062004221, 0x0000000042800021, + 0x0000000042800221, 0x0000000042800021, 0x0000000042800021, 0x0000000062800221, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000040400421, 0x0000000044400061, + 0x000000005a004021, 0x0000000062004021, 0x0000000062004221, 0x0000000042800021, + // Entry 60 - 7F + 0x0000000042800221, 0x0000000042800021, 0x0000000042800021, 0x0000000062800221, +} // Size: 824 bytes + +// Slots used for cardinal: A6 of 0xFF rules; 24 of 0xFF indexes; 37 of 64 sets + +// Total table size 3860 bytes (3KiB); checksum: AAFBF21 diff --git a/hack/tools/vendor/golang.org/x/text/internal/catmsg/catmsg.go b/hack/tools/vendor/golang.org/x/text/internal/catmsg/catmsg.go new file mode 100644 index 0000000000..1b257a7b4d --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/internal/catmsg/catmsg.go @@ -0,0 +1,417 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package catmsg contains support types for package x/text/message/catalog. +// +// This package contains the low-level implementations of Message used by the +// catalog package and provides primitives for other packages to implement their +// own. For instance, the plural package provides functionality for selecting +// translation strings based on the plural category of substitution arguments. 
+//
+// # Encoding and Decoding
+//
+// Catalogs store Messages encoded as a single string. Compiling a message into
+// a string both results in a more compact representation and speeds up evaluation.
+//
+// A Message must implement a Compile method to convert its arbitrary
+// representation to a string. The Compile method takes an Encoder which
+// facilitates serializing the message. Encoders also provide more context of
+// the message's creation (such as for which language the message is intended),
+// which may not be known at the time of the creation of the message.
+//
+// Each message type must also have an accompanying decoder registered to decode
+// the message. This decoder takes a Decoder argument which provides the
+// counterparts for the decoding.
+//
+// # Renderers
+//
+// A Decoder must be initialized with a Renderer implementation. These
+// implementations must be provided by packages that use Catalogs, typically
+// formatting packages such as x/text/message. A typical user will not need to
+// worry about this type; it is only relevant to packages that do string
+// formatting and want to use the catalog package to handle localized strings.
+//
+// A package that uses catalogs for selecting strings receives selection results
+// as a sequence of substrings passed to the Renderer. The following snippet shows
+// an example using the message package.
+//
+//	message.Set(language.English, "You are %d minute(s) late.",
+//		catalog.Var("minutes", plural.Select(1, "one", "minute")),
+//		catalog.String("You are %[1]d ${minutes} late."))
+//
+//	p := message.NewPrinter(language.English)
+//	p.Printf("You are %d minute(s) late.", 5) // always 5 minutes late.
+//
+// To evaluate the Printf, package message wraps the arguments in a Renderer
+// that is passed to the catalog for message decoding. The call sequence that
+// results from evaluating the above message, assuming the person is rather
+// tardy, is:
+//
+//	Render("You are %[1]d ")
+//	Arg(1)
+//	Render("minutes")
+//	Render(" late.")
+//
+// The call to Arg is caused by the plural.Select execution, which evaluates
+// the argument to determine whether the singular or plural message form should
+// be selected. The calls to Render report the partial results to the message
+// package for further evaluation.
+package catmsg
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+
+	"golang.org/x/text/language"
+)
+
+// A Handle refers to a registered message type.
+type Handle int
+
+// A Handler decodes and evaluates data compiled by a Message and sends the
+// result to the Decoder. The output may depend on the value of the substitution
+// arguments, accessible by the Decoder's Arg method. The Handler returns false
+// if there is no translation for the given substitution arguments.
+type Handler func(d *Decoder) bool
+
+// Register records the existence of a message type and returns a Handle that
+// can be used in the Encoder's EncodeMessageType method to create such
+// messages. The prefix of the name should be the package path followed by
+// an optional disambiguating string.
+// Register will panic if a handle for the same name was already registered.
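+//
+// An illustrative sketch of registering a custom message type (the name and
+// handler below are hypothetical, not part of the upstream file):
+//
+//	var upper = Register("example.com/mypkg.Upper", func(d *Decoder) bool {
+//		// The payload is a single length-prefixed string; render it upper-cased.
+//		d.Render(strings.ToUpper(d.DecodeString()))
+//		return true
+//	})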
+func Register(name string, handler Handler) Handle { + mutex.Lock() + defer mutex.Unlock() + + if _, ok := names[name]; ok { + panic(fmt.Errorf("catmsg: handler for %q already exists", name)) + } + h := Handle(len(handlers)) + names[name] = h + handlers = append(handlers, handler) + return h +} + +// These handlers require fixed positions in the handlers slice. +const ( + msgVars Handle = iota + msgFirst + msgRaw + msgString + msgAffix + // Leave some arbitrary room for future expansion: 20 should suffice. + numInternal = 20 +) + +const prefix = "golang.org/x/text/internal/catmsg." + +var ( + // TODO: find a more stable way to link handles to message types. + mutex sync.Mutex + names = map[string]Handle{ + prefix + "Vars": msgVars, + prefix + "First": msgFirst, + prefix + "Raw": msgRaw, + prefix + "String": msgString, + prefix + "Affix": msgAffix, + } + handlers = make([]Handler, numInternal) +) + +func init() { + // This handler is a message type wrapper that initializes a decoder + // with a variable block. This message type, if present, is always at the + // start of an encoded message. + handlers[msgVars] = func(d *Decoder) bool { + blockSize := int(d.DecodeUint()) + d.vars = d.data[:blockSize] + d.data = d.data[blockSize:] + return d.executeMessage() + } + + // First takes the first message in a sequence that results in a match for + // the given substitution arguments. + handlers[msgFirst] = func(d *Decoder) bool { + for !d.Done() { + if d.ExecuteMessage() { + return true + } + } + return false + } + + handlers[msgRaw] = func(d *Decoder) bool { + d.Render(d.data) + return true + } + + // A String message alternates between a string constant and a variable + // substitution. + handlers[msgString] = func(d *Decoder) bool { + for !d.Done() { + if str := d.DecodeString(); str != "" { + d.Render(str) + } + if d.Done() { + break + } + d.ExecuteSubstitution() + } + return true + } + + handlers[msgAffix] = func(d *Decoder) bool { + // TODO: use an alternative method for common cases. + prefix := d.DecodeString() + suffix := d.DecodeString() + if prefix != "" { + d.Render(prefix) + } + ret := d.ExecuteMessage() + if suffix != "" { + d.Render(suffix) + } + return ret + } +} + +var ( + // ErrIncomplete indicates a compiled message does not define translations + // for all possible argument values. If this message is returned, evaluating + // a message may result in the ErrNoMatch error. + ErrIncomplete = errors.New("catmsg: incomplete message; may not give result for all inputs") + + // ErrNoMatch indicates no translation message matched the given input + // parameters when evaluating a message. + ErrNoMatch = errors.New("catmsg: no translation for inputs") +) + +// A Message holds a collection of translations for the same phrase that may +// vary based on the values of substitution arguments. +type Message interface { + // Compile encodes the format string(s) of the message as a string for later + // evaluation. + // + // The first call Compile makes on the encoder must be EncodeMessageType. + // The handle passed to this call may either be a handle returned by + // Register to encode a single custom message, or HandleFirst followed by + // a sequence of calls to EncodeMessage. + // + // Compile must return ErrIncomplete if it is possible for evaluation to + // not match any translation for a given set of formatting parameters. + // For example, selecting a translation based on plural form may not yield + // a match if the form "Other" is not one of the selectors. 
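+	//
+	// A typical Compile for a custom message type (illustrative sketch; the
+	// myMessage type and myHandle value are hypothetical) encodes its handle
+	// followed by its payload:
+	//
+	//	func (m myMessage) Compile(e *Encoder) error {
+	//		e.EncodeMessageType(myHandle) // Handle obtained from Register.
+	//		e.EncodeString(m.payload)
+	//		return nil
+	//	}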
+	//
+	// Compile may return any other application-specific error. For backwards
+	// compatibility with packages like fmt, which often do not do sanity
+	// checking of format strings ahead of time, Compile should still make an
+	// effort to have some sensible fallback in case of an error.
+	Compile(e *Encoder) error
+}
+
+// Compile converts a Message to a data string that can be stored in a Catalog.
+// The resulting string can subsequently be decoded by passing it to the Execute
+// method of a Decoder.
+func Compile(tag language.Tag, macros Dictionary, m Message) (data string, err error) {
+	// TODO: pass macros so they can be used for validation.
+	v := &Encoder{inBody: true} // encoder for variables
+	v.root = v
+	e := &Encoder{root: v, parent: v, tag: tag} // encoder for messages
+	err = m.Compile(e)
+	// This package serves the message package, which in turn is meant to be a
+	// drop-in replacement for fmt. With the fmt package, format strings are
+	// evaluated lazily and errors are handled by substituting strings in the
+	// result, rather than returning an error. Dealing with multiple languages
+	// makes it more important to check errors ahead of time. We chose to be
+	// consistent and compatible and allow graceful degradation in case of
+	// errors.
+	buf := e.buf[stripPrefix(e.buf):]
+	if len(v.buf) > 0 {
+		// Prepend variable block.
+		b := make([]byte, 1+maxVarintBytes+len(v.buf)+len(buf))
+		b[0] = byte(msgVars)
+		b = b[:1+encodeUint(b[1:], uint64(len(v.buf)))]
+		b = append(b, v.buf...)
+		b = append(b, buf...)
+		buf = b
+	}
+	if err == nil {
+		err = v.err
+	}
+	return string(buf), err
+}
+
+// FirstOf is a message type that prints the first message in the sequence that
+// resolves to a match for the given substitution arguments.
+type FirstOf []Message
+
+// Compile implements Message.
+func (s FirstOf) Compile(e *Encoder) error {
+	e.EncodeMessageType(msgFirst)
+	err := ErrIncomplete
+	for i, m := range s {
+		if err == nil {
+			return fmt.Errorf("catalog: message argument %d is complete and blocks subsequent messages", i-1)
+		}
+		err = e.EncodeMessage(m)
+	}
+	return err
+}
+
+// Var defines a message that can be substituted for a placeholder of the same
+// name. If an expression does not result in a string after evaluation, Name is
+// used as the substitution. For example:
+//
+//	Var{
+//		Name:    "minutes",
+//		Message: plural.Select(1, "one", "minute"),
+//	}
+//
+// will resolve to minute for singular and minutes for plural forms.
+type Var struct {
+	Name    string
+	Message Message
+}
+
+var errIsVar = errors.New("catmsg: variable used as message")
+
+// Compile implements Message.
+//
+// Note that this method merely registers a variable; it does not create an
+// encoded message.
+func (v *Var) Compile(e *Encoder) error {
+	if err := e.addVar(v.Name, v.Message); err != nil {
+		return err
+	}
+	// Using a Var by itself is an error. If it is in a sequence followed by
+	// other messages referring to it, this error will be ignored.
+	return errIsVar
+}
+
+// Raw is a message consisting of a single format string that is passed as is
+// to the Renderer.
+//
+// Note that a Renderer may still do its own variable substitution.
+type Raw string
+
+// Compile implements Message.
+func (r Raw) Compile(e *Encoder) (err error) {
+	e.EncodeMessageType(msgRaw)
+	// Special case: raw strings don't have a size encoding and so don't use
+	// EncodeString.
+	e.buf = append(e.buf, r...)
+	return nil
+}
+
+// String is a message consisting of a single format string which contains
+// placeholders that may be substituted with variables.
+//
+// Variable substitutions are marked with placeholders and a variable name of
+// the form ${name}. Any other substitutions such as Go templates or
+// printf-style substitutions are left to be done by the Renderer.
+//
+// When evaluating a string interpolation, a Renderer will receive separate
+// calls for each placeholder and interstitial string. For example, for the
+// message "%[1]v ${invites} %[2]v to ${their} party.", the sequence of calls
+// is:
+//
+//	d.Render("%[1]v ")
+//	d.Arg(1)
+//	d.Render(resultOfInvites)
+//	d.Render(" %[2]v to ")
+//	d.Arg(2)
+//	d.Render(resultOfTheir)
+//	d.Render(" party.")
+//
+// where the messages for "invites" and "their" both use a plural.Select
+// referring to the first argument.
+//
+// Strings may also invoke macros. Macros are essentially variables that can be
+// reused. Macros may, for instance, be used to make selections between
+// different conjugations of a verb. See the catalog package description for an
+// overview of macros.
+type String string
+
+// Compile implements Message. It parses the placeholder formats and returns
+// any error.
+func (s String) Compile(e *Encoder) (err error) {
+	msg := string(s)
+	const subStart = "${"
+	hasHeader := false
+	p := 0
+	b := []byte{}
+	for {
+		i := strings.Index(msg[p:], subStart)
+		if i == -1 {
+			break
+		}
+		b = append(b, msg[p:p+i]...)
+		p += i + len(subStart)
+		if i = strings.IndexByte(msg[p:], '}'); i == -1 {
+			b = append(b, "$!(MISSINGBRACE)"...)
+			err = fmt.Errorf("catmsg: missing '}'")
+			p = len(msg)
+			break
+		}
+		name := strings.TrimSpace(msg[p : p+i])
+		if q := strings.IndexByte(name, '('); q == -1 {
+			if !hasHeader {
+				hasHeader = true
+				e.EncodeMessageType(msgString)
+			}
+			e.EncodeString(string(b))
+			e.EncodeSubstitution(name)
+			b = b[:0]
+		} else if j := strings.IndexByte(name[q:], ')'); j == -1 {
+			// TODO: what should the error be?
+			b = append(b, "$!(MISSINGPAREN)"...)
+			err = fmt.Errorf("catmsg: missing ')'")
+		} else if x, sErr := strconv.ParseUint(strings.TrimSpace(name[q+1:q+j]), 10, 32); sErr != nil {
+			// TODO: handle more than one argument
+			b = append(b, "$!(BADNUM)"...)
+			err = fmt.Errorf("catmsg: invalid number %q", strings.TrimSpace(name[q+1:q+j]))
+		} else {
+			if !hasHeader {
+				hasHeader = true
+				e.EncodeMessageType(msgString)
+			}
+			e.EncodeString(string(b))
+			e.EncodeSubstitution(name[:q], int(x))
+			b = b[:0]
+		}
+		p += i + 1
+	}
+	b = append(b, msg[p:]...)
+	if !hasHeader {
+		// Simplify string to a raw string.
+		Raw(string(b)).Compile(e)
+	} else if len(b) > 0 {
+		e.EncodeString(string(b))
+	}
+	return err
+}
+
+// Affix is a message that adds a prefix and suffix to another message.
+// This is mostly used to add back whitespace to a translation that was stripped
+// before sending it out.
+type Affix struct {
+	Message Message
+	Prefix  string
+	Suffix  string
+}
+
+// Compile implements Message.
+func (a Affix) Compile(e *Encoder) (err error) {
+	// TODO: consider adding a special message type that just adds a single
+	// return. This is probably common enough to handle the majority of cases.
+	// Get some stats first, though.
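+	//
+	// For example (illustrative only, not part of the upstream file): a
+	// catalog might wrap a trimmed translation as
+	//
+	//	Affix{Prefix: " ", Message: String("done"), Suffix: "\n"}
+	//
+	// to restore the stripped whitespace around "done" at render time.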
+ e.EncodeMessageType(msgAffix) + e.EncodeString(a.Prefix) + e.EncodeString(a.Suffix) + e.EncodeMessage(a.Message) + return nil +} diff --git a/hack/tools/vendor/golang.org/x/text/internal/catmsg/codec.go b/hack/tools/vendor/golang.org/x/text/internal/catmsg/codec.go new file mode 100644 index 0000000000..547802b0f3 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/internal/catmsg/codec.go @@ -0,0 +1,407 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package catmsg + +import ( + "errors" + "fmt" + + "golang.org/x/text/language" +) + +// A Renderer renders a Message. +type Renderer interface { + // Render renders the given string. The given string may be interpreted as a + // format string, such as the one used by the fmt package or a template. + Render(s string) + + // Arg returns the i-th argument passed to format a message. This method + // should return nil if there is no such argument. Messages need access to + // arguments to allow selecting a message based on linguistic features of + // those arguments. + Arg(i int) interface{} +} + +// A Dictionary specifies a source of messages, including variables or macros. +type Dictionary interface { + // Lookup returns the message for the given key. It returns false for ok if + // such a message could not be found. + Lookup(key string) (data string, ok bool) + + // TODO: consider returning an interface, instead of a string. This will + // allow implementations to do their own message type decoding. +} + +// An Encoder serializes a Message to a string. +type Encoder struct { + // The root encoder is used for storing encoded variables. + root *Encoder + // The parent encoder provides the surrounding scopes for resolving variable + // names. + parent *Encoder + + tag language.Tag + + // buf holds the encoded message so far. After a message completes encoding, + // the contents of buf, prefixed by the encoded length, are flushed to the + // parent buffer. + buf []byte + + // vars is the lookup table of variables in the current scope. + vars []keyVal + + err error + inBody bool // if false next call must be EncodeMessageType +} + +type keyVal struct { + key string + offset int +} + +// Language reports the language for which the encoded message will be stored +// in the Catalog. +func (e *Encoder) Language() language.Tag { return e.tag } + +func (e *Encoder) setError(err error) { + if e.root.err == nil { + e.root.err = err + } +} + +// EncodeUint encodes x. +func (e *Encoder) EncodeUint(x uint64) { + e.checkInBody() + var buf [maxVarintBytes]byte + n := encodeUint(buf[:], x) + e.buf = append(e.buf, buf[:n]...) +} + +// EncodeString encodes s. +func (e *Encoder) EncodeString(s string) { + e.checkInBody() + e.EncodeUint(uint64(len(s))) + e.buf = append(e.buf, s...) +} + +// EncodeMessageType marks the current message to be of type h. +// +// It must be the first call of a Message's Compile method. +func (e *Encoder) EncodeMessageType(h Handle) { + if e.inBody { + panic("catmsg: EncodeMessageType not the first method called") + } + e.inBody = true + e.EncodeUint(uint64(h)) +} + +// EncodeMessage serializes the given message inline at the current position. 
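+//
+// For example (illustrative sketch; pair is a hypothetical container type),
+// a container message can nest its children like this:
+//
+//	func (p pair) Compile(e *Encoder) error {
+//		e.EncodeMessageType(msgFirst)
+//		if err := e.EncodeMessage(p.primary); err != nil && err != ErrIncomplete {
+//			return err
+//		}
+//		return e.EncodeMessage(p.fallback)
+//	}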
+func (e *Encoder) EncodeMessage(m Message) error { + e = &Encoder{root: e.root, parent: e, tag: e.tag} + err := m.Compile(e) + if _, ok := m.(*Var); !ok { + e.flushTo(e.parent) + } + return err +} + +func (e *Encoder) checkInBody() { + if !e.inBody { + panic("catmsg: expected prior call to EncodeMessageType") + } +} + +// stripPrefix indicates the number of prefix bytes that must be stripped to +// turn a single-element sequence into a message that is just this single member +// without its size prefix. If the message can be stripped, b[1:n] contains the +// size prefix. +func stripPrefix(b []byte) (n int) { + if len(b) > 0 && Handle(b[0]) == msgFirst { + x, n, _ := decodeUint(b[1:]) + if 1+n+int(x) == len(b) { + return 1 + n + } + } + return 0 +} + +func (e *Encoder) flushTo(dst *Encoder) { + data := e.buf + p := stripPrefix(data) + if p > 0 { + data = data[1:] + } else { + // Prefix the size. + dst.EncodeUint(uint64(len(data))) + } + dst.buf = append(dst.buf, data...) +} + +func (e *Encoder) addVar(key string, m Message) error { + for _, v := range e.parent.vars { + if v.key == key { + err := fmt.Errorf("catmsg: duplicate variable %q", key) + e.setError(err) + return err + } + } + scope := e.parent + // If a variable message is Incomplete, and does not evaluate to a message + // during execution, we fall back to the variable name. We encode this by + // appending the variable name if the message reports it's incomplete. + + err := m.Compile(e) + if err != ErrIncomplete { + e.setError(err) + } + switch { + case len(e.buf) == 1 && Handle(e.buf[0]) == msgFirst: // empty sequence + e.buf = e.buf[:0] + e.inBody = false + fallthrough + case len(e.buf) == 0: + // Empty message. + if err := String(key).Compile(e); err != nil { + e.setError(err) + } + case err == ErrIncomplete: + if Handle(e.buf[0]) != msgFirst { + seq := &Encoder{root: e.root, parent: e} + seq.EncodeMessageType(msgFirst) + e.flushTo(seq) + e = seq + } + // e contains a sequence; append the fallback string. + e.EncodeMessage(String(key)) + } + + // Flush result to variable heap. + offset := len(e.root.buf) + e.flushTo(e.root) + e.buf = e.buf[:0] + + // Record variable offset in current scope. + scope.vars = append(scope.vars, keyVal{key: key, offset: offset}) + return err +} + +const ( + substituteVar = iota + substituteMacro + substituteError +) + +// EncodeSubstitution inserts a resolved reference to a variable or macro. +// +// This call must be matched with a call to ExecuteSubstitution at decoding +// time. +func (e *Encoder) EncodeSubstitution(name string, arguments ...int) { + if arity := len(arguments); arity > 0 { + // TODO: also resolve macros. + e.EncodeUint(substituteMacro) + e.EncodeString(name) + for _, a := range arguments { + e.EncodeUint(uint64(a)) + } + return + } + for scope := e; scope != nil; scope = scope.parent { + for _, v := range scope.vars { + if v.key != name { + continue + } + e.EncodeUint(substituteVar) // TODO: support arity > 0 + e.EncodeUint(uint64(v.offset)) + return + } + } + // TODO: refer to dictionary-wide scoped variables. + e.EncodeUint(substituteError) + e.EncodeString(name) + e.setError(fmt.Errorf("catmsg: unknown var %q", name)) +} + +// A Decoder deserializes and evaluates messages that are encoded by an encoder. +type Decoder struct { + tag language.Tag + dst Renderer + macros Dictionary + + err error + vars string + data string + + macroArg int // TODO: allow more than one argument +} + +// NewDecoder returns a new Decoder. 
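+//
+// A minimal round trip (illustrative only; bufRenderer is not part of this
+// package):
+//
+//	type bufRenderer struct{ buf []byte }
+//
+//	func (r *bufRenderer) Render(s string)       { r.buf = append(r.buf, s...) }
+//	func (r *bufRenderer) Arg(i int) interface{} { return nil }
+//
+//	data, _ := Compile(language.English, nil, String("Hello, world!"))
+//	r := &bufRenderer{}
+//	d := NewDecoder(language.English, r, nil)
+//	_ = d.Execute(data) // r.buf now holds "Hello, world!"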
+// +// Decoders are designed to be reused for multiple invocations of Execute. +// Only one goroutine may call Execute concurrently. +func NewDecoder(tag language.Tag, r Renderer, macros Dictionary) *Decoder { + return &Decoder{ + tag: tag, + dst: r, + macros: macros, + } +} + +func (d *Decoder) setError(err error) { + if d.err == nil { + d.err = err + } +} + +// Language returns the language in which the message is being rendered. +// +// The destination language may be a child language of the language used for +// encoding. For instance, a decoding language of "pt-PT" is consistent with an +// encoding language of "pt". +func (d *Decoder) Language() language.Tag { return d.tag } + +// Done reports whether there are more bytes to process in this message. +func (d *Decoder) Done() bool { return len(d.data) == 0 } + +// Render implements Renderer. +func (d *Decoder) Render(s string) { d.dst.Render(s) } + +// Arg implements Renderer. +// +// During evaluation of macros, the argument positions may be mapped to +// arguments that differ from the original call. +func (d *Decoder) Arg(i int) interface{} { + if d.macroArg != 0 { + if i != 1 { + panic("catmsg: only macros with single argument supported") + } + i = d.macroArg + } + return d.dst.Arg(i) +} + +// DecodeUint decodes a number that was encoded with EncodeUint and advances the +// position. +func (d *Decoder) DecodeUint() uint64 { + x, n, err := decodeUintString(d.data) + d.data = d.data[n:] + if err != nil { + d.setError(err) + } + return x +} + +// DecodeString decodes a string that was encoded with EncodeString and advances +// the position. +func (d *Decoder) DecodeString() string { + size := d.DecodeUint() + s := d.data[:size] + d.data = d.data[size:] + return s +} + +// SkipMessage skips the message at the current location and advances the +// position. +func (d *Decoder) SkipMessage() { + n := int(d.DecodeUint()) + d.data = d.data[n:] +} + +// Execute decodes and evaluates msg. +// +// Only one goroutine may call execute. +func (d *Decoder) Execute(msg string) error { + d.err = nil + if !d.execute(msg) { + return ErrNoMatch + } + return d.err +} + +func (d *Decoder) execute(msg string) bool { + saved := d.data + d.data = msg + ok := d.executeMessage() + d.data = saved + return ok +} + +// executeMessageFromData is like execute, but also decodes a leading message +// size and clips the given string accordingly. +// +// It reports the number of bytes consumed and whether a message was selected. +func (d *Decoder) executeMessageFromData(s string) (n int, ok bool) { + saved := d.data + d.data = s + size := int(d.DecodeUint()) + n = len(s) - len(d.data) + // Sanitize the setting. This allows skipping a size argument for + // RawString and method Done. + d.data = d.data[:size] + ok = d.executeMessage() + n += size - len(d.data) + d.data = saved + return n, ok +} + +var errUnknownHandler = errors.New("catmsg: string contains unsupported handler") + +// executeMessage reads the handle id, initializes the decoder and executes the +// message. It is assumed that all of d.data[d.p:] is the single message. +func (d *Decoder) executeMessage() bool { + if d.Done() { + // We interpret no data as a valid empty message. 
+ return true + } + handle := d.DecodeUint() + + var fn Handler + mutex.Lock() + if int(handle) < len(handlers) { + fn = handlers[handle] + } + mutex.Unlock() + if fn == nil { + d.setError(errUnknownHandler) + d.execute(fmt.Sprintf("\x02$!(UNKNOWNMSGHANDLER=%#x)", handle)) + return true + } + return fn(d) +} + +// ExecuteMessage decodes and executes the message at the current position. +func (d *Decoder) ExecuteMessage() bool { + n, ok := d.executeMessageFromData(d.data) + d.data = d.data[n:] + return ok +} + +// ExecuteSubstitution executes the message corresponding to the substitution +// as encoded by EncodeSubstitution. +func (d *Decoder) ExecuteSubstitution() { + switch x := d.DecodeUint(); x { + case substituteVar: + offset := d.DecodeUint() + d.executeMessageFromData(d.vars[offset:]) + case substituteMacro: + name := d.DecodeString() + data, ok := d.macros.Lookup(name) + old := d.macroArg + // TODO: support macros of arity other than 1. + d.macroArg = int(d.DecodeUint()) + switch { + case !ok: + // TODO: detect this at creation time. + d.setError(fmt.Errorf("catmsg: undefined macro %q", name)) + fallthrough + case !d.execute(data): + d.dst.Render(name) // fall back to macro name. + } + d.macroArg = old + case substituteError: + d.dst.Render(d.DecodeString()) + default: + panic("catmsg: unreachable") + } +} diff --git a/hack/tools/vendor/golang.org/x/text/internal/catmsg/varint.go b/hack/tools/vendor/golang.org/x/text/internal/catmsg/varint.go new file mode 100644 index 0000000000..a2cee2cf5b --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/internal/catmsg/varint.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package catmsg + +// This file implements varint encoding analogous to the one in encoding/binary. +// We need a string version of this function, so we add that here and then add +// the rest for consistency. + +import "errors" + +var ( + errIllegalVarint = errors.New("catmsg: illegal varint") + errVarintTooLarge = errors.New("catmsg: varint too large for uint64") +) + +const maxVarintBytes = 10 // maximum length of a varint + +// encodeUint encodes x as a variable-sized integer into buf and returns the +// number of bytes written. buf must be at least maxVarintBytes long +func encodeUint(buf []byte, x uint64) (n int) { + for ; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return n +} + +func decodeUintString(s string) (x uint64, size int, err error) { + i := 0 + for shift := uint(0); shift < 64; shift += 7 { + if i >= len(s) { + return 0, i, errIllegalVarint + } + b := uint64(s[i]) + i++ + x |= (b & 0x7F) << shift + if b&0x80 == 0 { + return x, i, nil + } + } + return 0, i, errVarintTooLarge +} + +func decodeUint(b []byte) (x uint64, size int, err error) { + i := 0 + for shift := uint(0); shift < 64; shift += 7 { + if i >= len(b) { + return 0, i, errIllegalVarint + } + c := uint64(b[i]) + i++ + x |= (c & 0x7F) << shift + if c&0x80 == 0 { + return x, i, nil + } + } + return 0, i, errVarintTooLarge +} diff --git a/hack/tools/vendor/golang.org/x/text/internal/format/format.go b/hack/tools/vendor/golang.org/x/text/internal/format/format.go new file mode 100644 index 0000000000..ee1c57a3c5 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/internal/format/format.go @@ -0,0 +1,41 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package format contains types for defining language-specific formatting of
+// values.
+//
+// This package is internal now, but will eventually be exposed after the API
+// settles.
+package format // import "golang.org/x/text/internal/format"
+
+import (
+	"fmt"
+
+	"golang.org/x/text/language"
+)
+
+// State represents the printer state passed to custom formatters. It provides
+// access to the fmt.State interface and the sentence and language-related
+// context.
+type State interface {
+	fmt.State
+
+	// Language reports the requested language in which to render a message.
+	Language() language.Tag
+
+	// TODO: consider this and removing rune from the Format method in the
+	// Formatter interface.
+	//
+	// Verb returns the format variant to render, analogous to the types used
+	// in fmt. Use 'v' for the default or only variant.
+	// Verb() rune
+
+	// TODO: more info:
+	// - sentence context such as linguistic features passed by the translator.
+}
+
+// Formatter is analogous to fmt.Formatter.
+type Formatter interface {
+	Format(state State, verb rune)
+}
diff --git a/hack/tools/vendor/golang.org/x/text/internal/format/parser.go b/hack/tools/vendor/golang.org/x/text/internal/format/parser.go
new file mode 100644
index 0000000000..855aed71db
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/text/internal/format/parser.go
@@ -0,0 +1,358 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package format
+
+import (
+	"reflect"
+	"unicode/utf8"
+)
+
+// A Parser parses a format string. The results from the parse are set in the
+// struct fields.
+type Parser struct {
+	Verb rune
+
+	WidthPresent bool
+	PrecPresent  bool
+	Minus        bool
+	Plus         bool
+	Sharp        bool
+	Space        bool
+	Zero         bool
+
+	// For the formats %+v %#v, we set the plusV/sharpV flags
+	// and clear the plus/sharp flags since %+v and %#v are in effect
+	// different, flagless formats set at the top level.
+	PlusV  bool
+	SharpV bool
+
+	HasIndex bool
+
+	Width int
+	Prec  int // precision
+
+	// retain arguments across calls.
+	Args []interface{}
+	// retain current argument number across calls
+	ArgNum int
+
+	// reordered records whether the format string used argument reordering.
+	Reordered bool
+	// goodArgNum records whether the most recent reordering directive was valid.
+	goodArgNum bool
+
+	// position info
+	format   string
+	startPos int
+	endPos   int
+	Status   Status
+}
+
+// Reset initializes a parser to scan format strings for the given args.
+func (p *Parser) Reset(args []interface{}) {
+	p.Args = args
+	p.ArgNum = 0
+	p.startPos = 0
+	p.Reordered = false
+}
+
+// Text returns the part of the format string that was parsed by the last call
+// to Scan. It returns the original substitution clause if the current scan
+// parsed a substitution.
+func (p *Parser) Text() string { return p.format[p.startPos:p.endPos] }
+
+// SetFormat sets a new format string to parse. It does not reset the argument
+// count.
+func (p *Parser) SetFormat(format string) {
+	p.format = format
+	p.startPos = 0
+	p.endPos = 0
+}
+
+// Status indicates the result type of a call to Scan.
+type Status int
+
+const (
+	StatusText Status = iota
+	StatusSubstitution
+	StatusBadWidthSubstitution
+	StatusBadPrecSubstitution
+	StatusNoVerb
+	StatusBadArgNum
+	StatusMissingArg
+)
+
+// ClearFlags resets the parser to default behavior.
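+//
+// An illustrative Scan loop (not part of the upstream file) alternates
+// between literal text and substitutions:
+//
+//	p := Parser{}
+//	p.Reset([]interface{}{42, "items"})
+//	p.SetFormat("%d %s")
+//	for p.Scan() {
+//		switch p.Status {
+//		case StatusText:
+//			_ = p.Text() // the literal " " between the two verbs
+//		case StatusSubstitution:
+//			_ = p.Verb // 'd' on the first iteration, 's' on the last
+//		}
+//	}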
+func (p *Parser) ClearFlags() { + p.WidthPresent = false + p.PrecPresent = false + p.Minus = false + p.Plus = false + p.Sharp = false + p.Space = false + p.Zero = false + + p.PlusV = false + p.SharpV = false + + p.HasIndex = false +} + +// Scan scans the next part of the format string and sets the status to +// indicate whether it scanned a string literal, substitution or error. +func (p *Parser) Scan() bool { + p.Status = StatusText + format := p.format + end := len(format) + if p.endPos >= end { + return false + } + afterIndex := false // previous item in format was an index like [3]. + + p.startPos = p.endPos + p.goodArgNum = true + i := p.startPos + for i < end && format[i] != '%' { + i++ + } + if i > p.startPos { + p.endPos = i + return true + } + // Process one verb + i++ + + p.Status = StatusSubstitution + + // Do we have flags? + p.ClearFlags() + +simpleFormat: + for ; i < end; i++ { + c := p.format[i] + switch c { + case '#': + p.Sharp = true + case '0': + p.Zero = !p.Minus // Only allow zero padding to the left. + case '+': + p.Plus = true + case '-': + p.Minus = true + p.Zero = false // Do not pad with zeros to the right. + case ' ': + p.Space = true + default: + // Fast path for common case of ascii lower case simple verbs + // without precision or width or argument indices. + if 'a' <= c && c <= 'z' && p.ArgNum < len(p.Args) { + if c == 'v' { + // Go syntax + p.SharpV = p.Sharp + p.Sharp = false + // Struct-field syntax + p.PlusV = p.Plus + p.Plus = false + } + p.Verb = rune(c) + p.ArgNum++ + p.endPos = i + 1 + return true + } + // Format is more complex than simple flags and a verb or is malformed. + break simpleFormat + } + } + + // Do we have an explicit argument index? + i, afterIndex = p.updateArgNumber(format, i) + + // Do we have width? + if i < end && format[i] == '*' { + i++ + p.Width, p.WidthPresent = p.intFromArg() + + if !p.WidthPresent { + p.Status = StatusBadWidthSubstitution + } + + // We have a negative width, so take its value and ensure + // that the minus flag is set + if p.Width < 0 { + p.Width = -p.Width + p.Minus = true + p.Zero = false // Do not pad with zeros to the right. + } + afterIndex = false + } else { + p.Width, p.WidthPresent, i = parsenum(format, i, end) + if afterIndex && p.WidthPresent { // "%[3]2d" + p.goodArgNum = false + } + } + + // Do we have precision? + if i+1 < end && format[i] == '.' { + i++ + if afterIndex { // "%[3].2d" + p.goodArgNum = false + } + i, afterIndex = p.updateArgNumber(format, i) + if i < end && format[i] == '*' { + i++ + p.Prec, p.PrecPresent = p.intFromArg() + // Negative precision arguments don't make sense + if p.Prec < 0 { + p.Prec = 0 + p.PrecPresent = false + } + if !p.PrecPresent { + p.Status = StatusBadPrecSubstitution + } + afterIndex = false + } else { + p.Prec, p.PrecPresent, i = parsenum(format, i, end) + if !p.PrecPresent { + p.Prec = 0 + p.PrecPresent = true + } + } + } + + if !afterIndex { + i, afterIndex = p.updateArgNumber(format, i) + } + p.HasIndex = afterIndex + + if i >= end { + p.endPos = i + p.Status = StatusNoVerb + return true + } + + verb, w := utf8.DecodeRuneInString(format[i:]) + p.endPos = i + w + p.Verb = verb + + switch { + case verb == '%': // Percent does not absorb operands and ignores f.wid and f.prec. + p.startPos = p.endPos - 1 + p.Status = StatusText + case !p.goodArgNum: + p.Status = StatusBadArgNum + case p.ArgNum >= len(p.Args): // No argument left over to print for the current verb. 
+ p.Status = StatusMissingArg + p.ArgNum++ + case verb == 'v': + // Go syntax + p.SharpV = p.Sharp + p.Sharp = false + // Struct-field syntax + p.PlusV = p.Plus + p.Plus = false + fallthrough + default: + p.ArgNum++ + } + return true +} + +// intFromArg gets the ArgNumth element of Args. On return, isInt reports +// whether the argument has integer type. +func (p *Parser) intFromArg() (num int, isInt bool) { + if p.ArgNum < len(p.Args) { + arg := p.Args[p.ArgNum] + num, isInt = arg.(int) // Almost always OK. + if !isInt { + // Work harder. + switch v := reflect.ValueOf(arg); v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := v.Int() + if int64(int(n)) == n { + num = int(n) + isInt = true + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n := v.Uint() + if int64(n) >= 0 && uint64(int(n)) == n { + num = int(n) + isInt = true + } + default: + // Already 0, false. + } + } + p.ArgNum++ + if tooLarge(num) { + num = 0 + isInt = false + } + } + return +} + +// parseArgNumber returns the value of the bracketed number, minus 1 +// (explicit argument numbers are one-indexed but we want zero-indexed). +// The opening bracket is known to be present at format[0]. +// The returned values are the index, the number of bytes to consume +// up to the closing paren, if present, and whether the number parsed +// ok. The bytes to consume will be 1 if no closing paren is present. +func parseArgNumber(format string) (index int, wid int, ok bool) { + // There must be at least 3 bytes: [n]. + if len(format) < 3 { + return 0, 1, false + } + + // Find closing bracket. + for i := 1; i < len(format); i++ { + if format[i] == ']' { + width, ok, newi := parsenum(format, 1, i) + if !ok || newi != i { + return 0, i + 1, false + } + return width - 1, i + 1, true // arg numbers are one-indexed and skip paren. + } + } + return 0, 1, false +} + +// updateArgNumber returns the next argument to evaluate, which is either the value of the passed-in +// argNum or the value of the bracketed integer that begins format[i:]. It also returns +// the new value of i, that is, the index of the next byte of the format to process. +func (p *Parser) updateArgNumber(format string, i int) (newi int, found bool) { + if len(format) <= i || format[i] != '[' { + return i, false + } + p.Reordered = true + index, wid, ok := parseArgNumber(format[i:]) + if ok && 0 <= index && index < len(p.Args) { + p.ArgNum = index + return i + wid, true + } + p.goodArgNum = false + return i + wid, ok +} + +// tooLarge reports whether the magnitude of the integer is +// too large to be used as a formatting width or precision. +func tooLarge(x int) bool { + const max int = 1e6 + return x > max || x < -max +} + +// parsenum converts ASCII to integer. num is 0 (and isnum is false) if no number present. +func parsenum(s string, start, end int) (num int, isnum bool, newi int) { + if start >= end { + return 0, false, end + } + for newi = start; newi < end && '0' <= s[newi] && s[newi] <= '9'; newi++ { + if tooLarge(num) { + return 0, false, end // Overflow; crazy long number most likely. 
+ } + num = num*10 + int(s[newi]-'0') + isnum = true + } + return +} diff --git a/hack/tools/vendor/golang.org/x/text/internal/number/common.go b/hack/tools/vendor/golang.org/x/text/internal/number/common.go new file mode 100644 index 0000000000..a6e9c8e0d5 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/internal/number/common.go @@ -0,0 +1,55 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package number + +import ( + "unicode/utf8" + + "golang.org/x/text/internal/language/compact" +) + +// A system identifies a CLDR numbering system. +type system byte + +type systemData struct { + id system + digitSize byte // number of UTF-8 bytes per digit + zero [utf8.UTFMax]byte // UTF-8 sequence of zero digit. +} + +// A SymbolType identifies a symbol of a specific kind. +type SymbolType int + +const ( + SymDecimal SymbolType = iota + SymGroup + SymList + SymPercentSign + SymPlusSign + SymMinusSign + SymExponential + SymSuperscriptingExponent + SymPerMille + SymInfinity + SymNan + SymTimeSeparator + + NumSymbolTypes +) + +const hasNonLatnMask = 0x8000 + +// symOffset is an offset into altSymData if the bit indicated by hasNonLatnMask +// is not 0 (with this bit masked out), and an offset into symIndex otherwise. +// +// TODO: this type can be a byte again if we use an indirection into altsymData +// and introduce an alt -> offset slice (the length of this will be number of +// alternatives plus 1). This also allows getting rid of the compactTag field +// in altSymData. In total this will save about 1K. +type symOffset uint16 + +type altSymData struct { + compactTag compact.ID + symIndex symOffset + system system +} diff --git a/hack/tools/vendor/golang.org/x/text/internal/number/decimal.go b/hack/tools/vendor/golang.org/x/text/internal/number/decimal.go new file mode 100644 index 0000000000..e128cf3437 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/internal/number/decimal.go @@ -0,0 +1,500 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate stringer -type RoundingMode + +package number + +import ( + "math" + "strconv" +) + +// RoundingMode determines how a number is rounded to the desired precision. +type RoundingMode byte + +const ( + ToNearestEven RoundingMode = iota // towards the nearest integer, or towards an even number if equidistant. + ToNearestZero // towards the nearest integer, or towards zero if equidistant. + ToNearestAway // towards the nearest integer, or away from zero if equidistant. + ToPositiveInf // towards infinity + ToNegativeInf // towards negative infinity + ToZero // towards zero + AwayFromZero // away from zero + numModes +) + +const maxIntDigits = 20 + +// A Decimal represents a floating point number in decimal format. +// Digits represents a number [0, 1.0), and the absolute value represented by +// Decimal is Digits * 10^Exp. Leading and trailing zeros may be omitted and Exp +// may point outside a valid position in Digits. 
+// +// Examples: +// +// Number Decimal +// 12345 Digits: [1, 2, 3, 4, 5], Exp: 5 +// 12.345 Digits: [1, 2, 3, 4, 5], Exp: 2 +// 12000 Digits: [1, 2], Exp: 5 +// 12000.00 Digits: [1, 2], Exp: 5 +// 0.00123 Digits: [1, 2, 3], Exp: -2 +// 0 Digits: [], Exp: 0 +type Decimal struct { + digits + + buf [maxIntDigits]byte +} + +type digits struct { + Digits []byte // mantissa digits, big-endian + Exp int32 // exponent + Neg bool + Inf bool // Takes precedence over Digits and Exp. + NaN bool // Takes precedence over Inf. +} + +// Digits represents a floating point number represented in digits of the +// base in which a number is to be displayed. It is similar to Decimal, but +// keeps track of trailing fraction zeros and the comma placement for +// engineering notation. Digits must have at least one digit. +// +// Examples: +// +// Number Decimal +// decimal +// 12345 Digits: [1, 2, 3, 4, 5], Exp: 5 End: 5 +// 12.345 Digits: [1, 2, 3, 4, 5], Exp: 2 End: 5 +// 12000 Digits: [1, 2], Exp: 5 End: 5 +// 12000.00 Digits: [1, 2], Exp: 5 End: 7 +// 0.00123 Digits: [1, 2, 3], Exp: -2 End: 3 +// 0 Digits: [], Exp: 0 End: 1 +// scientific (actual exp is Exp - Comma) +// 0e0 Digits: [0], Exp: 1, End: 1, Comma: 1 +// .0e0 Digits: [0], Exp: 0, End: 1, Comma: 0 +// 0.0e0 Digits: [0], Exp: 1, End: 2, Comma: 1 +// 1.23e4 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 1 +// .123e5 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 0 +// engineering +// 12.3e3 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 2 +type Digits struct { + digits + // End indicates the end position of the number. + End int32 // For decimals Exp <= End. For scientific len(Digits) <= End. + // Comma is used for the comma position for scientific (always 0 or 1) and + // engineering notation (always 0, 1, 2, or 3). + Comma uint8 + // IsScientific indicates whether this number is to be rendered as a + // scientific number. + IsScientific bool +} + +func (d *Digits) NumFracDigits() int { + if d.Exp >= d.End { + return 0 + } + return int(d.End - d.Exp) +} + +// normalize returns a new Decimal with leading and trailing zeros removed. +func (d *Decimal) normalize() (n Decimal) { + n = *d + b := n.Digits + // Strip leading zeros. Resulting number of digits is significant digits. + for len(b) > 0 && b[0] == 0 { + b = b[1:] + n.Exp-- + } + // Strip trailing zeros + for len(b) > 0 && b[len(b)-1] == 0 { + b = b[:len(b)-1] + } + if len(b) == 0 { + n.Exp = 0 + } + n.Digits = b + return n +} + +func (d *Decimal) clear() { + b := d.Digits + if b == nil { + b = d.buf[:0] + } + *d = Decimal{} + d.Digits = b[:0] +} + +func (x *Decimal) String() string { + if x.NaN { + return "NaN" + } + var buf []byte + if x.Neg { + buf = append(buf, '-') + } + if x.Inf { + buf = append(buf, "Inf"...) + return string(buf) + } + switch { + case len(x.Digits) == 0: + buf = append(buf, '0') + case x.Exp <= 0: + // 0.00ddd + buf = append(buf, "0."...) + buf = appendZeros(buf, -int(x.Exp)) + buf = appendDigits(buf, x.Digits) + + case /* 0 < */ int(x.Exp) < len(x.Digits): + // dd.ddd + buf = appendDigits(buf, x.Digits[:x.Exp]) + buf = append(buf, '.') + buf = appendDigits(buf, x.Digits[x.Exp:]) + + default: // len(x.Digits) <= x.Exp + // ddd00 + buf = appendDigits(buf, x.Digits) + buf = appendZeros(buf, int(x.Exp)-len(x.Digits)) + } + return string(buf) +} + +func appendDigits(buf []byte, digits []byte) []byte { + for _, c := range digits { + buf = append(buf, c+'0') + } + return buf +} + +// appendZeros appends n 0 digits to buf and returns buf. 
+func appendZeros(buf []byte, n int) []byte { + for ; n > 0; n-- { + buf = append(buf, '0') + } + return buf +} + +func (d *digits) round(mode RoundingMode, n int) { + if n >= len(d.Digits) { + return + } + // Make rounding decision: The result mantissa is truncated ("rounded down") + // by default. Decide if we need to increment, or "round up", the (unsigned) + // mantissa. + inc := false + switch mode { + case ToNegativeInf: + inc = d.Neg + case ToPositiveInf: + inc = !d.Neg + case ToZero: + // nothing to do + case AwayFromZero: + inc = true + case ToNearestEven: + inc = d.Digits[n] > 5 || d.Digits[n] == 5 && + (len(d.Digits) > n+1 || n == 0 || d.Digits[n-1]&1 != 0) + case ToNearestAway: + inc = d.Digits[n] >= 5 + case ToNearestZero: + inc = d.Digits[n] > 5 || d.Digits[n] == 5 && len(d.Digits) > n+1 + default: + panic("unreachable") + } + if inc { + d.roundUp(n) + } else { + d.roundDown(n) + } +} + +// roundFloat rounds a floating point number. +func (r RoundingMode) roundFloat(x float64) float64 { + // Make rounding decision: The result mantissa is truncated ("rounded down") + // by default. Decide if we need to increment, or "round up", the (unsigned) + // mantissa. + abs := x + if x < 0 { + abs = -x + } + i, f := math.Modf(abs) + if f == 0.0 { + return x + } + inc := false + switch r { + case ToNegativeInf: + inc = x < 0 + case ToPositiveInf: + inc = x >= 0 + case ToZero: + // nothing to do + case AwayFromZero: + inc = true + case ToNearestEven: + // TODO: check overflow + inc = f > 0.5 || f == 0.5 && int64(i)&1 != 0 + case ToNearestAway: + inc = f >= 0.5 + case ToNearestZero: + inc = f > 0.5 + default: + panic("unreachable") + } + if inc { + i += 1 + } + if abs != x { + i = -i + } + return i +} + +func (x *digits) roundUp(n int) { + if n < 0 || n >= len(x.Digits) { + return // nothing to do + } + // find first digit < 9 + for n > 0 && x.Digits[n-1] >= 9 { + n-- + } + + if n == 0 { + // all digits are 9s => round up to 1 and update exponent + x.Digits[0] = 1 // ok since len(x.Digits) > n + x.Digits = x.Digits[:1] + x.Exp++ + return + } + x.Digits[n-1]++ + x.Digits = x.Digits[:n] + // x already trimmed +} + +func (x *digits) roundDown(n int) { + if n < 0 || n >= len(x.Digits) { + return // nothing to do + } + x.Digits = x.Digits[:n] + trim(x) +} + +// trim cuts off any trailing zeros from x's mantissa; +// they are meaningless for the value of x. +func trim(x *digits) { + i := len(x.Digits) + for i > 0 && x.Digits[i-1] == 0 { + i-- + } + x.Digits = x.Digits[:i] + if i == 0 { + x.Exp = 0 + } +} + +// A Converter converts a number into decimals according to the given rounding +// criteria. +type Converter interface { + Convert(d *Decimal, r RoundingContext) +} + +const ( + signed = true + unsigned = false +) + +// Convert converts the given number to the decimal representation using the +// supplied RoundingContext. 
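// Illustrative aside, not part of the vendored file: on plain floats the
// two most common tie-breaking modes above have stdlib counterparts:
//
//	math.RoundToEven(2.5) == 2 // ToNearestEven: ties go to the even neighbor
//	math.RoundToEven(3.5) == 4
//	math.Round(2.5) == 3       // ToNearestAway: ties go away from zero
//	math.Round(-2.5) == -3
//
// (*digits).round makes the same decision on a digit slice, testing
// d.Digits[n], the first digit cut off, against 5. A worked pass through
// the increment branch of ConvertFloat (below), rounding x = 1.234 to the
// nearest 0.05 (Increment = 5, IncrementScale = 2):
//
//	mult = scales[2] = 100
//	1.234 * 100 / 5 = 24.68 -> rounds to 25 -> 25 * 5 / 100 = 1.25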
+func (d *Decimal) Convert(r RoundingContext, number interface{}) { + switch f := number.(type) { + case Converter: + d.clear() + f.Convert(d, r) + case float32: + d.ConvertFloat(r, float64(f), 32) + case float64: + d.ConvertFloat(r, f, 64) + case int: + d.ConvertInt(r, signed, uint64(f)) + case int8: + d.ConvertInt(r, signed, uint64(f)) + case int16: + d.ConvertInt(r, signed, uint64(f)) + case int32: + d.ConvertInt(r, signed, uint64(f)) + case int64: + d.ConvertInt(r, signed, uint64(f)) + case uint: + d.ConvertInt(r, unsigned, uint64(f)) + case uint8: + d.ConvertInt(r, unsigned, uint64(f)) + case uint16: + d.ConvertInt(r, unsigned, uint64(f)) + case uint32: + d.ConvertInt(r, unsigned, uint64(f)) + case uint64: + d.ConvertInt(r, unsigned, f) + + default: + d.NaN = true + // TODO: + // case string: if produced by strconv, allows for easy arbitrary pos. + // case reflect.Value: + // case big.Float + // case big.Int + // case big.Rat? + // catch underlyings using reflect or will this already be done by the + // message package? + } +} + +// ConvertInt converts an integer to decimals. +func (d *Decimal) ConvertInt(r RoundingContext, signed bool, x uint64) { + if r.Increment > 0 { + // TODO: if uint64 is too large, fall back to float64 + if signed { + d.ConvertFloat(r, float64(int64(x)), 64) + } else { + d.ConvertFloat(r, float64(x), 64) + } + return + } + d.clear() + if signed && int64(x) < 0 { + x = uint64(-int64(x)) + d.Neg = true + } + d.fillIntDigits(x) + d.Exp = int32(len(d.Digits)) +} + +// ConvertFloat converts a floating point number to decimals. +func (d *Decimal) ConvertFloat(r RoundingContext, x float64, size int) { + d.clear() + if math.IsNaN(x) { + d.NaN = true + return + } + // Simple case: decimal notation + if r.Increment > 0 { + scale := int(r.IncrementScale) + mult := 1.0 + if scale >= len(scales) { + mult = math.Pow(10, float64(scale)) + } else { + mult = scales[scale] + } + // We multiply x instead of dividing inc as it gives less rounding + // issues. + x *= mult + x /= float64(r.Increment) + x = r.Mode.roundFloat(x) + x *= float64(r.Increment) + x /= mult + } + + abs := x + if x < 0 { + d.Neg = true + abs = -x + } + if math.IsInf(abs, 1) { + d.Inf = true + return + } + + // By default we get the exact decimal representation. + verb := byte('g') + prec := -1 + // As the strconv API does not return the rounding accuracy, we can only + // round using ToNearestEven. + if r.Mode == ToNearestEven { + if n := r.RoundSignificantDigits(); n >= 0 { + prec = n + } else if n = r.RoundFractionDigits(); n >= 0 { + prec = n + verb = 'f' + } + } else { + // TODO: At this point strconv's rounding is imprecise to the point that + // it is not usable for this purpose. + // See https://github.com/golang/go/issues/21714 + // If rounding is requested, we ask for a large number of digits and + // round from there to simulate rounding only once. + // Ideally we would have strconv export an AppendDigits that would take + // a rounding mode and/or return an accuracy. Something like this would + // work: + // AppendDigits(dst []byte, x float64, base, size, prec int) (digits []byte, exp, accuracy int) + hasPrec := r.RoundSignificantDigits() >= 0 + hasScale := r.RoundFractionDigits() >= 0 + if hasPrec || hasScale { + // prec is the number of mantissa bits plus some extra for safety. + // We need at least the number of mantissa bits as decimals to + // accurately represent the floating point without rounding, as each + // bit requires one more decimal to represent: 0.5, 0.25, 0.125, ... 
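// Illustrative aside, not part of the vendored file: a float64 mantissa
// has 53 significant bits, so 60 decimal digits comfortably cover the
// exact decimal expansion before the single rounding step below.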
+ prec = 60 + } + } + + b := strconv.AppendFloat(d.Digits[:0], abs, verb, prec, size) + i := 0 + k := 0 + beforeDot := 1 + for i < len(b) { + if c := b[i]; '0' <= c && c <= '9' { + b[k] = c - '0' + k++ + d.Exp += int32(beforeDot) + } else if c == '.' { + beforeDot = 0 + d.Exp = int32(k) + } else { + break + } + i++ + } + d.Digits = b[:k] + if i != len(b) { + i += len("e") + pSign := i + exp := 0 + for i++; i < len(b); i++ { + exp *= 10 + exp += int(b[i] - '0') + } + if b[pSign] == '-' { + exp = -exp + } + d.Exp = int32(exp) + 1 + } +} + +func (d *Decimal) fillIntDigits(x uint64) { + if cap(d.Digits) < maxIntDigits { + d.Digits = d.buf[:] + } else { + d.Digits = d.buf[:maxIntDigits] + } + i := 0 + for ; x > 0; x /= 10 { + d.Digits[i] = byte(x % 10) + i++ + } + d.Digits = d.Digits[:i] + for p := 0; p < i; p++ { + i-- + d.Digits[p], d.Digits[i] = d.Digits[i], d.Digits[p] + } +} + +var scales [70]float64 + +func init() { + x := 1.0 + for i := range scales { + scales[i] = x + x *= 10 + } +} diff --git a/hack/tools/vendor/golang.org/x/text/internal/number/format.go b/hack/tools/vendor/golang.org/x/text/internal/number/format.go new file mode 100644 index 0000000000..cd94c5dc4e --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/internal/number/format.go @@ -0,0 +1,535 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package number + +import ( + "strconv" + "unicode/utf8" + + "golang.org/x/text/language" +) + +// TODO: +// - grouping of fractions +// - allow user-defined superscript notation (such as 4) +// - same for non-breaking spaces, like   + +// A VisibleDigits computes digits, comma placement and trailing zeros as they +// will be shown to the user. +type VisibleDigits interface { + Digits(buf []byte, t language.Tag, scale int) Digits + // TODO: Do we also need to add the verb or pass a format.State? +} + +// Formatting proceeds along the following lines: +// 0) Compose rounding information from format and context. +// 1) Convert a number into a Decimal. +// 2) Sanitize Decimal by adding trailing zeros, removing leading digits, and +// (non-increment) rounding. The Decimal that results from this is suitable +// for determining the plural form. +// 3) Render the Decimal in the localized form. + +// Formatter contains all the information needed to render a number. +type Formatter struct { + Pattern + Info +} + +func (f *Formatter) init(t language.Tag, index []uint8) { + f.Info = InfoFromTag(t) + f.Pattern = formats[index[tagToID(t)]] +} + +// InitPattern initializes a Formatter for the given Pattern. +func (f *Formatter) InitPattern(t language.Tag, pat *Pattern) { + f.Info = InfoFromTag(t) + f.Pattern = *pat +} + +// InitDecimal initializes a Formatter using the default Pattern for the given +// language. +func (f *Formatter) InitDecimal(t language.Tag) { + f.init(t, tagToDecimal) +} + +// InitScientific initializes a Formatter using the default Pattern for the +// given language. +func (f *Formatter) InitScientific(t language.Tag) { + f.init(t, tagToScientific) + f.Pattern.MinFractionDigits = 0 + f.Pattern.MaxFractionDigits = -1 +} + +// InitEngineering initializes a Formatter using the default Pattern for the +// given language. 
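// Illustrative aside, not part of the vendored file: end-to-end use of
// the pipeline described above, only possible from inside x/text since
// the package is internal; the exact output depends on the compiled CLDR
// data:
//
//	var f number.Formatter
//	f.InitDecimal(language.English)
//	out := f.Append(nil, 1234567.891)
//	// out should read "1,234,567.891" under the default en pattern "#,##0.###"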
+func (f *Formatter) InitEngineering(t language.Tag) { + f.init(t, tagToScientific) + f.Pattern.MinFractionDigits = 0 + f.Pattern.MaxFractionDigits = -1 + f.Pattern.MaxIntegerDigits = 3 + f.Pattern.MinIntegerDigits = 1 +} + +// InitPercent initializes a Formatter using the default Pattern for the given +// language. +func (f *Formatter) InitPercent(t language.Tag) { + f.init(t, tagToPercent) +} + +// InitPerMille initializes a Formatter using the default Pattern for the given +// language. +func (f *Formatter) InitPerMille(t language.Tag) { + f.init(t, tagToPercent) + f.Pattern.DigitShift = 3 +} + +func (f *Formatter) Append(dst []byte, x interface{}) []byte { + var d Decimal + r := f.RoundingContext + d.Convert(r, x) + return f.Render(dst, FormatDigits(&d, r)) +} + +func FormatDigits(d *Decimal, r RoundingContext) Digits { + if r.isScientific() { + return scientificVisibleDigits(r, d) + } + return decimalVisibleDigits(r, d) +} + +func (f *Formatter) Format(dst []byte, d *Decimal) []byte { + return f.Render(dst, FormatDigits(d, f.RoundingContext)) +} + +func (f *Formatter) Render(dst []byte, d Digits) []byte { + var result []byte + var postPrefix, preSuffix int + if d.IsScientific { + result, postPrefix, preSuffix = appendScientific(dst, f, &d) + } else { + result, postPrefix, preSuffix = appendDecimal(dst, f, &d) + } + if f.PadRune == 0 { + return result + } + width := int(f.FormatWidth) + if count := utf8.RuneCount(result); count < width { + insertPos := 0 + switch f.Flags & PadMask { + case PadAfterPrefix: + insertPos = postPrefix + case PadBeforeSuffix: + insertPos = preSuffix + case PadAfterSuffix: + insertPos = len(result) + } + num := width - count + pad := [utf8.UTFMax]byte{' '} + sz := 1 + if r := f.PadRune; r != 0 { + sz = utf8.EncodeRune(pad[:], r) + } + extra := sz * num + if n := len(result) + extra; n < cap(result) { + result = result[:n] + copy(result[insertPos+extra:], result[insertPos:]) + } else { + buf := make([]byte, n) + copy(buf, result[:insertPos]) + copy(buf[insertPos+extra:], result[insertPos:]) + result = buf + } + for ; num > 0; num-- { + insertPos += copy(result[insertPos:], pad[:sz]) + } + } + return result +} + +// decimalVisibleDigits converts d according to the RoundingContext. Note that +// the exponent may change as a result of this operation. +func decimalVisibleDigits(r RoundingContext, d *Decimal) Digits { + if d.NaN || d.Inf { + return Digits{digits: digits{Neg: d.Neg, NaN: d.NaN, Inf: d.Inf}} + } + n := Digits{digits: d.normalize().digits} + + exp := n.Exp + exp += int32(r.DigitShift) + + // Cap integer digits. Remove *most-significant* digits. + if r.MaxIntegerDigits > 0 { + if p := int(exp) - int(r.MaxIntegerDigits); p > 0 { + if p > len(n.Digits) { + p = len(n.Digits) + } + if n.Digits = n.Digits[p:]; len(n.Digits) == 0 { + exp = 0 + } else { + exp -= int32(p) + } + // Strip leading zeros. + for len(n.Digits) > 0 && n.Digits[0] == 0 { + n.Digits = n.Digits[1:] + exp-- + } + } + } + + // Rounding if not already done by Convert. 
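// Illustrative aside, not part of the vendored file: a trace of the
// rounding and End computation below for d = 1.5 with
// MinFractionDigits = 2 and MaxFractionDigits = -1 (unlimited):
//
//	normalized: Digits = [1 5], exp = 1
//	no maximum applies, so rounding leaves the digits alone
//	End = len(Digits) = 2; exp + MinFractionDigits = 3 > 2, so End = 3
//	appendDecimal later emits End - Exp = 2 fraction digits -> "1.50"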
+ p := len(n.Digits) + if maxSig := int(r.MaxSignificantDigits); maxSig > 0 { + p = maxSig + } + if maxFrac := int(r.MaxFractionDigits); maxFrac >= 0 { + if cap := int(exp) + maxFrac; cap < p { + p = int(exp) + maxFrac + } + if p < 0 { + p = 0 + } + } + n.round(r.Mode, p) + + // set End (trailing zeros) + n.End = int32(len(n.Digits)) + if n.End == 0 { + exp = 0 + if r.MinFractionDigits > 0 { + n.End = int32(r.MinFractionDigits) + } + if p := int32(r.MinSignificantDigits) - 1; p > n.End { + n.End = p + } + } else { + if end := exp + int32(r.MinFractionDigits); end > n.End { + n.End = end + } + if n.End < int32(r.MinSignificantDigits) { + n.End = int32(r.MinSignificantDigits) + } + } + n.Exp = exp + return n +} + +// appendDecimal appends a formatted number to dst. It returns two possible +// insertion points for padding. +func appendDecimal(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, preSuf int) { + if dst, ok := f.renderSpecial(dst, n); ok { + return dst, 0, len(dst) + } + digits := n.Digits + exp := n.Exp + + // Split in integer and fraction part. + var intDigits, fracDigits []byte + numInt := 0 + numFrac := int(n.End - n.Exp) + if exp > 0 { + numInt = int(exp) + if int(exp) >= len(digits) { // ddddd | ddddd00 + intDigits = digits + } else { // ddd.dd + intDigits = digits[:exp] + fracDigits = digits[exp:] + } + } else { + fracDigits = digits + } + + neg := n.Neg + affix, suffix := f.getAffixes(neg) + dst = appendAffix(dst, f, affix, neg) + savedLen := len(dst) + + minInt := int(f.MinIntegerDigits) + if minInt == 0 && f.MinSignificantDigits > 0 { + minInt = 1 + } + // add leading zeros + for i := minInt; i > numInt; i-- { + dst = f.AppendDigit(dst, 0) + if f.needsSep(i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + i := 0 + for ; i < len(intDigits); i++ { + dst = f.AppendDigit(dst, intDigits[i]) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + for ; i < numInt; i++ { + dst = f.AppendDigit(dst, 0) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + + if numFrac > 0 || f.Flags&AlwaysDecimalSeparator != 0 { + dst = append(dst, f.Symbol(SymDecimal)...) + } + // Add trailing zeros + i = 0 + for n := -int(n.Exp); i < n; i++ { + dst = f.AppendDigit(dst, 0) + } + for _, d := range fracDigits { + i++ + dst = f.AppendDigit(dst, d) + } + for ; i < numFrac; i++ { + dst = f.AppendDigit(dst, 0) + } + return appendAffix(dst, f, suffix, neg), savedLen, len(dst) +} + +func scientificVisibleDigits(r RoundingContext, d *Decimal) Digits { + if d.NaN || d.Inf { + return Digits{digits: digits{Neg: d.Neg, NaN: d.NaN, Inf: d.Inf}} + } + n := Digits{digits: d.normalize().digits, IsScientific: true} + + // Normalize to have at least one digit. This simplifies engineering + // notation. + if len(n.Digits) == 0 { + n.Digits = append(n.Digits, 0) + n.Exp = 1 + } + + // Significant digits are transformed by the parser for scientific notation + // and do not need to be handled here. + maxInt, numInt := int(r.MaxIntegerDigits), int(r.MinIntegerDigits) + if numInt == 0 { + numInt = 1 + } + + // If a maximum number of integers is specified, the minimum must be 1 + // and the exponent is grouped by this number (e.g. for engineering) + if maxInt > numInt { + // Correct the exponent to reflect a single integer digit. 
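// Illustrative aside, not part of the vendored file: the exponent
// correction below, traced for 12345 in engineering notation
// (maxInt = 3, numInt reset to 1):
//
//	normalized: Digits = [1 2 3 4 5], Exp = 5
//	d = (5-1) % 3 = 1, so numInt becomes 1 + 1 = 2 (Comma = 2)
//	displayed exponent = Exp - Comma = 3 -> 12.345e3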
+ numInt = 1 + // engineering + // 0.01234 ([12345]e-1) -> 1.2345e-2 12.345e-3 + // 12345 ([12345]e+5) -> 1.2345e4 12.345e3 + d := int(n.Exp-1) % maxInt + if d < 0 { + d += maxInt + } + numInt += d + } + + p := len(n.Digits) + if maxSig := int(r.MaxSignificantDigits); maxSig > 0 { + p = maxSig + } + if maxFrac := int(r.MaxFractionDigits); maxFrac >= 0 && numInt+maxFrac < p { + p = numInt + maxFrac + } + n.round(r.Mode, p) + + n.Comma = uint8(numInt) + n.End = int32(len(n.Digits)) + if minSig := int32(r.MinFractionDigits) + int32(numInt); n.End < minSig { + n.End = minSig + } + return n +} + +// appendScientific appends a formatted number to dst. It returns two possible +// insertion points for padding. +func appendScientific(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, preSuf int) { + if dst, ok := f.renderSpecial(dst, n); ok { + return dst, 0, 0 + } + digits := n.Digits + numInt := int(n.Comma) + numFrac := int(n.End) - int(n.Comma) + + var intDigits, fracDigits []byte + if numInt <= len(digits) { + intDigits = digits[:numInt] + fracDigits = digits[numInt:] + } else { + intDigits = digits + } + neg := n.Neg + affix, suffix := f.getAffixes(neg) + dst = appendAffix(dst, f, affix, neg) + savedLen := len(dst) + + i := 0 + for ; i < len(intDigits); i++ { + dst = f.AppendDigit(dst, intDigits[i]) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + for ; i < numInt; i++ { + dst = f.AppendDigit(dst, 0) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + + if numFrac > 0 || f.Flags&AlwaysDecimalSeparator != 0 { + dst = append(dst, f.Symbol(SymDecimal)...) + } + i = 0 + for ; i < len(fracDigits); i++ { + dst = f.AppendDigit(dst, fracDigits[i]) + } + for ; i < numFrac; i++ { + dst = f.AppendDigit(dst, 0) + } + + // exp + buf := [12]byte{} + // TODO: use exponential if superscripting is not available (no Latin + // numbers or no tags) and use exponential in all other cases. + exp := n.Exp - int32(n.Comma) + exponential := f.Symbol(SymExponential) + if exponential == "E" { + dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE + dst = append(dst, f.Symbol(SymSuperscriptingExponent)...) + dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE + dst = f.AppendDigit(dst, 1) + dst = f.AppendDigit(dst, 0) + switch { + case exp < 0: + dst = append(dst, superMinus...) + exp = -exp + case f.Flags&AlwaysExpSign != 0: + dst = append(dst, superPlus...) + } + b = strconv.AppendUint(buf[:0], uint64(exp), 10) + for i := len(b); i < int(f.MinExponentDigits); i++ { + dst = append(dst, superDigits[0]...) + } + for _, c := range b { + dst = append(dst, superDigits[c-'0']...) + } + } else { + dst = append(dst, exponential...) + switch { + case exp < 0: + dst = append(dst, f.Symbol(SymMinusSign)...) + exp = -exp + case f.Flags&AlwaysExpSign != 0: + dst = append(dst, f.Symbol(SymPlusSign)...) + } + b = strconv.AppendUint(buf[:0], uint64(exp), 10) + for i := len(b); i < int(f.MinExponentDigits); i++ { + dst = f.AppendDigit(dst, 0) + } + for _, c := range b { + dst = f.AppendDigit(dst, c-'0') + } + } + return appendAffix(dst, f, suffix, neg), savedLen, len(dst) +} + +const ( + superMinus = "\u207B" // SUPERSCRIPT HYPHEN-MINUS + superPlus = "\u207A" // SUPERSCRIPT PLUS SIGN +) + +var ( + // Note: the digits are not sequential!!! 
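// Illustrative aside, not part of the vendored file: Unicode inherited
// SUPERSCRIPT ONE/TWO/THREE (U+00B9, U+00B2, U+00B3) from Latin-1, while
// superscript zero and four through nine live in the U+2070 block
// (U+2070, U+2074..U+2079), so the digits cannot be derived from a single
// base code point and an explicit table is needed.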
+ superDigits = []string{ + "\u2070", // SUPERSCRIPT DIGIT ZERO + "\u00B9", // SUPERSCRIPT DIGIT ONE + "\u00B2", // SUPERSCRIPT DIGIT TWO + "\u00B3", // SUPERSCRIPT DIGIT THREE + "\u2074", // SUPERSCRIPT DIGIT FOUR + "\u2075", // SUPERSCRIPT DIGIT FIVE + "\u2076", // SUPERSCRIPT DIGIT SIX + "\u2077", // SUPERSCRIPT DIGIT SEVEN + "\u2078", // SUPERSCRIPT DIGIT EIGHT + "\u2079", // SUPERSCRIPT DIGIT NINE + } +) + +func (f *Formatter) getAffixes(neg bool) (affix, suffix string) { + str := f.Affix + if str != "" { + if f.NegOffset > 0 { + if neg { + str = str[f.NegOffset:] + } else { + str = str[:f.NegOffset] + } + } + sufStart := 1 + str[0] + affix = str[1:sufStart] + suffix = str[sufStart+1:] + } + // TODO: introduce a NeedNeg sign to indicate if the left pattern already + // has a sign marked? + if f.NegOffset == 0 && (neg || f.Flags&AlwaysSign != 0) { + affix = "-" + affix + } + return affix, suffix +} + +func (f *Formatter) renderSpecial(dst []byte, d *Digits) (b []byte, ok bool) { + if d.NaN { + return fmtNaN(dst, f), true + } + if d.Inf { + return fmtInfinite(dst, f, d), true + } + return dst, false +} + +func fmtNaN(dst []byte, f *Formatter) []byte { + return append(dst, f.Symbol(SymNan)...) +} + +func fmtInfinite(dst []byte, f *Formatter, d *Digits) []byte { + affix, suffix := f.getAffixes(d.Neg) + dst = appendAffix(dst, f, affix, d.Neg) + dst = append(dst, f.Symbol(SymInfinity)...) + dst = appendAffix(dst, f, suffix, d.Neg) + return dst +} + +func appendAffix(dst []byte, f *Formatter, affix string, neg bool) []byte { + quoting := false + escaping := false + for _, r := range affix { + switch { + case escaping: + // escaping occurs both inside and outside of quotes + dst = append(dst, string(r)...) + escaping = false + case r == '\\': + escaping = true + case r == '\'': + quoting = !quoting + case quoting: + dst = append(dst, string(r)...) + case r == '%': + if f.DigitShift == 3 { + dst = append(dst, f.Symbol(SymPerMille)...) + } else { + dst = append(dst, f.Symbol(SymPercentSign)...) + } + case r == '-' || r == '+': + if neg { + dst = append(dst, f.Symbol(SymMinusSign)...) + } else if f.Flags&ElideSign == 0 { + dst = append(dst, f.Symbol(SymPlusSign)...) + } else { + dst = append(dst, ' ') + } + default: + dst = append(dst, string(r)...) + } + } + return dst +} diff --git a/hack/tools/vendor/golang.org/x/text/internal/number/number.go b/hack/tools/vendor/golang.org/x/text/internal/number/number.go new file mode 100644 index 0000000000..e1d933c3f7 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/internal/number/number.go @@ -0,0 +1,152 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_common.go + +// Package number contains tools and data for formatting numbers. +package number + +import ( + "unicode/utf8" + + "golang.org/x/text/internal/language/compact" + "golang.org/x/text/language" +) + +// Info holds number formatting configuration data. +type Info struct { + system systemData // numbering system information + symIndex symOffset // index to symbols +} + +// InfoFromLangID returns a Info for the given compact language identifier and +// numbering system identifier. If system is the empty string, the default +// numbering system will be taken for that language. +func InfoFromLangID(compactIndex compact.ID, numberSystem string) Info { + p := langToDefaults[compactIndex] + // Lookup the entry for the language. 
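// Illustrative aside, not part of the vendored file: the walk below
// follows CLDR locale inheritance: look for the requested numbering
// system on the language itself, step to the parent locale on a miss,
// and as a last resort fall back to Latin digits, restarting from the
// original language.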
+ pSymIndex := symOffset(0) // Default: Latin, default symbols + system, ok := systemMap[numberSystem] + if !ok { + // Take the value for the default numbering system. This is by far the + // most common case as an alternative numbering system is hardly used. + if p&hasNonLatnMask == 0 { // Latn digits. + pSymIndex = p + } else { // Non-Latn or multiple numbering systems. + // Take the first entry from the alternatives list. + data := langToAlt[p&^hasNonLatnMask] + pSymIndex = data.symIndex + system = data.system + } + } else { + langIndex := compactIndex + ns := system + outerLoop: + for ; ; p = langToDefaults[langIndex] { + if p&hasNonLatnMask == 0 { + if ns == 0 { + // The index directly points to the symbol data. + pSymIndex = p + break + } + // Move to the parent and retry. + langIndex = langIndex.Parent() + } else { + // The index points to a list of symbol data indexes. + for _, e := range langToAlt[p&^hasNonLatnMask:] { + if e.compactTag != langIndex { + if langIndex == 0 { + // The CLDR root defines full symbol information for + // all numbering systems (even though mostly by + // means of aliases). Fall back to the default entry + // for Latn if there is no data for the numbering + // system of this language. + if ns == 0 { + break + } + // Fall back to Latin and start from the original + // language. See + // https://unicode.org/reports/tr35/#Locale_Inheritance. + ns = numLatn + langIndex = compactIndex + continue outerLoop + } + // Fall back to parent. + langIndex = langIndex.Parent() + } else if e.system == ns { + pSymIndex = e.symIndex + break outerLoop + } + } + } + } + } + if int(system) >= len(numSysData) { // algorithmic + // Will generate ASCII digits in case the user inadvertently calls + // WriteDigit or Digit on it. + d := numSysData[0] + d.id = system + return Info{ + system: d, + symIndex: pSymIndex, + } + } + return Info{ + system: numSysData[system], + symIndex: pSymIndex, + } +} + +// InfoFromTag returns a Info for the given language tag. +func InfoFromTag(t language.Tag) Info { + return InfoFromLangID(tagToID(t), t.TypeForKey("nu")) +} + +// IsDecimal reports if the numbering system can convert decimal to native +// symbols one-to-one. +func (n Info) IsDecimal() bool { + return int(n.system.id) < len(numSysData) +} + +// WriteDigit writes the UTF-8 sequence for n corresponding to the given ASCII +// digit to dst and reports the number of bytes written. dst must be large +// enough to hold the rune (can be up to utf8.UTFMax bytes). +func (n Info) WriteDigit(dst []byte, asciiDigit rune) int { + copy(dst, n.system.zero[:n.system.digitSize]) + dst[n.system.digitSize-1] += byte(asciiDigit - '0') + return int(n.system.digitSize) +} + +// AppendDigit appends the UTF-8 sequence for n corresponding to the given digit +// to dst and reports the number of bytes written. dst must be large enough to +// hold the rune (can be up to utf8.UTFMax bytes). +func (n Info) AppendDigit(dst []byte, digit byte) []byte { + dst = append(dst, n.system.zero[:n.system.digitSize]...) + dst[len(dst)-1] += digit + return dst +} + +// Digit returns the digit for the numbering system for the corresponding ASCII +// value. For example, ni.Digit('3') could return '三'. Note that the argument +// is the rune constant '3', which equals 51, not the integer constant 3. +func (n Info) Digit(asciiDigit rune) rune { + var x [utf8.UTFMax]byte + n.WriteDigit(x[:], asciiDigit) + r, _ := utf8.DecodeRune(x[:]) + return r +} + +// Symbol returns the string for the given symbol type. 
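// Illustrative aside, not part of the vendored file: AppendDigit above
// forms a native digit by copying the UTF-8 bytes of the system's zero
// and adding the digit value to the final byte. For the Thai system,
// zero is U+0E50 (E0 B9 90), so digit 7 becomes E0 B9 97, which is
// U+0E57 '๗'. This relies on CLDR decimal numbering systems using ten
// consecutive code points.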
+func (n Info) Symbol(t SymbolType) string { + return symData.Elem(int(symIndex[n.symIndex][t])) +} + +func formatForLang(t language.Tag, index []byte) *Pattern { + return &formats[index[tagToID(t)]] +} + +func tagToID(t language.Tag) compact.ID { + id, _ := compact.RegionalID(compact.Tag(t)) + return id +} diff --git a/hack/tools/vendor/golang.org/x/text/internal/number/pattern.go b/hack/tools/vendor/golang.org/x/text/internal/number/pattern.go new file mode 100644 index 0000000000..06e59559a9 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/internal/number/pattern.go @@ -0,0 +1,485 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package number + +import ( + "errors" + "unicode/utf8" +) + +// This file contains a parser for the CLDR number patterns as described in +// https://unicode.org/reports/tr35/tr35-numbers.html#Number_Format_Patterns. +// +// The following BNF is derived from this standard. +// +// pattern := subpattern (';' subpattern)? +// subpattern := affix? number exponent? affix? +// number := decimal | sigDigits +// decimal := '#'* '0'* ('.' fraction)? | '#' | '0' +// fraction := '0'* '#'* +// sigDigits := '#'* '@' '@'* '#'* +// exponent := 'E' '+'? '0'* '0' +// padSpec := '*' \L +// +// Notes: +// - An affix pattern may contain any runes, but runes with special meaning +// should be escaped. +// - Sequences of digits, '#', and '@' in decimal and sigDigits may have +// interstitial commas. + +// TODO: replace special characters in affixes (-, +, ¤) with control codes. + +// Pattern holds information for formatting numbers. It is designed to hold +// information from CLDR number patterns. +// +// This pattern is precompiled for all patterns for all languages. Even though +// the number of patterns is not very large, we want to keep this small. +// +// This type is only intended for internal use. +type Pattern struct { + RoundingContext + + Affix string // includes prefix and suffix. First byte is prefix length. + Offset uint16 // Offset into Affix for prefix and suffix + NegOffset uint16 // Offset into Affix for negative prefix and suffix or 0. + PadRune rune + FormatWidth uint16 + + GroupingSize [2]uint8 + Flags PatternFlag +} + +// A RoundingContext indicates how a number should be converted to digits. +// It contains all information needed to determine the "visible digits" as +// required by the pluralization rules. +type RoundingContext struct { + // TODO: unify these two fields so that there is a more unambiguous meaning + // of how precision is handled. + MaxSignificantDigits int16 // -1 is unlimited + MaxFractionDigits int16 // -1 is unlimited + + Increment uint32 + IncrementScale uint8 // May differ from printed scale. + + Mode RoundingMode + + DigitShift uint8 // Number of decimals to shift. Used for % and ‰. + + // Number of digits. + MinIntegerDigits uint8 + + MaxIntegerDigits uint8 + MinFractionDigits uint8 + MinSignificantDigits uint8 + + MinExponentDigits uint8 +} + +// RoundSignificantDigits returns the number of significant digits an +// implementation of Convert may round to or n < 0 if there is no maximum or +// a maximum is not recommended. 
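// Illustrative aside, not part of the vendored file: the fields that
// ParsePattern (below) derives from the common CLDR pattern "#,##0.00",
// traced by hand through the state machine:
//
//	GroupingSize          = [3, 0] // primary group of three digits
//	MinIntegerDigits      = 1      // the '0' before the '.'
//	Min/MaxFractionDigits = 2      // the two '0's after the '.'
//	Increment             = 0      // no rounding increment requested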
+func (r *RoundingContext) RoundSignificantDigits() (n int) { + if r.MaxFractionDigits == 0 && r.MaxSignificantDigits > 0 { + return int(r.MaxSignificantDigits) + } else if r.isScientific() && r.MaxIntegerDigits == 1 { + if r.MaxSignificantDigits == 0 || + int(r.MaxFractionDigits+1) == int(r.MaxSignificantDigits) { + // Note: don't add DigitShift: it is only used for decimals. + return int(r.MaxFractionDigits) + 1 + } + } + return -1 +} + +// RoundFractionDigits returns the number of fraction digits an implementation +// of Convert may round to or n < 0 if there is no maximum or a maximum is not +// recommended. +func (r *RoundingContext) RoundFractionDigits() (n int) { + if r.MinExponentDigits == 0 && + r.MaxSignificantDigits == 0 && + r.MaxFractionDigits >= 0 { + return int(r.MaxFractionDigits) + int(r.DigitShift) + } + return -1 +} + +// SetScale fixes the RoundingContext to a fixed number of fraction digits. +func (r *RoundingContext) SetScale(scale int) { + r.MinFractionDigits = uint8(scale) + r.MaxFractionDigits = int16(scale) +} + +func (r *RoundingContext) SetPrecision(prec int) { + r.MaxSignificantDigits = int16(prec) +} + +func (r *RoundingContext) isScientific() bool { + return r.MinExponentDigits > 0 +} + +func (f *Pattern) needsSep(pos int) bool { + p := pos - 1 + size := int(f.GroupingSize[0]) + if size == 0 || p == 0 { + return false + } + if p == size { + return true + } + if p -= size; p < 0 { + return false + } + // TODO: make second groupingsize the same as first if 0 so that we can + // avoid this check. + if x := int(f.GroupingSize[1]); x != 0 { + size = x + } + return p%size == 0 +} + +// A PatternFlag is a bit mask for the flag field of a Pattern. +type PatternFlag uint8 + +const ( + AlwaysSign PatternFlag = 1 << iota + ElideSign // Use space instead of plus sign. AlwaysSign must be true. + AlwaysExpSign + AlwaysDecimalSeparator + ParenthesisForNegative // Common pattern. Saves space. + + PadAfterNumber + PadAfterAffix + + PadBeforePrefix = 0 // Default + PadAfterPrefix = PadAfterAffix + PadBeforeSuffix = PadAfterNumber + PadAfterSuffix = PadAfterNumber | PadAfterAffix + PadMask = PadAfterNumber | PadAfterAffix +) + +type parser struct { + *Pattern + + leadingSharps int + + pos int + err error + doNotTerminate bool + groupingCount uint + hasGroup bool + buf []byte +} + +func (p *parser) setError(err error) { + if p.err == nil { + p.err = err + } +} + +func (p *parser) updateGrouping() { + if p.hasGroup && + 0 < p.groupingCount && p.groupingCount < 255 { + p.GroupingSize[1] = p.GroupingSize[0] + p.GroupingSize[0] = uint8(p.groupingCount) + } + p.groupingCount = 0 + p.hasGroup = true +} + +var ( + // TODO: more sensible and localizeable error messages. + errMultiplePadSpecifiers = errors.New("format: pattern has multiple pad specifiers") + errInvalidPadSpecifier = errors.New("format: invalid pad specifier") + errInvalidQuote = errors.New("format: invalid quote") + errAffixTooLarge = errors.New("format: prefix or suffix exceeds maximum UTF-8 length of 256 bytes") + errDuplicatePercentSign = errors.New("format: duplicate percent sign") + errDuplicatePermilleSign = errors.New("format: duplicate permille sign") + errUnexpectedEnd = errors.New("format: unexpected end of pattern") +) + +// ParsePattern extracts formatting information from a CLDR number pattern. +// +// See https://unicode.org/reports/tr35/tr35-numbers.html#Number_Format_Patterns. 
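// Illustrative aside, not part of the vendored file: needsSep above is
// asked, after each integer digit is written, whether a group separator
// follows it; p = pos - 1 is the number of digits to the right of the
// candidate separator. With GroupingSize = [3, 2] (the Indian
// "#,##,##0" style) and the input 1234567:
//
//	pos = 4 (p = 3): p equals the primary size -> separator
//	pos = 6 (p = 5): 5 - 3 = 2, a multiple of the secondary size 2 -> separator
//	1234567 therefore renders as 12,34,567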
+func ParsePattern(s string) (f *Pattern, err error) { + p := parser{Pattern: &Pattern{}} + + s = p.parseSubPattern(s) + + if s != "" { + // Parse negative sub pattern. + if s[0] != ';' { + p.setError(errors.New("format: error parsing first sub pattern")) + return nil, p.err + } + neg := parser{Pattern: &Pattern{}} // just for extracting the affixes. + s = neg.parseSubPattern(s[len(";"):]) + p.NegOffset = uint16(len(p.buf)) + p.buf = append(p.buf, neg.buf...) + } + if s != "" { + p.setError(errors.New("format: spurious characters at end of pattern")) + } + if p.err != nil { + return nil, p.err + } + if affix := string(p.buf); affix == "\x00\x00" || affix == "\x00\x00\x00\x00" { + // No prefix or suffixes. + p.NegOffset = 0 + } else { + p.Affix = affix + } + if p.Increment == 0 { + p.IncrementScale = 0 + } + return p.Pattern, nil +} + +func (p *parser) parseSubPattern(s string) string { + s = p.parsePad(s, PadBeforePrefix) + s = p.parseAffix(s) + s = p.parsePad(s, PadAfterPrefix) + + s = p.parse(p.number, s) + p.updateGrouping() + + s = p.parsePad(s, PadBeforeSuffix) + s = p.parseAffix(s) + s = p.parsePad(s, PadAfterSuffix) + return s +} + +func (p *parser) parsePad(s string, f PatternFlag) (tail string) { + if len(s) >= 2 && s[0] == '*' { + r, sz := utf8.DecodeRuneInString(s[1:]) + if p.PadRune != 0 { + p.err = errMultiplePadSpecifiers + } else { + p.Flags |= f + p.PadRune = r + } + return s[1+sz:] + } + return s +} + +func (p *parser) parseAffix(s string) string { + x := len(p.buf) + p.buf = append(p.buf, 0) // placeholder for affix length + + s = p.parse(p.affix, s) + + n := len(p.buf) - x - 1 + if n > 0xFF { + p.setError(errAffixTooLarge) + } + p.buf[x] = uint8(n) + return s +} + +// state implements a state transition. It returns the new state. A state +// function may set an error on the parser or may simply return on an incorrect +// token and let the next phase fail. +type state func(r rune) state + +// parse repeatedly applies a state function on the given string until a +// termination condition is reached. +func (p *parser) parse(fn state, s string) (tail string) { + for i, r := range s { + p.doNotTerminate = false + if fn = fn(r); fn == nil || p.err != nil { + return s[i:] + } + p.FormatWidth++ + } + if p.doNotTerminate { + p.setError(errUnexpectedEnd) + } + return "" +} + +func (p *parser) affix(r rune) state { + switch r { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '#', '@', '.', '*', ',', ';': + return nil + case '\'': + p.FormatWidth-- + return p.escapeFirst + case '%': + if p.DigitShift != 0 { + p.setError(errDuplicatePercentSign) + } + p.DigitShift = 2 + case '\u2030': // ‰ Per mille + if p.DigitShift != 0 { + p.setError(errDuplicatePermilleSign) + } + p.DigitShift = 3 + // TODO: handle currency somehow: ¤, ¤¤, ¤¤¤, ¤¤¤¤ + } + p.buf = append(p.buf, string(r)...) + return p.affix +} + +func (p *parser) escapeFirst(r rune) state { + switch r { + case '\'': + p.buf = append(p.buf, "\\'"...) + return p.affix + default: + p.buf = append(p.buf, '\'') + p.buf = append(p.buf, string(r)...) + } + return p.escape +} + +func (p *parser) escape(r rune) state { + switch r { + case '\'': + p.FormatWidth-- + p.buf = append(p.buf, '\'') + return p.affix + default: + p.buf = append(p.buf, string(r)...) + } + return p.escape +} + +// number parses a number. The BNF says the integer part should always have +// a '0', but that does not appear to be the case according to the rest of the +// documentation. We will allow having only '#' numbers. 
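// Illustrative aside, not part of the vendored file: a sketch of using
// ParsePattern above from inside x/text, with a negative subpattern:
//
//	p, err := number.ParsePattern("#,##0.###;(#,##0.###)")
//	// err == nil; p.NegOffset marks where the negative "(" and ")"
//	// affixes begin inside p.Affix, while the positive affixes are empty.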
+func (p *parser) number(r rune) state { + switch r { + case '#': + p.groupingCount++ + p.leadingSharps++ + case '@': + p.groupingCount++ + p.leadingSharps = 0 + p.MaxFractionDigits = -1 + return p.sigDigits(r) + case ',': + if p.leadingSharps == 0 { // no leading commas + return nil + } + p.updateGrouping() + case 'E': + p.MaxIntegerDigits = uint8(p.leadingSharps) + return p.exponent + case '.': // allow ".##" etc. + p.updateGrouping() + return p.fraction + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return p.integer(r) + default: + return nil + } + return p.number +} + +func (p *parser) integer(r rune) state { + if !('0' <= r && r <= '9') { + var next state + switch r { + case 'E': + if p.leadingSharps > 0 { + p.MaxIntegerDigits = uint8(p.leadingSharps) + p.MinIntegerDigits + } + next = p.exponent + case '.': + next = p.fraction + case ',': + next = p.integer + } + p.updateGrouping() + return next + } + p.Increment = p.Increment*10 + uint32(r-'0') + p.groupingCount++ + p.MinIntegerDigits++ + return p.integer +} + +func (p *parser) sigDigits(r rune) state { + switch r { + case '@': + p.groupingCount++ + p.MaxSignificantDigits++ + p.MinSignificantDigits++ + case '#': + return p.sigDigitsFinal(r) + case 'E': + p.updateGrouping() + return p.normalizeSigDigitsWithExponent() + default: + p.updateGrouping() + return nil + } + return p.sigDigits +} + +func (p *parser) sigDigitsFinal(r rune) state { + switch r { + case '#': + p.groupingCount++ + p.MaxSignificantDigits++ + case 'E': + p.updateGrouping() + return p.normalizeSigDigitsWithExponent() + default: + p.updateGrouping() + return nil + } + return p.sigDigitsFinal +} + +func (p *parser) normalizeSigDigitsWithExponent() state { + p.MinIntegerDigits, p.MaxIntegerDigits = 1, 1 + p.MinFractionDigits = p.MinSignificantDigits - 1 + p.MaxFractionDigits = p.MaxSignificantDigits - 1 + p.MinSignificantDigits, p.MaxSignificantDigits = 0, 0 + return p.exponent +} + +func (p *parser) fraction(r rune) state { + switch r { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + p.Increment = p.Increment*10 + uint32(r-'0') + p.IncrementScale++ + p.MinFractionDigits++ + p.MaxFractionDigits++ + case '#': + p.MaxFractionDigits++ + case 'E': + if p.leadingSharps > 0 { + p.MaxIntegerDigits = uint8(p.leadingSharps) + p.MinIntegerDigits + } + return p.exponent + default: + return nil + } + return p.fraction +} + +func (p *parser) exponent(r rune) state { + switch r { + case '+': + // Set mode and check it wasn't already set. + if p.Flags&AlwaysExpSign != 0 || p.MinExponentDigits > 0 { + break + } + p.Flags |= AlwaysExpSign + p.doNotTerminate = true + return p.exponent + case '0': + p.MinExponentDigits++ + return p.exponent + } + // termination condition + if p.MinExponentDigits == 0 { + p.setError(errors.New("format: need at least one digit")) + } + return nil +} diff --git a/hack/tools/vendor/golang.org/x/text/internal/number/roundingmode_string.go b/hack/tools/vendor/golang.org/x/text/internal/number/roundingmode_string.go new file mode 100644 index 0000000000..bcc22471db --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/internal/number/roundingmode_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type RoundingMode"; DO NOT EDIT. + +package number + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
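// Illustrative aside, not part of the vendored file: the index check
// below breaks the build if the constants are renumbered; at run time
// the generated method behaves like:
//
//	ToNearestEven.String()    // "ToNearestEven"
//	RoundingMode(42).String() // "RoundingMode(42)"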
+ var x [1]struct{} + _ = x[ToNearestEven-0] + _ = x[ToNearestZero-1] + _ = x[ToNearestAway-2] + _ = x[ToPositiveInf-3] + _ = x[ToNegativeInf-4] + _ = x[ToZero-5] + _ = x[AwayFromZero-6] + _ = x[numModes-7] +} + +const _RoundingMode_name = "ToNearestEvenToNearestZeroToNearestAwayToPositiveInfToNegativeInfToZeroAwayFromZeronumModes" + +var _RoundingMode_index = [...]uint8{0, 13, 26, 39, 52, 65, 71, 83, 91} + +func (i RoundingMode) String() string { + if i >= RoundingMode(len(_RoundingMode_index)-1) { + return "RoundingMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _RoundingMode_name[_RoundingMode_index[i]:_RoundingMode_index[i+1]] +} diff --git a/hack/tools/vendor/golang.org/x/text/internal/number/tables.go b/hack/tools/vendor/golang.org/x/text/internal/number/tables.go new file mode 100644 index 0000000000..8efce81b56 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/internal/number/tables.go @@ -0,0 +1,1219 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package number + +import "golang.org/x/text/internal/stringset" + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +var numSysData = []systemData{ // 59 elements + 0: {id: 0x0, digitSize: 0x1, zero: [4]uint8{0x30, 0x0, 0x0, 0x0}}, + 1: {id: 0x1, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9e, 0xa5, 0x90}}, + 2: {id: 0x2, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x9c, 0xb0}}, + 3: {id: 0x3, digitSize: 0x2, zero: [4]uint8{0xd9, 0xa0, 0x0, 0x0}}, + 4: {id: 0x4, digitSize: 0x2, zero: [4]uint8{0xdb, 0xb0, 0x0, 0x0}}, + 5: {id: 0x5, digitSize: 0x3, zero: [4]uint8{0xe1, 0xad, 0x90, 0x0}}, + 6: {id: 0x6, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa7, 0xa6, 0x0}}, + 7: {id: 0x7, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xb1, 0x90}}, + 8: {id: 0x8, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x81, 0xa6}}, + 9: {id: 0x9, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x84, 0xb6}}, + 10: {id: 0xa, digitSize: 0x3, zero: [4]uint8{0xea, 0xa9, 0x90, 0x0}}, + 11: {id: 0xb, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa5, 0xa6, 0x0}}, + 12: {id: 0xc, digitSize: 0x3, zero: [4]uint8{0xef, 0xbc, 0x90, 0x0}}, + 13: {id: 0xd, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xb5, 0x90}}, + 14: {id: 0xe, digitSize: 0x3, zero: [4]uint8{0xe0, 0xab, 0xa6, 0x0}}, + 15: {id: 0xf, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa9, 0xa6, 0x0}}, + 16: {id: 0x10, digitSize: 0x4, zero: [4]uint8{0xf0, 0x96, 0xad, 0x90}}, + 17: {id: 0x11, digitSize: 0x3, zero: [4]uint8{0xea, 0xa7, 0x90, 0x0}}, + 18: {id: 0x12, digitSize: 0x3, zero: [4]uint8{0xea, 0xa4, 0x80, 0x0}}, + 19: {id: 0x13, digitSize: 0x3, zero: [4]uint8{0xe1, 0x9f, 0xa0, 0x0}}, + 20: {id: 0x14, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb3, 0xa6, 0x0}}, + 21: {id: 0x15, digitSize: 0x3, zero: [4]uint8{0xe1, 0xaa, 0x80, 0x0}}, + 22: {id: 0x16, digitSize: 0x3, zero: [4]uint8{0xe1, 0xaa, 0x90, 0x0}}, + 23: {id: 0x17, digitSize: 0x3, zero: [4]uint8{0xe0, 0xbb, 0x90, 0x0}}, + 24: {id: 0x18, digitSize: 0x3, zero: [4]uint8{0xe1, 0xb1, 0x80, 0x0}}, + 25: {id: 0x19, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa5, 0x86, 0x0}}, + 26: {id: 0x1a, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0x8e}}, + 27: {id: 0x1b, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0x98}}, + 28: {id: 0x1c, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xb6}}, + 29: {id: 0x1d, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xac}}, + 30: {id: 0x1e, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xa2}}, + 31: {id: 0x1f, digitSize: 0x3, zero: [4]uint8{0xe0, 
0xb5, 0xa6, 0x0}}, + 32: {id: 0x20, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x99, 0x90}}, + 33: {id: 0x21, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa0, 0x90, 0x0}}, + 34: {id: 0x22, digitSize: 0x4, zero: [4]uint8{0xf0, 0x96, 0xa9, 0xa0}}, + 35: {id: 0x23, digitSize: 0x3, zero: [4]uint8{0xea, 0xaf, 0xb0, 0x0}}, + 36: {id: 0x24, digitSize: 0x3, zero: [4]uint8{0xe1, 0x81, 0x80, 0x0}}, + 37: {id: 0x25, digitSize: 0x3, zero: [4]uint8{0xe1, 0x82, 0x90, 0x0}}, + 38: {id: 0x26, digitSize: 0x3, zero: [4]uint8{0xea, 0xa7, 0xb0, 0x0}}, + 39: {id: 0x27, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x91, 0x90}}, + 40: {id: 0x28, digitSize: 0x2, zero: [4]uint8{0xdf, 0x80, 0x0, 0x0}}, + 41: {id: 0x29, digitSize: 0x3, zero: [4]uint8{0xe1, 0xb1, 0x90, 0x0}}, + 42: {id: 0x2a, digitSize: 0x3, zero: [4]uint8{0xe0, 0xad, 0xa6, 0x0}}, + 43: {id: 0x2b, digitSize: 0x4, zero: [4]uint8{0xf0, 0x90, 0x92, 0xa0}}, + 44: {id: 0x2c, digitSize: 0x3, zero: [4]uint8{0xea, 0xa3, 0x90, 0x0}}, + 45: {id: 0x2d, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x87, 0x90}}, + 46: {id: 0x2e, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x8b, 0xb0}}, + 47: {id: 0x2f, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb7, 0xa6, 0x0}}, + 48: {id: 0x30, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x83, 0xb0}}, + 49: {id: 0x31, digitSize: 0x3, zero: [4]uint8{0xe1, 0xae, 0xb0, 0x0}}, + 50: {id: 0x32, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x9b, 0x80}}, + 51: {id: 0x33, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa7, 0x90, 0x0}}, + 52: {id: 0x34, digitSize: 0x3, zero: [4]uint8{0xe0, 0xaf, 0xa6, 0x0}}, + 53: {id: 0x35, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb1, 0xa6, 0x0}}, + 54: {id: 0x36, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb9, 0x90, 0x0}}, + 55: {id: 0x37, digitSize: 0x3, zero: [4]uint8{0xe0, 0xbc, 0xa0, 0x0}}, + 56: {id: 0x38, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x93, 0x90}}, + 57: {id: 0x39, digitSize: 0x3, zero: [4]uint8{0xea, 0x98, 0xa0, 0x0}}, + 58: {id: 0x3a, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xa3, 0xa0}}, +} // Size: 378 bytes + +const ( + numAdlm = 0x1 + numAhom = 0x2 + numArab = 0x3 + numArabext = 0x4 + numArmn = 0x3b + numArmnlow = 0x3c + numBali = 0x5 + numBeng = 0x6 + numBhks = 0x7 + numBrah = 0x8 + numCakm = 0x9 + numCham = 0xa + numCyrl = 0x3d + numDeva = 0xb + numEthi = 0x3e + numFullwide = 0xc + numGeor = 0x3f + numGonm = 0xd + numGrek = 0x40 + numGreklow = 0x41 + numGujr = 0xe + numGuru = 0xf + numHanidays = 0x42 + numHanidec = 0x43 + numHans = 0x44 + numHansfin = 0x45 + numHant = 0x46 + numHantfin = 0x47 + numHebr = 0x48 + numHmng = 0x10 + numJava = 0x11 + numJpan = 0x49 + numJpanfin = 0x4a + numKali = 0x12 + numKhmr = 0x13 + numKnda = 0x14 + numLana = 0x15 + numLanatham = 0x16 + numLaoo = 0x17 + numLatn = 0x0 + numLepc = 0x18 + numLimb = 0x19 + numMathbold = 0x1a + numMathdbl = 0x1b + numMathmono = 0x1c + numMathsanb = 0x1d + numMathsans = 0x1e + numMlym = 0x1f + numModi = 0x20 + numMong = 0x21 + numMroo = 0x22 + numMtei = 0x23 + numMymr = 0x24 + numMymrshan = 0x25 + numMymrtlng = 0x26 + numNewa = 0x27 + numNkoo = 0x28 + numOlck = 0x29 + numOrya = 0x2a + numOsma = 0x2b + numRoman = 0x4b + numRomanlow = 0x4c + numSaur = 0x2c + numShrd = 0x2d + numSind = 0x2e + numSinh = 0x2f + numSora = 0x30 + numSund = 0x31 + numTakr = 0x32 + numTalu = 0x33 + numTaml = 0x4d + numTamldec = 0x34 + numTelu = 0x35 + numThai = 0x36 + numTibt = 0x37 + numTirh = 0x38 + numVaii = 0x39 + numWara = 0x3a + numNumberSystems +) + +var systemMap = map[string]system{ + "adlm": numAdlm, + "ahom": numAhom, + "arab": numArab, + "arabext": numArabext, + 
"armn": numArmn, + "armnlow": numArmnlow, + "bali": numBali, + "beng": numBeng, + "bhks": numBhks, + "brah": numBrah, + "cakm": numCakm, + "cham": numCham, + "cyrl": numCyrl, + "deva": numDeva, + "ethi": numEthi, + "fullwide": numFullwide, + "geor": numGeor, + "gonm": numGonm, + "grek": numGrek, + "greklow": numGreklow, + "gujr": numGujr, + "guru": numGuru, + "hanidays": numHanidays, + "hanidec": numHanidec, + "hans": numHans, + "hansfin": numHansfin, + "hant": numHant, + "hantfin": numHantfin, + "hebr": numHebr, + "hmng": numHmng, + "java": numJava, + "jpan": numJpan, + "jpanfin": numJpanfin, + "kali": numKali, + "khmr": numKhmr, + "knda": numKnda, + "lana": numLana, + "lanatham": numLanatham, + "laoo": numLaoo, + "latn": numLatn, + "lepc": numLepc, + "limb": numLimb, + "mathbold": numMathbold, + "mathdbl": numMathdbl, + "mathmono": numMathmono, + "mathsanb": numMathsanb, + "mathsans": numMathsans, + "mlym": numMlym, + "modi": numModi, + "mong": numMong, + "mroo": numMroo, + "mtei": numMtei, + "mymr": numMymr, + "mymrshan": numMymrshan, + "mymrtlng": numMymrtlng, + "newa": numNewa, + "nkoo": numNkoo, + "olck": numOlck, + "orya": numOrya, + "osma": numOsma, + "roman": numRoman, + "romanlow": numRomanlow, + "saur": numSaur, + "shrd": numShrd, + "sind": numSind, + "sinh": numSinh, + "sora": numSora, + "sund": numSund, + "takr": numTakr, + "talu": numTalu, + "taml": numTaml, + "tamldec": numTamldec, + "telu": numTelu, + "thai": numThai, + "tibt": numTibt, + "tirh": numTirh, + "vaii": numVaii, + "wara": numWara, +} + +var symIndex = [][12]uint8{ // 81 elements + 0: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 1: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 2: [12]uint8{0x0, 0x1, 0x2, 0xd, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb}, + 3: [12]uint8{0x1, 0x0, 0x2, 0xd, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb}, + 4: [12]uint8{0x0, 0x1, 0x2, 0x11, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb}, + 5: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x12, 0xb}, + 6: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 7: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x13, 0xb}, + 8: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 9: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0}, + 10: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb}, + 11: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb}, + 12: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb}, + 13: [12]uint8{0x0, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 14: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x16, 0xb}, + 15: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 16: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0x0}, + 17: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 18: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0}, + 19: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x18, 0x7, 0x8, 0x9, 0xa, 0xb}, + 20: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x19, 0x1a, 0xa, 0xb}, + 21: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 22: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x7, 0x8, 0x9, 0xa, 0xb}, + 23: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 24: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0x1c, 0x6, 0x7, 0x8, 0x9, 0x1d, 0xb}, + 25: [12]uint8{0x1, 0xc, 0x2, 
0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0x1e, 0x0}, + 26: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 27: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 28: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x1f, 0xb}, + 29: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 30: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x20, 0xb}, + 31: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x21, 0x7, 0x8, 0x9, 0x22, 0xb}, + 32: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x23, 0xb}, + 33: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x14, 0x8, 0x9, 0x24, 0xb}, + 34: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x7, 0x8, 0x9, 0x24, 0xb}, + 35: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x25, 0xb}, + 36: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x26, 0xb}, + 37: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x27, 0xb}, + 38: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x28, 0xb}, + 39: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x29, 0xb}, + 40: [12]uint8{0x1, 0x0, 0x2, 0x3, 0xe, 0x1c, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 41: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2a, 0xb}, + 42: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2b, 0xb}, + 43: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x2c, 0x14, 0x8, 0x9, 0x24, 0xb}, + 44: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0}, + 45: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 46: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 47: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2d, 0x0}, + 48: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2e, 0xb}, + 49: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2f, 0xb}, + 50: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x30, 0x7, 0x8, 0x9, 0xa, 0xb}, + 51: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x31, 0xb}, + 52: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x32, 0xb}, + 53: [12]uint8{0x1, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 54: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x33, 0xb}, + 55: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x34, 0xb}, + 56: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 57: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0x3d, 0xb}, + 58: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 59: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 60: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x39, 0x40, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 61: [12]uint8{0x35, 0x36, 0x37, 0x41, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 62: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 63: [12]uint8{0x35, 0xc, 0x37, 0x38, 0x39, 0x42, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0x0}, + 64: [12]uint8{0x35, 0xc, 0x37, 0x38, 0x39, 0x42, 0x43, 0x7, 0x44, 0x9, 0x24, 0xb}, + 65: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x5, 0x3b, 0x7, 0x3c, 0x9, 0x33, 0xb}, + 66: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x45, 0x46, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 67: [12]uint8{0x35, 0x36, 0x37, 0x11, 0xe, 0x1c, 0x43, 0x7, 0x3c, 0x9, 0x1d, 0xb}, + 68: [12]uint8{0x35, 0x36, 0x37, 0x11, 0xe, 0x1c, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 69: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x45, 0x5, 
0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 70: [12]uint8{0x1, 0xc, 0x37, 0x11, 0x45, 0x47, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x0}, + 71: [12]uint8{0x35, 0x1, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 72: [12]uint8{0x1, 0xc, 0x37, 0x11, 0x45, 0x47, 0x43, 0x7, 0x3c, 0x9, 0x24, 0xb}, + 73: [12]uint8{0x35, 0x36, 0x2, 0x3, 0x45, 0x46, 0x43, 0x7, 0x8, 0x9, 0xa, 0x35}, + 74: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0x31, 0x35}, + 75: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0x32, 0x35}, + 76: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x48, 0x46, 0x43, 0x7, 0x3c, 0x9, 0x33, 0x35}, + 77: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x49}, + 78: [12]uint8{0x0, 0x1, 0x4a, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x28, 0xb}, + 79: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x4b, 0xb}, + 80: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x4c, 0x4d, 0xb}, +} // Size: 996 bytes + +var symData = stringset.Set{ + Data: "" + // Size: 599 bytes + ".,;%+-E׉∞NaN:\u00a0\u200e%\u200e\u200e+\u200e-ليس\u00a0رقمًا٪NDТерхьаш" + + "\u00a0дац·’mnne×10^0/00INF−\u200e−ناعددepälukuՈչԹარ\u00a0არის\u00a0რიცხვ" + + "იZMdMсан\u00a0емес¤¤¤сан\u00a0эмесບໍ່\u200bແມ່ນ\u200bໂຕ\u200bເລກNSဂဏန်" + + "းမဟုတ်သောННне\u00a0числочыыһыла\u00a0буотах·10^epilohosan\u00a0dälTFЕs" + + "on\u00a0emasҳақиқий\u00a0сон\u00a0эмас非數值非数值٫٬؛٪\u061c\u061c+\u061c-اس؉ل" + + "يس\u00a0رقم\u200f+\u200f-\u200f−٪\u200f\u061c−×۱۰^؉\u200f\u200e+\u200e" + + "\u200e-\u200e\u200e−\u200e+\u200e:၊ཨང་མེན་གྲངས་མེདཨང་མད", + Index: []uint16{ // 79 elements + // Entry 0 - 3F + 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, + 0x0009, 0x000c, 0x000f, 0x0012, 0x0013, 0x0015, 0x001c, 0x0020, + 0x0024, 0x0036, 0x0038, 0x003a, 0x0050, 0x0052, 0x0055, 0x0058, + 0x0059, 0x005e, 0x0062, 0x0065, 0x0068, 0x006e, 0x0078, 0x0080, + 0x0086, 0x00ae, 0x00af, 0x00b2, 0x00c2, 0x00c8, 0x00d8, 0x0105, + 0x0107, 0x012e, 0x0132, 0x0142, 0x015e, 0x0163, 0x016a, 0x0173, + 0x0175, 0x0177, 0x0180, 0x01a0, 0x01a9, 0x01b2, 0x01b4, 0x01b6, + 0x01b8, 0x01bc, 0x01bf, 0x01c2, 0x01c6, 0x01c8, 0x01d6, 0x01da, + // Entry 40 - 7F + 0x01de, 0x01e4, 0x01e9, 0x01ee, 0x01f5, 0x01fa, 0x0201, 0x0208, + 0x0211, 0x0215, 0x0218, 0x021b, 0x0230, 0x0248, 0x0257, + }, +} // Size: 797 bytes + +// langToDefaults maps a compact language index to the default numbering system +// and default symbol set +var langToDefaults = [775]symOffset{ + // Entry 0 - 3F + 0x8000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, 0x0000, + 0x0000, 0x0000, 0x8003, 0x0002, 0x0002, 0x0002, 0x0002, 0x0003, + 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, + 0x0003, 0x0003, 0x0003, 0x0003, 0x0002, 0x0002, 0x0002, 0x0004, + 0x0002, 0x0004, 0x0002, 0x0002, 0x0002, 0x0003, 0x0002, 0x0000, + 0x8005, 0x0000, 0x0000, 0x0000, 0x8006, 0x0005, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0006, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, + // Entry 40 - 7F + 0x8009, 0x0000, 0x0000, 0x800a, 0x0000, 0x0000, 0x800c, 0x0001, + 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0006, 0x0006, 0x800e, 0x0000, 0x0000, 0x0007, + 0x0007, 0x0000, 0x0000, 0x0000, 0x0000, 0x800f, 0x0008, 0x0008, + 0x8011, 0x0001, 0x0001, 0x0001, 0x803c, 0x0000, 0x0009, 0x0009, + 0x0009, 0x0000, 0x0000, 0x000a, 0x000b, 0x000a, 0x000c, 0x000a, + 0x000a, 0x000c, 0x000a, 0x000d, 0x000d, 0x000a, 0x000a, 0x0001, + 0x0001, 0x0000, 0x0001, 0x0001, 0x803f, 0x0000, 0x0000, 0x0000, + // Entry 
80 - BF + 0x000e, 0x000e, 0x000e, 0x000f, 0x000f, 0x000f, 0x0000, 0x0000, + 0x0006, 0x0000, 0x0000, 0x0000, 0x000a, 0x0010, 0x0000, 0x0006, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, 0x000a, + 0x0000, 0x0000, 0x0000, 0x0000, 0x000a, 0x0000, 0x0009, 0x0000, + 0x0000, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + // Entry C0 - FF + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0013, 0x0000, + 0x0000, 0x000f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000, 0x0015, + 0x0015, 0x0006, 0x0000, 0x0006, 0x0006, 0x0000, 0x0000, 0x0006, + 0x0006, 0x0001, 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, + // Entry 100 - 13F + 0x0000, 0x0000, 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, + 0x0000, 0x0006, 0x0000, 0x0000, 0x0006, 0x0006, 0x0016, 0x0016, + 0x0017, 0x0017, 0x0001, 0x0001, 0x8041, 0x0018, 0x0018, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0019, 0x0019, 0x0000, 0x0000, + 0x0017, 0x0017, 0x0017, 0x8044, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0006, 0x0006, 0x0001, 0x0001, 0x0001, 0x0001, + // Entry 140 - 17F + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0006, 0x0006, 0x0006, 0x0006, 0x0000, 0x0000, + 0x8047, 0x0000, 0x0006, 0x0006, 0x001a, 0x001a, 0x001a, 0x001a, + 0x804a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x804c, 0x001b, 0x0000, + 0x0000, 0x0006, 0x0006, 0x0006, 0x000a, 0x000a, 0x0001, 0x0001, + 0x001c, 0x001c, 0x0009, 0x0009, 0x804f, 0x0000, 0x0000, 0x0000, + // Entry 180 - 1BF + 0x0000, 0x0000, 0x8052, 0x0006, 0x0006, 0x001d, 0x0006, 0x0006, + 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x001e, 0x001e, 0x001f, + 0x001f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, + 0x0001, 0x000d, 0x000d, 0x0000, 0x0000, 0x0020, 0x0020, 0x0006, + 0x0006, 0x0021, 0x0021, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, + 0x0000, 0x8054, 0x0000, 0x0000, 0x0000, 0x0000, 0x8056, 0x001b, + 0x0000, 0x0000, 0x0001, 0x0001, 0x0022, 0x0022, 0x0000, 0x0000, + // Entry 1C0 - 1FF + 0x0000, 0x0023, 0x0023, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, + 0x0024, 0x0024, 0x8058, 0x0000, 0x0000, 0x0016, 0x0016, 0x0006, + 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0025, 0x0025, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x000d, 0x000d, 0x0000, 0x0000, + 0x0006, 0x0006, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x805a, 0x0000, 0x0000, 0x0006, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x805b, 0x0026, 0x805d, + // Entry 200 - 23F + 0x0000, 0x0000, 0x0000, 0x0000, 0x805e, 0x0015, 0x0015, 0x0000, + 0x0000, 0x0006, 0x0006, 0x0006, 0x8061, 0x0000, 0x0000, 0x8062, + 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0001, + 0x0001, 0x0015, 0x0015, 0x0006, 0x0006, 0x0000, 
0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0027, 0x0027, 0x0027, 0x8065, 0x8067, + 0x001b, 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, + 0x8069, 0x0028, 0x0006, 0x0001, 0x0006, 0x0001, 0x0001, 0x0001, + // Entry 240 - 27F + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, + 0x0006, 0x0000, 0x0000, 0x001a, 0x001a, 0x0006, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0000, 0x0000, 0x0029, 0x0029, 0x0029, 0x0029, + 0x0029, 0x0029, 0x0029, 0x0006, 0x0006, 0x0000, 0x0000, 0x002a, + 0x002a, 0x0000, 0x0000, 0x0000, 0x0000, 0x806b, 0x0000, 0x0000, + 0x002b, 0x002b, 0x002b, 0x002b, 0x0006, 0x0006, 0x000d, 0x000d, + 0x0006, 0x0006, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x002c, 0x002c, 0x002d, 0x002d, 0x002e, 0x002e, 0x0000, 0x0000, + // Entry 280 - 2BF + 0x0000, 0x002f, 0x002f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0006, + 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0000, 0x0000, 0x0000, 0x806d, 0x0022, 0x0022, + 0x0022, 0x0000, 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0030, 0x0030, 0x0000, 0x0000, 0x8071, 0x0031, 0x0006, + // Entry 2C0 - 2FF + 0x0006, 0x0006, 0x0000, 0x0001, 0x0001, 0x000d, 0x000d, 0x0001, + 0x0001, 0x0000, 0x0000, 0x0032, 0x0032, 0x8074, 0x8076, 0x001b, + 0x8077, 0x8079, 0x0028, 0x807b, 0x0034, 0x0033, 0x0033, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0035, 0x0035, 0x0006, 0x0006, + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0036, 0x0037, 0x0037, 0x0036, 0x0036, 0x0001, + 0x0001, 0x807d, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8080, + // Entry 300 - 33F + 0x0036, 0x0036, 0x0036, 0x0000, 0x0000, 0x0006, 0x0014, +} // Size: 1550 bytes + +// langToAlt is a list of numbering system and symbol set pairs, sorted and +// marked by compact language index. 
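+// (Values in langToDefaults with the high bit (0x8000) set appear to index +// into this table rather than naming a symbol set directly.)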
+var langToAlt = []altSymData{ // 131 elements + 1: {compactTag: 0x0, symIndex: 0x38, system: 0x3}, + 2: {compactTag: 0x0, symIndex: 0x42, system: 0x4}, + 3: {compactTag: 0xa, symIndex: 0x39, system: 0x3}, + 4: {compactTag: 0xa, symIndex: 0x2, system: 0x0}, + 5: {compactTag: 0x28, symIndex: 0x0, system: 0x6}, + 6: {compactTag: 0x2c, symIndex: 0x5, system: 0x0}, + 7: {compactTag: 0x2c, symIndex: 0x3a, system: 0x3}, + 8: {compactTag: 0x2c, symIndex: 0x42, system: 0x4}, + 9: {compactTag: 0x40, symIndex: 0x0, system: 0x6}, + 10: {compactTag: 0x43, symIndex: 0x0, system: 0x0}, + 11: {compactTag: 0x43, symIndex: 0x4f, system: 0x37}, + 12: {compactTag: 0x46, symIndex: 0x1, system: 0x0}, + 13: {compactTag: 0x46, symIndex: 0x38, system: 0x3}, + 14: {compactTag: 0x54, symIndex: 0x0, system: 0x9}, + 15: {compactTag: 0x5d, symIndex: 0x3a, system: 0x3}, + 16: {compactTag: 0x5d, symIndex: 0x8, system: 0x0}, + 17: {compactTag: 0x60, symIndex: 0x1, system: 0x0}, + 18: {compactTag: 0x60, symIndex: 0x38, system: 0x3}, + 19: {compactTag: 0x60, symIndex: 0x42, system: 0x4}, + 20: {compactTag: 0x60, symIndex: 0x0, system: 0x5}, + 21: {compactTag: 0x60, symIndex: 0x0, system: 0x6}, + 22: {compactTag: 0x60, symIndex: 0x0, system: 0x8}, + 23: {compactTag: 0x60, symIndex: 0x0, system: 0x9}, + 24: {compactTag: 0x60, symIndex: 0x0, system: 0xa}, + 25: {compactTag: 0x60, symIndex: 0x0, system: 0xb}, + 26: {compactTag: 0x60, symIndex: 0x0, system: 0xc}, + 27: {compactTag: 0x60, symIndex: 0x0, system: 0xd}, + 28: {compactTag: 0x60, symIndex: 0x0, system: 0xe}, + 29: {compactTag: 0x60, symIndex: 0x0, system: 0xf}, + 30: {compactTag: 0x60, symIndex: 0x0, system: 0x11}, + 31: {compactTag: 0x60, symIndex: 0x0, system: 0x12}, + 32: {compactTag: 0x60, symIndex: 0x0, system: 0x13}, + 33: {compactTag: 0x60, symIndex: 0x0, system: 0x14}, + 34: {compactTag: 0x60, symIndex: 0x0, system: 0x15}, + 35: {compactTag: 0x60, symIndex: 0x0, system: 0x16}, + 36: {compactTag: 0x60, symIndex: 0x0, system: 0x17}, + 37: {compactTag: 0x60, symIndex: 0x0, system: 0x18}, + 38: {compactTag: 0x60, symIndex: 0x0, system: 0x19}, + 39: {compactTag: 0x60, symIndex: 0x0, system: 0x1f}, + 40: {compactTag: 0x60, symIndex: 0x0, system: 0x21}, + 41: {compactTag: 0x60, symIndex: 0x0, system: 0x23}, + 42: {compactTag: 0x60, symIndex: 0x0, system: 0x24}, + 43: {compactTag: 0x60, symIndex: 0x0, system: 0x25}, + 44: {compactTag: 0x60, symIndex: 0x0, system: 0x28}, + 45: {compactTag: 0x60, symIndex: 0x0, system: 0x29}, + 46: {compactTag: 0x60, symIndex: 0x0, system: 0x2a}, + 47: {compactTag: 0x60, symIndex: 0x0, system: 0x2b}, + 48: {compactTag: 0x60, symIndex: 0x0, system: 0x2c}, + 49: {compactTag: 0x60, symIndex: 0x0, system: 0x2d}, + 50: {compactTag: 0x60, symIndex: 0x0, system: 0x30}, + 51: {compactTag: 0x60, symIndex: 0x0, system: 0x31}, + 52: {compactTag: 0x60, symIndex: 0x0, system: 0x32}, + 53: {compactTag: 0x60, symIndex: 0x0, system: 0x33}, + 54: {compactTag: 0x60, symIndex: 0x0, system: 0x34}, + 55: {compactTag: 0x60, symIndex: 0x0, system: 0x35}, + 56: {compactTag: 0x60, symIndex: 0x0, system: 0x36}, + 57: {compactTag: 0x60, symIndex: 0x0, system: 0x37}, + 58: {compactTag: 0x60, symIndex: 0x0, system: 0x39}, + 59: {compactTag: 0x60, symIndex: 0x0, system: 0x43}, + 60: {compactTag: 0x64, symIndex: 0x0, system: 0x0}, + 61: {compactTag: 0x64, symIndex: 0x38, system: 0x3}, + 62: {compactTag: 0x64, symIndex: 0x42, system: 0x4}, + 63: {compactTag: 0x7c, symIndex: 0x50, system: 0x37}, + 64: {compactTag: 0x7c, symIndex: 0x0, system: 0x0}, + 65: {compactTag: 
0x114, symIndex: 0x43, system: 0x4}, + 66: {compactTag: 0x114, symIndex: 0x18, system: 0x0}, + 67: {compactTag: 0x114, symIndex: 0x3b, system: 0x3}, + 68: {compactTag: 0x123, symIndex: 0x1, system: 0x0}, + 69: {compactTag: 0x123, symIndex: 0x3c, system: 0x3}, + 70: {compactTag: 0x123, symIndex: 0x44, system: 0x4}, + 71: {compactTag: 0x158, symIndex: 0x0, system: 0x0}, + 72: {compactTag: 0x158, symIndex: 0x3b, system: 0x3}, + 73: {compactTag: 0x158, symIndex: 0x45, system: 0x4}, + 74: {compactTag: 0x160, symIndex: 0x0, system: 0x0}, + 75: {compactTag: 0x160, symIndex: 0x38, system: 0x3}, + 76: {compactTag: 0x16d, symIndex: 0x1b, system: 0x0}, + 77: {compactTag: 0x16d, symIndex: 0x0, system: 0x9}, + 78: {compactTag: 0x16d, symIndex: 0x0, system: 0xa}, + 79: {compactTag: 0x17c, symIndex: 0x0, system: 0x0}, + 80: {compactTag: 0x17c, symIndex: 0x3d, system: 0x3}, + 81: {compactTag: 0x17c, symIndex: 0x42, system: 0x4}, + 82: {compactTag: 0x182, symIndex: 0x6, system: 0x0}, + 83: {compactTag: 0x182, symIndex: 0x38, system: 0x3}, + 84: {compactTag: 0x1b1, symIndex: 0x0, system: 0x0}, + 85: {compactTag: 0x1b1, symIndex: 0x3e, system: 0x3}, + 86: {compactTag: 0x1b6, symIndex: 0x42, system: 0x4}, + 87: {compactTag: 0x1b6, symIndex: 0x1b, system: 0x0}, + 88: {compactTag: 0x1d2, symIndex: 0x42, system: 0x4}, + 89: {compactTag: 0x1d2, symIndex: 0x0, system: 0x0}, + 90: {compactTag: 0x1f3, symIndex: 0x0, system: 0xb}, + 91: {compactTag: 0x1fd, symIndex: 0x4e, system: 0x24}, + 92: {compactTag: 0x1fd, symIndex: 0x26, system: 0x0}, + 93: {compactTag: 0x1ff, symIndex: 0x42, system: 0x4}, + 94: {compactTag: 0x204, symIndex: 0x15, system: 0x0}, + 95: {compactTag: 0x204, symIndex: 0x3f, system: 0x3}, + 96: {compactTag: 0x204, symIndex: 0x46, system: 0x4}, + 97: {compactTag: 0x20c, symIndex: 0x0, system: 0xb}, + 98: {compactTag: 0x20f, symIndex: 0x6, system: 0x0}, + 99: {compactTag: 0x20f, symIndex: 0x38, system: 0x3}, + 100: {compactTag: 0x20f, symIndex: 0x42, system: 0x4}, + 101: {compactTag: 0x22e, symIndex: 0x0, system: 0x0}, + 102: {compactTag: 0x22e, symIndex: 0x47, system: 0x4}, + 103: {compactTag: 0x22f, symIndex: 0x42, system: 0x4}, + 104: {compactTag: 0x22f, symIndex: 0x1b, system: 0x0}, + 105: {compactTag: 0x238, symIndex: 0x42, system: 0x4}, + 106: {compactTag: 0x238, symIndex: 0x28, system: 0x0}, + 107: {compactTag: 0x265, symIndex: 0x38, system: 0x3}, + 108: {compactTag: 0x265, symIndex: 0x0, system: 0x0}, + 109: {compactTag: 0x29d, symIndex: 0x22, system: 0x0}, + 110: {compactTag: 0x29d, symIndex: 0x40, system: 0x3}, + 111: {compactTag: 0x29d, symIndex: 0x48, system: 0x4}, + 112: {compactTag: 0x29d, symIndex: 0x4d, system: 0xc}, + 113: {compactTag: 0x2bd, symIndex: 0x31, system: 0x0}, + 114: {compactTag: 0x2bd, symIndex: 0x3e, system: 0x3}, + 115: {compactTag: 0x2bd, symIndex: 0x42, system: 0x4}, + 116: {compactTag: 0x2cd, symIndex: 0x1b, system: 0x0}, + 117: {compactTag: 0x2cd, symIndex: 0x49, system: 0x4}, + 118: {compactTag: 0x2ce, symIndex: 0x49, system: 0x4}, + 119: {compactTag: 0x2d0, symIndex: 0x33, system: 0x0}, + 120: {compactTag: 0x2d0, symIndex: 0x4a, system: 0x4}, + 121: {compactTag: 0x2d1, symIndex: 0x42, system: 0x4}, + 122: {compactTag: 0x2d1, symIndex: 0x28, system: 0x0}, + 123: {compactTag: 0x2d3, symIndex: 0x34, system: 0x0}, + 124: {compactTag: 0x2d3, symIndex: 0x4b, system: 0x4}, + 125: {compactTag: 0x2f9, symIndex: 0x0, system: 0x0}, + 126: {compactTag: 0x2f9, symIndex: 0x38, system: 0x3}, + 127: {compactTag: 0x2f9, symIndex: 0x42, system: 0x4}, + 128: {compactTag: 0x2ff, 
symIndex: 0x36, system: 0x0}, + 129: {compactTag: 0x2ff, symIndex: 0x41, system: 0x3}, + 130: {compactTag: 0x2ff, symIndex: 0x4c, system: 0x4}, +} // Size: 810 bytes + +var tagToDecimal = []uint8{ // 775 elements + // Entry 0 - 3F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 40 - 7F + 0x05, 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x05, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x01, 0x01, + // Entry 80 - BF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry C0 - FF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 100 - 13F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 140 - 17F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, + 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 180 - 1BF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x05, 0x05, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 1C0 - 1FF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, + 0x01, 0x01, 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, 0x01, + // Entry 200 - 23F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, 0x05, 0x01, + 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 240 - 27F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 280 - 2BF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 2C0 - 2FF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 300 - 33F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x08, +} // Size: 799 bytes + +var tagToScientific = []uint8{ // 775 elements + // Entry 0 - 3F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 40 - 7F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 80 - BF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry C0 - FF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 100 - 13F + 0x02, 0x02, 0x02, 
0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 140 - 17F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x0c, + 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 180 - 1BF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 1C0 - 1FF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x0d, 0x0d, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 200 - 23F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x0c, 0x02, + 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 240 - 27F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x0d, 0x0d, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 280 - 2BF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 2C0 - 2FF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 300 - 33F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x09, +} // Size: 799 bytes + +var tagToPercent = []uint8{ // 775 elements + // Entry 0 - 3F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 
0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 40 - 7F + 0x06, 0x06, 0x06, 0x04, 0x04, 0x04, 0x03, 0x03, + 0x06, 0x06, 0x03, 0x04, 0x04, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, + 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, 0x04, 0x03, + 0x03, 0x04, 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x07, 0x07, 0x04, 0x04, + // Entry 80 - BF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, 0x03, 0x04, + 0x04, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry C0 - FF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + // Entry 100 - 13F + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, + 0x0b, 0x0b, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + // Entry 140 - 17F + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, + 0x06, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 180 - 1BF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04, + // Entry 1C0 - 1FF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 200 - 23F + 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x06, 
0x06, 0x04, 0x04, 0x04, 0x06, 0x04, + 0x04, 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 240 - 27F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, + // Entry 280 - 2BF + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, + 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x0e, + // Entry 2C0 - 2FF + 0x0e, 0x0e, 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 300 - 33F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x0a, +} // Size: 799 bytes + +var formats = []Pattern{Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x0, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 3, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x9, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x1, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x1}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x3, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x03\u00a0%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x7, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: 
"\x00\x01%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x6, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 3, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0xc, + GroupingSize: [2]uint8{0x3, + 0x2}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x01%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x9, + GroupingSize: [2]uint8{0x3, + 0x2}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x03\u00a0%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0xa, + GroupingSize: [2]uint8{0x3, + 0x2}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 6, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x8, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 6, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x6, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x3}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0xd, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x4}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x01%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x2, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x03%\u00a0\x00", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x7, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x1, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x1}, + Affix: "\x01[\x01]", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 
0, + FormatWidth: 0x5, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x1, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x01%\x00", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x6, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}} + +// Total table size 8634 bytes (8KiB); checksum: 8F23386D diff --git a/hack/tools/vendor/golang.org/x/text/internal/stringset/set.go b/hack/tools/vendor/golang.org/x/text/internal/stringset/set.go new file mode 100644 index 0000000000..bb2fffbc75 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/internal/stringset/set.go @@ -0,0 +1,86 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package stringset provides a way to represent a collection of strings +// compactly. +package stringset + +import "sort" + +// A Set holds a collection of strings that can be looked up by an index number. +type Set struct { + // These fields are exported to allow for code generation. + + Data string + Index []uint16 +} + +// Elem returns the string with index i. It panics if i is out of range. +func (s *Set) Elem(i int) string { + return s.Data[s.Index[i]:s.Index[i+1]] +} + +// Len returns the number of strings in the set. +func (s *Set) Len() int { + return len(s.Index) - 1 +} + +// Search returns the index of the given string or -1 if it is not in the set. +// The Set must have been created with strings in sorted order. +func Search(s *Set, str string) int { + // TODO: optimize this if it gets used a lot. + n := len(s.Index) - 1 + p := sort.Search(n, func(i int) bool { + return s.Elem(i) >= str + }) + if p == n || str != s.Elem(p) { + return -1 + } + return p } + +// A Builder constructs Sets. +type Builder struct { + set Set + index map[string]int +} + +// NewBuilder returns a new and initialized Builder. +func NewBuilder() *Builder { + return &Builder{ + set: Set{ + Index: []uint16{0}, + }, + index: map[string]int{}, + } +} + +// Set returns the set created so far. +func (b *Builder) Set() Set { + return b.set +} + +// Index returns the index for the given string, which must have been added +// before. +func (b *Builder) Index(s string) int { + return b.index[s] +} + +// Add adds a string to the index. Strings that are added by a single Add will +// be stored together, unless they match an existing string. +func (b *Builder) Add(ss ...string) { + // First check if the string already exists.
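+ // Each new string is appended to Data and its end offset is appended to + // Index, so Elem(i) is the slice Data[Index[i]:Index[i+1]]; uint16 offsets + // cap the total data at 0xFFFF bytes.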
+ for _, s := range ss { + if _, ok := b.index[s]; ok { + continue + } + b.index[s] = len(b.set.Index) - 1 + b.set.Data += s + x := len(b.set.Data) + if x > 0xFFFF { + panic("stringset: index exceeds 0xFFFF") + } + b.set.Index = append(b.set.Index, uint16(x)) + } +} diff --git a/hack/tools/vendor/golang.org/x/text/message/catalog.go b/hack/tools/vendor/golang.org/x/text/message/catalog.go new file mode 100644 index 0000000000..068271def4 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/message/catalog.go @@ -0,0 +1,36 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package message + +// TODO: some types in this file will need to be made public at some time. +// Documentation and method names will reflect this by using the exported name. + +import ( + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// MatchLanguage reports the matched tag obtained from language.MatchStrings for +// the Matcher of the DefaultCatalog. +func MatchLanguage(preferred ...string) language.Tag { + c := DefaultCatalog + tag, _ := language.MatchStrings(c.Matcher(), preferred...) + return tag +} + +// DefaultCatalog is used by SetString. +var DefaultCatalog catalog.Catalog = defaultCatalog + +var defaultCatalog = catalog.NewBuilder() + +// SetString calls SetString on the initial default Catalog. +func SetString(tag language.Tag, key string, msg string) error { + return defaultCatalog.SetString(tag, key, msg) +} + +// Set calls Set on the initial default Catalog. +func Set(tag language.Tag, key string, msg ...catalog.Message) error { + return defaultCatalog.Set(tag, key, msg...) +} diff --git a/hack/tools/vendor/golang.org/x/text/message/catalog/catalog.go b/hack/tools/vendor/golang.org/x/text/message/catalog/catalog.go new file mode 100644 index 0000000000..96955d0752 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/message/catalog/catalog.go @@ -0,0 +1,365 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package catalog defines collections of translated format strings. +// +// This package mostly defines types for populating catalogs with messages. The +// catmsg package contains further definitions for creating custom message and +// dictionary types as well as packages that use Catalogs. +// +// Package catalog defines various interfaces: Dictionary, Loader, and Message. +// A Dictionary maintains a set of translations of format strings for a single +// language. The Loader interface defines a source of dictionaries. A +// translation of a format string is represented by a Message. +// +// # Catalogs +// +// A Catalog defines a programmatic interface for setting message translations. +// It maintains a set of per-language dictionaries with translations for a set +// of keys. For message translation to function properly, a translation should +// be defined for each key for each supported language. A dictionary may be +// underspecified, though, if there is a parent language that already defines +// the key. For example, a Dictionary for "en-GB" could leave out entries that +// are identical to those in a dictionary for "en". +// +// # Messages +// +// A Message is a format string which varies on the value of substitution +// variables.
For instance, to indicate the number of results one could want "no +// results" if there are none, "1 result" if there is 1, and "%d results" for +// any other number. Catalog is agnostic to the kind of format strings that are +// used: for instance, messages can follow either the printf-style substitution +// from package fmt or use templates. +// +// A Message does not substitute arguments in the format string. This job is +// reserved for packages that render strings, such as message, that use Catalogs +// to select strings. This separation of concerns allows Catalog to be used to +// store any kind of formatting strings. +// +// # Selecting messages based on linguistic features of substitution arguments +// +// Messages may vary based on any linguistic features of the argument values. +// The most common one is plural form, but others exist. +// +// Selection messages are provided in packages that provide support for a +// specific linguistic feature. The following snippet uses plural.Selectf: +// +// catalog.Set(language.English, "You are %d minute(s) late.", +// plural.Selectf(1, "", +// plural.One, "You are 1 minute late.", +// plural.Other, "You are %d minutes late.")) +// +// In this example, a message is stored in the Catalog where one of two messages +// is selected based on the first argument, a number. The first message is +// selected if the argument is singular (identified by the selector "one") and +// the second message is selected in all other cases. The selectors are defined +// by the plural rules defined in CLDR. The selector "other" is special and will +// always match. Each language always defines one of the linguistic categories +// to be "other." For English, singular is "one" and plural is "other". +// +// Selects can be nested. This allows selecting sentences based on features of +// multiple arguments or multiple linguistic properties of a single argument. +// +// # String interpolation +// +// There is often a lot of commonality between the possible variants of a +// message. For instance, in the example above the word "minute" varies based on +// the plural category of the argument, but the rest of the sentence is +// identical. Using interpolation, the above message can be rewritten as: +// +// catalog.Set(language.English, "You are %d minute(s) late.", +// catalog.Var("minutes", +// plural.Selectf(1, "", plural.One, "minute", plural.Other, "minutes")), +// catalog.String("You are %[1]d ${minutes} late.")) +// +// Var is defined to return the variable name if the message does not yield a +// match. This allows us to further simplify this snippet to +// +// catalog.Set(language.English, "You are %d minute(s) late.", +// catalog.Var("minutes", plural.Selectf(1, "", plural.One, "minute")), +// catalog.String("You are %d ${minutes} late.")) +// +// Overall this is still only a minor improvement, but things can get a lot more +// unwieldy if more than one linguistic feature is used to determine a message +// variant.
Consider the following example: +// +// // argument 1: list of hosts, argument 2: list of guests +// catalog.Set(language.English, "%[1]v invite(s) %[2]v to their party.", +// catalog.Var("their", +// plural.Selectf(1, "", +// plural.One, gender.Select(1, "female", "her", "other", "his"))), +// catalog.Var("invites", plural.Selectf(1, "", plural.One, "invite")), +// catalog.String("%[1]v ${invites} %[2]v to ${their} party.")) +// +// Without variable substitution, this would have to be written as +// +// // argument 1: list of hosts, argument 2: list of guests +// catalog.Set(language.English, "%[1]v invite(s) %[2]v to their party.", +// plural.Selectf(1, "", +// plural.One, gender.Select(1, +// "female", "%[1]v invites %[2]v to her party.", +// "other", "%[1]v invites %[2]v to his party."), +// plural.Other, "%[1]v invites %[2]v to their party.")) +// +// Not necessarily shorter, but using variables there is less duplication and +// the messages are more maintenance friendly. Moreover, languages may have up +// to six plural forms. This makes the use of variables more welcome. +// +// Different messages using the same inflections can reuse variables by moving +// them to macros. Using macros we can rewrite the message as: +// +// // argument 1: list of hosts, argument 2: list of guests +// catalog.SetString(language.English, "%[1]v invite(s) %[2]v to their party.", +// "%[1]v ${invites(1)} %[2]v to ${their(1)} party.") +// +// Where the following macros were defined separately. +// +// catalog.SetMacro(language.English, "invites", plural.Selectf(1, "", +// plural.One, "invite")) +// catalog.SetMacro(language.English, "their", plural.Selectf(1, "", +// plural.One, gender.Select(1, "female", "her", "other", "his"))) +// +// Placeholders use parentheses and the arguments to invoke a macro. +// +// # Looking up messages +// +// Message lookup using Catalogs is typically only done by specialized packages +// and is not something the user should be concerned with. For instance, to +// express the tardiness of a user using the related message we defined earlier, +// the user may use the package message like so: +// +// p := message.NewPrinter(language.English) +// p.Printf("You are %d minute(s) late.", 5) +// +// Which would print: +// +// You are 5 minutes late. +// +// This package is UNDER CONSTRUCTION and its API may change. +package catalog // import "golang.org/x/text/message/catalog" + +// TODO: +// Some way to freeze a catalog. +// - Locking on each lookup turns out to be about 50% of the total running time +// for some of the benchmarks in the message package. +// Consider these: +// - Sequence type to support sequences in user-defined messages. +// - Garbage collection: Remove dictionaries that can no longer be reached +// as other dictionaries have been added that cover all possible keys. + +import ( + "errors" + "fmt" + + "golang.org/x/text/internal" + + "golang.org/x/text/internal/catmsg" + "golang.org/x/text/language" +) + +// A Catalog allows lookup of translated messages. +type Catalog interface { + // Languages returns all languages for which the Catalog contains variants. + Languages() []language.Tag + + // Matcher returns a Matcher for languages from this Catalog. + Matcher() language.Matcher + + // A Context is used for evaluating Messages. + Context(tag language.Tag, r catmsg.Renderer) *Context + + // This method also makes Catalog a private interface. + lookup(tag language.Tag, key string) (data string, ok bool) +} + +// NewFromMap creates a Catalog from the given map.
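+// Map keys are BCP 47 language tags, parsed with language.Parse.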
If a Dictionary is +// underspecified, the entry is retrieved from a parent language. +func NewFromMap(dictionaries map[string]Dictionary, opts ...Option) (Catalog, error) { + options := options{} + for _, o := range opts { + o(&options) + } + c := &catalog{ + dicts: map[language.Tag]Dictionary{}, + } + _, hasFallback := dictionaries[options.fallback.String()] + if hasFallback { + // TODO: Should it be okay to not have a fallback language? + // Catalog generators could enforce there is always a fallback. + c.langs = append(c.langs, options.fallback) + } + for lang, dict := range dictionaries { + tag, err := language.Parse(lang) + if err != nil { + return nil, fmt.Errorf("catalog: invalid language tag %q", lang) + } + if _, ok := c.dicts[tag]; ok { + return nil, fmt.Errorf("catalog: duplicate entry for tag %q after normalization", tag) + } + c.dicts[tag] = dict + if !hasFallback || tag != options.fallback { + c.langs = append(c.langs, tag) + } + } + if hasFallback { + internal.SortTags(c.langs[1:]) + } else { + internal.SortTags(c.langs) + } + c.matcher = language.NewMatcher(c.langs) + return c, nil +} + +// A Dictionary is a source of translations for a single language. +type Dictionary interface { + // Lookup returns a message compiled with catmsg.Compile for the given key. + // It returns false for ok if such a message could not be found. + Lookup(key string) (data string, ok bool) +} + +type catalog struct { + langs []language.Tag + dicts map[language.Tag]Dictionary + macros store + matcher language.Matcher +} + +func (c *catalog) Languages() []language.Tag { return c.langs } +func (c *catalog) Matcher() language.Matcher { return c.matcher } + +func (c *catalog) lookup(tag language.Tag, key string) (data string, ok bool) { + for ; ; tag = tag.Parent() { + if dict, ok := c.dicts[tag]; ok { + if data, ok := dict.Lookup(key); ok { + return data, true + } + } + if tag == language.Und { + break + } + } + return "", false +} + +// Context returns a Context for formatting messages. +// Only one Message may be formatted per context at any given time. +func (c *catalog) Context(tag language.Tag, r catmsg.Renderer) *Context { + return &Context{ + cat: c, + tag: tag, + dec: catmsg.NewDecoder(tag, r, &dict{&c.macros, tag}), + } +} + +// A Builder allows building a Catalog programmatically. +type Builder struct { + options + matcher language.Matcher + + index store + macros store +} + +type options struct { + fallback language.Tag +} + +// An Option configures Catalog behavior. +type Option func(*options) + +// Fallback specifies the default fallback language. The default is Und. +func Fallback(tag language.Tag) Option { + return func(o *options) { o.fallback = tag } +} + +// TODO: +// // Catalogs specifies one or more sources for a Catalog. +// // Lookups are in order. +// // This can be changed by inserting a Catalog used for setting, which implements +// // Loader, used for setting in the chain. +// func Catalogs(d ...Loader) Option { +// return nil +// } +// +// func Delims(start, end string) Option {} +// +// func Dict(tag language.Tag, d ...Dictionary) Option + +// NewBuilder returns an empty mutable Catalog. +func NewBuilder(opts ...Option) *Builder { + c := &Builder{} + for _, o := range opts { + o(&c.options) + } + return c +} + +// SetString is shorthand for Set(tag, key, String(msg)). +func (c *Builder) SetString(tag language.Tag, key string, msg string) error { + return c.set(tag, key, &c.index, String(msg)) +} + +// Set sets the translation for the given language and key.
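+// +// A minimal use, assuming a Builder c (String is this package's plain +// message type): +// +// c.Set(language.French, "Hello world", String("Bonjour le monde"))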
+// +// When evaluating this message, the first Message in msgs to +// evaluate to a string will be the message returned. +func (c *Builder) Set(tag language.Tag, key string, msg ...Message) error { + return c.set(tag, key, &c.index, msg...) +} + +// SetMacro defines a Message that may be substituted in another message. +// The arguments to a macro Message are passed as arguments in the +// placeholder of the form "${foo(arg1, arg2)}". +func (c *Builder) SetMacro(tag language.Tag, name string, msg ...Message) error { + return c.set(tag, name, &c.macros, msg...) +} + +// ErrNotFound indicates there was no message for the given key. +var ErrNotFound = errors.New("catalog: message not found") + +// String specifies a plain message string. It can be used as a fallback if no +// other strings match or as a simple standalone message. +// +// It is an error to pass more than one String in a message sequence. +func String(name string) Message { + return catmsg.String(name) +} + +// Var sets a variable that may be substituted in formatting patterns using +// named substitution of the form "${name}". The name argument is used as a +// fallback if the statements do not produce a match. The statement sequence may +// not contain any Var calls. +// +// The name passed to a Var must be unique within a message sequence. +func Var(name string, msg ...Message) Message { + return &catmsg.Var{Name: name, Message: firstInSequence(msg)} +} + +// Context returns a Context for formatting messages. +// Only one Message may be formatted per context at any given time. +func (b *Builder) Context(tag language.Tag, r catmsg.Renderer) *Context { + return &Context{ + cat: b, + tag: tag, + dec: catmsg.NewDecoder(tag, r, &dict{&b.macros, tag}), + } +} + +// A Context is used for evaluating Messages. +// Only one Message may be formatted per context at any given time. +type Context struct { + cat Catalog + tag language.Tag // TODO: use compact index. + dec *catmsg.Decoder +} + +// Execute looks up and executes the message with the given key. +// It returns ErrNotFound if no message could be found in the index. +func (c *Context) Execute(key string) error { + data, ok := c.cat.lookup(c.tag, key) + if !ok { + return ErrNotFound + } + return c.dec.Execute(data) +} diff --git a/hack/tools/vendor/golang.org/x/text/message/catalog/dict.go b/hack/tools/vendor/golang.org/x/text/message/catalog/dict.go new file mode 100644 index 0000000000..a0eb81810b --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/message/catalog/dict.go @@ -0,0 +1,129 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package catalog + +import ( + "sync" + + "golang.org/x/text/internal" + "golang.org/x/text/internal/catmsg" + "golang.org/x/text/language" +) + +// TODO: +// Dictionary returns a Dictionary that returns the first Message, using the +// given language tag, that matches: +// 1. the last one registered by one of the Set methods +// 2. returned by one of the Loaders +// 3. repeat from 1. using the parent language +// This approach allows messages to be underspecified. +// func (c *Catalog) Dictionary(tag language.Tag) (Dictionary, error) { +// // TODO: verify dictionary exists. +// return &dict{&c.index, tag}, nil +// } + +type dict struct { + s *store + tag language.Tag // TODO: make compact tag.
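+ // A dict scopes lookups in s to tag, falling back to the tag's parents.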
+} + +func (d *dict) Lookup(key string) (data string, ok bool) { + return d.s.lookup(d.tag, key) +} + +func (b *Builder) lookup(tag language.Tag, key string) (data string, ok bool) { + return b.index.lookup(tag, key) +} + +func (c *Builder) set(tag language.Tag, key string, s *store, msg ...Message) error { + data, err := catmsg.Compile(tag, &dict{&c.macros, tag}, firstInSequence(msg)) + + s.mutex.Lock() + defer s.mutex.Unlock() + + m := s.index[tag] + if m == nil { + m = msgMap{} + if s.index == nil { + s.index = map[language.Tag]msgMap{} + } + c.matcher = nil + s.index[tag] = m + } + + m[key] = data + return err +} + +func (c *Builder) Matcher() language.Matcher { + c.index.mutex.RLock() + m := c.matcher + c.index.mutex.RUnlock() + if m != nil { + return m + } + + c.index.mutex.Lock() + if c.matcher == nil { + c.matcher = language.NewMatcher(c.unlockedLanguages()) + } + m = c.matcher + c.index.mutex.Unlock() + return m +} + +type store struct { + mutex sync.RWMutex + index map[language.Tag]msgMap +} + +type msgMap map[string]string + +func (s *store) lookup(tag language.Tag, key string) (data string, ok bool) { + s.mutex.RLock() + defer s.mutex.RUnlock() + + for ; ; tag = tag.Parent() { + if msgs, ok := s.index[tag]; ok { + if msg, ok := msgs[key]; ok { + return msg, true + } + } + if tag == language.Und { + break + } + } + return "", false +} + +// Languages returns all languages for which the Catalog contains variants. +func (b *Builder) Languages() []language.Tag { + s := &b.index + s.mutex.RLock() + defer s.mutex.RUnlock() + + return b.unlockedLanguages() +} + +func (b *Builder) unlockedLanguages() []language.Tag { + s := &b.index + if len(s.index) == 0 { + return nil + } + tags := make([]language.Tag, 0, len(s.index)) + _, hasFallback := s.index[b.options.fallback] + offset := 0 + if hasFallback { + tags = append(tags, b.options.fallback) + offset = 1 + } + for t := range s.index { + if t != b.options.fallback { + tags = append(tags, t) + } + } + internal.SortTags(tags[offset:]) + return tags +} diff --git a/hack/tools/vendor/golang.org/x/text/message/catalog/go19.go b/hack/tools/vendor/golang.org/x/text/message/catalog/go19.go new file mode 100644 index 0000000000..291a4df949 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/message/catalog/go19.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.9 + +package catalog + +import "golang.org/x/text/internal/catmsg" + +// A Message holds a collection of translations for the same phrase that may +// vary based on the values of substitution arguments. +type Message = catmsg.Message + +type firstInSequence = catmsg.FirstOf diff --git a/hack/tools/vendor/golang.org/x/text/message/catalog/gopre19.go b/hack/tools/vendor/golang.org/x/text/message/catalog/gopre19.go new file mode 100644 index 0000000000..da44ebb8be --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/message/catalog/gopre19.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.9 + +package catalog + +import "golang.org/x/text/internal/catmsg" + +// A Message holds a collection of translations for the same phrase that may +// vary based on the values of substitution arguments. 
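+// +// Before Go 1.9, which introduced type aliases, Message is declared as an +// interface embedding catmsg.Message; go19.go above defines it as an alias +// instead.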
+type Message interface {
+	catmsg.Message
+}
+
+func firstInSequence(m []Message) catmsg.Message {
+	a := []catmsg.Message{}
+	for _, m := range m {
+		a = append(a, m)
+	}
+	return catmsg.FirstOf(a)
+}
diff --git a/hack/tools/vendor/golang.org/x/text/message/doc.go b/hack/tools/vendor/golang.org/x/text/message/doc.go
new file mode 100644
index 0000000000..4bf7bdcac3
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/text/message/doc.go
@@ -0,0 +1,99 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package message implements formatted I/O for localized strings with functions
+// analogous to those of the fmt package. It is a drop-in replacement for fmt.
+//
+// # Localized Formatting
+//
+// A format string can be localized by replacing any of the print functions of
+// fmt with an equivalent call to a Printer.
+//
+//	p := message.NewPrinter(message.MatchLanguage("en"))
+//	p.Println(123456.78) // Prints 123,456.78
+//
+//	p.Printf("%d ducks in a row", 4331) // Prints 4,331 ducks in a row
+//
+//	p := message.NewPrinter(message.MatchLanguage("nl"))
+//	p.Printf("Hoogte: %.1f meter", 1244.9) // Prints Hoogte: 1,244.9 meter
+//
+//	p := message.NewPrinter(message.MatchLanguage("bn"))
+//	p.Println(123456.78) // Prints ১,২৩,৪৫৬.৭৮
+//
+// Printer currently supports numbers and specialized types for which packages
+// exist in x/text. Other builtin types such as time.Time and slices are
+// planned.
+//
+// Format strings largely have the same meaning as with fmt with the following
+// notable exceptions:
+//   - flag # always resorts to fmt for printing
+//   - verb 'f', 'e', 'g', 'd' use localized formatting unless the '#' flag is
+//     specified.
+//   - verb 'm' inserts a translation of a string argument.
+//
+// See package fmt for more options.
+//
+// # Translation
+//
+// The format strings that are passed to Printf, Sprintf, Fprintf, or Errorf
+// are used as keys to look up translations for the specified languages.
+// More on how these need to be specified below.
+//
+// One can use arbitrary keys to distinguish between otherwise ambiguous
+// strings:
+//
+//	p := message.NewPrinter(language.English)
+//	p.Printf("archive(noun)") // Prints "archive"
+//	p.Printf("archive(verb)") // Prints "archive"
+//
+//	p := message.NewPrinter(language.German)
+//	p.Printf("archive(noun)") // Prints "Archiv"
+//	p.Printf("archive(verb)") // Prints "archivieren"
+//
+// To retain the fallback functionality, use Key:
+//
+//	p.Printf(message.Key("archive(noun)", "archive"))
+//	p.Printf(message.Key("archive(verb)", "archive"))
+//
+// # Translation Pipeline
+//
+// Format strings that contain text need to be translated to support different
+// locales. The first step is to extract strings that need to be translated.
+//
+// 1. Install gotext
+//
+//	go get -u golang.org/x/text/cmd/gotext
+//	gotext -help
+//
+// 2. Mark strings in your source to be translated by using message.Printer
+// instead of the functions of the fmt package.
+//
+// 3. Extract the strings from your source
+//
+//	gotext extract
+//
+// The output will be written to the textdata directory.
+//
+// 4. Send the files for translation
+//
+// It is planned to support multiple formats, but for now one will have to
+// rewrite the JSON output to the desired format.
+//
+// 5. Inject translations into program
+//
+// 6. Repeat from 2
+//
+// Right now this has to be done programmatically with calls to Set or
+// SetString. These functions, as well as the methods defined in the
+// golang.org/x/text/message/catalog package, can be used to implement
+// either dynamic or static loading of messages.
+//
+// # Plural and Gender Forms
+//
+// Translated messages can vary based on the plural and gender forms of
+// substitution values. In general, it is up to the translators to provide
+// alternative translations for such forms. See the packages in
+// golang.org/x/text/feature and golang.org/x/text/message/catalog for more
+// information.
+package message
diff --git a/hack/tools/vendor/golang.org/x/text/message/format.go b/hack/tools/vendor/golang.org/x/text/message/format.go
new file mode 100644
index 0000000000..a47d17dd4d
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/text/message/format.go
@@ -0,0 +1,510 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"bytes"
+	"strconv"
+	"unicode/utf8"
+
+	"golang.org/x/text/internal/format"
+)
+
+const (
+	ldigits = "0123456789abcdefx"
+	udigits = "0123456789ABCDEFX"
+)
+
+const (
+	signed   = true
+	unsigned = false
+)
+
+// A formatInfo is the raw formatter used by Printf etc.
+// It prints into a buffer that must be set up separately.
+type formatInfo struct {
+	buf *bytes.Buffer
+
+	format.Parser
+
+	// intbuf is large enough to store %b of an int64 with a sign and
+	// avoids padding at the end of the struct on 32 bit architectures.
+	intbuf [68]byte
+}
+
+func (f *formatInfo) init(buf *bytes.Buffer) {
+	f.ClearFlags()
+	f.buf = buf
+}
+
+// writePadding generates n bytes of padding.
+func (f *formatInfo) writePadding(n int) {
+	if n <= 0 { // No padding bytes needed.
+		return
+	}
+	f.buf.Grow(n)
+	// Decide which byte the padding should be filled with.
+	padByte := byte(' ')
+	if f.Zero {
+		padByte = byte('0')
+	}
+	// Fill padding with padByte.
+	for i := 0; i < n; i++ {
+		f.buf.WriteByte(padByte) // TODO: make more efficient.
+	}
+}
+
+// pad appends b to f.buf, padded on left (!f.minus) or right (f.minus).
+func (f *formatInfo) pad(b []byte) {
+	if !f.WidthPresent || f.Width == 0 {
+		f.buf.Write(b)
+		return
+	}
+	width := f.Width - utf8.RuneCount(b)
+	if !f.Minus {
+		// left padding
+		f.writePadding(width)
+		f.buf.Write(b)
+	} else {
+		// right padding
+		f.buf.Write(b)
+		f.writePadding(width)
+	}
+}
+
+// padString appends s to f.buf, padded on left (!f.minus) or right (f.minus).
+func (f *formatInfo) padString(s string) {
+	if !f.WidthPresent || f.Width == 0 {
+		f.buf.WriteString(s)
+		return
+	}
+	width := f.Width - utf8.RuneCountInString(s)
+	if !f.Minus {
+		// left padding
+		f.writePadding(width)
+		f.buf.WriteString(s)
+	} else {
+		// right padding
+		f.buf.WriteString(s)
+		f.writePadding(width)
+	}
+}
+
+// fmt_boolean formats a boolean.
+func (f *formatInfo) fmt_boolean(v bool) {
+	if v {
+		f.padString("true")
+	} else {
+		f.padString("false")
+	}
+}
+
+// fmt_unicode formats a uint64 as "U+0078" or with f.sharp set as "U+0078 'x'".
+func (f *formatInfo) fmt_unicode(u uint64) {
+	buf := f.intbuf[0:]
+
+	// With default precision set the maximum needed buf length is 18
+	// for formatting -1 with %#U ("U+FFFFFFFFFFFFFFFF") which fits
+	// into the already allocated intbuf with a capacity of 68 bytes.
+ prec := 4 + if f.PrecPresent && f.Prec > 4 { + prec = f.Prec + // Compute space needed for "U+" , number, " '", character, "'". + width := 2 + prec + 2 + utf8.UTFMax + 1 + if width > len(buf) { + buf = make([]byte, width) + } + } + + // Format into buf, ending at buf[i]. Formatting numbers is easier right-to-left. + i := len(buf) + + // For %#U we want to add a space and a quoted character at the end of the buffer. + if f.Sharp && u <= utf8.MaxRune && strconv.IsPrint(rune(u)) { + i-- + buf[i] = '\'' + i -= utf8.RuneLen(rune(u)) + utf8.EncodeRune(buf[i:], rune(u)) + i-- + buf[i] = '\'' + i-- + buf[i] = ' ' + } + // Format the Unicode code point u as a hexadecimal number. + for u >= 16 { + i-- + buf[i] = udigits[u&0xF] + prec-- + u >>= 4 + } + i-- + buf[i] = udigits[u] + prec-- + // Add zeros in front of the number until requested precision is reached. + for prec > 0 { + i-- + buf[i] = '0' + prec-- + } + // Add a leading "U+". + i-- + buf[i] = '+' + i-- + buf[i] = 'U' + + oldZero := f.Zero + f.Zero = false + f.pad(buf[i:]) + f.Zero = oldZero +} + +// fmt_integer formats signed and unsigned integers. +func (f *formatInfo) fmt_integer(u uint64, base int, isSigned bool, digits string) { + negative := isSigned && int64(u) < 0 + if negative { + u = -u + } + + buf := f.intbuf[0:] + // The already allocated f.intbuf with a capacity of 68 bytes + // is large enough for integer formatting when no precision or width is set. + if f.WidthPresent || f.PrecPresent { + // Account 3 extra bytes for possible addition of a sign and "0x". + width := 3 + f.Width + f.Prec // wid and prec are always positive. + if width > len(buf) { + // We're going to need a bigger boat. + buf = make([]byte, width) + } + } + + // Two ways to ask for extra leading zero digits: %.3d or %03d. + // If both are specified the f.zero flag is ignored and + // padding with spaces is used instead. + prec := 0 + if f.PrecPresent { + prec = f.Prec + // Precision of 0 and value of 0 means "print nothing" but padding. + if prec == 0 && u == 0 { + oldZero := f.Zero + f.Zero = false + f.writePadding(f.Width) + f.Zero = oldZero + return + } + } else if f.Zero && f.WidthPresent { + prec = f.Width + if negative || f.Plus || f.Space { + prec-- // leave room for sign + } + } + + // Because printing is easier right-to-left: format u into buf, ending at buf[i]. + // We could make things marginally faster by splitting the 32-bit case out + // into a separate block but it's not worth the duplication, so u has 64 bits. + i := len(buf) + // Use constants for the division and modulo for more efficient code. + // Switch cases ordered by popularity. + switch base { + case 10: + for u >= 10 { + i-- + next := u / 10 + buf[i] = byte('0' + u - next*10) + u = next + } + case 16: + for u >= 16 { + i-- + buf[i] = digits[u&0xF] + u >>= 4 + } + case 8: + for u >= 8 { + i-- + buf[i] = byte('0' + u&7) + u >>= 3 + } + case 2: + for u >= 2 { + i-- + buf[i] = byte('0' + u&1) + u >>= 1 + } + default: + panic("fmt: unknown base; can't happen") + } + i-- + buf[i] = digits[u] + for i > 0 && prec > len(buf)-i { + i-- + buf[i] = '0' + } + + // Various prefixes: 0x, -, etc. + if f.Sharp { + switch base { + case 8: + if buf[i] != '0' { + i-- + buf[i] = '0' + } + case 16: + // Add a leading 0x or 0X. 
+ i-- + buf[i] = digits[16] + i-- + buf[i] = '0' + } + } + + if negative { + i-- + buf[i] = '-' + } else if f.Plus { + i-- + buf[i] = '+' + } else if f.Space { + i-- + buf[i] = ' ' + } + + // Left padding with zeros has already been handled like precision earlier + // or the f.zero flag is ignored due to an explicitly set precision. + oldZero := f.Zero + f.Zero = false + f.pad(buf[i:]) + f.Zero = oldZero +} + +// truncate truncates the string to the specified precision, if present. +func (f *formatInfo) truncate(s string) string { + if f.PrecPresent { + n := f.Prec + for i := range s { + n-- + if n < 0 { + return s[:i] + } + } + } + return s +} + +// fmt_s formats a string. +func (f *formatInfo) fmt_s(s string) { + s = f.truncate(s) + f.padString(s) +} + +// fmt_sbx formats a string or byte slice as a hexadecimal encoding of its bytes. +func (f *formatInfo) fmt_sbx(s string, b []byte, digits string) { + length := len(b) + if b == nil { + // No byte slice present. Assume string s should be encoded. + length = len(s) + } + // Set length to not process more bytes than the precision demands. + if f.PrecPresent && f.Prec < length { + length = f.Prec + } + // Compute width of the encoding taking into account the f.sharp and f.space flag. + width := 2 * length + if width > 0 { + if f.Space { + // Each element encoded by two hexadecimals will get a leading 0x or 0X. + if f.Sharp { + width *= 2 + } + // Elements will be separated by a space. + width += length - 1 + } else if f.Sharp { + // Only a leading 0x or 0X will be added for the whole string. + width += 2 + } + } else { // The byte slice or string that should be encoded is empty. + if f.WidthPresent { + f.writePadding(f.Width) + } + return + } + // Handle padding to the left. + if f.WidthPresent && f.Width > width && !f.Minus { + f.writePadding(f.Width - width) + } + // Write the encoding directly into the output buffer. + buf := f.buf + if f.Sharp { + // Add leading 0x or 0X. + buf.WriteByte('0') + buf.WriteByte(digits[16]) + } + var c byte + for i := 0; i < length; i++ { + if f.Space && i > 0 { + // Separate elements with a space. + buf.WriteByte(' ') + if f.Sharp { + // Add leading 0x or 0X for each element. + buf.WriteByte('0') + buf.WriteByte(digits[16]) + } + } + if b != nil { + c = b[i] // Take a byte from the input byte slice. + } else { + c = s[i] // Take a byte from the input string. + } + // Encode each byte as two hexadecimal digits. + buf.WriteByte(digits[c>>4]) + buf.WriteByte(digits[c&0xF]) + } + // Handle padding to the right. + if f.WidthPresent && f.Width > width && f.Minus { + f.writePadding(f.Width - width) + } +} + +// fmt_sx formats a string as a hexadecimal encoding of its bytes. +func (f *formatInfo) fmt_sx(s, digits string) { + f.fmt_sbx(s, nil, digits) +} + +// fmt_bx formats a byte slice as a hexadecimal encoding of its bytes. +func (f *formatInfo) fmt_bx(b []byte, digits string) { + f.fmt_sbx("", b, digits) +} + +// fmt_q formats a string as a double-quoted, escaped Go string constant. +// If f.sharp is set a raw (backquoted) string may be returned instead +// if the string does not contain any control characters other than tab. +func (f *formatInfo) fmt_q(s string) { + s = f.truncate(s) + if f.Sharp && strconv.CanBackquote(s) { + f.padString("`" + s + "`") + return + } + buf := f.intbuf[:0] + if f.Plus { + f.pad(strconv.AppendQuoteToASCII(buf, s)) + } else { + f.pad(strconv.AppendQuote(buf, s)) + } +} + +// fmt_c formats an integer as a Unicode character. 
+// If the character is not valid Unicode, it will print '\ufffd'. +func (f *formatInfo) fmt_c(c uint64) { + r := rune(c) + if c > utf8.MaxRune { + r = utf8.RuneError + } + buf := f.intbuf[:0] + w := utf8.EncodeRune(buf[:utf8.UTFMax], r) + f.pad(buf[:w]) +} + +// fmt_qc formats an integer as a single-quoted, escaped Go character constant. +// If the character is not valid Unicode, it will print '\ufffd'. +func (f *formatInfo) fmt_qc(c uint64) { + r := rune(c) + if c > utf8.MaxRune { + r = utf8.RuneError + } + buf := f.intbuf[:0] + if f.Plus { + f.pad(strconv.AppendQuoteRuneToASCII(buf, r)) + } else { + f.pad(strconv.AppendQuoteRune(buf, r)) + } +} + +// fmt_float formats a float64. It assumes that verb is a valid format specifier +// for strconv.AppendFloat and therefore fits into a byte. +func (f *formatInfo) fmt_float(v float64, size int, verb rune, prec int) { + // Explicit precision in format specifier overrules default precision. + if f.PrecPresent { + prec = f.Prec + } + // Format number, reserving space for leading + sign if needed. + num := strconv.AppendFloat(f.intbuf[:1], v, byte(verb), prec, size) + if num[1] == '-' || num[1] == '+' { + num = num[1:] + } else { + num[0] = '+' + } + // f.space means to add a leading space instead of a "+" sign unless + // the sign is explicitly asked for by f.plus. + if f.Space && num[0] == '+' && !f.Plus { + num[0] = ' ' + } + // Special handling for infinities and NaN, + // which don't look like a number so shouldn't be padded with zeros. + if num[1] == 'I' || num[1] == 'N' { + oldZero := f.Zero + f.Zero = false + // Remove sign before NaN if not asked for. + if num[1] == 'N' && !f.Space && !f.Plus { + num = num[1:] + } + f.pad(num) + f.Zero = oldZero + return + } + // The sharp flag forces printing a decimal point for non-binary formats + // and retains trailing zeros, which we may need to restore. + if f.Sharp && verb != 'b' { + digits := 0 + switch verb { + case 'v', 'g', 'G': + digits = prec + // If no precision is set explicitly use a precision of 6. + if digits == -1 { + digits = 6 + } + } + + // Buffer pre-allocated with enough room for + // exponent notations of the form "e+123". + var tailBuf [5]byte + tail := tailBuf[:0] + + hasDecimalPoint := false + // Starting from i = 1 to skip sign at num[0]. + for i := 1; i < len(num); i++ { + switch num[i] { + case '.': + hasDecimalPoint = true + case 'e', 'E': + tail = append(tail, num[i:]...) + num = num[:i] + default: + digits-- + } + } + if !hasDecimalPoint { + num = append(num, '.') + } + for digits > 0 { + num = append(num, '0') + digits-- + } + num = append(num, tail...) + } + // We want a sign if asked for and if the sign is not positive. + if f.Plus || num[0] != '+' { + // If we're zero padding to the left we want the sign before the leading zeros. + // Achieve this by writing the sign out and then padding the unsigned number. + if f.Zero && f.WidthPresent && f.Width > len(num) { + f.buf.WriteByte(num[0]) + f.writePadding(f.Width - len(num)) + f.buf.Write(num[1:]) + return + } + f.pad(num) + return + } + // No sign to show and the number is positive; just print the unsigned number. + f.pad(num[1:]) +} diff --git a/hack/tools/vendor/golang.org/x/text/message/message.go b/hack/tools/vendor/golang.org/x/text/message/message.go new file mode 100644 index 0000000000..91a9726421 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/text/message/message.go @@ -0,0 +1,192 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package message // import "golang.org/x/text/message" + +import ( + "io" + "os" + + // Include features to facilitate generated catalogs. + _ "golang.org/x/text/feature/plural" + + "golang.org/x/text/internal/number" + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// A Printer implements language-specific formatted I/O analogous to the fmt +// package. +type Printer struct { + // the language + tag language.Tag + + toDecimal number.Formatter + toScientific number.Formatter + + cat catalog.Catalog +} + +type options struct { + cat catalog.Catalog + // TODO: + // - allow %s to print integers in written form (tables are likely too large + // to enable this by default). + // - list behavior + // +} + +// An Option defines an option of a Printer. +type Option func(o *options) + +// Catalog defines the catalog to be used. +func Catalog(c catalog.Catalog) Option { + return func(o *options) { o.cat = c } +} + +// NewPrinter returns a Printer that formats messages tailored to language t. +func NewPrinter(t language.Tag, opts ...Option) *Printer { + options := &options{ + cat: DefaultCatalog, + } + for _, o := range opts { + o(options) + } + p := &Printer{ + tag: t, + cat: options.cat, + } + p.toDecimal.InitDecimal(t) + p.toScientific.InitScientific(t) + return p +} + +// Sprint is like fmt.Sprint, but using language-specific formatting. +func (p *Printer) Sprint(a ...interface{}) string { + pp := newPrinter(p) + pp.doPrint(a) + s := pp.String() + pp.free() + return s +} + +// Fprint is like fmt.Fprint, but using language-specific formatting. +func (p *Printer) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + pp.doPrint(a) + n64, err := io.Copy(w, &pp.Buffer) + pp.free() + return int(n64), err +} + +// Print is like fmt.Print, but using language-specific formatting. +func (p *Printer) Print(a ...interface{}) (n int, err error) { + return p.Fprint(os.Stdout, a...) +} + +// Sprintln is like fmt.Sprintln, but using language-specific formatting. +func (p *Printer) Sprintln(a ...interface{}) string { + pp := newPrinter(p) + pp.doPrintln(a) + s := pp.String() + pp.free() + return s +} + +// Fprintln is like fmt.Fprintln, but using language-specific formatting. +func (p *Printer) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + pp.doPrintln(a) + n64, err := io.Copy(w, &pp.Buffer) + pp.free() + return int(n64), err +} + +// Println is like fmt.Println, but using language-specific formatting. +func (p *Printer) Println(a ...interface{}) (n int, err error) { + return p.Fprintln(os.Stdout, a...) +} + +// Sprintf is like fmt.Sprintf, but using language-specific formatting. +func (p *Printer) Sprintf(key Reference, a ...interface{}) string { + pp := newPrinter(p) + lookupAndFormat(pp, key, a) + s := pp.String() + pp.free() + return s +} + +// Fprintf is like fmt.Fprintf, but using language-specific formatting. +func (p *Printer) Fprintf(w io.Writer, key Reference, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + lookupAndFormat(pp, key, a) + n, err = w.Write(pp.Bytes()) + pp.free() + return n, err + +} + +// Printf is like fmt.Printf, but using language-specific formatting. 
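+//
+// A minimal sketch (editor's illustration; language.English stands in for
+// whatever tag the printer's catalog was built with):
+//
+//	p := message.NewPrinter(language.English)
+//	p.Printf("%d tasks remaining", 12345) // prints 12,345 tasks remaining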
+func (p *Printer) Printf(key Reference, a ...interface{}) (n int, err error) {
+	pp := newPrinter(p)
+	lookupAndFormat(pp, key, a)
+	n, err = os.Stdout.Write(pp.Bytes())
+	pp.free()
+	return n, err
+}
+
+func lookupAndFormat(p *printer, r Reference, a []interface{}) {
+	p.fmt.Reset(a)
+	switch v := r.(type) {
+	case string:
+		if p.catContext.Execute(v) == catalog.ErrNotFound {
+			p.Render(v)
+			return
+		}
+	case key:
+		if p.catContext.Execute(v.id) == catalog.ErrNotFound &&
+			p.catContext.Execute(v.fallback) == catalog.ErrNotFound {
+			p.Render(v.fallback)
+			return
+		}
+	default:
+		panic("key argument is not a Reference")
+	}
+}
+
+type rawPrinter struct {
+	p *printer
+}
+
+func (p rawPrinter) Render(msg string)     { p.p.WriteString(msg) }
+func (p rawPrinter) Arg(i int) interface{} { return nil }
+
+// Arg implements catmsg.Renderer.
+func (p *printer) Arg(i int) interface{} { // TODO, also return "ok" bool
+	i--
+	if uint(i) < uint(len(p.fmt.Args)) {
+		return p.fmt.Args[i]
+	}
+	return nil
+}
+
+// Render implements catmsg.Renderer.
+func (p *printer) Render(msg string) {
+	p.doPrintf(msg)
+}
+
+// A Reference is a string or a message reference.
+type Reference interface {
+	// TODO: also allow []string
+}
+
+// Key creates a message Reference for a message where the given id is used for
+// message lookup and the fallback is returned when no matches are found.
+func Key(id string, fallback string) Reference {
+	return key{id, fallback}
+}
+
+type key struct {
+	id, fallback string
+}
diff --git a/hack/tools/vendor/golang.org/x/text/message/print.go b/hack/tools/vendor/golang.org/x/text/message/print.go
new file mode 100644
index 0000000000..da304cc0ed
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/text/message/print.go
@@ -0,0 +1,984 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"bytes"
+	"fmt" // TODO: consider copying interfaces from package fmt to avoid dependency.
+	"math"
+	"reflect"
+	"sync"
+	"unicode/utf8"
+
+	"golang.org/x/text/internal/format"
+	"golang.org/x/text/internal/number"
+	"golang.org/x/text/language"
+	"golang.org/x/text/message/catalog"
+)
+
+// Strings for use with buffer.WriteString.
+// This is less overhead than using buffer.Write with byte arrays.
+const (
+	commaSpaceString  = ", "
+	nilAngleString    = "<nil>"
+	nilParenString    = "(nil)"
+	nilString         = "nil"
+	mapString         = "map["
+	percentBangString = "%!"
+	missingString     = "(MISSING)"
+	badIndexString    = "(BADINDEX)"
+	panicString       = "(PANIC="
+	extraString       = "%!(EXTRA "
+	badWidthString    = "%!(BADWIDTH)"
+	badPrecString     = "%!(BADPREC)"
+	noVerbString      = "%!(NOVERB)"
+
+	invReflectString = "<invalid reflect.Value>"
+)
+
+var printerPool = sync.Pool{
+	New: func() interface{} { return new(printer) },
+}
+
+// newPrinter allocates a new printer struct or grabs a cached one.
+func newPrinter(pp *Printer) *printer {
+	p := printerPool.Get().(*printer)
+	p.Printer = *pp
+	// TODO: cache most of the following call.
+	p.catContext = pp.cat.Context(pp.tag, p)
+
+	p.panicking = false
+	p.erroring = false
+	p.fmt.init(&p.Buffer)
+	return p
+}
+
+// free saves used printer structs in printerPool; avoids an allocation per invocation.
+func (p *printer) free() {
+	p.Buffer.Reset()
+	p.arg = nil
+	p.value = reflect.Value{}
+	printerPool.Put(p)
+}
+
+// printer is used to store a printer's state.
+// It implements "golang.org/x/text/internal/format".State.
+type printer struct {
+	Printer
+
+	// the context for looking up message translations
+	catContext *catalog.Context
+
+	// buffer for accumulating output.
+	bytes.Buffer
+
+	// arg holds the current item, as an interface{}.
+	arg interface{}
+	// value is used instead of arg for reflect values.
+	value reflect.Value
+
+	// fmt is used to format basic items such as integers or strings.
+	fmt formatInfo
+
+	// panicking is set by catchPanic to avoid infinite panic, recover, panic, ... recursion.
+	panicking bool
+	// erroring is set when printing an error string to guard against calling handleMethods.
+	erroring bool
+}
+
+// Language implements "golang.org/x/text/internal/format".State.
+func (p *printer) Language() language.Tag { return p.tag }
+
+func (p *printer) Width() (wid int, ok bool) { return p.fmt.Width, p.fmt.WidthPresent }
+
+func (p *printer) Precision() (prec int, ok bool) { return p.fmt.Prec, p.fmt.PrecPresent }
+
+func (p *printer) Flag(b int) bool {
+	switch b {
+	case '-':
+		return p.fmt.Minus
+	case '+':
+		return p.fmt.Plus || p.fmt.PlusV
+	case '#':
+		return p.fmt.Sharp || p.fmt.SharpV
+	case ' ':
+		return p.fmt.Space
+	case '0':
+		return p.fmt.Zero
+	}
+	return false
+}
+
+// getField gets the i'th field of the struct value.
+// If the field is itself an interface, return a value for
+// the thing inside the interface, not the interface itself.
+func getField(v reflect.Value, i int) reflect.Value {
+	val := v.Field(i)
+	if val.Kind() == reflect.Interface && !val.IsNil() {
+		val = val.Elem()
+	}
+	return val
+}
+
+func (p *printer) unknownType(v reflect.Value) {
+	if !v.IsValid() {
+		p.WriteString(nilAngleString)
+		return
+	}
+	p.WriteByte('?')
+	p.WriteString(v.Type().String())
+	p.WriteByte('?')
+}
+
+func (p *printer) badVerb(verb rune) {
+	p.erroring = true
+	p.WriteString(percentBangString)
+	p.WriteRune(verb)
+	p.WriteByte('(')
+	switch {
+	case p.arg != nil:
+		p.WriteString(reflect.TypeOf(p.arg).String())
+		p.WriteByte('=')
+		p.printArg(p.arg, 'v')
+	case p.value.IsValid():
+		p.WriteString(p.value.Type().String())
+		p.WriteByte('=')
+		p.printValue(p.value, 'v', 0)
+	default:
+		p.WriteString(nilAngleString)
+	}
+	p.WriteByte(')')
+	p.erroring = false
+}
+
+func (p *printer) fmtBool(v bool, verb rune) {
+	switch verb {
+	case 't', 'v':
+		p.fmt.fmt_boolean(v)
+	default:
+		p.badVerb(verb)
+	}
+}
+
+// fmt0x64 formats a uint64 in hexadecimal and prefixes it with 0x or
+// not, as requested, by temporarily setting the sharp flag.
+func (p *printer) fmt0x64(v uint64, leading0x bool) {
+	sharp := p.fmt.Sharp
+	p.fmt.Sharp = leading0x
+	p.fmt.fmt_integer(v, 16, unsigned, ldigits)
+	p.fmt.Sharp = sharp
+}
+
+// fmtInteger formats a signed or unsigned integer.
+func (p *printer) fmtInteger(v uint64, isSigned bool, verb rune) {
+	switch verb {
+	case 'v':
+		if p.fmt.SharpV && !isSigned {
+			p.fmt0x64(v, true)
+			return
+		}
+		fallthrough
+	case 'd':
+		if p.fmt.Sharp || p.fmt.SharpV {
+			p.fmt.fmt_integer(v, 10, isSigned, ldigits)
+		} else {
+			p.fmtDecimalInt(v, isSigned)
+		}
+	case 'b':
+		p.fmt.fmt_integer(v, 2, isSigned, ldigits)
+	case 'o':
+		p.fmt.fmt_integer(v, 8, isSigned, ldigits)
+	case 'x':
+		p.fmt.fmt_integer(v, 16, isSigned, ldigits)
+	case 'X':
+		p.fmt.fmt_integer(v, 16, isSigned, udigits)
+	case 'c':
+		p.fmt.fmt_c(v)
+	case 'q':
+		if v <= utf8.MaxRune {
+			p.fmt.fmt_qc(v)
+		} else {
+			p.badVerb(verb)
+		}
+	case 'U':
+		p.fmt.fmt_unicode(v)
+	default:
+		p.badVerb(verb)
+	}
+}
+
+// fmtFloat formats a float.
The default precision for each verb +// is specified as last argument in the call to fmt_float. +func (p *printer) fmtFloat(v float64, size int, verb rune) { + switch verb { + case 'b': + p.fmt.fmt_float(v, size, verb, -1) + case 'v': + verb = 'g' + fallthrough + case 'g', 'G': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_float(v, size, verb, -1) + } else { + p.fmtVariableFloat(v, size) + } + case 'e', 'E': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_float(v, size, verb, 6) + } else { + p.fmtScientific(v, size, 6) + } + case 'f', 'F': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_float(v, size, verb, 6) + } else { + p.fmtDecimalFloat(v, size, 6) + } + default: + p.badVerb(verb) + } +} + +func (p *printer) setFlags(f *number.Formatter) { + f.Flags &^= number.ElideSign + if p.fmt.Plus || p.fmt.Space { + f.Flags |= number.AlwaysSign + if !p.fmt.Plus { + f.Flags |= number.ElideSign + } + } else { + f.Flags &^= number.AlwaysSign + } +} + +func (p *printer) updatePadding(f *number.Formatter) { + f.Flags &^= number.PadMask + if p.fmt.Minus { + f.Flags |= number.PadAfterSuffix + } else { + f.Flags |= number.PadBeforePrefix + } + f.PadRune = ' ' + f.FormatWidth = uint16(p.fmt.Width) +} + +func (p *printer) initDecimal(minFrac, maxFrac int) { + f := &p.toDecimal + f.MinIntegerDigits = 1 + f.MaxIntegerDigits = 0 + f.MinFractionDigits = uint8(minFrac) + f.MaxFractionDigits = int16(maxFrac) + p.setFlags(f) + f.PadRune = 0 + if p.fmt.WidthPresent { + if p.fmt.Zero { + wid := p.fmt.Width + // Use significant integers for this. + // TODO: this is not the same as width, but so be it. + if f.MinFractionDigits > 0 { + wid -= 1 + int(f.MinFractionDigits) + } + if p.fmt.Plus || p.fmt.Space { + wid-- + } + if wid > 0 && wid > int(f.MinIntegerDigits) { + f.MinIntegerDigits = uint8(wid) + } + } + p.updatePadding(f) + } +} + +func (p *printer) initScientific(minFrac, maxFrac int) { + f := &p.toScientific + if maxFrac < 0 { + f.SetPrecision(maxFrac) + } else { + f.SetPrecision(maxFrac + 1) + f.MinFractionDigits = uint8(minFrac) + f.MaxFractionDigits = int16(maxFrac) + } + f.MinExponentDigits = 2 + p.setFlags(f) + f.PadRune = 0 + if p.fmt.WidthPresent { + f.Flags &^= number.PadMask + if p.fmt.Zero { + f.PadRune = f.Digit(0) + f.Flags |= number.PadAfterPrefix + } else { + f.PadRune = ' ' + f.Flags |= number.PadBeforePrefix + } + p.updatePadding(f) + } +} + +func (p *printer) fmtDecimalInt(v uint64, isSigned bool) { + var d number.Decimal + + f := &p.toDecimal + if p.fmt.PrecPresent { + p.setFlags(f) + f.MinIntegerDigits = uint8(p.fmt.Prec) + f.MaxIntegerDigits = 0 + f.MinFractionDigits = 0 + f.MaxFractionDigits = 0 + if p.fmt.WidthPresent { + p.updatePadding(f) + } + } else { + p.initDecimal(0, 0) + } + d.ConvertInt(p.toDecimal.RoundingContext, isSigned, v) + + out := p.toDecimal.Format([]byte(nil), &d) + p.Buffer.Write(out) +} + +func (p *printer) fmtDecimalFloat(v float64, size, prec int) { + var d number.Decimal + if p.fmt.PrecPresent { + prec = p.fmt.Prec + } + p.initDecimal(prec, prec) + d.ConvertFloat(p.toDecimal.RoundingContext, v, size) + + out := p.toDecimal.Format([]byte(nil), &d) + p.Buffer.Write(out) +} + +func (p *printer) fmtVariableFloat(v float64, size int) { + prec := -1 + if p.fmt.PrecPresent { + prec = p.fmt.Prec + } + var d number.Decimal + p.initScientific(0, prec) + d.ConvertFloat(p.toScientific.RoundingContext, v, size) + + // Copy logic of 'g' formatting from strconv. It is simplified a bit as + // we don't have to mind having prec > len(d.Digits). 
+ shortest := prec < 0 + ePrec := prec + if shortest { + prec = len(d.Digits) + ePrec = 6 + } else if prec == 0 { + prec = 1 + ePrec = 1 + } + exp := int(d.Exp) - 1 + if exp < -4 || exp >= ePrec { + p.initScientific(0, prec) + + out := p.toScientific.Format([]byte(nil), &d) + p.Buffer.Write(out) + } else { + if prec > int(d.Exp) { + prec = len(d.Digits) + } + if prec -= int(d.Exp); prec < 0 { + prec = 0 + } + p.initDecimal(0, prec) + + out := p.toDecimal.Format([]byte(nil), &d) + p.Buffer.Write(out) + } +} + +func (p *printer) fmtScientific(v float64, size, prec int) { + var d number.Decimal + if p.fmt.PrecPresent { + prec = p.fmt.Prec + } + p.initScientific(prec, prec) + rc := p.toScientific.RoundingContext + d.ConvertFloat(rc, v, size) + + out := p.toScientific.Format([]byte(nil), &d) + p.Buffer.Write(out) + +} + +// fmtComplex formats a complex number v with +// r = real(v) and j = imag(v) as (r+ji) using +// fmtFloat for r and j formatting. +func (p *printer) fmtComplex(v complex128, size int, verb rune) { + // Make sure any unsupported verbs are found before the + // calls to fmtFloat to not generate an incorrect error string. + switch verb { + case 'v', 'b', 'g', 'G', 'f', 'F', 'e', 'E': + p.WriteByte('(') + p.fmtFloat(real(v), size/2, verb) + // Imaginary part always has a sign. + if math.IsNaN(imag(v)) { + // By CLDR's rules, NaNs do not use patterns or signs. As this code + // relies on AlwaysSign working for imaginary parts, we need to + // manually handle NaNs. + f := &p.toScientific + p.setFlags(f) + p.updatePadding(f) + p.setFlags(f) + nan := f.Symbol(number.SymNan) + extra := 0 + if w, ok := p.Width(); ok { + extra = w - utf8.RuneCountInString(nan) - 1 + } + if f.Flags&number.PadAfterNumber == 0 { + for ; extra > 0; extra-- { + p.WriteRune(f.PadRune) + } + } + p.WriteString(f.Symbol(number.SymPlusSign)) + p.WriteString(nan) + for ; extra > 0; extra-- { + p.WriteRune(f.PadRune) + } + p.WriteString("i)") + return + } + oldPlus := p.fmt.Plus + p.fmt.Plus = true + p.fmtFloat(imag(v), size/2, verb) + p.WriteString("i)") // TODO: use symbol? 
+		p.fmt.Plus = oldPlus
+	default:
+		p.badVerb(verb)
+	}
+}
+
+func (p *printer) fmtString(v string, verb rune) {
+	switch verb {
+	case 'v':
+		if p.fmt.SharpV {
+			p.fmt.fmt_q(v)
+		} else {
+			p.fmt.fmt_s(v)
+		}
+	case 's':
+		p.fmt.fmt_s(v)
+	case 'x':
+		p.fmt.fmt_sx(v, ldigits)
+	case 'X':
+		p.fmt.fmt_sx(v, udigits)
+	case 'q':
+		p.fmt.fmt_q(v)
+	case 'm':
+		ctx := p.cat.Context(p.tag, rawPrinter{p})
+		if ctx.Execute(v) == catalog.ErrNotFound {
+			p.WriteString(v)
+		}
+	default:
+		p.badVerb(verb)
+	}
+}
+
+func (p *printer) fmtBytes(v []byte, verb rune, typeString string) {
+	switch verb {
+	case 'v', 'd':
+		if p.fmt.SharpV {
+			p.WriteString(typeString)
+			if v == nil {
+				p.WriteString(nilParenString)
+				return
+			}
+			p.WriteByte('{')
+			for i, c := range v {
+				if i > 0 {
+					p.WriteString(commaSpaceString)
+				}
+				p.fmt0x64(uint64(c), true)
+			}
+			p.WriteByte('}')
+		} else {
+			p.WriteByte('[')
+			for i, c := range v {
+				if i > 0 {
+					p.WriteByte(' ')
+				}
+				p.fmt.fmt_integer(uint64(c), 10, unsigned, ldigits)
+			}
+			p.WriteByte(']')
+		}
+	case 's':
+		p.fmt.fmt_s(string(v))
+	case 'x':
+		p.fmt.fmt_bx(v, ldigits)
+	case 'X':
+		p.fmt.fmt_bx(v, udigits)
+	case 'q':
+		p.fmt.fmt_q(string(v))
+	default:
+		p.printValue(reflect.ValueOf(v), verb, 0)
+	}
+}
+
+func (p *printer) fmtPointer(value reflect.Value, verb rune) {
+	var u uintptr
+	switch value.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+		u = value.Pointer()
+	default:
+		p.badVerb(verb)
+		return
+	}
+
+	switch verb {
+	case 'v':
+		if p.fmt.SharpV {
+			p.WriteByte('(')
+			p.WriteString(value.Type().String())
+			p.WriteString(")(")
+			if u == 0 {
+				p.WriteString(nilString)
+			} else {
+				p.fmt0x64(uint64(u), true)
+			}
+			p.WriteByte(')')
+		} else {
+			if u == 0 {
+				p.fmt.padString(nilAngleString)
+			} else {
+				p.fmt0x64(uint64(u), !p.fmt.Sharp)
+			}
+		}
+	case 'p':
+		p.fmt0x64(uint64(u), !p.fmt.Sharp)
+	case 'b', 'o', 'd', 'x', 'X':
+		if verb == 'd' {
+			p.fmt.Sharp = true // Print as standard Go. TODO: does this make sense?
+		}
+		p.fmtInteger(uint64(u), unsigned, verb)
+	default:
+		p.badVerb(verb)
+	}
+}
+
+func (p *printer) catchPanic(arg interface{}, verb rune) {
+	if err := recover(); err != nil {
+		// If it's a nil pointer, just say "<nil>". The likeliest causes are a
+		// Stringer that fails to guard against nil or a nil pointer for a
+		// value receiver, and in either case, "<nil>" is a nice result.
+		if v := reflect.ValueOf(arg); v.Kind() == reflect.Ptr && v.IsNil() {
+			p.WriteString(nilAngleString)
+			return
+		}
+		// Otherwise print a concise panic message. Most of the time the panic
+		// value will print itself nicely.
+		if p.panicking {
+			// Nested panics; the recursion in printArg cannot succeed.
+			panic(err)
+		}
+
+		oldFlags := p.fmt.Parser
+		// For this output we want default behavior.
+		p.fmt.ClearFlags()
+
+		p.WriteString(percentBangString)
+		p.WriteRune(verb)
+		p.WriteString(panicString)
+		p.panicking = true
+		p.printArg(err, 'v')
+		p.panicking = false
+		p.WriteByte(')')
+
+		p.fmt.Parser = oldFlags
+	}
+}
+
+func (p *printer) handleMethods(verb rune) (handled bool) {
+	if p.erroring {
+		return
+	}
+	// Is it a Formatter?
+	if formatter, ok := p.arg.(format.Formatter); ok {
+		handled = true
+		defer p.catchPanic(p.arg, verb)
+		formatter.Format(p, verb)
+		return
+	}
+	if formatter, ok := p.arg.(fmt.Formatter); ok {
+		handled = true
+		defer p.catchPanic(p.arg, verb)
+		formatter.Format(p, verb)
+		return
+	}
+
+	// If we're doing Go syntax and the argument knows how to supply it, take care of it now.
+ if p.fmt.SharpV { + if stringer, ok := p.arg.(fmt.GoStringer); ok { + handled = true + defer p.catchPanic(p.arg, verb) + // Print the result of GoString unadorned. + p.fmt.fmt_s(stringer.GoString()) + return + } + } else { + // If a string is acceptable according to the format, see if + // the value satisfies one of the string-valued interfaces. + // Println etc. set verb to %v, which is "stringable". + switch verb { + case 'v', 's', 'x', 'X', 'q': + // Is it an error or Stringer? + // The duplication in the bodies is necessary: + // setting handled and deferring catchPanic + // must happen before calling the method. + switch v := p.arg.(type) { + case error: + handled = true + defer p.catchPanic(p.arg, verb) + p.fmtString(v.Error(), verb) + return + + case fmt.Stringer: + handled = true + defer p.catchPanic(p.arg, verb) + p.fmtString(v.String(), verb) + return + } + } + } + return false +} + +func (p *printer) printArg(arg interface{}, verb rune) { + p.arg = arg + p.value = reflect.Value{} + + if arg == nil { + switch verb { + case 'T', 'v': + p.fmt.padString(nilAngleString) + default: + p.badVerb(verb) + } + return + } + + // Special processing considerations. + // %T (the value's type) and %p (its address) are special; we always do them first. + switch verb { + case 'T': + p.fmt.fmt_s(reflect.TypeOf(arg).String()) + return + case 'p': + p.fmtPointer(reflect.ValueOf(arg), 'p') + return + } + + // Some types can be done without reflection. + switch f := arg.(type) { + case bool: + p.fmtBool(f, verb) + case float32: + p.fmtFloat(float64(f), 32, verb) + case float64: + p.fmtFloat(f, 64, verb) + case complex64: + p.fmtComplex(complex128(f), 64, verb) + case complex128: + p.fmtComplex(f, 128, verb) + case int: + p.fmtInteger(uint64(f), signed, verb) + case int8: + p.fmtInteger(uint64(f), signed, verb) + case int16: + p.fmtInteger(uint64(f), signed, verb) + case int32: + p.fmtInteger(uint64(f), signed, verb) + case int64: + p.fmtInteger(uint64(f), signed, verb) + case uint: + p.fmtInteger(uint64(f), unsigned, verb) + case uint8: + p.fmtInteger(uint64(f), unsigned, verb) + case uint16: + p.fmtInteger(uint64(f), unsigned, verb) + case uint32: + p.fmtInteger(uint64(f), unsigned, verb) + case uint64: + p.fmtInteger(f, unsigned, verb) + case uintptr: + p.fmtInteger(uint64(f), unsigned, verb) + case string: + p.fmtString(f, verb) + case []byte: + p.fmtBytes(f, verb, "[]byte") + case reflect.Value: + // Handle extractable values with special methods + // since printValue does not handle them at depth 0. + if f.IsValid() && f.CanInterface() { + p.arg = f.Interface() + if p.handleMethods(verb) { + return + } + } + p.printValue(f, verb, 0) + default: + // If the type is not simple, it might have methods. + if !p.handleMethods(verb) { + // Need to use reflection, since the type had no + // interface methods that could be used for formatting. + p.printValue(reflect.ValueOf(f), verb, 0) + } + } +} + +// printValue is similar to printArg but starts with a reflect value, not an interface{} value. +// It does not handle 'p' and 'T' verbs because these should have been already handled by printArg. +func (p *printer) printValue(value reflect.Value, verb rune, depth int) { + // Handle values with special methods if not already handled by printArg (depth == 0). 
+ if depth > 0 && value.IsValid() && value.CanInterface() { + p.arg = value.Interface() + if p.handleMethods(verb) { + return + } + } + p.arg = nil + p.value = value + + switch f := value; value.Kind() { + case reflect.Invalid: + if depth == 0 { + p.WriteString(invReflectString) + } else { + switch verb { + case 'v': + p.WriteString(nilAngleString) + default: + p.badVerb(verb) + } + } + case reflect.Bool: + p.fmtBool(f.Bool(), verb) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.fmtInteger(uint64(f.Int()), signed, verb) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p.fmtInteger(f.Uint(), unsigned, verb) + case reflect.Float32: + p.fmtFloat(f.Float(), 32, verb) + case reflect.Float64: + p.fmtFloat(f.Float(), 64, verb) + case reflect.Complex64: + p.fmtComplex(f.Complex(), 64, verb) + case reflect.Complex128: + p.fmtComplex(f.Complex(), 128, verb) + case reflect.String: + p.fmtString(f.String(), verb) + case reflect.Map: + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + if f.IsNil() { + p.WriteString(nilParenString) + return + } + p.WriteByte('{') + } else { + p.WriteString(mapString) + } + keys := f.MapKeys() + for i, key := range keys { + if i > 0 { + if p.fmt.SharpV { + p.WriteString(commaSpaceString) + } else { + p.WriteByte(' ') + } + } + p.printValue(key, verb, depth+1) + p.WriteByte(':') + p.printValue(f.MapIndex(key), verb, depth+1) + } + if p.fmt.SharpV { + p.WriteByte('}') + } else { + p.WriteByte(']') + } + case reflect.Struct: + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + } + p.WriteByte('{') + for i := 0; i < f.NumField(); i++ { + if i > 0 { + if p.fmt.SharpV { + p.WriteString(commaSpaceString) + } else { + p.WriteByte(' ') + } + } + if p.fmt.PlusV || p.fmt.SharpV { + if name := f.Type().Field(i).Name; name != "" { + p.WriteString(name) + p.WriteByte(':') + } + } + p.printValue(getField(f, i), verb, depth+1) + } + p.WriteByte('}') + case reflect.Interface: + value := f.Elem() + if !value.IsValid() { + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + p.WriteString(nilParenString) + } else { + p.WriteString(nilAngleString) + } + } else { + p.printValue(value, verb, depth+1) + } + case reflect.Array, reflect.Slice: + switch verb { + case 's', 'q', 'x', 'X': + // Handle byte and uint8 slices and arrays special for the above verbs. + t := f.Type() + if t.Elem().Kind() == reflect.Uint8 { + var bytes []byte + if f.Kind() == reflect.Slice { + bytes = f.Bytes() + } else if f.CanAddr() { + bytes = f.Slice(0, f.Len()).Bytes() + } else { + // We have an array, but we cannot Slice() a non-addressable array, + // so we build a slice by hand. This is a rare case but it would be nice + // if reflection could help a little more. + bytes = make([]byte, f.Len()) + for i := range bytes { + bytes[i] = byte(f.Index(i).Uint()) + } + } + p.fmtBytes(bytes, verb, t.String()) + return + } + } + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + if f.Kind() == reflect.Slice && f.IsNil() { + p.WriteString(nilParenString) + return + } + p.WriteByte('{') + for i := 0; i < f.Len(); i++ { + if i > 0 { + p.WriteString(commaSpaceString) + } + p.printValue(f.Index(i), verb, depth+1) + } + p.WriteByte('}') + } else { + p.WriteByte('[') + for i := 0; i < f.Len(); i++ { + if i > 0 { + p.WriteByte(' ') + } + p.printValue(f.Index(i), verb, depth+1) + } + p.WriteByte(']') + } + case reflect.Ptr: + // pointer to array or slice or struct? 
ok at top level + // but not embedded (avoid loops) + if depth == 0 && f.Pointer() != 0 { + switch a := f.Elem(); a.Kind() { + case reflect.Array, reflect.Slice, reflect.Struct, reflect.Map: + p.WriteByte('&') + p.printValue(a, verb, depth+1) + return + } + } + fallthrough + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + p.fmtPointer(f, verb) + default: + p.unknownType(f) + } +} + +func (p *printer) badArgNum(verb rune) { + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteString(badIndexString) +} + +func (p *printer) missingArg(verb rune) { + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteString(missingString) +} + +func (p *printer) doPrintf(fmt string) { + for p.fmt.Parser.SetFormat(fmt); p.fmt.Scan(); { + switch p.fmt.Status { + case format.StatusText: + p.WriteString(p.fmt.Text()) + case format.StatusSubstitution: + p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb) + case format.StatusBadWidthSubstitution: + p.WriteString(badWidthString) + p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb) + case format.StatusBadPrecSubstitution: + p.WriteString(badPrecString) + p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb) + case format.StatusNoVerb: + p.WriteString(noVerbString) + case format.StatusBadArgNum: + p.badArgNum(p.fmt.Verb) + case format.StatusMissingArg: + p.missingArg(p.fmt.Verb) + default: + panic("unreachable") + } + } + + // Check for extra arguments, but only if there was at least one ordered + // argument. Note that this behavior is necessarily different from fmt: + // different variants of messages may opt to drop some or all of the + // arguments. + if !p.fmt.Reordered && p.fmt.ArgNum < len(p.fmt.Args) && p.fmt.ArgNum != 0 { + p.fmt.ClearFlags() + p.WriteString(extraString) + for i, arg := range p.fmt.Args[p.fmt.ArgNum:] { + if i > 0 { + p.WriteString(commaSpaceString) + } + if arg == nil { + p.WriteString(nilAngleString) + } else { + p.WriteString(reflect.TypeOf(arg).String()) + p.WriteString("=") + p.printArg(arg, 'v') + } + } + p.WriteByte(')') + } +} + +func (p *printer) doPrint(a []interface{}) { + prevString := false + for argNum, arg := range a { + isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String + // Add a space between two non-string arguments. + if argNum > 0 && !isString && !prevString { + p.WriteByte(' ') + } + p.printArg(arg, 'v') + prevString = isString + } +} + +// doPrintln is like doPrint but always adds a space between arguments +// and a newline after the last argument. +func (p *printer) doPrintln(a []interface{}) { + for argNum, arg := range a { + if argNum > 0 { + p.WriteByte(' ') + } + p.printArg(arg, 'v') + } + p.WriteByte('\n') +} diff --git a/hack/tools/vendor/golang.org/x/time/LICENSE b/hack/tools/vendor/golang.org/x/time/LICENSE new file mode 100644 index 0000000000..2a7cf70da6 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/time/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/hack/tools/vendor/golang.org/x/xerrors/PATENTS b/hack/tools/vendor/golang.org/x/time/PATENTS similarity index 100% rename from hack/tools/vendor/golang.org/x/xerrors/PATENTS rename to hack/tools/vendor/golang.org/x/time/PATENTS diff --git a/hack/tools/vendor/golang.org/x/time/rate/rate.go b/hack/tools/vendor/golang.org/x/time/rate/rate.go new file mode 100644 index 0000000000..93a798ab63 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/time/rate/rate.go @@ -0,0 +1,419 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rate provides a rate limiter. +package rate + +import ( + "context" + "fmt" + "math" + "sync" + "time" +) + +// Limit defines the maximum frequency of some events. +// Limit is represented as number of events per second. +// A zero Limit allows no events. +type Limit float64 + +// Inf is the infinite rate limit; it allows all events (even if burst is zero). +const Inf = Limit(math.MaxFloat64) + +// Every converts a minimum time interval between events to a Limit. +func Every(interval time.Duration) Limit { + if interval <= 0 { + return Inf + } + return 1 / Limit(interval.Seconds()) +} + +// A Limiter controls how frequently events are allowed to happen. +// It implements a "token bucket" of size b, initially full and refilled +// at rate r tokens per second. +// Informally, in any large enough time interval, the Limiter limits the +// rate to r tokens per second, with a maximum burst size of b events. +// As a special case, if r == Inf (the infinite rate), b is ignored. +// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. +// +// The zero value is a valid Limiter, but it will reject all events. +// Use NewLimiter to create non-zero Limiters. +// +// Limiter has three main methods, Allow, Reserve, and Wait. +// Most callers should use Wait. +// +// Each of the three methods consumes a single token. +// They differ in their behavior when no token is available. +// If no token is available, Allow returns false. +// If no token is available, Reserve returns a reservation for a future token +// and the amount of time the caller must wait before using it. +// If no token is available, Wait blocks until one can be obtained +// or its associated context.Context is canceled. +// +// The methods AllowN, ReserveN, and WaitN consume n tokens. +// +// Limiter is safe for simultaneous use by multiple goroutines. 
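+//
+// A short usage sketch (editor's illustration, using only the API defined in
+// this file): allow bursts of up to 10 events, refilled at 5 tokens/second.
+//
+//	lim := rate.NewLimiter(rate.Limit(5), 10)
+//	for task := range tasks { // tasks is a hypothetical work channel
+//		if err := lim.Wait(ctx); err != nil {
+//			return err // ctx canceled or deadline too near
+//		}
+//		process(task) // hypothetical work function
+//	}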
+type Limiter struct { + mu sync.Mutex + limit Limit + burst int + tokens float64 + // last is the last time the limiter's tokens field was updated + last time.Time + // lastEvent is the latest time of a rate-limited event (past or future) + lastEvent time.Time +} + +// Limit returns the maximum overall event rate. +func (lim *Limiter) Limit() Limit { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.limit +} + +// Burst returns the maximum burst size. Burst is the maximum number of tokens +// that can be consumed in a single call to Allow, Reserve, or Wait, so higher +// Burst values allow more events to happen at once. +// A zero Burst allows no events, unless limit == Inf. +func (lim *Limiter) Burst() int { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.burst +} + +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, tokens := lim.advance(t) // does not mutate lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + +// NewLimiter returns a new Limiter that allows events up to rate r and permits +// bursts of at most b tokens. +func NewLimiter(r Limit, b int) *Limiter { + return &Limiter{ + limit: r, + burst: b, + tokens: float64(b), + } +} + +// Allow reports whether an event may happen now. +func (lim *Limiter) Allow() bool { + return lim.AllowN(time.Now(), 1) +} + +// AllowN reports whether n events may happen at time t. +// Use this method if you intend to drop / skip events that exceed the rate limit. +// Otherwise use Reserve or Wait. +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok +} + +// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. +// A Reservation may be canceled, which may enable the Limiter to permit additional events. +type Reservation struct { + ok bool + lim *Limiter + tokens int + timeToAct time.Time + // This is the Limit at reservation time, it can change later. + limit Limit +} + +// OK returns whether the limiter can provide the requested number of tokens +// within the maximum wait time. If OK is false, Delay returns InfDuration, and +// Cancel does nothing. +func (r *Reservation) OK() bool { + return r.ok +} + +// Delay is shorthand for DelayFrom(time.Now()). +func (r *Reservation) Delay() time.Duration { + return r.DelayFrom(time.Now()) +} + +// InfDuration is the duration returned by Delay when a Reservation is not OK. +const InfDuration = time.Duration(math.MaxInt64) + +// DelayFrom returns the duration for which the reservation holder must wait +// before taking the reserved action. Zero duration means act immediately. +// InfDuration means the limiter cannot grant the tokens requested in this +// Reservation within the maximum wait time. +func (r *Reservation) DelayFrom(t time.Time) time.Duration { + if !r.ok { + return InfDuration + } + delay := r.timeToAct.Sub(t) + if delay < 0 { + return 0 + } + return delay +} + +// Cancel is shorthand for CancelAt(time.Now()). +func (r *Reservation) Cancel() { + r.CancelAt(time.Now()) +} + +// CancelAt indicates that the reservation holder will not perform the reserved action +// and reverses the effects of this Reservation on the rate limit as much as possible, +// considering that other reservations may have already been made. 
+func (r *Reservation) CancelAt(t time.Time) { + if !r.ok { + return + } + + r.lim.mu.Lock() + defer r.lim.mu.Unlock() + + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { + return + } + + // calculate tokens to restore + // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved + // after r was obtained. These tokens should not be restored. + restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) + if restoreTokens <= 0 { + return + } + // advance time to now + t, tokens := r.lim.advance(t) + // calculate new number of tokens + tokens += restoreTokens + if burst := float64(r.lim.burst); tokens > burst { + tokens = burst + } + // update state + r.lim.last = t + r.lim.tokens = tokens + if r.timeToAct == r.lim.lastEvent { + prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) + if !prevEvent.Before(t) { + r.lim.lastEvent = prevEvent + } + } +} + +// Reserve is shorthand for ReserveN(time.Now(), 1). +func (lim *Limiter) Reserve() *Reservation { + return lim.ReserveN(time.Now(), 1) +} + +// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. +// The Limiter takes this Reservation into account when allowing future events. +// The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. +// Usage example: +// +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// +// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. +// If you need to respect a deadline or cancel the delay, use Wait instead. +// To drop or skip events exceeding rate limit, use Allow instead. +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) + return &r +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.WaitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +// The burst limit is ignored if the rate limit is Inf. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. 
+func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { + lim.mu.Lock() + burst := lim.burst + limit := lim.limit + lim.mu.Unlock() + + if n > burst && limit != Inf { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst) + } + // Check if ctx is already cancelled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Determine wait limit + waitLimit := InfDuration + if deadline, ok := ctx.Deadline(); ok { + waitLimit = deadline.Sub(t) + } + // Reserve + r := lim.reserveN(t, n, waitLimit) + if !r.ok { + return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) + } + // Wait if necessary + delay := r.DelayFrom(t) + if delay == 0 { + return nil + } + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing + select { + case <-ch: + // We can proceed. + return nil + case <-ctx.Done(): + // Context was canceled before we could proceed. Cancel the + // reservation, which may permit other events to proceed sooner. + r.Cancel() + return ctx.Err() + } +} + +// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). +func (lim *Limiter) SetLimit(newLimit Limit) { + lim.SetLimitAt(time.Now(), newLimit) +} + +// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated +// or underutilized by those which reserved (using Reserve or Wait) but did not yet act +// before SetLimitAt was called. +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { + lim.mu.Lock() + defer lim.mu.Unlock() + + t, tokens := lim.advance(t) + + lim.last = t + lim.tokens = tokens + lim.limit = newLimit +} + +// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst). +func (lim *Limiter) SetBurst(newBurst int) { + lim.SetBurstAt(time.Now(), newBurst) +} + +// SetBurstAt sets a new burst size for the limiter. +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { + lim.mu.Lock() + defer lim.mu.Unlock() + + t, tokens := lim.advance(t) + + lim.last = t + lim.tokens = tokens + lim.burst = newBurst +} + +// reserveN is a helper method for AllowN, ReserveN, and WaitN. +// maxFutureReserve specifies the maximum reservation wait duration allowed. +// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { + lim.mu.Lock() + defer lim.mu.Unlock() + + if lim.limit == Inf { + return Reservation{ + ok: true, + lim: lim, + tokens: n, + timeToAct: t, + } + } + + t, tokens := lim.advance(t) + + // Calculate the remaining number of tokens resulting from the request. + tokens -= float64(n) + + // Calculate the wait duration + var waitDuration time.Duration + if tokens < 0 { + waitDuration = lim.limit.durationFromTokens(-tokens) + } + + // Decide result + ok := n <= lim.burst && waitDuration <= maxFutureReserve + + // Prepare reservation + r := Reservation{ + ok: ok, + lim: lim, + limit: lim.limit, + } + if ok { + r.tokens = n + r.timeToAct = t.Add(waitDuration) + + // Update state + lim.last = t + lim.tokens = tokens + lim.lastEvent = r.timeToAct + } + + return r +} + +// advance calculates and returns an updated state for lim resulting from the passage of time. +// lim is not changed. +// advance requires that lim.mu is held. 
+func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) {
+	last := lim.last
+	if t.Before(last) {
+		last = t
+	}
+
+	// Calculate the new number of tokens, due to time that passed.
+	elapsed := t.Sub(last)
+	delta := lim.limit.tokensFromDuration(elapsed)
+	tokens := lim.tokens + delta
+	if burst := float64(lim.burst); tokens > burst {
+		tokens = burst
+	}
+	return t, tokens
+}
+
+// durationFromTokens is a unit conversion function from the number of tokens to the duration
+// of time it takes to accumulate them at a rate of limit tokens per second.
+func (limit Limit) durationFromTokens(tokens float64) time.Duration {
+	if limit <= 0 {
+		return InfDuration
+	}
+	seconds := tokens / float64(limit)
+	return time.Duration(float64(time.Second) * seconds)
+}
+
+// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
+// which could be accumulated during that duration at a rate of limit tokens per second.
+func (limit Limit) tokensFromDuration(d time.Duration) float64 {
+	if limit <= 0 {
+		return 0
+	}
+	return d.Seconds() * float64(limit)
+}
diff --git a/hack/tools/vendor/golang.org/x/time/rate/sometimes.go b/hack/tools/vendor/golang.org/x/time/rate/sometimes.go
new file mode 100644
index 0000000000..6ba99ddb67
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/time/rate/sometimes.go
@@ -0,0 +1,67 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rate
+
+import (
+	"sync"
+	"time"
+)
+
+// Sometimes will perform an action occasionally. The First, Every, and
+// Interval fields govern the behavior of Do, which performs the action.
+// A zero Sometimes value will perform an action exactly once.
+//
+// # Example: logging with rate limiting
+//
+//	var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second}
+//	func Spammy() {
+//		sometimes.Do(func() { log.Info("here I am!") })
+//	}
+type Sometimes struct {
+	First    int           // if non-zero, the first N calls to Do will run f.
+	Every    int           // if non-zero, every Nth call to Do will run f.
+	Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f.
+
+	mu    sync.Mutex
+	count int       // number of Do calls
+	last  time.Time // last time f was run
+}
+
+// Do runs the function f as allowed by First, Every, and Interval.
+//
+// The model is a union (not intersection) of filters. The first call to Do
+// always runs f. Subsequent calls to Do run f if allowed by First or Every or
+// Interval.
+//
+// A non-zero First:N causes the first N Do(f) calls to run f.
+//
+// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to
+// run f.
+//
+// A non-zero Interval causes Do(f) to run f if Interval has elapsed since
+// Do last ran f.
+//
+// Specifying multiple filters produces the union of these execution streams.
+// For example, specifying both First:N and Every:M causes the first N Do(f)
+// calls and every Mth Do(f) call, starting with the first, to run f. See
+// Examples for more.
+//
+// If Do is called multiple times simultaneously, the calls will block and run
+// serially. Therefore, Do is intended for lightweight operations.
+//
+// Because a call to Do may block until f returns, if f causes Do to be called,
+// it will deadlock.
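+//
+// For example, with First: 2 and Every: 3, calls 1 and 2 run f via First,
+// and calls 1, 4, 7, ... run f via Every, so the union runs f on calls
+// 1, 2, 4, 7, 10, ...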
+func (s *Sometimes) Do(f func()) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.count == 0 ||
+		(s.First > 0 && s.count < s.First) ||
+		(s.Every > 0 && s.count%s.Every == 0) ||
+		(s.Interval > 0 && time.Since(s.last) >= s.Interval) {
+		f()
+		s.last = time.Now()
+	}
+	s.count++
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/analysis.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/analysis.go
index aa02eeda68..d384aa89b8 100644
--- a/hack/tools/vendor/golang.org/x/tools/go/analysis/analysis.go
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/analysis.go
@@ -50,7 +50,7 @@ type Analyzer struct {
 	// RunDespiteErrors allows the driver to invoke
 	// the Run method of this analyzer even on a
 	// package that contains parse or type errors.
-	// The Pass.TypeErrors field may consequently be non-empty.
+	// The [Pass.TypeErrors] field may consequently be non-empty.
 	RunDespiteErrors bool
 
 	// Requires is a set of analyzers that must run successfully
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/checker/checker.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/checker/checker.go
new file mode 100644
index 0000000000..5935a62aba
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/checker/checker.go
@@ -0,0 +1,625 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package checker provides an analysis driver based on the
+// [golang.org/x/tools/go/packages] representation of a set of
+// packages and all their dependencies, as produced by
+// [packages.Load].
+//
+// It is the core of multichecker (the multi-analyzer driver),
+// singlechecker (the single-analyzer driver often used to provide a
+// convenient command alongside each analyzer), and analysistest, the
+// test driver.
+//
+// By contrast, the 'go vet' command is based on unitchecker, an
+// analysis driver that uses separate analysis--analogous to separate
+// compilation--with file-based intermediate results. Like separate
+// compilation, it is more scalable, especially for incremental
+// analysis of large code bases. Commands based on multichecker and
+// singlechecker are capable of detecting when they are being invoked
+// by "go vet -vettool=exe" and instead dispatching to unitchecker.
+//
+// Programs built using this package will, in general, not be usable
+// in that way. This package is intended only for use in applications
+// that invoke the analysis driver as a subroutine, and need to insert
+// additional steps before or after the analysis.
+//
+// See the Example of how to build a complete analysis driver program.
+package checker
+
+import (
+	"bytes"
+	"encoding/gob"
+	"fmt"
+	"go/types"
+	"io"
+	"log"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/internal"
+	"golang.org/x/tools/go/analysis/internal/analysisflags"
+	"golang.org/x/tools/go/packages"
+	"golang.org/x/tools/internal/analysisinternal"
+)
+
+// Options specifies options that control the analysis driver.
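+//
+// For example, a driver wanting deterministic single-threaded execution with
+// fact logging might use (a sketch, not a prescribed configuration):
+//
+//	opts := &checker.Options{Sequential: true, SanityCheck: true, FactLog: os.Stderr}
+//	graph, err := checker.Analyze(analyzers, pkgs, opts)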
+type Options struct {
+	// These options correspond to existing flags exposed by multichecker:
+	Sequential  bool      // disable parallelism
+	SanityCheck bool      // check fact encoding is ok and deterministic
+	FactLog     io.Writer // if non-nil, log each exported fact to it
+
+	// TODO(adonovan): add ReadFile so that an Overlay specified
+	// in the [packages.Config] can be communicated via
+	// Pass.ReadFile to each Analyzer.
+}
+
+// Graph holds the results of a round of analysis, including the graph
+// of requested actions (analyzers applied to packages) plus any
+// dependent actions that it was necessary to compute.
+type Graph struct {
+	// Roots contains the roots of the action graph.
+	// Each node (a, p) in the action graph represents the
+	// application of one analyzer a to one package p.
+	// (A node thus corresponds to one analysis.Pass instance.)
+	// Roots holds one action per element of the product
+	// of the analyzers × packages arguments to Analyze,
+	// in unspecified order.
+	//
+	// Each element of Action.Deps represents an edge in the
+	// action graph: a dependency from one action to another.
+	// An edge of the form (a, p) -> (a, p2) indicates that the
+	// analysis of package p requires information ("facts") from
+	// the same analyzer applied to one of p's dependencies, p2.
+	// An edge of the form (a, p) -> (a2, p) indicates that the
+	// analysis of package p requires information ("results")
+	// from a different analyzer a2 applied to the same package.
+	// These two kinds of edges are called "vertical" and "horizontal",
+	// respectively.
+	Roots []*Action
+}
+
+// All returns an iterator over the action graph in depth-first postorder.
+//
+// Example:
+//
+//	for act := range graph.All() {
+//		...
+//	}
+//
+// Clients using go1.22 should iterate using the code below and may
+// not assume anything else about the result:
+//
+//	graph.All()(func (act *Action) bool {
+//		...
+//	})
+func (g *Graph) All() actionSeq {
+	return func(yield func(*Action) bool) {
+		forEach(g.Roots, func(act *Action) error {
+			if !yield(act) {
+				return io.EOF // any error will do
+			}
+			return nil
+		})
+	}
+}
+
+// An Action represents one unit of analysis work by the driver: the
+// application of one analysis to one package. It provides the inputs
+// to and records the outputs of a single analysis.Pass.
+//
+// Actions form a DAG, both within a package (as different analyzers
+// are applied, either in sequence or parallel), and across packages
+// (as dependencies are analyzed).
+type Action struct {
+	Analyzer    *analysis.Analyzer
+	Package     *packages.Package
+	IsRoot      bool // whether this is a root node of the graph
+	Deps        []*Action
+	Result      any   // computed result of Analyzer.run, if any (and if IsRoot)
+	Err         error // error result of Analyzer.run
+	Diagnostics []analysis.Diagnostic
+	Duration    time.Duration // execution time of this step
+
+	opts         *Options
+	once         sync.Once
+	pass         *analysis.Pass
+	objectFacts  map[objectFactKey]analysis.Fact
+	packageFacts map[packageFactKey]analysis.Fact
+	inputs       map[*analysis.Analyzer]any
+}
+
+func (act *Action) String() string {
+	return fmt.Sprintf("%s@%s", act.Analyzer, act.Package)
+}
+
+// Analyze runs the specified analyzers on the initial packages.
+//
+// The initial packages and all dependencies must have been loaded
+// using the [packages.LoadAllSyntax] flag, as Analyze may need to run
+// some analyzers (those that consume and produce facts) on
+// dependencies too.
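+//
+// A minimal loading sketch (error handling elided for brevity):
+//
+//	cfg := &packages.Config{Mode: packages.LoadAllSyntax}
+//	pkgs, _ := packages.Load(cfg, "./...")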
+//
+// On success, it returns a Graph of actions whose Roots hold one
+// item per (a, p) in the cross-product of analyzers and pkgs.
+//
+// If opts is nil, it is equivalent to new(Options).
+func Analyze(analyzers []*analysis.Analyzer, pkgs []*packages.Package, opts *Options) (*Graph, error) {
+	if opts == nil {
+		opts = new(Options)
+	}
+
+	if err := analysis.Validate(analyzers); err != nil {
+		return nil, err
+	}
+
+	// Construct the action graph.
+	//
+	// Each graph node (action) is one unit of analysis.
+	// Edges express package-to-package (vertical) dependencies,
+	// and analysis-to-analysis (horizontal) dependencies.
+	type key struct {
+		a   *analysis.Analyzer
+		pkg *packages.Package
+	}
+	actions := make(map[key]*Action)
+
+	var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *Action
+	mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *Action {
+		k := key{a, pkg}
+		act, ok := actions[k]
+		if !ok {
+			act = &Action{Analyzer: a, Package: pkg, opts: opts}
+
+			// Add a dependency on each required analyzer.
+			for _, req := range a.Requires {
+				act.Deps = append(act.Deps, mkAction(req, pkg))
+			}
+
+			// An analysis that consumes/produces facts
+			// must run on the package's dependencies too.
+			if len(a.FactTypes) > 0 {
+				paths := make([]string, 0, len(pkg.Imports))
+				for path := range pkg.Imports {
+					paths = append(paths, path)
+				}
+				sort.Strings(paths) // for determinism
+				for _, path := range paths {
+					dep := mkAction(a, pkg.Imports[path])
+					act.Deps = append(act.Deps, dep)
+				}
+			}
+
+			actions[k] = act
+		}
+		return act
+	}
+
+	// Build nodes for initial packages.
+	var roots []*Action
+	for _, a := range analyzers {
+		for _, pkg := range pkgs {
+			root := mkAction(a, pkg)
+			root.IsRoot = true
+			roots = append(roots, root)
+		}
+	}
+
+	// Execute the graph in parallel.
+	execAll(roots)
+
+	// Ensure that only root Results are visible to caller.
+	// (The others are considered temporary intermediaries.)
+	// TODO(adonovan): opt: clear them earlier, so we can
+	// release large data structures like SSA sooner.
+	for _, act := range actions {
+		if !act.IsRoot {
+			act.Result = nil
+		}
+	}
+
+	return &Graph{Roots: roots}, nil
+}
+
+func init() {
+	// Allow analysistest to access Action.pass,
+	// for its legacy Result data type.
+	internal.Pass = func(x any) *analysis.Pass { return x.(*Action).pass }
+}
+
+type objectFactKey struct {
+	obj types.Object
+	typ reflect.Type
+}
+
+type packageFactKey struct {
+	pkg *types.Package
+	typ reflect.Type
+}
+
+func execAll(actions []*Action) {
+	var wg sync.WaitGroup
+	for _, act := range actions {
+		wg.Add(1)
+		work := func(act *Action) {
+			act.exec()
+			wg.Done()
+		}
+		if act.opts.Sequential {
+			work(act)
+		} else {
+			go work(act)
+		}
+	}
+	wg.Wait()
+}
+
+func (act *Action) exec() { act.once.Do(act.execOnce) }
+
+func (act *Action) execOnce() {
+	// Analyze dependencies.
+	execAll(act.Deps)
+
+	// Record time spent in this node but not its dependencies.
+	// In parallel mode, due to GC/scheduler contention, the
+	// time is 5x higher than in sequential mode, even with a
+	// semaphore limiting the number of threads here.
+	// So use -debug=tp.
+	t0 := time.Now()
+	defer func() { act.Duration = time.Since(t0) }()
+
+	// Report an error if any dependency failed.
+	var failed []string
+	for _, dep := range act.Deps {
+		if dep.Err != nil {
+			failed = append(failed, dep.String())
+		}
+	}
+	if failed != nil {
+		sort.Strings(failed)
+		act.Err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", "))
+		return
+	}
+
+	// Plumb the output values of the dependencies
+	// into the inputs of this action. Also facts.
+	inputs := make(map[*analysis.Analyzer]any)
+	act.objectFacts = make(map[objectFactKey]analysis.Fact)
+	act.packageFacts = make(map[packageFactKey]analysis.Fact)
+	for _, dep := range act.Deps {
+		if dep.Package == act.Package {
+			// Same package, different analysis (horizontal edge):
+			// in-memory outputs of prerequisite analyzers
+			// become inputs to this analysis pass.
+			inputs[dep.Analyzer] = dep.Result
+
+		} else if dep.Analyzer == act.Analyzer { // (always true)
+			// Same analysis, different package (vertical edge):
+			// serialized facts produced by prerequisite analysis
+			// become available to this analysis pass.
+			inheritFacts(act, dep)
+		}
+	}
+
+	// Quick (nonexhaustive) check that the correct go/packages mode bits were used.
+	// (If there were errors, all bets are off.)
+	if pkg := act.Package; pkg.Errors == nil {
+		if pkg.Name == "" || pkg.PkgPath == "" || pkg.Types == nil || pkg.Fset == nil || pkg.TypesSizes == nil {
+			panic("packages must be loaded with packages.LoadSyntax mode")
+		}
+	}
+
+	module := &analysis.Module{} // possibly empty (non-nil) in go/analysis drivers.
+	if mod := act.Package.Module; mod != nil {
+		module.Path = mod.Path
+		module.Version = mod.Version
+		module.GoVersion = mod.GoVersion
+	}
+
+	// Run the analysis.
+	pass := &analysis.Pass{
+		Analyzer:     act.Analyzer,
+		Fset:         act.Package.Fset,
+		Files:        act.Package.Syntax,
+		OtherFiles:   act.Package.OtherFiles,
+		IgnoredFiles: act.Package.IgnoredFiles,
+		Pkg:          act.Package.Types,
+		TypesInfo:    act.Package.TypesInfo,
+		TypesSizes:   act.Package.TypesSizes,
+		TypeErrors:   act.Package.TypeErrors,
+		Module:       module,
+
+		ResultOf:          inputs,
+		Report:            func(d analysis.Diagnostic) { act.Diagnostics = append(act.Diagnostics, d) },
+		ImportObjectFact:  act.ObjectFact,
+		ExportObjectFact:  act.exportObjectFact,
+		ImportPackageFact: act.PackageFact,
+		ExportPackageFact: act.exportPackageFact,
+		AllObjectFacts:    act.AllObjectFacts,
+		AllPackageFacts:   act.AllPackageFacts,
+	}
+	pass.ReadFile = analysisinternal.MakeReadFile(pass)
+	act.pass = pass
+
+	act.Result, act.Err = func() (any, error) {
+		if act.Package.IllTyped && !pass.Analyzer.RunDespiteErrors {
+			return nil, fmt.Errorf("analysis skipped due to errors in package")
+		}
+
+		result, err := pass.Analyzer.Run(pass)
+		if err != nil {
+			return nil, err
+		}
+
+		// correct result type?
+		if got, want := reflect.TypeOf(result), pass.Analyzer.ResultType; got != want {
+			return nil, fmt.Errorf(
+				"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
+				pass.Pkg.Path(), pass.Analyzer, got, want)
+		}
+
+		// resolve diagnostic URLs
+		for i := range act.Diagnostics {
+			url, err := analysisflags.ResolveURL(act.Analyzer, act.Diagnostics[i])
+			if err != nil {
+				return nil, err
+			}
+			act.Diagnostics[i].URL = url
+		}
+		return result, nil
+	}()
+
+	// Help detect (disallowed) calls after Run.
+	pass.ExportObjectFact = nil
+	pass.ExportPackageFact = nil
+}
+
+// inheritFacts populates act.facts with
+// those it obtains from its dependency, dep.
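+//
+// A fact type must be a gob-encodable pointer that implements
+// analysis.Fact; a hypothetical example:
+//
+//	type noCopyFact struct{ Reason string }
+//
+//	func (*noCopyFact) AFact() {}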
+func inheritFacts(act, dep *Action) {
+	for key, fact := range dep.objectFacts {
+		// Filter out facts related to objects
+		// that are irrelevant downstream
+		// (equivalently: not in the compiler export data).
+		if !exportedFrom(key.obj, dep.Package.Types) {
+			if false {
+				log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
+			}
+			continue
+		}
+
+		// Optionally serialize/deserialize fact
+		// to verify that it works across address spaces.
+		if act.opts.SanityCheck {
+			encodedFact, err := codeFact(fact)
+			if err != nil {
+				log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
+			}
+			fact = encodedFact
+		}
+
+		if false {
+			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
+		}
+		act.objectFacts[key] = fact
+	}
+
+	for key, fact := range dep.packageFacts {
+		// TODO: filter out facts that belong to
+		// packages not mentioned in the export data
+		// to prevent side channels.
+		//
+		// The Pass.All{Object,Package}Facts accessors expose too much:
+		// all facts, of all types, for all dependencies in the action
+		// graph. Not only does the representation grow quadratically,
+		// but it violates the separate compilation paradigm, allowing
+		// analysis implementations to communicate with indirect
+		// dependencies that are not mentioned in the export data.
+		//
+		// It's not clear how to fix this short of a rather expensive
+		// filtering step after each action that enumerates all the
+		// objects that would appear in export data, and deletes
+		// facts associated with objects not in this set.
+
+		// Optionally serialize/deserialize fact
+		// to verify that it works across address spaces
+		// and is deterministic.
+		if act.opts.SanityCheck {
+			encodedFact, err := codeFact(fact)
+			if err != nil {
+				log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
+			}
+			fact = encodedFact
+		}
+
+		if false {
+			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
+		}
+		act.packageFacts[key] = fact
+	}
+}
+
+// codeFact encodes then decodes a fact,
+// just to exercise that logic.
+func codeFact(fact analysis.Fact) (analysis.Fact, error) {
+	// We encode facts one at a time.
+	// A real modular driver would emit all facts
+	// into one encoder to improve gob efficiency.
+	var buf bytes.Buffer
+	if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
+		return nil, err
+	}
+
+	// Encode it twice and assert that we get the same bits.
+	// This helps detect nondeterministic Gob encoding (e.g. of maps).
+	var buf2 bytes.Buffer
+	if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
+		return nil, err
+	}
+	if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
+		return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
+	}
+
+	new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
+	if err := gob.NewDecoder(&buf).Decode(new); err != nil {
+		return nil, err
+	}
+	return new, nil
+}
+
+// exportedFrom reports whether obj may be visible to a package that imports pkg.
+// This includes not just the exported members of pkg, but also unexported
+// constants, types, fields, and methods, perhaps belonging to other packages,
+// that find their way into the API.
+// This is an overapproximation of the more accurate approach used by
+// gc export data, which walks the type graph, but it's much simpler.
+//
+// TODO(adonovan): do more accurate filtering by walking the type graph.
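+//
+// For example, an unexported method is treated as visible because it has a
+// receiver, and any struct field is treated as visible because it may be
+// reachable through an exported struct type in the export data.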
+func exportedFrom(obj types.Object, pkg *types.Package) bool {
+	switch obj := obj.(type) {
+	case *types.Func:
+		return obj.Exported() && obj.Pkg() == pkg ||
+			obj.Type().(*types.Signature).Recv() != nil
+	case *types.Var:
+		if obj.IsField() {
+			return true
+		}
+		// we can't filter more aggressively than this because we need
+		// to consider function parameters exported, but have no way
+		// of telling apart function parameters from local variables.
+		return obj.Pkg() == pkg
+	case *types.TypeName, *types.Const:
+		return true
+	}
+	return false // Nil, Builtin, Label, or PkgName
+}
+
+// ObjectFact retrieves a fact associated with obj,
+// and returns true if one was found.
+// Given a value ptr of type *T, where *T satisfies Fact,
+// ObjectFact copies the value to *ptr.
+//
+// See documentation at ImportObjectFact field of [analysis.Pass].
+func (act *Action) ObjectFact(obj types.Object, ptr analysis.Fact) bool {
+	if obj == nil {
+		panic("nil object")
+	}
+	key := objectFactKey{obj, factType(ptr)}
+	if v, ok := act.objectFacts[key]; ok {
+		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+		return true
+	}
+	return false
+}
+
+// exportObjectFact implements Pass.ExportObjectFact.
+func (act *Action) exportObjectFact(obj types.Object, fact analysis.Fact) {
+	if act.pass.ExportObjectFact == nil {
+		log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
+	}
+
+	if obj.Pkg() != act.Package.Types {
+		log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging to another package",
+			act.Analyzer, act.Package, obj, fact)
+	}
+
+	key := objectFactKey{obj, factType(fact)}
+	act.objectFacts[key] = fact // clobber any existing entry
+	if log := act.opts.FactLog; log != nil {
+		objstr := types.ObjectString(obj, (*types.Package).Name)
+		fmt.Fprintf(log, "%s: object %s has fact %s\n",
+			act.Package.Fset.Position(obj.Pos()), objstr, fact)
+	}
+}
+
+// AllObjectFacts returns a new slice containing all object facts of
+// the analysis's FactTypes in unspecified order.
+//
+// See documentation at AllObjectFacts field of [analysis.Pass].
+func (act *Action) AllObjectFacts() []analysis.ObjectFact {
+	facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
+	for k, fact := range act.objectFacts {
+		facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: fact})
+	}
+	return facts
+}
+
+// PackageFact retrieves a fact associated with package pkg,
+// which must be this package or one of its dependencies.
+//
+// See documentation at ImportPackageFact field of [analysis.Pass].
+func (act *Action) PackageFact(pkg *types.Package, ptr analysis.Fact) bool {
+	if pkg == nil {
+		panic("nil package")
+	}
+	key := packageFactKey{pkg, factType(ptr)}
+	if v, ok := act.packageFacts[key]; ok {
+		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+		return true
+	}
+	return false
+}
+
+// exportPackageFact implements Pass.ExportPackageFact.
+func (act *Action) exportPackageFact(fact analysis.Fact) {
+	if act.pass.ExportPackageFact == nil {
+		log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact)
+	}
+
+	key := packageFactKey{act.pass.Pkg, factType(fact)}
+	act.packageFacts[key] = fact // clobber any existing entry
+	if log := act.opts.FactLog; log != nil {
+		fmt.Fprintf(log, "%s: package %s has fact %s\n",
+			act.Package.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
+	}
+}
+
+func factType(fact analysis.Fact) reflect.Type {
+	t := reflect.TypeOf(fact)
+	if t.Kind() != reflect.Ptr {
+		log.Fatalf("invalid Fact type: got %T, want pointer", fact)
+	}
+	return t
+}
+
+// AllPackageFacts returns a new slice containing all package
+// facts of the analysis's FactTypes in unspecified order.
+//
+// See documentation at AllPackageFacts field of [analysis.Pass].
+func (act *Action) AllPackageFacts() []analysis.PackageFact {
+	facts := make([]analysis.PackageFact, 0, len(act.packageFacts))
+	for k, fact := range act.packageFacts {
+		facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: fact})
+	}
+	return facts
+}
+
+// forEach is a utility function for traversing the action graph. It
+// applies function f to each action in the graph reachable from
+// roots, in depth-first postorder. If any call to f returns an error,
+// the traversal is aborted and forEach returns the error.
+func forEach(roots []*Action, f func(*Action) error) error {
+	seen := make(map[*Action]bool)
+	var visitAll func(actions []*Action) error
+	visitAll = func(actions []*Action) error {
+		for _, act := range actions {
+			if !seen[act] {
+				seen[act] = true
+				if err := visitAll(act.Deps); err != nil {
+					return err
+				}
+				if err := f(act); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	}
+	return visitAll(roots)
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/checker/iter_go122.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/checker/iter_go122.go
new file mode 100644
index 0000000000..cd25cce035
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/checker/iter_go122.go
@@ -0,0 +1,10 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.23
+
+package checker
+
+// This type is a placeholder for go1.23's iter.Seq[*Action].
+type actionSeq func(yield func(*Action) bool)
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/versions/constraint_go121.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/checker/iter_go123.go
similarity index 53%
rename from hack/tools/vendor/golang.org/x/tools/internal/versions/constraint_go121.go
rename to hack/tools/vendor/golang.org/x/tools/go/analysis/checker/iter_go123.go
index 38011407d5..e8278a9c1a 100644
--- a/hack/tools/vendor/golang.org/x/tools/internal/versions/constraint_go121.go
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/checker/iter_go123.go
@@ -2,13 +2,10 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
-//go:build go1.21
-// +build go1.21
+//go:build go1.23
 
-package versions
+package checker
 
-import "go/build/constraint"
+import "iter"
 
-func init() {
-	ConstraintGoVersion = constraint.GoVersion
-}
+type actionSeq = iter.Seq[*Action]
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/checker/print.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/checker/print.go
new file mode 100644
index 0000000000..d7c0430117
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/checker/print.go
@@ -0,0 +1,88 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package checker
+
+// This file defines helpers for printing analysis results.
+// They should all be pure functions.
+
+import (
+	"bytes"
+	"fmt"
+	"go/token"
+	"io"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/internal/analysisflags"
+)
+
+// PrintText emits diagnostics as plain text to w.
+//
+// If contextLines is nonnegative, it also prints the
+// offending line, plus that many lines of context
+// before and after the line.
+func (g *Graph) PrintText(w io.Writer, contextLines int) error {
+	return writeTextDiagnostics(w, g.Roots, contextLines)
+}
+
+func writeTextDiagnostics(w io.Writer, roots []*Action, contextLines int) error {
+	// De-duplicate diagnostics by position (not token.Pos) to
+	// avoid double-reporting in source files that belong to
+	// multiple packages, such as foo and foo.test.
+	// (We cannot assume that such repeated files were parsed
+	// only once and use syntax nodes as the key.)
+	type key struct {
+		pos token.Position
+		end token.Position
+		*analysis.Analyzer
+		message string
+	}
+	seen := make(map[key]bool)
+
+	// TODO(adonovan): opt: plumb errors back from PrintPlain and avoid buffer.
+	buf := new(bytes.Buffer)
+	forEach(roots, func(act *Action) error {
+		if act.Err != nil {
+			fmt.Fprintf(w, "%s: %v\n", act.Analyzer.Name, act.Err)
+		} else if act.IsRoot {
+			for _, diag := range act.Diagnostics {
+				// We don't display Analyzer.Name/diag.Category
+				// as most users don't care.
+
+				posn := act.Package.Fset.Position(diag.Pos)
+				end := act.Package.Fset.Position(diag.End)
+				k := key{posn, end, act.Analyzer, diag.Message}
+				if seen[k] {
+					continue // duplicate
+				}
+				seen[k] = true
+
+				analysisflags.PrintPlain(buf, act.Package.Fset, contextLines, diag)
+			}
+		}
+		return nil
+	})
+	_, err := w.Write(buf.Bytes())
+	return err
+}
+
+// PrintJSON emits diagnostics in JSON form to w.
+// Diagnostics are shown only for the root nodes,
+// but errors (if any) are shown for all dependencies.
+func (g *Graph) PrintJSON(w io.Writer) error {
+	return writeJSONDiagnostics(w, g.Roots)
+}
+
+func writeJSONDiagnostics(w io.Writer, roots []*Action) error {
+	tree := make(analysisflags.JSONTree)
+	forEach(roots, func(act *Action) error {
+		var diags []analysis.Diagnostic
+		if act.IsRoot {
+			diags = act.Diagnostics
+		}
+		tree.Add(act.Package.Fset, act.Package.ID, act.Analyzer.Name, diags, act.Err)
+		return nil
+	})
+	return tree.Print(w)
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go
new file mode 100644
index 0000000000..1282e70d41
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go
@@ -0,0 +1,455 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package analysisflags defines helpers for processing flags of
+// analysis driver tools.
+package analysisflags
+
+import (
+	"crypto/sha256"
+	"encoding/gob"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"go/token"
+	"io"
+	"log"
+	"os"
+	"strconv"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+)
+
+// flags common to all {single,multi,unit}checkers.
+var (
+	JSON    = false // -json
+	Context = -1    // -c=N: if N>0, display offending line plus N lines of context
+)
+
+// Parse creates a flag for each of the analyzer's flags,
+// including (in multi mode) a flag named after the analyzer,
+// parses the flags, then filters and returns the list of
+// analyzers enabled by flags.
+//
+// The result is intended to be passed to unitchecker.Run or checker.Run.
+// Use in unitchecker.Run will gob.Register all fact types for the returned
+// graph of analyzers, but of course not the ones only reachable from
+// dropped analyzers. To avoid inconsistency about which gob types are
+// registered from run to run, Parse itself gob.Registers all the facts
+// only reachable from dropped analyzers.
+// This is not a particularly elegant API, but this is an internal package.
+func Parse(analyzers []*analysis.Analyzer, multi bool) []*analysis.Analyzer {
+	// Connect each analysis flag to the command line as -analysis.flag.
+	enabled := make(map[*analysis.Analyzer]*triState)
+	for _, a := range analyzers {
+		var prefix string
+
+		// Add -NAME flag to enable it.
+		if multi {
+			prefix = a.Name + "."
+
+			enable := new(triState)
+			enableUsage := "enable " + a.Name + " analysis"
+			flag.Var(enable, a.Name, enableUsage)
+			enabled[a] = enable
+		}
+
+		a.Flags.VisitAll(func(f *flag.Flag) {
+			if !multi && flag.Lookup(f.Name) != nil {
+				log.Printf("%s flag -%s would conflict with driver; skipping", a.Name, f.Name)
+				return
+			}
+
+			name := prefix + f.Name
+			flag.Var(f.Value, name, f.Usage)
+		})
+	}
+
+	// standard flags: -flags, -V.
+	printflags := flag.Bool("flags", false, "print analyzer flags in JSON")
+	addVersionFlag()
+
+	// flags common to all checkers
+	flag.BoolVar(&JSON, "json", JSON, "emit JSON output")
+	flag.IntVar(&Context, "c", Context, `display offending line with this many lines of context`)
+
+	// Add shims for legacy vet flags to enable existing
+	// scripts that run vet to continue to work.
+	_ = flag.Bool("source", false, "no effect (deprecated)")
+	_ = flag.Bool("v", false, "no effect (deprecated)")
+	_ = flag.Bool("all", false, "no effect (deprecated)")
+	_ = flag.String("tags", "", "no effect (deprecated)")
+	for old, new := range vetLegacyFlags {
+		newFlag := flag.Lookup(new)
+		if newFlag != nil && flag.Lookup(old) == nil {
+			flag.Var(newFlag.Value, old, "deprecated alias for -"+new)
+		}
+	}
+
+	flag.Parse() // (ExitOnError)
+
+	// -flags: print flags so that go vet knows which ones are legitimate.
+	if *printflags {
+		printFlags()
+		os.Exit(0)
+	}
+
+	everything := expand(analyzers)
+
+	// If any -NAME flag is true, run only those analyzers. Otherwise,
+	// if any -NAME flag is false, run all but those analyzers.
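+	// For example, "vet -printf" runs only printf, whereas "vet -printf=false"
+	// runs every analyzer except printf.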
+	if multi {
+		var hasTrue, hasFalse bool
+		for _, ts := range enabled {
+			switch *ts {
+			case setTrue:
+				hasTrue = true
+			case setFalse:
+				hasFalse = true
+			}
+		}
+
+		var keep []*analysis.Analyzer
+		if hasTrue {
+			for _, a := range analyzers {
+				if *enabled[a] == setTrue {
+					keep = append(keep, a)
+				}
+			}
+			analyzers = keep
+		} else if hasFalse {
+			for _, a := range analyzers {
+				if *enabled[a] != setFalse {
+					keep = append(keep, a)
+				}
+			}
+			analyzers = keep
+		}
+	}
+
+	// Register fact types of skipped analyzers
+	// in case we encounter them in imported files.
+	kept := expand(analyzers)
+	for a := range everything {
+		if !kept[a] {
+			for _, f := range a.FactTypes {
+				gob.Register(f)
+			}
+		}
+	}
+
+	return analyzers
+}
+
+func expand(analyzers []*analysis.Analyzer) map[*analysis.Analyzer]bool {
+	seen := make(map[*analysis.Analyzer]bool)
+	var visitAll func([]*analysis.Analyzer)
+	visitAll = func(analyzers []*analysis.Analyzer) {
+		for _, a := range analyzers {
+			if !seen[a] {
+				seen[a] = true
+				visitAll(a.Requires)
+			}
+		}
+	}
+	visitAll(analyzers)
+	return seen
+}
+
+func printFlags() {
+	type jsonFlag struct {
+		Name  string
+		Bool  bool
+		Usage string
+	}
+	var flags []jsonFlag = nil
+	flag.VisitAll(func(f *flag.Flag) {
+		// Don't report {single,multi}checker debugging
+		// flags or fix as these have no effect on unitchecker
+		// (as invoked by 'go vet').
+		switch f.Name {
+		case "debug", "cpuprofile", "memprofile", "trace", "fix":
+			return
+		}
+
+		b, ok := f.Value.(interface{ IsBoolFlag() bool })
+		isBool := ok && b.IsBoolFlag()
+		flags = append(flags, jsonFlag{f.Name, isBool, f.Usage})
+	})
+	data, err := json.MarshalIndent(flags, "", "\t")
+	if err != nil {
+		log.Fatal(err)
+	}
+	os.Stdout.Write(data)
+}
+
+// addVersionFlag registers a -V flag that, if set,
+// prints the executable version and exits 0.
+//
+// If the -V flag already exists — for example, because it was already
+// registered by a call to cmd/internal/objabi.AddVersionFlag — then
+// addVersionFlag does nothing.
+func addVersionFlag() {
+	if flag.Lookup("V") == nil {
+		flag.Var(versionFlag{}, "V", "print version and exit")
+	}
+}
+
+// versionFlag minimally complies with the -V protocol required by "go vet".
+type versionFlag struct{}
+
+func (versionFlag) IsBoolFlag() bool { return true }
+func (versionFlag) Get() interface{} { return nil }
+func (versionFlag) String() string   { return "" }
+func (versionFlag) Set(s string) error {
+	if s != "full" {
+		log.Fatalf("unsupported flag value: -V=%s (use -V=full)", s)
+	}
+
+	// This replicates the minimal subset of
+	// cmd/internal/objabi.AddVersionFlag, which is private to the
+	// go tool yet forms part of our command-line interface.
+	// TODO(adonovan): clarify the contract.
+
+	// Print the tool version so the build system can track changes.
+	// Formats:
+	//   $progname version devel ... buildID=...
+	//   $progname version go1.9.1
+	progname, err := os.Executable()
+	if err != nil {
+		return err
+	}
+	f, err := os.Open(progname)
+	if err != nil {
+		log.Fatal(err)
+	}
+	h := sha256.New()
+	if _, err := io.Copy(h, f); err != nil {
+		log.Fatal(err)
+	}
+	f.Close()
+	fmt.Printf("%s version devel comments-go-here buildID=%02x\n",
+		progname, string(h.Sum(nil)))
+	os.Exit(0)
+	return nil
+}
+
+// A triState is a boolean that knows whether
+// it has been set to either true or false.
+// It is used to identify whether a flag appears;
+// the standard boolean flag cannot distinguish a
+// missing flag from one explicitly set to false.
+// It also satisfies flag.Value.
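+//
+// For example, a missing -printf flag leaves the value unset, while
+// -printf=false records setFalse; a plain bool flag would report false
+// in both cases.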
+type triState int
+
+const (
+	unset triState = iota
+	setTrue
+	setFalse
+)
+
+func triStateFlag(name string, value triState, usage string) *triState {
+	flag.Var(&value, name, usage)
+	return &value
+}
+
+// triState implements flag.Value, flag.Getter, and flag.boolFlag.
+// They work like boolean flags: we can say vet -printf as well as vet -printf=true
+func (ts *triState) Get() interface{} {
+	return *ts == setTrue
+}
+
+func (ts triState) isTrue() bool {
+	return ts == setTrue
+}
+
+func (ts *triState) Set(value string) error {
+	b, err := strconv.ParseBool(value)
+	if err != nil {
+		// This error message looks poor but package "flag" adds
+		// "invalid boolean value %q for -NAME: %s"
+		return fmt.Errorf("want true or false")
+	}
+	if b {
+		*ts = setTrue
+	} else {
+		*ts = setFalse
+	}
+	return nil
+}
+
+func (ts *triState) String() string {
+	switch *ts {
+	case unset:
+		return "true"
+	case setTrue:
+		return "true"
+	case setFalse:
+		return "false"
+	}
+	panic("not reached")
+}
+
+func (ts triState) IsBoolFlag() bool {
+	return true
+}
+
+// Legacy flag support
+
+// vetLegacyFlags maps flags used by legacy vet to their corresponding
+// new names. The old names will continue to work.
+var vetLegacyFlags = map[string]string{
+	// Analyzer name changes
+	"bool":       "bools",
+	"buildtags":  "buildtag",
+	"methods":    "stdmethods",
+	"rangeloops": "loopclosure",
+
+	// Analyzer flags
+	"compositewhitelist":  "composites.whitelist",
+	"printfuncs":          "printf.funcs",
+	"shadowstrict":        "shadow.strict",
+	"unusedfuncs":         "unusedresult.funcs",
+	"unusedstringmethods": "unusedresult.stringmethods",
+}
+
+// ---- output helpers common to all drivers ----
+//
+// These functions should not depend on global state (flags)!
+// Really they belong in a different package.
+
+// TODO(adonovan): don't accept an io.Writer if we don't report errors.
+// Either accept a bytes.Buffer (infallible), or return a []byte.
+
+// PrintPlain prints a diagnostic in plain text form.
+// If contextLines is nonnegative, it also prints the
+// offending line plus this many lines of context.
+func PrintPlain(out io.Writer, fset *token.FileSet, contextLines int, diag analysis.Diagnostic) {
+	posn := fset.Position(diag.Pos)
+	fmt.Fprintf(out, "%s: %s\n", posn, diag.Message)
+
+	// show offending line plus N lines of context.
+	if contextLines >= 0 {
+		posn := fset.Position(diag.Pos)
+		end := fset.Position(diag.End)
+		if !end.IsValid() {
+			end = posn
+		}
+		data, _ := os.ReadFile(posn.Filename)
+		lines := strings.Split(string(data), "\n")
+		for i := posn.Line - contextLines; i <= end.Line+contextLines; i++ {
+			if 1 <= i && i <= len(lines) {
+				fmt.Fprintf(out, "%d\t%s\n", i, lines[i-1])
+			}
+		}
+	}
+}
+
+// A JSONTree is a mapping from package ID to analysis name to result.
+// Each result is either a jsonError or a list of JSONDiagnostic.
+type JSONTree map[string]map[string]interface{}
+
+// A JSONTextEdit describes the replacement of a portion of a file.
+// Start and End are zero-based half-open indices into the original byte
+// sequence of the file, and New is the new text.
+type JSONTextEdit struct {
+	Filename string `json:"filename"`
+	Start    int    `json:"start"`
+	End      int    `json:"end"`
+	New      string `json:"new"`
+}
+
+// A JSONSuggestedFix describes an edit that should be applied as a whole or not
+// at all. It might contain multiple TextEdits/text_edits if the SuggestedFix
+// consists of multiple non-contiguous edits.
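+//
+// An illustrative instance (field values are examples only):
+//
+//	{"message": "remove unused import",
+//	 "edits": [{"filename": "a.go", "start": 17, "end": 35, "new": ""}]}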
+type JSONSuggestedFix struct {
+	Message string         `json:"message"`
+	Edits   []JSONTextEdit `json:"edits"`
+}
+
+// A JSONDiagnostic describes the JSON schema of an analysis.Diagnostic.
+//
+// TODO(matloob): include End position if present.
+type JSONDiagnostic struct {
+	Category       string                   `json:"category,omitempty"`
+	Posn           string                   `json:"posn"` // e.g. "file.go:line:column"
+	Message        string                   `json:"message"`
+	SuggestedFixes []JSONSuggestedFix       `json:"suggested_fixes,omitempty"`
+	Related        []JSONRelatedInformation `json:"related,omitempty"`
+}
+
+// A JSONRelatedInformation describes a secondary position and message
+// related to a primary diagnostic.
+//
+// TODO(adonovan): include End position if present.
+type JSONRelatedInformation struct {
+	Posn    string `json:"posn"` // e.g. "file.go:line:column"
+	Message string `json:"message"`
+}
+
+// Add adds the result of analysis 'name' on package 'id'.
+// The result is either a list of diagnostics or an error.
+func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.Diagnostic, err error) {
+	var v interface{}
+	if err != nil {
+		type jsonError struct {
+			Err string `json:"error"`
+		}
+		v = jsonError{err.Error()}
+	} else if len(diags) > 0 {
+		diagnostics := make([]JSONDiagnostic, 0, len(diags))
+		for _, f := range diags {
+			var fixes []JSONSuggestedFix
+			for _, fix := range f.SuggestedFixes {
+				var edits []JSONTextEdit
+				for _, edit := range fix.TextEdits {
+					edits = append(edits, JSONTextEdit{
+						Filename: fset.Position(edit.Pos).Filename,
+						Start:    fset.Position(edit.Pos).Offset,
+						End:      fset.Position(edit.End).Offset,
+						New:      string(edit.NewText),
+					})
+				}
+				fixes = append(fixes, JSONSuggestedFix{
+					Message: fix.Message,
+					Edits:   edits,
+				})
+			}
+			var related []JSONRelatedInformation
+			for _, r := range f.Related {
+				related = append(related, JSONRelatedInformation{
+					Posn:    fset.Position(r.Pos).String(),
+					Message: r.Message,
+				})
+			}
+			jdiag := JSONDiagnostic{
+				Category:       f.Category,
+				Posn:           fset.Position(f.Pos).String(),
+				Message:        f.Message,
+				SuggestedFixes: fixes,
+				Related:        related,
+			}
+			diagnostics = append(diagnostics, jdiag)
+		}
+		v = diagnostics
+	}
+	if v != nil {
+		m, ok := tree[id]
+		if !ok {
+			m = make(map[string]interface{})
+			tree[id] = m
+		}
+		m[name] = v
+	}
+}
+
+func (tree JSONTree) Print(out io.Writer) error {
+	data, err := json.MarshalIndent(tree, "", "\t")
+	if err != nil {
+		log.Panicf("internal error: JSON marshaling failed: %v", err)
+	}
+	_, err = fmt.Fprintf(out, "%s\n", data)
+	return err
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go
new file mode 100644
index 0000000000..ce92892c81
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package analysisflags
+
+import (
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"sort"
+	"strings"
+
+	"golang.org/x/tools/go/analysis"
+)
+
+const help = `PROGNAME is a tool for static analysis of Go programs.
+
+PROGNAME examines Go source code and reports suspicious constructs,
+such as Printf calls whose arguments do not align with the format
+string. It uses heuristics that do not guarantee all reports are
+genuine problems, but it can find errors not caught by the compilers.
+`
+
+// Help implements the help subcommand for a multichecker or unitchecker
+// style command. The optional args specify the analyzers to describe.
+// Help calls log.Fatal if no such analyzer exists.
+func Help(progname string, analyzers []*analysis.Analyzer, args []string) {
+	// No args: show summary of all analyzers.
+	if len(args) == 0 {
+		fmt.Println(strings.Replace(help, "PROGNAME", progname, -1))
+		fmt.Println("Registered analyzers:")
+		fmt.Println()
+		sort.Slice(analyzers, func(i, j int) bool {
+			return analyzers[i].Name < analyzers[j].Name
+		})
+		for _, a := range analyzers {
+			title := strings.Split(a.Doc, "\n\n")[0]
+			fmt.Printf("    %-12s %s\n", a.Name, title)
+		}
+		fmt.Println("\nBy default all analyzers are run.")
+		fmt.Println("To select specific analyzers, use the -NAME flag for each one,")
+		fmt.Println(" or -NAME=false to run all analyzers not explicitly disabled.")
+
+		// Show only the core command-line flags.
+		fmt.Println("\nCore flags:")
+		fmt.Println()
+		fs := flag.NewFlagSet("", flag.ExitOnError)
+		flag.VisitAll(func(f *flag.Flag) {
+			if !strings.Contains(f.Name, ".") {
+				fs.Var(f.Value, f.Name, f.Usage)
+			}
+		})
+		fs.SetOutput(os.Stdout)
+		fs.PrintDefaults()
+
+		fmt.Printf("\nTo see details and flags of a specific analyzer, run '%s help name'.\n", progname)
+
+		return
+	}
+
+	// Show help on specific analyzer(s).
+outer:
+	for _, arg := range args {
+		for _, a := range analyzers {
+			if a.Name == arg {
+				paras := strings.Split(a.Doc, "\n\n")
+				title := paras[0]
+				fmt.Printf("%s: %s\n", a.Name, title)
+
+				// Show only the flags relating to this analysis,
+				// properly prefixed.
+				first := true
+				fs := flag.NewFlagSet(a.Name, flag.ExitOnError)
+				a.Flags.VisitAll(func(f *flag.Flag) {
+					if first {
+						first = false
+						fmt.Println("\nAnalyzer flags:")
+						fmt.Println()
+					}
+					fs.Var(f.Value, a.Name+"."+f.Name, f.Usage)
+				})
+				fs.SetOutput(os.Stdout)
+				fs.PrintDefaults()
+
+				if len(paras) > 1 {
+					fmt.Printf("\n%s\n", strings.Join(paras[1:], "\n\n"))
+				}
+
+				continue outer
+			}
+		}
+		log.Fatalf("Analyzer %q not registered", arg)
+	}
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/url.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/url.go
new file mode 100644
index 0000000000..26a917a991
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/url.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package analysisflags
+
+import (
+	"fmt"
+	"net/url"
+
+	"golang.org/x/tools/go/analysis"
+)
+
+// ResolveURL resolves the URL field for a Diagnostic from an Analyzer
+// and returns the URL. See Diagnostic.URL for details.
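+//
+// For example, given an Analyzer whose URL is "https://example.org/printf"
+// (a hypothetical value) and a Diagnostic with Category "format" and an
+// empty URL, the result is "https://example.org/printf#format".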
+func ResolveURL(a *analysis.Analyzer, d analysis.Diagnostic) (string, error) {
+	if d.URL == "" && d.Category == "" && a.URL == "" {
+		return "", nil // do nothing
+	}
+	raw := d.URL
+	if d.URL == "" && d.Category != "" {
+		raw = "#" + d.Category
+	}
+	u, err := url.Parse(raw)
+	if err != nil {
+		return "", fmt.Errorf("invalid Diagnostic.URL %q: %s", raw, err)
+	}
+	base, err := url.Parse(a.URL)
+	if err != nil {
+		return "", fmt.Errorf("invalid Analyzer.URL %q: %s", a.URL, err)
+	}
+	return base.ResolveReference(u).String(), nil
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go
new file mode 100644
index 0000000000..0c2fc5e59d
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go
@@ -0,0 +1,471 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal/checker defines various implementation helpers for
+// the singlechecker and multichecker packages, which provide the
+// complete main function for an analysis driver executable
+// based on go/packages.
+//
+// (Note: it is not used by the public 'checker' package, since the
+// latter provides a set of pure functions for use as building blocks.)
+package checker
+
+// TODO(adonovan): publish the JSON schema in go/analysis or analysisjson.
+
+import (
+	"flag"
+	"fmt"
+	"go/format"
+	"go/token"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"runtime"
+	"runtime/pprof"
+	"runtime/trace"
+	"sort"
+	"strings"
+	"time"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/checker"
+	"golang.org/x/tools/go/analysis/internal/analysisflags"
+	"golang.org/x/tools/go/packages"
+	"golang.org/x/tools/internal/diff"
+	"golang.org/x/tools/internal/robustio"
+)
+
+var (
+	// Debug is a set of single-letter flags:
+	//
+	//	f	show [f]acts as they are created
+	//	p	disable [p]arallel execution of analyzers
+	//	s	do additional [s]anity checks on fact types and serialization
+	//	t	show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise)
+	//	v	show [v]erbose logging
+	//
+	Debug = ""
+
+	// Log files for optional performance tracing.
+	CPUProfile, MemProfile, Trace string
+
+	// IncludeTests indicates whether test files should be analyzed too.
+	IncludeTests = true
+
+	// Fix determines whether to apply all suggested fixes.
+	Fix bool
+)
+
+// RegisterFlags registers command-line flags used by the analysis driver.
+func RegisterFlags() {
+	// When adding flags here, remember to update
+	// the list of suppressed flags in analysisflags.
+
+	flag.StringVar(&Debug, "debug", Debug, `debug flags, any subset of "fpstv"`)
+
+	flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file")
+	flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file")
+	flag.StringVar(&Trace, "trace", "", "write trace log to this file")
+	flag.BoolVar(&IncludeTests, "test", IncludeTests, "indicates whether test files should be analyzed, too")
+
+	flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes")
+}
+
+// Run loads the packages specified by args using go/packages,
+// then applies the specified analyzers to them.
+// Analysis flags must already have been set.
+// Analyzers must be valid according to [analysis.Validate].
+// It provides most of the logic for the main functions of both the
+// singlechecker and the multi-analysis commands.
+// It returns the appropriate exit code.
+func Run(args []string, analyzers []*analysis.Analyzer) int {
+	if CPUProfile != "" {
+		f, err := os.Create(CPUProfile)
+		if err != nil {
+			log.Fatal(err)
+		}
+		if err := pprof.StartCPUProfile(f); err != nil {
+			log.Fatal(err)
+		}
+		// NB: profile won't be written in case of error.
+		defer pprof.StopCPUProfile()
+	}
+
+	if Trace != "" {
+		f, err := os.Create(Trace)
+		if err != nil {
+			log.Fatal(err)
+		}
+		if err := trace.Start(f); err != nil {
+			log.Fatal(err)
+		}
+		// NB: trace log won't be written in case of error.
+		defer func() {
+			trace.Stop()
+			log.Printf("To view the trace, run:\n$ go tool trace view %s", Trace)
+		}()
+	}
+
+	if MemProfile != "" {
+		f, err := os.Create(MemProfile)
+		if err != nil {
+			log.Fatal(err)
+		}
+		// NB: memprofile won't be written in case of error.
+		defer func() {
+			runtime.GC() // get up-to-date statistics
+			if err := pprof.WriteHeapProfile(f); err != nil {
+				log.Fatalf("Writing memory profile: %v", err)
+			}
+			f.Close()
+		}()
+	}
+
+	// Load the packages.
+	if dbg('v') {
+		log.SetPrefix("")
+		log.SetFlags(log.Lmicroseconds) // display timing
+		log.Printf("load %s", args)
+	}
+
+	// Optimization: if the selected analyzers don't produce/consume
+	// facts, we need source only for the initial packages.
+	allSyntax := needFacts(analyzers)
+	initial, err := load(args, allSyntax)
+	if err != nil {
+		log.Print(err)
+		return 1
+	}
+
+	pkgsExitCode := 0
+	// Print package and module errors regardless of RunDespiteErrors.
+	// Do not exit if there are errors, yet.
+	if n := packages.PrintErrors(initial); n > 0 {
+		pkgsExitCode = 1
+	}
+
+	var factLog io.Writer
+	if dbg('f') {
+		factLog = os.Stderr
+	}
+
+	// Run the analysis.
+	opts := &checker.Options{
+		SanityCheck: dbg('s'),
+		Sequential:  dbg('p'),
+		FactLog:     factLog,
+	}
+	if dbg('v') {
+		log.Printf("building graph of analysis passes")
+	}
+	graph, err := checker.Analyze(analyzers, initial, opts)
+	if err != nil {
+		log.Print(err)
+		return 1
+	}
+
+	// Apply all fixes from the root actions.
+	if Fix {
+		if err := applyFixes(graph.Roots); err != nil {
+			// Fail when applying fixes failed.
+			log.Print(err)
+			return 1
+		}
+	}
+
+	// Print the results. If !RunDespiteErrors and there
+	// are errors in the packages, this will have 0 exit
+	// code. Otherwise, we prefer to return exit code
+	// indicating diagnostics.
+	if diagExitCode := printDiagnostics(graph); diagExitCode != 0 {
+		return diagExitCode // there were diagnostics
+	}
+	return pkgsExitCode // package errors but no diagnostics
+}
+
+// printDiagnostics prints diagnostics in text or JSON form
+// and returns the appropriate exit code.
+func printDiagnostics(graph *checker.Graph) (exitcode int) {
+	// Print the results.
+	// With -json, the exit code is always zero.
+	if analysisflags.JSON {
+		if err := graph.PrintJSON(os.Stdout); err != nil {
+			return 1
+		}
+	} else {
+		if err := graph.PrintText(os.Stderr, analysisflags.Context); err != nil {
+			return 1
+		}
+
+		// Compute the exit code.
+		var numErrors, rootDiags int
+		// TODO(adonovan): use "for act := range graph.All() { ... }" in go1.23.
+		graph.All()(func(act *checker.Action) bool {
+			if act.Err != nil {
+				numErrors++
+			} else if act.IsRoot {
+				rootDiags += len(act.Diagnostics)
+			}
+			return true
+		})
+		if numErrors > 0 {
+			exitcode = 1 // analysis failed, at least partially
+		} else if rootDiags > 0 {
+			exitcode = 3 // successfully produced diagnostics
+		}
+	}
+
+	// Print timing info.
+	if dbg('t') {
+		if !dbg('p') {
+			log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism")
+		}
+
+		var list []*checker.Action
+		var total time.Duration
+		// TODO(adonovan): use "for act := range graph.All() { ... }" in go1.23.
+		graph.All()(func(act *checker.Action) bool {
+			list = append(list, act)
+			total += act.Duration
+			return true
+		})
+
+		// Print actions accounting for 90% of the total.
+		sort.Slice(list, func(i, j int) bool {
+			return list[i].Duration > list[j].Duration
+		})
+		var sum time.Duration
+		for _, act := range list {
+			fmt.Fprintf(os.Stderr, "%s\t%s\n", act.Duration, act)
+			sum += act.Duration
+			if sum >= total*9/10 {
+				break
+			}
+		}
+		if total > sum {
+			fmt.Fprintf(os.Stderr, "%s\tall others\n", total-sum)
+		}
+	}
+
+	return exitcode
+}
+
+// load loads the initial packages. Returns only top-level loading
+// errors. Does not consider errors in packages.
+func load(patterns []string, allSyntax bool) ([]*packages.Package, error) {
+	mode := packages.LoadSyntax
+	if allSyntax {
+		mode = packages.LoadAllSyntax
+	}
+	mode |= packages.NeedModule
+	conf := packages.Config{
+		Mode:  mode,
+		Tests: IncludeTests,
+	}
+	initial, err := packages.Load(&conf, patterns...)
+	if err == nil && len(initial) == 0 {
+		err = fmt.Errorf("%s matched no packages", strings.Join(patterns, " "))
+	}
+	return initial, err
+}
+
+// applyFixes applies suggested fixes associated with diagnostics
+// reported by the specified actions. It verifies that edits do not
+// conflict, even through file-system level aliases such as symbolic
+// links, and then edits the files.
+func applyFixes(actions []*checker.Action) error {
+	// Visit all of the actions and accumulate the suggested edits.
+	paths := make(map[robustio.FileID]string)
+	editsByAction := make(map[robustio.FileID]map[*checker.Action][]diff.Edit)
+	for _, act := range actions {
+		editsForTokenFile := make(map[*token.File][]diff.Edit)
+		for _, diag := range act.Diagnostics {
+			for _, sf := range diag.SuggestedFixes {
+				for _, edit := range sf.TextEdits {
+					// Validate the edit.
+					// Any error here indicates a bug in the analyzer.
+					start, end := edit.Pos, edit.End
+					file := act.Package.Fset.File(start)
+					if file == nil {
+						return fmt.Errorf("analysis %q suggests invalid fix: missing file info for pos (%v)",
+							act.Analyzer.Name, edit.Pos)
+					}
+					if !end.IsValid() {
+						end = start
+					}
+					if start > end {
+						return fmt.Errorf("analysis %q suggests invalid fix: pos (%v) > end (%v)",
+							act.Analyzer.Name, edit.Pos, edit.End)
+					}
+					if eof := token.Pos(file.Base() + file.Size()); end > eof {
+						return fmt.Errorf("analysis %q suggests invalid fix: end (%v) past end of file (%v)",
+							act.Analyzer.Name, edit.End, eof)
+					}
+					edit := diff.Edit{
+						Start: file.Offset(start),
+						End:   file.Offset(end),
+						New:   string(edit.NewText),
+					}
+					editsForTokenFile[file] = append(editsForTokenFile[file], edit)
+				}
+			}
+		}
+
+		for f, edits := range editsForTokenFile {
+			id, _, err := robustio.GetFileID(f.Name())
+			if err != nil {
+				return err
+			}
+			if _, hasId := paths[id]; !hasId {
+				paths[id] = f.Name()
+				editsByAction[id] = make(map[*checker.Action][]diff.Edit)
+			}
+			editsByAction[id][act] = edits
+		}
+	}
+
+	// Validate and group the edits to each actual file.
+	editsByPath := make(map[string][]diff.Edit)
+	for id, actToEdits := range editsByAction {
+		path := paths[id]
+		actions := make([]*checker.Action, 0, len(actToEdits))
+		for act := range actToEdits {
+			actions = append(actions, act)
+		}
+
+		// Does any action create conflicting edits?
+		for _, act := range actions {
+			edits := actToEdits[act]
+			if _, invalid := validateEdits(edits); invalid > 0 {
+				name, x, y := act.Analyzer.Name, edits[invalid-1], edits[invalid]
+				return diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y})
+			}
+		}
+
+		// Does any pair of different actions create edits that conflict?
+		for j := range actions {
+			for k := range actions[:j] {
+				x, y := actions[j], actions[k]
+				if x.Analyzer.Name > y.Analyzer.Name {
+					x, y = y, x
+				}
+				xedits, yedits := actToEdits[x], actToEdits[y]
+				combined := append(xedits, yedits...)
+				if _, invalid := validateEdits(combined); invalid > 0 {
+					// TODO: consider applying each action's consistent list of edits entirely,
+					// and then using a three-way merge (such as GNU diff3) on the resulting
+					// files to report more precisely the parts that actually conflict.
+					return diff3Conflict(path, x.Analyzer.Name, y.Analyzer.Name, xedits, yedits)
+				}
+			}
+		}
+
+		var edits []diff.Edit
+		for act := range actToEdits {
+			edits = append(edits, actToEdits[act]...)
+		}
+		editsByPath[path], _ = validateEdits(edits) // remove duplicates. already validated.
+	}
+
+	// Now we've got a set of valid edits for each file. Apply them.
+	// TODO(adonovan): don't abort the operation partway just because one file fails.
+	for path, edits := range editsByPath {
+		// TODO(adonovan): this should really work on the same
+		// gulp from the file system that fed the analyzer (see #62292).
+		contents, err := os.ReadFile(path)
+		if err != nil {
+			return err
+		}
+
+		out, err := diff.ApplyBytes(contents, edits)
+		if err != nil {
+			return err
+		}
+
+		// Try to format the file.
+		if formatted, err := format.Source(out); err == nil {
+			out = formatted
+		}
+
+		if err := os.WriteFile(path, out, 0644); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// validateEdits returns a list of edits that is sorted and
+// contains no duplicate edits. Returns the index of some
+// overlapping adjacent edits if there is one and <0 if the
+// edits are valid.
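+//
+// For example, edits [5,7)->"x" and [6,9)->"y" overlap, so the returned
+// index points at the second of the pair; two byte-identical edits at the
+// same range are deduplicated and do not conflict.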
+func validateEdits(edits []diff.Edit) ([]diff.Edit, int) {
+	if len(edits) == 0 {
+		return nil, -1
+	}
+	equivalent := func(x, y diff.Edit) bool {
+		return x.Start == y.Start && x.End == y.End && x.New == y.New
+	}
+	diff.SortEdits(edits)
+	unique := []diff.Edit{edits[0]}
+	invalid := -1
+	for i := 1; i < len(edits); i++ {
+		prev, cur := edits[i-1], edits[i]
+		// We skip over equivalent edits without considering them
+		// an error. This handles identical edits coming from the
+		// multiple ways of loading a package into a
+		// *go/packages.Packages for testing, e.g. packages "p" and "p [p.test]".
+		if !equivalent(prev, cur) {
+			unique = append(unique, cur)
+			if prev.End > cur.Start {
+				invalid = i
+			}
+		}
+	}
+	return unique, invalid
+}
+
+// diff3Conflict returns an error describing two conflicting sets of
+// edits on a file at path.
+func diff3Conflict(path string, xlabel, ylabel string, xedits, yedits []diff.Edit) error {
+	contents, err := os.ReadFile(path)
+	if err != nil {
+		return err
+	}
+	oldlabel, old := "base", string(contents)
+
+	xdiff, err := diff.ToUnified(oldlabel, xlabel, old, xedits, diff.DefaultContextLines)
+	if err != nil {
+		return err
+	}
+	ydiff, err := diff.ToUnified(oldlabel, ylabel, old, yedits, diff.DefaultContextLines)
+	if err != nil {
+		return err
+	}
+
+	return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s",
+		xlabel, ylabel, path, xdiff, ydiff)
+}
+
+// needFacts reports whether any analysis required by the specified set
+// needs facts. If so, we must load the entire program from source.
+func needFacts(analyzers []*analysis.Analyzer) bool {
+	seen := make(map[*analysis.Analyzer]bool)
+	var q []*analysis.Analyzer // for BFS
+	q = append(q, analyzers...)
+	for len(q) > 0 {
+		a := q[0]
+		q = q[1:]
+		if !seen[a] {
+			seen[a] = true
+			if len(a.FactTypes) > 0 {
+				return true
+			}
+			q = append(q, a.Requires...)
+		}
+	}
+	return false
+}
+
+func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 }
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/internal.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/internal.go
new file mode 100644
index 0000000000..e7c8247fd3
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/internal/internal.go
@@ -0,0 +1,12 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import "golang.org/x/tools/go/analysis"
+
+// This function is set by the checker package to provide
+// backdoor access to the private Pass field
+// of the checker.Action type, for use by analysistest.
+var Pass func(interface{}) *analysis.Pass
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/multichecker/multichecker.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/multichecker/multichecker.go
new file mode 100644
index 0000000000..3c62be5818
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/multichecker/multichecker.go
@@ -0,0 +1,60 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package multichecker defines the main function for an analysis driver
+// with several analyzers. This package makes it easy for anyone to build
+// an analysis tool containing just the analyzers they need.
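+//
+// A minimal driver built on this package might look like the
+// following sketch (the analyzers chosen here are illustrative,
+// not prescribed by this package):
+//
+//	package main
+//
+//	import (
+//		"golang.org/x/tools/go/analysis/multichecker"
+//		"golang.org/x/tools/go/analysis/passes/printf"
+//		"golang.org/x/tools/go/analysis/passes/shadow"
+//	)
+//
+//	func main() {
+//		multichecker.Main(printf.Analyzer, shadow.Analyzer)
+//	}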
+package multichecker + +import ( + "flag" + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/analysisflags" + "golang.org/x/tools/go/analysis/internal/checker" + "golang.org/x/tools/go/analysis/unitchecker" +) + +func Main(analyzers ...*analysis.Analyzer) { + progname := filepath.Base(os.Args[0]) + log.SetFlags(0) + log.SetPrefix(progname + ": ") // e.g. "vet: " + + if err := analysis.Validate(analyzers); err != nil { + log.Fatal(err) + } + + checker.RegisterFlags() + + analyzers = analysisflags.Parse(analyzers, true) + + args := flag.Args() + if len(args) == 0 { + fmt.Fprintf(os.Stderr, `%[1]s is a tool for static analysis of Go programs. + +Usage: %[1]s [-flag] [package] + +Run '%[1]s help' for more detail, + or '%[1]s help name' for details and flags of a specific analyzer. +`, progname) + os.Exit(1) + } + + if args[0] == "help" { + analysisflags.Help(progname, analyzers, args[1:]) + os.Exit(0) + } + + if len(args) == 1 && strings.HasSuffix(args[0], ".cfg") { + unitchecker.Run(args[0], analyzers) + panic("unreachable") + } + + os.Exit(checker.Run(args, analyzers)) +} diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go index b5a2d2775f..e7434e8fed 100644 --- a/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go +++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go @@ -15,7 +15,6 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/internal/analysisutil" - "golang.org/x/tools/internal/versions" ) const Doc = "check //go:build and // +build directives" @@ -371,11 +370,6 @@ func (check *checker) finish() { // tags reports issues in go versions in tags within the expression e. func (check *checker) tags(pos token.Pos, e constraint.Expr) { - // Check that constraint.GoVersion is meaningful (>= go1.21). - if versions.ConstraintGoVersion == nil { - return - } - // Use Eval to visit each tag. _ = e.Eval(func(tag string) bool { if malformedGoTag(tag) { @@ -393,10 +387,8 @@ func malformedGoTag(tag string) bool { // Check for close misspellings of the "go1." prefix. for _, pre := range []string{"go.", "g1.", "go"} { suffix := strings.TrimPrefix(tag, pre) - if suffix != tag { - if valid, ok := validTag("go1." + suffix); ok && valid { - return true - } + if suffix != tag && validGoVersion("go1."+suffix) { + return true } } return false @@ -404,15 +396,10 @@ func malformedGoTag(tag string) bool { // The tag starts with "go1" so it is almost certainly a GoVersion. // Report it if it is not a valid build constraint. - valid, ok := validTag(tag) - return ok && !valid + return !validGoVersion(tag) } -// validTag returns (valid, ok) where valid reports when a tag is valid, -// and ok reports determining if the tag is valid succeeded. -func validTag(tag string) (valid bool, ok bool) { - if versions.ConstraintGoVersion != nil { - return versions.ConstraintGoVersion(&constraint.TagExpr{Tag: tag}) != "", true - } - return false, false +// validGoVersion reports when a tag is a valid go version. 
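+// For example, a tag such as "go1.21" is accepted, while a malformed
+// version such as "go1.x" yields no version and is rejected
+// (assumed examples, not exhaustive).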
+func validGoVersion(tag string) bool {
+	return constraint.GoVersion(&constraint.TagExpr{Tag: tag}) != ""
 }
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
index 2d79d0b033..171ad20137 100644
--- a/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go
@@ -433,6 +433,9 @@ func printfNameAndKind(pass *analysis.Pass, call *ast.CallExpr) (fn *types.Func,
 		return nil, 0
 	}
 
+	// Facts are associated with generic declarations, not instantiations.
+	fn = fn.Origin()
+
 	_, ok := isPrint[fn.FullName()]
 	if !ok {
 		// Next look up just "printf", for use with -printf.funcs.
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/stdversion/stdversion.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/stdversion/stdversion.go
new file mode 100644
index 0000000000..75d8697759
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/stdversion/stdversion.go
@@ -0,0 +1,159 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package stdversion reports uses of standard library symbols that are
+// "too new" for the Go version in force in the referring file.
+package stdversion
+
+import (
+	"go/ast"
+	"go/build"
+	"go/types"
+	"regexp"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
+	"golang.org/x/tools/internal/typesinternal"
+	"golang.org/x/tools/internal/versions"
+)
+
+const Doc = `report uses of too-new standard library symbols
+
+The stdversion analyzer reports references to symbols in the standard
+library that were introduced by a Go release higher than the one in
+force in the referring file. (Recall that the file's Go version is
+defined by the 'go' directive in its module's go.mod file, or by a
+"//go:build go1.X" build tag at the top of the file.)
+
+The analyzer does not report a diagnostic for a reference to a "too
+new" field or method of a type that is itself "too new", as this may
+have false positives, for example if fields or methods are accessed
+through a type alias that is guarded by a Go version constraint.
+`
+
+var Analyzer = &analysis.Analyzer{
+	Name:             "stdversion",
+	Doc:              Doc,
+	Requires:         []*analysis.Analyzer{inspect.Analyzer},
+	URL:              "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/stdversion",
+	RunDespiteErrors: true,
+	Run:              run,
+}
+
+func run(pass *analysis.Pass) (any, error) {
+	// Prior to go1.22, versions.FileVersion returns only the
+	// toolchain version, which is of no use to us, so
+	// disable this analyzer on earlier versions.
+	if !slicesContains(build.Default.ReleaseTags, "go1.22") {
+		return nil, nil
+	}
+
+	// Don't report diagnostics for modules marked before go1.21,
+	// since at that time the go directive wasn't clearly
+	// specified as a toolchain requirement.
+	//
+	// TODO(adonovan): after go1.21, call GoVersion directly.
+	pkgVersion := any(pass.Pkg).(interface{ GoVersion() string }).GoVersion()
+	if !versions.AtLeast(pkgVersion, "go1.21") {
+		return nil, nil
+	}
+
+	// disallowedSymbols returns the set of standard library symbols
+	// in a given package that are disallowed at the specified Go version.
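+	// (Results are memoized per package/version pair, since the same
+	// imported package is typically consulted for many identifiers.)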
+ type key struct { + pkg *types.Package + version string + } + memo := make(map[key]map[types.Object]string) // records symbol's minimum Go version + disallowedSymbols := func(pkg *types.Package, version string) map[types.Object]string { + k := key{pkg, version} + disallowed, ok := memo[k] + if !ok { + disallowed = typesinternal.TooNewStdSymbols(pkg, version) + memo[k] = disallowed + } + return disallowed + } + + // Scan the syntax looking for references to symbols + // that are disallowed by the version of the file. + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.File)(nil), + (*ast.Ident)(nil), + } + var fileVersion string // "" => no check + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.File: + if isGenerated(n) { + // Suppress diagnostics in generated files (such as cgo). + fileVersion = "" + } else { + fileVersion = versions.Lang(versions.FileVersion(pass.TypesInfo, n)) + // (may be "" if unknown) + } + + case *ast.Ident: + if fileVersion != "" { + if obj, ok := pass.TypesInfo.Uses[n]; ok && obj.Pkg() != nil { + disallowed := disallowedSymbols(obj.Pkg(), fileVersion) + if minVersion, ok := disallowed[origin(obj)]; ok { + noun := "module" + if fileVersion != pkgVersion { + noun = "file" + } + pass.ReportRangef(n, "%s.%s requires %v or later (%s is %s)", + obj.Pkg().Name(), obj.Name(), minVersion, noun, fileVersion) + } + } + } + } + }) + return nil, nil +} + +// Reduced from x/tools/gopls/internal/golang/util.go. Good enough for now. +// TODO(adonovan): use ast.IsGenerated in go1.21. +func isGenerated(f *ast.File) bool { + for _, group := range f.Comments { + for _, comment := range group.List { + if matched := generatedRx.MatchString(comment.Text); matched { + return true + } + } + } + return false +} + +// Matches cgo generated comment as well as the proposed standard: +// +// https://golang.org/s/generatedcode +var generatedRx = regexp.MustCompile(`// .*DO NOT EDIT\.?`) + +// origin returns the original uninstantiated symbol for obj. +func origin(obj types.Object) types.Object { + switch obj := obj.(type) { + case *types.Var: + return obj.Origin() + case *types.Func: + return obj.Origin() + case *types.TypeName: + if named, ok := obj.Type().(*types.Named); ok { // (don't unalias) + return named.Origin().Obj() + } + } + return obj +} + +// TODO(adonovan): use go1.21 slices.Contains. +func slicesContains[S ~[]E, E comparable](slice S, x E) bool { + for _, elem := range slice { + if elem == x { + return true + } + } + return false +} diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/doc.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/doc.go new file mode 100644 index 0000000000..207f741830 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/doc.go @@ -0,0 +1,34 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package waitgroup defines an Analyzer that detects simple misuses +// of sync.WaitGroup. +// +// # Analyzer waitgroup +// +// waitgroup: check for misuses of sync.WaitGroup +// +// This analyzer detects mistaken calls to the (*sync.WaitGroup).Add +// method from inside a new goroutine, causing Add to race with Wait: +// +// // WRONG +// var wg sync.WaitGroup +// go func() { +// wg.Add(1) // "WaitGroup.Add called from inside new goroutine" +// defer wg.Done() +// ... 
+// }() +// wg.Wait() // (may return prematurely before new goroutine starts) +// +// The correct code calls Add before starting the goroutine: +// +// // RIGHT +// var wg sync.WaitGroup +// wg.Add(1) +// go func() { +// defer wg.Done() +// ... +// }() +// wg.Wait() +package waitgroup diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go new file mode 100644 index 0000000000..cbb0bfc9e6 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/passes/waitgroup/waitgroup.go @@ -0,0 +1,105 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package waitgroup defines an Analyzer that detects simple misuses +// of sync.WaitGroup. +package waitgroup + +import ( + _ "embed" + "go/ast" + "go/types" + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/analysis/passes/internal/analysisutil" + "golang.org/x/tools/go/ast/inspector" + "golang.org/x/tools/go/types/typeutil" + "golang.org/x/tools/internal/typesinternal" +) + +//go:embed doc.go +var doc string + +var Analyzer = &analysis.Analyzer{ + Name: "waitgroup", + Doc: analysisutil.MustExtractDoc(doc, "waitgroup"), + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/waitgroup", + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: run, +} + +func run(pass *analysis.Pass) (any, error) { + if !analysisutil.Imports(pass.Pkg, "sync") { + return nil, nil // doesn't directly import sync + } + + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + nodeFilter := []ast.Node{ + (*ast.CallExpr)(nil), + } + + inspect.WithStack(nodeFilter, func(n ast.Node, push bool, stack []ast.Node) (proceed bool) { + if push { + call := n.(*ast.CallExpr) + if fn, ok := typeutil.Callee(pass.TypesInfo, call).(*types.Func); ok && + isMethodNamed(fn, "sync", "WaitGroup", "Add") && + hasSuffix(stack, wantSuffix) && + backindex(stack, 1) == backindex(stack, 2).(*ast.BlockStmt).List[0] { // ExprStmt must be Block's first stmt + + pass.Reportf(call.Lparen, "WaitGroup.Add called from inside new goroutine") + } + } + return true + }) + + return nil, nil +} + +// go func() { +// wg.Add(1) +// ... +// }() +var wantSuffix = []ast.Node{ + (*ast.GoStmt)(nil), + (*ast.CallExpr)(nil), + (*ast.FuncLit)(nil), + (*ast.BlockStmt)(nil), + (*ast.ExprStmt)(nil), + (*ast.CallExpr)(nil), +} + +// hasSuffix reports whether stack has the matching suffix, +// considering only node types. +func hasSuffix(stack, suffix []ast.Node) bool { + // TODO(adonovan): the inspector could implement this for us. + if len(stack) < len(suffix) { + return false + } + for i := range len(suffix) { + if reflect.TypeOf(backindex(stack, i)) != reflect.TypeOf(backindex(suffix, i)) { + return false + } + } + return true +} + +// isMethodNamed reports whether f is a method with the specified +// package, receiver type, and method names. +func isMethodNamed(fn *types.Func, pkg, recv, name string) bool { + if fn.Pkg() != nil && fn.Pkg().Path() == pkg && fn.Name() == name { + if r := fn.Type().(*types.Signature).Recv(); r != nil { + if _, gotRecv := typesinternal.ReceiverNamed(r); gotRecv != nil { + return gotRecv.Obj().Name() == recv + } + } + } + return false +} + +// backindex is like [slices.Index] but from the back of the slice. 
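+// For instance, backindex(stack, 0) is the last element of stack
+// and backindex(stack, 1) is the element before it.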
+func backindex[T any](slice []T, i int) T {
+	return slice[len(slice)-1-i]
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/hack/tools/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go
new file mode 100644
index 0000000000..1a9b3094e5
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go
@@ -0,0 +1,451 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The unitchecker package defines the main function for an analysis
+// driver that analyzes a single compilation unit during a build.
+// It is invoked by a build system such as "go vet":
+//
+//	$ go vet -vettool=$(which vet)
+//
+// It supports the following command-line protocol:
+//
+//	-V=full         describe executable               (to the build tool)
+//	-flags          describe flags                    (to the build tool)
+//	foo.cfg         description of compilation unit (from the build tool)
+//
+// This package does not depend on go/packages.
+// If you need a standalone tool, use multichecker,
+// which supports this mode but can also load packages
+// from source using go/packages.
+package unitchecker
+
+// TODO(adonovan):
+// - with gccgo, go build does not build standard library,
+//   so we will not get to analyze it. Yet we must in order
+//   to create base facts for, say, the fmt package for the
+//   printf checker.
+
+import (
+	"encoding/gob"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"go/ast"
+	"go/build"
+	"go/importer"
+	"go/parser"
+	"go/token"
+	"go/types"
+	"io"
+	"log"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/internal/analysisflags"
+	"golang.org/x/tools/internal/analysisinternal"
+	"golang.org/x/tools/internal/facts"
+)
+
+// A Config describes a compilation unit to be analyzed.
+// It is provided to the tool in a JSON-encoded file
+// whose name ends with ".cfg".
+type Config struct {
+	ID                        string // e.g. "fmt [fmt.test]"
+	Compiler                  string // gc or gccgo, provided to MakeImporter
+	Dir                       string // (unused)
+	ImportPath                string // package path
+	GoVersion                 string // minimum required Go version, such as "go1.21.0"
+	GoFiles                   []string
+	NonGoFiles                []string
+	IgnoredFiles              []string
+	ModulePath                string            // module path
+	ModuleVersion             string            // module version
+	ImportMap                 map[string]string // maps import path to package path
+	PackageFile               map[string]string // maps package path to file of type information
+	Standard                  map[string]bool   // package belongs to standard library
+	PackageVetx               map[string]string // maps package path to file of fact information
+	VetxOnly                  bool              // run analysis only for facts, not diagnostics
+	VetxOutput                string            // where to write file of fact information
+	SucceedOnTypecheckFailure bool
+}
+
+// Main is the main function of a vet-like analysis tool that must be
+// invoked by a build system to analyze a single package.
+//
+// The protocol required by 'go vet -vettool=...' is that the tool must support:
+//
+//	-flags          describe flags in JSON
+//	-V=full         describe executable for build caching
+//	foo.cfg         perform separate modular analysis on the single
+//	                unit described by a JSON config file foo.cfg.
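+//
+// For example, a build system might invoke the tool roughly as
+//
+//	$ unitchecker fmt/fmt.cfg
+//
+// (the binary name and .cfg path here are illustrative; the build
+// tool chooses the actual file names).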
+func Main(analyzers ...*analysis.Analyzer) { + progname := filepath.Base(os.Args[0]) + log.SetFlags(0) + log.SetPrefix(progname + ": ") + + if err := analysis.Validate(analyzers); err != nil { + log.Fatal(err) + } + + flag.Usage = func() { + fmt.Fprintf(os.Stderr, `%[1]s is a tool for static analysis of Go programs. + +Usage of %[1]s: + %.16[1]s unit.cfg # execute analysis specified by config file + %.16[1]s help # general help, including listing analyzers and flags + %.16[1]s help name # help on specific analyzer and its flags +`, progname) + os.Exit(1) + } + + analyzers = analysisflags.Parse(analyzers, true) + + args := flag.Args() + if len(args) == 0 { + flag.Usage() + } + if args[0] == "help" { + analysisflags.Help(progname, analyzers, args[1:]) + os.Exit(0) + } + if len(args) != 1 || !strings.HasSuffix(args[0], ".cfg") { + log.Fatalf(`invoking "go tool vet" directly is unsupported; use "go vet"`) + } + Run(args[0], analyzers) +} + +// Run reads the *.cfg file, runs the analysis, +// and calls os.Exit with an appropriate error code. +// It assumes flags have already been set. +func Run(configFile string, analyzers []*analysis.Analyzer) { + cfg, err := readConfig(configFile) + if err != nil { + log.Fatal(err) + } + + fset := token.NewFileSet() + results, err := run(fset, cfg, analyzers) + if err != nil { + log.Fatal(err) + } + + // In VetxOnly mode, the analysis is run only for facts. + if !cfg.VetxOnly { + if analysisflags.JSON { + // JSON output + tree := make(analysisflags.JSONTree) + for _, res := range results { + tree.Add(fset, cfg.ID, res.a.Name, res.diagnostics, res.err) + } + tree.Print(os.Stdout) + } else { + // plain text + exit := 0 + for _, res := range results { + if res.err != nil { + log.Println(res.err) + exit = 1 + } + } + for _, res := range results { + for _, diag := range res.diagnostics { + analysisflags.PrintPlain(os.Stderr, fset, analysisflags.Context, diag) + exit = 1 + } + } + os.Exit(exit) + } + } + + os.Exit(0) +} + +func readConfig(filename string) (*Config, error) { + data, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + cfg := new(Config) + if err := json.Unmarshal(data, cfg); err != nil { + return nil, fmt.Errorf("cannot decode JSON config file %s: %v", filename, err) + } + if len(cfg.GoFiles) == 0 { + // The go command disallows packages with no files. + // The only exception is unsafe, but the go command + // doesn't call vet on it. + return nil, fmt.Errorf("package has no files: %s", cfg.ImportPath) + } + return cfg, nil +} + +type factImporter = func(pkgPath string) ([]byte, error) + +// These four hook variables are a proof of concept of a future +// parameterization of a unitchecker API that allows the client to +// determine how and where facts and types are produced and consumed. +// (Note that the eventual API will likely be quite different.) +// +// The defaults honor a Config in a manner compatible with 'go vet'. +var ( + makeTypesImporter = func(cfg *Config, fset *token.FileSet) types.Importer { + compilerImporter := importer.ForCompiler(fset, cfg.Compiler, func(path string) (io.ReadCloser, error) { + // path is a resolved package path, not an import path. 
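+			// (PackageFile maps each such resolved path to the file
+			// of type information produced when that package was
+			// compiled; see the Config documentation above.)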
+			file, ok := cfg.PackageFile[path]
+			if !ok {
+				if cfg.Compiler == "gccgo" && cfg.Standard[path] {
+					return nil, nil // fall back to default gccgo lookup
+				}
+				return nil, fmt.Errorf("no package file for %q", path)
+			}
+			return os.Open(file)
+		})
+		return importerFunc(func(importPath string) (*types.Package, error) {
+			path, ok := cfg.ImportMap[importPath] // resolve vendoring, etc
+			if !ok {
+				return nil, fmt.Errorf("can't resolve import %q", importPath)
+			}
+			return compilerImporter.Import(path)
+		})
+	}
+
+	exportTypes = func(*Config, *token.FileSet, *types.Package) error {
+		// By default this is a no-op, because "go vet"
+		// makes the compiler produce type information.
+		return nil
+	}
+
+	makeFactImporter = func(cfg *Config) factImporter {
+		return func(pkgPath string) ([]byte, error) {
+			if vetx, ok := cfg.PackageVetx[pkgPath]; ok {
+				return os.ReadFile(vetx)
+			}
+			return nil, nil // no .vetx file, no facts
+		}
+	}
+
+	exportFacts = func(cfg *Config, data []byte) error {
+		return os.WriteFile(cfg.VetxOutput, data, 0666)
+	}
+)
+
+func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]result, error) {
+	// Load, parse, typecheck.
+	var files []*ast.File
+	for _, name := range cfg.GoFiles {
+		f, err := parser.ParseFile(fset, name, nil, parser.ParseComments)
+		if err != nil {
+			if cfg.SucceedOnTypecheckFailure {
+				// Silently succeed; let the compiler
+				// report parse errors.
+				err = nil
+			}
+			return nil, err
+		}
+		files = append(files, f)
+	}
+	tc := &types.Config{
+		Importer:  makeTypesImporter(cfg, fset),
+		Sizes:     types.SizesFor("gc", build.Default.GOARCH), // TODO(adonovan): use cfg.Compiler
+		GoVersion: cfg.GoVersion,
+	}
+	info := &types.Info{
+		Types:        make(map[ast.Expr]types.TypeAndValue),
+		Defs:         make(map[*ast.Ident]types.Object),
+		Uses:         make(map[*ast.Ident]types.Object),
+		Implicits:    make(map[ast.Node]types.Object),
+		Instances:    make(map[*ast.Ident]types.Instance),
+		Scopes:       make(map[ast.Node]*types.Scope),
+		Selections:   make(map[*ast.SelectorExpr]*types.Selection),
+		FileVersions: make(map[*ast.File]string),
+	}
+
+	pkg, err := tc.Check(cfg.ImportPath, fset, files, info)
+	if err != nil {
+		if cfg.SucceedOnTypecheckFailure {
+			// Silently succeed; let the compiler
+			// report type errors.
+			err = nil
+		}
+		return nil, err
+	}
+
+	// Register fact types with gob.
+	// In VetxOnly mode, analyzers are only for their facts,
+	// so we can skip any analysis that neither produces facts
+	// nor depends on any analysis that produces facts.
+	//
+	// TODO(adonovan): fix: the comment (and logic!) here are backwards.
+	// It should say "...nor is required by any...". (Issue 443099)
+	//
+	// Also build a map to hold working state and result.
+	type action struct {
+		once        sync.Once
+		result      interface{}
+		err         error
+		usesFacts   bool // (transitively uses)
+		diagnostics []analysis.Diagnostic
+	}
+	actions := make(map[*analysis.Analyzer]*action)
+	var registerFacts func(a *analysis.Analyzer) bool
+	registerFacts = func(a *analysis.Analyzer) bool {
+		act, ok := actions[a]
+		if !ok {
+			act = new(action)
+			var usesFacts bool
+			for _, f := range a.FactTypes {
+				usesFacts = true
+				gob.Register(f)
+			}
+			for _, req := range a.Requires {
+				if registerFacts(req) {
+					usesFacts = true
+				}
+			}
+			act.usesFacts = usesFacts
+			actions[a] = act
+		}
+		return act.usesFacts
+	}
+	var filtered []*analysis.Analyzer
+	for _, a := range analyzers {
+		if registerFacts(a) || !cfg.VetxOnly {
+			filtered = append(filtered, a)
+		}
+	}
+	analyzers = filtered
+
+	// Read facts from imported packages.
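+	// (These are the fact files named by cfg.PackageVetx; they were
+	// written out via exportFacts when each dependency was analyzed.)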
+ facts, err := facts.NewDecoder(pkg).Decode(makeFactImporter(cfg)) + if err != nil { + return nil, err + } + + // In parallel, execute the DAG of analyzers. + var exec func(a *analysis.Analyzer) *action + var execAll func(analyzers []*analysis.Analyzer) + exec = func(a *analysis.Analyzer) *action { + act := actions[a] + act.once.Do(func() { + execAll(a.Requires) // prefetch dependencies in parallel + + // The inputs to this analysis are the + // results of its prerequisites. + inputs := make(map[*analysis.Analyzer]interface{}) + var failed []string + for _, req := range a.Requires { + reqact := exec(req) + if reqact.err != nil { + failed = append(failed, req.String()) + continue + } + inputs[req] = reqact.result + } + + // Report an error if any dependency failed. + if failed != nil { + sort.Strings(failed) + act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", ")) + return + } + + factFilter := make(map[reflect.Type]bool) + for _, f := range a.FactTypes { + factFilter[reflect.TypeOf(f)] = true + } + + module := &analysis.Module{ + Path: cfg.ModulePath, + Version: cfg.ModuleVersion, + GoVersion: cfg.GoVersion, + } + + pass := &analysis.Pass{ + Analyzer: a, + Fset: fset, + Files: files, + OtherFiles: cfg.NonGoFiles, + IgnoredFiles: cfg.IgnoredFiles, + Pkg: pkg, + TypesInfo: info, + TypesSizes: tc.Sizes, + TypeErrors: nil, // unitchecker doesn't RunDespiteErrors + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, + ImportObjectFact: facts.ImportObjectFact, + ExportObjectFact: facts.ExportObjectFact, + AllObjectFacts: func() []analysis.ObjectFact { return facts.AllObjectFacts(factFilter) }, + ImportPackageFact: facts.ImportPackageFact, + ExportPackageFact: facts.ExportPackageFact, + AllPackageFacts: func() []analysis.PackageFact { return facts.AllPackageFacts(factFilter) }, + Module: module, + } + pass.ReadFile = analysisinternal.MakeReadFile(pass) + + t0 := time.Now() + act.result, act.err = a.Run(pass) + + if act.err == nil { // resolve URLs on diagnostics. + for i := range act.diagnostics { + if url, uerr := analysisflags.ResolveURL(a, act.diagnostics[i]); uerr == nil { + act.diagnostics[i].URL = url + } else { + act.err = uerr // keep the last error + } + } + } + if false { + log.Printf("analysis %s = %s", pass, time.Since(t0)) + } + }) + return act + } + execAll = func(analyzers []*analysis.Analyzer) { + var wg sync.WaitGroup + for _, a := range analyzers { + wg.Add(1) + go func(a *analysis.Analyzer) { + _ = exec(a) + wg.Done() + }(a) + } + wg.Wait() + } + + execAll(analyzers) + + // Return diagnostics and errors from root analyzers. 
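+	// (Only the analyzers that survived the VetxOnly filtering above
+	// are reported; their prerequisites ran but are not listed here.)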
+ results := make([]result, len(analyzers)) + for i, a := range analyzers { + act := actions[a] + results[i].a = a + results[i].err = act.err + results[i].diagnostics = act.diagnostics + } + + data := facts.Encode() + if err := exportFacts(cfg, data); err != nil { + return nil, fmt.Errorf("failed to export analysis facts: %v", err) + } + if err := exportTypes(cfg, fset, pkg); err != nil { + return nil, fmt.Errorf("failed to export type information: %v", err) + } + + return results, nil +} + +type result struct { + a *analysis.Analyzer + diagnostics []analysis.Diagnostic + err error +} + +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } diff --git a/hack/tools/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/hack/tools/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index f3ab0a2e12..65fe2628e9 100644 --- a/hack/tools/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/hack/tools/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -106,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) { // additional trailing data beyond the end of the export data. func NewReader(r io.Reader) (io.Reader, error) { buf := bufio.NewReader(r) - _, size, err := gcimporter.FindExportData(buf) + size, err := gcimporter.FindExportData(buf) if err != nil { return nil, err } - if size >= 0 { - // We were given an archive and found the __.PKGDEF in it. - // This tells us the size of the export data, and we don't - // need to return the entire file. - return &io.LimitedReader{ - R: buf, - N: size, - }, nil - } else { - // We were given an object file. As such, we don't know how large - // the export data is and must return the entire file. - return buf, nil - } + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil } // readAll works the same way as io.ReadAll, but avoids allocations and copies diff --git a/hack/tools/vendor/golang.org/x/tools/go/packages/external.go b/hack/tools/vendor/golang.org/x/tools/go/packages/external.go index 96db9daf31..91bd62e83b 100644 --- a/hack/tools/vendor/golang.org/x/tools/go/packages/external.go +++ b/hack/tools/vendor/golang.org/x/tools/go/packages/external.go @@ -13,6 +13,7 @@ import ( "fmt" "os" "os/exec" + "slices" "strings" ) @@ -131,7 +132,7 @@ func findExternalDriver(cfg *Config) driver { // command. // // (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go) - cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir) + cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir) cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr @@ -150,7 +151,3 @@ func findExternalDriver(cfg *Config) driver { return &response, nil } } - -// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)]. -// TODO(adonovan): use go1.21 slices.Clip. 
-func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } diff --git a/hack/tools/vendor/golang.org/x/tools/go/packages/golist.go b/hack/tools/vendor/golang.org/x/tools/go/packages/golist.go index 76f910ecec..870271ed51 100644 --- a/hack/tools/vendor/golang.org/x/tools/go/packages/golist.go +++ b/hack/tools/vendor/golang.org/x/tools/go/packages/golist.go @@ -505,13 +505,14 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse pkg := &Package{ Name: p.Name, ID: p.ImportPath, + Dir: p.Dir, GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), EmbedFiles: absJoin(p.Dir, p.EmbedFiles), EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), - forTest: p.ForTest, + ForTest: p.ForTest, depsErrors: p.DepsErrors, Module: p.Module, } @@ -795,7 +796,7 @@ func jsonFlag(cfg *Config, goVersion int) string { // Request Dir in the unlikely case Export is not absolute. addFields("Dir", "Export") } - if cfg.Mode&needInternalForTest != 0 { + if cfg.Mode&NeedForTest != 0 { addFields("ForTest") } if cfg.Mode&needInternalDepsErrors != 0 { diff --git a/hack/tools/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/hack/tools/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 5fcad6ea6d..969da4c263 100644 --- a/hack/tools/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/hack/tools/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -23,6 +23,7 @@ var modes = [...]struct { {NeedSyntax, "NeedSyntax"}, {NeedTypesInfo, "NeedTypesInfo"}, {NeedTypesSizes, "NeedTypesSizes"}, + {NeedForTest, "NeedForTest"}, {NeedModule, "NeedModule"}, {NeedEmbedFiles, "NeedEmbedFiles"}, {NeedEmbedPatterns, "NeedEmbedPatterns"}, diff --git a/hack/tools/vendor/golang.org/x/tools/go/packages/packages.go b/hack/tools/vendor/golang.org/x/tools/go/packages/packages.go index 2ecc64238e..9dedf9777d 100644 --- a/hack/tools/vendor/golang.org/x/tools/go/packages/packages.go +++ b/hack/tools/vendor/golang.org/x/tools/go/packages/packages.go @@ -43,6 +43,20 @@ import ( // ID and Errors (if present) will always be filled. // [Load] may return more information than requested. // +// The Mode flag is a union of several bits named NeedName, +// NeedFiles, and so on, each of which determines whether +// a given field of Package (Name, Files, etc) should be +// populated. +// +// For convenience, we provide named constants for the most +// common combinations of Need flags: +// +// [LoadFiles] lists of files in each package +// [LoadImports] ... plus imports +// [LoadTypes] ... plus type information +// [LoadSyntax] ... plus type-annotated syntax +// [LoadAllSyntax] ... for all dependencies +// // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: // - https://github.com/golang/go/issues/56633 @@ -55,7 +69,7 @@ const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota - // NeedFiles adds GoFiles, OtherFiles, and IgnoredFiles + // NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles NeedFiles // NeedCompiledGoFiles adds CompiledGoFiles. @@ -86,9 +100,10 @@ const ( // needInternalDepsErrors adds the internal deps errors field for use by gopls. needInternalDepsErrors - // needInternalForTest adds the internal forTest field. + // NeedForTest adds ForTest. + // // Tests must also be set on the context for this field to be populated. 
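+	// (That is, Config.Tests must be true; ForTest then names the
+	// package under test, e.g. "p" for the test variant "p [p.test]".)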
- needInternalForTest + NeedForTest // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. // Modifies CompiledGoFiles and Types, and has no effect on its own. @@ -108,33 +123,18 @@ const ( const ( // LoadFiles loads the name and file names for the initial packages. - // - // Deprecated: LoadFiles exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles // LoadImports loads the name, file names, and import mapping for the initial packages. - // - // Deprecated: LoadImports exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. LoadImports = LoadFiles | NeedImports // LoadTypes loads exported type information for the initial packages. - // - // Deprecated: LoadTypes exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. LoadTypes = LoadImports | NeedTypes | NeedTypesSizes // LoadSyntax loads typed syntax for the initial packages. - // - // Deprecated: LoadSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. - // - // Deprecated: LoadAllSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. @@ -434,6 +434,12 @@ type Package struct { // PkgPath is the package path as used by the go/types package. PkgPath string + // Dir is the directory associated with the package, if it exists. + // + // For packages listed by the go command, this is the directory containing + // the package files. + Dir string + // Errors contains any errors encountered querying the metadata // of the package, or while parsing or type-checking its files. Errors []Error @@ -521,8 +527,8 @@ type Package struct { // -- internal -- - // forTest is the package under test, if any. - forTest string + // ForTest is the package under test, if any. + ForTest string // depsErrors is the DepsErrors field from the go list response, if any. depsErrors []*packagesinternal.PackageError @@ -551,9 +557,6 @@ type ModuleError struct { } func init() { - packagesinternal.GetForTest = func(p interface{}) string { - return p.(*Package).forTest - } packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } @@ -565,7 +568,6 @@ func init() { } packagesinternal.TypecheckCgo = int(typecheckCgo) packagesinternal.DepsErrors = int(needInternalDepsErrors) - packagesinternal.ForTest = int(needInternalForTest) } // An Error describes a problem with a package's metadata, syntax, or types. diff --git a/hack/tools/vendor/golang.org/x/tools/go/ssa/const.go b/hack/tools/vendor/golang.org/x/tools/go/ssa/const.go index 865329bfd3..4dc53ef83c 100644 --- a/hack/tools/vendor/golang.org/x/tools/go/ssa/const.go +++ b/hack/tools/vendor/golang.org/x/tools/go/ssa/const.go @@ -12,9 +12,9 @@ import ( "go/token" "go/types" "strconv" - "strings" "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" ) // NewConst returns a new constant of the specified value and type. 
@@ -78,7 +78,7 @@ func zeroConst(t types.Type) *Const { func (c *Const) RelString(from *types.Package) string { var s string if c.Value == nil { - s = zeroString(c.typ, from) + s = typesinternal.ZeroString(c.typ, types.RelativeTo(from)) } else if c.Value.Kind() == constant.String { s = constant.StringVal(c.Value) const max = 20 @@ -93,44 +93,6 @@ func (c *Const) RelString(from *types.Package) string { return s + ":" + relType(c.Type(), from) } -// zeroString returns the string representation of the "zero" value of the type t. -func zeroString(t types.Type, from *types.Package) string { - switch t := t.(type) { - case *types.Basic: - switch { - case t.Info()&types.IsBoolean != 0: - return "false" - case t.Info()&types.IsNumeric != 0: - return "0" - case t.Info()&types.IsString != 0: - return `""` - case t.Kind() == types.UnsafePointer: - fallthrough - case t.Kind() == types.UntypedNil: - return "nil" - default: - panic(fmt.Sprint("zeroString for unexpected type:", t)) - } - case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: - return "nil" - case *types.Named, *types.Alias: - return zeroString(t.Underlying(), from) - case *types.Array, *types.Struct: - return relType(t, from) + "{}" - case *types.Tuple: - // Tuples are not normal values. - // We are currently format as "(t[0], ..., t[n])". Could be something else. - components := make([]string, t.Len()) - for i := 0; i < t.Len(); i++ { - components[i] = zeroString(t.At(i).Type(), from) - } - return "(" + strings.Join(components, ", ") + ")" - case *types.TypeParam: - return "*new(" + relType(t, from) + ")" - } - panic(fmt.Sprint("zeroString: unexpected ", t)) -} - func (c *Const) Name() string { return c.RelString(nil) } diff --git a/hack/tools/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/hack/tools/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index 4ccaa210af..fe67b0fa27 100644 --- a/hack/tools/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/hack/tools/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -15,7 +15,6 @@ import ( "go/types" "os" pathpkg "path" - "strconv" "golang.org/x/tools/go/analysis" ) @@ -66,200 +65,23 @@ func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos return end } -func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - // TODO(adonovan): think about generics, and also generic aliases. - under := types.Unalias(typ) - // Don't call Underlying unconditionally: although it removes - // Named and Alias, it also removes TypeParam. - if n, ok := under.(*types.Named); ok { - under = n.Underlying() - } - switch under := under.(type) { - case *types.Basic: - switch { - case under.Info()&types.IsNumeric != 0: - return &ast.BasicLit{Kind: token.INT, Value: "0"} - case under.Info()&types.IsBoolean != 0: - return &ast.Ident{Name: "false"} - case under.Info()&types.IsString != 0: - return &ast.BasicLit{Kind: token.STRING, Value: `""`} - default: - panic(fmt.Sprintf("unknown basic type %v", under)) - } - case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array: - return ast.NewIdent("nil") - case *types.Struct: - texpr := TypeExpr(f, pkg, typ) // typ because we want the name here. 
- if texpr == nil { - return nil - } - return &ast.CompositeLit{ - Type: texpr, - } - } - return nil -} - -// IsZeroValue checks whether the given expression is a 'zero value' (as determined by output of -// analysisinternal.ZeroValue) -func IsZeroValue(expr ast.Expr) bool { - switch e := expr.(type) { - case *ast.BasicLit: - return e.Value == "0" || e.Value == `""` - case *ast.Ident: - return e.Name == "nil" || e.Name == "false" - default: - return false - } -} - -// TypeExpr returns syntax for the specified type. References to -// named types from packages other than pkg are qualified by an appropriate -// package name, as defined by the import environment of file. -func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - switch t := typ.(type) { - case *types.Basic: - switch t.Kind() { - case types.UnsafePointer: - return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")} - default: - return ast.NewIdent(t.Name()) - } - case *types.Pointer: - x := TypeExpr(f, pkg, t.Elem()) - if x == nil { - return nil - } - return &ast.UnaryExpr{ - Op: token.MUL, - X: x, - } - case *types.Array: - elt := TypeExpr(f, pkg, t.Elem()) - if elt == nil { - return nil - } - return &ast.ArrayType{ - Len: &ast.BasicLit{ - Kind: token.INT, - Value: fmt.Sprintf("%d", t.Len()), - }, - Elt: elt, - } - case *types.Slice: - elt := TypeExpr(f, pkg, t.Elem()) - if elt == nil { - return nil - } - return &ast.ArrayType{ - Elt: elt, - } - case *types.Map: - key := TypeExpr(f, pkg, t.Key()) - value := TypeExpr(f, pkg, t.Elem()) - if key == nil || value == nil { - return nil - } - return &ast.MapType{ - Key: key, - Value: value, - } - case *types.Chan: - dir := ast.ChanDir(t.Dir()) - if t.Dir() == types.SendRecv { - dir = ast.SEND | ast.RECV - } - value := TypeExpr(f, pkg, t.Elem()) - if value == nil { - return nil - } - return &ast.ChanType{ - Dir: dir, - Value: value, - } - case *types.Signature: - var params []*ast.Field - for i := 0; i < t.Params().Len(); i++ { - p := TypeExpr(f, pkg, t.Params().At(i).Type()) - if p == nil { - return nil - } - params = append(params, &ast.Field{ - Type: p, - Names: []*ast.Ident{ - { - Name: t.Params().At(i).Name(), - }, - }, - }) - } - if t.Variadic() { - last := params[len(params)-1] - last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} - } - var returns []*ast.Field - for i := 0; i < t.Results().Len(); i++ { - r := TypeExpr(f, pkg, t.Results().At(i).Type()) - if r == nil { - return nil - } - returns = append(returns, &ast.Field{ - Type: r, - }) - } - return &ast.FuncType{ - Params: &ast.FieldList{ - List: params, - }, - Results: &ast.FieldList{ - List: returns, - }, - } - case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam} - if t.Obj().Pkg() == nil { - return ast.NewIdent(t.Obj().Name()) - } - if t.Obj().Pkg() == pkg { - return ast.NewIdent(t.Obj().Name()) - } - pkgName := t.Obj().Pkg().Name() - - // If the file already imports the package under another name, use that. - for _, cand := range f.Imports { - if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() { - if cand.Name != nil && cand.Name.Name != "" { - pkgName = cand.Name.Name - } - } - } - if pkgName == "." 
{
-		return ast.NewIdent(t.Obj().Name())
-	}
-	return &ast.SelectorExpr{
-		X:   ast.NewIdent(pkgName),
-		Sel: ast.NewIdent(t.Obj().Name()),
-	}
-	case *types.Struct:
-		return ast.NewIdent(t.String())
-	case *types.Interface:
-		return ast.NewIdent(t.String())
-	default:
-		return nil
-	}
-}
-
-// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable.
-// Some examples:
+// StmtToInsertVarBefore returns the ast.Stmt before which we can
+// safely insert a new var declaration, or nil if the path denotes a
+// node outside any statement.
 //
 // Basic Example:
-// z := 1
-// y := z + x
+//
+//	z := 1
+//	y := z + x
+//
 // If x is undeclared, then this function would return `y := z + x`, so that we
 // can insert `x := ` on the line before `y := z + x`.
 //
 // If stmt example:
-// if z == 1 {
-// } else if z == y {}
+//
+//	if z == 1 {
+//	} else if z == y {}
+//
 // If y is undeclared, then this function would return `if z == 1 {`, because we cannot
 // insert a statement between an if and an else if statement. As a result, we need to find
 // the top of the if chain to insert `y := ` before.
@@ -272,7 +94,7 @@ func StmtToInsertVarBefore(path []ast.Node) ast.Stmt {
 		}
 	}
 	if enclosingIndex == -1 {
-		return nil
+		return nil // no enclosing statement: outside function
 	}
 	enclosingStmt := path[enclosingIndex]
 	switch enclosingStmt.(type) {
@@ -280,6 +102,9 @@ func StmtToInsertVarBefore(path []ast.Node) ast.Stmt {
 		// The enclosingStmt is inside of the if declaration,
 		// We need to check if we are in an else-if stmt and
 		// get the base if statement.
+		// TODO(adonovan): for non-constants, it may be preferable
+		// to add the decl as the Init field of the innermost
+		// enclosing ast.IfStmt.
 		return baseIfStmt(path, enclosingIndex)
 	case *ast.CaseClause:
 		// Get the enclosing switch stmt if the enclosingStmt is
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/diff/diff.go b/hack/tools/vendor/golang.org/x/tools/internal/diff/diff.go
new file mode 100644
index 0000000000..a13547b7a7
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/diff/diff.go
@@ -0,0 +1,176 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package diff computes differences between text files or strings.
+package diff
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// An Edit describes the replacement of a portion of a text file.
+type Edit struct {
+	Start, End int    // byte offsets of the region to replace
+	New        string // the replacement
+}
+
+func (e Edit) String() string {
+	return fmt.Sprintf("{Start:%d,End:%d,New:%q}", e.Start, e.End, e.New)
+}
+
+// Apply applies a sequence of edits to the src buffer and returns the
+// result. Edits are applied in order of start offset; edits with the
+// same start offset are applied in the order they were provided.
+//
+// Apply returns an error if any edit is out of bounds,
+// or if any pair of edits is overlapping.
+func Apply(src string, edits []Edit) (string, error) {
+	edits, size, err := validate(src, edits)
+	if err != nil {
+		return "", err
+	}
+
+	// Apply edits.
+	out := make([]byte, 0, size)
+	lastEnd := 0
+	for _, edit := range edits {
+		if lastEnd < edit.Start {
+			out = append(out, src[lastEnd:edit.Start]...)
+		}
+		out = append(out, edit.New...)
+		lastEnd = edit.End
+	}
+	out = append(out, src[lastEnd:]...)
+ + if len(out) != size { + panic("wrong size") + } + + return string(out), nil +} + +// ApplyBytes is like Apply, but it accepts a byte slice. +// The result is always a new array. +func ApplyBytes(src []byte, edits []Edit) ([]byte, error) { + res, err := Apply(string(src), edits) + return []byte(res), err +} + +// validate checks that edits are consistent with src, +// and returns the size of the patched output. +// It may return a different slice. +func validate(src string, edits []Edit) ([]Edit, int, error) { + if !sort.IsSorted(editsSort(edits)) { + edits = append([]Edit(nil), edits...) + SortEdits(edits) + } + + // Check validity of edits and compute final size. + size := len(src) + lastEnd := 0 + for _, edit := range edits { + if !(0 <= edit.Start && edit.Start <= edit.End && edit.End <= len(src)) { + return nil, 0, fmt.Errorf("diff has out-of-bounds edits") + } + if edit.Start < lastEnd { + return nil, 0, fmt.Errorf("diff has overlapping edits") + } + size += len(edit.New) + edit.Start - edit.End + lastEnd = edit.End + } + + return edits, size, nil +} + +// SortEdits orders a slice of Edits by (start, end) offset. +// This ordering puts insertions (end = start) before deletions +// (end > start) at the same point, but uses a stable sort to preserve +// the order of multiple insertions at the same point. +// (Apply detects multiple deletions at the same point as an error.) +func SortEdits(edits []Edit) { + sort.Stable(editsSort(edits)) +} + +type editsSort []Edit + +func (a editsSort) Len() int { return len(a) } +func (a editsSort) Less(i, j int) bool { + if cmp := a[i].Start - a[j].Start; cmp != 0 { + return cmp < 0 + } + return a[i].End < a[j].End +} +func (a editsSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// lineEdits expands and merges a sequence of edits so that each +// resulting edit replaces one or more complete lines. +// See ApplyEdits for preconditions. +func lineEdits(src string, edits []Edit) ([]Edit, error) { + edits, _, err := validate(src, edits) + if err != nil { + return nil, err + } + + // Do all deletions begin and end at the start of a line, + // and all insertions end with a newline? + // (This is merely a fast path.) + for _, edit := range edits { + if edit.Start >= len(src) || // insertion at EOF + edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start + edit.End > 0 && src[edit.End-1] != '\n' || // not at line start + edit.New != "" && edit.New[len(edit.New)-1] != '\n' { // partial insert + goto expand // slow path + } + } + return edits, nil // aligned + +expand: + if len(edits) == 0 { + return edits, nil // no edits (unreachable due to fast path) + } + expanded := make([]Edit, 0, len(edits)) // a guess + prev := edits[0] + // TODO(adonovan): opt: start from the first misaligned edit. + // TODO(adonovan): opt: avoid quadratic cost of string += string. + for _, edit := range edits[1:] { + between := src[prev.End:edit.Start] + if !strings.Contains(between, "\n") { + // overlapping lines: combine with previous edit. + prev.New += between + edit.New + prev.End = edit.End + } else { + // non-overlapping lines: flush previous edit. + expanded = append(expanded, expandEdit(prev, src)) + prev = edit + } + } + return append(expanded, expandEdit(prev, src)), nil // flush final edit +} + +// expandEdit returns edit expanded to complete whole lines. +func expandEdit(edit Edit, src string) Edit { + // Expand start left to start of line. + // (delta is the zero-based column number of start.) 
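+	// (For example, with src "ab\ncd\n", an edit replacing just the
+	// "d" with "x" grows here to a replacement of the whole line:
+	// it becomes an edit replacing "cd\n" with "cx\n".)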
+	start := edit.Start
+	if delta := start - 1 - strings.LastIndex(src[:start], "\n"); delta > 0 {
+		edit.Start -= delta
+		edit.New = src[start-delta:start] + edit.New
+	}
+
+	// Expand end right to end of line.
+	end := edit.End
+	if end > 0 && src[end-1] != '\n' ||
+		edit.New != "" && edit.New[len(edit.New)-1] != '\n' {
+		if nl := strings.IndexByte(src[end:], '\n'); nl < 0 {
+			edit.End = len(src) // extend to EOF
+		} else {
+			edit.End = end + nl + 1 // extend beyond \n
+		}
+	}
+	edit.New += src[end:edit.End]
+
+	return edit
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/common.go b/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/common.go
new file mode 100644
index 0000000000..c3e82dd268
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/common.go
@@ -0,0 +1,179 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+	"log"
+	"sort"
+)
+
+// lcs is a longest common sequence
+type lcs []diag
+
+// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i<Len.
+// All computed diagonals are parts of a longest common subsequence.
+type diag struct {
+	X, Y int
+	Len  int
+}
+
+// sort sorts in place, by lowest X, and if tied, inversely by Len
+func (l lcs) sort() lcs {
+	sort.Slice(l, func(i, j int) bool {
+		if l[i].X != l[j].X {
+			return l[i].X < l[j].X
+		}
+		return l[i].Len > l[j].Len
+	})
+	return l
+}
+
+// validate that the elements of the lcs do not overlap
+// (can only happen when the two-sided algorithm ends early)
+// expects the lcs to be sorted
+func (l lcs) valid() bool {
+	for i := 1; i < len(l); i++ {
+		if l[i-1].X+l[i-1].Len > l[i].X {
+			return false
+		}
+		if l[i-1].Y+l[i-1].Len > l[i].Y {
+			return false
+		}
+	}
+	return true
+}
+
+// repair overlapping lcs
+// only called if two-sided stops early
+func (l lcs) fix() lcs {
+	// from the set of diagonals in l, find a maximal non-conflicting set
+	// this problem may be NP-complete, but we use a greedy heuristic,
+	// which is quadratic, but with a better data structure, could be D log D.
+	// independent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs
+	// which has to have monotone x and y
+	if len(l) == 0 {
+		return nil
+	}
+	sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len })
+	tmp := make(lcs, 0, len(l))
+	tmp = append(tmp, l[0])
+	for i := 1; i < len(l); i++ {
+		var dir direction
+		nxt := l[i]
+		for _, in := range tmp {
+			if dir, nxt = overlap(in, nxt); dir == empty || dir == bad {
+				break
+			}
+		}
+		if nxt.Len > 0 && dir != bad {
+			tmp = append(tmp, nxt)
+		}
+	}
+	tmp.sort()
+	if false && !tmp.valid() { // debug checking
+		log.Fatalf("here %d", len(tmp))
+	}
+	return tmp
+}
+
+type direction int
+
+const (
+	empty    direction = iota // diag is empty (so not in lcs)
+	leftdown                  // proposed acceptably to the left and below
+	rightup                   // proposed diag is acceptably to the right and above
+	bad                       // proposed diag is inconsistent with the lcs so far
+)
+
+// overlap trims the proposed diag prop so it doesn't overlap with
+// the existing diag that has already been added to the lcs.
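+// For example (illustrative values): if exist occupies X and Y 0..2
+// and prop starts at X=Y=2 with Len=3, the overlapping first element
+// of prop is trimmed, shifting its X and Y to 3, before the trimmed
+// remainder is classified relative to exist.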
+func overlap(exist, prop diag) (direction, diag) { + if prop.X <= exist.X && exist.X < prop.X+prop.Len { + // remove the end of prop where it overlaps with the X end of exist + delta := prop.X + prop.Len - exist.X + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + } + if exist.X <= prop.X && prop.X < exist.X+exist.Len { + // remove the beginning of prop where overlaps with exist + delta := exist.X + exist.Len - prop.X + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + prop.X += delta + prop.Y += delta + } + if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len { + // remove the end of prop that overlaps (in Y) with exist + delta := prop.Y + prop.Len - exist.Y + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + } + if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len { + // remove the beginning of peop that overlaps with exist + delta := exist.Y + exist.Len - prop.Y + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + prop.X += delta // no test reaches this code + prop.Y += delta + } + if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y { + return leftdown, prop + } + if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y { + return rightup, prop + } + // prop can't be in an lcs that contains exist + return bad, prop +} + +// manipulating Diag and lcs + +// prepend a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs +// or to its first Diag. prepend is only called to extend diagonals +// the backward direction. +func (lcs lcs) prepend(x, y int) lcs { + if len(lcs) > 0 { + d := &lcs[0] + if int(d.X) == x+1 && int(d.Y) == y+1 { + // extend the diagonal down and to the left + d.X, d.Y = int(x), int(y) + d.Len++ + return lcs + } + } + + r := diag{X: int(x), Y: int(y), Len: 1} + lcs = append([]diag{r}, lcs...) + return lcs +} + +// append appends a diagonal, or extends the existing one. +// by adding the edge (x,y)-(x+1.y+1). append is only called +// to extend diagonals in the forward direction. +func (lcs lcs) append(x, y int) lcs { + if len(lcs) > 0 { + last := &lcs[len(lcs)-1] + // Expand last element if adjoining. + if last.X+last.Len == x && last.Y+last.Len == y { + last.Len++ + return lcs + } + } + + return append(lcs, diag{X: x, Y: y, Len: 1}) +} + +// enforce constraint on d, k +func ok(d, k int) bool { + return d >= 0 && -d <= k && k <= d +} diff --git a/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/doc.go b/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/doc.go new file mode 100644 index 0000000000..9029dd20b3 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/doc.go @@ -0,0 +1,156 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package lcs contains code to find longest-common-subsequences +// (and diffs) +package lcs + +/* +Compute longest-common-subsequences of two slices A, B using +algorithms from Myers' paper. A longest-common-subsequence +(LCS from now on) of A and B is a maximal set of lexically increasing +pairs of subscripts (x,y) with A[x]==B[y]. There may be many LCS, but +they all have the same length. An LCS determines a sequence of edits +that changes A into B. + +The key concept is the edit graph of A and B. +If A has length N and B has length M, then the edit graph has +vertices v[i][j] for 0 <= i <= N, 0 <= j <= M. 
+horizontal edge from v[i][j] to v[i+1][j] whenever both are in
+the graph, and a vertical edge from v[i][j] to v[i][j+1] similarly.
+When A[i] == B[j] there is a diagonal edge from v[i][j] to v[i+1][j+1].
+
+A path in the graph between (0,0) and (N,M) determines a sequence
+of edits converting A into B: each horizontal edge corresponds to removing
+an element of A, and each vertical edge corresponds to inserting an
+element of B.
+
+A vertex (x,y) is on (forward) diagonal k if x-y=k. A path in the graph
+is of length D if it has D non-diagonal edges. The algorithms generate
+forward paths (in which at least one of x,y increases at each edge),
+or backward paths (in which at least one of x,y decreases at each edge),
+or a combination. (Note that the orientation is the traditional mathematical one,
+with the origin in the lower-left corner.)
+
+Here is the edit graph for A:"aabbaa", B:"aacaba". (I know the diagonals look weird.)
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ b | | | ___/‾‾‾ | ___/‾‾‾ | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ c | | | | | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a a b b a a
+
+
+The algorithm labels a vertex (x,y) with D,k if it is on diagonal k and at
+the end of a maximal path of length D. (Because x-y=k it suffices to remember
+only the x coordinate of the vertex.)
+
+The forward algorithm: Find the longest diagonal starting at (0,0) and
+label its end with D=0,k=0. From that vertex take a vertical step and
+then follow the longest diagonal (up and to the right), and label that vertex
+with D=1,k=-1. From the D=0,k=0 point take a horizontal step and then follow
+the longest diagonal (up and to the right) and label that vertex
+D=1,k=1. In the same way, having labelled all the D vertices,
+from a vertex labelled D,k find two vertices
+tentatively labelled D+1,k-1 and D+1,k+1. There may be two on the same
+diagonal, in which case take the one with the larger x.
+
+Eventually the path gets to (N,M), and the diagonals on it are the LCS.
+
+Here is the edit graph with the ends of D-paths labelled. (So, for instance,
+0/2,2 indicates that x=2,y=2 is labelled with 0, as it should be, since the first
+step is to go up the longest diagonal from (0,0).)
+A:"aabbaa", B:"aacaba" + ⊙ ------- ⊙ ------- ⊙ -------(3/3,6)------- ⊙ -------(3/5,6)-------(4/6,6) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ -------(2/3,5)------- ⊙ ------- ⊙ ------- ⊙ + b | | | ___/‾‾‾ | ___/‾‾‾ | | | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ -------(3/5,4)------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ -------(1/2,3)-------(2/3,3)------- ⊙ ------- ⊙ ------- ⊙ + c | | | | | | | + ⊙ ------- ⊙ -------(0/2,2)-------(1/3,2)-------(2/4,2)-------(3/5,2)-------(4/6,2) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a a b b a a + +The 4-path is reconstructed starting at (4/6,6), horizontal to (3/5,6), diagonal to (3,4), vertical +to (2/3,3), horizontal to (1/2,3), vertical to (0/2,2), and diagonal to (0,0). As expected, +there are 4 non-diagonal steps, and the diagonals form an LCS. + +There is a symmetric backward algorithm, which gives (backwards labels are prefixed with a colon): +A:"aabbaa", B:"aacaba" + ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ --------(:0/5,5)-------- ⊙ + b | | | ____/‾‾‾ | ____/‾‾‾ | | | + ⊙ -------- ⊙ -------- ⊙ --------(:1/3,4)-------- ⊙ -------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,3)--------(:2/1,3)-------- ⊙ --------(:2/3,3)--------(:1/4,3)-------- ⊙ -------- ⊙ + c | | | | | | | + ⊙ -------- ⊙ -------- ⊙ --------(:3/3,2)--------(:2/4,2)-------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,1)-------- ⊙ -------- ⊙ -------- ⊙ --------(:3/4,1)-------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:4/0,0)-------- ⊙ -------- ⊙ -------- ⊙ --------(:4/4,0)-------- ⊙ -------- ⊙ + a a b b a a + +Neither of these is ideal for use in an editor, where it is undesirable to send very long diffs to the +front end. It's tricky to decide exactly what 'very long diffs' means, as "replace A by B" is very short. +We want to control how big D can be, by stopping when it gets too large. The forward algorithm then +privileges common prefixes, and the backward algorithm privileges common suffixes. Either is an undesirable +asymmetry. + +Fortunately there is a two-sided algorithm, implied by results in Myers' paper. Here's what the labels in +the edit graph look like. 
+A:"aabbaa", B:"aacaba"
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- (2/3,5) --------- ⊙ --------- (:0/5,5)--------- ⊙
+ b | | | ____/‾‾‾‾ | ____/‾‾‾‾ | | |
+ ⊙ --------- ⊙ --------- ⊙ --------- (:1/3,4)--------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- (:2/1,3)--------- (1/2,3) ---------(2:2/3,3)--------- (:1/4,3)--------- ⊙ --------- ⊙
+ c | | | | | | |
+ ⊙ --------- ⊙ --------- (0/2,2) --------- (1/3,2) ---------(2:2/4,2)--------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a a b b a a
+
+The algorithm stopped when it saw the backwards 2-path ending at (1,3) and the forwards 2-path ending at (3,5). The criterion
+is a backwards path ending at (u,v) and a forward path ending at (x,y), where u <= x and the two points are on the same
+diagonal. (Here the edgegraph has a diagonal, but the criterion is x-y=u-v.) Myers proves there is a forward
+2-path from (0,0) to (1,3), and that together with the backwards 2-path ending at (1,3) gives the expected 4-path.
+Unfortunately the forward path has to be constructed by another run of the forward algorithm; it can't be found from the
+computed labels. That is the worst case. Had the code noticed (x,y)=(u,v)=(3,3) the whole path could be reconstructed
+from the edgegraph. The implementation looks for a number of special cases to try to avoid computing an extra forward path.
+
+If the two-sided algorithm has to stop early (because D has become too large) it will have found a forward LCS and a
+backwards LCS. Ideally these go with disjoint prefixes and suffixes of A and B, but disjointness may fail and the two
+computed LCS may conflict. (An easy example is where A is a suffix of B, and shares a short prefix. The backwards LCS
+is all of A, and the forward LCS is a prefix of A.) The algorithm combines the two
+to form a best-effort LCS. In the worst case the forward partial LCS may have to
+be recomputed.
+*/
+
+/* Eugene Myers' paper is titled
+"An O(ND) Difference Algorithm and Its Variations"
+and can be found at
+http://www.xmailserver.org/diff2.pdf
+
+(There is a generic implementation of the algorithm in the repository with git hash
+b9ad7e4ade3a686d608e44475390ad428e60e7fc)
+*/
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/git.sh b/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/git.sh
new file mode 100644
index 0000000000..b25ba4aac7
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/git.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2022 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+#
+# Creates a zip file containing all numbered versions
+# of the commit history of a large source file, for use
+# as input data for the tests of the diff algorithm.
+#
+# Run script from root of the x/tools repo.

+set -eu
+
+# WARNING: This script overwrites the working copy of $file and restores it at the end.
+# The largest real source file in the x/tools repo.
+# file=internal/golang/completion/completion.go +# file=internal/golang/diagnostics.go +file=internal/protocol/tsprotocol.go + +tmp=$(mktemp -d) +git log $file | + awk '/^commit / {print $2}' | + nl -ba -nrz | + while read n hash; do + git checkout --quiet $hash $file + cp -f $file $tmp/$n + done +(cd $tmp && zip -q - *) > testdata.zip +rm -fr $tmp +git restore --staged $file +git restore $file +echo "Created testdata.zip" diff --git a/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/labels.go b/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/labels.go new file mode 100644 index 0000000000..504913d1da --- /dev/null +++ b/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/labels.go @@ -0,0 +1,55 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "fmt" +) + +// For each D, vec[D] has length D+1, +// and the label for (D, k) is stored in vec[D][(D+k)/2]. +type label struct { + vec [][]int +} + +// Temporary checking DO NOT COMMIT true TO PRODUCTION CODE +const debug = false + +// debugging. check that the (d,k) pair is valid +// (that is, -d<=k<=d and d+k even) +func checkDK(D, k int) { + if k >= -D && k <= D && (D+k)%2 == 0 { + return + } + panic(fmt.Sprintf("out of range, d=%d,k=%d", D, k)) +} + +func (t *label) set(D, k, x int) { + if debug { + checkDK(D, k) + } + for len(t.vec) <= D { + t.vec = append(t.vec, nil) + } + if t.vec[D] == nil { + t.vec[D] = make([]int, D+1) + } + t.vec[D][(D+k)/2] = x // known that D+k is even +} + +func (t *label) get(d, k int) int { + if debug { + checkDK(d, k) + } + return int(t.vec[d][(d+k)/2]) +} + +func newtriang(limit int) label { + if limit < 100 { + // Preallocate if limit is not large. + return label{vec: make([][]int, limit)} + } + return label{} +} diff --git a/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/old.go b/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/old.go new file mode 100644 index 0000000000..4353da15ba --- /dev/null +++ b/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/old.go @@ -0,0 +1,480 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// TODO(adonovan): remove unclear references to "old" in this package. + +import ( + "fmt" +) + +// A Diff is a replacement of a portion of A by a portion of B. +type Diff struct { + Start, End int // offsets of portion to delete in A + ReplStart, ReplEnd int // offset of replacement text in B +} + +// DiffStrings returns the differences between two strings. +// It does not respect rune boundaries. +func DiffStrings(a, b string) []Diff { return diff(stringSeqs{a, b}) } + +// DiffBytes returns the differences between two byte sequences. +// It does not respect rune boundaries. +func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) } + +// DiffRunes returns the differences between two rune sequences. +func DiffRunes(a, b []rune) []Diff { return diff(runesSeqs{a, b}) } + +func diff(seqs sequences) []Diff { + // A limit on how deeply the LCS algorithm should search. The value is just a guess. + const maxDiffs = 100 + diff, _ := compute(seqs, twosided, maxDiffs/2) + return diff +} + +// compute computes the list of differences between two sequences, +// along with the LCS. It is exercised directly by tests. +// The algorithm is one of {forward, backward, twosided}. 
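+//
+// Illustrative example (editor's sketch, not part of the upstream file):
+// diffing "ab" against "ac" yields a single replacement,
+//
+//	diffs, _ := compute(stringSeqs{"ab", "ac"}, twosided, 0)
+//	// diffs == []Diff{{Start: 1, End: 2, ReplStart: 1, ReplEnd: 2}},
+//	// i.e. replace a[1:2] ("b") with b[1:2] ("c").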
+func compute(seqs sequences, algo func(*editGraph) lcs, limit int) ([]Diff, lcs) { + if limit <= 0 { + limit = 1 << 25 // effectively infinity + } + alen, blen := seqs.lengths() + g := &editGraph{ + seqs: seqs, + vf: newtriang(limit), + vb: newtriang(limit), + limit: limit, + ux: alen, + uy: blen, + delta: alen - blen, + } + lcs := algo(g) + diffs := lcs.toDiffs(alen, blen) + return diffs, lcs +} + +// editGraph carries the information for computing the lcs of two sequences. +type editGraph struct { + seqs sequences + vf, vb label // forward and backward labels + + limit int // maximal value of D + // the bounding rectangle of the current edit graph + lx, ly, ux, uy int + delta int // common subexpression: (ux-lx)-(uy-ly) +} + +// toDiffs converts an LCS to a list of edits. +func (lcs lcs) toDiffs(alen, blen int) []Diff { + var diffs []Diff + var pa, pb int // offsets in a, b + for _, l := range lcs { + if pa < l.X || pb < l.Y { + diffs = append(diffs, Diff{pa, l.X, pb, l.Y}) + } + pa = l.X + l.Len + pb = l.Y + l.Len + } + if pa < alen || pb < blen { + diffs = append(diffs, Diff{pa, alen, pb, blen}) + } + return diffs +} + +// --- FORWARD --- + +// fdone decides if the forward path has reached the upper right +// corner of the rectangle. If so, it also returns the computed lcs. +func (e *editGraph) fdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vf.get(D, k) + y := x - k + if x == e.ux && y == e.uy { + return true, e.forwardlcs(D, k) + } + return false, nil +} + +// run the forward algorithm, until success or up to the limit on D. +func forward(e *editGraph) lcs { + e.setForward(0, 0, e.lx) + if ok, ans := e.fdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + if ok, ans := e.fdone(D+1, -(D + 1)); ok { + return ans + } + e.setForward(D+1, D+1, e.getForward(D, D)+1) + if ok, ans := e.fdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + if ok, ans := e.fdone(D+1, k); ok { + return ans + } + } + } + // D is too large + // find the D path with maximal x+y inside the rectangle and + // use that to compute the found part of the lcs + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + return e.forwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking from the farthest point reached +func (e *editGraph) forwardlcs(D, k int) lcs { + var ans lcs + for x := e.getForward(D, k); x != 0 || x-k != 0; { + if ok(D-1, k-1) && x-1 == e.getForward(D-1, k-1) { + // if (x-1,y) is labelled D-1, x--,D--,k--,continue + D, k, x = D-1, k-1, x-1 + continue + } else if ok(D-1, k+1) && x == e.getForward(D-1, k+1) { + // if (x,y-1) is labelled D-1, x, D--,k++, continue + D, k = D-1, k+1 + continue + } + // if (x-1,y-1)--(x,y) is a diagonal, prepend,x--,y--, continue + y := x - k + ans = ans.prepend(x+e.lx-1, y+e.ly-1) + x-- + } + return ans +} + +// start at (x,y), go up the diagonal as far as possible, +// and label the result with d +func (e *editGraph) lookForward(k, relx int) int { + rely := relx - k + x, y := relx+e.lx, rely+e.ly + if x < e.ux && y < e.uy { + x += 
e.seqs.commonPrefixLen(x, e.ux, y, e.uy) + } + return x +} + +func (e *editGraph) setForward(d, k, relx int) { + x := e.lookForward(k, relx) + e.vf.set(d, k, x-e.lx) +} + +func (e *editGraph) getForward(d, k int) int { + x := e.vf.get(d, k) + return x +} + +// --- BACKWARD --- + +// bdone decides if the backward path has reached the lower left corner +func (e *editGraph) bdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vb.get(D, k) + y := x - (k + e.delta) + if x == 0 && y == 0 { + return true, e.backwardlcs(D, k) + } + return false, nil +} + +// run the backward algorithm, until success or up to the limit on D. +func backward(e *editGraph) lcs { + e.setBackward(0, 0, e.ux) + if ok, ans := e.bdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + if ok, ans := e.bdone(D+1, -(D + 1)); ok { + return ans + } + e.setBackward(D+1, D+1, e.getBackward(D, D)) + if ok, ans := e.bdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + if ok, ans := e.bdone(D+1, k); ok { + return ans + } + } + } + + // D is too large + // find the D path with minimal x+y inside the rectangle and + // use that to compute the part of the lcs found + kmax := -e.limit - 1 + diagmin := 1 << 25 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no paths when limit=%d?", e.limit)) + } + return e.backwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking +func (e *editGraph) backwardlcs(D, k int) lcs { + var ans lcs + for x := e.getBackward(D, k); x != e.ux || x-(k+e.delta) != e.uy; { + if ok(D-1, k-1) && x == e.getBackward(D-1, k-1) { + // D--, k--, x unchanged + D, k = D-1, k-1 + continue + } else if ok(D-1, k+1) && x+1 == e.getBackward(D-1, k+1) { + // D--, k++, x++ + D, k, x = D-1, k+1, x+1 + continue + } + y := x - (k + e.delta) + ans = ans.append(x+e.lx, y+e.ly) + x++ + } + return ans +} + +// start at (x,y), go down the diagonal as far as possible, +func (e *editGraph) lookBackward(k, relx int) int { + rely := relx - (k + e.delta) // forward k = k + e.delta + x, y := relx+e.lx, rely+e.ly + if x > 0 && y > 0 { + x -= e.seqs.commonSuffixLen(0, x, 0, y) + } + return x +} + +// convert to rectangle, and label the result with d +func (e *editGraph) setBackward(d, k, relx int) { + x := e.lookBackward(k, relx) + e.vb.set(d, k, x-e.lx) +} + +func (e *editGraph) getBackward(d, k int) int { + x := e.vb.get(d, k) + return x +} + +// -- TWOSIDED --- + +func twosided(e *editGraph) lcs { + // The termination condition could be improved, as either the forward + // or backward pass could succeed before Myers' Lemma applies. + // Aside from questions of efficiency (is the extra testing cost-effective) + // this is more likely to matter when e.limit is reached. 
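+	// (editor's note) Seed both searches: the forward label at D=0,k=0 marks
+	// the end of the longest diagonal out of the lower-left corner, and the
+	// backward label at D=0,k=0 marks the start of the longest diagonal
+	// reaching the upper-right corner.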
+ e.setForward(0, 0, e.lx) + e.setBackward(0, 0, e.ux) + + // from D to D+1 + for D := 0; D < e.limit; D++ { + // just finished a backwards pass, so check + if got, ok := e.twoDone(D, D); ok { + return e.twolcs(D, D, got) + } + // do a forwards pass (D to D+1) + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + e.setForward(D+1, D+1, e.getForward(D, D)+1) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + } + // just did a forward pass, so check + if got, ok := e.twoDone(D+1, D); ok { + return e.twolcs(D+1, D, got) + } + // do a backward pass, D to D+1 + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + e.setBackward(D+1, D+1, e.getBackward(D, D)) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + } + } + + // D too large. combine a forward and backward partial lcs + // first, a forward one + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no forward paths when limit=%d?", e.limit)) + } + lcs := e.forwardlcs(e.limit, kmax) + // now a backward one + // find the D path with minimal x+y inside the rectangle and + // use that to compute the lcs + diagmin := 1 << 25 // infinity + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no backward paths when limit=%d?", e.limit)) + } + lcs = append(lcs, e.backwardlcs(e.limit, kmax)...) + // These may overlap (e.forwardlcs and e.backwardlcs return sorted lcs) + ans := lcs.fix() + return ans +} + +// Does Myers' Lemma apply? +func (e *editGraph) twoDone(df, db int) (int, bool) { + if (df+db+e.delta)%2 != 0 { + return 0, false // diagonals cannot overlap + } + kmin := -db + e.delta + if -df > kmin { + kmin = -df + } + kmax := db + e.delta + if df < kmax { + kmax = df + } + for k := kmin; k <= kmax; k += 2 { + x := e.vf.get(df, k) + u := e.vb.get(db, k-e.delta) + if u <= x { + // is it worth looking at all the other k? + for l := k; l <= kmax; l += 2 { + x := e.vf.get(df, l) + y := x - l + u := e.vb.get(db, l-e.delta) + v := u - l + if x == u || u == 0 || v == 0 || y == e.uy || x == e.ux { + return l, true + } + } + return k, true + } + } + return 0, false +} + +func (e *editGraph) twolcs(df, db, kf int) lcs { + // db==df || db+1==df + x := e.vf.get(df, kf) + y := x - kf + kb := kf - e.delta + u := e.vb.get(db, kb) + v := u - kf + + // Myers proved there is a df-path from (0,0) to (u,v) + // and a db-path from (x,y) to (N,M). + // In the first case the overall path is the forward path + // to (u,v) followed by the backward path to (N,M). + // In the second case the path is the backward path to (x,y) + // followed by the forward path to (x,y) from (0,0). + + // Look for some special cases to avoid computing either of these paths. 
+ if x == u { + // "babaab" "cccaba" + // already patched together + lcs := e.forwardlcs(df, kf) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // is (u-1,v) or (u,v-1) labelled df-1? + // if so, that forward df-1-path plus a horizontal or vertical edge + // is the df-path to (u,v), then plus the db-path to (N,M) + if u > 0 && ok(df-1, u-1-v) && e.vf.get(df-1, u-1-v) == u-1 { + // "aabbab" "cbcabc" + lcs := e.forwardlcs(df-1, u-1-v) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + if v > 0 && ok(df-1, (u-(v-1))) && e.vf.get(df-1, u-(v-1)) == u { + // "abaabb" "bcacab" + lcs := e.forwardlcs(df-1, u-(v-1)) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // The path can't possibly contribute to the lcs because it + // is all horizontal or vertical edges + if u == 0 || v == 0 || x == e.ux || y == e.uy { + // "abaabb" "abaaaa" + if u == 0 || v == 0 { + return e.backwardlcs(db, kb) + } + return e.forwardlcs(df, kf) + } + + // is (x+1,y) or (x,y+1) labelled db-1? + if x+1 <= e.ux && ok(db-1, x+1-y-e.delta) && e.vb.get(db-1, x+1-y-e.delta) == x+1 { + // "bababb" "baaabb" + lcs := e.backwardlcs(db-1, kb+1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + if y+1 <= e.uy && ok(db-1, x-(y+1)-e.delta) && e.vb.get(db-1, x-(y+1)-e.delta) == x { + // "abbbaa" "cabacc" + lcs := e.backwardlcs(db-1, kb-1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + + // need to compute another path + // "aabbaa" "aacaba" + lcs := e.backwardlcs(db, kb) + oldx, oldy := e.ux, e.uy + e.ux = u + e.uy = v + lcs = append(lcs, forward(e)...) + e.ux, e.uy = oldx, oldy + return lcs.sort() +} diff --git a/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go b/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go new file mode 100644 index 0000000000..2d72d26304 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go @@ -0,0 +1,113 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// This file defines the abstract sequence over which the LCS algorithm operates. + +// sequences abstracts a pair of sequences, A and B. +type sequences interface { + lengths() (int, int) // len(A), len(B) + commonPrefixLen(ai, aj, bi, bj int) int // len(commonPrefix(A[ai:aj], B[bi:bj])) + commonSuffixLen(ai, aj, bi, bj int) int // len(commonSuffix(A[ai:aj], B[bi:bj])) +} + +type stringSeqs struct{ a, b string } + +func (s stringSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s stringSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenString(s.a[ai:aj], s.b[bi:bj]) +} +func (s stringSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenString(s.a[ai:aj], s.b[bi:bj]) +} + +// The explicit capacity in s[i:j:j] leads to more efficient code. 
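+//
+// Illustrative example (editor's sketch): all three concrete types feed the
+// same pair of sequences into the generic algorithm, e.g.
+//
+//	stringSeqs{"abcx", "abcy"}.commonPrefixLen(0, 4, 0, 4) == 3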
+ +type bytesSeqs struct{ a, b []byte } + +func (s bytesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s bytesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s bytesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +type runesSeqs struct{ a, b []rune } + +func (s runesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s runesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s runesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +// TODO(adonovan): optimize these functions using ideas from: +// - https://go.dev/cl/408116 common.go +// - https://go.dev/cl/421435 xor_generic.go + +// TODO(adonovan): factor using generics when available, +// but measure performance impact. + +// commonPrefixLen* returns the length of the common prefix of a[ai:aj] and b[bi:bj]. +func commonPrefixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} + +// commonSuffixLen* returns the length of the common suffix of a[ai:aj] and b[bi:bj]. +func commonSuffixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} + +func min(x, y int) int { + if x < y { + return x + } else { + return y + } +} diff --git a/hack/tools/vendor/golang.org/x/tools/internal/diff/ndiff.go b/hack/tools/vendor/golang.org/x/tools/internal/diff/ndiff.go new file mode 100644 index 0000000000..fbef4d730c --- /dev/null +++ b/hack/tools/vendor/golang.org/x/tools/internal/diff/ndiff.go @@ -0,0 +1,99 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "bytes" + "unicode/utf8" + + "golang.org/x/tools/internal/diff/lcs" +) + +// Strings computes the differences between two strings. +// The resulting edits respect rune boundaries. +func Strings(before, after string) []Edit { + if before == after { + return nil // common case + } + + if isASCII(before) && isASCII(after) { + // TODO(adonovan): opt: specialize diffASCII for strings. + return diffASCII([]byte(before), []byte(after)) + } + return diffRunes([]rune(before), []rune(after)) +} + +// Bytes computes the differences between two byte slices. +// The resulting edits respect rune boundaries. +func Bytes(before, after []byte) []Edit { + if bytes.Equal(before, after) { + return nil // common case + } + + if isASCII(before) && isASCII(after) { + return diffASCII(before, after) + } + return diffRunes(runes(before), runes(after)) +} + +func diffASCII(before, after []byte) []Edit { + diffs := lcs.DiffBytes(before, after) + + // Convert from LCS diffs. 
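+	// (editor's note) lcs.Diff records offsets into both sequences; an Edit
+	// instead carries the replacement text itself, so each Diff's B-range is
+	// copied out of `after` into Edit.New.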
+ res := make([]Edit, len(diffs)) + for i, d := range diffs { + res[i] = Edit{d.Start, d.End, string(after[d.ReplStart:d.ReplEnd])} + } + return res +} + +func diffRunes(before, after []rune) []Edit { + diffs := lcs.DiffRunes(before, after) + + // The diffs returned by the lcs package use indexes + // into whatever slice was passed in. + // Convert rune offsets to byte offsets. + res := make([]Edit, len(diffs)) + lastEnd := 0 + utf8Len := 0 + for i, d := range diffs { + utf8Len += runesLen(before[lastEnd:d.Start]) // text between edits + start := utf8Len + utf8Len += runesLen(before[d.Start:d.End]) // text deleted by this edit + res[i] = Edit{start, utf8Len, string(after[d.ReplStart:d.ReplEnd])} + lastEnd = d.End + } + return res +} + +// runes is like []rune(string(bytes)) without the duplicate allocation. +func runes(bytes []byte) []rune { + n := utf8.RuneCount(bytes) + runes := make([]rune, n) + for i := 0; i < n; i++ { + r, sz := utf8.DecodeRune(bytes) + bytes = bytes[sz:] + runes[i] = r + } + return runes +} + +// runesLen returns the length in bytes of the UTF-8 encoding of runes. +func runesLen(runes []rune) (len int) { + for _, r := range runes { + len += utf8.RuneLen(r) + } + return len +} + +// isASCII reports whether s contains only ASCII. +func isASCII[S string | []byte](s S) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/hack/tools/vendor/golang.org/x/tools/internal/diff/unified.go b/hack/tools/vendor/golang.org/x/tools/internal/diff/unified.go new file mode 100644 index 0000000000..cfbda61020 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/tools/internal/diff/unified.go @@ -0,0 +1,251 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "fmt" + "log" + "strings" +) + +// DefaultContextLines is the number of unchanged lines of surrounding +// context displayed by Unified. Use ToUnified to specify a different value. +const DefaultContextLines = 3 + +// Unified returns a unified diff of the old and new strings. +// The old and new labels are the names of the old and new files. +// If the strings are equal, it returns the empty string. +func Unified(oldLabel, newLabel, old, new string) string { + edits := Strings(old, new) + unified, err := ToUnified(oldLabel, newLabel, old, edits, DefaultContextLines) + if err != nil { + // Can't happen: edits are consistent. + log.Fatalf("internal error in diff.Unified: %v", err) + } + return unified +} + +// ToUnified applies the edits to content and returns a unified diff, +// with contextLines lines of (unchanged) context around each diff hunk. +// The old and new labels are the names of the content and result files. +// It returns an error if the edits are inconsistent; see ApplyEdits. +func ToUnified(oldLabel, newLabel, content string, edits []Edit, contextLines int) (string, error) { + u, err := toUnified(oldLabel, newLabel, content, edits, contextLines) + if err != nil { + return "", err + } + return u.String(), nil +} + +// unified represents a set of edits as a unified diff. +type unified struct { + // from is the name of the original file. + from string + // to is the name of the modified file. + to string + // hunks is the set of edit hunks needed to transform the file content. + hunks []*hunk +} + +// Hunk represents a contiguous set of line edits to apply. 
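+//
+// Illustrative example (editor's sketch): a hunk with fromLine=3, toLine=3
+// and lines {delete "old\n", insert "new\n"} prints as
+//
+//	@@ -3 +3 @@
+//	-old
+//	+new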
+type hunk struct { + // The line in the original source where the hunk starts. + fromLine int + // The line in the original source where the hunk finishes. + toLine int + // The set of line based edits to apply. + lines []line +} + +// Line represents a single line operation to apply as part of a Hunk. +type line struct { + // kind is the type of line this represents, deletion, insertion or copy. + kind opKind + // content is the content of this line. + // For deletion it is the line being removed, for all others it is the line + // to put in the output. + content string +} + +// opKind is used to denote the type of operation a line represents. +type opKind int + +const ( + // opDelete is the operation kind for a line that is present in the input + // but not in the output. + opDelete opKind = iota + // opInsert is the operation kind for a line that is new in the output. + opInsert + // opEqual is the operation kind for a line that is the same in the input and + // output, often used to provide context around edited lines. + opEqual +) + +// String returns a human readable representation of an OpKind. It is not +// intended for machine processing. +func (k opKind) String() string { + switch k { + case opDelete: + return "delete" + case opInsert: + return "insert" + case opEqual: + return "equal" + default: + panic("unknown operation kind") + } +} + +// toUnified takes a file contents and a sequence of edits, and calculates +// a unified diff that represents those edits. +func toUnified(fromName, toName string, content string, edits []Edit, contextLines int) (unified, error) { + gap := contextLines * 2 + u := unified{ + from: fromName, + to: toName, + } + if len(edits) == 0 { + return u, nil + } + var err error + edits, err = lineEdits(content, edits) // expand to whole lines + if err != nil { + return u, err + } + lines := splitLines(content) + var h *hunk + last := 0 + toLine := 0 + for _, edit := range edits { + // Compute the zero-based line numbers of the edit start and end. + // TODO(adonovan): opt: compute incrementally, avoid O(n^2). 
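+		// (editor's note) For example, an edit spanning bytes 10..25 of a
+		// file whose first two newlines sit at offsets 5 and 15 gets start=1
+		// and end=2 (zero-based): it deletes lines[1], the second line.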
+ start := strings.Count(content[:edit.Start], "\n") + end := strings.Count(content[:edit.End], "\n") + if edit.End == len(content) && len(content) > 0 && content[len(content)-1] != '\n' { + end++ // EOF counts as an implicit newline + } + + switch { + case h != nil && start == last: + //direct extension + case h != nil && start <= last+gap: + //within range of previous lines, add the joiners + addEqualLines(h, lines, last, start) + default: + //need to start a new hunk + if h != nil { + // add the edge to the previous hunk + addEqualLines(h, lines, last, last+contextLines) + u.hunks = append(u.hunks, h) + } + toLine += start - last + h = &hunk{ + fromLine: start + 1, + toLine: toLine + 1, + } + // add the edge to the new hunk + delta := addEqualLines(h, lines, start-contextLines, start) + h.fromLine -= delta + h.toLine -= delta + } + last = start + for i := start; i < end; i++ { + h.lines = append(h.lines, line{kind: opDelete, content: lines[i]}) + last++ + } + if edit.New != "" { + for _, content := range splitLines(edit.New) { + h.lines = append(h.lines, line{kind: opInsert, content: content}) + toLine++ + } + } + } + if h != nil { + // add the edge to the final hunk + addEqualLines(h, lines, last, last+contextLines) + u.hunks = append(u.hunks, h) + } + return u, nil +} + +func splitLines(text string) []string { + lines := strings.SplitAfter(text, "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} + +func addEqualLines(h *hunk, lines []string, start, end int) int { + delta := 0 + for i := start; i < end; i++ { + if i < 0 { + continue + } + if i >= len(lines) { + return delta + } + h.lines = append(h.lines, line{kind: opEqual, content: lines[i]}) + delta++ + } + return delta +} + +// String converts a unified diff to the standard textual form for that diff. +// The output of this function can be passed to tools like patch. +func (u unified) String() string { + if len(u.hunks) == 0 { + return "" + } + b := new(strings.Builder) + fmt.Fprintf(b, "--- %s\n", u.from) + fmt.Fprintf(b, "+++ %s\n", u.to) + for _, hunk := range u.hunks { + fromCount, toCount := 0, 0 + for _, l := range hunk.lines { + switch l.kind { + case opDelete: + fromCount++ + case opInsert: + toCount++ + default: + fromCount++ + toCount++ + } + } + fmt.Fprint(b, "@@") + if fromCount > 1 { + fmt.Fprintf(b, " -%d,%d", hunk.fromLine, fromCount) + } else if hunk.fromLine == 1 && fromCount == 0 { + // Match odd GNU diff -u behavior adding to empty file. + fmt.Fprintf(b, " -0,0") + } else { + fmt.Fprintf(b, " -%d", hunk.fromLine) + } + if toCount > 1 { + fmt.Fprintf(b, " +%d,%d", hunk.toLine, toCount) + } else if hunk.toLine == 1 && toCount == 0 { + // Match odd GNU diff -u behavior adding to empty file. + fmt.Fprintf(b, " +0,0") + } else { + fmt.Fprintf(b, " +%d", hunk.toLine) + } + fmt.Fprint(b, " @@\n") + for _, l := range hunk.lines { + switch l.kind { + case opDelete: + fmt.Fprintf(b, "-%s", l.content) + case opInsert: + fmt.Fprintf(b, "+%s", l.content) + default: + fmt.Fprintf(b, " %s", l.content) + } + if !strings.HasSuffix(l.content, "\n") { + fmt.Fprintf(b, "\n\\ No newline at end of file\n") + } + } + } + return b.String() +} diff --git a/hack/tools/vendor/golang.org/x/tools/internal/facts/facts.go b/hack/tools/vendor/golang.org/x/tools/internal/facts/facts.go new file mode 100644 index 0000000000..e1c18d373c --- /dev/null +++ b/hack/tools/vendor/golang.org/x/tools/internal/facts/facts.go @@ -0,0 +1,389 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package facts defines a serializable set of analysis.Fact.
+//
+// It provides a partial implementation of the Fact-related parts of the
+// analysis.Pass interface for use in analysis drivers such as "go vet"
+// and other build systems.
+//
+// The serial format is unspecified and may change, so the same version
+// of this package must be used for reading and writing serialized facts.
+//
+// The handling of facts in the analysis system parallels the handling
+// of type information in the compiler: during compilation of package P,
+// the compiler emits an export data file that describes the type of
+// every object (named thing) defined in package P, plus every object
+// indirectly reachable from one of those objects. Thus the downstream
+// compiler of package Q need only load one export data file per direct
+// import of Q, and it will learn everything about the API of package P
+// and everything it needs to know about the API of P's dependencies.
+//
+// Similarly, analysis of package P emits a fact set containing facts
+// about all objects exported from P, plus additional facts about only
+// those objects of P's dependencies that are reachable from the API of
+// package P; the downstream analysis of Q need only load one fact set
+// per direct import of Q.
+//
+// The notion of "exportedness" that matters here is that of the
+// compiler. According to the language spec, a method pkg.T.f is
+// unexported simply because its name starts with lowercase. But the
+// compiler must nonetheless export f so that downstream compilations can
+// accurately ascertain whether pkg.T implements an interface pkg.I
+// defined as interface{f()}. Exported thus means "described in export
+// data".
+package facts
+
+import (
+    "bytes"
+    "encoding/gob"
+    "fmt"
+    "go/types"
+    "io"
+    "log"
+    "reflect"
+    "sort"
+    "sync"
+
+    "golang.org/x/tools/go/analysis"
+    "golang.org/x/tools/go/types/objectpath"
+)
+
+const debug = false
+
+// A Set is a set of analysis.Facts.
+//
+// Decode creates a Set of facts by reading from the imports of a given
+// package, and Encode writes out the set. Between these operations,
+// the Import and Export methods will query and update the set.
+//
+// All of Set's methods except String are safe to call concurrently.
+type Set struct {
+    pkg *types.Package
+    mu  sync.Mutex
+    m   map[key]analysis.Fact
+}
+
+type key struct {
+    pkg *types.Package
+    obj types.Object // (object facts only)
+    t   reflect.Type
+}
+
+// ImportObjectFact implements analysis.Pass.ImportObjectFact.
+func (s *Set) ImportObjectFact(obj types.Object, ptr analysis.Fact) bool {
+    if obj == nil {
+        panic("nil object")
+    }
+    key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(ptr)}
+    s.mu.Lock()
+    defer s.mu.Unlock()
+    if v, ok := s.m[key]; ok {
+        reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+        return true
+    }
+    return false
+}
+
+// ExportObjectFact implements analysis.Pass.ExportObjectFact.
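+//
+// Illustrative example (editor's sketch; the fact type is hypothetical):
+//
+//	type isNoReturn struct{} // fact: function never returns
+//	func (isNoReturn) AFact() {}
+//	...
+//	s.ExportObjectFact(fn, &isNoReturn{})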
+func (s *Set) ExportObjectFact(obj types.Object, fact analysis.Fact) {
+    if obj.Pkg() != s.pkg {
+        log.Panicf("in package %s: ExportObjectFact(%s, %T): can't set fact on object belonging to another package",
+            s.pkg, obj, fact)
+    }
+    key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(fact)}
+    s.mu.Lock()
+    s.m[key] = fact // clobber any existing entry
+    s.mu.Unlock()
+}
+
+func (s *Set) AllObjectFacts(filter map[reflect.Type]bool) []analysis.ObjectFact {
+    var facts []analysis.ObjectFact
+    s.mu.Lock()
+    for k, v := range s.m {
+        if k.obj != nil && filter[k.t] {
+            facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: v})
+        }
+    }
+    s.mu.Unlock()
+    return facts
+}
+
+// ImportPackageFact implements analysis.Pass.ImportPackageFact.
+func (s *Set) ImportPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
+    if pkg == nil {
+        panic("nil package")
+    }
+    key := key{pkg: pkg, t: reflect.TypeOf(ptr)}
+    s.mu.Lock()
+    defer s.mu.Unlock()
+    if v, ok := s.m[key]; ok {
+        reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+        return true
+    }
+    return false
+}
+
+// ExportPackageFact implements analysis.Pass.ExportPackageFact.
+func (s *Set) ExportPackageFact(fact analysis.Fact) {
+    key := key{pkg: s.pkg, t: reflect.TypeOf(fact)}
+    s.mu.Lock()
+    s.m[key] = fact // clobber any existing entry
+    s.mu.Unlock()
+}
+
+func (s *Set) AllPackageFacts(filter map[reflect.Type]bool) []analysis.PackageFact {
+    var facts []analysis.PackageFact
+    s.mu.Lock()
+    for k, v := range s.m {
+        if k.obj == nil && filter[k.t] {
+            facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: v})
+        }
+    }
+    s.mu.Unlock()
+    return facts
+}
+
+// gobFact is the Gob declaration of a serialized fact.
+type gobFact struct {
+    PkgPath string          // path of package
+    Object  objectpath.Path // optional path of object relative to package itself
+    Fact    analysis.Fact   // type and value of user-defined Fact
+}
+
+// A Decoder decodes the facts from the direct imports of the package
+// provided to NewDecoder. A single decoder may be used to decode
+// multiple fact sets (e.g. each for a different set of fact types)
+// for the same package. Each call to Decode returns an independent
+// fact set.
+type Decoder struct {
+    pkg        *types.Package
+    getPackage GetPackageFunc
+}
+
+// NewDecoder returns a fact decoder for the specified package.
+//
+// It uses a brute-force recursive approach to enumerate all objects
+// defined by dependencies of pkg, so that it can learn the set of
+// package paths that may be mentioned in the fact encoding. This does
+// not scale well; use [NewDecoderFunc] where possible.
+func NewDecoder(pkg *types.Package) *Decoder {
+    // Compute the import map for this package.
+    // See the package doc comment.
+    m := importMap(pkg.Imports())
+    getPackageFunc := func(path string) *types.Package { return m[path] }
+    return NewDecoderFunc(pkg, getPackageFunc)
+}
+
+// NewDecoderFunc returns a fact decoder for the specified package.
+//
+// It calls the getPackage function for the package path string of
+// each dependency (perhaps indirect) that it encounters in the
+// encoding. If the function returns nil, the fact is discarded.
+//
+// This function is preferred over [NewDecoder] when the client is
+// capable of efficient look-up of packages by package path.
+func NewDecoderFunc(pkg *types.Package, getPackage GetPackageFunc) *Decoder {
+    return &Decoder{
+        pkg:        pkg,
+        getPackage: getPackage,
+    }
+}
+
+// A GetPackageFunc function returns the package denoted by a package path.
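+//
+// A minimal sketch of its use (editor's addition; myIndex is hypothetical):
+//
+//	getPkg := func(path string) *types.Package { return myIndex[path] }
+//	dec := NewDecoderFunc(pkg, getPkg)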
+type GetPackageFunc = func(pkgPath string) *types.Package
+
+// Decode decodes all the facts relevant to the analysis of package
+// pkgPath. The read function reads serialized fact data from an external
+// source for one of pkg's direct imports, identified by package path.
+// The empty file is a valid encoding of an empty fact set.
+//
+// It is the caller's responsibility to call gob.Register on all
+// necessary fact types.
+//
+// Concurrent calls to Decode are safe, so long as the
+// [GetPackageFunc] (if any) is also concurrency-safe.
+func (d *Decoder) Decode(read func(pkgPath string) ([]byte, error)) (*Set, error) {
+    // Read facts from imported packages.
+    // Facts may describe indirectly imported packages, or their objects.
+    m := make(map[key]analysis.Fact) // one big bucket
+    for _, imp := range d.pkg.Imports() {
+        logf := func(format string, args ...interface{}) {
+            if debug {
+                prefix := fmt.Sprintf("in %s, importing %s: ",
+                    d.pkg.Path(), imp.Path())
+                log.Print(prefix, fmt.Sprintf(format, args...))
+            }
+        }
+
+        // Read the gob-encoded facts.
+        data, err := read(imp.Path())
+        if err != nil {
+            return nil, fmt.Errorf("in %s, can't import facts for package %q: %v",
+                d.pkg.Path(), imp.Path(), err)
+        }
+        if len(data) == 0 {
+            continue // no facts
+        }
+        var gobFacts []gobFact
+        if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&gobFacts); err != nil {
+            return nil, fmt.Errorf("decoding facts for %q: %v", imp.Path(), err)
+        }
+        logf("decoded %d facts: %v", len(gobFacts), gobFacts)
+
+        // Parse each one into a key and a Fact.
+        for _, f := range gobFacts {
+            factPkg := d.getPackage(f.PkgPath) // possibly an indirect dependency
+            if factPkg == nil {
+                // Fact relates to a dependency that was
+                // unused in this translation unit. Skip.
+                logf("no package %q; discarding %v", f.PkgPath, f.Fact)
+                continue
+            }
+            key := key{pkg: factPkg, t: reflect.TypeOf(f.Fact)}
+            if f.Object != "" {
+                // object fact
+                obj, err := objectpath.Object(factPkg, f.Object)
+                if err != nil {
+                    // (most likely due to unexported object)
+                    // TODO(adonovan): audit for other possibilities.
+                    logf("no object for path: %v; discarding %s", err, f.Fact)
+                    continue
+                }
+                key.obj = obj
+                logf("read %T fact %s for %v", f.Fact, f.Fact, key.obj)
+            } else {
+                // package fact
+                logf("read %T fact %s for %v", f.Fact, f.Fact, factPkg)
+            }
+            m[key] = f.Fact
+        }
+    }
+
+    return &Set{pkg: d.pkg, m: m}, nil
+}
+
+// Encode encodes a set of facts to a memory buffer.
+//
+// It may fail if one of the Facts could not be gob-encoded, but this is
+// a sign of a bug in an Analyzer.
+func (s *Set) Encode() []byte {
+    encoder := new(objectpath.Encoder)
+
+    // TODO(adonovan): opt: use a more efficient encoding
+    // that avoids repeating PkgPath for each fact.
+
+    // Gather all facts, including those from imported packages.
+    var gobFacts []gobFact
+
+    s.mu.Lock()
+    for k, fact := range s.m {
+        if debug {
+            log.Printf("%v => %s\n", k, fact)
+        }
+
+        // Don't export facts that we imported from another
+        // package, unless they represent fields or methods,
+        // or package-level types.
+        // (Facts about packages, and other package-level
+        // objects, are only obtained from direct imports so
+        // they needn't be reexported.)
+        //
+        // This is analogous to the pruning done by "deep"
+        // export data for types, but not as precise because
+        // we aren't careful about which structs or methods
+        // we re-export: it should be only those referenced
+        // from the API of s.pkg.
+        // TODO(adonovan): opt: be more precise. e.g.
+        // intersect with the set of objects computed by
+        // importMap(s.pkg.Imports()).
+        // TODO(adonovan): opt: implement "shallow" facts.
+        if k.pkg != s.pkg {
+            if k.obj == nil {
+                continue // imported package fact
+            }
+            if _, isType := k.obj.(*types.TypeName); !isType &&
+                k.obj.Parent() == k.obj.Pkg().Scope() {
+                continue // imported fact about package-level non-type object
+            }
+        }
+
+        var object objectpath.Path
+        if k.obj != nil {
+            path, err := encoder.For(k.obj)
+            if err != nil {
+                if debug {
+                    log.Printf("discarding fact %s about %s\n", fact, k.obj)
+                }
+                continue // object not accessible from package API; discard fact
+            }
+            object = path
+        }
+        gobFacts = append(gobFacts, gobFact{
+            PkgPath: k.pkg.Path(),
+            Object:  object,
+            Fact:    fact,
+        })
+    }
+    s.mu.Unlock()
+
+    // Sort facts by (package, object, type) for determinism.
+    sort.Slice(gobFacts, func(i, j int) bool {
+        x, y := gobFacts[i], gobFacts[j]
+        if x.PkgPath != y.PkgPath {
+            return x.PkgPath < y.PkgPath
+        }
+        if x.Object != y.Object {
+            return x.Object < y.Object
+        }
+        tx := reflect.TypeOf(x.Fact)
+        ty := reflect.TypeOf(y.Fact)
+        if tx != ty {
+            return tx.String() < ty.String()
+        }
+        return false // equal
+    })
+
+    var buf bytes.Buffer
+    if len(gobFacts) > 0 {
+        if err := gob.NewEncoder(&buf).Encode(gobFacts); err != nil {
+            // Fact encoding should never fail. Identify the culprit.
+            for _, gf := range gobFacts {
+                if err := gob.NewEncoder(io.Discard).Encode(gf); err != nil {
+                    fact := gf.Fact
+                    pkgpath := reflect.TypeOf(fact).Elem().PkgPath()
+                    log.Panicf("internal error: gob encoding of analysis fact %s failed: %v; please report a bug against fact %T in package %q",
+                        fact, err, fact, pkgpath)
+                }
+            }
+        }
+    }
+
+    if debug {
+        log.Printf("package %q: encode %d facts, %d bytes\n",
+            s.pkg.Path(), len(gobFacts), buf.Len())
+    }
+
+    return buf.Bytes()
+}
+
+// String is provided only for debugging, and must not be called
+// concurrently with any Import/Export method.
+func (s *Set) String() string {
+    var buf bytes.Buffer
+    buf.WriteString("{")
+    for k, f := range s.m {
+        if buf.Len() > 1 {
+            buf.WriteString(", ")
+        }
+        if k.obj != nil {
+            buf.WriteString(k.obj.String())
+        } else {
+            buf.WriteString(k.pkg.Path())
+        }
+        fmt.Fprintf(&buf, ": %v", f)
+    }
+    buf.WriteString("}")
+    return buf.String()
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/facts/imports.go b/hack/tools/vendor/golang.org/x/tools/internal/facts/imports.go
new file mode 100644
index 0000000000..ed5ec5fa13
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/facts/imports.go
@@ -0,0 +1,146 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package facts
+
+import (
+    "go/types"
+
+    "golang.org/x/tools/internal/aliases"
+    "golang.org/x/tools/internal/typesinternal"
+)
+
+// importMap computes the import map for a package by traversing the
+// entire exported API of each of its imports.
+//
+// This is a workaround for the fact that we cannot access the map used
+// internally by the types.Importer returned by go/importer. The entries
+// in this map are the packages and objects that may be relevant to the
+// current analysis unit.
+//
+// Packages in the map that are only indirectly imported may be
+// incomplete (!pkg.Complete()).
+//
+// This function scales very poorly with packages' transitive object
+// references, which can be more than a million for each package near
+// the top of a large project.
(This was a significant contributor to +// #60621.) +// TODO(adonovan): opt: compute this information more efficiently +// by obtaining it from the internals of the gcexportdata decoder. +func importMap(imports []*types.Package) map[string]*types.Package { + objects := make(map[types.Object]bool) + typs := make(map[types.Type]bool) // Named and TypeParam + packages := make(map[string]*types.Package) + + var addObj func(obj types.Object) + var addType func(T types.Type) + + addObj = func(obj types.Object) { + if !objects[obj] { + objects[obj] = true + addType(obj.Type()) + if pkg := obj.Pkg(); pkg != nil { + packages[pkg.Path()] = pkg + } + } + } + + addType = func(T types.Type) { + switch T := T.(type) { + case *types.Basic: + // nop + case typesinternal.NamedOrAlias: // *types.{Named,Alias} + // Add the type arguments if this is an instance. + if targs := typesinternal.TypeArgs(T); targs.Len() > 0 { + for i := 0; i < targs.Len(); i++ { + addType(targs.At(i)) + } + } + + // Remove infinite expansions of *types.Named by always looking at the origin. + // Some named types with type parameters [that will not type check] have + // infinite expansions: + // type N[T any] struct { F *N[N[T]] } + // importMap() is called on such types when Analyzer.RunDespiteErrors is true. + T = typesinternal.Origin(T) + if !typs[T] { + typs[T] = true + + // common aspects + addObj(T.Obj()) + if tparams := typesinternal.TypeParams(T); tparams.Len() > 0 { + for i := 0; i < tparams.Len(); i++ { + addType(tparams.At(i)) + } + } + + // variant aspects + switch T := T.(type) { + case *types.Alias: + addType(aliases.Rhs(T)) + case *types.Named: + addType(T.Underlying()) + for i := 0; i < T.NumMethods(); i++ { + addObj(T.Method(i)) + } + } + } + case *types.Pointer: + addType(T.Elem()) + case *types.Slice: + addType(T.Elem()) + case *types.Array: + addType(T.Elem()) + case *types.Chan: + addType(T.Elem()) + case *types.Map: + addType(T.Key()) + addType(T.Elem()) + case *types.Signature: + addType(T.Params()) + addType(T.Results()) + if tparams := T.TypeParams(); tparams != nil { + for i := 0; i < tparams.Len(); i++ { + addType(tparams.At(i)) + } + } + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + addObj(T.Field(i)) + } + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + addObj(T.At(i)) + } + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + addObj(T.Method(i)) + } + for i := 0; i < T.NumEmbeddeds(); i++ { + addType(T.EmbeddedType(i)) // walk Embedded for implicits + } + case *types.Union: + for i := 0; i < T.Len(); i++ { + addType(T.Term(i).Type()) + } + case *types.TypeParam: + if !typs[T] { + typs[T] = true + addObj(T.Obj()) + addType(T.Constraint()) + } + } + } + + for _, imp := range imports { + packages[imp.Path()] = imp + + scope := imp.Scope() + for _, name := range scope.Names() { + addObj(scope.Lookup(name)) + } + } + + return packages +} diff --git a/hack/tools/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/hack/tools/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index f6437feb1c..6f5d8a2139 100644 --- a/hack/tools/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/hack/tools/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -39,12 +39,15 @@ func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { } // FindExportData positions the reader r at the beginning of the -// export data section of an underlying GC-created object/archive +// export data section of an underlying cmd/compile created archive // 
file by reading from it. The reader must be positioned at the
-// start of the file before calling this function. The hdr result
-// is the string before the export data, either "$$" or "$$B".
-// The size result is the length of the export data in bytes, or -1 if not known.
-func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
+// start of the file before calling this function.
+// The size result is the length of the export data in bytes.
+//
+// This function is needed by [gcexportdata.Read], which must
+// accept inputs produced by the last two releases of cmd/compile,
+// plus tip.
+func FindExportData(r *bufio.Reader) (size int64, err error) {
 	// Read first line to make sure this is an object file.
 	line, err := r.ReadSlice('\n')
 	if err != nil {
@@ -52,27 +55,32 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
 		return
 	}
 
-	if string(line) == "!<arch>\n" {
-		// Archive file. Scan to __.PKGDEF.
-		var name string
-		if name, size, err = readGopackHeader(r); err != nil {
-			return
-		}
+	// Is the first line an archive file signature?
+	if string(line) != "!<arch>\n" {
+		err = fmt.Errorf("not the start of an archive file (%q)", line)
+		return
+	}
 
-		// First entry should be __.PKGDEF.
-		if name != "__.PKGDEF" {
-			err = fmt.Errorf("go archive is missing __.PKGDEF")
-			return
-		}
+	// Archive file. Scan to __.PKGDEF.
+	var name string
+	if name, size, err = readGopackHeader(r); err != nil {
+		return
+	}
+	arsize := size
 
-		// Read first line of __.PKGDEF data, so that line
-		// is once again the first line of the input.
-		if line, err = r.ReadSlice('\n'); err != nil {
-			err = fmt.Errorf("can't find export data (%v)", err)
-			return
-		}
-		size -= int64(len(line))
+	// First entry should be __.PKGDEF.
+	if name != "__.PKGDEF" {
+		err = fmt.Errorf("go archive is missing __.PKGDEF")
+		return
+	}
+
+	// Read first line of __.PKGDEF data, so that line
+	// is once again the first line of the input.
+	if line, err = r.ReadSlice('\n'); err != nil {
+		err = fmt.Errorf("can't find export data (%v)", err)
+		return
 	}
+	size -= int64(len(line))
 
 	// Now at __.PKGDEF in archive or still at beginning of file.
 	// Either way, line should begin with "go object ".
@@ -81,8 +89,8 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
 		return
 	}
 
-	// Skip over object header to export data.
-	// Begins after first line starting with $$.
+	// Skip over object headers to get to the export data section header "$$B\n".
+	// Object headers are lines that do not start with '$'.
 	for line[0] != '$' {
 		if line, err = r.ReadSlice('\n'); err != nil {
 			err = fmt.Errorf("can't find export data (%v)", err)
@@ -90,9 +98,18 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
 		}
 		size -= int64(len(line))
 	}
-	hdr = string(line)
+
+	// Check for the binary export data section header "$$B\n".
+	hdr := string(line)
+	if hdr != "$$B\n" {
+		err = fmt.Errorf("unknown export data header: %q", hdr)
+		return
+	}
+	// TODO(taking): Remove end-of-section marker "\n$$\n" from size.
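+	// (editor's note) At this point the reader has consumed, for a typical
+	// archive produced by cmd/compile:
+	//
+	//	!<arch>\n            archive signature
+	//	__.PKGDEF ...        archive entry header
+	//	go object go1.x ...  and further object header lines
+	//	$$B\n                export data section header
+	//
+	// and size counts the export data bytes that remain (still including
+	// the end-of-section marker; see the TODO above).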
+ if size < 0 { - size = -1 + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) + return } return diff --git a/hack/tools/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/hack/tools/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index e6c5d51f8e..dbbca86043 100644 --- a/hack/tools/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/hack/tools/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -161,6 +161,8 @@ func FindPkg(path, srcDir string) (filename, id string) { // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. +// +// TODO(taking): Import is only used in tests. Move to gcimporter_test. func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser var filename, id string @@ -210,58 +212,50 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } defer rc.Close() - var hdr string var size int64 buf := bufio.NewReader(rc) - if hdr, size, err = FindExportData(buf); err != nil { + if size, err = FindExportData(buf); err != nil { return } - switch hdr { - case "$$B\n": - var data []byte - data, err = io.ReadAll(buf) - if err != nil { - break - } + var data []byte + data, err = io.ReadAll(buf) + if err != nil { + return + } + if len(data) == 0 { + return nil, fmt.Errorf("no data to load a package from for path %s", id) + } - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // Select appropriate importer. - if len(data) > 0 { - switch data[0] { - case 'v', 'c', 'd': - // binary: emitted by cmd/compile till go1.10; obsolete. - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': - // indexed: emitted by cmd/compile till go1.19; - // now used only for serializing go/types. - // See https://github.com/golang/go/issues/69491. - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err - - case 'u': - // unified: emitted by cmd/compile since go1.20. - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } - } + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // Select appropriate importer. + switch data[0] { + case 'v', 'c', 'd': + // binary: emitted by cmd/compile till go1.10; obsolete. + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) + + case 'i': + // indexed: emitted by cmd/compile till go1.19; + // now used only for serializing go/types. + // See https://github.com/golang/go/issues/69491. + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'u': + // unified: emitted by cmd/compile since go1.20. 
+ _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err default: - err = fmt.Errorf("unknown export data header: %q", hdr) + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) } - - return } type byPath []*types.Package diff --git a/hack/tools/vendor/golang.org/x/tools/internal/imports/source.go b/hack/tools/vendor/golang.org/x/tools/internal/imports/source.go index 5d2aeeebc9..cbe4f3c5ba 100644 --- a/hack/tools/vendor/golang.org/x/tools/internal/imports/source.go +++ b/hack/tools/vendor/golang.org/x/tools/internal/imports/source.go @@ -59,5 +59,5 @@ type Source interface { // candidates satisfy all missing references for that package name. It is up // to each data source to select the best result for each entry in the // missing map. - ResolveReferences(ctx context.Context, filename string, missing References) (map[PackageName]*Result, error) + ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) } diff --git a/hack/tools/vendor/golang.org/x/tools/internal/imports/source_env.go b/hack/tools/vendor/golang.org/x/tools/internal/imports/source_env.go index ff9555d287..d14abaa319 100644 --- a/hack/tools/vendor/golang.org/x/tools/internal/imports/source_env.go +++ b/hack/tools/vendor/golang.org/x/tools/internal/imports/source_env.go @@ -48,7 +48,7 @@ func (s *ProcessEnvSource) LoadPackageNames(ctx context.Context, srcDir string, return r.loadPackageNames(unknown, srcDir) } -func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) (map[string]*Result, error) { +func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename string, refs map[string]map[string]bool) ([]*Result, error) { var mu sync.Mutex found := make(map[string][]pkgDistance) callback := &scanCallback{ @@ -121,5 +121,9 @@ func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename strin if err := g.Wait(); err != nil { return nil, err } - return results, nil + var ans []*Result + for _, x := range results { + ans = append(ans, x) + } + return ans, nil } diff --git a/hack/tools/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/hack/tools/vendor/golang.org/x/tools/internal/imports/source_modindex.go new file mode 100644 index 0000000000..05229f06ce --- /dev/null +++ b/hack/tools/vendor/golang.org/x/tools/internal/imports/source_modindex.go @@ -0,0 +1,103 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "context" + "sync" + "time" + + "golang.org/x/tools/internal/modindex" +) + +// This code is here rather than in the modindex package +// to avoid import loops + +// implements Source using modindex, so only for module cache. +// +// this is perhaps over-engineered. A new Index is read at first use. +// And then Update is called after every 15 minutes, and a new Index +// is read if the index changed. It is not clear the Mutex is needed. +type IndexSource struct { + modcachedir string + mutex sync.Mutex + ix *modindex.Index + expires time.Time +} + +// create a new Source. Called from NewView in cache/session.go. 
+func NewIndexSource(cachedir string) *IndexSource { + return &IndexSource{modcachedir: cachedir} +} + +func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths []ImportPath) (map[ImportPath]PackageName, error) { + /// This is used by goimports to resolve the package names of imports of the + // current package, which is irrelevant for the module cache. + return nil, nil +} + +func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) { + if err := s.maybeReadIndex(); err != nil { + return nil, err + } + var cs []modindex.Candidate + for pkg, nms := range missing { + for nm := range nms { + x := s.ix.Lookup(pkg, nm, false) + cs = append(cs, x...) + } + } + found := make(map[string]*Result) + for _, c := range cs { + var x *Result + if x = found[c.ImportPath]; x == nil { + x = &Result{ + Import: &ImportInfo{ + ImportPath: c.ImportPath, + Name: "", + }, + Package: &PackageInfo{ + Name: c.PkgName, + Exports: make(map[string]bool), + }, + } + found[c.ImportPath] = x + } + x.Package.Exports[c.Name] = true + } + var ans []*Result + for _, x := range found { + ans = append(ans, x) + } + return ans, nil +} + +func (s *IndexSource) maybeReadIndex() error { + s.mutex.Lock() + defer s.mutex.Unlock() + + var readIndex bool + if time.Now().After(s.expires) { + ok, err := modindex.Update(s.modcachedir) + if err != nil { + return err + } + if ok { + readIndex = true + } + } + + if readIndex || s.ix == nil { + ix, err := modindex.ReadIndex(s.modcachedir) + if err != nil { + return err + } + s.ix = ix + // for now refresh every 15 minutes + s.expires = time.Now().Add(time.Minute * 15) + } + + return nil +} diff --git a/hack/tools/vendor/golang.org/x/tools/internal/modindex/directories.go b/hack/tools/vendor/golang.org/x/tools/internal/modindex/directories.go new file mode 100644 index 0000000000..1e1a02f239 --- /dev/null +++ b/hack/tools/vendor/golang.org/x/tools/internal/modindex/directories.go @@ -0,0 +1,135 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modindex + +import ( + "fmt" + "log" + "os" + "path/filepath" + "regexp" + "slices" + "strings" + "sync" + "time" + + "golang.org/x/mod/semver" + "golang.org/x/tools/internal/gopathwalk" +) + +type directory struct { + path Relpath + importPath string + version string // semantic version + syms []symbol +} + +// filterDirs groups the directories by import path, +// sorting the ones with the same import path by semantic version, +// most recent first. +func byImportPath(dirs []Relpath) (map[string][]*directory, error) { + ans := make(map[string][]*directory) // key is import path + for _, d := range dirs { + ip, sv, err := DirToImportPathVersion(d) + if err != nil { + return nil, err + } + ans[ip] = append(ans[ip], &directory{ + path: d, + importPath: ip, + version: sv, + }) + } + for k, v := range ans { + semanticSort(v) + ans[k] = v + } + return ans, nil +} + +// sort the directories by semantic version, latest first +func semanticSort(v []*directory) { + slices.SortFunc(v, func(l, r *directory) int { + if n := semver.Compare(l.version, r.version); n != 0 { + return -n // latest first + } + return strings.Compare(string(l.path), string(r.path)) + }) +} + +// modCacheRegexp splits a relpathpath into module, module version, and package. 
+var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +// DirToImportPathVersion computes import path and semantic version +func DirToImportPathVersion(dir Relpath) (string, string, error) { + m := modCacheRegexp.FindStringSubmatch(string(dir)) + // m[1] is the module path + // m[2] is the version major.minor.patch(-
= 4 {
+					sig := strings.Split(flds[3], " ")
+					for i := 0; i < len(sig); i++ {
+						// $ cannot otherwise occur. Removing the spaces
+						// almost works, but fails for e.g. chan struct{}.
+						sig[i] = strings.Replace(sig[i], "$", " ", -1)
+					}
+					px.Sig = toFields(sig)
+				}
+			}
+			ans = append(ans, px)
+		}
+	}
+	return ans
+}
+
+func toFields(sig []string) []Field {
+	ans := make([]Field, len(sig)/2)
+	for i := 0; i < len(ans); i++ {
+		ans[i] = Field{Arg: sig[2*i], Type: sig[2*i+1]}
+	}
+	return ans
+}
+
+// benchmarks show this is measurably better than strings.Split
+func fastSplit(x string) []string {
+	ans := make([]string, 0, 4)
+	nxt := 0
+	start := 0
+	for i := 0; i < len(x); i++ {
+		if x[i] != ' ' {
+			continue
+		}
+		ans = append(ans, x[start:i])
+		nxt++
+		start = i + 1
+		if nxt >= 3 {
+			break
+		}
+	}
+	ans = append(ans, x[start:])
+	return ans
+}
+
+func asLexType(c byte) LexType {
+	switch c {
+	case 'C':
+		return Const
+	case 'V':
+		return Var
+	case 'T':
+		return Type
+	case 'F':
+		return Func
+	}
+	return -1
+}
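
To make the name encoding above concrete, here is a minimal, self-contained sketch (not the vendored code; the sample entry and helper names are invented, and strings.SplitN stands in for fastSplit) of how an encoded func entry decodes:

```go
package main

import (
	"fmt"
	"strings"
)

// Field mirrors the (Arg, Type) pairs built by toFields above.
type Field struct{ Arg, Type string }

// decodeFunc splits an encoded entry "<name> F <results> (<arg> <type>)*"
// into its parts, restoring the spaces that the indexer encoded as '$'.
func decodeFunc(entry string) (name string, results string, fields []Field) {
	flds := strings.SplitN(entry, " ", 4) // stands in for fastSplit
	name, results = flds[0], flds[2]
	if len(flds) == 4 {
		sig := strings.Split(flds[3], " ")
		for i := range sig {
			sig[i] = strings.ReplaceAll(sig[i], "$", " ")
		}
		for i := 0; i+1 < len(sig); i += 2 {
			fields = append(fields, Field{Arg: sig[i], Type: sig[i+1]})
		}
	}
	return
}

func main() {
	// Encodes: func Watch(ch chan struct{}) with one result.
	name, results, fields := decodeFunc("Watch F 1 ch chan$struct{}")
	fmt.Println(name, results, fields) // Watch 1 [{ch chan struct{}}]
}
```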
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/modindex/modindex.go b/hack/tools/vendor/golang.org/x/tools/internal/modindex/modindex.go
new file mode 100644
index 0000000000..355a53e71a
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/modindex/modindex.go
@@ -0,0 +1,164 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package modindex contains code for building and searching an index to
+// the Go module cache. The directory containing the index, returned by
+// IndexDir(), contains a file index-name-<ver> that contains the name
+// of the current index. We believe writing that short file is atomic.
+// ReadIndex reads that file to get the file name of the index.
+// WriteIndex writes an index with a unique name and then
+// writes that name into a new version of index-name-<ver>.
+// (<ver> stands for the CurrentVersion of the index format.)
+package modindex
+
+import (
+	"path/filepath"
+	"slices"
+	"strings"
+	"time"
+
+	"golang.org/x/mod/semver"
+)
+
+// Create always creates a new index for the go module cache that is in cachedir.
+func Create(cachedir string) error {
+	_, err := indexModCache(cachedir, true)
+	return err
+}
+
+// Update the index for the go module cache that is in cachedir.
+// If there is no existing index it will build one.
+// If there are changed directories since the last index, it will
+// write a new one and return true. Otherwise it returns false.
+func Update(cachedir string) (bool, error) {
+	return indexModCache(cachedir, false)
+}
+
+// indexModCache writes an index current as of when it is called.
+// If clear is true the index is constructed from all of GOMODCACHE
+// otherwise the index is constructed from the last previous index
+// and the updates to the cache. It returns true if it wrote an index,
+// false otherwise.
+func indexModCache(cachedir string, clear bool) (bool, error) {
+	cachedir, err := filepath.Abs(cachedir)
+	if err != nil {
+		return false, err
+	}
+	cd := Abspath(cachedir)
+	future := time.Now().Add(24 * time.Hour) // safely in the future
+	ok, err := modindexTimed(future, cd, clear)
+	if err != nil {
+		return false, err
+	}
+	return ok, nil
+}
+
+// modindexTimed writes an index current as of onlyBefore.
+// If clear is true the index is constructed from all of GOMODCACHE
+// otherwise the index is constructed from the last previous index
+// and all the updates to the cache before onlyBefore.
+// It returns true if it wrote a new index, false if it wrote nothing.
+func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
+	var curIndex *Index
+	if !clear {
+		var err error
+		curIndex, err = ReadIndex(string(cachedir))
+		if clear && err != nil {
+			return false, err
+		}
+		// TODO(pjw): check that most of those directories still exist
+	}
+	cfg := &work{
+		onlyBefore: onlyBefore,
+		oldIndex:   curIndex,
+		cacheDir:   cachedir,
+	}
+	if curIndex != nil {
+		cfg.onlyAfter = curIndex.Changed
+	}
+	if err := cfg.buildIndex(); err != nil {
+		return false, err
+	}
+	if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
+		// no changes from existing curIndex, don't write a new index
+		return false, nil
+	}
+	if err := cfg.writeIndex(); err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+type work struct {
+	onlyBefore time.Time // do not use directories later than this
+	onlyAfter  time.Time // only interested in directories after this
+	// directories from before onlyAfter come from oldIndex
+	oldIndex *Index
+	newIndex *Index
+	cacheDir Abspath
+}
+
+func (w *work) buildIndex() error {
+	// The effective date of the new index should be at least
+	// slightly earlier than when the directories are scanned
+	// so set it now.
+	w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
+	dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
+	if len(dirs) == 0 {
+		return nil
+	}
+	newdirs, err := byImportPath(dirs)
+	if err != nil {
+		return err
+	}
+	// Each import path may occur only in newdirs,
+	// only in w.oldIndex, or in both.
+	// If it occurs in both, use the semantically later one.
+	if w.oldIndex != nil {
+		for _, e := range w.oldIndex.Entries {
+			found, ok := newdirs[e.ImportPath]
+			if !ok {
+				w.newIndex.Entries = append(w.newIndex.Entries, e)
+				continue // use this one, there is no new one
+			}
+			if semver.Compare(found[0].version, e.Version) > 0 {
+				// use the new one
+			} else {
+				// use the old one, forget the new one
+				w.newIndex.Entries = append(w.newIndex.Entries, e)
+				delete(newdirs, e.ImportPath)
+			}
+		}
+	}
+	// get symbol information for all the new directories
+	getSymbols(w.cacheDir, newdirs)
+	// assemble the new index entries
+	for k, v := range newdirs {
+		d := v[0]
+		pkg, names := processSyms(d.syms)
+		if pkg == "" {
+			continue // PJW: does this ever happen?
+		}
+		entry := Entry{
+			PkgName:    pkg,
+			Dir:        d.path,
+			ImportPath: k,
+			Version:    d.version,
+			Names:      names,
+		}
+		w.newIndex.Entries = append(w.newIndex.Entries, entry)
+	}
+	// sort the entries in the new index
+	slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
+		if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
+			return n
+		}
+		return strings.Compare(l.ImportPath, r.ImportPath)
+	})
+	return nil
+}
+
+func (w *work) writeIndex() error {
+	return writeIndex(w.cacheDir, w.newIndex)
+}
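
Taken together, this file gives the index lifecycle: Create for a full rebuild, Update for an incremental refresh, ReadIndex to load the index currently named by the index-name-<ver> file. A hedged usage sketch follows; modindex is internal to x/tools, so the import is purely illustrative, and the cache path and Index fields are the ones shown above:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"golang.org/x/tools/internal/modindex" // internal package; import shown for illustration only
)

func main() {
	// The module cache root; $GOPATH/pkg/mod is an assumption for the demo.
	cachedir := filepath.Join(os.Getenv("GOPATH"), "pkg", "mod")

	// Update builds a fresh index when none exists; otherwise it merges the
	// old entries with newly scanned directories and reports whether a new
	// index file was written.
	changed, err := modindex.Update(cachedir)
	if err != nil {
		log.Fatal(err)
	}
	if changed {
		ix, err := modindex.ReadIndex(cachedir)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("index of %d import paths, current as of %v\n", len(ix.Entries), ix.Changed)
	}
}
```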
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/modindex/symbols.go b/hack/tools/vendor/golang.org/x/tools/internal/modindex/symbols.go
new file mode 100644
index 0000000000..2e285ed996
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/modindex/symbols.go
@@ -0,0 +1,189 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/token"
+	"go/types"
+	"os"
+	"path/filepath"
+	"slices"
+	"strings"
+
+	"golang.org/x/sync/errgroup"
+)
+
+// The name of a symbol contains information about the symbol:
+//  <name> T for types
+//  <name> C for consts
+//  <name> V for vars
+// and for funcs: <name> F <number of results> (<arg-name> <arg-type>)*
+// any spaces in <arg-type> are replaced by $s so that the fields
+// of the name are space separated
+type symbol struct {
+	pkg  string // name of the symbol's package
+	name string // declared name
+	kind string // T, C, V, or F
+	sig  string // signature information, for F
+}
+
+// find the symbols for the best directories
+func getSymbols(cd Abspath, dirs map[string][]*directory) {
+	var g errgroup.Group
+	g.SetLimit(-1) // maybe throttle this some day
+	for _, vv := range dirs {
+		// throttling some day?
+		d := vv[0]
+		g.Go(func() error {
+			thedir := filepath.Join(string(cd), string(d.path))
+			mode := parser.SkipObjectResolution
+
+			fi, err := os.ReadDir(thedir)
+			if err != nil {
+				return nil // log this someday?
+			}
+			for _, fx := range fi {
+				if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
+					continue
+				}
+				fname := filepath.Join(thedir, fx.Name())
+				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
+				if err != nil {
+					continue // ignore errors, someday log them?
+				}
+				d.syms = append(d.syms, getFileExports(tr)...)
+			}
+			return nil
+		})
+	}
+	g.Wait()
+}
+
+func getFileExports(f *ast.File) []symbol {
+	pkg := f.Name.Name
+	if pkg == "main" {
+		return nil
+	}
+	var ans []symbol
+	// should we look for //go:build ignore?
+	for _, decl := range f.Decls {
+		switch decl := decl.(type) {
+		case *ast.FuncDecl:
+			if decl.Recv != nil {
+				// ignore methods, as we are completing package selections
+				continue
+			}
+			name := decl.Name.Name
+			dtype := decl.Type
+			// not looking at dtype.TypeParams. That is, treating
+			// generic functions just like non-generic ones.
+			sig := dtype.Params
+			kind := "F"
+			result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
+			for _, x := range sig.List {
+				// This code creates a string representing the type.
+				// TODO(pjw): it may be fragile:
+				// 1. x.Type could be nil, perhaps in ill-formed code
+				// 2. ExprString might someday change incompatibly to
+				//    include struct tags, which can be arbitrary strings
+				if x.Type == nil {
+					// Can this happen without a parse error? (Files with parse
+					// errors are ignored in getSymbols)
+					continue // maybe report this someday
+				}
+				tp := types.ExprString(x.Type)
+				if len(tp) == 0 {
+					// Can this happen?
+					continue // maybe report this someday
+				}
+				// This is only safe if ExprString never returns anything with a '$'.
+				// The only place a $ can occur seems to be in a struct tag, which
+				// can be an arbitrary string literal, and ExprString does not presently
+				// print struct tags. So for this to happen the type of a formal parameter
+				// has to be an explicit struct, e.g. foo(x struct{a int "$"}), and ExprString
+				// would have to show the struct tag. Even testing for this case seems
+				// a waste of effort, but let's not ignore such pathologies.
+				if strings.Contains(tp, "$") {
+					continue
+				}
+				tp = strings.Replace(tp, " ", "$", -1)
+				if len(x.Names) == 0 {
+					result = append(result, "_")
+					result = append(result, tp)
+				} else {
+					for _, y := range x.Names {
+						result = append(result, y.Name)
+						result = append(result, tp)
+					}
+				}
+			}
+			sigs := strings.Join(result, " ")
+			if s := newsym(pkg, name, kind, sigs); s != nil {
+				ans = append(ans, *s)
+			}
+		case *ast.GenDecl:
+			switch decl.Tok {
+			case token.CONST, token.VAR:
+				tp := "V"
+				if decl.Tok == token.CONST {
+					tp = "C"
+				}
+				for _, sp := range decl.Specs {
+					for _, x := range sp.(*ast.ValueSpec).Names {
+						if s := newsym(pkg, x.Name, tp, ""); s != nil {
+							ans = append(ans, *s)
+						}
+					}
+				}
+			case token.TYPE:
+				for _, sp := range decl.Specs {
+					if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, "T", ""); s != nil {
+						ans = append(ans, *s)
+					}
+				}
+			}
+		}
+	}
+	return ans
+}
+
+func newsym(pkg, name, kind, sig string) *symbol {
+	if len(name) == 0 || !ast.IsExported(name) {
+		return nil
+	}
+	sym := symbol{pkg: pkg, name: name, kind: kind, sig: sig}
+	return &sym
+}
+
+// return the package name and the encoded strings for the symbols.
+// If there are multiple packages, choose one arbitrarily;
+// the returned slice is sorted lexicographically.
+func processSyms(syms []symbol) (string, []string) {
+	if len(syms) == 0 {
+		return "", nil
+	}
+	slices.SortFunc(syms, func(l, r symbol) int {
+		return strings.Compare(l.name, r.name)
+	})
+	pkg := syms[0].pkg
+	var names []string
+	for _, s := range syms {
+		var nx string
+		if s.pkg == pkg {
+			if s.sig != "" {
+				nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
+			} else {
+				nx = fmt.Sprintf("%s %s", s.name, s.kind)
+			}
+			names = append(names, nx)
+		} else {
+			continue // PJW: do we want to keep track of these?
+		}
+	}
+	return pkg, names
+}
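
The encoding that getFileExports and processSyms produce can be reproduced in a few lines. This sketch (sample source and output are illustrative) shows how one exported function becomes a "name F <results> (<arg> <type>)*" string, with '$' standing in for spaces inside types:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"strings"
)

const src = `package demo

func Watch(ch chan struct{}, n int) error { return nil }`

func main() {
	f, err := parser.ParseFile(token.NewFileSet(), "demo.go", src, parser.SkipObjectResolution)
	if err != nil {
		panic(err)
	}
	decl := f.Decls[0].(*ast.FuncDecl)

	// Mirror getFileExports: the result count comes first, then (name, type)
	// pairs, with spaces inside each type replaced by '$'.
	parts := []string{fmt.Sprintf("%d", decl.Type.Results.NumFields())}
	for _, field := range decl.Type.Params.List {
		tp := strings.ReplaceAll(types.ExprString(field.Type), " ", "$")
		for _, name := range field.Names {
			parts = append(parts, name.Name, tp)
		}
	}
	fmt.Printf("%s F %s\n", decl.Name.Name, strings.Join(parts, " "))
	// Prints: Watch F 1 ch chan$struct{} n int
}
```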
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/modindex/types.go b/hack/tools/vendor/golang.org/x/tools/internal/modindex/types.go
new file mode 100644
index 0000000000..ece4488630
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/modindex/types.go
@@ -0,0 +1,25 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modindex
+
+import (
+	"strings"
+)
+
+// some special types to avoid confusion
+
+// distinguish various types of directory names. It's easy to get confused.
+type Abspath string // absolute paths
+type Relpath string // paths with GOMODCACHE prefix removed
+
+func toRelpath(cachedir Abspath, s string) Relpath {
+	if strings.HasPrefix(s, string(cachedir)) {
+		if s == string(cachedir) {
+			return Relpath("")
+		}
+		return Relpath(s[len(cachedir)+1:])
+	}
+	return Relpath(s)
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/hack/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index 44719de173..66e69b4389 100644
--- a/hack/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/hack/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -5,7 +5,6 @@
 // Package packagesinternal exposes internal-only fields from go/packages.
 package packagesinternal
 
-var GetForTest = func(p interface{}) string { return "" }
 var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
 
 type PackageError struct {
@@ -16,7 +15,6 @@ type PackageError struct {
 
 var TypecheckCgo int
 var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
-var ForTest int    // must be set as a LoadMode to call GetForTest
 
 var SetModFlag = func(config interface{}, value string) {}
 var SetModFile = func(config interface{}, value string) {}
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go b/hack/tools/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go
new file mode 100644
index 0000000000..949f278161
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import "syscall"
+
+// The robustio package is copied from cmd/go/internal/robustio, a package used
+// by the go command to retry known flaky operations on certain operating systems.
+
+//go:generate go run copyfiles.go
+
+// Since the gopls module cannot access internal/syscall/windows, copy a
+// necessary constant.
+const ERROR_SHARING_VIOLATION syscall.Errno = 32
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio.go b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio.go
new file mode 100644
index 0000000000..0a559fc9b8
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio.go
@@ -0,0 +1,69 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package robustio wraps I/O functions that are prone to failure on Windows,
+// transparently retrying errors up to an arbitrary timeout.
+//
+// Errors are classified heuristically and retries are bounded, so the functions
+// in this package do not completely eliminate spurious errors. However, they do
+// significantly reduce the rate of failure in practice.
+package robustio
+
+import "time"
+
+// Rename is like os.Rename, but on Windows retries errors that may occur if the
+// file is concurrently read or overwritten.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func Rename(oldpath, newpath string) error {
+	return rename(oldpath, newpath)
+}
+
+// ReadFile is like os.ReadFile, but on Windows retries errors that may
+// occur if the file is concurrently replaced.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func ReadFile(filename string) ([]byte, error) {
+	return readFile(filename)
+}
+
+// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur
+// if an executable file in the directory has recently been executed.
+//
+// (See golang.org/issue/19491.)
+func RemoveAll(path string) error {
+	return removeAll(path)
+}
+
+// IsEphemeralError reports whether err is one of the errors that the functions
+// in this package attempt to mitigate.
+//
+// Errors considered ephemeral include:
+//   - syscall.ERROR_ACCESS_DENIED
+//   - syscall.ERROR_FILE_NOT_FOUND
+//   - internal/syscall/windows.ERROR_SHARING_VIOLATION
+//
+// This set may be expanded in the future; programs must not rely on the
+// non-ephemerality of any given error.
+func IsEphemeralError(err error) bool {
+	return isEphemeralError(err)
+}
+
+// A FileID uniquely identifies a file in the file system.
+//
+// If GetFileID(name1) returns the same ID as GetFileID(name2), the two file
+// names denote the same file.
+// A FileID is comparable, and thus suitable for use as a map key.
+type FileID struct {
+	device, inode uint64
+}
+
+// GetFileID returns the file system's identifier for the file, and its
+// modification time.
+// Like os.Stat, it reads through symbolic links.
+func GetFileID(filename string) (FileID, time.Time, error) { return getFileID(filename) }
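
The exported surface above is deliberately small; a hedged usage sketch (file names invented; robustio is internal to x/tools, so the import is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/internal/robustio" // internal package; import shown for illustration only
)

func main() {
	// On Windows (and partly Darwin) these wrappers retry ephemeral errors;
	// on other platforms they fall through to the os package directly.
	if err := robustio.Rename("old.txt", "new.txt"); err != nil {
		log.Fatal(err)
	}
	data, err := robustio.ReadFile("new.txt")
	if err != nil {
		log.Fatal(err)
	}

	// FileID identifies the file independently of its name, so two names
	// can be checked for referring to the same underlying file.
	id, mtime, err := robustio.GetFileID("new.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes, id=%v, modified %v\n", len(data), id, mtime)
}
```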
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go
new file mode 100644
index 0000000000..99fd8ebc2f
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import (
+	"errors"
+	"syscall"
+)
+
+const errFileNotFound = syscall.ENOENT
+
+// isEphemeralError returns true if err may be resolved by waiting.
+func isEphemeralError(err error) bool {
+	var errno syscall.Errno
+	if errors.As(err, &errno) {
+		return errno == errFileNotFound
+	}
+	return false
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go
new file mode 100644
index 0000000000..d5c241857b
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go
@@ -0,0 +1,92 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows || darwin
+// +build windows darwin
+
+package robustio
+
+import (
+	"errors"
+	"math/rand"
+	"os"
+	"syscall"
+	"time"
+)
+
+const arbitraryTimeout = 2000 * time.Millisecond
+
+// retry retries ephemeral errors from f up to an arbitrary timeout
+// to work around filesystem flakiness on Windows and Darwin.
+func retry(f func() (err error, mayRetry bool)) error {
+	var (
+		bestErr     error
+		lowestErrno syscall.Errno
+		start       time.Time
+		nextSleep   time.Duration = 1 * time.Millisecond
+	)
+	for {
+		err, mayRetry := f()
+		if err == nil || !mayRetry {
+			return err
+		}
+
+		var errno syscall.Errno
+		if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) {
+			bestErr = err
+			lowestErrno = errno
+		} else if bestErr == nil {
+			bestErr = err
+		}
+
+		if start.IsZero() {
+			start = time.Now()
+		} else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout {
+			break
+		}
+		time.Sleep(nextSleep)
+		nextSleep += time.Duration(rand.Int63n(int64(nextSleep)))
+	}
+
+	return bestErr
+}
+
+// rename is like os.Rename, but retries ephemeral errors.
+//
+// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with
+// MOVEFILE_REPLACE_EXISTING.
+//
+// Windows also provides a different system call, ReplaceFile,
+// that provides similar semantics, but perhaps preserves more metadata. (The
+// documentation on the differences between the two is very sparse.)
+//
+// Empirical error rates with MoveFileEx are lower under modest concurrency, so
+// for now we're sticking with what the os package already provides.
+func rename(oldpath, newpath string) (err error) {
+	return retry(func() (err error, mayRetry bool) {
+		err = os.Rename(oldpath, newpath)
+		return err, isEphemeralError(err)
+	})
+}
+
+// readFile is like os.ReadFile, but retries ephemeral errors.
+func readFile(filename string) ([]byte, error) {
+	var b []byte
+	err := retry(func() (err error, mayRetry bool) {
+		b, err = os.ReadFile(filename)
+
+		// Unlike in rename, we do not retry errFileNotFound here: it can occur
+		// as a spurious error, but the file may also genuinely not exist, so the
+		// increase in robustness is probably not worth the extra latency.
+		return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound)
+	})
+	return b, err
+}
+
+func removeAll(path string) error {
+	return retry(func() (err error, mayRetry bool) {
+		err = os.RemoveAll(path)
+		return err, isEphemeralError(err)
+	})
+}
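
The retry schedule above starts at 1ms and multiplies each sleep by a random factor in [1, 2). A tiny standalone simulation (it approximates the timeout check, which in the real code uses time.Since) makes the shape visible:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	const arbitraryTimeout = 2000 * time.Millisecond
	next := 1 * time.Millisecond
	var elapsed time.Duration
	for attempt := 0; elapsed+next < arbitraryTimeout; attempt++ {
		fmt.Printf("attempt %d: sleep %v\n", attempt, next)
		elapsed += next
		// Same growth rule as retry: add a random duration in [0, next).
		next += time.Duration(rand.Int63n(int64(next)))
	}
	// The geometric growth means the loop gives up after a dozen or two
	// attempts, returning the "best" (lowest-errno) error seen.
}
```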
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_other.go b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_other.go
new file mode 100644
index 0000000000..3a20cac6cf
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_other.go
@@ -0,0 +1,28 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows && !darwin
+// +build !windows,!darwin
+
+package robustio
+
+import (
+	"os"
+)
+
+func rename(oldpath, newpath string) error {
+	return os.Rename(oldpath, newpath)
+}
+
+func readFile(filename string) ([]byte, error) {
+	return os.ReadFile(filename)
+}
+
+func removeAll(path string) error {
+	return os.RemoveAll(path)
+}
+
+func isEphemeralError(err error) bool {
+	return false
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go
new file mode 100644
index 0000000000..9fa4cacb5a
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build plan9
+// +build plan9
+
+package robustio
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+func getFileID(filename string) (FileID, time.Time, error) {
+	fi, err := os.Stat(filename)
+	if err != nil {
+		return FileID{}, time.Time{}, err
+	}
+	dir := fi.Sys().(*syscall.Dir)
+	return FileID{
+		device: uint64(dir.Type)<<32 | uint64(dir.Dev),
+		inode:  dir.Qid.Path,
+	}, fi.ModTime(), nil
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go
new file mode 100644
index 0000000000..cf74865d0b
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows && !plan9
+// +build !windows,!plan9
+
+package robustio
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+func getFileID(filename string) (FileID, time.Time, error) {
+	fi, err := os.Stat(filename)
+	if err != nil {
+		return FileID{}, time.Time{}, err
+	}
+	stat := fi.Sys().(*syscall.Stat_t)
+	return FileID{
+		device: uint64(stat.Dev), // (int32 on darwin, uint64 on linux)
+		inode:  stat.Ino,
+	}, fi.ModTime(), nil
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go
new file mode 100644
index 0000000000..616c32883d
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go
@@ -0,0 +1,51 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import (
+	"errors"
+	"syscall"
+	"time"
+)
+
+const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND
+
+// isEphemeralError returns true if err may be resolved by waiting.
+func isEphemeralError(err error) bool {
+	var errno syscall.Errno
+	if errors.As(err, &errno) {
+		switch errno {
+		case syscall.ERROR_ACCESS_DENIED,
+			syscall.ERROR_FILE_NOT_FOUND,
+			ERROR_SHARING_VIOLATION:
+			return true
+		}
+	}
+	return false
+}
+
+// Note: it may be convenient to have this helper return fs.FileInfo, but
+// implementing this is actually quite involved on Windows. Since we only
+// currently use mtime, keep it simple.
+func getFileID(filename string) (FileID, time.Time, error) {
+	filename16, err := syscall.UTF16PtrFromString(filename)
+	if err != nil {
+		return FileID{}, time.Time{}, err
+	}
+	h, err := syscall.CreateFile(filename16, 0, 0, nil, syscall.OPEN_EXISTING, uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS), 0)
+	if err != nil {
+		return FileID{}, time.Time{}, err
+	}
+	defer syscall.CloseHandle(h)
+	var i syscall.ByHandleFileInformation
+	if err := syscall.GetFileInformationByHandle(h, &i); err != nil {
+		return FileID{}, time.Time{}, err
+	}
+	mtime := time.Unix(0, i.LastWriteTime.Nanoseconds())
+	return FileID{
+		device: uint64(i.VolumeSerialNumber),
+		inode:  uint64(i.FileIndexHigh)<<32 | uint64(i.FileIndexLow),
+	}, mtime, nil
+}
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/hack/tools/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
new file mode 100644
index 0000000000..1066980649
--- /dev/null
+++ b/hack/tools/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -0,0 +1,282 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"go/types"
+	"strconv"
+	"strings"
+)
+
+// ZeroString returns the string representation of the "zero" value of the type t.
+// This string can be used on the right-hand side of an assignment where the
+// left-hand side has that explicit type.
+// Exception: This does not apply to tuples. Their string representation is
+// informational only and cannot be used in an assignment.
+// When assigning to a wider type (such as 'any'), it's the caller's
+// responsibility to handle any necessary type conversions.
+// See [ZeroExpr] for a variant that returns an [ast.Expr].
+func ZeroString(t types.Type, qf types.Qualifier) string {
+	switch t := t.(type) {
+	case *types.Basic:
+		switch {
+		case t.Info()&types.IsBoolean != 0:
+			return "false"
+		case t.Info()&types.IsNumeric != 0:
+			return "0"
+		case t.Info()&types.IsString != 0:
+			return `""`
+		case t.Kind() == types.UnsafePointer:
+			fallthrough
+		case t.Kind() == types.UntypedNil:
+			return "nil"
+		default:
+			panic(fmt.Sprint("ZeroString for unexpected type:", t))
+		}
+
+	case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
+		return "nil"
+
+	case *types.Named, *types.Alias:
+		switch under := t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return types.TypeString(t, qf) + "{}"
+		default:
+			return ZeroString(under, qf)
+		}
+
+	case *types.Array, *types.Struct:
+		return types.TypeString(t, qf) + "{}"
+
+	case *types.TypeParam:
+		// Assumes func new is not shadowed.
+		return "*new(" + types.TypeString(t, qf) + ")"
+
+	case *types.Tuple:
+		// Tuples are not normal values.
+		// We currently format them as "(t[0], ..., t[n])". Could be something else.
+		components := make([]string, t.Len())
+		for i := 0; i < t.Len(); i++ {
+			components[i] = ZeroString(t.At(i).Type(), qf)
+		}
+		return "(" + strings.Join(components, ", ") + ")"
+
+	case *types.Union:
+		// Variables of these types cannot be created, so it makes
+		// no sense to ask for their zero value.
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	default:
+		panic(t) // unreachable.
+	}
+}
+
+// ZeroExpr returns the ast.Expr representation of the "zero" value of the type t.
+// ZeroExpr is defined for types that are suitable for variables.
+// It may panic for other types such as Tuple or Union.
+// See [ZeroString] for a variant that returns a string.
+func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+	switch t := typ.(type) {
+	case *types.Basic:
+		switch {
+		case t.Info()&types.IsBoolean != 0:
+			return &ast.Ident{Name: "false"}
+		case t.Info()&types.IsNumeric != 0:
+			return &ast.BasicLit{Kind: token.INT, Value: "0"}
+		case t.Info()&types.IsString != 0:
+			return &ast.BasicLit{Kind: token.STRING, Value: `""`}
+		case t.Kind() == types.UnsafePointer:
+			fallthrough
+		case t.Kind() == types.UntypedNil:
+			return ast.NewIdent("nil")
+		default:
+			panic(fmt.Sprint("ZeroExpr for unexpected type:", t))
+		}
+
+	case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
+		return ast.NewIdent("nil")
+
+	case *types.Named, *types.Alias:
+		switch under := t.Underlying().(type) {
+		case *types.Struct, *types.Array:
+			return &ast.CompositeLit{
+				Type: TypeExpr(f, pkg, typ),
+			}
+		default:
+			return ZeroExpr(f, pkg, under)
+		}
+
+	case *types.Array, *types.Struct:
+		return &ast.CompositeLit{
+			Type: TypeExpr(f, pkg, typ),
+		}
+
+	case *types.TypeParam:
+		return &ast.StarExpr{ // *new(T)
+			X: &ast.CallExpr{
+				// Assumes func new is not shadowed.
+				Fun: ast.NewIdent("new"),
+				Args: []ast.Expr{
+					ast.NewIdent(t.Obj().Name()),
+				},
+			},
+		}
+
+	case *types.Tuple:
+		// Unlike ZeroString, there is no ast.Expr that can express a tuple as
+		// "(t[0], ..., t[n])".
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	case *types.Union:
+		// Variables of these types cannot be created, so it makes
+		// no sense to ask for their zero value.
+		panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+	default:
+		panic(t) // unreachable.
+	}
+}
+
+// IsZeroExpr uses simple syntactic heuristics to report whether expr
+// is an obvious zero value, such as 0, "", nil, or false.
+// It cannot do better without type information.
+func IsZeroExpr(expr ast.Expr) bool {
+	switch e := expr.(type) {
+	case *ast.BasicLit:
+		return e.Value == "0" || e.Value == `""`
+	case *ast.Ident:
+		return e.Name == "nil" || e.Name == "false"
+	default:
+		return false
+	}
+}
+
+// TypeExpr returns syntax for the specified type. References to named types
+// from packages other than pkg are qualified by an appropriate package name, as
+// defined by the import environment of file.
+// It may panic for types such as Tuple or Union.
+func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
+	switch t := typ.(type) {
+	case *types.Basic:
+		switch t.Kind() {
+		case types.UnsafePointer:
+			// TODO(hxjiang): replace the implementation with types.Qualifier.
+			return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
+		default:
+			return ast.NewIdent(t.Name())
+		}
+
+	case *types.Pointer:
+		return &ast.UnaryExpr{
+			Op: token.MUL,
+			X:  TypeExpr(f, pkg, t.Elem()),
+		}
+
+	case *types.Array:
+		return &ast.ArrayType{
+			Len: &ast.BasicLit{
+				Kind:  token.INT,
+				Value: fmt.Sprintf("%d", t.Len()),
+			},
+			Elt: TypeExpr(f, pkg, t.Elem()),
+		}
+
+	case *types.Slice:
+		return &ast.ArrayType{
+			Elt: TypeExpr(f, pkg, t.Elem()),
+		}
+
+	case *types.Map:
+		return &ast.MapType{
+			Key:   TypeExpr(f, pkg, t.Key()),
+			Value: TypeExpr(f, pkg, t.Elem()),
+		}
+
+	case *types.Chan:
+		dir := ast.ChanDir(t.Dir())
+		if t.Dir() == types.SendRecv {
+			dir = ast.SEND | ast.RECV
+		}
+		return &ast.ChanType{
+			Dir:   dir,
+			Value: TypeExpr(f, pkg, t.Elem()),
+		}
+
+	case *types.Signature:
+		var params []*ast.Field
+		for i := 0; i < t.Params().Len(); i++ {
+			params = append(params, &ast.Field{
+				Type: TypeExpr(f, pkg, t.Params().At(i).Type()),
+				Names: []*ast.Ident{
+					{
+						Name: t.Params().At(i).Name(),
+					},
+				},
+			})
+		}
+		if t.Variadic() {
+			last := params[len(params)-1]
+			last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
+		}
+		var returns []*ast.Field
+		for i := 0; i < t.Results().Len(); i++ {
+			returns = append(returns, &ast.Field{
+				Type: TypeExpr(f, pkg, t.Results().At(i).Type()),
+			})
+		}
+		return &ast.FuncType{
+			Params: &ast.FieldList{
+				List: params,
+			},
+			Results: &ast.FieldList{
+				List: returns,
+			},
+		}
+
+	case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam}
+		switch t.Obj().Pkg() {
+		case pkg, nil:
+			return ast.NewIdent(t.Obj().Name())
+		}
+		pkgName := t.Obj().Pkg().Name()
+
+		// TODO(hxjiang): replace the implementation with types.Qualifier.
+		// If the file already imports the package under another name, use that.
+		for _, cand := range f.Imports {
+			if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() {
+				if cand.Name != nil && cand.Name.Name != "" {
+					pkgName = cand.Name.Name
+				}
+			}
+		}
+		if pkgName == "." {
+			return ast.NewIdent(t.Obj().Name())
+		}
+		return &ast.SelectorExpr{
+			X:   ast.NewIdent(pkgName),
+			Sel: ast.NewIdent(t.Obj().Name()),
+		}
+
+	case *types.Struct:
+		return ast.NewIdent(t.String())
+
+	case *types.Interface:
+		return ast.NewIdent(t.String())
+
+	case *types.Union:
+		// TODO(hxjiang): handle the union through syntax (~A | ... | ~Z).
+		// Remove nil check when calling typesinternal.TypeExpr.
+		return nil
+
+	case *types.Tuple:
+		panic("invalid input type types.Tuple")
+
+	default:
+		panic("unreachable")
+	}
+}
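
For a sense of ZeroString's output, here is a compressed re-implementation of its main arms (a sketch standing in for the internal function, since typesinternal is not importable outside x/tools):

```go
package main

import (
	"fmt"
	"go/types"
)

// zeroString condenses the Basic/Pointer/Struct arms of ZeroString above.
func zeroString(t types.Type) string {
	switch t := t.(type) {
	case *types.Basic:
		switch {
		case t.Info()&types.IsBoolean != 0:
			return "false"
		case t.Info()&types.IsNumeric != 0:
			return "0"
		case t.Info()&types.IsString != 0:
			return `""`
		default:
			return "nil"
		}
	case *types.Pointer, *types.Slice, *types.Map, *types.Chan, *types.Signature, *types.Interface:
		return "nil"
	case *types.Struct, *types.Array:
		return types.TypeString(t, nil) + "{}"
	default: // e.g. type parameters: *new(T)
		return "*new(" + types.TypeString(t, nil) + ")"
	}
}

func main() {
	for _, t := range []types.Type{
		types.Typ[types.Int],                   // => 0
		types.Typ[types.String],                // => ""
		types.NewPointer(types.Typ[types.Int]), // => nil
		types.NewStruct(nil, nil),              // => struct{}{}
	} {
		fmt.Println(t, "=>", zeroString(t))
	}
}
```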
diff --git a/hack/tools/vendor/golang.org/x/tools/internal/versions/constraint.go b/hack/tools/vendor/golang.org/x/tools/internal/versions/constraint.go
deleted file mode 100644
index 179063d484..0000000000
--- a/hack/tools/vendor/golang.org/x/tools/internal/versions/constraint.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package versions
-
-import "go/build/constraint"
-
-// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+).
-// Otherwise nil.
-//
-// Deprecate once x/tools is after go1.21.
-var ConstraintGoVersion func(x constraint.Expr) string
diff --git a/hack/tools/vendor/golang.org/x/xerrors/README b/hack/tools/vendor/golang.org/x/xerrors/README
deleted file mode 100644
index aac7867a56..0000000000
--- a/hack/tools/vendor/golang.org/x/xerrors/README
+++ /dev/null
@@ -1,2 +0,0 @@
-This repository holds the transition packages for the new Go 1.13 error values.
-See golang.org/design/29934-error-values.
diff --git a/hack/tools/vendor/golang.org/x/xerrors/adaptor.go b/hack/tools/vendor/golang.org/x/xerrors/adaptor.go
deleted file mode 100644
index 4317f24833..0000000000
--- a/hack/tools/vendor/golang.org/x/xerrors/adaptor.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xerrors
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"reflect"
-	"strconv"
-)
-
-// FormatError calls the FormatError method of f with an errors.Printer
-// configured according to s and verb, and writes the result to s.
-func FormatError(f Formatter, s fmt.State, verb rune) {
-	// Assuming this function is only called from the Format method, and given
-	// that FormatError takes precedence over Format, it cannot be called from
-	// any package that supports errors.Formatter. It is therefore safe to
-	// disregard that State may be a specific printer implementation and use one
-	// of our choice instead.
-
-	// limitations: does not support printing error as Go struct.
-
-	var (
-		sep    = " " // separator before next error
-		p      = &state{State: s}
-		direct = true
-	)
-
-	var err error = f
-
-	switch verb {
-	// Note that this switch must match the preference order
-	// for ordinary string printing (%#v before %+v, and so on).
-
-	case 'v':
-		if s.Flag('#') {
-			if stringer, ok := err.(fmt.GoStringer); ok {
-				io.WriteString(&p.buf, stringer.GoString())
-				goto exit
-			}
-			// proceed as if it were %v
-		} else if s.Flag('+') {
-			p.printDetail = true
-			sep = "\n  - "
-		}
-	case 's':
-	case 'q', 'x', 'X':
-		// Use an intermediate buffer in the rare cases that precision,
-		// truncation, or one of the alternative verbs (q, x, and X) are
-		// specified.
-		direct = false
-
-	default:
-		p.buf.WriteString("%!")
-		p.buf.WriteRune(verb)
-		p.buf.WriteByte('(')
-		switch {
-		case err != nil:
-			p.buf.WriteString(reflect.TypeOf(f).String())
-		default:
-			p.buf.WriteString("<nil>")
-		}
-		p.buf.WriteByte(')')
-		io.Copy(s, &p.buf)
-		return
-	}
-
-loop:
-	for {
-		switch v := err.(type) {
-		case Formatter:
-			err = v.FormatError((*printer)(p))
-		case fmt.Formatter:
-			v.Format(p, 'v')
-			break loop
-		default:
-			io.WriteString(&p.buf, v.Error())
-			break loop
-		}
-		if err == nil {
-			break
-		}
-		if p.needColon || !p.printDetail {
-			p.buf.WriteByte(':')
-			p.needColon = false
-		}
-		p.buf.WriteString(sep)
-		p.inDetail = false
-		p.needNewline = false
-	}
-
-exit:
-	width, okW := s.Width()
-	prec, okP := s.Precision()
-
-	if !direct || (okW && width > 0) || okP {
-		// Construct format string from State s.
-		format := []byte{'%'}
-		if s.Flag('-') {
-			format = append(format, '-')
-		}
-		if s.Flag('+') {
-			format = append(format, '+')
-		}
-		if s.Flag(' ') {
-			format = append(format, ' ')
-		}
-		if okW {
-			format = strconv.AppendInt(format, int64(width), 10)
-		}
-		if okP {
-			format = append(format, '.')
-			format = strconv.AppendInt(format, int64(prec), 10)
-		}
-		format = append(format, string(verb)...)
-		fmt.Fprintf(s, string(format), p.buf.String())
-	} else {
-		io.Copy(s, &p.buf)
-	}
-}
-
-var detailSep = []byte("\n    ")
-
-// state tracks error printing state. It implements fmt.State.
-type state struct {
-	fmt.State
-	buf bytes.Buffer
-
-	printDetail bool
-	inDetail    bool
-	needColon   bool
-	needNewline bool
-}
-
-func (s *state) Write(b []byte) (n int, err error) {
-	if s.printDetail {
-		if len(b) == 0 {
-			return 0, nil
-		}
-		if s.inDetail && s.needColon {
-			s.needNewline = true
-			if b[0] == '\n' {
-				b = b[1:]
-			}
-		}
-		k := 0
-		for i, c := range b {
-			if s.needNewline {
-				if s.inDetail && s.needColon {
-					s.buf.WriteByte(':')
-					s.needColon = false
-				}
-				s.buf.Write(detailSep)
-				s.needNewline = false
-			}
-			if c == '\n' {
-				s.buf.Write(b[k:i])
-				k = i + 1
-				s.needNewline = true
-			}
-		}
-		s.buf.Write(b[k:])
-		if !s.inDetail {
-			s.needColon = true
-		}
-	} else if !s.inDetail {
-		s.buf.Write(b)
-	}
-	return len(b), nil
-}
-
-// printer wraps a state to implement an xerrors.Printer.
-type printer state
-
-func (s *printer) Print(args ...interface{}) {
-	if !s.inDetail || s.printDetail {
-		fmt.Fprint((*state)(s), args...)
-	}
-}
-
-func (s *printer) Printf(format string, args ...interface{}) {
-	if !s.inDetail || s.printDetail {
-		fmt.Fprintf((*state)(s), format, args...)
-	}
-}
-
-func (s *printer) Detail() bool {
-	s.inDetail = true
-	return s.printDetail
-}
diff --git a/hack/tools/vendor/golang.org/x/xerrors/codereview.cfg b/hack/tools/vendor/golang.org/x/xerrors/codereview.cfg
deleted file mode 100644
index 3f8b14b64e..0000000000
--- a/hack/tools/vendor/golang.org/x/xerrors/codereview.cfg
+++ /dev/null
@@ -1 +0,0 @@
-issuerepo: golang/go
diff --git a/hack/tools/vendor/golang.org/x/xerrors/doc.go b/hack/tools/vendor/golang.org/x/xerrors/doc.go
deleted file mode 100644
index 2ef99f5a87..0000000000
--- a/hack/tools/vendor/golang.org/x/xerrors/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package xerrors implements functions to manipulate errors.
-//
-// This package is based on the Go 2 proposal for error values:
-//
-//	https://golang.org/design/29934-error-values
-//
-// These functions were incorporated into the standard library's errors package
-// in Go 1.13:
-// - Is
-// - As
-// - Unwrap
-//
-// Also, Errorf's %w verb was incorporated into fmt.Errorf.
-//
-// Use this package to get equivalent behavior in all supported Go versions.
-//
-// No other features of this package were included in Go 1.13, and at present
-// there are no plans to include any of them.
-package xerrors // import "golang.org/x/xerrors"
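
As the removed doc comment notes, Is, As, Unwrap, and the %w verb have lived in the standard library since Go 1.13, which is what makes dropping the vendored copy safe. A minimal stdlib-only equivalent:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("missing.txt")
	// fmt.Errorf's %w replaces xerrors.Errorf for wrapping.
	wrapped := fmt.Errorf("loading config: %w", err)

	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true
	var pathErr *fs.PathError
	fmt.Println(errors.As(wrapped, &pathErr))  // true
	fmt.Println(errors.Unwrap(wrapped) == err) // true
}
```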
diff --git a/hack/tools/vendor/golang.org/x/xerrors/errors.go b/hack/tools/vendor/golang.org/x/xerrors/errors.go
deleted file mode 100644
index e88d3772d8..0000000000
--- a/hack/tools/vendor/golang.org/x/xerrors/errors.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xerrors
-
-import "fmt"
-
-// errorString is a trivial implementation of error.
-type errorString struct {
-	s     string
-	frame Frame
-}
-
-// New returns an error that formats as the given text.
-//
-// The returned error contains a Frame set to the caller's location and
-// implements Formatter to show this information when printed with details.
-func New(text string) error {
-	return &errorString{text, Caller(1)}
-}
-
-func (e *errorString) Error() string {
-	return e.s
-}
-
-func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) }
-
-func (e *errorString) FormatError(p Printer) (next error) {
-	p.Print(e.s)
-	e.frame.Format(p)
-	return nil
-}
diff --git a/hack/tools/vendor/golang.org/x/xerrors/fmt.go b/hack/tools/vendor/golang.org/x/xerrors/fmt.go
deleted file mode 100644
index 27a5d70bd6..0000000000
--- a/hack/tools/vendor/golang.org/x/xerrors/fmt.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xerrors
-
-import (
-	"fmt"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-
-	"golang.org/x/xerrors/internal"
-)
-
-const percentBangString = "%!"
-
-// Errorf formats according to a format specifier and returns the string as a
-// value that satisfies error.
-//
-// The returned error includes the file and line number of the caller when
-// formatted with additional detail enabled. If the last argument is an error
-// the returned error's Format method will return it if the format string ends
-// with ": %s", ": %v", or ": %w". If the last argument is an error and the
-// format string ends with ": %w", the returned error implements an Unwrap
-// method returning it.
-//
-// If the format specifier includes a %w verb with an error operand in a
-// position other than at the end, the returned error will still implement an
-// Unwrap method returning the operand, but the error's Format method will not
-// return the wrapped error.
-//
-// It is invalid to include more than one %w verb or to supply it with an
-// operand that does not implement the error interface. The %w verb is otherwise
-// a synonym for %v.
-//
-// Note that as of Go 1.13, the fmt.Errorf function will do error formatting,
-// but it will not capture a stack backtrace.
-func Errorf(format string, a ...interface{}) error {
-	format = formatPlusW(format)
-	// Support a ": %[wsv]" suffix, which works well with xerrors.Formatter.
-	wrap := strings.HasSuffix(format, ": %w")
-	idx, format2, ok := parsePercentW(format)
-	percentWElsewhere := !wrap && idx >= 0
-	if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) {
-		err := errorAt(a, len(a)-1)
-		if err == nil {
-			return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)}
-		}
-		// TODO: this is not entirely correct. The error value could be
-		// printed elsewhere in format if it mixes numbered with unnumbered
-		// substitutions. With relatively small changes to doPrintf we can
-		// have it optionally ignore extra arguments and pass the argument
-		// list in its entirety.
-		msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...)
-		frame := Frame{}
-		if internal.EnableTrace {
-			frame = Caller(1)
-		}
-		if wrap {
-			return &wrapError{msg, err, frame}
-		}
-		return &noWrapError{msg, err, frame}
-	}
-	// Support %w anywhere.
-	// TODO: don't repeat the wrapped error's message when %w occurs in the middle.
-	msg := fmt.Sprintf(format2, a...)
-	if idx < 0 {
-		return &noWrapError{msg, nil, Caller(1)}
-	}
-	err := errorAt(a, idx)
-	if !ok || err == nil {
-		// Too many %ws or argument of %w is not an error. Approximate the Go
-		// 1.13 fmt.Errorf message.
-		return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)}
-	}
-	frame := Frame{}
-	if internal.EnableTrace {
-		frame = Caller(1)
-	}
-	return &wrapError{msg, err, frame}
-}
-
-func errorAt(args []interface{}, i int) error {
-	if i < 0 || i >= len(args) {
-		return nil
-	}
-	err, ok := args[i].(error)
-	if !ok {
-		return nil
-	}
-	return err
-}
-
-// formatPlusW is used to avoid the vet check that will barf at %w.
-func formatPlusW(s string) string {
-	return s
-}
-
-// Return the index of the only %w in format, or -1 if none.
-// Also return a rewritten format string with %w replaced by %v, and
-// false if there is more than one %w.
-// TODO: handle "%[N]w".
-func parsePercentW(format string) (idx int, newFormat string, ok bool) {
-	// Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go.
-	idx = -1
-	ok = true
-	n := 0
-	sz := 0
-	var isW bool
-	for i := 0; i < len(format); i += sz {
-		if format[i] != '%' {
-			sz = 1
-			continue
-		}
-		// "%%" is not a format directive.
-		if i+1 < len(format) && format[i+1] == '%' {
-			sz = 2
-			continue
-		}
-		sz, isW = parsePrintfVerb(format[i:])
-		if isW {
-			if idx >= 0 {
-				ok = false
-			} else {
-				idx = n
-			}
-			// "Replace" the last character, the 'w', with a 'v'.
-			p := i + sz - 1
-			format = format[:p] + "v" + format[p+1:]
-		}
-		n++
-	}
-	return idx, format, ok
-}
-
-// Parse the printf verb starting with a % at s[0].
-// Return how many bytes it occupies and whether the verb is 'w'.
-func parsePrintfVerb(s string) (int, bool) {
-	// Assume only that the directive is a sequence of non-letters followed by a single letter.
-	sz := 0
-	var r rune
-	for i := 1; i < len(s); i += sz {
-		r, sz = utf8.DecodeRuneInString(s[i:])
-		if unicode.IsLetter(r) {
-			return i + sz, r == 'w'
-		}
-	}
-	return len(s), false
-}
-
-type noWrapError struct {
-	msg   string
-	err   error
-	frame Frame
-}
-
-func (e *noWrapError) Error() string {
-	return fmt.Sprint(e)
-}
-
-func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) }
-
-func (e *noWrapError) FormatError(p Printer) (next error) {
-	p.Print(e.msg)
-	e.frame.Format(p)
-	return e.err
-}
-
-type wrapError struct {
-	msg   string
-	err   error
-	frame Frame
-}
-
-func (e *wrapError) Error() string {
-	return fmt.Sprint(e)
-}
-
-func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) }
-
-func (e *wrapError) FormatError(p Printer) (next error) {
-	p.Print(e.msg)
-	e.frame.Format(p)
-	return e.err
-}
-
-func (e *wrapError) Unwrap() error {
-	return e.err
-}
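
The wrapping contract the deleted Errorf documents above survives in the standard library, minus the stack capture it mentions. A minimal stdlib-only sketch of the equivalent behavior (illustrative, not part of this patch):

package main

import (
	"errors"
	"fmt"
)

func main() {
	base := errors.New("base failure")

	// As with the deleted xerrors.Errorf, a %w operand makes the result
	// unwrappable; fmt.Errorf just never records a stack frame.
	wrapped := fmt.Errorf("loading config: %w", base)

	fmt.Println(errors.Unwrap(wrapped) == base) // true
	fmt.Println(errors.Is(wrapped, base))       // true
}
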
diff --git a/hack/tools/vendor/golang.org/x/xerrors/format.go b/hack/tools/vendor/golang.org/x/xerrors/format.go
deleted file mode 100644
index 1bc9c26b97..0000000000
--- a/hack/tools/vendor/golang.org/x/xerrors/format.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xerrors
-
-// A Formatter formats error messages.
-type Formatter interface {
-	error
-
-	// FormatError prints the receiver's first error and returns the next error in
-	// the error chain, if any.
-	FormatError(p Printer) (next error)
-}
-
-// A Printer formats error messages.
-//
-// The most common implementation of Printer is the one provided by package fmt
-// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message
-// typically provide their own implementations.
-type Printer interface {
-	// Print appends args to the message output.
-	Print(args ...interface{})
-
-	// Printf writes a formatted string.
-	Printf(format string, args ...interface{})
-
-	// Detail reports whether error detail is requested.
-	// After the first call to Detail, all text written to the Printer
-	// is formatted as additional detail, or ignored when
-	// detail has not been requested.
-	// If Detail returns false, the caller can avoid printing the detail at all.
-	Detail() bool
-}
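
Because Formatter and Printer are pure contracts, a concrete implementation makes the deleted interfaces easier to follow. A sketch of an error type opting into detail output, assuming the golang.org/x/xerrors module itself remains importable (only this vendored copy is removed):

package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

type timeoutError struct{ op string }

func (e *timeoutError) Error() string { return e.op + ": timed out" }

// Format delegates to xerrors.FormatError so that %+v requests detail.
func (e *timeoutError) Format(s fmt.State, v rune) { xerrors.FormatError(e, s, v) }

// FormatError prints the message, then extra detail only when requested.
func (e *timeoutError) FormatError(p xerrors.Printer) (next error) {
	p.Print(e.op + ": timed out")
	if p.Detail() {
		p.Printf("deadline exceeded waiting for %s", e.op)
	}
	return nil
}

func main() {
	fmt.Printf("%+v\n", &timeoutError{op: "dial"})
}
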
diff --git a/hack/tools/vendor/golang.org/x/xerrors/frame.go b/hack/tools/vendor/golang.org/x/xerrors/frame.go
deleted file mode 100644
index 0de628ec50..0000000000
--- a/hack/tools/vendor/golang.org/x/xerrors/frame.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xerrors
-
-import (
-	"runtime"
-)
-
-// A Frame contains part of a call stack.
-type Frame struct {
-	// Make room for three PCs: the one we were asked for, what it called,
-	// and possibly a PC for skipPleaseUseCallersFrames. See:
-	// https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169
-	frames [3]uintptr
-}
-
-// Caller returns a Frame that describes a frame on the caller's stack.
-// The argument skip is the number of frames to skip over.
-// Caller(0) returns the frame for the caller of Caller.
-func Caller(skip int) Frame {
-	var s Frame
-	runtime.Callers(skip+1, s.frames[:])
-	return s
-}
-
-// location reports the file, line, and function of a frame.
-//
-// The returned function may be "" even if file and line are not.
-func (f Frame) location() (function, file string, line int) {
-	frames := runtime.CallersFrames(f.frames[:])
-	if _, ok := frames.Next(); !ok {
-		return "", "", 0
-	}
-	fr, ok := frames.Next()
-	if !ok {
-		return "", "", 0
-	}
-	return fr.Function, fr.File, fr.Line
-}
-
-// Format prints the stack as error detail.
-// It should be called from an error's Format implementation
-// after printing any other error detail.
-func (f Frame) Format(p Printer) {
-	if p.Detail() {
-		function, file, line := f.location()
-		if function != "" {
-			p.Printf("%s\n    ", function)
-		}
-		if file != "" {
-			p.Printf("%s:%d\n", file, line)
-		}
-	}
-}
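
The same three-PC pattern can be reproduced with nothing but the runtime package; a standalone sketch of the lookup that Caller and location performed above (function and variable names here are illustrative):

package main

import (
	"fmt"
	"runtime"
)

// callerLocation resolves a single caller's function, file, and line.
// skip = 0 reports the caller of callerLocation itself.
func callerLocation(skip int) (function, file string, line int) {
	var pcs [3]uintptr // room for the requested PC plus runtime skid, as in Frame
	runtime.Callers(skip+2, pcs[:])
	frames := runtime.CallersFrames(pcs[:])
	fr, ok := frames.Next()
	if !ok {
		return "", "", 0
	}
	return fr.Function, fr.File, fr.Line
}

func main() {
	fn, file, line := callerLocation(0)
	fmt.Printf("%s\n    %s:%d\n", fn, file, line)
}
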
diff --git a/hack/tools/vendor/golang.org/x/xerrors/internal/internal.go b/hack/tools/vendor/golang.org/x/xerrors/internal/internal.go
deleted file mode 100644
index 89f4eca5df..0000000000
--- a/hack/tools/vendor/golang.org/x/xerrors/internal/internal.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package internal
-
-// EnableTrace indicates whether stack information should be recorded in errors.
-var EnableTrace = true
diff --git a/hack/tools/vendor/golang.org/x/xerrors/wrap.go b/hack/tools/vendor/golang.org/x/xerrors/wrap.go
deleted file mode 100644
index 9842758ca7..0000000000
--- a/hack/tools/vendor/golang.org/x/xerrors/wrap.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package xerrors
-
-import (
-	"reflect"
-)
-
-// A Wrapper provides context around another error.
-type Wrapper interface {
-	// Unwrap returns the next error in the error chain.
-	// If there is no next error, Unwrap returns nil.
-	Unwrap() error
-}
-
-// Opaque returns an error with the same error formatting as err
-// but that does not match err and cannot be unwrapped.
-func Opaque(err error) error {
-	return noWrapper{err}
-}
-
-type noWrapper struct {
-	error
-}
-
-func (e noWrapper) FormatError(p Printer) (next error) {
-	if f, ok := e.error.(Formatter); ok {
-		return f.FormatError(p)
-	}
-	p.Print(e.error)
-	return nil
-}
-
-// Unwrap returns the result of calling the Unwrap method on err, if err implements
-// Unwrap. Otherwise, Unwrap returns nil.
-//
-// Deprecated: As of Go 1.13, use errors.Unwrap instead.
-func Unwrap(err error) error {
-	u, ok := err.(Wrapper)
-	if !ok {
-		return nil
-	}
-	return u.Unwrap()
-}
-
-// Is reports whether any error in err's chain matches target.
-//
-// An error is considered to match a target if it is equal to that target or if
-// it implements a method Is(error) bool such that Is(target) returns true.
-//
-// Deprecated: As of Go 1.13, use errors.Is instead.
-func Is(err, target error) bool {
-	if target == nil {
-		return err == target
-	}
-
-	isComparable := reflect.TypeOf(target).Comparable()
-	for {
-		if isComparable && err == target {
-			return true
-		}
-		if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
-			return true
-		}
-		// TODO: consider supporting target.Is(err). This would allow
-		// user-definable predicates, but also may allow for coping with sloppy
-		// APIs, thereby making it easier to get away with them.
-		if err = Unwrap(err); err == nil {
-			return false
-		}
-	}
-}
-
-// As finds the first error in err's chain that matches the type to which target
-// points, and if so, sets the target to its value and returns true. An error
-// matches a type if it is assignable to the target type, or if it has a method
-// As(interface{}) bool such that As(target) returns true. As will panic if target
-// is not a non-nil pointer to a type which implements error or is of interface type.
-//
-// The As method should set the target to its value and return true if err
-// matches the type to which target points.
-//
-// Deprecated: As of Go 1.13, use errors.As instead.
-func As(err error, target interface{}) bool {
-	if target == nil {
-		panic("errors: target cannot be nil")
-	}
-	val := reflect.ValueOf(target)
-	typ := val.Type()
-	if typ.Kind() != reflect.Ptr || val.IsNil() {
-		panic("errors: target must be a non-nil pointer")
-	}
-	if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) {
-		panic("errors: *target must be interface or implement error")
-	}
-	targetType := typ.Elem()
-	for err != nil {
-		if reflect.TypeOf(err).AssignableTo(targetType) {
-			val.Elem().Set(reflect.ValueOf(err))
-			return true
-		}
-		if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) {
-			return true
-		}
-		err = Unwrap(err)
-	}
-	return false
-}
-
-var errorType = reflect.TypeOf((*error)(nil)).Elem()
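
As the deprecation notes say, errors.Is and errors.As supersede these helpers. A small sketch of the drop-in replacements against a real *fs.PathError chain:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("does-not-exist")

	// errors.Is walks the Unwrap chain, like the deleted Is above.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true

	// errors.As finds the first chain entry assignable to the target,
	// like the deleted As above.
	var pathErr *fs.PathError
	if errors.As(err, &pathErr) {
		fmt.Println(pathErr.Path) // "does-not-exist"
	}
}
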
diff --git a/hack/tools/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go
new file mode 100644
index 0000000000..9f81dbcd86
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go
@@ -0,0 +1,1664 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/api/expr/v1alpha1/checked.proto
+
+package expr
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// CEL primitive types.
+type Type_PrimitiveType int32
+
+const (
+	// Unspecified type.
+	Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0
+	// Boolean type.
+	Type_BOOL Type_PrimitiveType = 1
+	// Int64 type.
+	//
+	// Proto-based integer values are widened to int64.
+	Type_INT64 Type_PrimitiveType = 2
+	// Uint64 type.
+	//
+	// Proto-based unsigned integer values are widened to uint64.
+	Type_UINT64 Type_PrimitiveType = 3
+	// Double type.
+	//
+	// Proto-based float values are widened to double values.
+	Type_DOUBLE Type_PrimitiveType = 4
+	// String type.
+	Type_STRING Type_PrimitiveType = 5
+	// Bytes type.
+	Type_BYTES Type_PrimitiveType = 6
+)
+
+// Enum value maps for Type_PrimitiveType.
+var (
+	Type_PrimitiveType_name = map[int32]string{
+		0: "PRIMITIVE_TYPE_UNSPECIFIED",
+		1: "BOOL",
+		2: "INT64",
+		3: "UINT64",
+		4: "DOUBLE",
+		5: "STRING",
+		6: "BYTES",
+	}
+	Type_PrimitiveType_value = map[string]int32{
+		"PRIMITIVE_TYPE_UNSPECIFIED": 0,
+		"BOOL":                       1,
+		"INT64":                      2,
+		"UINT64":                     3,
+		"DOUBLE":                     4,
+		"STRING":                     5,
+		"BYTES":                      6,
+	}
+)
+
+func (x Type_PrimitiveType) Enum() *Type_PrimitiveType {
+	p := new(Type_PrimitiveType)
+	*p = x
+	return p
+}
+
+func (x Type_PrimitiveType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_api_expr_v1alpha1_checked_proto_enumTypes[0].Descriptor()
+}
+
+func (Type_PrimitiveType) Type() protoreflect.EnumType {
+	return &file_google_api_expr_v1alpha1_checked_proto_enumTypes[0]
+}
+
+func (x Type_PrimitiveType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_PrimitiveType.Descriptor instead.
+func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+// Well-known protobuf types treated with first-class support in CEL.
+type Type_WellKnownType int32
+
+const (
+	// Unspecified type.
+	Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0
+	// Well-known protobuf.Any type.
+	//
+	// Any types are a polymorphic message type. During type-checking they are
+	// treated like `DYN` types, but at runtime they are resolved to a specific
+	// message type specified at evaluation time.
+	Type_ANY Type_WellKnownType = 1
+	// Well-known protobuf.Timestamp type, internally referenced as `timestamp`.
+	Type_TIMESTAMP Type_WellKnownType = 2
+	// Well-known protobuf.Duration type, internally referenced as `duration`.
+	Type_DURATION Type_WellKnownType = 3
+)
+
+// Enum value maps for Type_WellKnownType.
+var (
+	Type_WellKnownType_name = map[int32]string{
+		0: "WELL_KNOWN_TYPE_UNSPECIFIED",
+		1: "ANY",
+		2: "TIMESTAMP",
+		3: "DURATION",
+	}
+	Type_WellKnownType_value = map[string]int32{
+		"WELL_KNOWN_TYPE_UNSPECIFIED": 0,
+		"ANY":                         1,
+		"TIMESTAMP":                   2,
+		"DURATION":                    3,
+	}
+)
+
+func (x Type_WellKnownType) Enum() *Type_WellKnownType {
+	p := new(Type_WellKnownType)
+	*p = x
+	return p
+}
+
+func (x Type_WellKnownType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_api_expr_v1alpha1_checked_proto_enumTypes[1].Descriptor()
+}
+
+func (Type_WellKnownType) Type() protoreflect.EnumType {
+	return &file_google_api_expr_v1alpha1_checked_proto_enumTypes[1]
+}
+
+func (x Type_WellKnownType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_WellKnownType.Descriptor instead.
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+// A CEL expression which has been successfully type checked.
+type CheckedExpr struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A map from expression ids to resolved references.
+	//
+	// The following entries are in this table:
+	//
+	//   - An Ident or Select expression is represented here if it resolves to a
+	//     declaration. For instance, if `a.b.c` is represented by
+	//     `select(select(id(a), b), c)`, and `a.b` resolves to a declaration,
+	//     while `c` is a field selection, then the reference is attached to the
+	//     nested select expression (but not to the id or the outer select).
+	//     In turn, if `a` resolves to a declaration and `b.c` are field selections,
+	//     the reference is attached to the ident expression.
+	//   - Every Call expression has an entry here, identifying the function being
+	//     called.
+	//   - Every CreateStruct expression for a message has an entry, identifying
+	//     the message.
+	ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// A map from expression ids to types.
+	//
+	// Every expression node which has a type different than DYN has a mapping
+	// here. If an expression has type DYN, it is omitted from this map to save
+	// space.
+	TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// The source info derived from input that generated the parsed `expr` and
+	// any optimizations made during the type-checking pass.
+	SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+	// The expr version indicates the major / minor version number of the `expr`
+	// representation.
+	//
+	// The most common reason for a version change will be to indicate to the CEL
+	// runtimes that transformations have been performed on the expr during static
+	// analysis. In some cases, this will save the runtime the work of applying
+	// the same or similar transformations prior to evaluation.
+	ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"`
+	// The checked expression. Semantically equivalent to the parsed `expr`, but
+	// may have structural differences.
+	Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"`
+}
+
+func (x *CheckedExpr) Reset() {
+	*x = CheckedExpr{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *CheckedExpr) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CheckedExpr) ProtoMessage() {}
+
+func (x *CheckedExpr) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead.
+func (*CheckedExpr) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference {
+	if x != nil {
+		return x.ReferenceMap
+	}
+	return nil
+}
+
+func (x *CheckedExpr) GetTypeMap() map[int64]*Type {
+	if x != nil {
+		return x.TypeMap
+	}
+	return nil
+}
+
+func (x *CheckedExpr) GetSourceInfo() *SourceInfo {
+	if x != nil {
+		return x.SourceInfo
+	}
+	return nil
+}
+
+func (x *CheckedExpr) GetExprVersion() string {
+	if x != nil {
+		return x.ExprVersion
+	}
+	return ""
+}
+
+func (x *CheckedExpr) GetExpr() *Expr {
+	if x != nil {
+		return x.Expr
+	}
+	return nil
+}
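
A hypothetical consumer-side sketch of the accessors generated above: the type checker records each non-DYN node's type in TypeMap, keyed by expression id, so lookups are a plain map access (the id and type below are made up for illustration):

package main

import (
	"fmt"

	expr "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

func main() {
	checked := &expr.CheckedExpr{
		// Node 1 was checked to the primitive STRING type; DYN-typed
		// nodes would simply be absent from this map.
		TypeMap: map[int64]*expr.Type{
			1: {TypeKind: &expr.Type_Primitive{Primitive: expr.Type_STRING}},
		},
	}
	if t, ok := checked.GetTypeMap()[1]; ok {
		fmt.Println(t.GetPrimitive()) // STRING
	}
}
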
+
+// Represents a CEL type.
+type Type struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The kind of type.
+	//
+	// Types that are assignable to TypeKind:
+	//
+	//	*Type_Dyn
+	//	*Type_Null
+	//	*Type_Primitive
+	//	*Type_Wrapper
+	//	*Type_WellKnown
+	//	*Type_ListType_
+	//	*Type_MapType_
+	//	*Type_Function
+	//	*Type_MessageType
+	//	*Type_TypeParam
+	//	*Type_Type
+	//	*Type_Error
+	//	*Type_AbstractType_
+	TypeKind isType_TypeKind `protobuf_oneof:"type_kind"`
+}
+
+func (x *Type) Reset() {
+	*x = Type{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Type) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type) ProtoMessage() {}
+
+func (x *Type) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type.ProtoReflect.Descriptor instead.
+func (*Type) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Type) GetTypeKind() isType_TypeKind {
+	if m != nil {
+		return m.TypeKind
+	}
+	return nil
+}
+
+func (x *Type) GetDyn() *emptypb.Empty {
+	if x, ok := x.GetTypeKind().(*Type_Dyn); ok {
+		return x.Dyn
+	}
+	return nil
+}
+
+func (x *Type) GetNull() structpb.NullValue {
+	if x, ok := x.GetTypeKind().(*Type_Null); ok {
+		return x.Null
+	}
+	return structpb.NullValue_NULL_VALUE
+}
+
+func (x *Type) GetPrimitive() Type_PrimitiveType {
+	if x, ok := x.GetTypeKind().(*Type_Primitive); ok {
+		return x.Primitive
+	}
+	return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWrapper() Type_PrimitiveType {
+	if x, ok := x.GetTypeKind().(*Type_Wrapper); ok {
+		return x.Wrapper
+	}
+	return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWellKnown() Type_WellKnownType {
+	if x, ok := x.GetTypeKind().(*Type_WellKnown); ok {
+		return x.WellKnown
+	}
+	return Type_WELL_KNOWN_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetListType() *Type_ListType {
+	if x, ok := x.GetTypeKind().(*Type_ListType_); ok {
+		return x.ListType
+	}
+	return nil
+}
+
+func (x *Type) GetMapType() *Type_MapType {
+	if x, ok := x.GetTypeKind().(*Type_MapType_); ok {
+		return x.MapType
+	}
+	return nil
+}
+
+func (x *Type) GetFunction() *Type_FunctionType {
+	if x, ok := x.GetTypeKind().(*Type_Function); ok {
+		return x.Function
+	}
+	return nil
+}
+
+func (x *Type) GetMessageType() string {
+	if x, ok := x.GetTypeKind().(*Type_MessageType); ok {
+		return x.MessageType
+	}
+	return ""
+}
+
+func (x *Type) GetTypeParam() string {
+	if x, ok := x.GetTypeKind().(*Type_TypeParam); ok {
+		return x.TypeParam
+	}
+	return ""
+}
+
+func (x *Type) GetType() *Type {
+	if x, ok := x.GetTypeKind().(*Type_Type); ok {
+		return x.Type
+	}
+	return nil
+}
+
+func (x *Type) GetError() *emptypb.Empty {
+	if x, ok := x.GetTypeKind().(*Type_Error); ok {
+		return x.Error
+	}
+	return nil
+}
+
+func (x *Type) GetAbstractType() *Type_AbstractType {
+	if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok {
+		return x.AbstractType
+	}
+	return nil
+}
+
+type isType_TypeKind interface {
+	isType_TypeKind()
+}
+
+type Type_Dyn struct {
+	// Dynamic type.
+	Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"`
+}
+
+type Type_Null struct {
+	// Null value.
+	Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Type_Primitive struct {
+	// Primitive types: `true`, `1u`, `-2.0`, `'string'`, `b'bytes'`.
+	Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=google.api.expr.v1alpha1.Type_PrimitiveType,oneof"`
+}
+
+type Type_Wrapper struct {
+	// Wrapper of a primitive type, e.g. `google.protobuf.Int64Value`.
+	Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=google.api.expr.v1alpha1.Type_PrimitiveType,oneof"`
+}
+
+type Type_WellKnown struct {
+	// Well-known protobuf type such as `google.protobuf.Timestamp`.
+	WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=google.api.expr.v1alpha1.Type_WellKnownType,oneof"`
+}
+
+type Type_ListType_ struct {
+	// Parameterized list with elements of `list_type`, e.g. `list<timestamp>`.
+	ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"`
+}
+
+type Type_MapType_ struct {
+	// Parameterized map with typed keys and values.
+	MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"`
+}
+
+type Type_Function struct {
+	// Function type.
+	Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"`
+}
+
+type Type_MessageType struct {
+	// Protocol buffer message type.
+	//
+	// The `message_type` string specifies the qualified message type name. For
+	// example, `google.plus.Profile`.
+	MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"`
+}
+
+type Type_TypeParam struct {
+	// Type param type.
+	//
+	// The `type_param` string specifies the type parameter name, e.g. `list<E>`
+	// would be a `list_type` whose element type was a `type_param` type
+	// named `E`.
+	TypeParam string `protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"`
+}
+
+type Type_Type struct {
+	// Type type.
+	//
+	// The `type` value specifies the target type, e.g. `int` is a type with a
+	// target type of `Primitive.INT`.
+	Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"`
+}
+
+type Type_Error struct {
+	// Error type.
+	//
+	// During type-checking if an expression is an error, its type is propagated
+	// as the `ERROR` type. This permits the type-checker to discover other
+	// errors present in the expression.
+	Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"`
+}
+
+type Type_AbstractType_ struct {
+	// Abstract, application defined type.
+	AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"`
+}
+
+func (*Type_Dyn) isType_TypeKind() {}
+
+func (*Type_Null) isType_TypeKind() {}
+
+func (*Type_Primitive) isType_TypeKind() {}
+
+func (*Type_Wrapper) isType_TypeKind() {}
+
+func (*Type_WellKnown) isType_TypeKind() {}
+
+func (*Type_ListType_) isType_TypeKind() {}
+
+func (*Type_MapType_) isType_TypeKind() {}
+
+func (*Type_Function) isType_TypeKind() {}
+
+func (*Type_MessageType) isType_TypeKind() {}
+
+func (*Type_TypeParam) isType_TypeKind() {}
+
+func (*Type_Type) isType_TypeKind() {}
+
+func (*Type_Error) isType_TypeKind() {}
+
+func (*Type_AbstractType_) isType_TypeKind() {}
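
With every oneof variant now wired up, the idiomatic way to consume type_kind is a type switch over these wrapper structs. A self-contained sketch (describe is an illustrative helper, not part of the generated API):

package main

import (
	"fmt"

	expr "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// describe walks the type_kind oneof recursively; unhandled or nil
// kinds fall through to the default branch.
func describe(t *expr.Type) string {
	switch k := t.GetTypeKind().(type) {
	case *expr.Type_Primitive:
		return "primitive " + k.Primitive.String()
	case *expr.Type_ListType_:
		return "list of " + describe(k.ListType.GetElemType())
	case *expr.Type_MapType_:
		return "map of " + describe(k.MapType.GetKeyType()) + " to " + describe(k.MapType.GetValueType())
	case *expr.Type_MessageType:
		return "message " + k.MessageType
	default:
		return "other"
	}
}

func main() {
	listOfString := &expr.Type{TypeKind: &expr.Type_ListType_{
		ListType: &expr.Type_ListType{ElemType: &expr.Type{
			TypeKind: &expr.Type_Primitive{Primitive: expr.Type_STRING},
		}},
	}}
	fmt.Println(describe(listOfString)) // list of primitive STRING
}
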
+
+// Represents a declaration of a named value or function.
+//
+// A declaration is part of the contract between the expression, the agent
+// evaluating that expression, and the caller requesting evaluation.
+type Decl struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The fully qualified name of the declaration.
+	//
+	// Declarations are organized in containers and this represents the full path
+	// to the declaration in its container, as in `google.api.expr.Decl`.
+	//
+	// Declarations used as
+	// [FunctionDecl.Overload][google.api.expr.v1alpha1.Decl.FunctionDecl.Overload]
+	// parameters may or may not have a name depending on whether the overload is
+	// a function declaration or a function definition containing a result
+	// [Expr][google.api.expr.v1alpha1.Expr].
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Required. The declaration kind.
+	//
+	// Types that are assignable to DeclKind:
+	//
+	//	*Decl_Ident
+	//	*Decl_Function
+	DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"`
+}
+
+func (x *Decl) Reset() {
+	*x = Decl{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Decl) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl) ProtoMessage() {}
+
+func (x *Decl) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl.ProtoReflect.Descriptor instead.
+func (*Decl) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Decl) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (m *Decl) GetDeclKind() isDecl_DeclKind {
+	if m != nil {
+		return m.DeclKind
+	}
+	return nil
+}
+
+func (x *Decl) GetIdent() *Decl_IdentDecl {
+	if x, ok := x.GetDeclKind().(*Decl_Ident); ok {
+		return x.Ident
+	}
+	return nil
+}
+
+func (x *Decl) GetFunction() *Decl_FunctionDecl {
+	if x, ok := x.GetDeclKind().(*Decl_Function); ok {
+		return x.Function
+	}
+	return nil
+}
+
+type isDecl_DeclKind interface {
+	isDecl_DeclKind()
+}
+
+type Decl_Ident struct {
+	// Identifier declaration.
+	Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"`
+}
+
+type Decl_Function struct {
+	// Function declaration.
+	Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"`
+}
+
+func (*Decl_Ident) isDecl_DeclKind() {}
+
+func (*Decl_Function) isDecl_DeclKind() {}
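
Putting the Decl plumbing together, a hedged sketch of declaring an identifier for a checker environment; the name and documentation string are invented for illustration:

package main

import (
	"fmt"

	expr "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

func main() {
	// An identifier declaration with a type but no value: the actual
	// value must be supplied at evaluation time.
	decl := &expr.Decl{
		Name: "request.size",
		DeclKind: &expr.Decl_Ident{
			Ident: &expr.Decl_IdentDecl{
				Type: &expr.Type{TypeKind: &expr.Type_Primitive{Primitive: expr.Type_INT64}},
				Doc:  "size of the inbound request in bytes",
			},
		},
	}
	fmt.Println(decl.GetName(), decl.GetIdent().GetType().GetPrimitive())
}
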
+
+// Describes a resolved reference to a declaration.
+type Reference struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The fully qualified name of the declaration.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// For references to functions, this is a list of `Overload.overload_id`
+	// values which match according to typing rules.
+	//
+	// If the list has more than one element, overload resolution among the
+	// presented candidates must happen at runtime because of dynamic types. The
+	// type checker attempts to narrow down this list as much as possible.
+	//
+	// Empty if this is not a reference to a
+	// [Decl.FunctionDecl][google.api.expr.v1alpha1.Decl.FunctionDecl].
+	OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+	// For references to constants, this may contain the value of the
+	// constant if known at compile time.
+	Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Reference) Reset() {
+	*x = Reference{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Reference) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Reference) ProtoMessage() {}
+
+func (x *Reference) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Reference.ProtoReflect.Descriptor instead.
+func (*Reference) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Reference) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Reference) GetOverloadId() []string {
+	if x != nil {
+		return x.OverloadId
+	}
+	return nil
+}
+
+func (x *Reference) GetValue() *Constant {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// List type with typed elements, e.g. `list<string>`.
+type Type_ListType struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The element type.
+	ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"`
+}
+
+func (x *Type_ListType) Reset() {
+	*x = Type_ListType{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Type_ListType) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_ListType) ProtoMessage() {}
+
+func (x *Type_ListType) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead.
+func (*Type_ListType) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Type_ListType) GetElemType() *Type {
+	if x != nil {
+		return x.ElemType
+	}
+	return nil
+}
+
+// Map type with parameterized key and value types, e.g. `map<string, int>`.
+type Type_MapType struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The type of the key.
+	KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"`
+	// The type of the value.
+	ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"`
+}
+
+func (x *Type_MapType) Reset() {
+	*x = Type_MapType{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Type_MapType) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_MapType) ProtoMessage() {}
+
+func (x *Type_MapType) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead.
+func (*Type_MapType) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Type_MapType) GetKeyType() *Type {
+	if x != nil {
+		return x.KeyType
+	}
+	return nil
+}
+
+func (x *Type_MapType) GetValueType() *Type {
+	if x != nil {
+		return x.ValueType
+	}
+	return nil
+}
+
+// Function type with result and arg types.
+type Type_FunctionType struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Result type of the function.
+	ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+	// Argument types of the function.
+	ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"`
+}
+
+func (x *Type_FunctionType) Reset() {
+	*x = Type_FunctionType{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Type_FunctionType) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_FunctionType) ProtoMessage() {}
+
+func (x *Type_FunctionType) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead.
+func (*Type_FunctionType) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Type_FunctionType) GetResultType() *Type {
+	if x != nil {
+		return x.ResultType
+	}
+	return nil
+}
+
+func (x *Type_FunctionType) GetArgTypes() []*Type {
+	if x != nil {
+		return x.ArgTypes
+	}
+	return nil
+}
+
+// Application defined abstract type.
+type Type_AbstractType struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The fully qualified name of this abstract type.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Parameter types for this abstract type.
+	ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"`
+}
+
+func (x *Type_AbstractType) Reset() {
+	*x = Type_AbstractType{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Type_AbstractType) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_AbstractType) ProtoMessage() {}
+
+func (x *Type_AbstractType) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead.
+func (*Type_AbstractType) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Type_AbstractType) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Type_AbstractType) GetParameterTypes() []*Type {
+	if x != nil {
+		return x.ParameterTypes
+	}
+	return nil
+}
+
+// Identifier declaration which specifies its type and optional `Expr` value.
+//
+// An identifier without a value is a declaration that must be provided at
+// evaluation time. An identifier with a value should resolve to a constant,
+// but may be used in conjunction with other identifiers bound at evaluation
+// time.
+type Decl_IdentDecl struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The type of the identifier.
+	Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	// The constant value of the identifier. If not specified, the identifier
+	// must be supplied at evaluation time.
+	Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	// Documentation string for the identifier.
+	Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_IdentDecl) Reset() {
+	*x = Decl_IdentDecl{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Decl_IdentDecl) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_IdentDecl) ProtoMessage() {}
+
+func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead.
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *Decl_IdentDecl) GetType() *Type {
+	if x != nil {
+		return x.Type
+	}
+	return nil
+}
+
+func (x *Decl_IdentDecl) GetValue() *Constant {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+func (x *Decl_IdentDecl) GetDoc() string {
+	if x != nil {
+		return x.Doc
+	}
+	return ""
+}
+
+// Function declaration specifies one or more overloads which indicate the
+// function's parameter types and return type.
+//
+// Functions have no observable side-effects (there may be side-effects like
+// logging which are not observable from CEL).
+type Decl_FunctionDecl struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. List of function overloads, must contain at least one overload.
+	Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"`
+}
+
+func (x *Decl_FunctionDecl) Reset() {
+	*x = Decl_FunctionDecl{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Decl_FunctionDecl) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload {
+	if x != nil {
+		return x.Overloads
+	}
+	return nil
+}
+
+// An overload indicates a function's parameter types and return type, and
+// may optionally include a function body described in terms of
+// [Expr][google.api.expr.v1alpha1.Expr] values.
+//
+// Function overloads are declared in either a function or method
+// call-style. For methods, the `params[0]` is the expected type of the
+// target receiver.
+//
+// Overloads must have non-overlapping argument types after erasure of all
+// parameterized type variables (similar to type erasure in Java).
+type Decl_FunctionDecl_Overload struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. Globally unique overload name of the function which reflects
+	// the function name and argument types.
+	//
+	// This will be used by a [Reference][google.api.expr.v1alpha1.Reference]
+	// to indicate the `overload_id` that was resolved for the function
+	// `name`.
+	OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+	// List of function parameter [Type][google.api.expr.v1alpha1.Type]
+	// values.
+	//
+	// Param types are disjoint after generic type parameters have been
+	// replaced with the type `DYN`. Since the `DYN` type is compatible with
+	// any other type, this means that if `A` is a type parameter, the
+	// function types `int<A>` and `int<int>` are not disjoint. Likewise,
+	// `map<string, string>` is not disjoint from `map<K, V>`.
+	//
+	// When the `result_type` of a function is a generic type param, the
+	// type param name also appears as the `type` of at least one of the params.
+	Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"`
+	// The type param names associated with the function declaration.
+	//
+	// For example, `function ex<K,V>(K key, map<K, V> map) : V` would yield
+	// the type params of `K, V`.
+	TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
+	// Required. The result type of the function. For example, the operator
+	// `string.isEmpty()` would have `result_type` of `kind: BOOL`.
+	ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+	// Whether the function is to be used in a method call-style `x.f(...)`
+	// or a function call-style `f(x, ...)`.
+	//
+	// For methods, the first parameter declaration, `params[0]` is the
+	// expected type of the target receiver.
+	IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"`
+	// Documentation string for the overload.
+	Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_FunctionDecl_Overload) Reset() {
+	*x = Decl_FunctionDecl_Overload{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Decl_FunctionDecl_Overload) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl_Overload) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 1, 0}
+}
+
+func (x *Decl_FunctionDecl_Overload) GetOverloadId() string {
+	if x != nil {
+		return x.OverloadId
+	}
+	return ""
+}
+
+func (x *Decl_FunctionDecl_Overload) GetParams() []*Type {
+	if x != nil {
+		return x.Params
+	}
+	return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string {
+	if x != nil {
+		return x.TypeParams
+	}
+	return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetResultType() *Type {
+	if x != nil {
+		return x.ResultType
+	}
+	return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool {
+	if x != nil {
+		return x.IsInstanceFunction
+	}
+	return false
+}
+
+func (x *Decl_FunctionDecl_Overload) GetDoc() string {
+	if x != nil {
+		return x.Doc
+	}
+	return ""
+}
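+
+// A minimal usage sketch (illustrative, not generated output): building the
+// method call-style overload `string.isEmpty() -> bool` with this package's
+// types. It assumes the `Type_Primitive` oneof wrapper and the `Type_STRING`
+// and `Type_BOOL` enum constants declared earlier in this file; the overload
+// id shown is an illustrative convention, not one mandated by the proto.
+//
+//	isEmpty := &Decl_FunctionDecl_Overload{
+//		OverloadId:         "string_is_empty",
+//		Params:             []*Type{{TypeKind: &Type_Primitive{Primitive: Type_STRING}}},
+//		ResultType:         &Type{TypeKind: &Type_Primitive{Primitive: Type_BOOL}},
+//		IsInstanceFunction: true, // params[0] is the receiver's type
+//	}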
+
+var File_google_api_expr_v1alpha1_checked_proto protoreflect.FileDescriptor
+
+var file_google_api_expr_v1alpha1_checked_proto_rawDesc = []byte{
+	0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70,
+	0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+	0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+	0x61, 0x31, 0x1a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65,
+	0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e,
+	0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x04, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64,
+	0x45, 0x78, 0x70, 0x72, 0x12, 0x5c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
+	0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+	0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78,
+	0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+	0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d,
+	0x61, 0x70, 0x12, 0x4d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+	0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+	0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+	0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61,
+	0x70, 0x12, 0x45, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+	0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
+	0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f,
+	0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72,
+	0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+	0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x04, 0x65,
+	0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c,
+	0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a,
+	0x64, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+	0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+	0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+	0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
+	0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x22, 0xc8, 0x0b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79,
+	0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48,
+	0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x4c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d,
+	0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+	0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d,
+	0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69,
+	0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x48, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+	0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+	0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
+	0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
+	0x12, 0x4d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05,
+	0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+	0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+	0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
+	0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12,
+	0x46, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+	0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79,
+	0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c,
+	0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74,
+	0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c,
+	0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70,
+	0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x49, 0x0a, 0x08,
+	0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72,
+	0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46,
+	0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66,
+	0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61,
+	0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+	0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a,
+	0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+	0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x34, 0x0a,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+	0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74,
+	0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72,
+	0x72, 0x6f, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f,
+	0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61,
+	0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72,
+	0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72,
+	0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x47, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54,
+	0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
+	0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65,
+	0x1a, 0x83, 0x01, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08,
+	0x6b, 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72,
+	0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07,
+	0x6b, 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+	0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x8c, 0x01, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74,
+	0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c,
+	0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76,
+	0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65,
+	0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f,
+	0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+	0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67,
+	0x54, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x6b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63,
+	0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0f, 0x70, 0x61, 0x72,
+	0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+	0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79,
+	0x70, 0x65, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70,
+	0x65, 0x73, 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54,
+	0x79, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45,
+	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+	0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a,
+	0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54,
+	0x36, 0x34, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04,
+	0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05,
+	0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b,
+	0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c,
+	0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50,
+	0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59,
+	0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10,
+	0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42,
+	0x0b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xb3, 0x05, 0x0a,
+	0x04, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x05, 0x69, 0x64, 0x65,
+	0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
+	0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65,
+	0x63, 0x6c, 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x08, 0x66,
+	0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+	0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75,
+	0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75,
+	0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x8b, 0x01, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74,
+	0x44, 0x65, 0x63, 0x6c, 0x12, 0x32, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+	0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79,
+	0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+	0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x03, 0x64, 0x6f, 0x63, 0x1a, 0xee, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+	0x6e, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x52, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61,
+	0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
+	0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+	0x6e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09,
+	0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0x89, 0x02, 0x0a, 0x08, 0x4f, 0x76,
+	0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+	0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65,
+	0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d,
+	0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+	0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12,
+	0x1f, 0x0a, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03,
+	0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
+	0x12, 0x3f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
+	0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+	0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+	0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70,
+	0x65, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+	0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52,
+	0x12, 0x69, 0x73, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74,
+	0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x03, 0x64, 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69,
+	0x6e, 0x64, 0x22, 0x7a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12,
+	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+	0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
+	0x69, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+	0x61, 0x64, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+	0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43,
+	0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6c,
+	0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+	0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x09,
+	0x44, 0x65, 0x63, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67,
+	0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+	0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c,
+	0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_api_expr_v1alpha1_checked_proto_rawDescOnce sync.Once
+	file_google_api_expr_v1alpha1_checked_proto_rawDescData = file_google_api_expr_v1alpha1_checked_proto_rawDesc
+)
+
+func file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP() []byte {
+	file_google_api_expr_v1alpha1_checked_proto_rawDescOnce.Do(func() {
+		file_google_api_expr_v1alpha1_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_checked_proto_rawDescData)
+	})
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescData
+}
+
+var file_google_api_expr_v1alpha1_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_google_api_expr_v1alpha1_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_google_api_expr_v1alpha1_checked_proto_goTypes = []interface{}{
+	(Type_PrimitiveType)(0),            // 0: google.api.expr.v1alpha1.Type.PrimitiveType
+	(Type_WellKnownType)(0),            // 1: google.api.expr.v1alpha1.Type.WellKnownType
+	(*CheckedExpr)(nil),                // 2: google.api.expr.v1alpha1.CheckedExpr
+	(*Type)(nil),                       // 3: google.api.expr.v1alpha1.Type
+	(*Decl)(nil),                       // 4: google.api.expr.v1alpha1.Decl
+	(*Reference)(nil),                  // 5: google.api.expr.v1alpha1.Reference
+	nil,                                // 6: google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry
+	nil,                                // 7: google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry
+	(*Type_ListType)(nil),              // 8: google.api.expr.v1alpha1.Type.ListType
+	(*Type_MapType)(nil),               // 9: google.api.expr.v1alpha1.Type.MapType
+	(*Type_FunctionType)(nil),          // 10: google.api.expr.v1alpha1.Type.FunctionType
+	(*Type_AbstractType)(nil),          // 11: google.api.expr.v1alpha1.Type.AbstractType
+	(*Decl_IdentDecl)(nil),             // 12: google.api.expr.v1alpha1.Decl.IdentDecl
+	(*Decl_FunctionDecl)(nil),          // 13: google.api.expr.v1alpha1.Decl.FunctionDecl
+	(*Decl_FunctionDecl_Overload)(nil), // 14: google.api.expr.v1alpha1.Decl.FunctionDecl.Overload
+	(*SourceInfo)(nil),                 // 15: google.api.expr.v1alpha1.SourceInfo
+	(*Expr)(nil),                       // 16: google.api.expr.v1alpha1.Expr
+	(*emptypb.Empty)(nil),              // 17: google.protobuf.Empty
+	(structpb.NullValue)(0),            // 18: google.protobuf.NullValue
+	(*Constant)(nil),                   // 19: google.api.expr.v1alpha1.Constant
+}
+var file_google_api_expr_v1alpha1_checked_proto_depIdxs = []int32{
+	6,  // 0: google.api.expr.v1alpha1.CheckedExpr.reference_map:type_name -> google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry
+	7,  // 1: google.api.expr.v1alpha1.CheckedExpr.type_map:type_name -> google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry
+	15, // 2: google.api.expr.v1alpha1.CheckedExpr.source_info:type_name -> google.api.expr.v1alpha1.SourceInfo
+	16, // 3: google.api.expr.v1alpha1.CheckedExpr.expr:type_name -> google.api.expr.v1alpha1.Expr
+	17, // 4: google.api.expr.v1alpha1.Type.dyn:type_name -> google.protobuf.Empty
+	18, // 5: google.api.expr.v1alpha1.Type.null:type_name -> google.protobuf.NullValue
+	0,  // 6: google.api.expr.v1alpha1.Type.primitive:type_name -> google.api.expr.v1alpha1.Type.PrimitiveType
+	0,  // 7: google.api.expr.v1alpha1.Type.wrapper:type_name -> google.api.expr.v1alpha1.Type.PrimitiveType
+	1,  // 8: google.api.expr.v1alpha1.Type.well_known:type_name -> google.api.expr.v1alpha1.Type.WellKnownType
+	8,  // 9: google.api.expr.v1alpha1.Type.list_type:type_name -> google.api.expr.v1alpha1.Type.ListType
+	9,  // 10: google.api.expr.v1alpha1.Type.map_type:type_name -> google.api.expr.v1alpha1.Type.MapType
+	10, // 11: google.api.expr.v1alpha1.Type.function:type_name -> google.api.expr.v1alpha1.Type.FunctionType
+	3,  // 12: google.api.expr.v1alpha1.Type.type:type_name -> google.api.expr.v1alpha1.Type
+	17, // 13: google.api.expr.v1alpha1.Type.error:type_name -> google.protobuf.Empty
+	11, // 14: google.api.expr.v1alpha1.Type.abstract_type:type_name -> google.api.expr.v1alpha1.Type.AbstractType
+	12, // 15: google.api.expr.v1alpha1.Decl.ident:type_name -> google.api.expr.v1alpha1.Decl.IdentDecl
+	13, // 16: google.api.expr.v1alpha1.Decl.function:type_name -> google.api.expr.v1alpha1.Decl.FunctionDecl
+	19, // 17: google.api.expr.v1alpha1.Reference.value:type_name -> google.api.expr.v1alpha1.Constant
+	5,  // 18: google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry.value:type_name -> google.api.expr.v1alpha1.Reference
+	3,  // 19: google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry.value:type_name -> google.api.expr.v1alpha1.Type
+	3,  // 20: google.api.expr.v1alpha1.Type.ListType.elem_type:type_name -> google.api.expr.v1alpha1.Type
+	3,  // 21: google.api.expr.v1alpha1.Type.MapType.key_type:type_name -> google.api.expr.v1alpha1.Type
+	3,  // 22: google.api.expr.v1alpha1.Type.MapType.value_type:type_name -> google.api.expr.v1alpha1.Type
+	3,  // 23: google.api.expr.v1alpha1.Type.FunctionType.result_type:type_name -> google.api.expr.v1alpha1.Type
+	3,  // 24: google.api.expr.v1alpha1.Type.FunctionType.arg_types:type_name -> google.api.expr.v1alpha1.Type
+	3,  // 25: google.api.expr.v1alpha1.Type.AbstractType.parameter_types:type_name -> google.api.expr.v1alpha1.Type
+	3,  // 26: google.api.expr.v1alpha1.Decl.IdentDecl.type:type_name -> google.api.expr.v1alpha1.Type
+	19, // 27: google.api.expr.v1alpha1.Decl.IdentDecl.value:type_name -> google.api.expr.v1alpha1.Constant
+	14, // 28: google.api.expr.v1alpha1.Decl.FunctionDecl.overloads:type_name -> google.api.expr.v1alpha1.Decl.FunctionDecl.Overload
+	3,  // 29: google.api.expr.v1alpha1.Decl.FunctionDecl.Overload.params:type_name -> google.api.expr.v1alpha1.Type
+	3,  // 30: google.api.expr.v1alpha1.Decl.FunctionDecl.Overload.result_type:type_name -> google.api.expr.v1alpha1.Type
+	31, // [31:31] is the sub-list for method output_type
+	31, // [31:31] is the sub-list for method input_type
+	31, // [31:31] is the sub-list for extension type_name
+	31, // [31:31] is the sub-list for extension extendee
+	0,  // [0:31] is the sub-list for field type_name
+}
+
+func init() { file_google_api_expr_v1alpha1_checked_proto_init() }
+func file_google_api_expr_v1alpha1_checked_proto_init() {
+	if File_google_api_expr_v1alpha1_checked_proto != nil {
+		return
+	}
+	file_google_api_expr_v1alpha1_syntax_proto_init()
+	if !protoimpl.UnsafeEnabled {
+		file_google_api_expr_v1alpha1_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*CheckedExpr); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Type); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Decl); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Reference); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Type_ListType); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Type_MapType); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Type_FunctionType); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Type_AbstractType); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Decl_IdentDecl); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Decl_FunctionDecl); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Decl_FunctionDecl_Overload); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_google_api_expr_v1alpha1_checked_proto_msgTypes[1].OneofWrappers = []interface{}{
+		(*Type_Dyn)(nil),
+		(*Type_Null)(nil),
+		(*Type_Primitive)(nil),
+		(*Type_Wrapper)(nil),
+		(*Type_WellKnown)(nil),
+		(*Type_ListType_)(nil),
+		(*Type_MapType_)(nil),
+		(*Type_Function)(nil),
+		(*Type_MessageType)(nil),
+		(*Type_TypeParam)(nil),
+		(*Type_Type)(nil),
+		(*Type_Error)(nil),
+		(*Type_AbstractType_)(nil),
+	}
+	file_google_api_expr_v1alpha1_checked_proto_msgTypes[2].OneofWrappers = []interface{}{
+		(*Decl_Ident)(nil),
+		(*Decl_Function)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_api_expr_v1alpha1_checked_proto_rawDesc,
+			NumEnums:      2,
+			NumMessages:   13,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_api_expr_v1alpha1_checked_proto_goTypes,
+		DependencyIndexes: file_google_api_expr_v1alpha1_checked_proto_depIdxs,
+		EnumInfos:         file_google_api_expr_v1alpha1_checked_proto_enumTypes,
+		MessageInfos:      file_google_api_expr_v1alpha1_checked_proto_msgTypes,
+	}.Build()
+	File_google_api_expr_v1alpha1_checked_proto = out.File
+	file_google_api_expr_v1alpha1_checked_proto_rawDesc = nil
+	file_google_api_expr_v1alpha1_checked_proto_goTypes = nil
+	file_google_api_expr_v1alpha1_checked_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go
new file mode 100644
index 0000000000..0a2ffb5955
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go
@@ -0,0 +1,580 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/api/expr/v1alpha1/eval.proto
+
+package expr
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	status "google.golang.org/genproto/googleapis/rpc/status"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The state of an evaluation.
+//
+// Can represent an initial, partial, or completed state of evaluation.
+type EvalState struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The unique values referenced in this message.
+	Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	// An ordered list of results.
+	//
+	// Tracks the flow of evaluation through the expression.
+	// May be sparse.
+	Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+}
+
+func (x *EvalState) Reset() {
+	*x = EvalState{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *EvalState) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState) ProtoMessage() {}
+
+func (x *EvalState) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
+func (*EvalState) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *EvalState) GetValues() []*ExprValue {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+func (x *EvalState) GetResults() []*EvalState_Result {
+	if x != nil {
+		return x.Results
+	}
+	return nil
+}
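+
+// A minimal lookup sketch (illustrative, not part of the generated API):
+// because `results` is sparse and each Result stores an index into `values`,
+// resolving the value produced for a given expression id takes one
+// indirection. The helper name `lookup` is hypothetical.
+//
+//	func lookup(st *EvalState, exprID int64) *ExprValue {
+//		for _, r := range st.GetResults() {
+//			if r.GetExpr() == exprID {
+//				return st.GetValues()[r.GetValue()]
+//			}
+//		}
+//		return nil
+//	}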
+
+// The value of an evaluated expression.
+type ExprValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An expression can resolve to a value, error or unknown.
+	//
+	// Types that are assignable to Kind:
+	//
+	//	*ExprValue_Value
+	//	*ExprValue_Error
+	//	*ExprValue_Unknown
+	Kind isExprValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *ExprValue) Reset() {
+	*x = ExprValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExprValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExprValue) ProtoMessage() {}
+
+func (x *ExprValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.
+func (*ExprValue) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *ExprValue) GetKind() isExprValue_Kind {
+	if m != nil {
+		return m.Kind
+	}
+	return nil
+}
+
+func (x *ExprValue) GetValue() *Value {
+	if x, ok := x.GetKind().(*ExprValue_Value); ok {
+		return x.Value
+	}
+	return nil
+}
+
+func (x *ExprValue) GetError() *ErrorSet {
+	if x, ok := x.GetKind().(*ExprValue_Error); ok {
+		return x.Error
+	}
+	return nil
+}
+
+func (x *ExprValue) GetUnknown() *UnknownSet {
+	if x, ok := x.GetKind().(*ExprValue_Unknown); ok {
+		return x.Unknown
+	}
+	return nil
+}
+
+type isExprValue_Kind interface {
+	isExprValue_Kind()
+}
+
+type ExprValue_Value struct {
+	// A concrete value.
+	Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
+}
+
+type ExprValue_Error struct {
+	// The set of errors in the critical path of evaluation.
+	//
+	// Only errors in the critical path are included. For example,
+	// `(<error1> || true) && <error2>` will only result in `<error2>`,
+	// while `<error1> || <error2>` will result in both `<error1>` and
+	// `<error2>`.
+	//
+	// Errors caused by the presence of other errors are not included in the
+	// set. For example `<error>.foo`, `foo(<error>)`, and `<error> + 1` will
+	// only result in `<error>`.
+	//
+	// Multiple errors *might* be included when evaluation could result
+	// in different errors. For example `<error1> + <error2>` and
+	// `foo(<error1>, <error2>)` may result in `<error1>`, `<error2>`, or both.
+	// The exact subset of errors included for this case is unspecified and
+	// depends on the implementation details of the evaluator.
+	Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+type ExprValue_Unknown struct {
+	// The set of unknowns in the critical path of evaluation.
+	//
+	// Unknown behaves identically to Error with regard to propagation.
+	// Specifically, only unknowns in the critical path are included, unknowns
+	// caused by the presence of other unknowns are not included, and multiple
+	// unknowns *might* be included when evaluation could result in
+	// different unknowns. For example:
+	//
+	//	(<unknown[1]> || true) && <unknown[2]> -> <unknown[2]>
+	//	<unknown[1]> || <unknown[2]> -> <unknown[1,2]>
+	//	<unknown[1]>.foo -> <unknown[1]>
+	//	foo(<unknown[1]>) -> <unknown[1]>
+	//	<unknown[1]> + <error> -> <unknown[1]> or <error>
+	//
+	// Unknown takes precedence over Error in cases where a `Value` can short
+	// circuit the result:
+	//
+	//	<error> || <unknown> -> <unknown>
+	//	<error> && <unknown> -> <unknown>
+	//
+	// Errors take precedence in all other cases:
+	//
+	//	<error> + <unknown> -> <error>
+	//	foo(<error>, <unknown>) -> <error>
+	Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
+}
+
+func (*ExprValue_Value) isExprValue_Kind() {}
+
+func (*ExprValue_Error) isExprValue_Kind() {}
+
+func (*ExprValue_Unknown) isExprValue_Kind() {}
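+
+// A minimal dispatch sketch (illustrative): rather than probing each getter
+// for nil, consumers can type-switch on the `kind` oneof. Here `ev` is an
+// assumed `*ExprValue` obtained elsewhere.
+//
+//	switch k := ev.GetKind().(type) {
+//	case *ExprValue_Value:
+//		_ = k.Value // concrete result
+//	case *ExprValue_Error:
+//		_ = k.Error // evaluation failed on the critical path
+//	case *ExprValue_Unknown:
+//		_ = k.Unknown // required inputs were unavailable
+//	}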
+
+// A set of errors.
+//
+// The errors included depend on the context. See `ExprValue.error`.
+type ErrorSet struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The errors in the set.
+	Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+}
+
+func (x *ErrorSet) Reset() {
+	*x = ErrorSet{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ErrorSet) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorSet) ProtoMessage() {}
+
+func (x *ErrorSet) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
+func (*ErrorSet) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ErrorSet) GetErrors() []*status.Status {
+	if x != nil {
+		return x.Errors
+	}
+	return nil
+}
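+
+// A minimal construction sketch (illustrative): each entry is a
+// `google.rpc.Status`, so an evaluator surfacing a single failure might
+// build the set as below; the code value 3 (INVALID_ARGUMENT) and message
+// are assumptions for illustration.
+//
+//	errSet := &ErrorSet{Errors: []*status.Status{{
+//		Code:    3,
+//		Message: "division by zero",
+//	}}}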
+
+// A set of expressions for which the value is unknown.
+//
+// The unknowns included depend on the context. See `ExprValue.unknown`.
+type UnknownSet struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The ids of the expressions with unknown values.
+	Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
+}
+
+func (x *UnknownSet) Reset() {
+	*x = UnknownSet{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UnknownSet) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnknownSet) ProtoMessage() {}
+
+func (x *UnknownSet) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
+func (*UnknownSet) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UnknownSet) GetExprs() []int64 {
+	if x != nil {
+		return x.Exprs
+	}
+	return nil
+}
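+
+// A minimal membership sketch (illustrative): testing whether a particular
+// expression id contributed an unknown. The helper name `isUnknown` is
+// hypothetical.
+//
+//	func isUnknown(us *UnknownSet, exprID int64) bool {
+//		for _, id := range us.GetExprs() {
+//			if id == exprID {
+//				return true
+//			}
+//		}
+//		return false
+//	}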
+
+// A single evaluation result.
+type EvalState_Result struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The id of the expression this result is for.
+	Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
+	// The index in `values` of the resulting value.
+	Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EvalState_Result) Reset() {
+	*x = EvalState_Result{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *EvalState_Result) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState_Result) ProtoMessage() {}
+
+func (x *EvalState_Result) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead.
+func (*EvalState_Result) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *EvalState_Result) GetExpr() int64 {
+	if x != nil {
+		return x.Expr
+	}
+	return 0
+}
+
+func (x *EvalState_Result) GetValue() int64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+var File_google_api_expr_v1alpha1_eval_proto protoreflect.FileDescriptor
+
+var file_google_api_expr_v1alpha1_eval_proto_rawDesc = []byte{
+	0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70,
+	0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+	0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a,
+	0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72,
+	0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70,
+	0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc2,
+	0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x06,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76,
+	0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x07, 0x72, 0x65, 0x73,
+	0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61,
+	0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e,
+	0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a,
+	0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70,
+	0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x22, 0xca, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78,
+	0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x05, 0x65, 0x72,
+	0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c,
+	0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52,
+	0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x40, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77,
+	0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+	0x61, 0x31, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52,
+	0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64,
+	0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06,
+	0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+	0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e,
+	0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x6c, 0x0a, 0x1c,
+	0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65,
+	0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x09, 0x45, 0x76,
+	0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+	0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+	0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x33,
+}
+
+var (
+	file_google_api_expr_v1alpha1_eval_proto_rawDescOnce sync.Once
+	file_google_api_expr_v1alpha1_eval_proto_rawDescData = file_google_api_expr_v1alpha1_eval_proto_rawDesc
+)
+
+func file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP() []byte {
+	file_google_api_expr_v1alpha1_eval_proto_rawDescOnce.Do(func() {
+		file_google_api_expr_v1alpha1_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_eval_proto_rawDescData)
+	})
+	return file_google_api_expr_v1alpha1_eval_proto_rawDescData
+}
+
+var file_google_api_expr_v1alpha1_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_google_api_expr_v1alpha1_eval_proto_goTypes = []interface{}{
+	(*EvalState)(nil),        // 0: google.api.expr.v1alpha1.EvalState
+	(*ExprValue)(nil),        // 1: google.api.expr.v1alpha1.ExprValue
+	(*ErrorSet)(nil),         // 2: google.api.expr.v1alpha1.ErrorSet
+	(*UnknownSet)(nil),       // 3: google.api.expr.v1alpha1.UnknownSet
+	(*EvalState_Result)(nil), // 4: google.api.expr.v1alpha1.EvalState.Result
+	(*Value)(nil),            // 5: google.api.expr.v1alpha1.Value
+	(*status.Status)(nil),    // 6: google.rpc.Status
+}
+var file_google_api_expr_v1alpha1_eval_proto_depIdxs = []int32{
+	1, // 0: google.api.expr.v1alpha1.EvalState.values:type_name -> google.api.expr.v1alpha1.ExprValue
+	4, // 1: google.api.expr.v1alpha1.EvalState.results:type_name -> google.api.expr.v1alpha1.EvalState.Result
+	5, // 2: google.api.expr.v1alpha1.ExprValue.value:type_name -> google.api.expr.v1alpha1.Value
+	2, // 3: google.api.expr.v1alpha1.ExprValue.error:type_name -> google.api.expr.v1alpha1.ErrorSet
+	3, // 4: google.api.expr.v1alpha1.ExprValue.unknown:type_name -> google.api.expr.v1alpha1.UnknownSet
+	6, // 5: google.api.expr.v1alpha1.ErrorSet.errors:type_name -> google.rpc.Status
+	6, // [6:6] is the sub-list for method output_type
+	6, // [6:6] is the sub-list for method input_type
+	6, // [6:6] is the sub-list for extension type_name
+	6, // [6:6] is the sub-list for extension extendee
+	0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_google_api_expr_v1alpha1_eval_proto_init() }
+func file_google_api_expr_v1alpha1_eval_proto_init() {
+	if File_google_api_expr_v1alpha1_eval_proto != nil {
+		return
+	}
+	file_google_api_expr_v1alpha1_value_proto_init()
+	if !protoimpl.UnsafeEnabled {
+		file_google_api_expr_v1alpha1_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*EvalState); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ExprValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ErrorSet); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UnknownSet); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*EvalState_Result); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_google_api_expr_v1alpha1_eval_proto_msgTypes[1].OneofWrappers = []interface{}{
+		(*ExprValue_Value)(nil),
+		(*ExprValue_Error)(nil),
+		(*ExprValue_Unknown)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_api_expr_v1alpha1_eval_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   5,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_api_expr_v1alpha1_eval_proto_goTypes,
+		DependencyIndexes: file_google_api_expr_v1alpha1_eval_proto_depIdxs,
+		MessageInfos:      file_google_api_expr_v1alpha1_eval_proto_msgTypes,
+	}.Build()
+	File_google_api_expr_v1alpha1_eval_proto = out.File
+	file_google_api_expr_v1alpha1_eval_proto_rawDesc = nil
+	file_google_api_expr_v1alpha1_eval_proto_goTypes = nil
+	file_google_api_expr_v1alpha1_eval_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go
new file mode 100644
index 0000000000..57aaa2c9f5
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go
@@ -0,0 +1,275 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/api/expr/v1alpha1/explain.proto
+
+package expr
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Values of intermediate expressions produced when evaluating an expression.
+// Deprecated, use `EvalState` instead.
+//
+// Deprecated: Do not use.
+type Explain struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// All of the observed values.
+	//
+	// The field value_index is an index in the values list.
+	// Separating values from steps is needed to remove redundant values.
+	Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	// List of steps.
+	//
+	// Repeated evaluations of the same expression generate new ExprStep
+	// instances. The order of such ExprStep instances matches the order of
+	// elements returned by Comprehension.iter_range.
+	ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
+}
+
+func (x *Explain) Reset() {
+	*x = Explain{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Explain) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain) ProtoMessage() {}
+
+func (x *Explain) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
+func (*Explain) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_explain_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Explain) GetValues() []*Value {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+func (x *Explain) GetExprSteps() []*Explain_ExprStep {
+	if x != nil {
+		return x.ExprSteps
+	}
+	return nil
+}
+
+// ID and value index of one step.
+type Explain_ExprStep struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// ID of corresponding Expr node.
+	Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Index of the value in the values list.
+	ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
+}
+
+func (x *Explain_ExprStep) Reset() {
+	*x = Explain_ExprStep{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Explain_ExprStep) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain_ExprStep) ProtoMessage() {}
+
+func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead.
+func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_explain_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Explain_ExprStep) GetId() int64 {
+	if x != nil {
+		return x.Id
+	}
+	return 0
+}
+
+func (x *Explain_ExprStep) GetValueIndex() int32 {
+	if x != nil {
+		return x.ValueIndex
+	}
+	return 0
+}
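+
+// Illustrative sketch (not part of the generated bindings): resolving the
+// observed value for one step, assuming an Explain message populated by an
+// evaluator. Steps carry only an index into the shared Values list, which is
+// what lets repeated values be stored once.
+func stepValue(e *Explain, step *Explain_ExprStep) *Value {
+	idx := int(step.GetValueIndex())
+	if idx < 0 || idx >= len(e.GetValues()) {
+		return nil
+	}
+	return e.GetValues()[idx]
+}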
+
+var File_google_api_expr_v1alpha1_explain_proto protoreflect.FileDescriptor
+
+var file_google_api_expr_v1alpha1_explain_proto_rawDesc = []byte{
+	0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70,
+	0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
+	0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+	0x61, 0x31, 0x1a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65,
+	0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xce, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
+	0x6c, 0x61, 0x69, 0x6e, 0x12, 0x37, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+	0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x49, 0x0a,
+	0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+	0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65,
+	0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70,
+	0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
+	0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
+	0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+	0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
+	0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x6f, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72,
+	0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
+	0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+	0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+	0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x33,
+}
+
+var (
+	file_google_api_expr_v1alpha1_explain_proto_rawDescOnce sync.Once
+	file_google_api_expr_v1alpha1_explain_proto_rawDescData = file_google_api_expr_v1alpha1_explain_proto_rawDesc
+)
+
+func file_google_api_expr_v1alpha1_explain_proto_rawDescGZIP() []byte {
+	file_google_api_expr_v1alpha1_explain_proto_rawDescOnce.Do(func() {
+		file_google_api_expr_v1alpha1_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_explain_proto_rawDescData)
+	})
+	return file_google_api_expr_v1alpha1_explain_proto_rawDescData
+}
+
+var file_google_api_expr_v1alpha1_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_google_api_expr_v1alpha1_explain_proto_goTypes = []interface{}{
+	(*Explain)(nil),          // 0: google.api.expr.v1alpha1.Explain
+	(*Explain_ExprStep)(nil), // 1: google.api.expr.v1alpha1.Explain.ExprStep
+	(*Value)(nil),            // 2: google.api.expr.v1alpha1.Value
+}
+var file_google_api_expr_v1alpha1_explain_proto_depIdxs = []int32{
+	2, // 0: google.api.expr.v1alpha1.Explain.values:type_name -> google.api.expr.v1alpha1.Value
+	1, // 1: google.api.expr.v1alpha1.Explain.expr_steps:type_name -> google.api.expr.v1alpha1.Explain.ExprStep
+	2, // [2:2] is the sub-list for method output_type
+	2, // [2:2] is the sub-list for method input_type
+	2, // [2:2] is the sub-list for extension type_name
+	2, // [2:2] is the sub-list for extension extendee
+	0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_google_api_expr_v1alpha1_explain_proto_init() }
+func file_google_api_expr_v1alpha1_explain_proto_init() {
+	if File_google_api_expr_v1alpha1_explain_proto != nil {
+		return
+	}
+	file_google_api_expr_v1alpha1_value_proto_init()
+	if !protoimpl.UnsafeEnabled {
+		file_google_api_expr_v1alpha1_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Explain); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Explain_ExprStep); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_api_expr_v1alpha1_explain_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   2,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_api_expr_v1alpha1_explain_proto_goTypes,
+		DependencyIndexes: file_google_api_expr_v1alpha1_explain_proto_depIdxs,
+		MessageInfos:      file_google_api_expr_v1alpha1_explain_proto_msgTypes,
+	}.Build()
+	File_google_api_expr_v1alpha1_explain_proto = out.File
+	file_google_api_expr_v1alpha1_explain_proto_rawDesc = nil
+	file_google_api_expr_v1alpha1_explain_proto_goTypes = nil
+	file_google_api_expr_v1alpha1_explain_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go
new file mode 100644
index 0000000000..c90c6015d2
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go
@@ -0,0 +1,2040 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/api/expr/v1alpha1/syntax.proto
+
+package expr
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// CEL component specifier.
+type SourceInfo_Extension_Component int32
+
+const (
+	// Unspecified, default.
+	SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0
+	// Parser. Converts a CEL string to an AST.
+	SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1
+	// Type checker. Checks that references in an AST are defined and types
+	// agree.
+	SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2
+	// Runtime. Evaluates a parsed and optionally checked CEL AST against a
+	// context.
+	SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3
+)
+
+// Enum value maps for SourceInfo_Extension_Component.
+var (
+	SourceInfo_Extension_Component_name = map[int32]string{
+		0: "COMPONENT_UNSPECIFIED",
+		1: "COMPONENT_PARSER",
+		2: "COMPONENT_TYPE_CHECKER",
+		3: "COMPONENT_RUNTIME",
+	}
+	SourceInfo_Extension_Component_value = map[string]int32{
+		"COMPONENT_UNSPECIFIED":  0,
+		"COMPONENT_PARSER":       1,
+		"COMPONENT_TYPE_CHECKER": 2,
+		"COMPONENT_RUNTIME":      3,
+	}
+)
+
+func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component {
+	p := new(SourceInfo_Extension_Component)
+	*p = x
+	return p
+}
+
+func (x SourceInfo_Extension_Component) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_api_expr_v1alpha1_syntax_proto_enumTypes[0].Descriptor()
+}
+
+func (SourceInfo_Extension_Component) Type() protoreflect.EnumType {
+	return &file_google_api_expr_v1alpha1_syntax_proto_enumTypes[0]
+}
+
+func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead.
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3, 0, 0}
+}
+
+// An expression together with source information as returned by the parser.
+type ParsedExpr struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The parsed expression.
+	Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"`
+	// The source info derived from input that generated the parsed `expr`.
+	SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+}
+
+func (x *ParsedExpr) Reset() {
+	*x = ParsedExpr{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ParsedExpr) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParsedExpr) ProtoMessage() {}
+
+func (x *ParsedExpr) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead.
+func (*ParsedExpr) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ParsedExpr) GetExpr() *Expr {
+	if x != nil {
+		return x.Expr
+	}
+	return nil
+}
+
+func (x *ParsedExpr) GetSourceInfo() *SourceInfo {
+	if x != nil {
+		return x.SourceInfo
+	}
+	return nil
+}
+
+// An abstract representation of a common expression.
+//
+// Expressions are abstractly represented as a collection of identifiers,
+// select statements, function calls, literals, and comprehensions. All
+// operators with the exception of the '.' operator are modelled as function
+// calls. This makes it easy to introduce new operators into the existing AST.
+//
+// All references within expressions must resolve to a
+// [Decl][google.api.expr.v1alpha1.Decl] provided at type-check for an
+// expression to be valid. A reference may either be a bare identifier `name` or
+// a qualified identifier `google.api.name`. References may either refer to a
+// value or a function declaration.
+//
+// For example, the expression `google.api.name.startsWith('expr')` references
+// the declaration `google.api.name` within a
+// [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression, and the
+// function declaration `startsWith`.
+type Expr struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. An id assigned to this node by the parser which is unique in a
+	// given expression tree. This is used to associate type information and other
+	// attributes to a node in the parse tree.
+	Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+	// Required. Variants of expressions.
+	//
+	// Types that are assignable to ExprKind:
+	//
+	//	*Expr_ConstExpr
+	//	*Expr_IdentExpr
+	//	*Expr_SelectExpr
+	//	*Expr_CallExpr
+	//	*Expr_ListExpr
+	//	*Expr_StructExpr
+	//	*Expr_ComprehensionExpr
+	ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"`
+}
+
+func (x *Expr) Reset() {
+	*x = Expr{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Expr) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr) ProtoMessage() {}
+
+func (x *Expr) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr.ProtoReflect.Descriptor instead.
+func (*Expr) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Expr) GetId() int64 {
+	if x != nil {
+		return x.Id
+	}
+	return 0
+}
+
+func (m *Expr) GetExprKind() isExpr_ExprKind {
+	if m != nil {
+		return m.ExprKind
+	}
+	return nil
+}
+
+func (x *Expr) GetConstExpr() *Constant {
+	if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok {
+		return x.ConstExpr
+	}
+	return nil
+}
+
+func (x *Expr) GetIdentExpr() *Expr_Ident {
+	if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok {
+		return x.IdentExpr
+	}
+	return nil
+}
+
+func (x *Expr) GetSelectExpr() *Expr_Select {
+	if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok {
+		return x.SelectExpr
+	}
+	return nil
+}
+
+func (x *Expr) GetCallExpr() *Expr_Call {
+	if x, ok := x.GetExprKind().(*Expr_CallExpr); ok {
+		return x.CallExpr
+	}
+	return nil
+}
+
+func (x *Expr) GetListExpr() *Expr_CreateList {
+	if x, ok := x.GetExprKind().(*Expr_ListExpr); ok {
+		return x.ListExpr
+	}
+	return nil
+}
+
+func (x *Expr) GetStructExpr() *Expr_CreateStruct {
+	if x, ok := x.GetExprKind().(*Expr_StructExpr); ok {
+		return x.StructExpr
+	}
+	return nil
+}
+
+func (x *Expr) GetComprehensionExpr() *Expr_Comprehension {
+	if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok {
+		return x.ComprehensionExpr
+	}
+	return nil
+}
+
+type isExpr_ExprKind interface {
+	isExpr_ExprKind()
+}
+
+type Expr_ConstExpr struct {
+	// A literal expression.
+	ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"`
+}
+
+type Expr_IdentExpr struct {
+	// An identifier expression.
+	IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"`
+}
+
+type Expr_SelectExpr struct {
+	// A field selection expression, e.g. `request.auth`.
+	SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"`
+}
+
+type Expr_CallExpr struct {
+	// A call expression, including calls to predefined functions and operators.
+	CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"`
+}
+
+type Expr_ListExpr struct {
+	// A list creation expression.
+	ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"`
+}
+
+type Expr_StructExpr struct {
+	// A map or message creation expression.
+	StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"`
+}
+
+type Expr_ComprehensionExpr struct {
+	// A comprehension expression.
+	ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"`
+}
+
+func (*Expr_ConstExpr) isExpr_ExprKind() {}
+
+func (*Expr_IdentExpr) isExpr_ExprKind() {}
+
+func (*Expr_SelectExpr) isExpr_ExprKind() {}
+
+func (*Expr_CallExpr) isExpr_ExprKind() {}
+
+func (*Expr_ListExpr) isExpr_ExprKind() {}
+
+func (*Expr_StructExpr) isExpr_ExprKind() {}
+
+func (*Expr_ComprehensionExpr) isExpr_ExprKind() {}
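+
+// Illustrative sketch (not part of the generated bindings): hand-building the
+// AST for `google.api.name.startsWith('expr')` from the Expr doc comment. The
+// node ids used here are arbitrary; a real parser guarantees their uniqueness
+// within the expression tree.
+func exampleStartsWithAST() *Expr {
+	// `google.api.name` is a bare Ident wrapped in two Select expressions.
+	google := &Expr{Id: 1, ExprKind: &Expr_IdentExpr{IdentExpr: &Expr_Ident{Name: "google"}}}
+	api := &Expr{Id: 2, ExprKind: &Expr_SelectExpr{SelectExpr: &Expr_Select{Operand: google, Field: "api"}}}
+	name := &Expr{Id: 3, ExprKind: &Expr_SelectExpr{SelectExpr: &Expr_Select{Operand: api, Field: "name"}}}
+	// `startsWith('expr')` is a receiver-style Call with `name` as the target.
+	arg := &Expr{Id: 4, ExprKind: &Expr_ConstExpr{ConstExpr: &Constant{
+		ConstantKind: &Constant_StringValue{StringValue: "expr"},
+	}}}
+	return &Expr{Id: 5, ExprKind: &Expr_CallExpr{CallExpr: &Expr_Call{
+		Target:   name,
+		Function: "startsWith",
+		Args:     []*Expr{arg},
+	}}}
+}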
+
+// Represents a primitive literal.
+//
+// Named 'Constant' here for backwards compatibility.
+//
+// This is similar to the primitives supported in the well-known type
+// `google.protobuf.Value`, but richer so it can represent CEL's full range of
+// primitives.
+//
+// Lists and structs are not included as constants as these aggregate types may
+// contain [Expr][google.api.expr.v1alpha1.Expr] elements which require
+// evaluation and are thus not constant.
+//
+// Examples of literals include: `"hello"`, `b'bytes'`, `1u`, `4.2`, `-2`,
+// `true`, `null`.
+type Constant struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The valid constant kinds.
+	//
+	// Types that are assignable to ConstantKind:
+	//
+	//	*Constant_NullValue
+	//	*Constant_BoolValue
+	//	*Constant_Int64Value
+	//	*Constant_Uint64Value
+	//	*Constant_DoubleValue
+	//	*Constant_StringValue
+	//	*Constant_BytesValue
+	//	*Constant_DurationValue
+	//	*Constant_TimestampValue
+	ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"`
+}
+
+func (x *Constant) Reset() {
+	*x = Constant{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Constant) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Constant) ProtoMessage() {}
+
+func (x *Constant) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Constant.ProtoReflect.Descriptor instead.
+func (*Constant) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *Constant) GetConstantKind() isConstant_ConstantKind {
+	if m != nil {
+		return m.ConstantKind
+	}
+	return nil
+}
+
+func (x *Constant) GetNullValue() structpb.NullValue {
+	if x, ok := x.GetConstantKind().(*Constant_NullValue); ok {
+		return x.NullValue
+	}
+	return structpb.NullValue_NULL_VALUE
+}
+
+func (x *Constant) GetBoolValue() bool {
+	if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok {
+		return x.BoolValue
+	}
+	return false
+}
+
+func (x *Constant) GetInt64Value() int64 {
+	if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok {
+		return x.Int64Value
+	}
+	return 0
+}
+
+func (x *Constant) GetUint64Value() uint64 {
+	if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok {
+		return x.Uint64Value
+	}
+	return 0
+}
+
+func (x *Constant) GetDoubleValue() float64 {
+	if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok {
+		return x.DoubleValue
+	}
+	return 0
+}
+
+func (x *Constant) GetStringValue() string {
+	if x, ok := x.GetConstantKind().(*Constant_StringValue); ok {
+		return x.StringValue
+	}
+	return ""
+}
+
+func (x *Constant) GetBytesValue() []byte {
+	if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok {
+		return x.BytesValue
+	}
+	return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetDurationValue() *durationpb.Duration {
+	if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok {
+		return x.DurationValue
+	}
+	return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetTimestampValue() *timestamppb.Timestamp {
+	if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok {
+		return x.TimestampValue
+	}
+	return nil
+}
+
+type isConstant_ConstantKind interface {
+	isConstant_ConstantKind()
+}
+
+type Constant_NullValue struct {
+	// null value.
+	NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Constant_BoolValue struct {
+	// boolean value.
+	BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Constant_Int64Value struct {
+	// int64 value.
+	Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Constant_Uint64Value struct {
+	// uint64 value.
+	Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Constant_DoubleValue struct {
+	// double value.
+	DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Constant_StringValue struct {
+	// string value.
+	StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Constant_BytesValue struct {
+	// bytes value.
+	BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Constant_DurationValue struct {
+	// protobuf.Duration value.
+	//
+	// Deprecated: duration is no longer considered a builtin cel type.
+	//
+	// Deprecated: Do not use.
+	DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"`
+}
+
+type Constant_TimestampValue struct {
+	// protobuf.Timestamp value.
+	//
+	// Deprecated: timestamp is no longer considered a builtin cel type.
+	//
+	// Deprecated: Do not use.
+	TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
+}
+
+func (*Constant_NullValue) isConstant_ConstantKind() {}
+
+func (*Constant_BoolValue) isConstant_ConstantKind() {}
+
+func (*Constant_Int64Value) isConstant_ConstantKind() {}
+
+func (*Constant_Uint64Value) isConstant_ConstantKind() {}
+
+func (*Constant_DoubleValue) isConstant_ConstantKind() {}
+
+func (*Constant_StringValue) isConstant_ConstantKind() {}
+
+func (*Constant_BytesValue) isConstant_ConstantKind() {}
+
+func (*Constant_DurationValue) isConstant_ConstantKind() {}
+
+func (*Constant_TimestampValue) isConstant_ConstantKind() {}
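+
+// Illustrative sketch (not part of the generated bindings): branching on the
+// constant_kind oneof with a type switch, which is how callers typically
+// discover which literal a Constant holds.
+func constantKindName(c *Constant) string {
+	switch c.GetConstantKind().(type) {
+	case *Constant_NullValue:
+		return "null"
+	case *Constant_BoolValue:
+		return "bool"
+	case *Constant_Int64Value:
+		return "int64"
+	case *Constant_Uint64Value:
+		return "uint64"
+	case *Constant_DoubleValue:
+		return "double"
+	case *Constant_StringValue:
+		return "string"
+	case *Constant_BytesValue:
+		return "bytes"
+	default: // includes the deprecated duration and timestamp kinds, and unset
+		return "other"
+	}
+}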
+
+// Source information collected at parse time.
+type SourceInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The syntax version of the source, e.g. `cel1`.
+	SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"`
+	// The location name. All position information attached to an expression is
+	// relative to this location.
+	//
+	// The location could be a file, UI element, or similar. For example,
+	// `acme/app/AnvilPolicy.cel`.
+	Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+	// Monotonically increasing list of code point offsets where newlines
+	// `\n` appear.
+	//
+	// The line number of a given position is the index `i` where for a given
+	// `id` the `line_offsets[i] < id_positions[id] < line_offsets[i+1]`. The
+	// column may be derived from `id_positions[id] - line_offsets[i]`.
+	LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"`
+	// A map from the parse node id (e.g. `Expr.id`) to the code point offset
+	// within the source.
+	Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+	// A map from the parse node id where a macro replacement was made to the
+	// call `Expr` that resulted in a macro expansion.
+	//
+	// For example, `has(value.field)` is a function call that is replaced by a
+	// `test_only` field selection in the AST. Likewise, the call
+	// `list.exists(e, e > 10)` translates to a comprehension expression. The key
+	// in the map corresponds to the expression id of the expanded macro, and the
+	// value is the call `Expr` that was replaced.
+	MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// A list of tags for extensions that were used while parsing or type checking
+	// the source expression. For example, optimizations that require special
+	// runtime support may be specified.
+	//
+	// These are used to check feature support between components in separate
+	// implementations. This can be used to either skip redundant work or
+	// report an error if the extension is unsupported.
+	Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+func (x *SourceInfo) Reset() {
+	*x = SourceInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SourceInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo) ProtoMessage() {}
+
+func (x *SourceInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead.
+func (*SourceInfo) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SourceInfo) GetSyntaxVersion() string {
+	if x != nil {
+		return x.SyntaxVersion
+	}
+	return ""
+}
+
+func (x *SourceInfo) GetLocation() string {
+	if x != nil {
+		return x.Location
+	}
+	return ""
+}
+
+func (x *SourceInfo) GetLineOffsets() []int32 {
+	if x != nil {
+		return x.LineOffsets
+	}
+	return nil
+}
+
+func (x *SourceInfo) GetPositions() map[int64]int32 {
+	if x != nil {
+		return x.Positions
+	}
+	return nil
+}
+
+func (x *SourceInfo) GetMacroCalls() map[int64]*Expr {
+	if x != nil {
+		return x.MacroCalls
+	}
+	return nil
+}
+
+func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
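+
+// Illustrative sketch (not part of the generated bindings): recovering a
+// 1-based line and a column for an expression id from the indexing relation
+// documented on LineOffsets, assuming Positions contains an entry for id.
+func lineAndColumn(si *SourceInfo, id int64) (line, column int32) {
+	pos, ok := si.GetPositions()[id]
+	if !ok {
+		return 0, 0
+	}
+	line = 1
+	lineStart := int32(0)
+	for _, off := range si.GetLineOffsets() {
+		if off >= pos {
+			break
+		}
+		line++
+		lineStart = off
+	}
+	// Per the field comment, the column is the distance from the preceding
+	// newline offset.
+	return line, pos - lineStart
+}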
+
+// A specific position in source.
+type SourcePosition struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The source location name (e.g. file name).
+	Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
+	// The UTF-8 code unit offset.
+	Offset int32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+	// The 1-based index of the starting line in the source text
+	// where the issue occurs, or 0 if unknown.
+	Line int32 `protobuf:"varint,3,opt,name=line,proto3" json:"line,omitempty"`
+	// The 0-based index of the starting position within the line of source text
+	// where the issue occurs.  Only meaningful if line is nonzero.
+	Column int32 `protobuf:"varint,4,opt,name=column,proto3" json:"column,omitempty"`
+}
+
+func (x *SourcePosition) Reset() {
+	*x = SourcePosition{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SourcePosition) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourcePosition) ProtoMessage() {}
+
+func (x *SourcePosition) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourcePosition.ProtoReflect.Descriptor instead.
+func (*SourcePosition) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *SourcePosition) GetLocation() string {
+	if x != nil {
+		return x.Location
+	}
+	return ""
+}
+
+func (x *SourcePosition) GetOffset() int32 {
+	if x != nil {
+		return x.Offset
+	}
+	return 0
+}
+
+func (x *SourcePosition) GetLine() int32 {
+	if x != nil {
+		return x.Line
+	}
+	return 0
+}
+
+func (x *SourcePosition) GetColumn() int32 {
+	if x != nil {
+		return x.Column
+	}
+	return 0
+}
+
+// An identifier expression. e.g. `request`.
+type Expr_Ident struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. Holds a single, unqualified identifier, possibly preceded by a
+	// '.'.
+	//
+	// Qualified names are represented by the
+	// [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Expr_Ident) Reset() {
+	*x = Expr_Ident{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Expr_Ident) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Ident) ProtoMessage() {}
+
+func (x *Expr_Ident) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead.
+func (*Expr_Ident) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Expr_Ident) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+// A field selection expression. e.g. `request.auth`.
+type Expr_Select struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The target of the selection expression.
+	//
+	// For example, in the select expression `request.auth`, the `request`
+	// portion of the expression is the `operand`.
+	Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"`
+	// Required. The name of the field to select.
+	//
+	// For example, in the select expression `request.auth`, the `auth` portion
+	// of the expression would be the `field`.
+	Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"`
+	// Whether the select is to be interpreted as a field presence test.
+	//
+	// This results from the macro `has(request.auth)`.
+	TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"`
+}
+
+func (x *Expr_Select) Reset() {
+	*x = Expr_Select{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Expr_Select) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Select) ProtoMessage() {}
+
+func (x *Expr_Select) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead.
+func (*Expr_Select) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Expr_Select) GetOperand() *Expr {
+	if x != nil {
+		return x.Operand
+	}
+	return nil
+}
+
+func (x *Expr_Select) GetField() string {
+	if x != nil {
+		return x.Field
+	}
+	return ""
+}
+
+func (x *Expr_Select) GetTestOnly() bool {
+	if x != nil {
+		return x.TestOnly
+	}
+	return false
+}
+
+// A call expression, including calls to predefined functions and operators.
+//
+// For example, `value == 10`, `size(map_value)`.
+type Expr_Call struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The target of a method call-style expression. For example, `x` in
+	// `x.f()`.
+	Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+	// Required. The name of the function or method being called.
+	Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"`
+	// The arguments.
+	Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
+}
+
+func (x *Expr_Call) Reset() {
+	*x = Expr_Call{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Expr_Call) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Call) ProtoMessage() {}
+
+func (x *Expr_Call) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead.
+func (*Expr_Call) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Expr_Call) GetTarget() *Expr {
+	if x != nil {
+		return x.Target
+	}
+	return nil
+}
+
+func (x *Expr_Call) GetFunction() string {
+	if x != nil {
+		return x.Function
+	}
+	return ""
+}
+
+func (x *Expr_Call) GetArgs() []*Expr {
+	if x != nil {
+		return x.Args
+	}
+	return nil
+}
+
+// A list creation expression.
+//
+// Lists may either be homogeneous, e.g. `[1, 2, 3]`, or heterogeneous, e.g.
+// `dyn([1, 'hello', 2.0])`.
+type Expr_CreateList struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The elements part of the list.
+	Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"`
+	// The indices within the elements list which are marked as optional
+	// elements.
+	//
+	// When an optional-typed value is present, the value it contains
+	// is included in the list. If the optional-typed value is absent, the list
+	// element is omitted from the CreateList result.
+	OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"`
+}
+
+func (x *Expr_CreateList) Reset() {
+	*x = Expr_CreateList{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Expr_CreateList) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateList) ProtoMessage() {}
+
+func (x *Expr_CreateList) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead.
+func (*Expr_CreateList) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Expr_CreateList) GetElements() []*Expr {
+	if x != nil {
+		return x.Elements
+	}
+	return nil
+}
+
+func (x *Expr_CreateList) GetOptionalIndices() []int32 {
+	if x != nil {
+		return x.OptionalIndices
+	}
+	return nil
+}
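+
+// Illustrative sketch (not part of the generated bindings): checking whether
+// the element at position i was marked optional, which is how a planner
+// decides to drop an absent optional value instead of erroring.
+func isOptionalIndex(l *Expr_CreateList, i int32) bool {
+	for _, idx := range l.GetOptionalIndices() {
+		if idx == i {
+			return true
+		}
+	}
+	return false
+}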
+
+// A map or message creation expression.
+//
+// Maps are constructed as `{'key_name': 'value'}`. Message construction is
+// similar, but prefixed with a type name and composed of field ids:
+// `types.MyType{field_id: 'value'}`.
+type Expr_CreateStruct struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The type name of the message to be created, empty when creating map
+	// literals.
+	MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"`
+	// The entries in the creation expression.
+	Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *Expr_CreateStruct) Reset() {
+	*x = Expr_CreateStruct{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Expr_CreateStruct) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct) ProtoMessage() {}
+
+func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 4}
+}
+
+func (x *Expr_CreateStruct) GetMessageName() string {
+	if x != nil {
+		return x.MessageName
+	}
+	return ""
+}
+
+func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry {
+	if x != nil {
+		return x.Entries
+	}
+	return nil
+}
+
+// A comprehension expression applied to a list or map.
+//
+// Comprehensions are not part of the core syntax, but enabled with macros.
+// A macro matches a specific call signature within a parsed AST and replaces
+// the call with an alternate AST block. Macro expansion happens at parse
+// time.
+//
+// The following macros are supported within CEL:
+//
+// Aggregate type macros may be applied to all elements in a list or all keys
+// in a map:
+//
+//   - `all`, `exists`, `exists_one` - test a predicate expression against
+//     the inputs and return `true` if the predicate is satisfied for all,
+//     any, or exactly one value: `list.all(x, x < 10)`.
+//   - `filter` - test a predicate expression against the inputs and return
+//     the subset of elements which satisfy the predicate:
+//     `payments.filter(p, p > 1000)`.
+//   - `map` - apply an expression to all elements in the input and return the
+//     output aggregate type: `[1, 2, 3].map(i, i * i)`.
+//
+// The `has(m.x)` macro tests whether the property `x` is present in struct
+// `m`. The semantics of this macro depend on the type of `m`. For proto2
+// messages `has(m.x)` is defined as 'defined, but not set'. For proto3, the
+// macro tests whether the property is set to its default. For map and struct
+// types, the macro tests whether the property `x` is defined on `m`.
+//
+// Evaluation of comprehensions for the standard environment macros can best
+// be visualized as the following pseudocode:
+//
+// ```
+// let `accu_var` = `accu_init`
+//
+//	for (let `iter_var` in `iter_range`) {
+//	  if (!`loop_condition`) {
+//	    break
+//	  }
+//	  `accu_var` = `loop_step`
+//	}
+//
+// return `result`
+// ```
+//
+// Comprehensions for the optional V2 macros, which support map-to-map
+// translation, differ slightly from the standard environment macros in that
+// they expose the key or index in addition to the value for each list
+// or map entry:
+//
+// ```
+// let `accu_var` = `accu_init`
+//
+//	for (let `iter_var`, `iter_var2` in `iter_range`) {
+//	  if (!`loop_condition`) {
+//	    break
+//	  }
+//	  `accu_var` = `loop_step`
+//	}
+//
+// return `result`
+// ```
+type Expr_Comprehension struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The name of the first iteration variable.
+	// When the iter_range is a list, this variable is the list element.
+	// When the iter_range is a map, this variable is the map entry key.
+	IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"`
+	// The name of the second iteration variable, empty if not set.
+	// When the iter_range is a list, this variable is the integer index.
+	// When the iter_range is a map, this variable is the map entry value.
+	// This field is only set for comprehension v2 macros.
+	IterVar2 string `protobuf:"bytes,8,opt,name=iter_var2,json=iterVar2,proto3" json:"iter_var2,omitempty"`
+	// The range over which the comprehension iterates.
+	IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"`
+	// The name of the variable used for accumulation of the result.
+	AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"`
+	// The initial value of the accumulator.
+	AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"`
+	// An expression which can contain iter_var, iter_var2, and accu_var.
+	//
+	// Returns false when the result has been computed and may be used as
+	// a hint to short-circuit the remainder of the comprehension.
+	LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"`
+	// An expression which can contain iter_var, iter_var2, and accu_var.
+	//
+	// Computes the next value of accu_var.
+	LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"`
+	// An expression which can contain accu_var.
+	//
+	// Computes the result.
+	Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"`
+}
+
+func (x *Expr_Comprehension) Reset() {
+	*x = Expr_Comprehension{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Expr_Comprehension) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Comprehension) ProtoMessage() {}
+
+func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead.
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 5}
+}
+
+func (x *Expr_Comprehension) GetIterVar() string {
+	if x != nil {
+		return x.IterVar
+	}
+	return ""
+}
+
+func (x *Expr_Comprehension) GetIterVar2() string {
+	if x != nil {
+		return x.IterVar2
+	}
+	return ""
+}
+
+func (x *Expr_Comprehension) GetIterRange() *Expr {
+	if x != nil {
+		return x.IterRange
+	}
+	return nil
+}
+
+func (x *Expr_Comprehension) GetAccuVar() string {
+	if x != nil {
+		return x.AccuVar
+	}
+	return ""
+}
+
+func (x *Expr_Comprehension) GetAccuInit() *Expr {
+	if x != nil {
+		return x.AccuInit
+	}
+	return nil
+}
+
+func (x *Expr_Comprehension) GetLoopCondition() *Expr {
+	if x != nil {
+		return x.LoopCondition
+	}
+	return nil
+}
+
+func (x *Expr_Comprehension) GetLoopStep() *Expr {
+	if x != nil {
+		return x.LoopStep
+	}
+	return nil
+}
+
+func (x *Expr_Comprehension) GetResult() *Expr {
+	if x != nil {
+		return x.Result
+	}
+	return nil
+}
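+
+// Illustrative sketch (not part of the generated bindings): a Go
+// transliteration of the standard-macro pseudocode above. The eval callback
+// and the vars activation map are hypothetical stand-ins for a real CEL
+// runtime, which is considerably more involved.
+func evalComprehension(c *Expr_Comprehension, iterRange []interface{}, eval func(*Expr, map[string]interface{}) interface{}) interface{} {
+	vars := map[string]interface{}{}
+	// let `accu_var` = `accu_init`
+	vars[c.GetAccuVar()] = eval(c.GetAccuInit(), vars)
+	// for (let `iter_var` in `iter_range`)
+	for _, elem := range iterRange {
+		vars[c.GetIterVar()] = elem
+		if keepGoing, _ := eval(c.GetLoopCondition(), vars).(bool); !keepGoing {
+			break // the result is already known; short-circuit
+		}
+		// `accu_var` = `loop_step`
+		vars[c.GetAccuVar()] = eval(c.GetLoopStep(), vars)
+	}
+	// return `result`
+	return eval(c.GetResult(), vars)
+}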
+
+// Represents an entry.
+type Expr_CreateStruct_Entry struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. An id assigned to this node by the parser which is unique
+	// in a given expression tree. This is used to associate type
+	// information and other attributes to the node.
+	Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+	// The `Entry` key kinds.
+	//
+	// Types that are assignable to KeyKind:
+	//
+	//	*Expr_CreateStruct_Entry_FieldKey
+	//	*Expr_CreateStruct_Entry_MapKey
+	KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"`
+	// Required. The value assigned to the key.
+	//
+	// If the optional_entry field is true, the expression must resolve to an
+	// optional-typed value. If the optional value is present, the key will be
+	// set; however, if the optional value is absent, the key will be unset.
+	Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+	// Whether the key-value pair is optional.
+	OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"`
+}
+
+func (x *Expr_CreateStruct_Entry) Reset() {
+	*x = Expr_CreateStruct_Entry{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Expr_CreateStruct_Entry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct_Entry) ProtoMessage() {}
+
+func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 4, 0}
+}
+
+func (x *Expr_CreateStruct_Entry) GetId() int64 {
+	if x != nil {
+		return x.Id
+	}
+	return 0
+}
+
+func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind {
+	if m != nil {
+		return m.KeyKind
+	}
+	return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetFieldKey() string {
+	if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok {
+		return x.FieldKey
+	}
+	return ""
+}
+
+func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr {
+	if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok {
+		return x.MapKey
+	}
+	return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetValue() *Expr {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool {
+	if x != nil {
+		return x.OptionalEntry
+	}
+	return false
+}
+
+type isExpr_CreateStruct_Entry_KeyKind interface {
+	isExpr_CreateStruct_Entry_KeyKind()
+}
+
+type Expr_CreateStruct_Entry_FieldKey struct {
+	// The field key for a message creation statement.
+	FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"`
+}
+
+type Expr_CreateStruct_Entry_MapKey struct {
+	// The key expression for a map creation statement.
+	MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"`
+}
+
+func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+// An extension that was requested for the source expression.
+type SourceInfo_Extension struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Identifier for the extension. Example: constant_folding
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// If set, the listed components must understand the extension for the
+	// expression to evaluate correctly.
+	//
+	// This field has set semantics; repeated values should be deduplicated.
+	AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=google.api.expr.v1alpha1.SourceInfo_Extension_Component" json:"affected_components,omitempty"`
+	// Version info. May be skipped if it isn't meaningful for the extension.
+	// (for example, constant_folding might always be v0.0).
+	Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *SourceInfo_Extension) Reset() {
+	*x = SourceInfo_Extension{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SourceInfo_Extension) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension) ProtoMessage() {}
+
+func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *SourceInfo_Extension) GetId() string {
+	if x != nil {
+		return x.Id
+	}
+	return ""
+}
+
+func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component {
+	if x != nil {
+		return x.AffectedComponents
+	}
+	return nil
+}
+
+func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version {
+	if x != nil {
+		return x.Version
+	}
+	return nil
+}
+
+// Version
+type SourceInfo_Extension_Version struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Major version changes indicate a different required support level from
+	// the affected components.
+	Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+	// Minor version changes must not change the observed behavior from
+	// existing implementations, but may be provided informationally.
+	Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+}
+
+func (x *SourceInfo_Extension_Version) Reset() {
+	*x = SourceInfo_Extension_Version{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[15]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SourceInfo_Extension_Version) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension_Version) ProtoMessage() {}
+
+func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[15]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3, 0, 0}
+}
+
+func (x *SourceInfo_Extension_Version) GetMajor() int64 {
+	if x != nil {
+		return x.Major
+	}
+	return 0
+}
+
+func (x *SourceInfo_Extension_Version) GetMinor() int64 {
+	if x != nil {
+		return x.Minor
+	}
+	return 0
+}
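+
+// Illustrative sketch (not part of the generated bindings): deciding whether a
+// component must understand an extension before evaluating, following the set
+// semantics documented on AffectedComponents.
+func extensionAffects(ext *SourceInfo_Extension, c SourceInfo_Extension_Component) bool {
+	for _, affected := range ext.GetAffectedComponents() {
+		if affected == c {
+			return true
+		}
+	}
+	return false
+}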
+
+var File_google_api_expr_v1alpha1_syntax_proto protoreflect.FileDescriptor
+
+var file_google_api_expr_v1alpha1_syntax_proto_rawDesc = []byte{
+	0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70,
+	0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61,
+	0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
+	0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+	0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x22, 0x87, 0x01, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12,
+	0x32, 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+	0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65,
+	0x78, 0x70, 0x72, 0x12, 0x45, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e,
+	0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
+	0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a,
+	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xcb, 0x0d, 0x0a, 0x04, 0x45,
+	0x78, 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
+	0x02, 0x69, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70,
+	0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+	0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63,
+	0x6f, 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x45, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e,
+	0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76,
+	0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65,
+	0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12,
+	0x48, 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+	0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+	0x45, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73,
+	0x65, 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x42, 0x0a, 0x09, 0x63, 0x61, 0x6c,
+	0x6c, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76,
+	0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c,
+	0x6c, 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x48, 0x0a,
+	0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78,
+	0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72,
+	0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c,
+	0x69, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63,
+	0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76,
+	0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65,
+	0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72,
+	0x75, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x5d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72,
+	0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+	0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45,
+	0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+	0x6e, 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12,
+	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+	0x61, 0x6d, 0x65, 0x1a, 0x75, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x38, 0x0a,
+	0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72,
+	0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07,
+	0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a,
+	0x09, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
+	0x52, 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x8e, 0x01, 0x0a, 0x04, 0x43,
+	0x61, 0x6c, 0x6c, 0x12, 0x36, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+	0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45,
+	0x78, 0x70, 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66,
+	0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66,
+	0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18,
+	0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+	0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+	0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x73, 0x0a, 0x0a, 0x43,
+	0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6c, 0x65,
+	0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+	0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65,
+	0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61,
+	0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52,
+	0x0f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73,
+	0x1a, 0xdb, 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63,
+	0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+	0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18,
+	0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+	0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+	0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75,
+	0x63, 0x74, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65,
+	0x73, 0x1a, 0xda, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+	0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66,
+	0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
+	0x52, 0x08, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x07, 0x6d, 0x61,
+	0x70, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+	0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d,
+	0x61, 0x70, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+	0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+	0x45, 0x78, 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74,
+	0x72, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0x9a,
+	0x03, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+	0x12, 0x19, 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x69,
+	0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+	0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x32, 0x12, 0x3d, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x72,
+	0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76,
+	0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, 0x74,
+	0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, 0x5f,
+	0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, 0x56,
+	0x61, 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x18,
+	0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+	0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+	0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, 0x12,
+	0x45, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+	0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, 0x6e,
+	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x73,
+	0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c,
+	0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, 0x53,
+	0x74, 0x65, 0x70, 0x12, 0x36, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+	0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45,
+	0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x65,
+	0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, 0x6e,
+	0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36,
+	0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34,
+	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b,
+	0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64,
+	0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
+	0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79,
+	0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, 0x48,
+	0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, 0x6d,
+	0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x63,
+	0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x8c, 0x07, 0x0a,
+	0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, 0x73,
+	0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, 0x69,
+	0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21,
+	0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, 0x03,
+	0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74,
+	0x73, 0x12, 0x51, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+	0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+	0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, 0x61,
+	0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c,
+	0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e,
+	0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+	0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0a, 0x65,
+	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70,
+	0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
+	0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52,
+	0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x80, 0x03, 0x0a, 0x09,
+	0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x69, 0x0a, 0x13, 0x61, 0x66, 0x66,
+	0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73,
+	0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
+	0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74,
+	0x52, 0x12, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e,
+	0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+	0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+	0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+	0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65,
+	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76,
+	0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+	0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+	0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a,
+	0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f,
+	0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+	0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45,
+	0x4e, 0x54, 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43,
+	0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48,
+	0x45, 0x43, 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f,
+	0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x1a, 0x3c,
+	0x0a, 0x0e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b,
+	0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5d, 0x0a, 0x0f,
+	0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+	0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65,
+	0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78,
+	0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72,
+	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, 0x0a, 0x0e, 0x53,
+	0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a,
+	0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66,
+	0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65,
+	0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52,
+	0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18,
+	0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0x6e, 0x0a,
+	0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+	0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0b, 0x53,
+	0x79, 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
+	0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+	0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61,
+	0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_api_expr_v1alpha1_syntax_proto_rawDescOnce sync.Once
+	file_google_api_expr_v1alpha1_syntax_proto_rawDescData = file_google_api_expr_v1alpha1_syntax_proto_rawDesc
+)
+
+func file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP() []byte {
+	file_google_api_expr_v1alpha1_syntax_proto_rawDescOnce.Do(func() {
+		file_google_api_expr_v1alpha1_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_syntax_proto_rawDescData)
+	})
+	return file_google_api_expr_v1alpha1_syntax_proto_rawDescData
+}
+
+var file_google_api_expr_v1alpha1_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_api_expr_v1alpha1_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_google_api_expr_v1alpha1_syntax_proto_goTypes = []interface{}{
+	(SourceInfo_Extension_Component)(0),  // 0: google.api.expr.v1alpha1.SourceInfo.Extension.Component
+	(*ParsedExpr)(nil),                   // 1: google.api.expr.v1alpha1.ParsedExpr
+	(*Expr)(nil),                         // 2: google.api.expr.v1alpha1.Expr
+	(*Constant)(nil),                     // 3: google.api.expr.v1alpha1.Constant
+	(*SourceInfo)(nil),                   // 4: google.api.expr.v1alpha1.SourceInfo
+	(*SourcePosition)(nil),               // 5: google.api.expr.v1alpha1.SourcePosition
+	(*Expr_Ident)(nil),                   // 6: google.api.expr.v1alpha1.Expr.Ident
+	(*Expr_Select)(nil),                  // 7: google.api.expr.v1alpha1.Expr.Select
+	(*Expr_Call)(nil),                    // 8: google.api.expr.v1alpha1.Expr.Call
+	(*Expr_CreateList)(nil),              // 9: google.api.expr.v1alpha1.Expr.CreateList
+	(*Expr_CreateStruct)(nil),            // 10: google.api.expr.v1alpha1.Expr.CreateStruct
+	(*Expr_Comprehension)(nil),           // 11: google.api.expr.v1alpha1.Expr.Comprehension
+	(*Expr_CreateStruct_Entry)(nil),      // 12: google.api.expr.v1alpha1.Expr.CreateStruct.Entry
+	(*SourceInfo_Extension)(nil),         // 13: google.api.expr.v1alpha1.SourceInfo.Extension
+	nil,                                  // 14: google.api.expr.v1alpha1.SourceInfo.PositionsEntry
+	nil,                                  // 15: google.api.expr.v1alpha1.SourceInfo.MacroCallsEntry
+	(*SourceInfo_Extension_Version)(nil), // 16: google.api.expr.v1alpha1.SourceInfo.Extension.Version
+	(structpb.NullValue)(0),              // 17: google.protobuf.NullValue
+	(*durationpb.Duration)(nil),          // 18: google.protobuf.Duration
+	(*timestamppb.Timestamp)(nil),        // 19: google.protobuf.Timestamp
+}
+var file_google_api_expr_v1alpha1_syntax_proto_depIdxs = []int32{
+	2,  // 0: google.api.expr.v1alpha1.ParsedExpr.expr:type_name -> google.api.expr.v1alpha1.Expr
+	4,  // 1: google.api.expr.v1alpha1.ParsedExpr.source_info:type_name -> google.api.expr.v1alpha1.SourceInfo
+	3,  // 2: google.api.expr.v1alpha1.Expr.const_expr:type_name -> google.api.expr.v1alpha1.Constant
+	6,  // 3: google.api.expr.v1alpha1.Expr.ident_expr:type_name -> google.api.expr.v1alpha1.Expr.Ident
+	7,  // 4: google.api.expr.v1alpha1.Expr.select_expr:type_name -> google.api.expr.v1alpha1.Expr.Select
+	8,  // 5: google.api.expr.v1alpha1.Expr.call_expr:type_name -> google.api.expr.v1alpha1.Expr.Call
+	9,  // 6: google.api.expr.v1alpha1.Expr.list_expr:type_name -> google.api.expr.v1alpha1.Expr.CreateList
+	10, // 7: google.api.expr.v1alpha1.Expr.struct_expr:type_name -> google.api.expr.v1alpha1.Expr.CreateStruct
+	11, // 8: google.api.expr.v1alpha1.Expr.comprehension_expr:type_name -> google.api.expr.v1alpha1.Expr.Comprehension
+	17, // 9: google.api.expr.v1alpha1.Constant.null_value:type_name -> google.protobuf.NullValue
+	18, // 10: google.api.expr.v1alpha1.Constant.duration_value:type_name -> google.protobuf.Duration
+	19, // 11: google.api.expr.v1alpha1.Constant.timestamp_value:type_name -> google.protobuf.Timestamp
+	14, // 12: google.api.expr.v1alpha1.SourceInfo.positions:type_name -> google.api.expr.v1alpha1.SourceInfo.PositionsEntry
+	15, // 13: google.api.expr.v1alpha1.SourceInfo.macro_calls:type_name -> google.api.expr.v1alpha1.SourceInfo.MacroCallsEntry
+	13, // 14: google.api.expr.v1alpha1.SourceInfo.extensions:type_name -> google.api.expr.v1alpha1.SourceInfo.Extension
+	2,  // 15: google.api.expr.v1alpha1.Expr.Select.operand:type_name -> google.api.expr.v1alpha1.Expr
+	2,  // 16: google.api.expr.v1alpha1.Expr.Call.target:type_name -> google.api.expr.v1alpha1.Expr
+	2,  // 17: google.api.expr.v1alpha1.Expr.Call.args:type_name -> google.api.expr.v1alpha1.Expr
+	2,  // 18: google.api.expr.v1alpha1.Expr.CreateList.elements:type_name -> google.api.expr.v1alpha1.Expr
+	12, // 19: google.api.expr.v1alpha1.Expr.CreateStruct.entries:type_name -> google.api.expr.v1alpha1.Expr.CreateStruct.Entry
+	2,  // 20: google.api.expr.v1alpha1.Expr.Comprehension.iter_range:type_name -> google.api.expr.v1alpha1.Expr
+	2,  // 21: google.api.expr.v1alpha1.Expr.Comprehension.accu_init:type_name -> google.api.expr.v1alpha1.Expr
+	2,  // 22: google.api.expr.v1alpha1.Expr.Comprehension.loop_condition:type_name -> google.api.expr.v1alpha1.Expr
+	2,  // 23: google.api.expr.v1alpha1.Expr.Comprehension.loop_step:type_name -> google.api.expr.v1alpha1.Expr
+	2,  // 24: google.api.expr.v1alpha1.Expr.Comprehension.result:type_name -> google.api.expr.v1alpha1.Expr
+	2,  // 25: google.api.expr.v1alpha1.Expr.CreateStruct.Entry.map_key:type_name -> google.api.expr.v1alpha1.Expr
+	2,  // 26: google.api.expr.v1alpha1.Expr.CreateStruct.Entry.value:type_name -> google.api.expr.v1alpha1.Expr
+	0,  // 27: google.api.expr.v1alpha1.SourceInfo.Extension.affected_components:type_name -> google.api.expr.v1alpha1.SourceInfo.Extension.Component
+	16, // 28: google.api.expr.v1alpha1.SourceInfo.Extension.version:type_name -> google.api.expr.v1alpha1.SourceInfo.Extension.Version
+	2,  // 29: google.api.expr.v1alpha1.SourceInfo.MacroCallsEntry.value:type_name -> google.api.expr.v1alpha1.Expr
+	30, // [30:30] is the sub-list for method output_type
+	30, // [30:30] is the sub-list for method input_type
+	30, // [30:30] is the sub-list for extension type_name
+	30, // [30:30] is the sub-list for extension extendee
+	0,  // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_google_api_expr_v1alpha1_syntax_proto_init() }
+func file_google_api_expr_v1alpha1_syntax_proto_init() {
+	if File_google_api_expr_v1alpha1_syntax_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ParsedExpr); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Expr); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Constant); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*SourceInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*SourcePosition); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Expr_Ident); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Expr_Select); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Expr_Call); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Expr_CreateList); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Expr_CreateStruct); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Expr_Comprehension); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Expr_CreateStruct_Entry); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*SourceInfo_Extension); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_syntax_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*SourceInfo_Extension_Version); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{
+		(*Expr_ConstExpr)(nil),
+		(*Expr_IdentExpr)(nil),
+		(*Expr_SelectExpr)(nil),
+		(*Expr_CallExpr)(nil),
+		(*Expr_ListExpr)(nil),
+		(*Expr_StructExpr)(nil),
+		(*Expr_ComprehensionExpr)(nil),
+	}
+	file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{
+		(*Constant_NullValue)(nil),
+		(*Constant_BoolValue)(nil),
+		(*Constant_Int64Value)(nil),
+		(*Constant_Uint64Value)(nil),
+		(*Constant_DoubleValue)(nil),
+		(*Constant_StringValue)(nil),
+		(*Constant_BytesValue)(nil),
+		(*Constant_DurationValue)(nil),
+		(*Constant_TimestampValue)(nil),
+	}
+	file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11].OneofWrappers = []interface{}{
+		(*Expr_CreateStruct_Entry_FieldKey)(nil),
+		(*Expr_CreateStruct_Entry_MapKey)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_api_expr_v1alpha1_syntax_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   16,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_api_expr_v1alpha1_syntax_proto_goTypes,
+		DependencyIndexes: file_google_api_expr_v1alpha1_syntax_proto_depIdxs,
+		EnumInfos:         file_google_api_expr_v1alpha1_syntax_proto_enumTypes,
+		MessageInfos:      file_google_api_expr_v1alpha1_syntax_proto_msgTypes,
+	}.Build()
+	File_google_api_expr_v1alpha1_syntax_proto = out.File
+	file_google_api_expr_v1alpha1_syntax_proto_rawDesc = nil
+	file_google_api_expr_v1alpha1_syntax_proto_goTypes = nil
+	file_google_api_expr_v1alpha1_syntax_proto_depIdxs = nil
+}
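
Editor's note (not part of the vendored file): the OneofWrappers registered in the init above are the concrete types callers use to populate an Expr. A minimal sketch, assuming the vendored import path shown in this diff under a hypothetical alias exprpb — it builds the CEL AST for `1 + 2`:

package exprexample

import (
	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// buildSum returns the AST for `1 + 2`, exercising the Expr_CallExpr and
// Expr_ConstExpr oneof wrappers listed above. "_+_" is CEL's internal
// name for the binary + operator.
func buildSum() *exprpb.ParsedExpr {
	lit := func(id, n int64) *exprpb.Expr {
		return &exprpb.Expr{
			Id: id,
			ExprKind: &exprpb.Expr_ConstExpr{
				ConstExpr: &exprpb.Constant{
					ConstantKind: &exprpb.Constant_Int64Value{Int64Value: n},
				},
			},
		}
	}
	return &exprpb.ParsedExpr{
		Expr: &exprpb.Expr{
			Id: 1,
			ExprKind: &exprpb.Expr_CallExpr{
				CallExpr: &exprpb.Expr_Call{
					Function: "_+_",
					Args:     []*exprpb.Expr{lit(2, 1), lit(3, 2)},
				},
			},
		},
	}
}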
diff --git a/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go
new file mode 100644
index 0000000000..0a5ca6a1b9
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go
@@ -0,0 +1,721 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/api/expr/v1alpha1/value.proto
+
+package expr
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Represents a CEL value.
+//
+// This is similar to `google.protobuf.Value`, but can represent CEL's full
+// range of values.
+type Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The valid kinds of values.
+	//
+	// Types that are assignable to Kind:
+	//
+	//	*Value_NullValue
+	//	*Value_BoolValue
+	//	*Value_Int64Value
+	//	*Value_Uint64Value
+	//	*Value_DoubleValue
+	//	*Value_StringValue
+	//	*Value_BytesValue
+	//	*Value_EnumValue
+	//	*Value_ObjectValue
+	//	*Value_MapValue
+	//	*Value_ListValue
+	//	*Value_TypeValue
+	Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *Value) Reset() {
+	*x = Value{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+	if m != nil {
+		return m.Kind
+	}
+	return nil
+}
+
+func (x *Value) GetNullValue() structpb.NullValue {
+	if x, ok := x.GetKind().(*Value_NullValue); ok {
+		return x.NullValue
+	}
+	return structpb.NullValue_NULL_VALUE
+}
+
+func (x *Value) GetBoolValue() bool {
+	if x, ok := x.GetKind().(*Value_BoolValue); ok {
+		return x.BoolValue
+	}
+	return false
+}
+
+func (x *Value) GetInt64Value() int64 {
+	if x, ok := x.GetKind().(*Value_Int64Value); ok {
+		return x.Int64Value
+	}
+	return 0
+}
+
+func (x *Value) GetUint64Value() uint64 {
+	if x, ok := x.GetKind().(*Value_Uint64Value); ok {
+		return x.Uint64Value
+	}
+	return 0
+}
+
+func (x *Value) GetDoubleValue() float64 {
+	if x, ok := x.GetKind().(*Value_DoubleValue); ok {
+		return x.DoubleValue
+	}
+	return 0
+}
+
+func (x *Value) GetStringValue() string {
+	if x, ok := x.GetKind().(*Value_StringValue); ok {
+		return x.StringValue
+	}
+	return ""
+}
+
+func (x *Value) GetBytesValue() []byte {
+	if x, ok := x.GetKind().(*Value_BytesValue); ok {
+		return x.BytesValue
+	}
+	return nil
+}
+
+func (x *Value) GetEnumValue() *EnumValue {
+	if x, ok := x.GetKind().(*Value_EnumValue); ok {
+		return x.EnumValue
+	}
+	return nil
+}
+
+func (x *Value) GetObjectValue() *anypb.Any {
+	if x, ok := x.GetKind().(*Value_ObjectValue); ok {
+		return x.ObjectValue
+	}
+	return nil
+}
+
+func (x *Value) GetMapValue() *MapValue {
+	if x, ok := x.GetKind().(*Value_MapValue); ok {
+		return x.MapValue
+	}
+	return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+	if x, ok := x.GetKind().(*Value_ListValue); ok {
+		return x.ListValue
+	}
+	return nil
+}
+
+func (x *Value) GetTypeValue() string {
+	if x, ok := x.GetKind().(*Value_TypeValue); ok {
+		return x.TypeValue
+	}
+	return ""
+}
+
+type isValue_Kind interface {
+	isValue_Kind()
+}
+
+type Value_NullValue struct {
+	// Null value.
+	NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_BoolValue struct {
+	// Boolean value.
+	BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_Int64Value struct {
+	// Signed integer value.
+	Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Value_Uint64Value struct {
+	// Unsigned integer value.
+	Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Value_DoubleValue struct {
+	// Floating point value.
+	DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+	// UTF-8 string value.
+	StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BytesValue struct {
+	// Byte string value.
+	BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Value_EnumValue struct {
+	// An enum value.
+	EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
+}
+
+type Value_ObjectValue struct {
+	// The proto message backing an object value.
+	ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
+}
+
+type Value_MapValue struct {
+	// Map value.
+	MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+	// List value.
+	ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+type Value_TypeValue struct {
+	// Type value.
+	TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_Int64Value) isValue_Kind() {}
+
+func (*Value_Uint64Value) isValue_Kind() {}
+
+func (*Value_DoubleValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BytesValue) isValue_Kind() {}
+
+func (*Value_EnumValue) isValue_Kind() {}
+
+func (*Value_ObjectValue) isValue_Kind() {}
+
+func (*Value_MapValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (*Value_TypeValue) isValue_Kind() {}
+
+// An enum value.
+type EnumValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The fully qualified name of the enum type.
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	// The value of the enum.
+	Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EnumValue) Reset() {
+	*x = EnumValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *EnumValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumValue) ProtoMessage() {}
+
+func (x *EnumValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead.
+func (*EnumValue) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *EnumValue) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *EnumValue) GetValue() int32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// A list.
+//
+// Wrapped in a message so 'not set' and empty can be differentiated, which is
+// required for use in a 'oneof'.
+type ListValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The ordered values in the list.
+	Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ListValue) Reset() {
+	*x = ListValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ListValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+// A map.
+//
+// Wrapped in a message so 'not set' and empty can be differentiated, which is
+// required for use in a 'oneof'.
+type MapValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of map entries.
+	//
+	// CEL has fewer restrictions on keys, so a protobuf map representation
+	// cannot be used.
+	Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *MapValue) Reset() {
+	*x = MapValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *MapValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue) ProtoMessage() {}
+
+func (x *MapValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue.ProtoReflect.Descriptor instead.
+func (*MapValue) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *MapValue) GetEntries() []*MapValue_Entry {
+	if x != nil {
+		return x.Entries
+	}
+	return nil
+}
+
+// An entry in the map.
+type MapValue_Entry struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The key.
+	//
+	// Must be unique within the map.
+	// Currently only boolean, int, uint, and string values can be keys.
+	Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// The value.
+	Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *MapValue_Entry) Reset() {
+	*x = MapValue_Entry{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *MapValue_Entry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue_Entry) ProtoMessage() {}
+
+func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead.
+func (*MapValue_Entry) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *MapValue_Entry) GetKey() *Value {
+	if x != nil {
+		return x.Key
+	}
+	return nil
+}
+
+func (x *MapValue_Entry) GetValue() *Value {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+var File_google_api_expr_v1alpha1_value_proto protoreflect.FileDescriptor
+
+var file_google_api_expr_v1alpha1_value_proto_rawDesc = []byte{
+	0x0a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70,
+	0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+	0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+	0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
+	0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x04, 0x0a, 0x05, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
+	0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
+	0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
+	0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
+	0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
+	0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
+	0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61,
+	0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
+	0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
+	0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
+	0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61,
+	0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
+	0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6c, 0x69,
+	0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72,
+	0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
+	0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
+	0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x22, 0x44, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x37, 0x0a,
+	0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+	0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x12, 0x42, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+	0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+	0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
+	0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x71, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x12, 0x31, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+	0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
+	0x6b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+	0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6d, 0x0a, 0x1c, 0x63, 0x6f,
+	0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70,
+	0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f,
+	0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
+	0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x33,
+}
+
+var (
+	file_google_api_expr_v1alpha1_value_proto_rawDescOnce sync.Once
+	file_google_api_expr_v1alpha1_value_proto_rawDescData = file_google_api_expr_v1alpha1_value_proto_rawDesc
+)
+
+func file_google_api_expr_v1alpha1_value_proto_rawDescGZIP() []byte {
+	file_google_api_expr_v1alpha1_value_proto_rawDescOnce.Do(func() {
+		file_google_api_expr_v1alpha1_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_value_proto_rawDescData)
+	})
+	return file_google_api_expr_v1alpha1_value_proto_rawDescData
+}
+
+var file_google_api_expr_v1alpha1_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_google_api_expr_v1alpha1_value_proto_goTypes = []interface{}{
+	(*Value)(nil),           // 0: google.api.expr.v1alpha1.Value
+	(*EnumValue)(nil),       // 1: google.api.expr.v1alpha1.EnumValue
+	(*ListValue)(nil),       // 2: google.api.expr.v1alpha1.ListValue
+	(*MapValue)(nil),        // 3: google.api.expr.v1alpha1.MapValue
+	(*MapValue_Entry)(nil),  // 4: google.api.expr.v1alpha1.MapValue.Entry
+	(structpb.NullValue)(0), // 5: google.protobuf.NullValue
+	(*anypb.Any)(nil),       // 6: google.protobuf.Any
+}
+var file_google_api_expr_v1alpha1_value_proto_depIdxs = []int32{
+	5, // 0: google.api.expr.v1alpha1.Value.null_value:type_name -> google.protobuf.NullValue
+	1, // 1: google.api.expr.v1alpha1.Value.enum_value:type_name -> google.api.expr.v1alpha1.EnumValue
+	6, // 2: google.api.expr.v1alpha1.Value.object_value:type_name -> google.protobuf.Any
+	3, // 3: google.api.expr.v1alpha1.Value.map_value:type_name -> google.api.expr.v1alpha1.MapValue
+	2, // 4: google.api.expr.v1alpha1.Value.list_value:type_name -> google.api.expr.v1alpha1.ListValue
+	0, // 5: google.api.expr.v1alpha1.ListValue.values:type_name -> google.api.expr.v1alpha1.Value
+	4, // 6: google.api.expr.v1alpha1.MapValue.entries:type_name -> google.api.expr.v1alpha1.MapValue.Entry
+	0, // 7: google.api.expr.v1alpha1.MapValue.Entry.key:type_name -> google.api.expr.v1alpha1.Value
+	0, // 8: google.api.expr.v1alpha1.MapValue.Entry.value:type_name -> google.api.expr.v1alpha1.Value
+	9, // [9:9] is the sub-list for method output_type
+	9, // [9:9] is the sub-list for method input_type
+	9, // [9:9] is the sub-list for extension type_name
+	9, // [9:9] is the sub-list for extension extendee
+	0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_google_api_expr_v1alpha1_value_proto_init() }
+func file_google_api_expr_v1alpha1_value_proto_init() {
+	if File_google_api_expr_v1alpha1_value_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_api_expr_v1alpha1_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Value); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*EnumValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ListValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*MapValue); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_expr_v1alpha1_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*MapValue_Entry); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_google_api_expr_v1alpha1_value_proto_msgTypes[0].OneofWrappers = []interface{}{
+		(*Value_NullValue)(nil),
+		(*Value_BoolValue)(nil),
+		(*Value_Int64Value)(nil),
+		(*Value_Uint64Value)(nil),
+		(*Value_DoubleValue)(nil),
+		(*Value_StringValue)(nil),
+		(*Value_BytesValue)(nil),
+		(*Value_EnumValue)(nil),
+		(*Value_ObjectValue)(nil),
+		(*Value_MapValue)(nil),
+		(*Value_ListValue)(nil),
+		(*Value_TypeValue)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_api_expr_v1alpha1_value_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   5,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_api_expr_v1alpha1_value_proto_goTypes,
+		DependencyIndexes: file_google_api_expr_v1alpha1_value_proto_depIdxs,
+		MessageInfos:      file_google_api_expr_v1alpha1_value_proto_msgTypes,
+	}.Build()
+	File_google_api_expr_v1alpha1_value_proto = out.File
+	file_google_api_expr_v1alpha1_value_proto_rawDesc = nil
+	file_google_api_expr_v1alpha1_value_proto_goTypes = nil
+	file_google_api_expr_v1alpha1_value_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
new file mode 100644
index 0000000000..e7d3805e36
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
@@ -0,0 +1,235 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/api/httpbody.proto
+
+package httpbody
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Message that represents an arbitrary HTTP body. It should only be used for
+// payload formats that can't be represented as JSON, such as raw binary or
+// an HTML page.
+//
+// This message can be used both in streaming and non-streaming API methods in
+// the request as well as the response.
+//
+// It can be used as a top-level request field, which is convenient if one
+// wants to extract parameters from either the URL or HTTP template into the
+// request fields and also wants access to the raw HTTP body.
+//
+// Example:
+//
+//	message GetResourceRequest {
+//	  // A unique request id.
+//	  string request_id = 1;
+//
+//	  // The raw HTTP body is bound to this field.
+//	  google.api.HttpBody http_body = 2;
+//
+//	}
+//
+//	service ResourceService {
+//	  rpc GetResource(GetResourceRequest)
+//	    returns (google.api.HttpBody);
+//	  rpc UpdateResource(google.api.HttpBody)
+//	    returns (google.protobuf.Empty);
+//
+//	}
+//
+// Example with streaming methods:
+//
+//	service CaldavService {
+//	  rpc GetCalendar(stream google.api.HttpBody)
+//	    returns (stream google.api.HttpBody);
+//	  rpc UpdateCalendar(stream google.api.HttpBody)
+//	    returns (stream google.api.HttpBody);
+//
+//	}
+//
+// Use of this type only changes how the request and response bodies are
+// handled; all other features will continue to work unchanged.
+type HttpBody struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The HTTP Content-Type header value specifying the content type of the body.
+	ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
+	// The HTTP request/response body as raw binary.
+	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	// Application specific response metadata. Must be set in the first response
+	// for streaming APIs.
+	Extensions []*anypb.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+func (x *HttpBody) Reset() {
+	*x = HttpBody{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_httpbody_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *HttpBody) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpBody) ProtoMessage() {}
+
+func (x *HttpBody) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_httpbody_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpBody.ProtoReflect.Descriptor instead.
+func (*HttpBody) Descriptor() ([]byte, []int) {
+	return file_google_api_httpbody_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HttpBody) GetContentType() string {
+	if x != nil {
+		return x.ContentType
+	}
+	return ""
+}
+
+func (x *HttpBody) GetData() []byte {
+	if x != nil {
+		return x.Data
+	}
+	return nil
+}
+
+func (x *HttpBody) GetExtensions() []*anypb.Any {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+var File_google_api_httpbody_proto protoreflect.FileDescriptor
+
+var file_google_api_httpbody_proto_rawDesc = []byte{
+	0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74,
+	0x70, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x22, 0x77, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21,
+	0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70,
+	0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
+	0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+	0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63,
+	0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48,
+	0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
+	0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f,
+	0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02,
+	0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_api_httpbody_proto_rawDescOnce sync.Once
+	file_google_api_httpbody_proto_rawDescData = file_google_api_httpbody_proto_rawDesc
+)
+
+func file_google_api_httpbody_proto_rawDescGZIP() []byte {
+	file_google_api_httpbody_proto_rawDescOnce.Do(func() {
+		file_google_api_httpbody_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_httpbody_proto_rawDescData)
+	})
+	return file_google_api_httpbody_proto_rawDescData
+}
+
+var file_google_api_httpbody_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_api_httpbody_proto_goTypes = []interface{}{
+	(*HttpBody)(nil),  // 0: google.api.HttpBody
+	(*anypb.Any)(nil), // 1: google.protobuf.Any
+}
+var file_google_api_httpbody_proto_depIdxs = []int32{
+	1, // 0: google.api.HttpBody.extensions:type_name -> google.protobuf.Any
+	1, // [1:1] is the sub-list for method output_type
+	1, // [1:1] is the sub-list for method input_type
+	1, // [1:1] is the sub-list for extension type_name
+	1, // [1:1] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_api_httpbody_proto_init() }
+func file_google_api_httpbody_proto_init() {
+	if File_google_api_httpbody_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_api_httpbody_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*HttpBody); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_api_httpbody_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_api_httpbody_proto_goTypes,
+		DependencyIndexes: file_google_api_httpbody_proto_depIdxs,
+		MessageInfos:      file_google_api_httpbody_proto_msgTypes,
+	}.Build()
+	File_google_api_httpbody_proto = out.File
+	file_google_api_httpbody_proto_rawDesc = nil
+	file_google_api_httpbody_proto_goTypes = nil
+	file_google_api_httpbody_proto_depIdxs = nil
+}
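
The HttpBody message above lets a gRPC method carry a raw, non-JSON payload
together with its MIME type. A minimal sketch of constructing and reading one
via the fields and getters defined in this file, assuming the vendored genproto
module is importable (the image/png content type and placeholder bytes are
illustrative assumptions, not part of this change):

	package main

	import (
		"fmt"

		"google.golang.org/genproto/googleapis/api/httpbody"
	)

	func main() {
		// Wrap raw bytes in an HttpBody with an explicit Content-Type so a
		// handler can return them without JSON transcoding.
		body := &httpbody.HttpBody{
			ContentType: "image/png",                    // assumed MIME type
			Data:        []byte{0x89, 0x50, 0x4e, 0x47}, // placeholder bytes, not a real image
		}
		fmt.Println(body.GetContentType(), len(body.GetData()))
	}
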
diff --git a/hack/tools/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/hack/tools/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/hack/tools/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/hack/tools/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
new file mode 100644
index 0000000000..3e56218279
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
@@ -0,0 +1,1314 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/rpc/error_details.proto
+
+package errdetails
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Describes the cause of the error with structured details.
+//
+// Example of an error when contacting the "pubsub.googleapis.com" API when it
+// is not enabled:
+//
+//	{ "reason": "API_DISABLED"
+//	  "domain": "googleapis.com"
+//	  "metadata": {
+//	    "resource": "projects/123",
+//	    "service": "pubsub.googleapis.com"
+//	  }
+//	}
+//
+// This response indicates that the pubsub.googleapis.com API is not enabled.
+//
+// Example of an error that is returned when attempting to create a Spanner
+// instance in a region that is out of stock:
+//
+//	{ "reason": "STOCKOUT"
+//	  "domain": "spanner.googleapis.com",
+//	  "metadata": {
+//	    "availableRegions": "us-central1,us-east2"
+//	  }
+//	}
+type ErrorInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The reason for the error. This is a constant value that identifies the
+	// proximate cause of the error. Error reasons are unique within a particular
+	// domain of errors. This should be at most 63 characters and match a
+	// regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents
+	// UPPER_SNAKE_CASE.
+	Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"`
+	// The logical grouping to which the "reason" belongs. The error domain
+	// is typically the registered service name of the tool or product that
+	// generates the error. Example: "pubsub.googleapis.com". If the error is
+	// generated by some common infrastructure, the error domain must be a
+	// globally unique value that identifies the infrastructure. For Google API
+	// infrastructure, the error domain is "googleapis.com".
+	Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"`
+	// Additional structured details about this error.
+	//
+	// Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in
+	// length. When identifying the current value of an exceeded limit, the units
+	// should be contained in the key, not the value.  For example, if the client
+	// exceeds the number of instances that can be created in a single (batch)
+	// request, return {"instanceLimitPerRequest": "100"} rather than
+	// {"instanceLimit": "100/request"}.
+	Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *ErrorInfo) Reset() {
+	*x = ErrorInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ErrorInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorInfo) ProtoMessage() {}
+
+func (x *ErrorInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead.
+func (*ErrorInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ErrorInfo) GetReason() string {
+	if x != nil {
+		return x.Reason
+	}
+	return ""
+}
+
+func (x *ErrorInfo) GetDomain() string {
+	if x != nil {
+		return x.Domain
+	}
+	return ""
+}
+
+func (x *ErrorInfo) GetMetadata() map[string]string {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
+
+// Describes when the clients can retry a failed request. Clients could ignore
+// the recommendation here or retry when this information is missing from error
+// responses.
+//
+// It's always recommended that clients should use exponential backoff when
+// retrying.
+//
+// Clients should wait until `retry_delay` amount of time has passed since
+// receiving the error response before retrying.  If retrying requests also
+// fail, clients should use an exponential backoff scheme to gradually increase
+// the delay between retries based on `retry_delay`, until either a maximum
+// number of retries have been reached or a maximum retry delay cap has been
+// reached.
+type RetryInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Clients should wait at least this long between retrying the same request.
+	RetryDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"`
+}
+
+func (x *RetryInfo) Reset() {
+	*x = RetryInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RetryInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RetryInfo) ProtoMessage() {}
+
+func (x *RetryInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead.
+func (*RetryInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *RetryInfo) GetRetryDelay() *durationpb.Duration {
+	if x != nil {
+		return x.RetryDelay
+	}
+	return nil
+}
+
+// Describes additional debugging info.
+type DebugInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The stack trace entries indicating where the error occurred.
+	StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"`
+	// Additional debugging information provided by the server.
+	Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
+}
+
+func (x *DebugInfo) Reset() {
+	*x = DebugInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *DebugInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DebugInfo) ProtoMessage() {}
+
+func (x *DebugInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead.
+func (*DebugInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DebugInfo) GetStackEntries() []string {
+	if x != nil {
+		return x.StackEntries
+	}
+	return nil
+}
+
+func (x *DebugInfo) GetDetail() string {
+	if x != nil {
+		return x.Detail
+	}
+	return ""
+}
+
+// Describes how a quota check failed.
+//
+// For example, if a daily limit was exceeded for the calling project,
+// a service could respond with a QuotaFailure detail containing the project
+// id and the description of the quota limit that was exceeded.  If the
+// calling project hasn't enabled the service in the developer console, then
+// a service could respond with the project id and set `service_disabled`
+// to true.
+//
+// Also see RetryInfo and Help types for other details about handling a
+// quota failure.
+type QuotaFailure struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Describes all quota violations.
+	Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"`
+}
+
+func (x *QuotaFailure) Reset() {
+	*x = QuotaFailure{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *QuotaFailure) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuotaFailure) ProtoMessage() {}
+
+func (x *QuotaFailure) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead.
+func (*QuotaFailure) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation {
+	if x != nil {
+		return x.Violations
+	}
+	return nil
+}
+
+// Describes what preconditions have failed.
+//
+// For example, if an RPC failed because it required the Terms of Service to be
+// acknowledged, it could list the terms of service violation in the
+// PreconditionFailure message.
+type PreconditionFailure struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Describes all precondition violations.
+	Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"`
+}
+
+func (x *PreconditionFailure) Reset() {
+	*x = PreconditionFailure{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PreconditionFailure) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreconditionFailure) ProtoMessage() {}
+
+func (x *PreconditionFailure) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreconditionFailure.ProtoReflect.Descriptor instead.
+func (*PreconditionFailure) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation {
+	if x != nil {
+		return x.Violations
+	}
+	return nil
+}
+
+// Describes violations in a client request. This error type focuses on the
+// syntactic aspects of the request.
+type BadRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Describes all violations in a client request.
+	FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"`
+}
+
+func (x *BadRequest) Reset() {
+	*x = BadRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BadRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BadRequest) ProtoMessage() {}
+
+func (x *BadRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BadRequest.ProtoReflect.Descriptor instead.
+func (*BadRequest) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation {
+	if x != nil {
+		return x.FieldViolations
+	}
+	return nil
+}
+
+// Contains metadata about the request that clients can attach when filing a bug
+// or providing other forms of feedback.
+type RequestInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An opaque string that should only be interpreted by the service generating
+	// it. For example, it can be used to identify requests in the service's logs.
+	RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
+	// Any data that was used to serve this request. For example, an encrypted
+	// stack trace that can be sent back to the service provider for debugging.
+	ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"`
+}
+
+func (x *RequestInfo) Reset() {
+	*x = RequestInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RequestInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RequestInfo) ProtoMessage() {}
+
+func (x *RequestInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RequestInfo.ProtoReflect.Descriptor instead.
+func (*RequestInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *RequestInfo) GetRequestId() string {
+	if x != nil {
+		return x.RequestId
+	}
+	return ""
+}
+
+func (x *RequestInfo) GetServingData() string {
+	if x != nil {
+		return x.ServingData
+	}
+	return ""
+}
+
+// Describes the resource that is being accessed.
+type ResourceInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A name for the type of resource being accessed, e.g. "sql table",
+	// "cloud storage bucket", "file", "Google calendar"; or the type URL
+	// of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic".
+	ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
+	// The name of the resource being accessed.  For example, a shared calendar
+	// name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
+	// error is
+	// [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
+	ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
+	// The owner of the resource (optional).
+	// For example, "user:<owner email>" or "project:<Google developer project
+	// id>".
+	Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"`
+	// Describes what error is encountered when accessing this resource.
+	// For example, updating a cloud project may require the `writer` permission
+	// on the developer console project.
+	Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *ResourceInfo) Reset() {
+	*x = ResourceInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ResourceInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceInfo) ProtoMessage() {}
+
+func (x *ResourceInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceInfo.ProtoReflect.Descriptor instead.
+func (*ResourceInfo) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ResourceInfo) GetResourceType() string {
+	if x != nil {
+		return x.ResourceType
+	}
+	return ""
+}
+
+func (x *ResourceInfo) GetResourceName() string {
+	if x != nil {
+		return x.ResourceName
+	}
+	return ""
+}
+
+func (x *ResourceInfo) GetOwner() string {
+	if x != nil {
+		return x.Owner
+	}
+	return ""
+}
+
+func (x *ResourceInfo) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+// Provides links to documentation or for performing an out of band action.
+//
+// For example, if a quota check failed with an error indicating the calling
+// project hasn't enabled the accessed service, this can contain a URL pointing
+// directly to the right place in the developer console to flip the bit.
+type Help struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// URL(s) pointing to additional information on handling the current error.
+	Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"`
+}
+
+func (x *Help) Reset() {
+	*x = Help{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Help) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Help) ProtoMessage() {}
+
+func (x *Help) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Help.ProtoReflect.Descriptor instead.
+func (*Help) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Help) GetLinks() []*Help_Link {
+	if x != nil {
+		return x.Links
+	}
+	return nil
+}
+
+// Provides a localized error message that is safe to return to the user
+// and that can be attached to an RPC error.
+type LocalizedMessage struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The locale used following the specification defined at
+	// https://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+	// Examples are: "en-US", "fr-CH", "es-MX"
+	Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"`
+	// The localized error message in the above locale.
+	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (x *LocalizedMessage) Reset() {
+	*x = LocalizedMessage{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *LocalizedMessage) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LocalizedMessage) ProtoMessage() {}
+
+func (x *LocalizedMessage) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use LocalizedMessage.ProtoReflect.Descriptor instead.
+func (*LocalizedMessage) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *LocalizedMessage) GetLocale() string {
+	if x != nil {
+		return x.Locale
+	}
+	return ""
+}
+
+func (x *LocalizedMessage) GetMessage() string {
+	if x != nil {
+		return x.Message
+	}
+	return ""
+}
+
+// A message type used to describe a single quota violation.  For example, a
+// daily quota or a custom quota that was exceeded.
+type QuotaFailure_Violation struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The subject on which the quota check failed.
+	// For example, "clientip:<ip address of client>" or
+	// "project:<Google developer project id>".
+	Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"`
+	// A description of how the quota check failed. Clients can use this
+	// description to find more about the quota configuration in the service's
+	// public documentation, or find the relevant quota limit to adjust through
+	// developer console.
+	//
+	// For example: "Service disabled" or "Daily Limit for read operations
+	// exceeded".
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *QuotaFailure_Violation) Reset() {
+	*x = QuotaFailure_Violation{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *QuotaFailure_Violation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuotaFailure_Violation) ProtoMessage() {}
+
+func (x *QuotaFailure_Violation) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuotaFailure_Violation.ProtoReflect.Descriptor instead.
+func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *QuotaFailure_Violation) GetSubject() string {
+	if x != nil {
+		return x.Subject
+	}
+	return ""
+}
+
+func (x *QuotaFailure_Violation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+// A message type used to describe a single precondition failure.
+type PreconditionFailure_Violation struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The type of PreconditionFailure. We recommend using a service-specific
+	// enum type to define the supported precondition violation subjects. For
+	// example, "TOS" for "Terms of Service violation".
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	// The subject, relative to the type, that failed.
+	// For example, "google.com/cloud" relative to the "TOS" type would indicate
+	// which terms of service is being referenced.
+	Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"`
+	// A description of how the precondition failed. Developers can use this
+	// description to understand how to fix the failure.
+	//
+	// For example: "Terms of service not accepted".
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *PreconditionFailure_Violation) Reset() {
+	*x = PreconditionFailure_Violation{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PreconditionFailure_Violation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreconditionFailure_Violation) ProtoMessage() {}
+
+func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreconditionFailure_Violation.ProtoReflect.Descriptor instead.
+func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *PreconditionFailure_Violation) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *PreconditionFailure_Violation) GetSubject() string {
+	if x != nil {
+		return x.Subject
+	}
+	return ""
+}
+
+func (x *PreconditionFailure_Violation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+// A message type used to describe a single bad request field.
+type BadRequest_FieldViolation struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// A path that leads to a field in the request body. The value will be a
+	// sequence of dot-separated identifiers that identify a protocol buffer
+	// field.
+	//
+	// Consider the following:
+	//
+	//	message CreateContactRequest {
+	//	  message EmailAddress {
+	//	    enum Type {
+	//	      TYPE_UNSPECIFIED = 0;
+	//	      HOME = 1;
+	//	      WORK = 2;
+	//	    }
+	//
+	//	    optional string email = 1;
+	//	    repeated EmailType type = 2;
+	//	  }
+	//
+	//	  string full_name = 1;
+	//	  repeated EmailAddress email_addresses = 2;
+	//	}
+	//
+	// In this example, in proto `field` could take one of the following values:
+	//
+	//   - `full_name` for a violation in the `full_name` value
+	//   - `email_addresses[1].email` for a violation in the `email` field of the
+	//     first `email_addresses` message
+	//   - `email_addresses[3].type[2]` for a violation in the second `type`
+	//     value in the third `email_addresses` message.
+	//
+	// In JSON, the same values are represented as:
+	//
+	//   - `fullName` for a violation in the `fullName` value
+	//   - `emailAddresses[1].email` for a violation in the `email` field of the
+	//     first `emailAddresses` message
+	//   - `emailAddresses[3].type[2]` for a violation in the second `type`
+	//     value in the third `emailAddresses` message.
+	Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
+	// A description of why the request element is bad.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *BadRequest_FieldViolation) Reset() {
+	*x = BadRequest_FieldViolation{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[13]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BadRequest_FieldViolation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BadRequest_FieldViolation) ProtoMessage() {}
+
+func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[13]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BadRequest_FieldViolation.ProtoReflect.Descriptor instead.
+func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5, 0}
+}
+
+func (x *BadRequest_FieldViolation) GetField() string {
+	if x != nil {
+		return x.Field
+	}
+	return ""
+}
+
+func (x *BadRequest_FieldViolation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+// Describes a URL link.
+type Help_Link struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Describes what the link offers.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	// The URL of the link.
+	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+}
+
+func (x *Help_Link) Reset() {
+	*x = Help_Link{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_error_details_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Help_Link) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Help_Link) ProtoMessage() {}
+
+func (x *Help_Link) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_error_details_proto_msgTypes[14]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Help_Link.ProtoReflect.Descriptor instead.
+func (*Help_Link) Descriptor() ([]byte, []int) {
+	return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8, 0}
+}
+
+func (x *Help_Link) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Help_Link) GetUrl() string {
+	if x != nil {
+		return x.Url
+	}
+	return ""
+}
+
+var File_google_rpc_error_details_proto protoreflect.FileDescriptor
+
+var file_google_rpc_error_details_proto_rawDesc = []byte{
+	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72,
+	0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75,
+	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x01, 0x0a,
+	0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65,
+	0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73,
+	0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x3f, 0x0a, 0x08, 0x6d, 0x65,
+	0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49,
+	0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d,
+	0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+	0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x09, 0x52, 0x65, 0x74, 0x72,
+	0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x64,
+	0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x44, 0x65, 0x6c, 0x61,
+	0x79, 0x22, 0x48, 0x0a, 0x09, 0x44, 0x65, 0x62, 0x75, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23,
+	0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72,
+	0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c,
+	0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a,
+	0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75,
+	0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
+	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+	0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72,
+	0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72,
+	0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
+	0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46,
+	0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09,
+	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a,
+	0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+	0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61,
+	0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c,
+	0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e,
+	0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64,
+	0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69,
+	0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05,
+	0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49,
+	0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69,
+	0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+	0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61,
+	0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e,
+	0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+	0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+	0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72,
+	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72,
+	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65,
+	0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70,
+	0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c,
+	0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a,
+	0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63,
+	0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a,
+	0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c,
+	0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42,
+	0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70,
+	0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70,
+	0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72,
+	0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_rpc_error_details_proto_rawDescOnce sync.Once
+	file_google_rpc_error_details_proto_rawDescData = file_google_rpc_error_details_proto_rawDesc
+)
+
+func file_google_rpc_error_details_proto_rawDescGZIP() []byte {
+	file_google_rpc_error_details_proto_rawDescOnce.Do(func() {
+		file_google_rpc_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_error_details_proto_rawDescData)
+	})
+	return file_google_rpc_error_details_proto_rawDescData
+}
+
+var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_google_rpc_error_details_proto_goTypes = []interface{}{
+	(*ErrorInfo)(nil),                     // 0: google.rpc.ErrorInfo
+	(*RetryInfo)(nil),                     // 1: google.rpc.RetryInfo
+	(*DebugInfo)(nil),                     // 2: google.rpc.DebugInfo
+	(*QuotaFailure)(nil),                  // 3: google.rpc.QuotaFailure
+	(*PreconditionFailure)(nil),           // 4: google.rpc.PreconditionFailure
+	(*BadRequest)(nil),                    // 5: google.rpc.BadRequest
+	(*RequestInfo)(nil),                   // 6: google.rpc.RequestInfo
+	(*ResourceInfo)(nil),                  // 7: google.rpc.ResourceInfo
+	(*Help)(nil),                          // 8: google.rpc.Help
+	(*LocalizedMessage)(nil),              // 9: google.rpc.LocalizedMessage
+	nil,                                   // 10: google.rpc.ErrorInfo.MetadataEntry
+	(*QuotaFailure_Violation)(nil),        // 11: google.rpc.QuotaFailure.Violation
+	(*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation
+	(*BadRequest_FieldViolation)(nil),     // 13: google.rpc.BadRequest.FieldViolation
+	(*Help_Link)(nil),                     // 14: google.rpc.Help.Link
+	(*durationpb.Duration)(nil),           // 15: google.protobuf.Duration
+}
+var file_google_rpc_error_details_proto_depIdxs = []int32{
+	10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry
+	15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration
+	11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation
+	12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
+	13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
+	14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
+	6,  // [6:6] is the sub-list for method output_type
+	6,  // [6:6] is the sub-list for method input_type
+	6,  // [6:6] is the sub-list for extension type_name
+	6,  // [6:6] is the sub-list for extension extendee
+	0,  // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_google_rpc_error_details_proto_init() }
+func file_google_rpc_error_details_proto_init() {
+	if File_google_rpc_error_details_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ErrorInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*RetryInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*DebugInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*QuotaFailure); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*PreconditionFailure); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BadRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*RequestInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ResourceInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Help); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*LocalizedMessage); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*QuotaFailure_Violation); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*PreconditionFailure_Violation); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BadRequest_FieldViolation); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Help_Link); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_rpc_error_details_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   15,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_rpc_error_details_proto_goTypes,
+		DependencyIndexes: file_google_rpc_error_details_proto_depIdxs,
+		MessageInfos:      file_google_rpc_error_details_proto_msgTypes,
+	}.Build()
+	File_google_rpc_error_details_proto = out.File
+	file_google_rpc_error_details_proto_rawDesc = nil
+	file_google_rpc_error_details_proto_goTypes = nil
+	file_google_rpc_error_details_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/hack/tools/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
new file mode 100644
index 0000000000..6ad1b1c1df
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -0,0 +1,203 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/rpc/status.proto
+
+package status
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The `Status` type defines a logical error model that is suitable for
+// different programming environments, including REST APIs and RPC APIs. It is
+// used by [gRPC](https://github.com/grpc). Each `Status` message contains
+// three pieces of data: error code, error message, and error details.
+//
+// You can find out more about this error model and how to work with it in the
+// [API Design Guide](https://cloud.google.com/apis/design/errors).
+type Status struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The status code, which should be an enum value of
+	// [google.rpc.Code][google.rpc.Code].
+	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+	// A developer-facing error message, which should be in English. Any
+	// user-facing error message should be localized and sent in the
+	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
+	// by the client.
+	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	// A list of messages that carry the error details.  There is a common set of
+	// message types for APIs to use.
+	Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
+}
+
+func (x *Status) Reset() {
+	*x = Status{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_rpc_status_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Status) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+	mi := &file_google_rpc_status_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+	return file_google_rpc_status_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Status) GetCode() int32 {
+	if x != nil {
+		return x.Code
+	}
+	return 0
+}
+
+func (x *Status) GetMessage() string {
+	if x != nil {
+		return x.Message
+	}
+	return ""
+}
+
+func (x *Status) GetDetails() []*anypb.Any {
+	if x != nil {
+		return x.Details
+	}
+	return nil
+}
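+
+// Illustrative usage sketch (not generated code; assumes this package is
+// imported as spb "google.golang.org/genproto/googleapis/rpc/status"):
+//
+//	s := &spb.Status{Code: 5, Message: "resource not found"} // 5 = NOT_FOUND in google.rpc.Code
+//	_ = s.GetCode()    // 5
+//	_ = s.GetMessage() // "resource not found"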
+
+var File_google_rpc_status_proto protoreflect.FileDescriptor
+
+var file_google_rpc_status_proto_rawDesc = []byte{
+	0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61,
+	0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f,
+	0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18,
+	0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61,
+	0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+	0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74,
+	0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+	0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74,
+	0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_rpc_status_proto_rawDescOnce sync.Once
+	file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc
+)
+
+func file_google_rpc_status_proto_rawDescGZIP() []byte {
+	file_google_rpc_status_proto_rawDescOnce.Do(func() {
+		file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData)
+	})
+	return file_google_rpc_status_proto_rawDescData
+}
+
+var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_rpc_status_proto_goTypes = []interface{}{
+	(*Status)(nil),    // 0: google.rpc.Status
+	(*anypb.Any)(nil), // 1: google.protobuf.Any
+}
+var file_google_rpc_status_proto_depIdxs = []int32{
+	1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any
+	1, // [1:1] is the sub-list for method output_type
+	1, // [1:1] is the sub-list for method input_type
+	1, // [1:1] is the sub-list for extension type_name
+	1, // [1:1] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_rpc_status_proto_init() }
+func file_google_rpc_status_proto_init() {
+	if File_google_rpc_status_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Status); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_rpc_status_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_rpc_status_proto_goTypes,
+		DependencyIndexes: file_google_rpc_status_proto_depIdxs,
+		MessageInfos:      file_google_rpc_status_proto_msgTypes,
+	}.Build()
+	File_google_rpc_status_proto = out.File
+	file_google_rpc_status_proto_rawDesc = nil
+	file_google_rpc_status_proto_goTypes = nil
+	file_google_rpc_status_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/AUTHORS b/hack/tools/vendor/google.golang.org/grpc/AUTHORS
new file mode 100644
index 0000000000..e491a9e7f7
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
diff --git a/hack/tools/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/hack/tools/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
new file mode 100644
index 0000000000..9d4213ebca
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## Community Code of Conduct
+
+gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/hack/tools/vendor/google.golang.org/grpc/CONTRIBUTING.md b/hack/tools/vendor/google.golang.org/grpc/CONTRIBUTING.md
new file mode 100644
index 0000000000..0854d298e4
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -0,0 +1,73 @@
+# How to contribute
+
+We definitely welcome your patches and contributions to gRPC! Please read the gRPC
+organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
+and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
+
+If you are new to GitHub, please start by reading the [Pull Request howto](https://help.github.com/articles/about-pull-requests/).
+
+## Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
+
+## Guidelines for Pull Requests
+How to get your contributions merged smoothly and quickly.
+
+- Create **small PRs** that are narrowly focused on **addressing a single
+  concern**. We often receive PRs that try to fix several things at a time; if
+  only one fix is considered acceptable, nothing gets merged, and both the
+  author's and the reviewers' time is wasted. Create more PRs to address
+  different concerns and everyone will be happy.
+
+- If you are searching for features to work on, issues labeled [Status: Help
+  Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
+  are a great place to start. These issues are well-documented and usually can be
+  resolved with a single pull request.
+
+- If you are adding a new file, make sure it has the copyright message template 
+  at the top as a comment. You can copy over the message from an existing file 
+  and update the year.
+
+- The grpc package should only depend on standard Go packages and a small number
+  of exceptions. If your contribution introduces new dependencies which are NOT
+  in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
+  discussion with gRPC-Go authors and consultants.
+
+- For speculative changes, consider opening an issue and discussing it first. If
+  you are suggesting a behavioral or API change, consider starting with a [gRFC
+  proposal](https://github.com/grpc/proposal).
+
+- Provide a good **PR description** as a record of **what** change is being made
+  and **why** it was made. Link to a github issue if it exists.
+
+- If you want to fix formatting or style, consider whether your changes are an 
+  obvious improvement or might be considered a personal preference. If a style 
+  change is based on preference, it likely will not be accepted. If it corrects 
+  widely agreed-upon anti-patterns, then please do create a PR and explain the 
+  benefits of the change.
+
+- Unless your PR is trivial, you should expect there will be reviewer comments
+  that you'll need to address before merging. We'll mark it as `Status: Requires
+  Reporter Clarification` if we expect you to respond to these comments in a
+  timely manner. If the PR remains inactive for 6 days, it will be marked as
+  `stale` and automatically close 7 days after that if we don't hear back from
+  you.
+
+- Maintain **clean commit history** and use **meaningful commit messages**. PRs
+  with messy commit history are difficult to review and won't be merged. Use
+  `rebase -i upstream/master` to curate your commit history and/or to bring in
+  latest changes from master (but avoid rebasing in the middle of a code
+  review).
+
+- Keep your PR up to date with upstream/master (if there are merge conflicts, we
+  can't really merge your change).
+
+- **All tests need to be passing** before your change can be merged. We
+  recommend you **run tests locally** before creating your PR to catch breakages
+  early on.
+  - `./scripts/vet.sh` to catch vet errors
+  - `go test -cpu 1,4 -timeout 7m ./...` to run the tests
+  - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
+
+- Exceptions to the rules can be made if there's a compelling reason for doing so.
diff --git a/hack/tools/vendor/google.golang.org/grpc/GOVERNANCE.md b/hack/tools/vendor/google.golang.org/grpc/GOVERNANCE.md
new file mode 100644
index 0000000000..d6ff267471
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/GOVERNANCE.md
@@ -0,0 +1 @@
+This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md).
diff --git a/hack/tools/vendor/google.golang.org/grpc/LICENSE b/hack/tools/vendor/google.golang.org/grpc/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/hack/tools/vendor/google.golang.org/grpc/MAINTAINERS.md b/hack/tools/vendor/google.golang.org/grpc/MAINTAINERS.md
new file mode 100644
index 0000000000..5d4096d46a
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -0,0 +1,36 @@
+This page lists all active maintainers of this repository. If you were a
+maintainer and would like to add your name to the Emeritus list, please send us a
+PR.
+
+See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md)
+for governance guidelines and how to become a maintainer.
+See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md)
+for general contribution guidelines.
+
+## Maintainers (in alphabetical order)
+
+- [aranjans](https://github.com/aranjans), Google LLC
+- [arjan-bal](https://github.com/arjan-bal), Google LLC
+- [arvindbr8](https://github.com/arvindbr8), Google LLC
+- [atollena](https://github.com/atollena), Datadog, Inc.
+- [dfawley](https://github.com/dfawley), Google LLC
+- [easwars](https://github.com/easwars), Google LLC
+- [erm-g](https://github.com/erm-g), Google LLC
+- [gtcooke94](https://github.com/gtcooke94), Google LLC
+- [purnesh42h](https://github.com/purnesh42h), Google LLC
+- [zasweq](https://github.com/zasweq), Google LLC
+
+## Emeritus Maintainers (in alphabetical order)
+- [adelez](https://github.com/adelez)
+- [canguler](https://github.com/canguler)
+- [cesarghali](https://github.com/cesarghali)
+- [iamqizhao](https://github.com/iamqizhao)
+- [jeanbza](https://github.com/jeanbza)
+- [jtattermusch](https://github.com/jtattermusch)
+- [lyuxuan](https://github.com/lyuxuan)
+- [makmukhi](https://github.com/makmukhi)
+- [matt-kwong](https://github.com/matt-kwong)
+- [menghanl](https://github.com/menghanl)
+- [nicolasnoble](https://github.com/nicolasnoble)
+- [srini100](https://github.com/srini100)
+- [yongni](https://github.com/yongni)
diff --git a/hack/tools/vendor/google.golang.org/grpc/Makefile b/hack/tools/vendor/google.golang.org/grpc/Makefile
new file mode 100644
index 0000000000..be38384ff6
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/Makefile
@@ -0,0 +1,49 @@
+all: vet test testrace
+
+build:
+	go build google.golang.org/grpc/...
+
+clean:
+	go clean -i google.golang.org/grpc/...
+
+deps:
+	GO111MODULE=on go get -d -v google.golang.org/grpc/...
+
+proto:
+	@ if ! which protoc > /dev/null; then \
+		echo "error: protoc not installed" >&2; \
+		exit 1; \
+	fi
+	go generate google.golang.org/grpc/...
+
+test:
+	go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
+
+testsubmodule:
+	cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/...
+	cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/...
+
+testrace:
+	go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
+
+testdeps:
+	GO111MODULE=on go get -d -v -t google.golang.org/grpc/...
+
+vet: vetdeps
+	./scripts/vet.sh
+
+vetdeps:
+	./scripts/vet.sh -install
+
+.PHONY: \
+	all \
+	build \
+	clean \
+	deps \
+	proto \
+	test \
+	testsubmodule \
+	testrace \
+	testdeps \
+	vet \
+	vetdeps
diff --git a/hack/tools/vendor/google.golang.org/grpc/NOTICE.txt b/hack/tools/vendor/google.golang.org/grpc/NOTICE.txt
new file mode 100644
index 0000000000..530197749e
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/NOTICE.txt
@@ -0,0 +1,13 @@
+Copyright 2014 gRPC authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/hack/tools/vendor/google.golang.org/grpc/README.md b/hack/tools/vendor/google.golang.org/grpc/README.md
new file mode 100644
index 0000000000..b572707c62
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/README.md
@@ -0,0 +1,107 @@
+# gRPC-Go
+
+[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API]
+[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
+[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go)
+
+The [Go][] implementation of [gRPC][]: A high performance, open source, general
+RPC framework that puts mobile and HTTP/2 first. For more information see the
+[Go gRPC docs][], or jump directly into the [quick start][].
+
+## Prerequisites
+
+- **[Go][]**: any one of the **two latest major** [releases][go-releases].
+
+## Installation
+
+Simply add the following import to your code, and then `go [build|run|test]`
+will automatically fetch the necessary dependencies:
+
+```go
+import "google.golang.org/grpc"
+```
+
+> **Note:** If you are trying to access `grpc-go` from **China**, see the
+> [FAQ](#FAQ) below.
+
+## Learn more
+
+- [Go gRPC docs][], which include a [quick start][] and [API
+  reference][API] among other resources
+- [Low-level technical docs](Documentation) from this repository
+- [Performance benchmark][]
+- [Examples](examples)
+
+## FAQ
+
+### I/O Timeout Errors
+
+The `golang.org` domain may be blocked in some countries. `go get` usually
+produces an error like the following when this happens:
+
+```console
+$ go get -u google.golang.org/grpc
+package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
+```
+
+To build Go code, there are several options:
+
+- Set up a VPN and access google.golang.org through that.
+
+- With Go module support: it is possible to use the `replace` feature of `go
+  mod` to create aliases for golang.org packages.  In your project's directory:
+
+  ```sh
+  go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
+  go mod tidy
+  go mod vendor
+  go build -mod=vendor
+  ```
+
+  Again, this will need to be done for all transitive dependencies hosted on
+  golang.org as well. For details, refer to [golang/go issue
+  #28652](https://github.com/golang/go/issues/28652).
+
+### Compiling error, undefined: grpc.SupportPackageIsVersion
+
+Please update to the latest version of gRPC-Go using
+`go get google.golang.org/grpc`.
+
+### How to turn on logging
+
+The default logger is controlled by environment variables. Turn everything on
+like this:
+
+```console
+$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
+$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
+```
+
+### The RPC failed with error `"code = Unavailable desc = transport is closing"`
+
+This error means the connection the RPC is using was closed, and there are many
+possible reasons, including:
+ 1. misconfigured transport credentials: the connection failed during handshaking
+ 1. bytes disrupted, possibly by a proxy in between
+ 1. server shutdown
+ 1. Keepalive parameters caused connection shutdown, for example if you have
+    configured your server to terminate connections regularly to [trigger DNS
+    lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779).
+    If this is the case, you may want to increase your
+    [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters)
+    to allow longer RPC calls to finish.
+
+It can be tricky to debug this because the error happens on the client side but
+the root cause of the connection being closed is on the server side. Turn on
+logging on __both client and server__, and see if there are any transport
+errors.
+
+[API]: https://pkg.go.dev/google.golang.org/grpc
+[Go]: https://golang.org
+[Go module]: https://github.com/golang/go/wiki/Modules
+[gRPC]: https://grpc.io
+[Go gRPC docs]: https://grpc.io/docs/languages/go
+[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608
+[quick start]: https://grpc.io/docs/languages/go/quickstart
+[go-releases]: https://golang.org/doc/devel/release.html
diff --git a/hack/tools/vendor/google.golang.org/grpc/SECURITY.md b/hack/tools/vendor/google.golang.org/grpc/SECURITY.md
new file mode 100644
index 0000000000..abab279379
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/SECURITY.md
@@ -0,0 +1,3 @@
+# Security Policy
+
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/hack/tools/vendor/google.golang.org/grpc/attributes/attributes.go b/hack/tools/vendor/google.golang.org/grpc/attributes/attributes.go
new file mode 100644
index 0000000000..52d530d7ad
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package attributes defines a generic key/value store used in various gRPC
+// components.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package attributes
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Attributes is an immutable struct for storing and retrieving generic
+// key/value pairs.  Keys must be hashable, and users should define their own
+// types for keys.  Values should not be modified after they are added to an
+// Attributes or if they were received from one.  If values implement 'Equal(o
+// any) bool', it will be called by (*Attributes).Equal to determine whether
+// two values with the same key should be considered equal.
+type Attributes struct {
+	m map[any]any
+}
+
+// New returns a new Attributes containing the key/value pair.
+func New(key, value any) *Attributes {
+	return &Attributes{m: map[any]any{key: value}}
+}
+
+// WithValue returns a new Attributes containing the previous keys and values
+// and the new key/value pair.  If the same key appears multiple times, the
+// last value overwrites all previous values for that key.  To remove an
+// existing key, use a nil value.  value should not be modified later.
+func (a *Attributes) WithValue(key, value any) *Attributes {
+	if a == nil {
+		return New(key, value)
+	}
+	n := &Attributes{m: make(map[any]any, len(a.m)+1)}
+	for k, v := range a.m {
+		n.m[k] = v
+	}
+	n.m[key] = value
+	return n
+}
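+
+// Illustrative sketch (not part of the upstream file): callers typically use
+// an unexported key type so keys from different packages cannot collide.
+//
+//	type myKey struct{}
+//
+//	a := attributes.New(myKey{}, "v1")
+//	a = a.WithValue(myKey{}, "v2") // the last value for a key wins
+//	_ = a.Value(myKey{})           // "v2"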
+
+// Value returns the value associated with these attributes for key, or nil if
+// no value is associated with key.  The returned value should not be modified.
+func (a *Attributes) Value(key any) any {
+	if a == nil {
+		return nil
+	}
+	return a.m[key]
+}
+
+// Equal returns whether a and o are equivalent.  If 'Equal(o any) bool' is
+// implemented for a value in the attributes, it is called to determine if the
+// value matches the one stored in the other attributes.  If Equal is not
+// implemented, standard equality is used to determine if the two values are
+// equal. Note that some types (e.g. maps) aren't comparable by default, so
+// they must be wrapped in a struct, or in an alias type, with Equal defined.
+func (a *Attributes) Equal(o *Attributes) bool {
+	if a == nil && o == nil {
+		return true
+	}
+	if a == nil || o == nil {
+		return false
+	}
+	if len(a.m) != len(o.m) {
+		return false
+	}
+	for k, v := range a.m {
+		ov, ok := o.m[k]
+		if !ok {
+			// o missing element of a
+			return false
+		}
+		if eq, ok := v.(interface{ Equal(o any) bool }); ok {
+			if !eq.Equal(ov) {
+				return false
+			}
+		} else if v != ov {
+			// Fall back to a standard equality check if Equal is unimplemented.
+			return false
+		}
+	}
+	return true
+}
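+
+// Illustrative sketch (hypothetical type, not part of the upstream file): a
+// non-comparable map wrapped in a struct with Equal defined, as the doc
+// comment above suggests.
+//
+//	type hdrs struct{ m map[string]string }
+//
+//	func (h hdrs) Equal(o any) bool {
+//		oh, ok := o.(hdrs)
+//		if !ok || len(h.m) != len(oh.m) {
+//			return false
+//		}
+//		for k, v := range h.m {
+//			if ov, ok := oh.m[k]; !ok || ov != v {
+//				return false
+//			}
+//		}
+//		return true
+//	}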
+
+// String prints the attribute map. If any key or value in the map implements
+// fmt.Stringer, its String method is called and the result is appended.
+func (a *Attributes) String() string {
+	var sb strings.Builder
+	sb.WriteString("{")
+	first := true
+	for k, v := range a.m {
+		if !first {
+			sb.WriteString(", ")
+		}
+		sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
+		first = false
+	}
+	sb.WriteString("}")
+	return sb.String()
+}
+
+func str(x any) (s string) {
+	if v, ok := x.(fmt.Stringer); ok {
+		return fmt.Sprint(v)
+	} else if v, ok := x.(string); ok {
+		return v
+	}
+	return fmt.Sprintf("<%p>", x)
+}
+
+// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
+// the Attributes correctly when printing (via pretty.JSON) structs containing
+// Attributes as fields.
+//
+// It is impossible to unmarshal attributes from a JSON representation, and
+// this method is meant only for debugging purposes.
+func (a *Attributes) MarshalJSON() ([]byte, error) {
+	return []byte(a.String()), nil
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/backoff.go b/hack/tools/vendor/google.golang.org/grpc/backoff.go
new file mode 100644
index 0000000000..29475e31c9
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/backoff.go
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// See internal/backoff package for the backoff implementation. This file is
+// kept for the exported types and API backward compatibility.
+
+package grpc
+
+import (
+	"time"
+
+	"google.golang.org/grpc/backoff"
+)
+
+// DefaultBackoffConfig uses values specified for backoff in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
+var DefaultBackoffConfig = BackoffConfig{
+	MaxDelay: 120 * time.Second,
+}
+
+// BackoffConfig defines the parameters for the default gRPC backoff strategy.
+//
+// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
+type BackoffConfig struct {
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
+
+// ConnectParams defines the parameters for connecting and retrying. Users are
+// encouraged to use this instead of the BackoffConfig type defined above. See
+// here for more details:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ConnectParams struct {
+	// Backoff specifies the configuration options for connection backoff.
+	Backoff backoff.Config
+	// MinConnectTimeout is the minimum amount of time we are willing to give a
+	// connection to complete.
+	MinConnectTimeout time.Duration
+}
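+
+// Illustrative sketch (not part of the upstream file; assumes a grpc-go
+// version that provides grpc.NewClient): wiring ConnectParams into a client.
+//
+//	conn, err := grpc.NewClient(target, grpc.WithConnectParams(grpc.ConnectParams{
+//		Backoff:           backoff.DefaultConfig,
+//		MinConnectTimeout: 10 * time.Second,
+//	}))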
diff --git a/hack/tools/vendor/google.golang.org/grpc/backoff/backoff.go b/hack/tools/vendor/google.golang.org/grpc/backoff/backoff.go
new file mode 100644
index 0000000000..d7b40b7cb6
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/backoff/backoff.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff provides configuration options for backoff.
+//
+// More details can be found at:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// All APIs in this package are experimental.
+package backoff
+
+import "time"
+
+// Config defines the configuration options for backoff.
+type Config struct {
+	// BaseDelay is the amount of time to backoff after the first failure.
+	BaseDelay time.Duration
+	// Multiplier is the factor with which to multiply backoffs after a
+	// failed retry. Should ideally be greater than 1.
+	Multiplier float64
+	// Jitter is the factor with which backoffs are randomized.
+	Jitter float64
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
+
+// DefaultConfig is a backoff configuration with the default values specified
+// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// This should be useful for callers who want to configure backoff with
+// non-default values only for a subset of the options.
+var DefaultConfig = Config{
+	BaseDelay:  1.0 * time.Second,
+	Multiplier: 1.6,
+	Jitter:     0.2,
+	MaxDelay:   120 * time.Second,
+}
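+
+// Illustrative sketch (not part of the upstream file): Config is a value
+// type, so a caller can copy DefaultConfig and override a single field, as
+// the doc comment above suggests.
+//
+//	cfg := backoff.DefaultConfig
+//	cfg.MaxDelay = 30 * time.Second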
diff --git a/hack/tools/vendor/google.golang.org/grpc/balancer/balancer.go b/hack/tools/vendor/google.golang.org/grpc/balancer/balancer.go
new file mode 100644
index 0000000000..b181f386a1
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -0,0 +1,464 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package balancer defines APIs for load balancing in gRPC.
+// All APIs in this package are experimental.
+package balancer
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"net"
+	"strings"
+
+	"google.golang.org/grpc/channelz"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+	estats "google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// m is a map from name to balancer builder.
+	m = make(map[string]Builder)
+
+	logger = grpclog.Component("balancer")
+)
+
+// Register registers the balancer builder to the balancer map. b.Name
+// (lowercased) will be used as the name registered with this builder.  If the
+// Builder implements ConfigParser, ParseConfig will be called when new service
+// configs are received by the resolver, and the result will be provided to the
+// Balancer in UpdateClientConnState.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Balancers are
+// registered with the same name, the one registered last will take effect.
+func Register(b Builder) {
+	name := strings.ToLower(b.Name())
+	if name != b.Name() {
+		// TODO: Skip the use of strings.ToLower() to index the map after v1.59
+		// is released to switch to case sensitive balancer registry. Also,
+		// remove this warning and update the docstrings for Register and Get.
+		logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name())
+	}
+	m[name] = b
+}
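+
+// A minimal sketch of the registration pattern described above (myBuilder is
+// hypothetical and assumed to implement Builder, and optionally ConfigParser):
+//
+//	type myBuilder struct{} // Build and Name methods omitted for brevity
+//
+//	func init() {
+//		balancer.Register(myBuilder{}) // indexed under strings.ToLower(myBuilder{}.Name())
+//	}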
+
+// unregisterForTesting deletes the balancer with the given name from the
+// balancer map.
+//
+// This function is not thread-safe.
+func unregisterForTesting(name string) {
+	delete(m, name)
+}
+
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+	return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+	scs.connectedAddress = addr
+}
+
+func init() {
+	internal.BalancerUnregister = unregisterForTesting
+	internal.ConnectedAddress = connectedAddress
+	internal.SetConnectedAddress = setConnectedAddress
+}
+
+// Get returns the balancer builder registered with the given name.
+// Note that the compare is done in a case-insensitive fashion.
+// If no builder is registered with the name, nil will be returned.
+func Get(name string) Builder {
+	if strings.ToLower(name) != name {
+		// TODO: Skip the use of strings.ToLower() to index the map after v1.59
+		// is released to switch to case sensitive balancer registry. Also,
+		// remove this warning and update the docstrings for Register and Get.
+		logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name)
+	}
+	if b, ok := m[strings.ToLower(name)]; ok {
+		return b
+	}
+	return nil
+}
+
+// A SubConn represents a single connection to a gRPC backend service.
+//
+// Each SubConn contains a list of addresses.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger the
+// connecting, Balancers must call Connect.  If a connection re-enters IDLE,
+// Balancers must call Connect again to trigger a new connection attempt.
+//
+// gRPC will try to connect to the addresses in sequence, and stop trying the
+// remainder once the first connection is successful. If an attempt to connect
+// to all addresses encounters an error, the SubConn will enter
+// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE.
+//
+// Once established, if a connection is lost, the SubConn will transition
+// directly to IDLE.
+//
+// This interface is to be implemented by gRPC. Users should not need their own
+// implementation of this interface. For situations like testing, any
+// implementations should embed this interface. This allows gRPC to add new
+// methods to this interface.
+type SubConn interface {
+	// UpdateAddresses updates the addresses used in this SubConn.
+	// gRPC checks if the currently-connected address is still in the new list.
+	// If it's in the list, the connection will be kept.
+	// If it's not in the list, the connection will be gracefully closed, and
+	// a new connection will be created.
+	//
+	// This will trigger a state transition for the SubConn.
+	//
+	// Deprecated: this method will be removed.  Create new SubConns for new
+	// addresses instead.
+	UpdateAddresses([]resolver.Address)
+	// Connect starts the connecting for this SubConn.
+	Connect()
+	// GetOrBuildProducer returns a reference to the existing Producer for this
+	// ProducerBuilder in this SubConn, or, if one does not currently exist,
+	// creates a new one and returns it.  Returns a close function which must
+	// be called when the Producer is no longer needed.
+	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
+	// Shutdown shuts down the SubConn gracefully.  Any started RPCs will be
+	// allowed to complete.  No future calls should be made on the SubConn.
+	// One final state update will be delivered to the StateListener (or
+	// UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
+	// indicate the shutdown operation.  This may be delivered before
+	// in-progress RPCs are complete and the actual connection is closed.
+	Shutdown()
+}
+
+// NewSubConnOptions contains options to create a new SubConn.
+type NewSubConnOptions struct {
+	// CredsBundle is the credentials bundle that will be used in the created
+	// SubConn. If it's nil, the original creds from grpc DialOptions will be
+	// used.
+	//
+	// Deprecated: Use the Attributes field in resolver.Address to pass
+	// arbitrary data to the credential handshaker.
+	CredsBundle credentials.Bundle
+	// HealthCheckEnabled indicates whether the health check service should be
+	// enabled on this SubConn.
+	HealthCheckEnabled bool
+	// StateListener is called when the state of the subconn changes.  If nil,
+	// Balancer.UpdateSubConnState will be called instead.  Will never be
+	// invoked until after Connect() is called on the SubConn created with
+	// these options.
+	StateListener func(SubConnState)
+}
+
+// State contains the balancer's state relevant to the gRPC ClientConn.
+type State struct {
+	// State contains the connectivity state of the balancer, which is used to
+	// determine the state of the ClientConn.
+	ConnectivityState connectivity.State
+	// Picker is used to choose connections (SubConns) for RPCs.
+	Picker Picker
+}
+
+// ClientConn represents a gRPC ClientConn.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For situations like
+// testing, the new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
+type ClientConn interface {
+	// NewSubConn is called by balancer to create a new SubConn.
+	// It doesn't block and wait for the connections to be established.
+	// Behaviors of the SubConn can be controlled by options.
+	//
+	// Deprecated: please be aware that in a future version, SubConns will only
+	// support one address per SubConn.
+	NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
+	// RemoveSubConn removes the SubConn from ClientConn.
+	// The SubConn will be shutdown.
+	//
+	// Deprecated: use SubConn.Shutdown instead.
+	RemoveSubConn(SubConn)
+	// UpdateAddresses updates the addresses used in the passed in SubConn.
+	// gRPC checks if the currently connected address is still in the new list.
+	// If so, the connection will be kept. Else, the connection will be
+	// gracefully closed, and a new connection will be created.
+	//
+	// This may trigger a state transition for the SubConn.
+	//
+	// Deprecated: this method will be removed.  Create new SubConns for new
+	// addresses instead.
+	UpdateAddresses(SubConn, []resolver.Address)
+
+	// UpdateState notifies gRPC that the balancer's internal state has
+	// changed.
+	//
+	// gRPC will update the connectivity state of the ClientConn, and will call
+	// Pick on the new Picker to pick new SubConns.
+	UpdateState(State)
+
+	// ResolveNow is called by the balancer to notify gRPC to do name resolution.
+	ResolveNow(resolver.ResolveNowOptions)
+
+	// Target returns the dial target for this ClientConn.
+	//
+	// Deprecated: Use the Target field in the BuildOptions instead.
+	Target() string
+}
+
+// BuildOptions contains additional information for Build.
+type BuildOptions struct {
+	// DialCreds is the transport credentials to use when communicating with a
+	// remote load balancer server. Balancer implementations which do not
+	// communicate with a remote load balancer server can ignore this field.
+	DialCreds credentials.TransportCredentials
+	// CredsBundle is the credentials bundle to use when communicating with a
+	// remote load balancer server. Balancer implementations which do not
+	// communicate with a remote load balancer server can ignore this field.
+	CredsBundle credentials.Bundle
+	// Dialer is the custom dialer to use when communicating with a remote load
+	// balancer server. Balancer implementations which do not communicate with a
+	// remote load balancer server can ignore this field.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// Authority is the server name to use as part of the authentication
+	// handshake when communicating with a remote load balancer server. Balancer
+	// implementations which do not communicate with a remote load balancer
+	// server can ignore this field.
+	Authority string
+	// ChannelzParent is the parent ClientConn's channelz channel.
+	ChannelzParent channelz.Identifier
+	// CustomUserAgent is the custom user agent set on the parent ClientConn.
+	// The balancer should set the same custom user agent if it creates a
+	// ClientConn.
+	CustomUserAgent string
+	// Target contains the parsed address info of the dial target. It is the
+	// same resolver.Target as passed to the resolver. See the documentation for
+	// the resolver.Target type for details about what it contains.
+	Target resolver.Target
+	// MetricsRecorder is the metrics recorder that balancers can use to record
+	// metrics. Balancer implementations which do not register metrics on the
+	// metrics registry and record on them can ignore this field.
+	MetricsRecorder estats.MetricsRecorder
+}
+
+// Builder creates a balancer.
+type Builder interface {
+	// Build creates a new balancer with the ClientConn.
+	Build(cc ClientConn, opts BuildOptions) Balancer
+	// Name returns the name of balancers built by this builder.
+	// It will be used to pick balancers (for example in service config).
+	Name() string
+}
+
+// ConfigParser parses load balancer configs.
+type ConfigParser interface {
+	// ParseConfig parses the JSON load balancer config provided into an
+	// internal form or returns an error if the config is invalid.  For future
+	// compatibility reasons, unknown fields in the config should be ignored.
+	ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
+}
+
+// PickInfo contains additional information for the Pick operation.
+type PickInfo struct {
+	// FullMethodName is the method name that NewClientStream() is called
+	// with. The canonical format is /service/Method.
+	FullMethodName string
+	// Ctx is the RPC's context, and may contain relevant RPC-level information
+	// like the outgoing header metadata.
+	Ctx context.Context
+}
+
+// DoneInfo contains additional information for done.
+type DoneInfo struct {
+	// Err is the rpc error the RPC finished with. It could be nil.
+	Err error
+	// Trailer contains the metadata from the RPC's trailer, if present.
+	Trailer metadata.MD
+	// BytesSent indicates if any bytes have been sent to the server.
+	BytesSent bool
+	// BytesReceived indicates if any bytes have been received from the server.
+	BytesReceived bool
+	// ServerLoad is the load received from server. It's usually sent as part of
+	// trailing metadata.
+	//
+	// The only supported type now is *orca_v3.LoadReport.
+	ServerLoad any
+}
+
+var (
+	// ErrNoSubConnAvailable indicates no SubConn is available for pick().
+	// gRPC will block the RPC until a new picker is available via UpdateState().
+	ErrNoSubConnAvailable = errors.New("no SubConn is available")
+	// ErrTransientFailure indicates all SubConns are in TransientFailure.
+	// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
+	//
+	// Deprecated: return an appropriate error based on the last resolution or
+	// connection attempt instead.  The behavior is the same for any non-gRPC
+	// status error.
+	ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
+)
+
+// PickResult contains information related to a connection chosen for an RPC.
+type PickResult struct {
+	// SubConn is the connection to use for this pick, if its state is Ready.
+	// If the state is not Ready, gRPC will block the RPC until a new Picker is
+	// provided by the balancer (using ClientConn.UpdateState).  The SubConn
+	// must be one returned by ClientConn.NewSubConn.
+	SubConn SubConn
+
+	// Done is called when the RPC is completed.  If the SubConn is not ready,
+	// this will be called with a nil parameter.  If the SubConn is not a valid
+	// type, Done may not be called.  May be nil if the balancer does not wish
+	// to be notified when the RPC completes.
+	Done func(DoneInfo)
+
+	// Metadata provides a way for LB policies to inject arbitrary per-call
+	// metadata. Any metadata returned here will be merged with existing
+	// metadata added by the client application.
+	//
+	// LB policies with child policies are responsible for propagating metadata
+	// injected by their children to the ClientConn, as part of Pick().
+	Metadata metadata.MD
+}
+
+// TransientFailureError returns e.  It exists for backward compatibility and
+// will be deleted soon.
+//
+// Deprecated: no longer necessary, picker errors are treated this way by
+// default.
+func TransientFailureError(e error) error { return e }
+
+// Picker is used by gRPC to pick a SubConn to send an RPC.
+// Balancer is expected to generate a new picker from its snapshot every time its
+// internal state has changed.
+//
+// The pickers used by gRPC can be updated by ClientConn.UpdateState().
+type Picker interface {
+	// Pick returns the connection to use for this RPC and related information.
+	//
+	// Pick should not block.  If the balancer needs to do I/O or any blocking
+	// or time-consuming work to service this call, it should return
+	// ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when
+	// the Picker is updated (using ClientConn.UpdateState).
+	//
+	// If an error is returned:
+	//
+	// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
+	//   Picker is provided by the balancer (using ClientConn.UpdateState).
+	//
+	// - If the error is a status error (implemented by the grpc/status
+	//   package), gRPC will terminate the RPC with the code and message
+	//   provided.
+	//
+	// - For all other errors, wait-for-ready RPCs will wait, but
+	//   non-wait-for-ready RPCs will be terminated with this error's Error()
+	//   string and status code Unavailable.
+	Pick(info PickInfo) (PickResult, error)
+}
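+
+// A sketch of the first error case above (queuePicker is hypothetical): a
+// picker that makes gRPC queue RPCs until the balancer supplies a new Picker.
+//
+//	type queuePicker struct{}
+//
+//	func (queuePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+//		// gRPC blocks the RPC until ClientConn.UpdateState delivers a new picker.
+//		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+//	}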
+
+// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
+// the connectivity states.
+//
+// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
+//
+// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
+// guaranteed to be called synchronously from the same goroutine.  There's no
+// guarantee on picker.Pick, it may be called anytime.
+type Balancer interface {
+	// UpdateClientConnState is called by gRPC when the state of the ClientConn
+	// changes.  If the error returned is ErrBadResolverState, the ClientConn
+	// will begin calling ResolveNow on the active name resolver with
+	// exponential backoff until a subsequent call to UpdateClientConnState
+	// returns a nil error.  Any other errors are currently ignored.
+	UpdateClientConnState(ClientConnState) error
+	// ResolverError is called by gRPC when the name resolver reports an error.
+	ResolverError(error)
+	// UpdateSubConnState is called by gRPC when the state of a SubConn
+	// changes.
+	//
+	// Deprecated: Use NewSubConnOptions.StateListener when creating the
+	// SubConn instead.
+	UpdateSubConnState(SubConn, SubConnState)
+	// Close closes the balancer. The balancer is not currently required to
+	// call SubConn.Shutdown for its existing SubConns; however, this will be
+	// required in a future release, so it is recommended.
+	Close()
+}
+
+// ExitIdler is an optional interface for balancers to implement.  If
+// implemented, ExitIdle will be called when ClientConn.Connect is called, if
+// the ClientConn is idle.  If unimplemented, ClientConn.Connect will cause
+// all SubConns to connect.
+//
+// Notice: it will be required for all balancers to implement this in a future
+// release.
+type ExitIdler interface {
+	// ExitIdle instructs the LB policy to reconnect to backends / exit the
+	// IDLE state, if appropriate and possible.  Note that SubConns that enter
+	// the IDLE state will not reconnect until SubConn.Connect is called.
+	ExitIdle()
+}
+
+// SubConnState describes the state of a SubConn.
+type SubConnState struct {
+	// ConnectivityState is the connectivity state of the SubConn.
+	ConnectivityState connectivity.State
+	// ConnectionError is set if the ConnectivityState is TransientFailure,
+	// describing the reason the SubConn failed.  Otherwise, it is nil.
+	ConnectionError error
+	// connectedAddress contains the connected address when ConnectivityState is
+	// Ready. Otherwise, it is indeterminate.
+	connectedAddress resolver.Address
+}
+
+// ClientConnState describes the state of a ClientConn relevant to the
+// balancer.
+type ClientConnState struct {
+	ResolverState resolver.State
+	// The parsed load balancing configuration returned by the builder's
+	// ParseConfig method, if implemented.
+	BalancerConfig serviceconfig.LoadBalancingConfig
+}
+
+// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
+// problem with the provided name resolver data.
+var ErrBadResolverState = errors.New("bad resolver state")
+
+// A ProducerBuilder is a simple constructor for a Producer.  It is used by the
+// SubConn to create producers when needed.
+type ProducerBuilder interface {
+	// Build creates a Producer.  The first parameter is always a
+	// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
+	// associated SubConn), but is declared as `any` to avoid a dependency
+	// cycle.  Should also return a close function that will be called when all
+	// references to the Producer have been given up.
+	Build(grpcClientConnInterface any) (p Producer, close func())
+}
+
+// A Producer is a type shared among potentially many consumers.  It is
+// associated with a SubConn, and an implementation will typically contain
+// other methods to provide additional functionality, e.g. configuration or
+// subscription registration.
+type Producer any
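+
+// A usage sketch (oobProducerBuilder and oobProducer are hypothetical):
+// consumers share one Producer per builder per SubConn, and must call the
+// returned close function once they no longer need the Producer.
+//
+//	p, closeFn := sc.GetOrBuildProducer(oobProducerBuilder{})
+//	defer closeFn()
+//	producer := p.(*oobProducer) // concrete type known to the builder's author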
diff --git a/hack/tools/vendor/google.golang.org/grpc/balancer/base/balancer.go b/hack/tools/vendor/google.golang.org/grpc/balancer/base/balancer.go
new file mode 100644
index 0000000000..2b87bd79c7
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -0,0 +1,264 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package base
+
+import (
+	"errors"
+	"fmt"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/resolver"
+)
+
+var logger = grpclog.Component("balancer")
+
+type baseBuilder struct {
+	name          string
+	pickerBuilder PickerBuilder
+	config        Config
+}
+
+func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
+	bal := &baseBalancer{
+		cc:            cc,
+		pickerBuilder: bb.pickerBuilder,
+
+		subConns: resolver.NewAddressMap(),
+		scStates: make(map[balancer.SubConn]connectivity.State),
+		csEvltr:  &balancer.ConnectivityStateEvaluator{},
+		config:   bb.config,
+		state:    connectivity.Connecting,
+	}
+	// Initialize picker to a picker that always returns
+	// ErrNoSubConnAvailable, because when the state of a SubConn changes, we
+	// may call UpdateState with this picker.
+	bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
+	return bal
+}
+
+func (bb *baseBuilder) Name() string {
+	return bb.name
+}
+
+type baseBalancer struct {
+	cc            balancer.ClientConn
+	pickerBuilder PickerBuilder
+
+	csEvltr *balancer.ConnectivityStateEvaluator
+	state   connectivity.State
+
+	subConns *resolver.AddressMap
+	scStates map[balancer.SubConn]connectivity.State
+	picker   balancer.Picker
+	config   Config
+
+	resolverErr error // the last error reported by the resolver; cleared on successful resolution
+	connErr     error // the last connection error; cleared upon leaving TransientFailure
+}
+
+func (b *baseBalancer) ResolverError(err error) {
+	b.resolverErr = err
+	if b.subConns.Len() == 0 {
+		b.state = connectivity.TransientFailure
+	}
+
+	if b.state != connectivity.TransientFailure {
+		// The picker will not change since the balancer does not currently
+		// report an error.
+		return
+	}
+	b.regeneratePicker()
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: b.state,
+		Picker:            b.picker,
+	})
+}
+
+func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
+	// TODO: handle s.ResolverState.ServiceConfig?
+	if logger.V(2) {
+		logger.Info("base.baseBalancer: got new ClientConn state: ", s)
+	}
+	// Successful resolution; clear resolver error and ensure we return nil.
+	b.resolverErr = nil
+	// addrsSet is the set converted from addrs; it's used for quick lookup of an address.
+	addrsSet := resolver.NewAddressMap()
+	for _, a := range s.ResolverState.Addresses {
+		addrsSet.Set(a, nil)
+		if _, ok := b.subConns.Get(a); !ok {
+			// a is a new address (not existing in b.subConns).
+			var sc balancer.SubConn
+			opts := balancer.NewSubConnOptions{
+				HealthCheckEnabled: b.config.HealthCheck,
+				StateListener:      func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) },
+			}
+			sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts)
+			if err != nil {
+				logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
+				continue
+			}
+			b.subConns.Set(a, sc)
+			b.scStates[sc] = connectivity.Idle
+			b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle)
+			sc.Connect()
+		}
+	}
+	for _, a := range b.subConns.Keys() {
+		sci, _ := b.subConns.Get(a)
+		sc := sci.(balancer.SubConn)
+		// a was removed by resolver.
+		if _, ok := addrsSet.Get(a); !ok {
+			sc.Shutdown()
+			b.subConns.Delete(a)
+			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
+			// The entry will be deleted in updateSubConnState.
+		}
+	}
+	// If the resolver state contains no addresses, return an error so the
+	// ClientConn will trigger re-resolution. Also record this as a resolver
+	// error, so that when the overall state turns TransientFailure, the error
+	// message includes the zero-address information.
+	if len(s.ResolverState.Addresses) == 0 {
+		b.ResolverError(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+
+	b.regeneratePicker()
+	b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
+	return nil
+}
+
+// mergeErrors builds an error from the last connection error and the last
+// resolver error.  Must only be called if b.state is TransientFailure.
+func (b *baseBalancer) mergeErrors() error {
+	// connErr must always be non-nil unless there are no SubConns, in which
+	// case resolverErr must be non-nil.
+	if b.connErr == nil {
+		return fmt.Errorf("last resolver error: %v", b.resolverErr)
+	}
+	if b.resolverErr == nil {
+		return fmt.Errorf("last connection error: %v", b.connErr)
+	}
+	return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
+}
+
+// regeneratePicker takes a snapshot of the balancer, and generates a picker
+// from it. The picker is
+//   - errPicker if the balancer is in TransientFailure,
+//   - built by the pickerBuilder with all READY SubConns otherwise.
+func (b *baseBalancer) regeneratePicker() {
+	if b.state == connectivity.TransientFailure {
+		b.picker = NewErrPicker(b.mergeErrors())
+		return
+	}
+	readySCs := make(map[balancer.SubConn]SubConnInfo)
+
+	// Filter out all ready SCs from full subConn map.
+	for _, addr := range b.subConns.Keys() {
+		sci, _ := b.subConns.Get(addr)
+		sc := sci.(balancer.SubConn)
+		if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
+			readySCs[sc] = SubConnInfo{Address: addr}
+		}
+	}
+	b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
+}
+
+// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn.
+func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state)
+}
+
+func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	s := state.ConnectivityState
+	if logger.V(2) {
+		logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
+	}
+	oldS, ok := b.scStates[sc]
+	if !ok {
+		if logger.V(2) {
+			logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		}
+		return
+	}
+	if oldS == connectivity.TransientFailure &&
+		(s == connectivity.Connecting || s == connectivity.Idle) {
+		// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or
+		// CONNECTING transitions to prevent the aggregated state from being
+		// always CONNECTING when many backends exist but are all down.
+		if s == connectivity.Idle {
+			sc.Connect()
+		}
+		return
+	}
+	b.scStates[sc] = s
+	switch s {
+	case connectivity.Idle:
+		sc.Connect()
+	case connectivity.Shutdown:
+		// When an address was removed by resolver, b called Shutdown but kept
+		// the sc's state in scStates. Remove state for this sc here.
+		delete(b.scStates, sc)
+	case connectivity.TransientFailure:
+		// Save error to be reported via picker.
+		b.connErr = state.ConnectionError
+	}
+
+	b.state = b.csEvltr.RecordTransition(oldS, s)
+
+	// Regenerate picker when one of the following happens:
+	//  - this sc entered or left ready
+	//  - the aggregated state of balancer is TransientFailure
+	//    (may need to update error message)
+	if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
+		b.state == connectivity.TransientFailure {
+		b.regeneratePicker()
+	}
+	b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
+}
+
+// Close is a nop because base balancer doesn't have internal state to clean up,
+// and it doesn't need to call Shutdown for the SubConns.
+func (b *baseBalancer) Close() {
+}
+
+// ExitIdle is a nop because the base balancer attempts to stay connected to
+// all SubConns at all times.
+func (b *baseBalancer) ExitIdle() {
+}
+
+// NewErrPicker returns a Picker that always returns err on Pick().
+func NewErrPicker(err error) balancer.Picker {
+	return &errPicker{err: err}
+}
+
+// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
+//
+// Deprecated: use NewErrPicker instead.
+var NewErrPickerV2 = NewErrPicker
+
+type errPicker struct {
+	err error // Pick() always returns this err.
+}
+
+func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	return balancer.PickResult{}, p.err
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/balancer/base/base.go b/hack/tools/vendor/google.golang.org/grpc/balancer/base/base.go
new file mode 100644
index 0000000000..e31d76e338
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/balancer/base/base.go
@@ -0,0 +1,71 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package base defines a balancer base that can be used to build balancers with
+// different picking algorithms.
+//
+// The base balancer creates a new SubConn for each resolved address. The
+// provided picker will only be notified about READY SubConns.
+//
+// This package is the base of the round_robin balancer; its purpose is to be
+// used to build round_robin-like balancers with complex picking algorithms.
+// Balancers with more complicated logic should implement a balancer builder
+// from scratch.
+//
+// All APIs in this package are experimental.
+package base
+
+import (
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+// PickerBuilder creates balancer.Picker.
+type PickerBuilder interface {
+	// Build returns a picker that will be used by gRPC to pick a SubConn.
+	Build(info PickerBuildInfo) balancer.Picker
+}
+
+// PickerBuildInfo contains information needed by the picker builder to
+// construct a picker.
+type PickerBuildInfo struct {
+	// ReadySCs is a map from all ready SubConns to the Addresses used to
+	// create them.
+	ReadySCs map[balancer.SubConn]SubConnInfo
+}
+
+// SubConnInfo contains information about a SubConn created by the base
+// balancer.
+type SubConnInfo struct {
+	Address resolver.Address // the address used to create this SubConn
+}
+
+// Config contains the config info about the base balancer builder.
+type Config struct {
+	// HealthCheck indicates whether health checking should be enabled for this specific balancer.
+	HealthCheck bool
+}
+
+// NewBalancerBuilder returns a base balancer builder configured by the provided config.
+func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
+	return &baseBuilder{
+		name:          name,
+		pickerBuilder: pb,
+		config:        config,
+	}
+}
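+
+// A sketch of the intended extension point ("my_policy" and myPickerBuilder
+// are hypothetical): provide a PickerBuilder with a custom picking algorithm
+// and register the resulting builder like any other balancer.
+//
+//	type myPickerBuilder struct{}
+//
+//	func (myPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
+//		// A real policy would pick among info.ReadySCs; this placeholder
+//		// simply queues RPCs.
+//		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
+//	}
+//
+//	func init() {
+//		balancer.Register(base.NewBalancerBuilder("my_policy", myPickerBuilder{}, base.Config{HealthCheck: true}))
+//	}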
diff --git a/hack/tools/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/hack/tools/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
new file mode 100644
index 0000000000..c334135810
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package balancer
+
+import "google.golang.org/grpc/connectivity"
+
+// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+//
+// It's not thread safe.
+type ConnectivityStateEvaluator struct {
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transient failure state.
+	numIdle             uint64 // Number of addrConns in idle state.
+}
+
+// RecordTransition records a state change happening in a SubConn and, based on
+// that, evaluates what the aggregated state should be.
+//
+//   - If at least one SubConn is Ready, the aggregated state is Ready;
+//   - Else if at least one SubConn is Connecting, the aggregated state is Connecting;
+//   - Else if at least one SubConn is Idle, the aggregated state is Idle;
+//   - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is TransientFailure.
+//
+// Shutdown is not considered.
+func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
+	// Update counters.
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+		switch state {
+		case connectivity.Ready:
+			cse.numReady += updateVal
+		case connectivity.Connecting:
+			cse.numConnecting += updateVal
+		case connectivity.TransientFailure:
+			cse.numTransientFailure += updateVal
+		case connectivity.Idle:
+			cse.numIdle += updateVal
+		}
+	}
+	return cse.CurrentState()
+}
+
+// CurrentState returns the current aggregated connectivity state by evaluating the counters.
+func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State {
+	// Evaluate.
+	if cse.numReady > 0 {
+		return connectivity.Ready
+	}
+	if cse.numConnecting > 0 {
+		return connectivity.Connecting
+	}
+	if cse.numIdle > 0 {
+		return connectivity.Idle
+	}
+	return connectivity.TransientFailure
+}
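+
+// An illustrative sequence (three hypothetical SubConns; Shutdown is used as
+// the "no previous state" sentinel, as baseBalancer does for new SubConns):
+//
+//	var cse balancer.ConnectivityStateEvaluator
+//	cse.RecordTransition(connectivity.Shutdown, connectivity.Connecting)    // -> Connecting
+//	cse.RecordTransition(connectivity.Shutdown, connectivity.Connecting)    // -> Connecting
+//	cse.RecordTransition(connectivity.Connecting, connectivity.Ready)       // -> Ready (Ready dominates)
+//	cse.RecordTransition(connectivity.Ready, connectivity.TransientFailure) // -> Connecting (one SubConn still connecting)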
diff --git a/hack/tools/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/hack/tools/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
new file mode 100644
index 0000000000..4ecfa1c215
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package state declares grpclb types to be set by resolvers wishing to pass
+// information to grpclb via resolver.State Attributes.
+package state
+
+import (
+	"google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.grpclb.state")
+
+// State contains gRPCLB-relevant data passed from the name resolver.
+type State struct {
+	// BalancerAddresses contains the remote load balancer address(es).  If
+	// set, overrides any resolver-provided addresses with Type of GRPCLB.
+	BalancerAddresses []resolver.Address
+}
+
+// Set returns a copy of the provided state with attributes containing s.  s's
+// data should not be mutated after calling Set.
+func Set(state resolver.State, s *State) resolver.State {
+	state.Attributes = state.Attributes.WithValue(key, s)
+	return state
+}
+
+// Get returns the grpclb State in the resolver.State, or nil if not present.
+// The returned data should not be mutated.
+func Get(state resolver.State) *State {
+	s, _ := state.Attributes.Value(key).(*State)
+	return s
+}
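+
+// A sketch of the intended flow (backends and lbAddrs are hypothetical
+// address slices): a resolver attaches the balancer addresses before
+// returning its State, and grpclb later retrieves them.
+//
+//	rs := resolver.State{Addresses: backends}
+//	rs = state.Set(rs, &state.State{BalancerAddresses: lbAddrs})
+//	// ... later, inside grpclb:
+//	if s := state.Get(rs); s != nil {
+//		_ = s.BalancerAddresses // remote load balancer addresses
+//	}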
diff --git a/hack/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/hack/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
new file mode 100644
index 0000000000..4d69b4052f
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -0,0 +1,283 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pickfirst contains the pick_first load balancing policy.
+package pickfirst
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math/rand"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+	internalgrpclog "google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+func init() {
+	balancer.Register(pickfirstBuilder{})
+	internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
+}
+
+var logger = grpclog.Component("pick-first-lb")
+
+const (
+	// Name is the name of the pick_first balancer.
+	Name      = "pick_first"
+	logPrefix = "[pick-first-lb %p] "
+)
+
+type pickfirstBuilder struct{}
+
+func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
+	b := &pickfirstBalancer{cc: cc}
+	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
+	return b
+}
+
+func (pickfirstBuilder) Name() string {
+	return Name
+}
+
+type pfConfig struct {
+	serviceconfig.LoadBalancingConfig `json:"-"`
+
+	// If set to true, instructs the LB policy to shuffle the order of the list
+	// of endpoints received from the name resolver before attempting to
+	// connect to them.
+	ShuffleAddressList bool `json:"shuffleAddressList"`
+}
+
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+	var cfg pfConfig
+	if err := json.Unmarshal(js, &cfg); err != nil {
+		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+	}
+	return cfg, nil
+}
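+
+// For illustration: ParseConfig is handed only this policy's own config
+// object. In a full service config, enabling shuffling would look like
+//
+//	{"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}
+//
+// of which ParseConfig receives the inner {"shuffleAddressList": true}.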
+
+type pickfirstBalancer struct {
+	logger  *internalgrpclog.PrefixLogger
+	state   connectivity.State
+	cc      balancer.ClientConn
+	subConn balancer.SubConn
+}
+
+func (b *pickfirstBalancer) ResolverError(err error) {
+	if b.logger.V(2) {
+		b.logger.Infof("Received error from the name resolver: %v", err)
+	}
+	if b.subConn == nil {
+		b.state = connectivity.TransientFailure
+	}
+
+	if b.state != connectivity.TransientFailure {
+		// The picker will not change since the balancer does not currently
+		// report an error.
+		return
+	}
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
+	})
+}
+
+// Shuffler is an interface for shuffling an address list.
+type Shuffler interface {
+	ShuffleAddressListForTesting(n int, swap func(i, j int))
+}
+
+// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n is
+// the number of elements. swap swaps the elements with indexes i and j.
+func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+		// The resolver reported an empty address list. Treat it like an error by
+		// calling b.ResolverError.
+		if b.subConn != nil {
+			// Shut down the old subConn. All addresses were removed, so it is
+			// no longer valid.
+			b.subConn.Shutdown()
+			b.subConn = nil
+		}
+		b.ResolverError(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+	// We don't have to guard this block with the env var because ParseConfig
+	// already does so.
+	cfg, ok := state.BalancerConfig.(pfConfig)
+	if state.BalancerConfig != nil && !ok {
+		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
+	}
+
+	if b.logger.V(2) {
+		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+	}
+
+	var addrs []resolver.Address
+	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+		// Perform the optional shuffling described in gRFC A62. The shuffling will
+		// change the order of endpoints but not touch the order of the addresses
+		// within each endpoint. - A61
+		if cfg.ShuffleAddressList {
+			endpoints = append([]resolver.Endpoint{}, endpoints...)
+			internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+		}
+
+		// "Flatten the list by concatenating the ordered list of addresses for each
+		// of the endpoints, in order." - A61
+		for _, endpoint := range endpoints {
+			// "In the flattened list, interleave addresses from the two address
+			// families, as per RFC-8305 section 4." - A61
+			// TODO: support the above language.
+			addrs = append(addrs, endpoint.Addresses...)
+		}
+	} else {
+		// Endpoints not set; process addresses until we migrate resolver
+		// emissions fully to Endpoints. The top channel does wrap emitted
+		// addresses with endpoints; however, some balancers, such as weighted
+		// target, do not forward the corresponding correct endpoints down/split
+		// endpoints properly. Once all balancers correctly forward endpoints
+		// down, this else conditional can be deleted.
+		addrs = state.ResolverState.Addresses
+		if cfg.ShuffleAddressList {
+			addrs = append([]resolver.Address{}, addrs...)
+			rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+		}
+	}
+
+	if b.subConn != nil {
+		b.cc.UpdateAddresses(b.subConn, addrs)
+		return nil
+	}
+
+	var subConn balancer.SubConn
+	subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
+		StateListener: func(state balancer.SubConnState) {
+			b.updateSubConnState(subConn, state)
+		},
+	})
+	if err != nil {
+		if b.logger.V(2) {
+			b.logger.Infof("Failed to create new SubConn: %v", err)
+		}
+		b.state = connectivity.TransientFailure
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            &picker{err: fmt.Errorf("error creating connection: %v", err)},
+		})
+		return balancer.ErrBadResolverState
+	}
+	b.subConn = subConn
+	b.state = connectivity.Idle
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.Connecting,
+		Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+	})
+	b.subConn.Connect()
+	return nil
+}
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+	if b.logger.V(2) {
+		b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
+	}
+	if b.subConn != subConn {
+		if b.logger.V(2) {
+			b.logger.Infof("Ignored state change because subConn is not recognized")
+		}
+		return
+	}
+	if state.ConnectivityState == connectivity.Shutdown {
+		b.subConn = nil
+		return
+	}
+
+	switch state.ConnectivityState {
+	case connectivity.Ready:
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &picker{result: balancer.PickResult{SubConn: subConn}},
+		})
+	case connectivity.Connecting:
+		if b.state == connectivity.TransientFailure {
+			// We stay in TransientFailure until we are Ready. See A62.
+			return
+		}
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+	case connectivity.Idle:
+		if b.state == connectivity.TransientFailure {
+			// We stay in TransientFailure until we are Ready. Also kick the
+			// subConn out of Idle into Connecting. See A62.
+			b.subConn.Connect()
+			return
+		}
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &idlePicker{subConn: subConn},
+		})
+	case connectivity.TransientFailure:
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &picker{err: state.ConnectionError},
+		})
+	}
+	b.state = state.ConnectivityState
+}
+
+func (b *pickfirstBalancer) Close() {
+}
+
+func (b *pickfirstBalancer) ExitIdle() {
+	if b.subConn != nil && b.state == connectivity.Idle {
+		b.subConn.Connect()
+	}
+}
+
+type picker struct {
+	result balancer.PickResult
+	err    error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+	subConn balancer.SubConn
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	i.subConn.Connect()
+	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/hack/tools/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
new file mode 100644
index 0000000000..260255d31b
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is
+// installed as one of the default balancers in gRPC, so users don't need to
+// explicitly install this balancer.
+package roundrobin
+
+import (
+	"math/rand"
+	"sync/atomic"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/grpclog"
+)
+
+// Name is the name of round_robin balancer.
+const Name = "round_robin"
+
+var logger = grpclog.Component("roundrobin")
+
+// newBuilder creates a new roundrobin balancer builder.
+func newBuilder() balancer.Builder {
+	return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
+}
+
+func init() {
+	balancer.Register(newBuilder())
+}
+
+type rrPickerBuilder struct{}
+
+func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
+	logger.Infof("roundrobinPicker: Build called with info: %v", info)
+	if len(info.ReadySCs) == 0 {
+		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
+	}
+	scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
+	for sc := range info.ReadySCs {
+		scs = append(scs, sc)
+	}
+	return &rrPicker{
+		subConns: scs,
+		// Start at a random index, as the same RR balancer rebuilds a new
+		// picker when SubConn states change, and we don't want to apply excess
+		// load to the first server in the list.
+		next: uint32(rand.Intn(len(scs))),
+	}
+}
+
+type rrPicker struct {
+	// subConns is the snapshot of the roundrobin balancer when this picker was
+	// created. The slice is immutable. Each Pick() will do a round-robin
+	// selection from it and return the selected SubConn.
+	subConns []balancer.SubConn
+	next     uint32
+}
+
+func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	subConnsLen := uint32(len(p.subConns))
+	nextIndex := atomic.AddUint32(&p.next, 1)
+
+	sc := p.subConns[nextIndex%subConnsLen]
+	return balancer.PickResult{SubConn: sc}, nil
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/balancer_wrapper.go b/hack/tools/vendor/google.golang.org/grpc/balancer_wrapper.go
new file mode 100644
index 0000000000..8ad6ce2f09
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -0,0 +1,365 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/balancer/gracefulswitch"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/resolver"
+)
+
+var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+
+// ccBalancerWrapper sits between the ClientConn and the Balancer.
+//
+// ccBalancerWrapper implements methods corresponding to the ones on the
+// balancer.Balancer interface. The ClientConn is free to call these methods
+// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
+// to the Balancer happen in order by performing them in the serializer, without
+// any mutexes held.
+//
+// ccBalancerWrapper also implements the balancer.ClientConn interface and is
+// passed to the Balancer implementations. It invokes unexported methods on the
+// ClientConn to handle these calls from the Balancer.
+//
+// It uses the gracefulswitch.Balancer internally to ensure that balancer
+// switches happen in a graceful manner.
+type ccBalancerWrapper struct {
+	// The following fields are initialized when the wrapper is created and are
+	// read-only afterwards, and therefore can be accessed without a mutex.
+	cc               *ClientConn
+	opts             balancer.BuildOptions
+	serializer       *grpcsync.CallbackSerializer
+	serializerCancel context.CancelFunc
+
+	// The following fields are only accessed within the serializer or during
+	// initialization.
+	curBalancerName string
+	balancer        *gracefulswitch.Balancer
+
+	// The following field is protected by mu.  Caller must take cc.mu before
+	// taking mu.
+	mu     sync.Mutex
+	closed bool
+}
+
+// newCCBalancerWrapper creates a new balancer wrapper in idle state. The
+// underlying balancer is not created until the updateClientConnState() method
+// is invoked.
+func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
+	ctx, cancel := context.WithCancel(cc.ctx)
+	ccb := &ccBalancerWrapper{
+		cc: cc,
+		opts: balancer.BuildOptions{
+			DialCreds:       cc.dopts.copts.TransportCredentials,
+			CredsBundle:     cc.dopts.copts.CredsBundle,
+			Dialer:          cc.dopts.copts.Dialer,
+			Authority:       cc.authority,
+			CustomUserAgent: cc.dopts.copts.UserAgent,
+			ChannelzParent:  cc.channelz,
+			Target:          cc.parsedTarget,
+			MetricsRecorder: cc.metricsRecorderList,
+		},
+		serializer:       grpcsync.NewCallbackSerializer(ctx),
+		serializerCancel: cancel,
+	}
+	ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
+	return ccb
+}
+
+// updateClientConnState is invoked by grpc to push a ClientConnState update to
+// the underlying balancer.  This is always executed from the serializer, so
+// it is safe to call into the balancer here.
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+	errCh := make(chan error)
+	uccs := func(ctx context.Context) {
+		defer close(errCh)
+		if ctx.Err() != nil || ccb.balancer == nil {
+			return
+		}
+		name := gracefulswitch.ChildName(ccs.BalancerConfig)
+		if ccb.curBalancerName != name {
+			ccb.curBalancerName = name
+			channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name)
+		}
+		err := ccb.balancer.UpdateClientConnState(*ccs)
+		if logger.V(2) && err != nil {
+			logger.Infof("error from balancer.UpdateClientConnState: %v", err)
+		}
+		errCh <- err
+	}
+	onFailure := func() { close(errCh) }
+
+	// UpdateClientConnState can race with Close, and when the latter wins, the
+	// serializer is closed, and the attempt to schedule the callback will fail.
+	// It is acceptable to ignore this failure. But since we want to handle the
+	// state update in a blocking fashion (when we successfully schedule the
+	// callback), we have to use the ScheduleOr method and not the MaybeSchedule
+	// method on the serializer.
+	ccb.serializer.ScheduleOr(uccs, onFailure)
+	return <-errCh
+}
+
+// resolverError is invoked by grpc to push a resolver error to the underlying
+// balancer.  The call to the balancer is executed from the serializer.
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+	ccb.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil || ccb.balancer == nil {
+			return
+		}
+		ccb.balancer.ResolverError(err)
+	})
+}
+
+// close initiates async shutdown of the wrapper.  cc.mu must be held when
+// calling this function.  To determine whether the wrapper has finished
+// shutting down, the caller should block on ccb.serializer.Done() without
+// cc.mu held.
+func (ccb *ccBalancerWrapper) close() {
+	ccb.mu.Lock()
+	ccb.closed = true
+	ccb.mu.Unlock()
+	channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
+	ccb.serializer.TrySchedule(func(context.Context) {
+		if ccb.balancer == nil {
+			return
+		}
+		ccb.balancer.Close()
+		ccb.balancer = nil
+	})
+	ccb.serializerCancel()
+}
+
+// exitIdle invokes the balancer's exitIdle method in the serializer.
+func (ccb *ccBalancerWrapper) exitIdle() {
+	ccb.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil || ccb.balancer == nil {
+			return
+		}
+		ccb.balancer.ExitIdle()
+	})
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	ccb.cc.mu.Lock()
+	defer ccb.cc.mu.Unlock()
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
+		return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed")
+	}
+	ccb.mu.Unlock()
+
+	if len(addrs) == 0 {
+		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+	}
+	ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
+	if err != nil {
+		channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
+		return nil, err
+	}
+	acbw := &acBalancerWrapper{
+		ccb:           ccb,
+		ac:            ac,
+		producers:     make(map[balancer.ProducerBuilder]*refCountedProducer),
+		stateListener: opts.StateListener,
+	}
+	ac.acbw = acbw
+	return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// The graceful switch balancer will never call this.
+	logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc)
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	acbw, ok := sc.(*acBalancerWrapper)
+	if !ok {
+		return
+	}
+	acbw.UpdateAddresses(addrs)
+}
+
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+	ccb.cc.mu.Lock()
+	defer ccb.cc.mu.Unlock()
+	if ccb.cc.conns == nil {
+		// The CC has been closed; ignore this update.
+		return
+	}
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
+		return
+	}
+	ccb.mu.Unlock()
+	// Update picker before updating state.  Even though the ordering here does
+	// not matter for correctness, updating the state first can lead to multiple
+	// calls of Pick in the common start-up case where we wait for ready and
+	// then perform an RPC.  If the picker is updated later, we could call the
+	// "connecting" picker when the state is updated, and then call the "ready"
+	// picker after the picker gets updated.
+
+	// Note that there is no need to check if the balancer wrapper was closed,
+	// as we know the graceful switch LB policy will not call cc if it has been
+	// closed.
+	ccb.cc.pickerWrapper.updatePicker(s.Picker)
+	ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
+	ccb.cc.mu.RLock()
+	defer ccb.cc.mu.RUnlock()
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
+		return
+	}
+	ccb.mu.Unlock()
+	ccb.cc.resolveNowLocked(o)
+}
+
+func (ccb *ccBalancerWrapper) Target() string {
+	return ccb.cc.target
+}
+
+// acBalancerWrapper is a wrapper on top of ac for balancers.
+// It implements the balancer.SubConn interface.
+type acBalancerWrapper struct {
+	ac            *addrConn          // read-only
+	ccb           *ccBalancerWrapper // read-only
+	stateListener func(balancer.SubConnState)
+
+	mu        sync.Mutex
+	producers map[balancer.ProducerBuilder]*refCountedProducer
+}
+
+// updateState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
+	acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil || acbw.ccb.balancer == nil {
+			return
+		}
+		// Even though it is optional for balancers, gracefulswitch ensures
+		// opts.StateListener is set, so this cannot ever be nil.
+		// TODO: delete this comment when UpdateSubConnState is removed.
+		scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
+		if s == connectivity.Ready {
+			setConnectedAddress(&scs, curAddr)
+		}
+		acbw.stateListener(scs)
+		acbw.ac.mu.Lock()
+		defer acbw.ac.mu.Unlock()
+		if s == connectivity.Ready {
+			// When changing states to READY, reset stateReadyChan.  Wait until
+			// after we notify the LB policy's listener(s) in order to prevent
+			// ac.getTransport() from unblocking before the LB policy starts
+			// tracking the subchannel as READY.
+			close(acbw.ac.stateReadyChan)
+			acbw.ac.stateReadyChan = make(chan struct{})
+		}
+	})
+}
+
+func (acbw *acBalancerWrapper) String() string {
+	return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID)
+}
+
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+	acbw.ac.updateAddrs(addrs)
+}
+
+func (acbw *acBalancerWrapper) Connect() {
+	go acbw.ac.connect()
+}
+
+func (acbw *acBalancerWrapper) Shutdown() {
+	acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+}
+
+// NewStream begins a streaming RPC on the addrConn.  If the addrConn is not
+// ready, blocks until it is or ctx expires.  Returns an error when the context
+// expires or the addrConn is shut down.
+func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+	transport, err := acbw.ac.getTransport(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
+}
+
+// Invoke performs a unary RPC.  If the addrConn is not ready, returns
+// errSubConnNotReady.
+func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
+	cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
+	if err != nil {
+		return err
+	}
+	if err := cs.SendMsg(args); err != nil {
+		return err
+	}
+	return cs.RecvMsg(reply)
+}
+
+type refCountedProducer struct {
+	producer balancer.Producer
+	refs     int    // number of current refs to the producer
+	close    func() // underlying producer's close function
+}
+
+func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
+	acbw.mu.Lock()
+	defer acbw.mu.Unlock()
+
+	// Look up existing producer from this builder.
+	pData := acbw.producers[pb]
+	if pData == nil {
+		// Not found; create a new one and add it to the producers map.
+		p, closeFn := pb.Build(acbw)
+		pData = &refCountedProducer{producer: p, close: closeFn}
+		acbw.producers[pb] = pData
+	}
+	// Account for this new reference.
+	pData.refs++
+
+	// Return a cleanup function wrapped in a OnceFunc to remove this reference
+	// and delete the refCountedProducer from the map if the total reference
+	// count goes to zero.
+	unref := func() {
+		acbw.mu.Lock()
+		pData.refs--
+		if pData.refs == 0 {
+			defer pData.close() // Run outside the acbw mutex
+			delete(acbw.producers, pb)
+		}
+		acbw.mu.Unlock()
+	}
+	return pData.producer, grpcsync.OnceFunc(unref)
+}
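+
+// Illustrative only (not part of the vendored gRPC source): a minimal usage
+// sketch for GetOrBuildProducer, assuming a hypothetical
+// balancer.ProducerBuilder value named statsBuilder.  The cleanup function is
+// wrapped in grpcsync.OnceFunc above, so calling it more than once is safe.
+//
+//	producer, cleanup := acbw.GetOrBuildProducer(statsBuilder)
+//	defer cleanup() // drops this ref; the producer closes when refs reach zero
+//	_ = producer    // use the producer, e.g. to collect per-SubConn stats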
diff --git a/hack/tools/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/hack/tools/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
new file mode 100644
index 0000000000..55bffaa77e
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -0,0 +1,1183 @@
+// Copyright 2018 The gRPC Authors
+// All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.34.2
+// 	protoc        v5.27.1
+// source: grpc/binlog/v1/binarylog.proto
+
+package grpc_binarylog_v1
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Enumerates the type of event
+// Note the terminology is different from the RPC semantics
+// definition, but the same meaning is expressed here.
+type GrpcLogEntry_EventType int32
+
+const (
+	GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0
+	// Header sent from client to server
+	GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1
+	// Header sent from server to client
+	GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2
+	// Message sent from client to server
+	GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3
+	// Message sent from server to client
+	GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4
+	// A signal that client is done sending
+	GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5
+	// Trailer indicates the end of the RPC.
+	// On client side, this event means a trailer was either received
+	// from the network or the gRPC library locally generated a status
+	// to inform the application about a failure.
+	// On server side, this event means the server application requested
+	// to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after
+	// this due to races on server side.
+	GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6
+	// A signal that the RPC is cancelled. On client side, this
+	// indicates the client application requests a cancellation.
+	// On server side, this indicates that cancellation was detected.
+	// Note: This marks the end of the RPC. Events may arrive after
+	// this due to races. For example, on client side a trailer
+	// may arrive even though the application requested to cancel the RPC.
+	GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
+)
+
+// Enum value maps for GrpcLogEntry_EventType.
+var (
+	GrpcLogEntry_EventType_name = map[int32]string{
+		0: "EVENT_TYPE_UNKNOWN",
+		1: "EVENT_TYPE_CLIENT_HEADER",
+		2: "EVENT_TYPE_SERVER_HEADER",
+		3: "EVENT_TYPE_CLIENT_MESSAGE",
+		4: "EVENT_TYPE_SERVER_MESSAGE",
+		5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
+		6: "EVENT_TYPE_SERVER_TRAILER",
+		7: "EVENT_TYPE_CANCEL",
+	}
+	GrpcLogEntry_EventType_value = map[string]int32{
+		"EVENT_TYPE_UNKNOWN":           0,
+		"EVENT_TYPE_CLIENT_HEADER":     1,
+		"EVENT_TYPE_SERVER_HEADER":     2,
+		"EVENT_TYPE_CLIENT_MESSAGE":    3,
+		"EVENT_TYPE_SERVER_MESSAGE":    4,
+		"EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
+		"EVENT_TYPE_SERVER_TRAILER":    6,
+		"EVENT_TYPE_CANCEL":            7,
+	}
+)
+
+func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType {
+	p := new(GrpcLogEntry_EventType)
+	*p = x
+	return p
+}
+
+func (x GrpcLogEntry_EventType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor()
+}
+
+func (GrpcLogEntry_EventType) Type() protoreflect.EnumType {
+	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0]
+}
+
+func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead.
+func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Enumerates the entity that generates the log entry
+type GrpcLogEntry_Logger int32
+
+const (
+	GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0
+	GrpcLogEntry_LOGGER_CLIENT  GrpcLogEntry_Logger = 1
+	GrpcLogEntry_LOGGER_SERVER  GrpcLogEntry_Logger = 2
+)
+
+// Enum value maps for GrpcLogEntry_Logger.
+var (
+	GrpcLogEntry_Logger_name = map[int32]string{
+		0: "LOGGER_UNKNOWN",
+		1: "LOGGER_CLIENT",
+		2: "LOGGER_SERVER",
+	}
+	GrpcLogEntry_Logger_value = map[string]int32{
+		"LOGGER_UNKNOWN": 0,
+		"LOGGER_CLIENT":  1,
+		"LOGGER_SERVER":  2,
+	}
+)
+
+func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger {
+	p := new(GrpcLogEntry_Logger)
+	*p = x
+	return p
+}
+
+func (x GrpcLogEntry_Logger) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor()
+}
+
+func (GrpcLogEntry_Logger) Type() protoreflect.EnumType {
+	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1]
+}
+
+func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead.
+func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1}
+}
+
+type Address_Type int32
+
+const (
+	Address_TYPE_UNKNOWN Address_Type = 0
+	// address is in 1.2.3.4 form
+	Address_TYPE_IPV4 Address_Type = 1
+	// address is in IPv6 canonical form (RFC5952 section 4)
+	// The scope is NOT included in the address string.
+	Address_TYPE_IPV6 Address_Type = 2
+	// address is UDS string
+	Address_TYPE_UNIX Address_Type = 3
+)
+
+// Enum value maps for Address_Type.
+var (
+	Address_Type_name = map[int32]string{
+		0: "TYPE_UNKNOWN",
+		1: "TYPE_IPV4",
+		2: "TYPE_IPV6",
+		3: "TYPE_UNIX",
+	}
+	Address_Type_value = map[string]int32{
+		"TYPE_UNKNOWN": 0,
+		"TYPE_IPV4":    1,
+		"TYPE_IPV6":    2,
+		"TYPE_UNIX":    3,
+	}
+)
+
+func (x Address_Type) Enum() *Address_Type {
+	p := new(Address_Type)
+	*p = x
+	return p
+}
+
+func (x Address_Type) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Address_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor()
+}
+
+func (Address_Type) Type() protoreflect.EnumType {
+	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2]
+}
+
+func (x Address_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Address_Type.Descriptor instead.
+func (Address_Type) EnumDescriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0}
+}
+
+// Log entry we store in binary logs
+type GrpcLogEntry struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The timestamp of the binary log message
+	Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	// Uniquely identifies a call. The value must not be 0 in order to disambiguate
+	// from an unset value.
+	// Each call may have several log entries; they will all have the same call_id.
+	// Nothing is guaranteed about their value other than they are unique across
+	// different RPCs in the same gRPC process.
+	CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"`
+	// The entry sequence id for this call. The first GrpcLogEntry has a
+	// value of 1, to disambiguate from an unset value. The purpose of
+	// this field is to detect missing entries in environments where
+	// durability or ordering is not guaranteed.
+	SequenceIdWithinCall uint64                 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
+	Type                 GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
+	Logger               GrpcLogEntry_Logger    `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum
+	// The logger uses one of the following fields to record the payload,
+	// according to the type of the log entry.
+	//
+	// Types that are assignable to Payload:
+	//
+	//	*GrpcLogEntry_ClientHeader
+	//	*GrpcLogEntry_ServerHeader
+	//	*GrpcLogEntry_Message
+	//	*GrpcLogEntry_Trailer
+	Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"`
+	// true if payload does not represent the full message or metadata.
+	PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"`
+	// Peer address information, will only be recorded on the first
+	// incoming event. On client side, peer is logged on
+	// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
+	// the case of trailers-only. On server side, peer is always
+	// logged on EVENT_TYPE_CLIENT_HEADER.
+	Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
+}
+
+func (x *GrpcLogEntry) Reset() {
+	*x = GrpcLogEntry{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *GrpcLogEntry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcLogEntry) ProtoMessage() {}
+
+func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead.
+func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.Timestamp
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetCallId() uint64 {
+	if x != nil {
+		return x.CallId
+	}
+	return 0
+}
+
+func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+	if x != nil {
+		return x.SequenceIdWithinCall
+	}
+	return 0
+}
+
+func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+	if x != nil {
+		return x.Type
+	}
+	return GrpcLogEntry_EVENT_TYPE_UNKNOWN
+}
+
+func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+	if x != nil {
+		return x.Logger
+	}
+	return GrpcLogEntry_LOGGER_UNKNOWN
+}
+
+func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
+		return x.ClientHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
+		return x.ServerHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetMessage() *Message {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
+		return x.Message
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetTrailer() *Trailer {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
+		return x.Trailer
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetPayloadTruncated() bool {
+	if x != nil {
+		return x.PayloadTruncated
+	}
+	return false
+}
+
+func (x *GrpcLogEntry) GetPeer() *Address {
+	if x != nil {
+		return x.Peer
+	}
+	return nil
+}
+
+type isGrpcLogEntry_Payload interface {
+	isGrpcLogEntry_Payload()
+}
+
+type GrpcLogEntry_ClientHeader struct {
+	ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_ServerHeader struct {
+	ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_Message struct {
+	// Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
+	Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
+}
+
+type GrpcLogEntry_Trailer struct {
+	Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
+}
+
+func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
+
+type ClientHeader struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	// The name of the RPC method, which looks something like:
+	// /<service>/<method>
+	// Note the leading "/" character.
+	MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
+	// A single process may be used to run multiple virtual
+	// servers with different identities.
+	// The authority is the name of such a server identity.
+	// It is typically a portion of the URI in the form of
+	// <host> or <host>:<port>.
+	Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
+	// the RPC timeout
+	Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+}
+
+func (x *ClientHeader) Reset() {
+	*x = ClientHeader{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ClientHeader) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientHeader) ProtoMessage() {}
+
+func (x *ClientHeader) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead.
+func (*ClientHeader) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ClientHeader) GetMetadata() *Metadata {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
+
+func (x *ClientHeader) GetMethodName() string {
+	if x != nil {
+		return x.MethodName
+	}
+	return ""
+}
+
+func (x *ClientHeader) GetAuthority() string {
+	if x != nil {
+		return x.Authority
+	}
+	return ""
+}
+
+func (x *ClientHeader) GetTimeout() *durationpb.Duration {
+	if x != nil {
+		return x.Timeout
+	}
+	return nil
+}
+
+type ServerHeader struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+}
+
+func (x *ServerHeader) Reset() {
+	*x = ServerHeader{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ServerHeader) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerHeader) ProtoMessage() {}
+
+func (x *ServerHeader) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead.
+func (*ServerHeader) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ServerHeader) GetMetadata() *Metadata {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
+
+type Trailer struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	// The gRPC status code.
+	StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"`
+	// An original status message before any transport specific
+	// encoding.
+	StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
+	// The value of the 'grpc-status-details-bin' metadata key. If
+	// present, this is always an encoded 'google.rpc.Status' message.
+	StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
+}
+
+func (x *Trailer) Reset() {
+	*x = Trailer{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Trailer) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Trailer) ProtoMessage() {}
+
+func (x *Trailer) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Trailer.ProtoReflect.Descriptor instead.
+func (*Trailer) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Trailer) GetMetadata() *Metadata {
+	if x != nil {
+		return x.Metadata
+	}
+	return nil
+}
+
+func (x *Trailer) GetStatusCode() uint32 {
+	if x != nil {
+		return x.StatusCode
+	}
+	return 0
+}
+
+func (x *Trailer) GetStatusMessage() string {
+	if x != nil {
+		return x.StatusMessage
+	}
+	return ""
+}
+
+func (x *Trailer) GetStatusDetails() []byte {
+	if x != nil {
+		return x.StatusDetails
+	}
+	return nil
+}
+
+// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
+type Message struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Length of the message. It may not be the same as the length of the
+	// data field, as the logging payload can be truncated or omitted.
+	Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
+	// May be truncated or omitted.
+	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (x *Message) Reset() {
+	*x = Message{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Message) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
+func (*Message) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Message) GetLength() uint32 {
+	if x != nil {
+		return x.Length
+	}
+	return 0
+}
+
+func (x *Message) GetData() []byte {
+	if x != nil {
+		return x.Data
+	}
+	return nil
+}
+
+// A list of metadata pairs, used in the payload of client header,
+// server header, and server trailer.
+// Implementations may omit some entries to honor the header limits
+// of GRPC_BINARY_LOG_CONFIG.
+//
+// Header keys added by gRPC are omitted. To be more specific,
+// implementations will not log the following entries, and this is
+// not to be treated as a truncation:
+//   - entries handled by grpc that are not user visible, such as those
+//     that begin with 'grpc-' (with exception of grpc-trace-bin)
+//     or keys like 'lb-token'
+//   - transport specific entries, including but not limited to:
+//     ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
+//   - entries added for call credentials
+//
+// Implementations must always log grpc-trace-bin if it is present.
+// Practically speaking it will only be visible on server side because
+// grpc-trace-bin is managed by low level client side mechanisms
+// inaccessible from the application level. On server side, the
+// header is just a normal metadata key.
+// The pair will not count towards the size limit.
+type Metadata struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
+}
+
+func (x *Metadata) Reset() {
+	*x = Metadata{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Metadata) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metadata) ProtoMessage() {}
+
+func (x *Metadata) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
+func (*Metadata) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Metadata) GetEntry() []*MetadataEntry {
+	if x != nil {
+		return x.Entry
+	}
+	return nil
+}
+
+// A metadata key value pair
+type MetadataEntry struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Key   string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *MetadataEntry) Reset() {
+	*x = MetadataEntry{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *MetadataEntry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetadataEntry) ProtoMessage() {}
+
+func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead.
+func (*MetadataEntry) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *MetadataEntry) GetKey() string {
+	if x != nil {
+		return x.Key
+	}
+	return ""
+}
+
+func (x *MetadataEntry) GetValue() []byte {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+// Address information
+type Address struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Type    Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
+	Address string       `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
+	// only for TYPE_IPV4 and TYPE_IPV6
+	IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
+}
+
+func (x *Address) Reset() {
+	*x = Address{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Address) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Address) ProtoMessage() {}
+
+func (x *Address) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Address.ProtoReflect.Descriptor instead.
+func (*Address) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *Address) GetType() Address_Type {
+	if x != nil {
+		return x.Type
+	}
+	return Address_TYPE_UNKNOWN
+}
+
+func (x *Address) GetAddress() string {
+	if x != nil {
+		return x.Address
+	}
+	return ""
+}
+
+func (x *Address) GetIpPort() uint32 {
+	if x != nil {
+		return x.IpPort
+	}
+	return 0
+}
+
+var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor
+
+var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{
+	0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31,
+	0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
+	0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+	0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
+	0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
+	0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75,
+	0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63,
+	0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65,
+	0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12,
+	0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
+	0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45,
+	0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e,
+	0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
+	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e,
+	0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e,
+	0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46,
+	0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18,
+	0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
+	0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+	0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+	0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+	0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
+	0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00,
+	0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36,
+	0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
+	0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d,
+	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65,
+	0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
+	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69,
+	0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b,
+	0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61,
+	0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f,
+	0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70,
+	0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64,
+	0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09,
+	0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45,
+	0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+	0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12,
+	0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45,
+	0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a,
+	0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45,
+	0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19,
+	0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45,
+	0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45,
+	0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54,
+	0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a,
+	0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56,
+	0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11,
+	0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45,
+	0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a,
+	0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+	0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45,
+	0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53,
+	0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f,
+	0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61,
+	0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e,
+	0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+	0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b,
+	0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a,
+	0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74,
+	0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+	0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
+	0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79,
+	0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
+	0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72,
+	0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+	0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62,
+	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61,
+	0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f,
+	0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12,
+	0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d,
+	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+	0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d,
+	0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a,
+	0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
+	0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+	0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04,
+	0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+	0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67,
+	0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61,
+	0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e,
+	0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79,
+	0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07,
+	0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69,
+	0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a,
+	0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
+	0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d,
+	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a,
+	0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14,
+	0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f,
+	0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62,
+	0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69,
+	0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x33,
+}
+
+var (
+	file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once
+	file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc
+)
+
+func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
+	file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() {
+		file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData)
+	})
+	return file_grpc_binlog_v1_binarylog_proto_rawDescData
+}
+
+var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{
+	(GrpcLogEntry_EventType)(0),   // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
+	(GrpcLogEntry_Logger)(0),      // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
+	(Address_Type)(0),             // 2: grpc.binarylog.v1.Address.Type
+	(*GrpcLogEntry)(nil),          // 3: grpc.binarylog.v1.GrpcLogEntry
+	(*ClientHeader)(nil),          // 4: grpc.binarylog.v1.ClientHeader
+	(*ServerHeader)(nil),          // 5: grpc.binarylog.v1.ServerHeader
+	(*Trailer)(nil),               // 6: grpc.binarylog.v1.Trailer
+	(*Message)(nil),               // 7: grpc.binarylog.v1.Message
+	(*Metadata)(nil),              // 8: grpc.binarylog.v1.Metadata
+	(*MetadataEntry)(nil),         // 9: grpc.binarylog.v1.MetadataEntry
+	(*Address)(nil),               // 10: grpc.binarylog.v1.Address
+	(*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp
+	(*durationpb.Duration)(nil),   // 12: google.protobuf.Duration
+}
+var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{
+	11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp
+	0,  // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType
+	1,  // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger
+	4,  // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader
+	5,  // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader
+	7,  // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message
+	6,  // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer
+	10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address
+	8,  // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
+	12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration
+	8,  // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
+	8,  // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata
+	9,  // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry
+	2,  // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type
+	14, // [14:14] is the sub-list for method output_type
+	14, // [14:14] is the sub-list for method input_type
+	14, // [14:14] is the sub-list for extension type_name
+	14, // [14:14] is the sub-list for extension extendee
+	0,  // [0:14] is the sub-list for field type_name
+}
+
+func init() { file_grpc_binlog_v1_binarylog_proto_init() }
+func file_grpc_binlog_v1_binarylog_proto_init() {
+	if File_grpc_binlog_v1_binarylog_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any {
+			switch v := v.(*GrpcLogEntry); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any {
+			switch v := v.(*ClientHeader); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any {
+			switch v := v.(*ServerHeader); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any {
+			switch v := v.(*Trailer); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any {
+			switch v := v.(*Message); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any {
+			switch v := v.(*Metadata); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any {
+			switch v := v.(*MetadataEntry); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any {
+			switch v := v.(*Address); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
+		(*GrpcLogEntry_ClientHeader)(nil),
+		(*GrpcLogEntry_ServerHeader)(nil),
+		(*GrpcLogEntry_Message)(nil),
+		(*GrpcLogEntry_Trailer)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc,
+			NumEnums:      3,
+			NumMessages:   8,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_grpc_binlog_v1_binarylog_proto_goTypes,
+		DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs,
+		EnumInfos:         file_grpc_binlog_v1_binarylog_proto_enumTypes,
+		MessageInfos:      file_grpc_binlog_v1_binarylog_proto_msgTypes,
+	}.Build()
+	File_grpc_binlog_v1_binarylog_proto = out.File
+	file_grpc_binlog_v1_binarylog_proto_rawDesc = nil
+	file_grpc_binlog_v1_binarylog_proto_goTypes = nil
+	file_grpc_binlog_v1_binarylog_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/call.go b/hack/tools/vendor/google.golang.org/grpc/call.go
new file mode 100644
index 0000000000..788c89c16f
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/call.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+)
+
+// Invoke sends the RPC request on the wire and returns after the response is
+// received.  This is typically called by generated code.
+//
+// All errors returned by Invoke are compatible with the status package.
+func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error {
+	// Allow the interceptor to see all applicable call options: those
+	// configured as defaults via dial options as well as per-call options.
+	opts = combine(cc.dopts.callOptions, opts)
+
+	if cc.dopts.unaryInt != nil {
+		return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
+	}
+	return invoke(ctx, method, args, reply, cc, opts...)
+}
+
+func combine(o1 []CallOption, o2 []CallOption) []CallOption {
+	// we don't use append because o1 could have extra capacity whose
+	// elements would be overwritten, which could cause inadvertent
+	// sharing (and race conditions) between concurrent calls
+	if len(o1) == 0 {
+		return o2
+	} else if len(o2) == 0 {
+		return o1
+	}
+	ret := make([]CallOption, len(o1)+len(o2))
+	copy(ret, o1)
+	copy(ret[len(o1):], o2)
+	return ret
+}
+
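+// Illustrative only (not part of the vendored gRPC source): a minimal sketch
+// of the aliasing hazard combine avoids.  If append were used, two concurrent
+// calls sharing o1's backing array could overwrite each other's options:
+//
+//	base := make([]CallOption, 1, 4)       // spare capacity
+//	a := append(base, WaitForReady(true))  // writes into base's backing array
+//	b := append(base, WaitForReady(false)) // may overwrite a's new element
+//	_, _ = a, b                            // a[1] and b[1] alias the same slot
+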
+// Invoke sends the RPC request on the wire and returns after the response is
+// received.  This is typically called by generated code.
+//
+// Deprecated: Use ClientConn.Invoke instead.
+func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error {
+	return cc.Invoke(ctx, method, args, reply, opts...)
+}
+
+var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
+
+func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
+	cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
+	if err != nil {
+		return err
+	}
+	if err := cs.SendMsg(req); err != nil {
+		return err
+	}
+	return cs.RecvMsg(reply)
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/channelz/channelz.go b/hack/tools/vendor/google.golang.org/grpc/channelz/channelz.go
new file mode 100644
index 0000000000..32b7fa5794
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/channelz/channelz.go
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz exports internals of the channelz implementation as required
+// by other gRPC packages.
+//
+// The implementation of the channelz spec as defined in
+// https://github.com/grpc/proposal/blob/master/A14-channelz.md is provided by
+// the `internal/channelz` package.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are experimental and may be removed in a
+// later release.
+package channelz
+
+import "google.golang.org/grpc/internal/channelz"
+
+// Identifier is an opaque identifier which uniquely identifies an entity in the
+// channelz database.
+type Identifier = channelz.Identifier
diff --git a/hack/tools/vendor/google.golang.org/grpc/clientconn.go b/hack/tools/vendor/google.golang.org/grpc/clientconn.go
new file mode 100644
index 0000000000..9c8850e3fd
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/clientconn.go
@@ -0,0 +1,1827 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"net/url"
+	"slices"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/balancer/pickfirst"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/idle"
+	iresolver "google.golang.org/grpc/internal/resolver"
+	"google.golang.org/grpc/internal/stats"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+	"google.golang.org/grpc/status"
+
+	_ "google.golang.org/grpc/balancer/roundrobin"           // To register roundrobin.
+	_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
+	_ "google.golang.org/grpc/internal/resolver/unix"        // To register unix resolver.
+	_ "google.golang.org/grpc/resolver/dns"                  // To register dns resolver.
+)
+
+const (
+	// minimum time to give a connection attempt to complete
+	minConnectTimeout = 20 * time.Second
+)
+
+var (
+	// ErrClientConnClosing indicates that the operation is illegal because
+	// the ClientConn is closing.
+	//
+	// Deprecated: this error should not be relied upon by users; use the status
+	// code of Canceled instead.
+	ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing")
+	// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
+	errConnDrain = errors.New("grpc: the connection is drained")
+	// errConnClosing indicates that the connection is closing.
+	errConnClosing = errors.New("grpc: the connection is closing")
+	// errConnIdling indicates the connection is being closed as the channel
+	// is moving to idle mode due to inactivity.
+	errConnIdling = errors.New("grpc: the connection is closing due to channel idleness")
+	// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
+	// service config.
+	invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
+	// PickFirstBalancerName is the name of the pick_first balancer.
+	PickFirstBalancerName = pickfirst.Name
+)
+
+// The following errors are returned from Dial and DialContext
+var (
+	// errNoTransportSecurity indicates that no transport security has been set
+	// for the ClientConn. Users should either set one or explicitly use the
+	// WithInsecure DialOption to disable security.
+	errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)")
+	// errTransportCredsAndBundle indicates that creds bundle is used together
+	// with other individual Transport Credentials.
+	errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
+	// errNoTransportCredsInBundle indicates that the configured creds bundle
+	// returned nil transport credentials.
+	errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials")
+	// errTransportCredentialsMissing indicates that users want to transmit
+	// security information (e.g., OAuth2 token) which requires secure
+	// connection on an insecure connection.
+	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
+)
+
+const (
+	defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
+	defaultClientMaxSendMessageSize    = math.MaxInt32
+	// defaultWriteBufSize and defaultReadBufSize specify the default buffer
+	// sizes for writing and reading HTTP/2 frames.
+	defaultWriteBufSize = 32 * 1024
+	defaultReadBufSize  = 32 * 1024
+)
+
+type defaultConfigSelector struct {
+	sc *ServiceConfig
+}
+
+func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
+	return &iresolver.RPCConfig{
+		Context:      rpcInfo.Context,
+		MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method),
+	}, nil
+}
+
+// NewClient creates a new gRPC "channel" for the target URI provided.  No I/O
+// is performed.  Use of the ClientConn for RPCs will automatically cause it to
+// connect.  Connect may be used to manually create a connection, but for most
+// users this is unnecessary.
+//
+// The target name syntax is defined in
+// https://github.com/grpc/grpc/blob/master/doc/naming.md.  e.g. to use the dns
+// resolver, a "dns:///" prefix should be applied to the target.
+//
+// The DialOptions returned by WithBlock, WithTimeout,
+// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this
+// function.
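+//
+// A minimal illustrative sketch (the target and credentials below are
+// placeholders, not requirements):
+//
+//	conn, err := grpc.NewClient("dns:///localhost:50051",
+//		grpc.WithTransportCredentials(insecure.NewCredentials()))
+//	if err != nil {
+//		// handle error
+//	}
+//	defer conn.Close()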
+func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) {
+	cc := &ClientConn{
+		target: target,
+		conns:  make(map[*addrConn]struct{}),
+		dopts:  defaultDialOptions(),
+	}
+
+	cc.retryThrottler.Store((*retryThrottler)(nil))
+	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+	cc.ctx, cc.cancel = context.WithCancel(context.Background())
+
+	// Apply dial options.
+	disableGlobalOpts := false
+	for _, opt := range opts {
+		if _, ok := opt.(*disableGlobalDialOptions); ok {
+			disableGlobalOpts = true
+			break
+		}
+	}
+
+	if !disableGlobalOpts {
+		for _, opt := range globalDialOptions {
+			opt.apply(&cc.dopts)
+		}
+	}
+
+	for _, opt := range opts {
+		opt.apply(&cc.dopts)
+	}
+
+	// Determine the resolver to use.
+	if err := cc.initParsedTargetAndResolverBuilder(); err != nil {
+		return nil, err
+	}
+
+	for _, opt := range globalPerTargetDialOptions {
+		opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts)
+	}
+
+	chainUnaryClientInterceptors(cc)
+	chainStreamClientInterceptors(cc)
+
+	if err := cc.validateTransportCredentials(); err != nil {
+		return nil, err
+	}
+
+	if cc.dopts.defaultServiceConfigRawJSON != nil {
+		scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts)
+		if scpr.Err != nil {
+			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
+		}
+		cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
+	}
+	cc.mkp = cc.dopts.copts.KeepaliveParams
+
+	if err = cc.initAuthority(); err != nil {
+		return nil, err
+	}
+
+	// Register ClientConn with channelz. Note that this is done only once
+	// channel creation can no longer fail.
+	cc.channelzRegistration(target)
+	channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget)
+	channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
+
+	cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
+	cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
+
+	cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+
+	cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
+	cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
+
+	return cc, nil
+}
+
+// Dial calls DialContext(context.Background(), target, opts...).
+//
+// Deprecated: use NewClient instead.  Will be supported throughout 1.x.
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+	return DialContext(context.Background(), target, opts...)
+}
+
+// DialContext calls NewClient and then exits idle mode.  If WithBlock(true) is
+// used, it calls Connect and WaitForStateChange until either the context
+// expires or the state of the ClientConn is Ready.
+//
+// One subtle difference between NewClient and Dial/DialContext is that the
+// former uses "dns" as the default name resolver, while the latter two use
+// "passthrough" for backward compatibility.  This distinction should not matter
+// to most users, but could matter to legacy users that specify a custom dialer
+// and expect it to receive the target string directly.
+//
+// Deprecated: use NewClient instead.  Will be supported throughout 1.x.
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+	// At the end of this method, we kick the channel out of idle, rather than
+	// waiting for the first rpc.
+	opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...)
+	cc, err := NewClient(target, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	// We start the channel off in idle mode, but kick it out of idle now,
+	// instead of waiting for the first RPC.  This is the legacy behavior of
+	// Dial.
+	defer func() {
+		if err != nil {
+			cc.Close()
+		}
+	}()
+
+	// This creates the name resolver, load balancer, etc.
+	if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+		return nil, err
+	}
+
+	// Return now for non-blocking dials.
+	if !cc.dopts.block {
+		return cc, nil
+	}
+
+	if cc.dopts.timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
+		defer cancel()
+	}
+	defer func() {
+		select {
+		case <-ctx.Done():
+			switch {
+			case ctx.Err() == err:
+				conn = nil
+			case err == nil || !cc.dopts.returnLastError:
+				conn, err = nil, ctx.Err()
+			default:
+				conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
+			}
+		default:
+		}
+	}()
+
+	// A blocking dial blocks until the clientConn is ready.
+	for {
+		s := cc.GetState()
+		if s == connectivity.Idle {
+			cc.Connect()
+		}
+		if s == connectivity.Ready {
+			return cc, nil
+		} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
+			if err = cc.connectionError(); err != nil {
+				terr, ok := err.(interface {
+					Temporary() bool
+				})
+				if ok && !terr.Temporary() {
+					return nil, err
+				}
+			}
+		}
+		if !cc.WaitForStateChange(ctx, s) {
+			// ctx got timeout or canceled.
+			if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
+				return nil, err
+			}
+			return nil, ctx.Err()
+		}
+	}
+}
+
+// addTraceEvent is a helper method to add a trace event on the channel. If the
+// channel is a nested one, the same event is also added on the parent channel.
+func (cc *ClientConn) addTraceEvent(msg string) {
+	ted := &channelz.TraceEvent{
+		Desc:     fmt.Sprintf("Channel %s", msg),
+		Severity: channelz.CtInfo,
+	}
+	if cc.dopts.channelzParent != nil {
+		ted.Parent = &channelz.TraceEvent{
+			Desc:     fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg),
+			Severity: channelz.CtInfo,
+		}
+	}
+	channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
+}
+
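+// idler is a thin adapter that exposes the ClientConn's enter/exit idle
+// callbacks in the form expected by the idle manager (see the call to
+// idle.NewManager in NewClient).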
+type idler ClientConn
+
+func (i *idler) EnterIdleMode() {
+	(*ClientConn)(i).enterIdleMode()
+}
+
+func (i *idler) ExitIdleMode() error {
+	return (*ClientConn)(i).exitIdleMode()
+}
+
+// exitIdleMode moves the channel out of idle mode by recreating the name
+// resolver and load balancer.  This should never be called directly; use
+// cc.idlenessMgr.ExitIdleMode instead.
+func (cc *ClientConn) exitIdleMode() (err error) {
+	cc.mu.Lock()
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return errConnClosing
+	}
+	cc.mu.Unlock()
+
+	// This needs to be called without cc.mu because this builds a new resolver
+	// which might update state or report error inline, which would then need to
+	// acquire cc.mu.
+	if err := cc.resolverWrapper.start(); err != nil {
+		return err
+	}
+
+	cc.addTraceEvent("exiting idle mode")
+	return nil
+}
+
+// initIdleStateLocked initializes common state to how it should be while idle.
+func (cc *ClientConn) initIdleStateLocked() {
+	cc.resolverWrapper = newCCResolverWrapper(cc)
+	cc.balancerWrapper = newCCBalancerWrapper(cc)
+	cc.firstResolveEvent = grpcsync.NewEvent()
+	// cc.conns == nil is a proxy for the ClientConn being closed. So, instead
+	// of setting it to nil here, we recreate the map. This also means that we
+	// don't have to do this when exiting idle mode.
+	cc.conns = make(map[*addrConn]struct{})
+}
+
+// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
+// name resolver, load balancer, and any subchannels.  This should never be
+// called directly; use cc.idlenessMgr.EnterIdleMode instead.
+func (cc *ClientConn) enterIdleMode() {
+	cc.mu.Lock()
+
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return
+	}
+
+	conns := cc.conns
+
+	rWrapper := cc.resolverWrapper
+	rWrapper.close()
+	cc.pickerWrapper.reset()
+	bWrapper := cc.balancerWrapper
+	bWrapper.close()
+	cc.csMgr.updateState(connectivity.Idle)
+	cc.addTraceEvent("entering idle mode")
+
+	cc.initIdleStateLocked()
+
+	cc.mu.Unlock()
+
+	// Block until the name resolver and LB policy are closed.
+	<-rWrapper.serializer.Done()
+	<-bWrapper.serializer.Done()
+
+	// Close all subchannels after the LB policy is closed.
+	for ac := range conns {
+		ac.tearDown(errConnIdling)
+	}
+}
+
+// validateTransportCredentials performs a series of checks on the configured
+// transport credentials. It returns a non-nil error if any of these conditions
+// are met:
+//   - no transport creds and no creds bundle is configured
+//   - both transport creds and creds bundle are configured
+//   - creds bundle is configured, but it lacks a transport credentials
+//   - insecure transport creds configured alongside call creds that require
+//     transport level security
+//
+// If none of the above conditions are met, the configured credentials are
+// deemed valid and a nil error is returned.
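+//
+// For example (illustrative), configuring only
+// grpc.WithTransportCredentials(insecure.NewCredentials()) is valid, while
+// combining transport credentials with a credentials.Bundle is not.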
+func (cc *ClientConn) validateTransportCredentials() error {
+	if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
+		return errNoTransportSecurity
+	}
+	if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
+		return errTransportCredsAndBundle
+	}
+	if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
+		return errNoTransportCredsInBundle
+	}
+	transportCreds := cc.dopts.copts.TransportCredentials
+	if transportCreds == nil {
+		transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
+	}
+	if transportCreds.Info().SecurityProtocol == "insecure" {
+		for _, cd := range cc.dopts.copts.PerRPCCredentials {
+			if cd.RequireTransportSecurity() {
+				return errTransportCredentialsMissing
+			}
+		}
+	}
+	return nil
+}
+
+// channelzRegistration registers the newly created ClientConn with channelz and
+// stores the returned identifier in `cc.channelz`.  A channelz trace event is
+// emitted for ClientConn creation. If the newly created ClientConn is a nested
+// one, i.e., a valid parent ClientConn ID is specified via a dial option, the
+// trace event is also added to the parent.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) channelzRegistration(target string) {
+	parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
+	cc.channelz = channelz.RegisterChannel(parentChannel, target)
+	cc.addTraceEvent("created")
+}
+
+// chainUnaryClientInterceptors chains all unary client interceptors into one.
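+// For example (illustrative), with interceptors [a, b, c], a runs first and c
+// last, so the effective call chain is a -> b -> c -> invoker.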
+func chainUnaryClientInterceptors(cc *ClientConn) {
+	interceptors := cc.dopts.chainUnaryInts
+	// Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will
+	// be executed before any other chained interceptors.
+	if cc.dopts.unaryInt != nil {
+		interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...)
+	}
+	var chainedInt UnaryClientInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
+			return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
+		}
+	}
+	cc.dopts.unaryInt = chainedInt
+}
+
+// getChainUnaryInvoker recursively generates the chained unary invoker.
+func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker {
+	if curr == len(interceptors)-1 {
+		return finalInvoker
+	}
+	return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
+		return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
+	}
+}
+
+// chainStreamClientInterceptors chains all stream client interceptors into one.
+func chainStreamClientInterceptors(cc *ClientConn) {
+	interceptors := cc.dopts.chainStreamInts
+	// Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will
+	// be executed before any other chained interceptors.
+	if cc.dopts.streamInt != nil {
+		interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...)
+	}
+	var chainedInt StreamClientInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) {
+			return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...)
+		}
+	}
+	cc.dopts.streamInt = chainedInt
+}
+
+// getChainStreamer recursively generates the chained client stream constructor.
+func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer {
+	if curr == len(interceptors)-1 {
+		return finalStreamer
+	}
+	return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+		return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...)
+	}
+}
+
+// newConnectivityStateManager creates a connectivityStateManager for
+// the specified channel.
+func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager {
+	return &connectivityStateManager{
+		channelz: channel,
+		pubSub:   grpcsync.NewPubSub(ctx),
+	}
+}
+
+// connectivityStateManager keeps the connectivity.State of ClientConn.
+// This struct will eventually be exported so the balancers can access it.
+//
+// TODO: If possible, get rid of the `connectivityStateManager` type, and
+// provide this functionality using the `PubSub`, to avoid keeping track of
+// the connectivity state at two places.
+type connectivityStateManager struct {
+	mu         sync.Mutex
+	state      connectivity.State
+	notifyChan chan struct{}
+	channelz   *channelz.Channel
+	pubSub     *grpcsync.PubSub
+}
+
+// updateState updates the connectivity.State of the ClientConn. If the state
+// changes, it notifies any goroutines waiting for a state change to happen.
+func (csm *connectivityStateManager) updateState(state connectivity.State) {
+	csm.mu.Lock()
+	defer csm.mu.Unlock()
+	if csm.state == connectivity.Shutdown {
+		return
+	}
+	if csm.state == state {
+		return
+	}
+	csm.state = state
+	csm.channelz.ChannelMetrics.State.Store(&state)
+	csm.pubSub.Publish(state)
+
+	channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state)
+	if csm.notifyChan != nil {
+		// There are other goroutines waiting on this channel.
+		close(csm.notifyChan)
+		csm.notifyChan = nil
+	}
+}
+
+func (csm *connectivityStateManager) getState() connectivity.State {
+	csm.mu.Lock()
+	defer csm.mu.Unlock()
+	return csm.state
+}
+
+func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} {
+	csm.mu.Lock()
+	defer csm.mu.Unlock()
+	if csm.notifyChan == nil {
+		csm.notifyChan = make(chan struct{})
+	}
+	return csm.notifyChan
+}
+
+// ClientConnInterface defines the functions clients need to perform unary and
+// streaming RPCs.  It is implemented by *ClientConn, and is only intended to
+// be referenced by generated code.
+type ClientConnInterface interface {
+	// Invoke performs a unary RPC and returns after the response is received
+	// into reply.
+	Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error
+	// NewStream begins a streaming RPC.
+	NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
+}
+
+// Assert *ClientConn implements ClientConnInterface.
+var _ ClientConnInterface = (*ClientConn)(nil)
+
+// ClientConn represents a virtual connection to a conceptual endpoint, to
+// perform RPCs.
+//
+// A ClientConn is free to have zero or more actual connections to the endpoint
+// based on configuration, load, etc. It is also free to determine which actual
+// endpoints to use and may change it every RPC, permitting client-side load
+// balancing.
+//
+// A ClientConn encapsulates a range of functionality including name
+// resolution, TCP connection establishment (with retries and backoff) and TLS
+// handshakes. It also handles errors on established connections by
+// re-resolving the name and reconnecting.
+type ClientConn struct {
+	ctx    context.Context    // Initialized using the background context at dial time.
+	cancel context.CancelFunc // Cancelled on close.
+
+	// The following are initialized at dial time, and are read-only after that.
+	target              string            // User's dial target.
+	parsedTarget        resolver.Target   // See initParsedTargetAndResolverBuilder().
+	authority           string            // See initAuthority().
+	dopts               dialOptions       // Default and user specified dial options.
+	channelz            *channelz.Channel // Channelz object.
+	resolverBuilder     resolver.Builder  // See initParsedTargetAndResolverBuilder().
+	idlenessMgr         *idle.Manager
+	metricsRecorderList *stats.MetricsRecorderList
+
+	// The following provide their own synchronization, and therefore don't
+	// require cc.mu to be held to access them.
+	csMgr              *connectivityStateManager
+	pickerWrapper      *pickerWrapper
+	safeConfigSelector iresolver.SafeConfigSelector
+	retryThrottler     atomic.Value // Updated from service config.
+
+	// mu protects the following fields.
+	// TODO: split mu so the same mutex isn't used for everything.
+	mu              sync.RWMutex
+	resolverWrapper *ccResolverWrapper         // Always recreated whenever entering idle to simplify Close.
+	balancerWrapper *ccBalancerWrapper         // Always recreated whenever entering idle to simplify Close.
+	sc              *ServiceConfig             // Latest service config received from the resolver.
+	conns           map[*addrConn]struct{}     // Set to nil on close.
+	mkp             keepalive.ClientParameters // May be updated upon receipt of a GoAway.
+	// firstResolveEvent is used to track whether the name resolver sent us at
+	// least one update. RPCs block on this event.  May be accessed without mu
+	// if we know we cannot be asked to enter idle mode while accessing it (e.g.
+	// when the idle manager has already been closed, or if we are already
+	// entering idle mode).
+	firstResolveEvent *grpcsync.Event
+
+	lceMu               sync.Mutex // protects lastConnectionError
+	lastConnectionError error
+}
+
+// WaitForStateChange waits until the connectivity.State of the ClientConn
+// changes from sourceState or ctx expires. It returns true in the former case
+// and false in the latter.
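+//
+// A typical (illustrative) wait loop, similar to the one used by blocking
+// dials in this file:
+//
+//	for {
+//		state := cc.GetState()
+//		if state == connectivity.Ready {
+//			break
+//		}
+//		if !cc.WaitForStateChange(ctx, state) {
+//			// ctx expired or was canceled.
+//			break
+//		}
+//	}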
+func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
+	ch := cc.csMgr.getNotifyChan()
+	if cc.csMgr.getState() != sourceState {
+		return true
+	}
+	select {
+	case <-ctx.Done():
+		return false
+	case <-ch:
+		return true
+	}
+}
+
+// GetState returns the connectivity.State of ClientConn.
+func (cc *ClientConn) GetState() connectivity.State {
+	return cc.csMgr.getState()
+}
+
+// Connect causes all subchannels in the ClientConn to attempt to connect if
+// the channel is idle.  Does not wait for the connection attempts to begin
+// before returning.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func (cc *ClientConn) Connect() {
+	if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+		cc.addTraceEvent(err.Error())
+		return
+	}
+	// If the ClientConn was not in idle mode, we need to call ExitIdle on the
+	// LB policy so that connections can be created.
+	cc.mu.Lock()
+	cc.balancerWrapper.exitIdle()
+	cc.mu.Unlock()
+}
+
+// waitForResolvedAddrs blocks until the resolver has provided addresses or the
+// context expires.  Returns nil unless the context expires first; otherwise
+// returns a status error based on the context.
+func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
+	// This is on the RPC path, so we use a fast path to avoid the
+	// more-expensive "select" below after the resolver has returned once.
+	if cc.firstResolveEvent.HasFired() {
+		return nil
+	}
+	select {
+	case <-cc.firstResolveEvent.Done():
+		return nil
+	case <-ctx.Done():
+		return status.FromContextError(ctx.Err()).Err()
+	case <-cc.ctx.Done():
+		return ErrClientConnClosing
+	}
+}
+
+var emptyServiceConfig *ServiceConfig
+
+func init() {
+	cfg := parseServiceConfig("{}", defaultMaxCallAttempts)
+	if cfg.Err != nil {
+		panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
+	}
+	emptyServiceConfig = cfg.Config.(*ServiceConfig)
+
+	internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
+		return cc.csMgr.pubSub.Subscribe(s)
+	}
+	internal.EnterIdleModeForTesting = func(cc *ClientConn) {
+		cc.idlenessMgr.EnterIdleModeForTesting()
+	}
+	internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
+		return cc.idlenessMgr.ExitIdleMode()
+	}
+}
+
+func (cc *ClientConn) maybeApplyDefaultServiceConfig() {
+	if cc.sc != nil {
+		cc.applyServiceConfigAndBalancer(cc.sc, nil)
+		return
+	}
+	if cc.dopts.defaultServiceConfig != nil {
+		cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig})
+	} else {
+		cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig})
+	}
+}
+
+func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error {
+	defer cc.firstResolveEvent.Fire()
+	// Check if the ClientConn is already closed. Some fields (e.g.
+	// balancerWrapper) are set to nil when closing the ClientConn, and could
+	// cause nil pointer panic if we don't have this check.
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return nil
+	}
+
+	if err != nil {
+		// May need to apply the initial service config in case the resolver
+		// doesn't support service configs, or doesn't provide a service config
+		// with the new addresses.
+		cc.maybeApplyDefaultServiceConfig()
+
+		cc.balancerWrapper.resolverError(err)
+
+		// No addresses are valid with err set; return early.
+		cc.mu.Unlock()
+		return balancer.ErrBadResolverState
+	}
+
+	var ret error
+	if cc.dopts.disableServiceConfig {
+		channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig)
+		cc.maybeApplyDefaultServiceConfig()
+	} else if s.ServiceConfig == nil {
+		cc.maybeApplyDefaultServiceConfig()
+		// TODO: do we need to apply a failing LB policy if there is no
+		// default, per the error handling design?
+	} else {
+		if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
+			configSelector := iresolver.GetConfigSelector(s)
+			if configSelector != nil {
+				if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 {
+					channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector")
+				}
+			} else {
+				configSelector = &defaultConfigSelector{sc}
+			}
+			cc.applyServiceConfigAndBalancer(sc, configSelector)
+		} else {
+			ret = balancer.ErrBadResolverState
+			if cc.sc == nil {
+				// Apply the failing LB only if we haven't received valid service config
+				// from the name resolver in the past.
+				cc.applyFailingLBLocked(s.ServiceConfig)
+				cc.mu.Unlock()
+				return ret
+			}
+		}
+	}
+
+	var balCfg serviceconfig.LoadBalancingConfig
+	if cc.sc != nil && cc.sc.lbConfig != nil {
+		balCfg = cc.sc.lbConfig
+	}
+	bw := cc.balancerWrapper
+	cc.mu.Unlock()
+
+	uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
+	if ret == nil {
+		ret = uccsErr // Prefer ErrBadResolverState since any other error is
+		// currently meaningless to the caller.
+	}
+	return ret
+}
+
+// applyFailingLBLocked is akin to configuring an LB policy on the channel which
+// always fails RPCs. Here, an actual LB policy is not configured, but an always
+// erroring picker is configured, which returns errors with information about
+// what was invalid in the received service config. A config selector with no
+// service config is configured, and the connectivity state of the channel is
+// set to TransientFailure.
+func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
+	var err error
+	if sc.Err != nil {
+		err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
+	} else {
+		err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
+	}
+	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+	cc.pickerWrapper.updatePicker(base.NewErrPicker(err))
+	cc.csMgr.updateState(connectivity.TransientFailure)
+}
+
+// copyAddresses makes a copy of the input addresses slice. Addresses are
+// passed during subConn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
+	out := make([]resolver.Address, len(in))
+	copy(out, in)
+	return out
+}
+
+// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns.
+//
+// Caller needs to make sure len(addrs) > 0.
+func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+	if cc.conns == nil {
+		return nil, ErrClientConnClosing
+	}
+
+	ac := &addrConn{
+		state:          connectivity.Idle,
+		cc:             cc,
+		addrs:          copyAddresses(addrs),
+		scopts:         opts,
+		dopts:          cc.dopts,
+		channelz:       channelz.RegisterSubChannel(cc.channelz, ""),
+		resetBackoff:   make(chan struct{}),
+		stateReadyChan: make(chan struct{}),
+	}
+	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+	// Start with our address set to the first address; this may be updated if
+	// we connect to different addresses.
+	ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr)
+
+	channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
+		Desc:     "Subchannel created",
+		Severity: channelz.CtInfo,
+		Parent: &channelz.TraceEvent{
+			Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID),
+			Severity: channelz.CtInfo,
+		},
+	})
+
+	// Track ac in cc. This needs to be done before any getTransport(...) is called.
+	cc.conns[ac] = struct{}{}
+	return ac, nil
+}
+
+// removeAddrConn removes the given addrConn from the clientConn's set of conns.
+// It also tears down the ac with the given error.
+func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
+	cc.mu.Lock()
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return
+	}
+	delete(cc.conns, ac)
+	cc.mu.Unlock()
+	ac.tearDown(err)
+}
+
+// Target returns the target string of the ClientConn.
+func (cc *ClientConn) Target() string {
+	return cc.target
+}
+
+// CanonicalTarget returns the canonical target string of the ClientConn.
+func (cc *ClientConn) CanonicalTarget() string {
+	return cc.parsedTarget.String()
+}
+
+func (cc *ClientConn) incrCallsStarted() {
+	cc.channelz.ChannelMetrics.CallsStarted.Add(1)
+	cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
+}
+
+func (cc *ClientConn) incrCallsSucceeded() {
+	cc.channelz.ChannelMetrics.CallsSucceeded.Add(1)
+}
+
+func (cc *ClientConn) incrCallsFailed() {
+	cc.channelz.ChannelMetrics.CallsFailed.Add(1)
+}
+
+// connect starts creating a transport.
+// It does nothing if the ac is not IDLE.
+// TODO(bar) Move this to the addrConn section.
+func (ac *addrConn) connect() error {
+	ac.mu.Lock()
+	if ac.state == connectivity.Shutdown {
+		if logger.V(2) {
+			logger.Infof("connect called on shutdown addrConn; ignoring.")
+		}
+		ac.mu.Unlock()
+		return errConnClosing
+	}
+	if ac.state != connectivity.Idle {
+		if logger.V(2) {
+			logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
+		}
+		ac.mu.Unlock()
+		return nil
+	}
+
+	ac.resetTransportAndUnlock()
+	return nil
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+		a.Attributes.Equal(b.Attributes) &&
+		a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+	return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
+}
+
+// updateAddrs updates ac.addrs with the new addresses list and handles active
+// connections or connection attempts.
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
+	addrs = copyAddresses(addrs)
+	limit := len(addrs)
+	if limit > 5 {
+		limit = 5
+	}
+	channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
+
+	ac.mu.Lock()
+	if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
+		ac.mu.Unlock()
+		return
+	}
+
+	ac.addrs = addrs
+
+	if ac.state == connectivity.Shutdown ||
+		ac.state == connectivity.TransientFailure ||
+		ac.state == connectivity.Idle {
+		// We were not connecting, so do nothing but update the addresses.
+		ac.mu.Unlock()
+		return
+	}
+
+	if ac.state == connectivity.Ready {
+		// Try to find the connected address.
+		for _, a := range addrs {
+			a.ServerName = ac.cc.getServerName(a)
+			if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
+				// We are connected to a valid address, so do nothing but
+				// update the addresses.
+				ac.mu.Unlock()
+				return
+			}
+		}
+	}
+
+	// We are either connected to the wrong address or currently connecting.
+	// Stop the current iteration and restart.
+
+	ac.cancel()
+	ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)
+
+	// We have to defer here because GracefulClose => onClose, which requires
+	// locking ac.mu.
+	if ac.transport != nil {
+		defer ac.transport.GracefulClose()
+		ac.transport = nil
+	}
+
+	if len(addrs) == 0 {
+		ac.updateConnectivityState(connectivity.Idle, nil)
+	}
+
+	// Since we were connecting/connected, we should start a new connection
+	// attempt.
+	go ac.resetTransportAndUnlock()
+}
+
+// getServerName determines the serverName to be used in the connection
+// handshake. The default value for the serverName is the authority on the
+// ClientConn, which either comes from the user's dial target or through an
+// authority override specified using the WithAuthority dial option. Name
+// resolvers can specify a per-address override for the serverName through the
+// resolver.Address.ServerName field which is used only if the WithAuthority
+// dial option was not used. The rationale is that per-address authority
+// overrides specified by the name resolver can represent a security risk, while
+// an override specified by the user is more dependable since they probably know
+// what they are doing.
+func (cc *ClientConn) getServerName(addr resolver.Address) string {
+	if cc.dopts.authority != "" {
+		return cc.dopts.authority
+	}
+	if addr.ServerName != "" {
+		return addr.ServerName
+	}
+	return cc.authority
+}
+
+func getMethodConfig(sc *ServiceConfig, method string) MethodConfig {
+	if sc == nil {
+		return MethodConfig{}
+	}
+	if m, ok := sc.Methods[method]; ok {
+		return m
+	}
+	i := strings.LastIndex(method, "/")
+	if m, ok := sc.Methods[method[:i+1]]; ok {
+		return m
+	}
+	return sc.Methods[""]
+}
+
+// GetMethodConfig gets the method config of the input method.
+// If there's an exact match for input method (i.e. /service/method), we return
+// the corresponding MethodConfig.
+// If there isn't an exact match for the input method, we look for the service's default
+// config under the service (i.e /service/) and then for the default for all services (empty string).
+//
+// If there is a default MethodConfig for the service, we return it.
+// Otherwise, we return an empty MethodConfig.
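+//
+// For example, a lookup for "/foo.Service/Bar" first tries the exact method,
+// then the service default "/foo.Service/", and finally the global default "".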
+func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
+	// TODO: Avoid the locking here.
+	cc.mu.RLock()
+	defer cc.mu.RUnlock()
+	return getMethodConfig(cc.sc, method)
+}
+
+func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
+	cc.mu.RLock()
+	defer cc.mu.RUnlock()
+	if cc.sc == nil {
+		return nil
+	}
+	return cc.sc.healthCheckConfig
+}
+
+func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
+	return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{
+		Ctx:            ctx,
+		FullMethodName: method,
+	})
+}
+
+func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) {
+	if sc == nil {
+		// should never reach here.
+		return
+	}
+	cc.sc = sc
+	if configSelector != nil {
+		cc.safeConfigSelector.UpdateConfigSelector(configSelector)
+	}
+
+	if cc.sc.retryThrottling != nil {
+		newThrottler := &retryThrottler{
+			tokens: cc.sc.retryThrottling.MaxTokens,
+			max:    cc.sc.retryThrottling.MaxTokens,
+			thresh: cc.sc.retryThrottling.MaxTokens / 2,
+			ratio:  cc.sc.retryThrottling.TokenRatio,
+		}
+		cc.retryThrottler.Store(newThrottler)
+	} else {
+		cc.retryThrottler.Store((*retryThrottler)(nil))
+	}
+}
+
+func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
+	cc.mu.RLock()
+	cc.resolverWrapper.resolveNow(o)
+	cc.mu.RUnlock()
+}
+
+func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) {
+	cc.resolverWrapper.resolveNow(o)
+}
+
+// ResetConnectBackoff wakes up all subchannels in transient failure and causes
+// them to attempt another connection immediately.  It also resets the backoff
+// times used for subsequent attempts regardless of the current state.
+//
+// In general, this function should not be used.  Typical service or network
+// outages result in a reasonable client reconnection strategy by default.
+// However, if a previously unavailable network becomes available, this may be
+// used to trigger an immediate reconnect.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func (cc *ClientConn) ResetConnectBackoff() {
+	cc.mu.Lock()
+	conns := cc.conns
+	cc.mu.Unlock()
+	for ac := range conns {
+		ac.resetConnectBackoff()
+	}
+}
+
+// Close tears down the ClientConn and all underlying connections.
+func (cc *ClientConn) Close() error {
+	defer func() {
+		cc.cancel()
+		<-cc.csMgr.pubSub.Done()
+	}()
+
+	// Prevent calls to enter/exit idle immediately, and ensure we are not
+	// currently entering/exiting idle mode.
+	cc.idlenessMgr.Close()
+
+	cc.mu.Lock()
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return ErrClientConnClosing
+	}
+
+	conns := cc.conns
+	cc.conns = nil
+	cc.csMgr.updateState(connectivity.Shutdown)
+
+	// We can safely unlock and continue to access all fields now as
+	// cc.conns==nil, preventing any further operations on cc.
+	cc.mu.Unlock()
+
+	cc.resolverWrapper.close()
+	// The order of closing matters here since the balancer wrapper assumes the
+	// picker is closed before it is closed.
+	cc.pickerWrapper.close()
+	cc.balancerWrapper.close()
+
+	<-cc.resolverWrapper.serializer.Done()
+	<-cc.balancerWrapper.serializer.Done()
+
+	for ac := range conns {
+		ac.tearDown(ErrClientConnClosing)
+	}
+	cc.addTraceEvent("deleted")
+	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+	// trace reference to the entity being deleted, and thus prevent it from being
+	// deleted right away.
+	channelz.RemoveEntry(cc.channelz.ID)
+
+	return nil
+}
+
+// addrConn is a network connection to a given address.
+type addrConn struct {
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	cc     *ClientConn
+	dopts  dialOptions
+	acbw   *acBalancerWrapper
+	scopts balancer.NewSubConnOptions
+
+	// transport is set when there's a viable transport (note: ac state may not be READY as LB channel
+	// health checking may require the server to report healthy to set ac to READY), and is reset
+	// to nil when the current transport should no longer be used to create a stream (e.g. after GoAway
+	// is received, transport is closed, ac has been torn down).
+	transport transport.ClientTransport // The current transport.
+
+	// This mutex is used on the RPC path, so its usage should be minimized as
+	// much as possible.
+	// TODO: Find a lock-free way to retrieve the transport and state from the
+	// addrConn.
+	mu      sync.Mutex
+	curAddr resolver.Address   // The current address.
+	addrs   []resolver.Address // All addresses that the resolver resolved to.
+
+	// Use updateConnectivityState for updating addrConn's connectivity state.
+	state          connectivity.State
+	stateReadyChan chan struct{} // closed and recreated on every READY state change.
+
+	backoffIdx   int // Needs to be stateful for resetConnectBackoff.
+	resetBackoff chan struct{}
+
+	channelz *channelz.SubChannel
+}
+
+// Note: this requires a lock on ac.mu.
+func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
+	if ac.state == s {
+		return
+	}
+	ac.state = s
+	ac.channelz.ChannelMetrics.State.Store(&s)
+	if lastErr == nil {
+		channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s)
+	} else {
+		channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
+	}
+	ac.acbw.updateState(s, ac.curAddr, lastErr)
+}
+
+// adjustParams updates parameters used to create transports upon
+// receiving a GoAway.
+func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
+	switch r {
+	case transport.GoAwayTooManyPings:
+		v := 2 * ac.dopts.copts.KeepaliveParams.Time
+		ac.cc.mu.Lock()
+		if v > ac.cc.mkp.Time {
+			ac.cc.mkp.Time = v
+		}
+		ac.cc.mu.Unlock()
+	}
+}
+
+// resetTransportAndUnlock unconditionally connects the addrConn.
+//
+// ac.mu must be held by the caller, and this function will guarantee it is released.
+func (ac *addrConn) resetTransportAndUnlock() {
+	acCtx := ac.ctx
+	if acCtx.Err() != nil {
+		ac.mu.Unlock()
+		return
+	}
+
+	addrs := ac.addrs
+	backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
+	// This will be the duration that dial gets to finish.
+	dialDuration := minConnectTimeout
+	if ac.dopts.minConnectTimeout != nil {
+		dialDuration = ac.dopts.minConnectTimeout()
+	}
+
+	if dialDuration < backoffFor {
+		// Give dial more time as we keep failing to connect.
+		dialDuration = backoffFor
+	}
+	// We can potentially spend all the time trying the first address, and
+	// if the server accepts the connection and then hangs, the following
+	// addresses will never be tried.
+	//
+	// The spec doesn't mention what should be done for multiple addresses.
+	// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+	connectDeadline := time.Now().Add(dialDuration)
+
+	ac.updateConnectivityState(connectivity.Connecting, nil)
+	ac.mu.Unlock()
+
+	if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+		ac.cc.resolveNow(resolver.ResolveNowOptions{})
+		ac.mu.Lock()
+		if acCtx.Err() != nil {
+			// addrConn was torn down.
+			ac.mu.Unlock()
+			return
+		}
+		// After exhausting all addresses, the addrConn enters
+		// TRANSIENT_FAILURE.
+		ac.updateConnectivityState(connectivity.TransientFailure, err)
+
+		// Backoff.
+		b := ac.resetBackoff
+		ac.mu.Unlock()
+
+		timer := time.NewTimer(backoffFor)
+		select {
+		case <-timer.C:
+			ac.mu.Lock()
+			ac.backoffIdx++
+			ac.mu.Unlock()
+		case <-b:
+			timer.Stop()
+		case <-acCtx.Done():
+			timer.Stop()
+			return
+		}
+
+		ac.mu.Lock()
+		if acCtx.Err() == nil {
+			ac.updateConnectivityState(connectivity.Idle, err)
+		}
+		ac.mu.Unlock()
+		return
+	}
+	// Success; reset backoff.
+	ac.mu.Lock()
+	ac.backoffIdx = 0
+	ac.mu.Unlock()
+}
+
+// tryAllAddrs tries to create a connection to each of the addresses, stopping
+// at the first successful one. It returns an error if no address was
+// successfully connected, or updates ac appropriately with the new transport.
+func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
+	var firstConnErr error
+	for _, addr := range addrs {
+		ac.channelz.ChannelMetrics.Target.Store(&addr.Addr)
+		if ctx.Err() != nil {
+			return errConnClosing
+		}
+		ac.mu.Lock()
+
+		ac.cc.mu.RLock()
+		ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+		ac.cc.mu.RUnlock()
+
+		copts := ac.dopts.copts
+		if ac.scopts.CredsBundle != nil {
+			copts.CredsBundle = ac.scopts.CredsBundle
+		}
+		ac.mu.Unlock()
+
+		channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr)
+
+		err := ac.createTransport(ctx, addr, copts, connectDeadline)
+		if err == nil {
+			return nil
+		}
+		if firstConnErr == nil {
+			firstConnErr = err
+		}
+		ac.cc.updateConnectionError(err)
+	}
+
+	// Couldn't connect to any address.
+	return firstConnErr
+}
+
+// createTransport creates a connection to addr. It returns an error if the
+// address was not successfully connected, or updates ac appropriately with the
+// new transport.
+func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
+	addr.ServerName = ac.cc.getServerName(addr)
+	hctx, hcancel := context.WithCancel(ctx)
+
+	onClose := func(r transport.GoAwayReason) {
+		ac.mu.Lock()
+		defer ac.mu.Unlock()
+		// adjust params based on GoAwayReason
+		ac.adjustParams(r)
+		if ctx.Err() != nil {
+			// Already shut down or connection attempt canceled.  tearDown() or
+			// updateAddrs() already cleared the transport and canceled hctx
+			// via ac.ctx, and we expected this connection to be closed, so do
+			// nothing here.
+			return
+		}
+		hcancel()
+		if ac.transport == nil {
+			// We're still connecting to this address, which could error.  Do
+			// not update the connectivity state or resolve; these will happen
+			// at the end of the tryAllAddrs connection loop in the event of an
+			// error.
+			return
+		}
+		ac.transport = nil
+		// Refresh the name resolver on any connection loss.
+		ac.cc.resolveNow(resolver.ResolveNowOptions{})
+		// Always go idle and wait for the LB policy to initiate a new
+		// connection attempt.
+		ac.updateConnectivityState(connectivity.Idle, nil)
+	}
+
+	connectCtx, cancel := context.WithDeadline(ctx, connectDeadline)
+	defer cancel()
+	copts.ChannelzParent = ac.channelz
+
+	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
+	if err != nil {
+		if logger.V(2) {
+			logger.Infof("Creating new client transport to %q: %v", addr, err)
+		}
+		// newTr is either nil, or closed.
+		hcancel()
+		channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
+		return err
+	}
+
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	if ctx.Err() != nil {
+		// This can happen if the subConn was removed while in `Connecting`
+		// state. tearDown() would have set the state to `Shutdown`, but
+		// would not have closed the transport since ac.transport would not
+		// have been set at that point.
+		//
+		// We run this in a goroutine because newTr.Close() calls onClose()
+		// inline, which requires locking ac.mu.
+		//
+		// The error we pass to Close() is immaterial since there are no open
+		// streams at this point, so no trailers with error details will be sent
+		// out. We just need to pass a non-nil error.
+		//
+		// This can also happen when updateAddrs is called during a connection
+		// attempt.
+		go newTr.Close(transport.ErrConnClosing)
+		return nil
+	}
+	if hctx.Err() != nil {
+		// onClose was already called for this connection, but the connection
+		// was successfully established first.  Consider it a success and set
+		// the new state to Idle.
+		ac.updateConnectivityState(connectivity.Idle, nil)
+		return nil
+	}
+	ac.curAddr = addr
+	ac.transport = newTr
+	ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
+	return nil
+}
+
+// startHealthCheck starts the health checking stream (RPC) to watch the health
+// status of this connection if health checking is requested and configured.
+//
+// LB channel health checking is enabled when all requirements below are met:
+// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
+// 2. internal.HealthCheckFunc is set by importing the grpc/health package
+// 3. a service config with non-empty healthCheckConfig field is provided
+// 4. the load balancer requests it
+//
+// It sets addrConn to READY if the health checking stream is not started.
+//
+// Caller must hold ac.mu.
+func (ac *addrConn) startHealthCheck(ctx context.Context) {
+	var healthcheckManagingState bool
+	defer func() {
+		if !healthcheckManagingState {
+			ac.updateConnectivityState(connectivity.Ready, nil)
+		}
+	}()
+
+	if ac.cc.dopts.disableHealthCheck {
+		return
+	}
+	healthCheckConfig := ac.cc.healthCheckConfig()
+	if healthCheckConfig == nil {
+		return
+	}
+	if !ac.scopts.HealthCheckEnabled {
+		return
+	}
+	healthCheckFunc := ac.cc.dopts.healthCheckFunc
+	if healthCheckFunc == nil {
+		// The health package is not imported to set health check function.
+		//
+		// TODO: add a link to the health check doc in the error message.
+		channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.")
+		return
+	}
+
+	healthcheckManagingState = true
+
+	// Set up the health check helper functions.
+	currentTr := ac.transport
+	newStream := func(method string) (any, error) {
+		ac.mu.Lock()
+		if ac.transport != currentTr {
+			ac.mu.Unlock()
+			return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use")
+		}
+		ac.mu.Unlock()
+		return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
+	}
+	setConnectivityState := func(s connectivity.State, lastErr error) {
+		ac.mu.Lock()
+		defer ac.mu.Unlock()
+		if ac.transport != currentTr {
+			return
+		}
+		ac.updateConnectivityState(s, lastErr)
+	}
+	// Start the health checking stream.
+	go func() {
+		err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
+		if err != nil {
+			if status.Code(err) == codes.Unimplemented {
+				channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled")
+			} else {
+				channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err)
+			}
+		}
+	}()
+}
+
+func (ac *addrConn) resetConnectBackoff() {
+	ac.mu.Lock()
+	close(ac.resetBackoff)
+	ac.backoffIdx = 0
+	ac.resetBackoff = make(chan struct{})
+	ac.mu.Unlock()
+}
+
+// getReadyTransport returns the transport if ac's state is READY or nil if not.
+func (ac *addrConn) getReadyTransport() transport.ClientTransport {
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	if ac.state == connectivity.Ready {
+		return ac.transport
+	}
+	return nil
+}
+
+// getTransport waits until the addrConn is ready and returns the transport.
+// If the context expires first, returns an appropriate status.  If the
+// addrConn is stopped first, returns an Unavailable status error.
+func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
+	for ctx.Err() == nil {
+		ac.mu.Lock()
+		t, state, sc := ac.transport, ac.state, ac.stateReadyChan
+		ac.mu.Unlock()
+		if state == connectivity.Ready {
+			return t, nil
+		}
+		if state == connectivity.Shutdown {
+			return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
+		}
+
+		select {
+		case <-ctx.Done():
+		case <-sc:
+		}
+	}
+	return nil, status.FromContextError(ctx.Err()).Err()
+}
+
+// tearDown starts to tear down the addrConn.
+//
+// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
+// will leak. In most cases, call cc.removeAddrConn() instead.
+func (ac *addrConn) tearDown(err error) {
+	ac.mu.Lock()
+	if ac.state == connectivity.Shutdown {
+		ac.mu.Unlock()
+		return
+	}
+	curTr := ac.transport
+	ac.transport = nil
+	// We have to set the state to Shutdown before anything else to prevent races
+	// between setting the state and logic that waits on context cancellation / etc.
+	ac.updateConnectivityState(connectivity.Shutdown, nil)
+	ac.cancel()
+	ac.curAddr = resolver.Address{}
+
+	channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
+		Desc:     "Subchannel deleted",
+		Severity: channelz.CtInfo,
+		Parent: &channelz.TraceEvent{
+			Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID),
+			Severity: channelz.CtInfo,
+		},
+	})
+	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+	// trace reference to the entity being deleted, and thus prevent it from
+	// being deleted right away.
+	channelz.RemoveEntry(ac.channelz.ID)
+	ac.mu.Unlock()
+
+	// We have to release the lock before the call to GracefulClose/Close here
+	// because both of them call onClose(), which requires locking ac.mu.
+	if curTr != nil {
+		if err == errConnDrain {
+			// Close the transport gracefully when the subConn is being shutdown.
+			//
+			// GracefulClose() may be executed multiple times if:
+			// - multiple GoAway frames are received from the server
+			// - there are concurrent name resolver or balancer triggered
+			//   address removal and GoAway
+			curTr.GracefulClose()
+		} else {
+			// Hard close the transport when the channel is entering idle or is
+			// being shutdown. In the case where the channel is being shutdown,
+			// closing of transports is also taken care of by cancellation of cc.ctx.
+			// But in the case where the channel is entering idle, we need to
+			// explicitly close the transports here. Instead of distinguishing
+			// between these two cases, it is simpler to close the transport
+			// unconditionally here.
+			curTr.Close(err)
+		}
+	}
+}
+
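+// retryThrottler implements the retry throttling algorithm from the service
+// config: each call to throttle consumes one token, each successful RPC adds
+// back `ratio` tokens (capped at max), and retries are disallowed while the
+// token count is at or below thresh (half of max).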
+type retryThrottler struct {
+	max    float64
+	thresh float64
+	ratio  float64
+
+	mu     sync.Mutex
+	tokens float64 // TODO(dfawley): replace with atomic and remove lock.
+}
+
+// throttle subtracts a retry token from the pool and returns whether a retry
+// should be throttled (disallowed) based upon the retry throttling policy in
+// the service config.
+func (rt *retryThrottler) throttle() bool {
+	if rt == nil {
+		return false
+	}
+	rt.mu.Lock()
+	defer rt.mu.Unlock()
+	rt.tokens--
+	if rt.tokens < 0 {
+		rt.tokens = 0
+	}
+	return rt.tokens <= rt.thresh
+}
+
+func (rt *retryThrottler) successfulRPC() {
+	if rt == nil {
+		return
+	}
+	rt.mu.Lock()
+	defer rt.mu.Unlock()
+	rt.tokens += rt.ratio
+	if rt.tokens > rt.max {
+		rt.tokens = rt.max
+	}
+}
+
+func (ac *addrConn) incrCallsStarted() {
+	ac.channelz.ChannelMetrics.CallsStarted.Add(1)
+	ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
+}
+
+func (ac *addrConn) incrCallsSucceeded() {
+	ac.channelz.ChannelMetrics.CallsSucceeded.Add(1)
+}
+
+func (ac *addrConn) incrCallsFailed() {
+	ac.channelz.ChannelMetrics.CallsFailed.Add(1)
+}
+
+// ErrClientConnTimeout indicates that the ClientConn cannot establish the
+// underlying connections within the specified timeout.
+//
+// Deprecated: This error is never returned by grpc and should not be
+// referenced by users.
+var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
+
+// getResolver finds the scheme in the cc's resolvers or the global registry.
+// scheme should always be lowercase (typically by virtue of url.Parse()
+// performing proper RFC3986 behavior).
+func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
+	for _, rb := range cc.dopts.resolvers {
+		if scheme == rb.Scheme() {
+			return rb
+		}
+	}
+	return resolver.Get(scheme)
+}
+
+func (cc *ClientConn) updateConnectionError(err error) {
+	cc.lceMu.Lock()
+	cc.lastConnectionError = err
+	cc.lceMu.Unlock()
+}
+
+func (cc *ClientConn) connectionError() error {
+	cc.lceMu.Lock()
+	defer cc.lceMu.Unlock()
+	return cc.lastConnectionError
+}
+
+// initParsedTargetAndResolverBuilder parses the user's dial target and stores
+// the parsed target in `cc.parsedTarget`.
+//
+// The resolver to use is determined based on the scheme in the parsed target
+// and the same is stored in `cc.resolverBuilder`.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) initParsedTargetAndResolverBuilder() error {
+	logger.Infof("original dial target is: %q", cc.target)
+
+	var rb resolver.Builder
+	parsedTarget, err := parseTarget(cc.target)
+	if err == nil {
+		rb = cc.getResolver(parsedTarget.URL.Scheme)
+		if rb != nil {
+			cc.parsedTarget = parsedTarget
+			cc.resolverBuilder = rb
+			return nil
+		}
+	}
+
+	// We are here because the user's dial target did not contain a scheme or
+	// specified an unregistered scheme. We should fall back to the default
+	// scheme, except when a custom dialer is specified, in which case we
+	// should always use the passthrough scheme. In either case, we need to
+	// respect any overridden global defaults set by the user.
+	defScheme := cc.dopts.defaultScheme
+	if internal.UserSetDefaultScheme {
+		defScheme = resolver.GetDefaultScheme()
+	}
+
+	canonicalTarget := defScheme + ":///" + cc.target
+
+	parsedTarget, err = parseTarget(canonicalTarget)
+	if err != nil {
+		return err
+	}
+	rb = cc.getResolver(parsedTarget.URL.Scheme)
+	if rb == nil {
+		return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
+	}
+	cc.parsedTarget = parsedTarget
+	cc.resolverBuilder = rb
+	return nil
+}
+
+// parseTarget uses RFC 3986 semantics to parse the given target into a
+// resolver.Target struct containing the parsed URL. Query params are stripped
+// from the endpoint.
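+//
+// For example (illustrative), "dns:///example.com:443" parses into a target
+// with scheme "dns" and endpoint "example.com:443".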
+func parseTarget(target string) (resolver.Target, error) {
+	u, err := url.Parse(target)
+	if err != nil {
+		return resolver.Target{}, err
+	}
+
+	return resolver.Target{URL: *u}, nil
+}
+
+// encodeAuthority escapes the authority string based on valid chars defined in
+// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.
+func encodeAuthority(authority string) string {
+	const upperhex = "0123456789ABCDEF"
+
+	// shouldEscape reports whether a character must be percent-escaped.
+	// Valid (unescaped) chars are listed in:
+	// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
+	shouldEscape := func(c byte) bool {
+		// Alphanum are always allowed.
+		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+			return false
+		}
+		switch c {
+		case '-', '_', '.', '~': // Unreserved characters
+			return false
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
+			return false
+		case ':', '[', ']', '@': // Authority related delimiters
+			return false
+		}
+		// Everything else must be escaped.
+		return true
+	}
+
+	hexCount := 0
+	for i := 0; i < len(authority); i++ {
+		c := authority[i]
+		if shouldEscape(c) {
+			hexCount++
+		}
+	}
+
+	if hexCount == 0 {
+		return authority
+	}
+
+	required := len(authority) + 2*hexCount
+	t := make([]byte, required)
+
+	j := 0
+	// This logic is a barebones version of escape in the go net/url library.
+	for i := 0; i < len(authority); i++ {
+		switch c := authority[i]; {
+		case shouldEscape(c):
+			t[j] = '%'
+			t[j+1] = upperhex[c>>4]
+			t[j+2] = upperhex[c&15]
+			j += 3
+		default:
+			t[j] = authority[i]
+			j++
+		}
+	}
+	return string(t)
+}
+
+// Determine channel authority. The order of precedence is as follows:
+// - user specified authority override using `WithAuthority` dial option
+// - creds' notion of server name for the authentication handshake
+// - endpoint from dial target of the form "scheme://[authority]/endpoint"
+//
+// Stores the determined authority in `cc.authority`.
+//
+// Returns a non-nil error if the authority returned by the transport
+// credentials does not match the authority configured through the dial option.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) initAuthority() error {
+	dopts := cc.dopts
+	// Historically, we had two options for users to specify the serverName or
+	// authority for a channel. One was through the transport credentials
+	// (either in its constructor, or through the OverrideServerName() method).
+	// The other option (for cases where WithInsecure() dial option was used)
+	// was to use the WithAuthority() dial option.
+	//
+	// A few things have changed since:
+	// - `insecure` package with an implementation of the `TransportCredentials`
+	//   interface for the insecure case
+	// - WithAuthority() dial option support for secure credentials
+	authorityFromCreds := ""
+	if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" {
+		authorityFromCreds = creds.Info().ServerName
+	}
+	authorityFromDialOption := dopts.authority
+	if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
+		return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
+	}
+
+	endpoint := cc.parsedTarget.Endpoint()
+	if authorityFromDialOption != "" {
+		cc.authority = authorityFromDialOption
+	} else if authorityFromCreds != "" {
+		cc.authority = authorityFromCreds
+	} else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
+		cc.authority = auth.OverrideAuthority(cc.parsedTarget)
+	} else if strings.HasPrefix(endpoint, ":") {
+		cc.authority = "localhost" + endpoint
+	} else {
+		cc.authority = encodeAuthority(endpoint)
+	}
+	return nil
+}
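As a usage sketch of the precedence implemented above: with insecure credentials there is no server name from the handshake, so a WithAuthority override wins; without it, the endpoint from the dial target would be used. The target and host names below are illustrative:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// cc.authority becomes "override.example.com" rather than the
	// endpoint "example.com:443" parsed from the target.
	cc, err := grpc.NewClient(
		"dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithAuthority("override.example.com"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}
```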
diff --git a/hack/tools/vendor/google.golang.org/grpc/codec.go b/hack/tools/vendor/google.golang.org/grpc/codec.go
new file mode 100644
index 0000000000..e840858b77
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/codec.go
@@ -0,0 +1,105 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"google.golang.org/grpc/encoding"
+	_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+	"google.golang.org/grpc/mem"
+)
+
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for registration
+// in the encoding package and is not part of the core functionality.
+type baseCodec interface {
+	Marshal(v any) (mem.BufferSlice, error)
+	Unmarshal(data mem.BufferSlice, v any) error
+}
+
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). It first checks the V1 registry with encoding.GetCodec and, if
+// a codec is registered there, wraps it with newCodecV1Bridge to turn it into
+// an encoding.CodecV2. Otherwise, it returns the result of looking up the V2
+// registry with encoding.GetCodecV2, which is nil if nothing is registered.
+func getCodec(name string) encoding.CodecV2 {
+	if codecV1 := encoding.GetCodec(name); codecV1 != nil {
+		return newCodecV1Bridge(codecV1)
+	}
+
+	return encoding.GetCodecV2(name)
+}
+
+func newCodecV0Bridge(c Codec) baseCodec {
+	return codecV0Bridge{codec: c}
+}
+
+func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 {
+	return codecV1Bridge{
+		codecV0Bridge: codecV0Bridge{codec: c},
+		name:          c.Name(),
+	}
+}
+
+var _ baseCodec = codecV0Bridge{}
+
+type codecV0Bridge struct {
+	codec interface {
+		Marshal(v any) ([]byte, error)
+		Unmarshal(data []byte, v any) error
+	}
+}
+
+func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
+	data, err := c.codec.Marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil
+}
+
+func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
+	return c.codec.Unmarshal(data.Materialize(), v)
+}
+
+var _ encoding.CodecV2 = codecV1Bridge{}
+
+type codecV1Bridge struct {
+	codecV0Bridge
+	name string
+}
+
+func (c codecV1Bridge) Name() string {
+	return c.name
+}
+
+// Codec defines the interface gRPC uses to encode and decode messages.
+// Note that implementations of this interface must be thread safe;
+// a Codec's methods can be called from concurrent goroutines.
+//
+// Deprecated: use encoding.Codec instead.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v any) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v any) error
+	// String returns the name of the Codec implementation.  This is unused by
+	// gRPC.
+	String() string
+}
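For context, the V0/V1 bridges above exist so codecs written against the older byte-slice interfaces keep working with the mem.BufferSlice-based CodecV2 path. A minimal V1 codec that getCodec would find via encoding.GetCodec and wrap with newCodecV1Bridge — the JSON codec here is illustrative, not part of this package:

```go
package main

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// jsonCodec is a hypothetical v1 codec implementing encoding.Codec.
type jsonCodec struct{}

func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                       { return "json" }

func init() {
	// Registers under "json"; RPCs can then select it with
	// grpc.CallContentSubtype("json").
	encoding.RegisterCodec(jsonCodec{})
}

func main() {}
```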
diff --git a/hack/tools/vendor/google.golang.org/grpc/codes/code_string.go b/hack/tools/vendor/google.golang.org/grpc/codes/code_string.go
new file mode 100644
index 0000000000..934fac2b09
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/codes/code_string.go
@@ -0,0 +1,111 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package codes
+
+import (
+	"strconv"
+
+	"google.golang.org/grpc/internal"
+)
+
+func init() {
+	internal.CanonicalString = canonicalString
+}
+
+func (c Code) String() string {
+	switch c {
+	case OK:
+		return "OK"
+	case Canceled:
+		return "Canceled"
+	case Unknown:
+		return "Unknown"
+	case InvalidArgument:
+		return "InvalidArgument"
+	case DeadlineExceeded:
+		return "DeadlineExceeded"
+	case NotFound:
+		return "NotFound"
+	case AlreadyExists:
+		return "AlreadyExists"
+	case PermissionDenied:
+		return "PermissionDenied"
+	case ResourceExhausted:
+		return "ResourceExhausted"
+	case FailedPrecondition:
+		return "FailedPrecondition"
+	case Aborted:
+		return "Aborted"
+	case OutOfRange:
+		return "OutOfRange"
+	case Unimplemented:
+		return "Unimplemented"
+	case Internal:
+		return "Internal"
+	case Unavailable:
+		return "Unavailable"
+	case DataLoss:
+		return "DataLoss"
+	case Unauthenticated:
+		return "Unauthenticated"
+	default:
+		return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
+	}
+}
+
+func canonicalString(c Code) string {
+	switch c {
+	case OK:
+		return "OK"
+	case Canceled:
+		return "CANCELLED"
+	case Unknown:
+		return "UNKNOWN"
+	case InvalidArgument:
+		return "INVALID_ARGUMENT"
+	case DeadlineExceeded:
+		return "DEADLINE_EXCEEDED"
+	case NotFound:
+		return "NOT_FOUND"
+	case AlreadyExists:
+		return "ALREADY_EXISTS"
+	case PermissionDenied:
+		return "PERMISSION_DENIED"
+	case ResourceExhausted:
+		return "RESOURCE_EXHAUSTED"
+	case FailedPrecondition:
+		return "FAILED_PRECONDITION"
+	case Aborted:
+		return "ABORTED"
+	case OutOfRange:
+		return "OUT_OF_RANGE"
+	case Unimplemented:
+		return "UNIMPLEMENTED"
+	case Internal:
+		return "INTERNAL"
+	case Unavailable:
+		return "UNAVAILABLE"
+	case DataLoss:
+		return "DATA_LOSS"
+	case Unauthenticated:
+		return "UNAUTHENTICATED"
+	default:
+		return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
+	}
+}
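Note that user code only sees the CamelCase form via Code.String; the SCREAMING_SNAKE canonical form is wired into the internal package for gRPC's own use (for example, in status logging). A quick check:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	fmt.Println(codes.NotFound) // "NotFound" via Code.String
	fmt.Println(codes.Code(42)) // "Code(42)" for out-of-range values
}
```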
diff --git a/hack/tools/vendor/google.golang.org/grpc/codes/codes.go b/hack/tools/vendor/google.golang.org/grpc/codes/codes.go
new file mode 100644
index 0000000000..0b42c302b2
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/codes/codes.go
@@ -0,0 +1,250 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package codes defines the canonical error codes used by gRPC. It is
+// consistent across various languages.
+package codes // import "google.golang.org/grpc/codes"
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// A Code is a status code defined according to the [gRPC documentation].
+//
+// Only the codes defined as consts in this package are valid codes. Do not use
+// other code values.  Behavior of other codes is implementation-specific and
+// interoperability between implementations is not guaranteed.
+//
+// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
+type Code uint32
+
+const (
+	// OK is returned on success.
+	OK Code = 0
+
+	// Canceled indicates the operation was canceled (typically by the caller).
+	//
+	// The gRPC framework will generate this error code when cancellation
+	// is requested.
+	Canceled Code = 1
+
+	// Unknown error. An example of where this error may be returned is
+	// if a Status value received from another address space belongs to
+	// an error-space that is not known in this address space. Also
+	// errors raised by APIs that do not return enough error information
+	// may be converted to this error.
+	//
+	// The gRPC framework will generate this error code in the above two
+	// mentioned cases.
+	Unknown Code = 2
+
+	// InvalidArgument indicates client specified an invalid argument.
+	// Note that this differs from FailedPrecondition. It indicates arguments
+	// that are problematic regardless of the state of the system
+	// (e.g., a malformed file name).
+	//
+	// This error code will not be generated by the gRPC framework.
+	InvalidArgument Code = 3
+
+	// DeadlineExceeded means operation expired before completion.
+	// For operations that change the state of the system, this error may be
+	// returned even if the operation has completed successfully. For
+	// example, a successful response from a server could have been delayed
+	// long enough for the deadline to expire.
+	//
+	// The gRPC framework will generate this error code when the deadline is
+	// exceeded.
+	DeadlineExceeded Code = 4
+
+	// NotFound means some requested entity (e.g., file or directory) was
+	// not found.
+	//
+	// This error code will not be generated by the gRPC framework.
+	NotFound Code = 5
+
+	// AlreadyExists means an attempt to create an entity failed because one
+	// already exists.
+	//
+	// This error code will not be generated by the gRPC framework.
+	AlreadyExists Code = 6
+
+	// PermissionDenied indicates the caller does not have permission to
+	// execute the specified operation. It must not be used for rejections
+	// caused by exhausting some resource (use ResourceExhausted
+	// instead for those errors). It must not be
+	// used if the caller cannot be identified (use Unauthenticated
+	// instead for those errors).
+	//
+	// This error code will not be generated by the gRPC core framework,
+	// but expect authentication middleware to use it.
+	PermissionDenied Code = 7
+
+	// ResourceExhausted indicates some resource has been exhausted, perhaps
+	// a per-user quota, or perhaps the entire file system is out of space.
+	//
+	// This error code will be generated by the gRPC framework in
+	// out-of-memory and server overload situations, or when a message is
+	// larger than the configured maximum size.
+	ResourceExhausted Code = 8
+
+	// FailedPrecondition indicates operation was rejected because the
+	// system is not in a state required for the operation's execution.
+	// For example, directory to be deleted may be non-empty, an rmdir
+	// operation is applied to a non-directory, etc.
+	//
+	// A litmus test that may help a service implementor in deciding
+	// between FailedPrecondition, Aborted, and Unavailable:
+	//  (a) Use Unavailable if the client can retry just the failing call.
+	//  (b) Use Aborted if the client should retry at a higher-level
+	//      (e.g., restarting a read-modify-write sequence).
+	//  (c) Use FailedPrecondition if the client should not retry until
+	//      the system state has been explicitly fixed. E.g., if an "rmdir"
+	//      fails because the directory is non-empty, FailedPrecondition
+	//      should be returned since the client should not retry unless
+	//      they have first fixed up the directory by deleting files from it.
+	//  (d) Use FailedPrecondition if the client performs conditional
+	//      REST Get/Update/Delete on a resource and the resource on the
+	//      server does not match the condition. E.g., conflicting
+	//      read-modify-write on the same resource.
+	//
+	// This error code will not be generated by the gRPC framework.
+	FailedPrecondition Code = 9
+
+	// Aborted indicates the operation was aborted, typically due to a
+	// concurrency issue like sequencer check failures, transaction aborts,
+	// etc.
+	//
+	// See litmus test above for deciding between FailedPrecondition,
+	// Aborted, and Unavailable.
+	//
+	// This error code will not be generated by the gRPC framework.
+	Aborted Code = 10
+
+	// OutOfRange means operation was attempted past the valid range.
+	// E.g., seeking or reading past end of file.
+	//
+	// Unlike InvalidArgument, this error indicates a problem that may
+	// be fixed if the system state changes. For example, a 32-bit file
+	// system will generate InvalidArgument if asked to read at an
+	// offset that is not in the range [0,2^32-1], but it will generate
+	// OutOfRange if asked to read from an offset past the current
+	// file size.
+	//
+	// There is a fair bit of overlap between FailedPrecondition and
+	// OutOfRange. We recommend using OutOfRange (the more specific
+	// error) when it applies so that callers who are iterating through
+	// a space can easily look for an OutOfRange error to detect when
+	// they are done.
+	//
+	// This error code will not be generated by the gRPC framework.
+	OutOfRange Code = 11
+
+	// Unimplemented indicates operation is not implemented or not
+	// supported/enabled in this service.
+	//
+	// This error code will be generated by the gRPC framework. Most
+	// commonly, you will see this error code when a method implementation
+	// is missing on the server. It can also be generated for unknown
+	// compression algorithms or a disagreement as to whether an RPC should
+	// be streaming.
+	Unimplemented Code = 12
+
+	// Internal errors. This means some invariants expected by the underlying
+	// system have been broken. If you see one of these errors,
+	// something is very broken.
+	//
+	// This error code will be generated by the gRPC framework in several
+	// internal error conditions.
+	Internal Code = 13
+
+	// Unavailable indicates the service is currently unavailable.
+	// This is most likely a transient condition and may be corrected
+	// by retrying with a backoff. Note that it is not always safe to retry
+	// non-idempotent operations.
+	//
+	// See litmus test above for deciding between FailedPrecondition,
+	// Aborted, and Unavailable.
+	//
+	// This error code will be generated by the gRPC framework during
+	// abrupt shutdown of a server process or network connection.
+	Unavailable Code = 14
+
+	// DataLoss indicates unrecoverable data loss or corruption.
+	//
+	// This error code will not be generated by the gRPC framework.
+	DataLoss Code = 15
+
+	// Unauthenticated indicates the request does not have valid
+	// authentication credentials for the operation.
+	//
+	// The gRPC framework will generate this error code when the
+	// authentication metadata is invalid or a Credentials callback fails,
+	// but also expect authentication middleware to generate it.
+	Unauthenticated Code = 16
+
+	_maxCode = 17
+)
+
+var strToCode = map[string]Code{
+	`"OK"`: OK,
+	`"CANCELLED"`:/* [sic] */ Canceled,
+	`"UNKNOWN"`:             Unknown,
+	`"INVALID_ARGUMENT"`:    InvalidArgument,
+	`"DEADLINE_EXCEEDED"`:   DeadlineExceeded,
+	`"NOT_FOUND"`:           NotFound,
+	`"ALREADY_EXISTS"`:      AlreadyExists,
+	`"PERMISSION_DENIED"`:   PermissionDenied,
+	`"RESOURCE_EXHAUSTED"`:  ResourceExhausted,
+	`"FAILED_PRECONDITION"`: FailedPrecondition,
+	`"ABORTED"`:             Aborted,
+	`"OUT_OF_RANGE"`:        OutOfRange,
+	`"UNIMPLEMENTED"`:       Unimplemented,
+	`"INTERNAL"`:            Internal,
+	`"UNAVAILABLE"`:         Unavailable,
+	`"DATA_LOSS"`:           DataLoss,
+	`"UNAUTHENTICATED"`:     Unauthenticated,
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+func (c *Code) UnmarshalJSON(b []byte) error {
+	// From json.Unmarshaler: By convention, to approximate the behavior of
+	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+	// a no-op.
+	if string(b) == "null" {
+		return nil
+	}
+	if c == nil {
+		return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+	}
+
+	if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
+		if ci >= _maxCode {
+			return fmt.Errorf("invalid code: %d", ci)
+		}
+
+		*c = Code(ci)
+		return nil
+	}
+
+	if jc, ok := strToCode[string(b)]; ok {
+		*c = jc
+		return nil
+	}
+	return fmt.Errorf("invalid code: %q", string(b))
+}
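A short usage sketch of UnmarshalJSON above: because the strToCode keys include the surrounding quotes, the raw JSON token matches directly, and numeric values below _maxCode are accepted as well:

```go
package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	var c codes.Code

	// Canonical string form.
	_ = json.Unmarshal([]byte(`"NOT_FOUND"`), &c)
	fmt.Println(c) // NotFound

	// Numeric form, bounds-checked against _maxCode.
	_ = json.Unmarshal([]byte(`14`), &c)
	fmt.Println(c) // Unavailable
}
```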
diff --git a/hack/tools/vendor/google.golang.org/grpc/connectivity/connectivity.go b/hack/tools/vendor/google.golang.org/grpc/connectivity/connectivity.go
new file mode 100644
index 0000000000..4a89926422
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/connectivity/connectivity.go
@@ -0,0 +1,94 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package connectivity defines connectivity semantics.
+// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
+package connectivity
+
+import (
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("core")
+
+// State indicates the state of connectivity.
+// It can be the state of a ClientConn or SubConn.
+type State int
+
+func (s State) String() string {
+	switch s {
+	case Idle:
+		return "IDLE"
+	case Connecting:
+		return "CONNECTING"
+	case Ready:
+		return "READY"
+	case TransientFailure:
+		return "TRANSIENT_FAILURE"
+	case Shutdown:
+		return "SHUTDOWN"
+	default:
+		logger.Errorf("unknown connectivity state: %d", s)
+		return "INVALID_STATE"
+	}
+}
+
+const (
+	// Idle indicates the ClientConn is idle.
+	Idle State = iota
+	// Connecting indicates the ClientConn is connecting.
+	Connecting
+	// Ready indicates the ClientConn is ready for work.
+	Ready
+	// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
+	TransientFailure
+	// Shutdown indicates the ClientConn has started shutting down.
+	Shutdown
+)
+
+// ServingMode indicates the current mode of operation of the server.
+//
+// Only xDS enabled gRPC servers currently report their serving mode.
+type ServingMode int
+
+const (
+	// ServingModeStarting indicates that the server is starting up.
+	ServingModeStarting ServingMode = iota
+	// ServingModeServing indicates that the server contains all required
+	// configuration and is serving RPCs.
+	ServingModeServing
+	// ServingModeNotServing indicates that the server is not accepting new
+	// connections. Existing connections will be closed gracefully, allowing
+	// in-progress RPCs to complete. A server enters this mode when it does not
+	// contain the required configuration to serve RPCs.
+	ServingModeNotServing
+)
+
+func (s ServingMode) String() string {
+	switch s {
+	case ServingModeStarting:
+		return "STARTING"
+	case ServingModeServing:
+		return "SERVING"
+	case ServingModeNotServing:
+		return "NOT_SERVING"
+	default:
+		logger.Errorf("unknown serving mode: %d", s)
+		return "INVALID_MODE"
+	}
+}
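These states can be observed on a channel through the experimental GetState/WaitForStateChange APIs on *grpc.ClientConn. A hedged sketch — no server is assumed to be listening, so the loop exits on the context timeout:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	cc, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer cc.Close()

	// A new channel starts Idle; Connect kicks off name resolution and
	// connection establishment, moving it through Connecting toward Ready.
	fmt.Println(cc.GetState()) // IDLE
	cc.Connect()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	for s := cc.GetState(); s != connectivity.Ready; s = cc.GetState() {
		if !cc.WaitForStateChange(ctx, s) {
			break // context expired before the channel became Ready
		}
	}
}
```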
diff --git a/hack/tools/vendor/google.golang.org/grpc/credentials/credentials.go b/hack/tools/vendor/google.golang.org/grpc/credentials/credentials.go
new file mode 100644
index 0000000000..665e790bb0
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -0,0 +1,291 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package credentials implements various credentials supported by gRPC library,
+// which encapsulate all the state needed by a client to authenticate with a
+// server and make various assertions, e.g., about the client's identity, role,
+// or whether it is authorized to make a particular call.
+package credentials // import "google.golang.org/grpc/credentials"
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+
+	"google.golang.org/grpc/attributes"
+	icredentials "google.golang.org/grpc/internal/credentials"
+	"google.golang.org/protobuf/proto"
+)
+
+// PerRPCCredentials defines the common interface for the credentials which need to
+// attach security information to every RPC (e.g., oauth2).
+type PerRPCCredentials interface {
+	// GetRequestMetadata gets the current request metadata, refreshing tokens
+	// if required. This should be called by the transport layer on each
+	// request, and the data should be populated in headers or other
+	// context. If a status code is returned, it will be used as the status for
+	// the RPC (restricted to an allowable set of codes as defined by gRFC
+	// A54). uri is the URI of the entry point for the request.  When supported
+	// by the underlying implementation, ctx can be used for timeout and
+	// cancellation. Additionally, RequestInfo data will be available via ctx
+	// to this call.  TODO(zhaoq): Define the set of the qualified keys instead
+	// of leaving it as an arbitrary string.
+	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
+	// RequireTransportSecurity indicates whether the credentials requires
+	// transport security.
+	RequireTransportSecurity() bool
+}
+
+// SecurityLevel defines the protection level on an established connection.
+//
+// This API is experimental.
+type SecurityLevel int
+
+const (
+	// InvalidSecurityLevel indicates an invalid security level.
+	// The zero SecurityLevel value is invalid for backward compatibility.
+	InvalidSecurityLevel SecurityLevel = iota
+	// NoSecurity indicates a connection is insecure.
+	NoSecurity
+	// IntegrityOnly indicates a connection only provides integrity protection.
+	IntegrityOnly
+	// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
+	PrivacyAndIntegrity
+)
+
+// String returns SecurityLevel in a string format.
+func (s SecurityLevel) String() string {
+	switch s {
+	case NoSecurity:
+		return "NoSecurity"
+	case IntegrityOnly:
+		return "IntegrityOnly"
+	case PrivacyAndIntegrity:
+		return "PrivacyAndIntegrity"
+	}
+	return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
+}
+
+// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
+// It should be embedded in a struct implementing AuthInfo to provide additional information
+// about the credentials.
+//
+// This API is experimental.
+type CommonAuthInfo struct {
+	SecurityLevel SecurityLevel
+}
+
+// GetCommonAuthInfo returns the CommonAuthInfo struct.
+func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo {
+	return c
+}
+
+// ProtocolInfo provides information regarding the gRPC wire protocol version,
+// security protocol, security protocol version in use, server name, etc.
+type ProtocolInfo struct {
+	// ProtocolVersion is the gRPC wire protocol version.
+	ProtocolVersion string
+	// SecurityProtocol is the security protocol in use.
+	SecurityProtocol string
+	// SecurityVersion is the security protocol version.  It is a static version string from the
+	// credentials, not a value that reflects per-connection protocol negotiation.  To retrieve
+	// details about the credentials used for a connection, use the Peer's AuthInfo field instead.
+	//
+	// Deprecated: please use Peer.AuthInfo.
+	SecurityVersion string
+	// ServerName is the user-configured server name.
+	ServerName string
+}
+
+// AuthInfo defines the common interface for the auth information the users are interested in.
+// A struct that implements AuthInfo should embed CommonAuthInfo to provide additional
+// information about the credentials.
+type AuthInfo interface {
+	AuthType() string
+}
+
+// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
+// and the caller should not close rawConn.
+var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
+
+// TransportCredentials defines the common interface for all the live gRPC wire
+// protocols and supported transport security protocols (e.g., TLS, SSL).
+type TransportCredentials interface {
+	// ClientHandshake does the authentication handshake specified by the
+	// corresponding authentication protocol on rawConn for clients. It returns
+	// the authenticated connection and the corresponding auth information
+	// about the connection.  The auth information should embed CommonAuthInfo
+	// to return additional information about the credentials. Implementations
+	// must use the provided context to implement timely cancellation.  gRPC
+	// will try to reconnect if the error returned is a temporary error
+	// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).  If the
+	// returned error is a wrapper error, implementations should make sure that
+	// the error implements Temporary() to have the correct retry behaviors.
+	// Additionally, ClientHandshakeInfo data will be available via the context
+	// passed to this call.
+	//
+	// The second argument to this method is the `:authority` header value used
+	// while creating new streams on this connection after authentication
+	// succeeds. Implementations must use this as the server name during the
+	// authentication handshake.
+	//
+	// If the returned net.Conn is closed, it MUST close the net.Conn provided.
+	ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
+	// ServerHandshake does the authentication handshake for servers. It returns
+	// the authenticated connection and the corresponding auth information about
+	// the connection. The auth information should embed CommonAuthInfo to return additional information
+	// about the credentials.
+	//
+	// If the returned net.Conn is closed, it MUST close the net.Conn provided.
+	ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
+	// Info provides the ProtocolInfo of this TransportCredentials.
+	Info() ProtocolInfo
+	// Clone makes a copy of this TransportCredentials.
+	Clone() TransportCredentials
+	// OverrideServerName specifies the value used for the following:
+	// - verifying the hostname on the returned certificates
+	// - as SNI in the client's handshake to support virtual hosting
+	// - as the value for `:authority` header at stream creation time
+	//
+	// Deprecated: use grpc.WithAuthority instead. Will be supported
+	// throughout 1.x.
+	OverrideServerName(string) error
+}
+
+// Bundle is a combination of TransportCredentials and PerRPCCredentials.
+//
+// It also contains a mode switching method, so it can be used as a combination
+// of different credential policies.
+//
+// Bundle cannot be used together with individual TransportCredentials.
+// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials.
+//
+// This API is experimental.
+type Bundle interface {
+	// TransportCredentials returns the transport credentials from the Bundle.
+	//
+	// Implementations must return non-nil transport credentials. If transport
+	// security is not needed by the Bundle, implementations may choose to
+	// return insecure.NewCredentials().
+	TransportCredentials() TransportCredentials
+
+	// PerRPCCredentials returns the per-RPC credentials from the Bundle.
+	//
+	// May be nil if per-RPC credentials are not needed.
+	PerRPCCredentials() PerRPCCredentials
+
+	// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
+	// existing Bundle may cause races.
+	//
+	// NewWithMode returns nil if the requested mode is not supported.
+	NewWithMode(mode string) (Bundle, error)
+}
+
+// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
+//
+// This API is experimental.
+type RequestInfo struct {
+	// The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
+	Method string
+	// AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
+	AuthInfo AuthInfo
+}
+
+// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
+//
+// This API is experimental.
+func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
+	ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo)
+	return ri, ok
+}
+
+// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
+// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
+// balancer etc. Individual credential implementations control the actual
+// format of the data that they are willing to receive.
+//
+// This API is experimental.
+type ClientHandshakeInfo struct {
+	// Attributes contains the attributes for the address. It could be provided
+	// by the gRPC, resolver, balancer etc.
+	Attributes *attributes.Attributes
+}
+
+// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
+// in ctx.
+//
+// This API is experimental.
+func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
+	chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo)
+	return chi
+}
+
+// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
+// It returns success if 1) the condition is satisfied or 2) the AuthInfo struct does not implement the
+// GetCommonAuthInfo() method or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. Cases 2) and 3)
+// are allowed for backward compatibility.
+//
+// This API is experimental.
+func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error {
+	type internalInfo interface {
+		GetCommonAuthInfo() CommonAuthInfo
+	}
+	if ai == nil {
+		return errors.New("AuthInfo is nil")
+	}
+	if ci, ok := ai.(internalInfo); ok {
+		// CommonAuthInfo.SecurityLevel has an invalid value.
+		if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel {
+			return nil
+		}
+		if ci.GetCommonAuthInfo().SecurityLevel < level {
+			return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
+		}
+	}
+	// The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
+	return nil
+}
+
+// ChannelzSecurityInfo defines the interface that security protocols should implement
+// in order to provide security info to channelz.
+//
+// This API is experimental.
+type ChannelzSecurityInfo interface {
+	GetSecurityValue() ChannelzSecurityValue
+}
+
+// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
+// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
+// and *OtherChannelzSecurityValue.
+//
+// This API is experimental.
+type ChannelzSecurityValue interface {
+	isChannelzSecurityValue()
+}
+
+// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
+// from GetSecurityValue(), which contains protocol specific security info. Note
+// the Value field will be sent to users of channelz requesting channel info, and
+// thus sensitive info should better be avoided.
+//
+// This API is experimental.
+type OtherChannelzSecurityValue struct {
+	ChannelzSecurityValue
+	Name  string
+	Value proto.Message
+}
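A minimal PerRPCCredentials implementation, sketched here to show the interface contract above; bearerCreds and its token are hypothetical. It would be attached to a channel with grpc.WithPerRPCCredentials:

```go
package main

import (
	"context"

	"google.golang.org/grpc/credentials"
)

// bearerCreds attaches a static bearer token to every RPC.
type bearerCreds struct {
	token string
}

func (c bearerCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

// Returning true makes gRPC refuse to send the token over a connection
// without privacy and integrity, enforced via CheckSecurityLevel.
func (c bearerCreds) RequireTransportSecurity() bool { return true }

var _ credentials.PerRPCCredentials = bearerCreds{}

func main() {}
```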
diff --git a/hack/tools/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/hack/tools/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
new file mode 100644
index 0000000000..4c805c6446
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package insecure provides an implementation of the
+// credentials.TransportCredentials interface which disables transport security.
+package insecure
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// NewCredentials returns credentials which disable transport security.
+//
+// Note that using these credentials with per-RPC credentials which require
+// transport security is incompatible and will cause grpc.Dial() to fail.
+func NewCredentials() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+// insecureTC implements the insecure transport credentials. The handshake
+// methods simply return the passed in net.Conn and set the security level to
+// NoSecurity.
+type insecureTC struct{}
+
+func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) Info() credentials.ProtocolInfo {
+	return credentials.ProtocolInfo{SecurityProtocol: "insecure"}
+}
+
+func (insecureTC) Clone() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+func (insecureTC) OverrideServerName(string) error {
+	return nil
+}
+
+// info contains the auth information for an insecure connection.
+// It implements the AuthInfo interface.
+type info struct {
+	credentials.CommonAuthInfo
+}
+
+// AuthType returns the type of info as a string.
+func (info) AuthType() string {
+	return "insecure"
+}
+
+// insecureBundle implements an insecure bundle.
+// An insecure bundle provides a thin wrapper around insecureTC to support
+// the credentials.Bundle interface.
+type insecureBundle struct{}
+
+// NewBundle returns a bundle with disabled transport security and no per-RPC credentials.
+func NewBundle() credentials.Bundle {
+	return insecureBundle{}
+}
+
+// NewWithMode returns a new insecure Bundle. The mode is ignored.
+func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
+	return insecureBundle{}, nil
+}
+
+// PerRPCCredentials returns a nil implementation, as the insecure
+// bundle does not support per-RPC credentials.
+func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
+	return nil
+}
+
+// TransportCredentials returns the underlying insecure transport credential.
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
+	return NewCredentials()
+}
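Typical usage of this package, as a sketch: transport security must be configured explicitly on every channel, and insecure.NewCredentials() is the supported replacement for the deprecated grpc.WithInsecure():

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Explicitly opt out of transport security for this channel.
	cc, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}
```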
diff --git a/hack/tools/vendor/google.golang.org/grpc/credentials/tls.go b/hack/tools/vendor/google.golang.org/grpc/credentials/tls.go
new file mode 100644
index 0000000000..4114358545
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/credentials/tls.go
@@ -0,0 +1,283 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"net"
+	"net/url"
+	"os"
+
+	"google.golang.org/grpc/grpclog"
+	credinternal "google.golang.org/grpc/internal/credentials"
+	"google.golang.org/grpc/internal/envconfig"
+)
+
+var logger = grpclog.Component("credentials")
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+	State tls.ConnectionState
+	CommonAuthInfo
+	// This API is experimental.
+	SPIFFEID *url.URL
+}
+
+// AuthType returns the type of TLSInfo as a string.
+func (t TLSInfo) AuthType() string {
+	return "tls"
+}
+
+// cipherSuiteLookup returns the string version of a TLS cipher suite ID.
+func cipherSuiteLookup(cipherSuiteID uint16) string {
+	for _, s := range tls.CipherSuites() {
+		if s.ID == cipherSuiteID {
+			return s.Name
+		}
+	}
+	for _, s := range tls.InsecureCipherSuites() {
+		if s.ID == cipherSuiteID {
+			return s.Name
+		}
+	}
+	return fmt.Sprintf("unknown ID: %v", cipherSuiteID)
+}
+
+// GetSecurityValue returns security info requested by channelz.
+func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
+	v := &TLSChannelzSecurityValue{
+		StandardName: cipherSuiteLookup(t.State.CipherSuite),
+	}
+	// Currently there's no way to get LocalCertificate info from tls package.
+	if len(t.State.PeerCertificates) > 0 {
+		v.RemoteCertificate = t.State.PeerCertificates[0].Raw
+	}
+	return v
+}
+
+// tlsCreds is the credentials required for authenticating a connection using TLS.
+type tlsCreds struct {
+	// TLS configuration
+	config *tls.Config
+}
+
+func (c tlsCreds) Info() ProtocolInfo {
+	return ProtocolInfo{
+		SecurityProtocol: "tls",
+		SecurityVersion:  "1.2",
+		ServerName:       c.config.ServerName,
+	}
+}
+
+func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
+	// use local cfg to avoid clobbering ServerName if using multiple endpoints
+	cfg := credinternal.CloneTLSConfig(c.config)
+	if cfg.ServerName == "" {
+		serverName, _, err := net.SplitHostPort(authority)
+		if err != nil {
+			// If the authority had no host port or if the authority cannot be parsed, use it as-is.
+			serverName = authority
+		}
+		cfg.ServerName = serverName
+	}
+	conn := tls.Client(rawConn, cfg)
+	errChannel := make(chan error, 1)
+	go func() {
+		errChannel <- conn.Handshake()
+		close(errChannel)
+	}()
+	select {
+	case err := <-errChannel:
+		if err != nil {
+			conn.Close()
+			return nil, nil, err
+		}
+	case <-ctx.Done():
+		conn.Close()
+		return nil, nil, ctx.Err()
+	}
+
+	// The negotiated protocol can be either of the following:
+	// 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since
+	//    it is the only protocol advertised by the client during the handshake.
+	//    The tls library ensures that the server chooses a protocol advertised
+	//    by the client.
+	// 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement
+	//    for using HTTP/2 over TLS. We can terminate the connection immediately.
+	np := conn.ConnectionState().NegotiatedProtocol
+	if np == "" {
+		if envconfig.EnforceALPNEnabled {
+			conn.Close()
+			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
+		}
+		logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
+	}
+	tlsInfo := TLSInfo{
+		State: conn.ConnectionState(),
+		CommonAuthInfo: CommonAuthInfo{
+			SecurityLevel: PrivacyAndIntegrity,
+		},
+	}
+	id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
+	if id != nil {
+		tlsInfo.SPIFFEID = id
+	}
+	return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
+}
+
+func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
+	conn := tls.Server(rawConn, c.config)
+	if err := conn.Handshake(); err != nil {
+		conn.Close()
+		return nil, nil, err
+	}
+	cs := conn.ConnectionState()
+	// The negotiated application protocol can be empty only if the client doesn't
+	// support ALPN. In such cases, we can close the connection since ALPN is required
+	// for using HTTP/2 over TLS.
+	if cs.NegotiatedProtocol == "" {
+		if envconfig.EnforceALPNEnabled {
+			conn.Close()
+			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
+		} else if logger.V(2) {
+			logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
+		}
+	}
+	tlsInfo := TLSInfo{
+		State: cs,
+		CommonAuthInfo: CommonAuthInfo{
+			SecurityLevel: PrivacyAndIntegrity,
+		},
+	}
+	id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
+	if id != nil {
+		tlsInfo.SPIFFEID = id
+	}
+	return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
+}
+
+func (c *tlsCreds) Clone() TransportCredentials {
+	return NewTLS(c.config)
+}
+
+func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
+	c.config.ServerName = serverNameOverride
+	return nil
+}
+
+// The following cipher suites are forbidden for use with HTTP/2 by
+// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
+var tls12ForbiddenCipherSuites = map[uint16]struct{}{
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA:         {},
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA:         {},
+	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:      {},
+	tls.TLS_RSA_WITH_AES_256_GCM_SHA384:      {},
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {},
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {},
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:   {},
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:   {},
+}
+
+// NewTLS uses c to construct a TransportCredentials based on TLS.
+func NewTLS(c *tls.Config) TransportCredentials {
+	tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
+	tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
+	// If the user did not configure a MinVersion and did not configure a
+	// MaxVersion < 1.2, use MinVersion=1.2, which is required by
+	// https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
+	if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) {
+		tc.config.MinVersion = tls.VersionTLS12
+	}
+	// If the user did not configure CipherSuites, use all "secure" cipher
+	// suites reported by the TLS package, but remove some explicitly forbidden
+	// by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
+	if tc.config.CipherSuites == nil {
+		for _, cs := range tls.CipherSuites() {
+			if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
+				tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID)
+			}
+		}
+	}
+	return tc
+}
+
+// NewClientTLSFromCert constructs TLS credentials from the provided root
+// certificate authority certificate(s) to validate server connections. If
+// certificates to establish the identity of the client need to be included in
+// the credentials (eg: for mTLS), use NewTLS instead, where a complete
+// tls.Config can be specified.
+// serverNameOverride is for testing only. If set to a non-empty string,
+// it will override the virtual host name of authority (e.g. :authority header
+// field) in requests.
+func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
+}
+
+// NewClientTLSFromFile constructs TLS credentials from the provided root
+// certificate authority certificate file(s) to validate server connections. If
+// certificates to establish the identity of the client need to be included in
+// the credentials (eg: for mTLS), use NewTLS instead, where a complete
+// tls.Config can be specified.
+// serverNameOverride is for testing only. If set to a non-empty string,
+// it will override the virtual host name of authority (e.g. :authority header
+// field) in requests.
+func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
+	b, err := os.ReadFile(certFile)
+	if err != nil {
+		return nil, err
+	}
+	cp := x509.NewCertPool()
+	if !cp.AppendCertsFromPEM(b) {
+		return nil, fmt.Errorf("credentials: failed to append certificates")
+	}
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
+}
+
+// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
+func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+}
+
+// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
+// file for server.
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return nil, err
+	}
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
+}
+
+// TLSChannelzSecurityValue defines the struct that TLS protocol should return
+// from GetSecurityValue(), containing security info like cipher and certificate used.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type TLSChannelzSecurityValue struct {
+	ChannelzSecurityValue
+	StandardName      string
+	LocalCertificate  []byte
+	RemoteCertificate []byte
+}
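As a usage sketch of NewTLS and its defaulting above: an empty tls.Config is enough for a client trusting the system root CAs, since NewTLS fills in h2 ALPN, a TLS 1.2 minimum, and the HTTP/2-permitted cipher suites. The target below is illustrative:

```go
package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// ServerName is derived from the dial authority when left empty.
	creds := credentials.NewTLS(&tls.Config{})

	cc, err := grpc.NewClient("dns:///example.com:443",
		grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}
```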
diff --git a/hack/tools/vendor/google.golang.org/grpc/dialoptions.go b/hack/tools/vendor/google.golang.org/grpc/dialoptions.go
new file mode 100644
index 0000000000..2b285beee3
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/dialoptions.go
@@ -0,0 +1,767 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"net"
+	"net/url"
+	"time"
+
+	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/channelz"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/internal"
+	internalbackoff "google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/binarylog"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/stats"
+)
+
+const (
+	// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges
+	defaultMaxCallAttempts = 5
+)
+
+func init() {
+	internal.AddGlobalDialOptions = func(opt ...DialOption) {
+		globalDialOptions = append(globalDialOptions, opt...)
+	}
+	internal.ClearGlobalDialOptions = func() {
+		globalDialOptions = nil
+	}
+	internal.AddGlobalPerTargetDialOptions = func(opt any) {
+		if ptdo, ok := opt.(perTargetDialOption); ok {
+			globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo)
+		}
+	}
+	internal.ClearGlobalPerTargetDialOptions = func() {
+		globalPerTargetDialOptions = nil
+	}
+	internal.WithBinaryLogger = withBinaryLogger
+	internal.JoinDialOptions = newJoinDialOption
+	internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
+	internal.WithBufferPool = withBufferPool
+}
+
+// dialOptions configure a Dial call. dialOptions are set by the DialOption
+// values passed to Dial.
+type dialOptions struct {
+	unaryInt  UnaryClientInterceptor
+	streamInt StreamClientInterceptor
+
+	chainUnaryInts  []UnaryClientInterceptor
+	chainStreamInts []StreamClientInterceptor
+
+	cp                          Compressor
+	dc                          Decompressor
+	bs                          internalbackoff.Strategy
+	block                       bool
+	returnLastError             bool
+	timeout                     time.Duration
+	authority                   string
+	binaryLogger                binarylog.Logger
+	copts                       transport.ConnectOptions
+	callOptions                 []CallOption
+	channelzParent              channelz.Identifier
+	disableServiceConfig        bool
+	disableRetry                bool
+	disableHealthCheck          bool
+	healthCheckFunc             internal.HealthChecker
+	minConnectTimeout           func() time.Duration
+	defaultServiceConfig        *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
+	defaultServiceConfigRawJSON *string
+	resolvers                   []resolver.Builder
+	idleTimeout                 time.Duration
+	defaultScheme               string
+	maxCallAttempts             int
+}
+
+// DialOption configures how we set up the connection.
+type DialOption interface {
+	apply(*dialOptions)
+}
+
+var globalDialOptions []DialOption
+
+// perTargetDialOption takes a parsed target and returns a dial option to apply.
+//
+// This gets called after NewClient() parses the target, and allows per-target
+// configuration set through a returned DialOption. The DialOption will not take
+// effect if it specifies a resolver builder, as that DialOption is factored in
+// while parsing the target.
+type perTargetDialOption interface {
+	// DialOption returns a Dial Option to apply.
+	DialOptionForTarget(parsedTarget url.URL) DialOption
+}
+
+var globalPerTargetDialOptions []perTargetDialOption
+
+// EmptyDialOption does not alter the dial configuration. It can be embedded in
+// another structure to build custom dial options.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type EmptyDialOption struct{}
+
+func (EmptyDialOption) apply(*dialOptions) {}
+
+type disableGlobalDialOptions struct{}
+
+func (disableGlobalDialOptions) apply(*dialOptions) {}
+
+// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn
+// from applying the global DialOptions (set via AddGlobalDialOptions).
+func newDisableGlobalDialOptions() DialOption {
+	return &disableGlobalDialOptions{}
+}
+
+// funcDialOption wraps a function that modifies dialOptions into an
+// implementation of the DialOption interface.
+type funcDialOption struct {
+	f func(*dialOptions)
+}
+
+func (fdo *funcDialOption) apply(do *dialOptions) {
+	fdo.f(do)
+}
+
+func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
+	return &funcDialOption{
+		f: f,
+	}
+}
+
+type joinDialOption struct {
+	opts []DialOption
+}
+
+func (jdo *joinDialOption) apply(do *dialOptions) {
+	for _, opt := range jdo.opts {
+		opt.apply(do)
+	}
+}
+
+func newJoinDialOption(opts ...DialOption) DialOption {
+	return &joinDialOption{opts: opts}
+}
+
+// WithSharedWriteBuffer allows reusing per-connection transport write buffer.
+// If this option is set to true every connection will release the buffer after
+// flushing the data on the wire.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithSharedWriteBuffer(val bool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.SharedWriteBuffer = val
+	})
+}
+
+// WithWriteBufferSize determines how much data can be batched before doing a
+// write on the wire. The default value for this buffer is 32KB.
+//
+// Zero or negative values will disable the write buffer such that each write
+// will be on the underlying connection. Note: A Send call may not directly
+// translate to a write.
+func WithWriteBufferSize(s int) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.WriteBufferSize = s
+	})
+}
+
+// WithReadBufferSize lets you set the size of the read buffer; this determines
+// how much data can be read at most for each read syscall.
+//
+// The default value for this buffer is 32KB. Zero or negative values will
+// disable the read buffer for a connection so the data framer can access the
+// underlying conn directly.
+func WithReadBufferSize(s int) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.ReadBufferSize = s
+	})
+}
+
+// WithInitialWindowSize returns a DialOption which sets the value for initial
+// window size on a stream. The lower bound for window size is 64K and any value
+// smaller than that will be ignored.
+func WithInitialWindowSize(s int32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.InitialWindowSize = s
+	})
+}
+
+// WithInitialConnWindowSize returns a DialOption which sets the value for
+// initial window size on a connection. The lower bound for window size is 64K
+// and any value smaller than that will be ignored.
+func WithInitialConnWindowSize(s int32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.InitialConnWindowSize = s
+	})
+}
+
+// WithMaxMsgSize returns a DialOption which sets the maximum message size the
+// client can receive.
+//
+// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.  Will
+// be supported throughout 1.x.
+func WithMaxMsgSize(s int) DialOption {
+	return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
+}
+
+// WithDefaultCallOptions returns a DialOption which sets the default
+// CallOptions for calls over the connection.
+func WithDefaultCallOptions(cos ...CallOption) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.callOptions = append(o.callOptions, cos...)
+	})
+}
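+
+// Example (editor's sketch, not part of the upstream file): raising the
+// default receive and send limits for every call on a connection. The target
+// and sizes are illustrative; assumes the grpc and credentials/insecure
+// packages are imported.
+//
+//	conn, err := grpc.NewClient("localhost:50051",
+//		grpc.WithTransportCredentials(insecure.NewCredentials()),
+//		grpc.WithDefaultCallOptions(
+//			grpc.MaxCallRecvMsgSize(16*1024*1024),
+//			grpc.MaxCallSendMsgSize(16*1024*1024),
+//		),
+//	)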
+
+// WithCodec returns a DialOption which sets a codec for message marshaling and
+// unmarshaling.
+//
+// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead.  Will be
+// supported throughout 1.x.
+func WithCodec(c Codec) DialOption {
+	return WithDefaultCallOptions(CallCustomCodec(c))
+}
+
+// WithCompressor returns a DialOption which sets a Compressor to use for
+// message compression. It has lower priority than the compressor set by the
+// UseCompressor CallOption.
+//
+// Deprecated: use UseCompressor instead.  Will be supported throughout 1.x.
+func WithCompressor(cp Compressor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.cp = cp
+	})
+}
+
+// WithDecompressor returns a DialOption which sets a Decompressor to use for
+// incoming message decompression.  If incoming response messages are encoded
+// using the decompressor's Type(), it will be used.  Otherwise, the message
+// encoding will be used to look up the compressor registered via
+// encoding.RegisterCompressor, which will then be used to decompress the
+// message.  If no compressor is registered for the encoding, an Unimplemented
+// status error will be returned.
+//
+// Deprecated: use encoding.RegisterCompressor instead.  Will be supported
+// throughout 1.x.
+func WithDecompressor(dc Decompressor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.dc = dc
+	})
+}
+
+// WithConnectParams configures the ClientConn to use the provided ConnectParams
+// for creating and maintaining connections to servers.
+//
+// The backoff configuration specified as part of the ConnectParams overrides
+// all defaults specified in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
+// using the backoff.DefaultConfig as a base, in cases where you want to
+// override only a subset of the backoff configuration.
+func WithConnectParams(p ConnectParams) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.bs = internalbackoff.Exponential{Config: p.Backoff}
+		o.minConnectTimeout = func() time.Duration {
+			return p.MinConnectTimeout
+		}
+	})
+}
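+
+// Example (editor's sketch, not from the upstream source): using
+// backoff.DefaultConfig as a base and overriding only MaxDelay, as the comment
+// above suggests. Assumes time and google.golang.org/grpc/backoff are
+// imported and creds is a TransportCredentials value; the address is
+// illustrative.
+//
+//	cfg := backoff.DefaultConfig
+//	cfg.MaxDelay = 2 * time.Minute
+//	conn, err := grpc.NewClient("dns:///example.com:443",
+//		grpc.WithTransportCredentials(creds),
+//		grpc.WithConnectParams(grpc.ConnectParams{
+//			Backoff:           cfg,
+//			MinConnectTimeout: 20 * time.Second,
+//		}),
+//	)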
+
+// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
+// when backing off after failed connection attempts.
+//
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
+func WithBackoffMaxDelay(md time.Duration) DialOption {
+	return WithBackoffConfig(BackoffConfig{MaxDelay: md})
+}
+
+// WithBackoffConfig configures the dialer to use the provided backoff
+// parameters after connection failures.
+//
+// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
+func WithBackoffConfig(b BackoffConfig) DialOption {
+	bc := backoff.DefaultConfig
+	bc.MaxDelay = b.MaxDelay
+	return withBackoff(internalbackoff.Exponential{Config: bc})
+}
+
+// withBackoff sets the backoff strategy used when retrying after a failed
+// connection attempt.
+//
+// This can be exported if arbitrary backoff strategies are allowed by gRPC.
+func withBackoff(bs internalbackoff.Strategy) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.bs = bs
+	})
+}
+
+// WithBlock returns a DialOption which makes callers of Dial block until the
+// underlying connection is up. Without this, Dial returns immediately and
+// connecting to the server happens in the background.
+//
+// Use of this feature is not recommended.  For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
+func WithBlock() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.block = true
+	})
+}
+
+// WithReturnConnectionError returns a DialOption which makes the client
+// connection return a string containing both the last connection error that
+// occurred and the context.DeadlineExceeded error. It implies WithBlock().
+//
+// Use of this feature is not recommended.  For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
+func WithReturnConnectionError() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.block = true
+		o.returnLastError = true
+	})
+}
+
+// WithInsecure returns a DialOption which disables transport security for this
+// ClientConn. Under the hood, it uses insecure.NewCredentials().
+//
+// Note that using this DialOption with per-RPC credentials (through
+// WithCredentialsBundle or WithPerRPCCredentials) which require transport
+// security is incompatible and will cause grpc.Dial() to fail.
+//
+// Deprecated: use WithTransportCredentials and insecure.NewCredentials()
+// instead. Will be supported throughout 1.x.
+func WithInsecure() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.TransportCredentials = insecure.NewCredentials()
+	})
+}
+
+// WithNoProxy returns a DialOption which disables the use of proxies for this
+// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithNoProxy() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.UseProxy = false
+	})
+}
+
+// WithTransportCredentials returns a DialOption which configures
+// connection-level security credentials (e.g., TLS/SSL). This should not be
+// used together with WithCredentialsBundle.
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.TransportCredentials = creds
+	})
+}
+
+// WithPerRPCCredentials returns a DialOption which sets credentials and places
+// auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
+	})
+}
+
+// WithCredentialsBundle returns a DialOption to set a credentials bundle for
+// the ClientConn. This should not be used together with
+// WithTransportCredentials.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithCredentialsBundle(b credentials.Bundle) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.CredsBundle = b
+	})
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a
+// ClientConn initially. This is valid if and only if WithBlock() is present.
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
+func WithTimeout(d time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.timeout = d
+	})
+}
+
+// WithContextDialer returns a DialOption that sets a dialer to create
+// connections. If FailOnNonTempDialError() is set to true, and an error is
+// returned by f, gRPC checks the error's Temporary() method to decide if it
+// should try to reconnect to the network address.
+//
+// Note: All supported releases of Go (as of December 2023) override the OS
+// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+// with OS defaults for keepalive time and interval, use a net.Dialer that sets
+// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
+// option to true from the Control field. For a concrete example of how to do
+// this, see internal.NetDialerWithTCPKeepalive().
+//
+// For more information, please see [issue 23459] in the Go github repo.
+//
+// [issue 23459]: https://github.com/golang/go/issues/23459
+func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.Dialer = f
+	})
+}
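+
+// Example (editor's sketch, not from the upstream source): dialing a Unix
+// domain socket instead of TCP. The socket path is illustrative; assumes
+// context and net are imported.
+//
+//	dialer := func(ctx context.Context, addr string) (net.Conn, error) {
+//		return (&net.Dialer{}).DialContext(ctx, "unix", addr)
+//	}
+//	conn, err := grpc.NewClient("unix:///tmp/app.sock",
+//		grpc.WithTransportCredentials(insecure.NewCredentials()),
+//		grpc.WithContextDialer(dialer),
+//	)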
+
+func init() {
+	internal.WithHealthCheckFunc = withHealthCheckFunc
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing
+// network addresses. If FailOnNonTempDialError() is set to true, and an error
+// is returned by f, gRPC checks the error's Temporary() method to decide if it
+// should try to reconnect to the network address.
+//
+// Deprecated: use WithContextDialer instead.  Will be supported throughout
+// 1.x.
+func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
+	return WithContextDialer(
+		func(ctx context.Context, addr string) (net.Conn, error) {
+			if deadline, ok := ctx.Deadline(); ok {
+				return f(addr, time.Until(deadline))
+			}
+			return f(addr, 0)
+		})
+}
+
+// WithStatsHandler returns a DialOption that specifies the stats handler for
+// all the RPCs and underlying network connections in this ClientConn.
+func WithStatsHandler(h stats.Handler) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		if h == nil {
+			logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption")
+			// Do not allow a nil stats handler, which would otherwise cause
+			// panics.
+			return
+		}
+		o.copts.StatsHandlers = append(o.copts.StatsHandlers, h)
+	})
+}
+
+// withBinaryLogger returns a DialOption that specifies the binary logger for
+// this ClientConn.
+func withBinaryLogger(bl binarylog.Logger) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.binaryLogger = bl
+	})
+}
+
+// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
+// non-temporary dial errors. If f is true, and dialer returns a non-temporary
+// error, gRPC will fail the connection to the network address and won't try to
+// reconnect. The default value of FailOnNonTempDialError is false.
+//
+// FailOnNonTempDialError only affects the initial dial, and does not do
+// anything useful unless you are also using WithBlock().
+//
+// Use of this feature is not recommended.  For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// This API may be changed or removed in a
+// later release.
+func FailOnNonTempDialError(f bool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.FailOnNonTempDialError = f
+	})
+}
+
+// WithUserAgent returns a DialOption that specifies a user agent string for all
+// the RPCs.
+func WithUserAgent(s string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.UserAgent = s + " " + grpcUA
+	})
+}
+
+// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
+// for the client transport.
+//
+// Keepalive is disabled by default.
+func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
+	if kp.Time < internal.KeepaliveMinPingTime {
+		logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
+		kp.Time = internal.KeepaliveMinPingTime
+	}
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.KeepaliveParams = kp
+	})
+}
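+
+// Example (editor's sketch, not from the upstream source): enabling client
+// keepalive pings. The values are illustrative; assumes
+// google.golang.org/grpc/keepalive is imported.
+//
+//	opt := grpc.WithKeepaliveParams(keepalive.ClientParameters{
+//		Time:                30 * time.Second, // ping after 30s of inactivity
+//		Timeout:             10 * time.Second, // wait 10s for a ping ack
+//		PermitWithoutStream: true,             // ping even with no active RPCs
+//	})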
+
+// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
+// unary RPCs.
+func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.unaryInt = f
+	})
+}
+
+// WithChainUnaryInterceptor returns a DialOption that specifies the chained
+// interceptor for unary RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real call.
+// All interceptors added by this method will be chained, and the interceptor
+// defined by WithUnaryInterceptor will always be prepended to the chain.
+func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
+	})
+}
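+
+// Example (editor's sketch, not from the upstream source): a minimal logging
+// interceptor added to the chain. The name logIt is illustrative; assumes
+// context and log are imported.
+//
+//	logIt := func(ctx context.Context, method string, req, reply any,
+//		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+//		log.Printf("calling %s", method)
+//		return invoker(ctx, method, req, reply, cc, opts...)
+//	}
+//	opt := grpc.WithChainUnaryInterceptor(logIt)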
+
+// WithStreamInterceptor returns a DialOption that specifies the interceptor for
+// streaming RPCs.
+func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.streamInt = f
+	})
+}
+
+// WithChainStreamInterceptor returns a DialOption that specifies the chained
+// interceptor for streaming RPCs. The first interceptor will be the outermost,
+// while the last interceptor will be the innermost wrapper around the real call.
+// All interceptors added by this method will be chained, and the interceptor
+// defined by WithStreamInterceptor will always be prepended to the chain.
+func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
+	})
+}
+
+// WithAuthority returns a DialOption that specifies the value to be used as the
+// :authority pseudo-header and as the server name in authentication handshake.
+func WithAuthority(a string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.authority = a
+	})
+}
+
+// WithChannelzParentID returns a DialOption that specifies the channelz ID of
+// the current ClientConn's parent. This function is used in nested channel
+// creation (e.g. grpclb dial).
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithChannelzParentID(c channelz.Identifier) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.channelzParent = c
+	})
+}
+
+// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any
+// service config provided by the resolver and provides a hint to the resolver
+// to not fetch service configs.
+//
+// Note that this dial option only disables service config from the resolver.
+// If a default service config is provided, gRPC will use the default service
+// config.
+func WithDisableServiceConfig() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableServiceConfig = true
+	})
+}
+
+// WithDefaultServiceConfig returns a DialOption that configures the default
+// service config, which will be used in cases where:
+//
+// 1. WithDisableServiceConfig is also used, or
+//
+// 2. The name resolver does not provide a service config or provides an
+// invalid service config.
+//
+// The parameter s is the JSON representation of the default service config.
+// For more information about service configs, see:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+// For a simple example of usage, see:
+// examples/features/load_balancing/client/main.go
+func WithDefaultServiceConfig(s string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.defaultServiceConfigRawJSON = &s
+	})
+}
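+
+// Example (editor's sketch, not from the upstream source): a minimal default
+// service config that selects the round_robin load-balancing policy when the
+// resolver supplies none.
+//
+//	opt := grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin": {}}]}`)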
+
+// WithDisableRetry returns a DialOption that disables retries, even if the
+// service config enables them.  This does not impact transparent retries, which
+// will happen automatically if no data is written to the wire or if the RPC is
+// unprocessed by the remote server.
+func WithDisableRetry() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableRetry = true
+	})
+}
+
+// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum
+// (uncompressed) size of header list that the client is prepared to accept.
+type MaxHeaderListSizeDialOption struct {
+	MaxHeaderListSize uint32
+}
+
+func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) {
+	do.copts.MaxHeaderListSize = &o.MaxHeaderListSize
+}
+
+// WithMaxHeaderListSize returns a DialOption that specifies the maximum
+// (uncompressed) size of header list that the client is prepared to accept.
+func WithMaxHeaderListSize(s uint32) DialOption {
+	return MaxHeaderListSizeDialOption{
+		MaxHeaderListSize: s,
+	}
+}
+
+// WithDisableHealthCheck disables the LB channel health checking for all
+// SubConns of this ClientConn.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithDisableHealthCheck() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableHealthCheck = true
+	})
+}
+
+// withHealthCheckFunc replaces the default health check function with the
+// provided one. It makes it easier for tests to change the health check
+// function.
+//
+// For testing purpose only.
+func withHealthCheckFunc(f internal.HealthChecker) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.healthCheckFunc = f
+	})
+}
+
+func defaultDialOptions() dialOptions {
+	return dialOptions{
+		copts: transport.ConnectOptions{
+			ReadBufferSize:  defaultReadBufSize,
+			WriteBufferSize: defaultWriteBufSize,
+			UseProxy:        true,
+			UserAgent:       grpcUA,
+			BufferPool:      mem.DefaultBufferPool(),
+		},
+		bs:              internalbackoff.DefaultExponential,
+		healthCheckFunc: internal.HealthCheckFunc,
+		idleTimeout:     30 * time.Minute,
+		defaultScheme:   "dns",
+		maxCallAttempts: defaultMaxCallAttempts,
+	}
+}
+
+// withMinConnectDeadline specifies the function that clientconn uses to
+// get minConnectDeadline. This can be used to make connection attempts happen
+// faster/slower.
+//
+// For testing purpose only.
+func withMinConnectDeadline(f func() time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.minConnectTimeout = f
+	})
+}
+
+// withDefaultScheme is used to allow Dial to use "passthrough" as the default
+// name resolver, while NewClient uses "dns" otherwise.
+func withDefaultScheme(s string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.defaultScheme = s
+	})
+}
+
+// WithResolvers allows a list of resolver implementations to be registered
+// locally with the ClientConn without needing to be globally registered via
+// resolver.Register.  They will be matched against the scheme used for the
+// current Dial only, and will take precedence over the global registry.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithResolvers(rs ...resolver.Builder) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolvers = append(o.resolvers, rs...)
+	})
+}
+
+// WithIdleTimeout returns a DialOption that configures an idle timeout for the
+// channel. If the channel is idle for the configured timeout, i.e. there are no
+// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
+// and as a result the name resolver and load balancer will be shut down. The
+// channel will exit idle mode when the Connect() method is called or when an
+// RPC is initiated.
+//
+// A default timeout of 30 minutes will be used if this dial option is not set
+// at dial time, and idleness can be disabled by passing a timeout of zero.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithIdleTimeout(d time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.idleTimeout = d
+	})
+}
+
+// WithMaxCallAttempts returns a DialOption that configures the maximum number
+// of attempts per call (including retries and hedging) using the channel.
+// Service owners may specify a higher value for these parameters, but higher
+// values will be treated as equal to the maximum value by the client
+// implementation. This mitigates security concerns related to the service
+// config being transferred to the client via DNS.
+//
+// A value of 5 will be used if this dial option is not set or n < 2.
+func WithMaxCallAttempts(n int) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		if n < 2 {
+			n = defaultMaxCallAttempts
+		}
+		o.maxCallAttempts = n
+	})
+}
+
+func withBufferPool(bufferPool mem.BufferPool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.BufferPool = bufferPool
+	})
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/doc.go b/hack/tools/vendor/google.golang.org/grpc/doc.go
new file mode 100644
index 0000000000..e7b532b6f8
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/doc.go
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+//go:generate ./scripts/regenerate.sh
+
+/*
+Package grpc implements an RPC system called gRPC.
+
+See grpc.io for more information about gRPC.
+*/
+package grpc // import "google.golang.org/grpc"
diff --git a/hack/tools/vendor/google.golang.org/grpc/encoding/encoding.go b/hack/tools/vendor/google.golang.org/grpc/encoding/encoding.go
new file mode 100644
index 0000000000..11d0ae142c
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -0,0 +1,131 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package encoding defines the interface for the compressor and codec, and
+// functions to register and retrieve compressors and codecs.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package encoding
+
+import (
+	"io"
+	"strings"
+
+	"google.golang.org/grpc/internal/grpcutil"
+)
+
+// Identity specifies the optional encoding for uncompressed streams.
+// It is intended for grpc internal use only.
+const Identity = "identity"
+
+// Compressor is used for compressing and decompressing when sending or
+// receiving messages.
+//
+// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`,
+// gRPC will invoke it to determine the size of the buffer allocated for the
+// result of decompression.  A return value of -1 indicates unknown size.
+type Compressor interface {
+	// Compress writes the data written to wc to w after compressing it.  If an
+	// error occurs while initializing the compressor, that error is returned
+	// instead.
+	Compress(w io.Writer) (io.WriteCloser, error)
+	// Decompress reads data from r, decompresses it, and provides the
+	// uncompressed data via the returned io.Reader.  If an error occurs while
+	// initializing the decompressor, that error is returned instead.
+	Decompress(r io.Reader) (io.Reader, error)
+	// Name is the name of the compression codec and is used to set the content
+	// coding header.  The result must be static; the result cannot change
+	// between calls.
+	Name() string
+}
+
+var registeredCompressor = make(map[string]Compressor)
+
+// RegisterCompressor registers the compressor with gRPC by its name.  It can
+// be activated when sending an RPC via grpc.UseCompressor().  It will be
+// automatically accessed when receiving a message based on the content coding
+// header.  Servers also use it to send a response with the same encoding as
+// the request.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe.  If multiple Compressors are
+// registered with the same name, the one registered last will take effect.
+func RegisterCompressor(c Compressor) {
+	registeredCompressor[c.Name()] = c
+	if !grpcutil.IsCompressorNameRegistered(c.Name()) {
+		grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
+	}
+}
+
+// GetCompressor returns Compressor for the given compressor name.
+func GetCompressor(name string) Compressor {
+	return registeredCompressor[name]
+}
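+
+// Example (editor's sketch, not from the upstream source): a registered
+// compressor is selected per call with the grpc.UseCompressor call option.
+// The stock gzip compressor registers itself via an init function when its
+// package is imported; the method name below is illustrative.
+//
+//	import _ "google.golang.org/grpc/encoding/gzip" // registers "gzip"
+//
+//	err := conn.Invoke(ctx, "/pkg.Service/Method", req, reply,
+//		grpc.UseCompressor("gzip"))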
+
+// Codec defines the interface gRPC uses to encode and decode messages.  Note
+// that implementations of this interface must be thread safe; a Codec's
+// methods can be called from concurrent goroutines.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v any) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v any) error
+	// Name returns the name of the Codec implementation. The returned string
+	// will be used as part of content type in transmission.  The result must be
+	// static; the result cannot change between calls.
+	Name() string
+}
+
+var registeredCodecs = make(map[string]any)
+
+// RegisterCodec registers the provided Codec for use with all gRPC clients and
+// servers.
+//
+// The Codec will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the Codec.  This
+// is case-insensitive, and is stored and looked up as lowercase.  If the
+// result of calling Name() is an empty string, RegisterCodec will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe.  If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodec(codec Codec) {
+	if codec == nil {
+		panic("cannot register a nil Codec")
+	}
+	if codec.Name() == "" {
+		panic("cannot register Codec with empty string result for Name()")
+	}
+	contentSubtype := strings.ToLower(codec.Name())
+	registeredCodecs[contentSubtype] = codec
+}
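+
+// Example (editor's sketch, not from the upstream source): a minimal JSON
+// codec registered at init time from an importing package. The type name is
+// illustrative; assumes encoding/json is imported.
+//
+//	type jsonCodec struct{}
+//
+//	func (jsonCodec) Marshal(v any) ([]byte, error)   { return json.Marshal(v) }
+//	func (jsonCodec) Unmarshal(b []byte, v any) error { return json.Unmarshal(b, v) }
+//	func (jsonCodec) Name() string                    { return "json" }
+//
+//	func init() { encoding.RegisterCodec(jsonCodec{}) }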
+
+// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodec(contentSubtype string) Codec {
+	c, _ := registeredCodecs[contentSubtype].(Codec)
+	return c
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/hack/tools/vendor/google.golang.org/grpc/encoding/encoding_v2.go
new file mode 100644
index 0000000000..074c5e234a
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/encoding/encoding_v2.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encoding
+
+import (
+	"strings"
+
+	"google.golang.org/grpc/mem"
+)
+
+// CodecV2 defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a CodecV2's
+// methods can be called from concurrent goroutines.
+type CodecV2 interface {
+	// Marshal returns the wire format of v. The buffers in the returned
+	// [mem.BufferSlice] must have at least one reference each, which will be freed
+	// by gRPC when they are no longer needed.
+	Marshal(v any) (out mem.BufferSlice, err error)
+	// Unmarshal parses the wire format into v. Note that data will be freed as soon
+	// as this function returns. If the codec wishes to guarantee access to the data
+	// after this function, it must take its own reference that it frees when it is
+	// no longer needed.
+	Unmarshal(data mem.BufferSlice, v any) error
+	// Name returns the name of the Codec implementation. The returned string
+	// will be used as part of content type in transmission.  The result must be
+	// static; the result cannot change between calls.
+	Name() string
+}
+
+// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and
+// servers.
+//
+// The CodecV2 will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the CodecV2.  This
+// is case-insensitive, and is stored and looked up as lowercase.  If the
+// result of calling Name() is an empty string, RegisterCodecV2 will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If both a Codec and CodecV2 are registered with the same name, the CodecV2
+// will be used.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe.  If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodecV2(codec CodecV2) {
+	if codec == nil {
+		panic("cannot register a nil CodecV2")
+	}
+	if codec.Name() == "" {
+		panic("cannot register CodecV2 with empty string result for Name()")
+	}
+	contentSubtype := strings.ToLower(codec.Name())
+	registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodecV2(contentSubtype string) CodecV2 {
+	c, _ := registeredCodecs[contentSubtype].(CodecV2)
+	return c
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/hack/tools/vendor/google.golang.org/grpc/encoding/gzip/gzip.go
new file mode 100644
index 0000000000..6306e8bb0f
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/encoding/gzip/gzip.go
@@ -0,0 +1,132 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package gzip implements and registers the gzip compressor
+// during initialization.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package gzip
+
+import (
+	"compress/gzip"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"sync"
+
+	"google.golang.org/grpc/encoding"
+)
+
+// Name is the name registered for the gzip compressor.
+const Name = "gzip"
+
+func init() {
+	c := &compressor{}
+	c.poolCompressor.New = func() any {
+		return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor}
+	}
+	encoding.RegisterCompressor(c)
+}
+
+type writer struct {
+	*gzip.Writer
+	pool *sync.Pool
+}
+
+// SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported).
+// NOTE: this function must only be called during initialization time (i.e. in an init() function),
+// and is not thread-safe.
+//
+// The error returned will be nil if the specified level is valid.
+func SetLevel(level int) error {
+	if level < gzip.DefaultCompression || level > gzip.BestCompression {
+		return fmt.Errorf("grpc: invalid gzip compression level: %d", level)
+	}
+	c := encoding.GetCompressor(Name).(*compressor)
+	c.poolCompressor.New = func() any {
+		w, err := gzip.NewWriterLevel(io.Discard, level)
+		if err != nil {
+			panic(err)
+		}
+		return &writer{Writer: w, pool: &c.poolCompressor}
+	}
+	return nil
+}
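+
+// Example (editor's sketch, not from the upstream source): picking the best
+// compression ratio at init time. The aliases avoid the name clash between
+// this package and compress/gzip.
+//
+//	import (
+//		stdgzip "compress/gzip"
+//
+//		grpcgzip "google.golang.org/grpc/encoding/gzip"
+//	)
+//
+//	func init() {
+//		if err := grpcgzip.SetLevel(stdgzip.BestCompression); err != nil {
+//			panic(err)
+//		}
+//	}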
+
+func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) {
+	z := c.poolCompressor.Get().(*writer)
+	z.Writer.Reset(w)
+	return z, nil
+}
+
+func (z *writer) Close() error {
+	defer z.pool.Put(z)
+	return z.Writer.Close()
+}
+
+type reader struct {
+	*gzip.Reader
+	pool *sync.Pool
+}
+
+func (c *compressor) Decompress(r io.Reader) (io.Reader, error) {
+	z, inPool := c.poolDecompressor.Get().(*reader)
+	if !inPool {
+		newZ, err := gzip.NewReader(r)
+		if err != nil {
+			return nil, err
+		}
+		return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil
+	}
+	if err := z.Reset(r); err != nil {
+		c.poolDecompressor.Put(z)
+		return nil, err
+	}
+	return z, nil
+}
+
+func (z *reader) Read(p []byte) (n int, err error) {
+	n, err = z.Reader.Read(p)
+	if err == io.EOF {
+		z.pool.Put(z)
+	}
+	return n, err
+}
+
+// RFC1952 specifies that the last four bytes "contains the size of
+// the original (uncompressed) input data modulo 2^32."
+// gRPC has a max message size of 2GB so we don't need to worry about wraparound.
+func (c *compressor) DecompressedSize(buf []byte) int {
+	last := len(buf)
+	if last < 4 {
+		return -1
+	}
+	return int(binary.LittleEndian.Uint32(buf[last-4 : last]))
+}
+
+func (c *compressor) Name() string {
+	return Name
+}
+
+type compressor struct {
+	poolCompressor   sync.Pool
+	poolDecompressor sync.Pool
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/encoding/proto/proto.go b/hack/tools/vendor/google.golang.org/grpc/encoding/proto/proto.go
new file mode 100644
index 0000000000..ceec319dd2
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -0,0 +1,96 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package proto defines the protobuf codec. Importing this package will
+// register the codec.
+package proto
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/protoadapt"
+)
+
+// Name is the name registered for the proto compressor.
+const Name = "proto"
+
+func init() {
+	encoding.RegisterCodecV2(&codecV2{})
+}
+
+// codec is a CodecV2 implementation with protobuf. It is the default codec for
+// gRPC.
+type codecV2 struct{}
+
+func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
+	vv := messageV2Of(v)
+	if vv == nil {
+		return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
+	}
+
+	size := proto.Size(vv)
+	if mem.IsBelowBufferPoolingThreshold(size) {
+		buf, err := proto.Marshal(vv)
+		if err != nil {
+			return nil, err
+		}
+		data = append(data, mem.SliceBuffer(buf))
+	} else {
+		pool := mem.DefaultBufferPool()
+		buf := pool.Get(size)
+		if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+			pool.Put(buf)
+			return nil, err
+		}
+		data = append(data, mem.NewBuffer(buf, pool))
+	}
+
+	return data, nil
+}
+
+func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
+	vv := messageV2Of(v)
+	if vv == nil {
+		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
+	}
+
+	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+	defer buf.Free()
+	// TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
+	//  really possible without a major overhaul of the proto package, but the
+	//  vtprotobuf library may be able to support this.
+	return proto.Unmarshal(buf.ReadOnlyData(), vv)
+}
+
+func messageV2Of(v any) proto.Message {
+	switch v := v.(type) {
+	case protoadapt.MessageV1:
+		return protoadapt.MessageV2Of(v)
+	case protoadapt.MessageV2:
+		return v
+	}
+
+	return nil
+}
+
+func (c *codecV2) Name() string {
+	return Name
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/hack/tools/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
new file mode 100644
index 0000000000..1d827dd5d9
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -0,0 +1,269 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+	"maps"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+)
+
+func init() {
+	internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
+}
+
+var logger = grpclog.Component("metrics-registry")
+
+// DefaultMetrics are the default metrics registered through the global metrics
+// registry. This is written to at initialization time only, and is read only
+// after initialization.
+var DefaultMetrics = NewMetrics()
+
+// MetricDescriptor is the data for a registered metric.
+type MetricDescriptor struct {
+	// The name of this metric. This name must be unique across the whole binary
+	// (including any per call metrics). See
+	// https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
+	// for metric naming conventions.
+	Name Metric
+	// The description of this metric.
+	Description string
+	// The unit (e.g. entries, seconds) of this metric.
+	Unit string
+	// The required label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler.
+	Labels []string
+	// The optional label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler, if configured.
+	OptionalLabels []string
+	// Whether this metric is on by default.
+	Default bool
+	// The type of metric. This is set by the metric registry, and not intended
+	// to be set by a component registering a metric.
+	Type MetricType
+	// Bounds are the bounds of this metric. This only applies to histogram
+	// metrics. If unset or set with length 0, stats handlers will fall back to
+	// default bounds.
+	Bounds []float64
+}
+
+// MetricType is the type of metric.
+type MetricType int
+
+// Type of metric supported by this instrument registry.
+const (
+	MetricTypeIntCount MetricType = iota
+	MetricTypeFloatCount
+	MetricTypeIntHisto
+	MetricTypeFloatHisto
+	MetricTypeIntGauge
+)
+
+// Int64CountHandle is a typed handle for an int count metric. This handle
+// is passed at the recording point in order to know which metric to record
+// on.
+type Int64CountHandle MetricDescriptor
+
+// Descriptor returns the int64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 count value on the metrics recorder provided.
+func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Count(h, incr, labels...)
+}
+
+// Float64CountHandle is a typed handle for a float count metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Float64CountHandle MetricDescriptor
+
+// Descriptor returns the float64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[Metric]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[Metric]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if MetricDescriptor not present.
+func DescriptorForMetric(metric Metric) *MetricDescriptor {
+	return metricsRegistry[metric]
+}
+
+func registerMetric(name Metric, def bool) {
+	if registeredMetrics[name] {
+		logger.Fatalf("metric %v already registered", name)
+	}
+	registeredMetrics[name] = true
+	if def {
+		DefaultMetrics = DefaultMetrics.Add(name)
+	}
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64CountHandle)(descPtr)
+}
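+
+// Example (editor's sketch, not from the upstream source): registering a
+// counter at init time and recording on it later. The metric name, label, and
+// recorder are illustrative; a MetricsRecorder is typically supplied by a
+// stats handler implementation.
+//
+//	var pickCount = RegisterInt64Count(MetricDescriptor{
+//		Name:        "grpc.example.picks",
+//		Description: "Number of picks performed.",
+//		Unit:        "pick",
+//		Labels:      []string{"grpc.target"},
+//		Default:     false,
+//	})
+//
+//	func record(recorder MetricsRecorder) {
+//		pickCount.Record(recorder, 1, "dns:///example.com:443")
+//	}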
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64CountHandle)(descPtr)
+}
+
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use when recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntGauge
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
+func snapshotMetricsRegistryForTesting() func() {
+	oldDefaultMetrics := DefaultMetrics
+	oldRegisteredMetrics := registeredMetrics
+	oldMetricsRegistry := metricsRegistry
+
+	registeredMetrics = make(map[Metric]bool)
+	metricsRegistry = make(map[Metric]*MetricDescriptor)
+	// Seed the fresh maps with the previously registered contents so the
+	// snapshot starts from the current registry state.
+	maps.Copy(registeredMetrics, oldRegisteredMetrics)
+	maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+	return func() {
+		DefaultMetrics = oldDefaultMetrics
+		registeredMetrics = oldRegisteredMetrics
+		metricsRegistry = oldMetricsRegistry
+	}
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/hack/tools/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 0000000000..3221f7a633
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "maps"
+
+// MetricsRecorder records on metrics derived from metric registry.
+type MetricsRecorder interface {
+	// RecordInt64Count records the measurement alongside labels on the int
+	// count associated with the provided handle.
+	RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+	// RecordFloat64Count records the measurement alongside labels on the float
+	// count associated with the provided handle.
+	RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+	// RecordInt64Histo records the measurement alongside labels on the int
+	// histo associated with the provided handle.
+	RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+	// RecordFloat64Histo records the measurement alongside labels on the float
+	// histo associated with the provided handle.
+	RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+	// RecordInt64Gauge records the measurement alongside labels on the int
+	// gauge associated with the provided handle.
+	RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metric is an identifier for a metric.
+type Metric string
+
+// Metrics is a set of metrics to record. Once created, Metrics is immutable,
+// however Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetrics instead.
+type Metrics struct {
+	// metrics are the set of metrics to initialize.
+	metrics map[Metric]bool
+}
+
+// NewMetrics returns a Metrics containing the provided metrics.
+func NewMetrics(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for _, metric := range metrics {
+		newMetrics[metric] = true
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *Metrics) Metrics() map[Metric]bool {
+	return m.metrics
+}
+
+// Add adds the metrics to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *Metrics) Add(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for metric := range m.metrics {
+		newMetrics[metric] = true
+	}
+
+	for _, metric := range metrics {
+		newMetrics[metric] = true
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *Metrics) Join(metrics *Metrics) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	maps.Copy(newMetrics, m.metrics)
+	maps.Copy(newMetrics, metrics.metrics)
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Remove removes the metrics from the metrics set and returns a new copy with
+// the metrics removed.
+func (m *Metrics) Remove(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for metric := range m.metrics {
+		newMetrics[metric] = true
+	}
+
+	for _, metric := range metrics {
+		delete(newMetrics, metric)
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
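+
+// Example (editor's sketch, not from the upstream source): deriving a new
+// metrics set; each call returns a copy, leaving the original untouched. The
+// metric names are illustrative.
+//
+//	base := NewMetrics("grpc.example.picks", "grpc.example.drops")
+//	m := base.Add("grpc.example.queues").Remove("grpc.example.drops")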
diff --git a/hack/tools/vendor/google.golang.org/grpc/grpclog/component.go b/hack/tools/vendor/google.golang.org/grpc/grpclog/component.go
new file mode 100644
index 0000000000..f1ae080dcb
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/grpclog/component.go
@@ -0,0 +1,115 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"fmt"
+)
+
+// componentData records the settings for a component.
+type componentData struct {
+	name string
+}
+
+var cache = map[string]*componentData{}
+
+func (c *componentData) InfoDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
+	InfoDepth(depth+1, args...)
+}
+
+func (c *componentData) WarningDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
+	WarningDepth(depth+1, args...)
+}
+
+func (c *componentData) ErrorDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
+	ErrorDepth(depth+1, args...)
+}
+
+func (c *componentData) FatalDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
+	FatalDepth(depth+1, args...)
+}
+
+func (c *componentData) Info(args ...any) {
+	c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warning(args ...any) {
+	c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Error(args ...any) {
+	c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatal(args ...any) {
+	c.FatalDepth(1, args...)
+}
+
+func (c *componentData) Infof(format string, args ...any) {
+	c.InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Warningf(format string, args ...any) {
+	c.WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Errorf(format string, args ...any) {
+	c.ErrorDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Fatalf(format string, args ...any) {
+	c.FatalDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Infoln(args ...any) {
+	c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warningln(args ...any) {
+	c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Errorln(args ...any) {
+	c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatalln(args ...any) {
+	c.FatalDepth(1, args...)
+}
+
+func (c *componentData) V(l int) bool {
+	return V(l)
+}
+
+// Component creates a new component and returns it for logging. If a
+// component with the name already exists, nothing new is created and the
+// existing one is returned. SetLoggerV2 will panic if it is called with a
+// logger created by Component.
+func Component(componentName string) DepthLoggerV2 {
+	if cData, ok := cache[componentName]; ok {
+		return cData
+	}
+	c := &componentData{componentName}
+	cache[componentName] = c
+	return c
+}
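+
+// Example (editor's sketch, not from the upstream source): a package-level
+// component logger; the component name is illustrative. Assumes time is
+// imported.
+//
+//	var logger = grpclog.Component("mycomponent")
+//
+//	func doWork() {
+//		if logger.V(2) {
+//			logger.Infoln("verbose details")
+//		}
+//		logger.Warningf("retrying in %v", time.Second)
+//	}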
diff --git a/hack/tools/vendor/google.golang.org/grpc/grpclog/grpclog.go b/hack/tools/vendor/google.golang.org/grpc/grpclog/grpclog.go
new file mode 100644
index 0000000000..db320105e6
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -0,0 +1,186 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog defines logging for grpc.
+//
+// In the default logger, the severity level can be set by the environment
+// variable GRPC_GO_LOG_SEVERITY_LEVEL, and the verbosity level can be set by
+// GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog
+
+import (
+	"os"
+
+	"google.golang.org/grpc/grpclog/internal"
+)
+
+func init() {
+	SetLoggerV2(newLoggerV2())
+}
+
+// V reports whether the configured verbosity level is at least the requested level l.
+func V(l int) bool {
+	return internal.LoggerV2Impl.V(l)
+}
+
+// Info logs to the INFO log.
+func Info(args ...any) {
+	internal.LoggerV2Impl.Info(args...)
+}
+
+// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
+func Infof(format string, args ...any) {
+	internal.LoggerV2Impl.Infof(format, args...)
+}
+
+// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
+func Infoln(args ...any) {
+	internal.LoggerV2Impl.Infoln(args...)
+}
+
+// Warning logs to the WARNING log.
+func Warning(args ...any) {
+	internal.LoggerV2Impl.Warning(args...)
+}
+
+// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
+func Warningf(format string, args ...any) {
+	internal.LoggerV2Impl.Warningf(format, args...)
+}
+
+// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
+func Warningln(args ...any) {
+	internal.LoggerV2Impl.Warningln(args...)
+}
+
+// Error logs to the ERROR log.
+func Error(args ...any) {
+	internal.LoggerV2Impl.Error(args...)
+}
+
+// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
+func Errorf(format string, args ...any) {
+	internal.LoggerV2Impl.Errorf(format, args...)
+}
+
+// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
+func Errorln(args ...any) {
+	internal.LoggerV2Impl.Errorln(args...)
+}
+
+// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
+// It calls os.Exit() with exit code 1.
+func Fatal(args ...any) {
+	internal.LoggerV2Impl.Fatal(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
+// It calls os.Exit() with exit code 1.
+func Fatalf(format string, args ...any) {
+	internal.LoggerV2Impl.Fatalf(format, args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...any) {
+	internal.LoggerV2Impl.Fatalln(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+//
+// Deprecated: use Info.
+func Print(args ...any) {
+	internal.LoggerV2Impl.Info(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+//
+// Deprecated: use Infof.
+func Printf(format string, args ...any) {
+	internal.LoggerV2Impl.Infof(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+//
+// Deprecated: use Infoln.
+func Println(args ...any) {
+	internal.LoggerV2Impl.Infoln(args...)
+}
+
+// InfoDepth logs to the INFO log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func InfoDepth(depth int, args ...any) {
+	if internal.DepthLoggerV2Impl != nil {
+		internal.DepthLoggerV2Impl.InfoDepth(depth, args...)
+	} else {
+		internal.LoggerV2Impl.Infoln(args...)
+	}
+}
+
+// WarningDepth logs to the WARNING log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WarningDepth(depth int, args ...any) {
+	if internal.DepthLoggerV2Impl != nil {
+		internal.DepthLoggerV2Impl.WarningDepth(depth, args...)
+	} else {
+		internal.LoggerV2Impl.Warningln(args...)
+	}
+}
+
+// ErrorDepth logs to the ERROR log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ErrorDepth(depth int, args ...any) {
+	if internal.DepthLoggerV2Impl != nil {
+		internal.DepthLoggerV2Impl.ErrorDepth(depth, args...)
+	} else {
+		internal.LoggerV2Impl.Errorln(args...)
+	}
+}
+
+// FatalDepth logs to the FATAL log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func FatalDepth(depth int, args ...any) {
+	if internal.DepthLoggerV2Impl != nil {
+		internal.DepthLoggerV2Impl.FatalDepth(depth, args...)
+	} else {
+		internal.LoggerV2Impl.Fatalln(args...)
+	}
+	os.Exit(1)
+}
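
The Depth variants exist so logging helpers can attribute a message to their caller's file and line rather than their own. A sketch of the intended pattern, assuming an installed DepthLoggerV2 (otherwise the call falls back to Infoln as shown above); the infof helper is hypothetical:

package main

import "google.golang.org/grpc/grpclog"

// infof is a hypothetical wrapper; depth 1 tells a DepthLoggerV2 to skip
// this frame and attribute the log line to infof's caller.
func infof(args ...any) {
	grpclog.InfoDepth(1, args...)
}

func main() {
	infof("resolver returned 3 addresses")
}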
diff --git a/hack/tools/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/hack/tools/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
new file mode 100644
index 0000000000..59c03bc14c
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the grpclog package.
+package internal
+
+// LoggerV2Impl is the logger used for the non-depth log functions.
+var LoggerV2Impl LoggerV2
+
+// DepthLoggerV2Impl is the logger used for the depth log functions.
+var DepthLoggerV2Impl DepthLoggerV2
diff --git a/hack/tools/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/hack/tools/vendor/google.golang.org/grpc/grpclog/internal/logger.go
new file mode 100644
index 0000000000..e524fdd40b
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/grpclog/internal/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Fatalln(args ...any)
+	Print(args ...any)
+	Printf(format string, args ...any)
+	Println(args ...any)
+}
+
+// LoggerWrapper wraps Logger into a LoggerV2.
+type LoggerWrapper struct {
+	Logger
+}
+
+// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Info(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Infoln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Infof(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Warning(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Warningln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Warningf(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Error(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Errorln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Errorf(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// V reports whether the configured verbosity level is at least the requested level l.
+func (*LoggerWrapper) V(int) bool {
+	// Returns true for all verbosity levels.
+	return true
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/hack/tools/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
new file mode 100644
index 0000000000..07df71e98a
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -0,0 +1,204 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"os"
+)
+
+// LoggerV2 does underlying logging work for grpclog.
+type LoggerV2 interface {
+	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+	Info(args ...any)
+	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+	Infoln(args ...any)
+	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+	Infof(format string, args ...any)
+	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+	Warning(args ...any)
+	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+	Warningln(args ...any)
+	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+	Warningf(format string, args ...any)
+	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	Error(args ...any)
+	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	Errorln(args ...any)
+	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	Errorf(format string, args ...any)
+	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatal(args ...any)
+	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalln(args ...any)
+	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalf(format string, args ...any)
+	// V reports whether the configured verbosity level is at least the requested level l.
+	V(l int) bool
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type DepthLoggerV2 interface {
+	LoggerV2
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	InfoDepth(depth int, args ...any)
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	WarningDepth(depth int, args ...any)
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	ErrorDepth(depth int, args ...any)
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	FatalDepth(depth int, args ...any)
+}
+
+const (
+	// infoLog indicates Info severity.
+	infoLog int = iota
+	// warningLog indicates Warning severity.
+	warningLog
+	// errorLog indicates Error severity.
+	errorLog
+	// fatalLog indicates Fatal severity.
+	fatalLog
+)
+
+// severityName contains the string representation of each severity.
+var severityName = []string{
+	infoLog:    "INFO",
+	warningLog: "WARNING",
+	errorLog:   "ERROR",
+	fatalLog:   "FATAL",
+}
+
+// loggerT is the default logger used by grpclog.
+type loggerT struct {
+	m          []*log.Logger
+	v          int
+	jsonFormat bool
+}
+
+func (g *loggerT) output(severity int, s string) {
+	sevStr := severityName[severity]
+	if !g.jsonFormat {
+		g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
+		return
+	}
+	// TODO: we can also include the logging component, but that needs more
+	// (API) changes.
+	b, _ := json.Marshal(map[string]string{
+		"severity": sevStr,
+		"message":  s,
+	})
+	g.m[severity].Output(2, string(b))
+}
+
+func (g *loggerT) Info(args ...any) {
+	g.output(infoLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Infoln(args ...any) {
+	g.output(infoLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Infof(format string, args ...any) {
+	g.output(infoLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Warning(args ...any) {
+	g.output(warningLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Warningln(args ...any) {
+	g.output(warningLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Warningf(format string, args ...any) {
+	g.output(warningLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Error(args ...any) {
+	g.output(errorLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Errorln(args ...any) {
+	g.output(errorLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Errorf(format string, args ...any) {
+	g.output(errorLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Fatal(args ...any) {
+	g.output(fatalLog, fmt.Sprint(args...))
+	os.Exit(1)
+}
+
+func (g *loggerT) Fatalln(args ...any) {
+	g.output(fatalLog, fmt.Sprintln(args...))
+	os.Exit(1)
+}
+
+func (g *loggerT) Fatalf(format string, args ...any) {
+	g.output(fatalLog, fmt.Sprintf(format, args...))
+	os.Exit(1)
+}
+
+func (g *loggerT) V(l int) bool {
+	return l <= g.v
+}
+
+// LoggerV2Config configures the LoggerV2 implementation.
+type LoggerV2Config struct {
+	// Verbosity sets the verbosity level of the logger.
+	Verbosity int
+	// FormatJSON controls whether the logger should output logs in JSON format.
+	FormatJSON bool
+}
+
+// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
+// The infoW, warningW, and errorW writers are used to write log messages of
+// different severity levels.
+func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
+	var m []*log.Logger
+	flag := log.LstdFlags
+	if c.FormatJSON {
+		flag = 0
+	}
+	m = append(m, log.New(infoW, "", flag))
+	m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
+	ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
+	m = append(m, log.New(ew, "", flag))
+	m = append(m, log.New(ew, "", flag))
+	return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
+}
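
Note the writer fan-out in NewLoggerV2: warnings also reach the info writer, and errors and fatals reach all three. A quick sketch against the public grpclog constructor, using in-memory buffers to make the fan-out visible:

package main

import (
	"bytes"
	"fmt"

	"google.golang.org/grpc/grpclog"
)

func main() {
	var infoBuf, warnBuf, errBuf bytes.Buffer
	l := grpclog.NewLoggerV2(&infoBuf, &warnBuf, &errBuf)
	l.Error("backend unreachable")
	// Because of the io.MultiWriter wiring, the ERROR line lands in all
	// three buffers, not just errBuf.
	fmt.Println(infoBuf.Len() > 0, warnBuf.Len() > 0, errBuf.Len() > 0) // true true true
}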
diff --git a/hack/tools/vendor/google.golang.org/grpc/grpclog/logger.go b/hack/tools/vendor/google.golang.org/grpc/grpclog/logger.go
new file mode 100644
index 0000000000..4b20358570
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -0,0 +1,34 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import "google.golang.org/grpc/grpclog/internal"
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger internal.Logger
+
+// SetLogger sets the logger that is used in grpc. Call only from
+// init() functions.
+//
+// Deprecated: use SetLoggerV2.
+func SetLogger(l Logger) {
+	internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l}
+}
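
A standard-library *log.Logger already satisfies the deprecated Logger interface, so the legacy entry point can be fed one directly; LoggerWrapper then lifts it to a LoggerV2. A sketch of that (deprecated) path:

package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Deprecated path: every severity is funneled through the wrapped
	// logger's Print-family methods, and V(l) is always true.
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))
}

func main() {}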
diff --git a/hack/tools/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/hack/tools/vendor/google.golang.org/grpc/grpclog/loggerv2.go
new file mode 100644
index 0000000000..892dc13d16
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -0,0 +1,97 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"google.golang.org/grpc/grpclog/internal"
+)
+
+// LoggerV2 does underlying logging work for grpclog.
+type LoggerV2 internal.LoggerV2
+
+// SetLoggerV2 sets the logger that is used in grpc to a V2 logger.
+// It is not mutex-protected, so it should be called before any gRPC functions.
+func SetLoggerV2(l LoggerV2) {
+	if _, ok := l.(*componentData); ok {
+		panic("cannot use component logger as grpclog logger")
+	}
+	internal.LoggerV2Impl = l
+	internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2)
+}
+
+// NewLoggerV2 creates a loggerV2 with the provided writers.
+// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1).
+// Error logs will be written to errorW, warningW and infoW.
+// Warning logs will be written to warningW and infoW.
+// Info logs will be written to infoW.
+func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
+	return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{})
+}
+
+// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
+// verbosity level.
+func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
+	return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v})
+}
+
+// newLoggerV2 creates a loggerV2 to be used as the default logger. Logs at or
+// above the severity selected via GRPC_GO_LOG_SEVERITY_LEVEL are written to
+// stderr; everything below it is discarded.
+func newLoggerV2() LoggerV2 {
+	errorW := io.Discard
+	warningW := io.Discard
+	infoW := io.Discard
+
+	logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
+	switch logLevel {
+	case "", "ERROR", "error": // If env is unset, set level to ERROR.
+		errorW = os.Stderr
+	case "WARNING", "warning":
+		warningW = os.Stderr
+	case "INFO", "info":
+		infoW = os.Stderr
+	}
+
+	var v int
+	vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
+	if vl, err := strconv.Atoi(vLevel); err == nil {
+		v = vl
+	}
+
+	jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
+
+	return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{
+		Verbosity:  v,
+		FormatJSON: jsonFormat,
+	})
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type DepthLoggerV2 internal.DepthLoggerV2
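
Because the environment variables are read once at package init, changing them afterwards has no effect; a program that wants different routing must install a logger explicitly. A hedged sketch that silences info logs and enables verbosity up to 2:

package main

import (
	"io"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Must run before any gRPC activity, since SetLoggerV2 is not
	// mutex-protected. Info is discarded; warnings and errors go to
	// stderr; V(l) reports true for l <= 2.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(io.Discard, os.Stderr, os.Stderr, 2))
}

func main() {}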
diff --git a/hack/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/hack/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
new file mode 100644
index 0000000000..d92335445f
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -0,0 +1,308 @@
+// Copyright 2015 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.34.2
+// 	protoc        v5.27.1
+// source: grpc/health/v1/health.proto
+
+package grpc_health_v1
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type HealthCheckResponse_ServingStatus int32
+
+const (
+	HealthCheckResponse_UNKNOWN         HealthCheckResponse_ServingStatus = 0
+	HealthCheckResponse_SERVING         HealthCheckResponse_ServingStatus = 1
+	HealthCheckResponse_NOT_SERVING     HealthCheckResponse_ServingStatus = 2
+	HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method.
+)
+
+// Enum value maps for HealthCheckResponse_ServingStatus.
+var (
+	HealthCheckResponse_ServingStatus_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "SERVING",
+		2: "NOT_SERVING",
+		3: "SERVICE_UNKNOWN",
+	}
+	HealthCheckResponse_ServingStatus_value = map[string]int32{
+		"UNKNOWN":         0,
+		"SERVING":         1,
+		"NOT_SERVING":     2,
+		"SERVICE_UNKNOWN": 3,
+	}
+)
+
+func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus {
+	p := new(HealthCheckResponse_ServingStatus)
+	*p = x
+	return p
+}
+
+func (x HealthCheckResponse_ServingStatus) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor()
+}
+
+func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType {
+	return &file_grpc_health_v1_health_proto_enumTypes[0]
+}
+
+func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead.
+func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
+	return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type HealthCheckRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
+}
+
+func (x *HealthCheckRequest) Reset() {
+	*x = HealthCheckRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_health_v1_health_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *HealthCheckRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckRequest) ProtoMessage() {}
+
+func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_health_v1_health_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
+	return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HealthCheckRequest) GetService() string {
+	if x != nil {
+		return x.Service
+	}
+	return ""
+}
+
+type HealthCheckResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
+}
+
+func (x *HealthCheckResponse) Reset() {
+	*x = HealthCheckResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_grpc_health_v1_health_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *HealthCheckResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckResponse) ProtoMessage() {}
+
+func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_health_v1_health_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
+func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
+	return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
+	if x != nil {
+		return x.Status
+	}
+	return HealthCheckResponse_UNKNOWN
+}
+
+var File_grpc_health_v1_health_proto protoreflect.FileDescriptor
+
+var file_grpc_health_v1_health_proto_rawDesc = []byte{
+	0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31,
+	0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67,
+	0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a,
+	0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75,
+	0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01,
+	0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73,
+	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
+	0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+	0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+	0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+	0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75,
+	0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b,
+	0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e,
+	0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f,
+	0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
+	0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05,
+	0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61,
+	0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+	0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
+	0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52,
+	0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68,
+	0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+	0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61,
+	0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+	0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65,
+	0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68,
+	0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74,
+	0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c,
+	0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_grpc_health_v1_health_proto_rawDescOnce sync.Once
+	file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc
+)
+
+func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
+	file_grpc_health_v1_health_proto_rawDescOnce.Do(func() {
+		file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData)
+	})
+	return file_grpc_health_v1_health_proto_rawDescData
+}
+
+var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_grpc_health_v1_health_proto_goTypes = []any{
+	(HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
+	(*HealthCheckRequest)(nil),             // 1: grpc.health.v1.HealthCheckRequest
+	(*HealthCheckResponse)(nil),            // 2: grpc.health.v1.HealthCheckResponse
+}
+var file_grpc_health_v1_health_proto_depIdxs = []int32{
+	0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus
+	1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
+	1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
+	2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
+	2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
+	3, // [3:5] is the sub-list for method output_type
+	1, // [1:3] is the sub-list for method input_type
+	1, // [1:1] is the sub-list for extension type_name
+	1, // [1:1] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_grpc_health_v1_health_proto_init() }
+func file_grpc_health_v1_health_proto_init() {
+	if File_grpc_health_v1_health_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any {
+			switch v := v.(*HealthCheckRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any {
+			switch v := v.(*HealthCheckResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_grpc_health_v1_health_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   2,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_grpc_health_v1_health_proto_goTypes,
+		DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs,
+		EnumInfos:         file_grpc_health_v1_health_proto_enumTypes,
+		MessageInfos:      file_grpc_health_v1_health_proto_msgTypes,
+	}.Build()
+	File_grpc_health_v1_health_proto = out.File
+	file_grpc_health_v1_health_proto_rawDesc = nil
+	file_grpc_health_v1_health_proto_goTypes = nil
+	file_grpc_health_v1_health_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/hack/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
new file mode 100644
index 0000000000..f96b8ab492
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -0,0 +1,234 @@
+// Copyright 2015 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc             v5.27.1
+// source: grpc/health/v1/health.proto
+
+package grpc_health_v1
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+	Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
+	Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch"
+)
+
+// HealthClient is the client API for Health service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+//
+// Health is gRPC's mechanism for checking whether a server is able to handle
+// RPCs. Its semantics are documented in
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
+type HealthClient interface {
+	// Check gets the health of the specified service. If the requested service
+	// is unknown, the call will fail with status NOT_FOUND. If the caller does
+	// not specify a service name, the server should respond with its overall
+	// health status.
+	//
+	// Clients should set a deadline when calling Check, and can declare the
+	// server unhealthy if they do not receive a timely response.
+	//
+	// Check implementations should be idempotent and side effect free.
+	Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
+	// Performs a watch for the serving status of the requested service.
+	// The server will immediately send back a message indicating the current
+	// serving status.  It will then subsequently send a new message whenever
+	// the service's serving status changes.
+	//
+	// If the requested service is unknown when the call is received, the
+	// server will send a message setting the serving status to
+	// SERVICE_UNKNOWN but will *not* terminate the call.  If at some
+	// future point, the serving status of the service becomes known, the
+	// server will send a new message with the service's serving status.
+	//
+	// If the call terminates with status UNIMPLEMENTED, then clients
+	// should assume this method is not supported and should not retry the
+	// call.  If the call terminates with any other status (including OK),
+	// clients should retry the call with appropriate exponential backoff.
+	Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error)
+}
+
+type healthClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
+	return &healthClient{cc}
+}
+
+func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	out := new(HealthCheckResponse)
+	err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse]
+
+// HealthServer is the server API for Health service.
+// All implementations should embed UnimplementedHealthServer
+// for forward compatibility.
+//
+// Health is gRPC's mechanism for checking whether a server is able to handle
+// RPCs. Its semantics are documented in
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
+type HealthServer interface {
+	// Check gets the health of the specified service. If the requested service
+	// is unknown, the call will fail with status NOT_FOUND. If the caller does
+	// not specify a service name, the server should respond with its overall
+	// health status.
+	//
+	// Clients should set a deadline when calling Check, and can declare the
+	// server unhealthy if they do not receive a timely response.
+	//
+	// Check implementations should be idempotent and side effect free.
+	Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
+	// Performs a watch for the serving status of the requested service.
+	// The server will immediately send back a message indicating the current
+	// serving status.  It will then subsequently send a new message whenever
+	// the service's serving status changes.
+	//
+	// If the requested service is unknown when the call is received, the
+	// server will send a message setting the serving status to
+	// SERVICE_UNKNOWN but will *not* terminate the call.  If at some
+	// future point, the serving status of the service becomes known, the
+	// server will send a new message with the service's serving status.
+	//
+	// If the call terminates with status UNIMPLEMENTED, then clients
+	// should assume this method is not supported and should not retry the
+	// call.  If the call terminates with any other status (including OK),
+	// clients should retry the call with appropriate exponential backoff.
+	Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error
+}
+
+// UnimplementedHealthServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedHealthServer struct{}
+
+func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
+}
+func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error {
+	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
+}
+func (UnimplementedHealthServer) testEmbeddedByValue() {}
+
+// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to HealthServer will
+// result in compilation errors.
+type UnsafeHealthServer interface {
+	mustEmbedUnimplementedHealthServer()
+}
+
+func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {
+	// If the following call panics, it indicates UnimplementedHealthServer was
+	// embedded by pointer and is nil.  This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+		t.testEmbeddedByValue()
+	}
+	s.RegisterService(&Health_ServiceDesc, srv)
+}
+
+func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(HealthCheckRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(HealthServer).Check(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Health_Check_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(HealthCheckRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse]
+
+// Health_ServiceDesc is the grpc.ServiceDesc for the Health service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy).
+var Health_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "grpc.health.v1.Health",
+	HandlerType: (*HealthServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Check",
+			Handler:    _Health_Check_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Watch",
+			Handler:       _Health_Watch_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "grpc/health/v1/health.proto",
+}
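
These generated stubs are typically paired with the ready-made server implementation in google.golang.org/grpc/health. A sketch wiring both halves together; the "example.Service" name is illustrative, and grpc.NewClient assumes the gRPC-Go releases this generated code targets (v1.64+):

package main

import (
	"context"
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	hs := health.NewServer() // ready-made HealthServer implementation
	hs.SetServingStatus("example.Service", healthpb.HealthCheckResponse_SERVING)
	healthpb.RegisterHealthServer(s, hs)
	go s.Serve(lis)

	conn, err := grpc.NewClient(lis.Addr().String(),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	resp, err := healthpb.NewHealthClient(conn).Check(ctx,
		&healthpb.HealthCheckRequest{Service: "example.Service"})
	log.Printf("status=%v err=%v", resp.GetStatus(), err)
}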
diff --git a/hack/tools/vendor/google.golang.org/grpc/interceptor.go b/hack/tools/vendor/google.golang.org/grpc/interceptor.go
new file mode 100644
index 0000000000..877d78fc3d
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/interceptor.go
@@ -0,0 +1,104 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+)
+
+// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
+type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error
+
+// UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
+// Unary interceptors can be specified as a DialOption, using
+// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a
+// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC
+// delegates all unary RPC invocations to the interceptor, and it is the
+// responsibility of the interceptor to call invoker to complete the processing
+// of the RPC.
+//
+// method is the RPC name. req and reply are the corresponding request and
+// response messages. cc is the ClientConn on which the RPC was invoked. invoker
+// is the handler to complete the RPC and it is the responsibility of the
+// interceptor to call it. opts contain all applicable call options, including
+// defaults from the ClientConn as well as per-call options.
+//
+// The returned error must be compatible with the status package.
+type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
+
+// Streamer is called by StreamClientInterceptor to create a ClientStream.
+type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
+
+// StreamClientInterceptor intercepts the creation of a ClientStream. Stream
+// interceptors can be specified as a DialOption, using WithStreamInterceptor()
+// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream
+// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations
+// to the interceptor, and it is the responsibility of the interceptor to call
+// streamer.
+//
+// desc contains a description of the stream. cc is the ClientConn on which the
+// RPC was invoked. streamer is the handler to create a ClientStream and it is
+// the responsibility of the interceptor to call it. opts contain all applicable
+// call options, including defaults from the ClientConn as well as per-call
+// options.
+//
+// StreamClientInterceptor may return a custom ClientStream to intercept all I/O
+// operations. The returned error must be compatible with the status package.
+type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
+
+// UnaryServerInfo consists of various information about a unary RPC on
+// server side. All per-rpc information may be mutated by the interceptor.
+type UnaryServerInfo struct {
+	// Server is the service implementation the user provides. This is read-only.
+	Server any
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+}
+
+// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
+// execution of a unary RPC.
+//
+// If a UnaryHandler returns an error, it should either be produced by the
+// status package, or be one of the context errors. Otherwise, gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message of the
+// RPC.
+type UnaryHandler func(ctx context.Context, req any) (any, error)
+
+// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
+// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
+// of the service method implementation. It is the responsibility of the interceptor to invoke handler
+// to complete the RPC.
+type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error)
+
+// StreamServerInfo consists of various information about a streaming RPC on
+// server side. All per-rpc information may be mutated by the interceptor.
+type StreamServerInfo struct {
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
+// info contains all the information of this RPC the interceptor can operate on. And handler is the
+// service method implementation. It is the responsibility of the interceptor to invoke handler to
+// complete the RPC.
+type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
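
A unary server interceptor must, per the contract above, either invoke handler or short-circuit with a status-package error. A minimal timing sketch:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// timingInterceptor logs each unary RPC's method, latency, and error, then
// returns exactly what the wrapped handler produced.
func timingInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("rpc=%s dur=%v err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	_ = grpc.NewServer(grpc.UnaryInterceptor(timingInterceptor))
}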
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/hack/tools/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 0000000000..b15cf482d2
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,109 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff implements the backoff strategy for gRPC.
+//
+// This is kept in internal until the gRPC project decides whether or not to
+// allow alternative backoff strategies.
+package backoff
+
+import (
+	"context"
+	"errors"
+	"math/rand"
+	"time"
+
+	grpcbackoff "google.golang.org/grpc/backoff"
+)
+
+// Strategy defines the methodology for backing off after a grpc connection
+// failure.
+type Strategy interface {
+	// Backoff returns the amount of time to wait before the next retry given
+	// the number of consecutive failures.
+	Backoff(retries int) time.Duration
+}
+
+// DefaultExponential is an exponential backoff implementation using the
+// default values for all the configurable knobs defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
+
+// Exponential implements exponential backoff algorithm as defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+type Exponential struct {
+	// Config contains all options to configure the backoff algorithm.
+	Config grpcbackoff.Config
+}
+
+// Backoff returns the amount of time to wait before the next retry given the
+// number of retries.
+func (bc Exponential) Backoff(retries int) time.Duration {
+	if retries == 0 {
+		return bc.Config.BaseDelay
+	}
+	backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
+	for backoff < max && retries > 0 {
+		backoff *= bc.Config.Multiplier
+		retries--
+	}
+	if backoff > max {
+		backoff = max
+	}
+	// Randomize backoff delays so that if a cluster of requests start at
+	// the same time, they won't operate in lockstep.
+	backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1)
+	if backoff < 0 {
+		return 0
+	}
+	return time.Duration(backoff)
+}
+
+// ErrResetBackoff is the error to be returned by the function executed by RunF,
+// to instruct the latter to reset its backoff state.
+var ErrResetBackoff = errors.New("reset backoff state")
+
+// RunF provides a convenient way to run a function f repeatedly until the
+// context expires or f returns a non-nil error that is not ErrResetBackoff.
+// When f returns ErrResetBackoff, RunF continues to run f, but resets its
+// backoff state before doing so. backoff accepts an integer representing the
+// number of retries, and returns the amount of time to back off.
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) {
+	attempt := 0
+	timer := time.NewTimer(0)
+	for ctx.Err() == nil {
+		select {
+		case <-timer.C:
+		case <-ctx.Done():
+			timer.Stop()
+			return
+		}
+
+		err := f()
+		if errors.Is(err, ErrResetBackoff) {
+			timer.Reset(0)
+			attempt = 0
+			continue
+		}
+		if err != nil {
+			return
+		}
+		timer.Reset(backoff(attempt))
+		attempt++
+	}
+}
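
Since this package is internal to the grpc module and cannot be imported from application code, the sketch below re-implements Exponential.Backoff against the public backoff.Config purely to show the grow-cap-jitter shape of the schedule:

package main

import (
	"fmt"
	"math/rand"
	"time"

	grpcbackoff "google.golang.org/grpc/backoff"
)

// expBackoff mirrors Exponential.Backoff above; it is a standalone copy for
// illustration, not the internal implementation itself.
func expBackoff(cfg grpcbackoff.Config, retries int) time.Duration {
	if retries == 0 {
		return cfg.BaseDelay
	}
	d, maxD := float64(cfg.BaseDelay), float64(cfg.MaxDelay)
	for d < maxD && retries > 0 {
		d *= cfg.Multiplier
		retries--
	}
	if d > maxD {
		d = maxD
	}
	d *= 1 + cfg.Jitter*(rand.Float64()*2-1) // randomize to avoid lockstep retries
	if d < 0 {
		return 0
	}
	return time.Duration(d)
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Println(i, expBackoff(grpcbackoff.DefaultConfig, i))
	}
}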
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/hack/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
new file mode 100644
index 0000000000..13821a9266
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
@@ -0,0 +1,82 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package gracefulswitch
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+type lbConfig struct {
+	serviceconfig.LoadBalancingConfig
+
+	childBuilder balancer.Builder
+	childConfig  serviceconfig.LoadBalancingConfig
+}
+
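+// ChildName returns the name of the child balancer builder recorded in l,
+// which must be a config returned by ParseConfig.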
+func ChildName(l serviceconfig.LoadBalancingConfig) string {
+	return l.(*lbConfig).childBuilder.Name()
+}
+
+// ParseConfig parses a child config list and returns a LB config for the
+// gracefulswitch Balancer.
+//
+// cfg is expected to be a json.RawMessage containing a JSON array of LB policy
+// names + configs as the format of the "loadBalancingConfig" field in
+// ServiceConfig.  It returns a type that should be passed to
+// UpdateClientConnState in the BalancerConfig field.
+func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+	var lbCfg []map[string]json.RawMessage
+	if err := json.Unmarshal(cfg, &lbCfg); err != nil {
+		return nil, err
+	}
+	for i, e := range lbCfg {
+		if len(e) != 1 {
+			return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i)
+		}
+
+		var name string
+		var jsonCfg json.RawMessage
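+		// e holds exactly one name/config pair (enforced above), so this
+		// loop simply extracts it.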
+		for name, jsonCfg = range e {
+		}
+
+		builder := balancer.Get(name)
+		if builder == nil {
+			// Skip unregistered balancer names.
+			continue
+		}
+
+		parser, ok := builder.(balancer.ConfigParser)
+		if !ok {
+			// This is a valid child with no config.
+			return &lbConfig{childBuilder: builder}, nil
+		}
+
+		cfg, err := parser.ParseConfig(jsonCfg)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err)
+		}
+		return &lbConfig{childBuilder: builder, childConfig: cfg}, nil
+	}
+
+	return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg))
+}
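
For context, the array ParseConfig consumes is exactly the "loadBalancingConfig" field of a gRPC service config: unrecognized policy names are skipped and the first registered one wins. A client-side sketch of supplying such a config (the target and the made-up "magic_lb" name are illustrative; round_robin is registered by the grpc package itself):

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// "magic_lb" is unregistered and will be skipped; "round_robin" is
	// selected as the first recognized policy in the list.
	conn, err := grpc.NewClient("dns:///example.internal:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"magic_lb":{}}, {"round_robin":{}}]}`),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}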
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/hack/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
new file mode 100644
index 0000000000..73bb4c4ee9
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -0,0 +1,419 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package gracefulswitch implements a graceful switch load balancer.
+package gracefulswitch
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/resolver"
+)
+
+var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
+var _ balancer.Balancer = (*Balancer)(nil)
+
+// NewBalancer returns a graceful switch Balancer.
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
+	return &Balancer{
+		cc:    cc,
+		bOpts: opts,
+	}
+}
+
+// Balancer is a utility to gracefully switch from one balancer to
+// a new balancer. It implements the balancer.Balancer interface.
+type Balancer struct {
+	bOpts balancer.BuildOptions
+	cc    balancer.ClientConn
+
+	// mu protects the following fields and all fields within balancerCurrent
+	// and balancerPending. mu does not need to be held when calling into the
+	// child balancers, as all calls into these children happen only as a direct
+	// result of calls into the gracefulSwitchBalancer, which are also
+	// guaranteed to be synchronous. There is one exception: an UpdateState call
+	// from a child balancer when current and pending are populated can lead to
+	// calling Close() on the current. To prevent that racing with an
+	// UpdateSubConnState from the channel, we hold currentMu during Close and
+	// UpdateSubConnState calls.
+	mu              sync.Mutex
+	balancerCurrent *balancerWrapper
+	balancerPending *balancerWrapper
+	closed          bool // set to true when this balancer is closed
+
+	// currentMu must be locked before mu. This mutex guards against this
+	// sequence of events: UpdateSubConnState() called, finds the
+	// balancerCurrent, gives up lock, updateState comes in, causes Close() on
+	// balancerCurrent before the UpdateSubConnState is called on the
+	// balancerCurrent.
+	currentMu sync.Mutex
+}
+
+// swap swaps out the current lb with the pending lb and updates the ClientConn.
+// The caller must hold gsb.mu.
+func (gsb *Balancer) swap() {
+	gsb.cc.UpdateState(gsb.balancerPending.lastState)
+	cur := gsb.balancerCurrent
+	gsb.balancerCurrent = gsb.balancerPending
+	gsb.balancerPending = nil
+	go func() {
+		gsb.currentMu.Lock()
+		defer gsb.currentMu.Unlock()
+		cur.Close()
+	}()
+}
+
+// balancerCurrentOrPending reports whether the balancer passed in is the
+// current or the pending balancer.
+// The caller must hold gsb.mu.
+func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
+	return bw == gsb.balancerCurrent || bw == gsb.balancerPending
+}
+
+// SwitchTo initializes the graceful switch process, which completes based on
+// connectivity state changes on the current/pending balancer. Thus, the switch
+// process is not complete when this method returns. This method must be called
+// synchronously alongside the rest of the balancer.Balancer methods this
+// Graceful Switch Balancer implements.
+//
+// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState
+// to cause the Balancer to automatically change to the new child when necessary.
+func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
+	_, err := gsb.switchTo(builder)
+	return err
+}
+
+func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) {
+	gsb.mu.Lock()
+	if gsb.closed {
+		gsb.mu.Unlock()
+		return nil, errBalancerClosed
+	}
+	bw := &balancerWrapper{
+		builder: builder,
+		gsb:     gsb,
+		lastState: balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
+		},
+		subconns: make(map[balancer.SubConn]bool),
+	}
+	balToClose := gsb.balancerPending // nil if there is no pending balancer
+	if gsb.balancerCurrent == nil {
+		gsb.balancerCurrent = bw
+	} else {
+		gsb.balancerPending = bw
+	}
+	gsb.mu.Unlock()
+	balToClose.Close()
+	// This function takes a builder instead of a balancer because builder.Build
+	// can call back inline, and this utility needs to handle the callbacks.
+	newBalancer := builder.Build(bw, gsb.bOpts)
+	if newBalancer == nil {
+		// This is illegal and should never happen; if it does happen, we clear
+		// the balancerWrapper we were constructing to avoid a potential panic.
+		gsb.mu.Lock()
+		if gsb.balancerPending != nil {
+			gsb.balancerPending = nil
+		} else {
+			gsb.balancerCurrent = nil
+		}
+		gsb.mu.Unlock()
+		return nil, balancer.ErrBadResolverState
+	}
+
+	// This write doesn't need to take gsb.mu because this field never gets read
+	// or written to on any calls from the current or pending. Calls from grpc
+	// to this balancer are guaranteed to be synchronous, so nothing will be
+	// forwarded to this bw.Balancer field until this SwitchTo() function
+	// returns.
+	bw.Balancer = newBalancer
+	return bw, nil
+}
+
+// latestBalancer returns the pending balancer if one exists, otherwise the
+// current balancer. Returns nil if the graceful switch balancer is closed.
+func (gsb *Balancer) latestBalancer() *balancerWrapper {
+	gsb.mu.Lock()
+	defer gsb.mu.Unlock()
+	if gsb.balancerPending != nil {
+		return gsb.balancerPending
+	}
+	return gsb.balancerCurrent
+}
+
+// UpdateClientConnState forwards the update to the latest balancer created.
+//
+// If the state's BalancerConfig is the config returned by a call to
+// gracefulswitch.ParseConfig, then this function will automatically SwitchTo
+// the balancer indicated by the config before forwarding its config to it, if
+// necessary.
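+//
+// A minimal wiring sketch (hypothetical policy name, assuming it is
+// registered):
+//
+//	cfg, err := ParseConfig(json.RawMessage(`[{"round_robin": {}}]`))
+//	if err == nil {
+//		gsb.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: cfg})
+//	}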
+func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	// The resolver data is only relevant to the most recent LB Policy.
+	balToUpdate := gsb.latestBalancer()
+	gsbCfg, ok := state.BalancerConfig.(*lbConfig)
+	if ok {
+		// Switch to the child in the config unless it is already active.
+		if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() {
+			var err error
+			balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder)
+			if err != nil {
+				return fmt.Errorf("could not switch to new child balancer: %w", err)
+			}
+		}
+		// Unwrap the child balancer's config.
+		state.BalancerConfig = gsbCfg.childConfig
+	}
+
+	if balToUpdate == nil {
+		return errBalancerClosed
+	}
+
+	// Perform this call without gsb.mu to prevent deadlocks if the child calls
+	// back into the channel. The latest balancer can never be closed during a
+	// call from the channel, even without gsb.mu held.
+	return balToUpdate.UpdateClientConnState(state)
+}
+
+// ResolverError forwards the error to the latest balancer created.
+func (gsb *Balancer) ResolverError(err error) {
+	// The resolver data is only relevant to the most recent LB Policy.
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		gsb.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            base.NewErrPicker(err),
+		})
+		return
+	}
+	// Perform this call without gsb.mu to prevent deadlocks if the child calls
+	// back into the channel. The latest balancer can never be closed during a
+	// call from the channel, even without gsb.mu held.
+	balToUpdate.ResolverError(err)
+}
+
+// ExitIdle forwards the call to the latest balancer created.
+//
+// If the latest balancer does not support ExitIdle, the subConns are
+// reconnected manually.
+func (gsb *Balancer) ExitIdle() {
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		return
+	}
+	// There is no need to protect this read with a mutex, as the write to the
+	// Balancer field happens in SwitchTo, which completes before this can be
+	// called.
+	if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
+		ei.ExitIdle()
+		return
+	}
+	gsb.mu.Lock()
+	defer gsb.mu.Unlock()
+	for sc := range balToUpdate.subconns {
+		sc.Connect()
+	}
+}
+
+// updateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) {
+	gsb.currentMu.Lock()
+	defer gsb.currentMu.Unlock()
+	gsb.mu.Lock()
+	// Forward update to the appropriate child.  Even if there is a pending
+	// balancer, the current balancer should continue to get SubConn updates to
+	// maintain the proper state while the pending is still connecting.
+	var balToUpdate *balancerWrapper
+	if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
+		balToUpdate = gsb.balancerCurrent
+	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
+		balToUpdate = gsb.balancerPending
+	}
+	if balToUpdate == nil {
+		// SubConn belonged to a stale lb policy that has not yet fully closed,
+		// or the balancer was already closed.
+		gsb.mu.Unlock()
+		return
+	}
+	if state.ConnectivityState == connectivity.Shutdown {
+		delete(balToUpdate.subconns, sc)
+	}
+	gsb.mu.Unlock()
+	if cb != nil {
+		cb(state)
+	} else {
+		balToUpdate.UpdateSubConnState(sc, state)
+	}
+}
+
+// UpdateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	gsb.updateSubConnState(sc, state, nil)
+}
+
+// Close closes any active child balancers.
+func (gsb *Balancer) Close() {
+	gsb.mu.Lock()
+	gsb.closed = true
+	currentBalancerToClose := gsb.balancerCurrent
+	gsb.balancerCurrent = nil
+	pendingBalancerToClose := gsb.balancerPending
+	gsb.balancerPending = nil
+	gsb.mu.Unlock()
+
+	currentBalancerToClose.Close()
+	pendingBalancerToClose.Close()
+}
+
+// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
+// methods to help cleanup SubConns created by the wrapped balancer.
+//
+// It implements the balancer.ClientConn interface and is passed down in that
+// capacity to the wrapped balancer. It maintains a set of subConns created by
+// the wrapped balancer and calls from the latter to create/update/shutdown
+// SubConns update this set before being forwarded to the parent ClientConn.
+// State updates from the wrapped balancer can result in invocation of the
+// graceful switch logic.
+type balancerWrapper struct {
+	balancer.Balancer
+	gsb     *Balancer
+	builder balancer.Builder
+
+	lastState balancer.State
+	subconns  map[balancer.SubConn]bool // subconns created by this balancer
+}
+
+// Close closes the underlying LB policy and shuts down the subconns it
+// created. bw must not be referenced via balancerCurrent or balancerPending in
+// gsb when called. gsb.mu must not be held.  Does not panic with a nil
+// receiver.
+func (bw *balancerWrapper) Close() {
+	// bw can be nil when there is no current or pending balancer to close;
+	// Close is then a no-op.
+	if bw == nil {
+		return
+	}
+	// There is no need to protect this read with a mutex, as Close() cannot be
+	// called concurrently with the write in SwitchTo(). The callsites of
+	// Close() for this balancer in the graceful switch Balancer are never
+	// reached until SwitchTo() returns.
+	bw.Balancer.Close()
+	bw.gsb.mu.Lock()
+	for sc := range bw.subconns {
+		sc.Shutdown()
+	}
+	bw.gsb.mu.Unlock()
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+	// Hold the mutex for this entire call to ensure it cannot occur
+	// concurrently with other updateState() calls. This causes updates to
+	// lastState and calls to cc.UpdateState to happen atomically.
+	bw.gsb.mu.Lock()
+	defer bw.gsb.mu.Unlock()
+	bw.lastState = state
+
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		return
+	}
+
+	if bw == bw.gsb.balancerCurrent {
+		// In the case that the current balancer exits READY, and there is a pending
+		// balancer, you can forward the pending balancer's cached State up to
+		// ClientConn and swap the pending into the current. This is because there
+		// is no reason to gracefully switch from and keep using the old policy as
+		// the ClientConn is not connected to any backends.
+		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
+			bw.gsb.swap()
+			return
+		}
+		// Even if there is a pending balancer waiting to be gracefully switched to,
+		// continue to forward current balancer updates to the ClientConn. Ignoring
+		// the state + picker from the current balancer would cause undefined
+		// behavior from the current LB policy's perspective. Also, the current LB
+		// is still being used by grpc to choose SubConns per RPC, and thus should
+		// use the most updated form of the current balancer.
+		bw.gsb.cc.UpdateState(state)
+		return
+	}
+	// This method is now dealing with a state update from the pending balancer.
+	// If the current balancer is currently in a state other than READY, the new
+	// policy can be swapped into place immediately. This is because there is no
+	// reason to gracefully switch from and keep using the old policy as the
+	// ClientConn is not connected to any backends.
+	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
+		bw.gsb.swap()
+	}
+}
+
+func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.gsb.mu.Unlock()
+
+	var sc balancer.SubConn
+	oldListener := opts.StateListener
+	opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) }
+	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
+	if err != nil {
+		return nil, err
+	}
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
+		sc.Shutdown()
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.subconns[sc] = true
+	bw.gsb.mu.Unlock()
+	return sc, nil
+}
+
+func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
+	// Ignore ResolveNow requests from anything other than the most recent
+	// balancer, because older balancers were already removed from the config.
+	if bw != bw.gsb.latestBalancer() {
+		return
+	}
+	bw.gsb.cc.ResolveNow(opts)
+}
+
+func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// Note: existing third party balancers may call this, so it must remain
+	// until RemoveSubConn is fully removed.
+	sc.Shutdown()
+}
+
+func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return
+	}
+	bw.gsb.mu.Unlock()
+	bw.gsb.cc.UpdateAddresses(sc, addrs)
+}
+
+func (bw *balancerWrapper) Target() string {
+	return bw.gsb.cc.Target()
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/balancerload/load.go b/hack/tools/vendor/google.golang.org/grpc/internal/balancerload/load.go
new file mode 100644
index 0000000000..94a08d6875
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/balancerload/load.go
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package balancerload defines APIs to parse server loads in trailers. The
+// parsed loads are sent to balancers in DoneInfo.
+package balancerload
+
+import (
+	"google.golang.org/grpc/metadata"
+)
+
+// Parser converts loads from metadata into a concrete type.
+type Parser interface {
+	// Parse parses loads from metadata.
+	Parse(md metadata.MD) any
+}
+
+var parser Parser
+
+// SetParser sets the load parser.
+//
+// Not mutex-protected; should be called before any gRPC functions.
+func SetParser(lr Parser) {
+	parser = lr
+}
+
+// Parse calls parser.Parse(), or returns nil if no parser is set.
+func Parse(md metadata.MD) any {
+	if parser == nil {
+		return nil
+	}
+	return parser.Parse(md)
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
new file mode 100644
index 0000000000..755fdebc1b
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -0,0 +1,192 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package binarylog implements binary logging as defined in
+// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
+package binarylog
+
+import (
+	"fmt"
+	"os"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+)
+
+var grpclogLogger = grpclog.Component("binarylog")
+
+// Logger specifies MethodLoggers for method names with a Log call that
+// takes a context.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+type Logger interface {
+	GetMethodLogger(methodName string) MethodLogger
+}
+
+// binLogger is the global binary logger for the binary. One of these should be
+// built at init time from the configuration (environment variable or flags).
+//
+// It is used to get a MethodLogger for each individual method.
+var binLogger Logger
+
+// SetLogger sets the binary logger.
+//
+// Only call this at init time.
+func SetLogger(l Logger) {
+	binLogger = l
+}
+
+// GetLogger gets the binary logger.
+//
+// Only call this at init time.
+func GetLogger() Logger {
+	return binLogger
+}
+
+// GetMethodLogger returns the MethodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each MethodLogger returned by this method is a new instance. This is
+// required to generate a sequence id within the call.
+func GetMethodLogger(methodName string) MethodLogger {
+	if binLogger == nil {
+		return nil
+	}
+	return binLogger.GetMethodLogger(methodName)
+}
+
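+// init builds the global binary logger from the GRPC_BINARY_LOG_FILTER
+// environment variable; for example, GRPC_BINARY_LOG_FILTER="*{h:256;m:256}"
+// would log the first 256 bytes of each header and message on every method
+// (see NewLoggerFromConfigString for the full config format).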
+func init() {
+	const envStr = "GRPC_BINARY_LOG_FILTER"
+	configStr := os.Getenv(envStr)
+	binLogger = NewLoggerFromConfigString(configStr)
+}
+
+// MethodLoggerConfig contains the settings for the logging behavior of a method
+// logger. Currently, it contains the max length of header and message.
+type MethodLoggerConfig struct {
+	// Max length of header and message.
+	Header, Message uint64
+}
+
+// LoggerConfig contains the config for loggers to create method loggers.
+type LoggerConfig struct {
+	All      *MethodLoggerConfig
+	Services map[string]*MethodLoggerConfig
+	Methods  map[string]*MethodLoggerConfig
+
+	Blacklist map[string]struct{}
+}
+
+type logger struct {
+	config LoggerConfig
+}
+
+// NewLoggerFromConfig builds a logger with the given LoggerConfig.
+func NewLoggerFromConfig(config LoggerConfig) Logger {
+	return &logger{config: config}
+}
+
+// newEmptyLogger creates an empty logger. The map fields need to be filled in
+// using the set* functions.
+func newEmptyLogger() *logger {
+	return &logger{}
+}
+
+// Set method logger for "*".
+func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
+	if l.config.All != nil {
+		return fmt.Errorf("conflicting global rules found")
+	}
+	l.config.All = ml
+	return nil
+}
+
+// Set method logger for "service/*".
+//
+// New MethodLogger with same service overrides the old one.
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Services[service]; ok {
+		return fmt.Errorf("conflicting service rules for service %v found", service)
+	}
+	if l.config.Services == nil {
+		l.config.Services = make(map[string]*MethodLoggerConfig)
+	}
+	l.config.Services[service] = ml
+	return nil
+}
+
+// Set method logger for "service/method".
+//
+// New MethodLogger with same method overrides the old one.
+func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Blacklist[method]; ok {
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
+	}
+	if _, ok := l.config.Methods[method]; ok {
+		return fmt.Errorf("conflicting method rules for method %v found", method)
+	}
+	if l.config.Methods == nil {
+		l.config.Methods = make(map[string]*MethodLoggerConfig)
+	}
+	l.config.Methods[method] = ml
+	return nil
+}
+
+// Set blacklist method for "-service/method".
+func (l *logger) setBlacklist(method string) error {
+	if _, ok := l.config.Blacklist[method]; ok {
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
+	}
+	if _, ok := l.config.Methods[method]; ok {
+		return fmt.Errorf("conflicting method rules for method %v found", method)
+	}
+	if l.config.Blacklist == nil {
+		l.config.Blacklist = make(map[string]struct{})
+	}
+	l.config.Blacklist[method] = struct{}{}
+	return nil
+}
+
+// GetMethodLogger returns the MethodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each MethodLogger returned by this method is a new instance. This is
+// required to generate a sequence id within the call.
+func (l *logger) GetMethodLogger(methodName string) MethodLogger {
+	s, m, err := grpcutil.ParseMethod(methodName)
+	if err != nil {
+		grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+		return nil
+	}
+	if ml, ok := l.config.Methods[s+"/"+m]; ok {
+		return NewTruncatingMethodLogger(ml.Header, ml.Message)
+	}
+	if _, ok := l.config.Blacklist[s+"/"+m]; ok {
+		return nil
+	}
+	if ml, ok := l.config.Services[s]; ok {
+		return NewTruncatingMethodLogger(ml.Header, ml.Message)
+	}
+	if l.config.All == nil {
+		return nil
+	}
+	return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message)
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
new file mode 100644
index 0000000000..1ee00a39ac
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file contains variables/functions that are exported for testing
+// only.
+//
+// An ideal way would be to put these in a *_test.go file in the binarylog
+// package, but this doesn't work with staticcheck under go modules. The error was:
+// "MdToMetadataProto not declared by package binarylog". This could be caused
+// by the way staticcheck looks for files for a certain package, which doesn't
+// support *_test.go files.
+//
+// Move those to binary_test.go when staticcheck is fixed.
+
+package binarylog
+
+var (
+	// AllLogger is a logger that logs all headers/messages for all RPCs. It's
+	// for testing only.
+	AllLogger = NewLoggerFromConfigString("*")
+	// MdToMetadataProto converts metadata to a binary logging proto message.
+	// It's for testing only.
+	MdToMetadataProto = mdToMetadataProto
+	// AddrToProto converts an address to a binary logging proto message. It's
+	// for testing only.
+	AddrToProto = addrToProto
+)
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
new file mode 100644
index 0000000000..f9e80e27ab
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -0,0 +1,208 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// NewLoggerFromConfigString reads the string and builds a logger. It can be used
+// to build a new logger and assign it to binarylog.Logger.
+//
+// Example filter config strings:
+//   - "" Nothing will be logged
+//   - "*" All headers and messages will be fully logged.
+//   - "*{h}" Only headers will be logged.
+//   - "*{m:256}" Only the first 256 bytes of each message will be logged.
+//   - "Foo/*" Logs every method in service Foo
+//   - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
+//   - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
+//     /Foo/Bar, logs all headers and messages in every other method in service
+//     Foo.
+//
+// If two configs exist for the same method or service, the one specified
+// later overrides the previous config.
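+//
+// A usage sketch (hypothetical service/method names):
+//
+//	l := NewLoggerFromConfigString("Foo/*{h:256;m:512},-Foo/Bar")
+//	ml := l.GetMethodLogger("/Foo/Baz") // truncates headers to 256 and messages to 512 bytes
+//	// l.GetMethodLogger("/Foo/Bar") returns nil because the method is blacklisted.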
+func NewLoggerFromConfigString(s string) Logger {
+	if s == "" {
+		return nil
+	}
+	l := newEmptyLogger()
+	methods := strings.Split(s, ",")
+	for _, method := range methods {
+		if err := l.fillMethodLoggerWithConfigString(method); err != nil {
+			grpclogLogger.Warningf("failed to parse binary log config: %v", err)
+			return nil
+		}
+	}
+	return l
+}
+
+// fillMethodLoggerWithConfigString parses config, creates a
+// TruncatingMethodLogger, and adds it to the right map in the logger.
+func (l *logger) fillMethodLoggerWithConfigString(config string) error {
+	// "" is invalid.
+	if config == "" {
+		return errors.New("empty string is not a valid method binary logging config")
+	}
+
+	// "-service/method", blacklist, no * or {} allowed.
+	if config[0] == '-' {
+		s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
+		if err != nil {
+			return fmt.Errorf("invalid config: %q, %v", config, err)
+		}
+		if m == "*" {
+			return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
+		}
+		if suffix != "" {
+			return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
+		}
+		if err := l.setBlacklist(s + "/" + m); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+		return nil
+	}
+
+	// "*{h:256;m:256}"
+	if config[0] == '*' {
+		hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
+		if err != nil {
+			return fmt.Errorf("invalid config: %q, %v", config, err)
+		}
+		if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+		return nil
+	}
+
+	s, m, suffix, err := parseMethodConfigAndSuffix(config)
+	if err != nil {
+		return fmt.Errorf("invalid config: %q, %v", config, err)
+	}
+	hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
+	if err != nil {
+		return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
+	}
+	if m == "*" {
+		if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+	} else {
+		if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
+			return fmt.Errorf("invalid config: %v", err)
+		}
+	}
+	return nil
+}
+
+const (
+	// TODO: this const is only used by env_config now. But could be useful for
+	// other config. Move to binarylog.go if necessary.
+	maxUInt = ^uint64(0)
+
+	// For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
+	// expected output.
+	longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`
+
+	// For suffix from above, "{h:123,m:123}". See test for expected output.
+	optionalLengthRegexpStr      = `(?::(\d+))?` // Optional ":123".
+	headerConfigRegexpStr        = `^{h` + optionalLengthRegexpStr + `}$`
+	messageConfigRegexpStr       = `^{m` + optionalLengthRegexpStr + `}$`
+	headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
+)
+
+var (
+	longMethodConfigRegexp    = regexp.MustCompile(longMethodConfigRegexpStr)
+	headerConfigRegexp        = regexp.MustCompile(headerConfigRegexpStr)
+	messageConfigRegexp       = regexp.MustCompile(messageConfigRegexpStr)
+	headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
+)
+
+// Turn "service/method{h;m}" into "service", "method", "{h;m}".
+func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
+	// Regexp result:
+	//
+	// in:  "p.s/m{h:123,m:123}",
+	// out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
+	match := longMethodConfigRegexp.FindStringSubmatch(c)
+	if match == nil {
+		return "", "", "", fmt.Errorf("%q contains invalid substring", c)
+	}
+	service = match[1]
+	method = match[2]
+	suffix = match[3]
+	return
+}
+
+// Turn "{h:123;m:345}" into 123, 345.
+//
+// Return maxUInt if length is unspecified.
+func parseHeaderMessageLengthConfig(c string) (hdrLen, msgLen uint64, err error) {
+	if c == "" {
+		return maxUInt, maxUInt, nil
+	}
+	// Header config only.
+	if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
+		if s := match[1]; s != "" {
+			hdrLen, err = strconv.ParseUint(s, 10, 64)
+			if err != nil {
+				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+			}
+			return hdrLen, 0, nil
+		}
+		return maxUInt, 0, nil
+	}
+
+	// Message config only.
+	if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
+		if s := match[1]; s != "" {
+			msgLen, err = strconv.ParseUint(s, 10, 64)
+			if err != nil {
+				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+			}
+			return 0, msgLen, nil
+		}
+		return 0, maxUInt, nil
+	}
+
+	// Header and message config both.
+	if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
+		// Both hdr and msg are specified, but one or both of them might be empty.
+		hdrLen = maxUInt
+		msgLen = maxUInt
+		if s := match[1]; s != "" {
+			hdrLen, err = strconv.ParseUint(s, 10, 64)
+			if err != nil {
+				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+			}
+		}
+		if s := match[2]; s != "" {
+			msgLen, err = strconv.ParseUint(s, 10, 64)
+			if err != nil {
+				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+			}
+		}
+		return hdrLen, msgLen, nil
+	}
+	return 0, 0, fmt.Errorf("%q contains invalid substring", c)
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
new file mode 100644
index 0000000000..9669328914
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -0,0 +1,446 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"context"
+	"net"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/durationpb"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+type callIDGenerator struct {
+	id uint64
+}
+
+func (g *callIDGenerator) next() uint64 {
+	id := atomic.AddUint64(&g.id, 1)
+	return id
+}
+
+// reset is for testing only, and doesn't need to be thread safe.
+func (g *callIDGenerator) reset() {
+	g.id = 0
+}
+
+var idGen callIDGenerator
+
+// MethodLogger is the sub-logger for each method.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+type MethodLogger interface {
+	Log(context.Context, LogEntryConfig)
+}
+
+// TruncatingMethodLogger is a method logger that truncates headers and messages
+// based on configured fields.
+type TruncatingMethodLogger struct {
+	headerMaxLen, messageMaxLen uint64
+
+	callID          uint64
+	idWithinCallGen *callIDGenerator
+
+	sink Sink // TODO(blog): make this pluggable.
+}
+
+// NewTruncatingMethodLogger returns a new truncating method logger.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
+	return &TruncatingMethodLogger{
+		headerMaxLen:  h,
+		messageMaxLen: m,
+
+		callID:          idGen.next(),
+		idWithinCallGen: &callIDGenerator{},
+
+		sink: DefaultSink, // TODO(blog): make it pluggable.
+	}
+}
+
+// Build is an internal only method for building the proto message out of the
+// input event. It's made public to enable other libraries to reuse as much logic
+// in TruncatingMethodLogger as possible.
+func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
+	m := c.toProto()
+	timestamp := timestamppb.Now()
+	m.Timestamp = timestamp
+	m.CallId = ml.callID
+	m.SequenceIdWithinCall = ml.idWithinCallGen.next()
+
+	switch pay := m.Payload.(type) {
+	case *binlogpb.GrpcLogEntry_ClientHeader:
+		m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
+	case *binlogpb.GrpcLogEntry_ServerHeader:
+		m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
+	case *binlogpb.GrpcLogEntry_Message:
+		m.PayloadTruncated = ml.truncateMessage(pay.Message)
+	}
+	return m
+}
+
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) {
+	ml.sink.Write(ml.Build(c))
+}
+
+func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
+	if ml.headerMaxLen == maxUInt {
+		return false
+	}
+	var (
+		bytesLimit = ml.headerMaxLen
+		index      int
+	)
+	// At the end of the loop, index will be the first entry where the total
+	// size is greater than the limit:
+	//
+	// len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
+	for ; index < len(mdPb.Entry); index++ {
+		entry := mdPb.Entry[index]
+		if entry.Key == "grpc-trace-bin" {
+			// "grpc-trace-bin" is a special key. It's kept in the log entry,
+			// but not counted towards the size limit.
+			continue
+		}
+		currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
+		if currentEntryLen > bytesLimit {
+			break
+		}
+		bytesLimit -= currentEntryLen
+	}
+	truncated = index < len(mdPb.Entry)
+	mdPb.Entry = mdPb.Entry[:index]
+	return truncated
+}
+
+func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
+	if ml.messageMaxLen == maxUInt {
+		return false
+	}
+	if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
+		return false
+	}
+	msgPb.Data = msgPb.Data[:ml.messageMaxLen]
+	return true
+}
+
+// LogEntryConfig represents the configuration for a binary log entry.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+type LogEntryConfig interface {
+	toProto() *binlogpb.GrpcLogEntry
+}
+
+// ClientHeader configs the binary log entry to be a ClientHeader entry.
+type ClientHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	MethodName   string
+	Authority    string
+	Timeout      time.Duration
+	// PeerAddr is required only when it's on server side.
+	PeerAddr net.Addr
+}
+
+func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
+	// This function doesn't need to set all the fields (e.g. seq ID). The Log
+	// function will set the fields when necessary.
+	clientHeader := &binlogpb.ClientHeader{
+		Metadata:   mdToMetadataProto(c.Header),
+		MethodName: c.MethodName,
+		Authority:  c.Authority,
+	}
+	if c.Timeout > 0 {
+		clientHeader.Timeout = durationpb.New(c.Timeout)
+	}
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
+		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
+			ClientHeader: clientHeader,
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ServerHeader configs the binary log entry to be a ServerHeader entry.
+type ServerHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	// PeerAddr is required only when it's on client side.
+	PeerAddr net.Addr
+}
+
+func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
+		Payload: &binlogpb.GrpcLogEntry_ServerHeader{
+			ServerHeader: &binlogpb.ServerHeader{
+				Metadata: mdToMetadataProto(c.Header),
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ClientMessage configs the binary log entry to be a ClientMessage entry.
+type ClientMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message any
+}
+
+func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
+	}
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
+		Payload: &binlogpb.GrpcLogEntry_Message{
+			Message: &binlogpb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerMessage configs the binary log entry to be a ServerMessage entry.
+type ServerMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message any
+}
+
+func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
+	}
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
+		Payload: &binlogpb.GrpcLogEntry_Message{
+			Message: &binlogpb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry.
+type ClientHalfClose struct {
+	OnClientSide bool
+}
+
+func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
+	ret := &binlogpb.GrpcLogEntry{
+		Type:    binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
+		Payload: nil, // No payload here.
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerTrailer configs the binary log entry to be a ServerTrailer entry.
+type ServerTrailer struct {
+	OnClientSide bool
+	Trailer      metadata.MD
+	// Err is the status error.
+	Err error
+	// PeerAddr is required only when it's on client side and the RPC is trailer
+	// only.
+	PeerAddr net.Addr
+}
+
+func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
+	st, ok := status.FromError(c.Err)
+	if !ok {
+		grpclogLogger.Info("binarylogging: error in trailer is not a status error")
+	}
+	var (
+		detailsBytes []byte
+		err          error
+	)
+	stProto := st.Proto()
+	if stProto != nil && len(stProto.Details) != 0 {
+		detailsBytes, err = proto.Marshal(stProto)
+		if err != nil {
+			grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
+		}
+	}
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
+		Payload: &binlogpb.GrpcLogEntry_Trailer{
+			Trailer: &binlogpb.Trailer{
+				Metadata:      mdToMetadataProto(c.Trailer),
+				StatusCode:    uint32(st.Code()),
+				StatusMessage: st.Message(),
+				StatusDetails: detailsBytes,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// Cancel configs the binary log entry to be a Cancel entry.
+type Cancel struct {
+	OnClientSide bool
+}
+
+func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
+	ret := &binlogpb.GrpcLogEntry{
+		Type:    binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
+		Payload: nil,
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// metadataKeyOmit returns whether the metadata entry with this key should be
+// omitted.
+func metadataKeyOmit(key string) bool {
+	switch key {
+	case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
+		return true
+	case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
+		return false
+	}
+	return strings.HasPrefix(key, "grpc-")
+}
+
+func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
+	ret := &binlogpb.Metadata{}
+	for k, vv := range md {
+		if metadataKeyOmit(k) {
+			continue
+		}
+		for _, v := range vv {
+			ret.Entry = append(ret.Entry,
+				&binlogpb.MetadataEntry{
+					Key:   k,
+					Value: []byte(v),
+				},
+			)
+		}
+	}
+	return ret
+}
+
+func addrToProto(addr net.Addr) *binlogpb.Address {
+	ret := &binlogpb.Address{}
+	switch a := addr.(type) {
+	case *net.TCPAddr:
+		if a.IP.To4() != nil {
+			ret.Type = binlogpb.Address_TYPE_IPV4
+		} else if a.IP.To16() != nil {
+			ret.Type = binlogpb.Address_TYPE_IPV6
+		} else {
+			ret.Type = binlogpb.Address_TYPE_UNKNOWN
+			// Do not set address and port fields.
+			break
+		}
+		ret.Address = a.IP.String()
+		ret.IpPort = uint32(a.Port)
+	case *net.UnixAddr:
+		ret.Type = binlogpb.Address_TYPE_UNIX
+		ret.Address = a.String()
+	default:
+		ret.Type = binlogpb.Address_TYPE_UNKNOWN
+	}
+	return ret
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/sink.go
new file mode 100644
index 0000000000..9ea598b14c
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -0,0 +1,170 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"bufio"
+	"encoding/binary"
+	"io"
+	"sync"
+	"time"
+
+	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+	"google.golang.org/protobuf/proto"
+)
+
+var (
+	// DefaultSink is the sink where the logs will be written to. It's exported
+	// for the binarylog package to update.
+	DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+)
+
+// Sink writes log entries into the binary log sink.
+//
+// sink is a copy of the exported binarylog.Sink, to avoid circular dependency.
+type Sink interface {
+	// Write will be called to write the log entry into the sink.
+	//
+	// It should be thread-safe so it can be called in parallel.
+	Write(*binlogpb.GrpcLogEntry) error
+	// Close will be called when the Sink is replaced by a new Sink.
+	Close() error
+}
+
+type noopSink struct{}
+
+func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
+func (ns *noopSink) Close() error                       { return nil }
+
+// newWriterSink creates a binary log sink with the given writer.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4 byte big endian unsigned integer as the length.
+//
+// No buffering is done; Close() doesn't try to close the writer.
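+//
+// A matching reader would decode the length prefix first (a sketch, for some
+// io.Reader r):
+//
+//	var hdr [4]byte
+//	if _, err := io.ReadFull(r, hdr[:]); err == nil {
+//		msg := make([]byte, binary.BigEndian.Uint32(hdr[:]))
+//		io.ReadFull(r, msg) // msg now holds one marshaled GrpcLogEntry
+//	}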
+func newWriterSink(w io.Writer) Sink {
+	return &writerSink{out: w}
+}
+
+type writerSink struct {
+	out io.Writer
+}
+
+func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
+	b, err := proto.Marshal(e)
+	if err != nil {
+		grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
+		return err
+	}
+	hdr := make([]byte, 4)
+	binary.BigEndian.PutUint32(hdr, uint32(len(b)))
+	if _, err := ws.out.Write(hdr); err != nil {
+		return err
+	}
+	if _, err := ws.out.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ws *writerSink) Close() error { return nil }
+
+type bufferedSink struct {
+	mu             sync.Mutex
+	closer         io.Closer
+	out            Sink          // out is built on buf.
+	buf            *bufio.Writer // buf is kept for flush.
+	flusherStarted bool
+
+	writeTicker *time.Ticker
+	done        chan struct{}
+}
+
+func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if !fs.flusherStarted {
+		// Start the write loop when Write is called.
+		fs.startFlushGoroutine()
+		fs.flusherStarted = true
+	}
+	if err := fs.out.Write(e); err != nil {
+		return err
+	}
+	return nil
+}
+
+const (
+	bufFlushDuration = 60 * time.Second
+)
+
+func (fs *bufferedSink) startFlushGoroutine() {
+	fs.writeTicker = time.NewTicker(bufFlushDuration)
+	go func() {
+		for {
+			select {
+			case <-fs.done:
+				return
+			case <-fs.writeTicker.C:
+			}
+			fs.mu.Lock()
+			if err := fs.buf.Flush(); err != nil {
+				grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+			}
+			fs.mu.Unlock()
+		}
+	}()
+}
+
+func (fs *bufferedSink) Close() error {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if fs.writeTicker != nil {
+		fs.writeTicker.Stop()
+	}
+	close(fs.done)
+	if err := fs.buf.Flush(); err != nil {
+		grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+	}
+	if err := fs.closer.Close(); err != nil {
+		grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
+	}
+	if err := fs.out.Close(); err != nil {
+		grpclogLogger.Warningf("failed to close the Sink: %v", err)
+	}
+	return nil
+}
+
+// NewBufferedSink creates a binary log sink with the given WriteCloser.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4 byte big endian unsigned integer as the length.
+//
+// Content is kept in a buffer, and is flushed every 60 seconds.
+//
+// Close closes the WriteCloser.
+func NewBufferedSink(o io.WriteCloser) Sink {
+	bufW := bufio.NewWriter(o)
+	return &bufferedSink{
+		closer: o,
+		out:    newWriterSink(bufW),
+		buf:    bufW,
+		done:   make(chan struct{}),
+	}
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/hack/tools/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
new file mode 100644
index 0000000000..11f91668ac
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package buffer provides an implementation of an unbounded buffer.
+package buffer
+
+import (
+	"errors"
+	"sync"
+)
+
+// Unbounded is an implementation of an unbounded buffer which does not use
+// extra goroutines. This is typically used for passing updates from one entity
+// to another within gRPC.
+//
+// All methods on this type are thread-safe and don't block on anything except
+// the underlying mutex used for synchronization.
+//
+// Unbounded supports values of any type to be stored in it by using a channel
+// of `any`. This means that a call to Put() incurs an extra memory allocation,
+// and also that users need a type assertion while reading. For performance
+// critical code paths, using Unbounded is strongly discouraged and defining a
+// new type specific implementation of this buffer is preferred. See
+// internal/transport/transport.go for an example of this.
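+//
+// A typical consumer loop looks like the following sketch, where process is a
+// placeholder for application logic:
+//
+//	for v := range b.Get() {
+//		b.Load() // release the next buffered value, if any
+//		process(v)
+//	}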
+type Unbounded struct {
+	c       chan any
+	closed  bool
+	closing bool
+	mu      sync.Mutex
+	backlog []any
+}
+
+// NewUnbounded returns a new instance of Unbounded.
+func NewUnbounded() *Unbounded {
+	return &Unbounded{c: make(chan any, 1)}
+}
+
+var errBufferClosed = errors.New("Put called on closed buffer.Unbounded")
+
+// Put adds t to the unbounded buffer.
+func (b *Unbounded) Put(t any) error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.closing {
+		return errBufferClosed
+	}
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- t:
+			return nil
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, t)
+	return nil
+}
+
+// Load sends the earliest buffered data, if any, onto the read channel returned
+// by Get(). Users are expected to call this every time they successfully read a
+// value from the read channel.
+func (b *Unbounded) Load() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = nil
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	} else if b.closing && !b.closed {
+		close(b.c)
+	}
+}
+
+// Get returns a read channel on which values added to the buffer, via Put(),
+// are sent on.
+//
+// Upon reading a value from this channel, users are expected to call Load() to
+// send the next buffered value onto the channel if there is any.
+//
+// If the unbounded buffer is closed, the read channel returned by this method
+// is closed after all data is drained.
+func (b *Unbounded) Get() <-chan any {
+	return b.c
+}
+
+// Close closes the unbounded buffer. No subsequent data may be Put(), and the
+// channel returned from Get() will be closed after all the data is read and
+// Load() is called for the final time.
+func (b *Unbounded) Close() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.closing {
+		return
+	}
+	b.closing = true
+	if len(b.backlog) == 0 {
+		b.closed = true
+		close(b.c)
+	}
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/channelz/channel.go b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/channel.go
new file mode 100644
index 0000000000..d7e9e1d54e
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/channel.go
@@ -0,0 +1,255 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	"google.golang.org/grpc/connectivity"
+)
+
+// Channel represents a channel within channelz, which includes metrics and
+// internal channelz data, such as channelz id, child list, etc.
+type Channel struct {
+	Entity
+	// ID is the channelz id of this channel.
+	ID int64
+	// RefName is the human readable reference string of this channel.
+	RefName string
+
+	closeCalled bool
+	nestedChans map[int64]string
+	subChans    map[int64]string
+	Parent      *Channel
+	trace       *ChannelTrace
+	// traceRefCount is the number of trace events that reference this channel.
+	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
+	traceRefCount int32
+
+	ChannelMetrics ChannelMetrics
+}
+
+// Implemented to make Channel implement the Identifier interface used for
+// nesting.
+func (c *Channel) channelzIdentifier() {}
+
+func (c *Channel) String() string {
+	if c.Parent == nil {
+		return fmt.Sprintf("Channel #%d", c.ID)
+	}
+	return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID)
+}
+
+func (c *Channel) id() int64 {
+	return c.ID
+}
+
+// SubChans returns a copy of the map of sub-channels of this channel.
+func (c *Channel) SubChans() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(c.subChans)
+}
+
+// NestedChans returns a copy of the map of nested channels of this channel.
+func (c *Channel) NestedChans() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(c.nestedChans)
+}
+
+// Trace returns a copy of this channel's trace data.
+func (c *Channel) Trace() *ChannelTrace {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return c.trace.copy()
+}
+
+// ChannelMetrics holds the connectivity state, target, and call counts of a
+// channel.
+type ChannelMetrics struct {
+	// The current connectivity state of the channel.
+	State atomic.Pointer[connectivity.State]
+	// The target this channel originally tried to connect to. May be absent.
+	Target atomic.Pointer[string]
+	// The number of calls started on the channel.
+	CallsStarted atomic.Int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded atomic.Int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed atomic.Int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp atomic.Int64
+}
+
+// CopyFrom copies the metrics in o to c.  For testing only.
+func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) {
+	c.State.Store(o.State.Load())
+	c.Target.Store(o.Target.Load())
+	c.CallsStarted.Store(o.CallsStarted.Load())
+	c.CallsSucceeded.Store(o.CallsSucceeded.Load())
+	c.CallsFailed.Store(o.CallsFailed.Load())
+	c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
+
+// Equal returns true iff the metrics of c are the same as the metrics of o.
+// For testing only.
+func (c *ChannelMetrics) Equal(o any) bool {
+	oc, ok := o.(*ChannelMetrics)
+	if !ok {
+		return false
+	}
+	if (c.State.Load() == nil) != (oc.State.Load() == nil) {
+		return false
+	}
+	if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() {
+		return false
+	}
+	if (c.Target.Load() == nil) != (oc.Target.Load() == nil) {
+		return false
+	}
+	if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() {
+		return false
+	}
+	return c.CallsStarted.Load() == oc.CallsStarted.Load() &&
+		c.CallsFailed.Load() == oc.CallsFailed.Load() &&
+		c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() &&
+		c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load()
+}
+
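+// strFromPointer returns the string pointed to by s, or the empty string if s is nil.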
+func strFromPointer(s *string) string {
+	if s == nil {
+		return ""
+	}
+	return *s
+}
+
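+// String returns a human-readable summary of the channel metrics.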
+func (c *ChannelMetrics) String() string {
+	return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
+		c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
+	)
+}
+
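+// NewChannelMetricForTesting creates a ChannelMetrics populated with the given values.  For testing only.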
+func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
+	c := &ChannelMetrics{}
+	c.State.Store(&state)
+	c.Target.Store(&target)
+	c.CallsStarted.Store(started)
+	c.CallsSucceeded.Store(succeeded)
+	c.CallsFailed.Store(failed)
+	c.LastCallStartedTimestamp.Store(timestamp)
+	return c
+}
+
+func (c *Channel) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *SubChannel:
+		c.subChans[id] = v.RefName
+	case *Channel:
+		c.nestedChans[id] = v.RefName
+	default:
+		logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+	}
+}
+
+func (c *Channel) deleteChild(id int64) {
+	delete(c.subChans, id)
+	delete(c.nestedChans, id)
+	c.deleteSelfIfReady()
+}
+
+func (c *Channel) triggerDelete() {
+	c.closeCalled = true
+	c.deleteSelfIfReady()
+}
+
+func (c *Channel) getParentID() int64 {
+	if c.Parent == nil {
+		return -1
+	}
+	return c.Parent.ID
+}
+
+// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
+// deleting the channel reference from its parent's child list.
+//
+// In order for a channel to be deleted from the tree, it must meet two criteria: removal of the
+// corresponding grpc object must have been invoked, and the channel must not have any children left.
+//
+// The returned boolean value indicates whether the channel was successfully deleted from the tree.
+func (c *Channel) deleteSelfFromTree() (deleted bool) {
+	if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
+		return false
+	}
+	// not top channel
+	if c.Parent != nil {
+		c.Parent.deleteChild(c.ID)
+	}
+	return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
+// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
+// channel, and its memory will be garbage collected.
+//
+// The trace reference count of the channel must be 0 in order for it to be deleted from the map. The
+// channel tracing gRFC specifies that as long as some other trace has a reference to an entity, the
+// trace of the referenced entity must not be deleted. In order to release the resource allocated
+// by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the channel can be safely deleted from the map.
+func (c *Channel) deleteSelfFromMap() (delete bool) {
+	return c.getTraceRefCount() == 0
+}
+
+// deleteSelfIfReady tries to delete the channel itself from the channelz database.
+// The delete process includes two steps:
+//  1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
+//     parent's child list.
+//  2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
+//     will return entry not found error.
+func (c *Channel) deleteSelfIfReady() {
+	if !c.deleteSelfFromTree() {
+		return
+	}
+	if !c.deleteSelfFromMap() {
+		return
+	}
+	db.deleteEntry(c.ID)
+	c.trace.clear()
+}
+
+func (c *Channel) getChannelTrace() *ChannelTrace {
+	return c.trace
+}
+
+func (c *Channel) incrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, 1)
+}
+
+func (c *Channel) decrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, -1)
+}
+
+func (c *Channel) getTraceRefCount() int {
+	i := atomic.LoadInt32(&c.traceRefCount)
+	return int(i)
+}
+
+func (c *Channel) getRefName() string {
+	return c.RefName
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
new file mode 100644
index 0000000000..64c791953d
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
@@ -0,0 +1,395 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child with channelz id id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from the channelz database. However, if
+	// the child list is not empty, then deletion from the database is on hold
+	// until the last child is deleted from the database.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before,
+	// and whether the child list is now empty. If both conditions are met, it
+	// deletes self from the database.
+	deleteSelfIfReady()
+	// getParentID returns the parent ID of the entry. A parent ID of 0 means no parent.
+	getParentID() int64
+	Entity
+}
+
+// channelMap is the storage data structure for channelz.
+//
+// Methods of channelMap can be divided into two categories with respect to
+// locking.
+//
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is already held.
+//
+// Methods of the second type must always be called from within a method of the
+// first type.
+type channelMap struct {
+	mu               sync.RWMutex
+	topLevelChannels map[int64]struct{}
+	channels         map[int64]*Channel
+	subChannels      map[int64]*SubChannel
+	sockets          map[int64]*Socket
+	servers          map[int64]*Server
+}
+
+func newChannelMap() *channelMap {
+	return &channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*Channel),
+		subChannels:      make(map[int64]*SubChannel),
+		sockets:          make(map[int64]*Socket),
+		servers:          make(map[int64]*Server),
+	}
+}
+
+func (c *channelMap) addServer(id int64, s *Server) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	s.cm = c
+	c.servers[id] = s
+}
+
+func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	cn.trace.cm = c
+	c.channels[id] = cn
+	if isTopChannel {
+		c.topLevelChannels[id] = struct{}{}
+	} else if p := c.channels[pid]; p != nil {
+		p.addChild(id, cn)
+	} else {
+		logger.Infof("channel %d references invalid parent ID %d", id, pid)
+	}
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	sc.trace.cm = c
+	c.subChannels[id] = sc
+	if p := c.channels[pid]; p != nil {
+		p.addChild(id, sc)
+	} else {
+		logger.Infof("subchannel %d references invalid parent ID %d", id, pid)
+	}
+}
+
+func (c *channelMap) addSocket(s *Socket) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	s.cm = c
+	c.sockets[s.ID] = s
+	if s.Parent == nil {
+		// A normal socket without a parent cannot be added to any child list;
+		// return early to avoid a nil type assertion panic below.
+		logger.Infof("normal socket %d has no parent", s.ID)
+		return
+	}
+	s.Parent.(entry).addChild(s.ID, s)
+}
+
+// removeEntry triggers the removal of an entry, which may not actually delete
+// the entry right away: deletion is deferred until all of its children are
+// deleted and no other entity's channel trace references it.  It may lead to a
+// chain of entry deletions. For example, deleting the last socket of a
+// gracefully shutting down server will lead to the server also being deleted.
+func (c *channelMap) removeEntry(id int64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.findEntry(id).triggerDelete()
+}
+
+// tracedChannel represents tracing operations which are present on both
+// channels and subChannels.
+type tracedChannel interface {
+	getChannelTrace() *ChannelTrace
+	incrTraceRefCount()
+	decrTraceRefCount()
+	getRefName() string
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) decrTraceRefCount(id int64) {
+	e := c.findEntry(id)
+	if v, ok := e.(tracedChannel); ok {
+		v.decrTraceRefCount()
+		e.deleteSelfIfReady()
+	}
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) findEntry(id int64) entry {
+	if v, ok := c.channels[id]; ok {
+		return v
+	}
+	if v, ok := c.subChannels[id]; ok {
+		return v
+	}
+	if v, ok := c.servers[id]; ok {
+		return v
+	}
+	if v, ok := c.sockets[id]; ok {
+		return v
+	}
+	return &dummyEntry{idNotFound: id}
+}
+
+// c.mu must be held by the caller.
+//
+// deleteEntry deletes an entry from the channelMap. Before calling this method,
+// the caller must check that this entry is ready to be deleted, i.e.,
+// removeEntry() has been called on it and no children remain.
+func (c *channelMap) deleteEntry(id int64) entry {
+	if v, ok := c.sockets[id]; ok {
+		delete(c.sockets, id)
+		return v
+	}
+	if v, ok := c.subChannels[id]; ok {
+		delete(c.subChannels, id)
+		return v
+	}
+	if v, ok := c.channels[id]; ok {
+		delete(c.channels, id)
+		delete(c.topLevelChannels, id)
+		return v
+	}
+	if v, ok := c.servers[id]; ok {
+		delete(c.servers, id)
+		return v
+	}
+	return &dummyEntry{idNotFound: id}
+}
+
+func (c *channelMap) traceEvent(id int64, desc *TraceEvent) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	child := c.findEntry(id)
+	childTC, ok := child.(tracedChannel)
+	if !ok {
+		return
+	}
+	childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
+	if desc.Parent != nil {
+		parent := c.findEntry(child.getParentID())
+		var chanType RefChannelType
+		switch child.(type) {
+		case *Channel:
+			chanType = RefChannel
+		case *SubChannel:
+			chanType = RefSubChannel
+		}
+		if parentTC, ok := parent.(tracedChannel); ok {
+			parentTC.getChannelTrace().append(&traceEvent{
+				Desc:      desc.Parent.Desc,
+				Severity:  desc.Parent.Severity,
+				Timestamp: time.Now(),
+				RefID:     id,
+				RefName:   childTC.getRefName(),
+				RefType:   chanType,
+			})
+			childTC.incrTraceRefCount()
+		}
+	}
+}
+
+type int64Slice []int64
+
+func (s int64Slice) Len() int           { return len(s) }
+func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
+
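+// copyMap returns a shallow copy of m so the result can be read without holding the lock.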
+func copyMap(m map[int64]string) map[int64]string {
+	n := make(map[int64]string)
+	for k, v := range m {
+		n[k] = v
+	}
+	return n
+}
+
+func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) {
+	if maxResults <= 0 {
+		maxResults = EntriesPerPage
+	}
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	l := int64(len(c.topLevelChannels))
+	ids := make([]int64, 0, l)
+
+	for k := range c.topLevelChannels {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	end := true
+	var t []*Channel
+	for _, v := range ids[idx:] {
+		if len(t) == maxResults {
+			end = false
+			break
+		}
+		if cn, ok := c.channels[v]; ok {
+			t = append(t, cn)
+		}
+	}
+	return t, end
+}
+
+func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) {
+	if maxResults <= 0 {
+		maxResults = EntriesPerPage
+	}
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	ids := make([]int64, 0, len(c.servers))
+	for k := range c.servers {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	end := true
+	var s []*Server
+	for _, v := range ids[idx:] {
+		if len(s) == maxResults {
+			end = false
+			break
+		}
+		if svr, ok := c.servers[v]; ok {
+			s = append(s, svr)
+		}
+	}
+	return s, end
+}
+
+func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
+	if maxResults <= 0 {
+		maxResults = EntriesPerPage
+	}
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	svr, ok := c.servers[id]
+	if !ok {
+		// server with id doesn't exist.
+		return nil, true
+	}
+	svrskts := svr.sockets
+	ids := make([]int64, 0, len(svrskts))
+	sks := make([]*Socket, 0, min(len(svrskts), maxResults))
+	for k := range svrskts {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
+	end := true
+	for _, v := range ids[idx:] {
+		if len(sks) == maxResults {
+			end = false
+			break
+		}
+		if ns, ok := c.sockets[v]; ok {
+			sks = append(sks, ns)
+		}
+	}
+	return sks, end
+}
+
+func (c *channelMap) getChannel(id int64) *Channel {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.channels[id]
+}
+
+func (c *channelMap) getSubChannel(id int64) *SubChannel {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.subChannels[id]
+}
+
+func (c *channelMap) getSocket(id int64) *Socket {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.sockets[id]
+}
+
+func (c *channelMap) getServer(id int64) *Server {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.servers[id]
+}
+
+type dummyEntry struct {
+	// dummyEntry is a fake entry to handle entry not found case.
+	idNotFound int64
+	Entity
+}
+
+func (d *dummyEntry) String() string {
+	return fmt.Sprintf("non-existent entity #%d", d.idNotFound)
+}
+
+func (d *dummyEntry) ID() int64 { return d.idNotFound }
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under a race
+	// condition.  For example, there could be a race between ClientConn.Close()
+	// info being propagated to the addrConn and the http2Client:
+	// ClientConn.Close() cancels the context, causing the http2Client to error.
+	// The error info is then caught by the transport monitor before
+	// addrConn.tearDown() is called inside ClientConn.Close(), so the addrConn
+	// creates a new transport. When registering the new transport in channelz,
+	// its parent addrConn could have already been torn down and deleted from
+	// channelz tracking, which is how the code reaches here.
+	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under race condition.
+	// Refer to the example described in addChild().
+	logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+	logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
+}
+
+func (*dummyEntry) getParentID() int64 {
+	return 0
+}
+
+// Entity is implemented by all channelz types.
+type Entity interface {
+	isEntity()
+	fmt.Stringer
+	id() int64
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/funcs.go
new file mode 100644
index 0000000000..078bb81238
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -0,0 +1,230 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz defines internal APIs for enabling channelz service, entry
+// registration/deletion, and accessing channelz data. It also defines channelz
+// metric struct formats.
+package channelz
+
+import (
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/internal"
+)
+
+var (
+	// IDGen is the global channelz entity ID generator.  It should not be used
+	// outside this package except by tests.
+	IDGen IDGenerator
+
+	db = newChannelMap()
+	// EntriesPerPage defines the number of channelz entries to be shown on a web page.
+	EntriesPerPage = 50
+	curState       int32
+)
+
+// TurnOn turns on channelz data collection.
+func TurnOn() {
+	atomic.StoreInt32(&curState, 1)
+}
+
+func init() {
+	internal.ChannelzTurnOffForTesting = func() {
+		atomic.StoreInt32(&curState, 0)
+	}
+}
+
+// IsOn returns whether channelz data collection is on.
+func IsOn() bool {
+	return atomic.LoadInt32(&curState) == 1
+}
+
+// GetTopChannels returns a slice of top-level Channels, along with a boolean
+// indicating whether there are more top-level channels to be queried for.
+//
+// The arg id specifies that only channels with an id at or above it will be
+// included in the result. The returned slice is at most maxResults long (or
+// EntriesPerPage long if maxResults is zero), and is sorted in ascending id order.
+func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) {
+	return db.getTopChannels(id, maxResults)
+}
+
+// GetServers returns a slice of Servers, along with a boolean indicating
+// whether there are more servers to be queried for.
+//
+// The arg id specifies that only servers with an id at or above it will be
+// included in the result. The returned slice is at most maxResults long (or
+// EntriesPerPage long if maxResults is zero), and is sorted in ascending id order.
+func GetServers(id int64, maxResults int) ([]*Server, bool) {
+	return db.getServers(id, maxResults)
+}
+
+// GetServerSockets returns a slice of the normal Sockets of the server
+// identified by id, along with a boolean indicating whether there are more
+// sockets to be queried for.
+//
+// The arg startID specifies that only sockets with an id at or above it will be
+// included in the result. The returned slice is at most maxResults long (or
+// EntriesPerPage long if maxResults is zero), and is sorted in ascending id order.
+func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
+	return db.getServerSockets(id, startID, maxResults)
+}
+
+// GetChannel returns the Channel for the channel (identified by id).
+func GetChannel(id int64) *Channel {
+	return db.getChannel(id)
+}
+
+// GetSubChannel returns the SubChannel for the subchannel (identified by id).
+func GetSubChannel(id int64) *SubChannel {
+	return db.getSubChannel(id)
+}
+
+// GetSocket returns the Socket for the socket (identified by id).
+func GetSocket(id int64) *Socket {
+	return db.getSocket(id)
+}
+
+// GetServer returns the Server for the server (identified by id).
+func GetServer(id int64) *Server {
+	return db.getServer(id)
+}
+
+// RegisterChannel creates and registers a channel in the channelz database
+// with target as its target and reference name, and adds it to the child list
+// of its parent.  parent == nil means no parent.
+//
+// Returns the created Channel, with a unique channelz identifier assigned.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterChannel(parent *Channel, target string) *Channel {
+	id := IDGen.genID()
+
+	if !IsOn() {
+		return &Channel{ID: id}
+	}
+
+	isTopChannel := parent == nil
+
+	cn := &Channel{
+		ID:          id,
+		RefName:     target,
+		nestedChans: make(map[int64]string),
+		subChans:    make(map[int64]string),
+		Parent:      parent,
+		trace:       &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())},
+	}
+	cn.ChannelMetrics.Target.Store(&target)
+	db.addChannel(id, cn, isTopChannel, cn.getParentID())
+	return cn
+}
+
+// RegisterSubChannel creates and registers a subchannel in the channelz
+// database with ref as its reference name, and adds it to the child list of
+// its parent (which must be set).
+//
+// Returns the created SubChannel, with a unique channelz identifier assigned.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSubChannel(parent *Channel, ref string) *SubChannel {
+	id := IDGen.genID()
+	sc := &SubChannel{
+		ID:      id,
+		RefName: ref,
+		parent:  parent,
+	}
+
+	if !IsOn() {
+		return sc
+	}
+
+	sc.sockets = make(map[int64]string)
+	sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}
+	db.addSubChannel(id, sc, parent.ID)
+	return sc
+}
+
+// RegisterServer creates and registers a server in the channelz database. It
+// returns the created Server, with a unique channelz tracking id assigned.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterServer(ref string) *Server {
+	id := IDGen.genID()
+	if !IsOn() {
+		return &Server{ID: id}
+	}
+
+	svr := &Server{
+		RefName:       ref,
+		sockets:       make(map[int64]string),
+		listenSockets: make(map[int64]string),
+		ID:            id,
+	}
+	db.addServer(id, svr)
+	return svr
+}
+
+// RegisterSocket registers the given socket skt in the channelz database, and
+// adds it to the child list of its parent (identified by skt.Parent, which
+// must be set). It returns skt, with a unique channelz tracking id assigned.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSocket(skt *Socket) *Socket {
+	skt.ID = IDGen.genID()
+	if IsOn() {
+		db.addSocket(skt)
+	}
+	return skt
+}
+
+// RemoveEntry removes the entry with the given unique channelz tracking id
+// from the channelz database.
+//
+// If channelz is not turned ON, this function is a no-op.
+func RemoveEntry(id int64) {
+	if !IsOn() {
+		return
+	}
+	db.removeEntry(id)
+}
+
+// IDGenerator is an incrementing atomic that tracks IDs for channelz entities.
+type IDGenerator struct {
+	id int64
+}
+
+// Reset resets the generated ID back to zero.  Should only be used at
+// initialization or by tests sensitive to the ID number.
+func (i *IDGenerator) Reset() {
+	atomic.StoreInt64(&i.id, 0)
+}
+
+func (i *IDGenerator) genID() int64 {
+	return atomic.AddInt64(&i.id, 1)
+}
+
+// Identifier is an opaque channelz identifier used to expose channelz symbols
+// outside of grpc.  Currently only implemented by Channel since no other
+// types require exposure outside grpc.
+type Identifier interface {
+	Entity
+	channelzIdentifier()
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/channelz/logging.go b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/logging.go
new file mode 100644
index 0000000000..ee4d721258
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/logging.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("channelz")
+
+// Info logs and adds a trace event if channelz is on.
+func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtInfo,
+	})
+}
+
+// Infof logs and adds a trace event if channelz is on.
+func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtInfo,
+	})
+}
+
+// Warning logs and adds a trace event if channelz is on.
+func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtWarning,
+	})
+}
+
+// Warningf logs and adds a trace event if channelz is on.
+func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtWarning,
+	})
+}
+
+// Error logs and adds a trace event if channelz is on.
+func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtError,
+	})
+}
+
+// Errorf logs and adds a trace event if channelz is on.
+func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtError,
+	})
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/channelz/server.go b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/server.go
new file mode 100644
index 0000000000..cdfc49d6ea
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/server.go
@@ -0,0 +1,119 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sync/atomic"
+)
+
+// Server is the channelz representation of a server.
+type Server struct {
+	Entity
+	ID      int64
+	RefName string
+
+	ServerMetrics ServerMetrics
+
+	closeCalled   bool
+	sockets       map[int64]string
+	listenSockets map[int64]string
+	cm            *channelMap
+}
+
+// ServerMetrics defines a struct containing metrics for servers.
+type ServerMetrics struct {
+	// The number of incoming calls started on the server.
+	CallsStarted atomic.Int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded atomic.Int64
+	// The number of incoming calls that have completed with a non-OK status.
+	CallsFailed atomic.Int64
+	// The last time a call was started on the server.
+	LastCallStartedTimestamp atomic.Int64
+}
+
+// NewServerMetricsForTesting returns an initialized ServerMetrics.
+func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics {
+	sm := &ServerMetrics{}
+	sm.CallsStarted.Store(started)
+	sm.CallsSucceeded.Store(succeeded)
+	sm.CallsFailed.Store(failed)
+	sm.LastCallStartedTimestamp.Store(timestamp)
+	return sm
+}
+
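+// CopyFrom copies the metrics in o to sm.  For testing only.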
+func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) {
+	sm.CallsStarted.Store(o.CallsStarted.Load())
+	sm.CallsSucceeded.Store(o.CallsSucceeded.Load())
+	sm.CallsFailed.Store(o.CallsFailed.Load())
+	sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
+
+// ListenSockets returns the listening sockets for s.
+func (s *Server) ListenSockets() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(s.listenSockets)
+}
+
+// String returns a printable description of s.
+func (s *Server) String() string {
+	return fmt.Sprintf("Server #%d", s.ID)
+}
+
+func (s *Server) id() int64 {
+	return s.ID
+}
+
+func (s *Server) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *Socket:
+		switch v.SocketType {
+		case SocketTypeNormal:
+			s.sockets[id] = v.RefName
+		case SocketTypeListen:
+			s.listenSockets[id] = v.RefName
+		}
+	default:
+		logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+	}
+}
+
+func (s *Server) deleteChild(id int64) {
+	delete(s.sockets, id)
+	delete(s.listenSockets, id)
+	s.deleteSelfIfReady()
+}
+
+func (s *Server) triggerDelete() {
+	s.closeCalled = true
+	s.deleteSelfIfReady()
+}
+
+func (s *Server) deleteSelfIfReady() {
+	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
+		return
+	}
+	s.cm.deleteEntry(s.ID)
+}
+
+func (s *Server) getParentID() int64 {
+	return 0
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/channelz/socket.go b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/socket.go
new file mode 100644
index 0000000000..fa64834b25
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/socket.go
@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"net"
+	"sync/atomic"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// SocketMetrics defines the metrics collected for a Socket.
+type SocketMetrics struct {
+	// The number of streams that have been started.
+	StreamsStarted atomic.Int64
+	// The number of streams that have ended successfully:
+	// On client side, receiving frame with eos bit set.
+	// On server side, sending frame with eos bit set.
+	StreamsSucceeded atomic.Int64
+	// The number of streams that have ended unsuccessfully:
+	// On client side, termination without receiving frame with eos bit set.
+	// On server side, termination without sending frame with eos bit set.
+	StreamsFailed atomic.Int64
+	// The number of messages successfully sent on this socket.
+	MessagesSent     atomic.Int64
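+	// The number of messages successfully received on this socket.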
+	MessagesReceived atomic.Int64
+	// The number of keep alives sent.  This is typically implemented with HTTP/2
+	// ping messages.
+	KeepAlivesSent atomic.Int64
+	// The last time a stream was created by this endpoint.  Usually unset for
+	// servers.
+	LastLocalStreamCreatedTimestamp atomic.Int64
+	// The last time a stream was created by the remote endpoint.  Usually unset
+	// for clients.
+	LastRemoteStreamCreatedTimestamp atomic.Int64
+	// The last time a message was sent by this endpoint.
+	LastMessageSentTimestamp atomic.Int64
+	// The last time a message was received by this endpoint.
+	LastMessageReceivedTimestamp atomic.Int64
+}
+
+// EphemeralSocketMetrics are metrics that change rapidly and are tracked
+// outside of channelz.
+type EphemeralSocketMetrics struct {
+	// The amount of window, granted to the local endpoint by the remote endpoint.
+	// This may be slightly out of date due to network latency.  This does NOT
+	// include stream level or TCP level flow control info.
+	LocalFlowControlWindow int64
+	// The amount of window, granted to the remote endpoint by the local endpoint.
+	// This may be slightly out of date due to network latency.  This does NOT
+	// include stream level or TCP level flow control info.
+	RemoteFlowControlWindow int64
+}
+
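+// SocketType represents the type of a socket: listen or normal.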
+type SocketType string
+
+const (
+	SocketTypeNormal SocketType = "NormalSocket"
+	SocketTypeListen SocketType = "ListenSocket"
+)
+
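+// Socket represents a socket within channelz, which includes metrics and
+// internal channelz data, such as the channelz id and the parent entity.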
+type Socket struct {
+	Entity
+	SocketType       SocketType
+	ID               int64
+	Parent           Entity
+	cm               *channelMap
+	SocketMetrics    SocketMetrics
+	EphemeralMetrics func() *EphemeralSocketMetrics
+
+	RefName string
+	// The locally bound address.  Immutable.
+	LocalAddr net.Addr
+	// The remote bound address.  May be absent.  Immutable.
+	RemoteAddr net.Addr
+	// Optional, represents the name of the remote endpoint, if different than
+	// the original target name.  Immutable.
+	RemoteName string
+	// Immutable.
+	SocketOptions *SocketOptionData
+	// Immutable.
+	Security credentials.ChannelzSecurityValue
+}
+
+func (ls *Socket) String() string {
+	return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID)
+}
+
+func (ls *Socket) id() int64 {
+	return ls.ID
+}
+
+func (ls *Socket) addChild(id int64, e entry) {
+	logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+}
+
+func (ls *Socket) deleteChild(id int64) {
+	logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+}
+
+func (ls *Socket) triggerDelete() {
+	ls.cm.deleteEntry(ls.ID)
+	ls.Parent.(entry).deleteChild(ls.ID)
+}
+
+func (ls *Socket) deleteSelfIfReady() {
+	logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
+}
+
+func (ls *Socket) getParentID() int64 {
+	return ls.Parent.id()
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
new file mode 100644
index 0000000000..3b88e4cba8
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
@@ -0,0 +1,151 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sync/atomic"
+)
+
+// SubChannel is the channelz representation of a subchannel.
+type SubChannel struct {
+	Entity
+	// ID is the channelz id of this subchannel.
+	ID int64
+	// RefName is the human readable reference string of this subchannel.
+	RefName       string
+	closeCalled   bool
+	sockets       map[int64]string
+	parent        *Channel
+	trace         *ChannelTrace
+	traceRefCount int32
+
+	ChannelMetrics ChannelMetrics
+}
+
+func (sc *SubChannel) String() string {
+	return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID)
+}
+
+func (sc *SubChannel) id() int64 {
+	return sc.ID
+}
+
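+// Sockets returns a copy of the socket map (channelz id to ref name) of this subchannel.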
+func (sc *SubChannel) Sockets() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(sc.sockets)
+}
+
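+// Trace returns a copy of this subchannel's trace data.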
+func (sc *SubChannel) Trace() *ChannelTrace {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return sc.trace.copy()
+}
+
+func (sc *SubChannel) addChild(id int64, e entry) {
+	if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal {
+		sc.sockets[id] = v.RefName
+	} else {
+		logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
+	}
+}
+
+func (sc *SubChannel) deleteChild(id int64) {
+	delete(sc.sockets, id)
+	sc.deleteSelfIfReady()
+}
+
+func (sc *SubChannel) triggerDelete() {
+	sc.closeCalled = true
+	sc.deleteSelfIfReady()
+}
+
+func (sc *SubChannel) getParentID() int64 {
+	return sc.parent.ID
+}
+
+// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
+// means deleting the subchannel reference from its parent's child list.
+//
+// In order for a subchannel to be deleted from the tree, it must meet two criteria: removal of
+// the corresponding grpc object must have been invoked, and the subchannel must not have any
+// children left.
+//
+// The returned boolean value indicates whether the subchannel was successfully deleted from the tree.
+func (sc *SubChannel) deleteSelfFromTree() (deleted bool) {
+	if !sc.closeCalled || len(sc.sockets) != 0 {
+		return false
+	}
+	sc.parent.deleteChild(sc.ID)
+	return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
+// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
+// the subchannel, and its memory will be garbage collected.
+//
+// The trace reference count of the subchannel must be 0 in order for it to be deleted from the map.
+// The channel tracing gRFC specifies that as long as some other trace has a reference to an entity,
+// the trace of the referenced entity must not be deleted. In order to release the resource allocated
+// by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the subchannel can be safely deleted from the map.
+func (sc *SubChannel) deleteSelfFromMap() (delete bool) {
+	return sc.getTraceRefCount() == 0
+}
+
+// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
+// The delete process includes two steps:
+//  1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
+//     its parent's child list.
+//  2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
+//     by id will return entry not found error.
+func (sc *SubChannel) deleteSelfIfReady() {
+	if !sc.deleteSelfFromTree() {
+		return
+	}
+	if !sc.deleteSelfFromMap() {
+		return
+	}
+	db.deleteEntry(sc.ID)
+	sc.trace.clear()
+}
+
+func (sc *SubChannel) getChannelTrace() *ChannelTrace {
+	return sc.trace
+}
+
+func (sc *SubChannel) incrTraceRefCount() {
+	atomic.AddInt32(&sc.traceRefCount, 1)
+}
+
+func (sc *SubChannel) decrTraceRefCount() {
+	atomic.AddInt32(&sc.traceRefCount, -1)
+}
+
+func (sc *SubChannel) getTraceRefCount() int {
+	i := atomic.LoadInt32(&sc.traceRefCount)
+	return int(i)
+}
+
+func (sc *SubChannel) getRefName() string {
+	return sc.RefName
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
new file mode 100644
index 0000000000..5ac73ff833
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// SocketOptionData defines the struct to hold socket option data, and related
+// getter function to obtain info from fd.
+type SocketOptionData struct {
+	Linger      *unix.Linger
+	RecvTimeout *unix.Timeval
+	SendTimeout *unix.Timeval
+	TCPInfo     *unix.TCPInfo
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+	if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
+		s.Linger = v
+	}
+	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
+		s.RecvTimeout = v
+	}
+	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
+		s.SendTimeout = v
+	}
+	if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
+		s.TCPInfo = v
+	}
+}
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket any) *SocketOptionData {
+	c, ok := socket.(syscall.Conn)
+	if !ok {
+		return nil
+	}
+	data := &SocketOptionData{}
+	if rawConn, err := c.SyscallConn(); err == nil {
+		rawConn.Control(data.Getsockopt)
+		return data
+	}
+	return nil
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
new file mode 100644
index 0000000000..0e6e18e185
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
@@ -0,0 +1,47 @@
+//go:build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"sync"
+)
+
+var once sync.Once
+
+// SocketOptionData defines the struct to hold socket option data, and related
+// getter function to obtain info from fd.
+// Socket options are not supported on non-linux environments.
+type SocketOptionData struct {
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+// Socket options are not supported on non-linux environments.
+func (s *SocketOptionData) Getsockopt(uintptr) {
+	once.Do(func() {
+		logger.Warning("Channelz: socket options are not supported on non-linux environments")
+	})
+}
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(any) *SocketOptionData {
+	return nil
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/channelz/trace.go b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/trace.go
new file mode 100644
index 0000000000..36b8674032
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -0,0 +1,204 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	defaultMaxTraceEntry int32 = 30
+)
+
+var maxTraceEntry = defaultMaxTraceEntry
+
+// SetMaxTraceEntry sets maximum number of trace entries per entity (i.e.
+// channel/subchannel).  Setting it to 0 will disable channel tracing.
+func SetMaxTraceEntry(i int32) {
+	atomic.StoreInt32(&maxTraceEntry, i)
+}
+
+// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per
+// entity to default.
+func ResetMaxTraceEntryToDefault() {
+	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
+}
+
+func getMaxTraceEntry() int {
+	i := atomic.LoadInt32(&maxTraceEntry)
+	return int(i)
+}
+
+// traceEvent is an internal representation of a single trace event
+type traceEvent struct {
+	// Desc is a simple description of the trace event.
+	Desc string
+	// Severity states the severity of this trace event.
+	Severity Severity
+	// Timestamp is the event time.
+	Timestamp time.Time
+	// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
+	// involved in this event.
+	// e.g. for the event "SubChannel (id: 4[]) Created.", RefID = 4 and RefName = "" (the text inside []).
+	RefID int64
+	// RefName is the reference name for the entity that gets referenced in the event.
+	RefName string
+	// RefType indicates the referenced entity type, i.e Channel or SubChannel.
+	RefType RefChannelType
+}
+
+// TraceEvent is what the caller of AddTraceEvent should provide to describe the
+// event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
+type TraceEvent struct {
+	Desc     string
+	Severity Severity
+	Parent   *TraceEvent
+}
+
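+// ChannelTrace provides the trace data for a channel or subchannel: its
+// creation time, the total number of events, and the most recent events.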
+type ChannelTrace struct {
+	cm           *channelMap
+	clearCalled  bool
+	CreationTime time.Time
+	EventNum     int64
+	mu           sync.Mutex
+	Events       []*traceEvent
+}
+
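+// copy returns a snapshot of the trace for reporting to callers.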
+func (c *ChannelTrace) copy() *ChannelTrace {
+	return &ChannelTrace{
+		CreationTime: c.CreationTime,
+		EventNum:     c.EventNum,
+		Events:       append(([]*traceEvent)(nil), c.Events...),
+	}
+}
+
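+// append adds e to the trace, evicting the oldest event (and releasing its
+// trace reference, if any) once the maximum number of trace entries is reached.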
+func (c *ChannelTrace) append(e *traceEvent) {
+	c.mu.Lock()
+	if len(c.Events) == getMaxTraceEntry() {
+		del := c.Events[0]
+		c.Events = c.Events[1:]
+		if del.RefID != 0 {
+			// start recursive cleanup in a goroutine to not block the call originated from grpc.
+			go func() {
+				// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
+				c.cm.mu.Lock()
+				c.cm.decrTraceRefCount(del.RefID)
+				c.cm.mu.Unlock()
+			}()
+		}
+	}
+	e.Timestamp = time.Now()
+	c.Events = append(c.Events, e)
+	c.EventNum++
+	c.mu.Unlock()
+}
+
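+// clear releases the trace references held by the events in this trace.  The
+// caller must hold the c.cm.mu lock.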
+func (c *ChannelTrace) clear() {
+	if c.clearCalled {
+		return
+	}
+	c.clearCalled = true
+	c.mu.Lock()
+	for _, e := range c.Events {
+		if e.RefID != 0 {
+			// caller should have already held the c.cm.mu lock.
+			c.cm.decrTraceRefCount(e.RefID)
+		}
+	}
+	c.mu.Unlock()
+}
+
+// Severity is the severity level of a trace event.
+// The canonical enumeration of all valid values is here:
+// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
+type Severity int
+
+const (
+	// CtUnknown indicates unknown severity of a trace event.
+	CtUnknown Severity = iota
+	// CtInfo indicates info level severity of a trace event.
+	CtInfo
+	// CtWarning indicates warning level severity of a trace event.
+	CtWarning
+	// CtError indicates error level severity of a trace event.
+	CtError
+)
+
+// RefChannelType is the type of the entity being referenced in a trace event.
+type RefChannelType int
+
+const (
+	// RefUnknown indicates an unknown entity type, the zero value for this type.
+	RefUnknown RefChannelType = iota
+	// RefChannel indicates the referenced entity is a Channel.
+	RefChannel
+	// RefSubChannel indicates the referenced entity is a SubChannel.
+	RefSubChannel
+	// RefServer indicates the referenced entity is a Server.
+	RefServer
+	// RefListenSocket indicates the referenced entity is a ListenSocket.
+	RefListenSocket
+	// RefNormalSocket indicates the referenced entity is a NormalSocket.
+	RefNormalSocket
+)
+
+var refChannelTypeToString = map[RefChannelType]string{
+	RefUnknown:      "Unknown",
+	RefChannel:      "Channel",
+	RefSubChannel:   "SubChannel",
+	RefServer:       "Server",
+	RefListenSocket: "ListenSocket",
+	RefNormalSocket: "NormalSocket",
+}
+
+func (r RefChannelType) String() string {
+	return refChannelTypeToString[r]
+}
+
+// AddTraceEvent adds trace related to the entity with specified id, using the
+// provided TraceEventDesc.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) {
+	// Log only the trace description associated with the bottom most entity.
+	d := fmt.Sprintf("[%s]%s", e, desc.Desc)
+	switch desc.Severity {
+	case CtUnknown, CtInfo:
+		l.InfoDepth(depth+1, d)
+	case CtWarning:
+		l.WarningDepth(depth+1, d)
+	case CtError:
+		l.ErrorDepth(depth+1, d)
+	}
+
+	if getMaxTraceEntry() == 0 {
+		return
+	}
+	if IsOn() {
+		db.traceEvent(e.id(), desc)
+	}
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/hack/tools/vendor/google.golang.org/grpc/internal/credentials/credentials.go
new file mode 100644
index 0000000000..9deee7f651
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/credentials/credentials.go
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"context"
+)
+
+// requestInfoKey is a struct to be used as the key to store RequestInfo in a
+// context.
+type requestInfoKey struct{}
+
+// NewRequestInfoContext creates a context with ri.
+func NewRequestInfoContext(ctx context.Context, ri any) context.Context {
+	return context.WithValue(ctx, requestInfoKey{}, ri)
+}
+
+// RequestInfoFromContext extracts the RequestInfo from ctx.
+func RequestInfoFromContext(ctx context.Context) any {
+	return ctx.Value(requestInfoKey{})
+}
+
+// clientHandshakeInfoKey is a struct used as the key to store
+// ClientHandshakeInfo in a context.
+type clientHandshakeInfoKey struct{}
+
+// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
+func ClientHandshakeInfoFromContext(ctx context.Context) any {
+	return ctx.Value(clientHandshakeInfoKey{})
+}
+
+// NewClientHandshakeInfoContext creates a context with chi.
+func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context {
+	return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/hack/tools/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
new file mode 100644
index 0000000000..25ade62305
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package credentials defines APIs for parsing SPIFFE ID.
+//
+// All APIs in this package are experimental.
+package credentials
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"net/url"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("credentials")
+
+// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
+// is invalid, it returns nil with a warning logged.
+func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
+	if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
+		return nil
+	}
+	return SPIFFEIDFromCert(state.PeerCertificates[0])
+}
+
+// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE
+// ID format is invalid, it returns nil with a warning logged.
+func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
+	if cert == nil || cert.URIs == nil {
+		return nil
+	}
+	var spiffeID *url.URL
+	for _, uri := range cert.URIs {
+		if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
+			continue
+		}
+		// From this point, we assume the uri is intended for a SPIFFE ID.
+		if len(uri.String()) > 2048 {
+			logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
+			return nil
+		}
+		if len(uri.Host) == 0 || len(uri.Path) == 0 {
+			logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
+			return nil
+		}
+		if len(uri.Host) > 255 {
+			logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
+			return nil
+		}
+		// A valid SPIFFE certificate can only have exactly one URI SAN field.
+		if len(cert.URIs) > 1 {
+			logger.Warning("invalid SPIFFE ID: multiple URI SANs")
+			return nil
+		}
+		spiffeID = uri
+	}
+	return spiffeID
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/hack/tools/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
new file mode 100644
index 0000000000..2919632d65
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"net"
+	"syscall"
+)
+
+type sysConn = syscall.Conn
+
+// syscallConn keeps reference of rawConn to support syscall.Conn for channelz.
+// SyscallConn() (the method in interface syscall.Conn) is explicitly
+// implemented on this type.
+//
+// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
+// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
+// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
+// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
+// help here).
+type syscallConn struct {
+	net.Conn
+	// sysConn is a type alias of syscall.Conn. It's necessary because the name
+	// `Conn` collides with `net.Conn`.
+	sysConn
+}
+
+// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
+// implements syscall.Conn. rawConn will be used to support syscall, and newConn
+// will be used for read/write.
+//
+// This function returns newConn if rawConn doesn't implement syscall.Conn.
+func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
+	sysConn, ok := rawConn.(syscall.Conn)
+	if !ok {
+		return newConn
+	}
+	return &syscallConn{
+		Conn:    newConn,
+		sysConn: sysConn,
+	}
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/credentials/util.go b/hack/tools/vendor/google.golang.org/grpc/internal/credentials/util.go
new file mode 100644
index 0000000000..f792fd22ca
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/credentials/util.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"crypto/tls"
+)
+
+const alpnProtoStrH2 = "h2"
+
+// AppendH2ToNextProtos appends h2 to next protos.
+func AppendH2ToNextProtos(ps []string) []string {
+	for _, p := range ps {
+		if p == alpnProtoStrH2 {
+			return ps
+		}
+	}
+	ret := make([]string, 0, len(ps)+1)
+	ret = append(ret, ps...)
+	return append(ret, alpnProtoStrH2)
+}
+
+// CloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO: inline this function if possible.
+func CloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+
+	return cfg.Clone()
+}
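+
+// Editorial sketch, not upstream code: the behavior of the two helpers above,
+// assuming only the standard library:
+//
+//	cfg := CloneTLSConfig(nil)                            // fresh &tls.Config{}
+//	cfg.NextProtos = AppendH2ToNextProtos(cfg.NextProtos) // ["h2"]
+//	cfg.NextProtos = AppendH2ToNextProtos(cfg.NextProtos) // still ["h2"], no duplicate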
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/hack/tools/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
new file mode 100644
index 0000000000..452985f8d8
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -0,0 +1,76 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package envconfig contains grpc settings configured by environment variables.
+package envconfig
+
+import (
+	"os"
+	"strconv"
+	"strings"
+)
+
+var (
+	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
+	TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
+	// RingHashCap indicates the maximum ring size which defaults to 4096
+	// entries but may be overridden by setting the environment variable
+	// "GRPC_RING_HASH_CAP".  This does not override the default bounds
+	// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
+	RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
+	// LeastRequestLB is set if we should support the least_request_experimental
+	// LB policy, which can be enabled by setting the environment variable
+	// "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
+	LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false)
+	// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
+	// handshakes that can be performed.
+	ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
+	// EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
+	// should be rejected. The HTTP/2 protocol requires ALPN to be enabled; this
+	// option is present for backward compatibility. This option may be overridden
+	// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
+	// or "false".
+	EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
+	// XDSFallbackSupport is the env variable that controls whether support for
+	// xDS fallback is turned on. If this is unset or is false, only the first
+	// xDS server in the list of server configs will be used.
+	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
+)
+
+func boolFromEnv(envVar string, def bool) bool {
+	if def {
+		// The default is true; return true unless the variable is "false".
+		return !strings.EqualFold(os.Getenv(envVar), "false")
+	}
+	// The default is false; return false unless the variable is "true".
+	return strings.EqualFold(os.Getenv(envVar), "true")
+}
+
+func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
+	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
+	if err != nil {
+		return def
+	}
+	if v < min {
+		return min
+	}
+	if v > max {
+		return max
+	}
+	return v
+}
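+
+// Editorial sketch, not upstream code: how the helpers above resolve a few
+// representative inputs ("X" is a hypothetical variable name):
+//
+//	boolFromEnv("X", true)  // true unless os.Getenv("X") equals "false" (case-insensitive)
+//	boolFromEnv("X", false) // false unless os.Getenv("X") equals "true" (case-insensitive)
+//	uint64FromEnv("X", 4096, 1, 8*1024*1024) // default on parse error, otherwise clamped to [min, max]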
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/hack/tools/vendor/google.golang.org/grpc/internal/envconfig/observability.go
new file mode 100644
index 0000000000..dd314cfb18
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/envconfig/observability.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package envconfig
+
+import "os"
+
+const (
+	envObservabilityConfig     = "GRPC_GCP_OBSERVABILITY_CONFIG"
+	envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE"
+)
+
+var (
+	// ObservabilityConfig is the json configuration for the gcp/observability
+	// package specified directly in the envObservabilityConfig env var.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	ObservabilityConfig = os.Getenv(envObservabilityConfig)
+	// ObservabilityConfigFile is the json configuration for the
+	// gcp/observability specified in a file with the location specified in
+	// envObservabilityConfigFile env var.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
+)
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/hack/tools/vendor/google.golang.org/grpc/internal/envconfig/xds.go
new file mode 100644
index 0000000000..29f234acb1
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -0,0 +1,56 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package envconfig
+
+import (
+	"os"
+)
+
+const (
+	// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
+	// Do not use this and read from env directly. Its value is read and kept in
+	// variable XDSBootstrapFileName.
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
+	// XDSBootstrapFileContentEnv is the env variable to set bootstrap file
+	// content. Do not use this and read from env directly. Its value is read
+	// and kept in variable XDSBootstrapFileContent.
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
+)
+
+var (
+	// XDSBootstrapFileName holds the name of the file which contains xDS
+	// bootstrap configuration. Users can specify the location of the bootstrap
+	// file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
+	// XDSBootstrapFileContent holds the content of the xDS bootstrap
+	// configuration. Users can specify the bootstrap config by setting the
+	// environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
+
+	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
+	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
+)
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/experimental.go b/hack/tools/vendor/google.golang.org/grpc/internal/experimental.go
new file mode 100644
index 0000000000..7617be2158
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/experimental.go
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+var (
+	// WithBufferPool is implemented by the grpc package and returns a dial
+	// option to configure a shared buffer pool for a grpc.ClientConn.
+	WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
+
+	// BufferPool is implemented by the grpc package and returns a server
+	// option to configure a shared buffer pool for a grpc.Server.
+	BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+)
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go b/hack/tools/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
new file mode 100644
index 0000000000..092ad187a2
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog provides logging functionality for internal gRPC packages,
+// outside of the functionality provided by the external `grpclog` package.
+package grpclog
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// PrefixLogger does logging with a prefix.
+//
+// Calling a logging method on a nil *PrefixLogger logs without any prefix.
+type PrefixLogger struct {
+	logger grpclog.DepthLoggerV2
+	prefix string
+}
+
+// Infof does info logging.
+func (pl *PrefixLogger) Infof(format string, args ...any) {
+	if pl != nil {
+		// Handle nil, so the tests can pass in a nil logger.
+		format = pl.prefix + format
+		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	grpclog.InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Warningf does warning logging.
+func (pl *PrefixLogger) Warningf(format string, args ...any) {
+	if pl != nil {
+		format = pl.prefix + format
+		pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Errorf does error logging.
+func (pl *PrefixLogger) Errorf(format string, args ...any) {
+	if pl != nil {
+		format = pl.prefix + format
+		pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (pl *PrefixLogger) V(l int) bool {
+	if pl != nil {
+		return pl.logger.V(l)
+	}
+	return true
+}
+
+// NewPrefixLogger creates a prefix logger with the given prefix.
+func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
+	return &PrefixLogger{logger: logger, prefix: prefix}
+}
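+
+// Editorial sketch, not upstream code: tagging every log line from one
+// component with a stable prefix. The prefix string is hypothetical:
+//
+//	logger := NewPrefixLogger(grpclog.Component("core"), "[pick-first-lb] ")
+//	logger.Infof("created new LB policy %q", "pick_first")
+//	// output: [pick-first-lb] created new LB policy "pick_first"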
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/hack/tools/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
new file mode 100644
index 0000000000..19b9d63927
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -0,0 +1,112 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+	"context"
+
+	"google.golang.org/grpc/internal/buffer"
+)
+
+// CallbackSerializer provides a mechanism to schedule callbacks in a
+// synchronized manner. It provides a FIFO guarantee on the order of execution
+// of scheduled callbacks. New callbacks can be scheduled by invoking the
+// Schedule() method.
+//
+// This type is safe for concurrent access.
+type CallbackSerializer struct {
+	// done is closed once the serializer is shut down completely, i.e all
+	// scheduled callbacks are executed and the serializer has deallocated all
+	// its resources.
+	done chan struct{}
+
+	callbacks *buffer.Unbounded
+}
+
+// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
+// context will be passed to the scheduled callbacks. Users should cancel the
+// provided context to shut down the CallbackSerializer. It is guaranteed that
+// no callbacks will be added once this context is canceled, and any pending
+// unexecuted callbacks will be executed before the serializer is shut down.
+func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
+	cs := &CallbackSerializer{
+		done:      make(chan struct{}),
+		callbacks: buffer.NewUnbounded(),
+	}
+	go cs.run(ctx)
+	return cs
+}
+
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
+//
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+	cs.callbacks.Put(f)
+}
+
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
+//
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
+	if cs.callbacks.Put(f) != nil {
+		onFailure()
+	}
+}
+
+func (cs *CallbackSerializer) run(ctx context.Context) {
+	defer close(cs.done)
+
+	// TODO: when Go 1.21 is the oldest supported version, this loop and Close
+	// can be replaced with:
+	//
+	// context.AfterFunc(ctx, cs.callbacks.Close)
+	for ctx.Err() == nil {
+		select {
+		case <-ctx.Done():
+			// Do nothing here. Next iteration of the for loop will not happen,
+			// since ctx.Err() would be non-nil.
+		case cb := <-cs.callbacks.Get():
+			cs.callbacks.Load()
+			cb.(func(context.Context))(ctx)
+		}
+	}
+
+	// Close the buffer to prevent new callbacks from being added.
+	cs.callbacks.Close()
+
+	// Run all pending callbacks.
+	for cb := range cs.callbacks.Get() {
+		cs.callbacks.Load()
+		cb.(func(context.Context))(ctx)
+	}
+}
+
+// Done returns a channel that is closed after the context passed to
+// NewCallbackSerializer is canceled and all callbacks have been executed.
+func (cs *CallbackSerializer) Done() <-chan struct{} {
+	return cs.done
+}
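+
+// Editorial sketch, not upstream code: serializing callbacks so they run in
+// FIFO order and never concurrently:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	cs := NewCallbackSerializer(ctx)
+//	cs.TrySchedule(func(ctx context.Context) { /* runs first */ })
+//	cs.TrySchedule(func(ctx context.Context) { /* runs second */ })
+//	cancel()
+//	<-cs.Done() // all scheduled callbacks have finished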
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/hack/tools/vendor/google.golang.org/grpc/internal/grpcsync/event.go
new file mode 100644
index 0000000000..fbe697c376
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/grpcsync/event.go
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcsync implements additional synchronization primitives built upon
+// the sync package.
+package grpcsync
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// Event represents a one-time event that may occur in the future.
+type Event struct {
+	fired int32
+	c     chan struct{}
+	o     sync.Once
+}
+
+// Fire causes e to complete.  It is safe to call multiple times, and
+// concurrently.  It returns true iff this call to Fire caused the signaling
+// channel returned by Done to close.
+func (e *Event) Fire() bool {
+	ret := false
+	e.o.Do(func() {
+		atomic.StoreInt32(&e.fired, 1)
+		close(e.c)
+		ret = true
+	})
+	return ret
+}
+
+// Done returns a channel that will be closed when Fire is called.
+func (e *Event) Done() <-chan struct{} {
+	return e.c
+}
+
+// HasFired returns true if Fire has been called.
+func (e *Event) HasFired() bool {
+	return atomic.LoadInt32(&e.fired) == 1
+}
+
+// NewEvent returns a new, ready-to-use Event.
+func NewEvent() *Event {
+	return &Event{c: make(chan struct{})}
+}
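+
+// Editorial sketch, not upstream code: Event is a one-shot broadcast; any
+// number of goroutines can wait on Done(), and exactly one Fire() call wins:
+//
+//	e := NewEvent()
+//	go func() { <-e.Done() /* observe the event */ }()
+//	first := e.Fire() // true: this call closed the channel
+//	again := e.Fire() // false: already fired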
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/hack/tools/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
new file mode 100644
index 0000000000..6635f7bca9
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
@@ -0,0 +1,32 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+	"sync"
+)
+
+// OnceFunc returns a function wrapping f which ensures f is only executed
+// once even if the returned function is executed multiple times.
+func OnceFunc(f func()) func() {
+	var once sync.Once
+	return func() {
+		once.Do(f)
+	}
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/hack/tools/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
new file mode 100644
index 0000000000..6d8c2f518d
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
@@ -0,0 +1,121 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+	"context"
+	"sync"
+)
+
+// Subscriber represents an entity that is subscribed to messages published on
+// a PubSub. It wraps the callback to be invoked by the PubSub when a new
+// message is published.
+type Subscriber interface {
+	// OnMessage is invoked when a new message is published. Implementations
+	// must not block in this method.
+	OnMessage(msg any)
+}
+
+// PubSub is a simple one-to-many publish-subscribe system that supports
+// messages of arbitrary type. It guarantees that messages are delivered in
+// the same order in which they were published.
+//
+// Publisher invokes the Publish() method to publish new messages, while
+// subscribers interested in receiving these messages register a callback
+// via the Subscribe() method.
+//
+// Once a PubSub is stopped, no more messages can be published, but any pending
+// published messages will be delivered to the subscribers.  Done may be used
+// to determine when all published messages have been delivered.
+type PubSub struct {
+	cs *CallbackSerializer
+
+	// Access to the below fields is guarded by this mutex.
+	mu          sync.Mutex
+	msg         any
+	subscribers map[Subscriber]bool
+}
+
+// NewPubSub returns a new PubSub instance.  Users should cancel the
+// provided context to shut down the PubSub.
+func NewPubSub(ctx context.Context) *PubSub {
+	return &PubSub{
+		cs:          NewCallbackSerializer(ctx),
+		subscribers: map[Subscriber]bool{},
+	}
+}
+
+// Subscribe registers the provided Subscriber to the PubSub.
+//
+// If the PubSub contains a previously published message, the Subscriber's
+// OnMessage() callback will be invoked asynchronously with the existing
+// message to begin with, and subsequently for every newly published message.
+//
+// The caller is responsible for invoking the returned cancel function to
+// unsubscribe itself from the PubSub.
+func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+
+	ps.subscribers[sub] = true
+
+	if ps.msg != nil {
+		msg := ps.msg
+		ps.cs.TrySchedule(func(context.Context) {
+			ps.mu.Lock()
+			defer ps.mu.Unlock()
+			if !ps.subscribers[sub] {
+				return
+			}
+			sub.OnMessage(msg)
+		})
+	}
+
+	return func() {
+		ps.mu.Lock()
+		defer ps.mu.Unlock()
+		delete(ps.subscribers, sub)
+	}
+}
+
+// Publish publishes the provided message to the PubSub, and invokes
+// callbacks registered by subscribers asynchronously.
+func (ps *PubSub) Publish(msg any) {
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+
+	ps.msg = msg
+	for sub := range ps.subscribers {
+		s := sub
+		ps.cs.TrySchedule(func(context.Context) {
+			ps.mu.Lock()
+			defer ps.mu.Unlock()
+			if !ps.subscribers[s] {
+				return
+			}
+			s.OnMessage(msg)
+		})
+	}
+}
+
+// Done returns a channel that is closed after the context passed to NewPubSub
+// is canceled and all updates have been sent to subscribers.
+func (ps *PubSub) Done() <-chan struct{} {
+	return ps.cs.Done()
+}
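+
+// Editorial sketch, not upstream code: a Subscriber that records published
+// updates. The stateWatcher type is hypothetical:
+//
+//	type stateWatcher struct{ updates chan any }
+//
+//	// OnMessage must not block; the non-blocking send keeps this cheap.
+//	func (w *stateWatcher) OnMessage(msg any) {
+//		select {
+//		case w.updates <- msg:
+//		default:
+//		}
+//	}
+//
+//	ps := NewPubSub(ctx)
+//	w := &stateWatcher{updates: make(chan any, 8)}
+//	cancelSub := ps.Subscribe(w) // replays the latest message, then new ones
+//	ps.Publish("READY")
+//	defer cancelSub()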
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
new file mode 100644
index 0000000000..e8d866984b
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"strings"
+)
+
+// RegisteredCompressorNames holds names of the registered compressors.
+var RegisteredCompressorNames []string
+
+// IsCompressorNameRegistered returns true when name is available in the registry.
+func IsCompressorNameRegistered(name string) bool {
+	for _, compressor := range RegisteredCompressorNames {
+		if compressor == name {
+			return true
+		}
+	}
+	return false
+}
+
+// RegisteredCompressors returns a string of registered compressor names
+// separated by commas.
+func RegisteredCompressors() string {
+	return strings.Join(RegisteredCompressorNames, ",")
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
new file mode 100644
index 0000000000..b25b0baec3
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"strconv"
+	"time"
+)
+
+const maxTimeoutValue int64 = 100000000 - 1
+
+// div does integer division and rounds up the result. Note that this is
+// equivalent to (d+r-1)/r but is less likely to overflow.
+func div(d, r time.Duration) int64 {
+	if d%r > 0 {
+		return int64(d/r + 1)
+	}
+	return int64(d / r)
+}
+
+// EncodeDuration encodes the duration to the format grpc-timeout header
+// accepts.
+//
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+func EncodeDuration(t time.Duration) string {
+	// TODO: This is simplistic and not bandwidth efficient. Improve it.
+	if t <= 0 {
+		return "0n"
+	}
+	if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "n"
+	}
+	if d := div(t, time.Microsecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "u"
+	}
+	if d := div(t, time.Millisecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "m"
+	}
+	if d := div(t, time.Second); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "S"
+	}
+	if d := div(t, time.Minute); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "M"
+	}
+	// Note that maxTimeoutValue * time.Hour > MaxInt64.
+	return strconv.FormatInt(div(t, time.Hour), 10) + "H"
+}
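+
+// Editorial sketch, not upstream code: the encoder picks the finest unit
+// whose integer count still fits in eight digits, rounding up:
+//
+//	EncodeDuration(0)                     // "0n"
+//	EncodeDuration(10 * time.Millisecond) // "10000000n" (fits as nanoseconds)
+//	EncodeDuration(time.Hour)             // "3600000m" (nanos and micros overflow 8 digits)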
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
new file mode 100644
index 0000000000..e2f948e8f4
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
@@ -0,0 +1,20 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcutil provides utility functions used across the gRPC codebase.
+package grpcutil
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
new file mode 100644
index 0000000000..6f22bd8911
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"context"
+
+	"google.golang.org/grpc/metadata"
+)
+
+type mdExtraKey struct{}
+
+// WithExtraMetadata creates a new context with incoming md attached.
+func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
+	return context.WithValue(ctx, mdExtraKey{}, md)
+}
+
+// ExtraMetadata returns the incoming metadata in ctx if it exists.  The
+// returned MD should not be modified. Writing to it may cause races.
+// Modifications should be made to copies of the returned MD.
+func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
+	md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
+	return
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/method.go
new file mode 100644
index 0000000000..ec62b4775e
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/method.go
@@ -0,0 +1,88 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"errors"
+	"strings"
+)
+
+// ParseMethod splits service and method from the input. It expects format
+// "/service/method".
+func ParseMethod(methodName string) (service, method string, _ error) {
+	if !strings.HasPrefix(methodName, "/") {
+		return "", "", errors.New("invalid method name: should start with /")
+	}
+	methodName = methodName[1:]
+
+	pos := strings.LastIndex(methodName, "/")
+	if pos < 0 {
+		return "", "", errors.New("invalid method name: suffix /method is missing")
+	}
+	return methodName[:pos], methodName[pos+1:], nil
+}
+
+// baseContentType is the base content-type for gRPC.  This is a valid
+// content-type on its own, but can also include a content-subtype such as
+// "proto" as a suffix after "+" or ";".  See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+// for more details.
+const baseContentType = "application/grpc"
+
+// ContentSubtype returns the content-subtype for the given content-type.  The
+// given content-type must be a valid content-type that starts with
+// "application/grpc". A content-subtype will follow "application/grpc" after a
+// "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If contentType is not a valid content-type for gRPC, the boolean
+// will be false, otherwise true. If content-type == "application/grpc",
+// "application/grpc+", or "application/grpc;", the boolean will be true,
+// but no content-subtype will be returned.
+//
+// contentType is assumed to be lowercase already.
+func ContentSubtype(contentType string) (string, bool) {
+	if contentType == baseContentType {
+		return "", true
+	}
+	if !strings.HasPrefix(contentType, baseContentType) {
+		return "", false
+	}
+	// guaranteed since != baseContentType and has baseContentType prefix
+	switch contentType[len(baseContentType)] {
+	case '+', ';':
+		// this will return true for "application/grpc+" or "application/grpc;"
+		// which the previous validContentType function tested to be valid, so we
+		// just say that no content-subtype is specified in this case
+		return contentType[len(baseContentType)+1:], true
+	default:
+		return "", false
+	}
+}
+
+// ContentType builds full content type with the given sub-type.
+//
+// contentSubtype is assumed to be lowercase.
+func ContentType(contentSubtype string) string {
+	if contentSubtype == "" {
+		return baseContentType
+	}
+	return baseContentType + "+" + contentSubtype
+}
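+
+// Editorial sketch, not upstream code: round-tripping content types through
+// the helpers above:
+//
+//	ContentSubtype("application/grpc")       // "", true
+//	ContentSubtype("application/grpc+proto") // "proto", true
+//	ContentSubtype("application/json")       // "", false
+//	ContentType("proto")                     // "application/grpc+proto"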
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
new file mode 100644
index 0000000000..7a092b2b80
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
@@ -0,0 +1,31 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import "regexp"
+
+// FullMatchWithRegex returns whether the full text matches the regex provided.
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
+	if len(text) == 0 {
+		return re.MatchString(text)
+	}
+	re.Longest()
+	rem := re.FindString(text)
+	return len(rem) == len(text)
+}
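+
+// Editorial sketch, not upstream code: unlike re.MatchString, which accepts
+// partial matches, FullMatchWithRegex requires the match to cover the whole
+// input:
+//
+//	re := regexp.MustCompile("ab+")
+//	re.MatchString("xabby")         // true (partial match)
+//	FullMatchWithRegex(re, "xabby") // false
+//	FullMatchWithRegex(re, "abbb")  // true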
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/idle/idle.go b/hack/tools/vendor/google.golang.org/grpc/internal/idle/idle.go
new file mode 100644
index 0000000000..fe49cb74c5
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -0,0 +1,278 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package idle contains a component for managing idleness (entering and exiting)
+// based on RPC activity.
+package idle
+
+import (
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// For overriding in unit tests.
+var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
+	return time.AfterFunc(d, f)
+}
+
+// Enforcer is the functionality provided by grpc.ClientConn to enter
+// and exit from idle mode.
+type Enforcer interface {
+	ExitIdleMode() error
+	EnterIdleMode()
+}
+
+// Manager implements idleness detection and calls the configured Enforcer to
+// enter/exit idle mode when appropriate.  Must be created by NewManager.
+type Manager struct {
+	// State accessed atomically.
+	lastCallEndTime           int64 // Unix timestamp in nanos; time when the most recent RPC completed.
+	activeCallsCount          int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
+	activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback.
+	closed                    int32 // Boolean; True when the manager is closed.
+
+	// Can be accessed without atomics or mutex since these are set at creation
+	// time and read-only after that.
+	enforcer Enforcer // Functionality provided by grpc.ClientConn.
+	timeout  time.Duration
+
+	// idleMu is used to guarantee mutual exclusion in two scenarios:
+	// - Opposing intentions:
+	//   - a: Idle timeout has fired and handleIdleTimeout() is trying to put
+	//     the channel in idle mode because the channel has been inactive.
+	//   - b: At the same time an RPC is made on the channel, and OnCallBegin()
+	//     is trying to prevent the channel from going idle.
+	// - Competing intentions:
+	//   - The channel is in idle mode and there are multiple RPCs starting at
+	//     the same time, all trying to move the channel out of idle. Only one
+	//     of them should succeed in doing so, while the other RPCs should
+	//     piggyback on the first one and be successfully handled.
+	idleMu       sync.RWMutex
+	actuallyIdle bool
+	timer        *time.Timer
+}
+
+// NewManager creates a new idleness manager implementation for the
+// given idle timeout.  It begins in idle mode.
+func NewManager(enforcer Enforcer, timeout time.Duration) *Manager {
+	return &Manager{
+		enforcer:         enforcer,
+		timeout:          timeout,
+		actuallyIdle:     true,
+		activeCallsCount: -math.MaxInt32,
+	}
+}
+
+// resetIdleTimerLocked resets the idle timer to the given duration.  Called
+// when exiting idle mode or when the timer fires and we need to reset it.
+func (m *Manager) resetIdleTimerLocked(d time.Duration) {
+	if m.isClosed() || m.timeout == 0 || m.actuallyIdle {
+		return
+	}
+
+	// It is safe to ignore the return value from Stop() because this method is
+	// only ever called from the timer callback or when exiting idle mode, so
+	// the old timer, if any, has either fired or is no longer needed.
+	if m.timer != nil {
+		m.timer.Stop()
+	}
+	m.timer = timeAfterFunc(d, m.handleIdleTimeout)
+}
+
+func (m *Manager) resetIdleTimer(d time.Duration) {
+	m.idleMu.Lock()
+	defer m.idleMu.Unlock()
+	m.resetIdleTimerLocked(d)
+}
+
+// handleIdleTimeout is the timer callback that is invoked upon expiry of the
+// configured idle timeout. The channel is considered inactive if there are no
+// ongoing calls and no RPC activity since the last time the timer fired.
+func (m *Manager) handleIdleTimeout() {
+	if m.isClosed() {
+		return
+	}
+
+	if atomic.LoadInt32(&m.activeCallsCount) > 0 {
+		m.resetIdleTimer(m.timeout)
+		return
+	}
+
+	// There has been activity on the channel since we last got here. Reset the
+	// timer and return.
+	if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+		// Set the timer to fire after a duration of idle timeout, calculated
+		// from the time the most recent RPC completed.
+		atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
+		m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout)
+		return
+	}
+
+	// Now that we've checked that there has been no activity, attempt to enter
+	// idle mode, which is very likely to succeed.
+	if m.tryEnterIdleMode() {
+		// Successfully entered idle mode. No timer needed until we exit idle.
+		return
+	}
+
+	// Failed to enter idle mode due to a concurrent RPC that kept the channel
+	// active, or because of an error from the channel. Undo the attempt to
+	// enter idle, and reset the timer to try again later.
+	m.resetIdleTimer(m.timeout)
+}
+
+// tryEnterIdleMode instructs the channel to enter idle mode. But before
+// that, it performs a last-minute check to ensure that no new RPC has come in,
+// making the channel active.
+//
+// Return value indicates whether or not the channel moved to idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with exitIdleMode.
+func (m *Manager) tryEnterIdleMode() bool {
+	// Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin()
+	// that the channel is either in idle mode or is trying to get there.
+	if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
+		// This CAS operation can fail if an RPC started after we checked for
+		// activity in the timer handler, or one was ongoing from before the
+		// last time the timer fired, or if a test is attempting to enter idle
+		// mode without checking.  In all cases, abort going into idle mode.
+		return false
+	}
+	// N.B. if we fail to enter idle mode after this, we must re-add
+	// math.MaxInt32 to m.activeCallsCount.
+
+	m.idleMu.Lock()
+	defer m.idleMu.Unlock()
+
+	if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
+		// We raced and lost to a new RPC. Very rare, but stop entering idle.
+		atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+		return false
+	}
+	if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+		// A very short RPC could have come in (and also finished) after we
+		// checked for calls count and activity in handleIdleTimeout(), but
+		// before the CAS operation. So, we need to check for activity again.
+		atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+		return false
+	}
+
+	// No new RPCs have come in since we set the active calls count value to
+	// -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
+	// unconditionally now.
+	m.enforcer.EnterIdleMode()
+	m.actuallyIdle = true
+	return true
+}
+
+// EnterIdleModeForTesting instructs the channel to enter idle mode, for use
+// in tests.
+func (m *Manager) EnterIdleModeForTesting() {
+	m.tryEnterIdleMode()
+}
+
+// OnCallBegin is invoked at the start of every RPC.
+func (m *Manager) OnCallBegin() error {
+	if m.isClosed() {
+		return nil
+	}
+
+	if atomic.AddInt32(&m.activeCallsCount, 1) > 0 {
+		// Channel is not idle now. Set the activity bit and allow the call.
+		atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
+		return nil
+	}
+
+	// Channel is either in idle mode or is in the process of moving to idle
+	// mode. Attempt to exit idle mode to allow this RPC.
+	if err := m.ExitIdleMode(); err != nil {
+		// Undo the increment to calls count, and return an error causing the
+		// RPC to fail.
+		atomic.AddInt32(&m.activeCallsCount, -1)
+		return err
+	}
+
+	atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
+	return nil
+}
+
+// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
+// internal state.
+func (m *Manager) ExitIdleMode() error {
+	// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
+	m.idleMu.Lock()
+	defer m.idleMu.Unlock()
+
+	if m.isClosed() || !m.actuallyIdle {
+		// This can happen in three scenarios:
+		// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
+		//   tryEnterIdleMode(). But before the latter could grab the lock, an RPC
+		//   came in and OnCallBegin() noticed that the calls count is negative.
+		// - Channel is in idle mode, and multiple new RPCs come in at the same
+		//   time, all of them notice a negative calls count in OnCallBegin and get
+		//   here. The first one to get the lock gets the channel to exit idle.
+		// - Channel is not in idle mode, and the user calls Connect which calls
+		//   m.ExitIdleMode.
+		//
+		// In any case, there is nothing to do here.
+		return nil
+	}
+
+	if err := m.enforcer.ExitIdleMode(); err != nil {
+		return fmt.Errorf("failed to exit idle mode: %w", err)
+	}
+
+	// Undo the idle entry process. This also respects any new RPC attempts.
+	atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+	m.actuallyIdle = false
+
+	// Start a new timer to fire after the configured idle timeout.
+	m.resetIdleTimerLocked(m.timeout)
+	return nil
+}
+
+// OnCallEnd is invoked at the end of every RPC.
+func (m *Manager) OnCallEnd() {
+	if m.isClosed() {
+		return
+	}
+
+	// Record the time at which the most recent call finished.
+	atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano())
+
+	// Decrement the active calls count. This count can temporarily go negative
+	// when the timer callback is in the process of moving the channel to idle
+	// mode, but one or more RPCs come in and complete before the timer callback
+	// can get done with the process of moving to idle mode.
+	atomic.AddInt32(&m.activeCallsCount, -1)
+}
+
+func (m *Manager) isClosed() bool {
+	return atomic.LoadInt32(&m.closed) == 1
+}
+
+// Close marks the manager as closed and stops the idle timer, if one is
+// running.
+func (m *Manager) Close() {
+	atomic.StoreInt32(&m.closed, 1)
+
+	m.idleMu.Lock()
+	if m.timer != nil {
+		m.timer.Stop()
+		m.timer = nil
+	}
+	m.idleMu.Unlock()
+}
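+
+// Editorial sketch, not upstream code: a minimal Enforcer wired to a Manager.
+// The fakeChannel type is hypothetical:
+//
+//	type fakeChannel struct{ idle bool }
+//
+//	func (c *fakeChannel) EnterIdleMode()      { c.idle = true }
+//	func (c *fakeChannel) ExitIdleMode() error { c.idle = false; return nil }
+//
+//	ch := &fakeChannel{}
+//	m := NewManager(ch, 30*time.Minute) // starts in idle mode
+//	_ = m.OnCallBegin()                 // exits idle mode before the first RPC
+//	m.OnCallEnd()
+//	defer m.Close()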
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/internal.go b/hack/tools/vendor/google.golang.org/grpc/internal/internal.go
new file mode 100644
index 0000000000..7aae9240ff
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/internal.go
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains gRPC-internal code, to avoid polluting
+// the godoc of the top-level grpc package.  It must not import any grpc
+// symbols to avoid circular dependencies.
+package internal
+
+import (
+	"context"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// WithHealthCheckFunc is set by dialoptions.go
+	WithHealthCheckFunc any // func (HealthChecker) DialOption
+	// HealthCheckFunc is used to provide client-side LB channel health checking
+	HealthCheckFunc HealthChecker
+	// BalancerUnregister is exported by package balancer to unregister a balancer.
+	BalancerUnregister func(name string)
+	// KeepaliveMinPingTime is the minimum ping interval.  This must be 10s by
+	// default, but tests may wish to set it lower for convenience.
+	KeepaliveMinPingTime = 10 * time.Second
+	// KeepaliveMinServerPingTime is the minimum ping interval for servers.
+	// This must be 1s by default, but tests may wish to set it lower for
+	// convenience.
+	KeepaliveMinServerPingTime = time.Second
+	// ParseServiceConfig parses a JSON representation of the service config.
+	ParseServiceConfig any // func(string) *serviceconfig.ParseResult
+	// EqualServiceConfigForTesting is for testing service config generation and
+	// parsing. Both a and b should be returned by ParseServiceConfig.
+	// This function compares the config without rawJSON stripped, in case
+	// there's a difference in white space.
+	EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
+	// GetCertificateProviderBuilder returns the registered builder for the
+	// given name. This is set by package certprovider for use from xDS
+	// bootstrap code while parsing certificate provider configs in the
+	// bootstrap file.
+	GetCertificateProviderBuilder any // func(string) certprovider.Builder
+	// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
+	// stored in the passed in attributes. This is set by
+	// credentials/xds/xds.go.
+	GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer
+	// GetServerCredentials returns the transport credentials configured on a
+	// gRPC server. An xDS-enabled server needs to know what type of credentials
+	// is configured on the underlying gRPC server. This is set by server.go.
+	GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials
+	// CanonicalString returns the canonical string of the code defined here:
+	// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	CanonicalString any // func (codes.Code) string
+	// IsRegisteredMethod returns whether the passed in method is registered as
+	// a method on the server.
+	IsRegisteredMethod any // func(*grpc.Server, string) bool
+	// ServerFromContext returns the server from the context.
+	ServerFromContext any // func(context.Context) *grpc.Server
+	// AddGlobalServerOptions adds an array of ServerOption that will be
+	// effective globally for newly created servers. The priority will be: 1.
+	// user-provided; 2. this method; 3. default values.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	AddGlobalServerOptions any // func(opt ...ServerOption)
+	// ClearGlobalServerOptions clears the array of extra ServerOption. This
+	// method is useful in testing and benchmarking.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	ClearGlobalServerOptions func()
+	// AddGlobalDialOptions adds an array of DialOption that will be effective
+	// globally for newly created client channels. The priority will be: 1.
+	// user-provided; 2. this method; 3. default values.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	AddGlobalDialOptions any // func(opt ...DialOption)
+	// DisableGlobalDialOptions returns a DialOption that prevents the
+	// ClientConn from applying the global DialOptions (set via
+	// AddGlobalDialOptions).
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	DisableGlobalDialOptions any // func() grpc.DialOption
+	// ClearGlobalDialOptions clears the array of extra DialOption. This
+	// method is useful in testing and benchmarking.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	ClearGlobalDialOptions func()
+
+	// AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be
+	// configured for newly created ClientConns.
+	AddGlobalPerTargetDialOptions any // func (opt any)
+	// ClearGlobalPerTargetDialOptions clears the slice of global late apply
+	// dial options.
+	ClearGlobalPerTargetDialOptions func()
+
+	// JoinDialOptions combines the dial options passed as arguments into a
+	// single dial option.
+	JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption
+	// JoinServerOptions combines the server options passed as arguments into a
+	// single server option.
+	JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption
+
+	// WithBinaryLogger returns a DialOption that specifies the binary logger
+	// for a ClientConn.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption
+	// BinaryLogger returns a ServerOption that can set the binary logger for a
+	// server.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	BinaryLogger any // func(binarylog.Logger) grpc.ServerOption
+
+	// SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a
+	// provided grpc.ClientConn.
+	SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber)
+
+	// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
+	// the provided xds bootstrap config instead of the global configuration from
+	// the supported environment variables.  The resolver.Builder is meant to be
+	// used in conjunction with the grpc.WithResolvers DialOption.
+	//
+	// Testing Only
+	//
+	// This function should ONLY be used for testing and may not work with some
+	// other features, including the CSDS service.
+	NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
+
+	// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
+	// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
+	// variable.
+	//
+	// TODO: Remove this function once the RLS env var is removed.
+	RegisterRLSClusterSpecifierPluginForTesting func()
+
+	// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
+	// Specifier Plugin for testing purposes. This is needed because there is no way
+	// to unregister the RLS Cluster Specifier Plugin after registering it solely
+	// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
+	//
+	// TODO: Remove this function once the RLS env var is removed.
+	UnregisterRLSClusterSpecifierPluginForTesting func()
+
+	// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
+	// purposes, regardless of the RBAC environment variable.
+	//
+	// TODO: Remove this function once the RBAC env var is removed.
+	RegisterRBACHTTPFilterForTesting func()
+
+	// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
+	// testing purposes. This is needed because there is no way to unregister the
+	// HTTP Filter after registering it solely for testing purposes using
+	// RegisterRBACHTTPFilterForTesting().
+	//
+	// TODO: Remove this function once the RBAC env var is removed.
+	UnregisterRBACHTTPFilterForTesting func()
+
+	// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
+	ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions)
+
+	// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
+	// metadata to RPCs.
+	GRPCResolverSchemeExtraMetadata = "xds"
+
+	// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
+	EnterIdleModeForTesting any // func(*grpc.ClientConn)
+
+	// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
+	ExitIdleModeForTesting any // func(*grpc.ClientConn) error
+
+	ChannelzTurnOffForTesting func()
+
+	// TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
+	// invoke resource-not-found error for the given resource type and name.
+	TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error
+
+	// FromOutgoingContextRaw returns the un-merged, intermediary contents of
+	// metadata.rawMD.
+	FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
+
+	// UserSetDefaultScheme is set to true if the user has overridden the
+	// default resolver scheme.
+	UserSetDefaultScheme = false
+
+	// ShuffleAddressListForTesting pseudo-randomizes the order of addresses.  n
+	// is the number of elements.  swap swaps the elements with indexes i and j.
+	ShuffleAddressListForTesting any // func(n int, swap func(i, j int))
+
+	// ConnectedAddress returns the connected address for a SubConnState. The
+	// address is only valid if the state is READY.
+	ConnectedAddress any // func (scs SubConnState) resolver.Address
+
+	// SetConnectedAddress sets the connected address for a SubConnState.
+	SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
+
+	// SnapshotMetricRegistryForTesting snapshots the global data of the metric
+	// registry. Returns a cleanup function that sets the metric registry to its
+	// original state. Only called in testing functions.
+	SnapshotMetricRegistryForTesting func() func()
+
+	// SetDefaultBufferPoolForTesting updates the default buffer pool, for
+	// testing purposes.
+	SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
+
+	// SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
+	// testing purposes.
+	SetBufferPoolingThresholdForTesting any // func(int)
+)
+
+// HealthChecker defines the signature of the client-side LB channel health
+// checking function.
+//
+// The implementation is expected to create a health checking RPC stream by
+// calling newStream(), watch for the health status of serviceName, and report
+// its health back by calling setConnectivityState().
+//
+// The health checking protocol is defined at:
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
+type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error
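+
+// An editorial sketch (not part of the vendored file) of the shape a
+// HealthChecker implementation takes; the production implementation lives in
+// the grpc/health package, and the names below are hypothetical:
+//
+//	var checker HealthChecker = func(ctx context.Context,
+//		newStream func(string) (any, error),
+//		setState func(connectivity.State, error), service string) error {
+//		s, err := newStream("/grpc.health.v1.Health/Watch")
+//		if err != nil {
+//			return err
+//		}
+//		// Send a watch request for service, receive responses on s, and
+//		// report each status change via setState until ctx is done.
+//		_ = s
+//		<-ctx.Done()
+//		return ctx.Err()
+//	}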
+
+const (
+	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
+	CredsBundleModeFallback = "fallback"
+	// CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
+	// mode.
+	CredsBundleModeBalancer = "balancer"
+	// CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
+	// that supports backend returned by grpclb balancer.
+	CredsBundleModeBackendFromBalancer = "backend-from-balancer"
+)
+
+// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
+//
+// It currently has an experimental suffix, which will be removed once
+// end-to-end testing of the policy is complete.
+const RLSLoadBalancingPolicyName = "rls_experimental"
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/hack/tools/vendor/google.golang.org/grpc/internal/metadata/metadata.go
new file mode 100644
index 0000000000..900bfb7160
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -0,0 +1,132 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata contains functions to set and get metadata from addresses.
+//
+// This package is experimental.
+package metadata
+
+import (
+	"fmt"
+	"strings"
+
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+type mdKeyType string
+
+const mdKey = mdKeyType("grpc.internal.address.metadata")
+
+type mdValue metadata.MD
+
+func (m mdValue) Equal(o any) bool {
+	om, ok := o.(mdValue)
+	if !ok {
+		return false
+	}
+	if len(m) != len(om) {
+		return false
+	}
+	for k, v := range m {
+		ov := om[k]
+		if len(ov) != len(v) {
+			return false
+		}
+		for i, ve := range v {
+			if ov[i] != ve {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Get returns the metadata of addr.
+func Get(addr resolver.Address) metadata.MD {
+	attrs := addr.Attributes
+	if attrs == nil {
+		return nil
+	}
+	md, _ := attrs.Value(mdKey).(mdValue)
+	return metadata.MD(md)
+}
+
+// Set sets (overrides) the metadata in addr.
+//
+// When a SubConn is created with this address, the RPCs sent on it will all
+// have this metadata.
+func Set(addr resolver.Address, md metadata.MD) resolver.Address {
+	addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
+	return addr
+}
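+
+// Usage sketch (editorial, not part of the vendored file; the address is
+// hypothetical): a resolver or LB policy can attach per-address metadata
+// before handing addresses to gRPC.
+//
+//	addr := resolver.Address{Addr: "10.0.0.1:443"}
+//	addr = Set(addr, metadata.Pairs("region", "us-east1"))
+//	md := Get(addr) // md["region"] == []string{"us-east1"}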
+
+// Validate validates every pair in md with ValidatePair.
+func Validate(md metadata.MD) error {
+	for k, vals := range md {
+		if err := ValidatePair(k, vals...); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E.
+func hasNotPrintable(msg string) bool {
+	// Iterate over bytes rather than runes to avoid a string conversion.
+	for i := 0; i < len(msg); i++ {
+		if msg[i] < 0x20 || msg[i] > 0x7E {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidatePair validates a key-value pair with the following rules (pseudo-headers are skipped):
+//
+// - the key must contain one or more characters.
+// - the characters in the key must be contained in [0-9 a-z _ - .].
+// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
+// - the characters in every value must be printable (in [%x20-%x7E]).
+func ValidatePair(key string, vals ...string) error {
+	// key should not be empty
+	if key == "" {
+		return fmt.Errorf("there is an empty key in the header")
+	}
+	// pseudo-header will be ignored
+	if key[0] == ':' {
+		return nil
+	}
+	// Check the key, iterating over bytes to avoid a string conversion.
+	for i := 0; i < len(key); i++ {
+		r := key[i]
+		if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+			return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
+		}
+	}
+	if strings.HasSuffix(key, "-bin") {
+		return nil
+	}
+	// check value
+	for _, val := range vals {
+		if hasNotPrintable(val) {
+			return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
+		}
+	}
+	return nil
+}
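+
+// Editorial examples of the rules above (outcomes follow directly from the
+// checks in ValidatePair):
+//
+//	_ = ValidatePair("user-agent", "grpc-go/1.0") // nil: valid key and value
+//	_ = ValidatePair("User-Agent", "x")           // error: uppercase in key
+//	_ = ValidatePair("trace-bin", "\x00\x01")     // nil: "-bin" values are not checked
+//	_ = ValidatePair(":authority", "anything")    // nil: pseudo-headers are skipped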
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/hack/tools/vendor/google.golang.org/grpc/internal/pretty/pretty.go
new file mode 100644
index 0000000000..dbee7a60d7
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/pretty/pretty.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pretty defines helper functions to pretty-print structs for logging.
+package pretty
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/protoadapt"
+)
+
+const jsonIndent = "  "
+
+// ToJSON marshals the input into a json string.
+//
+// If marshal fails, it falls back to fmt.Sprintf("%+v").
+func ToJSON(e any) string {
+	if ee, ok := e.(protoadapt.MessageV1); ok {
+		e = protoadapt.MessageV2Of(ee)
+	}
+
+	if ee, ok := e.(protoadapt.MessageV2); ok {
+		mm := protojson.MarshalOptions{
+			Indent:    jsonIndent,
+			Multiline: true,
+		}
+		ret, err := mm.Marshal(ee)
+		if err != nil {
+			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+			// messages are not imported, and this will fail because the message
+			// is not found.
+			return fmt.Sprintf("%+v", ee)
+		}
+		return string(ret)
+	}
+
+	ret, err := json.MarshalIndent(e, "", jsonIndent)
+	if err != nil {
+		return fmt.Sprintf("%+v", e)
+	}
+	return string(ret)
+}
+
+// FormatJSON formats the input json bytes with indentation.
+//
+// If Indent fails, it returns the unchanged input as string.
+func FormatJSON(b []byte) string {
+	var out bytes.Buffer
+	err := json.Indent(&out, b, "", jsonIndent)
+	if err != nil {
+		return string(b)
+	}
+	return out.String()
+}
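+
+// Usage sketch (editorial; logger, update, and cfgJSON are hypothetical):
+// both helpers degrade gracefully on bad input, so they are safe to call
+// inline in log statements.
+//
+//	logger.Infof("received update: %s", pretty.ToJSON(update))
+//	logger.Infof("raw config: %s", pretty.FormatJSON(cfgJSON))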
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/hack/tools/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
new file mode 100644
index 0000000000..f0603871c9
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver provides internal resolver-related functionality.
+package resolver
+
+import (
+	"context"
+	"sync"
+
+	"google.golang.org/grpc/internal/serviceconfig"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+// ConfigSelector controls what configuration to use for every RPC.
+type ConfigSelector interface {
+	// SelectConfig selects the configuration for the RPC, or terminates it
+	// using the error.
+	// This error will be converted by the gRPC library to a status error with
+	// code UNKNOWN if it is not returned as a status error.
+	SelectConfig(RPCInfo) (*RPCConfig, error)
+}
+
+// RPCInfo contains RPC information needed by a ConfigSelector.
+type RPCInfo struct {
+	// Context is the user's context for the RPC and contains headers and
+	// application timeout.  It is passed for interception purposes and for
+	// efficiency reasons.  SelectConfig should not be blocking.
+	Context context.Context
+	Method  string // i.e. "/Service/Method"
+}
+
+// RPCConfig describes the configuration to use for each RPC.
+type RPCConfig struct {
+	// The context to use for the remainder of the RPC; can pass info to LB
+	// policy or affect timeout or metadata.
+	Context      context.Context
+	MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC
+	OnCommitted  func()                     // Called when the RPC has been committed (retries no longer possible)
+	Interceptor  ClientInterceptor
+}
+
+// ClientStream is the same as grpc.ClientStream, but redefined here to avoid
+// a circular dependency.
+type ClientStream interface {
+	// Header returns the header metadata received from the server if there
+	// is any. It blocks if the metadata is not ready to read.
+	Header() (metadata.MD, error)
+	// Trailer returns the trailer metadata from the server, if there is any.
+	// It must only be called after stream.CloseAndRecv has returned, or
+	// stream.Recv has returned a non-nil error (including io.EOF).
+	Trailer() metadata.MD
+	// CloseSend closes the send direction of the stream. It closes the stream
+	// when non-nil error is met. It is also not safe to call CloseSend
+	// concurrently with SendMsg.
+	CloseSend() error
+	// Context returns the context for this stream.
+	//
+	// It should not be called until after Header or RecvMsg has returned. Once
+	// called, subsequent client-side retries are disabled.
+	Context() context.Context
+	// SendMsg is generally called by generated code. On error, SendMsg aborts
+	// the stream. If the error was generated by the client, the status is
+	// returned directly; otherwise, io.EOF is returned and the status of
+	// the stream may be discovered using RecvMsg.
+	//
+	// SendMsg blocks until:
+	//   - There is sufficient flow control to schedule m with the transport, or
+	//   - The stream is done, or
+	//   - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the server. An
+	// untimely stream closure may result in lost messages. To ensure delivery,
+	// users should ensure the RPC completed successfully using RecvMsg.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines. It is also
+	// not safe to call CloseSend concurrently with SendMsg.
+	SendMsg(m any) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the stream completes successfully. On
+	// any other error, the stream is aborted and the error contains the RPC
+	// status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m any) error
+}
+
+// ClientInterceptor is an interceptor for gRPC client streams.
+type ClientInterceptor interface {
+	// NewStream produces a ClientStream for an RPC which may optionally use
+	// the provided function to produce a stream for delegation.  Note:
+	// RPCInfo.Context should not be used (will be nil).
+	//
+	// done is invoked when the RPC is finished using its connection, or could
+	// not be assigned a connection.  RPC operations may still occur on
+	// ClientStream after done is called, since the interceptor is invoked by
+	// application-layer operations.  done must never be nil when called.
+	NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
+}
+
+// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side.
+type ServerInterceptor interface {
+	// AllowRPC checks if an incoming RPC is allowed to proceed based on
+	// information about the connection the RPC was received on, and its HTTP
+	// headers. This information will be piped into the context.
+	AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
+}
+
+type csKeyType string
+
+const csKey = csKeyType("grpc.internal.resolver.configSelector")
+
+// SetConfigSelector sets the config selector in state and returns the new
+// state.
+func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
+	state.Attributes = state.Attributes.WithValue(csKey, cs)
+	return state
+}
+
+// GetConfigSelector retrieves the config selector from state, if present, and
+// returns it or nil if absent.
+func GetConfigSelector(state resolver.State) ConfigSelector {
+	cs, _ := state.Attributes.Value(csKey).(ConfigSelector)
+	return cs
+}
+
+// SafeConfigSelector allows for safe switching of ConfigSelector
+// implementations such that previous values are guaranteed to not be in use
+// when UpdateConfigSelector returns.
+type SafeConfigSelector struct {
+	mu sync.RWMutex
+	cs ConfigSelector
+}
+
+// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until
+// all uses of the previous ConfigSelector have completed.
+func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) {
+	scs.mu.Lock()
+	defer scs.mu.Unlock()
+	scs.cs = cs
+}
+
+// SelectConfig defers to the current ConfigSelector in scs.
+func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) {
+	scs.mu.RLock()
+	defer scs.mu.RUnlock()
+	return scs.cs.SelectConfig(r)
+}
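+
+// Usage sketch (editorial; newSelector is hypothetical): writers swap the
+// selector with UpdateConfigSelector while RPCs keep calling SelectConfig
+// concurrently.
+//
+//	var scs SafeConfigSelector
+//	scs.UpdateConfigSelector(newSelector) // returns once the old selector is unused
+//	cfg, err := scs.SelectConfig(RPCInfo{Method: "/Service/Method"})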
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/hack/tools/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
new file mode 100644
index 0000000000..4552db16b0
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -0,0 +1,458 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package dns implements a DNS resolver to be installed as the default
+// resolver in gRPC.
+package dns
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"math/rand"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/envconfig"
+	"google.golang.org/grpc/internal/resolver/dns/internal"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
+	// addresses from SRV records.  Must not be changed after init time.
+	EnableSRVLookups = false
+
+	// MinResolutionInterval is the minimum interval at which re-resolutions are
+	// allowed. This helps to prevent excessive re-resolution.
+	MinResolutionInterval = 30 * time.Second
+
+	// ResolvingTimeout specifies the maximum duration for a DNS resolution request.
+	// If the timeout expires before a response is received, the request will be canceled.
+	//
+	// It is recommended to set this value at application startup. Avoid modifying this variable
+	// after initialization as it's not thread-safe for concurrent modification.
+	ResolvingTimeout = 30 * time.Second
+
+	logger = grpclog.Component("dns")
+)
+
+func init() {
+	resolver.Register(NewBuilder())
+	internal.TimeAfterFunc = time.After
+	internal.TimeNowFunc = time.Now
+	internal.TimeUntilFunc = time.Until
+	internal.NewNetResolver = newNetResolver
+	internal.AddressDialer = addressDialer
+}
+
+const (
+	defaultPort       = "443"
+	defaultDNSSvrPort = "53"
+	golang            = "GO"
+	// txtPrefix is the prefix string to be prepended to the host name for TXT
+	// record lookup.
+	txtPrefix = "_grpc_config."
+	// In DNS, service config is encoded in a TXT record via the mechanism
+	// described in RFC-1464 using the attribute name grpc_config.
+	txtAttribute = "grpc_config="
+)
+
+var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
+	return func(ctx context.Context, network, _ string) (net.Conn, error) {
+		var dialer net.Dialer
+		return dialer.DialContext(ctx, network, address)
+	}
+}
+
+var newNetResolver = func(authority string) (internal.NetResolver, error) {
+	if authority == "" {
+		return net.DefaultResolver, nil
+	}
+
+	host, port, err := parseTarget(authority, defaultDNSSvrPort)
+	if err != nil {
+		return nil, err
+	}
+
+	authorityWithPort := net.JoinHostPort(host, port)
+
+	return &net.Resolver{
+		PreferGo: true,
+		Dial:     internal.AddressDialer(authorityWithPort),
+	}, nil
+}
+
+// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
+func NewBuilder() resolver.Builder {
+	return &dnsBuilder{}
+}
+
+type dnsBuilder struct{}
+
+// Build creates and starts a DNS resolver that watches the name resolution of
+// the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	host, port, err := parseTarget(target.Endpoint(), defaultPort)
+	if err != nil {
+		return nil, err
+	}
+
+	// IP address.
+	if ipAddr, ok := formatIP(host); ok {
+		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
+		cc.UpdateState(resolver.State{Addresses: addr})
+		return deadResolver{}, nil
+	}
+
+	// DNS address (non-IP).
+	ctx, cancel := context.WithCancel(context.Background())
+	d := &dnsResolver{
+		host:                 host,
+		port:                 port,
+		ctx:                  ctx,
+		cancel:               cancel,
+		cc:                   cc,
+		rn:                   make(chan struct{}, 1),
+		disableServiceConfig: opts.DisableServiceConfig,
+	}
+
+	d.resolver, err = internal.NewNetResolver(target.URL.Host)
+	if err != nil {
+		return nil, err
+	}
+
+	d.wg.Add(1)
+	go d.watcher()
+	return d, nil
+}
+
+// Scheme returns the naming scheme of this resolver builder, which is "dns".
+func (b *dnsBuilder) Scheme() string {
+	return "dns"
+}
+
+// deadResolver is a resolver that does nothing.
+type deadResolver struct{}
+
+func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (deadResolver) Close() {}
+
+// dnsResolver watches for the name resolution update for a non-IP target.
+type dnsResolver struct {
+	host     string
+	port     string
+	resolver internal.NetResolver
+	ctx      context.Context
+	cancel   context.CancelFunc
+	cc       resolver.ClientConn
+	// rn channel is used by ResolveNow() to force an immediate resolution of the
+	// target.
+	rn chan struct{}
+	// wg is used to ensure that Close() returns only after the watcher()
+	// goroutine has finished; otherwise a data race is possible. For example,
+	// dns_resolver_test replaces the real lookup functions with mocked ones to
+	// facilitate testing. If Close() did not wait for the watcher() goroutine
+	// to finish, the race detector would sometimes report that the lookups
+	// (reads of the lookup function pointers) inside the watcher() goroutine
+	// race with replaceNetFunc (writes of the lookup function pointers).
+	wg                   sync.WaitGroup
+	disableServiceConfig bool
+}
+
+// ResolveNow invokes an immediate resolution of the target that this
+// dnsResolver watches.
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
+	select {
+	case d.rn <- struct{}{}:
+	default:
+	}
+}
+
+// Close closes the dnsResolver.
+func (d *dnsResolver) Close() {
+	d.cancel()
+	d.wg.Wait()
+}
+
+func (d *dnsResolver) watcher() {
+	defer d.wg.Done()
+	backoffIndex := 1
+	for {
+		state, err := d.lookup()
+		if err != nil {
+			// Report error to the underlying grpc.ClientConn.
+			d.cc.ReportError(err)
+		} else {
+			err = d.cc.UpdateState(*state)
+		}
+
+		var nextResolutionTime time.Time
+		if err == nil {
+			// Resolution succeeded; wait for the next ResolveNow. However, also
+			// wait at least MinResolutionInterval to prevent constant re-resolution.
+			backoffIndex = 1
+			nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval)
+			select {
+			case <-d.ctx.Done():
+				return
+			case <-d.rn:
+			}
+		} else {
+			// Poll on an error found in DNS Resolver or an error received from
+			// ClientConn.
+			nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex))
+			backoffIndex++
+		}
+		select {
+		case <-d.ctx.Done():
+			return
+		case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)):
+		}
+	}
+}
+
+func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) {
+	if !EnableSRVLookups {
+		return nil, nil
+	}
+	var newAddrs []resolver.Address
+	_, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host)
+	if err != nil {
+		err = handleDNSError(err, "SRV") // may become nil
+		return nil, err
+	}
+	for _, s := range srvs {
+		lbAddrs, err := d.resolver.LookupHost(ctx, s.Target)
+		if err != nil {
+			err = handleDNSError(err, "A") // may become nil
+			if err == nil {
+				// If there are other SRV records, look them up and ignore this
+				// one that does not exist.
+				continue
+			}
+			return nil, err
+		}
+		for _, a := range lbAddrs {
+			ip, ok := formatIP(a)
+			if !ok {
+				return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
+			}
+			addr := ip + ":" + strconv.Itoa(int(s.Port))
+			newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
+		}
+	}
+	return newAddrs, nil
+}
+
+func handleDNSError(err error, lookupType string) error {
+	dnsErr, ok := err.(*net.DNSError)
+	if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+		// Timeouts and temporary errors should be communicated to gRPC to
+		// attempt another DNS query (with backoff).  Other errors should be
+		// suppressed (they may represent the absence of a TXT record).
+		return nil
+	}
+	if err != nil {
+		err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
+		logger.Info(err)
+	}
+	return err
+}
+
+func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult {
+	ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host)
+	if err != nil {
+		if envconfig.TXTErrIgnore {
+			return nil
+		}
+		if err = handleDNSError(err, "TXT"); err != nil {
+			return &serviceconfig.ParseResult{Err: err}
+		}
+		return nil
+	}
+	var res string
+	for _, s := range ss {
+		res += s
+	}
+
+	// TXT record must have "grpc_config=" attribute in order to be used as
+	// service config.
+	if !strings.HasPrefix(res, txtAttribute) {
+		logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
+		// This is not an error; it is the equivalent of not having a service
+		// config.
+		return nil
+	}
+	sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
+	return d.cc.ParseServiceConfig(sc)
+}
+
+func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) {
+	addrs, err := d.resolver.LookupHost(ctx, d.host)
+	if err != nil {
+		err = handleDNSError(err, "A")
+		return nil, err
+	}
+	newAddrs := make([]resolver.Address, 0, len(addrs))
+	for _, a := range addrs {
+		ip, ok := formatIP(a)
+		if !ok {
+			return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
+		}
+		addr := ip + ":" + d.port
+		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
+	}
+	return newAddrs, nil
+}
+
+func (d *dnsResolver) lookup() (*resolver.State, error) {
+	ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout)
+	defer cancel()
+	srv, srvErr := d.lookupSRV(ctx)
+	addrs, hostErr := d.lookupHost(ctx)
+	if hostErr != nil && (srvErr != nil || len(srv) == 0) {
+		return nil, hostErr
+	}
+
+	state := resolver.State{Addresses: addrs}
+	if len(srv) > 0 {
+		state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
+	}
+	if !d.disableServiceConfig {
+		state.ServiceConfig = d.lookupTXT(ctx)
+	}
+	return &state, nil
+}
+
+// formatIP returns ok = false if addr is not a valid textual representation
+// of an IP address. If addr is an IPv4 address, it returns addr and ok =
+// true. If addr is an IPv6 address, it returns addr enclosed in square
+// brackets and ok = true.
+func formatIP(addr string) (addrIP string, ok bool) {
+	ip := net.ParseIP(addr)
+	if ip == nil {
+		return "", false
+	}
+	if ip.To4() != nil {
+		return addr, true
+	}
+	return "[" + addr + "]", true
+}
+
+// parseTarget takes the user input target string and default port, returns
+// formatted host and port info. If target doesn't specify a port, set the port
+// to be the defaultPort. If target is in IPv6 format and host-name is enclosed
+// in square brackets, brackets are stripped when setting the host.
+// examples:
+// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
+// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
+// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443"
+// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
+func parseTarget(target, defaultPort string) (host, port string, err error) {
+	if target == "" {
+		return "", "", internal.ErrMissingAddr
+	}
+	if ip := net.ParseIP(target); ip != nil {
+		// target is an IPv4 or IPv6 (without brackets) address.
+		return target, defaultPort, nil
+	}
+	if host, port, err = net.SplitHostPort(target); err == nil {
+		if port == "" {
+			// If the port field is empty (target ends with colon), e.g. "[::1]:",
+			// this is an error.
+			return "", "", internal.ErrEndsWithColon
+		}
+		// target has a port, i.e. ipv4-host:port, [ipv6-host]:port, host-name:port.
+		if host == "" {
+			// Keep consistent with net.Dial(): If the host is empty, as in ":80",
+			// the local system is assumed.
+			host = "localhost"
+		}
+		return host, port, nil
+	}
+	if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
+		// target doesn't have port
+		return host, port, nil
+	}
+	return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err)
+}
+
+type rawChoice struct {
+	ClientLanguage *[]string        `json:"clientLanguage,omitempty"`
+	Percentage     *int             `json:"percentage,omitempty"`
+	ClientHostName *[]string        `json:"clientHostName,omitempty"`
+	ServiceConfig  *json.RawMessage `json:"serviceConfig,omitempty"`
+}
+
+func containsString(a *[]string, b string) bool {
+	if a == nil {
+		return true
+	}
+	for _, c := range *a {
+		if c == b {
+			return true
+		}
+	}
+	return false
+}
+
+func chosenByPercentage(a *int) bool {
+	if a == nil {
+		return true
+	}
+	return rand.Intn(100)+1 <= *a
+}
+
+func canaryingSC(js string) string {
+	if js == "" {
+		return ""
+	}
+	var rcs []rawChoice
+	err := json.Unmarshal([]byte(js), &rcs)
+	if err != nil {
+		logger.Warningf("dns: error parsing service config json: %v", err)
+		return ""
+	}
+	cliHostname, err := os.Hostname()
+	if err != nil {
+		logger.Warningf("dns: error getting client hostname: %v", err)
+		return ""
+	}
+	var sc string
+	for _, c := range rcs {
+		if !containsString(c.ClientLanguage, golang) ||
+			!chosenByPercentage(c.Percentage) ||
+			!containsString(c.ClientHostName, cliHostname) ||
+			c.ServiceConfig == nil {
+			continue
+		}
+		sc = string(*c.ServiceConfig)
+		break
+	}
+	return sc
+}
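+
+// For illustration (editorial; the domain is hypothetical), a TXT record
+// consumed by lookupTXT and canaryingSC looks like:
+//
+//	_grpc_config.example.com. 300 IN TXT "grpc_config=[{\"clientLanguage\":[\"GO\"],\"percentage\":50,\"serviceConfig\":{\"loadBalancingConfig\":[{\"round_robin\":{}}]}}]"
+//
+// The first choice whose clientLanguage, percentage, and clientHostName
+// criteria all match is used as the service config; if none match, the
+// result is the same as having no service config at all.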
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/hack/tools/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
new file mode 100644
index 0000000000..c0eae4f5f8
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the dns resolver package.
+package internal
+
+import (
+	"context"
+	"errors"
+	"net"
+	"time"
+)
+
+// NetResolver groups the methods on net.Resolver that are used by the DNS
+// resolver implementation. This allows the default net.Resolver instance to be
+// overridden from tests.
+type NetResolver interface {
+	LookupHost(ctx context.Context, host string) (addrs []string, err error)
+	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+	LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+var (
+	// ErrMissingAddr is the error returned when building a DNS resolver when
+	// the provided target name is empty.
+	ErrMissingAddr = errors.New("dns resolver: missing address")
+
+	// ErrEndsWithColon is the error returned when building a DNS resolver when
+	// the provided target name ends with a colon that is supposed to be the
+	// separator between host and port.  E.g. "::" is a valid address as it is
+	// an IPv6 address (host only) and "[::]:" is invalid as it ends with a
+	// colon as the host and port separator
+	ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
+)
+
+// The following vars are overridden from tests.
+var (
+	// TimeAfterFunc is used by the DNS resolver to wait for the given duration
+	// to elapse. In non-test code, this is implemented by time.After. In test
+	// code, this can be used to control the amount of time the resolver is
+	// blocked waiting for the duration to elapse.
+	TimeAfterFunc func(time.Duration) <-chan time.Time
+
+	// TimeNowFunc is used by the DNS resolver to get the current time.
+	// In non-test code, this is implemented by time.Now. In test code,
+	// this can be used to control the current time for the resolver.
+	TimeNowFunc func() time.Time
+
+	// TimeUntilFunc is used by the DNS resolver to calculate the remaining
+	// wait time for re-resolution. In non-test code, this is implemented by
+	// time.Until. In test code, this can be used to control the remaining
+	// time for resolver to wait for re-resolution.
+	TimeUntilFunc func(time.Time) time.Duration
+
+	// NewNetResolver returns the net.Resolver instance for the given target.
+	NewNetResolver func(string) (NetResolver, error)
+
+	// AddressDialer is the dialer used to dial the DNS server. It accepts the
+	// Host portion of the URL corresponding to the user's dial target and
+	// returns a dial function.
+	AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error)
+)
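+
+// Test-time usage sketch (editorial): tests replace these hooks and restore
+// them afterwards, e.g. to make the resolver's backoff wait fire immediately:
+//
+//	origTimeAfter := TimeAfterFunc
+//	TimeAfterFunc = func(time.Duration) <-chan time.Time {
+//		return time.After(0)
+//	}
+//	defer func() { TimeAfterFunc = origTimeAfter }()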
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/hack/tools/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
new file mode 100644
index 0000000000..b901c7bace
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -0,0 +1,64 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package passthrough implements a pass-through resolver. It sends the target
+// name without scheme back to gRPC as the resolved address.
+package passthrough
+
+import (
+	"errors"
+
+	"google.golang.org/grpc/resolver"
+)
+
+const scheme = "passthrough"
+
+type passthroughBuilder struct{}
+
+func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	if target.Endpoint() == "" && opts.Dialer == nil {
+		return nil, errors.New("passthrough: received empty target in Build()")
+	}
+	r := &passthroughResolver{
+		target: target,
+		cc:     cc,
+	}
+	r.start()
+	return r, nil
+}
+
+func (*passthroughBuilder) Scheme() string {
+	return scheme
+}
+
+type passthroughResolver struct {
+	target resolver.Target
+	cc     resolver.ClientConn
+}
+
+func (r *passthroughResolver) start() {
+	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
+}
+
+func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (*passthroughResolver) Close() {}
+
+func init() {
+	resolver.Register(&passthroughBuilder{})
+}
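+
+// Usage sketch (editorial): with the passthrough scheme, the endpoint is
+// handed to the transport verbatim, and no name resolution is performed.
+//
+//	conn, err := grpc.Dial("passthrough:///10.1.2.3:50051",
+//		grpc.WithTransportCredentials(insecure.NewCredentials()))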
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/hack/tools/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
new file mode 100644
index 0000000000..27cd81af9e
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -0,0 +1,78 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package unix implements a resolver for unix targets.
+package unix
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/internal/transport/networktype"
+	"google.golang.org/grpc/resolver"
+)
+
+const unixScheme = "unix"
+const unixAbstractScheme = "unix-abstract"
+
+type builder struct {
+	scheme string
+}
+
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
+	if target.URL.Host != "" {
+		return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
+	}
+
+	// gRPC was parsing the dial target manually before PR #4817, and we
+	// switched to using url.Parse() in that PR. To avoid breaking existing
+	// resolver implementations we ended up stripping the leading "/" from the
+	// endpoint. This obviously does not work for the "unix" scheme. Hence we
+	// end up using the parsed URL instead.
+	endpoint := target.URL.Path
+	if endpoint == "" {
+		endpoint = target.URL.Opaque
+	}
+	addr := resolver.Address{Addr: endpoint}
+	if b.scheme == unixAbstractScheme {
+		// We cannot prepend \0 as C++ gRPC does, because in Go a leading '@'
+		// is used to signify that we do not want a trailing \0 in the address.
+		addr.Addr = "@" + addr.Addr
+	}
+	cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
+	return &nopResolver{}, nil
+}
+
+func (b *builder) Scheme() string {
+	return b.scheme
+}
+
+func (b *builder) OverrideAuthority(resolver.Target) string {
+	return "localhost"
+}
+
+type nopResolver struct {
+}
+
+func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (*nopResolver) Close() {}
+
+func init() {
+	resolver.Register(&builder{scheme: unixScheme})
+	resolver.Register(&builder{scheme: unixAbstractScheme})
+}
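+
+// Target syntax sketch (editorial; the paths are hypothetical): both schemes
+// resolve to a single address dialed over a unix socket.
+//
+//	"unix:///var/run/app.sock" // filesystem socket; Addr is "/var/run/app.sock"
+//	"unix-abstract:abc"        // abstract socket; Addr is "@abc"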
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/hack/tools/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
new file mode 100644
index 0000000000..11d82afcc7
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package serviceconfig
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Duration defines JSON marshal and unmarshal methods to conform to the
+// protobuf JSON spec defined [here].
+//
+// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration
+type Duration time.Duration
+
+func (d Duration) String() string {
+	return fmt.Sprint(time.Duration(d))
+}
+
+// MarshalJSON converts from d to a JSON string output.
+func (d Duration) MarshalJSON() ([]byte, error) {
+	ns := time.Duration(d).Nanoseconds()
+	sec := ns / int64(time.Second)
+	ns = ns % int64(time.Second)
+
+	var sign string
+	if sec < 0 || ns < 0 {
+		sign, sec, ns = "-", -1*sec, -1*ns
+	}
+
+	// Generated output always contains 0, 3, 6, or 9 fractional digits,
+	// depending on required precision.
+	str := fmt.Sprintf("%s%d.%09d", sign, sec, ns)
+	str = strings.TrimSuffix(str, "000")
+	str = strings.TrimSuffix(str, "000")
+	str = strings.TrimSuffix(str, ".000")
+	return []byte(fmt.Sprintf("\"%ss\"", str)), nil
+}
+
+// UnmarshalJSON unmarshals b as a duration JSON string into d.
+func (d *Duration) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	if !strings.HasSuffix(s, "s") {
+		return fmt.Errorf("malformed duration %q: missing seconds unit", s)
+	}
+	neg := false
+	if s[0] == '-' {
+		neg = true
+		s = s[1:]
+	}
+	ss := strings.SplitN(s[:len(s)-1], ".", 3)
+	if len(ss) > 2 {
+		return fmt.Errorf("malformed duration %q: too many decimals", s)
+	}
+	// hasDigits is set if either the whole or fractional part of the number is
+	// present, since both are optional but one is required.
+	hasDigits := false
+	var sec, ns int64
+	if len(ss[0]) > 0 {
+		var err error
+		if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil {
+			return fmt.Errorf("malformed duration %q: %v", s, err)
+		}
+		// Maximum seconds value per the durationpb spec.
+		const maxProtoSeconds = 315_576_000_000
+		if sec > maxProtoSeconds {
+			return fmt.Errorf("out of range: %q", s)
+		}
+		hasDigits = true
+	}
+	if len(ss) == 2 && len(ss[1]) > 0 {
+		if len(ss[1]) > 9 {
+			return fmt.Errorf("malformed duration %q: too many digits after decimal", s)
+		}
+		var err error
+		if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil {
+			return fmt.Errorf("malformed duration %q: %v", s, err)
+		}
+		for i := 9; i > len(ss[1]); i-- {
+			ns *= 10
+		}
+		hasDigits = true
+	}
+	if !hasDigits {
+		return fmt.Errorf("malformed duration %q: contains no numbers", s)
+	}
+
+	if neg {
+		sec *= -1
+		ns *= -1
+	}
+
+	// Maximum/minimum seconds/nanoseconds representable by Go's time.Duration.
+	const maxSeconds = math.MaxInt64 / int64(time.Second)
+	const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second)
+	const minSeconds = math.MinInt64 / int64(time.Second)
+	const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second)
+
+	if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) {
+		*d = Duration(math.MaxInt64)
+	} else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) {
+		*d = Duration(math.MinInt64)
+	} else {
+		*d = Duration(sec*int64(time.Second) + ns)
+	}
+	return nil
+}
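+
+// Round-trip sketch (editorial), following the trimming rules in MarshalJSON:
+//
+//	d := Duration(1500 * time.Millisecond)
+//	b, _ := json.Marshal(d) // `"1.500s"`
+//	var out Duration
+//	_ = json.Unmarshal(b, &out) // out == Duration(1500 * time.Millisecond)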
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/hack/tools/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
new file mode 100644
index 0000000000..51e733e495
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
@@ -0,0 +1,180 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package serviceconfig contains utility functions to parse service config.
+package serviceconfig
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	externalserviceconfig "google.golang.org/grpc/serviceconfig"
+)
+
+var logger = grpclog.Component("core")
+
+// BalancerConfig wraps the name and config associated with one load balancing
+// policy. It corresponds to a single entry of the loadBalancingConfig field
+// from ServiceConfig.
+//
+// It implements the json.Unmarshaler interface.
+//
+// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
+type BalancerConfig struct {
+	Name   string
+	Config externalserviceconfig.LoadBalancingConfig
+}
+
+type intermediateBalancerConfig []map[string]json.RawMessage
+
+// MarshalJSON implements the json.Marshaler interface.
+//
+// It marshals the balancer and config into a length-1 slice
+// ([]map[string]config).
+func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
+	if bc.Config == nil {
+		// If config is nil, return empty config `{}`.
+		return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil
+	}
+	c, err := json.Marshal(bc.Config)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+//
+// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
+// config. This method iterates through that list in order, and stops at the
+// first policy that is supported.
+//   - If the config for the first supported policy is invalid, the whole service
+//     config is invalid.
+//   - If the list doesn't contain any supported policy, the whole service config
+//     is invalid.
+func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
+	var ir intermediateBalancerConfig
+	err := json.Unmarshal(b, &ir)
+	if err != nil {
+		return err
+	}
+
+	var names []string
+	for i, lbcfg := range ir {
+		if len(lbcfg) != 1 {
+			return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
+		}
+
+		var (
+			name    string
+			jsonCfg json.RawMessage
+		)
+		// Get the key:value pair from the map. We have already made sure that
+		// the map contains a single entry.
+		for name, jsonCfg = range lbcfg {
+		}
+
+		names = append(names, name)
+		builder := balancer.Get(name)
+		if builder == nil {
+			// If the balancer is not registered, move on to the next config.
+			// This is not an error.
+			continue
+		}
+		bc.Name = name
+
+		parser, ok := builder.(balancer.ConfigParser)
+		if !ok {
+			if string(jsonCfg) != "{}" {
+				logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
+			}
+			// Stop here even though the builder doesn't support parsing the config.
+			return nil
+		}
+
+		cfg, err := parser.ParseConfig(jsonCfg)
+		if err != nil {
+			return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
+		}
+		bc.Config = cfg
+		return nil
+	}
+	// This is reached when the for loop iterates over all entries, but didn't
+	// return. This means we had a loadBalancingConfig slice but did not
+	// encounter a registered policy. The config is considered invalid in this
+	// case.
+	return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names)
+}
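+
+// For illustration (editorial), unmarshaling stops at the first registered
+// policy and skips unknown ones without error:
+//
+//	var bc BalancerConfig
+//	err := json.Unmarshal([]byte(
+//		`[{"unknown_policy": {}}, {"round_robin": {}}]`), &bc)
+//	// If round_robin is registered, bc.Name == "round_robin" and err == nil;
+//	// the unknown policy before it is skipped, not treated as an error.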
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+type MethodConfig struct {
+	// WaitForReady indicates whether RPCs sent to this method should wait until
+	// the connection is ready by default (!failfast). The value specified via the
+	// gRPC client API will override the value set here.
+	WaitForReady *bool
+	// Timeout is the default timeout for RPCs sent to this method. The actual
+	// deadline used will be the minimum of the value specified here and the value
+	// set by the application via the gRPC client API.  If either one is not set,
+	// then the other will be used.  If neither is set, then the RPC has no deadline.
+	Timeout *time.Duration
+	// MaxReqSize is the maximum allowed payload size for an individual request in a
+	// stream (client->server) in bytes. The size which is measured is the serialized
+	// payload after per-message compression (but before stream compression) in bytes.
+	// The actual value used is the minimum of the value specified here and the value set
+	// by the application via the gRPC client API. If either one is not set, then the other
+	// will be used.  If neither is set, then the built-in default is used.
+	MaxReqSize *int
+	// MaxRespSize is the maximum allowed payload size for an individual response in a
+	// stream (server->client) in bytes.
+	MaxRespSize *int
+	// RetryPolicy configures retry options for the method.
+	RetryPolicy *RetryPolicy
+}
+
+// RetryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type RetryPolicy struct {
+	// MaxAttempts is the maximum number of attempts, including the original RPC.
+	//
+	// This field is required and must be two or greater.
+	MaxAttempts int
+
+	// Exponential backoff parameters. The initial retry attempt will occur at
+	// random(0, initialBackoff). In general, the nth attempt will occur at
+	// random(0,
+	//   min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
+	//
+	// These fields are required and must be greater than zero.
+	InitialBackoff    time.Duration
+	MaxBackoff        time.Duration
+	BackoffMultiplier float64
+
+	// The set of status codes which may be retried.
+	//
+	// Status codes are specified as strings, e.g., "UNAVAILABLE".
+	//
+	// This field is required and must be non-empty.
+	// Note: a set is used to store this for easy lookup.
+	RetryableStatusCodes map[codes.Code]bool
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/stats/labels.go b/hack/tools/vendor/google.golang.org/grpc/internal/stats/labels.go
new file mode 100644
index 0000000000..fd33af51ae
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/stats/labels.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+	// TelemetryLabels are the telemetry labels to record.
+	TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there are none.
+func GetLabels(ctx context.Context) *Labels {
+	labels, _ := ctx.Value(labelsKey{}).(*Labels)
+	return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+	// This overwrites any Labels already present in the context; an
+	// alternative design could append to them instead.
+	return context.WithValue(ctx, labelsKey{}, labels)
+}
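+
+// Usage sketch (editorial; the label key and value are illustrative): labels
+// flow through the context from the component that sets them to the stats
+// handler that records them.
+//
+//	ctx = SetLabels(ctx, &Labels{TelemetryLabels: map[string]string{
+//		"grpc.lb.locality": "us-east1-a",
+//	}})
+//	if l := GetLabels(ctx); l != nil {
+//		_ = l.TelemetryLabels["grpc.lb.locality"]
+//	}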
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/hack/tools/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 0000000000..be110d41f9
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+	"fmt"
+
+	estats "google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It panics if a Record call provides a number of label values that does not
+// match the number of label keys in the metric's descriptor.
+type MetricsRecorderList struct {
+	// metricsRecorders are the metrics recorders this list will forward to.
+	metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metric recorder list with all the stats
+// handlers provided which implement the MetricsRecorder interface.
+// If none of the provided stats handlers implement the MetricsRecorder
+// interface, the returned MetricsRecorderList is a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+	var mrs []estats.MetricsRecorder
+	for _, sh := range shs {
+		if mr, ok := sh.(estats.MetricsRecorder); ok {
+			mrs = append(mrs, mr)
+		}
+	}
+	return &MetricsRecorderList{
+		metricsRecorders: mrs,
+	}
+}
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+	if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+		panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+	}
+}
+
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordInt64Count(handle, incr, labels...)
+	}
+}
+
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordFloat64Count(handle, incr, labels...)
+	}
+}
+
+func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordInt64Histo(handle, incr, labels...)
+	}
+}
+
+func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordFloat64Histo(handle, incr, labels...)
+	}
+}
+
+func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordInt64Gauge(handle, incr, labels...)
+	}
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/status/status.go b/hack/tools/vendor/google.golang.org/grpc/internal/status/status.go
new file mode 100644
index 0000000000..757925381f
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/status/status.go
@@ -0,0 +1,205 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package status implements errors returned by gRPC.  These errors are
+// serialized and transmitted on the wire between server and client, and allow
+// for additional data to be transmitted via the Details field in the status
+// proto.  gRPC service handlers should return an error created by this
+// package, and gRPC clients should expect a corresponding error to be
+// returned from the RPC call.
+//
+// This package upholds the invariants that a non-nil error may not
+// contain an OK code, and an OK code must result in a nil error.
+package status
+
+import (
+	"errors"
+	"fmt"
+
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/protoadapt"
+	"google.golang.org/protobuf/types/known/anypb"
+)
+
+// Status represents an RPC status code, message, and details.  It is immutable
+// and should be created with New, Newf, or FromProto.
+type Status struct {
+	s *spb.Status
+}
+
+// NewWithProto returns a new status including details from statusProto.  This
+// is meant to be used by the gRPC library only.
+func NewWithProto(code codes.Code, message string, statusProto []string) *Status {
+	if len(statusProto) != 1 {
+		// No grpc-status-details-bin header, or multiple; just ignore.
+		return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+	}
+	st := &spb.Status{}
+	if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil {
+		// Probably not a google.rpc.Status proto; do not provide details.
+		return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+	}
+	if st.Code == int32(code) {
+		// The codes match between the grpc-status header and the
+		// grpc-status-details-bin header; use the full details proto.
+		return &Status{s: st}
+	}
+	return &Status{
+		s: &spb.Status{
+			Code: int32(codes.Internal),
+			Message: fmt.Sprintf(
+				"grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v",
+				code, message, st,
+			),
+		},
+	}
+}
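+
+// Illustrative usage sketch (not part of the upstream source): the HTTP/2
+// transport is expected to call NewWithProto with the decoded values of the
+// grpc-status-details-bin trailer, roughly:
+//
+//	st := NewWithProto(codes.Code(rawCode), grpcMessage, detailsBinValues)
+//
+// where rawCode, grpcMessage and detailsBinValues are assumed to come from
+// the parsed trailer metadata.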
+
+// New returns a Status representing c and msg.
+func New(c codes.Code, msg string) *Status {
+	return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
+}
+
+// Newf returns New(c, fmt.Sprintf(format, a...)).
+func Newf(c codes.Code, format string, a ...any) *Status {
+	return New(c, fmt.Sprintf(format, a...))
+}
+
+// FromProto returns a Status representing s.
+func FromProto(s *spb.Status) *Status {
+	return &Status{s: proto.Clone(s).(*spb.Status)}
+}
+
+// Err returns an error representing c and msg.  If c is OK, returns nil.
+func Err(c codes.Code, msg string) error {
+	return New(c, msg).Err()
+}
+
+// Errorf returns Error(c, fmt.Sprintf(format, a...)).
+func Errorf(c codes.Code, format string, a ...any) error {
+	return Err(c, fmt.Sprintf(format, a...))
+}
+
+// Code returns the status code contained in s.
+func (s *Status) Code() codes.Code {
+	if s == nil || s.s == nil {
+		return codes.OK
+	}
+	return codes.Code(s.s.Code)
+}
+
+// Message returns the message contained in s.
+func (s *Status) Message() string {
+	if s == nil || s.s == nil {
+		return ""
+	}
+	return s.s.Message
+}
+
+// Proto returns s's status as an spb.Status proto message.
+func (s *Status) Proto() *spb.Status {
+	if s == nil {
+		return nil
+	}
+	return proto.Clone(s.s).(*spb.Status)
+}
+
+// Err returns an immutable error representing s; returns nil if s.Code() is OK.
+func (s *Status) Err() error {
+	if s.Code() == codes.OK {
+		return nil
+	}
+	return &Error{s: s}
+}
+
+// WithDetails returns a new status with the provided details messages appended to the status.
+// If any errors are encountered, it returns nil and the first error encountered.
+func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
+	if s.Code() == codes.OK {
+		return nil, errors.New("no error details for status with code OK")
+	}
+	// s.Code() != OK implies that s.Proto() != nil.
+	p := s.Proto()
+	for _, detail := range details {
+		m, err := anypb.New(protoadapt.MessageV2Of(detail))
+		if err != nil {
+			return nil, err
+		}
+		p.Details = append(p.Details, m)
+	}
+	return &Status{s: p}, nil
+}
+
+// Details returns a slice of details messages attached to the status.
+// If a detail cannot be decoded, the error is returned in place of the detail.
+func (s *Status) Details() []any {
+	if s == nil || s.s == nil {
+		return nil
+	}
+	details := make([]any, 0, len(s.s.Details))
+	for _, any := range s.s.Details {
+		detail, err := any.UnmarshalNew()
+		if err != nil {
+			details = append(details, err)
+			continue
+		}
+		details = append(details, detail)
+	}
+	return details
+}
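+
+// Illustrative usage sketch (not part of the upstream source), assuming a
+// proto detail message such as errdetails.BadRequest is available to the
+// caller:
+//
+//	st := New(codes.InvalidArgument, "bad request")
+//	st, err := st.WithDetails(&errdetails.BadRequest{})
+//	if err != nil {
+//		// The code was OK, or a detail failed to marshal into an Any.
+//	}
+//	for _, d := range st.Details() {
+//		// Each d is a decoded proto message, or an error if decoding failed.
+//	}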
+
+func (s *Status) String() string {
+	return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
+}
+
+// Error wraps a pointer to a status proto. It implements error and exposes
+// the underlying Status, and a nil *Error should never be returned by this
+// package.
+type Error struct {
+	s *Status
+}
+
+func (e *Error) Error() string {
+	return e.s.String()
+}
+
+// GRPCStatus returns the Status represented by e.
+func (e *Error) GRPCStatus() *Status {
+	return e.s
+}
+
+// Is implements errors.Is functionality.
+// Two Errors are equivalent if their underlying status protos are equal.
+func (e *Error) Is(target error) bool {
+	tse, ok := target.(*Error)
+	if !ok {
+		return false
+	}
+	return proto.Equal(e.s.s, tse.s.s)
+}
+
+// IsRestrictedControlPlaneCode returns whether the status includes a code
+// restricted for control plane usage as defined by gRFC A54.
+func IsRestrictedControlPlaneCode(s *Status) bool {
+	switch s.Code() {
+	case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
+		return true
+	}
+	return false
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/hack/tools/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
new file mode 100644
index 0000000000..b3a72276de
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -0,0 +1,112 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionality that grpc uses to get low-level
+// operating system stats/info.
+package syscall
+
+import (
+	"fmt"
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("core")
+
+// GetCPUTime returns how much CPU time has passed since the start of this process.
+func GetCPUTime() int64 {
+	var ts unix.Timespec
+	if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
+		logger.Fatal(err)
+	}
+	return ts.Nano()
+}
+
+// Rusage is an alias for syscall.Rusage under linux environments.
+type Rusage = syscall.Rusage
+
+// GetRusage returns the resource usage of the current process.
+func GetRusage() *Rusage {
+	rusage := new(Rusage)
+	syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
+	return rusage
+}
+
+// CPUTimeDiff returns the difference in user CPU time and system CPU time
+// between two Rusage structs.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+	var (
+		utimeDiffs  = latest.Utime.Sec - first.Utime.Sec
+		utimeDiffus = latest.Utime.Usec - first.Utime.Usec
+		stimeDiffs  = latest.Stime.Sec - first.Stime.Sec
+		stimeDiffus = latest.Stime.Usec - first.Stime.Usec
+	)
+
+	uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
+	sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
+
+	return uTimeElapsed, sTimeElapsed
+}
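+
+// Illustrative usage sketch (not part of the upstream source): measuring the
+// CPU time consumed by a block of work, where work is an assumed caller-side
+// function:
+//
+//	before := GetRusage()
+//	work()
+//	utime, stime := CPUTimeDiff(before, GetRusage())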
+
+// SetTCPUserTimeout sets the TCP user timeout on a connection's socket.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		// Not a TCP connection; exit early.
+		return nil
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		return fmt.Errorf("error getting raw connection: %v", err)
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
+	})
+	if err != nil {
+		return fmt.Errorf("error setting option on socket: %v", err)
+	}
+
+	return nil
+}
+
+// GetTCPUserTimeout gets the TCP user timeout on a connection's socket.
+func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
+	tcpconn, ok := conn.(*net.TCPConn)
+	if !ok {
+		err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
+		return
+	}
+	rawConn, err := tcpconn.SyscallConn()
+	if err != nil {
+		err = fmt.Errorf("error getting raw connection: %v", err)
+		return
+	}
+	err = rawConn.Control(func(fd uintptr) {
+		opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
+	})
+	if err != nil {
+		err = fmt.Errorf("error getting option on socket: %v", err)
+		return
+	}
+
+	return
+}
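+
+// Illustrative usage sketch (not part of the upstream source): a caller that
+// wants the kernel to abort a stalled connection after 20 seconds might do:
+//
+//	conn, err := net.Dial("tcp", "example.com:443")
+//	if err == nil {
+//		_ = SetTCPUserTimeout(conn, 20*time.Second)
+//	}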
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/hack/tools/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
new file mode 100644
index 0000000000..54c24c2ff3
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -0,0 +1,77 @@
+//go:build !linux
+// +build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionality that grpc uses to get low-level
+// operating system stats/info.
+package syscall
+
+import (
+	"net"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var once sync.Once
+var logger = grpclog.Component("core")
+
+func log() {
+	once.Do(func() {
+		logger.Info("CPU time info is unavailable on non-linux environments.")
+	})
+}
+
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
+func GetCPUTime() int64 {
+	log()
+	return 0
+}
+
+// Rusage is an empty struct under non-linux environments.
+type Rusage struct{}
+
+// GetRusage is a no-op function under non-linux environments.
+func GetRusage() *Rusage {
+	log()
+	return nil
+}
+
+// CPUTimeDiff returns the difference in user CPU time and system CPU time
+// between two Rusage structs. It is a no-op function for non-linux environments.
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
+	log()
+	return 0, 0
+}
+
+// SetTCPUserTimeout is a no-op function under non-linux environments.
+func SetTCPUserTimeout(net.Conn, time.Duration) error {
+	log()
+	return nil
+}
+
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
+func GetTCPUserTimeout(net.Conn) (int, error) {
+	log()
+	return -1, nil
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/hack/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
new file mode 100644
index 0000000000..4f347edd42
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
@@ -0,0 +1,29 @@
+//go:build !unix && !windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+)
+
+// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{}
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/hack/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
new file mode 100644
index 0000000000..7e7aaa5463
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
@@ -0,0 +1,54 @@
+//go:build unix
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{
+		// Setting a negative value here prevents the Go stdlib from overriding
+		// the values of TCP keepalive time and interval. It also prevents the
+		// Go stdlib from enabling TCP keepalives by default.
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
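+
+// Illustrative usage sketch (not part of the upstream source): the returned
+// dialer is typically used wherever a plain net.Dialer would be, e.g.:
+//
+//	d := NetDialerWithTCPKeepalive()
+//	conn, err := d.DialContext(ctx, "tcp", addr)
+//
+// where ctx and addr are assumed to be supplied by the caller.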
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/hack/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
new file mode 100644
index 0000000000..d5c1085eea
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -0,0 +1,54 @@
+//go:build windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/windows"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{
+		// Setting a negative value here prevents the Go stdlib from overriding
+		// the values of TCP keepalive time and interval. It also prevents the
+		// Go stdlib from enabling TCP keepalives by default.
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/hack/tools/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
new file mode 100644
index 0000000000..070680edba
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync"
+	"time"
+)
+
+const (
+	// bdpLimit is the maximum value the flow control windows will be increased
+	// to.  TCP typically limits this to 4MB, but some systems go up to 16MB.
+	// Since this is only a limit, it is safe to make it optimistic.
+	bdpLimit = (1 << 20) * 16
+	// alpha is a constant factor used to keep a moving average
+	// of RTTs.
+	alpha = 0.9
+	// If the current bdp sample is greater than or equal to
+	// beta times our estimated bdp, and the current bandwidth
+	// sample is the maximum bandwidth observed so far, we
+	// increase our bdp estimate by a factor of gamma.
+	beta = 0.66
+	// Since a sample is at most 1.5 times the real BDP, multiplying it by 4/3
+	// would keep our bdp estimate at no more than twice the real BDP; to round
+	// things out, we use 2 as the multiplication factor instead.
+	gamma = 2
+)
+
+// Adding arbitrary data to ping so that its ack can be identified.
+// Easter-egg: what does the ping message say?
+var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
+
+type bdpEstimator struct {
+	// sentAt is the time when the ping was sent.
+	sentAt time.Time
+
+	mu sync.Mutex
+	// bdp is the current bdp estimate.
+	bdp uint32
+	// sample is the number of bytes received in one measurement cycle.
+	sample uint32
+	// bwMax is the maximum bandwidth noted so far (bytes/sec).
+	bwMax float64
+	// bool to keep track of the beginning of a new measurement cycle.
+	isSent bool
+	// Callback to update the window sizes.
+	updateFlowControl func(n uint32)
+	// sampleCount is the number of samples taken so far.
+	sampleCount uint64
+	// round trip time (seconds)
+	rtt float64
+}
+
+// timesnap registers the time the bdp ping was sent out so that
+// the network rtt can be calculated when its ack is received.
+// It is called (by the controller) when the bdpPing is
+// being written on the wire.
+func (b *bdpEstimator) timesnap(d [8]byte) {
+	if bdpPing.data != d {
+		return
+	}
+	b.sentAt = time.Now()
+}
+
+// add adds bytes to the current sample for calculating bdp.
+// It returns true only if a ping must be sent. This can be used
+// by the caller (handleData) to make decision about batching
+// a window update with it.
+func (b *bdpEstimator) add(n uint32) bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.bdp == bdpLimit {
+		return false
+	}
+	if !b.isSent {
+		b.isSent = true
+		b.sample = n
+		b.sentAt = time.Time{}
+		b.sampleCount++
+		return true
+	}
+	b.sample += n
+	return false
+}
+
+// calculate is called when an ack for a bdp ping is received.
+// Here we calculate the current bdp and bandwidth sample and
+// decide if the flow control windows should go up.
+func (b *bdpEstimator) calculate(d [8]byte) {
+	// Check if the ping acked for was the bdp ping.
+	if bdpPing.data != d {
+		return
+	}
+	b.mu.Lock()
+	rttSample := time.Since(b.sentAt).Seconds()
+	if b.sampleCount < 10 {
+		// Bootstrap rtt with an average of first 10 rtt samples.
+		b.rtt += (rttSample - b.rtt) / float64(b.sampleCount)
+	} else {
+		// Weight the recent past more heavily.
+		b.rtt += (rttSample - b.rtt) * float64(alpha)
+	}
+	b.isSent = false
+	// The number of bytes accumulated so far in the sample is smaller
+	// than or equal to 1.5 times the real BDP on a saturated connection.
+	bwCurrent := float64(b.sample) / (b.rtt * float64(1.5))
+	if bwCurrent > b.bwMax {
+		b.bwMax = bwCurrent
+	}
+	// If the current sample (which is smaller than or equal to 1.5 times the real BDP) is
+	// greater than or equal to 2/3rds of our perceived bdp AND this is the maximum bandwidth
+	// seen so far, we should update our perception of the network BDP.
+	if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit {
+		sampleFloat := float64(b.sample)
+		b.bdp = uint32(gamma * sampleFloat)
+		if b.bdp > bdpLimit {
+			b.bdp = bdpLimit
+		}
+		bdp := b.bdp
+		b.mu.Unlock()
+		b.updateFlowControl(bdp)
+		return
+	}
+	b.mu.Unlock()
+}
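+
+// Illustrative sketch (not part of the upstream source) of how the transport
+// is assumed to drive the estimator; t.controlBuf and ackData stand in for
+// the surrounding transport plumbing:
+//
+//	if b.add(n) { // first data of a new cycle: a bdp ping should be sent
+//		t.controlBuf.put(bdpPing)
+//	}
+//	// timesnap(bdpPing.data) records the send time as the ping hits the wire,
+//	// and when its ack arrives:
+//	b.calculate(ackData) // may invoke updateFlowControl with a larger bdp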
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/hack/tools/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
new file mode 100644
index 0000000000..ef72fbb3a0
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -0,0 +1,1035 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net"
+	"runtime"
+	"strconv"
+	"sync"
+	"sync/atomic"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/status"
+)
+
+var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
+	e.SetMaxDynamicTableSizeLimit(v)
+}
+
+type itemNode struct {
+	it   any
+	next *itemNode
+}
+
+type itemList struct {
+	head *itemNode
+	tail *itemNode
+}
+
+func (il *itemList) enqueue(i any) {
+	n := &itemNode{it: i}
+	if il.tail == nil {
+		il.head, il.tail = n, n
+		return
+	}
+	il.tail.next = n
+	il.tail = n
+}
+
+// peek returns the first item in the list without removing it from the
+// list.
+func (il *itemList) peek() any {
+	return il.head.it
+}
+
+func (il *itemList) dequeue() any {
+	if il.head == nil {
+		return nil
+	}
+	i := il.head.it
+	il.head = il.head.next
+	if il.head == nil {
+		il.tail = nil
+	}
+	return i
+}
+
+func (il *itemList) dequeueAll() *itemNode {
+	h := il.head
+	il.head, il.tail = nil, nil
+	return h
+}
+
+func (il *itemList) isEmpty() bool {
+	return il.head == nil
+}
+
+// The following defines various control items which could flow through
+// the control buffer of the transport. They represent different aspects of
+// control tasks, e.g., flow control, settings, stream resetting, etc.
+
+// maxQueuedTransportResponseFrames is the most queued "transport response"
+// frames we will buffer before preventing new reads from occurring on the
+// transport.  These are control frames sent in response to client requests,
+// such as RST_STREAM due to bad headers or settings acks.
+const maxQueuedTransportResponseFrames = 50
+
+type cbItem interface {
+	isTransportResponseFrame() bool
+}
+
+// registerStream is used to register an incoming stream with loopy writer.
+type registerStream struct {
+	streamID uint32
+	wq       *writeQuota
+}
+
+func (*registerStream) isTransportResponseFrame() bool { return false }
+
+// headerFrame is also used to register stream on the client-side.
+type headerFrame struct {
+	streamID   uint32
+	hf         []hpack.HeaderField
+	endStream  bool               // Valid on server side.
+	initStream func(uint32) error // Used only on the client side.
+	onWrite    func()
+	wq         *writeQuota    // write quota for the stream created.
+	cleanup    *cleanupStream // Valid on the server side.
+	onOrphaned func(error)    // Valid on client-side
+}
+
+func (h *headerFrame) isTransportResponseFrame() bool {
+	return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM
+}
+
+type cleanupStream struct {
+	streamID uint32
+	rst      bool
+	rstCode  http2.ErrCode
+	onWrite  func()
+}
+
+func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
+
+type earlyAbortStream struct {
+	httpStatus     uint32
+	streamID       uint32
+	contentSubtype string
+	status         *status.Status
+	rst            bool
+}
+
+func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
+
+type dataFrame struct {
+	streamID  uint32
+	endStream bool
+	h         []byte
+	reader    mem.Reader
+	// onEachWrite is called every time
+	// a part of data is written out.
+	onEachWrite func()
+}
+
+func (*dataFrame) isTransportResponseFrame() bool { return false }
+
+type incomingWindowUpdate struct {
+	streamID  uint32
+	increment uint32
+}
+
+func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false }
+
+type outgoingWindowUpdate struct {
+	streamID  uint32
+	increment uint32
+}
+
+func (*outgoingWindowUpdate) isTransportResponseFrame() bool {
+	return false // window updates are throttled by thresholds
+}
+
+type incomingSettings struct {
+	ss []http2.Setting
+}
+
+func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK
+
+type outgoingSettings struct {
+	ss []http2.Setting
+}
+
+func (*outgoingSettings) isTransportResponseFrame() bool { return false }
+
+type incomingGoAway struct {
+}
+
+func (*incomingGoAway) isTransportResponseFrame() bool { return false }
+
+type goAway struct {
+	code      http2.ErrCode
+	debugData []byte
+	headsUp   bool
+	closeConn error // if set, loopyWriter will exit with this error
+}
+
+func (*goAway) isTransportResponseFrame() bool { return false }
+
+type ping struct {
+	ack  bool
+	data [8]byte
+}
+
+func (*ping) isTransportResponseFrame() bool { return true }
+
+type outFlowControlSizeRequest struct {
+	resp chan uint32
+}
+
+func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
+
+// closeConnection is an instruction to tell the loopy writer to flush the
+// framer and exit, which will cause the transport's connection to be closed
+// (by the client or server).  The transport itself will close after the reader
+// encounters the EOF caused by the connection closure.
+type closeConnection struct{}
+
+func (closeConnection) isTransportResponseFrame() bool { return false }
+
+type outStreamState int
+
+const (
+	active outStreamState = iota
+	empty
+	waitingOnStreamQuota
+)
+
+type outStream struct {
+	id               uint32
+	state            outStreamState
+	itl              *itemList
+	bytesOutStanding int
+	wq               *writeQuota
+
+	next *outStream
+	prev *outStream
+}
+
+func (s *outStream) deleteSelf() {
+	if s.prev != nil {
+		s.prev.next = s.next
+	}
+	if s.next != nil {
+		s.next.prev = s.prev
+	}
+	s.next, s.prev = nil, nil
+}
+
+type outStreamList struct {
+	// Following are sentinel objects that mark the
+	// beginning and end of the list. They do not
+	// contain any item lists. All valid objects are
+	// inserted in between them.
+	// This is needed so that an outStream object can
+	// deleteSelf() in O(1) time without knowing which
+	// list it belongs to.
+	head *outStream
+	tail *outStream
+}
+
+func newOutStreamList() *outStreamList {
+	head, tail := new(outStream), new(outStream)
+	head.next = tail
+	tail.prev = head
+	return &outStreamList{
+		head: head,
+		tail: tail,
+	}
+}
+
+func (l *outStreamList) enqueue(s *outStream) {
+	e := l.tail.prev
+	e.next = s
+	s.prev = e
+	s.next = l.tail
+	l.tail.prev = s
+}
+
+// dequeue removes and returns the stream at the beginning of the list.
+func (l *outStreamList) dequeue() *outStream {
+	b := l.head.next
+	if b == l.tail {
+		return nil
+	}
+	b.deleteSelf()
+	return b
+}
+
+// controlBuffer is a way to pass information to loopy.
+//
+// Information is passed as specific struct types called control frames. A
+// control frame not only represents data, messages or headers to be sent out
+// but can also be used to instruct loopy to update its internal state. It
+// shouldn't be confused with an HTTP2 frame, although some of the control
+// frames, like dataFrame and headerFrame, do go out on the wire as HTTP2 frames.
+type controlBuffer struct {
+	wakeupCh chan struct{}   // Unblocks readers waiting for something to read.
+	done     <-chan struct{} // Closed when the transport is done.
+
+	// Mutex guards all the fields below, except trfChan which can be read
+	// atomically without holding mu.
+	mu              sync.Mutex
+	consumerWaiting bool      // True when readers are blocked waiting for new data.
+	closed          bool      // True when the controlbuf is finished.
+	list            *itemList // List of queued control frames.
+
+	// transportResponseFrames counts the number of queued items that represent
+	// the response of an action initiated by the peer.  trfChan is created
+	// when transportResponseFrames >= maxQueuedTransportResponseFrames and is
+	// closed and nilled when transportResponseFrames drops below the
+	// threshold.  Both fields are protected by mu.
+	transportResponseFrames int
+	trfChan                 atomic.Pointer[chan struct{}]
+}
+
+func newControlBuffer(done <-chan struct{}) *controlBuffer {
+	return &controlBuffer{
+		wakeupCh: make(chan struct{}, 1),
+		list:     &itemList{},
+		done:     done,
+	}
+}
+
+// throttle blocks if there are too many frames in the control buf that
+// represent the response of an action initiated by the peer, like
+// incomingSettings, cleanupStreams, etc.
+func (c *controlBuffer) throttle() {
+	if ch := c.trfChan.Load(); ch != nil {
+		select {
+		case <-(*ch):
+		case <-c.done:
+		}
+	}
+}
+
+// put adds an item to the controlbuf.
+func (c *controlBuffer) put(it cbItem) error {
+	_, err := c.executeAndPut(nil, it)
+	return err
+}
+
+// executeAndPut runs f, and if the return value is true, adds the given item to
+// the controlbuf. The item could be nil, in which case, this method simply
+// executes f and does not add the item to the controlbuf.
+//
+// The first return value indicates whether the item was successfully added to
+// the control buffer. A non-nil error, specifically ErrConnClosing, is returned
+// if the control buffer is already closed.
+func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return false, ErrConnClosing
+	}
+	if f != nil {
+		if !f() { // f wasn't successful
+			return false, nil
+		}
+	}
+	if it == nil {
+		return true, nil
+	}
+
+	var wakeUp bool
+	if c.consumerWaiting {
+		wakeUp = true
+		c.consumerWaiting = false
+	}
+	c.list.enqueue(it)
+	if it.isTransportResponseFrame() {
+		c.transportResponseFrames++
+		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+			// We are adding the frame that puts us over the threshold; create
+			// a throttling channel.
+			ch := make(chan struct{})
+			c.trfChan.Store(&ch)
+		}
+	}
+	if wakeUp {
+		select {
+		case c.wakeupCh <- struct{}{}:
+		default:
+		}
+	}
+	return true, nil
+}
+
+// get returns the next control frame from the control buffer. If block is true
+// **and** there are no control frames in the control buffer, the call blocks
+// until one of the conditions is met: there is a frame to return or the
+// transport is closed.
+func (c *controlBuffer) get(block bool) (any, error) {
+	for {
+		c.mu.Lock()
+		frame, err := c.getOnceLocked()
+		if frame != nil || err != nil || !block {
+			// If we read a frame or an error, we can return to the caller. The
+			// call to getOnceLocked() returns a nil frame and a nil error if
+			// there is nothing to read, and in that case, if the caller asked
+			// us not to block, we can return now as well.
+			c.mu.Unlock()
+			return frame, err
+		}
+		c.consumerWaiting = true
+		c.mu.Unlock()
+
+		// Release the lock above and wait to be woken up.
+		select {
+		case <-c.wakeupCh:
+		case <-c.done:
+			return nil, errors.New("transport closed by client")
+		}
+	}
+}
+
+// Callers must not use this method, but should instead use get().
+//
+// Caller must hold c.mu.
+func (c *controlBuffer) getOnceLocked() (any, error) {
+	if c.closed {
+		return nil, ErrConnClosing
+	}
+	if c.list.isEmpty() {
+		return nil, nil
+	}
+	h := c.list.dequeue().(cbItem)
+	if h.isTransportResponseFrame() {
+		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+			// We are removing the frame that put us over the
+			// threshold; close and clear the throttling channel.
+			ch := c.trfChan.Swap(nil)
+			close(*ch)
+		}
+		c.transportResponseFrames--
+	}
+	return h, nil
+}
+
+// finish closes the control buffer, cleaning up any streams that have queued
+// header frames. Once this method returns, no more frames can be added to the
+// control buffer, and attempts to do so will return ErrConnClosing.
+func (c *controlBuffer) finish() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return
+	}
+	c.closed = true
+	// There may be headers for streams in the control buffer.
+	// These streams need to be cleaned up since the transport
+	// is not yet aware of them.
+	for head := c.list.dequeueAll(); head != nil; head = head.next {
+		switch v := head.it.(type) {
+		case *headerFrame:
+			if v.onOrphaned != nil { // It will be nil on the server-side.
+				v.onOrphaned(ErrConnClosing)
+			}
+		case *dataFrame:
+			_ = v.reader.Close()
+		}
+	}
+
+	// In case throttle() is currently in flight, it needs to be unblocked.
+	// Otherwise, the transport may not close, since the transport is closed by
+	// the reader encountering the connection error.
+	ch := c.trfChan.Swap(nil)
+	if ch != nil {
+		close(*ch)
+	}
+}
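+
+// Illustrative usage sketch (not part of the upstream source): producers queue
+// control frames while loopy consumes them, roughly:
+//
+//	cbuf := newControlBuffer(transportDone)
+//	_ = cbuf.put(&ping{data: [8]byte{}}) // producer side
+//	it, err := cbuf.get(true)            // loopy side; blocks until a frame arrives
+//
+// where transportDone is assumed to be the transport's done channel, and
+// it/err are handled as in loopyWriter.run.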
+
+type side int
+
+const (
+	clientSide side = iota
+	serverSide
+)
+
+// Loopy receives frames from the control buffer.
+// Each frame is handled individually; most of the work done by loopy goes
+// into handling data frames. Loopy maintains a queue of active streams, and each
+// stream maintains a queue of data frames; as loopy receives data frames,
+// they are added to the queue of the relevant stream.
+// Loopy goes over this list of active streams by processing one node every iteration,
+// thereby closely resembling round-robin scheduling over all streams. While
+// processing a stream, loopy writes out data bytes from this stream, capped by the min
+// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
+type loopyWriter struct {
+	side      side
+	cbuf      *controlBuffer
+	sendQuota uint32
+	oiws      uint32 // outbound initial window size.
+	// estdStreams is a map of all established streams that have not been cleaned up yet.
+	// On client-side, this is all streams whose headers were sent out.
+	// On server-side, this is all streams whose headers were received.
+	estdStreams map[uint32]*outStream // Established streams.
+	// activeStreams is a linked-list of all streams that have data to send and some
+	// stream-level flow control quota.
+	// Each of these streams internally has a list of data items (and perhaps trailers
+	// on the server side) to be sent out.
+	activeStreams *outStreamList
+	framer        *framer
+	hBuf          *bytes.Buffer  // The buffer for HPACK encoding.
+	hEnc          *hpack.Encoder // HPACK encoder.
+	bdpEst        *bdpEstimator
+	draining      bool
+	conn          net.Conn
+	logger        *grpclog.PrefixLogger
+	bufferPool    mem.BufferPool
+
+	// Side-specific handlers
+	ssGoAwayHandler func(*goAway) (bool, error)
+}
+
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
+	var buf bytes.Buffer
+	l := &loopyWriter{
+		side:            s,
+		cbuf:            cbuf,
+		sendQuota:       defaultWindowSize,
+		oiws:            defaultWindowSize,
+		estdStreams:     make(map[uint32]*outStream),
+		activeStreams:   newOutStreamList(),
+		framer:          fr,
+		hBuf:            &buf,
+		hEnc:            hpack.NewEncoder(&buf),
+		bdpEst:          bdpEst,
+		conn:            conn,
+		logger:          logger,
+		ssGoAwayHandler: goAwayHandler,
+		bufferPool:      bufferPool,
+	}
+	return l
+}
+
+const minBatchSize = 1000
+
+// run should be run in a separate goroutine.
+// It reads control frames from controlBuf and processes them by:
+// 1. Updating loopy's internal state, and/or
+// 2. Writing out HTTP2 frames on the wire.
+//
+// Loopy keeps all active streams with data to send in a linked-list.
+// All streams in the activeStreams linked-list must have both:
+// 1. Data to send, and
+// 2. Stream level flow control quota available.
+//
+// In each iteration of run loop, other than processing the incoming control
+// frame, loopy calls processData, which processes one node from the
+// activeStreams linked-list.  This results in writing of HTTP2 frames into an
+// underlying write buffer.  When there are no more control frames to read from
+// controlBuf, loopy flushes the write buffer.  As an optimization, to increase
+// the batch size for each flush, loopy yields the processor once, if the batch
+// size is too low, to give stream goroutines a chance to fill it up.
+//
+// Upon exiting, if the error causing the exit is not an I/O error, run()
+// flushes the underlying connection.  The connection is always left open to
+// allow different closing behavior on the client and server.
+func (l *loopyWriter) run() (err error) {
+	defer func() {
+		if l.logger.V(logLevel) {
+			l.logger.Infof("loopyWriter exiting with error: %v", err)
+		}
+		if !isIOError(err) {
+			l.framer.writer.Flush()
+		}
+		l.cbuf.finish()
+	}()
+	for {
+		it, err := l.cbuf.get(true)
+		if err != nil {
+			return err
+		}
+		if err = l.handle(it); err != nil {
+			return err
+		}
+		if _, err = l.processData(); err != nil {
+			return err
+		}
+		gosched := true
+	hasdata:
+		for {
+			it, err := l.cbuf.get(false)
+			if err != nil {
+				return err
+			}
+			if it != nil {
+				if err = l.handle(it); err != nil {
+					return err
+				}
+				if _, err = l.processData(); err != nil {
+					return err
+				}
+				continue hasdata
+			}
+			isEmpty, err := l.processData()
+			if err != nil {
+				return err
+			}
+			if !isEmpty {
+				continue hasdata
+			}
+			if gosched {
+				gosched = false
+				if l.framer.writer.offset < minBatchSize {
+					runtime.Gosched()
+					continue hasdata
+				}
+			}
+			l.framer.writer.Flush()
+			break hasdata
+		}
+	}
+}
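+
+// Illustrative sketch (not part of the upstream source): both transports are
+// expected to start loopy on its own goroutine once the connection is up;
+// fr, cbuf, bdpEst, conn, logger and pool are assumed transport-owned values:
+//
+//	l := newLoopyWriter(clientSide, fr, cbuf, bdpEst, conn, logger, nil, pool)
+//	go func() {
+//		// run returns when the control buffer is finished or an error occurs.
+//		_ = l.run()
+//	}()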
+
+func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
+	return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
+}
+
+func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
+	// A zero stream ID means a connection-level window update; bump the send quota.
+	if w.streamID == 0 {
+		l.sendQuota += w.increment
+		return
+	}
+	// Find the stream and update it.
+	if str, ok := l.estdStreams[w.streamID]; ok {
+		str.bytesOutStanding -= int(w.increment)
+		if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
+			str.state = active
+			l.activeStreams.enqueue(str)
+			return
+		}
+	}
+}
+
+func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
+	return l.framer.fr.WriteSettings(s.ss...)
+}
+
+func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
+	l.applySettings(s.ss)
+	return l.framer.fr.WriteSettingsAck()
+}
+
+func (l *loopyWriter) registerStreamHandler(h *registerStream) {
+	str := &outStream{
+		id:    h.streamID,
+		state: empty,
+		itl:   &itemList{},
+		wq:    h.wq,
+	}
+	l.estdStreams[h.streamID] = str
+}
+
+func (l *loopyWriter) headerHandler(h *headerFrame) error {
+	if l.side == serverSide {
+		str, ok := l.estdStreams[h.streamID]
+		if !ok {
+			if l.logger.V(logLevel) {
+				l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
+			}
+			return nil
+		}
+		// Case 1.A: Server is responding back with headers.
+		if !h.endStream {
+			return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
+		}
+		// else:  Case 1.B: Server wants to close stream.
+
+		if str.state != empty { // either active or waiting on stream quota.
+			// add it to str's list of items.
+			str.itl.enqueue(h)
+			return nil
+		}
+		if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
+			return err
+		}
+		return l.cleanupStreamHandler(h.cleanup)
+	}
+	// Case 2: Client wants to originate stream.
+	str := &outStream{
+		id:    h.streamID,
+		state: empty,
+		itl:   &itemList{},
+		wq:    h.wq,
+	}
+	return l.originateStream(str, h)
+}
+
+func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
+	// l.draining is set when handling a GoAway, in which case we want to avoid
+	// creating new streams.
+	if l.draining {
+		// TODO: provide a better error with the reason we are in draining.
+		hdr.onOrphaned(errStreamDrain)
+		return nil
+	}
+	if err := hdr.initStream(str.id); err != nil {
+		return err
+	}
+	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
+		return err
+	}
+	l.estdStreams[str.id] = str
+	return nil
+}
+
+func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
+	if onWrite != nil {
+		onWrite()
+	}
+	l.hBuf.Reset()
+	for _, f := range hf {
+		if err := l.hEnc.WriteField(f); err != nil {
+			if l.logger.V(logLevel) {
+				l.logger.Warningf("Encountered error while encoding headers: %v", err)
+			}
+		}
+	}
+	var (
+		err               error
+		endHeaders, first bool
+	)
+	first = true
+	for !endHeaders {
+		size := l.hBuf.Len()
+		if size > http2MaxFrameLen {
+			size = http2MaxFrameLen
+		} else {
+			endHeaders = true
+		}
+		if first {
+			first = false
+			err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+				StreamID:      streamID,
+				BlockFragment: l.hBuf.Next(size),
+				EndStream:     endStream,
+				EndHeaders:    endHeaders,
+			})
+		} else {
+			err = l.framer.fr.WriteContinuation(
+				streamID,
+				endHeaders,
+				l.hBuf.Next(size),
+			)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) preprocessData(df *dataFrame) {
+	str, ok := l.estdStreams[df.streamID]
+	if !ok {
+		return
+	}
+	// If we got data for a stream it means that
+	// stream was originated and the headers were sent out.
+	str.itl.enqueue(df)
+	if str.state == empty {
+		str.state = active
+		l.activeStreams.enqueue(str)
+	}
+}
+
+func (l *loopyWriter) pingHandler(p *ping) error {
+	if !p.ack {
+		l.bdpEst.timesnap(p.data)
+	}
+	return l.framer.fr.WritePing(p.ack, p.data)
+
+}
+
+func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) {
+	o.resp <- l.sendQuota
+}
+
+func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
+	c.onWrite()
+	if str, ok := l.estdStreams[c.streamID]; ok {
+		// On the server side it could be a trailers-only response or
+		// a RST_STREAM before stream initialization; thus the stream might
+		// not be established yet.
+		delete(l.estdStreams, c.streamID)
+		str.deleteSelf()
+		for head := str.itl.dequeueAll(); head != nil; head = head.next {
+			if df, ok := head.it.(*dataFrame); ok {
+				_ = df.reader.Close()
+			}
+		}
+	}
+	if c.rst { // If RST_STREAM needs to be sent.
+		if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
+			return err
+		}
+	}
+	if l.draining && len(l.estdStreams) == 0 {
+		// Flush and close the connection; we are done with it.
+		return errors.New("finished processing active streams while in draining mode")
+	}
+	return nil
+}
+
+func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
+	if l.side == clientSide {
+		return errors.New("earlyAbortStream not handled on client")
+	}
+	// In case the caller forgets to set the http status, default to 200.
+	if eas.httpStatus == 0 {
+		eas.httpStatus = 200
+	}
+	headerFields := []hpack.HeaderField{
+		{Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
+		{Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
+		{Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
+		{Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
+	}
+
+	if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
+		return err
+	}
+	if eas.rst {
+		if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
+	if l.side == clientSide {
+		l.draining = true
+		if len(l.estdStreams) == 0 {
+			// Flush and close the connection; we are done with it.
+			return errors.New("received GOAWAY with no active streams")
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) goAwayHandler(g *goAway) error {
+	// Handling of outgoing GoAway is very specific to side.
+	if l.ssGoAwayHandler != nil {
+		draining, err := l.ssGoAwayHandler(g)
+		if err != nil {
+			return err
+		}
+		l.draining = draining
+	}
+	return nil
+}
+
+func (l *loopyWriter) handle(i any) error {
+	switch i := i.(type) {
+	case *incomingWindowUpdate:
+		l.incomingWindowUpdateHandler(i)
+	case *outgoingWindowUpdate:
+		return l.outgoingWindowUpdateHandler(i)
+	case *incomingSettings:
+		return l.incomingSettingsHandler(i)
+	case *outgoingSettings:
+		return l.outgoingSettingsHandler(i)
+	case *headerFrame:
+		return l.headerHandler(i)
+	case *registerStream:
+		l.registerStreamHandler(i)
+	case *cleanupStream:
+		return l.cleanupStreamHandler(i)
+	case *earlyAbortStream:
+		return l.earlyAbortStreamHandler(i)
+	case *incomingGoAway:
+		return l.incomingGoAwayHandler(i)
+	case *dataFrame:
+		l.preprocessData(i)
+	case *ping:
+		return l.pingHandler(i)
+	case *goAway:
+		return l.goAwayHandler(i)
+	case *outFlowControlSizeRequest:
+		l.outFlowControlSizeRequestHandler(i)
+	case closeConnection:
+		// Just return a non-I/O error and run() will flush and close the
+		// connection.
+		return ErrConnClosing
+	default:
+		return fmt.Errorf("transport: unknown control message type %T", i)
+	}
+	return nil
+}
+
+func (l *loopyWriter) applySettings(ss []http2.Setting) {
+	for _, s := range ss {
+		switch s.ID {
+		case http2.SettingInitialWindowSize:
+			o := l.oiws
+			l.oiws = s.Val
+			if o < l.oiws {
+				// If the new limit is greater make all depleted streams active.
+				for _, stream := range l.estdStreams {
+					if stream.state == waitingOnStreamQuota {
+						stream.state = active
+						l.activeStreams.enqueue(stream)
+					}
+				}
+			}
+		case http2.SettingHeaderTableSize:
+			updateHeaderTblSize(l.hEnc, s.Val)
+		}
+	}
+}
+
+// processData removes the first stream from active streams, writes out at most 16KB
+// of its data, and then puts it at the end of activeStreams if there's still more data
+// to be sent and the stream has some stream-level flow control quota left.
+func (l *loopyWriter) processData() (bool, error) {
+	if l.sendQuota == 0 {
+		return true, nil
+	}
+	str := l.activeStreams.dequeue() // Remove the first stream.
+	if str == nil {
+		return true, nil
+	}
+	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
+	// A data item is represented by a dataFrame, since it later translates into
+	// multiple HTTP2 data frames.
+	// Every dataFrame has two buffers: h, which holds the grpc-message header, and
+	// the reader, which holds the actual message. As an optimization to keep wire
+	// traffic low, message bytes are copied into h so that each outgoing HTTP2
+	// frame is as large as possible.
+
+	if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
+		// Client sends out empty data frame with endStream = true
+		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+			return false, err
+		}
+		str.itl.dequeue() // remove the empty data item from stream
+		_ = dataItem.reader.Close()
+		if str.itl.isEmpty() {
+			str.state = empty
+		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
+			if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+				return false, err
+			}
+			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+				return false, err
+			}
+		} else {
+			l.activeStreams.enqueue(str)
+		}
+		return false, nil
+	}
+
+	// Figure out the maximum size we can send
+	maxSize := http2MaxFrameLen
+	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
+		str.state = waitingOnStreamQuota
+		return false, nil
+	} else if maxSize > strQuota {
+		maxSize = strQuota
+	}
+	if maxSize > int(l.sendQuota) { // connection-level flow control.
+		maxSize = int(l.sendQuota)
+	}
+	// Compute how much of the header and data we can send within quota and max frame length
+	hSize := min(maxSize, len(dataItem.h))
+	dSize := min(maxSize-hSize, dataItem.reader.Remaining())
+	remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
+	size := hSize + dSize
+
+	var buf *[]byte
+
+	if hSize != 0 && dSize == 0 {
+		buf = &dataItem.h
+	} else {
+		// Note: this is only necessary because the http2.Framer does not support
+		// partially writing a frame, so the sequence must be materialized into a buffer.
+		// TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
+		pool := l.bufferPool
+		if pool == nil {
+			// Note that this is only supposed to be nil in tests. Otherwise, stream is
+			// always initialized with a BufferPool.
+			pool = mem.DefaultBufferPool()
+		}
+		buf = pool.Get(size)
+		defer pool.Put(buf)
+
+		copy((*buf)[:hSize], dataItem.h)
+		_, _ = dataItem.reader.Read((*buf)[hSize:])
+	}
+
+	// Now that outgoing flow controls are checked, we can replenish str's write quota.
+	str.wq.replenish(size)
+	var endStream bool
+	// If this is the last data message on this stream and all of it can be written in this iteration.
+	if dataItem.endStream && remainingBytes == 0 {
+		endStream = true
+	}
+	if dataItem.onEachWrite != nil {
+		dataItem.onEachWrite()
+	}
+	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
+		return false, err
+	}
+	str.bytesOutStanding += size
+	l.sendQuota -= uint32(size)
+	dataItem.h = dataItem.h[hSize:]
+
+	if remainingBytes == 0 { // All the data from that message was written out.
+		_ = dataItem.reader.Close()
+		str.itl.dequeue()
+	}
+	if str.itl.isEmpty() {
+		str.state = empty
+	} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
+		if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+			return false, err
+		}
+		if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+			return false, err
+		}
+	} else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
+		str.state = waitingOnStreamQuota
+	} else { // Otherwise add it back to the list of active streams.
+		l.activeStreams.enqueue(str)
+	}
+	return false, nil
+}
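+
+// Worked example (illustrative, not part of the upstream source): with
+// oiws=65535, bytesOutStanding=60000 and sendQuota=4000, the stream-level
+// quota is 5535, so maxSize = min(http2MaxFrameLen, 5535, 4000) = 4000 bytes
+// are written this iteration, bytesOutStanding rises to 64000, and the stream
+// is re-queued on activeStreams if it still has data and quota.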
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/transport/defaults.go b/hack/tools/vendor/google.golang.org/grpc/internal/transport/defaults.go
new file mode 100644
index 0000000000..bc8ee07474
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/transport/defaults.go
@@ -0,0 +1,55 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"math"
+	"time"
+)
+
+const (
+	// The default flow control window size, per the HTTP2 spec.
+	defaultWindowSize = 65535
+	// The initial window size for flow control.
+	initialWindowSize             = defaultWindowSize // for an RPC
+	infinity                      = time.Duration(math.MaxInt64)
+	defaultClientKeepaliveTime    = infinity
+	defaultClientKeepaliveTimeout = 20 * time.Second
+	defaultMaxStreamsClient       = 100
+	defaultMaxConnectionIdle      = infinity
+	defaultMaxConnectionAge       = infinity
+	defaultMaxConnectionAgeGrace  = infinity
+	defaultServerKeepaliveTime    = 2 * time.Hour
+	defaultServerKeepaliveTimeout = 20 * time.Second
+	defaultKeepalivePolicyMinTime = 5 * time.Minute
+	// max window limit set by HTTP2 Specs.
+	maxWindowSize = math.MaxInt32
+	// defaultWriteQuota is the default value for the number of data
+	// bytes that each stream can schedule before some of it is
+	// flushed out.
+	defaultWriteQuota              = 64 * 1024
+	defaultClientMaxHeaderListSize = uint32(16 << 20)
+	defaultServerMaxHeaderListSize = uint32(16 << 20)
+)
+
+// MaxStreamID is the upper bound for the stream ID before the current
+// transport gracefully closes and new transport is created for subsequent RPCs.
+// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
+// integer. It's exported so that tests can override it.
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/hack/tools/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
new file mode 100644
index 0000000000..97198c5158
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -0,0 +1,215 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// writeQuota is a soft limit on the amount of data a stream can
+// schedule before some of it is written out.
+type writeQuota struct {
+	quota int32
+	// get waits on a read from ch when quota goes less than or equal to zero.
+	// replenish writes to ch when quota becomes positive again.
+	ch chan struct{}
+	// done is triggered in error case.
+	done <-chan struct{}
+	// replenish is called by loopyWriter to give quota back to the stream.
+	// It is implemented as a field so that it can be updated
+	// by tests.
+	replenish func(n int)
+}
+
+func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
+	w := &writeQuota{
+		quota: sz,
+		ch:    make(chan struct{}, 1),
+		done:  done,
+	}
+	w.replenish = w.realReplenish
+	return w
+}
+
+func (w *writeQuota) get(sz int32) error {
+	for {
+		if atomic.LoadInt32(&w.quota) > 0 {
+			atomic.AddInt32(&w.quota, -sz)
+			return nil
+		}
+		select {
+		case <-w.ch:
+			continue
+		case <-w.done:
+			return errStreamDone
+		}
+	}
+}
+
+func (w *writeQuota) realReplenish(n int) {
+	sz := int32(n)
+	a := atomic.AddInt32(&w.quota, sz)
+	b := a - sz
+	if b <= 0 && a > 0 {
+		select {
+		case w.ch <- struct{}{}:
+		default:
+		}
+	}
+}
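+
+// Illustrative usage sketch (not part of the upstream source): a sending
+// stream acquires quota before handing data to loopy, and loopy returns it
+// once the bytes are written out; payload and streamDone are assumed
+// caller-side values:
+//
+//	wq := newWriteQuota(defaultWriteQuota, streamDone)
+//	if err := wq.get(int32(len(payload))); err != nil {
+//		// The stream is done; abort the send.
+//	}
+//	// Later, from loopyWriter.processData:
+//	wq.replenish(len(payload))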
+
+type trInFlow struct {
+	limit               uint32
+	unacked             uint32
+	effectiveWindowSize uint32
+}
+
+func (f *trInFlow) newLimit(n uint32) uint32 {
+	d := n - f.limit
+	f.limit = n
+	f.updateEffectiveWindowSize()
+	return d
+}
+
+func (f *trInFlow) onData(n uint32) uint32 {
+	f.unacked += n
+	if f.unacked >= f.limit/4 {
+		w := f.unacked
+		f.unacked = 0
+		f.updateEffectiveWindowSize()
+		return w
+	}
+	f.updateEffectiveWindowSize()
+	return 0
+}
+
+func (f *trInFlow) reset() uint32 {
+	w := f.unacked
+	f.unacked = 0
+	f.updateEffectiveWindowSize()
+	return w
+}
+
+func (f *trInFlow) updateEffectiveWindowSize() {
+	atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
+}
+
+func (f *trInFlow) getSize() uint32 {
+	return atomic.LoadUint32(&f.effectiveWindowSize)
+}
+
+// TODO(mmukhi): Simplify this code.
+// inFlow deals with inbound flow control
+type inFlow struct {
+	mu sync.Mutex
+	// The inbound flow control limit for pending data.
+	limit uint32
+	// pendingData is the overall data which have been received but not been
+	// consumed by applications.
+	pendingData uint32
+	// The amount of data the application has consumed but grpc has not sent
+	// window update for them. Used to reduce window update frequency.
+	pendingUpdate uint32
+	// delta is the extra window update given by receiver when an application
+	// is reading data bigger in size than the inFlow limit.
+	delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) {
+	f.mu.Lock()
+	f.limit = n
+	f.mu.Unlock()
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+	if n > uint32(math.MaxInt32) {
+		n = uint32(math.MaxInt32)
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
+	// can send without a window update.
+	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+	// estUntransmittedData is the maximum number of bytes the sender might not have put
+	// on the wire yet. A value of 0 or less means that we have already received all or
+	// more bytes than the application is requesting to read.
+	estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+	// This implies that unless we send a window update, the sender won't be able to send all the bytes
+	// for this message. Therefore we must send an update over the limit since there's an active read
+	// request from the application.
+	if estUntransmittedData > estSenderQuota {
+		// Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec.
+		if f.limit+n > maxWindowSize {
+			f.delta = maxWindowSize - f.limit
+		} else {
+			// Send a window update for the whole message and not just the difference between
+			// estUntransmittedData and estSenderQuota. This will be helpful in case the message
+			// is padded; we will fall back on the currently available window (at least a 1/4th of the limit).
+			f.delta = n
+		}
+		return f.delta
+	}
+	return 0
+}
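+
+// Worked example (illustrative, not part of the upstream code): with
+// limit = 65536 and no pending data, estSenderQuota is 65536. If the
+// application issues a 1 MiB read (n = 1048576), estUntransmittedData
+// exceeds the quota, and since limit+n stays below maxWindowSize the
+// receiver grants delta = 1048576 so the whole message can arrive without
+// stalling on flow control.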
+
+// onData is invoked when some data frame is received. It updates pendingData.
+func (f *inFlow) onData(n uint32) error {
+	f.mu.Lock()
+	f.pendingData += n
+	if f.pendingData+f.pendingUpdate > f.limit+f.delta {
+		limit := f.limit
+		rcvd := f.pendingData + f.pendingUpdate
+		f.mu.Unlock()
+		return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
+	}
+	f.mu.Unlock()
+	return nil
+}
+
+// onRead is invoked when the application reads the data. It returns the window size
+// to be sent to the peer.
+func (f *inFlow) onRead(n uint32) uint32 {
+	f.mu.Lock()
+	if f.pendingData == 0 {
+		f.mu.Unlock()
+		return 0
+	}
+	f.pendingData -= n
+	if n > f.delta {
+		n -= f.delta
+		f.delta = 0
+	} else {
+		f.delta -= n
+		n = 0
+	}
+	f.pendingUpdate += n
+	if f.pendingUpdate >= f.limit/4 {
+		wu := f.pendingUpdate
+		f.pendingUpdate = 0
+		f.mu.Unlock()
+		return wu
+	}
+	f.mu.Unlock()
+	return 0
+}
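+
+// Illustrative sketch (not part of the upstream code): window updates for
+// consumed data are batched on the same limit/4 threshold. With limit = 65536,
+// delta = 0, and pendingData raised by earlier onData calls, onRead(8192)
+// returns 0, while a second onRead(8192) pushes pendingUpdate to 16384 and
+// returns it, so the peer sees one WINDOW_UPDATE covering both reads.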
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/hack/tools/vendor/google.golang.org/grpc/internal/transport/handler_server.go
new file mode 100644
index 0000000000..ce878693bd
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -0,0 +1,502 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file is the implementation of a gRPC server using HTTP/2 which
+// uses the standard Go http2 Server implementation (via the
+// http.Handler interface), rather than speaking low-level HTTP/2
+// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
+
+package transport
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/http2"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+)
+
+// NewServerHandlerTransport returns a ServerTransport handling gRPC from
+// inside an http.Handler, or writes an HTTP error to w and returns an error.
+// It requires that the http Server supports HTTP/2.
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
+	if r.Method != http.MethodPost {
+		w.Header().Set("Allow", http.MethodPost)
+		msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
+		http.Error(w, msg, http.StatusMethodNotAllowed)
+		return nil, errors.New(msg)
+	}
+	contentType := r.Header.Get("Content-Type")
+	// TODO: do we assume contentType is lowercase? we did before
+	contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
+	if !validContentType {
+		msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
+		http.Error(w, msg, http.StatusUnsupportedMediaType)
+		return nil, errors.New(msg)
+	}
+	if r.ProtoMajor != 2 {
+		msg := "gRPC requires HTTP/2"
+		http.Error(w, msg, http.StatusHTTPVersionNotSupported)
+		return nil, errors.New(msg)
+	}
+	if _, ok := w.(http.Flusher); !ok {
+		msg := "gRPC requires a ResponseWriter supporting http.Flusher"
+		http.Error(w, msg, http.StatusInternalServerError)
+		return nil, errors.New(msg)
+	}
+
+	var localAddr net.Addr
+	if la := r.Context().Value(http.LocalAddrContextKey); la != nil {
+		localAddr, _ = la.(net.Addr)
+	}
+	var authInfo credentials.AuthInfo
+	if r.TLS != nil {
+		authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
+	}
+	p := peer.Peer{
+		Addr:      strAddr(r.RemoteAddr),
+		LocalAddr: localAddr,
+		AuthInfo:  authInfo,
+	}
+	st := &serverHandlerTransport{
+		rw:             w,
+		req:            r,
+		closedCh:       make(chan struct{}),
+		writes:         make(chan func()),
+		peer:           p,
+		contentType:    contentType,
+		contentSubtype: contentSubtype,
+		stats:          stats,
+		bufferPool:     bufferPool,
+	}
+	st.logger = prefixLoggerForServerHandlerTransport(st)
+
+	if v := r.Header.Get("grpc-timeout"); v != "" {
+		to, err := decodeTimeout(v)
+		if err != nil {
+			msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
+			http.Error(w, msg, http.StatusBadRequest)
+			return nil, status.Error(codes.Internal, msg)
+		}
+		st.timeoutSet = true
+		st.timeout = to
+	}
+
+	metakv := []string{"content-type", contentType}
+	if r.Host != "" {
+		metakv = append(metakv, ":authority", r.Host)
+	}
+	for k, vv := range r.Header {
+		k = strings.ToLower(k)
+		if isReservedHeader(k) && !isWhitelistedHeader(k) {
+			continue
+		}
+		for _, v := range vv {
+			v, err := decodeMetadataHeader(k, v)
+			if err != nil {
+				msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
+				http.Error(w, msg, http.StatusBadRequest)
+				return nil, status.Error(codes.Internal, msg)
+			}
+			metakv = append(metakv, k, v)
+		}
+	}
+	st.headerMD = metadata.Pairs(metakv...)
+
+	return st, nil
+}
+
+// serverHandlerTransport is an implementation of ServerTransport
+// which replies to exactly one gRPC request (exactly one HTTP request),
+// using the net/http.Handler interface. This http.Handler is guaranteed
+// at this point to be speaking over HTTP/2, so it's able to speak valid
+// gRPC.
+type serverHandlerTransport struct {
+	rw         http.ResponseWriter
+	req        *http.Request
+	timeoutSet bool
+	timeout    time.Duration
+
+	headerMD metadata.MD
+
+	peer peer.Peer
+
+	closeOnce sync.Once
+	closedCh  chan struct{} // closed on Close
+
+	// writes is a channel of code to run serialized in the
+	// ServeHTTP (HandleStreams) goroutine. The channel is closed
+	// when WriteStatus is called.
+	writes chan func()
+
+	// block concurrent WriteStatus calls
+	// e.g. grpc/(*serverStream).SendMsg/RecvMsg
+	writeStatusMu sync.Mutex
+
+	// we just mirror the request content-type
+	contentType string
+	// we store both contentType and contentSubtype so we don't keep recreating them
+	// TODO make sure this is consistent across handler_server and http2_server
+	contentSubtype string
+
+	stats  []stats.Handler
+	logger *grpclog.PrefixLogger
+
+	bufferPool mem.BufferPool
+}
+
+func (ht *serverHandlerTransport) Close(err error) {
+	ht.closeOnce.Do(func() {
+		if ht.logger.V(logLevel) {
+			ht.logger.Infof("Closing: %v", err)
+		}
+		close(ht.closedCh)
+	})
+}
+
+func (ht *serverHandlerTransport) Peer() *peer.Peer {
+	return &peer.Peer{
+		Addr:      ht.peer.Addr,
+		LocalAddr: ht.peer.LocalAddr,
+		AuthInfo:  ht.peer.AuthInfo,
+	}
+}
+
+// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
+// the empty string if unknown.
+type strAddr string
+
+func (a strAddr) Network() string {
+	if a != "" {
+		// Per the documentation on net/http.Request.RemoteAddr, if this is
+		// set, it's set to the IP:port of the peer (hence, TCP):
+		// https://golang.org/pkg/net/http/#Request
+		//
+		// If we want to support Unix sockets later, we can
+		// add our own grpc-specific convention within the
+		// grpc codebase to set RemoteAddr to a different
+		// format, or probably better: we can attach it to the
+		// context and use that from serverHandlerTransport.RemoteAddr.
+		return "tcp"
+	}
+	return ""
+}
+
+func (a strAddr) String() string { return string(a) }
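+
+// Illustrative usage (not part of the upstream code):
+// strAddr("10.0.0.1:5000").Network() reports "tcp" and String() returns the
+// original "ip:port" text, while the zero value reports "" for both.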
+
+// do runs fn in the ServeHTTP goroutine.
+func (ht *serverHandlerTransport) do(fn func()) error {
+	select {
+	case <-ht.closedCh:
+		return ErrConnClosing
+	case ht.writes <- fn:
+		return nil
+	}
+}
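+
+// Illustrative note (not part of the upstream code): every write path in this
+// transport funnels through do, so WriteHeader, Write, and WriteStatus all
+// execute inside the single ServeHTTP goroutine via runStream, which is what
+// lets them touch ht.rw without additional locking.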
+
+func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
+	ht.writeStatusMu.Lock()
+	defer ht.writeStatusMu.Unlock()
+
+	headersWritten := s.updateHeaderSent()
+	err := ht.do(func() {
+		if !headersWritten {
+			ht.writePendingHeaders(s)
+		}
+
+		// And flush, in case no header or body has been sent yet.
+		// This forces a separation of headers and trailers if this is the
+		// first call (for example, in end2end tests' TestNoService).
+		ht.rw.(http.Flusher).Flush()
+
+		h := ht.rw.Header()
+		h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code()))
+		if m := st.Message(); m != "" {
+			h.Set("Grpc-Message", encodeGrpcMessage(m))
+		}
+
+		s.hdrMu.Lock()
+		defer s.hdrMu.Unlock()
+		if p := st.Proto(); p != nil && len(p.Details) > 0 {
+			delete(s.trailer, grpcStatusDetailsBinHeader)
+			stBytes, err := proto.Marshal(p)
+			if err != nil {
+				// TODO: return error instead, when callers are able to handle it.
+				panic(err)
+			}
+
+			h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes))
+		}
+
+		if len(s.trailer) > 0 {
+			for k, vv := range s.trailer {
+				// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+				if isReservedHeader(k) {
+					continue
+				}
+				for _, v := range vv {
+					// http2 ResponseWriter mechanism to send undeclared Trailers after
+					// the headers have possibly been written.
+					h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v))
+				}
+			}
+		}
+	})
+
+	if err == nil { // transport has not been closed
+		// Note: The trailer fields are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		for _, sh := range ht.stats {
+			sh.HandleRPC(s.Context(), &stats.OutTrailer{
+				Trailer: s.trailer.Copy(),
+			})
+		}
+	}
+	ht.Close(errors.New("finished writing status"))
+	return err
+}
+
+// writePendingHeaders sets common and custom headers on the first
+// write call (Write, WriteHeader, or WriteStatus)
+func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) {
+	ht.writeCommonHeaders(s)
+	ht.writeCustomHeaders(s)
+}
+
+// writeCommonHeaders sets common headers on the first write
+// call (Write, WriteHeader, or WriteStatus).
+func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
+	h := ht.rw.Header()
+	h["Date"] = nil // suppress Date to make tests happy; TODO: restore
+	h.Set("Content-Type", ht.contentType)
+
+	// Predeclare trailers we'll set later in WriteStatus (after the body).
+	// This is a SHOULD in the HTTP RFC, and the way you add (known)
+	// Trailers per the net/http.ResponseWriter contract.
+	// See https://golang.org/pkg/net/http/#ResponseWriter
+	// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+	h.Add("Trailer", "Grpc-Status")
+	h.Add("Trailer", "Grpc-Message")
+	h.Add("Trailer", "Grpc-Status-Details-Bin")
+
+	if s.sendCompress != "" {
+		h.Set("Grpc-Encoding", s.sendCompress)
+	}
+}
+
+// writeCustomHeaders sets custom headers set on the stream via SetHeader
+// on the first write call (Write, WriteHeader, or WriteStatus)
+func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
+	h := ht.rw.Header()
+
+	s.hdrMu.Lock()
+	for k, vv := range s.header {
+		if isReservedHeader(k) {
+			continue
+		}
+		for _, v := range vv {
+			h.Add(k, encodeMetadataHeader(k, v))
+		}
+	}
+
+	s.hdrMu.Unlock()
+}
+
+func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+	// Always take a reference because otherwise there is no guarantee the data will
+	// be available after this function returns. This is what callers to Write
+	// expect.
+	data.Ref()
+	headersWritten := s.updateHeaderSent()
+	err := ht.do(func() {
+		defer data.Free()
+		if !headersWritten {
+			ht.writePendingHeaders(s)
+		}
+		ht.rw.Write(hdr)
+		for _, b := range data {
+			_, _ = ht.rw.Write(b.ReadOnlyData())
+		}
+		ht.rw.(http.Flusher).Flush()
+	})
+	if err != nil {
+		data.Free()
+		return err
+	}
+	return nil
+}
+
+func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
+	if err := s.SetHeader(md); err != nil {
+		return err
+	}
+
+	headersWritten := s.updateHeaderSent()
+	err := ht.do(func() {
+		if !headersWritten {
+			ht.writePendingHeaders(s)
+		}
+
+		ht.rw.WriteHeader(200)
+		ht.rw.(http.Flusher).Flush()
+	})
+
+	if err == nil {
+		for _, sh := range ht.stats {
+			// Note: The header fields are compressed with hpack after this call returns.
+			// No WireLength field is set here.
+			sh.HandleRPC(s.Context(), &stats.OutHeader{
+				Header:      md.Copy(),
+				Compression: s.sendCompress,
+			})
+		}
+	}
+	return err
+}
+
+func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) {
+	// With this transport type there will be exactly 1 stream: this HTTP request.
+	var cancel context.CancelFunc
+	if ht.timeoutSet {
+		ctx, cancel = context.WithTimeout(ctx, ht.timeout)
+	} else {
+		ctx, cancel = context.WithCancel(ctx)
+	}
+
+	// requestOver is closed when the status has been written via WriteStatus.
+	requestOver := make(chan struct{})
+	go func() {
+		select {
+		case <-requestOver:
+		case <-ht.closedCh:
+		case <-ht.req.Context().Done():
+		}
+		cancel()
+		ht.Close(errors.New("request is done processing"))
+	}()
+
+	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
+	req := ht.req
+	s := &Stream{
+		id:               0, // irrelevant
+		ctx:              ctx,
+		requestRead:      func(int) {},
+		cancel:           cancel,
+		buf:              newRecvBuffer(),
+		st:               ht,
+		method:           req.URL.Path,
+		recvCompress:     req.Header.Get("grpc-encoding"),
+		contentSubtype:   ht.contentSubtype,
+		headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
+	}
+	s.trReader = &transportReader{
+		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
+		windowHandler: func(int) {},
+	}
+
+	// readerDone is closed when the Body.Read-ing goroutine exits.
+	readerDone := make(chan struct{})
+	go func() {
+		defer close(readerDone)
+
+		for {
+			buf := ht.bufferPool.Get(http2MaxFrameLen)
+			n, err := req.Body.Read(*buf)
+			if n > 0 {
+				*buf = (*buf)[:n]
+				s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)})
+			} else {
+				ht.bufferPool.Put(buf)
+			}
+			if err != nil {
+				s.buf.put(recvMsg{err: mapRecvMsgError(err)})
+				return
+			}
+		}
+	}()
+
+	// startStream is provided by the *grpc.Server's serveStreams.
+	// It starts a goroutine serving s and exits immediately.
+	// The goroutine that is started is the one that then calls
+	// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
+	startStream(s)
+
+	ht.runStream()
+	close(requestOver)
+
+	// Wait for reading goroutine to finish.
+	req.Body.Close()
+	<-readerDone
+}
+
+func (ht *serverHandlerTransport) runStream() {
+	for {
+		select {
+		case fn := <-ht.writes:
+			fn()
+		case <-ht.closedCh:
+			return
+		}
+	}
+}
+
+func (ht *serverHandlerTransport) IncrMsgSent() {}
+
+func (ht *serverHandlerTransport) IncrMsgRecv() {}
+
+func (ht *serverHandlerTransport) Drain(string) {
+	panic("Drain() is not implemented")
+}
+
+// mapRecvMsgError maps the non-nil err into the appropriate
+// error value as expected by callers of *grpc.parser.recvMsg.
+// In particular, it can only be:
+//   - io.EOF
+//   - io.ErrUnexpectedEOF
+//   - of type transport.ConnectionError
+//   - an error from the status package
+func mapRecvMsgError(err error) error {
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return err
+	}
+	if se, ok := err.(http2.StreamError); ok {
+		if code, ok := http2ErrConvTab[se.Code]; ok {
+			return status.Error(code, se.Error())
+		}
+	}
+	if strings.Contains(err.Error(), "body closed by handler") {
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return connectionErrorf(true, err, err.Error())
+}
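+
+// Illustrative sketch (not part of the upstream code): a stream reset by the
+// peer surfaces as an http2.StreamError, so a frame carrying
+// http2.ErrCodeCancel is translated through http2ErrConvTab into a
+// status.Error with codes.Canceled, while unrecognized errors fall through
+// to a ConnectionError that tears down the transport.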
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/hack/tools/vendor/google.golang.org/grpc/internal/transport/http2_client.go
new file mode 100644
index 0000000000..c769deab53
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -0,0 +1,1823 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/channelz"
+	icredentials "google.golang.org/grpc/internal/credentials"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/grpcutil"
+	imetadata "google.golang.org/grpc/internal/metadata"
+	istatus "google.golang.org/grpc/internal/status"
+	isyscall "google.golang.org/grpc/internal/syscall"
+	"google.golang.org/grpc/internal/transport/networktype"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// clientConnectionCounter counts the number of connections a client has
+// initiated (equal to the number of http2Clients created). Must be accessed
+// atomically.
+var clientConnectionCounter uint64
+
+var goAwayLoopyWriterTimeout = 5 * time.Second
+
+var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
+
+// http2Client implements the ClientTransport interface with HTTP2.
+type http2Client struct {
+	lastRead  int64 // Keep this field 64-bit aligned. Accessed atomically.
+	ctx       context.Context
+	cancel    context.CancelFunc
+	ctxDone   <-chan struct{} // Cache the ctx.Done() chan.
+	userAgent string
+	// address contains the resolver returned address for this transport.
+	// If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
+	// passed to `NewStream` when determining the :authority header.
+	address    resolver.Address
+	md         metadata.MD
+	conn       net.Conn // underlying communication channel
+	loopy      *loopyWriter
+	remoteAddr net.Addr
+	localAddr  net.Addr
+	authInfo   credentials.AuthInfo // auth info about the connection
+
+	readerDone chan struct{} // sync point to enable testing.
+	writerDone chan struct{} // sync point to enable testing.
+	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
+	// that the server sent GoAway on this transport.
+	goAway chan struct{}
+
+	framer *framer
+	// controlBuf delivers all the control related tasks (e.g., window
+	// updates, reset streams, and various settings) to the controller.
+	// Do not access controlBuf with mu held.
+	controlBuf *controlBuffer
+	fc         *trInFlow
+	// The scheme used: https if TLS is on, http otherwise.
+	scheme string
+
+	isSecure bool
+
+	perRPCCreds []credentials.PerRPCCredentials
+
+	kp               keepalive.ClientParameters
+	keepaliveEnabled bool
+
+	statsHandlers []stats.Handler
+
+	initialWindowSize int32
+
+	// configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
+	maxSendHeaderListSize *uint32
+
+	bdpEst *bdpEstimator
+
+	maxConcurrentStreams  uint32
+	streamQuota           int64
+	streamsQuotaAvailable chan struct{}
+	waitingStreams        uint32
+	registeredCompressors string
+
+	// Do not access controlBuf with mu held.
+	mu            sync.Mutex // guard the following variables
+	nextID        uint32
+	state         transportState
+	activeStreams map[uint32]*Stream
+	// prevGoAwayID records the Last-Stream-ID in the previous GOAWAY frame.
+	prevGoAwayID uint32
+	// goAwayReason records the http2.ErrCode and debug data received with the
+	// GoAway frame.
+	goAwayReason GoAwayReason
+	// goAwayDebugMessage contains a detailed human readable string about a
+	// GoAway frame, useful for error messages.
+	goAwayDebugMessage string
+	// A condition variable used to signal when the keepalive goroutine should
+	// go dormant. The condition for dormancy is based on the number of active
+	// streams and the `PermitWithoutStream` keepalive client parameter. And
+	// since the number of active streams is guarded by the above mutex, we use
+	// the same for this condition variable as well.
+	kpDormancyCond *sync.Cond
+	// A boolean to track whether the keepalive goroutine is dormant or not.
+	// This is checked before attempting to signal the above condition
+	// variable.
+	kpDormant bool
+
+	channelz *channelz.Socket
+
+	onClose func(GoAwayReason)
+
+	bufferPool mem.BufferPool
+
+	connectionID uint64
+	logger       *grpclog.PrefixLogger
+}
+
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
+	address := addr.Addr
+	networkType, ok := networktype.Get(addr)
+	if fn != nil {
+		// Special handling for unix scheme with custom dialer. Back in the day,
+		// we did not have a unix resolver and therefore targets with a unix
+		// scheme would end up using the passthrough resolver. So, user's used a
+		// custom dialer in this case and expected the original dial target to
+		// be passed to the custom dialer. Now, we have a unix resolver. But if
+		// a custom dialer is specified, we want to retain the old behavior in
+		// terms of the address being passed to the custom dialer.
+		if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
+			// Supported unix targets are either "unix://absolute-path" or
+			// "unix:relative-path".
+			if filepath.IsAbs(address) {
+				return fn(ctx, "unix://"+address)
+			}
+			return fn(ctx, "unix:"+address)
+		}
+		return fn(ctx, address)
+	}
+	if !ok {
+		networkType, address = parseDialTarget(address)
+	}
+	if networkType == "tcp" && useProxy {
+		return proxyDial(ctx, address, grpcUA)
+	}
+	return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address)
+}
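+
+// Illustrative sketch (not part of the upstream code): with a custom dialer,
+// a resolved unix address such as "/tmp/grpc.sock" is handed to the dialer
+// as "unix:///tmp/grpc.sock" (absolute) or "unix:relative.sock" (relative),
+// preserving the historical passthrough behavior, while abstract sockets
+// (addresses beginning with "\x00") are passed through unchanged.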
+
+func isTemporary(err error) bool {
+	switch err := err.(type) {
+	case interface {
+		Temporary() bool
+	}:
+		return err.Temporary()
+	case interface {
+		Timeout() bool
+	}:
+		// Timeouts may be resolved upon retry, and are thus treated as
+		// temporary.
+		return err.Timeout()
+	}
+	return true
+}
+
+// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// and starts to receive messages on it. Non-nil error returns if construction
+// fails.
+func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
+	scheme := "http"
+	ctx, cancel := context.WithCancel(ctx)
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	// gRPC, resolver, balancer etc. can specify arbitrary data in the
+	// Attributes field of resolver.Address, which is shoved into connectCtx
+	// and passed to the dialer and credential handshaker. This makes it possible for
+	// address-specific arbitrary data to reach custom dialers and credential handshakers.
+	connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
+
+	conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent)
+	if err != nil {
+		if opts.FailOnNonTempDialError {
+			return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
+		}
+		return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
+	}
+
+	// Any further errors will close the underlying connection
+	defer func(conn net.Conn) {
+		if err != nil {
+			conn.Close()
+		}
+	}(conn)
+
+	// The following defer and goroutine monitor the connectCtx for cancellation
+	// and deadline.  On context expiration, the connection is hard closed and
+	// this function will naturally fail as a result.  Otherwise, the defer
+	// waits for the goroutine to exit to prevent the context from being
+	// monitored (and to prevent the connection from ever being closed) after
+	// returning from this function.
+	ctxMonitorDone := grpcsync.NewEvent()
+	newClientCtx, newClientDone := context.WithCancel(connectCtx)
+	defer func() {
+		newClientDone()         // Awaken the goroutine below if connectCtx hasn't expired.
+		<-ctxMonitorDone.Done() // Wait for the goroutine below to exit.
+	}()
+	go func(conn net.Conn) {
+		defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
+		<-newClientCtx.Done()       // Block until connectCtx expires or the defer above executes.
+		if err := connectCtx.Err(); err != nil {
+			// connectCtx expired before exiting the function.  Hard close the connection.
+			if logger.V(logLevel) {
+				logger.Infof("Aborting due to connect deadline expiring: %v", err)
+			}
+			conn.Close()
+		}
+	}(conn)
+
+	kp := opts.KeepaliveParams
+	// Validate keepalive parameters.
+	if kp.Time == 0 {
+		kp.Time = defaultClientKeepaliveTime
+	}
+	if kp.Timeout == 0 {
+		kp.Timeout = defaultClientKeepaliveTimeout
+	}
+	keepaliveEnabled := false
+	if kp.Time != infinity {
+		if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
+		}
+		keepaliveEnabled = true
+	}
+	var (
+		isSecure bool
+		authInfo credentials.AuthInfo
+	)
+	transportCreds := opts.TransportCredentials
+	perRPCCreds := opts.PerRPCCredentials
+
+	if b := opts.CredsBundle; b != nil {
+		if t := b.TransportCredentials(); t != nil {
+			transportCreds = t
+		}
+		if t := b.PerRPCCredentials(); t != nil {
+			perRPCCreds = append(perRPCCreds, t)
+		}
+	}
+	if transportCreds != nil {
+		conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
+		if err != nil {
+			return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
+		}
+		for _, cd := range perRPCCreds {
+			if cd.RequireTransportSecurity() {
+				if ci, ok := authInfo.(interface {
+					GetCommonAuthInfo() credentials.CommonAuthInfo
+				}); ok {
+					secLevel := ci.GetCommonAuthInfo().SecurityLevel
+					if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity {
+						return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection")
+					}
+				}
+			}
+		}
+		isSecure = true
+		if transportCreds.Info().SecurityProtocol == "tls" {
+			scheme = "https"
+		}
+	}
+	dynamicWindow := true
+	icwz := int32(initialWindowSize)
+	if opts.InitialConnWindowSize >= defaultWindowSize {
+		icwz = opts.InitialConnWindowSize
+		dynamicWindow = false
+	}
+	writeBufSize := opts.WriteBufferSize
+	readBufSize := opts.ReadBufferSize
+	maxHeaderListSize := defaultClientMaxHeaderListSize
+	if opts.MaxHeaderListSize != nil {
+		maxHeaderListSize = *opts.MaxHeaderListSize
+	}
+
+	t := &http2Client{
+		ctx:                   ctx,
+		ctxDone:               ctx.Done(), // Cache Done chan.
+		cancel:                cancel,
+		userAgent:             opts.UserAgent,
+		registeredCompressors: grpcutil.RegisteredCompressors(),
+		address:               addr,
+		conn:                  conn,
+		remoteAddr:            conn.RemoteAddr(),
+		localAddr:             conn.LocalAddr(),
+		authInfo:              authInfo,
+		readerDone:            make(chan struct{}),
+		writerDone:            make(chan struct{}),
+		goAway:                make(chan struct{}),
+		framer:                newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
+		fc:                    &trInFlow{limit: uint32(icwz)},
+		scheme:                scheme,
+		activeStreams:         make(map[uint32]*Stream),
+		isSecure:              isSecure,
+		perRPCCreds:           perRPCCreds,
+		kp:                    kp,
+		statsHandlers:         opts.StatsHandlers,
+		initialWindowSize:     initialWindowSize,
+		nextID:                1,
+		maxConcurrentStreams:  defaultMaxStreamsClient,
+		streamQuota:           defaultMaxStreamsClient,
+		streamsQuotaAvailable: make(chan struct{}, 1),
+		keepaliveEnabled:      keepaliveEnabled,
+		bufferPool:            opts.BufferPool,
+		onClose:               onClose,
+	}
+	var czSecurity credentials.ChannelzSecurityValue
+	if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
+		czSecurity = au.GetSecurityValue()
+	}
+	t.channelz = channelz.RegisterSocket(
+		&channelz.Socket{
+			SocketType:       channelz.SocketTypeNormal,
+			Parent:           opts.ChannelzParent,
+			SocketMetrics:    channelz.SocketMetrics{},
+			EphemeralMetrics: t.socketMetrics,
+			LocalAddr:        t.localAddr,
+			RemoteAddr:       t.remoteAddr,
+			SocketOptions:    channelz.GetSocketOption(t.conn),
+			Security:         czSecurity,
+		})
+	t.logger = prefixLoggerForClientTransport(t)
+	// Add peer information to the http2client context.
+	t.ctx = peer.NewContext(t.ctx, t.getPeer())
+
+	if md, ok := addr.Metadata.(*metadata.MD); ok {
+		t.md = *md
+	} else if md := imetadata.Get(addr); md != nil {
+		t.md = md
+	}
+	t.controlBuf = newControlBuffer(t.ctxDone)
+	if opts.InitialWindowSize >= defaultWindowSize {
+		t.initialWindowSize = opts.InitialWindowSize
+		dynamicWindow = false
+	}
+	if dynamicWindow {
+		t.bdpEst = &bdpEstimator{
+			bdp:               initialWindowSize,
+			updateFlowControl: t.updateFlowControl,
+		}
+	}
+	for _, sh := range t.statsHandlers {
+		t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
+			RemoteAddr: t.remoteAddr,
+			LocalAddr:  t.localAddr,
+		})
+		connBegin := &stats.ConnBegin{
+			Client: true,
+		}
+		sh.HandleConn(t.ctx, connBegin)
+	}
+	if t.keepaliveEnabled {
+		t.kpDormancyCond = sync.NewCond(&t.mu)
+		go t.keepalive()
+	}
+
+	// Start the reader goroutine for incoming messages. Each transport has a
+	// dedicated goroutine which reads HTTP2 frames from the network. Then it
+	// dispatches the frame to the corresponding stream entity.  When the
+	// server preface is received, readerErrCh is closed.  If an error occurs
+	// first, an error is pushed to the channel.  This must be checked before
+	// returning from this function.
+	readerErrCh := make(chan error, 1)
+	go t.reader(readerErrCh)
+	defer func() {
+		if err != nil {
+			// writerDone should be closed since the loopy goroutine
+			// wouldn't have started in the case this function returns an error.
+			close(t.writerDone)
+			t.Close(err)
+		}
+	}()
+
+	// Send connection preface to server.
+	n, err := t.conn.Write(clientPreface)
+	if err != nil {
+		err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
+		return nil, err
+	}
+	if n != len(clientPreface) {
+		err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+		return nil, err
+	}
+	var ss []http2.Setting
+
+	if t.initialWindowSize != defaultWindowSize {
+		ss = append(ss, http2.Setting{
+			ID:  http2.SettingInitialWindowSize,
+			Val: uint32(t.initialWindowSize),
+		})
+	}
+	if opts.MaxHeaderListSize != nil {
+		ss = append(ss, http2.Setting{
+			ID:  http2.SettingMaxHeaderListSize,
+			Val: *opts.MaxHeaderListSize,
+		})
+	}
+	err = t.framer.fr.WriteSettings(ss...)
+	if err != nil {
+		err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
+		return nil, err
+	}
+	// Adjust the connection flow control window if needed.
+	if delta := uint32(icwz - defaultWindowSize); delta > 0 {
+		if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
+			err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
+			return nil, err
+		}
+	}
+
+	t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)
+
+	if err := t.framer.writer.Flush(); err != nil {
+		return nil, err
+	}
+	// Block until the server preface is received successfully or an error occurs.
+	if err = <-readerErrCh; err != nil {
+		return nil, err
+	}
+	go func() {
+		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
+		if err := t.loopy.run(); !isIOError(err) {
+			// Immediately close the connection, as the loopy writer returns
+			// when there are no more active streams and we were draining (the
+			// server sent a GOAWAY).  For I/O errors, the reader will hit it
+			// after draining any remaining incoming data.
+			t.conn.Close()
+		}
+		close(t.writerDone)
+	}()
+	return t, nil
+}
+
+func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
+	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
+	s := &Stream{
+		ct:             t,
+		done:           make(chan struct{}),
+		method:         callHdr.Method,
+		sendCompress:   callHdr.SendCompress,
+		buf:            newRecvBuffer(),
+		headerChan:     make(chan struct{}),
+		contentSubtype: callHdr.ContentSubtype,
+		doneFunc:       callHdr.DoneFunc,
+	}
+	s.wq = newWriteQuota(defaultWriteQuota, s.done)
+	s.requestRead = func(n int) {
+		t.adjustWindow(s, uint32(n))
+	}
+	// The client-side stream context should have exactly the same life cycle as the user-provided context.
+	// That means s.ctx should be read-only, and s.ctx is done iff ctx is done.
+	// So we use the original context here instead of creating a copy.
+	s.ctx = ctx
+	s.trReader = &transportReader{
+		reader: &recvBufferReader{
+			ctx:     s.ctx,
+			ctxDone: s.ctx.Done(),
+			recv:    s.buf,
+			closeStream: func(err error) {
+				t.CloseStream(s, err)
+			},
+		},
+		windowHandler: func(n int) {
+			t.updateWindow(s, uint32(n))
+		},
+	}
+	return s
+}
+
+func (t *http2Client) getPeer() *peer.Peer {
+	return &peer.Peer{
+		Addr:      t.remoteAddr,
+		AuthInfo:  t.authInfo, // Can be nil
+		LocalAddr: t.localAddr,
+	}
+}
+
+// outgoingGoAwayHandler writes a GOAWAY to the connection.  Always returns (false, err) as we want the GOAWAY
+// to be the last frame loopy writes to the transport.
+func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil {
+		return false, err
+	}
+	return false, g.closeConn
+}
+
+func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
+	aud := t.createAudience(callHdr)
+	ri := credentials.RequestInfo{
+		Method:   callHdr.Method,
+		AuthInfo: t.authInfo,
+	}
+	ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri)
+	authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
+	if err != nil {
+		return nil, err
+	}
+	callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
+	if err != nil {
+		return nil, err
+	}
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+	// first and create a slice of that exact size.
+	// Make the slice of certain predictable size to reduce allocations made by append.
+	hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
+	hfLen += len(authData) + len(callAuthData)
+	headerFields := make([]hpack.HeaderField, 0, hfLen)
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
+	if callHdr.PreviousAttempts > 0 {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
+	}
+
+	registeredCompressors := t.registeredCompressors
+	if callHdr.SendCompress != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+		// Include the outgoing compressor name when compressor is not registered
+		// via encoding.RegisterCompressor. This is possible when client uses
+		// WithCompressor dial option.
+		if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
+			if registeredCompressors != "" {
+				registeredCompressors += ","
+			}
+			registeredCompressors += callHdr.SendCompress
+		}
+	}
+
+	if registeredCompressors != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
+	}
+	if dl, ok := ctx.Deadline(); ok {
+		// Send out the timeout regardless of its value. The server can detect a timeout context by itself.
+		// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
+		timeout := time.Until(dl)
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
+	}
+	for k, v := range authData {
+		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+	}
+	for k, v := range callAuthData {
+		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+	}
+	if b := stats.OutgoingTags(ctx); b != nil {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
+	}
+	if b := stats.OutgoingTrace(ctx); b != nil {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
+	}
+
+	if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
+		var k string
+		for k, vv := range md {
+			// HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers were set.
+			if isReservedHeader(k) {
+				continue
+			}
+			for _, v := range vv {
+				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+			}
+		}
+		for _, vv := range added {
+			for i, v := range vv {
+				if i%2 == 0 {
+					k = strings.ToLower(v)
+					continue
+				}
+				// HTTP doesn't allow you to set pseudo-headers after non-pseudo-headers were set.
+				if isReservedHeader(k) {
+					continue
+				}
+				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+			}
+		}
+	}
+	for k, vv := range t.md {
+		if isReservedHeader(k) {
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+		}
+	}
+	return headerFields, nil
+}
+
+func (t *http2Client) createAudience(callHdr *CallHdr) string {
+	// Create an audience string only if needed.
+	if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
+		return ""
+	}
+	// Construct URI required to get auth request metadata.
+	// Omit port if it is the default one.
+	host := strings.TrimSuffix(callHdr.Host, ":443")
+	pos := strings.LastIndex(callHdr.Method, "/")
+	if pos == -1 {
+		pos = len(callHdr.Method)
+	}
+	return "https://" + host + callHdr.Method[:pos]
+}
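+
+// Worked example (illustrative, not part of the upstream code): for
+// callHdr.Host = "greeter.example.com:443" and
+// callHdr.Method = "/helloworld.Greeter/SayHello", the default port is
+// trimmed and the method name is cut at the last '/', yielding the audience
+// "https://greeter.example.com/helloworld.Greeter".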
+
+func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
+	if len(t.perRPCCreds) == 0 {
+		return nil, nil
+	}
+	authData := map[string]string{}
+	for _, c := range t.perRPCCreds {
+		data, err := c.GetRequestMetadata(ctx, audience)
+		if err != nil {
+			if st, ok := status.FromError(err); ok {
+				// Restrict the code to the list allowed by gRFC A54.
+				if istatus.IsRestrictedControlPlaneCode(st) {
+					err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
+				}
+				return nil, err
+			}
+
+			return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
+		}
+		for k, v := range data {
+			// Capital header names are illegal in HTTP/2.
+			k = strings.ToLower(k)
+			authData[k] = v
+		}
+	}
+	return authData, nil
+}
+
+func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
+	var callAuthData map[string]string
+	// Check if credentials.PerRPCCredentials were provided via call options.
+	// Note: if these credentials are provided both via dial options and call
+	// options, then both sets of credentials will be applied.
+	if callCreds := callHdr.Creds; callCreds != nil {
+		if callCreds.RequireTransportSecurity() {
+			ri, _ := credentials.RequestInfoFromContext(ctx)
+			if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil {
+				return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+			}
+		}
+		data, err := callCreds.GetRequestMetadata(ctx, audience)
+		if err != nil {
+			if st, ok := status.FromError(err); ok {
+				// Restrict the code to the list allowed by gRFC A54.
+				if istatus.IsRestrictedControlPlaneCode(st) {
+					err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
+				}
+				return nil, err
+			}
+			return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err)
+		}
+		callAuthData = make(map[string]string, len(data))
+		for k, v := range data {
+			// Capital header names are illegal in HTTP/2
+			k = strings.ToLower(k)
+			callAuthData[k] = v
+		}
+	}
+	return callAuthData, nil
+}
+
+// NewStreamError wraps an error and reports additional information.  Typically
+// NewStream errors result in transparent retry, as they mean nothing went onto
+// the wire.  However, there are two notable exceptions:
+//
+//  1. If the stream headers violate the max header list size allowed by the
+//     server.  It's possible this could succeed on another transport, even if
+//     it's unlikely, but do not transparently retry.
+//  2. If the credentials errored when requesting their headers.  In this case,
+//     it's possible a retry can fix the problem, but indefinitely transparently
+//     retrying is not appropriate as it is likely the credentials, if they can
+//     eventually succeed, would need I/O to do so.
+type NewStreamError struct {
+	Err error
+
+	AllowTransparentRetry bool
+}
+
+func (e NewStreamError) Error() string {
+	return e.Err.Error()
+}
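+
+// Illustrative usage (not part of the upstream code): callers that receive an
+// error from NewStream can decide whether a transparent retry is safe:
+//
+//	if nse, ok := err.(*NewStreamError); ok && nse.AllowTransparentRetry {
+//		// nothing reached the wire; retry on a new transport
+//	}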
+
+// NewStream creates a stream and registers it into the transport as "active"
+// streams.  All non-nil errors returned will be *NewStreamError.
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
+	ctx = peer.NewContext(ctx, t.getPeer())
+
+	// ServerName field of the resolver returned address takes precedence over
+	// Host field of CallHdr to determine the :authority header. This is because
+	// the ServerName field takes precedence for server authentication during
+	// TLS handshake, and the :authority header should match the value used
+	// for server authentication.
+	if t.address.ServerName != "" {
+		newCallHdr := *callHdr
+		newCallHdr.Host = t.address.ServerName
+		callHdr = &newCallHdr
+	}
+
+	headerFields, err := t.createHeaderFields(ctx, callHdr)
+	if err != nil {
+		return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
+	}
+	s := t.newStream(ctx, callHdr)
+	cleanup := func(err error) {
+		if s.swapState(streamDone) == streamDone {
+			// If it was already done, return.
+			return
+		}
+		// The stream was unprocessed by the server.
+		atomic.StoreUint32(&s.unprocessed, 1)
+		s.write(recvMsg{err: err})
+		close(s.done)
+		// If headerChan isn't closed, then close it.
+		if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+			close(s.headerChan)
+		}
+	}
+	hdr := &headerFrame{
+		hf:        headerFields,
+		endStream: false,
+		initStream: func(uint32) error {
+			t.mu.Lock()
+			// TODO: handle transport closure in loopy instead and remove this
+			// initStream is never called when transport is draining.
+			if t.state == closing {
+				t.mu.Unlock()
+				cleanup(ErrConnClosing)
+				return ErrConnClosing
+			}
+			if channelz.IsOn() {
+				t.channelz.SocketMetrics.StreamsStarted.Add(1)
+				t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano())
+			}
+			// If the keepalive goroutine has gone dormant, wake it up.
+			if t.kpDormant {
+				t.kpDormancyCond.Signal()
+			}
+			t.mu.Unlock()
+			return nil
+		},
+		onOrphaned: cleanup,
+		wq:         s.wq,
+	}
+	firstTry := true
+	var ch chan struct{}
+	transportDrainRequired := false
+	checkForStreamQuota := func() bool {
+		if t.streamQuota <= 0 { // Can go negative if server decreases it.
+			if firstTry {
+				t.waitingStreams++
+			}
+			ch = t.streamsQuotaAvailable
+			return false
+		}
+		if !firstTry {
+			t.waitingStreams--
+		}
+		t.streamQuota--
+
+		t.mu.Lock()
+		if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
+			t.mu.Unlock()
+			return false // Don't create a stream if the transport is already closed.
+		}
+
+		hdr.streamID = t.nextID
+		t.nextID += 2
+		// Drain the client transport if nextID > MaxStreamID, which signals gRPC that
+		// the connection is closed and a new one must be created for subsequent RPCs.
+		transportDrainRequired = t.nextID > MaxStreamID
+
+		s.id = hdr.streamID
+		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
+		t.activeStreams[s.id] = s
+		t.mu.Unlock()
+
+		if t.streamQuota > 0 && t.waitingStreams > 0 {
+			select {
+			case t.streamsQuotaAvailable <- struct{}{}:
+			default:
+			}
+		}
+		return true
+	}
+	var hdrListSizeErr error
+	checkForHeaderListSize := func() bool {
+		if t.maxSendHeaderListSize == nil {
+			return true
+		}
+		var sz int64
+		for _, f := range hdr.hf {
+			if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+				hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
+				return false
+			}
+		}
+		return true
+	}
+	for {
+		success, err := t.controlBuf.executeAndPut(func() bool {
+			return checkForHeaderListSize() && checkForStreamQuota()
+		}, hdr)
+		if err != nil {
+			// Connection closed.
+			return nil, &NewStreamError{Err: err, AllowTransparentRetry: true}
+		}
+		if success {
+			break
+		}
+		if hdrListSizeErr != nil {
+			return nil, &NewStreamError{Err: hdrListSizeErr}
+		}
+		firstTry = false
+		select {
+		case <-ch:
+		case <-ctx.Done():
+			return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
+		case <-t.goAway:
+			return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true}
+		case <-t.ctx.Done():
+			return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
+		}
+	}
+	if len(t.statsHandlers) != 0 {
+		header, ok := metadata.FromOutgoingContext(ctx)
+		if ok {
+			header.Set("user-agent", t.userAgent)
+		} else {
+			header = metadata.Pairs("user-agent", t.userAgent)
+		}
+		for _, sh := range t.statsHandlers {
+			// Note: The header fields are compressed with hpack after this call returns.
+			// No WireLength field is set here.
+			// Note: Creating a new stats object to prevent pollution.
+			outHeader := &stats.OutHeader{
+				Client:      true,
+				FullMethod:  callHdr.Method,
+				RemoteAddr:  t.remoteAddr,
+				LocalAddr:   t.localAddr,
+				Compression: callHdr.SendCompress,
+				Header:      header,
+			}
+			sh.HandleRPC(s.ctx, outHeader)
+		}
+	}
+	if transportDrainRequired {
+		if t.logger.V(logLevel) {
+			t.logger.Infof("Draining transport: t.nextID > MaxStreamID")
+		}
+		t.GracefulClose()
+	}
+	return s, nil
+}
+
+// CloseStream clears the footprint of a stream when the stream is not needed any more.
+// This must not be executed in the reader's goroutine.
+func (t *http2Client) CloseStream(s *Stream, err error) {
+	var (
+		rst     bool
+		rstCode http2.ErrCode
+	)
+	if err != nil {
+		rst = true
+		rstCode = http2.ErrCodeCancel
+	}
+	t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
+}
+
+func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
+	// Set stream status to done.
+	if s.swapState(streamDone) == streamDone {
+		// If it was already done, return.  If multiple closeStream calls
+		// happen simultaneously, wait for the first to finish.
+		<-s.done
+		return
+	}
+	// status and trailers can be updated here without any synchronization because the stream goroutine will
+	// only read them after it sees an io.EOF error from read or write, and we'll write those errors
+	// only after updating this.
+	s.status = st
+	if len(mdata) > 0 {
+		s.trailer = mdata
+	}
+	if err != nil {
+		// This will unblock reads eventually.
+		s.write(recvMsg{err: err})
+	}
+	// If headerChan isn't closed, then close it.
+	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+		s.noHeaders = true
+		close(s.headerChan)
+	}
+	cleanup := &cleanupStream{
+		streamID: s.id,
+		onWrite: func() {
+			t.mu.Lock()
+			if t.activeStreams != nil {
+				delete(t.activeStreams, s.id)
+			}
+			t.mu.Unlock()
+			if channelz.IsOn() {
+				if eosReceived {
+					t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
+				} else {
+					t.channelz.SocketMetrics.StreamsFailed.Add(1)
+				}
+			}
+		},
+		rst:     rst,
+		rstCode: rstCode,
+	}
+	addBackStreamQuota := func() bool {
+		t.streamQuota++
+		if t.streamQuota > 0 && t.waitingStreams > 0 {
+			select {
+			case t.streamsQuotaAvailable <- struct{}{}:
+			default:
+			}
+		}
+		return true
+	}
+	t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
+	// This will unblock write.
+	close(s.done)
+	if s.doneFunc != nil {
+		s.doneFunc()
+	}
+}
+
+// Close kicks off the shutdown process of the transport. This should be called
+// only once on a transport. Once it is called, the transport should not be
+// accessed anymore.
+func (t *http2Client) Close(err error) {
+	t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
+	t.mu.Lock()
+	// Make sure we only close once.
+	if t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if t.logger.V(logLevel) {
+		t.logger.Infof("Closing: %v", err)
+	}
+	// Call t.onClose ASAP to prevent the client from attempting to create new
+	// streams.
+	if t.state != draining {
+		t.onClose(GoAwayInvalid)
+	}
+	t.state = closing
+	streams := t.activeStreams
+	t.activeStreams = nil
+	if t.kpDormant {
+		// If the keepalive goroutine is blocked on this condition variable, we
+		// should unblock it so that the goroutine eventually exits.
+		t.kpDormancyCond.Signal()
+	}
+	t.mu.Unlock()
+
+	// Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
+	// connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It
+	// also waits, with a timer, for loopyWriter to be closed, to avoid
+	// blocking for a long time in case the connection is blackholed, i.e.
+	// TCP is just stuck.
+	t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
+	timer := time.NewTimer(goAwayLoopyWriterTimeout)
+	defer timer.Stop()
+	select {
+	case <-t.writerDone: // success
+	case <-timer.C:
+		t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout)
+	}
+	t.cancel()
+	t.conn.Close()
+	channelz.RemoveEntry(t.channelz.ID)
+	// Append info about previous goaways if there were any, since this may be important
+	// for understanding the root cause for this connection to be closed.
+	_, goAwayDebugMessage := t.GetGoAwayReason()
+
+	var st *status.Status
+	if len(goAwayDebugMessage) > 0 {
+		st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
+		err = st.Err()
+	} else {
+		st = status.New(codes.Unavailable, err.Error())
+	}
+
+	// Notify all active streams.
+	for _, s := range streams {
+		t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
+	}
+	for _, sh := range t.statsHandlers {
+		connEnd := &stats.ConnEnd{
+			Client: true,
+		}
+		sh.HandleConn(t.ctx, connEnd)
+	}
+}
+
+// GracefulClose sets the state to draining, which prevents new streams from
+// being created and causes the transport to be closed when the last active
+// stream is closed.  If there are no active streams, the transport is closed
+// immediately.  This does nothing if the transport is already draining or
+// closing.
+func (t *http2Client) GracefulClose() {
+	t.mu.Lock()
+	// Make sure we move to draining only from active.
+	if t.state == draining || t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if t.logger.V(logLevel) {
+		t.logger.Infof("GracefulClose called")
+	}
+	t.onClose(GoAwayInvalid)
+	t.state = draining
+	active := len(t.activeStreams)
+	t.mu.Unlock()
+	if active == 0 {
+		t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
+		return
+	}
+	t.controlBuf.put(&incomingGoAway{})
+}
+
+// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
+// should proceed only if Write returns nil.
+func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error {
+	reader := data.Reader()
+
+	if opts.Last {
+		// If it's the last message, update stream state.
+		if !s.compareAndSwapState(streamActive, streamWriteDone) {
+			_ = reader.Close()
+			return errStreamDone
+		}
+	} else if s.getState() != streamActive {
+		_ = reader.Close()
+		return errStreamDone
+	}
+	df := &dataFrame{
+		streamID:  s.id,
+		endStream: opts.Last,
+		h:         hdr,
+		reader:    reader,
+	}
+	if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota.
+		if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+			_ = reader.Close()
+			return err
+		}
+	}
+	if err := t.controlBuf.put(df); err != nil {
+		_ = reader.Close()
+		return err
+	}
+	return nil
+}
+
+func (t *http2Client) getStream(f http2.Frame) *Stream {
+	t.mu.Lock()
+	s := t.activeStreams[f.Header().StreamID]
+	t.mu.Unlock()
+	return s
+}
+
+// adjustWindow sends out an extra window update over the initial window size
+// of the stream if the application is requesting data larger in size than
+// the window.
+func (t *http2Client) adjustWindow(s *Stream, n uint32) {
+	if w := s.fc.maybeAdjust(n); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
+	}
+}
+
+// updateWindow adjusts the inbound quota for the stream.
+// Window updates will be sent out when the cumulative quota
+// exceeds the corresponding threshold.
+func (t *http2Client) updateWindow(s *Stream, n uint32) {
+	if w := s.fc.onRead(n); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
+	}
+}
+
+// updateFlowControl updates the incoming flow control windows
+// for the transport and the stream based on the current bdp
+// estimation.
+func (t *http2Client) updateFlowControl(n uint32) {
+	updateIWS := func() bool {
+		t.initialWindowSize = int32(n)
+		t.mu.Lock()
+		for _, s := range t.activeStreams {
+			s.fc.newLimit(n)
+		}
+		t.mu.Unlock()
+		return true
+	}
+	t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
+	t.controlBuf.put(&outgoingSettings{
+		ss: []http2.Setting{
+			{
+				ID:  http2.SettingInitialWindowSize,
+				Val: n,
+			},
+		},
+	})
+}
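+
+// As a worked example: if the BDP estimator grows its estimate from 64KiB
+// to 256KiB, updateFlowControl (1) raises every active stream's inbound
+// limit, (2) widens the connection-level window by the difference, and
+// (3) advertises the new per-stream default to the server via
+// SETTINGS_INITIAL_WINDOW_SIZE, all serialized through controlBuf.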
+
+func (t *http2Client) handleData(f *http2.DataFrame) {
+	size := f.Header().Length
+	var sendBDPPing bool
+	if t.bdpEst != nil {
+		sendBDPPing = t.bdpEst.add(size)
+	}
+	// Decouple connection's flow control from application's read.
+	// An update on connection's flow control should not depend on
+	// whether user application has read the data or not. Such a
+	// restriction is already imposed on the stream's flow control,
+	// and therefore the sender will be blocked anyways.
+	// Decoupling the connection flow control will prevent other
+	// active(fast) streams from starving in presence of slow or
+	// inactive streams.
+	if w := t.fc.onData(size); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{
+			streamID:  0,
+			increment: w,
+		})
+	}
+	if sendBDPPing {
+		// Avoid excessive ping detection (e.g. in an L7 proxy)
+		// by sending a window update prior to the BDP ping.
+
+		if w := t.fc.reset(); w > 0 {
+			t.controlBuf.put(&outgoingWindowUpdate{
+				streamID:  0,
+				increment: w,
+			})
+		}
+
+		t.controlBuf.put(bdpPing)
+	}
+	// Select the right stream to dispatch.
+	s := t.getStream(f)
+	if s == nil {
+		return
+	}
+	if size > 0 {
+		if err := s.fc.onData(size); err != nil {
+			t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
+			return
+		}
+		if f.Header().Flags.Has(http2.FlagDataPadded) {
+			if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
+				t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
+			}
+		}
+		// TODO(bradfitz, zhaoq): A copy is required here because there is no
+		// guarantee f.Data() is consumed before the arrival of next frame.
+		// Can this copy be eliminated?
+		if len(f.Data()) > 0 {
+			pool := t.bufferPool
+			if pool == nil {
+				// Note that this is only supposed to be nil in tests. Otherwise, stream is
+				// always initialized with a BufferPool.
+				pool = mem.DefaultBufferPool()
+			}
+			s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
+		}
+	}
+	// The server has closed the stream without sending trailers.  Record that
+	// the read direction is closed, and set the status appropriately.
+	if f.StreamEnded() {
+		t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
+	}
+}
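+
+// paddingRefund is an editorial sketch of the padded-frame accounting in
+// handleData above: the frame's Length includes padding bytes the
+// application never reads, so that portion is credited back to the
+// stream's flow-control window immediately rather than waiting for an
+// application read.
+func paddingRefund(frameLen uint32, dataLen int) uint32 {
+	return frameLen - uint32(dataLen)
+}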
+
+func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
+	s := t.getStream(f)
+	if s == nil {
+		return
+	}
+	if f.ErrCode == http2.ErrCodeRefusedStream {
+		// The stream was unprocessed by the server.
+		atomic.StoreUint32(&s.unprocessed, 1)
+	}
+	statusCode, ok := http2ErrConvTab[f.ErrCode]
+	if !ok {
+		if t.logger.V(logLevel) {
+			t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode)
+		}
+		statusCode = codes.Unknown
+	}
+	if statusCode == codes.Canceled {
+		if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
+			// Our deadline was already exceeded, and that was likely the cause
+			// of this cancellation.  Alter the status code accordingly.
+			statusCode = codes.DeadlineExceeded
+		}
+	}
+	t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
+}
+
+func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
+	if f.IsAck() {
+		return
+	}
+	var maxStreams *uint32
+	var ss []http2.Setting
+	var updateFuncs []func()
+	f.ForeachSetting(func(s http2.Setting) error {
+		switch s.ID {
+		case http2.SettingMaxConcurrentStreams:
+			maxStreams = new(uint32)
+			*maxStreams = s.Val
+		case http2.SettingMaxHeaderListSize:
+			updateFuncs = append(updateFuncs, func() {
+				t.maxSendHeaderListSize = new(uint32)
+				*t.maxSendHeaderListSize = s.Val
+			})
+		default:
+			ss = append(ss, s)
+		}
+		return nil
+	})
+	if isFirst && maxStreams == nil {
+		maxStreams = new(uint32)
+		*maxStreams = math.MaxUint32
+	}
+	sf := &incomingSettings{
+		ss: ss,
+	}
+	if maxStreams != nil {
+		updateStreamQuota := func() {
+			delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
+			t.maxConcurrentStreams = *maxStreams
+			t.streamQuota += delta
+			if delta > 0 && t.waitingStreams > 0 {
+				close(t.streamsQuotaAvailable) // wake all of them up.
+				t.streamsQuotaAvailable = make(chan struct{}, 1)
+			}
+		}
+		updateFuncs = append(updateFuncs, updateStreamQuota)
+	}
+	t.controlBuf.executeAndPut(func() bool {
+		for _, f := range updateFuncs {
+			f()
+		}
+		return true
+	}, sf)
+}
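+
+// streamQuotaDelta is an editorial sketch of the quota math in
+// handleSettings above: if the server raises
+// SETTINGS_MAX_CONCURRENT_STREAMS from 100 to 150, the quota grows by 50
+// and any RPCs blocked on streamsQuotaAvailable are woken; lowering the
+// setting can drive the quota negative, which simply defers new streams
+// until enough active ones complete.
+func streamQuotaDelta(oldMax, newMax uint32, quota int64) int64 {
+	return quota + int64(newMax) - int64(oldMax)
+}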
+
+func (t *http2Client) handlePing(f *http2.PingFrame) {
+	if f.IsAck() {
+		// Maybe it's a BDP ping.
+		if t.bdpEst != nil {
+			t.bdpEst.calculate(f.Data)
+		}
+		return
+	}
+	pingAck := &ping{ack: true}
+	copy(pingAck.data[:], f.Data[:])
+	t.controlBuf.put(pingAck)
+}
+
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+	t.mu.Lock()
+	if t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
+		// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
+		// data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
+		// enabled by default and double the configured KEEPALIVE_TIME used for new connections
+		// on that channel.
+		logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
+	}
+	id := f.LastStreamID
+	if id > 0 && id%2 == 0 {
+		t.mu.Unlock()
+		t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id))
+		return
+	}
+	// A client can receive multiple GoAways from the server (see
+	// https://github.com/grpc/grpc-go/issues/1387).  The idea is that the first
+	// GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
+	// sent after an RTT delay with the ID of the last stream the server will
+	// process.
+	//
+	// Therefore, when we get the first GoAway we don't necessarily close any
+	// streams. While in case of second GoAway we close all streams created after
+	// the GoAwayId. This way streams that were in-flight while the GoAway from
+	// server was being sent don't get killed.
+	select {
+	case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
+		// If there are multiple GoAways, the first one should always have an ID greater than the following ones.
+		if id > t.prevGoAwayID {
+			t.mu.Unlock()
+			t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
+			return
+		}
+	default:
+		t.setGoAwayReason(f)
+		close(t.goAway)
+		defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held.
+		// Notify the clientconn about the GOAWAY before we set the state to
+		// draining, to allow the client to stop attempting to create streams
+		// before disallowing new streams on this connection.
+		if t.state != draining {
+			t.onClose(t.goAwayReason)
+			t.state = draining
+		}
+	}
+	// All streams with IDs greater than the GoAwayId
+	// and less than or equal to the previous GoAway ID should be killed.
+	upperLimit := t.prevGoAwayID
+	if upperLimit == 0 { // This is the first GoAway Frame.
+		upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
+	}
+
+	t.prevGoAwayID = id
+	if len(t.activeStreams) == 0 {
+		t.mu.Unlock()
+		t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
+		return
+	}
+
+	streamsToClose := make([]*Stream, 0)
+	for streamID, stream := range t.activeStreams {
+		if streamID > id && streamID <= upperLimit {
+			// The stream was unprocessed by the server.
+			atomic.StoreUint32(&stream.unprocessed, 1)
+			streamsToClose = append(streamsToClose, stream)
+		}
+	}
+	t.mu.Unlock()
+	// Called outside t.mu because closeStream can take controlBuf's mu, which
+	// could induce deadlock and is not allowed.
+	for _, stream := range streamsToClose {
+		t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
+	}
+}
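+
+// goAwayKillRange is an editorial sketch of the range computed in
+// handleGoAway above. On the first GOAWAY (prevGoAwayID == 0) every
+// stream above id is treated as unprocessed; on a second GOAWAY the
+// victims are the streams opened after it but covered by the first, e.g.
+// a first GOAWAY with id 2^31-1 followed by a second with id 7 kills
+// client streams 9, 11, 13, ... that the server will never process.
+func goAwayKillRange(prevGoAwayID, id uint32) (lower, upper uint32) {
+	upper = prevGoAwayID
+	if upper == 0 { // first GOAWAY frame seen on this transport
+		upper = math.MaxUint32
+	}
+	return id + 1, upper
+}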
+
+// setGoAwayReason sets the value of t.goAwayReason based
+// on the GoAway frame received.
+// It expects a lock on transport's mutex to be held by
+// the caller.
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
+	t.goAwayReason = GoAwayNoReason
+	switch f.ErrCode {
+	case http2.ErrCodeEnhanceYourCalm:
+		if string(f.DebugData()) == "too_many_pings" {
+			t.goAwayReason = GoAwayTooManyPings
+		}
+	}
+	if len(f.DebugData()) == 0 {
+		t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
+	} else {
+		t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
+	}
+}
+
+func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.goAwayReason, t.goAwayDebugMessage
+}
+
+func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+	t.controlBuf.put(&incomingWindowUpdate{
+		streamID:  f.Header().StreamID,
+		increment: f.Increment,
+	})
+}
+
+// operateHeaders takes action on the decoded headers.
+func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
+	s := t.getStream(frame)
+	if s == nil {
+		return
+	}
+	endStream := frame.StreamEnded()
+	atomic.StoreUint32(&s.bytesReceived, 1)
+	initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
+
+	if !initialHeader && !endStream {
+		// As specified by gRPC over HTTP2, a HEADERS frame (and associated
+		// CONTINUATION frames) can only appear at the start or end of a
+		// stream. Therefore, a second HEADERS frame must have the EOS bit set.
+		st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
+		t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
+		return
+	}
+
+	// frame.Truncated is set to true when framer detects that the current header
+	// list size hits MaxHeaderListSize limit.
+	if frame.Truncated {
+		se := status.New(codes.Internal, "peer header list size exceeded limit")
+		t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
+		return
+	}
+
+	var (
+		// If a gRPC Response-Headers has already been received, then it means
+		// that the peer is speaking gRPC and we are in gRPC mode.
+		isGRPC         = !initialHeader
+		mdata          = make(map[string][]string)
+		contentTypeErr = "malformed header: missing HTTP content-type"
+		grpcMessage    string
+		recvCompress   string
+		httpStatusCode *int
+		httpStatusErr  string
+		rawStatusCode  = codes.Unknown
+		// headerError is set if an error is encountered while parsing the headers
+		headerError string
+	)
+
+	if initialHeader {
+		httpStatusErr = "malformed header: missing HTTP status"
+	}
+
+	for _, hf := range frame.Fields {
+		switch hf.Name {
+		case "content-type":
+			if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType {
+				contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value)
+				break
+			}
+			contentTypeErr = ""
+			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+			isGRPC = true
+		case "grpc-encoding":
+			recvCompress = hf.Value
+		case "grpc-status":
+			code, err := strconv.ParseInt(hf.Value, 10, 32)
+			if err != nil {
+				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
+				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+				return
+			}
+			rawStatusCode = codes.Code(uint32(code))
+		case "grpc-message":
+			grpcMessage = decodeGrpcMessage(hf.Value)
+		case ":status":
+			if hf.Value == "200" {
+				httpStatusErr = ""
+				statusCode := 200
+				httpStatusCode = &statusCode
+				break
+			}
+
+			c, err := strconv.ParseInt(hf.Value, 10, 32)
+			if err != nil {
+				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
+				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+				return
+			}
+			statusCode := int(c)
+			httpStatusCode = &statusCode
+
+			httpStatusErr = fmt.Sprintf(
+				"unexpected HTTP status code received from server: %d (%s)",
+				statusCode,
+				http.StatusText(statusCode),
+			)
+		default:
+			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+				break
+			}
+			v, err := decodeMetadataHeader(hf.Name, hf.Value)
+			if err != nil {
+				headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
+				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], v)
+		}
+	}
+
+	if !isGRPC || httpStatusErr != "" {
+		var code = codes.Internal // when header does not include HTTP status, return INTERNAL
+
+		if httpStatusCode != nil {
+			var ok bool
+			code, ok = HTTPStatusConvTab[*httpStatusCode]
+			if !ok {
+				code = codes.Unknown
+			}
+		}
+		var errs []string
+		if httpStatusErr != "" {
+			errs = append(errs, httpStatusErr)
+		}
+		if contentTypeErr != "" {
+			errs = append(errs, contentTypeErr)
+		}
+		// Verify the HTTP response is a 200.
+		se := status.New(code, strings.Join(errs, "; "))
+		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+		return
+	}
+
+	if headerError != "" {
+		se := status.New(codes.Internal, headerError)
+		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+		return
+	}
+
+	// For headers, set them in s.header and close headerChan.  For trailers or
+	// trailers-only, closeStream will set the trailers and close headerChan as
+	// needed.
+	if !endStream {
+		// If headerChan hasn't been closed yet (expected, given we checked it
+		// above, but something else could have potentially closed the whole
+		// stream).
+		if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+			s.headerValid = true
+			// These values can be set without any synchronization because
+			// stream goroutine will read it only after seeing a closed
+			// headerChan which we'll close after setting this.
+			s.recvCompress = recvCompress
+			if len(mdata) > 0 {
+				s.header = mdata
+			}
+			close(s.headerChan)
+		}
+	}
+
+	for _, sh := range t.statsHandlers {
+		if !endStream {
+			inHeader := &stats.InHeader{
+				Client:      true,
+				WireLength:  int(frame.Header().Length),
+				Header:      metadata.MD(mdata).Copy(),
+				Compression: s.recvCompress,
+			}
+			sh.HandleRPC(s.ctx, inHeader)
+		} else {
+			inTrailer := &stats.InTrailer{
+				Client:     true,
+				WireLength: int(frame.Header().Length),
+				Trailer:    metadata.MD(mdata).Copy(),
+			}
+			sh.HandleRPC(s.ctx, inTrailer)
+		}
+	}
+
+	if !endStream {
+		return
+	}
+
+	status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
+
+	// If client received END_STREAM from server while stream was still active,
+	// send RST_STREAM.
+	rstStream := s.getState() == streamActive
+	t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true)
+}
+
+// readServerPreface reads and handles the initial settings frame from the
+// server.
+func (t *http2Client) readServerPreface() error {
+	frame, err := t.framer.fr.ReadFrame()
+	if err != nil {
+		return connectionErrorf(true, err, "error reading server preface: %v", err)
+	}
+	sf, ok := frame.(*http2.SettingsFrame)
+	if !ok {
+		return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)
+	}
+	t.handleSettings(sf, true)
+	return nil
+}
+
+// reader verifies the server preface and reads all subsequent data from
+// network connection.  If the server preface is not read successfully, an
+// error is pushed to errCh; otherwise errCh is closed with no error.
+func (t *http2Client) reader(errCh chan<- error) {
+	defer close(t.readerDone)
+
+	if err := t.readServerPreface(); err != nil {
+		errCh <- err
+		return
+	}
+	close(errCh)
+	if t.keepaliveEnabled {
+		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+	}
+
+	// loop to keep reading incoming messages on this transport.
+	for {
+		t.controlBuf.throttle()
+		frame, err := t.framer.fr.ReadFrame()
+		if t.keepaliveEnabled {
+			atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+		}
+		if err != nil {
+			// Abort an active stream if the http2.Framer returns a
+			// http2.StreamError. This can happen only if the server's response
+			// is malformed http2.
+			if se, ok := err.(http2.StreamError); ok {
+				t.mu.Lock()
+				s := t.activeStreams[se.StreamID]
+				t.mu.Unlock()
+				if s != nil {
+					// use the error detail to provide a better error message
+					code := http2ErrConvTab[se.Code]
+					errorDetail := t.framer.fr.ErrorDetail()
+					var msg string
+					if errorDetail != nil {
+						msg = errorDetail.Error()
+					} else {
+						msg = "received invalid frame"
+					}
+					t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
+				}
+				continue
+			}
+			// Transport error.
+			t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
+			return
+		}
+		switch frame := frame.(type) {
+		case *http2.MetaHeadersFrame:
+			t.operateHeaders(frame)
+		case *http2.DataFrame:
+			t.handleData(frame)
+		case *http2.RSTStreamFrame:
+			t.handleRSTStream(frame)
+		case *http2.SettingsFrame:
+			t.handleSettings(frame, false)
+		case *http2.PingFrame:
+			t.handlePing(frame)
+		case *http2.GoAwayFrame:
+			t.handleGoAway(frame)
+		case *http2.WindowUpdateFrame:
+			t.handleWindowUpdate(frame)
+		default:
+			if logger.V(logLevel) {
+				logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+			}
+		}
+	}
+}
+
+// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
+func (t *http2Client) keepalive() {
+	p := &ping{data: [8]byte{}}
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining before which we should receive an ACK for the
+	// last sent ping.
+	timeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we go block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
+	timer := time.NewTimer(t.kp.Time)
+	for {
+		select {
+		case <-timer.C:
+			lastRead := atomic.LoadInt64(&t.lastRead)
+			if lastRead > prevNano {
+				// There has been read activity since the last time we were here.
+				outstandingPing = false
+				// Next timer should fire kp.Time after the lastRead time.
+				timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+				prevNano = lastRead
+				continue
+			}
+			if outstandingPing && timeoutLeft <= 0 {
+				t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
+				return
+			}
+			t.mu.Lock()
+			if t.state == closing {
+				// If the transport is closing, we should exit from the
+				// keepalive goroutine here. If not, we could have a race
+				// between the call to Signal() from Close() and the call to
+				// Wait() here, whereby the keepalive goroutine ends up
+				// blocking on the condition variable which will never be
+				// signalled again.
+				t.mu.Unlock()
+				return
+			}
+			if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
+				// If a ping was sent out previously (because there were active
+				// streams at that point) which wasn't acked and its timeout
+				// hadn't fired, but we got here and are about to go dormant,
+				// we should make sure that we unconditionally send a ping once
+				// we awaken.
+				outstandingPing = false
+				t.kpDormant = true
+				t.kpDormancyCond.Wait()
+			}
+			t.kpDormant = false
+			t.mu.Unlock()
+
+			// We get here either because we were dormant and a new stream was
+			// created which unblocked the Wait() call, or because the
+			// keepalive timer expired. In both cases, we need to send a ping.
+			if !outstandingPing {
+				if channelz.IsOn() {
+					t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
+				}
+				t.controlBuf.put(p)
+				timeoutLeft = t.kp.Timeout
+				outstandingPing = true
+			}
+			// The amount of time to sleep here is the minimum of kp.Time and
+			// timeoutLeft. This will ensure that we wait only for kp.Time
+			// before sending out the next ping (for cases where the ping is
+			// acked).
+			sleepDuration := min(t.kp.Time, timeoutLeft)
+			timeoutLeft -= sleepDuration
+			timer.Reset(sleepDuration)
+		case <-t.ctx.Done():
+			if !timer.Stop() {
+				<-timer.C
+			}
+			return
+		}
+	}
+}
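+
+// keepaliveSleep is an editorial sketch of the timer arithmetic at the
+// bottom of keepalive above: after a ping is sent, the goroutine sleeps
+// for the smaller of the keepalive interval and the remaining ACK
+// budget. With kp.Time=10s and kp.Timeout=3s it wakes after 3s to check
+// for the ACK rather than waiting the full interval.
+func keepaliveSleep(kpTime, timeoutLeft time.Duration) (sleep, newTimeoutLeft time.Duration) {
+	sleep = min(kpTime, timeoutLeft)
+	return sleep, timeoutLeft - sleep
+}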
+
+func (t *http2Client) Error() <-chan struct{} {
+	return t.ctx.Done()
+}
+
+func (t *http2Client) GoAway() <-chan struct{} {
+	return t.goAway
+}
+
+func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
+	return &channelz.EphemeralSocketMetrics{
+		LocalFlowControlWindow:  int64(t.fc.getSize()),
+		RemoteFlowControlWindow: t.getOutFlowWindow(),
+	}
+}
+
+func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
+
+func (t *http2Client) IncrMsgSent() {
+	t.channelz.SocketMetrics.MessagesSent.Add(1)
+	t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
+}
+
+func (t *http2Client) IncrMsgRecv() {
+	t.channelz.SocketMetrics.MessagesReceived.Add(1)
+	t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
+}
+
+func (t *http2Client) getOutFlowWindow() int64 {
+	resp := make(chan uint32, 1)
+	timer := time.NewTimer(time.Second)
+	defer timer.Stop()
+	t.controlBuf.put(&outFlowControlSizeRequest{resp})
+	select {
+	case sz := <-resp:
+		return int64(sz)
+	case <-t.ctxDone:
+		return -1
+	case <-timer.C:
+		return -2
+	}
+}
+
+func (t *http2Client) stateForTesting() transportState {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.state
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/hack/tools/vendor/google.golang.org/grpc/internal/transport/http2_server.go
new file mode 100644
index 0000000000..584b50fe55
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -0,0 +1,1475 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"math/rand"
+	"net"
+	"net/http"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/internal/syscall"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/protobuf/proto"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/tap"
+)
+
+var (
+	// ErrIllegalHeaderWrite indicates that setting a header is illegal
+	// because of the stream's state.
+	ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times")
+	// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
+	// than the limit set by peer.
+	ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer")
+)
+
+// serverConnectionCounter counts the number of connections a server has seen
+// (equal to the number of http2Servers created). Must be accessed atomically.
+var serverConnectionCounter uint64
+
+// http2Server implements the ServerTransport interface with HTTP2.
+type http2Server struct {
+	lastRead        int64 // Keep this field 64-bit aligned. Accessed atomically.
+	done            chan struct{}
+	conn            net.Conn
+	loopy           *loopyWriter
+	readerDone      chan struct{} // sync point to enable testing.
+	loopyWriterDone chan struct{}
+	peer            peer.Peer
+	inTapHandle     tap.ServerInHandle
+	framer          *framer
+	// The max number of concurrent streams.
+	maxStreams uint32
+	// controlBuf delivers all the control related tasks (e.g., window
+	// updates, reset streams, and various settings) to the controller.
+	controlBuf *controlBuffer
+	fc         *trInFlow
+	stats      []stats.Handler
+	// Keepalive and max-age parameters for the server.
+	kp keepalive.ServerParameters
+	// Keepalive enforcement policy.
+	kep keepalive.EnforcementPolicy
+	// The time instance when the last ping was received.
+	lastPingAt time.Time
+	// Number of times the client has violated keepalive ping policy so far.
+	pingStrikes uint8
+	// Flag to signify that the number of ping strikes should be reset to 0.
+	// This is set whenever data or header frames are sent.
+	// 1 means yes.
+	resetPingStrikes      uint32 // Accessed atomically.
+	initialWindowSize     int32
+	bdpEst                *bdpEstimator
+	maxSendHeaderListSize *uint32
+
+	mu sync.Mutex // guard the following
+
+	// drainEvent is initialized when Drain() is called the first time, after
+	// which the server writes out the first GoAway (with ID 2^31-1) frame.
+	// An independent goroutine is then launched to later send the second
+	// GoAway. During this time we don't want to write another first GoAway
+	// (with ID 2^31-1) frame. Thus a call to Drain() is a no-op if drainEvent
+	// is already initialized, since draining is already underway.
+	drainEvent    *grpcsync.Event
+	state         transportState
+	activeStreams map[uint32]*Stream
+	// idle is the time instant when the connection went idle.
+	// This is either the beginning of the connection or when the number of
+	// RPCs go down to 0.
+	// When the connection is busy, this value is set to 0.
+	idle time.Time
+
+	// Fields below are for channelz metric collection.
+	channelz   *channelz.Socket
+	bufferPool mem.BufferPool
+
+	connectionID uint64
+
+	// maxStreamMu guards the maximum stream ID
+	// This lock may not be taken if mu is already held.
+	maxStreamMu sync.Mutex
+	maxStreamID uint32 // max stream ID ever seen
+
+	logger *grpclog.PrefixLogger
+}
+
+// NewServerTransport creates a http2 transport with conn and configuration
+// options from config.
+//
+// It returns a non-nil transport and a nil error on success. On failure, it
+// returns a nil transport and a non-nil error. For a special case where the
+// underlying conn gets closed before the client preface could be read, it
+// returns a nil transport and a nil error.
+func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+	var authInfo credentials.AuthInfo
+	rawConn := conn
+	if config.Credentials != nil {
+		var err error
+		conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
+		if err != nil {
+			// ErrConnDispatched means that the connection was dispatched away
+			// from gRPC; those connections should be left open. io.EOF means
+			// the connection was closed before handshaking completed, which can
+			// happen naturally from probers. Return these errors directly.
+			if err == credentials.ErrConnDispatched || err == io.EOF {
+				return nil, err
+			}
+			return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
+		}
+	}
+	writeBufSize := config.WriteBufferSize
+	readBufSize := config.ReadBufferSize
+	maxHeaderListSize := defaultServerMaxHeaderListSize
+	if config.MaxHeaderListSize != nil {
+		maxHeaderListSize = *config.MaxHeaderListSize
+	}
+	framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize)
+	// Send initial settings as connection preface to client.
+	isettings := []http2.Setting{{
+		ID:  http2.SettingMaxFrameSize,
+		Val: http2MaxFrameLen,
+	}}
+	if config.MaxStreams != math.MaxUint32 {
+		isettings = append(isettings, http2.Setting{
+			ID:  http2.SettingMaxConcurrentStreams,
+			Val: config.MaxStreams,
+		})
+	}
+	dynamicWindow := true
+	iwz := int32(initialWindowSize)
+	if config.InitialWindowSize >= defaultWindowSize {
+		iwz = config.InitialWindowSize
+		dynamicWindow = false
+	}
+	icwz := int32(initialWindowSize)
+	if config.InitialConnWindowSize >= defaultWindowSize {
+		icwz = config.InitialConnWindowSize
+		dynamicWindow = false
+	}
+	if iwz != defaultWindowSize {
+		isettings = append(isettings, http2.Setting{
+			ID:  http2.SettingInitialWindowSize,
+			Val: uint32(iwz)})
+	}
+	if config.MaxHeaderListSize != nil {
+		isettings = append(isettings, http2.Setting{
+			ID:  http2.SettingMaxHeaderListSize,
+			Val: *config.MaxHeaderListSize,
+		})
+	}
+	if config.HeaderTableSize != nil {
+		isettings = append(isettings, http2.Setting{
+			ID:  http2.SettingHeaderTableSize,
+			Val: *config.HeaderTableSize,
+		})
+	}
+	if err := framer.fr.WriteSettings(isettings...); err != nil {
+		return nil, connectionErrorf(false, err, "transport: %v", err)
+	}
+	// Adjust the connection flow control window if needed.
+	if delta := uint32(icwz - defaultWindowSize); delta > 0 {
+		if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
+			return nil, connectionErrorf(false, err, "transport: %v", err)
+		}
+	}
+	kp := config.KeepaliveParams
+	if kp.MaxConnectionIdle == 0 {
+		kp.MaxConnectionIdle = defaultMaxConnectionIdle
+	}
+	if kp.MaxConnectionAge == 0 {
+		kp.MaxConnectionAge = defaultMaxConnectionAge
+	}
+	// Add a jitter to MaxConnectionAge.
+	kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge)
+	if kp.MaxConnectionAgeGrace == 0 {
+		kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace
+	}
+	if kp.Time == 0 {
+		kp.Time = defaultServerKeepaliveTime
+	}
+	if kp.Timeout == 0 {
+		kp.Timeout = defaultServerKeepaliveTimeout
+	}
+	if kp.Time != infinity {
+		if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil {
+			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
+		}
+	}
+	kep := config.KeepalivePolicy
+	if kep.MinTime == 0 {
+		kep.MinTime = defaultKeepalivePolicyMinTime
+	}
+
+	done := make(chan struct{})
+	peer := peer.Peer{
+		Addr:      conn.RemoteAddr(),
+		LocalAddr: conn.LocalAddr(),
+		AuthInfo:  authInfo,
+	}
+	t := &http2Server{
+		done:              done,
+		conn:              conn,
+		peer:              peer,
+		framer:            framer,
+		readerDone:        make(chan struct{}),
+		loopyWriterDone:   make(chan struct{}),
+		maxStreams:        config.MaxStreams,
+		inTapHandle:       config.InTapHandle,
+		fc:                &trInFlow{limit: uint32(icwz)},
+		state:             reachable,
+		activeStreams:     make(map[uint32]*Stream),
+		stats:             config.StatsHandlers,
+		kp:                kp,
+		idle:              time.Now(),
+		kep:               kep,
+		initialWindowSize: iwz,
+		bufferPool:        config.BufferPool,
+	}
+	var czSecurity credentials.ChannelzSecurityValue
+	if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
+		czSecurity = au.GetSecurityValue()
+	}
+	t.channelz = channelz.RegisterSocket(
+		&channelz.Socket{
+			SocketType:       channelz.SocketTypeNormal,
+			Parent:           config.ChannelzParent,
+			SocketMetrics:    channelz.SocketMetrics{},
+			EphemeralMetrics: t.socketMetrics,
+			LocalAddr:        t.peer.LocalAddr,
+			RemoteAddr:       t.peer.Addr,
+			SocketOptions:    channelz.GetSocketOption(t.conn),
+			Security:         czSecurity,
+		},
+	)
+	t.logger = prefixLoggerForServerTransport(t)
+
+	t.controlBuf = newControlBuffer(t.done)
+	if dynamicWindow {
+		t.bdpEst = &bdpEstimator{
+			bdp:               initialWindowSize,
+			updateFlowControl: t.updateFlowControl,
+		}
+	}
+
+	t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
+	t.framer.writer.Flush()
+
+	defer func() {
+		if err != nil {
+			t.Close(err)
+		}
+	}()
+
+	// Check the validity of client preface.
+	preface := make([]byte, len(clientPreface))
+	if _, err := io.ReadFull(t.conn, preface); err != nil {
+		// In deployments where a gRPC server runs behind a cloud load balancer
+		// which performs regular TCP level health checks, the connection is
+		// closed immediately by the latter.  Returning io.EOF here allows the
+		// grpc server implementation to recognize this scenario and suppress
+		// logging to reduce spam.
+		if err == io.EOF {
+			return nil, io.EOF
+		}
+		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
+	}
+	if !bytes.Equal(preface, clientPreface) {
+		return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
+	}
+
+	frame, err := t.framer.fr.ReadFrame()
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return nil, err
+	}
+	if err != nil {
+		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
+	}
+	atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+	sf, ok := frame.(*http2.SettingsFrame)
+	if !ok {
+		return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
+	}
+	t.handleSettings(sf)
+
+	go func() {
+		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
+		err := t.loopy.run()
+		close(t.loopyWriterDone)
+		if !isIOError(err) {
+			// Close the connection if a non-I/O error occurs (for I/O errors
+			// the reader will also encounter the error and close).  Wait 1
+			// second before closing the connection, or when the reader is done
+			// (i.e. the client already closed the connection or a connection
+			// error occurred).  This avoids the potential problem where there
+			// is unread data on the receive side of the connection, which, if
+			// closed, would lead to a TCP RST instead of FIN, and the client
+			// encountering errors.  For more info:
+			// https://github.com/grpc/grpc-go/issues/5358
+			timer := time.NewTimer(time.Second)
+			defer timer.Stop()
+			select {
+			case <-t.readerDone:
+			case <-timer.C:
+			}
+			t.conn.Close()
+		}
+	}()
+	go t.keepalive()
+	return t, nil
+}
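+
+// An editorial usage sketch, not part of this file upstream (the handler
+// name and field values are illustrative): the gRPC server wires an
+// accepted connection into this transport roughly as follows.
+//
+//	st, err := NewServerTransport(conn, &ServerConfig{
+//		MaxStreams:      math.MaxUint32, // advertise no MAX_CONCURRENT_STREAMS limit
+//		KeepaliveParams: keepalive.ServerParameters{Time: 2 * time.Hour},
+//	})
+//	if err != nil {
+//		return // reject the connection
+//	}
+//	go st.HandleStreams(ctx, handleStream)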
+
+// operateHeaders takes action on the decoded headers. It returns an error if
+// a fatal error is encountered and the transport needs to close; otherwise it
+// returns nil.
+func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
+	// Acquire max stream ID lock for entire duration
+	t.maxStreamMu.Lock()
+	defer t.maxStreamMu.Unlock()
+
+	streamID := frame.Header().StreamID
+
+	// frame.Truncated is set to true when framer detects that the current header
+	// list size hits MaxHeaderListSize limit.
+	if frame.Truncated {
+		t.controlBuf.put(&cleanupStream{
+			streamID: streamID,
+			rst:      true,
+			rstCode:  http2.ErrCodeFrameSize,
+			onWrite:  func() {},
+		})
+		return nil
+	}
+
+	if streamID%2 != 1 || streamID <= t.maxStreamID {
+		// illegal gRPC stream id.
+		return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame)
+	}
+	t.maxStreamID = streamID
+
+	buf := newRecvBuffer()
+	s := &Stream{
+		id:               streamID,
+		st:               t,
+		buf:              buf,
+		fc:               &inFlow{limit: uint32(t.initialWindowSize)},
+		headerWireLength: int(frame.Header().Length),
+	}
+	var (
+		// if false, content-type was missing or invalid
+		isGRPC      = false
+		contentType = ""
+		mdata       = make(metadata.MD, len(frame.Fields))
+		httpMethod  string
+		// these are set if an error is encountered while parsing the headers
+		protocolError bool
+		headerError   *status.Status
+
+		timeoutSet bool
+		timeout    time.Duration
+	)
+
+	for _, hf := range frame.Fields {
+		switch hf.Name {
+		case "content-type":
+			contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
+			if !validContentType {
+				contentType = hf.Value
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+			s.contentSubtype = contentSubtype
+			isGRPC = true
+
+		case "grpc-accept-encoding":
+			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+			if hf.Value == "" {
+				continue
+			}
+			compressors := hf.Value
+			if s.clientAdvertisedCompressors != "" {
+				compressors = s.clientAdvertisedCompressors + "," + compressors
+			}
+			s.clientAdvertisedCompressors = compressors
+		case "grpc-encoding":
+			s.recvCompress = hf.Value
+		case ":method":
+			httpMethod = hf.Value
+		case ":path":
+			s.method = hf.Value
+		case "grpc-timeout":
+			timeoutSet = true
+			var err error
+			if timeout, err = decodeTimeout(hf.Value); err != nil {
+				headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
+			}
+		// "Transports must consider requests containing the Connection header
+		// as malformed." - A41
+		case "connection":
+			if t.logger.V(logLevel) {
+				t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec")
+			}
+			protocolError = true
+		default:
+			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+				break
+			}
+			v, err := decodeMetadataHeader(hf.Name, hf.Value)
+			if err != nil {
+				headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
+				t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], v)
+		}
+	}
+
+	// "If multiple Host headers or multiple :authority headers are present, the
+	// request must be rejected with an HTTP status code 400 as required by Host
+	// validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM
+	// with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2
+	// error, this takes precedence over a client not speaking gRPC.
+	if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
+		errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
+		if t.logger.V(logLevel) {
+			t.logger.Infof("Aborting the stream early: %v", errMsg)
+		}
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     http.StatusBadRequest,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         status.New(codes.Internal, errMsg),
+			rst:            !frame.StreamEnded(),
+		})
+		return nil
+	}
+
+	if protocolError {
+		t.controlBuf.put(&cleanupStream{
+			streamID: streamID,
+			rst:      true,
+			rstCode:  http2.ErrCodeProtocol,
+			onWrite:  func() {},
+		})
+		return nil
+	}
+	if !isGRPC {
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     http.StatusUnsupportedMediaType,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
+			rst:            !frame.StreamEnded(),
+		})
+		return nil
+	}
+	if headerError != nil {
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     http.StatusBadRequest,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         headerError,
+			rst:            !frame.StreamEnded(),
+		})
+		return nil
+	}
+
+	// "If :authority is missing, Host must be renamed to :authority." - A41
+	if len(mdata[":authority"]) == 0 {
+		// No-op if host isn't present, no eventual :authority header is a valid
+		// RPC.
+		if host, ok := mdata["host"]; ok {
+			mdata[":authority"] = host
+			delete(mdata, "host")
+		}
+	} else {
+		// "If :authority is present, Host must be discarded" - A41
+		delete(mdata, "host")
+	}
+
+	if frame.StreamEnded() {
+		// s is just created by the caller. No lock needed.
+		s.state = streamReadDone
+	}
+	if timeoutSet {
+		s.ctx, s.cancel = context.WithTimeout(ctx, timeout)
+	} else {
+		s.ctx, s.cancel = context.WithCancel(ctx)
+	}
+
+	// Attach the received metadata to the context.
+	if len(mdata) > 0 {
+		s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
+		if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 {
+			s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1]))
+		}
+		if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 {
+			s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1]))
+		}
+	}
+	t.mu.Lock()
+	if t.state != reachable {
+		t.mu.Unlock()
+		s.cancel()
+		return nil
+	}
+	if uint32(len(t.activeStreams)) >= t.maxStreams {
+		t.mu.Unlock()
+		t.controlBuf.put(&cleanupStream{
+			streamID: streamID,
+			rst:      true,
+			rstCode:  http2.ErrCodeRefusedStream,
+			onWrite:  func() {},
+		})
+		s.cancel()
+		return nil
+	}
+	if httpMethod != http.MethodPost {
+		t.mu.Unlock()
+		errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod)
+		if t.logger.V(logLevel) {
+			t.logger.Infof("Aborting the stream early: %v", errMsg)
+		}
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     405,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         status.New(codes.Internal, errMsg),
+			rst:            !frame.StreamEnded(),
+		})
+		s.cancel()
+		return nil
+	}
+	if t.inTapHandle != nil {
+		var err error
+		if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil {
+			t.mu.Unlock()
+			if t.logger.V(logLevel) {
+				t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
+			}
+			stat, ok := status.FromError(err)
+			if !ok {
+				stat = status.New(codes.PermissionDenied, err.Error())
+			}
+			t.controlBuf.put(&earlyAbortStream{
+				httpStatus:     200,
+				streamID:       s.id,
+				contentSubtype: s.contentSubtype,
+				status:         stat,
+				rst:            !frame.StreamEnded(),
+			})
+			return nil
+		}
+	}
+	t.activeStreams[streamID] = s
+	if len(t.activeStreams) == 1 {
+		t.idle = time.Time{}
+	}
+	t.mu.Unlock()
+	if channelz.IsOn() {
+		t.channelz.SocketMetrics.StreamsStarted.Add(1)
+		t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
+	}
+	s.requestRead = func(n int) {
+		t.adjustWindow(s, uint32(n))
+	}
+	s.ctxDone = s.ctx.Done()
+	s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
+	s.trReader = &transportReader{
+		reader: &recvBufferReader{
+			ctx:     s.ctx,
+			ctxDone: s.ctxDone,
+			recv:    s.buf,
+		},
+		windowHandler: func(n int) {
+			t.updateWindow(s, uint32(n))
+		},
+	}
+	// Register the stream with loopy.
+	t.controlBuf.put(&registerStream{
+		streamID: s.id,
+		wq:       s.wq,
+	})
+	handle(s)
+	return nil
+}
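+
+// normalizeAuthority is an editorial sketch of the A41 rules applied in
+// operateHeaders above: a request carrying only "host: a.example" ends up
+// with ":authority" = ["a.example"], while a request carrying both keeps
+// ":authority" and discards "host".
+func normalizeAuthority(mdata metadata.MD) metadata.MD {
+	if len(mdata[":authority"]) == 0 {
+		if host, ok := mdata["host"]; ok {
+			mdata[":authority"] = host
+		}
+	}
+	delete(mdata, "host")
+	return mdata
+}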
+
+// HandleStreams receives incoming streams using the given handler. This is
+// typically run in a separate goroutine.
+func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
+	defer func() {
+		close(t.readerDone)
+		<-t.loopyWriterDone
+	}()
+	for {
+		t.controlBuf.throttle()
+		frame, err := t.framer.fr.ReadFrame()
+		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+		if err != nil {
+			if se, ok := err.(http2.StreamError); ok {
+				if t.logger.V(logLevel) {
+					t.logger.Warningf("Encountered http2.StreamError: %v", se)
+				}
+				t.mu.Lock()
+				s := t.activeStreams[se.StreamID]
+				t.mu.Unlock()
+				if s != nil {
+					t.closeStream(s, true, se.Code, false)
+				} else {
+					t.controlBuf.put(&cleanupStream{
+						streamID: se.StreamID,
+						rst:      true,
+						rstCode:  se.Code,
+						onWrite:  func() {},
+					})
+				}
+				continue
+			}
+			t.Close(err)
+			return
+		}
+		switch frame := frame.(type) {
+		case *http2.MetaHeadersFrame:
+			if err := t.operateHeaders(ctx, frame, handle); err != nil {
+				// Any error processing client headers, e.g. invalid stream ID,
+				// is considered a protocol violation.
+				t.controlBuf.put(&goAway{
+					code:      http2.ErrCodeProtocol,
+					debugData: []byte(err.Error()),
+					closeConn: err,
+				})
+				continue
+			}
+		case *http2.DataFrame:
+			t.handleData(frame)
+		case *http2.RSTStreamFrame:
+			t.handleRSTStream(frame)
+		case *http2.SettingsFrame:
+			t.handleSettings(frame)
+		case *http2.PingFrame:
+			t.handlePing(frame)
+		case *http2.WindowUpdateFrame:
+			t.handleWindowUpdate(frame)
+		case *http2.GoAwayFrame:
+			// TODO: Handle GoAway from the client appropriately.
+		default:
+			if t.logger.V(logLevel) {
+				t.logger.Infof("Received unsupported frame type %T", frame)
+			}
+		}
+	}
+}
+
+func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.activeStreams == nil {
+		// The transport is closing.
+		return nil, false
+	}
+	s, ok := t.activeStreams[f.Header().StreamID]
+	if !ok {
+		// The stream is already done.
+		return nil, false
+	}
+	return s, true
+}
+
+// adjustWindow sends out an extra window update over the initial window size
+// of the stream if the application is requesting data larger in size than
+// the window.
+func (t *http2Server) adjustWindow(s *Stream, n uint32) {
+	if w := s.fc.maybeAdjust(n); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
+	}
+}
+
+// updateWindow adjusts the inbound quota for the stream and the transport.
+// Window updates will be delivered to the controller for sending when
+// the cumulative quota exceeds the corresponding threshold.
+func (t *http2Server) updateWindow(s *Stream, n uint32) {
+	if w := s.fc.onRead(n); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
+			increment: w,
+		})
+	}
+}
+
+// updateFlowControl updates the incoming flow control windows
+// for the transport and the stream based on the current bdp
+// estimation.
+func (t *http2Server) updateFlowControl(n uint32) {
+	t.mu.Lock()
+	for _, s := range t.activeStreams {
+		s.fc.newLimit(n)
+	}
+	t.initialWindowSize = int32(n)
+	t.mu.Unlock()
+	t.controlBuf.put(&outgoingWindowUpdate{
+		streamID:  0,
+		increment: t.fc.newLimit(n),
+	})
+	t.controlBuf.put(&outgoingSettings{
+		ss: []http2.Setting{
+			{
+				ID:  http2.SettingInitialWindowSize,
+				Val: n,
+			},
+		},
+	})
+}
+
+func (t *http2Server) handleData(f *http2.DataFrame) {
+	size := f.Header().Length
+	var sendBDPPing bool
+	if t.bdpEst != nil {
+		sendBDPPing = t.bdpEst.add(size)
+	}
+	// Decouple connection's flow control from application's read.
+	// An update on connection's flow control should not depend on
+	// whether user application has read the data or not. Such a
+	// restriction is already imposed on the stream's flow control,
+	// and therefore the sender will be blocked anyways.
+	// Decoupling the connection flow control will prevent other
+	// active(fast) streams from starving in presence of slow or
+	// inactive streams.
+	if w := t.fc.onData(size); w > 0 {
+		t.controlBuf.put(&outgoingWindowUpdate{
+			streamID:  0,
+			increment: w,
+		})
+	}
+	if sendBDPPing {
+		// Avoid excessive ping detection (e.g. in an L7 proxy)
+		// by sending a window update prior to the BDP ping.
+		if w := t.fc.reset(); w > 0 {
+			t.controlBuf.put(&outgoingWindowUpdate{
+				streamID:  0,
+				increment: w,
+			})
+		}
+		t.controlBuf.put(bdpPing)
+	}
+	// Select the right stream to dispatch.
+	s, ok := t.getStream(f)
+	if !ok {
+		return
+	}
+	if s.getState() == streamReadDone {
+		t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
+		return
+	}
+	if size > 0 {
+		if err := s.fc.onData(size); err != nil {
+			t.closeStream(s, true, http2.ErrCodeFlowControl, false)
+			return
+		}
+		if f.Header().Flags.Has(http2.FlagDataPadded) {
+			if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
+				t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
+			}
+		}
+		// TODO(bradfitz, zhaoq): A copy is required here because there is no
+		// guarantee f.Data() is consumed before the arrival of next frame.
+		// Can this copy be eliminated?
+		if len(f.Data()) > 0 {
+			pool := t.bufferPool
+			if pool == nil {
+				// Note that this is only supposed to be nil in tests. Otherwise, stream is
+				// always initialized with a BufferPool.
+				pool = mem.DefaultBufferPool()
+			}
+			s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
+		}
+	}
+	if f.StreamEnded() {
+		// Received the end of stream from the client.
+		s.compareAndSwapState(streamActive, streamReadDone)
+		s.write(recvMsg{err: io.EOF})
+	}
+}
+
+func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
+	// If the stream is not deleted from the transport's active streams map,
+	// then do a regular close stream.
+	if s, ok := t.getStream(f); ok {
+		t.closeStream(s, false, 0, false)
+		return
+	}
+	// If the stream is already deleted from the active streams map, then put
+	// a cleanupStream item into controlbuf to delete the stream from loopy
+	// writer's established streams map.
+	t.controlBuf.put(&cleanupStream{
+		streamID: f.Header().StreamID,
+		rst:      false,
+		rstCode:  0,
+		onWrite:  func() {},
+	})
+}
+
+func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
+	if f.IsAck() {
+		return
+	}
+	var ss []http2.Setting
+	var updateFuncs []func()
+	f.ForeachSetting(func(s http2.Setting) error {
+		switch s.ID {
+		case http2.SettingMaxHeaderListSize:
+			updateFuncs = append(updateFuncs, func() {
+				t.maxSendHeaderListSize = new(uint32)
+				*t.maxSendHeaderListSize = s.Val
+			})
+		default:
+			ss = append(ss, s)
+		}
+		return nil
+	})
+	t.controlBuf.executeAndPut(func() bool {
+		for _, f := range updateFuncs {
+			f()
+		}
+		return true
+	}, &incomingSettings{
+		ss: ss,
+	})
+}
+
+const (
+	maxPingStrikes     = 2
+	defaultPingTimeout = 2 * time.Hour
+)
+
+func (t *http2Server) handlePing(f *http2.PingFrame) {
+	if f.IsAck() {
+		if f.Data == goAwayPing.data && t.drainEvent != nil {
+			t.drainEvent.Fire()
+			return
+		}
+		// Maybe it's a BDP ping.
+		if t.bdpEst != nil {
+			t.bdpEst.calculate(f.Data)
+		}
+		return
+	}
+	pingAck := &ping{ack: true}
+	copy(pingAck.data[:], f.Data[:])
+	t.controlBuf.put(pingAck)
+
+	now := time.Now()
+	defer func() {
+		t.lastPingAt = now
+	}()
+	// A ping-strikes reset means that we don't need to check for a policy
+	// violation for this ping, and the pingStrikes counter should be set
+	// to 0.
+	if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
+		t.pingStrikes = 0
+		return
+	}
+	t.mu.Lock()
+	ns := len(t.activeStreams)
+	t.mu.Unlock()
+	if ns < 1 && !t.kep.PermitWithoutStream {
+		// Keepalive shouldn't be active, so this new ping should
+		// have come after at least defaultPingTimeout.
+		if t.lastPingAt.Add(defaultPingTimeout).After(now) {
+			t.pingStrikes++
+		}
+	} else {
+		// Check if keepalive policy is respected.
+		if t.lastPingAt.Add(t.kep.MinTime).After(now) {
+			t.pingStrikes++
+		}
+	}
+
+	if t.pingStrikes > maxPingStrikes {
+		// Send goaway and close the connection.
+		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
+	}
+}
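+
+// pingStrike is an editorial sketch of the enforcement policy in
+// handlePing above: a ping arriving sooner than the permitted interval
+// (kep.MinTime while streams are active, defaultPingTimeout when idle
+// without PermitWithoutStream) counts as a strike, and more than
+// maxPingStrikes strikes earns the client a GOAWAY with debug data
+// "too_many_pings".
+func pingStrike(lastPingAt, now time.Time, minInterval time.Duration, strikes uint8) (uint8, bool) {
+	if lastPingAt.Add(minInterval).After(now) {
+		strikes++
+	}
+	return strikes, strikes > maxPingStrikes
+}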
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+	t.controlBuf.put(&incomingWindowUpdate{
+		streamID:  f.Header().StreamID,
+		increment: f.Increment,
+	})
+}
+
+func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
+	for k, vv := range md {
+		if isReservedHeader(k) {
+			// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+		}
+	}
+	return headerFields
+}
+
+func (t *http2Server) checkForHeaderListSize(it any) bool {
+	if t.maxSendHeaderListSize == nil {
+		return true
+	}
+	hdrFrame := it.(*headerFrame)
+	var sz int64
+	for _, f := range hdrFrame.hf {
+		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+			if t.logger.V(logLevel) {
+				t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			}
+			return false
+		}
+	}
+	return true
+}
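+
+// headerListSize is an editorial sketch of the accounting in
+// checkForHeaderListSize above: per RFC 7541 §4.1 each field costs
+// len(name) + len(value) + 32 bytes of per-entry overhead, which is
+// exactly what hpack.HeaderField.Size returns.
+func headerListSize(hf []hpack.HeaderField) int64 {
+	var sz int64
+	for _, f := range hf {
+		sz += int64(f.Size())
+	}
+	return sz
+}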
+
+func (t *http2Server) streamContextErr(s *Stream) error {
+	select {
+	case <-t.done:
+		return ErrConnClosing
+	default:
+	}
+	return ContextErr(s.ctx.Err())
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+	s.hdrMu.Lock()
+	defer s.hdrMu.Unlock()
+	if s.getState() == streamDone {
+		return t.streamContextErr(s)
+	}
+
+	if s.updateHeaderSent() {
+		return ErrIllegalHeaderWrite
+	}
+
+	if md.Len() > 0 {
+		if s.header.Len() > 0 {
+			s.header = metadata.Join(s.header, md)
+		} else {
+			s.header = md
+		}
+	}
+	if err := t.writeHeaderLocked(s); err != nil {
+		switch e := err.(type) {
+		case ConnectionError:
+			return status.Error(codes.Unavailable, e.Desc)
+		default:
+			return status.Convert(err).Err()
+		}
+	}
+	return nil
+}
+
+func (t *http2Server) setResetPingStrikes() {
+	atomic.StoreUint32(&t.resetPingStrikes, 1)
+}
+
+func (t *http2Server) writeHeaderLocked(s *Stream) error {
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+	// first and create a slice of that exact size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+	if s.sendCompress != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+	}
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
+	hf := &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: false,
+		onWrite:   t.setResetPingStrikes,
+	}
+	success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf)
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	for _, sh := range t.stats {
+		// Note: Headers are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		outHeader := &stats.OutHeader{
+			Header:      s.header.Copy(),
+			Compression: s.sendCompress,
+		}
+		sh.HandleRPC(s.Context(), outHeader)
+	}
+	return nil
+}
+
+// WriteStatus sends the stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream afterwards.
+// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+	s.hdrMu.Lock()
+	defer s.hdrMu.Unlock()
+
+	if s.getState() == streamDone {
+		return nil
+	}
+
+	// TODO(mmukhi): Benchmark whether performance improves if we count the
+	// metadata and other header fields first and create a slice of that exact size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // at least grpc-status and grpc-message will be present, if nothing else.
+	if !s.updateHeaderSent() {                      // No headers have been sent.
+		if len(s.header) > 0 { // Send a separate header frame.
+			if err := t.writeHeaderLocked(s); err != nil {
+				return err
+			}
+		} else { // Send a trailer only response.
+			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+		}
+	}
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
+
+	if p := st.Proto(); p != nil && len(p.Details) > 0 {
+		// Do not use the user's grpc-status-details-bin (if present) if we are
+		// even attempting to set our own.
+		delete(s.trailer, grpcStatusDetailsBinHeader)
+		stBytes, err := proto.Marshal(p)
+		if err != nil {
+			// TODO: return error instead, when callers are able to handle it.
+			t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
+		} else {
+			headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)})
+		}
+	}
+
+	// Attach the trailer metadata.
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
+	trailingHeader := &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: true,
+		onWrite:   t.setResetPingStrikes,
+	}
+
+	success, err := t.controlBuf.executeAndPut(func() bool {
+		return t.checkForHeaderListSize(trailingHeader)
+	}, nil)
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	// Send a RST_STREAM after the trailers if the client has not already half-closed.
+	rst := s.getState() == streamActive
+	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
+	for _, sh := range t.stats {
+		// Note: The trailer fields are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		sh.HandleRPC(s.Context(), &stats.OutTrailer{
+			Trailer: s.trailer.Copy(),
+		})
+	}
+	return nil
+}
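+
+// For a trailers-only response (no messages and headers never sent), the
+// single HEADERS frame built above carries, for example:
+//
+//	:status: 200
+//	content-type: application/grpc
+//	grpc-status: 5
+//	grpc-message: not found
+//
+// with END_STREAM set, followed by a RST_STREAM only when the client has not
+// already half-closed.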
+
+// Write converts the data into HTTP2 data frames and sends them out. A non-nil
+// error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+	reader := data.Reader()
+
+	if !s.isHeaderSent() { // Headers haven't been written yet.
+		if err := t.WriteHeader(s, nil); err != nil {
+			_ = reader.Close()
+			return err
+		}
+	} else {
+		// Writing headers checks for this condition.
+		if s.getState() == streamDone {
+			_ = reader.Close()
+			return t.streamContextErr(s)
+		}
+	}
+
+	df := &dataFrame{
+		streamID:    s.id,
+		h:           hdr,
+		reader:      reader,
+		onEachWrite: t.setResetPingStrikes,
+	}
+	if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+		_ = reader.Close()
+		return t.streamContextErr(s)
+	}
+	if err := t.controlBuf.put(df); err != nil {
+		_ = reader.Close()
+		return err
+	}
+	return nil
+}
+
+// keepalive running in a separate goroutine does the following:
+// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
+// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
+// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
+// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
+// after an additional duration of keepalive.Timeout.
+func (t *http2Server) keepalive() {
+	p := &ping{}
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining before which we should receive an ACK for the
+	// last sent ping.
+	kpTimeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we go block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
+	// Initialize the different timers to their default values.
+	idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+	ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+	kpTimer := time.NewTimer(t.kp.Time)
+	defer func() {
+		// We need to drain the underlying channel in these timers after a call
+		// to Stop(), only if we are interested in resetting them. Clearly we
+		// are not interested in resetting them here.
+		idleTimer.Stop()
+		ageTimer.Stop()
+		kpTimer.Stop()
+	}()
+
+	for {
+		select {
+		case <-idleTimer.C:
+			t.mu.Lock()
+			idle := t.idle
+			if idle.IsZero() { // The connection is non-idle.
+				t.mu.Unlock()
+				idleTimer.Reset(t.kp.MaxConnectionIdle)
+				continue
+			}
+			val := t.kp.MaxConnectionIdle - time.Since(idle)
+			t.mu.Unlock()
+			if val <= 0 {
+				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
+				// Gracefully close the connection.
+				t.Drain("max_idle")
+				return
+			}
+			idleTimer.Reset(val)
+		case <-ageTimer.C:
+			t.Drain("max_age")
+			ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
+			select {
+			case <-ageTimer.C:
+				// Close the connection after grace period.
+				if t.logger.V(logLevel) {
+					t.logger.Infof("Closing server transport due to maximum connection age")
+				}
+				t.controlBuf.put(closeConnection{})
+			case <-t.done:
+			}
+			return
+		case <-kpTimer.C:
+			lastRead := atomic.LoadInt64(&t.lastRead)
+			if lastRead > prevNano {
+				// There has been read activity since the last time we were
+				// here. Setup the timer to fire at kp.Time seconds from
+				// lastRead time and continue.
+				outstandingPing = false
+				kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+				prevNano = lastRead
+				continue
+			}
+			if outstandingPing && kpTimeoutLeft <= 0 {
+				t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout))
+				return
+			}
+			if !outstandingPing {
+				if channelz.IsOn() {
+					t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
+				}
+				t.controlBuf.put(p)
+				kpTimeoutLeft = t.kp.Timeout
+				outstandingPing = true
+			}
+		// The amount of time to sleep here is the minimum of kp.Time and
+		// kpTimeoutLeft. This ensures that we wait only for kp.Time before
+		// sending out the next ping (for cases where the ping is acked).
+			sleepDuration := min(t.kp.Time, kpTimeoutLeft)
+			kpTimeoutLeft -= sleepDuration
+			kpTimer.Reset(sleepDuration)
+		case <-t.done:
+			return
+		}
+	}
+}
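+
+// As a worked example, with keepalive.ServerParameters{Time: 2 * time.Hour,
+// Timeout: 20 * time.Second}: after 2h without read activity a ping is sent
+// and the timer is re-armed with min(2h, 20s) = 20s; if no read arrives before
+// it fires again, kpTimeoutLeft has reached 0 and the transport is closed.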
+
+// Close starts shutting down the http2Server transport.
+// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
+// could cause some resource issue. Revisit this later.
+func (t *http2Server) Close(err error) {
+	t.mu.Lock()
+	if t.state == closing {
+		t.mu.Unlock()
+		return
+	}
+	if t.logger.V(logLevel) {
+		t.logger.Infof("Closing: %v", err)
+	}
+	t.state = closing
+	streams := t.activeStreams
+	t.activeStreams = nil
+	t.mu.Unlock()
+	t.controlBuf.finish()
+	close(t.done)
+	if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
+		t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
+	}
+	channelz.RemoveEntry(t.channelz.ID)
+	// Cancel all active streams.
+	for _, s := range streams {
+		s.cancel()
+	}
+}
+
+// deleteStream deletes the stream s from transport's active streams.
+func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
+	t.mu.Lock()
+	if _, ok := t.activeStreams[s.id]; ok {
+		delete(t.activeStreams, s.id)
+		if len(t.activeStreams) == 0 {
+			t.idle = time.Now()
+		}
+	}
+	t.mu.Unlock()
+
+	if channelz.IsOn() {
+		if eosReceived {
+			t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
+		} else {
+			t.channelz.SocketMetrics.StreamsFailed.Add(1)
+		}
+	}
+}
+
+// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
+func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+	// In case stream sending and receiving are invoked in separate
+	// goroutines (e.g., bi-directional streaming), cancel needs to be
+	// called to interrupt the potential blocking on other goroutines.
+	s.cancel()
+
+	oldState := s.swapState(streamDone)
+	if oldState == streamDone {
+		// If the stream was already done, return.
+		return
+	}
+
+	hdr.cleanup = &cleanupStream{
+		streamID: s.id,
+		rst:      rst,
+		rstCode:  rstCode,
+		onWrite: func() {
+			t.deleteStream(s, eosReceived)
+		},
+	}
+	t.controlBuf.put(hdr)
+}
+
+// closeStream clears the footprint of a stream when the stream is not needed any more.
+func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+	// In case stream sending and receiving are invoked in separate
+	// goroutines (e.g., bi-directional streaming), cancel needs to be
+	// called to interrupt the potential blocking on other goroutines.
+	s.cancel()
+
+	s.swapState(streamDone)
+	t.deleteStream(s, eosReceived)
+
+	t.controlBuf.put(&cleanupStream{
+		streamID: s.id,
+		rst:      rst,
+		rstCode:  rstCode,
+		onWrite:  func() {},
+	})
+}
+
+func (t *http2Server) Drain(debugData string) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.drainEvent != nil {
+		return
+	}
+	t.drainEvent = grpcsync.NewEvent()
+	t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true})
+}
+
+var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
+
+// Handles outgoing GoAway and returns true if loopy needs to put itself
+// in draining mode.
+func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
+	t.maxStreamMu.Lock()
+	t.mu.Lock()
+	if t.state == closing { // TODO(mmukhi): This seems unnecessary.
+		t.mu.Unlock()
+		t.maxStreamMu.Unlock()
+		// The transport is closing.
+		return false, ErrConnClosing
+	}
+	if !g.headsUp {
+		// Stop accepting more streams now.
+		t.state = draining
+		sid := t.maxStreamID
+		retErr := g.closeConn
+		if len(t.activeStreams) == 0 {
+			retErr = errors.New("second GOAWAY written and no active streams left to process")
+		}
+		t.mu.Unlock()
+		t.maxStreamMu.Unlock()
+		if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
+			return false, err
+		}
+		t.framer.writer.Flush()
+		if retErr != nil {
+			return false, retErr
+		}
+		return true, nil
+	}
+	t.mu.Unlock()
+	t.maxStreamMu.Unlock()
+	// For a graceful close, send out a GoAway with a stream ID of MaxUint32,
+	// then follow that with a ping and wait for the ack to come back or a timer
+	// to expire. During this time, accept new streams since they might have
+	// originated before the GoAway reached the client.
+	// After getting the ack or on timer expiration, send out another GoAway,
+	// this time with an ID of the max stream the server intends to process.
+	if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil {
+		return false, err
+	}
+	if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
+		return false, err
+	}
+	go func() {
+		timer := time.NewTimer(5 * time.Second)
+		defer timer.Stop()
+		select {
+		case <-t.drainEvent.Done():
+		case <-timer.C:
+		case <-t.done:
+			return
+		}
+		t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
+	}()
+	return false, nil
+}
+
+func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics {
+	return &channelz.EphemeralSocketMetrics{
+		LocalFlowControlWindow:  int64(t.fc.getSize()),
+		RemoteFlowControlWindow: t.getOutFlowWindow(),
+	}
+}
+
+func (t *http2Server) IncrMsgSent() {
+	t.channelz.SocketMetrics.MessagesSent.Add(1)
+	t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
+}
+
+func (t *http2Server) IncrMsgRecv() {
+	t.channelz.SocketMetrics.MessagesReceived.Add(1)
+	t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
+}
+
+func (t *http2Server) getOutFlowWindow() int64 {
+	resp := make(chan uint32, 1)
+	timer := time.NewTimer(time.Second)
+	defer timer.Stop()
+	t.controlBuf.put(&outFlowControlSizeRequest{resp})
+	select {
+	case sz := <-resp:
+		return int64(sz)
+	case <-t.done:
+		return -1
+	case <-timer.C:
+		return -2
+	}
+}
+
+// Peer returns the peer of the transport.
+func (t *http2Server) Peer() *peer.Peer {
+	return &peer.Peer{
+		Addr:      t.peer.Addr,
+		LocalAddr: t.peer.LocalAddr,
+		AuthInfo:  t.peer.AuthInfo, // Can be nil
+	}
+}
+
+func getJitter(v time.Duration) time.Duration {
+	if v == infinity {
+		return 0
+	}
+	// Generate a jitter between +/- 10% of the value.
+	r := int64(v / 10)
+	j := rand.Int63n(2*r) - r
+	return time.Duration(j)
+}
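+
+// For example, getJitter(2*time.Hour) draws uniformly from roughly
+// (-12m, +12m), i.e. ±10% of the value, so connections created together do
+// not all hit MaxConnectionAge at the same instant.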
+
+type connectionKey struct{}
+
+// GetConnection gets the connection from the context.
+func GetConnection(ctx context.Context) net.Conn {
+	conn, _ := ctx.Value(connectionKey{}).(net.Conn)
+	return conn
+}
+
+// SetConnection adds the connection to the context to be able to get
+// information about the destination ip and port for an incoming RPC. This also
+// allows any unary or streaming interceptors to see the connection.
+func SetConnection(ctx context.Context, conn net.Conn) context.Context {
+	return context.WithValue(ctx, connectionKey{}, conn)
+}
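+
+// A minimal sketch of reading the connection back out of the context; the
+// interceptor below is hypothetical (and, being in an internal package, this
+// accessor is only reachable from within gRPC itself):
+//
+//	func logPeer(ctx context.Context, req any, info *grpc.UnaryServerInfo,
+//		handler grpc.UnaryHandler) (any, error) {
+//		if conn := GetConnection(ctx); conn != nil {
+//			log.Printf("rpc %s on %s", info.FullMethod, conn.LocalAddr())
+//		}
+//		return handler(ctx, req)
+//	}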
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/transport/http_util.go b/hack/tools/vendor/google.golang.org/grpc/internal/transport/http_util.go
new file mode 100644
index 0000000000..3613d7b648
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -0,0 +1,468 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bufio"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/codes"
+)
+
+const (
+	// http2MaxFrameLen specifies the max length of an HTTP2 frame.
+	http2MaxFrameLen = 16384 // 16KB frame
+	// https://httpwg.org/specs/rfc7540.html#SettingValues
+	http2InitHeaderTableSize = 4096
+)
+
+var (
+	clientPreface   = []byte(http2.ClientPreface)
+	http2ErrConvTab = map[http2.ErrCode]codes.Code{
+		http2.ErrCodeNo:                 codes.Internal,
+		http2.ErrCodeProtocol:           codes.Internal,
+		http2.ErrCodeInternal:           codes.Internal,
+		http2.ErrCodeFlowControl:        codes.ResourceExhausted,
+		http2.ErrCodeSettingsTimeout:    codes.Internal,
+		http2.ErrCodeStreamClosed:       codes.Internal,
+		http2.ErrCodeFrameSize:          codes.Internal,
+		http2.ErrCodeRefusedStream:      codes.Unavailable,
+		http2.ErrCodeCancel:             codes.Canceled,
+		http2.ErrCodeCompression:        codes.Internal,
+		http2.ErrCodeConnect:            codes.Internal,
+		http2.ErrCodeEnhanceYourCalm:    codes.ResourceExhausted,
+		http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
+		http2.ErrCodeHTTP11Required:     codes.Internal,
+	}
+	// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
+	HTTPStatusConvTab = map[int]codes.Code{
+		// 400 Bad Request - INTERNAL.
+		http.StatusBadRequest: codes.Internal,
+		// 401 Unauthorized  - UNAUTHENTICATED.
+		http.StatusUnauthorized: codes.Unauthenticated,
+		// 403 Forbidden - PERMISSION_DENIED.
+		http.StatusForbidden: codes.PermissionDenied,
+		// 404 Not Found - UNIMPLEMENTED.
+		http.StatusNotFound: codes.Unimplemented,
+		// 429 Too Many Requests - UNAVAILABLE.
+		http.StatusTooManyRequests: codes.Unavailable,
+		// 502 Bad Gateway - UNAVAILABLE.
+		http.StatusBadGateway: codes.Unavailable,
+		// 503 Service Unavailable - UNAVAILABLE.
+		http.StatusServiceUnavailable: codes.Unavailable,
+		// 504 Gateway timeout - UNAVAILABLE.
+		http.StatusGatewayTimeout: codes.Unavailable,
+	}
+)
+
+var grpcStatusDetailsBinHeader = "grpc-status-details-bin"
+
+// isReservedHeader checks whether hdr belongs to the HTTP2 headers
+// reserved by the gRPC protocol. Any other headers are classified as
+// user-specified metadata.
+func isReservedHeader(hdr string) bool {
+	if hdr != "" && hdr[0] == ':' {
+		return true
+	}
+	switch hdr {
+	case "content-type",
+		"user-agent",
+		"grpc-message-type",
+		"grpc-encoding",
+		"grpc-message",
+		"grpc-status",
+		"grpc-timeout",
+		// Intentionally exclude grpc-previous-rpc-attempts and
+		// grpc-retry-pushback-ms, which are "reserved", but their API
+		// intentionally works via metadata.
+		"te":
+		return true
+	default:
+		return false
+	}
+}
+
+// isWhitelistedHeader checks whether hdr should be propagated into metadata
+// visible to users, even though it is classified as "reserved", above.
+func isWhitelistedHeader(hdr string) bool {
+	switch hdr {
+	case ":authority", "user-agent":
+		return true
+	default:
+		return false
+	}
+}
+
+const binHdrSuffix = "-bin"
+
+func encodeBinHeader(v []byte) string {
+	return base64.RawStdEncoding.EncodeToString(v)
+}
+
+func decodeBinHeader(v string) ([]byte, error) {
+	if len(v)%4 == 0 {
+		// Input was padded, or padding was not necessary.
+		return base64.StdEncoding.DecodeString(v)
+	}
+	return base64.RawStdEncoding.DecodeString(v)
+}
+
+func encodeMetadataHeader(k, v string) string {
+	if strings.HasSuffix(k, binHdrSuffix) {
+		return encodeBinHeader(([]byte)(v))
+	}
+	return v
+}
+
+func decodeMetadataHeader(k, v string) (string, error) {
+	if strings.HasSuffix(k, binHdrSuffix) {
+		b, err := decodeBinHeader(v)
+		return string(b), err
+	}
+	return v, nil
+}
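+
+// For example, a key ending in "-bin" round-trips through unpadded base64:
+//
+//	v := encodeMetadataHeader("trace-bin", string([]byte{0x00, 0x01})) // "AAE"
+//	raw, _ := decodeMetadataHeader("trace-bin", v)                     // "\x00\x01"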
+
+type timeoutUnit uint8
+
+const (
+	hour        timeoutUnit = 'H'
+	minute      timeoutUnit = 'M'
+	second      timeoutUnit = 'S'
+	millisecond timeoutUnit = 'm'
+	microsecond timeoutUnit = 'u'
+	nanosecond  timeoutUnit = 'n'
+)
+
+func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
+	switch u {
+	case hour:
+		return time.Hour, true
+	case minute:
+		return time.Minute, true
+	case second:
+		return time.Second, true
+	case millisecond:
+		return time.Millisecond, true
+	case microsecond:
+		return time.Microsecond, true
+	case nanosecond:
+		return time.Nanosecond, true
+	default:
+	}
+	return
+}
+
+func decodeTimeout(s string) (time.Duration, error) {
+	size := len(s)
+	if size < 2 {
+		return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
+	}
+	if size > 9 {
+		// Spec allows for 8 digits plus the unit.
+		return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
+	}
+	unit := timeoutUnit(s[size-1])
+	d, ok := timeoutUnitToDuration(unit)
+	if !ok {
+		return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
+	}
+	t, err := strconv.ParseInt(s[:size-1], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	const maxHours = math.MaxInt64 / int64(time.Hour)
+	if d == time.Hour && t > maxHours {
+		// This timeout would overflow math.MaxInt64; clamp it.
+		return time.Duration(math.MaxInt64), nil
+	}
+	return d * time.Duration(t), nil
+}
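+
+// For example:
+//
+//	decodeTimeout("100m")      // 100 * time.Millisecond
+//	decodeTimeout("7S")        // 7 * time.Second
+//	decodeTimeout("99999999H") // clamped to time.Duration(math.MaxInt64)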
+
+const (
+	spaceByte   = ' '
+	tildeByte   = '~'
+	percentByte = '%'
+)
+
+// encodeGrpcMessage is used to encode the status message in the header field
+// "grpc-message". It does percent encoding and also replaces invalid utf-8
+// characters with the Unicode replacement character.
+//
+// It checks whether each individual byte in msg is an allowable byte, and
+// then either percent-encodes it or passes it through. When percent encoding,
+// the byte is converted into hexadecimal notation with a '%' prepended.
+func encodeGrpcMessage(msg string) string {
+	if msg == "" {
+		return ""
+	}
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		c := msg[i]
+		if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
+			return encodeGrpcMessageUnchecked(msg)
+		}
+	}
+	return msg
+}
+
+func encodeGrpcMessageUnchecked(msg string) string {
+	var sb strings.Builder
+	for len(msg) > 0 {
+		r, size := utf8.DecodeRuneInString(msg)
+		for _, b := range []byte(string(r)) {
+			if size > 1 {
+				// If size > 1, r is not ascii. Always do percent encoding.
+				fmt.Fprintf(&sb, "%%%02X", b)
+				continue
+			}
+
+			// The for loop is necessary even if size == 1. r could be
+			// utf8.RuneError.
+			//
+			// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
+			if b >= spaceByte && b <= tildeByte && b != percentByte {
+				sb.WriteByte(b)
+			} else {
+				fmt.Fprintf(&sb, "%%%02X", b)
+			}
+		}
+		msg = msg[size:]
+	}
+	return sb.String()
+}
+
+// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
+func decodeGrpcMessage(msg string) string {
+	if msg == "" {
+		return ""
+	}
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		if msg[i] == percentByte && i+2 < lenMsg {
+			return decodeGrpcMessageUnchecked(msg)
+		}
+	}
+	return msg
+}
+
+func decodeGrpcMessageUnchecked(msg string) string {
+	var sb strings.Builder
+	lenMsg := len(msg)
+	for i := 0; i < lenMsg; i++ {
+		c := msg[i]
+		if c == percentByte && i+2 < lenMsg {
+			parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
+			if err != nil {
+				sb.WriteByte(c)
+			} else {
+				sb.WriteByte(byte(parsed))
+				i += 2
+			}
+		} else {
+			sb.WriteByte(c)
+		}
+	}
+	return sb.String()
+}
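+
+// For example, encodeGrpcMessage percent-encodes only what it must, and
+// decodeGrpcMessage reverses the mapping (leaving malformed escapes as-is):
+//
+//	encodeGrpcMessage("out of range") // "out of range" (spaces are allowed)
+//	encodeGrpcMessage("50%")          // "50%25"
+//	encodeGrpcMessage("héllo")        // "h%C3%A9llo"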
+
+type bufWriter struct {
+	pool      *sync.Pool
+	buf       []byte
+	offset    int
+	batchSize int
+	conn      net.Conn
+	err       error
+}
+
+func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
+	w := &bufWriter{
+		batchSize: batchSize,
+		conn:      conn,
+		pool:      pool,
+	}
+	// A nil pool indicates that a dedicated (non-shared) buffer should be used.
+	if pool == nil {
+		w.buf = make([]byte, batchSize)
+	}
+	return w
+}
+
+func (w *bufWriter) Write(b []byte) (int, error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	if w.batchSize == 0 { // Buffer has been disabled.
+		n, err := w.conn.Write(b)
+		return n, toIOError(err)
+	}
+	if w.buf == nil {
+		b := w.pool.Get().(*[]byte)
+		w.buf = *b
+	}
+	written := 0
+	for len(b) > 0 {
+		copied := copy(w.buf[w.offset:], b)
+		b = b[copied:]
+		written += copied
+		w.offset += copied
+		if w.offset < w.batchSize {
+			continue
+		}
+		if err := w.flushKeepBuffer(); err != nil {
+			return written, err
+		}
+	}
+	return written, nil
+}
+
+func (w *bufWriter) Flush() error {
+	err := w.flushKeepBuffer()
+	// Only release the buffer if we are in a "shared" mode
+	if w.buf != nil && w.pool != nil {
+		b := w.buf
+		w.pool.Put(&b)
+		w.buf = nil
+	}
+	return err
+}
+
+func (w *bufWriter) flushKeepBuffer() error {
+	if w.err != nil {
+		return w.err
+	}
+	if w.offset == 0 {
+		return nil
+	}
+	_, w.err = w.conn.Write(w.buf[:w.offset])
+	w.err = toIOError(w.err)
+	w.offset = 0
+	return w.err
+}
+
+type ioError struct {
+	error
+}
+
+func (i ioError) Unwrap() error {
+	return i.error
+}
+
+func isIOError(err error) bool {
+	return errors.As(err, &ioError{})
+}
+
+func toIOError(err error) error {
+	if err == nil {
+		return nil
+	}
+	return ioError{error: err}
+}
+
+type framer struct {
+	writer *bufWriter
+	fr     *http2.Framer
+}
+
+var writeBufferPoolMap = make(map[int]*sync.Pool)
+var writeBufferMutex sync.Mutex
+
+func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
+	if writeBufferSize < 0 {
+		writeBufferSize = 0
+	}
+	var r io.Reader = conn
+	if readBufferSize > 0 {
+		r = bufio.NewReaderSize(r, readBufferSize)
+	}
+	var pool *sync.Pool
+	if sharedWriteBuffer {
+		pool = getWriteBufferPool(writeBufferSize)
+	}
+	w := newBufWriter(conn, writeBufferSize, pool)
+	f := &framer{
+		writer: w,
+		fr:     http2.NewFramer(w, r),
+	}
+	f.fr.SetMaxReadFrameSize(http2MaxFrameLen)
+	// Opt-in to Frame reuse API on framer to reduce garbage.
+	// Frames aren't safe to read from after a subsequent call to ReadFrame.
+	f.fr.SetReuseFrames()
+	f.fr.MaxHeaderListSize = maxHeaderListSize
+	f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
+	return f
+}
+
+func getWriteBufferPool(size int) *sync.Pool {
+	writeBufferMutex.Lock()
+	defer writeBufferMutex.Unlock()
+	pool, ok := writeBufferPoolMap[size]
+	if ok {
+		return pool
+	}
+	pool = &sync.Pool{
+		New: func() any {
+			b := make([]byte, size)
+			return &b
+		},
+	}
+	writeBufferPoolMap[size] = pool
+	return pool
+}
+
+// parseDialTarget returns the network and address to pass to the dialer.
+func parseDialTarget(target string) (string, string) {
+	net := "tcp"
+	m1 := strings.Index(target, ":")
+	m2 := strings.Index(target, ":/")
+	// Handle unix:addr, which url.Parse would not parse correctly.
+	if m1 >= 0 && m2 < 0 {
+		if n := target[0:m1]; n == "unix" {
+			return n, target[m1+1:]
+		}
+	}
+	if m2 >= 0 {
+		t, err := url.Parse(target)
+		if err != nil {
+			return net, target
+		}
+		scheme := t.Scheme
+		addr := t.Path
+		if scheme == "unix" {
+			if addr == "" {
+				addr = t.Host
+			}
+			return scheme, addr
+		}
+	}
+	return net, target
+}
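+
+// For example:
+//
+//	parseDialTarget("unix:/var/run/app.sock")   // ("unix", "/var/run/app.sock")
+//	parseDialTarget("unix:///var/run/app.sock") // ("unix", "/var/run/app.sock")
+//	parseDialTarget("10.0.0.1:443")             // ("tcp", "10.0.0.1:443")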
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/transport/logging.go b/hack/tools/vendor/google.golang.org/grpc/internal/transport/logging.go
new file mode 100644
index 0000000000..42ed2b07af
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/transport/logging.go
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/grpclog"
+	internalgrpclog "google.golang.org/grpc/internal/grpclog"
+)
+
+var logger = grpclog.Component("transport")
+
+func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger {
+	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p))
+}
+
+func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger {
+	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p))
+}
+
+func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger {
+	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p))
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/hack/tools/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
new file mode 100644
index 0000000000..c11b527827
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package networktype declares the network type to be used in the default
+// dialer. It is carried as an attribute of a resolver.Address.
+package networktype
+
+import (
+	"google.golang.org/grpc/resolver"
+)
+
+// keyType is the type of the key used for storing the network type in Attributes.
+type keyType string
+
+const key = keyType("grpc.internal.transport.networktype")
+
+// Set returns a copy of the provided address with attributes containing networkType.
+func Set(address resolver.Address, networkType string) resolver.Address {
+	address.Attributes = address.Attributes.WithValue(key, networkType)
+	return address
+}
+
+// Get returns the network type in the resolver.Address and true, or "", false
+// if not present.
+func Get(address resolver.Address) (string, bool) {
+	v := address.Attributes.Value(key)
+	if v == nil {
+		return "", false
+	}
+	return v.(string), true
+}
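+
+// A minimal usage sketch (the address value is illustrative):
+//
+//	addr := networktype.Set(resolver.Address{Addr: "/tmp/grpc.sock"}, "unix")
+//	if nt, ok := networktype.Get(addr); ok {
+//		// nt == "unix"; the default dialer uses it as the network argument.
+//	}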
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/transport/proxy.go b/hack/tools/vendor/google.golang.org/grpc/internal/transport/proxy.go
new file mode 100644
index 0000000000..54b2244365
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -0,0 +1,150 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bufio"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+
+	"google.golang.org/grpc/internal"
+)
+
+const proxyAuthHeaderKey = "Proxy-Authorization"
+
+var (
+	// The following variable will be overwritten in the tests.
+	httpProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+func mapAddress(address string) (*url.URL, error) {
+	req := &http.Request{
+		URL: &url.URL{
+			Scheme: "https",
+			Host:   address,
+		},
+	}
+	url, err := httpProxyFromEnvironment(req)
+	if err != nil {
+		return nil, err
+	}
+	return url, nil
+}
+
+// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
+// It's possible that this reader reads more than what's needed for the response
+// and stores those bytes in the buffer.
+// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't
+// lose the bytes in the buffer.
+type bufConn struct {
+	net.Conn
+	r io.Reader
+}
+
+func (c *bufConn) Read(b []byte) (int, error) {
+	return c.r.Read(b)
+}
+
+func basicAuth(username, password string) string {
+	auth := username + ":" + password
+	return base64.StdEncoding.EncodeToString([]byte(auth))
+}
+
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) {
+	defer func() {
+		if err != nil {
+			conn.Close()
+		}
+	}()
+
+	req := &http.Request{
+		Method: http.MethodConnect,
+		URL:    &url.URL{Host: backendAddr},
+		Header: map[string][]string{"User-Agent": {grpcUA}},
+	}
+	if t := proxyURL.User; t != nil {
+		u := t.Username()
+		p, _ := t.Password()
+		req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
+	}
+
+	if err := sendHTTPRequest(ctx, req, conn); err != nil {
+		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+
+	r := bufio.NewReader(conn)
+	resp, err := http.ReadResponse(r, req)
+	if err != nil {
+		return nil, fmt.Errorf("reading server HTTP response: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		dump, err := httputil.DumpResponse(resp, true)
+		if err != nil {
+			return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
+		}
+		return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
+	}
+	// The buffer could contain extra bytes from the target server, so we can't
+	// discard it. However, in many cases where the server waits for the client
+	// to send the first message (e.g. when TLS is being used), the buffer will
+	// be empty, so we can avoid the overhead of reading through this buffer.
+	if r.Buffered() != 0 {
+		return &bufConn{Conn: conn, r: r}, nil
+	}
+	return conn, nil
+}
+
+// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
+// is necessary, dials, does the HTTP CONNECT handshake, and returns the
+// connection.
+func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) {
+	newAddr := addr
+	proxyURL, err := mapAddress(addr)
+	if err != nil {
+		return nil, err
+	}
+	if proxyURL != nil {
+		newAddr = proxyURL.Host
+	}
+
+	conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr)
+	if err != nil {
+		return nil, err
+	}
+	if proxyURL == nil {
+		// proxy is disabled if proxyURL is nil.
+		return conn, err
+	}
+	return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
+}
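+
+// For example, with HTTPS_PROXY=http://proxy.internal:3128 set in the
+// environment (the proxy address is illustrative), proxyDial dials
+// proxy.internal:3128, writes "CONNECT <backendAddr> HTTP/1.1" with the
+// configured User-Agent, and hands back the tunneled connection once a
+// 200 response has been read.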
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+	req = req.WithContext(ctx)
+	if err := req.Write(conn); err != nil {
+		return fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+	return nil
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/internal/transport/transport.go b/hack/tools/vendor/google.golang.org/grpc/internal/transport/transport.go
new file mode 100644
index 0000000000..fdd6fa86cc
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -0,0 +1,934 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package transport defines and implements a message-oriented communication
+// channel to complete various transactions (e.g., an RPC). It is meant for
+// grpc-internal usage and is not intended to be imported directly by users.
+package transport
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/tap"
+)
+
+const logLevel = 2
+
+// recvMsg represents the received msg from the transport. All transport
+// protocol specific info has been removed.
+type recvMsg struct {
+	buffer mem.Buffer
+	// nil: received some data
+	// io.EOF: stream is completed. data is nil.
+	// other non-nil error: transport failure. data is nil.
+	err error
+}
+
+// recvBuffer is an unbounded channel of recvMsg structs.
+//
+// Note: recvBuffer differs from buffer.Unbounded only in that it holds a
+// channel of recvMsg structs instead of objects implementing the "item"
+// interface. recvBuffer is written to much more often, and using concrete
+// recvMsg structs helps avoid allocation in recvBuffer.put.
+type recvBuffer struct {
+	c       chan recvMsg
+	mu      sync.Mutex
+	backlog []recvMsg
+	err     error
+}
+
+func newRecvBuffer() *recvBuffer {
+	b := &recvBuffer{
+		c: make(chan recvMsg, 1),
+	}
+	return b
+}
+
+func (b *recvBuffer) put(r recvMsg) {
+	b.mu.Lock()
+	if b.err != nil {
+		// drop the buffer on the floor. Since b.err is not nil, any subsequent reads
+		// will always return an error, making this buffer inaccessible.
+		r.buffer.Free()
+		b.mu.Unlock()
+		// An error had occurred earlier, don't accept more
+		// data or errors.
+		return
+	}
+	b.err = r.err
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- r:
+			b.mu.Unlock()
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, r)
+	b.mu.Unlock()
+}
+
+func (b *recvBuffer) load() {
+	b.mu.Lock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = recvMsg{}
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+	b.mu.Unlock()
+}
+
+// get returns the channel that receives a recvMsg in the buffer.
+//
+// Upon receipt of a recvMsg, the caller should call load to send another
+// recvMsg onto the channel if there is any.
+func (b *recvBuffer) get() <-chan recvMsg {
+	return b.c
+}
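+
+// The intended receive pattern, as used by recvBufferReader below: take one
+// message from the channel, then call load to promote the next backlog entry.
+//
+//	m := <-b.get()
+//	b.load() // moves backlog[0] into the channel, if any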
+
+// recvBufferReader implements io.Reader interface to read the data from
+// recvBuffer.
+type recvBufferReader struct {
+	closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
+	ctx         context.Context
+	ctxDone     <-chan struct{} // cache of ctx.Done() (for performance).
+	recv        *recvBuffer
+	last        mem.Buffer // Stores the remaining data in the previous calls.
+	err         error
+}
+
+func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.last != nil {
+		n, r.last = mem.ReadUnsafe(header, r.last)
+		return n, nil
+	}
+	if r.closeStream != nil {
+		n, r.err = r.readHeaderClient(header)
+	} else {
+		n, r.err = r.readHeader(header)
+	}
+	return n, r.err
+}
+
+// Read reads the next n bytes from last. If last is drained, it tries to read
+// additional data from recv. It blocks if there is no additional data available
+// in recv. If Read returns any non-nil error, it will continue to return that
+// error.
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.last != nil {
+		buf = r.last
+		if r.last.Len() > n {
+			buf, r.last = mem.SplitUnsafe(buf, n)
+		} else {
+			r.last = nil
+		}
+		return buf, nil
+	}
+	if r.closeStream != nil {
+		buf, r.err = r.readClient(n)
+	} else {
+		buf, r.err = r.read(n)
+	}
+	return buf, r.err
+}
+
+func (r *recvBufferReader) readHeader(header []byte) (n int, err error) {
+	select {
+	case <-r.ctxDone:
+		return 0, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readHeaderAdditional(m, header)
+	}
+}
+
+func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
+	select {
+	case <-r.ctxDone:
+		return nil, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readAdditional(m, n)
+	}
+}
+
+func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) {
+	// If the context is canceled, close the stream with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readHeaderAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fix the race.
+		//
+		// TODO: delaying the ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
+		r.closeStream(ContextErr(r.ctx.Err()))
+		m := <-r.recv.get()
+		return r.readHeaderAdditional(m, header)
+	case m := <-r.recv.get():
+		return r.readHeaderAdditional(m, header)
+	}
+}
+
+func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
+	// If the context is canceled, close the stream with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fix the race.
+		//
+		// TODO: delaying the ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
+		r.closeStream(ContextErr(r.ctx.Err()))
+		m := <-r.recv.get()
+		return r.readAdditional(m, n)
+	case m := <-r.recv.get():
+		return r.readAdditional(m, n)
+	}
+}
+
+func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
+	r.recv.load()
+	if m.err != nil {
+		if m.buffer != nil {
+			m.buffer.Free()
+		}
+		return 0, m.err
+	}
+
+	n, r.last = mem.ReadUnsafe(header, m.buffer)
+
+	return n, nil
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) {
+	r.recv.load()
+	if m.err != nil {
+		if m.buffer != nil {
+			m.buffer.Free()
+		}
+		return nil, m.err
+	}
+
+	if m.buffer.Len() > n {
+		m.buffer, r.last = mem.SplitUnsafe(m.buffer, n)
+	}
+
+	return m.buffer, nil
+}
+
+type streamState uint32
+
+const (
+	streamActive    streamState = iota
+	streamWriteDone             // EndStream sent
+	streamReadDone              // EndStream received
+	streamDone                  // the entire stream is finished.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+	id           uint32
+	st           ServerTransport    // nil for client side Stream
+	ct           ClientTransport    // nil for server side Stream
+	ctx          context.Context    // the associated context of the stream
+	cancel       context.CancelFunc // always nil for client side Stream
+	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
+	doneFunc     func()             // invoked at the end of stream on client side.
+	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
+	method       string             // the associated RPC method of the stream
+	recvCompress string
+	sendCompress string
+	buf          *recvBuffer
+	trReader     *transportReader
+	fc           *inFlow
+	wq           *writeQuota
+
+	// Holds compressor names passed in grpc-accept-encoding metadata from the
+	// client. This is empty for the client side stream.
+	clientAdvertisedCompressors string
+	// Callback to state the application's intention to read data. This
+	// is used to adjust flow control, if needed.
+	requestRead func(int)
+
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received.  Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value).  Not valid on server side.
+	headerValid      bool
+	headerWireLength int // Only set on server side.
+
+	// hdrMu protects header and trailer metadata on the server-side.
+	hdrMu sync.Mutex
+	// On client side, header keeps the received header metadata.
+	//
+	// On server side, header keeps the header set by SetHeader(). The complete
+	// header will be merged into this after t.WriteHeader() is called.
+	header  metadata.MD
+	trailer metadata.MD // the key-value map of trailer metadata.
+
+	noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
+	headerSent uint32
+
+	state streamState
+
+	// On client-side it is the status error received from the server.
+	// On server-side it is unused.
+	status *status.Status
+
+	bytesReceived uint32 // indicates whether any bytes have been received on this stream
+	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
+
+	// contentSubtype is the content-subtype for requests.
+	// this must be lowercase or the behavior is undefined.
+	contentSubtype string
+}
+
+// isHeaderSent is only valid on the server-side.
+func (s *Stream) isHeaderSent() bool {
+	return atomic.LoadUint32(&s.headerSent) == 1
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set. It is valid only on server-side.
+func (s *Stream) updateHeaderSent() bool {
+	return atomic.SwapUint32(&s.headerSent, 1) == 1
+}
+
+func (s *Stream) swapState(st streamState) streamState {
+	return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
+}
+
+func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
+	return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
+}
+
+func (s *Stream) getState() streamState {
+	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
+}
+
+func (s *Stream) waitOnHeader() {
+	if s.headerChan == nil {
+		// On the server headerChan is always nil since a stream originates
+		// only after having received headers.
+		return
+	}
+	select {
+	case <-s.ctx.Done():
+		// Close the stream to prevent headers/trailers from changing after
+		// this function returns.
+		s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
+		// headerChan could possibly not be closed yet if closeStream raced
+		// with operateHeaders; wait until it is closed explicitly here.
+		<-s.headerChan
+	case <-s.headerChan:
+	}
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is an empty string if no compression is applied.
+func (s *Stream) RecvCompress() string {
+	s.waitOnHeader()
+	return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *Stream) SetSendCompress(name string) error {
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return errors.New("transport: set send compressor called after headers sent or stream done")
+	}
+
+	s.sendCompress = name
+	return nil
+}
+
+// SendCompress returns the send compressor name.
+func (s *Stream) SendCompress() string {
+	return s.sendCompress
+}
+
+// ClientAdvertisedCompressors returns the compressor names advertised by the
+// client via grpc-accept-encoding header.
+func (s *Stream) ClientAdvertisedCompressors() []string {
+	values := strings.Split(s.clientAdvertisedCompressors, ",")
+	for i, v := range values {
+		values[i] = strings.TrimSpace(v)
+	}
+	return values
+}
+
+// Done returns a channel which is closed when the stream receives the final
+// status from the server.
+func (s *Stream) Done() <-chan struct{} {
+	return s.done
+}
+
+// Header returns the header metadata of the stream.
+//
+// On client side, it acquires the key-value pairs of header metadata once it is
+// available. It blocks until i) the metadata is ready or ii) there is no header
+// metadata or iii) the stream is canceled/expired.
+//
+// On server side, it returns the out header after t.WriteHeader is called.  It
+// does not block and must not be called until after WriteHeader.
+func (s *Stream) Header() (metadata.MD, error) {
+	if s.headerChan == nil {
+		// On server side, return the header in stream. It will be the out
+		// header after t.WriteHeader is called.
+		return s.header.Copy(), nil
+	}
+	s.waitOnHeader()
+
+	if !s.headerValid || s.noHeaders {
+		return nil, s.status.Err()
+	}
+
+	return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, it returns true. Client-side only.
+func (s *Stream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+// It can be safely read only after the stream has ended, that is, after either
+// a read or a write has returned io.EOF.
+func (s *Stream) Trailer() metadata.MD {
+	c := s.trailer.Copy()
+	return c
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase.  See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+	return s.contentSubtype
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+	return s.ctx
+}
+
+// SetContext sets the context of the stream. This will be deleted once the
+// stats handler callouts all move to the gRPC layer.
+func (s *Stream) SetContext(ctx context.Context) {
+	s.ctx = ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+	return s.method
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+	return s.status
+}
+
+// HeaderWireLength returns the size of the headers of the stream as received
+// from the wire. Valid only on the server.
+func (s *Stream) HeaderWireLength() int {
+	return s.headerWireLength
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+// This should not be called in parallel with other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	return s.st.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called in parallel with other data writes.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+	s.buf.put(m)
+}
+
+func (s *Stream) ReadHeader(header []byte) (err error) {
+	// Don't request a read if there was an error earlier
+	if er := s.trReader.er; er != nil {
+		return er
+	}
+	s.requestRead(len(header))
+	for len(header) != 0 {
+		n, err := s.trReader.ReadHeader(header)
+		header = header[n:]
+		if len(header) == 0 {
+			err = nil
+		}
+		if err != nil {
+			if n > 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			return err
+		}
+	}
+	return nil
+}
+
+// Read reads n bytes from the wire for this stream.
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) {
+	// Don't request a read if there was an error earlier
+	if er := s.trReader.er; er != nil {
+		return nil, er
+	}
+	s.requestRead(n)
+	for n != 0 {
+		buf, err := s.trReader.Read(n)
+		var bufLen int
+		if buf != nil {
+			bufLen = buf.Len()
+		}
+		n -= bufLen
+		if n == 0 {
+			err = nil
+		}
+		if err != nil {
+			if bufLen > 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			data.Free()
+			return nil, err
+		}
+		data = append(data, buf)
+	}
+	return data, nil
+}
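+
+// A sketch of how the gRPC layer consumes this: read the 5-byte message
+// prefix (1-byte compressed flag, 4-byte big-endian length) with ReadHeader,
+// then read the payload length it announces.
+//
+//	var hdr [5]byte
+//	if err := s.ReadHeader(hdr[:]); err != nil {
+//		return err
+//	}
+//	msg, err := s.Read(int(binary.BigEndian.Uint32(hdr[1:])))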
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+	reader *recvBufferReader
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int)
+	er            error
+}
+
+func (t *transportReader) ReadHeader(header []byte) (int, error) {
+	n, err := t.reader.ReadHeader(header)
+	if err != nil {
+		t.er = err
+		return 0, err
+	}
+	t.windowHandler(len(header))
+	return n, nil
+}
+
+func (t *transportReader) Read(n int) (mem.Buffer, error) {
+	buf, err := t.reader.Read(n)
+	if err != nil {
+		t.er = err
+		return buf, err
+	}
+	t.windowHandler(buf.Len())
+	return buf, nil
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+	return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+	return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+	return fmt.Sprintf("", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+	reachable transportState = iota
+	closing
+	draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct {
+	MaxStreams            uint32
+	ConnectionTimeout     time.Duration
+	Credentials           credentials.TransportCredentials
+	InTapHandle           tap.ServerInHandle
+	StatsHandlers         []stats.Handler
+	KeepaliveParams       keepalive.ServerParameters
+	KeepalivePolicy       keepalive.EnforcementPolicy
+	InitialWindowSize     int32
+	InitialConnWindowSize int32
+	WriteBufferSize       int
+	ReadBufferSize        int
+	SharedWriteBuffer     bool
+	ChannelzParent        *channelz.Server
+	MaxHeaderListSize     *uint32
+	HeaderTableSize       *uint32
+	BufferPool            mem.BufferPool
+}
+
+// ConnectOptions covers all relevant options for communicating with the server.
+type ConnectOptions struct {
+	// UserAgent is the application user agent.
+	UserAgent string
+	// Dialer specifies how to dial a network address.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+	FailOnNonTempDialError bool
+	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+	PerRPCCredentials []credentials.PerRPCCredentials
+	// TransportCredentials stores the Authenticator required to setup a client
+	// connection. Only one of TransportCredentials and CredsBundle is non-nil.
+	TransportCredentials credentials.TransportCredentials
+	// CredsBundle is the credentials bundle to be used. Only one of
+	// TransportCredentials and CredsBundle is non-nil.
+	CredsBundle credentials.Bundle
+	// KeepaliveParams stores the keepalive parameters.
+	KeepaliveParams keepalive.ClientParameters
+	// StatsHandlers stores the handler for stats.
+	StatsHandlers []stats.Handler
+	// InitialWindowSize sets the initial window size for a stream.
+	InitialWindowSize int32
+	// InitialConnWindowSize sets the initial window size for a connection.
+	InitialConnWindowSize int32
+	// WriteBufferSize sets the size of the write buffer, which in turn determines how much data can be batched before it's written on the wire.
+	WriteBufferSize int
+	// ReadBufferSize sets the size of the read buffer, which in turn determines how much data can be read at most for one read syscall.
+	ReadBufferSize int
+	// SharedWriteBuffer indicates whether connections should reuse the write buffer.
+	SharedWriteBuffer bool
+	// ChannelzParent sets the addrConn id which initiated the creation of this client transport.
+	ChannelzParent *channelz.SubChannel
+	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
+	MaxHeaderListSize *uint32
+	// UseProxy specifies if a proxy should be used.
+	UseProxy bool
+	// The mem.BufferPool to use when reading/writing to the wire.
+	BufferPool mem.BufferPool
+}
+
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
+	return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
+}
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
+	// Last indicates whether this write is the last piece for
+	// this stream.
+	Last bool
+}
+
+// CallHdr carries the information of a particular RPC.
+type CallHdr struct {
+	// Host specifies the peer's host.
+	Host string
+
+	// Method specifies the operation to perform.
+	Method string
+
+	// SendCompress specifies the compression algorithm applied on
+	// outbound message.
+	SendCompress string
+
+	// Creds specifies credentials.PerRPCCredentials for a call.
+	Creds credentials.PerRPCCredentials
+
+	// ContentSubtype specifies the content-subtype for a request. For example, a
+	// content-subtype of "proto" will result in a content-type of
+	// "application/grpc+proto". The value of ContentSubtype must be all
+	// lowercase, otherwise the behavior is undefined. See
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+	// for more details.
+	ContentSubtype string
+
+	PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
+
+	DoneFunc func() // called when the stream is finished
+}
+
+// ClientTransport is the common interface for all gRPC client-side transport
+// implementations.
+type ClientTransport interface {
+	// Close tears down this transport. Once it returns, the transport
+	// should not be accessed any more. The caller must make sure this
+	// is called only once.
+	Close(err error)
+
+	// GracefulClose starts to tear down the transport: the transport will stop
+	// accepting new RPCs and NewStream will return error. Once all streams are
+	// finished, the transport will close.
+	//
+	// It does not block.
+	GracefulClose()
+
+	// Write sends the data for the given stream. A nil stream indicates
+	// the write is to be performed on the transport as a whole.
+	Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
+
+	// NewStream creates a Stream for an RPC.
+	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+	// CloseStream clears the footprint of a stream when the stream is
+	// not needed any more. The err indicates the error incurred when
+	// CloseStream is called. Must be called when a stream is finished
+	// unless the associated transport is closing.
+	CloseStream(stream *Stream, err error)
+
+	// Error returns a channel that is closed when some I/O error
+	// happens. Typically the caller should have a goroutine to monitor
+	// this in order to take action (e.g., close the current transport
+	// and create a new one) in error case. It should not return nil
+	// once the transport is initiated.
+	Error() <-chan struct{}
+
+	// GoAway returns a channel that is closed when ClientTransport
+	// receives the draining signal from the server (e.g., GOAWAY frame in
+	// HTTP/2).
+	GoAway() <-chan struct{}
+
+	// GetGoAwayReason returns the reason why GoAway frame was received, along
+	// with a human readable string with debug info.
+	GetGoAwayReason() (GoAwayReason, string)
+
+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this transport.
+	IncrMsgRecv()
+}
+
+// ServerTransport is the common interface for all gRPC server-side transport
+// implementations.
+//
+// Methods may be called concurrently from multiple goroutines, but
+// Write methods for a given Stream will be called serially.
+type ServerTransport interface {
+	// HandleStreams receives incoming streams using the given handler.
+	HandleStreams(context.Context, func(*Stream))
+
+	// WriteHeader sends the header metadata for the given stream.
+	// WriteHeader may not be called on all streams.
+	WriteHeader(s *Stream, md metadata.MD) error
+
+	// Write sends the data for the given stream.
+	// Write may not be called on all streams.
+	Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
+
+	// WriteStatus sends the status of a stream to the client.  WriteStatus is
+	// the final call made on a stream and always occurs.
+	WriteStatus(s *Stream, st *status.Status) error
+
+	// Close tears down the transport. Once it is called, the transport
+	// should not be accessed any more. All the pending streams and their
+	// handlers will be terminated asynchronously.
+	Close(err error)
+
+	// Peer returns the peer of the server transport.
+	Peer() *peer.Peer
+
+	// Drain notifies the client this ServerTransport stops accepting new RPCs.
+	Drain(debugData string)
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this transport.
+	IncrMsgRecv()
+}
+
+// connectionErrorf creates a ConnectionError with the specified error description.
+func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError {
+	return ConnectionError{
+		Desc: fmt.Sprintf(format, a...),
+		temp: temp,
+		err:  e,
+	}
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+	Desc string
+	temp bool
+	err  error
+}
+
+func (e ConnectionError) Error() string {
+	return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e ConnectionError) Temporary() bool {
+	return e.temp
+}
+
+// Origin returns the original error of this connection error.
+func (e ConnectionError) Origin() error {
+	// Never return nil error here.
+	// If the original error is nil, return itself.
+	if e.err == nil {
+		return e
+	}
+	return e.err
+}
+
+// Unwrap returns the original error of this connection error or nil when the
+// origin is nil.
+func (e ConnectionError) Unwrap() error {
+	return e.err
+}
+
+var (
+	// ErrConnClosing indicates that the transport is closing.
+	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
+	// errStreamDrain indicates that the stream is rejected because the
+	// connection is draining. This could be caused by goaway or balancer
+	// removing the address.
+	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
+	// errStreamDone is returned from write at the client side to indicate to the
+	// application layer that the stream is done.
+	errStreamDone = errors.New("the stream is done")
+	// statusGoAway indicates that the server sent a GOAWAY that included this
+	// stream's ID in unprocessed RPCs.
+	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
+)
+
+// GoAwayReason contains the reason for the GoAway frame received.
+type GoAwayReason uint8
+
+const (
+	// GoAwayInvalid indicates that no GoAway frame is received.
+	GoAwayInvalid GoAwayReason = 0
+	// GoAwayNoReason is the default value when GoAway frame is received.
+	GoAwayNoReason GoAwayReason = 1
+	// GoAwayTooManyPings indicates that a GoAway frame with
+	// ErrCodeEnhanceYourCalm was received and that the debug data said
+	// "too_many_pings".
+	GoAwayTooManyPings GoAwayReason = 2
+)
+
+// ContextErr converts the error from context package into a status error.
+func ContextErr(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
+}
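
Usage note: ConnectionError implements the Temporary and Unwrap methods above, so
callers can detect it through a wrapped error chain with the standard errors
package. A minimal standalone sketch (the temporary interface and the sample
error below are illustrative, not grpc API):

package main

import (
	"errors"
	"fmt"
)

// temporary matches any error exposing a Temporary() bool method, such as
// the transport's ConnectionError.
type temporary interface{ Temporary() bool }

func isTemporary(err error) bool {
	var t temporary
	return errors.As(err, &t) && t.Temporary()
}

func main() {
	err := fmt.Errorf("rpc failed: %w", errors.New("transport is closing"))
	fmt.Println(isTemporary(err)) // false: no Temporary method in this chain
}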
diff --git a/hack/tools/vendor/google.golang.org/grpc/keepalive/keepalive.go b/hack/tools/vendor/google.golang.org/grpc/keepalive/keepalive.go
new file mode 100644
index 0000000000..eb42b19fb9
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -0,0 +1,99 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package keepalive defines configurable parameters for point-to-point
+// healthcheck.
+package keepalive
+
+import (
+	"time"
+)
+
+// ClientParameters is used to set keepalive parameters on the client-side.
+// These configure how the client will actively probe to notice when a
+// connection is broken and send pings so intermediaries will be aware of the
+// liveness of the connection. Make sure these parameters are set in
+// coordination with the keepalive policy on the server, as incompatible
+// settings can result in closing of connection.
+type ClientParameters struct {
+	// After a duration of this time if the client doesn't see any activity it
+	// pings the server to see if the transport is still alive.
+	// If set below 10s, a minimum value of 10s will be used instead.
+	//
+	// Note that gRPC servers have a default EnforcementPolicy.MinTime of 5
+	// minutes (which means the client shouldn't ping more frequently than every
+	// 5 minutes).
+	//
+	// Though not ideal, it's not a strong requirement for Time to be less than
+	// EnforcementPolicy.MinTime.  Time will automatically double if the server
+	// disconnects due to its enforcement policy.
+	//
+	// For more details, see
+	// https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
+	Time time.Duration
+	// After having pinged for keepalive check, the client waits for a duration
+	// of Timeout and if no activity is seen even after that the connection is
+	// closed.
+	//
+	// If keepalive is enabled, and this value is not explicitly set, the default
+	// is 20 seconds.
+	Timeout time.Duration
+	// If true, client sends keepalive pings even with no active RPCs. If false,
+	// when there are no active RPCs, Time and Timeout will be ignored and no
+	// keepalive pings will be sent.
+	PermitWithoutStream bool
+}
+
+// ServerParameters is used to set keepalive and max-age parameters on the
+// server-side.
+type ServerParameters struct {
+	// MaxConnectionIdle is a duration for the amount of time after which an
+	// idle connection would be closed by sending a GoAway. Idleness duration is
+	// defined since the most recent time the number of outstanding RPCs became
+	// zero or the connection establishment.
+	MaxConnectionIdle time.Duration // The current default value is infinity.
+	// MaxConnectionAge is a duration for the maximum amount of time a
+	// connection may exist before it will be closed by sending a GoAway. A
+	// random jitter of +/-10% will be added to MaxConnectionAge to spread out
+	// connection storms.
+	MaxConnectionAge time.Duration // The current default value is infinity.
+	// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
+	// which the connection will be forcibly closed.
+	MaxConnectionAgeGrace time.Duration // The current default value is infinity.
+	// After a duration of this time if the server doesn't see any activity it
+	// pings the client to see if the transport is still alive.
+	// If set below 1s, a minimum value of 1s will be used instead.
+	Time time.Duration // The current default value is 2 hours.
+	// After having pinged for keepalive check, the server waits for a duration
+	// of Timeout and if no activity is seen even after that the connection is
+	// closed.
+	Timeout time.Duration // The current default value is 20 seconds.
+}
+
+// EnforcementPolicy is used to set keepalive enforcement policy on the
+// server-side. Server will close connection with a client that violates this
+// policy.
+type EnforcementPolicy struct {
+	// MinTime is the minimum amount of time a client should wait before sending
+	// a keepalive ping.
+	MinTime time.Duration // The current default value is 5 minutes.
+	// If true, server allows keepalive pings even when there are no active
+	// streams (RPCs). If false, and the client sends a ping when there are no active
+	// streams, server will send GOAWAY and close the connection.
+	PermitWithoutStream bool // false by default.
+}
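
Usage note: these parameters are wired in through dial and server options. A
minimal client-side sketch, assuming the standard grpc dial options and a
placeholder target address:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Ping after 30s of inactivity, wait up to 10s for the ack, and keep
	// pinging even when no RPCs are in flight.
	kacp := keepalive.ClientParameters{
		Time:                30 * time.Second,
		Timeout:             10 * time.Second,
		PermitWithoutStream: true,
	}
	conn, err := grpc.NewClient("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(kacp),
	)
	if err != nil {
		log.Fatalf("creating client: %v", err)
	}
	defer conn.Close()
}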
diff --git a/hack/tools/vendor/google.golang.org/grpc/mem/buffer_pool.go b/hack/tools/vendor/google.golang.org/grpc/mem/buffer_pool.go
new file mode 100644
index 0000000000..c37c58c023
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -0,0 +1,194 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+	"sort"
+	"sync"
+
+	"google.golang.org/grpc/internal"
+)
+
+// BufferPool is a pool of buffers that can be shared and reused, resulting in
+// decreased memory allocation.
+type BufferPool interface {
+	// Get returns a buffer with specified length from the pool.
+	Get(length int) *[]byte
+
+	// Put returns a buffer to the pool.
+	Put(*[]byte)
+}
+
+var defaultBufferPoolSizes = []int{
+	256,
+	4 << 10,  // 4KB (go page size)
+	16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
+	32 << 10, // 32KB (default buffer size for io.Copy)
+	1 << 20,  // 1MB
+}
+
+var defaultBufferPool BufferPool
+
+func init() {
+	defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
+
+	internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
+		defaultBufferPool = pool
+	}
+
+	internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
+		bufferPoolingThreshold = threshold
+	}
+}
+
+// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
+// created with NewBufferPool that uses a set of default sizes optimized for
+// expected workflows.
+func DefaultBufferPool() BufferPool {
+	return defaultBufferPool
+}
+
+// NewTieredBufferPool returns a BufferPool implementation that uses multiple
+// underlying pools of the given pool sizes.
+func NewTieredBufferPool(poolSizes ...int) BufferPool {
+	sort.Ints(poolSizes)
+	pools := make([]*sizedBufferPool, len(poolSizes))
+	for i, s := range poolSizes {
+		pools[i] = newSizedBufferPool(s)
+	}
+	return &tieredBufferPool{
+		sizedPools: pools,
+	}
+}
+
+// tieredBufferPool implements the BufferPool interface with multiple tiers of
+// buffer pools for different sizes of buffers.
+type tieredBufferPool struct {
+	sizedPools   []*sizedBufferPool
+	fallbackPool simpleBufferPool
+}
+
+func (p *tieredBufferPool) Get(size int) *[]byte {
+	return p.getPool(size).Get(size)
+}
+
+func (p *tieredBufferPool) Put(buf *[]byte) {
+	p.getPool(cap(*buf)).Put(buf)
+}
+
+func (p *tieredBufferPool) getPool(size int) BufferPool {
+	poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
+		return p.sizedPools[i].defaultSize >= size
+	})
+
+	if poolIdx == len(p.sizedPools) {
+		return &p.fallbackPool
+	}
+
+	return p.sizedPools[poolIdx]
+}
+
+// sizedBufferPool is a BufferPool implementation that is optimized for specific
+// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
+// of 16kb and a sizedBufferPool can be configured to only return buffers with a
+// capacity of 16kb. Note, however, that it does not support returning larger
+// buffers and in fact panics if such a buffer is requested. Because of this,
+// this BufferPool implementation is not meant to be used on its own and rather
+// is intended to be embedded in a tieredBufferPool such that Get is only
+// invoked when the required size is smaller than or equal to defaultSize.
+type sizedBufferPool struct {
+	pool        sync.Pool
+	defaultSize int
+}
+
+func (p *sizedBufferPool) Get(size int) *[]byte {
+	buf := p.pool.Get().(*[]byte)
+	b := *buf
+	clear(b[:cap(b)])
+	*buf = b[:size]
+	return buf
+}
+
+func (p *sizedBufferPool) Put(buf *[]byte) {
+	if cap(*buf) < p.defaultSize {
+		// Ignore buffers that are too small to fit in the pool. Otherwise, when
+		// Get is called it will panic as it tries to index outside the bounds
+		// of the buffer.
+		return
+	}
+	p.pool.Put(buf)
+}
+
+func newSizedBufferPool(size int) *sizedBufferPool {
+	return &sizedBufferPool{
+		pool: sync.Pool{
+			New: func() any {
+				buf := make([]byte, size)
+				return &buf
+			},
+		},
+		defaultSize: size,
+	}
+}
+
+var _ BufferPool = (*simpleBufferPool)(nil)
+
+// simpleBufferPool is an implementation of the BufferPool interface that
+// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
+// acquire a buffer from the pool but if that buffer is too small, it returns it
+// to the pool and creates a new one.
+type simpleBufferPool struct {
+	pool sync.Pool
+}
+
+func (p *simpleBufferPool) Get(size int) *[]byte {
+	bs, ok := p.pool.Get().(*[]byte)
+	if ok && cap(*bs) >= size {
+		*bs = (*bs)[:size]
+		return bs
+	}
+
+	// A buffer was pulled from the pool, but it is too small. Put it back in
+	// the pool and create one large enough.
+	if ok {
+		p.pool.Put(bs)
+	}
+
+	b := make([]byte, size)
+	return &b
+}
+
+func (p *simpleBufferPool) Put(buf *[]byte) {
+	p.pool.Put(buf)
+}
+
+var _ BufferPool = NopBufferPool{}
+
+// NopBufferPool is a buffer pool that returns new buffers without pooling.
+type NopBufferPool struct{}
+
+// Get returns a buffer with specified length from the pool.
+func (NopBufferPool) Get(length int) *[]byte {
+	b := make([]byte, length)
+	return &b
+}
+
+// Put returns a buffer to the pool.
+func (NopBufferPool) Put(*[]byte) {
+}
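
Usage note: to illustrate the tier selection above, Get routes a request to the
smallest sized pool whose buffers can hold it and falls back to the simple pool
for anything larger. A sketch using only the exported API (the mem package is
experimental, per its docs):

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// Two tiers: 4KB and 32KB. A 10KB request is served by the 32KB tier; a
	// request larger than 32KB would fall through to the fallback pool.
	pool := mem.NewTieredBufferPool(4<<10, 32<<10)

	buf := pool.Get(10 << 10)
	fmt.Println(len(*buf), cap(*buf)) // 10240 32768

	copy(*buf, "payload")
	pool.Put(buf) // return the buffer for reuse
}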
diff --git a/hack/tools/vendor/google.golang.org/grpc/mem/buffer_slice.go b/hack/tools/vendor/google.golang.org/grpc/mem/buffer_slice.go
new file mode 100644
index 0000000000..228e9c2f20
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -0,0 +1,226 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+	"io"
+)
+
+// BufferSlice offers a means to represent data that spans one or more Buffer
+// instances. A BufferSlice is meant to be immutable after creation, and methods
+// like Ref create and return copies of the slice. This is why all methods have
+// value receivers rather than pointer receivers.
+//
+// Note that any of the methods that read the underlying buffers, such as Ref,
+// Len, or CopyTo, will panic if any underlying buffers have already been
+// freed. It is recommended not to interact directly with any of the underlying
+// buffers; rather, such interactions should be mediated through the various
+// methods on this type.
+//
+// By convention, any APIs that return (mem.BufferSlice, error) should reduce
+// the burden on the caller by never returning a mem.BufferSlice that needs to
+// be freed if the error is non-nil, unless explicitly stated.
+type BufferSlice []Buffer
+
+// Len returns the sum of the length of all the Buffers in this slice.
+//
+// # Warning
+//
+// Invoking the built-in len on a BufferSlice will return the number of buffers
+// in the slice, and *not* the value returned by this function.
+func (s BufferSlice) Len() int {
+	var length int
+	for _, b := range s {
+		length += b.Len()
+	}
+	return length
+}
+
+// Ref invokes Ref on each buffer in the slice.
+func (s BufferSlice) Ref() {
+	for _, b := range s {
+		b.Ref()
+	}
+}
+
+// Free invokes Buffer.Free() on each Buffer in the slice.
+func (s BufferSlice) Free() {
+	for _, b := range s {
+		b.Free()
+	}
+}
+
+// CopyTo copies each of the underlying Buffer's data into the given buffer,
+// returning the number of bytes copied. Has the same semantics as the copy
+// builtin in that it will copy as many bytes as it can, stopping when either dst
+// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
+func (s BufferSlice) CopyTo(dst []byte) int {
+	off := 0
+	for _, b := range s {
+		off += copy(dst[off:], b.ReadOnlyData())
+	}
+	return off
+}
+
+// Materialize concatenates all the underlying Buffer's data into a single
+// contiguous buffer using CopyTo.
+func (s BufferSlice) Materialize() []byte {
+	l := s.Len()
+	if l == 0 {
+		return nil
+	}
+	out := make([]byte, l)
+	s.CopyTo(out)
+	return out
+}
+
+// MaterializeToBuffer functions like Materialize except that it writes the data
+// to a single Buffer pulled from the given BufferPool.
+//
+// As a special case, if the input BufferSlice only actually has one Buffer, this
+// function simply increases the refcount before returning said Buffer. Freeing this
+// buffer won't release it until the BufferSlice is itself released.
+func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
+	if len(s) == 1 {
+		s[0].Ref()
+		return s[0]
+	}
+	sLen := s.Len()
+	if sLen == 0 {
+		return emptyBuffer{}
+	}
+	buf := pool.Get(sLen)
+	s.CopyTo(*buf)
+	return NewBuffer(buf, pool)
+}
+
+// Reader returns a new Reader for the input slice after taking references to
+// each underlying buffer.
+func (s BufferSlice) Reader() Reader {
+	s.Ref()
+	return &sliceReader{
+		data: s,
+		len:  s.Len(),
+	}
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
+// with other systems. It also provides an additional convenience method
+// Remaining(), which returns the number of unread bytes remaining in the slice.
+// Buffers will be freed as they are read.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+	// Close frees the underlying BufferSlice and never returns an error. Subsequent
+	// calls to Read will return (0, io.EOF).
+	Close() error
+	// Remaining returns the number of unread bytes remaining in the slice.
+	Remaining() int
+}
+
+type sliceReader struct {
+	data BufferSlice
+	len  int
+	// The index into data[0].ReadOnlyData().
+	bufferIdx int
+}
+
+func (r *sliceReader) Remaining() int {
+	return r.len
+}
+
+func (r *sliceReader) Close() error {
+	r.data.Free()
+	r.data = nil
+	r.len = 0
+	return nil
+}
+
+func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+	if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
+		return false
+	}
+
+	r.data[0].Free()
+	r.data = r.data[1:]
+	r.bufferIdx = 0
+	return true
+}
+
+func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+	if r.len == 0 {
+		return 0, io.EOF
+	}
+
+	for len(buf) != 0 && r.len != 0 {
+		// Copy as much as possible from the first Buffer in the slice into the
+		// given byte slice.
+		data := r.data[0].ReadOnlyData()
+		copied := copy(buf, data[r.bufferIdx:])
+		r.len -= copied       // Reduce len by the number of bytes copied.
+		r.bufferIdx += copied // Increment the buffer index.
+		n += copied           // Increment the total number of bytes read.
+		buf = buf[copied:]    // Shrink the given byte slice.
+
+		// If we have copied all the data from the first Buffer, free it and advance to
+		// the next in the slice.
+		r.freeFirstBufferIfEmpty()
+	}
+
+	return n, nil
+}
+
+func (r *sliceReader) ReadByte() (byte, error) {
+	if r.len == 0 {
+		return 0, io.EOF
+	}
+
+	// There may be any number of empty buffers in the slice, clear them all until a
+	// non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
+	for r.freeFirstBufferIfEmpty() {
+	}
+
+	b := r.data[0].ReadOnlyData()[r.bufferIdx]
+	r.len--
+	r.bufferIdx++
+	// Free the first buffer in the slice if the last byte was read
+	r.freeFirstBufferIfEmpty()
+	return b, nil
+}
+
+var _ io.Writer = (*writer)(nil)
+
+type writer struct {
+	buffers *BufferSlice
+	pool    BufferPool
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+	b := Copy(p, w.pool)
+	*w.buffers = append(*w.buffers, b)
+	return b.Len(), nil
+}
+
+// NewWriter wraps the given BufferSlice and BufferPool to implement the
+// io.Writer interface. Every call to Write copies the contents of the given
+// buffer into a new Buffer pulled from the given pool and the Buffer is added to
+// the given BufferSlice.
+func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
+	return &writer{buffers: buffers, pool: pool}
+}
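
Usage note: NewWriter and Reader round-trip data through a BufferSlice; a
sketch with error returns elided for brevity:

package main

import (
	"fmt"
	"io"

	"google.golang.org/grpc/mem"
)

func main() {
	var bs mem.BufferSlice
	w := mem.NewWriter(&bs, mem.DefaultBufferPool())
	w.Write([]byte("hello, "))
	w.Write([]byte("world"))

	r := bs.Reader() // takes its own references to every buffer
	data, _ := io.ReadAll(r)
	fmt.Println(string(data)) // hello, world

	r.Close() // frees the reader's references
	bs.Free() // frees the writer's references
}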
diff --git a/hack/tools/vendor/google.golang.org/grpc/mem/buffers.go b/hack/tools/vendor/google.golang.org/grpc/mem/buffers.go
new file mode 100644
index 0000000000..4d66b2ccc2
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/mem/buffers.go
@@ -0,0 +1,252 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package mem provides utilities that facilitate memory reuse in byte slices
+// that are used as buffers.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
+// removed in a later release.
+package mem
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+)
+
+// A Buffer represents a reference counted piece of data (in bytes) that can be
+// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
+// released by calling Free(), which invokes the free function given at creation
+// only after all references are released.
+//
+// Note that a Buffer is not safe for concurrent access and instead each
+// goroutine should use its own reference to the data, which can be acquired via
+// a call to Ref().
+//
+// Attempts to access the underlying data after releasing the reference to the
+// Buffer will panic.
+type Buffer interface {
+	// ReadOnlyData returns the underlying byte slice. Note that it is undefined
+	// behavior to modify the contents of this slice in any way.
+	ReadOnlyData() []byte
+	// Ref increases the reference counter for this Buffer.
+	Ref()
+	// Free decrements this Buffer's reference counter and frees the underlying
+	// byte slice if the counter reaches 0 as a result of this call.
+	Free()
+	// Len returns the Buffer's size.
+	Len() int
+
+	split(n int) (left, right Buffer)
+	read(buf []byte) (int, Buffer)
+}
+
+var (
+	bufferPoolingThreshold = 1 << 10
+
+	bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
+	refObjectPool    = sync.Pool{New: func() any { return new(atomic.Int32) }}
+)
+
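+// IsBelowBufferPoolingThreshold reports whether a buffer of the given size is
+// small enough that pooling and reference counting are skipped in favor of a
+// plain allocation.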
+func IsBelowBufferPoolingThreshold(size int) bool {
+	return size <= bufferPoolingThreshold
+}
+
+type buffer struct {
+	origData *[]byte
+	data     []byte
+	refs     *atomic.Int32
+	pool     BufferPool
+}
+
+func newBuffer() *buffer {
+	return bufferObjectPool.Get().(*buffer)
+}
+
+// NewBuffer creates a new Buffer from the given data, initializing the reference
+// counter to 1. The data will then be returned to the given pool when all
+// references to the returned Buffer are released. As a special case to avoid
+// additional allocations, if the given buffer pool is nil, the returned buffer
+// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
+// underlying data is never freed.
+//
+// Note that the backing array of the given data is not copied.
+func NewBuffer(data *[]byte, pool BufferPool) Buffer {
+	if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) {
+		return (SliceBuffer)(*data)
+	}
+	b := newBuffer()
+	b.origData = data
+	b.data = *data
+	b.pool = pool
+	b.refs = refObjectPool.Get().(*atomic.Int32)
+	b.refs.Add(1)
+	return b
+}
+
+// Copy creates a new Buffer from the given data, initializing the reference
+// counter to 1.
+//
+// It acquires a []byte from the given pool and copies over the backing array
+// of the given data. The []byte acquired from the pool is returned to the
+// pool when all references to the returned Buffer are released.
+func Copy(data []byte, pool BufferPool) Buffer {
+	if IsBelowBufferPoolingThreshold(len(data)) {
+		buf := make(SliceBuffer, len(data))
+		copy(buf, data)
+		return buf
+	}
+
+	buf := pool.Get(len(data))
+	copy(*buf, data)
+	return NewBuffer(buf, pool)
+}
+
+func (b *buffer) ReadOnlyData() []byte {
+	if b.refs == nil {
+		panic("Cannot read freed buffer")
+	}
+	return b.data
+}
+
+func (b *buffer) Ref() {
+	if b.refs == nil {
+		panic("Cannot ref freed buffer")
+	}
+	b.refs.Add(1)
+}
+
+func (b *buffer) Free() {
+	if b.refs == nil {
+		panic("Cannot free freed buffer")
+	}
+
+	refs := b.refs.Add(-1)
+	switch {
+	case refs > 0:
+		return
+	case refs == 0:
+		if b.pool != nil {
+			b.pool.Put(b.origData)
+		}
+
+		refObjectPool.Put(b.refs)
+		b.origData = nil
+		b.data = nil
+		b.refs = nil
+		b.pool = nil
+		bufferObjectPool.Put(b)
+	default:
+		panic("Cannot free freed buffer")
+	}
+}
+
+func (b *buffer) Len() int {
+	return len(b.ReadOnlyData())
+}
+
+func (b *buffer) split(n int) (Buffer, Buffer) {
+	if b.refs == nil {
+		panic("Cannot split freed buffer")
+	}
+
+	b.refs.Add(1)
+	split := newBuffer()
+	split.origData = b.origData
+	split.data = b.data[n:]
+	split.refs = b.refs
+	split.pool = b.pool
+
+	b.data = b.data[:n]
+
+	return b, split
+}
+
+func (b *buffer) read(buf []byte) (int, Buffer) {
+	if b.refs == nil {
+		panic("Cannot read freed buffer")
+	}
+
+	n := copy(buf, b.data)
+	if n == len(b.data) {
+		b.Free()
+		return n, nil
+	}
+
+	b.data = b.data[n:]
+	return n, b
+}
+
+// String returns a string representation of the buffer. May be used for
+// debugging purposes.
+func (b *buffer) String() string {
+	return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
+}
+
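+// ReadUnsafe reads from buf into dst, returning the number of bytes copied and
+// the remaining Buffer; buf is freed once its data has been fully consumed.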
+func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
+	return buf.read(dst)
+}
+
+// SplitUnsafe modifies the receiver to point to the first n bytes while it
+// returns a new reference to the remaining bytes. The returned Buffer functions
+// just like a normal reference acquired using Ref().
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
+	return buf.split(n)
+}
+
+type emptyBuffer struct{}
+
+func (e emptyBuffer) ReadOnlyData() []byte {
+	return nil
+}
+
+func (e emptyBuffer) Ref()  {}
+func (e emptyBuffer) Free() {}
+
+func (e emptyBuffer) Len() int {
+	return 0
+}
+
+func (e emptyBuffer) split(int) (left, right Buffer) {
+	return e, e
+}
+
+func (e emptyBuffer) read([]byte) (int, Buffer) {
+	return 0, e
+}
+
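+// SliceBuffer is a Buffer backed directly by a byte slice. It performs no
+// reference counting, so Ref and Free are no-ops and the data is simply left
+// to the garbage collector.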
+type SliceBuffer []byte
+
+func (s SliceBuffer) ReadOnlyData() []byte { return s }
+func (s SliceBuffer) Ref()                 {}
+func (s SliceBuffer) Free()                {}
+func (s SliceBuffer) Len() int             { return len(s) }
+
+func (s SliceBuffer) split(n int) (left, right Buffer) {
+	return s[:n], s[n:]
+}
+
+func (s SliceBuffer) read(buf []byte) (int, Buffer) {
+	n := copy(buf, s)
+	if n == len(s) {
+		return n, nil
+	}
+	return n, s[n:]
+}
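
Usage note: the reference-counting contract above, end to end (the size is
chosen to stay above the pooling threshold so a refcounted buffer, not a
SliceBuffer, is returned):

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()
	raw := pool.Get(4 << 10) // 4KB, above the 1KB pooling threshold

	buf := mem.NewBuffer(raw, pool) // refcount starts at 1
	buf.Ref()                       // refcount 2

	buf.Free()             // refcount 1: data is still readable
	fmt.Println(buf.Len()) // 4096

	buf.Free() // refcount 0: raw is returned to the pool
}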
diff --git a/hack/tools/vendor/google.golang.org/grpc/metadata/metadata.go b/hack/tools/vendor/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 0000000000..d2e15253bb
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,295 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"google.golang.org/grpc/internal"
+)
+
+func init() {
+	internal.FromOutgoingContextRaw = fromOutgoingContextRaw
+}
+
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
+func DecodeKeyValue(k, v string) (string, string, error) {
+	return k, v, nil
+}
+
+// MD is a mapping from metadata keys to values. Users should use the following
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates an MD from a given key-value map.
+//
+// Only the following ASCII characters are allowed in keys:
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func New(m map[string]string) MD {
+	md := make(MD, len(m))
+	for k, val := range m {
+		key := strings.ToLower(k)
+		md[key] = append(md[key], val)
+	}
+	return md
+}
+
+// Pairs returns an MD formed by the mapping of key, value ...
+// Pairs panics if len(kv) is odd.
+//
+// Only the following ASCII characters are allowed in keys:
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func Pairs(kv ...string) MD {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md := make(MD, len(kv)/2)
+	for i := 0; i < len(kv); i += 2 {
+		key := strings.ToLower(kv[i])
+		md[key] = append(md[key], kv[i+1])
+	}
+	return md
+}
+
+// Len returns the number of items in md.
+func (md MD) Len() int {
+	return len(md)
+}
+
+// Copy returns a copy of md.
+func (md MD) Copy() MD {
+	out := make(MD, len(md))
+	for k, v := range md {
+		out[k] = copyOf(v)
+	}
+	return out
+}
+
+// Get obtains the values for a given key.
+//
+// k is converted to lowercase before searching in md.
+func (md MD) Get(k string) []string {
+	k = strings.ToLower(k)
+	return md[k]
+}
+
+// Set sets the value of a given key with a slice of values.
+//
+// k is converted to lowercase before storing in md.
+func (md MD) Set(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = vals
+}
+
+// Append adds the values to key k, not overwriting what was already stored at
+// that key.
+//
+// k is converted to lowercase before storing in md.
+func (md MD) Append(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = append(md[k], vals...)
+}
+
+// Delete removes the values for a given key k which is converted to lowercase
+// before removing it from md.
+func (md MD) Delete(k string) {
+	k = strings.ToLower(k)
+	delete(md, k)
+}
+
+// Join joins any number of mds into a single MD.
+//
+// The order of values for each key is determined by the order in which the mds
+// containing those values are presented to Join.
+func Join(mds ...MD) MD {
+	out := MD{}
+	for _, md := range mds {
+		for k, v := range md {
+			out[k] = append(out[k], v...)
+		}
+	}
+	return out
+}
+
+type mdIncomingKey struct{}
+type mdOutgoingKey struct{}
+
+// NewIncomingContext creates a new context with incoming md attached. md must
+// not be modified after calling this function.
+func NewIncomingContext(ctx context.Context, md MD) context.Context {
+	return context.WithValue(ctx, mdIncomingKey{}, md)
+}
+
+// NewOutgoingContext creates a new context with outgoing md attached. If used
+// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
+// overwrite any previously-appended metadata. md must not be modified after
+// calling this function.
+func NewOutgoingContext(ctx context.Context, md MD) context.Context {
+	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
+}
+
+// AppendToOutgoingContext returns a new context with the provided kv merged
+// with any existing metadata in the context. Please refer to the documentation
+// of Pairs for a description of kv.
+func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
+	added := make([][]string, len(md.added)+1)
+	copy(added, md.added)
+	kvCopy := make([]string, 0, len(kv))
+	for i := 0; i < len(kv); i += 2 {
+		kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
+	}
+	added[len(added)-1] = kvCopy
+	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
+}
+
+// FromIncomingContext returns the incoming metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
+func FromIncomingContext(ctx context.Context) (MD, bool) {
+	md, ok := ctx.Value(mdIncomingKey{}).(MD)
+	if !ok {
+		return nil, false
+	}
+	out := make(MD, len(md))
+	for k, v := range md {
+		// We need to manually convert all keys to lower case, because MD is a
+		// map, and there's no guarantee that the MD attached to the context is
+		// created using our helper functions.
+		key := strings.ToLower(k)
+		out[key] = copyOf(v)
+	}
+	return out, true
+}
+
+// ValueFromIncomingContext returns the metadata value corresponding to the metadata
+// key from the incoming metadata if it exists. Keys are matched in a case insensitive
+// manner.
+func ValueFromIncomingContext(ctx context.Context, key string) []string {
+	md, ok := ctx.Value(mdIncomingKey{}).(MD)
+	if !ok {
+		return nil
+	}
+
+	if v, ok := md[key]; ok {
+		return copyOf(v)
+	}
+	for k, v := range md {
+		// Case insensitive comparison: MD is a map, and there's no guarantee
+		// that the MD attached to the context is created using our helper
+		// functions.
+		if strings.EqualFold(k, key) {
+			return copyOf(v)
+		}
+	}
+	return nil
+}
+
+func copyOf(v []string) []string {
+	vals := make([]string, len(v))
+	copy(vals, v)
+	return vals
+}
+
+// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
+//
+// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
+// is a map, there's no guarantee it's created using our helper functions) and
+// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
+// lowercase).
+func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
+	if !ok {
+		return nil, nil, false
+	}
+
+	return raw.md, raw.added, true
+}
+
+// FromOutgoingContext returns the outgoing metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
+func FromOutgoingContext(ctx context.Context) (MD, bool) {
+	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
+	if !ok {
+		return nil, false
+	}
+
+	mdSize := len(raw.md)
+	for i := range raw.added {
+		mdSize += len(raw.added[i]) / 2
+	}
+
+	out := make(MD, mdSize)
+	for k, v := range raw.md {
+		// We need to manually convert all keys to lower case, because MD is a
+		// map, and there's no guarantee that the MD attached to the context is
+		// created using our helper functions.
+		key := strings.ToLower(k)
+		out[key] = copyOf(v)
+	}
+	for _, added := range raw.added {
+		if len(added)%2 == 1 {
+			panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
+		}
+
+		for i := 0; i < len(added); i += 2 {
+			key := strings.ToLower(added[i])
+			out[key] = append(out[key], added[i+1])
+		}
+	}
+	return out, ok
+}
+
+type rawMD struct {
+	md    MD
+	added [][]string
+}
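
Usage note: the helpers above compose as follows (the header key is arbitrary):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Client side: attach metadata to the outgoing context.
	ctx := metadata.AppendToOutgoingContext(context.Background(),
		"x-request-id", "42")

	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md.Get("x-request-id")) // [42]

	// Server side, inside a handler, the mirror call would be:
	//   md, ok := metadata.FromIncomingContext(ctx)
}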
diff --git a/hack/tools/vendor/google.golang.org/grpc/peer/peer.go b/hack/tools/vendor/google.golang.org/grpc/peer/peer.go
new file mode 100644
index 0000000000..499a49c8c1
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/peer/peer.go
@@ -0,0 +1,83 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package peer defines various peer information associated with RPCs and
+// corresponding utils.
+package peer
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strings"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// Peer contains the information of the peer for an RPC, such as the address
+// and authentication information.
+type Peer struct {
+	// Addr is the peer address.
+	Addr net.Addr
+	// LocalAddr is the local address.
+	LocalAddr net.Addr
+	// AuthInfo is the authentication information of the transport.
+	// It is nil if there is no transport security being used.
+	AuthInfo credentials.AuthInfo
+}
+
+// String ensures the Peer type implements the Stringer interface so that a
+// context carrying a peerKey value can be printed effectively.
+func (p *Peer) String() string {
+	if p == nil {
+		return "Peer"
+	}
+	sb := &strings.Builder{}
+	sb.WriteString("Peer{")
+	if p.Addr != nil {
+		fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String())
+	} else {
+		fmt.Fprintf(sb, "Addr: , ")
+	}
+	if p.LocalAddr != nil {
+		fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String())
+	} else {
+		fmt.Fprintf(sb, "LocalAddr: , ")
+	}
+	if p.AuthInfo != nil {
+		fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType())
+	} else {
+		fmt.Fprintf(sb, "AuthInfo: ")
+	}
+	sb.WriteString("}")
+
+	return sb.String()
+}
+
+type peerKey struct{}
+
+// NewContext creates a new context with peer information attached.
+func NewContext(ctx context.Context, p *Peer) context.Context {
+	return context.WithValue(ctx, peerKey{}, p)
+}
+
+// FromContext returns the peer information in ctx if it exists.
+func FromContext(ctx context.Context) (p *Peer, ok bool) {
+	p, ok = ctx.Value(peerKey{}).(*Peer)
+	return
+}
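
Usage note: peer information is typically read inside a handler; a sketch (the
helper name is illustrative):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc/peer"
)

// logCaller could sit at the top of any unary or streaming handler.
func logCaller(ctx context.Context) {
	if p, ok := peer.FromContext(ctx); ok {
		log.Printf("call from %s", p.Addr)
		if p.AuthInfo != nil {
			log.Printf("auth type: %s", p.AuthInfo.AuthType())
		}
	}
}

func main() {
	logCaller(context.Background()) // no peer attached: nothing is logged
}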
diff --git a/hack/tools/vendor/google.golang.org/grpc/picker_wrapper.go b/hack/tools/vendor/google.golang.org/grpc/picker_wrapper.go
new file mode 100644
index 0000000000..bdaa2130e4
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -0,0 +1,219 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"sync/atomic"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/channelz"
+	istatus "google.golang.org/grpc/internal/status"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// pickerGeneration stores a picker and a channel used to signal that a picker
+// newer than this one is available.
+type pickerGeneration struct {
+	// picker is the picker produced by the LB policy.  May be nil if a picker
+	// has never been produced.
+	picker balancer.Picker
+	// blockingCh is closed when the picker has been invalidated because there
+	// is a new one available.
+	blockingCh chan struct{}
+}
+
+// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
+// actions and unblocks when there's a picker update.
+type pickerWrapper struct {
+	// If pickerGen holds a nil pointer, the pickerWrapper is closed.
+	pickerGen     atomic.Pointer[pickerGeneration]
+	statsHandlers []stats.Handler // to record blocking picker calls
+}
+
+func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
+	pw := &pickerWrapper{
+		statsHandlers: statsHandlers,
+	}
+	pw.pickerGen.Store(&pickerGeneration{
+		blockingCh: make(chan struct{}),
+	})
+	return pw
+}
+
+// updatePicker is called by UpdateState calls from the LB policy. It
+// unblocks all blocked picks.
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
+	old := pw.pickerGen.Swap(&pickerGeneration{
+		picker:     p,
+		blockingCh: make(chan struct{}),
+	})
+	close(old.blockingCh)
+}
+
+// doneChannelzWrapper performs the following:
+//   - increments the calls started channelz counter
+//   - wraps the done function in the passed in result to increment the calls
+//     failed or calls succeeded channelz counter before invoking the actual
+//     done function.
+func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
+	ac := acbw.ac
+	ac.incrCallsStarted()
+	done := result.Done
+	result.Done = func(b balancer.DoneInfo) {
+		if b.Err != nil && b.Err != io.EOF {
+			ac.incrCallsFailed()
+		} else {
+			ac.incrCallsSucceeded()
+		}
+		if done != nil {
+			done(b)
+		}
+	}
+}
+
+// pick returns the transport that will be used for the RPC.
+// It may block in the following cases:
+// - there's no picker
+// - the current picker returns ErrNoSubConnAvailable
+// - the current picker returns other errors and failfast is false.
+// - the subConn returned by the current picker is not READY
+// When one of these situations happens, pick blocks until the picker gets updated.
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
+	var ch chan struct{}
+
+	var lastPickErr error
+
+	for {
+		pg := pw.pickerGen.Load()
+		if pg == nil {
+			return nil, balancer.PickResult{}, ErrClientConnClosing
+		}
+		if pg.picker == nil {
+			ch = pg.blockingCh
+		}
+		if ch == pg.blockingCh {
+			// This could happen when either:
+			// - pw.picker is nil (the previous if condition), or
+			// - we have already called pick on the current picker.
+			select {
+			case <-ctx.Done():
+				var errStr string
+				if lastPickErr != nil {
+					errStr = "latest balancer error: " + lastPickErr.Error()
+				} else {
+					errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error())
+				}
+				switch ctx.Err() {
+				case context.DeadlineExceeded:
+					return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr)
+				case context.Canceled:
+					return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr)
+				}
+			case <-ch:
+			}
+			continue
+		}
+
+		// If the channel is set, it means that the pick call had to wait for a
+		// new picker at some point. Either it's the first iteration and this
+		// function received the first picker, or a picker errored with
+		// ErrNoSubConnAvailable or errored with failfast set to false, which
+		// will trigger a continue to the next iteration. In the first case this
+		// conditional will hit if this call had to block (the channel is set).
+		// In the second case, the only way it will get to this conditional is
+		// if there is a new picker.
+		if ch != nil {
+			for _, sh := range pw.statsHandlers {
+				sh.HandleRPC(ctx, &stats.PickerUpdated{})
+			}
+		}
+
+		ch = pg.blockingCh
+		p := pg.picker
+
+		pickResult, err := p.Pick(info)
+		if err != nil {
+			if err == balancer.ErrNoSubConnAvailable {
+				continue
+			}
+			if st, ok := status.FromError(err); ok {
+				// Status error: end the RPC unconditionally with this status.
+				// First restrict the code to the list allowed by gRFC A54.
+				if istatus.IsRestrictedControlPlaneCode(st) {
+					err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
+				}
+				return nil, balancer.PickResult{}, dropError{error: err}
+			}
+			// For all other errors, wait for ready RPCs should block and other
+			// RPCs should fail with unavailable.
+			if !failfast {
+				lastPickErr = err
+				continue
+			}
+			return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
+		}
+
+		acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
+		if !ok {
+			logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
+			continue
+		}
+		if t := acbw.ac.getReadyTransport(); t != nil {
+			if channelz.IsOn() {
+				doneChannelzWrapper(acbw, &pickResult)
+				return t, pickResult, nil
+			}
+			return t, pickResult, nil
+		}
+		if pickResult.Done != nil {
+			// Calling done with nil error, no bytes sent and no bytes received.
+			// DoneInfo with default value works.
+			pickResult.Done(balancer.DoneInfo{})
+		}
+		logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
+		// If ok == false, ac.state is not READY.
+		// A valid picker always returns READY subConn. This means the state of ac
+		// just changed, and picker will be updated shortly.
+		// continue back to the beginning of the for loop to repick.
+	}
+}
+
+func (pw *pickerWrapper) close() {
+	old := pw.pickerGen.Swap(nil)
+	close(old.blockingCh)
+}
+
+// reset clears the pickerWrapper and prepares it for being used again when idle
+// mode is exited.
+func (pw *pickerWrapper) reset() {
+	old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})})
+	close(old.blockingCh)
+}
+
+// dropError is a wrapper error that indicates the LB policy wishes to drop the
+// RPC and not retry it.
+type dropError struct {
+	error
+}
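
Usage note: the generation-swap pattern above (an atomic pointer paired with a
channel that is closed when a newer generation replaces it) is broadly useful
on its own. A standalone sketch, not grpc API:

package main

import (
	"fmt"
	"sync/atomic"
)

// gen carries a value plus a channel closed when a newer gen replaces it.
type gen struct {
	val   int
	stale chan struct{}
}

type holder struct{ p atomic.Pointer[gen] }

func newHolder() *holder {
	h := &holder{}
	h.p.Store(&gen{stale: make(chan struct{})})
	return h
}

// publish installs a new generation and wakes everyone blocked on the old one.
func (h *holder) publish(v int) {
	old := h.p.Swap(&gen{val: v, stale: make(chan struct{})})
	close(old.stale)
}

func main() {
	h := newHolder()
	g := h.p.Load()
	done := make(chan struct{})
	go func() {
		<-g.stale                   // blocks until publish closes the channel
		fmt.Println(h.p.Load().val) // 7
		close(done)
	}()
	h.publish(7)
	<-done
}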
diff --git a/hack/tools/vendor/google.golang.org/grpc/preloader.go b/hack/tools/vendor/google.golang.org/grpc/preloader.go
new file mode 100644
index 0000000000..e87a17f36a
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/preloader.go
@@ -0,0 +1,85 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/status"
+)
+
+// PreparedMsg is responsible for creating a Marshalled and Compressed object.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PreparedMsg struct {
+	// Struct for preparing msg before sending them
+	encodedData mem.BufferSlice
+	hdr         []byte
+	payload     mem.BufferSlice
+	pf          payloadFormat
+}
+
+// Encode marshalls and compresses the message using the codec and compressor for the stream.
+func (p *PreparedMsg) Encode(s Stream, msg any) error {
+	ctx := s.Context()
+	rpcInfo, ok := rpcInfoFromContext(ctx)
+	if !ok {
+		return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo")
+	}
+
+	// check if the context has the relevant information to prepareMsg
+	if rpcInfo.preloaderInfo == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
+	}
+	if rpcInfo.preloaderInfo.codec == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
+	}
+
+	// prepare the msg
+	data, err := encode(rpcInfo.preloaderInfo.codec, msg)
+	if err != nil {
+		return err
+	}
+
+	materializedData := data.Materialize()
+	data.Free()
+	p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)}
+
+	// TODO: it should be possible to grab the bufferPool from the underlying
+	//  stream implementation with a type cast to its actual type (such as
+	//  addrConnStream) and accessing the buffer pool directly.
+	var compData mem.BufferSlice
+	compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
+	if err != nil {
+		return err
+	}
+
+	if p.pf.isCompressed() {
+		materializedCompData := compData.Materialize()
+		compData.Free()
+		compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)}
+	}
+
+	p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
+
+	return nil
+}
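
Usage note: PreparedMsg pays the marshal/compress cost once, after which
SendMsg recognizes the *PreparedMsg and skips re-encoding. A sketch, assuming
the caller supplies an in-flight server stream and message (both placeholders):

package main

import (
	"google.golang.org/grpc"
)

// sendPrepared encodes msg once for this stream, then sends the preprocessed
// bytes. It only works inside an active RPC, where the stream's codec and
// compressor are available.
func sendPrepared(stream grpc.ServerStream, msg any) error {
	var pm grpc.PreparedMsg
	if err := pm.Encode(stream, msg); err != nil {
		return err
	}
	return stream.SendMsg(&pm)
}

func main() {}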
diff --git a/hack/tools/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/hack/tools/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
new file mode 100644
index 0000000000..ef3d6ed6c4
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -0,0 +1,60 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package dns implements a dns resolver to be installed as the default resolver
+// in grpc.
+package dns
+
+import (
+	"time"
+
+	"google.golang.org/grpc/internal/resolver/dns"
+	"google.golang.org/grpc/resolver"
+)
+
+// SetResolvingTimeout sets the maximum duration for DNS resolution requests.
+//
+// This function affects the global timeout used by all channels using the DNS
+// name resolver scheme.
+//
+// It must be called only at application startup, before any gRPC calls are
+// made. Modifying this value after initialization is not thread-safe.
+//
+// The default value is 30 seconds. Setting the timeout too low may result in
+// premature timeouts during resolution, while setting it too high may lead to
+// unnecessary delays in service discovery. Choose a value appropriate for your
+// specific needs and network environment.
+func SetResolvingTimeout(timeout time.Duration) {
+	dns.ResolvingTimeout = timeout
+}
+
+// NewBuilder creates a dnsBuilder, which is used to create DNS resolvers.
+//
+// Deprecated: import grpc and use resolver.Get("dns") instead.
+func NewBuilder() resolver.Builder {
+	return dns.NewBuilder()
+}
+
+// SetMinResolutionInterval sets the default minimum interval at which DNS
+// re-resolutions are allowed. This helps to prevent excessive re-resolution.
+//
+// It must be called only at application startup, before any gRPC calls are
+// made. Modifying this value after initialization is not thread-safe.
+func SetMinResolutionInterval(d time.Duration) {
+	dns.MinResolutionInterval = d
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/resolver/map.go b/hack/tools/vendor/google.golang.org/grpc/resolver/map.go
new file mode 100644
index 0000000000..ada5b9bb79
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/resolver/map.go
@@ -0,0 +1,251 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package resolver
+
+type addressMapEntry struct {
+	addr  Address
+	value any
+}
+
+// AddressMap is a map of addresses to arbitrary values taking into account
+// Attributes.  BalancerAttributes are ignored, as are Metadata and Type.
+// Multiple accesses may not be performed concurrently.  Must be created via
+// NewAddressMap; do not construct directly.
+type AddressMap struct {
+	// The underlying map is keyed by an Address with fields that we don't care
+	// about being set to their zero values. The only fields that we care about
+	// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
+	// distinguish between addresses with same `Addr` and `ServerName`, but
+	// different `Attributes`, we cannot store the `Attributes` in the map key.
+	//
+	// The comparison operation for structs works as follows:
+	//  Struct values are comparable if all their fields are comparable. Two
+	//  struct values are equal if their corresponding non-blank fields are equal.
+	//
+	// The value type of the map contains a slice of addresses which match the key
+	// in their `Addr` and `ServerName` fields and contain the corresponding value
+	// associated with them.
+	m map[Address]addressMapEntryList
+}
+
+func toMapKey(addr *Address) Address {
+	return Address{Addr: addr.Addr, ServerName: addr.ServerName}
+}
+
+type addressMapEntryList []*addressMapEntry
+
+// NewAddressMap creates a new AddressMap.
+func NewAddressMap() *AddressMap {
+	return &AddressMap{m: make(map[Address]addressMapEntryList)}
+}
+
+// find returns the index of addr in the addressMapEntry slice, or -1 if not
+// present.
+func (l addressMapEntryList) find(addr Address) int {
+	for i, entry := range l {
+		// Attributes are the only thing to match on here, since `Addr` and
+		// `ServerName` are already equal.
+		if entry.addr.Attributes.Equal(addr.Attributes) {
+			return i
+		}
+	}
+	return -1
+}
+
+// Get returns the value for the address in the map, if present.
+func (a *AddressMap) Get(addr Address) (value any, ok bool) {
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
+	if entry := entryList.find(addr); entry != -1 {
+		return entryList[entry].value, true
+	}
+	return nil, false
+}
+
+// Set updates or adds the value to the address in the map.
+func (a *AddressMap) Set(addr Address, value any) {
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
+	if entry := entryList.find(addr); entry != -1 {
+		entryList[entry].value = value
+		return
+	}
+	a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value})
+}
+
+// Delete removes addr from the map.
+func (a *AddressMap) Delete(addr Address) {
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
+	entry := entryList.find(addr)
+	if entry == -1 {
+		return
+	}
+	if len(entryList) == 1 {
+		entryList = nil
+	} else {
+		copy(entryList[entry:], entryList[entry+1:])
+		entryList = entryList[:len(entryList)-1]
+	}
+	a.m[addrKey] = entryList
+}
+
+// Len returns the number of entries in the map.
+func (a *AddressMap) Len() int {
+	ret := 0
+	for _, entryList := range a.m {
+		ret += len(entryList)
+	}
+	return ret
+}
+
+// Keys returns a slice of all current map keys.
+func (a *AddressMap) Keys() []Address {
+	ret := make([]Address, 0, a.Len())
+	for _, entryList := range a.m {
+		for _, entry := range entryList {
+			ret = append(ret, entry.addr)
+		}
+	}
+	return ret
+}
+
+// Values returns a slice of all current map values.
+func (a *AddressMap) Values() []any {
+	ret := make([]any, 0, a.Len())
+	for _, entryList := range a.m {
+		for _, entry := range entryList {
+			ret = append(ret, entry.value)
+		}
+	}
+	return ret
+}
+
+type endpointNode struct {
+	addrs map[string]struct{}
+}
+
+// Equal reports whether the unordered sets of addrs are the same between the
+// endpoint nodes.
+func (en *endpointNode) Equal(en2 *endpointNode) bool {
+	if len(en.addrs) != len(en2.addrs) {
+		return false
+	}
+	for addr := range en.addrs {
+		if _, ok := en2.addrs[addr]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+func toEndpointNode(endpoint Endpoint) endpointNode {
+	en := make(map[string]struct{})
+	for _, addr := range endpoint.Addresses {
+		en[addr.Addr] = struct{}{}
+	}
+	return endpointNode{
+		addrs: en,
+	}
+}
+
+// EndpointMap is a map of endpoints to arbitrary values keyed on only the
+// unordered set of address strings within an endpoint. This map is not
+// thread-safe and must not be accessed concurrently. Must be created via
+// NewEndpointMap; do not construct directly.
+type EndpointMap struct {
+	endpoints map[*endpointNode]any
+}
+
+// NewEndpointMap creates a new EndpointMap.
+func NewEndpointMap() *EndpointMap {
+	return &EndpointMap{
+		endpoints: make(map[*endpointNode]any),
+	}
+}
+
+// Get returns the value for the endpoint in the map, if present.
+func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) {
+	en := toEndpointNode(e)
+	if endpoint := em.find(en); endpoint != nil {
+		return em.endpoints[endpoint], true
+	}
+	return nil, false
+}
+
+// Set updates or adds the value for the endpoint in the map.
+func (em *EndpointMap) Set(e Endpoint, value any) {
+	en := toEndpointNode(e)
+	if endpoint := em.find(en); endpoint != nil {
+		em.endpoints[endpoint] = value
+		return
+	}
+	em.endpoints[&en] = value
+}
+
+// Len returns the number of entries in the map.
+func (em *EndpointMap) Len() int {
+	return len(em.endpoints)
+}
+
+// Keys returns a slice of all current map keys, as endpoints specifying the
+// addresses present in the endpoint keys, in which uniqueness is determined by
+// the unordered set of addresses. Thus, endpoint information returned is not
+// the full endpoint data (drops duplicated addresses and attributes) but can be
+// used for EndpointMap accesses.
+func (em *EndpointMap) Keys() []Endpoint {
+	ret := make([]Endpoint, 0, len(em.endpoints))
+	for en := range em.endpoints {
+		var endpoint Endpoint
+		for addr := range en.addrs {
+			endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr})
+		}
+		ret = append(ret, endpoint)
+	}
+	return ret
+}
+
+// Values returns a slice of all current map values.
+func (em *EndpointMap) Values() []any {
+	ret := make([]any, 0, len(em.endpoints))
+	for _, val := range em.endpoints {
+		ret = append(ret, val)
+	}
+	return ret
+}
+
+// find returns a pointer to the endpoint node in em if the endpoint node is
+// already present. If not found, nil is returned. The comparisons are done on
+// the unordered set of addresses within an endpoint.
+func (em EndpointMap) find(e endpointNode) *endpointNode {
+	for endpoint := range em.endpoints {
+		if e.Equal(endpoint) {
+			return endpoint
+		}
+	}
+	return nil
+}
+
+// Delete removes the specified endpoint from the map.
+func (em *EndpointMap) Delete(e Endpoint) {
+	en := toEndpointNode(e)
+	if entry := em.find(en); entry != nil {
+		delete(em.endpoints, entry)
+	}
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/resolver/resolver.go b/hack/tools/vendor/google.golang.org/grpc/resolver/resolver.go
new file mode 100644
index 0000000000..202854511b
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -0,0 +1,332 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver defines APIs for name resolution in gRPC.
+// All APIs in this package are experimental.
+package resolver
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/url"
+	"strings"
+
+	"google.golang.org/grpc/attributes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// m is a map from scheme to resolver builder.
+	m = make(map[string]Builder)
+	// defaultScheme is the default scheme to use.
+	defaultScheme = "passthrough"
+)
+
+// TODO(bar) install dns resolver in init(){}.
+
+// Register registers the resolver builder to the resolver map. b.Scheme will
+// be used as the scheme registered with this builder. The registry is case
+// sensitive, and schemes should not contain any uppercase characters.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Resolvers are
+// registered with the same name, the one registered last will take effect.
+func Register(b Builder) {
+	m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+//
+// If no builder is registered with the scheme, nil will be returned.
+func Get(scheme string) Builder {
+	if b, ok := m[scheme]; ok {
+		return b
+	}
+	return nil
+}
+
+// SetDefaultScheme sets the default scheme that will be used. The default
+// scheme is initially set to "passthrough".
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. The scheme set last overrides
+// previously set values.
+func SetDefaultScheme(scheme string) {
+	defaultScheme = scheme
+	internal.UserSetDefaultScheme = true
+}
+
+// GetDefaultScheme returns the default scheme used by grpc.Dial. If
+// SetDefaultScheme is never called, grpc.NewClient uses "dns" instead.
+func GetDefaultScheme() string {
+	return defaultScheme
+}
+
+// Address represents a server the client connects to.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type Address struct {
+	// Addr is the server address on which a connection will be established.
+	Addr string
+
+	// ServerName is the name of this address.
+	// If non-empty, the ServerName is used as the transport certification authority for
+	// the address, instead of the hostname from the Dial target string. In most cases,
+	// this should not be set.
+	//
+	// WARNING: ServerName must only be populated with trusted values. It
+	// is insecure to populate it with data from untrusted inputs since untrusted
+	// values could be used to bypass the authority checks performed by TLS.
+	ServerName string
+
+	// Attributes contains arbitrary data about this address intended for
+	// consumption by the SubConn.
+	Attributes *attributes.Attributes
+
+	// BalancerAttributes contains arbitrary data about this address intended
+	// for consumption by the LB policy.  These attributes do not affect SubConn
+	// creation, connection establishment, handshaking, etc.
+	//
+	// Deprecated: when an Address is inside an Endpoint, this field should not
+	// be used, and it will eventually be removed entirely.
+	BalancerAttributes *attributes.Attributes
+
+	// Metadata is the information associated with Addr, which may be used
+	// to make load balancing decisions.
+	//
+	// Deprecated: use Attributes instead.
+	Metadata any
+}
+
+// Equal returns whether a and o are identical.  Metadata is compared directly,
+// not with any recursive introspection.
+//
+// This method compares all fields of the address. When used to tell apart
+// addresses during subchannel creation or connection establishment, it might be
+// more appropriate for the caller to implement custom equality logic.
+func (a Address) Equal(o Address) bool {
+	return a.Addr == o.Addr && a.ServerName == o.ServerName &&
+		a.Attributes.Equal(o.Attributes) &&
+		a.BalancerAttributes.Equal(o.BalancerAttributes) &&
+		a.Metadata == o.Metadata
+}
+
+// String returns JSON formatted string representation of the address.
+func (a Address) String() string {
+	var sb strings.Builder
+	sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr))
+	sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName))
+	if a.Attributes != nil {
+		sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String()))
+	}
+	if a.BalancerAttributes != nil {
+		sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String()))
+	}
+	sb.WriteString("}")
+	return sb.String()
+}
+
+// BuildOptions includes additional information for the builder to create
+// the resolver.
+type BuildOptions struct {
+	// DisableServiceConfig indicates whether a resolver implementation should
+	// fetch service config data.
+	DisableServiceConfig bool
+	// DialCreds is the transport credentials used by the ClientConn for
+	// communicating with the target gRPC service (set via
+	// WithTransportCredentials). In cases where a name resolution service
+	// requires the same credentials, the resolver may use this field. In most
+	// cases though, it is not appropriate, and this field may be ignored.
+	DialCreds credentials.TransportCredentials
+	// CredsBundle is the credentials bundle used by the ClientConn for
+	// communicating with the target gRPC service (set via
+	// WithCredentialsBundle). In cases where a name resolution service
+	// requires the same credentials, the resolver may use this field. In most
+	// cases though, it is not appropriate, and this field may be ignored.
+	CredsBundle credentials.Bundle
+	// Dialer is the custom dialer used by the ClientConn for dialling the
+	// target gRPC service (set via WithDialer). In cases where a name
+	// resolution service requires the same dialer, the resolver may use this
+	// field. In most cases though, it is not appropriate, and this field may
+	// be ignored.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// Authority is the effective authority of the clientconn for which the
+	// resolver is built.
+	Authority string
+}
+
+// An Endpoint is one network endpoint, or server, which may have multiple
+// addresses with which it can be accessed.
+type Endpoint struct {
+	// Addresses contains a list of addresses used to access this endpoint.
+	Addresses []Address
+
+	// Attributes contains arbitrary data about this endpoint intended for
+	// consumption by the LB policy.
+	Attributes *attributes.Attributes
+}
+
+// State contains the current Resolver state relevant to the ClientConn.
+type State struct {
+	// Addresses is the latest set of resolved addresses for the target.
+	//
+	// If a resolver sets Addresses but does not set Endpoints, one Endpoint
+	// will be created for each Address before the State is passed to the LB
+	// policy.  The BalancerAttributes of each entry in Addresses will be set
+	// in Endpoints.Attributes, and be cleared in the Endpoint's Address's
+	// BalancerAttributes.
+	//
+	// Soon, Addresses will be deprecated and replaced fully by Endpoints.
+	Addresses []Address
+
+	// Endpoints is the latest set of resolved endpoints for the target.
+	//
+	// If a resolver produces a State containing Endpoints but not Addresses,
+	// it must take care to ensure the LB policies it selects will support
+	// Endpoints.
+	Endpoints []Endpoint
+
+	// ServiceConfig contains the result from parsing the latest service
+	// config.  If it is nil, it indicates no service config is present or the
+	// resolver does not provide service configs.
+	ServiceConfig *serviceconfig.ParseResult
+
+	// Attributes contains arbitrary data about the resolver intended for
+	// consumption by the load balancing policy.
+	Attributes *attributes.Attributes
+}
+
+// ClientConn contains the callbacks for the resolver to notify the gRPC
+// ClientConn of any updates.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For situations like testing,
+// the new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
+type ClientConn interface {
+	// UpdateState updates the state of the ClientConn appropriately.
+	//
+	// If an error is returned, the resolver should try to resolve the
+	// target again. The resolver should use a backoff timer to prevent
+	// overloading the server with requests. If a resolver is certain that
+	// reresolving will not change the result, e.g. because it is
+	// a watch-based resolver, returned errors can be ignored.
+	//
+	// If the resolved State is the same as the last reported one, calling
+	// UpdateState can be omitted.
+	UpdateState(State) error
+	// ReportError notifies the ClientConn that the Resolver encountered an
+	// error.  The ClientConn will notify the load balancer and begin calling
+	// ResolveNow on the Resolver with exponential backoff.
+	ReportError(error)
+	// NewAddress is called by resolver to notify ClientConn a new list
+	// of resolved addresses.
+	// The address list should be the complete list of resolved addresses.
+	//
+	// Deprecated: Use UpdateState instead.
+	NewAddress(addresses []Address)
+	// ParseServiceConfig parses the provided service config and returns an
+	// object that provides the parsed config.
+	ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
+}
+
+// Target represents a target for gRPC, as specified in:
+// https://github.com/grpc/grpc/blob/master/doc/naming.md.
+// It is parsed from the target string that gets passed into Dial or DialContext
+// by the user, and gRPC passes it to the resolver and the balancer.
+//
+// If the target follows the naming spec, and the parsed scheme is registered
+// with gRPC, we will parse the target string according to the spec. If the
+// target does not contain a scheme or if the parsed scheme is not registered
+// (i.e. no corresponding resolver available to resolve the endpoint), we will
+// apply the default scheme, and will attempt to reparse it.
+type Target struct {
+	// URL contains the parsed dial target with an optional default scheme added
+	// to it if the original dial target contained no scheme or contained an
+	// unregistered scheme. Any query params specified in the original dial
+	// target can be accessed from here.
+	URL url.URL
+}
+
+// Endpoint retrieves endpoint without leading "/" from either `URL.Path`
+// or `URL.Opaque`. The latter is used when the former is empty.
+func (t Target) Endpoint() string {
+	endpoint := t.URL.Path
+	if endpoint == "" {
+		endpoint = t.URL.Opaque
+	}
+	// For targets of the form "[scheme]://[authority]/endpoint", the endpoint
+	// value returned from url.Parse() contains a leading "/". Although this is
+	// in accordance with RFC 3986, we do not want to break existing resolver
+	// implementations which expect the endpoint without the leading "/". So, we
+	// end up stripping the leading "/" here. But this will result in an
+	// incorrect parsing for something like "unix:///path/to/socket". Since we
+	// own the "unix" resolver, we can work around this there by using
+	// the `URL` field.
+	return strings.TrimPrefix(endpoint, "/")
+}
+
+// String returns the canonical string representation of Target.
+func (t Target) String() string {
+	return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint()
+}
+
+// Builder creates a resolver that will be used to watch name resolution updates.
+type Builder interface {
+	// Build creates a new resolver for the given target.
+	//
+	// gRPC dial calls Build synchronously, and fails if the returned error is
+	// not nil.
+	Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
+	// Scheme returns the scheme supported by this resolver.  Scheme is defined
+	// at https://github.com/grpc/grpc/blob/master/doc/naming.md.  The returned
+	// string should not contain uppercase characters, as they will not match
+	// the parsed target's scheme as defined in RFC 3986.
+	Scheme() string
+}
+
+// ResolveNowOptions includes additional information for ResolveNow.
+type ResolveNowOptions struct{}
+
+// Resolver watches for the updates on the specified target.
+// Updates include address updates and service config updates.
+type Resolver interface {
+	// ResolveNow will be called by gRPC to try to resolve the target name
+	// again. It's just a hint, resolver can ignore this if it's not necessary.
+	//
+	// It could be called multiple times concurrently.
+	ResolveNow(ResolveNowOptions)
+	// Close closes the resolver.
+	Close()
+}
+
+// AuthorityOverrider is implemented by Builders that wish to override the
+// default authority for the ClientConn.
+// By default, the authority used is target.Endpoint().
+type AuthorityOverrider interface {
+	// OverrideAuthority returns the authority to use for a ClientConn with the
+	// given target. The implementation must generate it without blocking,
+	// typically inline, and must keep it unchanged.
+	OverrideAuthority(Target) string
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/resolver_wrapper.go b/hack/tools/vendor/google.golang.org/grpc/resolver_wrapper.go
new file mode 100644
index 0000000000..23bb3fb258
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -0,0 +1,201 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"strings"
+	"sync"
+
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+// ccResolverWrapper is a wrapper on top of cc for resolvers.
+// It implements resolver.ClientConn interface.
+type ccResolverWrapper struct {
+	// The following fields are initialized when the wrapper is created and are
+	// read-only afterwards, and therefore can be accessed without a mutex.
+	cc                  *ClientConn
+	ignoreServiceConfig bool
+	serializer          *grpcsync.CallbackSerializer
+	serializerCancel    context.CancelFunc
+
+	resolver resolver.Resolver // only accessed within the serializer
+
+	// The following fields are protected by mu.  Caller must take cc.mu before
+	// taking mu.
+	mu       sync.Mutex
+	curState resolver.State
+	closed   bool
+}
+
+// newCCResolverWrapper initializes the ccResolverWrapper.  It can only be used
+// after calling start, which builds the resolver.
+func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
+	ctx, cancel := context.WithCancel(cc.ctx)
+	return &ccResolverWrapper{
+		cc:                  cc,
+		ignoreServiceConfig: cc.dopts.disableServiceConfig,
+		serializer:          grpcsync.NewCallbackSerializer(ctx),
+		serializerCancel:    cancel,
+	}
+}
+
+// start builds the name resolver using the resolver.Builder in cc and returns
+// any error encountered.  It must always be the first operation performed on
+// any newly created ccResolverWrapper, except that close may be called instead.
+func (ccr *ccResolverWrapper) start() error {
+	errCh := make(chan error)
+	ccr.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil {
+			return
+		}
+		opts := resolver.BuildOptions{
+			DisableServiceConfig: ccr.cc.dopts.disableServiceConfig,
+			DialCreds:            ccr.cc.dopts.copts.TransportCredentials,
+			CredsBundle:          ccr.cc.dopts.copts.CredsBundle,
+			Dialer:               ccr.cc.dopts.copts.Dialer,
+			Authority:            ccr.cc.authority,
+		}
+		var err error
+		ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts)
+		errCh <- err
+	})
+	return <-errCh
+}
+
+func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
+	ccr.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil || ccr.resolver == nil {
+			return
+		}
+		ccr.resolver.ResolveNow(o)
+	})
+}
+
+// close initiates async shutdown of the wrapper. To determine that the
+// wrapper has finished shutting down, the caller should block on
+// ccr.serializer.Done() without cc.mu held.
+func (ccr *ccResolverWrapper) close() {
+	channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver")
+	ccr.mu.Lock()
+	ccr.closed = true
+	ccr.mu.Unlock()
+
+	ccr.serializer.TrySchedule(func(context.Context) {
+		if ccr.resolver == nil {
+			return
+		}
+		ccr.resolver.Close()
+		ccr.resolver = nil
+	})
+	ccr.serializerCancel()
+}
+
+// UpdateState is called by resolver implementations to report new state to gRPC
+// which includes addresses and service config.
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
+	ccr.cc.mu.Lock()
+	ccr.mu.Lock()
+	if ccr.closed {
+		ccr.mu.Unlock()
+		ccr.cc.mu.Unlock()
+		return nil
+	}
+	if s.Endpoints == nil {
+		s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
+		for _, a := range s.Addresses {
+			ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
+			ep.Addresses[0].BalancerAttributes = nil
+			s.Endpoints = append(s.Endpoints, ep)
+		}
+	}
+	ccr.addChannelzTraceEvent(s)
+	ccr.curState = s
+	ccr.mu.Unlock()
+	return ccr.cc.updateResolverStateAndUnlock(s, nil)
+}
+
+// ReportError is called by resolver implementations to report errors
+// encountered during name resolution to gRPC.
+func (ccr *ccResolverWrapper) ReportError(err error) {
+	ccr.cc.mu.Lock()
+	ccr.mu.Lock()
+	if ccr.closed {
+		ccr.mu.Unlock()
+		ccr.cc.mu.Unlock()
+		return
+	}
+	ccr.mu.Unlock()
+	channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err)
+	ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err)
+}
+
+// NewAddress is called by the resolver implementation to send addresses to
+// gRPC.
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
+	ccr.cc.mu.Lock()
+	ccr.mu.Lock()
+	if ccr.closed {
+		ccr.mu.Unlock()
+		ccr.cc.mu.Unlock()
+		return
+	}
+	s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}
+	ccr.addChannelzTraceEvent(s)
+	ccr.curState = s
+	ccr.mu.Unlock()
+	ccr.cc.updateResolverStateAndUnlock(s, nil)
+}
+
+// ParseServiceConfig is called by resolver implementations to parse a JSON
+// representation of the service config.
+func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
+	return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts)
+}
+
+// addChannelzTraceEvent adds a channelz trace event containing the new
+// state received from resolver implementations.
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
+	if !logger.V(0) && !channelz.IsOn() {
+		return
+	}
+	var updates []string
+	var oldSC, newSC *ServiceConfig
+	var oldOK, newOK bool
+	if ccr.curState.ServiceConfig != nil {
+		oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
+	}
+	if s.ServiceConfig != nil {
+		newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
+	}
+	if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
+		updates = append(updates, "service config updated")
+	}
+	if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
+		updates = append(updates, "resolver returned an empty address list")
+	} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
+		updates = append(updates, "resolver returned new addresses")
+	}
+	channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/rpc_util.go b/hack/tools/vendor/google.golang.org/grpc/rpc_util.go
new file mode 100644
index 0000000000..2d96f1405e
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/rpc_util.go
@@ -0,0 +1,1069 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"compress/gzip"
+	"context"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+	"strings"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/encoding/proto"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// Compressor defines the interface gRPC uses to compress a message.
+//
+// Deprecated: use package encoding.
+type Compressor interface {
+	// Do compresses p into w.
+	Do(w io.Writer, p []byte) error
+	// Type returns the compression algorithm the Compressor uses.
+	Type() string
+}
+
+type gzipCompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPCompressor creates a Compressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPCompressor() Compressor {
+	c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
+	return c
+}
+
+// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
+// of assuming DefaultCompression.
+//
+// The error returned will be nil if the level is valid.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
+	if level < gzip.DefaultCompression || level > gzip.BestCompression {
+		return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
+	}
+	return &gzipCompressor{
+		pool: sync.Pool{
+			New: func() any {
+				w, err := gzip.NewWriterLevel(io.Discard, level)
+				if err != nil {
+					panic(err)
+				}
+				return w
+			},
+		},
+	}, nil
+}
+
+func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
+	z := c.pool.Get().(*gzip.Writer)
+	defer c.pool.Put(z)
+	z.Reset(w)
+	if _, err := z.Write(p); err != nil {
+		return err
+	}
+	return z.Close()
+}
+
+func (c *gzipCompressor) Type() string {
+	return "gzip"
+}
+
+// Decompressor defines the interface gRPC uses to decompress a message.
+//
+// Deprecated: use package encoding.
+type Decompressor interface {
+	// Do reads the data from r and uncompresses it.
+	Do(r io.Reader) ([]byte, error)
+	// Type returns the compression algorithm the Decompressor uses.
+	Type() string
+}
+
+type gzipDecompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPDecompressor creates a Decompressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPDecompressor() Decompressor {
+	return &gzipDecompressor{}
+}
+
+func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
+	var z *gzip.Reader
+	switch maybeZ := d.pool.Get().(type) {
+	case nil:
+		newZ, err := gzip.NewReader(r)
+		if err != nil {
+			return nil, err
+		}
+		z = newZ
+	case *gzip.Reader:
+		z = maybeZ
+		if err := z.Reset(r); err != nil {
+			d.pool.Put(z)
+			return nil, err
+		}
+	}
+
+	defer func() {
+		z.Close()
+		d.pool.Put(z)
+	}()
+	return io.ReadAll(z)
+}
+
+func (d *gzipDecompressor) Type() string {
+	return "gzip"
+}
+
+// callInfo contains all related configuration and information about an RPC.
+type callInfo struct {
+	compressorType        string
+	failFast              bool
+	maxReceiveMessageSize *int
+	maxSendMessageSize    *int
+	creds                 credentials.PerRPCCredentials
+	contentSubtype        string
+	codec                 baseCodec
+	maxRetryRPCBufferSize int
+	onFinish              []func(err error)
+}
+
+func defaultCallInfo() *callInfo {
+	return &callInfo{
+		failFast:              true,
+		maxRetryRPCBufferSize: 256 * 1024, // 256KB
+	}
+}
+
+// CallOption configures a Call before it starts or extracts information from
+// a Call after it completes.
+type CallOption interface {
+	// before is called before the call is sent to any server.  If before
+	// returns a non-nil error, the RPC fails with that error.
+	before(*callInfo) error
+
+	// after is called after the call has completed.  after cannot return an
+	// error, so any failures should be reported via output parameters.
+	after(*callInfo, *csAttempt)
+}
+
+// EmptyCallOption does not alter the Call configuration.
+// It can be embedded in another structure to carry satellite data for use
+// by interceptors.
+type EmptyCallOption struct{}
+
+func (EmptyCallOption) before(*callInfo) error      { return nil }
+func (EmptyCallOption) after(*callInfo, *csAttempt) {}
+
+// StaticMethod returns a CallOption which specifies that a call is being made
+// to a method that is static, which means the method is known at compile time
+// and doesn't change at runtime. This can be used as a signal to stats plugins
+// that this method is safe to include as a key to a measurement.
+func StaticMethod() CallOption {
+	return StaticMethodCallOption{}
+}
+
+// StaticMethodCallOption is a CallOption that specifies that a call comes
+// from a static method.
+type StaticMethodCallOption struct {
+	EmptyCallOption
+}
+
+// Header returns a CallOption that retrieves the header metadata
+// for a unary RPC.
+func Header(md *metadata.MD) CallOption {
+	return HeaderCallOption{HeaderAddr: md}
+}
+
+// HeaderCallOption is a CallOption for collecting response header metadata.
+// The metadata field will be populated *after* the RPC completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type HeaderCallOption struct {
+	HeaderAddr *metadata.MD
+}
+
+func (o HeaderCallOption) before(*callInfo) error { return nil }
+func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) {
+	*o.HeaderAddr, _ = attempt.s.Header()
+}
+
+// Trailer returns a CallOption that retrieves the trailer metadata
+// for a unary RPC.
+func Trailer(md *metadata.MD) CallOption {
+	return TrailerCallOption{TrailerAddr: md}
+}
+
+// TrailerCallOption is a CallOption for collecting response trailer metadata.
+// The metadata field will be populated *after* the RPC completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type TrailerCallOption struct {
+	TrailerAddr *metadata.MD
+}
+
+func (o TrailerCallOption) before(*callInfo) error { return nil }
+func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) {
+	*o.TrailerAddr = attempt.s.Trailer()
+}
+
+// Peer returns a CallOption that retrieves peer information for a unary RPC.
+// The peer field will be populated *after* the RPC completes.
+func Peer(p *peer.Peer) CallOption {
+	return PeerCallOption{PeerAddr: p}
+}
+
+// PeerCallOption is a CallOption for collecting the identity of the remote
+// peer. The peer field will be populated *after* the RPC completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PeerCallOption struct {
+	PeerAddr *peer.Peer
+}
+
+func (o PeerCallOption) before(*callInfo) error { return nil }
+func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) {
+	if x, ok := peer.FromContext(attempt.s.Context()); ok {
+		*o.PeerAddr = *x
+	}
+}
+
+// WaitForReady configures the RPC's behavior when the client is in
+// TRANSIENT_FAILURE, which occurs when all addresses fail to connect.  If
+// waitForReady is false, the RPC will fail immediately.  Otherwise, the client
+// will wait until a connection becomes available or the RPC's deadline is
+// reached.
+//
+// By default, RPCs do not "wait for ready".
+func WaitForReady(waitForReady bool) CallOption {
+	return FailFastCallOption{FailFast: !waitForReady}
+}
+
+// FailFast is the opposite of WaitForReady.
+//
+// Deprecated: use WaitForReady.
+func FailFast(failFast bool) CallOption {
+	return FailFastCallOption{FailFast: failFast}
+}
+
+// FailFastCallOption is a CallOption for indicating whether an RPC should fail
+// fast or not.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type FailFastCallOption struct {
+	FailFast bool
+}
+
+func (o FailFastCallOption) before(c *callInfo) error {
+	c.failFast = o.FailFast
+	return nil
+}
+func (o FailFastCallOption) after(*callInfo, *csAttempt) {}
+
+// OnFinish returns a CallOption that configures a callback to be called when
+// the call completes. The error passed to the callback is the status of the
+// RPC, and may be nil. The onFinish callback provided will only be called once
+// by gRPC. It is mainly intended for streaming interceptors that want to be
+// notified, along with the RPC's status, when the RPC completes.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func OnFinish(onFinish func(err error)) CallOption {
+	return OnFinishCallOption{
+		OnFinish: onFinish,
+	}
+}
+
+// OnFinishCallOption is a CallOption that indicates a callback to be called when
+// the call completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type OnFinishCallOption struct {
+	OnFinish func(error)
+}
+
+func (o OnFinishCallOption) before(c *callInfo) error {
+	c.onFinish = append(c.onFinish, o.OnFinish)
+	return nil
+}
+
+func (o OnFinishCallOption) after(*callInfo, *csAttempt) {}
+
+// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can receive. If this is not set, gRPC uses the default
+// 4MB.
+func MaxCallRecvMsgSize(bytes int) CallOption {
+	return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
+}
+
+// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
+// size in bytes the client can receive.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type MaxRecvMsgSizeCallOption struct {
+	MaxRecvMsgSize int
+}
+
+func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
+	c.maxReceiveMessageSize = &o.MaxRecvMsgSize
+	return nil
+}
+func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
+
+// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can send. If this is not set, gRPC uses the default
+// `math.MaxInt32`.
+func MaxCallSendMsgSize(bytes int) CallOption {
+	return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
+}
+
+// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
+// size in bytes the client can send.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type MaxSendMsgSizeCallOption struct {
+	MaxSendMsgSize int
+}
+
+func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
+	c.maxSendMessageSize = &o.MaxSendMsgSize
+	return nil
+}
+func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {}
+
+// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
+// for a call.
+func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
+	return PerRPCCredsCallOption{Creds: creds}
+}
+
+// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
+// credentials to use for the call.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PerRPCCredsCallOption struct {
+	Creds credentials.PerRPCCredentials
+}
+
+func (o PerRPCCredsCallOption) before(c *callInfo) error {
+	c.creds = o.Creds
+	return nil
+}
+func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {}
+
+// UseCompressor returns a CallOption which sets the compressor used when
+// sending the request.  If WithCompressor is also set, UseCompressor has
+// higher priority.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func UseCompressor(name string) CallOption {
+	return CompressorCallOption{CompressorType: name}
+}
+
+// CompressorCallOption is a CallOption that indicates the compressor to use.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type CompressorCallOption struct {
+	CompressorType string
+}
+
+func (o CompressorCallOption) before(c *callInfo) error {
+	c.compressorType = o.CompressorType
+	return nil
+}
+func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
+
+// CallContentSubtype returns a CallOption that will set the content-subtype
+// for a call. For example, if content-subtype is "json", the Content-Type over
+// the wire will be "application/grpc+json". The content-subtype is converted
+// to lowercase before being included in Content-Type. See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If ForceCodec is not also used, the content-subtype will be used to look up
+// the Codec to use in the registry controlled by RegisterCodec. See the
+// documentation on RegisterCodec for details on registration. The lookup of
+// content-subtype is case-insensitive. If no such Codec is found, the call
+// will result in an error with code codes.Internal.
+//
+// If ForceCodec is also used, that Codec will be used for all request and
+// response messages, with the content-subtype set to the given contentSubtype
+// here for requests.
+func CallContentSubtype(contentSubtype string) CallOption {
+	return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)}
+}
+
+// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
+// used for marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ContentSubtypeCallOption struct {
+	ContentSubtype string
+}
+
+func (o ContentSubtypeCallOption) before(c *callInfo) error {
+	c.contentSubtype = o.ContentSubtype
+	return nil
+}
+func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {}
+
+// ForceCodec returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between Codec and
+// content-subtype.
+//
+// This function is provided for advanced users; prefer to use only
+// CallContentSubtype to select a registered codec instead.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceCodec(codec encoding.Codec) CallOption {
+	return ForceCodecCallOption{Codec: codec}
+}
+
+// ForceCodecCallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ForceCodecCallOption struct {
+	Codec encoding.Codec
+}
+
+func (o ForceCodecCallOption) before(c *callInfo) error {
+	c.codec = newCodecV1Bridge(o.Codec)
+	return nil
+}
+func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {}
+
+// ForceCodecV2 returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between Codec and
+// content-subtype.
+//
+// This function is provided for advanced users; prefer to use only
+// CallContentSubtype to select a registered codec instead.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceCodecV2(codec encoding.CodecV2) CallOption {
+	return ForceCodecV2CallOption{CodecV2: codec}
+}
+
+// ForceCodecV2CallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ForceCodecV2CallOption struct {
+	CodecV2 encoding.CodecV2
+}
+
+func (o ForceCodecV2CallOption) before(c *callInfo) error {
+	c.codec = o.CodecV2
+	return nil
+}
+
+func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {}
+
+// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
+// an encoding.Codec.
+//
+// Deprecated: use ForceCodec instead.
+func CallCustomCodec(codec Codec) CallOption {
+	return CustomCodecCallOption{Codec: codec}
+}
+
+// CustomCodecCallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type CustomCodecCallOption struct {
+	Codec Codec
+}
+
+func (o CustomCodecCallOption) before(c *callInfo) error {
+	c.codec = newCodecV0Bridge(o.Codec)
+	return nil
+}
+func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {}
+
+// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
+// used for buffering this RPC's requests for retry purposes.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func MaxRetryRPCBufferSize(bytes int) CallOption {
+	return MaxRetryRPCBufferSizeCallOption{bytes}
+}
+
+// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
+// memory to be used for caching this RPC for retry purposes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type MaxRetryRPCBufferSizeCallOption struct {
+	MaxRetryRPCBufferSize int
+}
+
+func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
+	c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
+	return nil
+}
+func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {}
+
+// The format of the payload: compressed or not?
+type payloadFormat uint8
+
+const (
+	compressionNone payloadFormat = 0 // no compression
+	compressionMade payloadFormat = 1 // compressed
+)
+
+func (pf payloadFormat) isCompressed() bool {
+	return pf == compressionMade
+}
+
+type streamReader interface {
+	ReadHeader(header []byte) error
+	Read(n int) (mem.BufferSlice, error)
+}
+
+// parser reads complete gRPC messages from the underlying reader.
+type parser struct {
+	// r is the underlying reader.
+	// See the comment on recvMsg for the permissible
+	// error types.
+	r streamReader
+
+	// The header of a gRPC message. Find more detail at
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+	header [5]byte
+
+	// bufferPool is the pool of shared receive buffers.
+	bufferPool mem.BufferPool
+}
+
+// recvMsg reads a complete gRPC message from the stream.
+//
+// It returns the message and its payload (compression/encoding)
+// format. The caller owns the returned msg memory.
+//
+// If there is an error, possible values are:
+//   - io.EOF, when no messages remain
+//   - io.ErrUnexpectedEOF
+//   - of type transport.ConnectionError
+//   - an error from the status package
+//
+// No other error values or types may be returned, which also means
+// that the underlying streamReader must not return an incompatible
+// error.
+func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) {
+	err := p.r.ReadHeader(p.header[:])
+	if err != nil {
+		return 0, nil, err
+	}
+
+	pf := payloadFormat(p.header[0])
+	length := binary.BigEndian.Uint32(p.header[1:])
+
+	if length == 0 {
+		return pf, nil, nil
+	}
+	if int64(length) > int64(maxInt) {
+		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
+	}
+	if int(length) > maxReceiveMessageSize {
+		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
+	}
+
+	data, err := p.r.Read(int(length))
+	if err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return 0, nil, err
+	}
+	return pf, data, nil
+}
+
+// encode serializes msg and returns a buffer containing the message, or an
+// error if it is too large to be transmitted by grpc.  If msg is nil, it
+// generates an empty message.
+func encode(c baseCodec, msg any) (mem.BufferSlice, error) {
+	if msg == nil { // NOTE: typed nils will not be caught by this check
+		return nil, nil
+	}
+	b, err := c.Marshal(msg)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
+	}
+	if uint(b.Len()) > math.MaxUint32 {
+		b.Free()
+		return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", b.Len())
+	}
+	return b, nil
+}
+
+// compress returns the input bytes compressed by compressor or cp.
+// If both compressors are nil, or if the message has zero length, returns nil,
+// indicating no compression was done.
+//
+// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
+func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) {
+	if (compressor == nil && cp == nil) || in.Len() == 0 {
+		return nil, compressionNone, nil
+	}
+	var out mem.BufferSlice
+	w := mem.NewWriter(&out, pool)
+	wrapErr := func(err error) error {
+		out.Free()
+		return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+	}
+	if compressor != nil {
+		z, err := compressor.Compress(w)
+		if err != nil {
+			return nil, 0, wrapErr(err)
+		}
+		for _, b := range in {
+			if _, err := z.Write(b.ReadOnlyData()); err != nil {
+				return nil, 0, wrapErr(err)
+			}
+		}
+		if err := z.Close(); err != nil {
+			return nil, 0, wrapErr(err)
+		}
+	} else {
+		// This is obviously really inefficient since it fully materializes the data, but
+		// there is no way around this with the old Compressor API. At least it attempts
+		// to return the buffer to the provider, in the hopes it can be reused (maybe
+		// even by a subsequent call to this very function).
+		buf := in.MaterializeToBuffer(pool)
+		defer buf.Free()
+		if err := cp.Do(w, buf.ReadOnlyData()); err != nil {
+			return nil, 0, wrapErr(err)
+		}
+	}
+	return out, compressionMade, nil
+}
+
+const (
+	payloadLen = 1
+	sizeLen    = 4
+	headerLen  = payloadLen + sizeLen
+)
+
+// msgHeader returns a 5-byte header for the message being transmitted and the
+// payload, which is compData if non-nil or data otherwise.
+func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) {
+	hdr = make([]byte, headerLen)
+	hdr[0] = byte(pf)
+
+	var length uint32
+	if pf.isCompressed() {
+		length = uint32(compData.Len())
+		payload = compData
+	} else {
+		length = uint32(data.Len())
+		payload = data
+	}
+
+	// Write the payload length into the header.
+	binary.BigEndian.PutUint32(hdr[payloadLen:], length)
+	return hdr, payload
+}
+
+func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload {
+	return &stats.OutPayload{
+		Client:           client,
+		Payload:          msg,
+		Length:           dataLength,
+		WireLength:       payloadLength + headerLen,
+		CompressedLength: payloadLength,
+		SentTime:         t,
+	}
+}
+
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status {
+	switch pf {
+	case compressionNone:
+	case compressionMade:
+		if recvCompress == "" || recvCompress == encoding.Identity {
+			return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
+		}
+		if !haveCompressor {
+			if isServer {
+				return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+			} else {
+				return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+			}
+		}
+	default:
+		return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
+	}
+	return nil
+}
+
+type payloadInfo struct {
+	compressedLength  int // The compressed length received from the wire.
+	uncompressedBytes mem.BufferSlice
+}
+
+func (p *payloadInfo) free() {
+	if p != nil && p.uncompressedBytes != nil {
+		p.uncompressedBytes.Free()
+	}
+}
+
+// recvAndDecompress reads a message from the stream, decompressing it if necessary.
+//
+// The caller should free the returned buffer as soon as it is no longer
+// needed; freeing releases it back to the pool.
+// TODO: Refactor this function to reduce the number of arguments.
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
+func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
+) (out mem.BufferSlice, err error) {
+	pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
+	if err != nil {
+		return nil, err
+	}
+
+	compressedLength := compressed.Len()
+
+	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil {
+		compressed.Free()
+		return nil, st.Err()
+	}
+
+	var size int
+	if pf.isCompressed() {
+		defer compressed.Free()
+
+		// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
+		// use this decompressor as the default.
+		if dc != nil {
+			var uncompressedBuf []byte
+			uncompressedBuf, err = dc.Do(compressed.Reader())
+			if err == nil {
+				out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)}
+			}
+			size = len(uncompressedBuf)
+		} else {
+			out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool)
+		}
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
+		}
+		if size > maxReceiveMessageSize {
+			out.Free()
+		// TODO: Revisit the error code. Currently keep it consistent with the
+		// Java implementation.
+			return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+		}
+	} else {
+		out = compressed
+	}
+
+	if payInfo != nil {
+		payInfo.compressedLength = compressedLength
+		out.Ref()
+		payInfo.uncompressedBytes = out
+	}
+
+	return out, nil
+}
+
+// decompress uses compressor to inflate d, returning the uncompressed data and
+// its length. At most maxReceiveMessageSize+1 bytes are read, so a caller can
+// detect, via the returned size, that the message exceeds the limit.
+func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) {
+	dcReader, err := compressor.Decompress(d.Reader())
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// TODO: Can/should this still be preserved with the new BufferSlice API? Are
+	//  there any actual benefits to allocating a single large buffer instead of
+	//  multiple smaller ones?
+	//if sizer, ok := compressor.(interface {
+	//	DecompressedSize(compressedBytes []byte) int
+	//}); ok {
+	//	if size := sizer.DecompressedSize(d); size >= 0 {
+	//		if size > maxReceiveMessageSize {
+	//			return nil, size, nil
+	//		}
+	//		// size is used as an estimate to size the buffer, but we
+	//		// will read more data if available.
+	//		// +MinRead so ReadFrom will not reallocate if size is correct.
+	//		//
+	//		// TODO: If we ensure that the buffer size is the same as the DecompressedSize,
+	//		// we can also utilize the recv buffer pool here.
+	//		buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
+	//		bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+	//		return buf.Bytes(), int(bytesRead), err
+	//	}
+	//}
+
+	var out mem.BufferSlice
+	_, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
+	if err != nil {
+		out.Free()
+		return nil, 0, err
+	}
+	return out, out.Len(), nil
+}
+
+// Of the two decompressor parameters (dc and compressor), at most one should
+// be set, but if both are, dc takes precedence.
+// TODO(dfawley): wrap the old compressor/decompressor using the new API?
+func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
+	data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer)
+	if err != nil {
+		return err
+	}
+
+	// If the codec wants its own reference to the data, it can get it. Otherwise, always
+	// free the buffers.
+	defer data.Free()
+
+	if err := c.Unmarshal(data, m); err != nil {
+		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
+	}
+
+	return nil
+}
+
+// rpcInfo contains information about an RPC.
+type rpcInfo struct {
+	failfast      bool
+	preloaderInfo *compressorInfo
+}
+
+// compressorInfo stores the codec and compressors for a stream. If the
+// stream's context carries an rpcInfo with non-nil pointers to the codec and
+// compressors, a prepared message can marshal and compress asynchronously once
+// and reuse the marshalled bytes.
+type compressorInfo struct {
+	codec baseCodec
+	cp    Compressor
+	comp  encoding.Compressor
+}
+
+type rpcInfoContextKey struct{}
+
+func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
+	return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
+		failfast: failfast,
+		preloaderInfo: &compressorInfo{
+			codec: codec,
+			cp:    cp,
+			comp:  comp,
+		},
+	})
+}
+
+func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) {
+	s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo)
+	return
+}
+
+// Code returns the error code for err if it was produced by the rpc system.
+// Otherwise, it returns codes.Unknown.
+//
+// Deprecated: use status.Code instead.
+func Code(err error) codes.Code {
+	return status.Code(err)
+}
+
+// ErrorDesc returns the error description of err if it was produced by the rpc system.
+// Otherwise, it returns err.Error() or empty string when err is nil.
+//
+// Deprecated: use status.Convert and Message method instead.
+func ErrorDesc(err error) string {
+	return status.Convert(err).Message()
+}
+
+// Errorf returns an error containing an error code and a description;
+// Errorf returns nil if c is OK.
+//
+// Deprecated: use status.Errorf instead.
+func Errorf(c codes.Code, format string, a ...any) error {
+	return status.Errorf(c, format, a...)
+}
+
+var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error())
+var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
+
+// toRPCErr converts an error into an error from the status package.
+func toRPCErr(err error) error {
+	switch err {
+	case nil, io.EOF:
+		return err
+	case context.DeadlineExceeded:
+		return errContextDeadline
+	case context.Canceled:
+		return errContextCanceled
+	case io.ErrUnexpectedEOF:
+		return status.Error(codes.Internal, err.Error())
+	}
+
+	switch e := err.(type) {
+	case transport.ConnectionError:
+		return status.Error(codes.Unavailable, e.Desc)
+	case *transport.NewStreamError:
+		return toRPCErr(e.Err)
+	}
+
+	if _, ok := status.FromError(err); ok {
+		return err
+	}
+
+	return status.Error(codes.Unknown, err.Error())
+}
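+
+// Editorial example (not upstream code): context errors map to their
+// corresponding status codes, and unrecognized errors become codes.Unknown:
+//
+//	status.Code(toRPCErr(context.DeadlineExceeded)) // codes.DeadlineExceeded
+//	status.Code(toRPCErr(errors.New("boom")))       // codes.Unknown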
+
+// setCallInfoCodec should only be called after CallOptions have been applied.
+func setCallInfoCodec(c *callInfo) error {
+	if c.codec != nil {
+		// codec was already set by a CallOption; use it, but set the content
+		// subtype if it is not set.
+		if c.contentSubtype == "" {
+			// c.codec is a baseCodec to hide the difference between grpc.Codec and
+			// encoding.Codec (Name vs. String method name).  We only support
+			// setting content subtype from encoding.Codec to avoid a behavior
+			// change with the deprecated version.
+			if ec, ok := c.codec.(encoding.CodecV2); ok {
+				c.contentSubtype = strings.ToLower(ec.Name())
+			}
+		}
+		return nil
+	}
+
+	if c.contentSubtype == "" {
+		// No codec specified in CallOptions; use proto by default.
+		c.codec = getCodec(proto.Name)
+		return nil
+	}
+
+	// c.contentSubtype is already lowercased in CallContentSubtype
+	c.codec = getCodec(c.contentSubtype)
+	if c.codec == nil {
+		return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
+	}
+	return nil
+}
+
+// The SupportPackageIsVersion variables are referenced from generated protocol
+// buffer files to ensure compatibility with the gRPC version used.  The latest
+// support package version is 9.
+//
+// Older versions are kept for compatibility.
+//
+// These constants should not be referenced from any other code.
+const (
+	SupportPackageIsVersion3 = true
+	SupportPackageIsVersion4 = true
+	SupportPackageIsVersion5 = true
+	SupportPackageIsVersion6 = true
+	SupportPackageIsVersion7 = true
+	SupportPackageIsVersion8 = true
+	SupportPackageIsVersion9 = true
+)
+
+const grpcUA = "grpc-go/" + Version
diff --git a/hack/tools/vendor/google.golang.org/grpc/server.go b/hack/tools/vendor/google.golang.org/grpc/server.go
new file mode 100644
index 0000000000..d1e1415a40
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/server.go
@@ -0,0 +1,2208 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/encoding/proto"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/binarylog"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/tap"
+)
+
+const (
+	defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
+	defaultServerMaxSendMessageSize    = math.MaxInt32
+
+	// Server transports are tracked in a map which is keyed on listener
+	// address. For regular gRPC traffic, connections are accepted in Serve()
+	// through a call to Accept(), and we use the actual listener address as key
+	// when we add it to the map. But for connections received through
+	// ServeHTTP(), we do not have a listener and hence use this dummy value.
+	listenerAddressForServeHTTP = "listenerAddressForServeHTTP"
+)
+
+func init() {
+	internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
+		return srv.opts.creds
+	}
+	internal.IsRegisteredMethod = func(srv *Server, method string) bool {
+		return srv.isRegisteredMethod(method)
+	}
+	internal.ServerFromContext = serverFromContext
+	internal.AddGlobalServerOptions = func(opt ...ServerOption) {
+		globalServerOptions = append(globalServerOptions, opt...)
+	}
+	internal.ClearGlobalServerOptions = func() {
+		globalServerOptions = nil
+	}
+	internal.BinaryLogger = binaryLogger
+	internal.JoinServerOptions = newJoinServerOption
+	internal.BufferPool = bufferPool
+}
+
+var statusOK = status.New(codes.OK, "")
+var logger = grpclog.Component("core")
+
+type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
+
+// MethodDesc represents an RPC service's method specification.
+type MethodDesc struct {
+	MethodName string
+	Handler    methodHandler
+}
+
+// ServiceDesc represents an RPC service's specification.
+type ServiceDesc struct {
+	ServiceName string
+	// The pointer to the service interface. Used to check whether the user
+	// provided implementation satisfies the interface requirements.
+	HandlerType any
+	Methods     []MethodDesc
+	Streams     []StreamDesc
+	Metadata    any
+}
+
+// serviceInfo wraps information about a service. It is very similar to
+// ServiceDesc and is constructed from it for internal purposes.
+type serviceInfo struct {
+	// Contains the implementation for the methods in this service.
+	serviceImpl any
+	methods     map[string]*MethodDesc
+	streams     map[string]*StreamDesc
+	mdata       any
+}
+
+// Server is a gRPC server to serve RPC requests.
+type Server struct {
+	opts serverOptions
+
+	mu  sync.Mutex // guards following
+	lis map[net.Listener]bool
+	// conns contains all active server transports. It is a map keyed on a
+	// listener address with the value being the set of active transports
+	// belonging to that listener.
+	conns    map[string]map[transport.ServerTransport]bool
+	serve    bool
+	drain    bool
+	cv       *sync.Cond              // signaled when connections close for GracefulStop
+	services map[string]*serviceInfo // service name -> service info
+	events   traceEventLog
+
+	quit               *grpcsync.Event
+	done               *grpcsync.Event
+	channelzRemoveOnce sync.Once
+	serveWG            sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
+	handlersWG         sync.WaitGroup // counts active method handler goroutines
+
+	channelz *channelz.Server
+
+	serverWorkerChannel      chan func()
+	serverWorkerChannelClose func()
+}
+
+type serverOptions struct {
+	creds                 credentials.TransportCredentials
+	codec                 baseCodec
+	cp                    Compressor
+	dc                    Decompressor
+	unaryInt              UnaryServerInterceptor
+	streamInt             StreamServerInterceptor
+	chainUnaryInts        []UnaryServerInterceptor
+	chainStreamInts       []StreamServerInterceptor
+	binaryLogger          binarylog.Logger
+	inTapHandle           tap.ServerInHandle
+	statsHandlers         []stats.Handler
+	maxConcurrentStreams  uint32
+	maxReceiveMessageSize int
+	maxSendMessageSize    int
+	unknownStreamDesc     *StreamDesc
+	keepaliveParams       keepalive.ServerParameters
+	keepalivePolicy       keepalive.EnforcementPolicy
+	initialWindowSize     int32
+	initialConnWindowSize int32
+	writeBufferSize       int
+	readBufferSize        int
+	sharedWriteBuffer     bool
+	connectionTimeout     time.Duration
+	maxHeaderListSize     *uint32
+	headerTableSize       *uint32
+	numServerWorkers      uint32
+	bufferPool            mem.BufferPool
+	waitForHandlers       bool
+}
+
+var defaultServerOptions = serverOptions{
+	maxConcurrentStreams:  math.MaxUint32,
+	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
+	maxSendMessageSize:    defaultServerMaxSendMessageSize,
+	connectionTimeout:     120 * time.Second,
+	writeBufferSize:       defaultWriteBufSize,
+	readBufferSize:        defaultReadBufSize,
+	bufferPool:            mem.DefaultBufferPool(),
+}
+var globalServerOptions []ServerOption
+
+// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
+type ServerOption interface {
+	apply(*serverOptions)
+}
+
+// EmptyServerOption does not alter the server configuration. It can be embedded
+// in another structure to build custom server options.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type EmptyServerOption struct{}
+
+func (EmptyServerOption) apply(*serverOptions) {}
+
+// funcServerOption wraps a function that modifies serverOptions into an
+// implementation of the ServerOption interface.
+type funcServerOption struct {
+	f func(*serverOptions)
+}
+
+func (fdo *funcServerOption) apply(do *serverOptions) {
+	fdo.f(do)
+}
+
+func newFuncServerOption(f func(*serverOptions)) *funcServerOption {
+	return &funcServerOption{
+		f: f,
+	}
+}
+
+// joinServerOption provides a way to combine an arbitrary number of server
+// options into one.
+type joinServerOption struct {
+	opts []ServerOption
+}
+
+func (mdo *joinServerOption) apply(do *serverOptions) {
+	for _, opt := range mdo.opts {
+		opt.apply(do)
+	}
+}
+
+func newJoinServerOption(opts ...ServerOption) ServerOption {
+	return &joinServerOption{opts: opts}
+}
+
+// SharedWriteBuffer allows reusing the per-connection transport write buffer.
+// If this option is set to true, every connection releases the buffer after
+// flushing the data on the wire.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func SharedWriteBuffer(val bool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.sharedWriteBuffer = val
+	})
+}
+
+// WriteBufferSize determines how much data can be batched before doing a write
+// on the wire. The default value for this buffer is 32KB. Zero or negative
+// values disable the write buffer, so each write goes directly to the
+// underlying connection. Note: a Send call may not directly translate to a write.
+func WriteBufferSize(s int) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.writeBufferSize = s
+	})
+}
+
+// ReadBufferSize lets you set the size of the read buffer; this determines how
+// much data can be read at most in one read syscall. The default value for this
+// buffer is 32KB. Zero or negative values disable the read buffer for a
+// connection, so the data framer can access the underlying conn directly.
+func ReadBufferSize(s int) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.readBufferSize = s
+	})
+}
+
+// InitialWindowSize returns a ServerOption that sets the window size for a stream.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func InitialWindowSize(s int32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.initialWindowSize = s
+	})
+}
+
+// InitialConnWindowSize returns a ServerOption that sets window size for a connection.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func InitialConnWindowSize(s int32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.initialConnWindowSize = s
+	})
+}
+
+// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
+func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
+	if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime {
+		logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
+		kp.Time = internal.KeepaliveMinServerPingTime
+	}
+
+	return newFuncServerOption(func(o *serverOptions) {
+		o.keepaliveParams = kp
+	})
+}
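+
+// Editorial example (not upstream code), with illustrative values; any Time
+// below one second is raised to the enforced minimum as noted above:
+//
+//	grpc.KeepaliveParams(keepalive.ServerParameters{
+//		Time:    2 * time.Hour,    // ping a client after this much idle time
+//		Timeout: 20 * time.Second, // wait this long for the ping ack
+//	})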
+
+// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server.
+func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.keepalivePolicy = kep
+	})
+}
+
+// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
+//
+// Deprecated: register codecs using encoding.RegisterCodec. The server will
+// automatically use registered codecs based on the incoming requests' headers.
+// See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
+func CustomCodec(codec Codec) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = newCodecV0Bridge(codec)
+	})
+}
+
+// ForceServerCodec returns a ServerOption that sets a codec for message
+// marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered
+// with RegisterCodec.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between encoding.Codec
+// and content-subtype.
+//
+// This function is provided for advanced users; prefer to register codecs
+// using encoding.RegisterCodec.
+// The server will automatically use registered codecs based on the incoming
+// requests' headers. See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodec(codec encoding.Codec) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = newCodecV1Bridge(codec)
+	})
+}
+
+// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
+// CodecV2 interface.
+//
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = codecV2
+	})
+}
+
+// RPCCompressor returns a ServerOption that sets a compressor for outbound
+// messages.  For backward compatibility, all outbound messages will be sent
+// using this compressor, regardless of incoming message compression.  By
+// default, server messages will be sent using the same compressor with which
+// request messages were sent.
+//
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
+func RPCCompressor(cp Compressor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.cp = cp
+	})
+}
+
+// RPCDecompressor returns a ServerOption that sets a decompressor for inbound
+// messages.  It has higher priority than decompressors registered via
+// encoding.RegisterCompressor.
+//
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
+func RPCDecompressor(dc Decompressor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.dc = dc
+	})
+}
+
+// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
+// If this is not set, gRPC uses the default limit.
+//
+// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x.
+func MaxMsgSize(m int) ServerOption {
+	return MaxRecvMsgSize(m)
+}
+
+// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
+// If this is not set, gRPC uses the default 4MB.
+func MaxRecvMsgSize(m int) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.maxReceiveMessageSize = m
+	})
+}
+
+// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send.
+// If this is not set, gRPC uses the default `math.MaxInt32`.
+func MaxSendMsgSize(m int) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.maxSendMessageSize = m
+	})
+}
+
+// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
+// of concurrent streams to each ServerTransport.
+func MaxConcurrentStreams(n uint32) ServerOption {
+	if n == 0 {
+		n = math.MaxUint32
+	}
+	return newFuncServerOption(func(o *serverOptions) {
+		o.maxConcurrentStreams = n
+	})
+}
+
+// Creds returns a ServerOption that sets credentials for server connections.
+func Creds(c credentials.TransportCredentials) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.creds = c
+	})
+}
+
+// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
+// server. Only one unary interceptor can be installed. The construction of multiple
+// interceptors (e.g., chaining) can be implemented at the caller.
+func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		if o.unaryInt != nil {
+			panic("The unary server interceptor was already set and may not be reset.")
+		}
+		o.unaryInt = i
+	})
+}
+
+// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor
+// for unary RPCs. The first interceptor will be the outer most,
+// while the last interceptor will be the inner most wrapper around the real call.
+// All unary interceptors added by this method will be chained.
+func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
+	})
+}
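+
+// Editorial example (not upstream code): logging, auth, and metrics are
+// hypothetical UnaryServerInterceptor values; the first argument wraps all the
+// others, so the call order is logging -> auth -> metrics -> handler:
+//
+//	grpc.NewServer(grpc.ChainUnaryInterceptor(logging, auth, metrics))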
+
+// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
+// server. Only one stream interceptor can be installed.
+func StreamInterceptor(i StreamServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		if o.streamInt != nil {
+			panic("The stream server interceptor was already set and may not be reset.")
+		}
+		o.streamInt = i
+	})
+}
+
+// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
+// for streaming RPCs. The first interceptor will be the outer most,
+// while the last interceptor will be the inner most wrapper around the real call.
+// All stream interceptors added by this method will be chained.
+func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
+	})
+}
+
+// InTapHandle returns a ServerOption that sets the tap handle for all the server
+// transport to be created. Only one can be installed.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func InTapHandle(h tap.ServerInHandle) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		if o.inTapHandle != nil {
+			panic("The tap handle was already set and may not be reset.")
+		}
+		o.inTapHandle = h
+	})
+}
+
+// StatsHandler returns a ServerOption that sets the stats handler for the server.
+func StatsHandler(h stats.Handler) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		if h == nil {
+			logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption")
+			// Do not allow a nil stats handler, which would otherwise cause
+			// panics.
+			return
+		}
+		o.statsHandlers = append(o.statsHandlers, h)
+	})
+}
+
+// binaryLogger returns a ServerOption that can set the binary logger for the
+// server.
+func binaryLogger(bl binarylog.Logger) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.binaryLogger = bl
+	})
+}
+
+// UnknownServiceHandler returns a ServerOption that allows for adding a custom
+// unknown service handler. The provided method is a bidi-streaming RPC service
+// handler that will be invoked instead of returning the "unimplemented" gRPC
+// error whenever a request is received for an unregistered service or method.
+// The handling function and stream interceptor (if set) have full access to
+// the ServerStream, including its Context.
+func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.unknownStreamDesc = &StreamDesc{
+			StreamName: "unknown_service_handler",
+			Handler:    streamHandler,
+			// We need to assume that the users of the streamHandler will want to use both.
+			ClientStreams: true,
+			ServerStreams: true,
+		}
+	})
+}
+
+// ConnectionTimeout returns a ServerOption that sets the timeout for
+// connection establishment (up to and including HTTP/2 handshaking) for all
+// new connections.  If this is not set, the default is 120 seconds.  A zero or
+// negative value will result in an immediate timeout.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ConnectionTimeout(d time.Duration) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.connectionTimeout = d
+	})
+}
+
+// MaxHeaderListSizeServerOption is a ServerOption that sets the max
+// (uncompressed) size of header list that the server is prepared to accept.
+type MaxHeaderListSizeServerOption struct {
+	MaxHeaderListSize uint32
+}
+
+func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) {
+	so.maxHeaderListSize = &o.MaxHeaderListSize
+}
+
+// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
+// of header list that the server is prepared to accept.
+func MaxHeaderListSize(s uint32) ServerOption {
+	return MaxHeaderListSizeServerOption{
+		MaxHeaderListSize: s,
+	}
+}
+
+// HeaderTableSize returns a ServerOption that sets the size of the dynamic
+// header table for a stream.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func HeaderTableSize(s uint32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.headerTableSize = &s
+	})
+}
+
+// NumStreamWorkers returns a ServerOption that sets the number of worker
+// goroutines that should be used to process incoming streams. Setting this to
+// zero (default) will disable workers and spawn a new goroutine for each
+// stream.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NumStreamWorkers(numServerWorkers uint32) ServerOption {
+	// TODO: If/when this API gets stabilized (i.e. stream workers become the
+	// only way streams are processed), change the behavior of the zero value to
+	// a sane default. Preliminary experiments suggest that a value equal to the
+	// number of CPUs available is most performant; requires thorough testing.
+	return newFuncServerOption(func(o *serverOptions) {
+		o.numServerWorkers = numServerWorkers
+	})
+}
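+
+// Editorial example (not upstream code), following the CPU-count heuristic
+// mentioned in the TODO above:
+//
+//	grpc.NewServer(grpc.NumStreamWorkers(uint32(runtime.NumCPU())))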
+
+// WaitForHandlers causes Stop to wait until all outstanding method handlers have
+// exited before returning.  If false, Stop will return as soon as all
+// connections have closed, but method handlers may still be running. By
+// default, Stop does not wait for method handlers to return.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WaitForHandlers(w bool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.waitForHandlers = w
+	})
+}
+
+func bufferPool(bufferPool mem.BufferPool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.bufferPool = bufferPool
+	})
+}
+
+// serverWorkerResetThreshold defines how often a worker's stack is reset. Every
+// N requests, a worker resets its stack by spawning a new goroutine in its
+// place, so that large stacks don't live in memory forever. 2^16 should allow
+// each goroutine stack to live for at least a few seconds in a typical
+// workload (assuming a QPS of a few thousand requests/sec).
+const serverWorkerResetThreshold = 1 << 16
+
+// serverWorker blocks on the server's worker channel forever and runs the
+// stream-handling functions fed to it by serveStreams. This allows multiple
+// requests to be processed by the same goroutine, removing the need for
+// expensive stack re-allocations (see the runtime.morestack problem [1]).
+//
+// [1] https://github.com/golang/go/issues/18138
+func (s *Server) serverWorker() {
+	for completed := 0; completed < serverWorkerResetThreshold; completed++ {
+		f, ok := <-s.serverWorkerChannel
+		if !ok {
+			return
+		}
+		f()
+	}
+	go s.serverWorker()
+}
+
+// initServerWorkers creates worker goroutines and a channel to process incoming
+// streams, reducing the overall time spent in runtime.morestack.
+func (s *Server) initServerWorkers() {
+	s.serverWorkerChannel = make(chan func())
+	s.serverWorkerChannelClose = grpcsync.OnceFunc(func() {
+		close(s.serverWorkerChannel)
+	})
+	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
+		go s.serverWorker()
+	}
+}
+
+// NewServer creates a gRPC server which has no service registered and has not
+// started to accept requests yet.
+func NewServer(opt ...ServerOption) *Server {
+	opts := defaultServerOptions
+	for _, o := range globalServerOptions {
+		o.apply(&opts)
+	}
+	for _, o := range opt {
+		o.apply(&opts)
+	}
+	s := &Server{
+		lis:      make(map[net.Listener]bool),
+		opts:     opts,
+		conns:    make(map[string]map[transport.ServerTransport]bool),
+		services: make(map[string]*serviceInfo),
+		quit:     grpcsync.NewEvent(),
+		done:     grpcsync.NewEvent(),
+		channelz: channelz.RegisterServer(""),
+	}
+	chainUnaryServerInterceptors(s)
+	chainStreamServerInterceptors(s)
+	s.cv = sync.NewCond(&s.mu)
+	if EnableTracing {
+		_, file, line, _ := runtime.Caller(1)
+		s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
+	}
+
+	if s.opts.numServerWorkers > 0 {
+		s.initServerWorkers()
+	}
+
+	channelz.Info(logger, s.channelz, "Server created")
+	return s
+}
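+
+// Editorial example (not upstream code): a minimal construct-register-serve
+// lifecycle; pb.RegisterGreeterServer and greeterImpl are hypothetical
+// generated code and service implementation:
+//
+//	srv := grpc.NewServer(grpc.MaxRecvMsgSize(8 * 1024 * 1024))
+//	pb.RegisterGreeterServer(srv, &greeterImpl{})
+//	lis, err := net.Listen("tcp", ":50051")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(srv.Serve(lis))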
+
+// printf records an event in s's event log, unless s has been stopped.
+// REQUIRES s.mu is held.
+func (s *Server) printf(format string, a ...any) {
+	if s.events != nil {
+		s.events.Printf(format, a...)
+	}
+}
+
+// errorf records an error in s's event log, unless s has been stopped.
+// REQUIRES s.mu is held.
+func (s *Server) errorf(format string, a ...any) {
+	if s.events != nil {
+		s.events.Errorf(format, a...)
+	}
+}
+
+// ServiceRegistrar wraps a single method that supports service registration. It
+// enables users to pass concrete types other than grpc.Server to the service
+// registration methods exported by the IDL generated code.
+type ServiceRegistrar interface {
+	// RegisterService registers a service and its implementation to the
+	// concrete type implementing this interface.  It may not be called
+	// once the server has started serving.
+	// desc describes the service and its methods and handlers. impl is the
+	// service implementation which is passed to the method handlers.
+	RegisterService(desc *ServiceDesc, impl any)
+}
+
+// RegisterService registers a service and its implementation to the gRPC
+// server. It is called from the IDL generated code. This must be called before
+// invoking Serve. If ss is non-nil (for legacy code), its type is checked to
+// ensure it implements sd.HandlerType.
+func (s *Server) RegisterService(sd *ServiceDesc, ss any) {
+	if ss != nil {
+		ht := reflect.TypeOf(sd.HandlerType).Elem()
+		st := reflect.TypeOf(ss)
+		if !st.Implements(ht) {
+			logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+		}
+	}
+	s.register(sd, ss)
+}
+
+func (s *Server) register(sd *ServiceDesc, ss any) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.printf("RegisterService(%q)", sd.ServiceName)
+	if s.serve {
+		logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
+	}
+	if _, ok := s.services[sd.ServiceName]; ok {
+		logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+	}
+	info := &serviceInfo{
+		serviceImpl: ss,
+		methods:     make(map[string]*MethodDesc),
+		streams:     make(map[string]*StreamDesc),
+		mdata:       sd.Metadata,
+	}
+	for i := range sd.Methods {
+		d := &sd.Methods[i]
+		info.methods[d.MethodName] = d
+	}
+	for i := range sd.Streams {
+		d := &sd.Streams[i]
+		info.streams[d.StreamName] = d
+	}
+	s.services[sd.ServiceName] = info
+}
+
+// MethodInfo contains the information of an RPC including its method name and type.
+type MethodInfo struct {
+	// Name is the method name only, without the service name or package name.
+	Name string
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service.
+type ServiceInfo struct {
+	Methods []MethodInfo
+	// Metadata is the metadata specified in ServiceDesc when registering service.
+	Metadata any
+}
+
+// GetServiceInfo returns a map from service names to ServiceInfo.
+// Service names include the package names, in the form of <package>.<service>.
+func (s *Server) GetServiceInfo() map[string]ServiceInfo {
+	ret := make(map[string]ServiceInfo)
+	for n, srv := range s.services {
+		methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
+		for m := range srv.methods {
+			methods = append(methods, MethodInfo{
+				Name:           m,
+				IsClientStream: false,
+				IsServerStream: false,
+			})
+		}
+		for m, d := range srv.streams {
+			methods = append(methods, MethodInfo{
+				Name:           m,
+				IsClientStream: d.ClientStreams,
+				IsServerStream: d.ServerStreams,
+			})
+		}
+
+		ret[n] = ServiceInfo{
+			Methods:  methods,
+			Metadata: srv.mdata,
+		}
+	}
+	return ret
+}
+
+// ErrServerStopped indicates that the operation is now illegal because
+// the server has been stopped.
+var ErrServerStopped = errors.New("grpc: the server has been stopped")
+
+type listenSocket struct {
+	net.Listener
+	channelz *channelz.Socket
+}
+
+func (l *listenSocket) Close() error {
+	err := l.Listener.Close()
+	channelz.RemoveEntry(l.channelz.ID)
+	channelz.Info(logger, l.channelz, "ListenSocket deleted")
+	return err
+}
+
+// Serve accepts incoming connections on the listener lis, creating a new
+// ServerTransport and service goroutine for each. The service goroutines
+// read gRPC requests and then call the registered handlers to reply to them.
+// Serve returns when lis.Accept fails with fatal errors.  lis will be closed when
+// this method returns.
+// Serve will return a non-nil error unless Stop or GracefulStop is called.
+//
+// Note: All supported releases of Go (as of December 2023) override the OS
+// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+// with OS defaults for keepalive time and interval, callers need to do the
+// following two things:
+//   - pass a net.Listener created by calling the Listen method on a
+//     net.ListenConfig with the `KeepAlive` field set to a negative value. This
+//     will result in the Go standard library not overriding OS defaults for TCP
+//     keepalive interval and time. But this will also result in the Go standard
+//     library not enabling TCP keepalives by default.
+//   - override the Accept method on the passed in net.Listener and set the
+//     SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults.
+func (s *Server) Serve(lis net.Listener) error {
+	s.mu.Lock()
+	s.printf("serving")
+	s.serve = true
+	if s.lis == nil {
+		// Serve called after Stop or GracefulStop.
+		s.mu.Unlock()
+		lis.Close()
+		return ErrServerStopped
+	}
+
+	s.serveWG.Add(1)
+	defer func() {
+		s.serveWG.Done()
+		if s.quit.HasFired() {
+			// Stop or GracefulStop called; block until done and return nil.
+			<-s.done.Done()
+		}
+	}()
+
+	ls := &listenSocket{
+		Listener: lis,
+		channelz: channelz.RegisterSocket(&channelz.Socket{
+			SocketType:    channelz.SocketTypeListen,
+			Parent:        s.channelz,
+			RefName:       lis.Addr().String(),
+			LocalAddr:     lis.Addr(),
+			SocketOptions: channelz.GetSocketOption(lis)},
+		),
+	}
+	s.lis[ls] = true
+
+	defer func() {
+		s.mu.Lock()
+		if s.lis != nil && s.lis[ls] {
+			ls.Close()
+			delete(s.lis, ls)
+		}
+		s.mu.Unlock()
+	}()
+
+	s.mu.Unlock()
+	channelz.Info(logger, ls.channelz, "ListenSocket created")
+
+	var tempDelay time.Duration // how long to sleep on accept failure
+	for {
+		rawConn, err := lis.Accept()
+		if err != nil {
+			if ne, ok := err.(interface {
+				Temporary() bool
+			}); ok && ne.Temporary() {
+				if tempDelay == 0 {
+					tempDelay = 5 * time.Millisecond
+				} else {
+					tempDelay *= 2
+				}
+				if max := 1 * time.Second; tempDelay > max {
+					tempDelay = max
+				}
+				s.mu.Lock()
+				s.printf("Accept error: %v; retrying in %v", err, tempDelay)
+				s.mu.Unlock()
+				timer := time.NewTimer(tempDelay)
+				select {
+				case <-timer.C:
+				case <-s.quit.Done():
+					timer.Stop()
+					return nil
+				}
+				continue
+			}
+			s.mu.Lock()
+			s.printf("done serving; Accept = %v", err)
+			s.mu.Unlock()
+
+			if s.quit.HasFired() {
+				return nil
+			}
+			return err
+		}
+		tempDelay = 0
+		// Start a new goroutine to deal with rawConn so we don't stall this Accept
+		// loop goroutine.
+		//
+		// Make sure we account for the goroutine so GracefulStop doesn't nil out
+		// s.conns before this conn can be added.
+		s.serveWG.Add(1)
+		go func() {
+			s.handleRawConn(lis.Addr().String(), rawConn)
+			s.serveWG.Done()
+		}()
+	}
+}
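+
+// Editorial sketch (not upstream code) of the listener setup described in the
+// Serve documentation above: a negative KeepAlive stops the standard library
+// from overriding OS defaults, after which the caller must still wrap Accept
+// to set SO_KEEPALIVE itself:
+//
+//	lc := net.ListenConfig{KeepAlive: -1}
+//	lis, err := lc.Listen(context.Background(), "tcp", ":50051")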
+
+// handleRawConn forks a goroutine to handle a just-accepted connection that
+// has not had any I/O performed on it yet.
+func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
+	if s.quit.HasFired() {
+		rawConn.Close()
+		return
+	}
+	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
+
+	// Finish handshaking (HTTP2)
+	st := s.newHTTP2Transport(rawConn)
+	rawConn.SetDeadline(time.Time{})
+	if st == nil {
+		return
+	}
+
+	if cc, ok := rawConn.(interface {
+		PassServerTransport(transport.ServerTransport)
+	}); ok {
+		cc.PassServerTransport(st)
+	}
+
+	if !s.addConn(lisAddr, st) {
+		return
+	}
+	go func() {
+		s.serveStreams(context.Background(), st, rawConn)
+		s.removeConn(lisAddr, st)
+	}()
+}
+
+// newHTTP2Transport sets up an HTTP/2 transport (using the
+// gRPC http2 server transport in transport/http2_server.go).
+func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
+	config := &transport.ServerConfig{
+		MaxStreams:            s.opts.maxConcurrentStreams,
+		ConnectionTimeout:     s.opts.connectionTimeout,
+		Credentials:           s.opts.creds,
+		InTapHandle:           s.opts.inTapHandle,
+		StatsHandlers:         s.opts.statsHandlers,
+		KeepaliveParams:       s.opts.keepaliveParams,
+		KeepalivePolicy:       s.opts.keepalivePolicy,
+		InitialWindowSize:     s.opts.initialWindowSize,
+		InitialConnWindowSize: s.opts.initialConnWindowSize,
+		WriteBufferSize:       s.opts.writeBufferSize,
+		ReadBufferSize:        s.opts.readBufferSize,
+		SharedWriteBuffer:     s.opts.sharedWriteBuffer,
+		ChannelzParent:        s.channelz,
+		MaxHeaderListSize:     s.opts.maxHeaderListSize,
+		HeaderTableSize:       s.opts.headerTableSize,
+		BufferPool:            s.opts.bufferPool,
+	}
+	st, err := transport.NewServerTransport(c, config)
+	if err != nil {
+		s.mu.Lock()
+		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
+		s.mu.Unlock()
+		// ErrConnDispatched means that the connection was dispatched away from
+		// gRPC; those connections should be left open.
+		if err != credentials.ErrConnDispatched {
+			// Don't log on ErrConnDispatched and io.EOF to prevent log spam.
+			if err != io.EOF {
+				channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err)
+			}
+			c.Close()
+		}
+		return nil
+	}
+
+	return st
+}
+
+func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
+	ctx = transport.SetConnection(ctx, rawConn)
+	ctx = peer.NewContext(ctx, st.Peer())
+	for _, sh := range s.opts.statsHandlers {
+		ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
+			RemoteAddr: st.Peer().Addr,
+			LocalAddr:  st.Peer().LocalAddr,
+		})
+		sh.HandleConn(ctx, &stats.ConnBegin{})
+	}
+
+	defer func() {
+		st.Close(errors.New("finished serving streams for the server transport"))
+		for _, sh := range s.opts.statsHandlers {
+			sh.HandleConn(ctx, &stats.ConnEnd{})
+		}
+	}()
+
+	streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
+	st.HandleStreams(ctx, func(stream *transport.Stream) {
+		s.handlersWG.Add(1)
+		streamQuota.acquire()
+		f := func() {
+			defer streamQuota.release()
+			defer s.handlersWG.Done()
+			s.handleStream(st, stream)
+		}
+
+		if s.opts.numServerWorkers > 0 {
+			select {
+			case s.serverWorkerChannel <- f:
+				return
+			default:
+				// If all stream workers are busy, fallback to the default code path.
+			}
+		}
+		go f()
+	})
+}
+
+var _ http.Handler = (*Server)(nil)
+
+// ServeHTTP implements the Go standard library's http.Handler
+// interface by responding to the gRPC request r, by looking up
+// the requested gRPC method in the gRPC server s.
+//
+// The provided HTTP request must have arrived on an HTTP/2
+// connection. When using the Go standard library's server,
+// practically this means that the Request must also have arrived
+// over TLS.
+//
+// To share one port (such as 443 for https) between gRPC and an
+// existing http.Handler, use a root http.Handler such as:
+//
+//	if r.ProtoMajor == 2 && strings.HasPrefix(
+//		r.Header.Get("Content-Type"), "application/grpc") {
+//		grpcServer.ServeHTTP(w, r)
+//	} else {
+//		yourMux.ServeHTTP(w, r)
+//	}
+//
+// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
+// separate from grpc-go's HTTP/2 server. Performance and features may vary
+// between the two paths. ServeHTTP does not support some gRPC features
+// available through grpc-go's HTTP/2 server.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
+	if err != nil {
+		// Errors returned from transport.NewServerHandlerTransport have
+		// already been written to w.
+		return
+	}
+	if !s.addConn(listenerAddressForServeHTTP, st) {
+		return
+	}
+	defer s.removeConn(listenerAddressForServeHTTP, st)
+	s.serveStreams(r.Context(), st, nil)
+}
+
+func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.conns == nil {
+		st.Close(errors.New("Server.addConn called when server has already been stopped"))
+		return false
+	}
+	if s.drain {
+		// Transport added after we drained our existing conns: drain it
+		// immediately.
+		st.Drain("")
+	}
+
+	if s.conns[addr] == nil {
+		// Create a map entry if this is the first connection on this listener.
+		s.conns[addr] = make(map[transport.ServerTransport]bool)
+	}
+	s.conns[addr][st] = true
+	return true
+}
+
+func (s *Server) removeConn(addr string, st transport.ServerTransport) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	conns := s.conns[addr]
+	if conns != nil {
+		delete(conns, st)
+		if len(conns) == 0 {
+			// If the last connection for this address is being removed, also
+			// remove the map entry corresponding to the address. This is used
+			// in GracefulStop() when waiting for all connections to be closed.
+			delete(s.conns, addr)
+		}
+		s.cv.Broadcast()
+	}
+}
+
+func (s *Server) incrCallsStarted() {
+	s.channelz.ServerMetrics.CallsStarted.Add(1)
+	s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
+}
+
+func (s *Server) incrCallsSucceeded() {
+	s.channelz.ServerMetrics.CallsSucceeded.Add(1)
+}
+
+func (s *Server) incrCallsFailed() {
+	s.channelz.ServerMetrics.CallsFailed.Add(1)
+}
+
+func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
+	if err != nil {
+		channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
+		return err
+	}
+
+	compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
+	if err != nil {
+		data.Free()
+		channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
+		return err
+	}
+
+	hdr, payload := msgHeader(data, compData, pf)
+
+	defer func() {
+		compData.Free()
+		data.Free()
+		// payload does not need to be freed here; it is either data or compData,
+		// both of which are already freed above.
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
+	// TODO(dfawley): should we be checking len(data) instead?
+	if payloadLen > s.opts.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
+	}
+	err = t.Write(stream, hdr, payload, opts)
+	if err == nil {
+		if len(s.opts.statsHandlers) != 0 {
+			for _, sh := range s.opts.statsHandlers {
+				sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
+			}
+		}
+	}
+	return err
+}
+
+// chainUnaryServerInterceptors chains all unary server interceptors into one.
+func chainUnaryServerInterceptors(s *Server) {
+	// Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will
+	// be executed before any other chained interceptors.
+	interceptors := s.opts.chainUnaryInts
+	if s.opts.unaryInt != nil {
+		interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...)
+	}
+
+	var chainedInt UnaryServerInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = chainUnaryInterceptors(interceptors)
+	}
+
+	s.opts.unaryInt = chainedInt
+}
+
+func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
+	return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) {
+		return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
+	}
+}
+
+func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
+	if curr == len(interceptors)-1 {
+		return finalHandler
+	}
+	return func(ctx context.Context, req any) (any, error) {
+		return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
+	}
+}
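+
+// Editorial note (not upstream): for interceptors [a, b, c], the recursion
+// above unrolls to a(ctx, req, info, h1), where h1 invokes b(ctx, req, info, h2)
+// and h2 invokes c(ctx, req, info, finalHandler); interceptors therefore run
+// left to right, with the real handler innermost.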
+
+func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
+	shs := s.opts.statsHandlers
+	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+		if channelz.IsOn() {
+			s.incrCallsStarted()
+		}
+		var statsBegin *stats.Begin
+		for _, sh := range shs {
+			beginTime := time.Now()
+			statsBegin = &stats.Begin{
+				BeginTime:      beginTime,
+				IsClientStream: false,
+				IsServerStream: false,
+			}
+			sh.HandleRPC(ctx, statsBegin)
+		}
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		}
+		// The deferred error handling for tracing, stats handler and channelz are
+		// combined into one function to reduce stack usage -- a defer takes ~56-64
+		// bytes on the stack, so overflowing the stack will require a stack
+		// re-allocation, which is expensive.
+		//
+		// To maintain behavior similar to separate deferred statements, statements
+		// should be executed in the reverse order. That is, tracing first, stats
+		// handler second, and channelz last. Note that panics *within* defers will
+		// lead to different behavior, but that's an acceptable compromise; that
+		// would be undefined behavior territory anyway.
+		defer func() {
+			if trInfo != nil {
+				if err != nil && err != io.EOF {
+					trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+					trInfo.tr.SetError()
+				}
+				trInfo.tr.Finish()
+			}
+
+			for _, sh := range shs {
+				end := &stats.End{
+					BeginTime: statsBegin.BeginTime,
+					EndTime:   time.Now(),
+				}
+				if err != nil && err != io.EOF {
+					end.Error = toRPCErr(err)
+				}
+				sh.HandleRPC(ctx, end)
+			}
+
+			if channelz.IsOn() {
+				if err != nil && err != io.EOF {
+					s.incrCallsFailed()
+				} else {
+					s.incrCallsSucceeded()
+				}
+			}
+		}()
+	}
+	var binlogs []binarylog.MethodLogger
+	if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
+		binlogs = append(binlogs, ml)
+	}
+	if s.opts.binaryLogger != nil {
+		if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
+			binlogs = append(binlogs, ml)
+		}
+	}
+	if len(binlogs) != 0 {
+		md, _ := metadata.FromIncomingContext(ctx)
+		logEntry := &binarylog.ClientHeader{
+			Header:     md,
+			MethodName: stream.Method(),
+			PeerAddr:   nil,
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			logEntry.Timeout = time.Until(deadline)
+			if logEntry.Timeout < 0 {
+				logEntry.Timeout = 0
+			}
+		}
+		if a := md[":authority"]; len(a) > 0 {
+			logEntry.Authority = a[0]
+		}
+		if peer, ok := peer.FromContext(ctx); ok {
+			logEntry.PeerAddr = peer.Addr
+		}
+		for _, binlog := range binlogs {
+			binlog.Log(ctx, logEntry)
+		}
+	}
+
+	// comp and cp are used for compression.  decomp and dc are used for
+	// decompression.  If comp and decomp are both set, they are the same;
+	// however, they are kept separate to ensure that at most one of the
+	// compressor/decompressor variable pairs is set for use later.
+	var comp, decomp encoding.Compressor
+	var cp Compressor
+	var dc Decompressor
+	var sendCompressorName string
+
+	// If dc is set and matches the stream's compression, use it.  Otherwise, try
+	// to find a matching registered compressor for decomp.
+	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
+		dc = s.opts.dc
+	} else if rc != "" && rc != encoding.Identity {
+		decomp = encoding.GetCompressor(rc)
+		if decomp == nil {
+			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
+			t.WriteStatus(stream, st)
+			return st.Err()
+		}
+	}
+
+	// If cp is set, use it.  Otherwise, attempt to compress the response using
+	// the incoming message compression method.
+	//
+	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
+	if s.opts.cp != nil {
+		cp = s.opts.cp
+		sendCompressorName = cp.Type()
+	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
+		// Legacy compressor not specified; attempt to respond with same encoding.
+		comp = encoding.GetCompressor(rc)
+		if comp != nil {
+			sendCompressorName = comp.Name()
+		}
+	}
+
+	if sendCompressorName != "" {
+		if err := stream.SetSendCompress(sendCompressorName); err != nil {
+			return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
+		}
+	}
+
+	var payInfo *payloadInfo
+	if len(shs) != 0 || len(binlogs) != 0 {
+		payInfo = &payloadInfo{}
+		defer payInfo.free()
+	}
+
+	d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
+	if err != nil {
+		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
+			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+		}
+		return err
+	}
+	defer d.Free()
+	if channelz.IsOn() {
+		t.IncrMsgRecv()
+	}
+	df := func(v any) error {
+		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
+			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
+		}
+
+		for _, sh := range shs {
+			sh.HandleRPC(ctx, &stats.InPayload{
+				RecvTime:         time.Now(),
+				Payload:          v,
+				Length:           d.Len(),
+				WireLength:       payInfo.compressedLength + headerLen,
+				CompressedLength: payInfo.compressedLength,
+			})
+		}
+		if len(binlogs) != 0 {
+			cm := &binarylog.ClientMessage{
+				Message: d.Materialize(),
+			}
+			for _, binlog := range binlogs {
+				binlog.Log(ctx, cm)
+			}
+		}
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
+		}
+		return nil
+	}
+	ctx = NewContextWithServerTransportStream(ctx, stream)
+	reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
+	if appErr != nil {
+		appStatus, ok := status.FromError(appErr)
+		if !ok {
+			// Convert non-status application error to a status error with code
+			// Unknown, but handle context errors specifically.
+			appStatus = status.FromContextError(appErr)
+			appErr = appStatus.Err()
+		}
+		if trInfo != nil {
+			trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
+			trInfo.tr.SetError()
+		}
+		if e := t.WriteStatus(stream, appStatus); e != nil {
+			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+		}
+		if len(binlogs) != 0 {
+			if h, _ := stream.Header(); h.Len() > 0 {
+			// Only log serverHeader if there was a header. Otherwise it can
+			// be trailer only.
+				sh := &binarylog.ServerHeader{
+					Header: h,
+				}
+				for _, binlog := range binlogs {
+					binlog.Log(ctx, sh)
+				}
+			}
+			st := &binarylog.ServerTrailer{
+				Trailer: stream.Trailer(),
+				Err:     appErr,
+			}
+			for _, binlog := range binlogs {
+				binlog.Log(ctx, st)
+			}
+		}
+		return appErr
+	}
+	if trInfo != nil {
+		trInfo.tr.LazyLog(stringer("OK"), false)
+	}
+	opts := &transport.Options{Last: true}
+
+	// Server handler could have set new compressor by calling SetSendCompressor.
+	// In case it is set, we need to use it for compressing outbound message.
+	if stream.SendCompress() != sendCompressorName {
+		comp = encoding.GetCompressor(stream.SendCompress())
+	}
+	if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil {
+		if err == io.EOF {
+			// The entire stream is done (for unary RPC only).
+			return err
+		}
+		if sts, ok := status.FromError(err); ok {
+			if e := t.WriteStatus(stream, sts); e != nil {
+				channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+			}
+		} else {
+			switch st := err.(type) {
+			case transport.ConnectionError:
+				// Nothing to do here.
+			default:
+				panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
+			}
+		}
+		if len(binlogs) != 0 {
+			h, _ := stream.Header()
+			sh := &binarylog.ServerHeader{
+				Header: h,
+			}
+			st := &binarylog.ServerTrailer{
+				Trailer: stream.Trailer(),
+				Err:     appErr,
+			}
+			for _, binlog := range binlogs {
+				binlog.Log(ctx, sh)
+				binlog.Log(ctx, st)
+			}
+		}
+		return err
+	}
+	if len(binlogs) != 0 {
+		h, _ := stream.Header()
+		sh := &binarylog.ServerHeader{
+			Header: h,
+		}
+		sm := &binarylog.ServerMessage{
+			Message: reply,
+		}
+		for _, binlog := range binlogs {
+			binlog.Log(ctx, sh)
+			binlog.Log(ctx, sm)
+		}
+	}
+	if channelz.IsOn() {
+		t.IncrMsgSent()
+	}
+	if trInfo != nil {
+		trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
+	}
+	// TODO: Should we be logging if writing status failed here, like above?
+	// Should the logging be in WriteStatus?  Should we ignore the WriteStatus
+	// error or allow the stats handler to see it?
+	if len(binlogs) != 0 {
+		st := &binarylog.ServerTrailer{
+			Trailer: stream.Trailer(),
+			Err:     appErr,
+		}
+		for _, binlog := range binlogs {
+			binlog.Log(ctx, st)
+		}
+	}
+	return t.WriteStatus(stream, statusOK)
+}
+
+// chainStreamServerInterceptors chains all stream server interceptors into one.
+func chainStreamServerInterceptors(s *Server) {
+	// Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
+	// be executed before any other chained interceptors.
+	interceptors := s.opts.chainStreamInts
+	if s.opts.streamInt != nil {
+		interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
+	}
+
+	var chainedInt StreamServerInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = chainStreamInterceptors(interceptors)
+	}
+
+	s.opts.streamInt = chainedInt
+}
+
+func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
+	return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
+		return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
+	}
+}
+
+func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
+	if curr == len(interceptors)-1 {
+		return finalHandler
+	}
+	return func(srv any, stream ServerStream) error {
+		return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
+	}
+}
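+
+// For illustration, the chaining above is what makes multiple registered
+// stream interceptors behave as one. A minimal caller-side sketch (the
+// logging interceptor below is illustrative, not part of this package):
+//
+//	logging := func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+//		log.Printf("stream start: %s", info.FullMethod)
+//		err := handler(srv, ss)
+//		log.Printf("stream end: %s err=%v", info.FullMethod, err)
+//		return err
+//	}
+//	s := grpc.NewServer(grpc.ChainStreamInterceptor(logging))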
+
+func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
+	if channelz.IsOn() {
+		s.incrCallsStarted()
+	}
+	shs := s.opts.statsHandlers
+	var statsBegin *stats.Begin
+	if len(shs) != 0 {
+		beginTime := time.Now()
+		statsBegin = &stats.Begin{
+			BeginTime:      beginTime,
+			IsClientStream: sd.ClientStreams,
+			IsServerStream: sd.ServerStreams,
+		}
+		for _, sh := range shs {
+			sh.HandleRPC(ctx, statsBegin)
+		}
+	}
+	ctx = NewContextWithServerTransportStream(ctx, stream)
+	ss := &serverStream{
+		ctx:                   ctx,
+		t:                     t,
+		s:                     stream,
+		p:                     &parser{r: stream, bufferPool: s.opts.bufferPool},
+		codec:                 s.getCodec(stream.ContentSubtype()),
+		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
+		maxSendMessageSize:    s.opts.maxSendMessageSize,
+		trInfo:                trInfo,
+		statsHandler:          shs,
+	}
+
+	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+		// See comment in processUnaryRPC on defers.
+		defer func() {
+			if trInfo != nil {
+				ss.mu.Lock()
+				if err != nil && err != io.EOF {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+				ss.trInfo.tr.Finish()
+				ss.trInfo.tr = nil
+				ss.mu.Unlock()
+			}
+
+			if len(shs) != 0 {
+				end := &stats.End{
+					BeginTime: statsBegin.BeginTime,
+					EndTime:   time.Now(),
+				}
+				if err != nil && err != io.EOF {
+					end.Error = toRPCErr(err)
+				}
+				for _, sh := range shs {
+					sh.HandleRPC(ctx, end)
+				}
+			}
+
+			if channelz.IsOn() {
+				if err != nil && err != io.EOF {
+					s.incrCallsFailed()
+				} else {
+					s.incrCallsSucceeded()
+				}
+			}
+		}()
+	}
+
+	if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
+		ss.binlogs = append(ss.binlogs, ml)
+	}
+	if s.opts.binaryLogger != nil {
+		if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
+			ss.binlogs = append(ss.binlogs, ml)
+		}
+	}
+	if len(ss.binlogs) != 0 {
+		md, _ := metadata.FromIncomingContext(ctx)
+		logEntry := &binarylog.ClientHeader{
+			Header:     md,
+			MethodName: stream.Method(),
+			PeerAddr:   nil,
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			logEntry.Timeout = time.Until(deadline)
+			if logEntry.Timeout < 0 {
+				logEntry.Timeout = 0
+			}
+		}
+		if a := md[":authority"]; len(a) > 0 {
+			logEntry.Authority = a[0]
+		}
+		if peer, ok := peer.FromContext(ss.Context()); ok {
+			logEntry.PeerAddr = peer.Addr
+		}
+		for _, binlog := range ss.binlogs {
+			binlog.Log(ctx, logEntry)
+		}
+	}
+
+	// If dc is set and matches the stream's compression, use it.  Otherwise, try
+	// to find a matching registered compressor for decomp.
+	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
+		ss.dc = s.opts.dc
+	} else if rc != "" && rc != encoding.Identity {
+		ss.decomp = encoding.GetCompressor(rc)
+		if ss.decomp == nil {
+			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
+			t.WriteStatus(ss.s, st)
+			return st.Err()
+		}
+	}
+
+	// If cp is set, use it.  Otherwise, attempt to compress the response using
+	// the incoming message compression method.
+	//
+	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
+	if s.opts.cp != nil {
+		ss.cp = s.opts.cp
+		ss.sendCompressorName = s.opts.cp.Type()
+	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
+		// Legacy compressor not specified; attempt to respond with same encoding.
+		ss.comp = encoding.GetCompressor(rc)
+		if ss.comp != nil {
+			ss.sendCompressorName = rc
+		}
+	}
+
+	if ss.sendCompressorName != "" {
+		if err := stream.SetSendCompress(ss.sendCompressorName); err != nil {
+			return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
+		}
+	}
+
+	ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp)
+
+	if trInfo != nil {
+		trInfo.tr.LazyLog(&trInfo.firstLine, false)
+	}
+	var appErr error
+	var server any
+	if info != nil {
+		server = info.serviceImpl
+	}
+	if s.opts.streamInt == nil {
+		appErr = sd.Handler(server, ss)
+	} else {
+		info := &StreamServerInfo{
+			FullMethod:     stream.Method(),
+			IsClientStream: sd.ClientStreams,
+			IsServerStream: sd.ServerStreams,
+		}
+		appErr = s.opts.streamInt(server, ss, info, sd.Handler)
+	}
+	if appErr != nil {
+		appStatus, ok := status.FromError(appErr)
+		if !ok {
+			// Convert non-status application error to a status error with code
+			// Unknown, but handle context errors specifically.
+			appStatus = status.FromContextError(appErr)
+			appErr = appStatus.Err()
+		}
+		if trInfo != nil {
+			ss.mu.Lock()
+			ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
+			ss.trInfo.tr.SetError()
+			ss.mu.Unlock()
+		}
+		if len(ss.binlogs) != 0 {
+			st := &binarylog.ServerTrailer{
+				Trailer: ss.s.Trailer(),
+				Err:     appErr,
+			}
+			for _, binlog := range ss.binlogs {
+				binlog.Log(ctx, st)
+			}
+		}
+		t.WriteStatus(ss.s, appStatus)
+		// TODO: Should we log an error from WriteStatus here and below?
+		return appErr
+	}
+	if trInfo != nil {
+		ss.mu.Lock()
+		ss.trInfo.tr.LazyLog(stringer("OK"), false)
+		ss.mu.Unlock()
+	}
+	if len(ss.binlogs) != 0 {
+		st := &binarylog.ServerTrailer{
+			Trailer: ss.s.Trailer(),
+			Err:     appErr,
+		}
+		for _, binlog := range ss.binlogs {
+			binlog.Log(ctx, st)
+		}
+	}
+	return t.WriteStatus(ss.s, statusOK)
+}
+
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
+	ctx := stream.Context()
+	ctx = contextWithServer(ctx, s)
+	var ti *traceInfo
+	if EnableTracing {
+		tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
+		ctx = newTraceContext(ctx, tr)
+		ti = &traceInfo{
+			tr: tr,
+			firstLine: firstLine{
+				client:     false,
+				remoteAddr: t.Peer().Addr,
+			},
+		}
+		if dl, ok := ctx.Deadline(); ok {
+			ti.firstLine.deadline = time.Until(dl)
+		}
+	}
+
+	sm := stream.Method()
+	if sm != "" && sm[0] == '/' {
+		sm = sm[1:]
+	}
+	pos := strings.LastIndex(sm, "/")
+	if pos == -1 {
+		if ti != nil {
+			ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
+			ti.tr.SetError()
+		}
+		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
+		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+			if ti != nil {
+				ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+				ti.tr.SetError()
+			}
+			channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
+		}
+		if ti != nil {
+			ti.tr.Finish()
+		}
+		return
+	}
+	service := sm[:pos]
+	method := sm[pos+1:]
+
+	md, _ := metadata.FromIncomingContext(ctx)
+	for _, sh := range s.opts.statsHandlers {
+		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+		sh.HandleRPC(ctx, &stats.InHeader{
+			FullMethod:  stream.Method(),
+			RemoteAddr:  t.Peer().Addr,
+			LocalAddr:   t.Peer().LocalAddr,
+			Compression: stream.RecvCompress(),
+			WireLength:  stream.HeaderWireLength(),
+			Header:      md,
+		})
+	}
+	// Set the updated context on the stream so that calls made from stream
+	// callouts work. This will be deleted once all stats handler calls come
+	// from the gRPC layer.
+	stream.SetContext(ctx)
+
+	srv, knownService := s.services[service]
+	if knownService {
+		if md, ok := srv.methods[method]; ok {
+			s.processUnaryRPC(ctx, t, stream, srv, md, ti)
+			return
+		}
+		if sd, ok := srv.streams[method]; ok {
+			s.processStreamingRPC(ctx, t, stream, srv, sd, ti)
+			return
+		}
+	}
+	// Unknown service, or known service with unknown method.
+	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
+		s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti)
+		return
+	}
+	var errDesc string
+	if !knownService {
+		errDesc = fmt.Sprintf("unknown service %v", service)
+	} else {
+		errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
+	}
+	if ti != nil {
+		ti.tr.LazyPrintf("%s", errDesc)
+		ti.tr.SetError()
+	}
+	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+		if ti != nil {
+			ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+			ti.tr.SetError()
+		}
+		channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
+	}
+	if ti != nil {
+		ti.tr.Finish()
+	}
+}
+
+// The key to save ServerTransportStream in the context.
+type streamKey struct{}
+
+// NewContextWithServerTransportStream creates a new context from ctx and
+// attaches stream to it.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
+	return context.WithValue(ctx, streamKey{}, stream)
+}
+
+// ServerTransportStream is a minimal interface that a transport stream must
+// implement. This can be used to mock an actual transport stream for tests of
+// handler code that uses, for example, grpc.SetHeader (which requires some
+// stream to be in context).
+//
+// See also NewContextWithServerTransportStream.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ServerTransportStream interface {
+	Method() string
+	SetHeader(md metadata.MD) error
+	SendHeader(md metadata.MD) error
+	SetTrailer(md metadata.MD) error
+}
+
+// ServerTransportStreamFromContext returns the ServerTransportStream saved in
+// ctx. Returns nil if the given context has no stream associated with it
+// (which implies it is not an RPC invocation context).
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
+	s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
+	return s
+}
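+
+// As the ServerTransportStream documentation above suggests, tests can inject
+// a mock stream to exercise handler code that calls grpc.SetHeader. A minimal
+// sketch (mockStream is hypothetical, not part of this package):
+//
+//	type mockStream struct{ header metadata.MD }
+//
+//	func (m *mockStream) Method() string                  { return "/test.Service/Method" }
+//	func (m *mockStream) SetHeader(md metadata.MD) error  { m.header = metadata.Join(m.header, md); return nil }
+//	func (m *mockStream) SendHeader(md metadata.MD) error { return m.SetHeader(md) }
+//	func (m *mockStream) SetTrailer(md metadata.MD) error { return nil }
+//
+//	ctx := grpc.NewContextWithServerTransportStream(context.Background(), &mockStream{})
+//	_ = grpc.SetHeader(ctx, metadata.Pairs("x-test", "1")) // observable via mockStream.header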
+
+// Stop stops the gRPC server. It immediately closes all open
+// connections and listeners.
+// It cancels all active RPCs on the server side, and the corresponding
+// pending RPCs on the client side will be notified by connection
+// errors.
+func (s *Server) Stop() {
+	s.stop(false)
+}
+
+// GracefulStop stops the gRPC server gracefully. It stops the server from
+// accepting new connections and RPCs and blocks until all the pending RPCs are
+// finished.
+func (s *Server) GracefulStop() {
+	s.stop(true)
+}
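+
+// A common shutdown pattern (a sketch; the timeout is illustrative) is to try
+// GracefulStop first and fall back to Stop if draining takes too long:
+//
+//	stopped := make(chan struct{})
+//	go func() {
+//		s.GracefulStop()
+//		close(stopped)
+//	}()
+//	select {
+//	case <-stopped:
+//	case <-time.After(10 * time.Second):
+//		s.Stop() // force-close connections that did not drain in time
+//	}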
+
+func (s *Server) stop(graceful bool) {
+	s.quit.Fire()
+	defer s.done.Fire()
+
+	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) })
+	s.mu.Lock()
+	s.closeListenersLocked()
+	// Wait for serving threads to be ready to exit.  Only then can we be sure no
+	// new conns will be created.
+	s.mu.Unlock()
+	s.serveWG.Wait()
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if graceful {
+		s.drainAllServerTransportsLocked()
+	} else {
+		s.closeServerTransportsLocked()
+	}
+
+	for len(s.conns) != 0 {
+		s.cv.Wait()
+	}
+	s.conns = nil
+
+	if s.opts.numServerWorkers > 0 {
+		// Closing the channel (only once, via grpcsync.OnceFunc) after all the
+		// connections have been closed above ensures that there are no
+		// goroutines executing the callback passed to st.HandleStreams (where
+		// the channel is written to).
+		s.serverWorkerChannelClose()
+	}
+
+	if graceful || s.opts.waitForHandlers {
+		s.handlersWG.Wait()
+	}
+
+	if s.events != nil {
+		s.events.Finish()
+		s.events = nil
+	}
+}
+
+// s.mu must be held by the caller.
+func (s *Server) closeServerTransportsLocked() {
+	for _, conns := range s.conns {
+		for st := range conns {
+			st.Close(errors.New("Server.Stop called"))
+		}
+	}
+}
+
+// s.mu must be held by the caller.
+func (s *Server) drainAllServerTransportsLocked() {
+	if !s.drain {
+		for _, conns := range s.conns {
+			for st := range conns {
+				st.Drain("graceful_stop")
+			}
+		}
+		s.drain = true
+	}
+}
+
+// s.mu must be held by the caller.
+func (s *Server) closeListenersLocked() {
+	for lis := range s.lis {
+		lis.Close()
+	}
+	s.lis = nil
+}
+
+// getCodec returns the codec for contentSubtype, which must be lowercase.
+// It never returns nil.
+func (s *Server) getCodec(contentSubtype string) baseCodec {
+	if s.opts.codec != nil {
+		return s.opts.codec
+	}
+	if contentSubtype == "" {
+		return getCodec(proto.Name)
+	}
+	codec := getCodec(contentSubtype)
+	if codec == nil {
+		logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
+		return getCodec(proto.Name)
+	}
+	return codec
+}
+
+type serverKey struct{}
+
+// serverFromContext gets the Server from the context.
+func serverFromContext(ctx context.Context) *Server {
+	s, _ := ctx.Value(serverKey{}).(*Server)
+	return s
+}
+
+// contextWithServer sets the Server in the context.
+func contextWithServer(ctx context.Context, server *Server) context.Context {
+	return context.WithValue(ctx, serverKey{}, server)
+}
+
+// isRegisteredMethod returns whether the passed in method is registered as a
+// method on the server. /service/method and service/method will match if the
+// service and method are registered on the server.
+func (s *Server) isRegisteredMethod(serviceMethod string) bool {
+	if serviceMethod != "" && serviceMethod[0] == '/' {
+		serviceMethod = serviceMethod[1:]
+	}
+	pos := strings.LastIndex(serviceMethod, "/")
+	if pos == -1 { // Invalid method name syntax.
+		return false
+	}
+	service := serviceMethod[:pos]
+	method := serviceMethod[pos+1:]
+	srv, knownService := s.services[service]
+	if knownService {
+		if _, ok := srv.methods[method]; ok {
+			return true
+		}
+		if _, ok := srv.streams[method]; ok {
+			return true
+		}
+	}
+	return false
+}
+
+// SetHeader sets the header metadata to be sent from the server to the client.
+// The context provided must be the context passed to the server's handler.
+//
+// Streaming RPCs should prefer the SetHeader method of the ServerStream.
+//
+// When called multiple times, all the provided metadata will be merged.  All
+// the metadata will be sent out when one of the following happens:
+//
+//   - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader.
+//   - The first response message is sent.  For unary handlers, this occurs when
+//     the handler returns; for streaming handlers, this can happen when the
+//     stream's SendMsg method is called.
+//   - An RPC status is sent out (error or success).  This occurs when the handler
+//     returns.
+//
+// SetHeader will fail if called after any of the events above.
+//
+// The error returned is compatible with the status package.  However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SetHeader(ctx context.Context, md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	return stream.SetHeader(md)
+}
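+
+// For example, a unary handler can attach metadata before returning (the
+// handler shape and keys below are illustrative):
+//
+//	func (s *server) GetUser(ctx context.Context, req *pb.GetUserRequest) (*pb.GetUserResponse, error) {
+//		if err := grpc.SetHeader(ctx, metadata.Pairs("x-request-id", req.GetId())); err != nil {
+//			return nil, err
+//		}
+//		return &pb.GetUserResponse{}, nil // header is flushed when the handler returns
+//	}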
+
+// SendHeader sends header metadata. It may be called at most once, and may not
+// be called after any event that causes headers to be sent (see SetHeader for
+// a complete list).  The provided md and headers set by SetHeader() will be
+// sent.
+//
+// The error returned is compatible with the status package.  However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SendHeader(ctx context.Context, md metadata.MD) error {
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	if err := stream.SendHeader(md); err != nil {
+		return toRPCErr(err)
+	}
+	return nil
+}
+
+// SetSendCompressor sets a compressor for outbound messages from the server.
+// It must not be called after any event that causes headers to be sent
+// (see ServerStream.SetHeader for the complete list). The provided compressor
+// is used when the following conditions are met:
+//
+//   - the compressor is registered via encoding.RegisterCompressor
+//   - the compressor name exists in the compressor names advertised by the
+//     client in the grpc-accept-encoding header. Use ClientSupportedCompressors
+//     to get the client-supported compressor names.
+//
+// The context provided must be the context passed to the server's handler.
+// Note that the compressor name encoding.Identity disables outbound
+// compression.
+// By default, server messages will be sent using the same compressor with
+// which request messages were sent.
+//
+// It is not safe to call SetSendCompressor concurrently with SendHeader and
+// SendMsg.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func SetSendCompressor(ctx context.Context, name string) error {
+	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
+	if !ok || stream == nil {
+		return fmt.Errorf("failed to fetch the stream from the given context")
+	}
+
+	if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil {
+		return fmt.Errorf("unable to set send compressor: %w", err)
+	}
+
+	return stream.SetSendCompress(name)
+}
+
+// ClientSupportedCompressors returns compressor names advertised by the client
+// via the grpc-accept-encoding header.
+//
+// The context provided must be the context passed to the server's handler.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
+	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
+	if !ok || stream == nil {
+		return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
+	}
+
+	return stream.ClientAdvertisedCompressors(), nil
+}
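+
+// Together with SetSendCompressor, this allows per-RPC negotiation. A sketch
+// (gzip support must be registered, e.g. by importing
+// google.golang.org/grpc/encoding/gzip):
+//
+//	if comps, err := grpc.ClientSupportedCompressors(ctx); err == nil {
+//		for _, c := range comps {
+//			if c == "gzip" {
+//				_ = grpc.SetSendCompressor(ctx, "gzip") // before any headers are sent
+//				break
+//			}
+//		}
+//	}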
+
+// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
+// When called more than once, all the provided metadata will be merged.
+//
+// The error returned is compatible with the status package.  However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SetTrailer(ctx context.Context, md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	return stream.SetTrailer(md)
+}
+
+// Method returns the method string for the server context.  The returned
+// string is in the format of "/service/method".
+func Method(ctx context.Context) (string, bool) {
+	s := ServerTransportStreamFromContext(ctx)
+	if s == nil {
+		return "", false
+	}
+	return s.Method(), true
+}
+
+// validateSendCompressor returns an error when the given compressor name
+// cannot be handled by the server or the client based on the advertised
+// compressors.
+func validateSendCompressor(name string, clientCompressors []string) error {
+	if name == encoding.Identity {
+		return nil
+	}
+
+	if !grpcutil.IsCompressorNameRegistered(name) {
+		return fmt.Errorf("compressor not registered %q", name)
+	}
+
+	for _, c := range clientCompressors {
+		if c == name {
+			return nil // found match
+		}
+	}
+	return fmt.Errorf("client does not support compressor %q", name)
+}
+
+// atomicSemaphore implements a blocking, counting semaphore. acquire should be
+// called synchronously; release may be called asynchronously.
+type atomicSemaphore struct {
+	n    atomic.Int64
+	wait chan struct{}
+}
+
+func (q *atomicSemaphore) acquire() {
+	if q.n.Add(-1) < 0 {
+		// We ran out of quota.  Block until a release happens.
+		<-q.wait
+	}
+}
+
+func (q *atomicSemaphore) release() {
+	// N.B. the "<= 0" check below should allow for this to work with multiple
+	// concurrent calls to acquire, but also note that with synchronous calls to
+	// acquire, as our system does, n will never be less than -1.  There are
+	// fairness issues (queuing) to consider if this was to be generalized.
+	if q.n.Add(1) <= 0 {
+		// An acquire was waiting on us.  Unblock it.
+		q.wait <- struct{}{}
+	}
+}
+
+func newHandlerQuota(n uint32) *atomicSemaphore {
+	a := &atomicSemaphore{wait: make(chan struct{}, 1)}
+	a.n.Store(int64(n))
+	return a
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/service_config.go b/hack/tools/vendor/google.golang.org/grpc/service_config.go
new file mode 100644
index 0000000000..2671c5ef69
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/service_config.go
@@ -0,0 +1,356 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/pickfirst"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/balancer/gracefulswitch"
+	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through the name resolver, as specified here:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type MethodConfig = internalserviceconfig.MethodConfig
+
+// ServiceConfig is provided by the service provider and contains parameters for how
+// clients that connect to the service should behave.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through the name resolver, as specified here:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type ServiceConfig struct {
+	serviceconfig.Config
+
+	// lbConfig is the service config's load balancing configuration.  If
+	// lbConfig and LB are both present, lbConfig will be used.
+	lbConfig serviceconfig.LoadBalancingConfig
+
+	// Methods contains a map for the methods in this service.  If there is an
+	// exact match for a method (i.e. /service/method) in the map, use the
+	// corresponding MethodConfig.  If there's no exact match, look for the
+	// default config for the service (/service/) and use the corresponding
+	// MethodConfig if it exists.  Otherwise, the method has no MethodConfig to
+	// use.
+	Methods map[string]MethodConfig
+
+	// If a retryThrottlingPolicy is provided, gRPC will automatically throttle
+	// retry attempts and hedged RPCs when the client’s ratio of failures to
+	// successes exceeds a threshold.
+	//
+	// For each server name, the gRPC client will maintain a token_count which is
+	// initially set to maxTokens, and can take values between 0 and maxTokens.
+	//
+	// Every outgoing RPC (regardless of service or method invoked) will change
+	// token_count as follows:
+	//
+	//   - Every failed RPC will decrement the token_count by 1.
+	//   - Every successful RPC will increment the token_count by tokenRatio.
+	//
+	// If token_count is less than or equal to maxTokens / 2, then RPCs will not
+	// be retried and hedged RPCs will not be sent.
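+	//
+	// As a worked example (the numbers are illustrative, not defaults): with
+	// maxTokens=10 and tokenRatio=0.1, five consecutive failures drop
+	// token_count from 10 to 5, which is <= maxTokens/2, so retries and
+	// hedging stop; ten subsequent successes raise token_count back to 6,
+	// re-enabling them.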
+	retryThrottling *retryThrottlingPolicy
+	// healthCheckConfig must be set as one of the requirements to enable LB
+	// channel health checking.
+	healthCheckConfig *healthCheckConfig
+	// rawJSONString stores the service config JSON string that was parsed into
+	// this service config struct.
+	rawJSONString string
+}
+
+// healthCheckConfig defines the go-native version of the LB channel health check config.
+type healthCheckConfig struct {
+	// ServiceName is the service name to use in the health-checking request.
+	ServiceName string
+}
+
+type jsonRetryPolicy struct {
+	MaxAttempts          int
+	InitialBackoff       internalserviceconfig.Duration
+	MaxBackoff           internalserviceconfig.Duration
+	BackoffMultiplier    float64
+	RetryableStatusCodes []codes.Code
+}
+
+// retryThrottlingPolicy defines the go-native version of the retry throttling
+// policy defined by the service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type retryThrottlingPolicy struct {
+	// The number of tokens starts at maxTokens. The token_count will always be
+	// between 0 and maxTokens.
+	//
+	// This field is required and must be greater than zero.
+	MaxTokens float64
+	// The number of tokens to add on each successful RPC. Typically this will
+	// be some number between 0 and 1, e.g., 0.1.
+	//
+	// This field is required and must be greater than zero. Up to 3 decimal
+	// places are supported.
+	TokenRatio float64
+}
+
+type jsonName struct {
+	Service string
+	Method  string
+}
+
+var (
+	errDuplicatedName             = errors.New("duplicated name")
+	errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'")
+)
+
+func (j jsonName) generatePath() (string, error) {
+	if j.Service == "" {
+		if j.Method != "" {
+			return "", errEmptyServiceNonEmptyMethod
+		}
+		return "", nil
+	}
+	res := "/" + j.Service + "/"
+	if j.Method != "" {
+		res += j.Method
+	}
+	return res, nil
+}
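+
+// For example (illustrative inputs): {Service: "foo.Bar", Method: "Baz"}
+// yields "/foo.Bar/Baz"; {Service: "foo.Bar"} yields the service default
+// "/foo.Bar/"; an empty service with a non-empty method is an error.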
+
+// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
+type jsonMC struct {
+	Name                    *[]jsonName
+	WaitForReady            *bool
+	Timeout                 *internalserviceconfig.Duration
+	MaxRequestMessageBytes  *int64
+	MaxResponseMessageBytes *int64
+	RetryPolicy             *jsonRetryPolicy
+}
+
+// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
+type jsonSC struct {
+	LoadBalancingPolicy *string
+	LoadBalancingConfig *json.RawMessage
+	MethodConfig        *[]jsonMC
+	RetryThrottling     *retryThrottlingPolicy
+	HealthCheckConfig   *healthCheckConfig
+}
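+
+// A minimal service config JSON accepted by parseServiceConfig below (a
+// sketch following the service_config.md schema referenced above; the values
+// are illustrative):
+//
+//	{
+//	  "loadBalancingConfig": [{"round_robin": {}}],
+//	  "methodConfig": [{
+//	    "name": [{"service": "foo.Bar"}],
+//	    "waitForReady": true,
+//	    "timeout": "1s",
+//	    "retryPolicy": {
+//	      "maxAttempts": 3,
+//	      "initialBackoff": "0.1s",
+//	      "maxBackoff": "1s",
+//	      "backoffMultiplier": 2,
+//	      "retryableStatusCodes": ["UNAVAILABLE"]
+//	    }
+//	  }]
+//	}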
+
+func init() {
+	internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult {
+		return parseServiceConfig(js, defaultMaxCallAttempts)
+	}
+}
+func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
+	if len(js) == 0 {
+		return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
+	}
+	var rsc jsonSC
+	err := json.Unmarshal([]byte(js), &rsc)
+	if err != nil {
+		logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
+		return &serviceconfig.ParseResult{Err: err}
+	}
+	sc := ServiceConfig{
+		Methods:           make(map[string]MethodConfig),
+		retryThrottling:   rsc.RetryThrottling,
+		healthCheckConfig: rsc.HealthCheckConfig,
+		rawJSONString:     js,
+	}
+	c := rsc.LoadBalancingConfig
+	if c == nil {
+		name := pickfirst.Name
+		if rsc.LoadBalancingPolicy != nil {
+			name = *rsc.LoadBalancingPolicy
+		}
+		if balancer.Get(name) == nil {
+			name = pickfirst.Name
+		}
+		cfg := []map[string]any{{name: struct{}{}}}
+		strCfg, err := json.Marshal(cfg)
+		if err != nil {
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)}
+		}
+		r := json.RawMessage(strCfg)
+		c = &r
+	}
+	cfg, err := gracefulswitch.ParseConfig(*c)
+	if err != nil {
+		return &serviceconfig.ParseResult{Err: err}
+	}
+	sc.lbConfig = cfg
+
+	if rsc.MethodConfig == nil {
+		return &serviceconfig.ParseResult{Config: &sc}
+	}
+
+	paths := map[string]struct{}{}
+	for _, m := range *rsc.MethodConfig {
+		if m.Name == nil {
+			continue
+		}
+
+		mc := MethodConfig{
+			WaitForReady: m.WaitForReady,
+			Timeout:      (*time.Duration)(m.Timeout),
+		}
+		if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil {
+			logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
+			return &serviceconfig.ParseResult{Err: err}
+		}
+		if m.MaxRequestMessageBytes != nil {
+			if *m.MaxRequestMessageBytes > int64(maxInt) {
+				mc.MaxReqSize = newInt(maxInt)
+			} else {
+				mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes))
+			}
+		}
+		if m.MaxResponseMessageBytes != nil {
+			if *m.MaxResponseMessageBytes > int64(maxInt) {
+				mc.MaxRespSize = newInt(maxInt)
+			} else {
+				mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
+			}
+		}
+		for i, n := range *m.Name {
+			path, err := n.generatePath()
+			if err != nil {
+				logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
+				return &serviceconfig.ParseResult{Err: err}
+			}
+
+			if _, ok := paths[path]; ok {
+				err = errDuplicatedName
+				logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
+				return &serviceconfig.ParseResult{Err: err}
+			}
+			paths[path] = struct{}{}
+			sc.Methods[path] = mc
+		}
+	}
+
+	if sc.retryThrottling != nil {
+		if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 {
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)}
+		}
+		if tr := sc.retryThrottling.TokenRatio; tr <= 0 {
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)}
+		}
+	}
+	return &serviceconfig.ParseResult{Config: &sc}
+}
+
+func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) {
+	if jrp == nil {
+		return nil, nil
+	}
+
+	if jrp.MaxAttempts <= 1 ||
+		jrp.InitialBackoff <= 0 ||
+		jrp.MaxBackoff <= 0 ||
+		jrp.BackoffMultiplier <= 0 ||
+		len(jrp.RetryableStatusCodes) == 0 {
+		logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
+		return nil, nil
+	}
+
+	if jrp.MaxAttempts < maxAttempts {
+		maxAttempts = jrp.MaxAttempts
+	}
+	rp := &internalserviceconfig.RetryPolicy{
+		MaxAttempts:          maxAttempts,
+		InitialBackoff:       time.Duration(jrp.InitialBackoff),
+		MaxBackoff:           time.Duration(jrp.MaxBackoff),
+		BackoffMultiplier:    jrp.BackoffMultiplier,
+		RetryableStatusCodes: make(map[codes.Code]bool),
+	}
+	for _, code := range jrp.RetryableStatusCodes {
+		rp.RetryableStatusCodes[code] = true
+	}
+	return rp, nil
+}
+
+func min(a, b *int) *int {
+	if *a < *b {
+		return a
+	}
+	return b
+}
+
+func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
+	if mcMax == nil && doptMax == nil {
+		return &defaultVal
+	}
+	if mcMax != nil && doptMax != nil {
+		return min(mcMax, doptMax)
+	}
+	if mcMax != nil {
+		return mcMax
+	}
+	return doptMax
+}
+
+func newInt(b int) *int {
+	return &b
+}
+
+func init() {
+	internal.EqualServiceConfigForTesting = equalServiceConfig
+}
+
+// equalServiceConfig compares two configs. The rawJSONString field is ignored,
+// because it may differ only in whitespace.
+//
+// If either of them is not a *ServiceConfig, it returns false.
+func equalServiceConfig(a, b serviceconfig.Config) bool {
+	if a == nil && b == nil {
+		return true
+	}
+	aa, ok := a.(*ServiceConfig)
+	if !ok {
+		return false
+	}
+	bb, ok := b.(*ServiceConfig)
+	if !ok {
+		return false
+	}
+	aaRaw := aa.rawJSONString
+	aa.rawJSONString = ""
+	bbRaw := bb.rawJSONString
+	bb.rawJSONString = ""
+	defer func() {
+		aa.rawJSONString = aaRaw
+		bb.rawJSONString = bbRaw
+	}()
+	// Using reflect.DeepEqual instead of cmp.Equal because many balancer
+	// configs are unexported, and cmp.Equal cannot compare unexported fields
+	// from unexported structs.
+	return reflect.DeepEqual(aa, bb)
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/hack/tools/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
new file mode 100644
index 0000000000..35e7a20a04
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
@@ -0,0 +1,44 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package serviceconfig defines types and methods for operating on gRPC
+// service configs.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package serviceconfig
+
+// Config represents an opaque data structure holding a service config.
+type Config interface {
+	isServiceConfig()
+}
+
+// LoadBalancingConfig represents an opaque data structure holding a load
+// balancing config.
+type LoadBalancingConfig interface {
+	isLoadBalancingConfig()
+}
+
+// ParseResult contains a service config or an error.  Exactly one must be
+// non-nil.
+type ParseResult struct {
+	Config Config
+	Err    error
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/stats/handlers.go b/hack/tools/vendor/google.golang.org/grpc/stats/handlers.go
new file mode 100644
index 0000000000..dc03731e45
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/stats/handlers.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+	"context"
+	"net"
+)
+
+// ConnTagInfo defines the relevant information needed by connection context tagger.
+type ConnTagInfo struct {
+	// RemoteAddr is the remote address of the corresponding connection.
+	RemoteAddr net.Addr
+	// LocalAddr is the local address of the corresponding connection.
+	LocalAddr net.Addr
+}
+
+// RPCTagInfo defines the relevant information needed by RPC context tagger.
+type RPCTagInfo struct {
+	// FullMethodName is the RPC method in the format of /package.service/method.
+	FullMethodName string
+	// FailFast indicates if this RPC is failfast.
+	// This field is only valid on client side, it's always false on server side.
+	FailFast bool
+}
+
+// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
+type Handler interface {
+	// TagRPC can attach some information to the given context.
+	// The context used for the remaining lifetime of the RPC will be derived
+	// from the returned context.
+	TagRPC(context.Context, *RPCTagInfo) context.Context
+	// HandleRPC processes the RPC stats.
+	HandleRPC(context.Context, RPCStats)
+
+	// TagConn can attach some information to the given context.
+	// The returned context will be used for stats handling.
+	// For conn stats handling, the context used in HandleConn for this
+	// connection will be derived from the context returned.
+	// For RPC stats handling,
+	//  - On the server side, the context used in HandleRPC for all RPCs on
+	//    this connection will be derived from the context returned.
+	//  - On the client side, the context is not derived from the context
+	//    returned.
+	TagConn(context.Context, *ConnTagInfo) context.Context
+	// HandleConn processes the Conn stats.
+	HandleConn(context.Context, ConnStats)
+}
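+
+// A minimal Handler implementation sketch (latencyHandler is hypothetical)
+// that records per-RPC latency from the End event and is installed with the
+// grpc.StatsHandler server option:
+//
+//	type latencyHandler struct{}
+//
+//	func (latencyHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
+//	func (latencyHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
+//		if e, ok := s.(*stats.End); ok {
+//			log.Printf("rpc took %v", e.EndTime.Sub(e.BeginTime))
+//		}
+//	}
+//	func (latencyHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
+//	func (latencyHandler) HandleConn(context.Context, stats.ConnStats) {}
+//
+//	srv := grpc.NewServer(grpc.StatsHandler(latencyHandler{}))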
diff --git a/hack/tools/vendor/google.golang.org/grpc/stats/stats.go b/hack/tools/vendor/google.golang.org/grpc/stats/stats.go
new file mode 100644
index 0000000000..71195c4943
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/stats/stats.go
@@ -0,0 +1,343 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats is for collecting and reporting various network and RPC stats.
+// This package is for monitoring purposes only. All fields are read-only.
+// All APIs are experimental.
+package stats // import "google.golang.org/grpc/stats"
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"google.golang.org/grpc/metadata"
+)
+
+// RPCStats contains stats information about RPCs.
+type RPCStats interface {
+	isRPCStats()
+	// IsClient returns true if this RPCStats is from client side.
+	IsClient() bool
+}
+
+// Begin contains stats when an RPC attempt begins.
+// FailFast is only valid if this Begin is from client side.
+type Begin struct {
+	// Client is true if this Begin is from client side.
+	Client bool
+	// BeginTime is the time when the RPC attempt begins.
+	BeginTime time.Time
+	// FailFast indicates if this RPC is failfast.
+	FailFast bool
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+	// IsTransparentRetryAttempt indicates whether this attempt was initiated
+	// due to transparently retrying a previous attempt.
+	IsTransparentRetryAttempt bool
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *Begin) IsClient() bool { return s.Client }
+
+func (s *Begin) isRPCStats() {}
+
+// PickerUpdated indicates that the LB policy provided a new picker while the
+// RPC was waiting for one.
+type PickerUpdated struct{}
+
+// IsClient indicates if the stats information is from the client side. Only
+// the client side interfaces with a Picker, so this always returns true.
+func (*PickerUpdated) IsClient() bool { return true }
+
+func (*PickerUpdated) isRPCStats() {}
+
+// InPayload contains the information for an incoming payload.
+type InPayload struct {
+	// Client is true if this InPayload is from client side.
+	Client bool
+	// Payload is the payload with its original type. It may be modified after
+	// the call to HandleRPC that provides this InPayload returns, and must be
+	// copied if it is needed later.
+	Payload any
+
+	// Length is the size of the uncompressed payload data. Does not include any
+	// framing (gRPC or HTTP/2).
+	Length int
+	// CompressedLength is the size of the compressed payload data. Does not
+	// include any framing (gRPC or HTTP/2). Same as Length if compression not
+	// enabled.
+	CompressedLength int
+	// WireLength is the size of the compressed payload data plus gRPC framing.
+	// Does not include HTTP/2 framing.
+	WireLength int
+
+	// RecvTime is the time when the payload is received.
+	RecvTime time.Time
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *InPayload) IsClient() bool { return s.Client }
+
+func (s *InPayload) isRPCStats() {}
+
+// InHeader contains stats when a header is received.
+type InHeader struct {
+	// Client is true if this InHeader is from client side.
+	Client bool
+	// WireLength is the wire length of header.
+	WireLength int
+	// Compression is the compression algorithm used for the RPC.
+	Compression string
+	// Header contains the header metadata received.
+	Header metadata.MD
+
+	// The following fields are valid only if Client is false.
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// RemoteAddr is the remote address of the corresponding connection.
+	RemoteAddr net.Addr
+	// LocalAddr is the local address of the corresponding connection.
+	LocalAddr net.Addr
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *InHeader) IsClient() bool { return s.Client }
+
+func (s *InHeader) isRPCStats() {}
+
+// InTrailer contains stats when a trailer is received.
+type InTrailer struct {
+	// Client is true if this InTrailer is from client side.
+	Client bool
+	// WireLength is the wire length of trailer.
+	WireLength int
+	// Trailer contains the trailer metadata received from the server. This
+	// field is only valid if this InTrailer is from the client side.
+	Trailer metadata.MD
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *InTrailer) IsClient() bool { return s.Client }
+
+func (s *InTrailer) isRPCStats() {}
+
+// OutPayload contains the information for an outgoing payload.
+type OutPayload struct {
+	// Client is true if this OutPayload is from client side.
+	Client bool
+	// Payload is the payload with its original type. It may be modified after
+	// the call to HandleRPC that provides this OutPayload returns, and must be
+	// copied if it is needed later.
+	Payload any
+	// Length is the size of the uncompressed payload data. Does not include any
+	// framing (gRPC or HTTP/2).
+	Length int
+	// CompressedLength is the size of the compressed payload data. Does not
+	// include any framing (gRPC or HTTP/2). Same as Length if compression not
+	// enabled.
+	CompressedLength int
+	// WireLength is the size of the compressed payload data plus gRPC framing.
+	// Does not include HTTP/2 framing.
+	WireLength int
+	// SentTime is the time when the payload is sent.
+	SentTime time.Time
+}
+
+// IsClient indicates if this stats information is from client side.
+func (s *OutPayload) IsClient() bool { return s.Client }
+
+func (s *OutPayload) isRPCStats() {}
+
+// OutHeader contains stats when a header is sent.
+type OutHeader struct {
+	// Client is true if this OutHeader is from client side.
+	Client bool
+	// Compression is the compression algorithm used for the RPC.
+	Compression string
+	// Header contains the header metadata sent.
+	Header metadata.MD
+
+	// The following fields are valid only if Client is true.
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// RemoteAddr is the remote address of the corresponding connection.
+	RemoteAddr net.Addr
+	// LocalAddr is the local address of the corresponding connection.
+	LocalAddr net.Addr
+}
+
+// IsClient indicates if this stats information is from client side.
+func (s *OutHeader) IsClient() bool { return s.Client }
+
+func (s *OutHeader) isRPCStats() {}
+
+// OutTrailer contains stats when a trailer is sent.
+type OutTrailer struct {
+	// Client is true if this OutTrailer is from client side.
+	Client bool
+	// WireLength is the wire length of trailer.
+	//
+	// Deprecated: This field is never set. The length is not known when this
+	// message is emitted because the trailer fields are compressed with HPACK
+	// afterwards.
+	WireLength int
+	// Trailer contains the trailer metadata sent to the client. This
+	// field is only valid if this OutTrailer is from the server side.
+	Trailer metadata.MD
+}
+
+// IsClient indicates if this stats information is from client side.
+func (s *OutTrailer) IsClient() bool { return s.Client }
+
+func (s *OutTrailer) isRPCStats() {}
+
+// End contains stats when an RPC ends.
+type End struct {
+	// Client is true if this End is from client side.
+	Client bool
+	// BeginTime is the time when the RPC began.
+	BeginTime time.Time
+	// EndTime is the time when the RPC ends.
+	EndTime time.Time
+	// Trailer contains the trailer metadata received from the server. This
+	// field is only valid if this End is from the client side.
+	// Deprecated: use Trailer in InTrailer instead.
+	Trailer metadata.MD
+	// Error is the error the RPC ended with. It is an error generated from
+	// status.Status and can be converted back to status.Status using
+	// status.FromError if non-nil.
+	Error error
+}
+
+// IsClient indicates if this is from client side.
+func (s *End) IsClient() bool { return s.Client }
+
+func (s *End) isRPCStats() {}
+
+// ConnStats contains stats information about connections.
+type ConnStats interface {
+	isConnStats()
+	// IsClient returns true if this ConnStats is from client side.
+	IsClient() bool
+}
+
+// ConnBegin contains the stats of a connection when it is established.
+type ConnBegin struct {
+	// Client is true if this ConnBegin is from client side.
+	Client bool
+}
+
+// IsClient indicates if this is from client side.
+func (s *ConnBegin) IsClient() bool { return s.Client }
+
+func (s *ConnBegin) isConnStats() {}
+
+// ConnEnd contains the stats of a connection when it ends.
+type ConnEnd struct {
+	// Client is true if this ConnEnd is from client side.
+	Client bool
+}
+
+// IsClient indicates if this is from client side.
+func (s *ConnEnd) IsClient() bool { return s.Client }
+
+func (s *ConnEnd) isConnStats() {}
+
+type incomingTagsKey struct{}
+type outgoingTagsKey struct{}
+
+// SetTags attaches stats tagging data to the context, which will be sent in
+// the outgoing RPC with the header grpc-tags-bin.  Subsequent calls to
+// SetTags will overwrite the values from earlier calls.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release.  New uses should transmit
+// this type of data using metadata with a different, non-reserved header name
+// (i.e., one that does not begin with "grpc-").
+func SetTags(ctx context.Context, b []byte) context.Context {
+	return context.WithValue(ctx, outgoingTagsKey{}, b)
+}
+
+// Tags returns the tags from the context for the inbound RPC.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release.  New uses should transmit
+// this type of data using metadata with a different, non-reserved header name
+// (i.e., one that does not begin with "grpc-").
+func Tags(ctx context.Context) []byte {
+	b, _ := ctx.Value(incomingTagsKey{}).([]byte)
+	return b
+}
+
+// SetIncomingTags attaches stats tagging data to the context, to be read by
+// the application (not sent in outgoing RPCs).
+//
+// This is intended for gRPC-internal use ONLY.
+func SetIncomingTags(ctx context.Context, b []byte) context.Context {
+	return context.WithValue(ctx, incomingTagsKey{}, b)
+}
+
+// OutgoingTags returns the tags from the context for the outbound RPC.
+//
+// This is intended for gRPC-internal use ONLY.
+func OutgoingTags(ctx context.Context) []byte {
+	b, _ := ctx.Value(outgoingTagsKey{}).([]byte)
+	return b
+}
+
+type incomingTraceKey struct{}
+type outgoingTraceKey struct{}
+
+// SetTrace attaches stats tagging data to the context, which will be sent in
+// the outgoing RPC with the header grpc-trace-bin.  Subsequent calls to
+// SetTrace will overwrite the values from earlier calls.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release.  New uses should transmit
+// this type of data using metadata with a different, non-reserved header name
+// (i.e., one that does not begin with "grpc-").
+func SetTrace(ctx context.Context, b []byte) context.Context {
+	return context.WithValue(ctx, outgoingTraceKey{}, b)
+}
+
+// Trace returns the trace from the context for the inbound RPC.
+//
+// NOTE: this is provided only for backward compatibility with existing clients
+// and will likely be removed in an upcoming release.  New uses should transmit
+// this type of data using metadata with a different, non-reserved header name
+// (i.e., one that does not begin with "grpc-").
+func Trace(ctx context.Context) []byte {
+	b, _ := ctx.Value(incomingTraceKey{}).([]byte)
+	return b
+}
+
+// SetIncomingTrace attaches stats tagging data to the context, to be read by
+// the application (not sent in outgoing RPCs).  It is intended for
+// gRPC-internal use.
+func SetIncomingTrace(ctx context.Context, b []byte) context.Context {
+	return context.WithValue(ctx, incomingTraceKey{}, b)
+}
+
+// OutgoingTrace returns the trace from the context for the outbound RPC.  It is
+// intended for gRPC-internal use.
+func OutgoingTrace(ctx context.Context) []byte {
+	b, _ := ctx.Value(outgoingTraceKey{}).([]byte)
+	return b
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/status/status.go b/hack/tools/vendor/google.golang.org/grpc/status/status.go
new file mode 100644
index 0000000000..a93360efb8
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/status/status.go
@@ -0,0 +1,162 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package status implements errors returned by gRPC.  These errors are
+// serialized and transmitted on the wire between server and client, and allow
+// for additional data to be transmitted via the Details field in the status
+// proto.  gRPC service handlers should return an error created by this
+// package, and gRPC clients should expect a corresponding error to be
+// returned from the RPC call.
+//
+// This package upholds the invariants that a non-nil error may not
+// contain an OK code, and an OK code must result in a nil error.
+package status
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/status"
+)
+
+// Status references google.golang.org/grpc/internal/status. It represents an
+// RPC status code, message, and details.  It is immutable and should be
+// created with New, Newf, or FromProto.
+// https://godoc.org/google.golang.org/grpc/internal/status
+type Status = status.Status
+
+// New returns a Status representing c and msg.
+func New(c codes.Code, msg string) *Status {
+	return status.New(c, msg)
+}
+
+// Newf returns New(c, fmt.Sprintf(format, a...)).
+func Newf(c codes.Code, format string, a ...any) *Status {
+	return New(c, fmt.Sprintf(format, a...))
+}
+
+// Error returns an error representing c and msg.  If c is OK, returns nil.
+func Error(c codes.Code, msg string) error {
+	return New(c, msg).Err()
+}
+
+// Errorf returns Error(c, fmt.Sprintf(format, a...)).
+func Errorf(c codes.Code, format string, a ...any) error {
+	return Error(c, fmt.Sprintf(format, a...))
+}
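+
+// Handlers typically construct errors with these helpers, for example (the
+// handler shape is illustrative):
+//
+//	func (s *server) GetThing(ctx context.Context, req *pb.GetThingRequest) (*pb.Thing, error) {
+//		if req.GetId() == "" {
+//			return nil, status.Errorf(codes.InvalidArgument, "id must not be empty")
+//		}
+//		return s.lookup(ctx, req.GetId())
+//	}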
+
+// ErrorProto returns an error representing s.  If s.Code is OK, returns nil.
+func ErrorProto(s *spb.Status) error {
+	return FromProto(s).Err()
+}
+
+// FromProto returns a Status representing s.
+func FromProto(s *spb.Status) *Status {
+	return status.FromProto(s)
+}
+
+// FromError returns a Status representation of err.
+//
+//   - If err was produced by this package or implements the method `GRPCStatus()
+//     *Status` and `GRPCStatus()` does not return nil, or if err wraps a type
+//     satisfying this, the Status from `GRPCStatus()` is returned.  For wrapped
+//     errors, the message returned contains the entire err.Error() text and not
+//     just the wrapped status. In that case, ok is true.
+//
+//   - If err is nil, a Status is returned with codes.OK and no message, and ok
+//     is true.
+//
+//   - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()`
+//     returns nil (which maps to codes.OK), or if err wraps a type
+//     satisfying this, a Status is returned with codes.Unknown and err's
+//     Error() message, and ok is false.
+//
+//   - Otherwise, err is an error not compatible with this package.  In this
+//     case, a Status is returned with codes.Unknown and err's Error() message,
+//     and ok is false.
+func FromError(err error) (s *Status, ok bool) {
+	if err == nil {
+		return nil, true
+	}
+	type grpcstatus interface{ GRPCStatus() *Status }
+	if gs, ok := err.(grpcstatus); ok {
+		grpcStatus := gs.GRPCStatus()
+		if grpcStatus == nil {
+			// Error has status nil, which maps to codes.OK. There
+			// is no sensible behavior for this, so we turn it into
+			// an error with codes.Unknown and discard the existing
+			// status.
+			return New(codes.Unknown, err.Error()), false
+		}
+		return grpcStatus, true
+	}
+	var gs grpcstatus
+	if errors.As(err, &gs) {
+		grpcStatus := gs.GRPCStatus()
+		if grpcStatus == nil {
+			// Error wraps an error that has status nil, which maps
+			// to codes.OK.  There is no sensible behavior for this,
+			// so we turn it into an error with codes.Unknown and
+			// discard the existing status.
+			return New(codes.Unknown, err.Error()), false
+		}
+		p := grpcStatus.Proto()
+		p.Message = err.Error()
+		return status.FromProto(p), true
+	}
+	return New(codes.Unknown, err.Error()), false
+}
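+
+// For example, FromError unwraps through fmt.Errorf("...: %w", ...) wrapping
+// and keeps the full wrapped message:
+//
+//	err := fmt.Errorf("lookup failed: %w", status.Error(codes.NotFound, "no such user"))
+//	s, ok := status.FromError(err)
+//	// ok == true, s.Code() == codes.NotFound, and s.Message() contains the
+//	// entire err.Error() text, not just "no such user".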
+
+// Convert is a convenience function which removes the need to handle the
+// boolean return value from FromError.
+func Convert(err error) *Status {
+	s, _ := FromError(err)
+	return s
+}
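+
+// Example (editorial sketch, not upstream code): typical caller-side handling
+// of an RPC error with this package. The generated client stub and its
+// GetUser method are hypothetical.
+//
+//	resp, err := client.GetUser(ctx, req)
+//	if err != nil {
+//		st := status.Convert(err) // no ok-check needed
+//		switch st.Code() {
+//		case codes.NotFound:
+//			log.Printf("user not found: %s", st.Message())
+//		case codes.DeadlineExceeded:
+//			log.Printf("timed out: %s", st.Message())
+//		default:
+//			log.Printf("RPC failed: %v", st.Err())
+//		}
+//	}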
+
+// Code returns the Code of the error if it is a Status error or if it wraps a
+// Status error. If that is not the case, it returns codes.OK if err is nil, or
+// codes.Unknown otherwise.
+func Code(err error) codes.Code {
+	// Don't use FromError to avoid allocation of OK status.
+	if err == nil {
+		return codes.OK
+	}
+
+	return Convert(err).Code()
+}
+
+// FromContextError converts a context error or wrapped context error into a
+// Status.  It returns nil (treated as codes.OK) if err is nil, or a Status
+// with codes.Unknown if err is non-nil and not a context error.
+func FromContextError(err error) *Status {
+	if err == nil {
+		return nil
+	}
+	if errors.Is(err, context.DeadlineExceeded) {
+		return New(codes.DeadlineExceeded, err.Error())
+	}
+	if errors.Is(err, context.Canceled) {
+		return New(codes.Canceled, err.Error())
+	}
+	return New(codes.Unknown, err.Error())
+}
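+
+// Example (editorial sketch): converting a context failure into a gRPC status
+// before returning it from server-side code. The doWork helper is
+// hypothetical.
+//
+//	if err := doWork(ctx); err != nil {
+//		// context.DeadlineExceeded -> codes.DeadlineExceeded,
+//		// context.Canceled -> codes.Canceled,
+//		// anything else -> codes.Unknown.
+//		return status.FromContextError(err).Err()
+//	}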
diff --git a/hack/tools/vendor/google.golang.org/grpc/stream.go b/hack/tools/vendor/google.golang.org/grpc/stream.go
new file mode 100644
index 0000000000..bb2b2a216c
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/stream.go
@@ -0,0 +1,1841 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"errors"
+	"io"
+	"math"
+	"math/rand"
+	"strconv"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/balancerload"
+	"google.golang.org/grpc/internal/binarylog"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcutil"
+	imetadata "google.golang.org/grpc/internal/metadata"
+	iresolver "google.golang.org/grpc/internal/resolver"
+	"google.golang.org/grpc/internal/serviceconfig"
+	istatus "google.golang.org/grpc/internal/status"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
+
+// StreamHandler defines the handler called by gRPC server to complete the
+// execution of a streaming RPC.
+//
+// If a StreamHandler returns an error, it should either be produced by the
+// status package, or be one of the context errors. Otherwise, gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message of the
+// RPC.
+type StreamHandler func(srv any, stream ServerStream) error
+
+// StreamDesc represents a streaming RPC service's method specification.  Used
+// on the server when registering services and on the client when initiating
+// new streams.
+type StreamDesc struct {
+	// StreamName and Handler are only used when registering handlers on a
+	// server.
+	StreamName string        // the name of the method excluding the service
+	Handler    StreamHandler // the handler called for the method
+
+	// ServerStreams and ClientStreams are used for registering handlers on a
+	// server as well as defining RPC behavior when passed to NewClientStream
+	// and ClientConn.NewStream.  At least one must be true.
+	ServerStreams bool // indicates the server can perform streaming sends
+	ClientStreams bool // indicates the client can perform streaming sends
+}
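+
+// Example (editorial sketch): a StreamDesc for a hypothetical bidirectional
+// streaming method, similar to what generated code declares when registering
+// a service or opening a client stream.
+//
+//	var echoStreamDesc = grpc.StreamDesc{
+//		StreamName:    "EchoStream",      // method name without the service prefix
+//		Handler:       echoStreamHandler, // hypothetical server-side handler
+//		ServerStreams: true,
+//		ClientStreams: true,
+//	}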
+
+// Stream defines the common interface a client or server stream has to satisfy.
+//
+// Deprecated: See ClientStream and ServerStream documentation instead.
+type Stream interface {
+	// Deprecated: See ClientStream and ServerStream documentation instead.
+	Context() context.Context
+	// Deprecated: See ClientStream and ServerStream documentation instead.
+	SendMsg(m any) error
+	// Deprecated: See ClientStream and ServerStream documentation instead.
+	RecvMsg(m any) error
+}
+
+// ClientStream defines the client-side behavior of a streaming RPC.
+//
+// All errors returned from ClientStream methods are compatible with the
+// status package.
+type ClientStream interface {
+	// Header returns the header metadata received from the server if there
+	// is any. It blocks if the metadata is not ready to read.  If the metadata
+	// is nil and the error is also nil, then the stream was terminated without
+	// headers, and the status can be discovered by calling RecvMsg.
+	Header() (metadata.MD, error)
+	// Trailer returns the trailer metadata from the server, if there is any.
+	// It must only be called after stream.CloseAndRecv has returned, or
+	// stream.Recv has returned a non-nil error (including io.EOF).
+	Trailer() metadata.MD
+	// CloseSend closes the send direction of the stream. It closes the stream
+	// when a non-nil error is encountered. It is also not safe to call
+	// CloseSend concurrently with SendMsg.
+	CloseSend() error
+	// Context returns the context for this stream.
+	//
+	// It should not be called until after Header or RecvMsg has returned. Once
+	// called, subsequent client-side retries are disabled.
+	Context() context.Context
+	// SendMsg is generally called by generated code. On error, SendMsg aborts
+	// the stream. If the error was generated by the client, the status is
+	// returned directly; otherwise, io.EOF is returned and the status of
+	// the stream may be discovered using RecvMsg.
+	//
+	// SendMsg blocks until:
+	//   - There is sufficient flow control to schedule m with the transport, or
+	//   - The stream is done, or
+	//   - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the server. An
+	// untimely stream closure may result in lost messages. To ensure delivery,
+	// users should ensure the RPC completed successfully using RecvMsg.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines. It is also
+	// not safe to call CloseSend concurrently with SendMsg.
+	//
+	// It is not safe to modify the message after calling SendMsg. Tracing
+	// libraries and stats handlers may use the message lazily.
+	SendMsg(m any) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the stream completes successfully. On
+	// any other error, the stream is aborted and the error contains the RPC
+	// status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m any) error
+}
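+
+// Example (editorial sketch): the contract above permits one goroutine
+// sending while another receives on the same stream. The stream value and
+// the EchoResponse type are hypothetical.
+//
+//	go func() {
+//		for _, req := range reqs {
+//			if err := stream.SendMsg(req); err != nil {
+//				return // the RPC status is surfaced via RecvMsg
+//			}
+//		}
+//		stream.CloseSend() // same goroutine as SendMsg, so this is safe
+//	}()
+//	for {
+//		resp := new(EchoResponse)
+//		if err := stream.RecvMsg(resp); err != nil {
+//			break // io.EOF on success; otherwise the RPC status
+//		}
+//	}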
+
+// NewStream creates a new Stream for the client side. This is typically
+// called by generated code. ctx is used for the lifetime of the stream.
+//
+// To ensure resources are not leaked due to the stream returned, one of the following
+// actions must be performed:
+//
+//  1. Call Close on the ClientConn.
+//  2. Cancel the context provided.
+//  3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
+//     client-streaming RPC, for instance, might use the helper function
+//     CloseAndRecv (note that CloseSend does not Recv, therefore is not
+//     guaranteed to release all resources).
+//  4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
+//
+// If none of the above happen, a goroutine and a context will be leaked, and grpc
+// will not call the optionally-configured stats handler with a stats.End message.
+func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+	// allow interceptor to see all applicable call options, which means those
+	// configured as defaults from dial option as well as per-call options
+	opts = combine(cc.dopts.callOptions, opts)
+
+	if cc.dopts.streamInt != nil {
+		return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
+	}
+	return newClientStream(ctx, desc, cc, method, opts...)
+}
+
+// NewClientStream is a wrapper for ClientConn.NewStream.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+	return cc.NewStream(ctx, desc, method, opts...)
+}
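+
+// Example (editorial sketch): one way to satisfy the resource rules listed
+// above for a client stream; desc, method, and the message types are
+// hypothetical.
+//
+//	ctx, cancel := context.WithCancel(ctx)
+//	defer cancel() // rule 2: the context is always canceled eventually
+//	stream, err := cc.NewStream(ctx, desc, method)
+//	if err != nil {
+//		return err
+//	}
+//	if err := stream.SendMsg(req); err != nil {
+//		return err // cleanup is guaranteed by the deferred cancel above
+//	}
+//	if err := stream.CloseSend(); err != nil {
+//		return err
+//	}
+//	for {
+//		resp := new(Response)
+//		if err := stream.RecvMsg(resp); err != nil {
+//			if err == io.EOF {
+//				return nil // rule 3: drained until a non-nil error
+//			}
+//			return err
+//		}
+//	}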
+
+func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+	// Start tracking the RPC for idleness purposes. This is where a stream is
+	// created for both streaming and unary RPCs, and hence is a good place to
+	// track active RPC count.
+	if err := cc.idlenessMgr.OnCallBegin(); err != nil {
+		return nil, err
+	}
+	// Add a calloption, to decrement the active call count, that gets executed
+	// when the RPC completes.
+	opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
+
+	if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
+		// validate md
+		if err := imetadata.Validate(md); err != nil {
+			return nil, status.Error(codes.Internal, err.Error())
+		}
+		// validate added
+		for _, kvs := range added {
+			for i := 0; i < len(kvs); i += 2 {
+				if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil {
+					return nil, status.Error(codes.Internal, err.Error())
+				}
+			}
+		}
+	}
+	if channelz.IsOn() {
+		cc.incrCallsStarted()
+		defer func() {
+			if err != nil {
+				cc.incrCallsFailed()
+			}
+		}()
+	}
+	// Provide an opportunity for the first RPC to see the first service config
+	// provided by the resolver.
+	if err := cc.waitForResolvedAddrs(ctx); err != nil {
+		return nil, err
+	}
+
+	var mc serviceconfig.MethodConfig
+	var onCommit func()
+	var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
+	}
+
+	rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
+	rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
+	if err != nil {
+		if st, ok := status.FromError(err); ok {
+			// Restrict the code to the list allowed by gRFC A54.
+			if istatus.IsRestrictedControlPlaneCode(st) {
+				err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err)
+			}
+			return nil, err
+		}
+		return nil, toRPCErr(err)
+	}
+
+	if rpcConfig != nil {
+		if rpcConfig.Context != nil {
+			ctx = rpcConfig.Context
+		}
+		mc = rpcConfig.MethodConfig
+		onCommit = rpcConfig.OnCommitted
+		if rpcConfig.Interceptor != nil {
+			rpcInfo.Context = nil
+			ns := newStream
+			newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+				cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns)
+				if err != nil {
+					return nil, toRPCErr(err)
+				}
+				return cs, nil
+			}
+		}
+	}
+
+	return newStream(ctx, func() {})
+}
+
+func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
+	c := defaultCallInfo()
+	if mc.WaitForReady != nil {
+		c.failFast = !*mc.WaitForReady
+	}
+
+	// Possible context leak:
+	// The cancel function for the child context we create will only be called
+	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+	// an error is generated by SendMsg.
+	// https://github.com/grpc/grpc-go/issues/1818.
+	var cancel context.CancelFunc
+	if mc.Timeout != nil && *mc.Timeout >= 0 {
+		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
+	} else {
+		ctx, cancel = context.WithCancel(ctx)
+	}
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	for _, o := range opts {
+		if err := o.before(c); err != nil {
+			return nil, toRPCErr(err)
+		}
+	}
+	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
+	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+	if err := setCallInfoCodec(c); err != nil {
+		return nil, err
+	}
+
+	callHdr := &transport.CallHdr{
+		Host:           cc.authority,
+		Method:         method,
+		ContentSubtype: c.contentSubtype,
+		DoneFunc:       doneFunc,
+	}
+
+	// Set our outgoing compression according to the UseCompressor CallOption, if
+	// set.  In that case, also find the compressor from the encoding package.
+	// Otherwise, use the compressor configured by the WithCompressor DialOption,
+	// if set.
+	var cp Compressor
+	var comp encoding.Compressor
+	if ct := c.compressorType; ct != "" {
+		callHdr.SendCompress = ct
+		if ct != encoding.Identity {
+			comp = encoding.GetCompressor(ct)
+			if comp == nil {
+				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+			}
+		}
+	} else if cc.dopts.cp != nil {
+		callHdr.SendCompress = cc.dopts.cp.Type()
+		cp = cc.dopts.cp
+	}
+	if c.creds != nil {
+		callHdr.Creds = c.creds
+	}
+
+	cs := &clientStream{
+		callHdr:      callHdr,
+		ctx:          ctx,
+		methodConfig: &mc,
+		opts:         opts,
+		callInfo:     c,
+		cc:           cc,
+		desc:         desc,
+		codec:        c.codec,
+		cp:           cp,
+		comp:         comp,
+		cancel:       cancel,
+		firstAttempt: true,
+		onCommit:     onCommit,
+	}
+	if !cc.dopts.disableRetry {
+		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
+	}
+	if ml := binarylog.GetMethodLogger(method); ml != nil {
+		cs.binlogs = append(cs.binlogs, ml)
+	}
+	if cc.dopts.binaryLogger != nil {
+		if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil {
+			cs.binlogs = append(cs.binlogs, ml)
+		}
+	}
+
+	// Pick the transport to use and create a new stream on the transport.
+	// Assign cs.attempt upon success.
+	op := func(a *csAttempt) error {
+		if err := a.getTransport(); err != nil {
+			return err
+		}
+		if err := a.newStream(); err != nil {
+			return err
+		}
+		// Because this operation is always called either here (while creating
+		// the clientStream) or by the retry code while locked when replaying
+		// the operation, it is safe to access cs.attempt directly.
+		cs.attempt = a
+		return nil
+	}
+	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil {
+		return nil, err
+	}
+
+	if len(cs.binlogs) != 0 {
+		md, _ := metadata.FromOutgoingContext(ctx)
+		logEntry := &binarylog.ClientHeader{
+			OnClientSide: true,
+			Header:       md,
+			MethodName:   method,
+			Authority:    cs.cc.authority,
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			logEntry.Timeout = time.Until(deadline)
+			if logEntry.Timeout < 0 {
+				logEntry.Timeout = 0
+			}
+		}
+		for _, binlog := range cs.binlogs {
+			binlog.Log(cs.ctx, logEntry)
+		}
+	}
+
+	if desc != unaryStreamDesc {
+		// Listen on cc and stream contexts to cleanup when the user closes the
+		// ClientConn or cancels the stream context.  In all other cases, an error
+		// should already be injected into the recv buffer by the transport, which
+		// the client will eventually receive, and then we will cancel the stream's
+		// context in clientStream.finish.
+		go func() {
+			select {
+			case <-cc.ctx.Done():
+				cs.finish(ErrClientConnClosing)
+			case <-ctx.Done():
+				cs.finish(toRPCErr(ctx.Err()))
+			}
+		}()
+	}
+	return cs, nil
+}
+
+// newAttemptLocked creates a new csAttempt without a transport or stream.
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) {
+	if err := cs.ctx.Err(); err != nil {
+		return nil, toRPCErr(err)
+	}
+	if err := cs.cc.ctx.Err(); err != nil {
+		return nil, ErrClientConnClosing
+	}
+
+	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
+	method := cs.callHdr.Method
+	var beginTime time.Time
+	shs := cs.cc.dopts.copts.StatsHandlers
+	for _, sh := range shs {
+		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
+		beginTime = time.Now()
+		begin := &stats.Begin{
+			Client:                    true,
+			BeginTime:                 beginTime,
+			FailFast:                  cs.callInfo.failFast,
+			IsClientStream:            cs.desc.ClientStreams,
+			IsServerStream:            cs.desc.ServerStreams,
+			IsTransparentRetryAttempt: isTransparent,
+		}
+		sh.HandleRPC(ctx, begin)
+	}
+
+	var trInfo *traceInfo
+	if EnableTracing {
+		trInfo = &traceInfo{
+			tr: newTrace("grpc.Sent."+methodFamily(method), method),
+			firstLine: firstLine{
+				client: true,
+			},
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			trInfo.firstLine.deadline = time.Until(deadline)
+		}
+		trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		ctx = newTraceContext(ctx, trInfo.tr)
+	}
+
+	if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata {
+		// Add extra metadata (metadata that will be added by transport) to context
+		// so the balancer can see them.
+		ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
+			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
+		))
+	}
+
+	return &csAttempt{
+		ctx:           ctx,
+		beginTime:     beginTime,
+		cs:            cs,
+		dc:            cs.cc.dopts.dc,
+		statsHandlers: shs,
+		trInfo:        trInfo,
+	}, nil
+}
+
+func (a *csAttempt) getTransport() error {
+	cs := a.cs
+
+	var err error
+	a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+	if err != nil {
+		if de, ok := err.(dropError); ok {
+			err = de.error
+			a.drop = true
+		}
+		return err
+	}
+	if a.trInfo != nil {
+		a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
+	}
+	return nil
+}
+
+func (a *csAttempt) newStream() error {
+	cs := a.cs
+	cs.callHdr.PreviousAttempts = cs.numRetries
+
+	// Merge metadata stored in PickResult, if any, with existing call metadata.
+	// It is safe to overwrite the csAttempt's context here, since all state
+	// maintained in it are local to the attempt. When the attempt has to be
+	// retried, a new instance of csAttempt will be created.
+	if a.pickResult.Metadata != nil {
+		// We currently do not have a function in the metadata package which
+		// merges given metadata with existing metadata in a context. Existing
+		// function `AppendToOutgoingContext()` takes a variadic argument of key
+		// value pairs.
+		//
+		// TODO: Make it possible to retrieve key value pairs from metadata.MD
+		// in a form passable to AppendToOutgoingContext(), or create a version
+		// of AppendToOutgoingContext() that accepts a metadata.MD.
+		md, _ := metadata.FromOutgoingContext(a.ctx)
+		md = metadata.Join(md, a.pickResult.Metadata)
+		a.ctx = metadata.NewOutgoingContext(a.ctx, md)
+	}
+
+	s, err := a.t.NewStream(a.ctx, cs.callHdr)
+	if err != nil {
+		nse, ok := err.(*transport.NewStreamError)
+		if !ok {
+			// Unexpected.
+			return err
+		}
+
+		if nse.AllowTransparentRetry {
+			a.allowTransparentRetry = true
+		}
+
+		// Unwrap and convert error.
+		return toRPCErr(nse.Err)
+	}
+	a.s = s
+	a.ctx = s.Context()
+	a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
+	return nil
+}
+
+// clientStream implements a client side Stream.
+type clientStream struct {
+	callHdr  *transport.CallHdr
+	opts     []CallOption
+	callInfo *callInfo
+	cc       *ClientConn
+	desc     *StreamDesc
+
+	codec baseCodec
+	cp    Compressor
+	comp  encoding.Compressor
+
+	cancel context.CancelFunc // cancels all attempts
+
+	sentLast bool // sent an end stream
+
+	methodConfig *MethodConfig
+
+	ctx context.Context // the application's context, wrapped by stats/tracing
+
+	retryThrottler *retryThrottler // The throttler active when the RPC began.
+
+	binlogs []binarylog.MethodLogger
+	// serverHeaderBinlogged indicates whether the server header has been
+	// logged. The server header is logged the first time either
+	// stream.Header() or stream.Recv() is called.
+	//
+	// It's only read and used by Recv() and Header(), so it doesn't need to be
+	// synchronized.
+	serverHeaderBinlogged bool
+
+	mu                      sync.Mutex
+	firstAttempt            bool // if true, transparent retry is valid
+	numRetries              int  // exclusive of transparent retry attempt(s)
+	numRetriesSincePushback int  // retries since pushback; to reset backoff
+	finished                bool // TODO: replace with atomic cmpxchg or sync.Once?
+	// attempt is the active client stream attempt.
+	// The only place where it is written is the newAttemptLocked method and this method never writes nil.
+	// So, attempt can be nil only inside newClientStream function when clientStream is first created.
+	// One of the first things done after clientStream's creation, is to call newAttemptLocked which either
+	// assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked,
+	// then newClientStream calls finish on the clientStream and returns. So, finish method is the only
+	// place where we need to check if the attempt is nil.
+	attempt *csAttempt
+	// TODO(hedging): hedging will have multiple attempts simultaneously.
+	committed        bool // active attempt committed for retry?
+	onCommit         func()
+	replayBuffer     []replayOp // operations to replay on retry
+	replayBufferSize int        // current size of replayBuffer
+}
+
+type replayOp struct {
+	op      func(a *csAttempt) error
+	cleanup func()
+}
+
+// csAttempt implements a single transport stream attempt within a
+// clientStream.
+type csAttempt struct {
+	ctx        context.Context
+	cs         *clientStream
+	t          transport.ClientTransport
+	s          *transport.Stream
+	p          *parser
+	pickResult balancer.PickResult
+
+	finished  bool
+	dc        Decompressor
+	decomp    encoding.Compressor
+	decompSet bool
+
+	mu sync.Mutex // guards trInfo.tr
+	// trInfo may be nil (if EnableTracing is false).
+	// trInfo.tr is set when created (if EnableTracing is true),
+	// and cleared when the finish method is called.
+	trInfo *traceInfo
+
+	statsHandlers []stats.Handler
+	beginTime     time.Time
+
+	// set for newStream errors that may be transparently retried
+	allowTransparentRetry bool
+	// set for pick errors that are returned as a status
+	drop bool
+}
+
+func (cs *clientStream) commitAttemptLocked() {
+	if !cs.committed && cs.onCommit != nil {
+		cs.onCommit()
+	}
+	cs.committed = true
+	for _, op := range cs.replayBuffer {
+		if op.cleanup != nil {
+			op.cleanup()
+		}
+	}
+	cs.replayBuffer = nil
+}
+
+func (cs *clientStream) commitAttempt() {
+	cs.mu.Lock()
+	cs.commitAttemptLocked()
+	cs.mu.Unlock()
+}
+
+// shouldRetry returns a nil error if the RPC should be retried; otherwise it
+// returns the error that should be returned by the operation.  If the RPC
+// should be retried, the bool indicates whether it is being retried
+// transparently.
+func (a *csAttempt) shouldRetry(err error) (bool, error) {
+	cs := a.cs
+
+	if cs.finished || cs.committed || a.drop {
+		// RPC is finished or committed or was dropped by the picker; cannot retry.
+		return false, err
+	}
+	if a.s == nil && a.allowTransparentRetry {
+		return true, nil
+	}
+	// Wait for the trailers.
+	unprocessed := false
+	if a.s != nil {
+		<-a.s.Done()
+		unprocessed = a.s.Unprocessed()
+	}
+	if cs.firstAttempt && unprocessed {
+		// First attempt, stream unprocessed: transparently retry.
+		return true, nil
+	}
+	if cs.cc.dopts.disableRetry {
+		return false, err
+	}
+
+	pushback := 0
+	hasPushback := false
+	if a.s != nil {
+		if !a.s.TrailersOnly() {
+			return false, err
+		}
+
+		// TODO(retry): Move down if the spec changes to not check server pushback
+		// before considering this a failure for throttling.
+		sps := a.s.Trailer()["grpc-retry-pushback-ms"]
+		if len(sps) == 1 {
+			var e error
+			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
+				channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0])
+				cs.retryThrottler.throttle() // This counts as a failure for throttling.
+				return false, err
+			}
+			hasPushback = true
+		} else if len(sps) > 1 {
+			channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps)
+			cs.retryThrottler.throttle() // This counts as a failure for throttling.
+			return false, err
+		}
+	}
+
+	var code codes.Code
+	if a.s != nil {
+		code = a.s.Status().Code()
+	} else {
+		code = status.Code(err)
+	}
+
+	rp := cs.methodConfig.RetryPolicy
+	if rp == nil || !rp.RetryableStatusCodes[code] {
+		return false, err
+	}
+
+	// Note: the ordering here is important; we count this as a failure
+	// only if the code matched a retryable code.
+	if cs.retryThrottler.throttle() {
+		return false, err
+	}
+	if cs.numRetries+1 >= rp.MaxAttempts {
+		return false, err
+	}
+
+	var dur time.Duration
+	if hasPushback {
+		dur = time.Millisecond * time.Duration(pushback)
+		cs.numRetriesSincePushback = 0
+	} else {
+		fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
+		cur := float64(rp.InitialBackoff) * fact
+		if max := float64(rp.MaxBackoff); cur > max {
+			cur = max
+		}
+		dur = time.Duration(rand.Int63n(int64(cur)))
+		cs.numRetriesSincePushback++
+	}
+
+	// TODO(dfawley): we could eagerly fail here if dur puts us past the
+	// deadline, but unsure if it is worth doing.
+	t := time.NewTimer(dur)
+	select {
+	case <-t.C:
+		cs.numRetries++
+		return false, nil
+	case <-cs.ctx.Done():
+		t.Stop()
+		return false, status.FromContextError(cs.ctx.Err()).Err()
+	}
+}
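+
+// Editorial note: a standalone sketch of the backoff computed above when the
+// server sends no pushback; rp and retries mirror the retry-policy fields and
+// counter used in shouldRetry.
+//
+//	fact := math.Pow(rp.BackoffMultiplier, float64(retries))
+//	cur := float64(rp.InitialBackoff) * fact
+//	if m := float64(rp.MaxBackoff); cur > m {
+//		cur = m
+//	}
+//	dur := time.Duration(rand.Int63n(int64(cur))) // full jitter in [0, cur)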
+
+// Returns nil if a retry was performed and succeeded; error otherwise.
+func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
+	for {
+		attempt.finish(toRPCErr(lastErr))
+		isTransparent, err := attempt.shouldRetry(lastErr)
+		if err != nil {
+			cs.commitAttemptLocked()
+			return err
+		}
+		cs.firstAttempt = false
+		attempt, err = cs.newAttemptLocked(isTransparent)
+		if err != nil {
+			// Only returns error if the clientconn is closed or the context of
+			// the stream is canceled.
+			return err
+		}
+		// Note that the first op in replayBuffer always sets cs.attempt
+		// if it is able to pick a transport and create a stream.
+		if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
+			return nil
+		}
+	}
+}
+
+func (cs *clientStream) Context() context.Context {
+	cs.commitAttempt()
+	// No need to lock before using attempt, since we know it is committed and
+	// cannot change.
+	if cs.attempt.s != nil {
+		return cs.attempt.s.Context()
+	}
+	return cs.ctx
+}
+
+func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
+	cs.mu.Lock()
+	for {
+		if cs.committed {
+			cs.mu.Unlock()
+			// toRPCErr is used in case the error from the attempt comes from
+			// NewClientStream, which intentionally doesn't return a status
+			// error to allow for further inspection; all other errors should
+			// already be status errors.
+			return toRPCErr(op(cs.attempt))
+		}
+		if len(cs.replayBuffer) == 0 {
+			// For the first op, which controls creation of the stream and
+			// assigns cs.attempt, we need to create a new attempt inline
+			// before executing the first op.  On subsequent ops, the attempt
+			// is created immediately before replaying the ops.
+			var err error
+			if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil {
+				cs.mu.Unlock()
+				cs.finish(err)
+				return err
+			}
+		}
+		a := cs.attempt
+		cs.mu.Unlock()
+		err := op(a)
+		cs.mu.Lock()
+		if a != cs.attempt {
+			// We started another attempt already.
+			continue
+		}
+		if err == io.EOF {
+			<-a.s.Done()
+		}
+		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+			onSuccess()
+			cs.mu.Unlock()
+			return err
+		}
+		if err := cs.retryLocked(a, err); err != nil {
+			cs.mu.Unlock()
+			return err
+		}
+	}
+}
+
+func (cs *clientStream) Header() (metadata.MD, error) {
+	var m metadata.MD
+	err := cs.withRetry(func(a *csAttempt) error {
+		var err error
+		m, err = a.s.Header()
+		return toRPCErr(err)
+	}, cs.commitAttemptLocked)
+
+	if m == nil && err == nil {
+		// The stream ended with success.  Finish the clientStream.
+		err = io.EOF
+	}
+
+	if err != nil {
+		cs.finish(err)
+		// Do not return the error.  The user should get it by calling Recv().
+		return nil, nil
+	}
+
+	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil {
+		// Only log if binary log is on and header has not been logged, and
+		// there is actually headers to log.
+		logEntry := &binarylog.ServerHeader{
+			OnClientSide: true,
+			Header:       m,
+			PeerAddr:     nil,
+		}
+		if peer, ok := peer.FromContext(cs.Context()); ok {
+			logEntry.PeerAddr = peer.Addr
+		}
+		cs.serverHeaderBinlogged = true
+		for _, binlog := range cs.binlogs {
+			binlog.Log(cs.ctx, logEntry)
+		}
+	}
+
+	return m, nil
+}
+
+func (cs *clientStream) Trailer() metadata.MD {
+	// On RPC failure, we never need to retry, because valid usage requires
+	// that RecvMsg() has returned a non-nil error before this function is
+	// called. We would have retried earlier if necessary.
+	//
+	// Commit the attempt anyway, just in case users are not following those
+	// directions -- it will prevent races and should not meaningfully impact
+	// performance.
+	cs.commitAttempt()
+	if cs.attempt.s == nil {
+		return nil
+	}
+	return cs.attempt.s.Trailer()
+}
+
+func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
+	for _, f := range cs.replayBuffer {
+		if err := f.op(attempt); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
+	// Note: we will still buffer if retry is disabled (for transparent retries).
+	if cs.committed {
+		return
+	}
+	cs.replayBufferSize += sz
+	if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
+		cs.commitAttemptLocked()
+		cleanup()
+		return
+	}
+	cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
+}
+
+func (cs *clientStream) SendMsg(m any) (err error) {
+	defer func() {
+		if err != nil && err != io.EOF {
+			// Call finish on the client stream for errors generated by this SendMsg
+			// call, as these indicate problems created by this client.  (Transport
+			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
+			// error will be returned from RecvMsg eventually in that case, or be
+			// retried.)
+			cs.finish(err)
+		}
+	}()
+	if cs.sentLast {
+		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+	}
+	if !cs.desc.ClientStreams {
+		cs.sentLast = true
+	}
+
+	// load hdr, payload, data
+	hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		data.Free()
+		// only free payload if compression was made, and therefore it is a different set
+		// of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
+	// TODO(dfawley): should we be checking len(data) instead?
+	if payloadLen > *cs.callInfo.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize)
+	}
+
+	// always take an extra ref in case data == payload (i.e. when the data isn't
+	// compressed). The original ref will always be freed by the deferred free above.
+	payload.Ref()
+	op := func(a *csAttempt) error {
+		return a.sendMsg(m, hdr, payload, dataLen, payloadLen)
+	}
+
+	// onSuccess is invoked when the op is captured for a subsequent retry. If the
+	// stream was established by a previous message and therefore retries are
+	// disabled, onSuccess will not be invoked, and the extra payload reference
+	// taken above can be freed immediately.
+	onSuccessCalled := false
+	err = cs.withRetry(op, func() {
+		cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free)
+		onSuccessCalled = true
+	})
+	if !onSuccessCalled {
+		payload.Free()
+	}
+	if len(cs.binlogs) != 0 && err == nil {
+		cm := &binarylog.ClientMessage{
+			OnClientSide: true,
+			Message:      data.Materialize(),
+		}
+		for _, binlog := range cs.binlogs {
+			binlog.Log(cs.ctx, cm)
+		}
+	}
+	return err
+}
+
+func (cs *clientStream) RecvMsg(m any) error {
+	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
+		// Call Header() to binary log header if it's not already logged.
+		cs.Header()
+	}
+	var recvInfo *payloadInfo
+	if len(cs.binlogs) != 0 {
+		recvInfo = &payloadInfo{}
+		defer recvInfo.free()
+	}
+	err := cs.withRetry(func(a *csAttempt) error {
+		return a.recvMsg(m, recvInfo)
+	}, cs.commitAttemptLocked)
+	if len(cs.binlogs) != 0 && err == nil {
+		sm := &binarylog.ServerMessage{
+			OnClientSide: true,
+			Message:      recvInfo.uncompressedBytes.Materialize(),
+		}
+		for _, binlog := range cs.binlogs {
+			binlog.Log(cs.ctx, sm)
+		}
+	}
+	if err != nil || !cs.desc.ServerStreams {
+		// err != nil or non-server-streaming indicates end of stream.
+		cs.finish(err)
+	}
+	return err
+}
+
+func (cs *clientStream) CloseSend() error {
+	if cs.sentLast {
+		// TODO: return an error and finish the stream instead, due to API misuse?
+		return nil
+	}
+	cs.sentLast = true
+	op := func(a *csAttempt) error {
+		a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
+		// Always return nil; io.EOF is the only error that might make sense
+		// instead, but there is no need to signal the client to call RecvMsg
+		// as the only use left for the stream after CloseSend is to call
+		// RecvMsg.  This also matches historical behavior.
+		return nil
+	}
+	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) })
+	if len(cs.binlogs) != 0 {
+		chc := &binarylog.ClientHalfClose{
+			OnClientSide: true,
+		}
+		for _, binlog := range cs.binlogs {
+			binlog.Log(cs.ctx, chc)
+		}
+	}
+	// The error from withRetry is intentionally ignored: CloseSend is
+	// documented above to always return nil, and any underlying failure is
+	// surfaced by RecvMsg.
+	return nil
+}
+
+func (cs *clientStream) finish(err error) {
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	cs.mu.Lock()
+	if cs.finished {
+		cs.mu.Unlock()
+		return
+	}
+	cs.finished = true
+	for _, onFinish := range cs.callInfo.onFinish {
+		onFinish(err)
+	}
+	cs.commitAttemptLocked()
+	if cs.attempt != nil {
+		cs.attempt.finish(err)
+		// The after functions all rely upon having a stream.
+		if cs.attempt.s != nil {
+			for _, o := range cs.opts {
+				o.after(cs.callInfo, cs.attempt)
+			}
+		}
+	}
+
+	cs.mu.Unlock()
+	// Only one of cancel or trailer needs to be logged.
+	if len(cs.binlogs) != 0 {
+		switch err {
+		case errContextCanceled, errContextDeadline, ErrClientConnClosing:
+			c := &binarylog.Cancel{
+				OnClientSide: true,
+			}
+			for _, binlog := range cs.binlogs {
+				binlog.Log(cs.ctx, c)
+			}
+		default:
+			logEntry := &binarylog.ServerTrailer{
+				OnClientSide: true,
+				Trailer:      cs.Trailer(),
+				Err:          err,
+			}
+			if peer, ok := peer.FromContext(cs.Context()); ok {
+				logEntry.PeerAddr = peer.Addr
+			}
+			for _, binlog := range cs.binlogs {
+				binlog.Log(cs.ctx, logEntry)
+			}
+		}
+	}
+	if err == nil {
+		cs.retryThrottler.successfulRPC()
+	}
+	if channelz.IsOn() {
+		if err != nil {
+			cs.cc.incrCallsFailed()
+		} else {
+			cs.cc.incrCallsSucceeded()
+		}
+	}
+	cs.cancel()
+}
+
+func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error {
+	cs := a.cs
+	if a.trInfo != nil {
+		a.mu.Lock()
+		if a.trInfo.tr != nil {
+			a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+		}
+		a.mu.Unlock()
+	}
+	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
+		if !cs.desc.ClientStreams {
+			// For non-client-streaming RPCs, we return nil instead of EOF on error
+			// because the generated code requires it.  finish is not called; RecvMsg()
+			// will call it with the stream's status independently.
+			return nil
+		}
+		return io.EOF
+	}
+	if len(a.statsHandlers) != 0 {
+		for _, sh := range a.statsHandlers {
+			sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
+		}
+	}
+	if channelz.IsOn() {
+		a.t.IncrMsgSent()
+	}
+	return nil
+}
+
+func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
+	cs := a.cs
+	if len(a.statsHandlers) != 0 && payInfo == nil {
+		payInfo = &payloadInfo{}
+		defer payInfo.free()
+	}
+
+	if !a.decompSet {
+		// Block until we receive headers containing received message encoding.
+		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+			if a.dc == nil || a.dc.Type() != ct {
+				// No configured decompressor, or it does not match the incoming
+				// message encoding; attempt to find a registered compressor that does.
+				a.dc = nil
+				a.decomp = encoding.GetCompressor(ct)
+			}
+		} else {
+			// No compression is used; disable our decompressor.
+			a.dc = nil
+		}
+		// Only initialize this state once per stream.
+		a.decompSet = true
+	}
+	if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil {
+		if err == io.EOF {
+			if statusErr := a.s.Status().Err(); statusErr != nil {
+				return statusErr
+			}
+			return io.EOF // indicates successful end of stream.
+		}
+
+		return toRPCErr(err)
+	}
+	if a.trInfo != nil {
+		a.mu.Lock()
+		if a.trInfo.tr != nil {
+			a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+		}
+		a.mu.Unlock()
+	}
+	for _, sh := range a.statsHandlers {
+		sh.HandleRPC(a.ctx, &stats.InPayload{
+			Client:           true,
+			RecvTime:         time.Now(),
+			Payload:          m,
+			WireLength:       payInfo.compressedLength + headerLen,
+			CompressedLength: payInfo.compressedLength,
+			Length:           payInfo.uncompressedBytes.Len(),
+		})
+	}
+	if channelz.IsOn() {
+		a.t.IncrMsgRecv()
+	}
+	if cs.desc.ServerStreams {
+		// Subsequent messages should be received by subsequent RecvMsg calls.
+		return nil
+	}
+	// Special handling for non-server-stream rpcs.
+	// This recv expects EOF or errors, so we don't collect inPayload.
+	if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF {
+		return a.s.Status().Err() // non-server streaming Recv returns nil on success
+	} else if err != nil {
+		return toRPCErr(err)
+	}
+	return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+}
+
+func (a *csAttempt) finish(err error) {
+	a.mu.Lock()
+	if a.finished {
+		a.mu.Unlock()
+		return
+	}
+	a.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	var tr metadata.MD
+	if a.s != nil {
+		a.t.CloseStream(a.s, err)
+		tr = a.s.Trailer()
+	}
+
+	if a.pickResult.Done != nil {
+		br := false
+		if a.s != nil {
+			br = a.s.BytesReceived()
+		}
+		a.pickResult.Done(balancer.DoneInfo{
+			Err:           err,
+			Trailer:       tr,
+			BytesSent:     a.s != nil,
+			BytesReceived: br,
+			ServerLoad:    balancerload.Parse(tr),
+		})
+	}
+	for _, sh := range a.statsHandlers {
+		end := &stats.End{
+			Client:    true,
+			BeginTime: a.beginTime,
+			EndTime:   time.Now(),
+			Trailer:   tr,
+			Error:     err,
+		}
+		sh.HandleRPC(a.ctx, end)
+	}
+	if a.trInfo != nil && a.trInfo.tr != nil {
+		if err == nil {
+			a.trInfo.tr.LazyPrintf("RPC: [OK]")
+		} else {
+			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+			a.trInfo.tr.SetError()
+		}
+		a.trInfo.tr.Finish()
+		a.trInfo.tr = nil
+	}
+	a.mu.Unlock()
+}
+
+// newNonRetryClientStream creates a ClientStream with the specified transport, on the
+// given addrConn.
+//
+// It's expected that the given transport is either the same one in addrConn, or
+// is already closed. To avoid race, transport is specified separately, instead
+// of using ac.transport.
+//
+// Main difference between this and ClientConn.NewStream:
+// - no retry
+// - no service config (or wait for service config)
+// - no tracing or stats
+func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
+	if t == nil {
+		// TODO: return RPC error here?
+		return nil, errors.New("transport provided is nil")
+	}
+	// defaultCallInfo contains unnecessary info (i.e. failfast, maxRetryRPCBufferSize), so we just initialize an empty struct.
+	c := &callInfo{}
+
+	// Possible context leak:
+	// The cancel function for the child context we create will only be called
+	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+	// an error is generated by SendMsg.
+	// https://github.com/grpc/grpc-go/issues/1818.
+	ctx, cancel := context.WithCancel(ctx)
+	defer func() {
+		if err != nil {
+			cancel()
+		}
+	}()
+
+	for _, o := range opts {
+		if err := o.before(c); err != nil {
+			return nil, toRPCErr(err)
+		}
+	}
+	c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+	c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
+	if err := setCallInfoCodec(c); err != nil {
+		return nil, err
+	}
+
+	callHdr := &transport.CallHdr{
+		Host:           ac.cc.authority,
+		Method:         method,
+		ContentSubtype: c.contentSubtype,
+	}
+
+	// Set our outgoing compression according to the UseCompressor CallOption, if
+	// set.  In that case, also find the compressor from the encoding package.
+	// Otherwise, use the compressor configured by the WithCompressor DialOption,
+	// if set.
+	var cp Compressor
+	var comp encoding.Compressor
+	if ct := c.compressorType; ct != "" {
+		callHdr.SendCompress = ct
+		if ct != encoding.Identity {
+			comp = encoding.GetCompressor(ct)
+			if comp == nil {
+				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
+			}
+		}
+	} else if ac.cc.dopts.cp != nil {
+		callHdr.SendCompress = ac.cc.dopts.cp.Type()
+		cp = ac.cc.dopts.cp
+	}
+	if c.creds != nil {
+		callHdr.Creds = c.creds
+	}
+
+	// Use a special addrConnStream to avoid retry.
+	as := &addrConnStream{
+		callHdr:  callHdr,
+		ac:       ac,
+		ctx:      ctx,
+		cancel:   cancel,
+		opts:     opts,
+		callInfo: c,
+		desc:     desc,
+		codec:    c.codec,
+		cp:       cp,
+		comp:     comp,
+		t:        t,
+	}
+
+	s, err := as.t.NewStream(as.ctx, as.callHdr)
+	if err != nil {
+		err = toRPCErr(err)
+		return nil, err
+	}
+	as.s = s
+	as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
+	ac.incrCallsStarted()
+	if desc != unaryStreamDesc {
+		// Listen on stream context to cleanup when the stream context is
+		// canceled.  Also listen for the addrConn's context in case the
+		// addrConn is closed or reconnects to a different address.  In all
+		// other cases, an error should already be injected into the recv
+		// buffer by the transport, which the client will eventually receive,
+		// and then we will cancel the stream's context in
+		// addrConnStream.finish.
+		go func() {
+			ac.mu.Lock()
+			acCtx := ac.ctx
+			ac.mu.Unlock()
+			select {
+			case <-acCtx.Done():
+				as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
+			case <-ctx.Done():
+				as.finish(toRPCErr(ctx.Err()))
+			}
+		}()
+	}
+	return as, nil
+}
+
+type addrConnStream struct {
+	s         *transport.Stream
+	ac        *addrConn
+	callHdr   *transport.CallHdr
+	cancel    context.CancelFunc
+	opts      []CallOption
+	callInfo  *callInfo
+	t         transport.ClientTransport
+	ctx       context.Context
+	sentLast  bool
+	desc      *StreamDesc
+	codec     baseCodec
+	cp        Compressor
+	comp      encoding.Compressor
+	decompSet bool
+	dc        Decompressor
+	decomp    encoding.Compressor
+	p         *parser
+	mu        sync.Mutex
+	finished  bool
+}
+
+func (as *addrConnStream) Header() (metadata.MD, error) {
+	m, err := as.s.Header()
+	if err != nil {
+		as.finish(toRPCErr(err))
+	}
+	return m, err
+}
+
+func (as *addrConnStream) Trailer() metadata.MD {
+	return as.s.Trailer()
+}
+
+func (as *addrConnStream) CloseSend() error {
+	if as.sentLast {
+		// TODO: return an error and finish the stream instead, due to API misuse?
+		return nil
+	}
+	as.sentLast = true
+
+	as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
+	// Always return nil; io.EOF is the only error that might make sense
+	// instead, but there is no need to signal the client to call RecvMsg
+	// as the only use left for the stream after CloseSend is to call
+	// RecvMsg.  This also matches historical behavior.
+	return nil
+}
+
+func (as *addrConnStream) Context() context.Context {
+	return as.s.Context()
+}
+
+func (as *addrConnStream) SendMsg(m any) (err error) {
+	defer func() {
+		if err != nil && err != io.EOF {
+			// Call finish on the client stream for errors generated by this SendMsg
+			// call, as these indicate problems created by this client.  (Transport
+			// errors are converted to an io.EOF error in csAttempt.sendMsg; the real
+			// error will be returned from RecvMsg eventually in that case, or be
+			// retried.)
+			as.finish(err)
+		}
+	}()
+	if as.sentLast {
+		return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+	}
+	if !as.desc.ClientStreams {
+		as.sentLast = true
+	}
+
+	// load hdr, payload, data
+	hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		data.Free()
+		// only free payload if compression was made, and therefore it is a different set
+		// of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
+	// TODO(dfawley): should we be checking len(data) instead?
+	if payload.Len() > *as.callInfo.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
+	}
+
+	if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+		if !as.desc.ClientStreams {
+			// For non-client-streaming RPCs, we return nil instead of EOF on error
+			// because the generated code requires it.  finish is not called; RecvMsg()
+			// will call it with the stream's status independently.
+			return nil
+		}
+		return io.EOF
+	}
+
+	if channelz.IsOn() {
+		as.t.IncrMsgSent()
+	}
+	return nil
+}
+
+func (as *addrConnStream) RecvMsg(m any) (err error) {
+	defer func() {
+		if err != nil || !as.desc.ServerStreams {
+			// err != nil or non-server-streaming indicates end of stream.
+			as.finish(err)
+		}
+	}()
+
+	if !as.decompSet {
+		// Block until we receive headers containing received message encoding.
+		if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+			if as.dc == nil || as.dc.Type() != ct {
+				// No configured decompressor, or it does not match the incoming
+				// message encoding; attempt to find a registered compressor that does.
+				as.dc = nil
+				as.decomp = encoding.GetCompressor(ct)
+			}
+		} else {
+			// No compression is used; disable our decompressor.
+			as.dc = nil
+		}
+		// Only initialize this state once per stream.
+		as.decompSet = true
+	}
+	if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil {
+		if err == io.EOF {
+			if statusErr := as.s.Status().Err(); statusErr != nil {
+				return statusErr
+			}
+			return io.EOF // indicates successful end of stream.
+		}
+		return toRPCErr(err)
+	}
+
+	if channelz.IsOn() {
+		as.t.IncrMsgRecv()
+	}
+	if as.desc.ServerStreams {
+		// Subsequent messages should be received by subsequent RecvMsg calls.
+		return nil
+	}
+
+	// Special handling for non-server-stream rpcs.
+	// This recv expects EOF or errors, so we don't collect inPayload.
+	if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF {
+		return as.s.Status().Err() // non-server streaming Recv returns nil on success
+	} else if err != nil {
+		return toRPCErr(err)
+	}
+	return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+}
+
+func (as *addrConnStream) finish(err error) {
+	as.mu.Lock()
+	if as.finished {
+		as.mu.Unlock()
+		return
+	}
+	as.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	if as.s != nil {
+		as.t.CloseStream(as.s, err)
+	}
+
+	if err != nil {
+		as.ac.incrCallsFailed()
+	} else {
+		as.ac.incrCallsSucceeded()
+	}
+	as.cancel()
+	as.mu.Unlock()
+}
+
+// ServerStream defines the server-side behavior of a streaming RPC.
+//
+// Errors returned from ServerStream methods are compatible with the status
+// package.  However, the status code will often not match the RPC status as
+// seen by the client application, and therefore, should not be relied upon for
+// this purpose.
+type ServerStream interface {
+	// SetHeader sets the header metadata. It may be called multiple times.
+	// When called multiple times, all the provided metadata will be merged.
+	// All the metadata will be sent out when one of the following happens:
+	//  - ServerStream.SendHeader() is called;
+	//  - The first response is sent out;
+	//  - An RPC status is sent out (error or success).
+	SetHeader(metadata.MD) error
+	// SendHeader sends the header metadata.
+	// The provided md and headers set by SetHeader() will be sent.
+	// It fails if called multiple times.
+	SendHeader(metadata.MD) error
+	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
+	// When called more than once, all the provided metadata will be merged.
+	SetTrailer(metadata.MD)
+	// Context returns the context for this stream.
+	Context() context.Context
+	// SendMsg sends a message. On error, SendMsg aborts the stream and the
+	// error is returned directly.
+	//
+	// SendMsg blocks until:
+	//   - There is sufficient flow control to schedule m with the transport, or
+	//   - The stream is done, or
+	//   - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the client. An
+	// untimely stream closure may result in lost messages.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines.
+	//
+	// It is not safe to modify the message after calling SendMsg. Tracing
+	// libraries and stats handlers may use the message lazily.
+	SendMsg(m any) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the client has performed a CloseSend. On
+	// any non-EOF error, the stream is aborted and the error contains the
+	// RPC status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m any) error
+}
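+
+// Example (editorial sketch): a bidirectional echo handler that follows the
+// contract above; returning nil after io.EOF reports an OK status to the
+// client. The EchoRequest type is hypothetical.
+//
+//	func echoHandler(srv any, stream grpc.ServerStream) error {
+//		for {
+//			req := new(EchoRequest)
+//			if err := stream.RecvMsg(req); err != nil {
+//				if err == io.EOF {
+//					return nil // client called CloseSend
+//				}
+//				return err
+//			}
+//			if err := stream.SendMsg(req); err != nil {
+//				return err
+//			}
+//		}
+//	}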
+
+// serverStream implements a server side Stream.
+type serverStream struct {
+	ctx   context.Context
+	t     transport.ServerTransport
+	s     *transport.Stream
+	p     *parser
+	codec baseCodec
+
+	cp     Compressor
+	dc     Decompressor
+	comp   encoding.Compressor
+	decomp encoding.Compressor
+
+	sendCompressorName string
+
+	maxReceiveMessageSize int
+	maxSendMessageSize    int
+	trInfo                *traceInfo
+
+	statsHandler []stats.Handler
+
+	binlogs []binarylog.MethodLogger
+	// serverHeaderBinlogged indicates whether the server header has been
+	// logged. It is set the first time either stream.SendHeader() or
+	// stream.Send() is called.
+	//
+	// It's only checked in send and sendHeader, doesn't need to be
+	// synchronized.
+	serverHeaderBinlogged bool
+
+	mu sync.Mutex // protects trInfo.tr after the service handler runs.
+}
+
+func (ss *serverStream) Context() context.Context {
+	return ss.ctx
+}
+
+func (ss *serverStream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	err := imetadata.Validate(md)
+	if err != nil {
+		return status.Error(codes.Internal, err.Error())
+	}
+	return ss.s.SetHeader(md)
+}
+
+func (ss *serverStream) SendHeader(md metadata.MD) error {
+	err := imetadata.Validate(md)
+	if err != nil {
+		return status.Error(codes.Internal, err.Error())
+	}
+
+	err = ss.t.WriteHeader(ss.s, md)
+	if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
+		h, _ := ss.s.Header()
+		sh := &binarylog.ServerHeader{
+			Header: h,
+		}
+		ss.serverHeaderBinlogged = true
+		for _, binlog := range ss.binlogs {
+			binlog.Log(ss.ctx, sh)
+		}
+	}
+	return err
+}
+
+func (ss *serverStream) SetTrailer(md metadata.MD) {
+	if md.Len() == 0 {
+		return
+	}
+	if err := imetadata.Validate(md); err != nil {
+		logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err)
+	}
+	ss.s.SetTrailer(md)
+}
+
+func (ss *serverStream) SendMsg(m any) (err error) {
+	defer func() {
+		if ss.trInfo != nil {
+			ss.mu.Lock()
+			if ss.trInfo.tr != nil {
+				if err == nil {
+					ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+				} else {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+			}
+			ss.mu.Unlock()
+		}
+		if err != nil && err != io.EOF {
+			st, _ := status.FromError(toRPCErr(err))
+			ss.t.WriteStatus(ss.s, st)
+			// Non-user specified status was sent out. This should be an error
+			// case (as a server side Cancel maybe).
+			//
+			// This is not handled specifically now. User will return a final
+			// status from the service handler, we will log that error instead.
+			// This behavior is similar to an interceptor.
+		}
+		if channelz.IsOn() && err == nil {
+			ss.t.IncrMsgSent()
+		}
+	}()
+
+	// Server handler could have set new compressor by calling SetSendCompressor.
+	// In case it is set, we need to use it for compressing outbound message.
+	if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName {
+		ss.comp = encoding.GetCompressor(sendCompressorsName)
+		ss.sendCompressorName = sendCompressorsName
+	}
+
+	// load hdr, payload, data
+	hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		data.Free()
+		// only free payload if compression was made, and therefore it is a different set
+		// of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
+
+	// TODO(dfawley): should we be checking len(data) instead?
+	if payloadLen > ss.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
+	}
+	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
+		return toRPCErr(err)
+	}
+
+	if len(ss.binlogs) != 0 {
+		if !ss.serverHeaderBinlogged {
+			h, _ := ss.s.Header()
+			sh := &binarylog.ServerHeader{
+				Header: h,
+			}
+			ss.serverHeaderBinlogged = true
+			for _, binlog := range ss.binlogs {
+				binlog.Log(ss.ctx, sh)
+			}
+		}
+		sm := &binarylog.ServerMessage{
+			Message: data.Materialize(),
+		}
+		for _, binlog := range ss.binlogs {
+			binlog.Log(ss.ctx, sm)
+		}
+	}
+	if len(ss.statsHandler) != 0 {
+		for _, sh := range ss.statsHandler {
+			sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
+		}
+	}
+	return nil
+}
+
+func (ss *serverStream) RecvMsg(m any) (err error) {
+	defer func() {
+		if ss.trInfo != nil {
+			ss.mu.Lock()
+			if ss.trInfo.tr != nil {
+				if err == nil {
+					ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+				} else if err != io.EOF {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+			}
+			ss.mu.Unlock()
+		}
+		if err != nil && err != io.EOF {
+			st, _ := status.FromError(toRPCErr(err))
+			ss.t.WriteStatus(ss.s, st)
+			// A non-user-specified status was sent out. This should be an
+			// error case (e.g., a server-side cancellation).
+			//
+			// This is not handled specifically for now. The user will return a
+			// final status from the service handler; we will log that error
+			// instead. This behavior is similar to an interceptor.
+		}
+		if channelz.IsOn() && err == nil {
+			ss.t.IncrMsgRecv()
+		}
+	}()
+	var payInfo *payloadInfo
+	if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
+		payInfo = &payloadInfo{}
+		defer payInfo.free()
+	}
+	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil {
+		if err == io.EOF {
+			if len(ss.binlogs) != 0 {
+				chc := &binarylog.ClientHalfClose{}
+				for _, binlog := range ss.binlogs {
+					binlog.Log(ss.ctx, chc)
+				}
+			}
+			return err
+		}
+		if err == io.ErrUnexpectedEOF {
+			err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+		}
+		return toRPCErr(err)
+	}
+	if len(ss.statsHandler) != 0 {
+		for _, sh := range ss.statsHandler {
+			sh.HandleRPC(ss.s.Context(), &stats.InPayload{
+				RecvTime:         time.Now(),
+				Payload:          m,
+				Length:           payInfo.uncompressedBytes.Len(),
+				WireLength:       payInfo.compressedLength + headerLen,
+				CompressedLength: payInfo.compressedLength,
+			})
+		}
+	}
+	if len(ss.binlogs) != 0 {
+		cm := &binarylog.ClientMessage{
+			Message: payInfo.uncompressedBytes.Materialize(),
+		}
+		for _, binlog := range ss.binlogs {
+			binlog.Log(ss.ctx, cm)
+		}
+	}
+	return nil
+}
+
+// MethodFromServerStream returns the method string for the input stream.
+// The returned string is in the format of "/service/method".
+func MethodFromServerStream(stream ServerStream) (string, bool) {
+	return Method(stream.Context())
+}
+
+// prepareMsg returns the hdr, payload and data using the compressors passed,
+// or using the passed PreparedMsg. The returned payloadFormat indicates
+// whether compression was applied and therefore whether the payload needs to
+// be freed in addition to the returned data. Freeing the payload when
+// pf.isCompressed() reports false can lead to undefined behavior.
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) {
+	if preparedMsg, ok := m.(*PreparedMsg); ok {
+		return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil
+	}
+	// The input interface is not a prepared msg.
+	// Marshal and Compress the data at this point
+	data, err = encode(codec, m)
+	if err != nil {
+		return nil, nil, nil, 0, err
+	}
+	compData, pf, err := compress(data, cp, comp, pool)
+	if err != nil {
+		data.Free()
+		return nil, nil, nil, 0, err
+	}
+	hdr, payload = msgHeader(data, compData, pf)
+	return hdr, data, payload, pf, nil
+}
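MethodFromServerStream is most useful inside stream interceptors, where the plain ServerStream interface gives no direct access to the method name. A minimal sketch of such an interceptor (the interceptor and its log line are illustrative, not part of the vendored code):

package main

import (
	"log"

	"google.golang.org/grpc"
)

// logMethod is a stream server interceptor that uses MethodFromServerStream
// (defined above) to record which RPC a stream belongs to.
func logMethod(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	if m, ok := grpc.MethodFromServerStream(ss); ok {
		log.Printf("handling stream %s", m) // e.g. "/pkg.Service/Watch"
	}
	return handler(srv, ss)
}

func main() {
	// Register the interceptor; it runs once per streaming RPC.
	srv := grpc.NewServer(grpc.StreamInterceptor(logMethod))
	defer srv.Stop()
}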
diff --git a/hack/tools/vendor/google.golang.org/grpc/stream_interfaces.go b/hack/tools/vendor/google.golang.org/grpc/stream_interfaces.go
new file mode 100644
index 0000000000..0037fee0bd
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/stream_interfaces.go
@@ -0,0 +1,238 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+// ServerStreamingClient represents the client side of a server-streaming (one
+// request, many responses) RPC. It is generic over the type of the response
+// message. It is used in generated code.
+type ServerStreamingClient[Res any] interface {
+	// Recv receives the next response message from the server. The client may
+	// repeatedly call Recv to read messages from the response stream.  If
+	// io.EOF is returned, the stream has terminated with an OK status.  Any
+	// other error is compatible with the status package and indicates the
+	// RPC's status code and message.
+	Recv() (*Res, error)
+
+	// ClientStream is embedded to provide Context, Header, and Trailer
+	// functionality.  No other methods in the ClientStream should be called
+	// directly.
+	ClientStream
+}
+
+// ServerStreamingServer represents the server side of a server-streaming (one
+// request, many responses) RPC. It is generic over the type of the response
+// message. It is used in generated code.
+//
+// To terminate the response stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
+type ServerStreamingServer[Res any] interface {
+	// Send sends a response message to the client.  The server handler may
+	// call Send multiple times to send multiple messages to the client.  An
+	// error is returned if the stream was terminated unexpectedly, and the
+	// handler method should return, as the stream is no longer usable.
+	Send(*Res) error
+
+	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+	// SetTrailer functionality.  No other methods in the ServerStream should
+	// be called directly.
+	ServerStream
+}
+
+// ClientStreamingClient represents the client side of a client-streaming (many
+// requests, one response) RPC. It is generic over both the type of the request
+// message stream and the type of the unary response message. It is used in
+// generated code.
+type ClientStreamingClient[Req any, Res any] interface {
+	// Send sends a request message to the server.  The client may call Send
+	// multiple times to send multiple messages to the server.  On error, Send
+	// aborts the stream.  If the error was generated by the client, the status
+	// is returned directly.  Otherwise, io.EOF is returned, and the status of
+	// the stream may be discovered using CloseAndRecv().
+	Send(*Req) error
+
+	// CloseAndRecv closes the request stream and waits for the server's
+	// response.  This method must be called once and only once after sending
+	// all request messages.  Any error returned is implemented by the status
+	// package.
+	CloseAndRecv() (*Res, error)
+
+	// ClientStream is embedded to provide Context, Header, and Trailer
+	// functionality.  No other methods in the ClientStream should be called
+	// directly.
+	ClientStream
+}
+
+// ClientStreamingServer represents the server side of a client-streaming (many
+// requests, one response) RPC. It is generic over both the type of the request
+// message stream and the type of the unary response message. It is used in
+// generated code.
+//
+// To terminate the RPC, call SendAndClose and return nil from the method
+// handler or do not call SendAndClose and return an error from the status
+// package.
+type ClientStreamingServer[Req any, Res any] interface {
+	// Recv receives the next request message from the client.  The server may
+	// repeatedly call Recv to read messages from the request stream.  If
+	// io.EOF is returned, it indicates the client called CloseAndRecv on its
+	// ClientStreamingClient.  Any other error indicates the stream was
+	// terminated unexpectedly, and the handler method should return, as the
+	// stream is no longer usable.
+	Recv() (*Req, error)
+
+	// SendAndClose sends a single response message to the client and closes
+	// the stream.  This method must be called once and only once after all
+	// request messages have been processed.  Recv should not be called after
+	// calling SendAndClose.
+	SendAndClose(*Res) error
+
+	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+	// SetTrailer functionality.  No other methods in the ServerStream should
+	// be called directly.
+	ServerStream
+}
+
+// BidiStreamingClient represents the client side of a bidirectional-streaming
+// (many requests, many responses) RPC. It is generic over both the type of the
+// request message stream and the type of the response message stream. It is
+// used in generated code.
+type BidiStreamingClient[Req any, Res any] interface {
+	// Send sends a request message to the server.  The client may call Send
+	// multiple times to send multiple messages to the server.  On error, Send
+	// aborts the stream.  If the error was generated by the client, the status
+	// is returned directly.  Otherwise, io.EOF is returned, and the status of
+	// the stream may be discovered using Recv().
+	Send(*Req) error
+
+	// Recv receives the next response message from the server. The client may
+	// repeatedly call Recv to read messages from the response stream.  If
+	// io.EOF is returned, the stream has terminated with an OK status.  Any
+	// other error is compatible with the status package and indicates the
+	// RPC's status code and message.
+	Recv() (*Res, error)
+
+	// ClientStream is embedded to provide Context, Header, Trailer, and
+	// CloseSend functionality.  No other methods in the ClientStream should be
+	// called directly.
+	ClientStream
+}
+
+// BidiStreamingServer represents the server side of a bidirectional-streaming
+// (many requests, many responses) RPC. It is generic over both the type of the
+// request message stream and the type of the response message stream. It is
+// used in generated code.
+//
+// To terminate the stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
+type BidiStreamingServer[Req any, Res any] interface {
+	// Recv receives the next request message from the client.  The server may
+	// repeatedly call Recv to read messages from the request stream.  If
+	// io.EOF is returned, it indicates the client called CloseSend on its
+	// BidiStreamingClient.  Any other error indicates the stream was
+	// terminated unexpectedly, and the handler method should return, as the
+	// stream is no longer usable.
+	Recv() (*Req, error)
+
+	// Send sends a response message to the client.  The server handler may
+	// call Send multiple times to send multiple messages to the client.  An
+	// error is returned if the stream was terminated unexpectedly, and the
+	// handler method should return, as the stream is no longer usable.
+	Send(*Res) error
+
+	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+	// SetTrailer functionality.  No other methods in the ServerStream should
+	// be called directly.
+	ServerStream
+}
+
+// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient,
+// and BidiStreamingClient interfaces. It is used in generated code.
+type GenericClientStream[Req any, Res any] struct {
+	ClientStream
+}
+
+var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil)
+var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
+var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
+
+// Send pushes one message into the stream of requests to be consumed by the
+// server. The type of message which can be sent is determined by the Req type
+// parameter of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) Send(m *Req) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+// Recv reads one message from the stream of responses generated by the server.
+// The type of the message returned is determined by the Res type parameter
+// of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) {
+	m := new(Res)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// CloseAndRecv closes the sending side of the stream, then receives the unary
+// response from the server. The type of message which it returns is determined
+// by the Res type parameter of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) {
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	m := new(Res)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer,
+// and BidiStreamingServer interfaces. It is used in generated code.
+type GenericServerStream[Req any, Res any] struct {
+	ServerStream
+}
+
+var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil)
+var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+
+// Send pushes one message into the stream of responses to be consumed by the
+// client. The type of message which can be sent is determined by the Res
+// type parameter of the GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) Send(m *Res) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+// SendAndClose pushes the unary response to the client. The type of message
+// which can be sent is determined by the Res type parameter of the
+// GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+// Recv reads one message from the stream of requests generated by the client.
+// The type of the message returned is determined by the Req type parameter
+// of the GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) {
+	m := new(Req)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
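The generic interfaces above are what generated code now accepts in handler signatures. A minimal sketch of a server-streaming handler body, using a local stand-in type instead of a real protoc-generated message:

package example

import "google.golang.org/grpc"

// Tick is a stand-in for a generated protobuf response message (hypothetical;
// real handlers use types produced by protoc-gen-go).
type Tick struct{ Seq int64 }

// sendTicks shows a handler body written against the generic
// ServerStreamingServer interface defined above.
func sendTicks(stream grpc.ServerStreamingServer[Tick]) error {
	for i := int64(0); i < 3; i++ {
		if err := stream.Send(&Tick{Seq: i}); err != nil {
			return err // stream terminated unexpectedly; stop handling
		}
	}
	return nil // returning nil ends the stream with an OK status
}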
diff --git a/hack/tools/vendor/google.golang.org/grpc/tap/tap.go b/hack/tools/vendor/google.golang.org/grpc/tap/tap.go
new file mode 100644
index 0000000000..07f0125768
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/tap/tap.go
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package tap defines the handler function executed at the transport layer of
+// gRPC-Go, along with the related information passed to it.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package tap
+
+import (
+	"context"
+
+	"google.golang.org/grpc/metadata"
+)
+
+// Info defines the relevant information needed by the handles.
+type Info struct {
+	// FullMethodName is the string of grpc method (in the format of
+	// /package.service/method).
+	FullMethodName string
+
+	// Header contains the header metadata received.
+	Header metadata.MD
+
+	// TODO: More to be added.
+}
+
+// ServerInHandle defines the function which runs before a new stream is
+// created on the server side. If it returns a non-nil error, the stream will
+// not be created and an error will be returned to the client.  If the error
+// returned is a status error, that status code and message will be used,
+// otherwise PermissionDenied will be the code and err.Error() will be the
+// message.
+//
+// It's intended to be used in situations where you don't want to waste the
+// resources to accept the new stream (e.g. rate-limiting). For other general
+// usages, please use interceptors.
+//
+// Note that it is executed in the per-connection I/O goroutine(s) instead of
+// per-RPC goroutine. Therefore, users should NOT have any
+// blocking/time-consuming work in this handle. Otherwise all the RPCs would
+// slow down. Also, for the same reason, this handle won't be called
+// concurrently by gRPC.
+type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error)
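A typical use of ServerInHandle is non-blocking admission control before a stream is accepted. The sketch below wires a token-bucket limiter (golang.org/x/time/rate) into a server via grpc.InTapHandle; the limiter parameters are illustrative:

package main

import (
	"context"

	"golang.org/x/time/rate"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

// limiter caps the server at 100 new streams/sec with bursts of 10.
var limiter = rate.NewLimiter(100, 10)

// rejectOverLimit implements tap.ServerInHandle. It must not block: Allow()
// returns immediately, which fits the per-connection I/O goroutine constraint
// described above.
func rejectOverLimit(ctx context.Context, info *tap.Info) (context.Context, error) {
	if !limiter.Allow() {
		return nil, status.Errorf(codes.ResourceExhausted, "rejecting %s: rate limit exceeded", info.FullMethodName)
	}
	return ctx, nil
}

func main() {
	srv := grpc.NewServer(grpc.InTapHandle(rejectOverLimit))
	defer srv.Stop()
}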
diff --git a/hack/tools/vendor/google.golang.org/grpc/trace.go b/hack/tools/vendor/google.golang.org/grpc/trace.go
new file mode 100644
index 0000000000..10f4f798f5
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/trace.go
@@ -0,0 +1,143 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"sync"
+	"time"
+)
+
+// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
+// This should only be set before any RPCs are sent or received by this program.
+var EnableTracing bool
+
+// methodFamily returns the trace family for the given method.
+// It turns "/pkg.Service/GetFoo" into "pkg.Service".
+func methodFamily(m string) string {
+	m = strings.TrimPrefix(m, "/") // remove leading slash
+	if i := strings.Index(m, "/"); i >= 0 {
+		m = m[:i] // remove everything from second slash
+	}
+	return m
+}
+
+// traceEventLog mirrors golang.org/x/net/trace.EventLog.
+//
+// It exists in order to avoid importing x/net/trace on grpcnotrace builds.
+type traceEventLog interface {
+	Printf(format string, a ...any)
+	Errorf(format string, a ...any)
+	Finish()
+}
+
+// traceLog mirrors golang.org/x/net/trace.Trace.
+//
+// It exists in order to avoid importing x/net/trace on grpcnotrace builds.
+type traceLog interface {
+	LazyLog(x fmt.Stringer, sensitive bool)
+	LazyPrintf(format string, a ...any)
+	SetError()
+	SetRecycler(f func(any))
+	SetTraceInfo(traceID, spanID uint64)
+	SetMaxEvents(m int)
+	Finish()
+}
+
+// traceInfo contains tracing information for an RPC.
+type traceInfo struct {
+	tr        traceLog
+	firstLine firstLine
+}
+
+// firstLine is the first line of an RPC trace.
+// It may be mutated after construction; remoteAddr specifically may change
+// during client-side use.
+type firstLine struct {
+	mu         sync.Mutex
+	client     bool // whether this is a client (outgoing) RPC
+	remoteAddr net.Addr
+	deadline   time.Duration // may be zero
+}
+
+func (f *firstLine) SetRemoteAddr(addr net.Addr) {
+	f.mu.Lock()
+	f.remoteAddr = addr
+	f.mu.Unlock()
+}
+
+func (f *firstLine) String() string {
+	f.mu.Lock()
+	defer f.mu.Unlock()
+
+	var line bytes.Buffer
+	io.WriteString(&line, "RPC: ")
+	if f.client {
+		io.WriteString(&line, "to")
+	} else {
+		io.WriteString(&line, "from")
+	}
+	fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
+	if f.deadline != 0 {
+		fmt.Fprint(&line, f.deadline)
+	} else {
+		io.WriteString(&line, "none")
+	}
+	return line.String()
+}
+
+const truncateSize = 100
+
+func truncate(x string, l int) string {
+	if l > len(x) {
+		return x
+	}
+	return x[:l]
+}
+
+// payload represents an RPC request or response payload.
+type payload struct {
+	sent bool // whether this is an outgoing payload
+	msg  any  // e.g. a proto.Message
+	// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
+}
+
+func (p payload) String() string {
+	if p.sent {
+		return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize)
+	}
+	return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize)
+}
+
+type fmtStringer struct {
+	format string
+	a      []any
+}
+
+func (f *fmtStringer) String() string {
+	return fmt.Sprintf(f.format, f.a...)
+}
+
+type stringer string
+
+func (s stringer) String() string { return string(s) }
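EnableTracing is the only exported knob in this file. A minimal sketch of turning it on and exposing the resulting traces; in default (non-grpcnotrace) builds, linking gRPC pulls in x/net/trace, which registers /debug/requests and /debug/events on http.DefaultServeMux:

package main

import (
	"log"
	"net"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	// EnableTracing must be set before any RPCs are served.
	grpc.EnableTracing = true

	// Serve the default mux so the x/net/trace debug pages are reachable
	// at http://localhost:6060/debug/requests.
	go func() { log.Println(http.ListenAndServe("localhost:6060", nil)) }()

	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(grpc.NewServer().Serve(lis))
}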
diff --git a/hack/tools/vendor/google.golang.org/grpc/trace_notrace.go b/hack/tools/vendor/google.golang.org/grpc/trace_notrace.go
new file mode 100644
index 0000000000..1da3a2308e
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/trace_notrace.go
@@ -0,0 +1,52 @@
+//go:build grpcnotrace
+
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+// grpcnotrace can be used to avoid importing golang.org/x/net/trace, which in
+// turn enables dead code elimination in binaries using gRPC-Go. This can
+// yield 10-15% improvements in binary size when tracing is not needed.
+
+import (
+	"context"
+	"fmt"
+)
+
+type notrace struct{}
+
+func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {}
+func (notrace) LazyPrintf(format string, a ...any)     {}
+func (notrace) SetError()                              {}
+func (notrace) SetRecycler(f func(any))                {}
+func (notrace) SetTraceInfo(traceID, spanID uint64)    {}
+func (notrace) SetMaxEvents(m int)                     {}
+func (notrace) Finish()                                {}
+
+func newTrace(family, title string) traceLog {
+	return notrace{}
+}
+
+func newTraceContext(ctx context.Context, tr traceLog) context.Context {
+	return ctx
+}
+
+func newTraceEventLog(family, title string) traceEventLog {
+	return nil
+}
diff --git a/hack/tools/vendor/google.golang.org/grpc/trace_withtrace.go b/hack/tools/vendor/google.golang.org/grpc/trace_withtrace.go
new file mode 100644
index 0000000000..88d6e8571e
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/trace_withtrace.go
@@ -0,0 +1,39 @@
+//go:build !grpcnotrace
+
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+
+	t "golang.org/x/net/trace"
+)
+
+func newTrace(family, title string) traceLog {
+	return t.New(family, title)
+}
+
+func newTraceContext(ctx context.Context, tr traceLog) context.Context {
+	return t.NewContext(ctx, tr)
+}
+
+func newTraceEventLog(family, title string) traceEventLog {
+	return t.NewEventLog(family, title)
+}
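Together with trace_notrace.go above, this file completes Go's mutually exclusive build-tag stub pattern: the same three functions exist in both files, and `go build -tags grpcnotrace` selects the stubs so x/net/trace is never linked. A minimal sketch of the same pattern, with hypothetical package and tag names and the two files shown in one listing:

// ---- file: greet_quiet.go (selected with: go build -tags quietgreet) ----
//go:build quietgreet

package greet

// Greeting returns a plain greeting; the strings package is never linked.
func Greeting(name string) string { return "hello " + name }

// ---- file: greet_loud.go (the default build) ----
//go:build !quietgreet

package greet

import "strings"

// Greeting shouts; this variant (and its strings import) is linked only
// when the quietgreet tag is absent.
func Greeting(name string) string { return strings.ToUpper("hello " + name) }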
diff --git a/hack/tools/vendor/google.golang.org/grpc/version.go b/hack/tools/vendor/google.golang.org/grpc/version.go
new file mode 100644
index 0000000000..187fbf1195
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/grpc/version.go
@@ -0,0 +1,22 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+// Version is the current grpc version.
+const Version = "1.67.0"
diff --git a/hack/tools/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/hack/tools/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
new file mode 100644
index 0000000000..8f9e592f87
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -0,0 +1,685 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protojson
+
+import (
+	"encoding/base64"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/encoding/json"
+	"google.golang.org/protobuf/internal/encoding/messageset"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/internal/flags"
+	"google.golang.org/protobuf/internal/genid"
+	"google.golang.org/protobuf/internal/pragma"
+	"google.golang.org/protobuf/internal/set"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Unmarshal reads the given []byte into the given [proto.Message].
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+func Unmarshal(b []byte, m proto.Message) error {
+	return UnmarshalOptions{}.Unmarshal(b, m)
+}
+
+// UnmarshalOptions is a configurable JSON format parser.
+type UnmarshalOptions struct {
+	pragma.NoUnkeyedLiterals
+
+	// If AllowPartial is set, input that results in messages with missing
+	// required fields will not return an error.
+	AllowPartial bool
+
+	// If DiscardUnknown is set, unknown fields and enum name values are ignored.
+	DiscardUnknown bool
+
+	// Resolver is used for looking up types when unmarshaling
+	// google.protobuf.Any messages or extension fields.
+	// If nil, this defaults to using protoregistry.GlobalTypes.
+	Resolver interface {
+		protoregistry.MessageTypeResolver
+		protoregistry.ExtensionTypeResolver
+	}
+
+	// RecursionLimit limits how deeply messages may be nested.
+	// If zero, a default limit is applied.
+	RecursionLimit int
+}
+
+// Unmarshal reads the given []byte and populates the given [proto.Message]
+// using options in the UnmarshalOptions object.
+// It will clear the message first before setting the fields.
+// If it returns an error, the given message may be partially set.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
+	return o.unmarshal(b, m)
+}
+
+// unmarshal is a centralized function that all unmarshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for unmarshal that do not go through this.
+func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
+	proto.Reset(m)
+
+	if o.Resolver == nil {
+		o.Resolver = protoregistry.GlobalTypes
+	}
+	if o.RecursionLimit == 0 {
+		o.RecursionLimit = protowire.DefaultRecursionLimit
+	}
+
+	dec := decoder{json.NewDecoder(b), o}
+	if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
+		return err
+	}
+
+	// Check for EOF.
+	tok, err := dec.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.EOF {
+		return dec.unexpectedTokenError(tok)
+	}
+
+	if o.AllowPartial {
+		return nil
+	}
+	return proto.CheckInitialized(m)
+}
+
+type decoder struct {
+	*json.Decoder
+	opts UnmarshalOptions
+}
+
+// newError returns an error object with position info.
+func (d decoder) newError(pos int, f string, x ...any) error {
+	line, column := d.Position(pos)
+	head := fmt.Sprintf("(line %d:%d): ", line, column)
+	return errors.New(head+f, x...)
+}
+
+// unexpectedTokenError returns a syntax error for the given unexpected token.
+func (d decoder) unexpectedTokenError(tok json.Token) error {
+	return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString())
+}
+
+// syntaxError returns a syntax error for given position.
+func (d decoder) syntaxError(pos int, f string, x ...any) error {
+	line, column := d.Position(pos)
+	head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
+	return errors.New(head+f, x...)
+}
+
+// unmarshalMessage unmarshals a message into the given protoreflect.Message.
+func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
+	d.opts.RecursionLimit--
+	if d.opts.RecursionLimit < 0 {
+		return errors.New("exceeded max recursion depth")
+	}
+	if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
+		return unmarshal(d, m)
+	}
+
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.ObjectOpen {
+		return d.unexpectedTokenError(tok)
+	}
+
+	messageDesc := m.Descriptor()
+	if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
+		return errors.New("no support for proto1 MessageSets")
+	}
+
+	var seenNums set.Ints
+	var seenOneofs set.Ints
+	fieldDescs := messageDesc.Fields()
+	for {
+		// Read field name.
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		switch tok.Kind() {
+		default:
+			return d.unexpectedTokenError(tok)
+		case json.ObjectClose:
+			return nil
+		case json.Name:
+			// Continue below.
+		}
+
+		name := tok.Name()
+		// When unmarshaling a non-custom embedded message in Any, the JSON
+		// input contains the field "@type", which should be skipped because it
+		// is not a field of the embedded message, but simply an artifact of
+		// the Any format.
+		if skipTypeURL && name == "@type" {
+			d.Read()
+			continue
+		}
+
+		// Get the FieldDescriptor.
+		var fd protoreflect.FieldDescriptor
+		if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") {
+			// Only extension names are in [name] format.
+			extName := protoreflect.FullName(name[1 : len(name)-1])
+			extType, err := d.opts.Resolver.FindExtensionByName(extName)
+			if err != nil && err != protoregistry.NotFound {
+				return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err)
+			}
+			if extType != nil {
+				fd = extType.TypeDescriptor()
+				if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() {
+					return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName())
+				}
+			}
+		} else {
+			// The name can either be the JSON name or the proto field name.
+			fd = fieldDescs.ByJSONName(name)
+			if fd == nil {
+				fd = fieldDescs.ByTextName(name)
+			}
+		}
+		if flags.ProtoLegacy {
+			if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
+				fd = nil // reset since the weak reference is not linked in
+			}
+		}
+
+		if fd == nil {
+			// Field is unknown.
+			if d.opts.DiscardUnknown {
+				if err := d.skipJSONValue(); err != nil {
+					return err
+				}
+				continue
+			}
+			return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
+		}
+
+		// Do not allow duplicate fields.
+		num := uint64(fd.Number())
+		if seenNums.Has(num) {
+			return d.newError(tok.Pos(), "duplicate field %v", tok.RawString())
+		}
+		seenNums.Set(num)
+
+		// No need to set values for JSON null unless the field type is
+		// google.protobuf.Value or google.protobuf.NullValue.
+		if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) {
+			d.Read()
+			continue
+		}
+
+		switch {
+		case fd.IsList():
+			list := m.Mutable(fd).List()
+			if err := d.unmarshalList(list, fd); err != nil {
+				return err
+			}
+		case fd.IsMap():
+			mmap := m.Mutable(fd).Map()
+			if err := d.unmarshalMap(mmap, fd); err != nil {
+				return err
+			}
+		default:
+			// If field is a oneof, check if it has already been set.
+			if od := fd.ContainingOneof(); od != nil {
+				idx := uint64(od.Index())
+				if seenOneofs.Has(idx) {
+					return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName())
+				}
+				seenOneofs.Set(idx)
+			}
+
+			// Required or optional fields.
+			if err := d.unmarshalSingular(m, fd); err != nil {
+				return err
+			}
+		}
+	}
+}
+
+func isKnownValue(fd protoreflect.FieldDescriptor) bool {
+	md := fd.Message()
+	return md != nil && md.FullName() == genid.Value_message_fullname
+}
+
+func isNullValue(fd protoreflect.FieldDescriptor) bool {
+	ed := fd.Enum()
+	return ed != nil && ed.FullName() == genid.NullValue_enum_fullname
+}
+
+// unmarshalSingular unmarshals to the non-repeated field specified
+// by the given FieldDescriptor.
+func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error {
+	var val protoreflect.Value
+	var err error
+	switch fd.Kind() {
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		val = m.NewField(fd)
+		err = d.unmarshalMessage(val.Message(), false)
+	default:
+		val, err = d.unmarshalScalar(fd)
+	}
+
+	if err != nil {
+		return err
+	}
+	if val.IsValid() {
+		m.Set(fd, val)
+	}
+	return nil
+}
+
+// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by
+// the given FieldDescriptor.
+func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	const b32 int = 32
+	const b64 int = 64
+
+	tok, err := d.Read()
+	if err != nil {
+		return protoreflect.Value{}, err
+	}
+
+	kind := fd.Kind()
+	switch kind {
+	case protoreflect.BoolKind:
+		if tok.Kind() == json.Bool {
+			return protoreflect.ValueOfBool(tok.Bool()), nil
+		}
+
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		if v, ok := unmarshalInt(tok, b32); ok {
+			return v, nil
+		}
+
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		if v, ok := unmarshalInt(tok, b64); ok {
+			return v, nil
+		}
+
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		if v, ok := unmarshalUint(tok, b32); ok {
+			return v, nil
+		}
+
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		if v, ok := unmarshalUint(tok, b64); ok {
+			return v, nil
+		}
+
+	case protoreflect.FloatKind:
+		if v, ok := unmarshalFloat(tok, b32); ok {
+			return v, nil
+		}
+
+	case protoreflect.DoubleKind:
+		if v, ok := unmarshalFloat(tok, b64); ok {
+			return v, nil
+		}
+
+	case protoreflect.StringKind:
+		if tok.Kind() == json.String {
+			return protoreflect.ValueOfString(tok.ParsedString()), nil
+		}
+
+	case protoreflect.BytesKind:
+		if v, ok := unmarshalBytes(tok); ok {
+			return v, nil
+		}
+
+	case protoreflect.EnumKind:
+		if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok {
+			return v, nil
+		}
+
+	default:
+		panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
+	}
+
+	return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString())
+}
+
+func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
+	switch tok.Kind() {
+	case json.Number:
+		return getInt(tok, bitSize)
+
+	case json.String:
+		// Decode number from string.
+		s := strings.TrimSpace(tok.ParsedString())
+		if len(s) != len(tok.ParsedString()) {
+			return protoreflect.Value{}, false
+		}
+		dec := json.NewDecoder([]byte(s))
+		tok, err := dec.Read()
+		if err != nil {
+			return protoreflect.Value{}, false
+		}
+		return getInt(tok, bitSize)
+	}
+	return protoreflect.Value{}, false
+}
+
+func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
+	n, ok := tok.Int(bitSize)
+	if !ok {
+		return protoreflect.Value{}, false
+	}
+	if bitSize == 32 {
+		return protoreflect.ValueOfInt32(int32(n)), true
+	}
+	return protoreflect.ValueOfInt64(n), true
+}
+
+func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
+	switch tok.Kind() {
+	case json.Number:
+		return getUint(tok, bitSize)
+
+	case json.String:
+		// Decode number from string.
+		s := strings.TrimSpace(tok.ParsedString())
+		if len(s) != len(tok.ParsedString()) {
+			return protoreflect.Value{}, false
+		}
+		dec := json.NewDecoder([]byte(s))
+		tok, err := dec.Read()
+		if err != nil {
+			return protoreflect.Value{}, false
+		}
+		return getUint(tok, bitSize)
+	}
+	return protoreflect.Value{}, false
+}
+
+func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
+	n, ok := tok.Uint(bitSize)
+	if !ok {
+		return protoreflect.Value{}, false
+	}
+	if bitSize == 32 {
+		return protoreflect.ValueOfUint32(uint32(n)), true
+	}
+	return protoreflect.ValueOfUint64(n), true
+}
+
+func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
+	switch tok.Kind() {
+	case json.Number:
+		return getFloat(tok, bitSize)
+
+	case json.String:
+		s := tok.ParsedString()
+		switch s {
+		case "NaN":
+			if bitSize == 32 {
+				return protoreflect.ValueOfFloat32(float32(math.NaN())), true
+			}
+			return protoreflect.ValueOfFloat64(math.NaN()), true
+		case "Infinity":
+			if bitSize == 32 {
+				return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true
+			}
+			return protoreflect.ValueOfFloat64(math.Inf(+1)), true
+		case "-Infinity":
+			if bitSize == 32 {
+				return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true
+			}
+			return protoreflect.ValueOfFloat64(math.Inf(-1)), true
+		}
+
+		// Decode number from string.
+		if len(s) != len(strings.TrimSpace(s)) {
+			return protoreflect.Value{}, false
+		}
+		dec := json.NewDecoder([]byte(s))
+		tok, err := dec.Read()
+		if err != nil {
+			return protoreflect.Value{}, false
+		}
+		return getFloat(tok, bitSize)
+	}
+	return protoreflect.Value{}, false
+}
+
+func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
+	n, ok := tok.Float(bitSize)
+	if !ok {
+		return protoreflect.Value{}, false
+	}
+	if bitSize == 32 {
+		return protoreflect.ValueOfFloat32(float32(n)), true
+	}
+	return protoreflect.ValueOfFloat64(n), true
+}
+
+func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) {
+	if tok.Kind() != json.String {
+		return protoreflect.Value{}, false
+	}
+
+	s := tok.ParsedString()
+	enc := base64.StdEncoding
+	if strings.ContainsAny(s, "-_") {
+		enc = base64.URLEncoding
+	}
+	if len(s)%4 != 0 {
+		enc = enc.WithPadding(base64.NoPadding)
+	}
+	b, err := enc.DecodeString(s)
+	if err != nil {
+		return protoreflect.Value{}, false
+	}
+	return protoreflect.ValueOfBytes(b), true
+}
+
+func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) {
+	switch tok.Kind() {
+	case json.String:
+		// Lookup EnumNumber based on name.
+		s := tok.ParsedString()
+		if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
+			return protoreflect.ValueOfEnum(enumVal.Number()), true
+		}
+		if discardUnknown {
+			return protoreflect.Value{}, true
+		}
+
+	case json.Number:
+		if n, ok := tok.Int(32); ok {
+			return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true
+		}
+
+	case json.Null:
+		// This is only valid for google.protobuf.NullValue.
+		if isNullValue(fd) {
+			return protoreflect.ValueOfEnum(0), true
+		}
+	}
+
+	return protoreflect.Value{}, false
+}
+
+func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.ArrayOpen {
+		return d.unexpectedTokenError(tok)
+	}
+
+	switch fd.Kind() {
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		for {
+			tok, err := d.Peek()
+			if err != nil {
+				return err
+			}
+
+			if tok.Kind() == json.ArrayClose {
+				d.Read()
+				return nil
+			}
+
+			val := list.NewElement()
+			if err := d.unmarshalMessage(val.Message(), false); err != nil {
+				return err
+			}
+			list.Append(val)
+		}
+	default:
+		for {
+			tok, err := d.Peek()
+			if err != nil {
+				return err
+			}
+
+			if tok.Kind() == json.ArrayClose {
+				d.Read()
+				return nil
+			}
+
+			val, err := d.unmarshalScalar(fd)
+			if err != nil {
+				return err
+			}
+			if val.IsValid() {
+				list.Append(val)
+			}
+		}
+	}
+
+	return nil
+}
+
+func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.ObjectOpen {
+		return d.unexpectedTokenError(tok)
+	}
+
+	// Determine ahead of time whether the map value is a scalar type or a
+	// message type in order to call the appropriate unmarshalMapValue func
+	// inside the for loop below.
+	var unmarshalMapValue func() (protoreflect.Value, error)
+	switch fd.MapValue().Kind() {
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		unmarshalMapValue = func() (protoreflect.Value, error) {
+			val := mmap.NewValue()
+			if err := d.unmarshalMessage(val.Message(), false); err != nil {
+				return protoreflect.Value{}, err
+			}
+			return val, nil
+		}
+	default:
+		unmarshalMapValue = func() (protoreflect.Value, error) {
+			return d.unmarshalScalar(fd.MapValue())
+		}
+	}
+
+Loop:
+	for {
+		// Read field name.
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		switch tok.Kind() {
+		default:
+			return d.unexpectedTokenError(tok)
+		case json.ObjectClose:
+			break Loop
+		case json.Name:
+			// Continue.
+		}
+
+		// Unmarshal field name.
+		pkey, err := d.unmarshalMapKey(tok, fd.MapKey())
+		if err != nil {
+			return err
+		}
+
+		// Check for duplicate field name.
+		if mmap.Has(pkey) {
+			return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString())
+		}
+
+		// Read and unmarshal field value.
+		pval, err := unmarshalMapValue()
+		if err != nil {
+			return err
+		}
+		if pval.IsValid() {
+			mmap.Set(pkey, pval)
+		}
+	}
+
+	return nil
+}
+
+// unmarshalMapKey converts the given token of Name kind into a
+// protoreflect.MapKey. A map key type is any integral or string type.
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) {
+	const b32 = 32
+	const b64 = 64
+	const base10 = 10
+
+	name := tok.Name()
+	kind := fd.Kind()
+	switch kind {
+	case protoreflect.StringKind:
+		return protoreflect.ValueOfString(name).MapKey(), nil
+
+	case protoreflect.BoolKind:
+		switch name {
+		case "true":
+			return protoreflect.ValueOfBool(true).MapKey(), nil
+		case "false":
+			return protoreflect.ValueOfBool(false).MapKey(), nil
+		}
+
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		if n, err := strconv.ParseInt(name, base10, b32); err == nil {
+			return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil
+		}
+
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		if n, err := strconv.ParseInt(name, base10, b64); err == nil {
+			return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil
+		}
+
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		if n, err := strconv.ParseUint(name, base10, b32); err == nil {
+			return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil
+		}
+
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		if n, err := strconv.ParseUint(name, base10, b64); err == nil {
+			return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil
+		}
+
+	default:
+		panic(fmt.Sprintf("invalid kind for map key: %v", kind))
+	}
+
+	return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString())
+}
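A short sketch of the options above in use; configpb.Config and its GetName accessor are hypothetical, standing in for any protoc-gen-go output:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/protojson"

	pb "example.com/gen/configpb" // hypothetical generated package
)

func main() {
	// DiscardUnknown skips fields the schema no longer defines;
	// RecursionLimit bounds nesting depth on hostile input.
	opts := protojson.UnmarshalOptions{
		DiscardUnknown: true,
		RecursionLimit: 64,
	}
	var cfg pb.Config
	input := []byte(`{"name": "prod", "someRetiredField": true}`)
	if err := opts.Unmarshal(input, &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.GetName()) // "prod"; the unknown field was ignored
}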
diff --git a/hack/tools/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/hack/tools/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
new file mode 100644
index 0000000000..ae71007c18
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protojson marshals and unmarshals protocol buffer messages as JSON
+// format. It follows the guide at
+// https://protobuf.dev/programming-guides/proto3#json.
+//
+// This package produces a different output than the standard [encoding/json]
+// package, which does not operate correctly on protocol buffer messages.
+package protojson
diff --git a/hack/tools/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/hack/tools/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
new file mode 100644
index 0000000000..0e72d85378
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -0,0 +1,380 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protojson
+
+import (
+	"encoding/base64"
+	"fmt"
+
+	"google.golang.org/protobuf/internal/encoding/json"
+	"google.golang.org/protobuf/internal/encoding/messageset"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/internal/filedesc"
+	"google.golang.org/protobuf/internal/flags"
+	"google.golang.org/protobuf/internal/genid"
+	"google.golang.org/protobuf/internal/order"
+	"google.golang.org/protobuf/internal/pragma"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const defaultIndent = "  "
+
+// Format formats the message as a multiline string.
+// This function is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
+func Format(m proto.Message) string {
+	return MarshalOptions{Multiline: true}.Format(m)
+}
+
+// Marshal writes the given [proto.Message] in JSON format using default options.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
+func Marshal(m proto.Message) ([]byte, error) {
+	return MarshalOptions{}.Marshal(m)
+}
+
+// MarshalOptions is a configurable JSON format marshaler.
+type MarshalOptions struct {
+	pragma.NoUnkeyedLiterals
+
+	// Multiline specifies whether the marshaler should format the output in
+	// indented-form with every textual element on a new line.
+	// If Indent is an empty string, then an arbitrary indent is chosen.
+	Multiline bool
+
+	// Indent specifies the set of indentation characters to use in a multiline
+	// formatted output such that every entry is preceded by Indent and
+	// terminated by a newline. If non-empty, then Multiline is treated as true.
+	// Indent can only be composed of space or tab characters.
+	Indent string
+
+	// AllowPartial allows messages that have missing required fields to marshal
+	// without returning an error. If AllowPartial is false (the default),
+	// Marshal will return error if there are any missing required fields.
+	AllowPartial bool
+
+	// UseProtoNames uses proto field name instead of lowerCamelCase name in JSON
+	// field names.
+	UseProtoNames bool
+
+	// UseEnumNumbers emits enum values as numbers.
+	UseEnumNumbers bool
+
+	// EmitUnpopulated specifies whether to emit unpopulated fields. It does not
+	// emit unpopulated oneof fields or unpopulated extension fields.
+	// The JSON values emitted for unpopulated fields are as follows:
+	//  ╔═══════╤════════════════════════════╗
+	//  ║ JSON  │ Protobuf field             ║
+	//  ╠═══════╪════════════════════════════╣
+	//  ║ false │ proto3 boolean fields      ║
+	//  ║ 0     │ proto3 numeric fields      ║
+	//  ║ ""    │ proto3 string/bytes fields ║
+	//  ║ null  │ proto2 scalar fields       ║
+	//  ║ null  │ message fields             ║
+	//  ║ []    │ list fields                ║
+	//  ║ {}    │ map fields                 ║
+	//  ╚═══════╧════════════════════════════╝
+	EmitUnpopulated bool
+
+	// EmitDefaultValues specifies whether to emit default-valued primitive fields,
+	// empty lists, and empty maps. The fields affected are as follows:
+	//  ╔═══════╤════════════════════════════════════════╗
+	//  ║ JSON  │ Protobuf field                         ║
+	//  ╠═══════╪════════════════════════════════════════╣
+	//  ║ false │ non-optional scalar boolean fields     ║
+	//  ║ 0     │ non-optional scalar numeric fields     ║
+	//  ║ ""    │ non-optional scalar string/byte fields ║
+	//  ║ []    │ empty repeated fields                  ║
+	//  ║ {}    │ empty map fields                       ║
+	//  ╚═══════╧════════════════════════════════════════╝
+	//
+	// Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields,
+	// i.e. presence-sensing fields that are omitted will remain omitted to preserve
+	// presence-sensing.
+	// EmitUnpopulated takes precedence over EmitDefaultValues since the former generates
+	// a strict superset of the latter.
+	EmitDefaultValues bool
+
+	// Resolver is used for looking up types when expanding google.protobuf.Any
+	// messages. If nil, this defaults to using protoregistry.GlobalTypes.
+	Resolver interface {
+		protoregistry.ExtensionTypeResolver
+		protoregistry.MessageTypeResolver
+	}
+}
+
+// Format formats the message as a string.
+// This method is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
+func (o MarshalOptions) Format(m proto.Message) string {
+	if m == nil || !m.ProtoReflect().IsValid() {
+		return "" // invalid syntax, but okay since this is for debugging
+	}
+	o.AllowPartial = true
+	b, _ := o.Marshal(m)
+	return string(b)
+}
+
+// Marshal marshals the given [proto.Message] in the JSON format using options
+// in the MarshalOptions object.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
+func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
+	return o.marshal(nil, m)
+}
+
+// MarshalAppend appends the JSON format encoding of m to b,
+// returning the result.
+func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) {
+	return o.marshal(b, m)
+}
+
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
+func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) {
+	if o.Multiline && o.Indent == "" {
+		o.Indent = defaultIndent
+	}
+	if o.Resolver == nil {
+		o.Resolver = protoregistry.GlobalTypes
+	}
+
+	internalEnc, err := json.NewEncoder(b, o.Indent)
+	if err != nil {
+		return nil, err
+	}
+
+	// Treat a nil message interface as an empty message,
+	// in which case the output is an empty JSON object.
+	if m == nil {
+		return append(b, '{', '}'), nil
+	}
+
+	enc := encoder{internalEnc, o}
+	if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil {
+		return nil, err
+	}
+	if o.AllowPartial {
+		return enc.Bytes(), nil
+	}
+	return enc.Bytes(), proto.CheckInitialized(m)
+}
+
+type encoder struct {
+	*json.Encoder
+	opts MarshalOptions
+}
+
+// typeFieldDesc is a synthetic field descriptor used for the "@type" field.
+var typeFieldDesc = func() protoreflect.FieldDescriptor {
+	var fd filedesc.Field
+	fd.L0.FullName = "@type"
+	fd.L0.Index = -1
+	fd.L1.Cardinality = protoreflect.Optional
+	fd.L1.Kind = protoreflect.StringKind
+	return &fd
+}()
+
+// typeURLFieldRanger wraps an order.FieldRanger and modifies its Range method
+// to additionally iterate over a synthetic field for the type URL.
+type typeURLFieldRanger struct {
+	order.FieldRanger
+	typeURL string
+}
+
+func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+	if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) {
+		return
+	}
+	m.FieldRanger.Range(f)
+}
+
+// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
+// method to additionally iterate over unpopulated fields.
+type unpopulatedFieldRanger struct {
+	protoreflect.Message
+
+	skipNull bool
+}
+
+func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+	fds := m.Descriptor().Fields()
+	for i := 0; i < fds.Len(); i++ {
+		fd := fds.Get(i)
+		if m.Has(fd) || fd.ContainingOneof() != nil {
+			continue // ignore populated fields and fields within a oneof
+		}
+
+		v := m.Get(fd)
+		if fd.HasPresence() {
+			if m.skipNull {
+				continue
+			}
+			v = protoreflect.Value{} // use invalid value to emit null
+		}
+		if !f(fd, v) {
+			return
+		}
+	}
+	m.Message.Range(f)
+}
+
+// marshalMessage marshals the fields in the given protoreflect.Message.
+// If the typeURL is non-empty, then a synthetic "@type" field is injected
+// containing the URL as the value.
+func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
+	if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) {
+		return errors.New("no support for proto1 MessageSets")
+	}
+
+	if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil {
+		return marshal(e, m)
+	}
+
+	e.StartObject()
+	defer e.EndObject()
+
+	var fields order.FieldRanger = m
+	switch {
+	case e.opts.EmitUnpopulated:
+		fields = unpopulatedFieldRanger{Message: m, skipNull: false}
+	case e.opts.EmitDefaultValues:
+		fields = unpopulatedFieldRanger{Message: m, skipNull: true}
+	}
+	if typeURL != "" {
+		fields = typeURLFieldRanger{fields, typeURL}
+	}
+
+	var err error
+	order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+		name := fd.JSONName()
+		if e.opts.UseProtoNames {
+			name = fd.TextName()
+		}
+
+		if err = e.WriteName(name); err != nil {
+			return false
+		}
+		if err = e.marshalValue(v, fd); err != nil {
+			return false
+		}
+		return true
+	})
+	return err
+}
+
+// marshalValue marshals the given protoreflect.Value.
+func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+	switch {
+	case fd.IsList():
+		return e.marshalList(val.List(), fd)
+	case fd.IsMap():
+		return e.marshalMap(val.Map(), fd)
+	default:
+		return e.marshalSingular(val, fd)
+	}
+}
+
+// marshalSingular marshals the given non-repeated field value. This includes
+// all scalar types, enums, messages, and groups.
+func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+	if !val.IsValid() {
+		e.WriteNull()
+		return nil
+	}
+
+	switch kind := fd.Kind(); kind {
+	case protoreflect.BoolKind:
+		e.WriteBool(val.Bool())
+
+	case protoreflect.StringKind:
+		if e.WriteString(val.String()) != nil {
+			return errors.InvalidUTF8(string(fd.FullName()))
+		}
+
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		e.WriteInt(val.Int())
+
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		e.WriteUint(val.Uint())
+
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind,
+		protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind:
+		// 64-bit integers are written out as JSON strings.
+		e.WriteString(val.String())
+
+	case protoreflect.FloatKind:
+		// Encoder.WriteFloat handles the special numbers NaN and infinities.
+		e.WriteFloat(val.Float(), 32)
+
+	case protoreflect.DoubleKind:
+		// Encoder.WriteFloat handles the special numbers NaN and infinities.
+		e.WriteFloat(val.Float(), 64)
+
+	case protoreflect.BytesKind:
+		e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes()))
+
+	case protoreflect.EnumKind:
+		if fd.Enum().FullName() == genid.NullValue_enum_fullname {
+			e.WriteNull()
+		} else {
+			desc := fd.Enum().Values().ByNumber(val.Enum())
+			if e.opts.UseEnumNumbers || desc == nil {
+				e.WriteInt(int64(val.Enum()))
+			} else {
+				e.WriteString(string(desc.Name()))
+			}
+		}
+
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		if err := e.marshalMessage(val.Message(), ""); err != nil {
+			return err
+		}
+
+	default:
+		panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind))
+	}
+	return nil
+}
+
+// marshalList marshals the given protoreflect.List.
+func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
+	e.StartArray()
+	defer e.EndArray()
+
+	for i := 0; i < list.Len(); i++ {
+		item := list.Get(i)
+		if err := e.marshalSingular(item, fd); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// marshalMap marshals the given protoreflect.Map.
+func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
+	e.StartObject()
+	defer e.EndObject()
+
+	var err error
+	order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool {
+		if err = e.WriteName(k.String()); err != nil {
+			return false
+		}
+		if err = e.marshalSingular(v, fd.MapValue()); err != nil {
+			return false
+		}
+		return true
+	})
+	return err
+}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/hack/tools/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
new file mode 100644
index 0000000000..4b177c8206
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -0,0 +1,876 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protojson
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+
+	"google.golang.org/protobuf/internal/encoding/json"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/internal/genid"
+	"google.golang.org/protobuf/internal/strs"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type marshalFunc func(encoder, protoreflect.Message) error
+
+// wellKnownTypeMarshaler returns a marshal function if the message type
+// has specialized serialization behavior. It returns nil otherwise.
+func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc {
+	if name.Parent() == genid.GoogleProtobuf_package {
+		switch name.Name() {
+		case genid.Any_message_name:
+			return encoder.marshalAny
+		case genid.Timestamp_message_name:
+			return encoder.marshalTimestamp
+		case genid.Duration_message_name:
+			return encoder.marshalDuration
+		case genid.BoolValue_message_name,
+			genid.Int32Value_message_name,
+			genid.Int64Value_message_name,
+			genid.UInt32Value_message_name,
+			genid.UInt64Value_message_name,
+			genid.FloatValue_message_name,
+			genid.DoubleValue_message_name,
+			genid.StringValue_message_name,
+			genid.BytesValue_message_name:
+			return encoder.marshalWrapperType
+		case genid.Struct_message_name:
+			return encoder.marshalStruct
+		case genid.ListValue_message_name:
+			return encoder.marshalListValue
+		case genid.Value_message_name:
+			return encoder.marshalKnownValue
+		case genid.FieldMask_message_name:
+			return encoder.marshalFieldMask
+		case genid.Empty_message_name:
+			return encoder.marshalEmpty
+		}
+	}
+	return nil
+}
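+
+// Illustrative note (not part of the upstream source): for example,
+// wellKnownTypeMarshaler("google.protobuf.Duration") returns
+// encoder.marshalDuration, while a name outside the google.protobuf package
+// yields nil and the message is marshaled as a regular JSON object.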
+
+type unmarshalFunc func(decoder, protoreflect.Message) error
+
+// wellKnownTypeUnmarshaler returns an unmarshal function if the message type
+// has specialized serialization behavior. It returns nil otherwise.
+func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc {
+	if name.Parent() == genid.GoogleProtobuf_package {
+		switch name.Name() {
+		case genid.Any_message_name:
+			return decoder.unmarshalAny
+		case genid.Timestamp_message_name:
+			return decoder.unmarshalTimestamp
+		case genid.Duration_message_name:
+			return decoder.unmarshalDuration
+		case genid.BoolValue_message_name,
+			genid.Int32Value_message_name,
+			genid.Int64Value_message_name,
+			genid.UInt32Value_message_name,
+			genid.UInt64Value_message_name,
+			genid.FloatValue_message_name,
+			genid.DoubleValue_message_name,
+			genid.StringValue_message_name,
+			genid.BytesValue_message_name:
+			return decoder.unmarshalWrapperType
+		case genid.Struct_message_name:
+			return decoder.unmarshalStruct
+		case genid.ListValue_message_name:
+			return decoder.unmarshalListValue
+		case genid.Value_message_name:
+			return decoder.unmarshalKnownValue
+		case genid.FieldMask_message_name:
+			return decoder.unmarshalFieldMask
+		case genid.Empty_message_name:
+			return decoder.unmarshalEmpty
+		}
+	}
+	return nil
+}
+
+// The JSON representation of an Any message uses the regular representation of
+// the deserialized, embedded message, with an additional field `@type` which
+// contains the type URL. If the embedded message type is well-known and has a
+// custom JSON representation, that representation will be embedded by adding a
+// field `value` which holds the custom JSON in addition to the `@type` field.
+
+func (e encoder) marshalAny(m protoreflect.Message) error {
+	fds := m.Descriptor().Fields()
+	fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
+	fdValue := fds.ByNumber(genid.Any_Value_field_number)
+
+	if !m.Has(fdType) {
+		if !m.Has(fdValue) {
+			// If message is empty, marshal out empty JSON object.
+			e.StartObject()
+			e.EndObject()
+			return nil
+		} else {
+			// Return error if type_url field is not set, but value is set.
+			return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name)
+		}
+	}
+
+	typeVal := m.Get(fdType)
+	valueVal := m.Get(fdValue)
+
+	// Resolve the type in order to unmarshal value field.
+	typeURL := typeVal.String()
+	emt, err := e.opts.Resolver.FindMessageByURL(typeURL)
+	if err != nil {
+		return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err)
+	}
+
+	em := emt.New()
+	err = proto.UnmarshalOptions{
+		AllowPartial: true, // never check required fields inside an Any
+		Resolver:     e.opts.Resolver,
+	}.Unmarshal(valueVal.Bytes(), em.Interface())
+	if err != nil {
+		return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err)
+	}
+
+	// If the type of the value has a custom JSON encoding, marshal out a
+	// "value" field holding the custom JSON encoding of the embedded
+	// message.
+	if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil {
+		e.StartObject()
+		defer e.EndObject()
+
+		// Marshal out @type field.
+		e.WriteName("@type")
+		if err := e.WriteString(typeURL); err != nil {
+			return err
+		}
+
+		e.WriteName("value")
+		return marshal(e, em)
+	}
+
+	// Else, marshal out the embedded message's fields in this Any object.
+	if err := e.marshalMessage(em, typeURL); err != nil {
+		return err
+	}
+
+	return nil
+}
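+
+// For illustration (not from the upstream source), the two Any forms produced
+// above look like:
+
+//	{"@type":"type.googleapis.com/mypkg.Foo","name":"x"}
+//	{"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.5s"}
+
+// where mypkg.Foo stands in for a hypothetical regular message type and
+// Duration is a well-known type with a custom JSON encoding.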
+
+func (d decoder) unmarshalAny(m protoreflect.Message) error {
+	// Peek to check for json.ObjectOpen to avoid advancing a read.
+	start, err := d.Peek()
+	if err != nil {
+		return err
+	}
+	if start.Kind() != json.ObjectOpen {
+		return d.unexpectedTokenError(start)
+	}
+
+	// Use another decoder to parse the unread bytes for @type field. This
+	// avoids advancing a read from the current decoder because the current JSON
+	// object may contain the fields of the embedded type.
+	dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}}
+	tok, err := findTypeURL(dec)
+	switch err {
+	case errEmptyObject:
+		// An empty JSON object translates to an empty Any message.
+		d.Read() // Read json.ObjectOpen.
+		d.Read() // Read json.ObjectClose.
+		return nil
+
+	case errMissingType:
+		if d.opts.DiscardUnknown {
+			// Treat all fields as unknowns, similar to an empty object.
+			return d.skipJSONValue()
+		}
+		// Use start.Pos() for line position.
+		return d.newError(start.Pos(), err.Error())
+
+	default:
+		if err != nil {
+			return err
+		}
+	}
+
+	typeURL := tok.ParsedString()
+	emt, err := d.opts.Resolver.FindMessageByURL(typeURL)
+	if err != nil {
+		return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err)
+	}
+
+	// Create new message for the embedded message type and unmarshal into it.
+	em := emt.New()
+	if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil {
+		// If embedded message is a custom type,
+		// unmarshal the JSON "value" field into it.
+		if err := d.unmarshalAnyValue(unmarshal, em); err != nil {
+			return err
+		}
+	} else {
+		// Else unmarshal the current JSON object into it.
+		if err := d.unmarshalMessage(em, true); err != nil {
+			return err
+		}
+	}
+	// Serialize the embedded message and assign the resulting bytes to the
+	// proto value field.
+	b, err := proto.MarshalOptions{
+		AllowPartial:  true, // No need to check required fields inside an Any.
+		Deterministic: true,
+	}.Marshal(em.Interface())
+	if err != nil {
+		return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err)
+	}
+
+	fds := m.Descriptor().Fields()
+	fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
+	fdValue := fds.ByNumber(genid.Any_Value_field_number)
+
+	m.Set(fdType, protoreflect.ValueOfString(typeURL))
+	m.Set(fdValue, protoreflect.ValueOfBytes(b))
+	return nil
+}
+
+var errEmptyObject = fmt.Errorf(`empty object`)
+var errMissingType = fmt.Errorf(`missing "@type" field`)
+
+// findTypeURL returns the token for the "@type" field value from the given
+// JSON bytes. It is expected that the given bytes start with json.ObjectOpen.
+// It returns errEmptyObject if the JSON object is empty or errMissingType if
+// the @type field does not exist. It returns an error if the @type field is
+// invalid or if there are other decoding issues.
+func findTypeURL(d decoder) (json.Token, error) {
+	var typeURL string
+	var typeTok json.Token
+	numFields := 0
+	// Skip start object.
+	d.Read()
+
+Loop:
+	for {
+		tok, err := d.Read()
+		if err != nil {
+			return json.Token{}, err
+		}
+
+		switch tok.Kind() {
+		case json.ObjectClose:
+			if typeURL == "" {
+				// Did not find @type field.
+				if numFields > 0 {
+					return json.Token{}, errMissingType
+				}
+				return json.Token{}, errEmptyObject
+			}
+			break Loop
+
+		case json.Name:
+			numFields++
+			if tok.Name() != "@type" {
+				// Skip value.
+				if err := d.skipJSONValue(); err != nil {
+					return json.Token{}, err
+				}
+				continue
+			}
+
+			// Return an error if this was already set.
+			if typeURL != "" {
+				return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`)
+			}
+			// Read field value.
+			tok, err := d.Read()
+			if err != nil {
+				return json.Token{}, err
+			}
+			if tok.Kind() != json.String {
+				return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString())
+			}
+			typeURL = tok.ParsedString()
+			if typeURL == "" {
+				return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`)
+			}
+			typeTok = tok
+		}
+	}
+
+	return typeTok, nil
+}
+
+// skipJSONValue parses a JSON value (null, boolean, string, number, object and
+// array) in order to advance the read to the next JSON value. It relies on
+// the decoder returning an error if the types are not in valid sequence.
+func (d decoder) skipJSONValue() error {
+	var open int
+	for {
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		switch tok.Kind() {
+		case json.ObjectClose, json.ArrayClose:
+			open--
+		case json.ObjectOpen, json.ArrayOpen:
+			open++
+			if open > d.opts.RecursionLimit {
+				return errors.New("exceeded max recursion depth")
+			}
+		case json.EOF:
+			// This can only happen if there's a bug in Decoder.Read.
+			// Avoid an infinite loop if this does happen.
+			return errors.New("unexpected EOF")
+		}
+		if open == 0 {
+			return nil
+		}
+	}
+}
+
+// unmarshalAnyValue unmarshals the given custom-type message from the JSON
+// object's "value" field.
+func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error {
+	// Skip ObjectOpen, and start reading the fields.
+	d.Read()
+
+	var found bool // Used for detecting duplicate "value".
+	for {
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		switch tok.Kind() {
+		case json.ObjectClose:
+			if !found {
+				return d.newError(tok.Pos(), `missing "value" field`)
+			}
+			return nil
+
+		case json.Name:
+			switch tok.Name() {
+			case "@type":
+				// Skip the value as it was already parsed above.
+				d.Read()
+
+			case "value":
+				if found {
+					return d.newError(tok.Pos(), `duplicate "value" field`)
+				}
+				// Unmarshal the field value into the given message.
+				if err := unmarshal(d, m); err != nil {
+					return err
+				}
+				found = true
+
+			default:
+				if d.opts.DiscardUnknown {
+					if err := d.skipJSONValue(); err != nil {
+						return err
+					}
+					continue
+				}
+				return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
+			}
+		}
+	}
+}
+
+// Wrapper types are encoded as JSON primitives like string, number or boolean.
+
+func (e encoder) marshalWrapperType(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
+	val := m.Get(fd)
+	return e.marshalSingular(val, fd)
+}
+
+func (d decoder) unmarshalWrapperType(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
+	val, err := d.unmarshalScalar(fd)
+	if err != nil {
+		return err
+	}
+	m.Set(fd, val)
+	return nil
+}
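+
+// Illustrative examples (not part of the upstream source): a
+// google.protobuf.BoolValue holding true marshals to the JSON value true,
+// while a google.protobuf.Int64Value holding 123 marshals to "123", since
+// 64-bit integers are written out as JSON strings (see marshalSingular).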
+
+// The JSON representation for Empty is an empty JSON object.
+
+func (e encoder) marshalEmpty(protoreflect.Message) error {
+	e.StartObject()
+	e.EndObject()
+	return nil
+}
+
+func (d decoder) unmarshalEmpty(protoreflect.Message) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.ObjectOpen {
+		return d.unexpectedTokenError(tok)
+	}
+
+	for {
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		switch tok.Kind() {
+		case json.ObjectClose:
+			return nil
+
+		case json.Name:
+			if d.opts.DiscardUnknown {
+				if err := d.skipJSONValue(); err != nil {
+					return err
+				}
+				continue
+			}
+			return d.newError(tok.Pos(), "unknown field %v", tok.RawString())
+
+		default:
+			return d.unexpectedTokenError(tok)
+		}
+	}
+}
+
+// The JSON representation for Struct is a JSON object that contains the encoded
+// Struct.fields map and follows the serialization rules for a map.
+
+func (e encoder) marshalStruct(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
+	return e.marshalMap(m.Get(fd).Map(), fd)
+}
+
+func (d decoder) unmarshalStruct(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
+	return d.unmarshalMap(m.Mutable(fd).Map(), fd)
+}
+
+// The JSON representation for ListValue is a JSON array that contains the encoded
+// ListValue.values repeated field and follows the serialization rules for a
+// repeated field.
+
+func (e encoder) marshalListValue(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
+	return e.marshalList(m.Get(fd).List(), fd)
+}
+
+func (d decoder) unmarshalListValue(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
+	return d.unmarshalList(m.Mutable(fd).List(), fd)
+}
+
+// The JSON representation for a Value depends on which oneof field is set.
+// Each field in the oneof has its own custom serialization rule. A Value
+// message must have a oneof field set, else it is an error.
+
+func (e encoder) marshalKnownValue(m protoreflect.Message) error {
+	od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name)
+	fd := m.WhichOneof(od)
+	if fd == nil {
+		return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname)
+	}
+	if fd.Number() == genid.Value_NumberValue_field_number {
+		if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) {
+			return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v)
+		}
+	}
+	return e.marshalSingular(m.Get(fd), fd)
+}
+
+func (d decoder) unmarshalKnownValue(m protoreflect.Message) error {
+	tok, err := d.Peek()
+	if err != nil {
+		return err
+	}
+
+	var fd protoreflect.FieldDescriptor
+	var val protoreflect.Value
+	switch tok.Kind() {
+	case json.Null:
+		d.Read()
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number)
+		val = protoreflect.ValueOfEnum(0)
+
+	case json.Bool:
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number)
+		val = protoreflect.ValueOfBool(tok.Bool())
+
+	case json.Number:
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number)
+		var ok bool
+		val, ok = unmarshalFloat(tok, 64)
+		if !ok {
+			return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
+		}
+
+	case json.String:
+		// A JSON string may have been encoded from the number_value field,
+		// e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows
+		// for it to be in JSON string form. Given this custom encoding spec,
+		// however, there is no way to identify that and hence a JSON string is
+		// always assigned to the string_value field, which means that certain
+		// encoding cannot be parsed back to the same field.
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number)
+		val = protoreflect.ValueOfString(tok.ParsedString())
+
+	case json.ObjectOpen:
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number)
+		val = m.NewField(fd)
+		if err := d.unmarshalStruct(val.Message()); err != nil {
+			return err
+		}
+
+	case json.ArrayOpen:
+		fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number)
+		val = m.NewField(fd)
+		if err := d.unmarshalListValue(val.Message()); err != nil {
+			return err
+		}
+
+	default:
+		return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString())
+	}
+
+	m.Set(fd, val)
+	return nil
+}
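+
+// For illustration (not from the upstream source), unmarshalKnownValue maps
+// JSON inputs to google.protobuf.Value oneof fields as follows:
+
+//	null  -> null_value
+//	true  -> bool_value
+//	1.5   -> number_value
+//	"abc" -> string_value
+//	{...} -> struct_value
+//	[...] -> list_value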
+
+// The JSON representation for a Duration is a JSON string that ends in the
+// suffix "s" (indicating seconds) and is preceded by the number of seconds,
+// with nanoseconds expressed as fractional seconds.
+//
+// Durations less than one second are represented with a 0 seconds field and a
+// positive or negative nanos field. For durations of one second or more, a
+// non-zero value for the nanos field must be of the same sign as the seconds
+// field.
+//
+// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive.
+// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive.
+
+const (
+	secondsInNanos       = 999999999
+	maxSecondsInDuration = 315576000000
+)
+
+func (e encoder) marshalDuration(m protoreflect.Message) error {
+	fds := m.Descriptor().Fields()
+	fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
+	fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
+
+	secsVal := m.Get(fdSeconds)
+	nanosVal := m.Get(fdNanos)
+	secs := secsVal.Int()
+	nanos := nanosVal.Int()
+	if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
+		return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs)
+	}
+	if nanos < -secondsInNanos || nanos > secondsInNanos {
+		return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos)
+	}
+	if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) {
+		return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname)
+	}
+	// Generated output always contains 0, 3, 6, or 9 fractional digits,
+	// depending on required precision, followed by the suffix "s".
+	var sign string
+	if secs < 0 || nanos < 0 {
+		sign, secs, nanos = "-", -1*secs, -1*nanos
+	}
+	x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos)
+	x = strings.TrimSuffix(x, "000")
+	x = strings.TrimSuffix(x, "000")
+	x = strings.TrimSuffix(x, ".000")
+	e.WriteString(x + "s")
+	return nil
+}
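+
+// Illustrative outputs (not part of the upstream source): seconds=3, nanos=0
+// marshals to "3s"; seconds=3, nanos=1 to "3.000000001s"; and seconds=-1,
+// nanos=-500000000 to "-1.500s".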
+
+func (d decoder) unmarshalDuration(m protoreflect.Message) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.String {
+		return d.unexpectedTokenError(tok)
+	}
+
+	secs, nanos, ok := parseDuration(tok.ParsedString())
+	if !ok {
+		return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString())
+	}
+	// Validate seconds. No need to validate nanos because parseDuration would
+	// have covered that already.
+	if secs < -maxSecondsInDuration || secs > maxSecondsInDuration {
+		return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString())
+	}
+
+	fds := m.Descriptor().Fields()
+	fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
+	fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)
+
+	m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
+	m.Set(fdNanos, protoreflect.ValueOfInt32(nanos))
+	return nil
+}
+
+// parseDuration parses the given input string for the seconds and nanoseconds
+// values of the Duration JSON format. The format is a decimal number with the
+// suffix 's'. It may have an optional plus/minus sign. There must be at least
+// an integer or a fractional part. The fractional part is limited to 9 digits
+// for nanosecond precision, regardless of whether there are trailing zero
+// digits. Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s.
+func parseDuration(input string) (int64, int32, bool) {
+	b := []byte(input)
+	size := len(b)
+	if size < 2 {
+		return 0, 0, false
+	}
+	if b[size-1] != 's' {
+		return 0, 0, false
+	}
+	b = b[:size-1]
+
+	// Read optional plus/minus symbol.
+	var neg bool
+	switch b[0] {
+	case '-':
+		neg = true
+		b = b[1:]
+	case '+':
+		b = b[1:]
+	}
+	if len(b) == 0 {
+		return 0, 0, false
+	}
+
+	// Read the integer part.
+	var intp []byte
+	switch {
+	case b[0] == '0':
+		b = b[1:]
+
+	case '1' <= b[0] && b[0] <= '9':
+		intp = b[0:]
+		b = b[1:]
+		n := 1
+		for len(b) > 0 && '0' <= b[0] && b[0] <= '9' {
+			n++
+			b = b[1:]
+		}
+		intp = intp[:n]
+
+	case b[0] == '.':
+		// Continue below.
+
+	default:
+		return 0, 0, false
+	}
+
+	hasFrac := false
+	var frac [9]byte
+	if len(b) > 0 {
+		if b[0] != '.' {
+			return 0, 0, false
+		}
+		// Read the fractional part.
+		b = b[1:]
+		n := 0
+		for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' {
+			frac[n] = b[0]
+			n++
+			b = b[1:]
+		}
+		// It is not valid if there are more bytes left.
+		if len(b) > 0 {
+			return 0, 0, false
+		}
+		// Pad fractional part with 0s.
+		for i := n; i < 9; i++ {
+			frac[i] = '0'
+		}
+		hasFrac = true
+	}
+
+	var secs int64
+	if len(intp) > 0 {
+		var err error
+		secs, err = strconv.ParseInt(string(intp), 10, 64)
+		if err != nil {
+			return 0, 0, false
+		}
+	}
+
+	var nanos int64
+	if hasFrac {
+		nanob := bytes.TrimLeft(frac[:], "0")
+		if len(nanob) > 0 {
+			var err error
+			nanos, err = strconv.ParseInt(string(nanob), 10, 32)
+			if err != nil {
+				return 0, 0, false
+			}
+		}
+	}
+
+	if neg {
+		if secs > 0 {
+			secs = -secs
+		}
+		if nanos > 0 {
+			nanos = -nanos
+		}
+	}
+	return secs, int32(nanos), true
+}
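+
+// Worked examples (illustrative, not part of the upstream source):
+// parseDuration("1.5s") returns (1, 500000000, true), and
+// parseDuration("-0.000000001s") returns (0, -1, true).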
+
+// The JSON representation for a Timestamp is a JSON string in the RFC 3339
+// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where
+// {year} is always expressed using four digits while {month}, {day}, {hour},
+// {min}, and {sec} are zero-padded to two digits each. The fractional seconds,
+// which can go up to 9 digits (up to 1 nanosecond resolution), are optional. The
+// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding
+// should always use UTC (as indicated by "Z") and a decoder should be able to
+// accept both UTC and other timezones (as indicated by an offset).
+//
+// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z
+// inclusive.
+// Timestamp.nanos must be from 0 to 999,999,999 inclusive.
+
+const (
+	maxTimestampSeconds = 253402300799
+	minTimestampSeconds = -62135596800
+)
+
+func (e encoder) marshalTimestamp(m protoreflect.Message) error {
+	fds := m.Descriptor().Fields()
+	fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
+	fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
+
+	secsVal := m.Get(fdSeconds)
+	nanosVal := m.Get(fdNanos)
+	secs := secsVal.Int()
+	nanos := nanosVal.Int()
+	if secs < minTimestampSeconds || secs > maxTimestampSeconds {
+		return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs)
+	}
+	if nanos < 0 || nanos > secondsInNanos {
+		return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos)
+	}
+	// Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3,
+	// 6 or 9 fractional digits.
+	t := time.Unix(secs, nanos).UTC()
+	x := t.Format("2006-01-02T15:04:05.000000000")
+	x = strings.TrimSuffix(x, "000")
+	x = strings.TrimSuffix(x, "000")
+	x = strings.TrimSuffix(x, ".000")
+	e.WriteString(x + "Z")
+	return nil
+}
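+
+// Illustrative output (not part of the upstream source): a Timestamp for
+// 2017-01-15T01:30:15Z with nanos=10000000 marshals to
+// "2017-01-15T01:30:15.010Z".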
+
+func (d decoder) unmarshalTimestamp(m protoreflect.Message) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.String {
+		return d.unexpectedTokenError(tok)
+	}
+
+	s := tok.ParsedString()
+	t, err := time.Parse(time.RFC3339Nano, s)
+	if err != nil {
+		return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
+	}
+	// Validate seconds.
+	secs := t.Unix()
+	if secs < minTimestampSeconds || secs > maxTimestampSeconds {
+		return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString())
+	}
+	// Validate subseconds.
+	i := strings.LastIndexByte(s, '.')  // start of subsecond field
+	j := strings.LastIndexAny(s, "Z-+") // start of timezone field
+	if i >= 0 && j >= i && j-i > len(".999999999") {
+		return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
+	}
+
+	fds := m.Descriptor().Fields()
+	fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
+	fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)
+
+	m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
+	m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond())))
+	return nil
+}
+
+// The JSON representation for a FieldMask is a JSON string where paths are
+// separated by a comma. Field names in each path are converted to/from the
+// lower-camel naming convention. Encoding should fail if the path name would
+// end up differently after a round-trip.
+
+func (e encoder) marshalFieldMask(m protoreflect.Message) error {
+	fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
+	list := m.Get(fd).List()
+	paths := make([]string, 0, list.Len())
+
+	for i := 0; i < list.Len(); i++ {
+		s := list.Get(i).String()
+		if !protoreflect.FullName(s).IsValid() {
+			return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s)
+		}
+		// Return error if conversion to camelCase is not reversible.
+		cc := strs.JSONCamelCase(s)
+		if s != strs.JSONSnakeCase(cc) {
+			return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s)
+		}
+		paths = append(paths, cc)
+	}
+
+	e.WriteString(strings.Join(paths, ","))
+	return nil
+}
+
+func (d decoder) unmarshalFieldMask(m protoreflect.Message) error {
+	tok, err := d.Read()
+	if err != nil {
+		return err
+	}
+	if tok.Kind() != json.String {
+		return d.unexpectedTokenError(tok)
+	}
+	str := strings.TrimSpace(tok.ParsedString())
+	if str == "" {
+		return nil
+	}
+	paths := strings.Split(str, ",")
+
+	fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
+	list := m.Mutable(fd).List()
+
+	for _, s0 := range paths {
+		s := strs.JSONSnakeCase(s0)
+		if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() {
+			return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0)
+		}
+		list.Append(protoreflect.ValueOfString(s))
+	}
+	return nil
+}
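+
+// Illustrative round trip (not part of the upstream source): the paths
+// ["foo_bar", "baz"] marshal to the JSON string "fooBar,baz", and unmarshaling
+// "fooBar,baz" appends "foo_bar" and "baz" back to the paths list.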
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/descopts/options.go b/hack/tools/vendor/google.golang.org/protobuf/internal/descopts/options.go
index 8401be8c84..024ffebd3d 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/descopts/options.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/descopts/options.go
@@ -9,7 +9,7 @@
 // dependency on the descriptor proto package).
 package descopts
 
-import pref "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
 
 // These variables are set by the init function in descriptor.pb.go via logic
 // in internal/filetype. In other words, so long as the descriptor proto package
@@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect"
 //
 // Each variable is populated with a nil pointer to the options struct.
 var (
-	File           pref.ProtoMessage
-	Enum           pref.ProtoMessage
-	EnumValue      pref.ProtoMessage
-	Message        pref.ProtoMessage
-	Field          pref.ProtoMessage
-	Oneof          pref.ProtoMessage
-	ExtensionRange pref.ProtoMessage
-	Service        pref.ProtoMessage
-	Method         pref.ProtoMessage
+	File           protoreflect.ProtoMessage
+	Enum           protoreflect.ProtoMessage
+	EnumValue      protoreflect.ProtoMessage
+	Message        protoreflect.ProtoMessage
+	Field          protoreflect.ProtoMessage
+	Oneof          protoreflect.ProtoMessage
+	ExtensionRange protoreflect.ProtoMessage
+	Service        protoreflect.ProtoMessage
+	Method         protoreflect.ProtoMessage
 )
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/hack/tools/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
index 029a6a12d7..08dad7692c 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
@@ -5,7 +5,7 @@
 // Package editionssupport defines constants for editions that are supported.
 package editionssupport
 
-import descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+import "google.golang.org/protobuf/types/descriptorpb"
 
 const (
 	Minimum = descriptorpb.Edition_EDITION_PROTO2
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
new file mode 100644
index 0000000000..ea1d3e65a5
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
@@ -0,0 +1,340 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"regexp"
+	"unicode/utf8"
+
+	"google.golang.org/protobuf/internal/errors"
+)
+
+// call specifies which Decoder method was invoked.
+type call uint8
+
+const (
+	readCall call = iota
+	peekCall
+)
+
+const unexpectedFmt = "unexpected token %s"
+
+// ErrUnexpectedEOF means that EOF was encountered in the middle of the input.
+var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF)
+
+// Decoder is a token-based JSON decoder.
+type Decoder struct {
+	// lastCall is last method called, either readCall or peekCall.
+	// Initial value is readCall.
+	lastCall call
+
+	// lastToken contains the last read token.
+	lastToken Token
+
+	// lastErr contains the last read error.
+	lastErr error
+
+	// openStack is a stack containing ObjectOpen and ArrayOpen values. The
+	// top of stack represents the object or the array the current value is
+	// directly located in.
+	openStack []Kind
+
+	// orig is used in reporting line and column.
+	orig []byte
+	// in contains the unconsumed input.
+	in []byte
+}
+
+// NewDecoder returns a Decoder to read the given []byte.
+func NewDecoder(b []byte) *Decoder {
+	return &Decoder{orig: b, in: b}
+}
+
+// Peek looks ahead and returns the next token without advancing a read.
+func (d *Decoder) Peek() (Token, error) {
+	defer func() { d.lastCall = peekCall }()
+	if d.lastCall == readCall {
+		d.lastToken, d.lastErr = d.Read()
+	}
+	return d.lastToken, d.lastErr
+}
+
+// Read returns the next JSON token.
+// It will return an error if there is no valid token.
+func (d *Decoder) Read() (Token, error) {
+	const scalar = Null | Bool | Number | String
+
+	defer func() { d.lastCall = readCall }()
+	if d.lastCall == peekCall {
+		return d.lastToken, d.lastErr
+	}
+
+	tok, err := d.parseNext()
+	if err != nil {
+		return Token{}, err
+	}
+
+	switch tok.kind {
+	case EOF:
+		if len(d.openStack) != 0 ||
+			d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 {
+			return Token{}, ErrUnexpectedEOF
+		}
+
+	case Null:
+		if !d.isValueNext() {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+
+	case Bool, Number:
+		if !d.isValueNext() {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+
+	case String:
+		if d.isValueNext() {
+			break
+		}
+		// This string token should only be for a field name.
+		if d.lastToken.kind&(ObjectOpen|comma) == 0 {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+		if len(d.in) == 0 {
+			return Token{}, ErrUnexpectedEOF
+		}
+		if c := d.in[0]; c != ':' {
+			return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c))
+		}
+		tok.kind = Name
+		d.consume(1)
+
+	case ObjectOpen, ArrayOpen:
+		if !d.isValueNext() {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+		d.openStack = append(d.openStack, tok.kind)
+
+	case ObjectClose:
+		if len(d.openStack) == 0 ||
+			d.lastToken.kind&(Name|comma) != 0 ||
+			d.openStack[len(d.openStack)-1] != ObjectOpen {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+		d.openStack = d.openStack[:len(d.openStack)-1]
+
+	case ArrayClose:
+		if len(d.openStack) == 0 ||
+			d.lastToken.kind == comma ||
+			d.openStack[len(d.openStack)-1] != ArrayOpen {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+		d.openStack = d.openStack[:len(d.openStack)-1]
+
+	case comma:
+		if len(d.openStack) == 0 ||
+			d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 {
+			return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString())
+		}
+	}
+
+	// Update d.lastToken only after validating token to be in the right sequence.
+	d.lastToken = tok
+
+	if d.lastToken.kind == comma {
+		return d.Read()
+	}
+	return tok, nil
+}
+
+// Any sequence that looks like a non-delimiter (for error reporting).
+var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`)
+
+// parseNext parses the next JSON token. It returns a Token object for
+// different types, except for Name. It does not check whether the next token
+// is in a valid sequence or not.
+func (d *Decoder) parseNext() (Token, error) {
+	// Trim leading spaces.
+	d.consume(0)
+
+	in := d.in
+	if len(in) == 0 {
+		return d.consumeToken(EOF, 0), nil
+	}
+
+	switch in[0] {
+	case 'n':
+		if n := matchWithDelim("null", in); n != 0 {
+			return d.consumeToken(Null, n), nil
+		}
+
+	case 't':
+		if n := matchWithDelim("true", in); n != 0 {
+			return d.consumeBoolToken(true, n), nil
+		}
+
+	case 'f':
+		if n := matchWithDelim("false", in); n != 0 {
+			return d.consumeBoolToken(false, n), nil
+		}
+
+	case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		if n, ok := parseNumber(in); ok {
+			return d.consumeToken(Number, n), nil
+		}
+
+	case '"':
+		s, n, err := d.parseString(in)
+		if err != nil {
+			return Token{}, err
+		}
+		return d.consumeStringToken(s, n), nil
+
+	case '{':
+		return d.consumeToken(ObjectOpen, 1), nil
+
+	case '}':
+		return d.consumeToken(ObjectClose, 1), nil
+
+	case '[':
+		return d.consumeToken(ArrayOpen, 1), nil
+
+	case ']':
+		return d.consumeToken(ArrayClose, 1), nil
+
+	case ',':
+		return d.consumeToken(comma, 1), nil
+	}
+	return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in))
+}
+
+// newSyntaxError returns an error with line and column information useful for
+// syntax errors.
+func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error {
+	e := errors.New(f, x...)
+	line, column := d.Position(pos)
+	return errors.New("syntax error (line %d:%d): %v", line, column, e)
+}
+
+// Position returns the line and column number of the given index in the
+// original input. It panics if the index is out of range.
+func (d *Decoder) Position(idx int) (line int, column int) {
+	b := d.orig[:idx]
+	line = bytes.Count(b, []byte("\n")) + 1
+	if i := bytes.LastIndexByte(b, '\n'); i >= 0 {
+		b = b[i+1:]
+	}
+	column = utf8.RuneCount(b) + 1 // ignore multi-rune characters
+	return line, column
+}
+
+// currPos returns the current index position of d.in from d.orig.
+func (d *Decoder) currPos() int {
+	return len(d.orig) - len(d.in)
+}
+
+// matchWithDelim matches s with the input b and verifies that the match
+// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]").
+// As a special case, EOF is considered a delimiter. It returns the length of s
+// if there is a match, else 0.
+func matchWithDelim(s string, b []byte) int {
+	if !bytes.HasPrefix(b, []byte(s)) {
+		return 0
+	}
+
+	n := len(s)
+	if n < len(b) && isNotDelim(b[n]) {
+		return 0
+	}
+	return n
+}
+
+// isNotDelim returns true if the given byte is not a delimiter character.
+func isNotDelim(c byte) bool {
+	return (c == '-' || c == '+' || c == '.' || c == '_' ||
+		('a' <= c && c <= 'z') ||
+		('A' <= c && c <= 'Z') ||
+		('0' <= c && c <= '9'))
+}
+
+// consume consumes n bytes of input and any subsequent whitespace.
+func (d *Decoder) consume(n int) {
+	d.in = d.in[n:]
+	for len(d.in) > 0 {
+		switch d.in[0] {
+		case ' ', '\n', '\r', '\t':
+			d.in = d.in[1:]
+		default:
+			return
+		}
+	}
+}
+
+// isValueNext returns true if the next type should be a JSON value: Null,
+// Bool, Number, String, ObjectOpen or ArrayOpen.
+func (d *Decoder) isValueNext() bool {
+	if len(d.openStack) == 0 {
+		return d.lastToken.kind == 0
+	}
+
+	start := d.openStack[len(d.openStack)-1]
+	switch start {
+	case ObjectOpen:
+		return d.lastToken.kind&Name != 0
+	case ArrayOpen:
+		return d.lastToken.kind&(ArrayOpen|comma) != 0
+	}
+	panic(fmt.Sprintf(
+		"unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v",
+		d.lastToken.kind, start))
+}
+
+// consumeToken constructs a Token for the given Kind with raw value derived
+// from the current d.in and given size, and consumes that many bytes of it.
+func (d *Decoder) consumeToken(kind Kind, size int) Token {
+	tok := Token{
+		kind: kind,
+		raw:  d.in[:size],
+		pos:  len(d.orig) - len(d.in),
+	}
+	d.consume(size)
+	return tok
+}
+
+// consumeBoolToken constructs a Token for a Bool kind with raw value derived from
+// current d.in and given size.
+func (d *Decoder) consumeBoolToken(b bool, size int) Token {
+	tok := Token{
+		kind: Bool,
+		raw:  d.in[:size],
+		pos:  len(d.orig) - len(d.in),
+		boo:  b,
+	}
+	d.consume(size)
+	return tok
+}
+
+// consumeStringToken constructs a Token for a String kind with raw value derived
+// from current d.in and given size.
+func (d *Decoder) consumeStringToken(s string, size int) Token {
+	tok := Token{
+		kind: String,
+		raw:  d.in[:size],
+		pos:  len(d.orig) - len(d.in),
+		str:  s,
+	}
+	d.consume(size)
+	return tok
+}
+
+// Clone returns a copy of the Decoder for use in reading ahead the next JSON
+// object, array or other values without affecting the current Decoder.
+func (d *Decoder) Clone() *Decoder {
+	ret := *d
+	ret.openStack = append([]Kind(nil), ret.openStack...)
+	return &ret
+}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
new file mode 100644
index 0000000000..2999d71332
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
@@ -0,0 +1,254 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"strconv"
+)
+
+// parseNumber reads the given []byte for a valid JSON number. If it is valid,
+// it returns the number of bytes. Parsing logic follows the definition in
+// https://tools.ietf.org/html/rfc7159#section-6, and is based on the
+// encoding/json.isValidNumber function.
+func parseNumber(input []byte) (int, bool) {
+	var n int
+
+	s := input
+	if len(s) == 0 {
+		return 0, false
+	}
+
+	// Optional -
+	if s[0] == '-' {
+		s = s[1:]
+		n++
+		if len(s) == 0 {
+			return 0, false
+		}
+	}
+
+	// Digits
+	switch {
+	case s[0] == '0':
+		s = s[1:]
+		n++
+
+	case '1' <= s[0] && s[0] <= '9':
+		s = s[1:]
+		n++
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+
+	default:
+		return 0, false
+	}
+
+	// . followed by 1 or more digits.
+	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+		s = s[2:]
+		n += 2
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+	}
+
+	// e or E followed by an optional - or + and
+	// 1 or more digits.
+	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+		s = s[1:]
+		n++
+		if s[0] == '+' || s[0] == '-' {
+			s = s[1:]
+			n++
+			if len(s) == 0 {
+				return 0, false
+			}
+		}
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+	}
+
+	// Check that the next byte is a delimiter or that the input ends there.
+	if n < len(input) && isNotDelim(input[n]) {
+		return 0, false
+	}
+
+	return n, true
+}
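+
+// Worked examples (illustrative, not part of the upstream source):
+// parseNumber([]byte("-12.34e+5")) returns (9, true), consuming the whole
+// input, while parseNumber([]byte("01")) returns (0, false) because a leading
+// zero may not be followed by further digits.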
+
+// numberParts is the result of parsing out a valid JSON number. It contains
+// the parts of a number. The parts are used for integer conversion.
+type numberParts struct {
+	neg  bool
+	intp []byte
+	frac []byte
+	exp  []byte
+}
+
+// parseNumberParts constructs numberParts from the given []byte. The logic
+// here is similar to parseNumber above, with the difference of having to
+// construct numberParts. The slice fields in numberParts are subslices of the
+// input.
+func parseNumberParts(input []byte) (numberParts, bool) {
+	var neg bool
+	var intp []byte
+	var frac []byte
+	var exp []byte
+
+	s := input
+	if len(s) == 0 {
+		return numberParts{}, false
+	}
+
+	// Optional -
+	if s[0] == '-' {
+		neg = true
+		s = s[1:]
+		if len(s) == 0 {
+			return numberParts{}, false
+		}
+	}
+
+	// Digits
+	switch {
+	case s[0] == '0':
+		// Skip first 0 and no need to store.
+		s = s[1:]
+
+	case '1' <= s[0] && s[0] <= '9':
+		intp = s
+		n := 1
+		s = s[1:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		intp = intp[:n]
+
+	default:
+		return numberParts{}, false
+	}
+
+	// . followed by 1 or more digits.
+	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+		frac = s[1:]
+		n := 1
+		s = s[2:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		frac = frac[:n]
+	}
+
+	// e or E followed by an optional - or + and
+	// 1 or more digits.
+	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+		s = s[1:]
+		exp = s
+		n := 0
+		if s[0] == '+' || s[0] == '-' {
+			s = s[1:]
+			n++
+			if len(s) == 0 {
+				return numberParts{}, false
+			}
+		}
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		exp = exp[:n]
+	}
+
+	return numberParts{
+		neg:  neg,
+		intp: intp,
+		frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right.
+		exp:  exp,
+	}, true
+}
+
+// normalizeToIntString returns an integer string in normal form without the
+// E-notation for the given numberParts. It returns false if the number is not
+// an integer or if the exponent exceeds the max/min int value.
+func normalizeToIntString(n numberParts) (string, bool) {
+	intpSize := len(n.intp)
+	fracSize := len(n.frac)
+
+	if intpSize == 0 && fracSize == 0 {
+		return "0", true
+	}
+
+	var exp int
+	if len(n.exp) > 0 {
+		i, err := strconv.ParseInt(string(n.exp), 10, 32)
+		if err != nil {
+			return "", false
+		}
+		exp = int(i)
+	}
+
+	var num []byte
+	if exp >= 0 {
+		// For positive E, shift fraction digits into integer part and also pad
+		// with zeroes as needed.
+
+		// If there are more digits in fraction than the E value, then the
+		// number is not an integer.
+		if fracSize > exp {
+			return "", false
+		}
+
+		// Make sure resulting digits are within max value limit to avoid
+		// unnecessarily constructing a large byte slice that may simply fail
+		// later on.
+		const maxDigits = 20 // Max uint64 value has 20 decimal digits.
+		if intpSize+exp > maxDigits {
+			return "", false
+		}
+
+		// Set cap to make a copy of integer part when appended.
+		num = n.intp[:len(n.intp):len(n.intp)]
+		num = append(num, n.frac...)
+		for i := 0; i < exp-fracSize; i++ {
+			num = append(num, '0')
+		}
+	} else {
+		// For negative E, shift digits in integer part out.
+
+		// If there are fractions, then the number is not an integer.
+		if fracSize > 0 {
+			return "", false
+		}
+
+		// index is where the decimal point will be after adjusting for negative
+		// exponent.
+		index := intpSize + exp
+		if index < 0 {
+			return "", false
+		}
+
+		num = n.intp
+		// If any of the digits being shifted to the right of the decimal point
+		// is non-zero, then the number is not an integer.
+		for i := index; i < intpSize; i++ {
+			if num[i] != '0' {
+				return "", false
+			}
+		}
+		num = num[:index]
+	}
+
+	if n.neg {
+		return "-" + string(num), true
+	}
+	return string(num), true
+}
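+
+// Worked examples (illustrative, not part of the upstream source):
+// "1.5e3" normalizes to "1500", "100e-2" normalizes to "1", and "1.5" is
+// rejected because it is not an integer.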
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
new file mode 100644
index 0000000000..f7fea7d8dd
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
@@ -0,0 +1,91 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"strconv"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+
+	"google.golang.org/protobuf/internal/strs"
+)
+
+func (d *Decoder) parseString(in []byte) (string, int, error) {
+	in0 := in
+	if len(in) == 0 {
+		return "", 0, ErrUnexpectedEOF
+	}
+	if in[0] != '"' {
+		return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0])
+	}
+	in = in[1:]
+	i := indexNeedEscapeInBytes(in)
+	in, out := in[i:], in[:i:i] // set cap to prevent mutations
+	for len(in) > 0 {
+		switch r, n := utf8.DecodeRune(in); {
+		case r == utf8.RuneError && n == 1:
+			return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string")
+		case r < ' ':
+			return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r)
+		case r == '"':
+			in = in[1:]
+			n := len(in0) - len(in)
+			return string(out), n, nil
+		case r == '\\':
+			if len(in) < 2 {
+				return "", 0, ErrUnexpectedEOF
+			}
+			switch r := in[1]; r {
+			case '"', '\\', '/':
+				in, out = in[2:], append(out, r)
+			case 'b':
+				in, out = in[2:], append(out, '\b')
+			case 'f':
+				in, out = in[2:], append(out, '\f')
+			case 'n':
+				in, out = in[2:], append(out, '\n')
+			case 'r':
+				in, out = in[2:], append(out, '\r')
+			case 't':
+				in, out = in[2:], append(out, '\t')
+			case 'u':
+				if len(in) < 6 {
+					return "", 0, ErrUnexpectedEOF
+				}
+				v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
+				if err != nil {
+					return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
+				}
+				in = in[6:]
+
+				r := rune(v)
+				if utf16.IsSurrogate(r) {
+					if len(in) < 6 {
+						return "", 0, ErrUnexpectedEOF
+					}
+					v, err := strconv.ParseUint(string(in[2:6]), 16, 16)
+					r = utf16.DecodeRune(r, rune(v))
+					if in[0] != '\\' || in[1] != 'u' ||
+						r == unicode.ReplacementChar || err != nil {
+						return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6])
+					}
+					in = in[6:]
+				}
+				out = append(out, string(r)...)
+			default:
+				return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2])
+			}
+		default:
+			i := indexNeedEscapeInBytes(in[n:])
+			in, out = in[n+i:], append(out, in[:n+i]...)
+		}
+	}
+	return "", 0, ErrUnexpectedEOF
+}
+
+// indexNeedEscapeInBytes returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
new file mode 100644
index 0000000000..50578d6593
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
@@ -0,0 +1,192 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+)
+
+// Kind represents a token kind expressible in the JSON format.
+type Kind uint16
+
+const (
+	Invalid Kind = (1 << iota) / 2
+	EOF
+	Null
+	Bool
+	Number
+	String
+	Name
+	ObjectOpen
+	ObjectClose
+	ArrayOpen
+	ArrayClose
+
+	// comma is only for parsing in between tokens and
+	// does not need to be exported.
+	comma
+)
+
+func (k Kind) String() string {
+	switch k {
+	case EOF:
+		return "eof"
+	case Null:
+		return "null"
+	case Bool:
+		return "bool"
+	case Number:
+		return "number"
+	case String:
+		return "string"
+	case ObjectOpen:
+		return "{"
+	case ObjectClose:
+		return "}"
+	case Name:
+		return "name"
+	case ArrayOpen:
+		return "["
+	case ArrayClose:
+		return "]"
+	case comma:
+		return ","
+	}
+	return ""
+}
+
+// Token provides a parsed token kind and value.
+//
+// Values are provided by the different accessor methods. The accessor methods
+// Name, Bool, and ParsedString will panic if called on the wrong kind. There
+// are different accessor methods for the Number kind for converting to the
+// appropriate Go numeric type, and those methods have an ok return value.
+type Token struct {
+	// Token kind.
+	kind Kind
+	// pos provides the position of the token in the original input.
+	pos int
+	// raw bytes of the serialized token.
+	// This is a subslice into the original input.
+	raw []byte
+	// boo is parsed boolean value.
+	boo bool
+	// str is parsed string value.
+	str string
+}
+
+// Kind returns the token kind.
+func (t Token) Kind() Kind {
+	return t.kind
+}
+
+// RawString returns the read value as a string.
+func (t Token) RawString() string {
+	return string(t.raw)
+}
+
+// Pos returns the token position from the input.
+func (t Token) Pos() int {
+	return t.pos
+}
+
+// Name returns the object name if token is Name, else it panics.
+func (t Token) Name() string {
+	if t.kind == Name {
+		return t.str
+	}
+	panic(fmt.Sprintf("Token is not a Name: %v", t.RawString()))
+}
+
+// Bool returns the bool value if token kind is Bool, else it panics.
+func (t Token) Bool() bool {
+	if t.kind == Bool {
+		return t.boo
+	}
+	panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString()))
+}
+
+// ParsedString returns the string value if token kind is String, else it
+// panics.
+func (t Token) ParsedString() string {
+	if t.kind == String {
+		return t.str
+	}
+	panic(fmt.Sprintf("Token is not a String: %v", t.RawString()))
+}
+
+// Float returns the floating-point number if token kind is Number.
+//
+// The floating-point precision is specified by the bitSize parameter: 32 for
+// float32 or 64 for float64. If bitSize=32, the result still has type float64,
+// but it will be convertible to float32 without changing its value. It will
+// return false if the number exceeds the floating-point limits for the given
+// bitSize.
+func (t Token) Float(bitSize int) (float64, bool) {
+	if t.kind != Number {
+		return 0, false
+	}
+	f, err := strconv.ParseFloat(t.RawString(), bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return f, true
+}
+
+// Int returns the signed integer number if token is Number.
+//
+// The given bitSize specifies the integer type that the result must fit into.
+// It returns false if the number is not an integer value or if the result
+// exceeds the limits for the given bitSize.
+func (t Token) Int(bitSize int) (int64, bool) {
+	s, ok := t.getIntStr()
+	if !ok {
+		return 0, false
+	}
+	n, err := strconv.ParseInt(s, 10, bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return n, true
+}
+
+// Uint returns the unsigned integer number if token is Number.
+//
+// The given bitSize specifies the unsigned integer type that the result must
+// fit into. It returns false if the number is not an unsigned integer value
+// or if the result exceeds the limits for the given bitSize.
+func (t Token) Uint(bitSize int) (uint64, bool) {
+	s, ok := t.getIntStr()
+	if !ok {
+		return 0, false
+	}
+	n, err := strconv.ParseUint(s, 10, bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return n, true
+}
+
+func (t Token) getIntStr() (string, bool) {
+	if t.kind != Number {
+		return "", false
+	}
+	parts, ok := parseNumberParts(t.raw)
+	if !ok {
+		return "", false
+	}
+	return normalizeToIntString(parts)
+}
+
+// TokenEquals returns true if given Tokens are equal, else false.
+func TokenEquals(x, y Token) bool {
+	return x.kind == y.kind &&
+		x.pos == y.pos &&
+		bytes.Equal(x.raw, y.raw) &&
+		x.boo == y.boo &&
+		x.str == y.str
+}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
new file mode 100644
index 0000000000..934f2dcb39
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
@@ -0,0 +1,278 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"math"
+	"math/bits"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	"google.golang.org/protobuf/internal/detrand"
+	"google.golang.org/protobuf/internal/errors"
+)
+
+// kind represents an encoding type.
+type kind uint8
+
+const (
+	_ kind = (1 << iota) / 2
+	name
+	scalar
+	objectOpen
+	objectClose
+	arrayOpen
+	arrayClose
+)
+
+// Encoder provides methods to write out JSON constructs and values. The user is
+// responsible for producing valid sequences of JSON constructs and values.
+type Encoder struct {
+	indent   string
+	lastKind kind
+	indents  []byte
+	out      []byte
+}
+
+// NewEncoder returns an Encoder.
+//
+// If indent is a non-empty string, it causes every entry for an Array or Object
+// to be preceded by the indent and trailed by a newline.
+func NewEncoder(buf []byte, indent string) (*Encoder, error) {
+	e := &Encoder{
+		out: buf,
+	}
+	if len(indent) > 0 {
+		if strings.Trim(indent, " \t") != "" {
+			return nil, errors.New("indent may only be composed of space or tab characters")
+		}
+		e.indent = indent
+	}
+	return e, nil
+}
+
+// Bytes returns the written bytes.
+func (e *Encoder) Bytes() []byte {
+	return e.out
+}
+
+// WriteNull writes out the null value.
+func (e *Encoder) WriteNull() {
+	e.prepareNext(scalar)
+	e.out = append(e.out, "null"...)
+}
+
+// WriteBool writes out the given boolean value.
+func (e *Encoder) WriteBool(b bool) {
+	e.prepareNext(scalar)
+	if b {
+		e.out = append(e.out, "true"...)
+	} else {
+		e.out = append(e.out, "false"...)
+	}
+}
+
+// WriteString writes out the given string as a JSON string value. It returns
+// an error if the input string contains invalid UTF-8.
+func (e *Encoder) WriteString(s string) error {
+	e.prepareNext(scalar)
+	var err error
+	if e.out, err = appendString(e.out, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Sentinel error used for indicating invalid UTF-8.
+var errInvalidUTF8 = errors.New("invalid UTF-8")
+
+func appendString(out []byte, in string) ([]byte, error) {
+	out = append(out, '"')
+	i := indexNeedEscapeInString(in)
+	in, out = in[i:], append(out, in[:i]...)
+	for len(in) > 0 {
+		switch r, n := utf8.DecodeRuneInString(in); {
+		case r == utf8.RuneError && n == 1:
+			return out, errInvalidUTF8
+		case r < ' ' || r == '"' || r == '\\':
+			out = append(out, '\\')
+			switch r {
+			case '"', '\\':
+				out = append(out, byte(r))
+			case '\b':
+				out = append(out, 'b')
+			case '\f':
+				out = append(out, 'f')
+			case '\n':
+				out = append(out, 'n')
+			case '\r':
+				out = append(out, 'r')
+			case '\t':
+				out = append(out, 't')
+			default:
+				out = append(out, 'u')
+				out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...)
+				out = strconv.AppendUint(out, uint64(r), 16)
+			}
+			in = in[n:]
+		default:
+			i := indexNeedEscapeInString(in[n:])
+			in, out = in[n+i:], append(out, in[:n+i]...)
+		}
+	}
+	out = append(out, '"')
+	return out, nil
+}
+
+// indexNeedEscapeInString returns the index of the character that needs
+// escaping. If no characters need escaping, this returns the input length.
+func indexNeedEscapeInString(s string) int {
+	for i, r := range s {
+		if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError {
+			return i
+		}
+	}
+	return len(s)
+}
+
+// WriteFloat writes out the given float and bitSize in JSON number value.
+func (e *Encoder) WriteFloat(n float64, bitSize int) {
+	e.prepareNext(scalar)
+	e.out = appendFloat(e.out, n, bitSize)
+}
+
+// appendFloat formats the given float for bitSize and appends it to the given []byte.
+func appendFloat(out []byte, n float64, bitSize int) []byte {
+	switch {
+	case math.IsNaN(n):
+		return append(out, `"NaN"`...)
+	case math.IsInf(n, +1):
+		return append(out, `"Infinity"`...)
+	case math.IsInf(n, -1):
+		return append(out, `"-Infinity"`...)
+	}
+
+	// JSON number formatting logic based on encoding/json.
+	// See floatEncoder.encode for reference.
+	fmt := byte('f')
+	if abs := math.Abs(n); abs != 0 {
+		if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) ||
+			bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
+			fmt = 'e'
+		}
+	}
+	out = strconv.AppendFloat(out, n, fmt, -1, bitSize)
+	if fmt == 'e' {
+		n := len(out)
+		if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' {
+			out[n-2] = out[n-1]
+			out = out[:n-1]
+		}
+	}
+	return out
+}
+
+// WriteInt writes out the given signed integer in JSON number value.
+func (e *Encoder) WriteInt(n int64) {
+	e.prepareNext(scalar)
+	e.out = strconv.AppendInt(e.out, n, 10)
+}
+
+// WriteUint writes out the given unsigned integer in JSON number value.
+func (e *Encoder) WriteUint(n uint64) {
+	e.prepareNext(scalar)
+	e.out = strconv.AppendUint(e.out, n, 10)
+}
+
+// StartObject writes out the '{' symbol.
+func (e *Encoder) StartObject() {
+	e.prepareNext(objectOpen)
+	e.out = append(e.out, '{')
+}
+
+// EndObject writes out the '}' symbol.
+func (e *Encoder) EndObject() {
+	e.prepareNext(objectClose)
+	e.out = append(e.out, '}')
+}
+
+// WriteName writes out the given string in JSON string value and the name
+// separator ':'. Returns error if input string contains invalid UTF-8,
+// which is unlikely since protobuf field names should be valid UTF-8.
+func (e *Encoder) WriteName(s string) error {
+	e.prepareNext(name)
+	var err error
+	// Append to output regardless of error.
+	e.out, err = appendString(e.out, s)
+	e.out = append(e.out, ':')
+	return err
+}
+
+// StartArray writes out the '[' symbol.
+func (e *Encoder) StartArray() {
+	e.prepareNext(arrayOpen)
+	e.out = append(e.out, '[')
+}
+
+// EndArray writes out the ']' symbol.
+func (e *Encoder) EndArray() {
+	e.prepareNext(arrayClose)
+	e.out = append(e.out, ']')
+}
+
+// prepareNext adds possible comma and indentation for the next value based
+// on last type and indent option. It also updates lastKind to next.
+func (e *Encoder) prepareNext(next kind) {
+	defer func() {
+		// Set lastKind to next.
+		e.lastKind = next
+	}()
+
+	if len(e.indent) == 0 {
+		// Need to add comma on the following condition.
+		if e.lastKind&(scalar|objectClose|arrayClose) != 0 &&
+			next&(name|scalar|objectOpen|arrayOpen) != 0 {
+			e.out = append(e.out, ',')
+			// For single-line output, add a random extra space after each
+			// comma to make output unstable.
+			if detrand.Bool() {
+				e.out = append(e.out, ' ')
+			}
+		}
+		return
+	}
+
+	switch {
+	case e.lastKind&(objectOpen|arrayOpen) != 0:
+		// If next type is NOT closing, add indent and newline.
+		if next&(objectClose|arrayClose) == 0 {
+			e.indents = append(e.indents, e.indent...)
+			e.out = append(e.out, '\n')
+			e.out = append(e.out, e.indents...)
+		}
+
+	case e.lastKind&(scalar|objectClose|arrayClose) != 0:
+		switch {
+		// If next type is either a value or name, add comma and newline.
+		case next&(name|scalar|objectOpen|arrayOpen) != 0:
+			e.out = append(e.out, ',', '\n')
+
+		// If next type is a closing object or array, adjust indentation.
+		case next&(objectClose|arrayClose) != 0:
+			e.indents = e.indents[:len(e.indents)-len(e.indent)]
+			e.out = append(e.out, '\n')
+		}
+		e.out = append(e.out, e.indents...)
+
+	case e.lastKind&name != 0:
+		e.out = append(e.out, ' ')
+		// For multi-line output, add a random extra space after key: to make
+		// output unstable.
+		if detrand.Bool() {
+			e.out = append(e.out, ' ')
+		}
+	}
+}
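For orientation, a minimal in-package sketch of the Encoder API added above. Note that with indentation enabled, detrand deliberately injects an occasional extra space so the output is not byte-stable; the sketch's function name is an assumption.

```go
package json

import "fmt"

// Sketch: build {"name": "x", "ok": true} with two-space indentation.
func encoderSketch() {
	e, err := NewEncoder(nil, "  ")
	if err != nil {
		panic(err) // only reachable if indent had non-space/tab characters
	}
	e.StartObject()
	_ = e.WriteName("name") // errors only on invalid UTF-8
	_ = e.WriteString("x")
	_ = e.WriteName("ok")
	e.WriteBool(true)
	e.EndObject()
	fmt.Println(string(e.Bytes()))
}
```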
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index df53ff40b2..fa790e0ff1 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -258,6 +258,7 @@ type (
 		StringName       stringName
 		IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
 		IsWeak           bool // promoted from google.protobuf.FieldOptions
+		IsLazy           bool // promoted from google.protobuf.FieldOptions
 		Default          defaultValue
 		ContainingOneof  protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
 		Enum             protoreflect.EnumDescriptor
@@ -351,6 +352,7 @@ func (fd *Field) IsPacked() bool {
 }
 func (fd *Field) IsExtension() bool { return false }
 func (fd *Field) IsWeak() bool      { return fd.L1.IsWeak }
+func (fd *Field) IsLazy() bool      { return fd.L1.IsLazy }
 func (fd *Field) IsList() bool      { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
 func (fd *Field) IsMap() bool       { return fd.Message() != nil && fd.Message().IsMapEntry() }
 func (fd *Field) MapKey() protoreflect.FieldDescriptor {
@@ -425,6 +427,7 @@ type (
 		Extendee        protoreflect.MessageDescriptor
 		Cardinality     protoreflect.Cardinality
 		Kind            protoreflect.Kind
+		IsLazy          bool
 		EditionFeatures EditionFeatures
 	}
 	ExtensionL2 struct {
@@ -465,6 +468,7 @@ func (xd *Extension) IsPacked() bool {
 }
 func (xd *Extension) IsExtension() bool                      { return true }
 func (xd *Extension) IsWeak() bool                           { return false }
+func (xd *Extension) IsLazy() bool                           { return xd.L1.IsLazy }
 func (xd *Extension) IsList() bool                           { return xd.Cardinality() == protoreflect.Repeated }
 func (xd *Extension) IsMap() bool                            { return false }
 func (xd *Extension) MapKey() protoreflect.FieldDescriptor   { return nil }
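Note that protoreflect.FieldDescriptor does not gain IsLazy in its public interface; the runtime reaches the new bit through an interface assertion against the concrete descriptor. A hedged sketch of that probe pattern (the function name and its placement are assumptions, not spelled out in this hunk):

```go
package impl

import "google.golang.org/protobuf/reflect/protoreflect"

// fieldIsLazy probes the concrete descriptor for the new IsLazy capability,
// which filedesc.Field and filedesc.Extension now provide.
func fieldIsLazy(fd protoreflect.FieldDescriptor) bool {
	lz, ok := fd.(interface{ IsLazy() bool })
	return ok && lz.IsLazy() // true when FieldOptions carried [lazy = true]
}
```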
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index 8a57d60b08..d2f549497e 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) {
 			switch num {
 			case genid.FieldOptions_Packed_field_number:
 				xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
+			case genid.FieldOptions_Lazy_field_number:
+				xd.L1.IsLazy = protowire.DecodeBool(v)
 			}
 		case protowire.BytesType:
 			v, m := protowire.ConsumeBytes(b)
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index e56c91a8db..67a51b327c 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) {
 				fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
 			case genid.FieldOptions_Weak_field_number:
 				fd.L1.IsWeak = protowire.DecodeBool(v)
+			case genid.FieldOptions_Lazy_field_number:
+				fd.L1.IsLazy = protowire.DecodeBool(v)
 			case FieldOptions_EnforceUTF8:
 				fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
 			}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 11f5f356b6..fd4d0c83d2 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -68,7 +68,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
 			v, m := protowire.ConsumeBytes(b)
 			b = b[m:]
 			switch num {
-			case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
+			case genid.FeatureSet_Go_ext_number:
 				parent = unmarshalGoFeature(v, parent)
 			}
 		}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/genid/doc.go b/hack/tools/vendor/google.golang.org/protobuf/internal/genid/doc.go
index 45ccd01211..d9b9d916a2 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/genid/doc.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/genid/doc.go
@@ -6,6 +6,6 @@
 // and the well-known types.
 package genid
 
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
 
 const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/hack/tools/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index 9a652a2b42..7f67cbb6e9 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -12,20 +12,25 @@ import (
 
 const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
 
-// Names for google.protobuf.GoFeatures.
+// Names for pb.GoFeatures.
 const (
 	GoFeatures_message_name     protoreflect.Name     = "GoFeatures"
-	GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures"
+	GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures"
 )
 
-// Field names for google.protobuf.GoFeatures.
+// Field names for pb.GoFeatures.
 const (
 	GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
 
-	GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum"
+	GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
 )
 
-// Field numbers for google.protobuf.GoFeatures.
+// Field numbers for pb.GoFeatures.
 const (
 	GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
 )
+
+// Extension numbers
+const (
+	FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002
+)
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/hack/tools/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
index 8f9ea02ff2..bef5a25fbb 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
@@ -4,7 +4,7 @@
 
 package genid
 
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
 
 // Generic field names and numbers for synthetic map entry messages.
 const (
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/hack/tools/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
index 429384b85b..9404270de0 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
@@ -4,7 +4,7 @@
 
 package genid
 
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
 
 // Generic field name and number for messages in wrappers.proto.
 const (
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 4bb0a7a20c..0d5b546e0e 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -67,7 +67,6 @@ type lazyExtensionValue struct {
 	xi         *extensionFieldInfo
 	value      protoreflect.Value
 	b          []byte
-	fn         func() protoreflect.Value
 }
 
 type ExtensionField struct {
@@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() {
 		}
 		f.lazy.value = val
 	} else {
-		f.lazy.value = f.lazy.fn()
+		panic("No support for lazy fns for ExtensionField")
 	}
 	f.lazy.xi = nil
-	f.lazy.fn = nil
 	f.lazy.b = nil
 	atomic.StoreUint32(&f.lazy.atomicOnce, 1)
 }
@@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value)
 	f.lazy = nil
 }
 
-// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
-// This must not be called concurrently.
-func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
-	f.typ = t
-	f.lazy = &lazyExtensionValue{fn: fn}
-}
-
 // Value returns the value of the extension field.
 // This may be called concurrently.
 func (f *ExtensionField) Value() protoreflect.Value {
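With SetLazy and the fn member removed, any caller that previously deferred computation must materialize the value before storing it. A sketch of the migration, where xt and compute stand in for caller code:

```go
package impl

import "google.golang.org/protobuf/reflect/protoreflect"

// migrateSetLazy shows the shape of the migration for code that previously
// called f.SetLazy(xt, compute).
func migrateSetLazy(f *ExtensionField, xt protoreflect.ExtensionType, compute func() protoreflect.Value) {
	f.Set(xt, compute()) // values are now computed eagerly at Set time
}
```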
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 78ee47e44b..7c1f66c8c1 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
 			if err != nil {
 				return out, err
 			}
+			if cf.funcs.isInit == nil {
+				out.initialized = true
+			}
 			vi.Set(vw)
 			return out, nil
 		}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 6b2fdbb739..78be9df342 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
 	if mi.methods.Merge == nil {
 		mi.methods.Merge = mi.merge
 	}
+	if mi.methods.Equal == nil {
+		mi.methods.Equal = equal
+	}
 }
 
 // getUnknownBytes returns a *[]byte for the unknown fields.
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
deleted file mode 100644
index 145c577bd6..0000000000
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
-	"reflect"
-
-	"google.golang.org/protobuf/encoding/protowire"
-)
-
-func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
-	v := p.v.Elem().Int()
-	return f.tagsize + protowire.SizeVarint(uint64(v))
-}
-
-func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	v := p.v.Elem().Int()
-	b = protowire.AppendVarint(b, f.wiretag)
-	b = protowire.AppendVarint(b, uint64(v))
-	return b, nil
-}
-
-func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
-	if wtyp != protowire.VarintType {
-		return out, errUnknown
-	}
-	v, n := protowire.ConsumeVarint(b)
-	if n < 0 {
-		return out, errDecode
-	}
-	p.v.Elem().SetInt(int64(v))
-	out.n = n
-	return out, nil
-}
-
-func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
-	dst.v.Elem().Set(src.v.Elem())
-}
-
-var coderEnum = pointerCoderFuncs{
-	size:      sizeEnum,
-	marshal:   appendEnum,
-	unmarshal: consumeEnum,
-	merge:     mergeEnum,
-}
-
-func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	if p.v.Elem().Int() == 0 {
-		return 0
-	}
-	return sizeEnum(p, f, opts)
-}
-
-func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	if p.v.Elem().Int() == 0 {
-		return b, nil
-	}
-	return appendEnum(b, p, f, opts)
-}
-
-func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
-	if src.v.Elem().Int() != 0 {
-		dst.v.Elem().Set(src.v.Elem())
-	}
-}
-
-var coderEnumNoZero = pointerCoderFuncs{
-	size:      sizeEnumNoZero,
-	marshal:   appendEnumNoZero,
-	unmarshal: consumeEnum,
-	merge:     mergeEnumNoZero,
-}
-
-func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	return sizeEnum(pointer{p.v.Elem()}, f, opts)
-}
-
-func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	return appendEnum(b, pointer{p.v.Elem()}, f, opts)
-}
-
-func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
-	if wtyp != protowire.VarintType {
-		return out, errUnknown
-	}
-	if p.v.Elem().IsNil() {
-		p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
-	}
-	return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
-}
-
-func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
-	if !src.v.Elem().IsNil() {
-		v := reflect.New(dst.v.Type().Elem().Elem())
-		v.Elem().Set(src.v.Elem().Elem())
-		dst.v.Elem().Set(v)
-	}
-}
-
-var coderEnumPtr = pointerCoderFuncs{
-	size:      sizeEnumPtr,
-	marshal:   appendEnumPtr,
-	unmarshal: consumeEnumPtr,
-	merge:     mergeEnumPtr,
-}
-
-func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	s := p.v.Elem()
-	for i, llen := 0, s.Len(); i < llen; i++ {
-		size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
-	}
-	return size
-}
-
-func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	s := p.v.Elem()
-	for i, llen := 0, s.Len(); i < llen; i++ {
-		b = protowire.AppendVarint(b, f.wiretag)
-		b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
-	}
-	return b, nil
-}
-
-func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
-	s := p.v.Elem()
-	if wtyp == protowire.BytesType {
-		b, n := protowire.ConsumeBytes(b)
-		if n < 0 {
-			return out, errDecode
-		}
-		for len(b) > 0 {
-			v, n := protowire.ConsumeVarint(b)
-			if n < 0 {
-				return out, errDecode
-			}
-			rv := reflect.New(s.Type().Elem()).Elem()
-			rv.SetInt(int64(v))
-			s.Set(reflect.Append(s, rv))
-			b = b[n:]
-		}
-		out.n = n
-		return out, nil
-	}
-	if wtyp != protowire.VarintType {
-		return out, errUnknown
-	}
-	v, n := protowire.ConsumeVarint(b)
-	if n < 0 {
-		return out, errDecode
-	}
-	rv := reflect.New(s.Type().Elem()).Elem()
-	rv.SetInt(int64(v))
-	s.Set(reflect.Append(s, rv))
-	out.n = n
-	return out, nil
-}
-
-func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
-	dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
-}
-
-var coderEnumSlice = pointerCoderFuncs{
-	size:      sizeEnumSlice,
-	marshal:   appendEnumSlice,
-	unmarshal: consumeEnumSlice,
-	merge:     mergeEnumSlice,
-}
-
-func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
-	s := p.v.Elem()
-	llen := s.Len()
-	if llen == 0 {
-		return 0
-	}
-	n := 0
-	for i := 0; i < llen; i++ {
-		n += protowire.SizeVarint(uint64(s.Index(i).Int()))
-	}
-	return f.tagsize + protowire.SizeBytes(n)
-}
-
-func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
-	s := p.v.Elem()
-	llen := s.Len()
-	if llen == 0 {
-		return b, nil
-	}
-	b = protowire.AppendVarint(b, f.wiretag)
-	n := 0
-	for i := 0; i < llen; i++ {
-		n += protowire.SizeVarint(uint64(s.Index(i).Int()))
-	}
-	b = protowire.AppendVarint(b, uint64(n))
-	for i := 0; i < llen; i++ {
-		b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
-	}
-	return b, nil
-}
-
-var coderEnumPackedSlice = pointerCoderFuncs{
-	size:      sizeEnumPackedSlice,
-	marshal:   appendEnumPackedSlice,
-	unmarshal: consumeEnumSlice,
-	merge:     mergeEnumSlice,
-}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
index 757642e23c..077712c2c5 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
@@ -2,9 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
 package impl
 
 // When using unsafe pointers, we can just treat enum values as int32s.
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/convert.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/convert.go
index e06ece55a2..f72ddd882f 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
 	return protoreflect.ValueOfString(v.Convert(stringType).String())
 }
 func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
-	// pref.Value.String never panics, so we go through an interface
+	// protoreflect.Value.String never panics, so we go through an interface
 	// conversion here to check the type.
 	s := v.Interface().(string)
 	if c.goType.Kind() == reflect.Slice && s == "" {
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/encode.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/encode.go
index febd212247..6254f5de41 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -10,7 +10,7 @@ import (
 	"sync/atomic"
 
 	"google.golang.org/protobuf/internal/flags"
-	proto "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/proto"
 	piface "google.golang.org/protobuf/runtime/protoiface"
 )
 
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/equal.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/equal.go
new file mode 100644
index 0000000000..9f6c32a7d8
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/equal.go
@@ -0,0 +1,224 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+	"bytes"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoiface"
+)
+
+func equal(in protoiface.EqualInput) protoiface.EqualOutput {
+	return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
+}
+
+// equalMessage is a fast-path variant of protoreflect.equalMessage.
+// It takes advantage of the internal messageState type to avoid
+// unnecessary allocations and type assertions.
+func equalMessage(mx, my protoreflect.Message) bool {
+	if mx == nil || my == nil {
+		return mx == my
+	}
+	if mx.Descriptor() != my.Descriptor() {
+		return false
+	}
+
+	msx, ok := mx.(*messageState)
+	if !ok {
+		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+	}
+	msy, ok := my.(*messageState)
+	if !ok {
+		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+	}
+
+	mi := msx.messageInfo()
+	miy := msy.messageInfo()
+	if mi != miy {
+		return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+	}
+	mi.init()
+	// Compares regular fields
+	// Modified Message.Range code that compares two messages of the same type
+	// while going over the fields.
+	for _, ri := range mi.rangeInfos {
+		var fd protoreflect.FieldDescriptor
+		var vx, vy protoreflect.Value
+
+		switch ri := ri.(type) {
+		case *fieldInfo:
+			hx := ri.has(msx.pointer())
+			hy := ri.has(msy.pointer())
+			if hx != hy {
+				return false
+			}
+			if !hx {
+				continue
+			}
+			fd = ri.fieldDesc
+			vx = ri.get(msx.pointer())
+			vy = ri.get(msy.pointer())
+		case *oneofInfo:
+			fnx := ri.which(msx.pointer())
+			fny := ri.which(msy.pointer())
+			if fnx != fny {
+				return false
+			}
+			if fnx <= 0 {
+				continue
+			}
+			fi := mi.fields[fnx]
+			fd = fi.fieldDesc
+			vx = fi.get(msx.pointer())
+			vy = fi.get(msy.pointer())
+		}
+
+		if !equalValue(fd, vx, vy) {
+			return false
+		}
+	}
+
+	// Compare extensions.
+	// This is more complicated because mx or my could have empty/nil extension maps,
+	// and some populated extension map values (such as empty lists) compare equal
+	// to nil extension maps.
+	emx := mi.extensionMap(msx.pointer())
+	emy := mi.extensionMap(msy.pointer())
+	if emx != nil {
+		for k, x := range *emx {
+			xd := x.Type().TypeDescriptor()
+			xv := x.Value()
+			var y ExtensionField
+			ok := false
+			if emy != nil {
+				y, ok = (*emy)[k]
+			}
+			// We need to treat empty lists as equal to nil values
+			if emy == nil || !ok {
+				if xd.IsList() && xv.List().Len() == 0 {
+					continue
+				}
+				return false
+			}
+
+			if !equalValue(xd, xv, y.Value()) {
+				return false
+			}
+		}
+	}
+	if emy != nil {
+		// emy may have extensions that emx does not have; we need to check them as well
+		for k, y := range *emy {
+			if emx != nil {
+				// emx has the field, so we already checked it
+				if _, ok := (*emx)[k]; ok {
+					continue
+				}
+			}
+			// Empty lists are equal to nil
+			if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
+				continue
+			}
+
+			// Can't be equal if the extension is populated
+			return false
+		}
+	}
+
+	return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
+	// slow path
+	if fd.Kind() != protoreflect.MessageKind {
+		return vx.Equal(vy)
+	}
+
+	// fast path special cases
+	if fd.IsMap() {
+		if fd.MapValue().Kind() == protoreflect.MessageKind {
+			return equalMessageMap(vx.Map(), vy.Map())
+		}
+		return vx.Equal(vy)
+	}
+
+	if fd.IsList() {
+		return equalMessageList(vx.List(), vy.List())
+	}
+
+	return equalMessage(vx.Message(), vy.Message())
+}
+
+// Mostly copied from protoreflect.equalMap.
+// This variant only works for messages as map types.
+// All other map types should be handled via Value.Equal.
+func equalMessageMap(mx, my protoreflect.Map) bool {
+	if mx.Len() != my.Len() {
+		return false
+	}
+	equal := true
+	mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
+		if !my.Has(k) {
+			equal = false
+			return false
+		}
+		vy := my.Get(k)
+		equal = equalMessage(vx.Message(), vy.Message())
+		return equal
+	})
+	return equal
+}
+
+// Mostly copied from protoreflect.equalList.
+// The only change is the usage of equalMessage instead of protoreflect.equalValue.
+func equalMessageList(lx, ly protoreflect.List) bool {
+	if lx.Len() != ly.Len() {
+		return false
+	}
+	for i := 0; i < lx.Len(); i++ {
+		// We only operate on messages here since equalValue will not call us in any other case.
+		if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
+			return false
+		}
+	}
+	return true
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+// Copied from protoreflect.equalUnknown.
+func equalUnknown(x, y protoreflect.RawFields) bool {
+	if len(x) != len(y) {
+		return false
+	}
+	if bytes.Equal([]byte(x), []byte(y)) {
+		return true
+	}
+
+	mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+	my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+	for len(x) > 0 {
+		fnum, _, n := protowire.ConsumeField(x)
+		mx[fnum] = append(mx[fnum], x[:n]...)
+		x = x[n:]
+	}
+	for len(y) > 0 {
+		fnum, _, n := protowire.ConsumeField(y)
+		my[fnum] = append(my[fnum], y[:n]...)
+		y = y[n:]
+	}
+	if len(mx) != len(my) {
+		return false
+	}
+
+	for k, v1 := range mx {
+		if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
+			return false
+		}
+	}
+
+	return true
+}
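A small in-package sketch of the unknown-field semantics implemented by equalUnknown above: comparison is order-insensitive across field numbers (each number's bytes are bucketed into a map) but order-sensitive within a single field number. The function name is an assumption for illustration.

```go
package impl

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/reflect/protoreflect"
)

// demoEqualUnknown builds two unknown-field blobs that carry the same
// fields in a different order and shows they compare equal.
func demoEqualUnknown() {
	f1 := protowire.AppendVarint(protowire.AppendTag(nil, 1, protowire.VarintType), 5)
	f2 := protowire.AppendVarint(protowire.AppendTag(nil, 2, protowire.VarintType), 7)
	x := protoreflect.RawFields(append(append([]byte{}, f1...), f2...))
	y := protoreflect.RawFields(append(append([]byte{}, f2...), f1...))
	fmt.Println(equalUnknown(x, y)) // true: same per-number bytes, different order
}
```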
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 6e8677ee63..b6849d6692 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool
 func (x placeholderExtension) HasOptionalKeyword() bool                           { return false }
 func (x placeholderExtension) IsExtension() bool                                  { return true }
 func (x placeholderExtension) IsWeak() bool                                       { return false }
+func (x placeholderExtension) IsLazy() bool                                       { return false }
 func (x placeholderExtension) IsPacked() bool                                     { return false }
 func (x placeholderExtension) IsList() bool                                       { return false }
 func (x placeholderExtension) IsMap() bool                                        { return false }
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/message.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/message.go
index 019399d454..741b5ed29c 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -30,8 +30,8 @@ type MessageInfo struct {
 	// Desc is the underlying message descriptor type and must be populated.
 	Desc protoreflect.MessageDescriptor
 
-	// Exporter must be provided in a purego environment in order to provide
-	// access to unexported fields.
+	// Deprecated: Exporter will be removed the next time we bump
+	// protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640
 	Exporter exporter
 
 	// OneofWrappers is list of pointers to oneof wrapper struct types.
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
deleted file mode 100644
index da685e8a29..0000000000
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
-	"fmt"
-	"reflect"
-	"sync"
-)
-
-const UnsafeEnabled = false
-
-// Pointer is an opaque pointer type.
-type Pointer any
-
-// offset represents the offset to a struct field, accessible from a pointer.
-// The offset is the field index into a struct.
-type offset struct {
-	index  int
-	export exporter
-}
-
-// offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
-	if len(f.Index) != 1 {
-		panic("embedded structs are not supported")
-	}
-	if f.PkgPath == "" {
-		return offset{index: f.Index[0]} // field is already exported
-	}
-	if x == nil {
-		panic("exporter must be provided for unexported field")
-	}
-	return offset{index: f.Index[0], export: x}
-}
-
-// IsValid reports whether the offset is valid.
-func (f offset) IsValid() bool { return f.index >= 0 }
-
-// invalidOffset is an invalid field offset.
-var invalidOffset = offset{index: -1}
-
-// zeroOffset is a noop when calling pointer.Apply.
-var zeroOffset = offset{index: 0}
-
-// pointer is an abstract representation of a pointer to a struct or field.
-type pointer struct{ v reflect.Value }
-
-// pointerOf returns p as a pointer.
-func pointerOf(p Pointer) pointer {
-	return pointerOfIface(p)
-}
-
-// pointerOfValue returns v as a pointer.
-func pointerOfValue(v reflect.Value) pointer {
-	return pointer{v: v}
-}
-
-// pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v any) pointer {
-	return pointer{v: reflect.ValueOf(v)}
-}
-
-// IsNil reports whether the pointer is nil.
-func (p pointer) IsNil() bool {
-	return p.v.IsNil()
-}
-
-// Apply adds an offset to the pointer to derive a new pointer
-// to a specified field. The current pointer must be pointing at a struct.
-func (p pointer) Apply(f offset) pointer {
-	if f.export != nil {
-		if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
-			return pointer{v: v}
-		}
-	}
-	return pointer{v: p.v.Elem().Field(f.index).Addr()}
-}
-
-// AsValueOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
-func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
-	if got := p.v.Type().Elem(); got != t {
-		panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
-	}
-	return p.v
-}
-
-// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) any {
-	return p.AsValueOf(t).Interface()
-}
-
-func (p pointer) Bool() *bool              { return p.v.Interface().(*bool) }
-func (p pointer) BoolPtr() **bool          { return p.v.Interface().(**bool) }
-func (p pointer) BoolSlice() *[]bool       { return p.v.Interface().(*[]bool) }
-func (p pointer) Int32() *int32            { return p.v.Interface().(*int32) }
-func (p pointer) Int32Ptr() **int32        { return p.v.Interface().(**int32) }
-func (p pointer) Int32Slice() *[]int32     { return p.v.Interface().(*[]int32) }
-func (p pointer) Int64() *int64            { return p.v.Interface().(*int64) }
-func (p pointer) Int64Ptr() **int64        { return p.v.Interface().(**int64) }
-func (p pointer) Int64Slice() *[]int64     { return p.v.Interface().(*[]int64) }
-func (p pointer) Uint32() *uint32          { return p.v.Interface().(*uint32) }
-func (p pointer) Uint32Ptr() **uint32      { return p.v.Interface().(**uint32) }
-func (p pointer) Uint32Slice() *[]uint32   { return p.v.Interface().(*[]uint32) }
-func (p pointer) Uint64() *uint64          { return p.v.Interface().(*uint64) }
-func (p pointer) Uint64Ptr() **uint64      { return p.v.Interface().(**uint64) }
-func (p pointer) Uint64Slice() *[]uint64   { return p.v.Interface().(*[]uint64) }
-func (p pointer) Float32() *float32        { return p.v.Interface().(*float32) }
-func (p pointer) Float32Ptr() **float32    { return p.v.Interface().(**float32) }
-func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
-func (p pointer) Float64() *float64        { return p.v.Interface().(*float64) }
-func (p pointer) Float64Ptr() **float64    { return p.v.Interface().(**float64) }
-func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
-func (p pointer) String() *string          { return p.v.Interface().(*string) }
-func (p pointer) StringPtr() **string      { return p.v.Interface().(**string) }
-func (p pointer) StringSlice() *[]string   { return p.v.Interface().(*[]string) }
-func (p pointer) Bytes() *[]byte           { return p.v.Interface().(*[]byte) }
-func (p pointer) BytesPtr() **[]byte       { return p.v.Interface().(**[]byte) }
-func (p pointer) BytesSlice() *[][]byte    { return p.v.Interface().(*[][]byte) }
-func (p pointer) WeakFields() *weakFields  { return (*weakFields)(p.v.Interface().(*WeakFields)) }
-func (p pointer) Extensions() *map[int32]ExtensionField {
-	return p.v.Interface().(*map[int32]ExtensionField)
-}
-
-func (p pointer) Elem() pointer {
-	return pointer{v: p.v.Elem()}
-}
-
-// PointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) PointerSlice() []pointer {
-	// TODO: reconsider this
-	if p.v.IsNil() {
-		return nil
-	}
-	n := p.v.Elem().Len()
-	s := make([]pointer, n)
-	for i := 0; i < n; i++ {
-		s[i] = pointer{v: p.v.Elem().Index(i)}
-	}
-	return s
-}
-
-// AppendPointerSlice appends v to p, which must be a []*T.
-func (p pointer) AppendPointerSlice(v pointer) {
-	sp := p.v.Elem()
-	sp.Set(reflect.Append(sp, v.v))
-}
-
-// SetPointer sets *p to v.
-func (p pointer) SetPointer(v pointer) {
-	p.v.Elem().Set(v.v)
-}
-
-func growSlice(p pointer, addCap int) {
-	// TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
-	in := p.v.Elem()
-	out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
-	reflect.Copy(out, in)
-	p.v.Elem().Set(out)
-}
-
-func (p pointer) growBoolSlice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growInt32Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growUint32Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growInt64Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growUint64Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growFloat64Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (p pointer) growFloat32Slice(addCap int) {
-	growSlice(p, addCap)
-}
-
-func (Export) MessageStateOf(p Pointer) *messageState     { panic("not supported") }
-func (ms *messageState) pointer() pointer                 { panic("not supported") }
-func (ms *messageState) messageInfo() *MessageInfo        { panic("not supported") }
-func (ms *messageState) LoadMessageInfo() *MessageInfo    { panic("not supported") }
-func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
-
-type atomicNilMessage struct {
-	once sync.Once
-	m    messageReflectWrapper
-}
-
-func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
-	m.once.Do(func() {
-		m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
-		m.m.mi = mi
-	})
-	return &m.m
-}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 5f20ca5d8a..79e186667b 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -2,9 +2,6 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
 package impl
 
 import (
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
deleted file mode 100644
index a1f6f33386..0000000000
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package strs
-
-import pref "google.golang.org/protobuf/reflect/protoreflect"
-
-func UnsafeString(b []byte) string {
-	return string(b)
-}
-
-func UnsafeBytes(s string) []byte {
-	return []byte(s)
-}
-
-type Builder struct{}
-
-func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
-	return prefix.Append(name)
-}
-
-func (*Builder) MakeString(b []byte) string {
-	return string(b)
-}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
index a008acd090..832a7988f1 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
 
 package strs
 
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
index 60166f2ba3..1ffddf6877 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
 
 package strs
 
diff --git a/hack/tools/vendor/google.golang.org/protobuf/internal/version/version.go b/hack/tools/vendor/google.golang.org/protobuf/internal/version/version.go
index dbbf1f6862..fb8e15e8da 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,8 +51,8 @@ import (
 //  10. Send out the CL for review and submit it.
 const (
 	Major      = 1
-	Minor      = 34
-	Patch      = 2
+	Minor      = 35
+	Patch      = 1
 	PreRelease = ""
 )
 
diff --git a/hack/tools/vendor/google.golang.org/protobuf/proto/equal.go b/hack/tools/vendor/google.golang.org/protobuf/proto/equal.go
index 1a0be1b03c..c36d4a9cd7 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/proto/equal.go
@@ -8,6 +8,7 @@ import (
 	"reflect"
 
 	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoiface"
 )
 
 // Equal reports whether two messages are equal,
@@ -51,6 +52,14 @@ func Equal(x, y Message) bool {
 	if mx.IsValid() != my.IsValid() {
 		return false
 	}
+
+	// Take the fast path only when both messages advertise an Equal method;
+	// the implementation can still fall back to reflection internally.
+	pmx := protoMethods(mx)
+	pmy := protoMethods(my)
+	if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil {
+		return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal
+	}
+
 	vx := protoreflect.ValueOfMessage(mx)
 	vy := protoreflect.ValueOfMessage(my)
 	return vx.Equal(vy)
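The observable behavior of proto.Equal is unchanged; only the dispatch is new. A runnable sketch (generated messages, including the well-known types, pick up Methods.Equal via the runtime wiring earlier in this patch):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	a, b := durationpb.New(3*time.Second), durationpb.New(3*time.Second)
	fmt.Println(proto.Equal(a, b)) // true, via the fast path when both sides provide it
}
```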
diff --git a/hack/tools/vendor/google.golang.org/protobuf/proto/extension.go b/hack/tools/vendor/google.golang.org/protobuf/proto/extension.go
index d248f29284..78445d116f 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/proto/extension.go
@@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) {
 // If the field is unpopulated, it returns the default value for
 // scalars and an immutable, empty value for lists or messages.
 // It panics if xt does not extend m.
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+//	╔═══════════════════╤═════════════════════════╗
+//	║ Go type           │ Protobuf kind           ║
+//	╠═══════════════════╪═════════════════════════╣
+//	║ bool              │ bool                    ║
+//	║ int32             │ int32, sint32, sfixed32 ║
+//	║ int64             │ int64, sint64, sfixed64 ║
+//	║ uint32            │ uint32, fixed32         ║
+//	║ uint64            │ uint64, fixed64         ║
+//	║ float32           │ float                   ║
+//	║ float64           │ double                  ║
+//	║ string            │ string                  ║
+//	║ []byte            │ bytes                   ║
+//	║ protoreflect.Enum │ enum                    ║
+//	║ proto.Message     │ message, group          ║
+//	╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// GetExtension, then the call should be followed immediately by a
+// type assertion to the expected output value. For example:
+//
+//	mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage)
+//
+// This pattern enables static analysis tools to verify that the asserted type
+// matches the Go type associated with the extension field and
+// also enables a possible future migration to a type-safe extension API.
+//
+// Since singular messages are the most common extension type, the pattern of
+// calling HasExtension followed by GetExtension may be simplified to:
+//
+//	if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
+//	    ... // make use of mm
+//	}
+//
+// The mm variable is non-nil if and only if HasExtension reports true.
 func GetExtension(m Message, xt protoreflect.ExtensionType) any {
 	// Treat nil message interface as an empty message; return the default.
 	if m == nil {
@@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any {
 // SetExtension stores the value of an extension field.
 // It panics if m is invalid, xt does not extend m, or if the type of v
 // is invalid for the specified extension field.
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+//	╔═══════════════════╤═════════════════════════╗
+//	║ Go type           │ Protobuf kind           ║
+//	╠═══════════════════╪═════════════════════════╣
+//	║ bool              │ bool                    ║
+//	║ int32             │ int32, sint32, sfixed32 ║
+//	║ int64             │ int64, sint64, sfixed64 ║
+//	║ uint32            │ uint32, fixed32         ║
+//	║ uint64            │ uint64, fixed64         ║
+//	║ float32           │ float                   ║
+//	║ float64           │ double                  ║
+//	║ string            │ string                  ║
+//	║ []byte            │ bytes                   ║
+//	║ protoreflect.Enum │ enum                    ║
+//	║ proto.Message     │ message, group          ║
+//	╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go type
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// SetExtension (e.g., foopb.E_MyExtension), then the value should be a
+// concrete type that matches the expected Go type for the extension descriptor
+// so that static analysis tools can verify type correctness.
+// This also enables a possible future migration to a type-safe extension API.
 func SetExtension(m Message, xt protoreflect.ExtensionType, v any) {
 	xd := xt.TypeDescriptor()
 	pv := xt.ValueOf(v)
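The foopb names in the new documentation above are illustrative. The same pattern in a compilable shape, where foopb, Request, and E_Verbose are hypothetical stand-ins for generated code:

```go
package main

import (
	"google.golang.org/protobuf/proto"

	foopb "example.com/foopb" // hypothetical generated package
)

func main() {
	m := &foopb.Request{}                        // hypothetical extendable message
	proto.SetExtension(m, foopb.E_Verbose, true) // value type must match the kind table (bool here)
	verbose := proto.GetExtension(m, foopb.E_Verbose).(bool)
	_ = verbose
}
```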
diff --git a/hack/tools/vendor/google.golang.org/protobuf/protoadapt/convert.go b/hack/tools/vendor/google.golang.org/protobuf/protoadapt/convert.go
new file mode 100644
index 0000000000..ea276d15a0
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/protoadapt/convert.go
@@ -0,0 +1,31 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoadapt bridges the original and new proto APIs.
+package protoadapt
+
+import (
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/runtime/protoiface"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// MessageV1 is the original [github.com/golang/protobuf/proto.Message] type.
+type MessageV1 = protoiface.MessageV1
+
+// MessageV2 is the [google.golang.org/protobuf/proto.Message] type used by the
+// current [google.golang.org/protobuf] module, adding support for reflection.
+type MessageV2 = proto.Message
+
+// MessageV1Of converts a v2 message to a v1 message.
+// It returns nil if m is nil.
+func MessageV1Of(m MessageV2) MessageV1 {
+	return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2Of converts a v1 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2Of(m MessageV1) MessageV2 {
+	return protoimpl.X.ProtoMessageV2Of(m)
+}
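A minimal usage sketch of the two adapters; durationpb is chosen arbitrarily as a concrete message:

```go
package main

import (
	"time"

	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	v2 := durationpb.New(time.Second)
	v1 := protoadapt.MessageV1Of(v2) // legacy github.com/golang/protobuf interface
	_ = protoadapt.MessageV2Of(v1)   // and back again
}
```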
diff --git a/hack/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/hack/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index 8561755427..ebcb4a8ab1 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -150,6 +150,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
 			opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
 			f.L1.Options = func() protoreflect.ProtoMessage { return opts }
 			f.L1.IsWeak = opts.GetWeak()
+			f.L1.IsLazy = opts.GetLazy()
 			if opts.Packed != nil {
 				f.L1.EditionFeatures.IsPacked = opts.GetPacked()
 			}
@@ -214,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript
 		if xd.JsonName != nil {
 			x.L2.StringName.InitJSON(xd.GetJsonName())
 		}
+		if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded {
+			x.L1.Kind = protoreflect.GroupKind
+		}
 	}
 	return xs, nil
 }
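The added branch mirrors what already happens for regular fields: under editions, a message-typed extension whose features select delimited encoding is surfaced as a group. A hedged observation sketch (the helper name is an assumption; xt stands for any such extension type):

```go
package protodesc

import "google.golang.org/protobuf/reflect/protoreflect"

// usesGroupFraming reports whether an extension's wire format uses
// StartGroup/EndGroup framing rather than a length-prefixed field,
// which is what the kind rewrite above makes visible.
func usesGroupFraming(xt protoreflect.ExtensionType) bool {
	return xt.TypeDescriptor().Kind() == protoreflect.GroupKind
}
```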
diff --git a/hack/tools/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/hack/tools/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
index 804830eda3..002e0047ae 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -14,7 +14,7 @@ import (
 	"google.golang.org/protobuf/proto"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/types/descriptorpb"
-	gofeaturespb "google.golang.org/protobuf/types/gofeaturespb"
+	"google.golang.org/protobuf/types/gofeaturespb"
 )
 
 var defaults = &descriptorpb.FeatureSetDefaults{}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
index d5d5af6ebe..742cb518c4 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -23,6 +23,7 @@ type (
 		Unmarshal        func(unmarshalInput) (unmarshalOutput, error)
 		Merge            func(mergeInput) mergeOutput
 		CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
+		Equal            func(equalInput) equalOutput
 	}
 	supportFlags = uint64
 	sizeInput    = struct {
@@ -75,4 +76,13 @@ type (
 	checkInitializedOutput = struct {
 		pragma.NoUnkeyedLiterals
 	}
+	equalInput = struct {
+		pragma.NoUnkeyedLiterals
+		MessageA Message
+		MessageB Message
+	}
+	equalOutput = struct {
+		pragma.NoUnkeyedLiterals
+		Equal bool
+	}
 )
diff --git a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
deleted file mode 100644
index 75f83a2af0..0000000000
--- a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package protoreflect
-
-import "google.golang.org/protobuf/internal/pragma"
-
-type valueType int
-
-const (
-	nilType valueType = iota
-	boolType
-	int32Type
-	int64Type
-	uint32Type
-	uint64Type
-	float32Type
-	float64Type
-	stringType
-	bytesType
-	enumType
-	ifaceType
-)
-
-// value is a union where only one type can be represented at a time.
-// This uses a distinct field for each type. This is type safe in Go, but
-// occupies more memory than necessary (72B).
-type value struct {
-	pragma.DoNotCompare // 0B
-
-	typ   valueType // 8B
-	num   uint64    // 8B
-	str   string    // 16B
-	bin   []byte    // 24B
-	iface any       // 16B
-}
-
-func valueOfString(v string) Value {
-	return Value{typ: stringType, str: v}
-}
-func valueOfBytes(v []byte) Value {
-	return Value{typ: bytesType, bin: v}
-}
-func valueOfIface(v any) Value {
-	return Value{typ: ifaceType, iface: v}
-}
-
-func (v Value) getString() string {
-	return v.str
-}
-func (v Value) getBytes() []byte {
-	return v.bin
-}
-func (v Value) getIface() any {
-	return v.iface
-}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
index 7f3583ead8..0015fcb35d 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
 
 package protoreflect
 
diff --git a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
index f7d386990a..479527b58d 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
 
 package protoreflect
 
diff --git a/hack/tools/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/hack/tools/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
index 44cf467d88..246156561c 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -39,6 +39,9 @@ type Methods = struct {
 
 	// CheckInitialized returns an error if any required fields in the message are not set.
 	CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
+
+	// Equal compares two messages and returns EqualOutput.Equal == true if they are equal.
+	Equal func(EqualInput) EqualOutput
 }
 
 // SupportFlags indicate support for optional features.
@@ -166,3 +169,18 @@ type CheckInitializedInput = struct {
 type CheckInitializedOutput = struct {
 	pragma.NoUnkeyedLiterals
 }
+
+// EqualInput is input to the Equal method.
+type EqualInput = struct {
+	pragma.NoUnkeyedLiterals
+
+	MessageA protoreflect.Message
+	MessageB protoreflect.Message
+}
+
+// EqualOutput is output from the Equal method.
+type EqualOutput = struct {
+	pragma.NoUnkeyedLiterals
+
+	Equal bool
+}
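The new optional method slots into Methods alongside the existing ones. A sketch of the implementing side, mirroring the impl wiring earlier in this patch (equalFn is an illustrative name; equalMessage is the comparison from impl/equal.go):

```go
package impl

import "google.golang.org/protobuf/runtime/protoiface"

// equalFn is the shape a runtime registers into protoiface.Methods.Equal.
func equalFn(in protoiface.EqualInput) protoiface.EqualOutput {
	return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
}
```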
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 9403eb0750..6dea75cd5b 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -1217,11 +1217,9 @@ type FileDescriptorSet struct {
 
 func (x *FileDescriptorSet) Reset() {
 	*x = FileDescriptorSet{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FileDescriptorSet) String() string {
@@ -1232,7 +1230,7 @@ func (*FileDescriptorSet) ProtoMessage() {}
 
 func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1291,11 +1289,9 @@ type FileDescriptorProto struct {
 
 func (x *FileDescriptorProto) Reset() {
 	*x = FileDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FileDescriptorProto) String() string {
@@ -1306,7 +1302,7 @@ func (*FileDescriptorProto) ProtoMessage() {}
 
 func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1434,11 +1430,9 @@ type DescriptorProto struct {
 
 func (x *DescriptorProto) Reset() {
 	*x = DescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *DescriptorProto) String() string {
@@ -1449,7 +1443,7 @@ func (*DescriptorProto) ProtoMessage() {}
 
 func (x *DescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1561,11 +1555,9 @@ const (
 
 func (x *ExtensionRangeOptions) Reset() {
 	*x = ExtensionRangeOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ExtensionRangeOptions) String() string {
@@ -1576,7 +1568,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {}
 
 func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1680,11 +1672,9 @@ type FieldDescriptorProto struct {
 
 func (x *FieldDescriptorProto) Reset() {
 	*x = FieldDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FieldDescriptorProto) String() string {
@@ -1695,7 +1685,7 @@ func (*FieldDescriptorProto) ProtoMessage() {}
 
 func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1799,11 +1789,9 @@ type OneofDescriptorProto struct {
 
 func (x *OneofDescriptorProto) Reset() {
 	*x = OneofDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *OneofDescriptorProto) String() string {
@@ -1814,7 +1802,7 @@ func (*OneofDescriptorProto) ProtoMessage() {}
 
 func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1863,11 +1851,9 @@ type EnumDescriptorProto struct {
 
 func (x *EnumDescriptorProto) Reset() {
 	*x = EnumDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *EnumDescriptorProto) String() string {
@@ -1878,7 +1864,7 @@ func (*EnumDescriptorProto) ProtoMessage() {}
 
 func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -1941,11 +1927,9 @@ type EnumValueDescriptorProto struct {
 
 func (x *EnumValueDescriptorProto) Reset() {
 	*x = EnumValueDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *EnumValueDescriptorProto) String() string {
@@ -1956,7 +1940,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {}
 
 func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2005,11 +1989,9 @@ type ServiceDescriptorProto struct {
 
 func (x *ServiceDescriptorProto) Reset() {
 	*x = ServiceDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ServiceDescriptorProto) String() string {
@@ -2020,7 +2002,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {}
 
 func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2082,11 +2064,9 @@ const (
 
 func (x *MethodDescriptorProto) Reset() {
 	*x = MethodDescriptorProto{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *MethodDescriptorProto) String() string {
@@ -2097,7 +2077,7 @@ func (*MethodDescriptorProto) ProtoMessage() {}
 
 func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2267,11 +2247,9 @@ const (
 
 func (x *FileOptions) Reset() {
 	*x = FileOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FileOptions) String() string {
@@ -2282,7 +2260,7 @@ func (*FileOptions) ProtoMessage() {}
 
 func (x *FileOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2534,11 +2512,9 @@ const (
 
 func (x *MessageOptions) Reset() {
 	*x = MessageOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *MessageOptions) String() string {
@@ -2549,7 +2525,7 @@ func (*MessageOptions) ProtoMessage() {}
 
 func (x *MessageOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2707,11 +2683,9 @@ const (
 
 func (x *FieldOptions) Reset() {
 	*x = FieldOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FieldOptions) String() string {
@@ -2722,7 +2696,7 @@ func (*FieldOptions) ProtoMessage() {}
 
 func (x *FieldOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2849,11 +2823,9 @@ type OneofOptions struct {
 
 func (x *OneofOptions) Reset() {
 	*x = OneofOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *OneofOptions) String() string {
@@ -2864,7 +2836,7 @@ func (*OneofOptions) ProtoMessage() {}
 
 func (x *OneofOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -2929,11 +2901,9 @@ const (
 
 func (x *EnumOptions) Reset() {
 	*x = EnumOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *EnumOptions) String() string {
@@ -2944,7 +2914,7 @@ func (*EnumOptions) ProtoMessage() {}
 
 func (x *EnumOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3026,11 +2996,9 @@ const (
 
 func (x *EnumValueOptions) Reset() {
 	*x = EnumValueOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *EnumValueOptions) String() string {
@@ -3041,7 +3009,7 @@ func (*EnumValueOptions) ProtoMessage() {}
 
 func (x *EnumValueOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3115,11 +3083,9 @@ const (
 
 func (x *ServiceOptions) Reset() {
 	*x = ServiceOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ServiceOptions) String() string {
@@ -3130,7 +3096,7 @@ func (*ServiceOptions) ProtoMessage() {}
 
 func (x *ServiceOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3192,11 +3158,9 @@ const (
 
 func (x *MethodOptions) Reset() {
 	*x = MethodOptions{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *MethodOptions) String() string {
@@ -3207,7 +3171,7 @@ func (*MethodOptions) ProtoMessage() {}
 
 func (x *MethodOptions) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3274,11 +3238,9 @@ type UninterpretedOption struct {
 
 func (x *UninterpretedOption) Reset() {
 	*x = UninterpretedOption{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *UninterpretedOption) String() string {
@@ -3289,7 +3251,7 @@ func (*UninterpretedOption) ProtoMessage() {}
 
 func (x *UninterpretedOption) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3375,11 +3337,9 @@ type FeatureSet struct {
 
 func (x *FeatureSet) Reset() {
 	*x = FeatureSet{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FeatureSet) String() string {
@@ -3390,7 +3350,7 @@ func (*FeatureSet) ProtoMessage() {}
 
 func (x *FeatureSet) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3467,11 +3427,9 @@ type FeatureSetDefaults struct {
 
 func (x *FeatureSetDefaults) Reset() {
 	*x = FeatureSetDefaults{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FeatureSetDefaults) String() string {
@@ -3482,7 +3440,7 @@ func (*FeatureSetDefaults) ProtoMessage() {}
 
 func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3578,11 +3536,9 @@ type SourceCodeInfo struct {
 
 func (x *SourceCodeInfo) Reset() {
 	*x = SourceCodeInfo{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SourceCodeInfo) String() string {
@@ -3593,7 +3549,7 @@ func (*SourceCodeInfo) ProtoMessage() {}
 
 func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3630,11 +3586,9 @@ type GeneratedCodeInfo struct {
 
 func (x *GeneratedCodeInfo) Reset() {
 	*x = GeneratedCodeInfo{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *GeneratedCodeInfo) String() string {
@@ -3645,7 +3599,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3679,11 +3633,9 @@ type DescriptorProto_ExtensionRange struct {
 
 func (x *DescriptorProto_ExtensionRange) Reset() {
 	*x = DescriptorProto_ExtensionRange{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *DescriptorProto_ExtensionRange) String() string {
@@ -3694,7 +3646,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
 
 func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3744,11 +3696,9 @@ type DescriptorProto_ReservedRange struct {
 
 func (x *DescriptorProto_ReservedRange) Reset() {
 	*x = DescriptorProto_ReservedRange{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *DescriptorProto_ReservedRange) String() string {
@@ -3759,7 +3709,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {}
 
 func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3813,11 +3763,9 @@ type ExtensionRangeOptions_Declaration struct {
 
 func (x *ExtensionRangeOptions_Declaration) Reset() {
 	*x = ExtensionRangeOptions_Declaration{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *ExtensionRangeOptions_Declaration) String() string {
@@ -3828,7 +3776,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {}
 
 func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3895,11 +3843,9 @@ type EnumDescriptorProto_EnumReservedRange struct {
 
 func (x *EnumDescriptorProto_EnumReservedRange) Reset() {
 	*x = EnumDescriptorProto_EnumReservedRange{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *EnumDescriptorProto_EnumReservedRange) String() string {
@@ -3910,7 +3856,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
 
 func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -3950,11 +3896,9 @@ type FieldOptions_EditionDefault struct {
 
 func (x *FieldOptions_EditionDefault) Reset() {
 	*x = FieldOptions_EditionDefault{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FieldOptions_EditionDefault) String() string {
@@ -3965,7 +3909,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {}
 
 func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4018,11 +3962,9 @@ type FieldOptions_FeatureSupport struct {
 
 func (x *FieldOptions_FeatureSupport) Reset() {
 	*x = FieldOptions_FeatureSupport{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FieldOptions_FeatureSupport) String() string {
@@ -4033,7 +3975,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {}
 
 func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4092,11 +4034,9 @@ type UninterpretedOption_NamePart struct {
 
 func (x *UninterpretedOption_NamePart) Reset() {
 	*x = UninterpretedOption_NamePart{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *UninterpretedOption_NamePart) String() string {
@@ -4107,7 +4047,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {}
 
 func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4154,11 +4094,9 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct {
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
 	*x = FeatureSetDefaults_FeatureSetEditionDefault{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
@@ -4169,7 +4107,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4305,11 +4243,9 @@ type SourceCodeInfo_Location struct {
 
 func (x *SourceCodeInfo_Location) Reset() {
 	*x = SourceCodeInfo_Location{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *SourceCodeInfo_Location) String() string {
@@ -4320,7 +4256,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {}
 
 func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -4392,11 +4328,9 @@ type GeneratedCodeInfo_Annotation struct {
 
 func (x *GeneratedCodeInfo_Annotation) Reset() {
 	*x = GeneratedCodeInfo_Annotation{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *GeneratedCodeInfo_Annotation) String() string {
@@ -4407,7 +4341,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -5385,424 +5319,6 @@ func file_google_protobuf_descriptor_proto_init() {
 	if File_google_protobuf_descriptor_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*FileDescriptorSet); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any {
-			switch v := v.(*FileDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any {
-			switch v := v.(*DescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any {
-			switch v := v.(*ExtensionRangeOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any {
-			switch v := v.(*FieldDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any {
-			switch v := v.(*OneofDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any {
-			switch v := v.(*EnumDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any {
-			switch v := v.(*EnumValueDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any {
-			switch v := v.(*ServiceDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any {
-			switch v := v.(*MethodDescriptorProto); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any {
-			switch v := v.(*FileOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any {
-			switch v := v.(*MessageOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any {
-			switch v := v.(*FieldOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any {
-			switch v := v.(*OneofOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any {
-			switch v := v.(*EnumOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any {
-			switch v := v.(*EnumValueOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any {
-			switch v := v.(*ServiceOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any {
-			switch v := v.(*MethodOptions); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any {
-			switch v := v.(*UninterpretedOption); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any {
-			switch v := v.(*FeatureSet); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			case 3:
-				return &v.extensionFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any {
-			switch v := v.(*FeatureSetDefaults); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any {
-			switch v := v.(*SourceCodeInfo); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any {
-			switch v := v.(*GeneratedCodeInfo); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any {
-			switch v := v.(*DescriptorProto_ExtensionRange); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any {
-			switch v := v.(*DescriptorProto_ReservedRange); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any {
-			switch v := v.(*ExtensionRangeOptions_Declaration); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any {
-			switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any {
-			switch v := v.(*FieldOptions_EditionDefault); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any {
-			switch v := v.(*FieldOptions_FeatureSupport); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any {
-			switch v := v.(*UninterpretedOption_NamePart); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any {
-			switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any {
-			switch v := v.(*SourceCodeInfo_Location); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any {
-			switch v := v.(*GeneratedCodeInfo_Annotation); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
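Every hunk in this generated file is the same mechanical change: `protoimpl.UnsafeEnabled` used to select between unsafe-pointer field access and the reflection-based `Exporter` closures, and with the `purego`/`appengine` build mode removed it is effectively always true, so both the guards and the entire exporter table become dead code. What survives is a lazy once-per-value initialization: `Reset` seeds the message state eagerly, while `ProtoReflect` backfills it on first use for values created with `new(T)`. The same idea in miniature, under hypothetical names:

```go
package demo

import "sync/atomic"

type typeInfo struct{ name string }

var sharedInfo = &typeInfo{name: "demo.Message"}

type message struct {
	info atomic.Pointer[typeInfo] // plays the role of the stored MessageInfo
}

// reset seeds the per-value type info eagerly, as the generated Reset does.
func (m *message) reset() { m.info.Store(sharedInfo) }

// reflect backfills the type info lazily, as the generated ProtoReflect does
// for values that were allocated directly and never passed through reset.
func (m *message) reflect() *typeInfo {
	if m.info.Load() == nil {
		m.info.Store(sharedInfo)
	}
	return m.info.Load()
}
```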
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/hack/tools/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
new file mode 100644
index 0000000000..1ba1dfa5ad
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
@@ -0,0 +1,718 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package dynamicpb creates protocol buffer messages using runtime type information.
+package dynamicpb
+
+import (
+	"math"
+
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoiface"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// enum is a dynamic protoreflect.Enum.
+type enum struct {
+	num protoreflect.EnumNumber
+	typ protoreflect.EnumType
+}
+
+func (e enum) Descriptor() protoreflect.EnumDescriptor { return e.typ.Descriptor() }
+func (e enum) Type() protoreflect.EnumType             { return e.typ }
+func (e enum) Number() protoreflect.EnumNumber         { return e.num }
+
+// enumType is a dynamic protoreflect.EnumType.
+type enumType struct {
+	desc protoreflect.EnumDescriptor
+}
+
+// NewEnumType creates a new EnumType with the provided descriptor.
+//
+// EnumTypes created by this package are equal if their descriptors are equal.
+// That is, if ed1 == ed2, then NewEnumType(ed1) == NewEnumType(ed2).
+//
+// Enum values created by the EnumType are equal if their numbers are equal.
+func NewEnumType(desc protoreflect.EnumDescriptor) protoreflect.EnumType {
+	return enumType{desc}
+}
+
+func (et enumType) New(n protoreflect.EnumNumber) protoreflect.Enum { return enum{n, et} }
+func (et enumType) Descriptor() protoreflect.EnumDescriptor         { return et.desc }
+
+// extensionType is a dynamic protoreflect.ExtensionType.
+type extensionType struct {
+	desc extensionTypeDescriptor
+}
+
+// A Message is a dynamically constructed protocol buffer message.
+//
+// Message implements the [google.golang.org/protobuf/proto.Message] interface,
+// and may be used with all standard proto package functions
+// such as Marshal, Unmarshal, and so forth.
+//
+// Message also implements the [protoreflect.Message] interface.
+// See the [protoreflect] package documentation for that interface for how to
+// get and set fields and otherwise interact with the contents of a Message.
+//
+// Reflection API functions which construct messages, such as NewField,
+// return new dynamic messages of the appropriate type. Functions which take
+// messages, such as Set for a message-value field, will accept any message
+// with a compatible type.
+//
+// Operations which modify a Message are not safe for concurrent use.
+type Message struct {
+	typ     messageType
+	known   map[protoreflect.FieldNumber]protoreflect.Value
+	ext     map[protoreflect.FieldNumber]protoreflect.FieldDescriptor
+	unknown protoreflect.RawFields
+}
+
+var (
+	_ protoreflect.Message      = (*Message)(nil)
+	_ protoreflect.ProtoMessage = (*Message)(nil)
+	_ protoiface.MessageV1      = (*Message)(nil)
+)
+
+// NewMessage creates a new message with the provided descriptor.
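+//
+// For example, assuming md is a protoreflect.MessageDescriptor whose message
+// declares a string field "name", the message can be populated and marshaled
+// like any generated type:
+//
+//	m := NewMessage(md)
+//	m.Set(md.Fields().ByName("name"), protoreflect.ValueOfString("example"))
+//	b, err := proto.Marshal(m)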
+func NewMessage(desc protoreflect.MessageDescriptor) *Message {
+	return &Message{
+		typ:   messageType{desc},
+		known: make(map[protoreflect.FieldNumber]protoreflect.Value),
+		ext:   make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor),
+	}
+}
+
+// ProtoMessage implements the legacy message interface.
+func (m *Message) ProtoMessage() {}
+
+// ProtoReflect implements the [protoreflect.ProtoMessage] interface.
+func (m *Message) ProtoReflect() protoreflect.Message {
+	return m
+}
+
+// String returns a string representation of a message.
+func (m *Message) String() string {
+	return protoimpl.X.MessageStringOf(m)
+}
+
+// Reset clears the message to be empty, but preserves the dynamic message type.
+func (m *Message) Reset() {
+	m.known = make(map[protoreflect.FieldNumber]protoreflect.Value)
+	m.ext = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor)
+	m.unknown = nil
+}
+
+// Descriptor returns the message descriptor.
+func (m *Message) Descriptor() protoreflect.MessageDescriptor {
+	return m.typ.desc
+}
+
+// Type returns the message type.
+func (m *Message) Type() protoreflect.MessageType {
+	return m.typ
+}
+
+// New returns a newly allocated empty message with the same descriptor.
+// See [protoreflect.Message] for details.
+func (m *Message) New() protoreflect.Message {
+	return m.Type().New()
+}
+
+// Interface returns the message.
+// See [protoreflect.Message] for details.
+func (m *Message) Interface() protoreflect.ProtoMessage {
+	return m
+}
+
+// ProtoMethods is an internal detail of the [protoreflect.Message] interface.
+// Users should never call this directly.
+func (m *Message) ProtoMethods() *protoiface.Methods {
+	return nil
+}
+
+// Range visits every populated field in undefined order.
+// See [protoreflect.Message] for details.
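+//
+// For example, to print every populated field, continuing while the
+// callback returns true:
+//
+//	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+//		fmt.Println(fd.FullName(), v.Interface())
+//		return true // keep iterating
+//	})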
+func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+	for num, v := range m.known {
+		fd := m.ext[num]
+		if fd == nil {
+			fd = m.Descriptor().Fields().ByNumber(num)
+		}
+		if !isSet(fd, v) {
+			continue
+		}
+		if !f(fd, v) {
+			return
+		}
+	}
+}
+
+// Has reports whether a field is populated.
+// See [protoreflect.Message] for details.
+func (m *Message) Has(fd protoreflect.FieldDescriptor) bool {
+	m.checkField(fd)
+	if fd.IsExtension() && m.ext[fd.Number()] != fd {
+		return false
+	}
+	v, ok := m.known[fd.Number()]
+	if !ok {
+		return false
+	}
+	return isSet(fd, v)
+}
+
+// Clear clears a field.
+// See [protoreflect.Message] for details.
+func (m *Message) Clear(fd protoreflect.FieldDescriptor) {
+	m.checkField(fd)
+	num := fd.Number()
+	delete(m.known, num)
+	delete(m.ext, num)
+}
+
+// Get returns the value of a field.
+// See [protoreflect.Message] for details.
+func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
+	m.checkField(fd)
+	num := fd.Number()
+	if fd.IsExtension() {
+		if fd != m.ext[num] {
+			return fd.(protoreflect.ExtensionTypeDescriptor).Type().Zero()
+		}
+		return m.known[num]
+	}
+	if v, ok := m.known[num]; ok {
+		switch {
+		case fd.IsMap():
+			if v.Map().Len() > 0 {
+				return v
+			}
+		case fd.IsList():
+			if v.List().Len() > 0 {
+				return v
+			}
+		default:
+			return v
+		}
+	}
+	switch {
+	case fd.IsMap():
+		return protoreflect.ValueOfMap(&dynamicMap{desc: fd})
+	case fd.IsList():
+		return protoreflect.ValueOfList(emptyList{desc: fd})
+	case fd.Message() != nil:
+		return protoreflect.ValueOfMessage(&Message{typ: messageType{fd.Message()}})
+	case fd.Kind() == protoreflect.BytesKind:
+		return protoreflect.ValueOfBytes(append([]byte(nil), fd.Default().Bytes()...))
+	default:
+		return fd.Default()
+	}
+}
+
+// Mutable returns a mutable reference to a repeated, map, or message field.
+// See [protoreflect.Message] for details.
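+//
+// For example, assuming fd describes a repeated int32 field, elements can be
+// appended in place:
+//
+//	list := m.Mutable(fd).List()
+//	list.Append(protoreflect.ValueOfInt32(1))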
+func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
+	m.checkField(fd)
+	if !fd.IsMap() && !fd.IsList() && fd.Message() == nil {
+		panic(errors.New("%v: getting mutable reference to non-composite type", fd.FullName()))
+	}
+	if m.known == nil {
+		panic(errors.New("%v: modification of read-only message", fd.FullName()))
+	}
+	num := fd.Number()
+	if fd.IsExtension() {
+		if fd != m.ext[num] {
+			m.ext[num] = fd
+			m.known[num] = fd.(protoreflect.ExtensionTypeDescriptor).Type().New()
+		}
+		return m.known[num]
+	}
+	if v, ok := m.known[num]; ok {
+		return v
+	}
+	m.clearOtherOneofFields(fd)
+	m.known[num] = m.NewField(fd)
+	if fd.IsExtension() {
+		m.ext[num] = fd
+	}
+	return m.known[num]
+}
+
+// Set stores a value in a field.
+// See [protoreflect.Message] for details.
+func (m *Message) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+	m.checkField(fd)
+	if m.known == nil {
+		panic(errors.New("%v: modification of read-only message", fd.FullName()))
+	}
+	if fd.IsExtension() {
+		isValid := true
+		switch {
+		case !fd.(protoreflect.ExtensionTypeDescriptor).Type().IsValidValue(v):
+			isValid = false
+		case fd.IsList():
+			isValid = v.List().IsValid()
+		case fd.IsMap():
+			isValid = v.Map().IsValid()
+		case fd.Message() != nil:
+			isValid = v.Message().IsValid()
+		}
+		if !isValid {
+			panic(errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()))
+		}
+		m.ext[fd.Number()] = fd
+	} else {
+		typecheck(fd, v)
+	}
+	m.clearOtherOneofFields(fd)
+	m.known[fd.Number()] = v
+}
+
+func (m *Message) clearOtherOneofFields(fd protoreflect.FieldDescriptor) {
+	od := fd.ContainingOneof()
+	if od == nil {
+		return
+	}
+	num := fd.Number()
+	for i := 0; i < od.Fields().Len(); i++ {
+		if n := od.Fields().Get(i).Number(); n != num {
+			delete(m.known, n)
+		}
+	}
+}
+
+// NewField returns a new value assignable to the field of the given descriptor.
+// See [protoreflect.Message] for details.
+func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
+	m.checkField(fd)
+	switch {
+	case fd.IsExtension():
+		return fd.(protoreflect.ExtensionTypeDescriptor).Type().New()
+	case fd.IsMap():
+		return protoreflect.ValueOfMap(&dynamicMap{
+			desc: fd,
+			mapv: make(map[any]protoreflect.Value),
+		})
+	case fd.IsList():
+		return protoreflect.ValueOfList(&dynamicList{desc: fd})
+	case fd.Message() != nil:
+		return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect())
+	default:
+		return fd.Default()
+	}
+}
+
+// WhichOneof reports which field in a oneof is populated, returning nil if none are populated.
+// See [protoreflect.Message] for details.
+func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
+	for i := 0; i < od.Fields().Len(); i++ {
+		fd := od.Fields().Get(i)
+		if m.Has(fd) {
+			return fd
+		}
+	}
+	return nil
+}
+
+// GetUnknown returns the raw unknown fields.
+// See [protoreflect.Message] for details.
+func (m *Message) GetUnknown() protoreflect.RawFields {
+	return m.unknown
+}
+
+// SetUnknown sets the raw unknown fields.
+// See [protoreflect.Message] for details.
+func (m *Message) SetUnknown(r protoreflect.RawFields) {
+	if m.known == nil {
+		panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName()))
+	}
+	m.unknown = r
+}
+
+// IsValid reports whether the message is valid.
+// See [protoreflect.Message] for details.
+func (m *Message) IsValid() bool {
+	return m.known != nil
+}
+
+// checkField panics if fd does not belong to this message's descriptor or,
+// for an extension field, does not implement protoreflect.ExtensionTypeDescriptor.
+func (m *Message) checkField(fd protoreflect.FieldDescriptor) {
+	if fd.IsExtension() && fd.ContainingMessage().FullName() == m.Descriptor().FullName() {
+		if _, ok := fd.(protoreflect.ExtensionTypeDescriptor); !ok {
+			panic(errors.New("%v: extension field descriptor does not implement ExtensionTypeDescriptor", fd.FullName()))
+		}
+		return
+	}
+	if fd.Parent() == m.Descriptor() {
+		return
+	}
+	fields := m.Descriptor().Fields()
+	index := fd.Index()
+	if index >= fields.Len() || fields.Get(index) != fd {
+		panic(errors.New("%v: field descriptor does not belong to this message", fd.FullName()))
+	}
+}
+
+// messageType is a dynamic protoreflect.MessageType.
+type messageType struct {
+	desc protoreflect.MessageDescriptor
+}
+
+// NewMessageType creates a new MessageType with the provided descriptor.
+//
+// MessageTypes created by this package are equal if their descriptors are equal.
+// That is, if md1 == md2, then NewMessageType(md1) == NewMessageType(md2).
+func NewMessageType(desc protoreflect.MessageDescriptor) protoreflect.MessageType {
+	return messageType{desc}
+}
+
+func (mt messageType) New() protoreflect.Message                  { return NewMessage(mt.desc) }
+func (mt messageType) Zero() protoreflect.Message                 { return &Message{typ: messageType{mt.desc}} }
+func (mt messageType) Descriptor() protoreflect.MessageDescriptor { return mt.desc }
+func (mt messageType) Enum(i int) protoreflect.EnumType {
+	if ed := mt.desc.Fields().Get(i).Enum(); ed != nil {
+		return NewEnumType(ed)
+	}
+	return nil
+}
+func (mt messageType) Message(i int) protoreflect.MessageType {
+	if md := mt.desc.Fields().Get(i).Message(); md != nil {
+		return NewMessageType(md)
+	}
+	return nil
+}
+
+// emptyList is an immutable, always-empty protoreflect.List returned when an
+// unpopulated list field is read.
+type emptyList struct {
+	desc protoreflect.FieldDescriptor
+}
+
+func (x emptyList) Len() int                     { return 0 }
+func (x emptyList) Get(n int) protoreflect.Value { panic(errors.New("out of range")) }
+func (x emptyList) Set(n int, v protoreflect.Value) {
+	panic(errors.New("modification of immutable list"))
+}
+func (x emptyList) Append(v protoreflect.Value) { panic(errors.New("modification of immutable list")) }
+func (x emptyList) AppendMutable() protoreflect.Value {
+	panic(errors.New("modification of immutable list"))
+}
+func (x emptyList) Truncate(n int)                 { panic(errors.New("modification of immutable list")) }
+func (x emptyList) NewElement() protoreflect.Value { return newListEntry(x.desc) }
+func (x emptyList) IsValid() bool                  { return false }
+
+// dynamicList is a mutable protoreflect.List backed by a Go slice.
+type dynamicList struct {
+	desc protoreflect.FieldDescriptor
+	list []protoreflect.Value
+}
+
+func (x *dynamicList) Len() int {
+	return len(x.list)
+}
+
+func (x *dynamicList) Get(n int) protoreflect.Value {
+	return x.list[n]
+}
+
+func (x *dynamicList) Set(n int, v protoreflect.Value) {
+	typecheckSingular(x.desc, v)
+	x.list[n] = v
+}
+
+func (x *dynamicList) Append(v protoreflect.Value) {
+	typecheckSingular(x.desc, v)
+	x.list = append(x.list, v)
+}
+
+func (x *dynamicList) AppendMutable() protoreflect.Value {
+	if x.desc.Message() == nil {
+		panic(errors.New("%v: invalid AppendMutable on list with non-message type", x.desc.FullName()))
+	}
+	v := x.NewElement()
+	x.Append(v)
+	return v
+}
+
+func (x *dynamicList) Truncate(n int) {
+	// Zero truncated elements to avoid keeping data live.
+	for i := n; i < len(x.list); i++ {
+		x.list[i] = protoreflect.Value{}
+	}
+	x.list = x.list[:n]
+}
+
+func (x *dynamicList) NewElement() protoreflect.Value {
+	return newListEntry(x.desc)
+}
+
+func (x *dynamicList) IsValid() bool {
+	return true
+}
+
+// dynamicMap is a mutable protoreflect.Map backed by a Go map.
+type dynamicMap struct {
+	desc protoreflect.FieldDescriptor
+	mapv map[any]protoreflect.Value
+}
+
+func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] }
+func (x *dynamicMap) Set(k protoreflect.MapKey, v protoreflect.Value) {
+	typecheckSingular(x.desc.MapKey(), k.Value())
+	typecheckSingular(x.desc.MapValue(), v)
+	x.mapv[k.Interface()] = v
+}
+func (x *dynamicMap) Has(k protoreflect.MapKey) bool { return x.Get(k).IsValid() }
+func (x *dynamicMap) Clear(k protoreflect.MapKey)    { delete(x.mapv, k.Interface()) }
+func (x *dynamicMap) Mutable(k protoreflect.MapKey) protoreflect.Value {
+	if x.desc.MapValue().Message() == nil {
+		panic(errors.New("%v: invalid Mutable on map with non-message value type", x.desc.FullName()))
+	}
+	v := x.Get(k)
+	if !v.IsValid() {
+		v = x.NewValue()
+		x.Set(k, v)
+	}
+	return v
+}
+func (x *dynamicMap) Len() int { return len(x.mapv) }
+func (x *dynamicMap) NewValue() protoreflect.Value {
+	if md := x.desc.MapValue().Message(); md != nil {
+		return protoreflect.ValueOfMessage(NewMessage(md).ProtoReflect())
+	}
+	return x.desc.MapValue().Default()
+}
+func (x *dynamicMap) IsValid() bool {
+	return x.mapv != nil
+}
+
+func (x *dynamicMap) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) {
+	for k, v := range x.mapv {
+		if !f(protoreflect.ValueOf(k).MapKey(), v) {
+			return
+		}
+	}
+}
+
+// isSet reports whether v counts as populated for fd, applying
+// implicit-presence semantics (a non-zero value) to scalar fields that do
+// not track presence explicitly.
+func isSet(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+	switch {
+	case fd.IsMap():
+		return v.Map().Len() > 0
+	case fd.IsList():
+		return v.List().Len() > 0
+	case fd.ContainingOneof() != nil:
+		return true
+	case !fd.HasPresence() && !fd.IsExtension():
+		switch fd.Kind() {
+		case protoreflect.BoolKind:
+			return v.Bool()
+		case protoreflect.EnumKind:
+			return v.Enum() != 0
+		case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
+			return v.Int() != 0
+		case protoreflect.Uint32Kind, protoreflect.Uint64Kind, protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
+			return v.Uint() != 0
+		case protoreflect.FloatKind, protoreflect.DoubleKind:
+			return v.Float() != 0 || math.Signbit(v.Float())
+		case protoreflect.StringKind:
+			return v.String() != ""
+		case protoreflect.BytesKind:
+			return len(v.Bytes()) > 0
+		}
+	}
+	return true
+}
+
+func typecheck(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+	if err := typeIsValid(fd, v); err != nil {
+		panic(err)
+	}
+}
+
+func typeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error {
+	switch {
+	case !v.IsValid():
+		return errors.New("%v: assigning invalid value", fd.FullName())
+	case fd.IsMap():
+		if mapv, ok := v.Interface().(*dynamicMap); !ok || mapv.desc != fd || !mapv.IsValid() {
+			return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())
+		}
+		return nil
+	case fd.IsList():
+		switch list := v.Interface().(type) {
+		case *dynamicList:
+			if list.desc == fd && list.IsValid() {
+				return nil
+			}
+		case emptyList:
+			if list.desc == fd && list.IsValid() {
+				return nil
+			}
+		}
+		return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())
+	default:
+		return singularTypeIsValid(fd, v)
+	}
+}
+
+func typecheckSingular(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+	if err := singularTypeIsValid(fd, v); err != nil {
+		panic(err)
+	}
+}
+
+func singularTypeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error {
+	vi := v.Interface()
+	var ok bool
+	switch fd.Kind() {
+	case protoreflect.BoolKind:
+		_, ok = vi.(bool)
+	case protoreflect.EnumKind:
+		// We could check against the valid set of enum values, but do not.
+		_, ok = vi.(protoreflect.EnumNumber)
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		_, ok = vi.(int32)
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		_, ok = vi.(uint32)
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		_, ok = vi.(int64)
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		_, ok = vi.(uint64)
+	case protoreflect.FloatKind:
+		_, ok = vi.(float32)
+	case protoreflect.DoubleKind:
+		_, ok = vi.(float64)
+	case protoreflect.StringKind:
+		_, ok = vi.(string)
+	case protoreflect.BytesKind:
+		_, ok = vi.([]byte)
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		var m protoreflect.Message
+		m, ok = vi.(protoreflect.Message)
+		if ok && m.Descriptor().FullName() != fd.Message().FullName() {
+			return errors.New("%v: assigning invalid message type %v", fd.FullName(), m.Descriptor().FullName())
+		}
+		if dm, ok := vi.(*Message); ok && dm.known == nil {
+			return errors.New("%v: assigning invalid zero-value message", fd.FullName())
+		}
+	}
+	if !ok {
+		return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())
+	}
+	return nil
+}
+
+func newListEntry(fd protoreflect.FieldDescriptor) protoreflect.Value {
+	switch fd.Kind() {
+	case protoreflect.BoolKind:
+		return protoreflect.ValueOfBool(false)
+	case protoreflect.EnumKind:
+		return protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number())
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		return protoreflect.ValueOfInt32(0)
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		return protoreflect.ValueOfUint32(0)
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		return protoreflect.ValueOfInt64(0)
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		return protoreflect.ValueOfUint64(0)
+	case protoreflect.FloatKind:
+		return protoreflect.ValueOfFloat32(0)
+	case protoreflect.DoubleKind:
+		return protoreflect.ValueOfFloat64(0)
+	case protoreflect.StringKind:
+		return protoreflect.ValueOfString("")
+	case protoreflect.BytesKind:
+		return protoreflect.ValueOfBytes(nil)
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect())
+	}
+	panic(errors.New("%v: unknown kind %v", fd.FullName(), fd.Kind()))
+}
+
+// NewExtensionType creates a new ExtensionType with the provided descriptor.
+//
+// Dynamic ExtensionTypes with the same descriptor compare as equal. That is,
+// if xd1 == xd2, then NewExtensionType(xd1) == NewExtensionType(xd2).
+//
+// The InterfaceOf and ValueOf methods of the extension type are defined as:
+//
+//	func (xt extensionType) ValueOf(iv any) protoreflect.Value {
+//		return protoreflect.ValueOf(iv)
+//	}
+//
+//	func (xt extensionType) InterfaceOf(v protoreflect.Value) any {
+//		return v.Interface()
+//	}
+//
+// The Go type used by the proto.GetExtension and proto.SetExtension functions
+// is determined by these methods, and is therefore equivalent to the Go type
+// used to represent a protoreflect.Value. See the protoreflect.Value
+// documentation for more details.
+func NewExtensionType(desc protoreflect.ExtensionDescriptor) protoreflect.ExtensionType {
+	if xt, ok := desc.(protoreflect.ExtensionTypeDescriptor); ok {
+		desc = xt.Descriptor()
+	}
+	return extensionType{extensionTypeDescriptor{desc}}
+}
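+
+// As a usage sketch, where xd is an extension descriptor resolved from some
+// registry and m is a message of the extended type:
+//
+//	xt := dynamicpb.NewExtensionType(xd)
+//	proto.SetExtension(m, xt, xt.New().Interface())
+//	v := proto.GetExtension(m, xt) // Go form determined by ValueOf/InterfaceOf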
+
+func (xt extensionType) New() protoreflect.Value {
+	switch {
+	case xt.desc.IsMap():
+		return protoreflect.ValueOfMap(&dynamicMap{
+			desc: xt.desc,
+			mapv: make(map[any]protoreflect.Value),
+		})
+	case xt.desc.IsList():
+		return protoreflect.ValueOfList(&dynamicList{desc: xt.desc})
+	case xt.desc.Message() != nil:
+		return protoreflect.ValueOfMessage(NewMessage(xt.desc.Message()))
+	default:
+		return xt.desc.Default()
+	}
+}
+
+func (xt extensionType) Zero() protoreflect.Value {
+	switch {
+	case xt.desc.IsMap():
+		return protoreflect.ValueOfMap(&dynamicMap{desc: xt.desc})
+	case xt.desc.Cardinality() == protoreflect.Repeated:
+		return protoreflect.ValueOfList(emptyList{desc: xt.desc})
+	case xt.desc.Message() != nil:
+		return protoreflect.ValueOfMessage(&Message{typ: messageType{xt.desc.Message()}})
+	default:
+		return xt.desc.Default()
+	}
+}
+
+func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
+	return xt.desc
+}
+
+func (xt extensionType) ValueOf(iv any) protoreflect.Value {
+	v := protoreflect.ValueOf(iv)
+	typecheck(xt.desc, v)
+	return v
+}
+
+func (xt extensionType) InterfaceOf(v protoreflect.Value) any {
+	typecheck(xt.desc, v)
+	return v.Interface()
+}
+
+func (xt extensionType) IsValidInterface(iv any) bool {
+	return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil
+}
+
+func (xt extensionType) IsValidValue(v protoreflect.Value) bool {
+	return typeIsValid(xt.desc, v) == nil
+}
+
+type extensionTypeDescriptor struct {
+	protoreflect.ExtensionDescriptor
+}
+
+func (xt extensionTypeDescriptor) Type() protoreflect.ExtensionType {
+	return extensionType{xt}
+}
+
+func (xt extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor {
+	return xt.ExtensionDescriptor
+}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/hack/tools/vendor/google.golang.org/protobuf/types/dynamicpb/types.go
new file mode 100644
index 0000000000..c432817bb9
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/dynamicpb/types.go
@@ -0,0 +1,184 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dynamicpb
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+type extField struct {
+	name   protoreflect.FullName
+	number protoreflect.FieldNumber
+}
+
+// A Types is a collection of dynamically constructed descriptors.
+// Its methods are safe for concurrent use.
+//
+// Types implements [protoregistry.MessageTypeResolver] and [protoregistry.ExtensionTypeResolver].
+// A Types may be used as a [google.golang.org/protobuf/proto.UnmarshalOptions.Resolver].
+type Types struct {
+	// atomicExtFiles is used with sync/atomic and hence must be the first word
+	// of the struct to guarantee 64-bit alignment.
+	//
+	// TODO(stapelberg): once we only support Go 1.19 and newer, switch this
+	// field to be of type atomic.Uint64 to guarantee alignment on
+	// stack-allocated values, too.
+	atomicExtFiles uint64
+	extMu          sync.Mutex
+
+	files *protoregistry.Files
+
+	extensionsByMessage map[extField]protoreflect.ExtensionDescriptor
+}
+
+// NewTypes creates a new Types registry with the provided files.
+// The Files registry is retained, and changes to Files will be reflected in Types.
+// It is not safe to concurrently change the Files while calling Types methods.
+func NewTypes(f *protoregistry.Files) *Types {
+	return &Types{
+		files: f,
+	}
+}
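+
+// A typical use is as a resolver during unmarshaling (a sketch; files, data,
+// and m are placeholders for a *protoregistry.Files, wire bytes, and a
+// target message):
+//
+//	types := dynamicpb.NewTypes(files)
+//	err := proto.UnmarshalOptions{Resolver: types}.Unmarshal(data, m)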
+
+// FindEnumByName looks up an enum by its full name;
+// e.g., "google.protobuf.Field.Kind".
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumType, error) {
+	d, err := t.files.FindDescriptorByName(name)
+	if err != nil {
+		return nil, err
+	}
+	ed, ok := d.(protoreflect.EnumDescriptor)
+	if !ok {
+		return nil, errors.New("found wrong type: got %v, want enum", descName(d))
+	}
+	return NewEnumType(ed), nil
+}
+
+// FindExtensionByName looks up an extension field by the field's full name.
+// Note that this is the full name of the field as determined by
+// where the extension is declared and is unrelated to the full name of the
+// message being extended.
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.ExtensionType, error) {
+	d, err := t.files.FindDescriptorByName(name)
+	if err != nil {
+		return nil, err
+	}
+	xd, ok := d.(protoreflect.ExtensionDescriptor)
+	if !ok {
+		return nil, errors.New("found wrong type: got %v, want extension", descName(d))
+	}
+	return NewExtensionType(xd), nil
+}
+
+// FindExtensionByNumber looks up an extension field by the field number
+// within some parent message, identified by full name.
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+	// Construct the extension number map lazily, since not every user will need it.
+	// Update the map if new files are added to the registry.
+	if atomic.LoadUint64(&t.atomicExtFiles) != uint64(t.files.NumFiles()) {
+		t.updateExtensions()
+	}
+	xd := t.extensionsByMessage[extField{message, field}]
+	if xd == nil {
+		return nil, protoregistry.NotFound
+	}
+	return NewExtensionType(xd), nil
+}
+
+// FindMessageByName looks up a message by its full name;
+// e.g. "google.protobuf.Any".
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.MessageType, error) {
+	d, err := t.files.FindDescriptorByName(name)
+	if err != nil {
+		return nil, err
+	}
+	md, ok := d.(protoreflect.MessageDescriptor)
+	if !ok {
+		return nil, errors.New("found wrong type: got %v, want message", descName(d))
+	}
+	return NewMessageType(md), nil
+}
+
+// FindMessageByURL looks up a message by a URL identifier.
+// See documentation on google.protobuf.Any.type_url for the URL format.
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+	// This function is similar to FindMessageByName but
+	// truncates anything before and including '/' in the URL.
+	message := protoreflect.FullName(url)
+	if i := strings.LastIndexByte(url, '/'); i >= 0 {
+		message = message[i+len("/"):]
+	}
+	return t.FindMessageByName(message)
+}
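+
+// For example, the URL "type.googleapis.com/google.protobuf.Empty" resolves
+// to the same message type as FindMessageByName("google.protobuf.Empty").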
+
+func (t *Types) updateExtensions() {
+	t.extMu.Lock()
+	defer t.extMu.Unlock()
+	if atomic.LoadUint64(&t.atomicExtFiles) == uint64(t.files.NumFiles()) {
+		return
+	}
+	defer atomic.StoreUint64(&t.atomicExtFiles, uint64(t.files.NumFiles()))
+	t.files.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
+		t.registerExtensions(fd.Extensions())
+		t.registerExtensionsInMessages(fd.Messages())
+		return true
+	})
+}
+
+func (t *Types) registerExtensionsInMessages(mds protoreflect.MessageDescriptors) {
+	count := mds.Len()
+	for i := 0; i < count; i++ {
+		md := mds.Get(i)
+		t.registerExtensions(md.Extensions())
+		t.registerExtensionsInMessages(md.Messages())
+	}
+}
+
+func (t *Types) registerExtensions(xds protoreflect.ExtensionDescriptors) {
+	count := xds.Len()
+	for i := 0; i < count; i++ {
+		xd := xds.Get(i)
+		field := xd.Number()
+		message := xd.ContainingMessage().FullName()
+		if t.extensionsByMessage == nil {
+			t.extensionsByMessage = make(map[extField]protoreflect.ExtensionDescriptor)
+		}
+		t.extensionsByMessage[extField{message, field}] = xd
+	}
+}
+
+func descName(d protoreflect.Descriptor) string {
+	switch d.(type) {
+	case protoreflect.EnumDescriptor:
+		return "enum"
+	case protoreflect.EnumValueDescriptor:
+		return "enum value"
+	case protoreflect.MessageDescriptor:
+		return "message"
+	case protoreflect.ExtensionDescriptor:
+		return "extension"
+	case protoreflect.ServiceDescriptor:
+		return "service"
+	default:
+		return fmt.Sprintf("%T", d)
+	}
+}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index a2ca940c50..c7e860fcd6 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -29,11 +29,9 @@ type GoFeatures struct {
 
 func (x *GoFeatures) Reset() {
 	*x = GoFeatures{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_go_features_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_go_features_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *GoFeatures) String() string {
@@ -44,7 +42,7 @@ func (*GoFeatures) ProtoMessage() {}
 
 func (x *GoFeatures) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_go_features_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -145,20 +143,6 @@ func file_google_protobuf_go_features_proto_init() {
 	if File_google_protobuf_go_features_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*GoFeatures); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 7172b43d38..87da199a38 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) {
 
 func (x *Any) Reset() {
 	*x = Any{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_any_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_any_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Any) String() string {
@@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {}
 
 func (x *Any) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_any_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -461,20 +459,6 @@ func file_google_protobuf_any_proto_init() {
 	if File_google_protobuf_any_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*Any); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index 1b71bcd910..b99d4d2410 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -245,11 +245,9 @@ func (x *Duration) check() uint {
 
 func (x *Duration) Reset() {
 	*x = Duration{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_duration_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_duration_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Duration) String() string {
@@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {}
 
 func (x *Duration) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_duration_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -339,20 +337,6 @@ func file_google_protobuf_duration_proto_init() {
 	if File_google_protobuf_duration_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*Duration); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
new file mode 100644
index 0000000000..1761bc9c69
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
@@ -0,0 +1,150 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/empty.proto
+
+package emptypb
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//	service Foo {
+//	  rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//	}
+type Empty struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *Empty) Reset() {
+	*x = Empty{}
+	mi := &file_google_protobuf_empty_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Empty) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Empty) ProtoMessage() {}
+
+func (x *Empty) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_empty_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
+func (*Empty) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_empty_proto_rawDescGZIP(), []int{0}
+}
+
+var File_google_protobuf_empty_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_empty_proto_rawDesc = []byte{
+	0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07,
+	0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a,
+	0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b,
+	0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2,
+	0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77,
+	0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_empty_proto_rawDescOnce sync.Once
+	file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc
+)
+
+func file_google_protobuf_empty_proto_rawDescGZIP() []byte {
+	file_google_protobuf_empty_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData)
+	})
+	return file_google_protobuf_empty_proto_rawDescData
+}
+
+var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_empty_proto_goTypes = []any{
+	(*Empty)(nil), // 0: google.protobuf.Empty
+}
+var file_google_protobuf_empty_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_empty_proto_init() }
+func file_google_protobuf_empty_proto_init() {
+	if File_google_protobuf_empty_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_empty_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_empty_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_empty_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_empty_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_empty_proto = out.File
+	file_google_protobuf_empty_proto_rawDesc = nil
+	file_google_protobuf_empty_proto_goTypes = nil
+	file_google_protobuf_empty_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
new file mode 100644
index 0000000000..19de8d371f
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -0,0 +1,572 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/field_mask.proto
+
+// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto.
+//
+// The FieldMask message represents a set of symbolic field paths.
+// The paths are specific to some target message type,
+// which is not stored within the FieldMask message itself.
+//
+// # Constructing a FieldMask
+//
+// The New function is used to construct a FieldMask:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	fm, err := fieldmaskpb.New(messageType, "field.name", "field.number")
+//	if err != nil {
+//		... // handle error
+//	}
+//	... // make use of fm
+//
+// The "field.name" and "field.number" paths are valid paths according to the
+// google.protobuf.DescriptorProto message. Use of a path that does not correspond
+// to valid fields reachable from DescriptorProto would result in an error.
+//
+// Once a FieldMask message has been constructed,
+// the Append method can be used to insert additional paths to the path set:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	if err := fm.Append(messageType, "options"); err != nil {
+//		... // handle error
+//	}
+//
+// # Type checking a FieldMask
+//
+// In order to verify that a FieldMask represents a set of fields that are
+// reachable from some target message type, use the IsValid method:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	if fm.IsValid(messageType) {
+//		... // make use of fm
+//	}
+//
+// IsValid needs to be passed the target message type as an input since the
+// FieldMask message itself does not store the message type that the set of paths
+// is for.
+package fieldmaskpb
+
+import (
+	proto "google.golang.org/protobuf/proto"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sort "sort"
+	strings "strings"
+	sync "sync"
+)
+
+// `FieldMask` represents a set of symbolic field paths, for example:
+//
+//	paths: "f.a"
+//	paths: "f.b.d"
+//
+// Here `f` represents a field in some root message, `a` and `b`
+// fields in the message found in `f`, and `d` a field found in the
+// message in `f.b`.
+//
+// Field masks are used to specify a subset of fields that should be
+// returned by a get operation or modified by an update operation.
+// Field masks also have a custom JSON encoding (see below).
+//
+// # Field Masks in Projections
+//
+// When used in the context of a projection, a response message or
+// sub-message is filtered by the API to only contain those fields as
+// specified in the mask. For example, if the mask in the previous
+// example is applied to a response message as follows:
+//
+//	f {
+//	  a : 22
+//	  b {
+//	    d : 1
+//	    x : 2
+//	  }
+//	  y : 13
+//	}
+//	z: 8
+//
+// The result will not contain specific values for fields x, y, and z
+// (their value will be set to the default, and omitted in proto text
+// output):
+//
+//	f {
+//	  a : 22
+//	  b {
+//	    d : 1
+//	  }
+//	}
+//
+// A repeated field is not allowed except at the last position of a
+// paths string.
+//
+// If a FieldMask object is not present in a get operation, the
+// operation applies to all fields (as if a FieldMask of all fields
+// had been specified).
+//
+// Note that a field mask does not necessarily apply to the
+// top-level response message. In case of a REST get operation, the
+// field mask applies directly to the response, but in case of a REST
+// list operation, the mask instead applies to each individual message
+// in the returned resource list. In case of a REST custom method,
+// other definitions may be used. Where the mask applies will be
+// clearly documented together with its declaration in the API.  In
+// any case, the effect on the returned resource/resources is required
+// behavior for APIs.
+//
+// # Field Masks in Update Operations
+//
+// A field mask in update operations specifies which fields of the
+// targeted resource are going to be updated. The API is required
+// to only change the values of the fields as specified in the mask
+// and leave the others untouched. If a resource is passed in to
+// describe the updated values, the API ignores the values of all
+// fields not covered by the mask.
+//
+// If a repeated field is specified for an update operation, new values will
+// be appended to the existing repeated field in the target resource. Note that
+// a repeated field is only allowed in the last position of a `paths` string.
+//
+// If a sub-message is specified in the last position of the field mask for an
+// update operation, then new value will be merged into the existing sub-message
+// in the target resource.
+//
+// For example, given the target message:
+//
+//	f {
+//	  b {
+//	    d: 1
+//	    x: 2
+//	  }
+//	  c: [1]
+//	}
+//
+// And an update message:
+//
+//	f {
+//	  b {
+//	    d: 10
+//	  }
+//	  c: [2]
+//	}
+//
+// then if the field mask is:
+//
+//	paths: ["f.b", "f.c"]
+//
+// then the result will be:
+//
+//	f {
+//	  b {
+//	    d: 10
+//	    x: 2
+//	  }
+//	  c: [1, 2]
+//	}
+//
+// An implementation may provide options to override this default behavior for
+// repeated and message fields.
+//
+// In order to reset a field's value to the default, the field must
+// be in the mask and set to the default value in the provided resource.
+// Hence, in order to reset all fields of a resource, provide a default
+// instance of the resource and set all fields in the mask, or do
+// not provide a mask as described below.
+//
+// If a field mask is not present on update, the operation applies to
+// all fields (as if a field mask of all fields has been specified).
+// Note that in the presence of schema evolution, this may mean that
+// fields the client does not know and has therefore not filled into
+// the request will be reset to their default. If this is unwanted
+// behavior, a specific service may require a client to always specify
+// a field mask, producing an error if not.
+//
+// As with get operations, the location of the resource which
+// describes the updated values in the request message depends on the
+// operation kind. In any case, the effect of the field mask is
+// required to be honored by the API.
+//
+// ## Considerations for HTTP REST
+//
+// The HTTP kind of an update operation which uses a field mask must
+// be set to PATCH instead of PUT in order to satisfy HTTP semantics
+// (PUT must only be used for full updates).
+//
+// # JSON Encoding of Field Masks
+//
+// In JSON, a field mask is encoded as a single string where paths are
+// separated by a comma. Field names in each path are converted
+// to/from lower-camel naming conventions.
+//
+// As an example, consider the following message declarations:
+//
+//	message Profile {
+//	  User user = 1;
+//	  Photo photo = 2;
+//	}
+//	message User {
+//	  string display_name = 1;
+//	  string address = 2;
+//	}
+//
+// In proto a field mask for `Profile` may look as such:
+//
+//	mask {
+//	  paths: "user.display_name"
+//	  paths: "photo"
+//	}
+//
+// In JSON, the same mask is represented as below:
+//
+//	{
+//	  mask: "user.displayName,photo"
+//	}
+//
+// # Field Masks and Oneof Fields
+//
+// Field masks treat fields in oneofs just as regular fields. Consider the
+// following message:
+//
+//	message SampleMessage {
+//	  oneof test_oneof {
+//	    string name = 4;
+//	    SubMessage sub_message = 9;
+//	  }
+//	}
+//
+// The field mask can be:
+//
+//	mask {
+//	  paths: "name"
+//	}
+//
+// Or:
+//
+//	mask {
+//	  paths: "sub_message"
+//	}
+//
+// Note that oneof type names ("test_oneof" in this case) cannot be used in
+// paths.
+//
+// ## Field Mask Verification
+//
+// The implementation of any API method which has a FieldMask type field in the
+// request should verify the included field paths, and return an
+// `INVALID_ARGUMENT` error if any path is unmappable.
+type FieldMask struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of field mask paths.
+	Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+}
+
+// New constructs a field mask from a list of paths and verifies that
+// each one is valid according to the specified message type.
+func New(m proto.Message, paths ...string) (*FieldMask, error) {
+	x := new(FieldMask)
+	return x, x.Append(m, paths...)
+}
+
+// Union returns the union of all the paths in the input field masks.
+func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
+	var out []string
+	out = append(out, mx.GetPaths()...)
+	out = append(out, my.GetPaths()...)
+	for _, m := range ms {
+		out = append(out, m.GetPaths()...)
+	}
+	return &FieldMask{Paths: normalizePaths(out)}
+}
+
+// Intersect returns the intersection of all the paths in the input field masks.
+func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
+	var ss1, ss2 []string // reused buffers for performance
+	intersect := func(out, in []string) []string {
+		ss1 = normalizePaths(append(ss1[:0], in...))
+		ss2 = normalizePaths(append(ss2[:0], out...))
+		out = out[:0]
+		for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {
+			switch s1, s2 := ss1[i1], ss2[i2]; {
+			case hasPathPrefix(s1, s2):
+				out = append(out, s1)
+				i1++
+			case hasPathPrefix(s2, s1):
+				out = append(out, s2)
+				i2++
+			case lessPath(s1, s2):
+				i1++
+			case lessPath(s2, s1):
+				i2++
+			}
+		}
+		return out
+	}
+
+	out := Union(mx, my, ms...).GetPaths()
+	out = intersect(out, mx.GetPaths())
+	out = intersect(out, my.GetPaths())
+	for _, m := range ms {
+		out = intersect(out, m.GetPaths())
+	}
+	return &FieldMask{Paths: normalizePaths(out)}
+}
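+
+// For example:
+//
+//	mx := &fieldmaskpb.FieldMask{Paths: []string{"f.b", "f.c"}}
+//	my := &fieldmaskpb.FieldMask{Paths: []string{"f.b.d", "g"}}
+//	fieldmaskpb.Union(mx, my)     // paths: ["f.b", "f.c", "g"]
+//	fieldmaskpb.Intersect(mx, my) // paths: ["f.b.d"]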
+
+// IsValid reports whether all the paths are syntactically valid and
+// refer to known fields in the specified message type.
+// It reports false for a nil FieldMask.
+func (x *FieldMask) IsValid(m proto.Message) bool {
+	paths := x.GetPaths()
+	return x != nil && numValidPaths(m, paths) == len(paths)
+}
+
+// Append appends a list of paths to the mask and verifies that each one
+// is valid according to the specified message type.
+// An invalid path is not appended and stops the insertion of any subsequent paths.
+func (x *FieldMask) Append(m proto.Message, paths ...string) error {
+	numValid := numValidPaths(m, paths)
+	x.Paths = append(x.Paths, paths[:numValid]...)
+	paths = paths[numValid:]
+	if len(paths) > 0 {
+		name := m.ProtoReflect().Descriptor().FullName()
+		return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name)
+	}
+	return nil
+}
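+
+// For example, since "paths" is the only field of the FieldMask message
+// itself:
+//
+//	var fm FieldMask
+//	err := fm.Append(new(FieldMask), "paths", "bogus")
+//	// fm.Paths == ["paths"]; err names the invalid path "bogus"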
+
+func numValidPaths(m proto.Message, paths []string) int {
+	md0 := m.ProtoReflect().Descriptor()
+	for i, path := range paths {
+		md := md0
+		if !rangeFields(path, func(field string) bool {
+			// Search the field within the message.
+			if md == nil {
+				return false // not within a message
+			}
+			fd := md.Fields().ByName(protoreflect.Name(field))
+			// The real field name of a group is the message name.
+			if fd == nil {
+				gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field)))
+				if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field {
+					fd = gd
+				}
+			} else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field {
+				fd = nil
+			}
+			if fd == nil {
+				return false // message does not have this field
+			}
+
+			// Identify the next message to search within.
+			md = fd.Message() // may be nil
+
+			// Repeated fields are only allowed at the last position.
+			if fd.IsList() || fd.IsMap() {
+				md = nil
+			}
+
+			return true
+		}) {
+			return i
+		}
+	}
+	return len(paths)
+}
+
+// Normalize converts the mask to its canonical form where all paths are sorted
+// and redundant paths are removed.
+func (x *FieldMask) Normalize() {
+	x.Paths = normalizePaths(x.Paths)
+}
+
+func normalizePaths(paths []string) []string {
+	sort.Slice(paths, func(i, j int) bool {
+		return lessPath(paths[i], paths[j])
+	})
+
+	// Elide any path that is a prefix match on the previous.
+	out := paths[:0]
+	for _, path := range paths {
+		if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {
+			continue
+		}
+		out = append(out, path)
+	}
+	return out
+}
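+
+// For example, normalizing ["f.b.d", "f.b", "f"] yields just ["f"], since
+// "f" already covers the two more specific paths.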
+
+// hasPathPrefix is like strings.HasPrefix, but further checks for either
+// an exact match or that the prefix is delimited by a dot.
+func hasPathPrefix(path, prefix string) bool {
+	return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')
+}
+
+// lessPath is a lexicographical comparison where dot is specially treated
+// as the smallest symbol.
+func lessPath(x, y string) bool {
+	for i := 0; i < len(x) && i < len(y); i++ {
+		if x[i] != y[i] {
+			return (x[i] - '.') < (y[i] - '.')
+		}
+	}
+	return len(x) < len(y)
+}
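+
+// For example, lessPath("foo.bar", "foo-bar") is true even though '-' sorts
+// before '.' in plain byte order; this keeps a path sorted immediately ahead
+// of its sub-paths, which normalizePaths relies on when eliding prefixes.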
+
+// rangeFields is like strings.Split(path, "."), but avoids allocations by
+// iterating over each field in place and calling an iterator function.
+func rangeFields(path string, f func(field string) bool) bool {
+	for {
+		var field string
+		if i := strings.IndexByte(path, '.'); i >= 0 {
+			field, path = path[:i], path[i:]
+		} else {
+			field, path = path, ""
+		}
+
+		if !f(field) {
+			return false
+		}
+
+		if len(path) == 0 {
+			return true
+		}
+		path = strings.TrimPrefix(path, ".")
+	}
+}
+
+func (x *FieldMask) Reset() {
+	*x = FieldMask{}
+	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *FieldMask) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldMask) ProtoMessage() {}
+
+func (x *FieldMask) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead.
+func (*FieldMask) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FieldMask) GetPaths() []string {
+	if x != nil {
+		return x.Paths
+	}
+	return nil
+}
+
+var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_field_mask_proto_rawDesc = []byte{
+	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
+	0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+	0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e,
+	0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+	0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
+	0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70,
+	0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61,
+	0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
+	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
+	file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc
+)
+
+func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
+	file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData)
+	})
+	return file_google_protobuf_field_mask_proto_rawDescData
+}
+
+var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_field_mask_proto_goTypes = []any{
+	(*FieldMask)(nil), // 0: google.protobuf.FieldMask
+}
+var file_google_protobuf_field_mask_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_field_mask_proto_init() }
+func file_google_protobuf_field_mask_proto_init() {
+	if File_google_protobuf_field_mask_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_field_mask_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_field_mask_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_field_mask_proto = out.File
+	file_google_protobuf_field_mask_proto_rawDesc = nil
+	file_google_protobuf_field_mask_proto_goTypes = nil
+	file_google_protobuf_field_mask_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
new file mode 100644
index 0000000000..8f206a6611
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -0,0 +1,782 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/struct.proto
+
+// Package structpb contains generated types for google/protobuf/struct.proto.
+//
+// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are
+// used to represent arbitrary JSON. The Value message represents a JSON value,
+// the Struct message represents a JSON object, and the ListValue message
+// represents a JSON array. See https://json.org for more information.
+//
+// The Value, Struct, and ListValue types have generated MarshalJSON and
+// UnmarshalJSON methods such that they serialize JSON equivalent to what the
+// messages themselves represent. Use of these types with the
+// "google.golang.org/protobuf/encoding/protojson" package
+// ensures that they will be serialized as their JSON equivalent.
+//
+// # Conversion to and from a Go interface
+//
+// The standard Go "encoding/json" package has functionality to serialize
+// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and
+// ListValue.AsSlice methods can convert the protobuf message representation into
+// a form represented by any, map[string]any, and []any.
+// This form can be used with other packages that operate on such data structures
+// and also directly with the standard json package.
+//
+// In order to convert the any, map[string]any, and []any
+// forms back as Value, Struct, and ListValue messages, use the NewStruct,
+// NewList, and NewValue constructor functions.
+//
+// # Example usage
+//
+// Consider the following example JSON object:
+//
+//	{
+//		"firstName": "John",
+//		"lastName": "Smith",
+//		"isAlive": true,
+//		"age": 27,
+//		"address": {
+//			"streetAddress": "21 2nd Street",
+//			"city": "New York",
+//			"state": "NY",
+//			"postalCode": "10021-3100"
+//		},
+//		"phoneNumbers": [
+//			{
+//				"type": "home",
+//				"number": "212 555-1234"
+//			},
+//			{
+//				"type": "office",
+//				"number": "646 555-4567"
+//			}
+//		],
+//		"children": [],
+//		"spouse": null
+//	}
+//
+// To construct a Value message representing the above JSON object:
+//
+//	m, err := structpb.NewValue(map[string]any{
+//		"firstName": "John",
+//		"lastName":  "Smith",
+//		"isAlive":   true,
+//		"age":       27,
+//		"address": map[string]any{
+//			"streetAddress": "21 2nd Street",
+//			"city":          "New York",
+//			"state":         "NY",
+//			"postalCode":    "10021-3100",
+//		},
+//		"phoneNumbers": []any{
+//			map[string]any{
+//				"type":   "home",
+//				"number": "212 555-1234",
+//			},
+//			map[string]any{
+//				"type":   "office",
+//				"number": "646 555-4567",
+//			},
+//		},
+//		"children": []any{},
+//		"spouse":   nil,
+//	})
+//	if err != nil {
+//		... // handle error
+//	}
+//	... // make use of m as a *structpb.Value
+package structpb
+
+import (
+	base64 "encoding/base64"
+	json "encoding/json"
+	protojson "google.golang.org/protobuf/encoding/protojson"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	math "math"
+	reflect "reflect"
+	sync "sync"
+	utf8 "unicode/utf8"
+)
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+	// Null value.
+	NullValue_NULL_VALUE NullValue = 0
+)
+
+// Enum value maps for NullValue.
+var (
+	NullValue_name = map[int32]string{
+		0: "NULL_VALUE",
+	}
+	NullValue_value = map[string]int32{
+		"NULL_VALUE": 0,
+	}
+)
+
+func (x NullValue) Enum() *NullValue {
+	p := new(NullValue)
+	*p = x
+	return p
+}
+
+func (x NullValue) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (NullValue) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_struct_proto_enumTypes[0].Descriptor()
+}
+
+func (NullValue) Type() protoreflect.EnumType {
+	return &file_google_protobuf_struct_proto_enumTypes[0]
+}
+
+func (x NullValue) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use NullValue.Descriptor instead.
+func (NullValue) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0}
+}
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS, a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is a JSON object.
+type Struct struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Unordered map of dynamically typed values.
+	Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+// NewStruct constructs a Struct from a general-purpose Go map.
+// The map keys must be valid UTF-8.
+// The map values are converted using NewValue.
+func NewStruct(v map[string]any) (*Struct, error) {
+	x := &Struct{Fields: make(map[string]*Value, len(v))}
+	for k, v := range v {
+		if !utf8.ValidString(k) {
+			return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k)
+		}
+		var err error
+		x.Fields[k], err = NewValue(v)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return x, nil
+}
+
+// AsMap converts x to a general-purpose Go map.
+// The map values are converted by calling Value.AsInterface.
+func (x *Struct) AsMap() map[string]any {
+	f := x.GetFields()
+	vs := make(map[string]any, len(f))
+	for k, v := range f {
+		vs[k] = v.AsInterface()
+	}
+	return vs
+}
+
+func (x *Struct) MarshalJSON() ([]byte, error) {
+	return protojson.Marshal(x)
+}
+
+func (x *Struct) UnmarshalJSON(b []byte) error {
+	return protojson.Unmarshal(b, x)
+}
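+
+// For example, a Struct built from a Go map encodes as the equivalent JSON
+// object:
+//
+//	s, _ := structpb.NewStruct(map[string]any{"a": 1})
+//	b, _ := s.MarshalJSON() // the JSON object {"a":1}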
+
+func (x *Struct) Reset() {
+	*x = Struct{}
+	mi := &file_google_protobuf_struct_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Struct) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Struct) ProtoMessage() {}
+
+func (x *Struct) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_struct_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Struct.ProtoReflect.Descriptor instead.
+func (*Struct) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Struct) GetFields() map[string]*Value {
+	if x != nil {
+		return x.Fields
+	}
+	return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants. Absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is a JSON value.
+type Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The kind of value.
+	//
+	// Types that are assignable to Kind:
+	//
+	//	*Value_NullValue
+	//	*Value_NumberValue
+	//	*Value_StringValue
+	//	*Value_BoolValue
+	//	*Value_StructValue
+	//	*Value_ListValue
+	Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+// NewValue constructs a Value from a general-purpose Go interface.
+//
+//	╔═══════════════════════════════════════╤════════════════════════════════════════════╗
+//	║ Go type                               │ Conversion                                 ║
+//	╠═══════════════════════════════════════╪════════════════════════════════════════════╣
+//	║ nil                                   │ stored as NullValue                        ║
+//	║ bool                                  │ stored as BoolValue                        ║
+//	║ int, int8, int16, int32, int64        │ stored as NumberValue                      ║
+//	║ uint, uint8, uint16, uint32, uint64   │ stored as NumberValue                      ║
+//	║ float32, float64                      │ stored as NumberValue                      ║
+//	║ json.Number                           │ stored as NumberValue                      ║
+//	║ string                                │ stored as StringValue; must be valid UTF-8 ║
+//	║ []byte                                │ stored as StringValue; base64-encoded      ║
+//	║ map[string]any                        │ stored as StructValue                      ║
+//	║ []any                                 │ stored as ListValue                        ║
+//	╚═══════════════════════════════════════╧════════════════════════════════════════════╝
+//
+// When converting an int64 or uint64 to a NumberValue, numeric precision loss
+// is possible since they are stored as a float64.
+func NewValue(v any) (*Value, error) {
+	switch v := v.(type) {
+	case nil:
+		return NewNullValue(), nil
+	case bool:
+		return NewBoolValue(v), nil
+	case int:
+		return NewNumberValue(float64(v)), nil
+	case int8:
+		return NewNumberValue(float64(v)), nil
+	case int16:
+		return NewNumberValue(float64(v)), nil
+	case int32:
+		return NewNumberValue(float64(v)), nil
+	case int64:
+		return NewNumberValue(float64(v)), nil
+	case uint:
+		return NewNumberValue(float64(v)), nil
+	case uint8:
+		return NewNumberValue(float64(v)), nil
+	case uint16:
+		return NewNumberValue(float64(v)), nil
+	case uint32:
+		return NewNumberValue(float64(v)), nil
+	case uint64:
+		return NewNumberValue(float64(v)), nil
+	case float32:
+		return NewNumberValue(float64(v)), nil
+	case float64:
+		return NewNumberValue(float64(v)), nil
+	case json.Number:
+		n, err := v.Float64()
+		if err != nil {
+			return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err)
+		}
+		return NewNumberValue(n), nil
+	case string:
+		if !utf8.ValidString(v) {
+			return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v)
+		}
+		return NewStringValue(v), nil
+	case []byte:
+		s := base64.StdEncoding.EncodeToString(v)
+		return NewStringValue(s), nil
+	case map[string]any:
+		v2, err := NewStruct(v)
+		if err != nil {
+			return nil, err
+		}
+		return NewStructValue(v2), nil
+	case []any:
+		v2, err := NewList(v)
+		if err != nil {
+			return nil, err
+		}
+		return NewListValue(v2), nil
+	default:
+		return nil, protoimpl.X.NewError("invalid type: %T", v)
+	}
+}
+
+// NewNullValue constructs a new null Value.
+func NewNullValue() *Value {
+	return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}
+}
+
+// NewBoolValue constructs a new boolean Value.
+func NewBoolValue(v bool) *Value {
+	return &Value{Kind: &Value_BoolValue{BoolValue: v}}
+}
+
+// NewNumberValue constructs a new number Value.
+func NewNumberValue(v float64) *Value {
+	return &Value{Kind: &Value_NumberValue{NumberValue: v}}
+}
+
+// NewStringValue constructs a new string Value.
+func NewStringValue(v string) *Value {
+	return &Value{Kind: &Value_StringValue{StringValue: v}}
+}
+
+// NewStructValue constructs a new struct Value.
+func NewStructValue(v *Struct) *Value {
+	return &Value{Kind: &Value_StructValue{StructValue: v}}
+}
+
+// NewListValue constructs a new list Value.
+func NewListValue(v *ListValue) *Value {
+	return &Value{Kind: &Value_ListValue{ListValue: v}}
+}
+
+// AsInterface converts x to a general-purpose Go interface.
+//
+// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce
+// semantically equivalent JSON (assuming no errors occur).
+//
+// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are
+// converted as strings to remain compatible with MarshalJSON.
+func (x *Value) AsInterface() any {
+	switch v := x.GetKind().(type) {
+	case *Value_NumberValue:
+		if v != nil {
+			switch {
+			case math.IsNaN(v.NumberValue):
+				return "NaN"
+			case math.IsInf(v.NumberValue, +1):
+				return "Infinity"
+			case math.IsInf(v.NumberValue, -1):
+				return "-Infinity"
+			default:
+				return v.NumberValue
+			}
+		}
+	case *Value_StringValue:
+		if v != nil {
+			return v.StringValue
+		}
+	case *Value_BoolValue:
+		if v != nil {
+			return v.BoolValue
+		}
+	case *Value_StructValue:
+		if v != nil {
+			return v.StructValue.AsMap()
+		}
+	case *Value_ListValue:
+		if v != nil {
+			return v.ListValue.AsSlice()
+		}
+	}
+	return nil
+}
+
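+// A small sketch of the round trip back to plain Go values, under the same
+// structpb import assumption; every JSON number surfaces as a float64:
+//
+//	v, _ := structpb.NewValue(map[string]any{"n": 1})
+//	m := v.AsInterface().(map[string]any) // map[string]any{"n": float64(1)}
+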
+func (x *Value) MarshalJSON() ([]byte, error) {
+	return protojson.Marshal(x)
+}
+
+func (x *Value) UnmarshalJSON(b []byte) error {
+	return protojson.Unmarshal(b, x)
+}
+
+func (x *Value) Reset() {
+	*x = Value{}
+	mi := &file_google_protobuf_struct_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_struct_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+	if m != nil {
+		return m.Kind
+	}
+	return nil
+}
+
+func (x *Value) GetNullValue() NullValue {
+	if x, ok := x.GetKind().(*Value_NullValue); ok {
+		return x.NullValue
+	}
+	return NullValue_NULL_VALUE
+}
+
+func (x *Value) GetNumberValue() float64 {
+	if x, ok := x.GetKind().(*Value_NumberValue); ok {
+		return x.NumberValue
+	}
+	return 0
+}
+
+func (x *Value) GetStringValue() string {
+	if x, ok := x.GetKind().(*Value_StringValue); ok {
+		return x.StringValue
+	}
+	return ""
+}
+
+func (x *Value) GetBoolValue() bool {
+	if x, ok := x.GetKind().(*Value_BoolValue); ok {
+		return x.BoolValue
+	}
+	return false
+}
+
+func (x *Value) GetStructValue() *Struct {
+	if x, ok := x.GetKind().(*Value_StructValue); ok {
+		return x.StructValue
+	}
+	return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+	if x, ok := x.GetKind().(*Value_ListValue); ok {
+		return x.ListValue
+	}
+	return nil
+}
+
+type isValue_Kind interface {
+	isValue_Kind()
+}
+
+type Value_NullValue struct {
+	// Represents a null value.
+	NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_NumberValue struct {
+	// Represents a double value.
+	NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+	// Represents a string value.
+	StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BoolValue struct {
+	// Represents a boolean value.
+	BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_StructValue struct {
+	// Represents a structured value.
+	StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+	// Represents a repeated `Value`.
+	ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_NumberValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_StructValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
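+// The wrapper types above are the standard protoc-gen-go representation of the
+// proto3 `kind` oneof: exactly one wrapper occupies Value.Kind at a time. An
+// illustrative sketch of inspecting the kind directly instead of through the
+// typed Get* accessors:
+//
+//	switch k := v.GetKind().(type) {
+//	case *structpb.Value_StringValue:
+//		_ = k.StringValue
+//	case *structpb.Value_NumberValue:
+//		_ = k.NumberValue
+//	}
+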
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+type ListValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Repeated field of dynamically typed values.
+	Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+// NewList constructs a ListValue from a general-purpose Go slice.
+// The slice elements are converted using NewValue.
+func NewList(v []any) (*ListValue, error) {
+	x := &ListValue{Values: make([]*Value, len(v))}
+	for i, v := range v {
+		var err error
+		x.Values[i], err = NewValue(v)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return x, nil
+}
+
+// AsSlice converts x to a general-purpose Go slice.
+// The slice elements are converted by calling Value.AsInterface.
+func (x *ListValue) AsSlice() []any {
+	vals := x.GetValues()
+	vs := make([]any, len(vals))
+	for i, v := range vals {
+		vs[i] = v.AsInterface()
+	}
+	return vs
+}
+
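+// Illustrative usage of NewList and AsSlice, under the same structpb import
+// assumption; mixed element types are fine as long as NewValue accepts each:
+//
+//	lv, err := structpb.NewList([]any{"a", 1, true, nil})
+//	if err != nil {
+//		// an element had an unsupported type or invalid UTF-8
+//	}
+//	_ = lv.AsSlice() // []any{"a", float64(1), true, nil}
+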
+func (x *ListValue) MarshalJSON() ([]byte, error) {
+	return protojson.Marshal(x)
+}
+
+func (x *ListValue) UnmarshalJSON(b []byte) error {
+	return protojson.Unmarshal(b, x)
+}
+
+func (x *ListValue) Reset() {
+	*x = ListValue{}
+	mi := &file_google_protobuf_struct_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ListValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_struct_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+var File_google_protobuf_struct_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_struct_proto_rawDesc = []byte{
+	0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
+	0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69,
+	0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
+	0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+	0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65,
+	0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
+	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b,
+	0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62,
+	0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48,
+	0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c,
+	0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73,
+	0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69,
+	0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69,
+	0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22,
+	0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09,
+	0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c,
+	0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+	0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
+	0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
+	0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62,
+	0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
+	0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_struct_proto_rawDescOnce sync.Once
+	file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc
+)
+
+func file_google_protobuf_struct_proto_rawDescGZIP() []byte {
+	file_google_protobuf_struct_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData)
+	})
+	return file_google_protobuf_struct_proto_rawDescData
+}
+
+var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_google_protobuf_struct_proto_goTypes = []any{
+	(NullValue)(0),    // 0: google.protobuf.NullValue
+	(*Struct)(nil),    // 1: google.protobuf.Struct
+	(*Value)(nil),     // 2: google.protobuf.Value
+	(*ListValue)(nil), // 3: google.protobuf.ListValue
+	nil,               // 4: google.protobuf.Struct.FieldsEntry
+}
+var file_google_protobuf_struct_proto_depIdxs = []int32{
+	4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry
+	0, // 1: google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue
+	1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct
+	3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue
+	2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value
+	2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value
+	6, // [6:6] is the sub-list for method output_type
+	6, // [6:6] is the sub-list for method input_type
+	6, // [6:6] is the sub-list for extension type_name
+	6, // [6:6] is the sub-list for extension extendee
+	0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_struct_proto_init() }
+func file_google_protobuf_struct_proto_init() {
+	if File_google_protobuf_struct_proto != nil {
+		return
+	}
+	file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{
+		(*Value_NullValue)(nil),
+		(*Value_NumberValue)(nil),
+		(*Value_StringValue)(nil),
+		(*Value_BoolValue)(nil),
+		(*Value_StructValue)(nil),
+		(*Value_ListValue)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_struct_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   4,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_struct_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_struct_proto_depIdxs,
+		EnumInfos:         file_google_protobuf_struct_proto_enumTypes,
+		MessageInfos:      file_google_protobuf_struct_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_struct_proto = out.File
+	file_google_protobuf_struct_proto_rawDesc = nil
+	file_google_protobuf_struct_proto_goTypes = nil
+	file_google_protobuf_struct_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 83a5a645b0..0d20722d70 100644
--- a/hack/tools/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -254,11 +254,9 @@ func (x *Timestamp) check() uint {
 
 func (x *Timestamp) Reset() {
 	*x = Timestamp{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Timestamp) String() string {
@@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {}
 
 func (x *Timestamp) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() {
 	if File_google_protobuf_timestamp_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*Timestamp); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/hack/tools/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/hack/tools/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
new file mode 100644
index 0000000000..006060e569
--- /dev/null
+++ b/hack/tools/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -0,0 +1,632 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+//
+// These wrappers have no meaningful use within repeated fields as they lack
+// the ability to detect presence on individual elements.
+// These wrappers have no meaningful use within a map or a oneof since
+// individual entries of a map or fields of a oneof can already detect presence.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+package wrapperspb
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
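+// An illustrative sketch of the presence semantics described in the header
+// comment, assuming the package is imported as wrapperspb
+// (google.golang.org/protobuf/types/known/wrapperspb): a *wrapperspb.Int32Value
+// field distinguishes "never set" (nil) from an explicit zero, and the
+// nil-safe getters defined below make reads uniform:
+//
+//	var limit *wrapperspb.Int32Value // nil: absent
+//	limit = wrapperspb.Int32(0)      // non-nil: explicitly zero
+//	_ = limit.GetValue()             // 0 either way; test limit != nil for presence
+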
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+type DoubleValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The double value.
+	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Double stores v in a new DoubleValue and returns a pointer to it.
+func Double(v float64) *DoubleValue {
+	return &DoubleValue{Value: v}
+}
+
+func (x *DoubleValue) Reset() {
+	*x = DoubleValue{}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *DoubleValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleValue) ProtoMessage() {}
+
+func (x *DoubleValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleValue.ProtoReflect.Descriptor instead.
+func (*DoubleValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *DoubleValue) GetValue() float64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+type FloatValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The float value.
+	Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Float stores v in a new FloatValue and returns a pointer to it.
+func Float(v float32) *FloatValue {
+	return &FloatValue{Value: v}
+}
+
+func (x *FloatValue) Reset() {
+	*x = FloatValue{}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *FloatValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FloatValue) ProtoMessage() {}
+
+func (x *FloatValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FloatValue.ProtoReflect.Descriptor instead.
+func (*FloatValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FloatValue) GetValue() float32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+type Int64Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The int64 value.
+	Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Int64 stores v in a new Int64Value and returns a pointer to it.
+func Int64(v int64) *Int64Value {
+	return &Int64Value{Value: v}
+}
+
+func (x *Int64Value) Reset() {
+	*x = Int64Value{}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Int64Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int64Value) ProtoMessage() {}
+
+func (x *Int64Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int64Value.ProtoReflect.Descriptor instead.
+func (*Int64Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Int64Value) GetValue() int64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+type UInt64Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The uint64 value.
+	Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// UInt64 stores v in a new UInt64Value and returns a pointer to it.
+func UInt64(v uint64) *UInt64Value {
+	return &UInt64Value{Value: v}
+}
+
+func (x *UInt64Value) Reset() {
+	*x = UInt64Value{}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *UInt64Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UInt64Value) ProtoMessage() {}
+
+func (x *UInt64Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UInt64Value.ProtoReflect.Descriptor instead.
+func (*UInt64Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UInt64Value) GetValue() uint64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+type Int32Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The int32 value.
+	Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Int32 stores v in a new Int32Value and returns a pointer to it.
+func Int32(v int32) *Int32Value {
+	return &Int32Value{Value: v}
+}
+
+func (x *Int32Value) Reset() {
+	*x = Int32Value{}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Int32Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int32Value) ProtoMessage() {}
+
+func (x *Int32Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int32Value.ProtoReflect.Descriptor instead.
+func (*Int32Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Int32Value) GetValue() int32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+type UInt32Value struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The uint32 value.
+	Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// UInt32 stores v in a new UInt32Value and returns a pointer to it.
+func UInt32(v uint32) *UInt32Value {
+	return &UInt32Value{Value: v}
+}
+
+func (x *UInt32Value) Reset() {
+	*x = UInt32Value{}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *UInt32Value) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UInt32Value) ProtoMessage() {}
+
+func (x *UInt32Value) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UInt32Value.ProtoReflect.Descriptor instead.
+func (*UInt32Value) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *UInt32Value) GetValue() uint32 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+type BoolValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The bool value.
+	Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Bool stores v in a new BoolValue and returns a pointer to it.
+func Bool(v bool) *BoolValue {
+	return &BoolValue{Value: v}
+}
+
+func (x *BoolValue) Reset() {
+	*x = BoolValue{}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *BoolValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BoolValue) ProtoMessage() {}
+
+func (x *BoolValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BoolValue.ProtoReflect.Descriptor instead.
+func (*BoolValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *BoolValue) GetValue() bool {
+	if x != nil {
+		return x.Value
+	}
+	return false
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+type StringValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The string value.
+	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// String stores v in a new StringValue and returns a pointer to it.
+func String(v string) *StringValue {
+	return &StringValue{Value: v}
+}
+
+func (x *StringValue) Reset() {
+	*x = StringValue{}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *StringValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StringValue) ProtoMessage() {}
+
+func (x *StringValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use StringValue.ProtoReflect.Descriptor instead.
+func (*StringValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *StringValue) GetValue() string {
+	if x != nil {
+		return x.Value
+	}
+	return ""
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+type BytesValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The bytes value.
+	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Bytes stores v in a new BytesValue and returns a pointer to it.
+func Bytes(v []byte) *BytesValue {
+	return &BytesValue{Value: v}
+}
+
+func (x *BytesValue) Reset() {
+	*x = BytesValue{}
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *BytesValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BytesValue) ProtoMessage() {}
+
+func (x *BytesValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BytesValue.ProtoReflect.Descriptor instead.
+func (*BytesValue) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *BytesValue) GetValue() []byte {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_wrappers_proto_rawDesc = []byte{
+	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
+	0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
+	0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
+	0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
+	0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
+	0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
+	0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+	0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+	0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+	0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
+	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
+	file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc
+)
+
+func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
+	file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() {
+		file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData)
+	})
+	return file_google_protobuf_wrappers_proto_rawDescData
+}
+
+var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
+var file_google_protobuf_wrappers_proto_goTypes = []any{
+	(*DoubleValue)(nil), // 0: google.protobuf.DoubleValue
+	(*FloatValue)(nil),  // 1: google.protobuf.FloatValue
+	(*Int64Value)(nil),  // 2: google.protobuf.Int64Value
+	(*UInt64Value)(nil), // 3: google.protobuf.UInt64Value
+	(*Int32Value)(nil),  // 4: google.protobuf.Int32Value
+	(*UInt32Value)(nil), // 5: google.protobuf.UInt32Value
+	(*BoolValue)(nil),   // 6: google.protobuf.BoolValue
+	(*StringValue)(nil), // 7: google.protobuf.StringValue
+	(*BytesValue)(nil),  // 8: google.protobuf.BytesValue
+}
+var file_google_protobuf_wrappers_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_wrappers_proto_init() }
+func file_google_protobuf_wrappers_proto_init() {
+	if File_google_protobuf_wrappers_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   9,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_protobuf_wrappers_proto_goTypes,
+		DependencyIndexes: file_google_protobuf_wrappers_proto_depIdxs,
+		MessageInfos:      file_google_protobuf_wrappers_proto_msgTypes,
+	}.Build()
+	File_google_protobuf_wrappers_proto = out.File
+	file_google_protobuf_wrappers_proto_rawDesc = nil
+	file_google_protobuf_wrappers_proto_goTypes = nil
+	file_google_protobuf_wrappers_proto_depIdxs = nil
+}
diff --git a/hack/tools/vendor/k8s.io/api/authentication/v1/doc.go b/hack/tools/vendor/k8s.io/api/authentication/v1/doc.go
new file mode 100644
index 0000000000..3bdc89badc
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/authentication/v1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +groupName=authentication.k8s.io
+// +k8s:openapi-gen=true
+// +k8s:prerelease-lifecycle-gen=true
+
+package v1 // import "k8s.io/api/authentication/v1"
diff --git a/hack/tools/vendor/k8s.io/api/authentication/v1/generated.pb.go b/hack/tools/vendor/k8s.io/api/authentication/v1/generated.pb.go
new file mode 100644
index 0000000000..6d922030c1
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/authentication/v1/generated.pb.go
@@ -0,0 +1,2935 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/api/authentication/v1/generated.proto
+
+package v1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+
+	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *BoundObjectReference) Reset()      { *m = BoundObjectReference{} }
+func (*BoundObjectReference) ProtoMessage() {}
+func (*BoundObjectReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1237cbf54dccd53, []int{0}
+}
+func (m *BoundObjectReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *BoundObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *BoundObjectReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BoundObjectReference.Merge(m, src)
+}
+func (m *BoundObjectReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *BoundObjectReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_BoundObjectReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BoundObjectReference proto.InternalMessageInfo
+
+func (m *ExtraValue) Reset()      { *m = ExtraValue{} }
+func (*ExtraValue) ProtoMessage() {}
+func (*ExtraValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1237cbf54dccd53, []int{1}
+}
+func (m *ExtraValue) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ExtraValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtraValue.Merge(m, src)
+}
+func (m *ExtraValue) XXX_Size() int {
+	return m.Size()
+}
+func (m *ExtraValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExtraValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtraValue proto.InternalMessageInfo
+
+func (m *SelfSubjectReview) Reset()      { *m = SelfSubjectReview{} }
+func (*SelfSubjectReview) ProtoMessage() {}
+func (*SelfSubjectReview) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1237cbf54dccd53, []int{2}
+}
+func (m *SelfSubjectReview) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SelfSubjectReview) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SelfSubjectReview.Merge(m, src)
+}
+func (m *SelfSubjectReview) XXX_Size() int {
+	return m.Size()
+}
+func (m *SelfSubjectReview) XXX_DiscardUnknown() {
+	xxx_messageInfo_SelfSubjectReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SelfSubjectReview proto.InternalMessageInfo
+
+func (m *SelfSubjectReviewStatus) Reset()      { *m = SelfSubjectReviewStatus{} }
+func (*SelfSubjectReviewStatus) ProtoMessage() {}
+func (*SelfSubjectReviewStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1237cbf54dccd53, []int{3}
+}
+func (m *SelfSubjectReviewStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SelfSubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SelfSubjectReviewStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SelfSubjectReviewStatus.Merge(m, src)
+}
+func (m *SelfSubjectReviewStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *SelfSubjectReviewStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_SelfSubjectReviewStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SelfSubjectReviewStatus proto.InternalMessageInfo
+
+func (m *TokenRequest) Reset()      { *m = TokenRequest{} }
+func (*TokenRequest) ProtoMessage() {}
+func (*TokenRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1237cbf54dccd53, []int{4}
+}
+func (m *TokenRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TokenRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TokenRequest.Merge(m, src)
+}
+func (m *TokenRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *TokenRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_TokenRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TokenRequest proto.InternalMessageInfo
+
+func (m *TokenRequestSpec) Reset()      { *m = TokenRequestSpec{} }
+func (*TokenRequestSpec) ProtoMessage() {}
+func (*TokenRequestSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1237cbf54dccd53, []int{5}
+}
+func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TokenRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TokenRequestSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TokenRequestSpec.Merge(m, src)
+}
+func (m *TokenRequestSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *TokenRequestSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_TokenRequestSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo
+
+func (m *TokenRequestStatus) Reset()      { *m = TokenRequestStatus{} }
+func (*TokenRequestStatus) ProtoMessage() {}
+func (*TokenRequestStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1237cbf54dccd53, []int{6}
+}
+func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TokenRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TokenRequestStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TokenRequestStatus.Merge(m, src)
+}
+func (m *TokenRequestStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *TokenRequestStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_TokenRequestStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo
+
+func (m *TokenReview) Reset()      { *m = TokenReview{} }
+func (*TokenReview) ProtoMessage() {}
+func (*TokenReview) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1237cbf54dccd53, []int{7}
+}
+func (m *TokenReview) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TokenReview) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TokenReview.Merge(m, src)
+}
+func (m *TokenReview) XXX_Size() int {
+	return m.Size()
+}
+func (m *TokenReview) XXX_DiscardUnknown() {
+	xxx_messageInfo_TokenReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TokenReview proto.InternalMessageInfo
+
+func (m *TokenReviewSpec) Reset()      { *m = TokenReviewSpec{} }
+func (*TokenReviewSpec) ProtoMessage() {}
+func (*TokenReviewSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1237cbf54dccd53, []int{8}
+}
+func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TokenReviewSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TokenReviewSpec.Merge(m, src)
+}
+func (m *TokenReviewSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *TokenReviewSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo
+
+func (m *TokenReviewStatus) Reset()      { *m = TokenReviewStatus{} }
+func (*TokenReviewStatus) ProtoMessage() {}
+func (*TokenReviewStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1237cbf54dccd53, []int{9}
+}
+func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TokenReviewStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TokenReviewStatus.Merge(m, src)
+}
+func (m *TokenReviewStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *TokenReviewStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo
+
+func (m *UserInfo) Reset()      { *m = UserInfo{} }
+func (*UserInfo) ProtoMessage() {}
+func (*UserInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d1237cbf54dccd53, []int{10}
+}
+func (m *UserInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *UserInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UserInfo.Merge(m, src)
+}
+func (m *UserInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *UserInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_UserInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserInfo proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference")
+	proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1.ExtraValue")
+	proto.RegisterType((*SelfSubjectReview)(nil), "k8s.io.api.authentication.v1.SelfSubjectReview")
+	proto.RegisterType((*SelfSubjectReviewStatus)(nil), "k8s.io.api.authentication.v1.SelfSubjectReviewStatus")
+	proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.authentication.v1.TokenRequest")
+	proto.RegisterType((*TokenRequestSpec)(nil), "k8s.io.api.authentication.v1.TokenRequestSpec")
+	proto.RegisterType((*TokenRequestStatus)(nil), "k8s.io.api.authentication.v1.TokenRequestStatus")
+	proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1.TokenReview")
+	proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec")
+	proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus")
+	proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo")
+	proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1.UserInfo.ExtraEntry")
+}
+
+func init() {
+	proto.RegisterFile("k8s.io/api/authentication/v1/generated.proto", fileDescriptor_d1237cbf54dccd53)
+}
+
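+// An illustrative sketch (the struct fields come from the hand-written
+// k8s.io/api/authentication/v1 types, defined outside this generated file):
+// once registered, these types marshal to the proto wire format through the
+// generated methods below:
+//
+//	tr := &TokenReview{Spec: TokenReviewSpec{Token: "opaque-bearer-token"}}
+//	data, err := tr.Marshal()
+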
+var fileDescriptor_d1237cbf54dccd53 = []byte{
+	// 947 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4b, 0x6f, 0x23, 0xc5,
+	0x13, 0xf7, 0xf8, 0x11, 0xd9, 0xe5, 0x4d, 0xfe, 0x49, 0xef, 0x7f, 0x85, 0x15, 0x16, 0x4f, 0x98,
+	0x95, 0x50, 0x04, 0xbb, 0x33, 0x1b, 0x8b, 0xc7, 0x6a, 0x91, 0x90, 0x32, 0xc4, 0x02, 0x0b, 0xc1,
+	0xae, 0xda, 0x49, 0x40, 0x48, 0x48, 0xb4, 0xc7, 0x1d, 0xa7, 0xf1, 0xce, 0x83, 0x99, 0x1e, 0xb3,
+	0xbe, 0xed, 0x47, 0xe0, 0x08, 0x12, 0x07, 0x3e, 0x04, 0x12, 0x5f, 0x21, 0xc7, 0x15, 0xe2, 0xb0,
+	0x07, 0x64, 0x91, 0xe1, 0xca, 0x91, 0x13, 0x27, 0xd4, 0x3d, 0x1d, 0xdb, 0x63, 0x27, 0x13, 0x9f,
+	0xf6, 0xe6, 0xa9, 0xc7, 0xaf, 0xaa, 0x7e, 0x55, 0x5d, 0x65, 0xb8, 0x3b, 0x7c, 0x10, 0x99, 0xcc,
+	0xb7, 0x48, 0xc0, 0x2c, 0x12, 0xf3, 0x53, 0xea, 0x71, 0xe6, 0x10, 0xce, 0x7c, 0xcf, 0x1a, 0xed,
+	0x59, 0x03, 0xea, 0xd1, 0x90, 0x70, 0xda, 0x37, 0x83, 0xd0, 0xe7, 0x3e, 0xba, 0x9d, 0x5a, 0x9b,
+	0x24, 0x60, 0x66, 0xd6, 0xda, 0x1c, 0xed, 0x6d, 0xdf, 0x1b, 0x30, 0x7e, 0x1a, 0xf7, 0x4c, 0xc7,
+	0x77, 0xad, 0x81, 0x3f, 0xf0, 0x2d, 0xe9, 0xd4, 0x8b, 0x4f, 0xe4, 0x97, 0xfc, 0x90, 0xbf, 0x52,
+	0xb0, 0xed, 0xb7, 0x67, 0xa1, 0x5d, 0xe2, 0x9c, 0x32, 0x8f, 0x86, 0x63, 0x2b, 0x18, 0x0e, 0x84,
+	0x20, 0xb2, 0x5c, 0xca, 0xc9, 0x25, 0x29, 0x6c, 0x5b, 0x57, 0x79, 0x85, 0xb1, 0xc7, 0x99, 0x4b,
+	0x97, 0x1c, 0xde, 0xbd, 0xce, 0x21, 0x72, 0x4e, 0xa9, 0x4b, 0x16, 0xfd, 0x8c, 0xdf, 0x34, 0xf8,
+	0xbf, 0xed, 0xc7, 0x5e, 0xff, 0x51, 0xef, 0x1b, 0xea, 0x70, 0x4c, 0x4f, 0x68, 0x48, 0x3d, 0x87,
+	0xa2, 0x1d, 0x28, 0x0f, 0x99, 0xd7, 0x6f, 0x68, 0x3b, 0xda, 0x6e, 0xcd, 0xbe, 0x71, 0x36, 0xd1,
+	0x0b, 0xc9, 0x44, 0x2f, 0x7f, 0xc2, 0xbc, 0x3e, 0x96, 0x1a, 0xd4, 0x02, 0x20, 0x01, 0x3b, 0xa6,
+	0x61, 0xc4, 0x7c, 0xaf, 0x51, 0x94, 0x76, 0x48, 0xd9, 0xc1, 0xfe, 0xe3, 0x8e, 0xd2, 0xe0, 0x39,
+	0x2b, 0x81, 0xea, 0x11, 0x97, 0x36, 0x4a, 0x59, 0xd4, 0xcf, 0x88, 0x4b, 0xb1, 0xd4, 0x20, 0x1b,
+	0x4a, 0x71, 0xe7, 0xa0, 0x51, 0x96, 0x06, 0xf7, 0x95, 0x41, 0xe9, 0xa8, 0x73, 0xf0, 0xef, 0x44,
+	0x7f, 0xfd, 0xaa, 0x22, 0xf9, 0x38, 0xa0, 0x91, 0x79, 0xd4, 0x39, 0xc0, 0xc2, 0xd9, 0x78, 0x0f,
+	0xa0, 0xfd, 0x94, 0x87, 0xe4, 0x98, 0x3c, 0x89, 0x29, 0xd2, 0xa1, 0xc2, 0x38, 0x75, 0xa3, 0x86,
+	0xb6, 0x53, 0xda, 0xad, 0xd9, 0xb5, 0x64, 0xa2, 0x57, 0x3a, 0x42, 0x80, 0x53, 0xf9, 0xc3, 0xea,
+	0x0f, 0x3f, 0xeb, 0x85, 0x67, 0x7f, 0xec, 0x14, 0x8c, 0xdf, 0x35, 0xd8, 0xea, 0xd2, 0x27, 0x27,
+	0xdd, 0x58, 0xb1, 0x31, 0x62, 0xf4, 0x3b, 0xf4, 0x35, 0x54, 0x45, 0x9f, 0xfa, 0x84, 0x13, 0x49,
+	0x47, 0xbd, 0x75, 0xdf, 0x9c, 0x8d, 0xc8, 0x34, 0x13, 0x33, 0x18, 0x0e, 0x84, 0x20, 0x32, 0x85,
+	0xb5, 0x39, 0xda, 0x33, 0x53, 0x4e, 0x3f, 0xa5, 0x9c, 0xcc, 0x88, 0x99, 0xc9, 0xf0, 0x14, 0x15,
+	0x7d, 0x05, 0x6b, 0x11, 0x27, 0x3c, 0x8e, 0x24, 0x8d, 0xf5, 0xd6, 0x3b, 0x66, 0xde, 0x08, 0x9a,
+	0x4b, 0x29, 0x76, 0xa5, 0xb3, 0xbd, 0xa1, 0x82, 0xac, 0xa5, 0xdf, 0x58, 0x81, 0x1a, 0x3e, 0xbc,
+	0x72, 0x85, 0x0b, 0x3a, 0x84, 0x6a, 0x1c, 0xd1, 0xb0, 0xe3, 0x9d, 0xf8, 0xaa, 0xb6, 0x37, 0xf2,
+	0x63, 0x1f, 0x29, 0x6b, 0x7b, 0x53, 0x05, 0xab, 0x5e, 0x48, 0xf0, 0x14, 0xc9, 0xf8, 0xa9, 0x08,
+	0x37, 0x0e, 0xfd, 0x21, 0xf5, 0x30, 0xfd, 0x36, 0xa6, 0x11, 0x7f, 0x09, 0x14, 0x3e, 0x86, 0x72,
+	0x14, 0x50, 0x47, 0x11, 0x68, 0xe6, 0x17, 0x31, 0x9f, 0x5b, 0x37, 0xa0, 0xce, 0x6c, 0x12, 0xc5,
+	0x17, 0x96, 0x48, 0xe8, 0x8b, 0x69, 0x53, 0x4a, 0x4b, 0x19, 0x5f, 0x87, 0x99, 0xdf, 0x8f, 0x7f,
+	0x34, 0xd8, 0x5c, 0x4c, 0x01, 0xbd, 0x05, 0x35, 0x12, 0xf7, 0x99, 0x78, 0x7c, 0x17, 0xa3, 0xba,
+	0x9e, 0x4c, 0xf4, 0xda, 0xfe, 0x85, 0x10, 0xcf, 0xf4, 0xe8, 0x43, 0xd8, 0xa2, 0x4f, 0x03, 0x16,
+	0xca, 0xe8, 0x5d, 0xea, 0xf8, 0x5e, 0x3f, 0x92, 0x6f, 0xa6, 0x64, 0xdf, 0x4a, 0x26, 0xfa, 0x56,
+	0x7b, 0x51, 0x89, 0x97, 0xed, 0x91, 0x07, 0x1b, 0xbd, 0xcc, 0xd3, 0x57, 0x85, 0xb6, 0xf2, 0x0b,
+	0xbd, 0x6c, 0x5d, 0xd8, 0x28, 0x99, 0xe8, 0x1b, 0x59, 0x0d, 0x5e, 0x40, 0x37, 0x7e, 0xd1, 0x00,
+	0x2d, 0xb3, 0x84, 0xee, 0x40, 0x85, 0x0b, 0xa9, 0x5a, 0x35, 0xeb, 0x8a, 0xb4, 0x4a, 0x6a, 0x9a,
+	0xea, 0xd0, 0x18, 0x6e, 0xce, 0x0a, 0x38, 0x64, 0x2e, 0x8d, 0x38, 0x71, 0x03, 0xd5, 0xed, 0x37,
+	0x57, 0x9b, 0x25, 0xe1, 0x66, 0xbf, 0xaa, 0xe0, 0x6f, 0xb6, 0x97, 0xe1, 0xf0, 0x65, 0x31, 0x8c,
+	0x1f, 0x8b, 0x50, 0x57, 0x69, 0xbf, 0xa4, 0x75, 0xf0, 0x28, 0x33, 0xcb, 0xf7, 0x56, 0x9a, 0x3b,
+	0xf9, 0xa6, 0xaf, 0x1a, 0xe5, 0xcf, 0x17, 0x46, 0xd9, 0x5a, 0x1d, 0x32, 0x7f, 0x92, 0x1d, 0xf8,
+	0xdf, 0x42, 0xfc, 0xd5, 0xda, 0x99, 0x19, 0xf6, 0x62, 0xfe, 0xb0, 0x1b, 0x7f, 0x6b, 0xb0, 0xb5,
+	0x94, 0x12, 0x7a, 0x1f, 0xd6, 0xe7, 0x32, 0xa7, 0xe9, 0xa5, 0xaa, 0xda, 0xb7, 0x54, 0xbc, 0xf5,
+	0xfd, 0x79, 0x25, 0xce, 0xda, 0xa2, 0x8f, 0xa1, 0x2c, 0x96, 0x95, 0x62, 0x78, 0xd5, 0x95, 0x37,
+	0xa5, 0x56, 0x48, 0xb0, 0x44, 0xc8, 0x56, 0x52, 0xbe, 0xe6, 0xd9, 0xde, 0x81, 0x0a, 0x0d, 0x43,
+	0x3f, 0x54, 0xf7, 0x6f, 0xca, 0x4d, 0x5b, 0x08, 0x71, 0xaa, 0x33, 0x7e, 0x2d, 0xc2, 0x74, 0xa7,
+	0xa2, 0xbb, 0xe9, 0x7e, 0x96, 0x47, 0x33, 0x25, 0x34, 0xb3, 0x77, 0x85, 0x1c, 0x4f, 0x2d, 0xd0,
+	0x6b, 0x50, 0x8a, 0x59, 0x5f, 0xdd, 0xe2, 0xfa, 0xdc, 0xf1, 0xc4, 0x42, 0x8e, 0x0c, 0x58, 0x1b,
+	0x84, 0x7e, 0x1c, 0x88, 0x31, 0x10, 0x89, 0x82, 0xe8, 0xe8, 0x47, 0x52, 0x82, 0x95, 0x06, 0x1d,
+	0x43, 0x85, 0x8a, 0xdb, 0x29, 0x6b, 0xa9, 0xb7, 0xf6, 0x56, 0xa3, 0xc6, 0x94, 0xf7, 0xb6, 0xed,
+	0xf1, 0x70, 0x3c, 0x57, 0x95, 0x90, 0xe1, 0x14, 0x6e, 0xbb, 0xa7, 0x6e, 0xb2, 0xb4, 0x41, 0x9b,
+	0x50, 0x1a, 0xd2, 0x71, 0x5a, 0x11, 0x16, 0x3f, 0xd1, 0x07, 0x50, 0x19, 0x89, 0x73, 0xad, 0x5a,
+	0xb2, 0x9b, 0x1f, 0x77, 0x76, 0xde, 0x71, 0xea, 0xf6, 0xb0, 0xf8, 0x40, 0xb3, 0xed, 0xb3, 0xf3,
+	0x66, 0xe1, 0xf9, 0x79, 0xb3, 0xf0, 0xe2, 0xbc, 0x59, 0x78, 0x96, 0x34, 0xb5, 0xb3, 0xa4, 0xa9,
+	0x3d, 0x4f, 0x9a, 0xda, 0x8b, 0xa4, 0xa9, 0xfd, 0x99, 0x34, 0xb5, 0xef, 0xff, 0x6a, 0x16, 0xbe,
+	0xbc, 0x9d, 0xf7, 0x67, 0xf0, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xb7, 0xc1, 0xa0, 0x2b,
+	0x0a, 0x00, 0x00,
+}
+
+func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BoundObjectReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
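+// MarshalToSizedBuffer fills dAtA from the end toward the front, so each
+// length-delimited field's size is known before its varint length prefix is
+// written. The tag bytes 0xa, 0x12, 0x1a and 0x22 encode
+// (fieldNumber << 3) | wireType for fields 1-4, all wire type 2
+// (length-delimited).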
+func (m *BoundObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.APIVersion)
+	copy(dAtA[i:], m.APIVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Kind)
+	copy(dAtA[i:], m.Kind)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m ExtraValue) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
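+// ExtraValue is marshaled as a repeated string: each element is written in
+// reverse order under tag 0xa (field 1, wire type 2), matching
+// `repeated string items = 1` in generated.proto.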
+func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m[iNdEx])
+			copy(dAtA[i:], m[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SelfSubjectReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SelfSubjectReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
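+// MarshalToSizedBuffer serializes each embedded message into the tail of the
+// buffer first, then prepends its varint-encoded size and tag, producing the
+// wire form of a length-delimited (wire type 2) field.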
+func (m *SelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SelfSubjectReviewStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SelfSubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SelfSubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TokenRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TokenRequestSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
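+// MarshalToSizedBuffer writes optional fields only when their pointers are
+// non-nil: ExpirationSeconds as a varint under tag 0x20 (field 4, wire type
+// 0) and BoundObjectRef as a nested message under tag 0x1a (field 3).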
+func (m *TokenRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ExpirationSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.BoundObjectRef != nil {
+		{
+			size, err := m.BoundObjectRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Audiences) > 0 {
+		for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Audiences[iNdEx])
+			copy(dAtA[i:], m.Audiences[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TokenRequestStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TokenRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.ExpirationTimestamp.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Token)
+	copy(dAtA[i:], m.Token)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TokenReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Audiences) > 0 {
+		for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Audiences[iNdEx])
+			copy(dAtA[i:], m.Audiences[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Token)
+	copy(dAtA[i:], m.Token)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
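+// MarshalToSizedBuffer encodes Authenticated as a varint bool under tag 0x8
+// (field 1, wire type 0); since the buffer is filled back to front, it is the
+// first field on the wire.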
+func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Audiences) > 0 {
+		for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Audiences[iNdEx])
+			copy(dAtA[i:], m.Audiences[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i -= len(m.Error)
+	copy(dAtA[i:], m.Error)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error)))
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.User.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i--
+	if m.Authenticated {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *UserInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
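+// MarshalToSizedBuffer emits the Extra map as repeated nested entries, each
+// with the key as field 1 and the value as field 2, under tag 0x22 (field 4).
+// Keys are sorted first so serialization is deterministic.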
+func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Extra) > 0 {
+		keysForExtra := make([]string, 0, len(m.Extra))
+		for k := range m.Extra {
+			keysForExtra = append(keysForExtra, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+		for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Extra[string(keysForExtra[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForExtra[iNdEx])
+			copy(dAtA[i:], keysForExtra[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.Groups) > 0 {
+		for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Groups[iNdEx])
+			copy(dAtA[i:], m.Groups[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Username)
+	copy(dAtA[i:], m.Username)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
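+// encodeVarintGenerated writes v as a base-128 varint ending just before
+// offset and returns the offset of its first byte. Each byte carries seven
+// payload bits, least-significant group first, with the continuation bit set
+// on all but the last byte.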
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *BoundObjectReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Kind)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.APIVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m ExtraValue) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for _, s := range m {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *SelfSubjectReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *SelfSubjectReviewStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.UserInfo.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *TokenRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *TokenRequestSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Audiences) > 0 {
+		for _, s := range m.Audiences {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.BoundObjectRef != nil {
+		l = m.BoundObjectRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ExpirationSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.ExpirationSeconds))
+	}
+	return n
+}
+
+func (m *TokenRequestStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Token)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.ExpirationTimestamp.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *TokenReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *TokenReviewSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Token)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Audiences) > 0 {
+		for _, s := range m.Audiences {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *TokenReviewStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 2
+	l = m.User.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Error)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Audiences) > 0 {
+		for _, s := range m.Audiences {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *UserInfo) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Username)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Groups) > 0 {
+		for _, s := range m.Groups {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Extra) > 0 {
+		for k, v := range m.Extra {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
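+// sovGenerated returns the number of bytes needed to varint-encode x,
+// ceil(bitlen/7) with a minimum of one byte; sozGenerated is the same for
+// zigzag-encoded signed values.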
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
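+
+// The String methods below render a compact, human-readable debug form of
+// each message for %v formatting.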
+func (this *BoundObjectReference) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&BoundObjectReference{`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SelfSubjectReview) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SelfSubjectReview{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "SelfSubjectReviewStatus", "SelfSubjectReviewStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SelfSubjectReviewStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SelfSubjectReviewStatus{`,
+		`UserInfo:` + strings.Replace(strings.Replace(this.UserInfo.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TokenRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TokenRequest{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenRequestSpec", "TokenRequestSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenRequestStatus", "TokenRequestStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TokenRequestSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TokenRequestSpec{`,
+		`Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`,
+		`BoundObjectRef:` + strings.Replace(this.BoundObjectRef.String(), "BoundObjectReference", "BoundObjectReference", 1) + `,`,
+		`ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TokenRequestStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TokenRequestStatus{`,
+		`Token:` + fmt.Sprintf("%v", this.Token) + `,`,
+		`ExpirationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExpirationTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TokenReview) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TokenReview{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TokenReviewSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TokenReviewSpec{`,
+		`Token:` + fmt.Sprintf("%v", this.Token) + `,`,
+		`Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TokenReviewStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TokenReviewStatus{`,
+		`Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`,
+		`User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`,
+		`Error:` + fmt.Sprintf("%v", this.Error) + `,`,
+		`Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UserInfo) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForExtra := make([]string, 0, len(this.Extra))
+	for k := range this.Extra {
+		keysForExtra = append(keysForExtra, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForExtra)
+	mapStringForExtra := "map[string]ExtraValue{"
+	for _, k := range keysForExtra {
+		mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k])
+	}
+	mapStringForExtra += "}"
+	s := strings.Join([]string{`&UserInfo{`,
+		`Username:` + fmt.Sprintf("%v", this.Username) + `,`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`Groups:` + fmt.Sprintf("%v", this.Groups) + `,`,
+		`Extra:` + mapStringForExtra + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
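+
+// The Unmarshal methods below decode the wire format by hand: an outer loop
+// reads each tag varint, splits it into field number (wire >> 3) and wire
+// type (wire & 0x7), fills known fields, and routes unknown fields through
+// skipGenerated so messages from newer API versions still parse.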
+func (m *BoundObjectReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BoundObjectReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BoundObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kind = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.APIVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExtraValue) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			*m = append(*m, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SelfSubjectReview) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SelfSubjectReview: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SelfSubjectReviewStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SelfSubjectReviewStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SelfSubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TokenRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TokenRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TokenRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TokenRequestSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TokenRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BoundObjectRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.BoundObjectRef == nil {
+				m.BoundObjectRef = &BoundObjectReference{}
+			}
+			if err := m.BoundObjectRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ExpirationSeconds = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TokenRequestStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TokenRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Token = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExpirationTimestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ExpirationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TokenReview) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TokenReview: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Token = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Authenticated = bool(v != 0)
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Error = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UserInfo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UserInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Username = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Extra == nil {
+				m.Extra = make(map[string]ExtraValue)
+			}
+			var mapkey string
+			mapvalue := &ExtraValue{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &ExtraValue{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Extra[mapkey] = *mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
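+
+// skipGenerated advances past a single unknown field, using the wire type to
+// find its extent: varint (0), fixed64 (1), length-delimited (2), deprecated
+// start/end group (3/4), or fixed32 (5). It returns the number of bytes
+// consumed.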
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
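
The generated Unmarshal and skipGenerated functions above hand-roll protobuf's base-128 varint decoding: each byte contributes its low seven bits, and a set high bit (0x80) means another byte follows. A minimal standalone sketch of that loop, not part of the vendored file (the helper name is hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the loop used by the generated code: accumulate
// seven bits per byte until a byte without the continuation bit (0x80).
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // continuation bit clear: varint complete
			return value, n, nil
		}
	}
	return 0, 0, errors.New("integer overflow")
}

func main() {
	// 300 encodes as 0xAC 0x02 in protobuf varint form.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```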
diff --git a/hack/tools/vendor/k8s.io/api/authentication/v1/generated.proto b/hack/tools/vendor/k8s.io/api/authentication/v1/generated.proto
new file mode 100644
index 0000000000..ae9763576c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/authentication/v1/generated.proto
@@ -0,0 +1,212 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package k8s.io.api.authentication.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/api/authentication/v1";
+
+// BoundObjectReference is a reference to an object that a token is bound to.
+message BoundObjectReference {
+  // Kind of the referent. Valid kinds are 'Pod' and 'Secret'.
+  // +optional
+  optional string kind = 1;
+
+  // API version of the referent.
+  // +optional
+  optional string apiVersion = 2;
+
+  // Name of the referent.
+  // +optional
+  optional string name = 3;
+
+  // UID of the referent.
+  // +optional
+  optional string uID = 4;
+}
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message ExtraValue {
+  // items, if empty, will result in an empty slice
+
+  repeated string items = 1;
+}
+
+// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request.
+// When using impersonation, users will receive the user info of the user being impersonated.  If impersonation or
+// request header authentication is used, any extra keys will have their case ignored and returned as lowercase.
+message SelfSubjectReview {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Status is filled in by the server with the user attributes.
+  optional SelfSubjectReviewStatus status = 2;
+}
+
+// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.
+message SelfSubjectReviewStatus {
+  // User attributes of the user making this request.
+  // +optional
+  optional UserInfo userInfo = 1;
+}
+
+// TokenRequest requests a token for a given service account.
+message TokenRequest {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated
+  optional TokenRequestSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the token can be authenticated.
+  // +optional
+  optional TokenRequestStatus status = 3;
+}
+
+// TokenRequestSpec contains client provided parameters of a token request.
+message TokenRequestSpec {
+  // Audiences are the intended audiences of the token. A recipient of a
+  // token must identify itself with an identifier in the list of
+  // audiences of the token, and otherwise should reject the token. A
+  // token issued for multiple audiences may be used to authenticate
+  // against any of the audiences listed but implies a high degree of
+  // trust between the target audiences.
+  // +listType=atomic
+  repeated string audiences = 1;
+
+  // ExpirationSeconds is the requested duration of validity of the request. The
+  // token issuer may return a token with a different validity duration so a
+  // client needs to check the 'expiration' field in a response.
+  // +optional
+  optional int64 expirationSeconds = 4;
+
+  // BoundObjectRef is a reference to an object that the token will be bound to.
+  // The token will only be valid for as long as the bound object exists.
+  // NOTE: The API server's TokenReview endpoint will validate the
+  // BoundObjectRef, but other audiences may not. Keep ExpirationSeconds
+  // small if you want prompt revocation.
+  // +optional
+  optional BoundObjectReference boundObjectRef = 3;
+}
+
+// TokenRequestStatus is the result of a token request.
+message TokenRequestStatus {
+  // Token is the opaque bearer token.
+  optional string token = 1;
+
+  // ExpirationTimestamp is the time of expiration of the returned token.
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2;
+}
+
+// TokenReview attempts to authenticate a token to a known user.
+// Note: TokenReview requests may be cached by the webhook token authenticator
+// plugin in the kube-apiserver.
+message TokenReview {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated
+  optional TokenReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request can be authenticated.
+  // +optional
+  optional TokenReviewStatus status = 3;
+}
+
+// TokenReviewSpec is a description of the token authentication request.
+message TokenReviewSpec {
+  // Token is the opaque bearer token.
+  // +optional
+  optional string token = 1;
+
+  // Audiences is a list of the identifiers that the resource server presented
+  // with the token identifies as. Audience-aware token authenticators will
+  // verify that the token was intended for at least one of the audiences in
+  // this list. If no audiences are provided, the audience will default to the
+  // audience of the Kubernetes apiserver.
+  // +optional
+  // +listType=atomic
+  repeated string audiences = 2;
+}
+
+// TokenReviewStatus is the result of the token authentication request.
+message TokenReviewStatus {
+  // Authenticated indicates that the token was associated with a known user.
+  // +optional
+  optional bool authenticated = 1;
+
+  // User is the UserInfo associated with the provided token.
+  // +optional
+  optional UserInfo user = 2;
+
+  // Audiences are audience identifiers chosen by the authenticator that are
+  // compatible with both the TokenReview and token. An identifier is any
+  // identifier in the intersection of the TokenReviewSpec audiences and the
+  // token's audiences. A client of the TokenReview API that sets the
+  // spec.audiences field should validate that a compatible audience identifier
+  // is returned in the status.audiences field to ensure that the TokenReview
+  // server is audience aware. If a TokenReview returns an empty
+  // status.audiences field where status.authenticated is "true", the token is
+  // valid against the audience of the Kubernetes API server.
+  // +optional
+  // +listType=atomic
+  repeated string audiences = 4;
+
+  // Error indicates that the token couldn't be checked
+  // +optional
+  optional string error = 3;
+}
+
+// UserInfo holds the information about the user needed to implement the
+// user.Info interface.
+message UserInfo {
+  // The name that uniquely identifies this user among all active users.
+  // +optional
+  optional string username = 1;
+
+  // A unique value that identifies this user across time. If this user is
+  // deleted and another user by the same name is added, they will have
+  // different UIDs.
+  // +optional
+  optional string uid = 2;
+
+  // The names of groups this user is a part of.
+  // +optional
+  // +listType=atomic
+  repeated string groups = 3;
+
+  // Any additional information provided by the authenticator.
+  // +optional
+  map<string, ExtraValue> extra = 4;
+}
+
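The audiences contract spelled out in TokenReviewSpec and TokenReviewStatus above implies a client-side check: a caller that sets spec.audiences should verify that status.audiences intersects the requested set before trusting status.authenticated. A hedged sketch of that check (the helper name is illustrative):

```go
package main

import "fmt"

// audienceCompatible reports whether an authenticated TokenReview confirmed
// one of the audiences the caller requested in spec.audiences. Per the API
// contract, an empty status.audiences on an authenticated response means the
// token is only valid for the Kubernetes API server's own audience.
func audienceCompatible(requested, returned []string) bool {
	want := make(map[string]struct{}, len(requested))
	for _, a := range requested {
		want[a] = struct{}{}
	}
	for _, a := range returned {
		if _, ok := want[a]; ok {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(audienceCompatible(
		[]string{"https://svc.example"},
		[]string{"https://svc.example"},
	)) // true
}
```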
diff --git a/hack/tools/vendor/k8s.io/api/authentication/v1/register.go b/hack/tools/vendor/k8s.io/api/authentication/v1/register.go
new file mode 100644
index 0000000000..6a32b5926b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/authentication/v1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "authentication.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&TokenReview{},
+		&TokenRequest{},
+		&SelfSubjectReview{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
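
register.go follows the standard SchemeBuilder pattern, so consumers register these kinds in the usual way. A small usage sketch (the authv1 import alias is an assumption):

```go
package main

import (
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := authv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// After registration, the scheme can resolve the GVK for these objects.
	gvks, _, err := scheme.ObjectKinds(&authv1.TokenReview{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // [authentication.k8s.io/v1, Kind=TokenReview]
}
```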
diff --git a/hack/tools/vendor/k8s.io/api/authentication/v1/types.go b/hack/tools/vendor/k8s.io/api/authentication/v1/types.go
new file mode 100644
index 0000000000..2dc0707c4f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/authentication/v1/types.go
@@ -0,0 +1,231 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+const (
+	// ImpersonateUserHeader is used to impersonate a particular user during an API server request
+	ImpersonateUserHeader = "Impersonate-User"
+
+	// ImpersonateGroupHeader is used to impersonate a particular group during an API server request.
+	// It can be repeated multiple times to specify multiple groups.
+	ImpersonateGroupHeader = "Impersonate-Group"
+
+	// ImpersonateUIDHeader is used to impersonate a particular UID during an API server request
+	ImpersonateUIDHeader = "Impersonate-Uid"
+
+	// ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the
+	// extra map[string][]string for user.Info. The key is everything after the prefix.
+	// It can be repeated multiple times for multiple map keys, and the same key can be
+	// repeated multiple times to place multiple elements in the slice under a single key.
+	ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:onlyVerbs=create
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.6
+
+// TokenReview attempts to authenticate a token to a known user.
+// Note: TokenReview requests may be cached by the webhook token authenticator
+// plugin in the kube-apiserver.
+type TokenReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated
+	Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the request can be authenticated.
+	// +optional
+	Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// TokenReviewSpec is a description of the token authentication request.
+type TokenReviewSpec struct {
+	// Token is the opaque bearer token.
+	// +optional
+	Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"`
+	// Audiences is a list of the identifiers that the resource server presented
+	// with the token identifies as. Audience-aware token authenticators will
+	// verify that the token was intended for at least one of the audiences in
+	// this list. If no audiences are provided, the audience will default to the
+	// audience of the Kubernetes apiserver.
+	// +optional
+	// +listType=atomic
+	Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"`
+}
+
+// TokenReviewStatus is the result of the token authentication request.
+type TokenReviewStatus struct {
+	// Authenticated indicates that the token was associated with a known user.
+	// +optional
+	Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"`
+	// User is the UserInfo associated with the provided token.
+	// +optional
+	User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"`
+	// Audiences are audience identifiers chosen by the authenticator that are
+	// compatible with both the TokenReview and token. An identifier is any
+	// identifier in the intersection of the TokenReviewSpec audiences and the
+	// token's audiences. A client of the TokenReview API that sets the
+	// spec.audiences field should validate that a compatible audience identifier
+	// is returned in the status.audiences field to ensure that the TokenReview
+	// server is audience aware. If a TokenReview returns an empty
+	// status.audiences field where status.authenticated is "true", the token is
+	// valid against the audience of the Kubernetes API server.
+	// +optional
+	// +listType=atomic
+	Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"`
+	// Error indicates that the token couldn't be checked
+	// +optional
+	Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"`
+}
+
+// UserInfo holds the information about the user needed to implement the
+// user.Info interface.
+type UserInfo struct {
+	// The name that uniquely identifies this user among all active users.
+	// +optional
+	Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"`
+	// A unique value that identifies this user across time. If this user is
+	// deleted and another user by the same name is added, they will have
+	// different UIDs.
+	// +optional
+	UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"`
+	// The names of groups this user is a part of.
+	// +optional
+	// +listType=atomic
+	Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"`
+	// Any additional information provided by the authenticator.
+	// +optional
+	Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"`
+}
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type ExtraValue []string
+
+func (t ExtraValue) String() string {
+	return fmt.Sprintf("%v", []string(t))
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.10
+
+// TokenRequest requests a token for a given service account.
+type TokenRequest struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the request being evaluated
+	Spec TokenRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the server and indicates whether the token can be authenticated.
+	// +optional
+	Status TokenRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// TokenRequestSpec contains client provided parameters of a token request.
+type TokenRequestSpec struct {
+	// Audiences are the intended audiences of the token. A recipient of a
+	// token must identify itself with an identifier in the list of
+	// audiences of the token, and otherwise should reject the token. A
+	// token issued for multiple audiences may be used to authenticate
+	// against any of the audiences listed but implies a high degree of
+	// trust between the target audiences.
+	// +listType=atomic
+	Audiences []string `json:"audiences" protobuf:"bytes,1,rep,name=audiences"`
+
+	// ExpirationSeconds is the requested duration of validity of the request. The
+	// token issuer may return a token with a different validity duration so a
+	// client needs to check the 'expiration' field in a response.
+	// +optional
+	ExpirationSeconds *int64 `json:"expirationSeconds" protobuf:"varint,4,opt,name=expirationSeconds"`
+
+	// BoundObjectRef is a reference to an object that the token will be bound to.
+	// The token will only be valid for as long as the bound object exists.
+	// NOTE: The API server's TokenReview endpoint will validate the
+	// BoundObjectRef, but other audiences may not. Keep ExpirationSeconds
+	// small if you want prompt revocation.
+	// +optional
+	BoundObjectRef *BoundObjectReference `json:"boundObjectRef" protobuf:"bytes,3,opt,name=boundObjectRef"`
+}
+
+// TokenRequestStatus is the result of a token request.
+type TokenRequestStatus struct {
+	// Token is the opaque bearer token.
+	Token string `json:"token" protobuf:"bytes,1,opt,name=token"`
+	// ExpirationTimestamp is the time of expiration of the returned token.
+	ExpirationTimestamp metav1.Time `json:"expirationTimestamp" protobuf:"bytes,2,opt,name=expirationTimestamp"`
+}
+
+// BoundObjectReference is a reference to an object that a token is bound to.
+type BoundObjectReference struct {
+	// Kind of the referent. Valid kinds are 'Pod' and 'Secret'.
+	// +optional
+	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
+	// API version of the referent.
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
+
+	// Name of the referent.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
+	// UID of the referent.
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uID,casttype=k8s.io/apimachinery/pkg/types.UID"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:onlyVerbs=create
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.28
+
+// SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request.
+// When using impersonation, users will receive the user info of the user being impersonated.  If impersonation or
+// request header authentication is used, any extra keys will have their case ignored and returned as lowercase.
+type SelfSubjectReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// Status is filled in by the server with the user attributes.
+	Status SelfSubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
+}
+
+// SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.
+type SelfSubjectReviewStatus struct {
+	// User attributes of the user making this request.
+	// +optional
+	UserInfo UserInfo `json:"userInfo,omitempty" protobuf:"bytes,1,opt,name=userInfo"`
+}
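
The Impersonate-* constants in types.go describe a plain HTTP header protocol. A minimal sketch of attaching those headers to a request, assuming the caller already holds impersonation RBAC (names and values are placeholders):

```go
package main

import (
	"fmt"
	"net/http"

	authv1 "k8s.io/api/authentication/v1"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "https://kube-apiserver.example/api", nil)
	if err != nil {
		panic(err)
	}
	// The authenticated caller must be authorized to impersonate these values.
	req.Header.Set(authv1.ImpersonateUserHeader, "jane")
	req.Header.Add(authv1.ImpersonateGroupHeader, "developers")
	req.Header.Add(authv1.ImpersonateGroupHeader, "auditors") // repeatable per group
	// Extra-info keys are appended to the prefix; repeat the header per value.
	req.Header.Add(authv1.ImpersonateUserExtraHeaderPrefix+"scopes", "view")
	fmt.Println(req.Header)
}
```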
diff --git a/hack/tools/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/hack/tools/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000000..ebfd4852c0
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go
@@ -0,0 +1,138 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-codegen.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_BoundObjectReference = map[string]string{
+	"":           "BoundObjectReference is a reference to an object that a token is bound to.",
+	"kind":       "Kind of the referent. Valid kinds are 'Pod' and 'Secret'.",
+	"apiVersion": "API version of the referent.",
+	"name":       "Name of the referent.",
+	"uid":        "UID of the referent.",
+}
+
+func (BoundObjectReference) SwaggerDoc() map[string]string {
+	return map_BoundObjectReference
+}
+
+var map_SelfSubjectReview = map[string]string{
+	"":         "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated.  If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"status":   "Status is filled in by the server with the user attributes.",
+}
+
+func (SelfSubjectReview) SwaggerDoc() map[string]string {
+	return map_SelfSubjectReview
+}
+
+var map_SelfSubjectReviewStatus = map[string]string{
+	"":         "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.",
+	"userInfo": "User attributes of the user making this request.",
+}
+
+func (SelfSubjectReviewStatus) SwaggerDoc() map[string]string {
+	return map_SelfSubjectReviewStatus
+}
+
+var map_TokenRequest = map[string]string{
+	"":         "TokenRequest requests a token for a given service account.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Spec holds information about the request being evaluated",
+	"status":   "Status is filled in by the server and indicates whether the token can be authenticated.",
+}
+
+func (TokenRequest) SwaggerDoc() map[string]string {
+	return map_TokenRequest
+}
+
+var map_TokenRequestSpec = map[string]string{
+	"":                  "TokenRequestSpec contains client provided parameters of a token request.",
+	"audiences":         "Audiences are the intendend audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.",
+	"expirationSeconds": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.",
+	"boundObjectRef":    "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation.",
+}
+
+func (TokenRequestSpec) SwaggerDoc() map[string]string {
+	return map_TokenRequestSpec
+}
+
+var map_TokenRequestStatus = map[string]string{
+	"":                    "TokenRequestStatus is the result of a token request.",
+	"token":               "Token is the opaque bearer token.",
+	"expirationTimestamp": "ExpirationTimestamp is the time of expiration of the returned token.",
+}
+
+func (TokenRequestStatus) SwaggerDoc() map[string]string {
+	return map_TokenRequestStatus
+}
+
+var map_TokenReview = map[string]string{
+	"":         "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Spec holds information about the request being evaluated",
+	"status":   "Status is filled in by the server and indicates whether the request can be authenticated.",
+}
+
+func (TokenReview) SwaggerDoc() map[string]string {
+	return map_TokenReview
+}
+
+var map_TokenReviewSpec = map[string]string{
+	"":          "TokenReviewSpec is a description of the token authentication request.",
+	"token":     "Token is the opaque bearer token.",
+	"audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.",
+}
+
+func (TokenReviewSpec) SwaggerDoc() map[string]string {
+	return map_TokenReviewSpec
+}
+
+var map_TokenReviewStatus = map[string]string{
+	"":              "TokenReviewStatus is the result of the token authentication request.",
+	"authenticated": "Authenticated indicates that the token was associated with a known user.",
+	"user":          "User is the UserInfo associated with the provided token.",
+	"audiences":     "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.",
+	"error":         "Error indicates that the token couldn't be checked",
+}
+
+func (TokenReviewStatus) SwaggerDoc() map[string]string {
+	return map_TokenReviewStatus
+}
+
+var map_UserInfo = map[string]string{
+	"":         "UserInfo holds the information about the user needed to implement the user.Info interface.",
+	"username": "The name that uniquely identifies this user among all active users.",
+	"uid":      "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.",
+	"groups":   "The names of groups this user is a part of.",
+	"extra":    "Any additional information provided by the authenticator.",
+}
+
+func (UserInfo) SwaggerDoc() map[string]string {
+	return map_UserInfo
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
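
TokenRequest, documented above, is normally created through the serviceaccounts/token subresource rather than as a standalone resource. A hedged client-go sketch (clientset construction elided; the namespace and service account names are placeholders):

```go
package example

import (
	"context"
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// requestToken asks the apiserver to mint a bound token for a service account.
func requestToken(ctx context.Context, cs kubernetes.Interface) error {
	expiry := int64(600) // the issuer may adjust this; check the returned status
	tr := &authv1.TokenRequest{
		Spec: authv1.TokenRequestSpec{
			Audiences:         []string{"https://kubernetes.default.svc"},
			ExpirationSeconds: &expiry,
		},
	}
	out, err := cs.CoreV1().ServiceAccounts("default").
		CreateToken(ctx, "builder", tr, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	fmt.Println("expires:", out.Status.ExpirationTimestamp)
	return nil
}
```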
diff --git a/hack/tools/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..369c89b863
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go
@@ -0,0 +1,289 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BoundObjectReference) DeepCopyInto(out *BoundObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BoundObjectReference.
+func (in *BoundObjectReference) DeepCopy() *BoundObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(BoundObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ExtraValue) DeepCopyInto(out *ExtraValue) {
+	{
+		in := &in
+		*out = make(ExtraValue, len(*in))
+		copy(*out, *in)
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue.
+func (in ExtraValue) DeepCopy() ExtraValue {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtraValue)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectReview) DeepCopyInto(out *SelfSubjectReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReview.
+func (in *SelfSubjectReview) DeepCopy() *SelfSubjectReview {
+	if in == nil {
+		return nil
+	}
+	out := new(SelfSubjectReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SelfSubjectReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SelfSubjectReviewStatus) DeepCopyInto(out *SelfSubjectReviewStatus) {
+	*out = *in
+	in.UserInfo.DeepCopyInto(&out.UserInfo)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectReviewStatus.
+func (in *SelfSubjectReviewStatus) DeepCopy() *SelfSubjectReviewStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SelfSubjectReviewStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenRequest) DeepCopyInto(out *TokenRequest) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest.
+func (in *TokenRequest) DeepCopy() *TokenRequest {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenRequest)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TokenRequest) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenRequestSpec) DeepCopyInto(out *TokenRequestSpec) {
+	*out = *in
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ExpirationSeconds != nil {
+		in, out := &in.ExpirationSeconds, &out.ExpirationSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.BoundObjectRef != nil {
+		in, out := &in.BoundObjectRef, &out.BoundObjectRef
+		*out = new(BoundObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequestSpec.
+func (in *TokenRequestSpec) DeepCopy() *TokenRequestSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenRequestSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenRequestStatus) DeepCopyInto(out *TokenRequestStatus) {
+	*out = *in
+	in.ExpirationTimestamp.DeepCopyInto(&out.ExpirationTimestamp)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequestStatus.
+func (in *TokenRequestStatus) DeepCopy() *TokenRequestStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenRequestStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenReview) DeepCopyInto(out *TokenReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReview.
+func (in *TokenReview) DeepCopy() *TokenReview {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TokenReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) {
+	*out = *in
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewSpec.
+func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenReviewSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) {
+	*out = *in
+	in.User.DeepCopyInto(&out.User)
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewStatus.
+func (in *TokenReviewStatus) DeepCopy() *TokenReviewStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(TokenReviewStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserInfo) DeepCopyInto(out *UserInfo) {
+	*out = *in
+	if in.Groups != nil {
+		in, out := &in.Groups, &out.Groups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Extra != nil {
+		in, out := &in.Extra, &out.Extra
+		*out = make(map[string]ExtraValue, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make(ExtraValue, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo.
+func (in *UserInfo) DeepCopy() *UserInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(UserInfo)
+	in.DeepCopyInto(out)
+	return out
+}
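
The generated deep copies exist so that mutating a copy never aliases the original's slices or maps. A short demonstration of that contract:

```go
package main

import (
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
)

func main() {
	orig := &authv1.UserInfo{
		Username: "jane",
		Groups:   []string{"developers"},
		Extra:    map[string]authv1.ExtraValue{"scopes": {"view"}},
	}
	cp := orig.DeepCopy()
	cp.Groups[0] = "auditors"
	cp.Extra["scopes"][0] = "edit"
	// The original is untouched because the slice and map values were copied.
	fmt.Println(orig.Groups[0], orig.Extra["scopes"][0]) // developers view
}
```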
diff --git a/hack/tools/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go b/hack/tools/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go
new file mode 100644
index 0000000000..b612bdec48
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go
@@ -0,0 +1,40 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
+
+package v1
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *SelfSubjectReview) APILifecycleIntroduced() (major, minor int) {
+	return 1, 28
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *TokenRequest) APILifecycleIntroduced() (major, minor int) {
+	return 1, 10
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *TokenReview) APILifecycleIntroduced() (major, minor int) {
+	return 1, 6
+}
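
The APILifecycleIntroduced hooks feed apimachinery's lifecycle tooling, but they can also be read directly, for example to gate use of a type on the cluster's version. An illustrative sketch (the 1.27 threshold is hypothetical):

```go
package main

import (
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
)

func main() {
	major, minor := (&authv1.SelfSubjectReview{}).APILifecycleIntroduced()
	// Hypothetical gate: only use SelfSubjectReview on clusters at or past its debut.
	clusterMajor, clusterMinor := 1, 27
	ok := clusterMajor > major || (clusterMajor == major && clusterMinor >= minor)
	fmt.Printf("introduced in %d.%d, usable here: %v\n", major, minor, ok) // 1.28 false
}
```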
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/hack/tools/vendor/k8s.io/api/core/v1/annotation_key_constants.go
new file mode 100644
index 0000000000..62e86402e1
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/annotation_key_constants.go
@@ -0,0 +1,159 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file should be consistent with pkg/apis/core/annotation_key_constants.go.
+
+package v1
+
+const (
+	// ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy
+	// webhook backend fails.
+	ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open"
+
+	// MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods
+	MirrorPodAnnotationKey string = "kubernetes.io/config.mirror"
+
+	// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
+	// in the Annotations of a Pod.
+	TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
+
+	// TaintsAnnotationKey represents the key of taints data (json serialized)
+	// in the Annotations of a Node.
+	TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
+
+	// SeccompPodAnnotationKey represents the key of a seccomp profile applied
+	// to all containers of a pod.
+	// Deprecated: set a pod security context `seccompProfile` field.
+	SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
+
+	// SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
+	// to one container of a pod.
+	// Deprecated: set a container security context `seccompProfile` field.
+	SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
+
+	// SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime.
+	// Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead.
+	SeccompProfileRuntimeDefault string = "runtime/default"
+
+	// SeccompProfileNameUnconfined is the unconfined seccomp profile.
+	SeccompProfileNameUnconfined string = "unconfined"
+
+	// SeccompLocalhostProfileNamePrefix is the prefix for specifying profiles loaded from the node's disk.
+	SeccompLocalhostProfileNamePrefix = "localhost/"
+
+	// DeprecatedAppArmorBetaContainerAnnotationKeyPrefix is the prefix to an annotation key specifying a container's apparmor profile.
+	// Deprecated: use a pod or container security context `appArmorProfile` field instead.
+	DeprecatedAppArmorBetaContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/"
+
+	// DeprecatedAppArmorBetaProfileRuntimeDefault is the profile specifying the runtime default.
+	DeprecatedAppArmorBetaProfileRuntimeDefault = "runtime/default"
+
+	// DeprecatedAppArmorBetaProfileNamePrefix is the prefix for specifying profiles loaded on the node.
+	DeprecatedAppArmorBetaProfileNamePrefix = "localhost/"
+
+	// DeprecatedAppArmorBetaProfileNameUnconfined is the Unconfined AppArmor profile
+	DeprecatedAppArmorBetaProfileNameUnconfined = "unconfined"
+
+	// DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker.
+	// Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead.
+	DeprecatedSeccompProfileDockerDefault string = "docker/default"
+
+	// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
+	// in the Annotations of a Node.
+	PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"
+
+	// ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache
+	// an object (e.g. secret, config map) before fetching it again from apiserver.
+	// This annotation can be attached to node.
+	ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl"
+
+	// NonConvertibleAnnotationPrefix is the annotation key prefix used to identify non-convertible json paths.
+	NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io"
+
+	kubectlPrefix = "kubectl.kubernetes.io/"
+
+	// LastAppliedConfigAnnotation is the annotation used to store the previous
+	// configuration of a resource for use in a three way diff by UpdateApplyAnnotation.
+	LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration"
+
+	// AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers
+	//
+	// It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to
+	// allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow
+	// access only from the CIDRs currently allocated to MIT & the USPS.
+	//
+	// Not all cloud providers support this annotation, though AWS & GCE do.
+	AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges"
+
+	// EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that
+	// represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z')
+	// of the last change, of some Pod or Service object, that triggered the endpoints object change.
+	// In other words, if a Pod / Service changed at time T0, that change was observed by endpoints
+	// controller at T1, and the Endpoints object was changed at T2, the
+	// EndpointsLastChangeTriggerTime would be set to T0.
+	//
+	// The "endpoints change trigger" here means any Pod or Service change that resulted in the
+	// Endpoints object change.
+	//
+	// Given the definition of the "endpoints change trigger", please note that this annotation will
+	// be set ONLY for endpoints object changes triggered by either Pod or Service change. If the
+	// Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's
+	// already set).
+	//
+	// This annotation will be used to compute the in-cluster network programming latency SLI, see
+	// https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md
+	EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time"
+
+	// EndpointsOverCapacity will be set on an Endpoints resource when it
+	// exceeds the maximum capacity of 1000 addresses. Initially the Endpoints
+	// controller will set this annotation with a value of "warning". In a
+	// future release, the controller may set this annotation with a value of
+	// "truncated" to indicate that any addresses exceeding the limit of 1000
+	// have been truncated from the Endpoints resource.
+	EndpointsOverCapacity = "endpoints.kubernetes.io/over-capacity"
+
+	// MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated
+	// list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode.
+	// This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or
+	// CSI Backend for a volume plugin on a specific node.
+	MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins"
+
+	// PodDeletionCost can be used to set to an int32 that represent the cost of deleting
+	// a pod compared to other pods belonging to the same ReplicaSet. Pods with lower
+	// deletion cost are preferred to be deleted before pods with higher deletion cost.
+	// Note that this is honored on a best-effort basis, and so it does not offer guarantees on
+	// pod deletion order.
+	// The implicit deletion cost for pods that don't set the annotation is 0; negative values are permitted.
+	//
+	// This annotation is beta-level and is only honored when the PodDeletionCost feature is enabled.
+	PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost"
+
+	// DeprecatedAnnotationTopologyAwareHints can be used to enable or disable
+	// Topology Aware Hints for a Service. This may be set to "Auto" or
+	// "Disabled". Any other value is treated as "Disabled". This annotation has
+	// been deprecated in favor of the "service.kubernetes.io/topology-mode"
+	// annotation.
+	DeprecatedAnnotationTopologyAwareHints = "service.kubernetes.io/topology-aware-hints"
+
+	// AnnotationTopologyMode can be used to enable or disable Topology Aware
+	// Routing for a Service. Well known values are "Auto" and "Disabled".
+	// Implementations may choose to develop new topology approaches, exposing
+	// them with domain-prefixed values. For example, "example.com/lowest-rtt"
+	// could be a valid implementation-specific value for this annotation. These
+	// heuristics will often populate topology hints on EndpointSlices, but that
+	// is not a requirement.
+	AnnotationTopologyMode = "service.kubernetes.io/topology-mode"
+)
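
PodDeletionCost, defined above, is honored best-effort by the ReplicaSet controller when choosing victims during scale-down. A sketch of stamping it onto a pod (the helper is illustrative; the value must be a stringified integer):

```go
package example

import (
	"strconv"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// lowPriorityPod marks a pod as a preferred deletion victim; the annotation
// is best-effort and does not guarantee deletion ordering.
func lowPriorityPod(name string, cost int32) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Annotations: map[string]string{
				corev1.PodDeletionCost: strconv.FormatInt(int64(cost), 10),
			},
		},
	}
}
```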
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/doc.go b/hack/tools/vendor/k8s.io/api/core/v1/doc.go
new file mode 100644
index 0000000000..bc0041b331
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:prerelease-lifecycle-gen=true
+// +groupName=
+
+// Package v1 is the v1 version of the core API.
+package v1 // import "k8s.io/api/core/v1"
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/generated.pb.go b/hack/tools/vendor/k8s.io/api/core/v1/generated.pb.go
new file mode 100644
index 0000000000..9d466c6d79
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/generated.pb.go
@@ -0,0 +1,75137 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/api/core/v1/generated.proto
+
+package v1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	resource "k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+
+	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *AWSElasticBlockStoreVolumeSource) Reset()      { *m = AWSElasticBlockStoreVolumeSource{} }
+func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {}
+func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{0}
+}
+func (m *AWSElasticBlockStoreVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AWSElasticBlockStoreVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AWSElasticBlockStoreVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AWSElasticBlockStoreVolumeSource.Merge(m, src)
+}
+func (m *AWSElasticBlockStoreVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *AWSElasticBlockStoreVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_AWSElasticBlockStoreVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AWSElasticBlockStoreVolumeSource proto.InternalMessageInfo
+
+func (m *Affinity) Reset()      { *m = Affinity{} }
+func (*Affinity) ProtoMessage() {}
+func (*Affinity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{1}
+}
+func (m *Affinity) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Affinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Affinity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Affinity.Merge(m, src)
+}
+func (m *Affinity) XXX_Size() int {
+	return m.Size()
+}
+func (m *Affinity) XXX_DiscardUnknown() {
+	xxx_messageInfo_Affinity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Affinity proto.InternalMessageInfo
+
+func (m *AppArmorProfile) Reset()      { *m = AppArmorProfile{} }
+func (*AppArmorProfile) ProtoMessage() {}
+func (*AppArmorProfile) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{2}
+}
+func (m *AppArmorProfile) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AppArmorProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AppArmorProfile) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AppArmorProfile.Merge(m, src)
+}
+func (m *AppArmorProfile) XXX_Size() int {
+	return m.Size()
+}
+func (m *AppArmorProfile) XXX_DiscardUnknown() {
+	xxx_messageInfo_AppArmorProfile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AppArmorProfile proto.InternalMessageInfo
+
+func (m *AttachedVolume) Reset()      { *m = AttachedVolume{} }
+func (*AttachedVolume) ProtoMessage() {}
+func (*AttachedVolume) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{3}
+}
+func (m *AttachedVolume) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AttachedVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AttachedVolume) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AttachedVolume.Merge(m, src)
+}
+func (m *AttachedVolume) XXX_Size() int {
+	return m.Size()
+}
+func (m *AttachedVolume) XXX_DiscardUnknown() {
+	xxx_messageInfo_AttachedVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AttachedVolume proto.InternalMessageInfo
+
+func (m *AvoidPods) Reset()      { *m = AvoidPods{} }
+func (*AvoidPods) ProtoMessage() {}
+func (*AvoidPods) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{4}
+}
+func (m *AvoidPods) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AvoidPods) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AvoidPods) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AvoidPods.Merge(m, src)
+}
+func (m *AvoidPods) XXX_Size() int {
+	return m.Size()
+}
+func (m *AvoidPods) XXX_DiscardUnknown() {
+	xxx_messageInfo_AvoidPods.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AvoidPods proto.InternalMessageInfo
+
+func (m *AzureDiskVolumeSource) Reset()      { *m = AzureDiskVolumeSource{} }
+func (*AzureDiskVolumeSource) ProtoMessage() {}
+func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{5}
+}
+func (m *AzureDiskVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AzureDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AzureDiskVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AzureDiskVolumeSource.Merge(m, src)
+}
+func (m *AzureDiskVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *AzureDiskVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_AzureDiskVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AzureDiskVolumeSource proto.InternalMessageInfo
+
+func (m *AzureFilePersistentVolumeSource) Reset()      { *m = AzureFilePersistentVolumeSource{} }
+func (*AzureFilePersistentVolumeSource) ProtoMessage() {}
+func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{6}
+}
+func (m *AzureFilePersistentVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AzureFilePersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AzureFilePersistentVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AzureFilePersistentVolumeSource.Merge(m, src)
+}
+func (m *AzureFilePersistentVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *AzureFilePersistentVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_AzureFilePersistentVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AzureFilePersistentVolumeSource proto.InternalMessageInfo
+
+func (m *AzureFileVolumeSource) Reset()      { *m = AzureFileVolumeSource{} }
+func (*AzureFileVolumeSource) ProtoMessage() {}
+func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{7}
+}
+func (m *AzureFileVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AzureFileVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AzureFileVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AzureFileVolumeSource.Merge(m, src)
+}
+func (m *AzureFileVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *AzureFileVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_AzureFileVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AzureFileVolumeSource proto.InternalMessageInfo
+
+func (m *Binding) Reset()      { *m = Binding{} }
+func (*Binding) ProtoMessage() {}
+func (*Binding) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{8}
+}
+func (m *Binding) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Binding) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Binding.Merge(m, src)
+}
+func (m *Binding) XXX_Size() int {
+	return m.Size()
+}
+func (m *Binding) XXX_DiscardUnknown() {
+	xxx_messageInfo_Binding.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Binding proto.InternalMessageInfo
+
+func (m *CSIPersistentVolumeSource) Reset()      { *m = CSIPersistentVolumeSource{} }
+func (*CSIPersistentVolumeSource) ProtoMessage() {}
+func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{9}
+}
+func (m *CSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CSIPersistentVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CSIPersistentVolumeSource.Merge(m, src)
+}
+func (m *CSIPersistentVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *CSIPersistentVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_CSIPersistentVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CSIPersistentVolumeSource proto.InternalMessageInfo
+
+func (m *CSIVolumeSource) Reset()      { *m = CSIVolumeSource{} }
+func (*CSIVolumeSource) ProtoMessage() {}
+func (*CSIVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{10}
+}
+func (m *CSIVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CSIVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CSIVolumeSource.Merge(m, src)
+}
+func (m *CSIVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *CSIVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_CSIVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CSIVolumeSource proto.InternalMessageInfo
+
+func (m *Capabilities) Reset()      { *m = Capabilities{} }
+func (*Capabilities) ProtoMessage() {}
+func (*Capabilities) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{11}
+}
+func (m *Capabilities) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Capabilities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Capabilities) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Capabilities.Merge(m, src)
+}
+func (m *Capabilities) XXX_Size() int {
+	return m.Size()
+}
+func (m *Capabilities) XXX_DiscardUnknown() {
+	xxx_messageInfo_Capabilities.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Capabilities proto.InternalMessageInfo
+
+func (m *CephFSPersistentVolumeSource) Reset()      { *m = CephFSPersistentVolumeSource{} }
+func (*CephFSPersistentVolumeSource) ProtoMessage() {}
+func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{12}
+}
+func (m *CephFSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CephFSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CephFSPersistentVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CephFSPersistentVolumeSource.Merge(m, src)
+}
+func (m *CephFSPersistentVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *CephFSPersistentVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_CephFSPersistentVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CephFSPersistentVolumeSource proto.InternalMessageInfo
+
+func (m *CephFSVolumeSource) Reset()      { *m = CephFSVolumeSource{} }
+func (*CephFSVolumeSource) ProtoMessage() {}
+func (*CephFSVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{13}
+}
+func (m *CephFSVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CephFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CephFSVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CephFSVolumeSource.Merge(m, src)
+}
+func (m *CephFSVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *CephFSVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_CephFSVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CephFSVolumeSource proto.InternalMessageInfo
+
+func (m *CinderPersistentVolumeSource) Reset()      { *m = CinderPersistentVolumeSource{} }
+func (*CinderPersistentVolumeSource) ProtoMessage() {}
+func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{14}
+}
+func (m *CinderPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CinderPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CinderPersistentVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CinderPersistentVolumeSource.Merge(m, src)
+}
+func (m *CinderPersistentVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *CinderPersistentVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_CinderPersistentVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CinderPersistentVolumeSource proto.InternalMessageInfo
+
+func (m *CinderVolumeSource) Reset()      { *m = CinderVolumeSource{} }
+func (*CinderVolumeSource) ProtoMessage() {}
+func (*CinderVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{15}
+}
+func (m *CinderVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CinderVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CinderVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CinderVolumeSource.Merge(m, src)
+}
+func (m *CinderVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *CinderVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_CinderVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo
+
+func (m *ClientIPConfig) Reset()      { *m = ClientIPConfig{} }
+func (*ClientIPConfig) ProtoMessage() {}
+func (*ClientIPConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{16}
+}
+func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ClientIPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ClientIPConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ClientIPConfig.Merge(m, src)
+}
+func (m *ClientIPConfig) XXX_Size() int {
+	return m.Size()
+}
+func (m *ClientIPConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_ClientIPConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo
+
+func (m *ClusterTrustBundleProjection) Reset()      { *m = ClusterTrustBundleProjection{} }
+func (*ClusterTrustBundleProjection) ProtoMessage() {}
+func (*ClusterTrustBundleProjection) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{17}
+}
+func (m *ClusterTrustBundleProjection) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ClusterTrustBundleProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ClusterTrustBundleProjection) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ClusterTrustBundleProjection.Merge(m, src)
+}
+func (m *ClusterTrustBundleProjection) XXX_Size() int {
+	return m.Size()
+}
+func (m *ClusterTrustBundleProjection) XXX_DiscardUnknown() {
+	xxx_messageInfo_ClusterTrustBundleProjection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterTrustBundleProjection proto.InternalMessageInfo
+
+func (m *ComponentCondition) Reset()      { *m = ComponentCondition{} }
+func (*ComponentCondition) ProtoMessage() {}
+func (*ComponentCondition) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{18}
+}
+func (m *ComponentCondition) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ComponentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ComponentCondition) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ComponentCondition.Merge(m, src)
+}
+func (m *ComponentCondition) XXX_Size() int {
+	return m.Size()
+}
+func (m *ComponentCondition) XXX_DiscardUnknown() {
+	xxx_messageInfo_ComponentCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo
+
+func (m *ComponentStatus) Reset()      { *m = ComponentStatus{} }
+func (*ComponentStatus) ProtoMessage() {}
+func (*ComponentStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{19}
+}
+func (m *ComponentStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ComponentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ComponentStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ComponentStatus.Merge(m, src)
+}
+func (m *ComponentStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ComponentStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ComponentStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo
+
+func (m *ComponentStatusList) Reset()      { *m = ComponentStatusList{} }
+func (*ComponentStatusList) ProtoMessage() {}
+func (*ComponentStatusList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{20}
+}
+func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ComponentStatusList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ComponentStatusList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ComponentStatusList.Merge(m, src)
+}
+func (m *ComponentStatusList) XXX_Size() int {
+	return m.Size()
+}
+func (m *ComponentStatusList) XXX_DiscardUnknown() {
+	xxx_messageInfo_ComponentStatusList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo
+
+func (m *ConfigMap) Reset()      { *m = ConfigMap{} }
+func (*ConfigMap) ProtoMessage() {}
+func (*ConfigMap) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{21}
+}
+func (m *ConfigMap) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfigMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ConfigMap) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfigMap.Merge(m, src)
+}
+func (m *ConfigMap) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfigMap) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfigMap.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigMap proto.InternalMessageInfo
+
+func (m *ConfigMapEnvSource) Reset()      { *m = ConfigMapEnvSource{} }
+func (*ConfigMapEnvSource) ProtoMessage() {}
+func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{22}
+}
+func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfigMapEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ConfigMapEnvSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfigMapEnvSource.Merge(m, src)
+}
+func (m *ConfigMapEnvSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfigMapEnvSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfigMapEnvSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo
+
+func (m *ConfigMapKeySelector) Reset()      { *m = ConfigMapKeySelector{} }
+func (*ConfigMapKeySelector) ProtoMessage() {}
+func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{23}
+}
+func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfigMapKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ConfigMapKeySelector) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfigMapKeySelector.Merge(m, src)
+}
+func (m *ConfigMapKeySelector) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfigMapKeySelector) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfigMapKeySelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo
+
+func (m *ConfigMapList) Reset()      { *m = ConfigMapList{} }
+func (*ConfigMapList) ProtoMessage() {}
+func (*ConfigMapList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{24}
+}
+func (m *ConfigMapList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfigMapList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ConfigMapList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfigMapList.Merge(m, src)
+}
+func (m *ConfigMapList) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfigMapList) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfigMapList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo
+
+func (m *ConfigMapNodeConfigSource) Reset()      { *m = ConfigMapNodeConfigSource{} }
+func (*ConfigMapNodeConfigSource) ProtoMessage() {}
+func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{25}
+}
+func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfigMapNodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ConfigMapNodeConfigSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfigMapNodeConfigSource.Merge(m, src)
+}
+func (m *ConfigMapNodeConfigSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfigMapNodeConfigSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfigMapNodeConfigSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo
+
+func (m *ConfigMapProjection) Reset()      { *m = ConfigMapProjection{} }
+func (*ConfigMapProjection) ProtoMessage() {}
+func (*ConfigMapProjection) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{26}
+}
+func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfigMapProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ConfigMapProjection) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfigMapProjection.Merge(m, src)
+}
+func (m *ConfigMapProjection) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfigMapProjection) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfigMapProjection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo
+
+func (m *ConfigMapVolumeSource) Reset()      { *m = ConfigMapVolumeSource{} }
+func (*ConfigMapVolumeSource) ProtoMessage() {}
+func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{27}
+}
+func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfigMapVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ConfigMapVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfigMapVolumeSource.Merge(m, src)
+}
+func (m *ConfigMapVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfigMapVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfigMapVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo
+
+func (m *Container) Reset()      { *m = Container{} }
+func (*Container) ProtoMessage() {}
+func (*Container) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{28}
+}
+func (m *Container) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Container) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Container.Merge(m, src)
+}
+func (m *Container) XXX_Size() int {
+	return m.Size()
+}
+func (m *Container) XXX_DiscardUnknown() {
+	xxx_messageInfo_Container.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Container proto.InternalMessageInfo
+
+func (m *ContainerImage) Reset()      { *m = ContainerImage{} }
+func (*ContainerImage) ProtoMessage() {}
+func (*ContainerImage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{29}
+}
+func (m *ContainerImage) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerImage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerImage.Merge(m, src)
+}
+func (m *ContainerImage) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerImage) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerImage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerImage proto.InternalMessageInfo
+
+func (m *ContainerPort) Reset()      { *m = ContainerPort{} }
+func (*ContainerPort) ProtoMessage() {}
+func (*ContainerPort) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{30}
+}
+func (m *ContainerPort) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerPort) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerPort.Merge(m, src)
+}
+func (m *ContainerPort) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerPort) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerPort proto.InternalMessageInfo
+
+func (m *ContainerResizePolicy) Reset()      { *m = ContainerResizePolicy{} }
+func (*ContainerResizePolicy) ProtoMessage() {}
+func (*ContainerResizePolicy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{31}
+}
+func (m *ContainerResizePolicy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerResizePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerResizePolicy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerResizePolicy.Merge(m, src)
+}
+func (m *ContainerResizePolicy) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerResizePolicy) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerResizePolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerResizePolicy proto.InternalMessageInfo
+
+func (m *ContainerState) Reset()      { *m = ContainerState{} }
+func (*ContainerState) ProtoMessage() {}
+func (*ContainerState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{32}
+}
+func (m *ContainerState) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerState.Merge(m, src)
+}
+func (m *ContainerState) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerState) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerState proto.InternalMessageInfo
+
+func (m *ContainerStateRunning) Reset()      { *m = ContainerStateRunning{} }
+func (*ContainerStateRunning) ProtoMessage() {}
+func (*ContainerStateRunning) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{33}
+}
+func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerStateRunning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerStateRunning) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerStateRunning.Merge(m, src)
+}
+func (m *ContainerStateRunning) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerStateRunning) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerStateRunning.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo
+
+func (m *ContainerStateTerminated) Reset()      { *m = ContainerStateTerminated{} }
+func (*ContainerStateTerminated) ProtoMessage() {}
+func (*ContainerStateTerminated) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{34}
+}
+func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerStateTerminated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerStateTerminated) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerStateTerminated.Merge(m, src)
+}
+func (m *ContainerStateTerminated) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerStateTerminated) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerStateTerminated.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo
+
+func (m *ContainerStateWaiting) Reset()      { *m = ContainerStateWaiting{} }
+func (*ContainerStateWaiting) ProtoMessage() {}
+func (*ContainerStateWaiting) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{35}
+}
+func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerStateWaiting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerStateWaiting) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerStateWaiting.Merge(m, src)
+}
+func (m *ContainerStateWaiting) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerStateWaiting) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerStateWaiting.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo
+
+func (m *ContainerStatus) Reset()      { *m = ContainerStatus{} }
+func (*ContainerStatus) ProtoMessage() {}
+func (*ContainerStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{36}
+}
+func (m *ContainerStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerStatus.Merge(m, src)
+}
+func (m *ContainerStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo
+
+func (m *ContainerUser) Reset()      { *m = ContainerUser{} }
+func (*ContainerUser) ProtoMessage() {}
+func (*ContainerUser) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{37}
+}
+func (m *ContainerUser) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ContainerUser) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ContainerUser.Merge(m, src)
+}
+func (m *ContainerUser) XXX_Size() int {
+	return m.Size()
+}
+func (m *ContainerUser) XXX_DiscardUnknown() {
+	xxx_messageInfo_ContainerUser.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ContainerUser proto.InternalMessageInfo
+
+func (m *DaemonEndpoint) Reset()      { *m = DaemonEndpoint{} }
+func (*DaemonEndpoint) ProtoMessage() {}
+func (*DaemonEndpoint) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{38}
+}
+func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DaemonEndpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DaemonEndpoint) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DaemonEndpoint.Merge(m, src)
+}
+func (m *DaemonEndpoint) XXX_Size() int {
+	return m.Size()
+}
+func (m *DaemonEndpoint) XXX_DiscardUnknown() {
+	xxx_messageInfo_DaemonEndpoint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo
+
+func (m *DownwardAPIProjection) Reset()      { *m = DownwardAPIProjection{} }
+func (*DownwardAPIProjection) ProtoMessage() {}
+func (*DownwardAPIProjection) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{39}
+}
+func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DownwardAPIProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DownwardAPIProjection) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DownwardAPIProjection.Merge(m, src)
+}
+func (m *DownwardAPIProjection) XXX_Size() int {
+	return m.Size()
+}
+func (m *DownwardAPIProjection) XXX_DiscardUnknown() {
+	xxx_messageInfo_DownwardAPIProjection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo
+
+func (m *DownwardAPIVolumeFile) Reset()      { *m = DownwardAPIVolumeFile{} }
+func (*DownwardAPIVolumeFile) ProtoMessage() {}
+func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{40}
+}
+func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DownwardAPIVolumeFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DownwardAPIVolumeFile) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DownwardAPIVolumeFile.Merge(m, src)
+}
+func (m *DownwardAPIVolumeFile) XXX_Size() int {
+	return m.Size()
+}
+func (m *DownwardAPIVolumeFile) XXX_DiscardUnknown() {
+	xxx_messageInfo_DownwardAPIVolumeFile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo
+
+func (m *DownwardAPIVolumeSource) Reset()      { *m = DownwardAPIVolumeSource{} }
+func (*DownwardAPIVolumeSource) ProtoMessage() {}
+func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{41}
+}
+func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DownwardAPIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *DownwardAPIVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DownwardAPIVolumeSource.Merge(m, src)
+}
+func (m *DownwardAPIVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *DownwardAPIVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_DownwardAPIVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo
+
+func (m *EmptyDirVolumeSource) Reset()      { *m = EmptyDirVolumeSource{} }
+func (*EmptyDirVolumeSource) ProtoMessage() {}
+func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{42}
+}
+func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EmptyDirVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EmptyDirVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EmptyDirVolumeSource.Merge(m, src)
+}
+func (m *EmptyDirVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *EmptyDirVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_EmptyDirVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo
+
+func (m *EndpointAddress) Reset()      { *m = EndpointAddress{} }
+func (*EndpointAddress) ProtoMessage() {}
+func (*EndpointAddress) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{43}
+}
+func (m *EndpointAddress) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EndpointAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EndpointAddress) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EndpointAddress.Merge(m, src)
+}
+func (m *EndpointAddress) XXX_Size() int {
+	return m.Size()
+}
+func (m *EndpointAddress) XXX_DiscardUnknown() {
+	xxx_messageInfo_EndpointAddress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo
+
+func (m *EndpointPort) Reset()      { *m = EndpointPort{} }
+func (*EndpointPort) ProtoMessage() {}
+func (*EndpointPort) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{44}
+}
+func (m *EndpointPort) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EndpointPort) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EndpointPort.Merge(m, src)
+}
+func (m *EndpointPort) XXX_Size() int {
+	return m.Size()
+}
+func (m *EndpointPort) XXX_DiscardUnknown() {
+	xxx_messageInfo_EndpointPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointPort proto.InternalMessageInfo
+
+func (m *EndpointSubset) Reset()      { *m = EndpointSubset{} }
+func (*EndpointSubset) ProtoMessage() {}
+func (*EndpointSubset) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{45}
+}
+func (m *EndpointSubset) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EndpointSubset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EndpointSubset) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EndpointSubset.Merge(m, src)
+}
+func (m *EndpointSubset) XXX_Size() int {
+	return m.Size()
+}
+func (m *EndpointSubset) XXX_DiscardUnknown() {
+	xxx_messageInfo_EndpointSubset.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo
+
+func (m *Endpoints) Reset()      { *m = Endpoints{} }
+func (*Endpoints) ProtoMessage() {}
+func (*Endpoints) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{46}
+}
+func (m *Endpoints) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Endpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Endpoints) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Endpoints.Merge(m, src)
+}
+func (m *Endpoints) XXX_Size() int {
+	return m.Size()
+}
+func (m *Endpoints) XXX_DiscardUnknown() {
+	xxx_messageInfo_Endpoints.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Endpoints proto.InternalMessageInfo
+
+func (m *EndpointsList) Reset()      { *m = EndpointsList{} }
+func (*EndpointsList) ProtoMessage() {}
+func (*EndpointsList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{47}
+}
+func (m *EndpointsList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EndpointsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EndpointsList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EndpointsList.Merge(m, src)
+}
+func (m *EndpointsList) XXX_Size() int {
+	return m.Size()
+}
+func (m *EndpointsList) XXX_DiscardUnknown() {
+	xxx_messageInfo_EndpointsList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointsList proto.InternalMessageInfo
+
+func (m *EnvFromSource) Reset()      { *m = EnvFromSource{} }
+func (*EnvFromSource) ProtoMessage() {}
+func (*EnvFromSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{48}
+}
+func (m *EnvFromSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EnvFromSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EnvFromSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnvFromSource.Merge(m, src)
+}
+func (m *EnvFromSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *EnvFromSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnvFromSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo
+
+func (m *EnvVar) Reset()      { *m = EnvVar{} }
+func (*EnvVar) ProtoMessage() {}
+func (*EnvVar) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{49}
+}
+func (m *EnvVar) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EnvVar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EnvVar) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnvVar.Merge(m, src)
+}
+func (m *EnvVar) XXX_Size() int {
+	return m.Size()
+}
+func (m *EnvVar) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnvVar.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnvVar proto.InternalMessageInfo
+
+func (m *EnvVarSource) Reset()      { *m = EnvVarSource{} }
+func (*EnvVarSource) ProtoMessage() {}
+func (*EnvVarSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{50}
+}
+func (m *EnvVarSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EnvVarSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EnvVarSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnvVarSource.Merge(m, src)
+}
+func (m *EnvVarSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *EnvVarSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnvVarSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo
+
+func (m *EphemeralContainer) Reset()      { *m = EphemeralContainer{} }
+func (*EphemeralContainer) ProtoMessage() {}
+func (*EphemeralContainer) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{51}
+}
+func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EphemeralContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EphemeralContainer) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EphemeralContainer.Merge(m, src)
+}
+func (m *EphemeralContainer) XXX_Size() int {
+	return m.Size()
+}
+func (m *EphemeralContainer) XXX_DiscardUnknown() {
+	xxx_messageInfo_EphemeralContainer.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo
+
+func (m *EphemeralContainerCommon) Reset()      { *m = EphemeralContainerCommon{} }
+func (*EphemeralContainerCommon) ProtoMessage() {}
+func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{52}
+}
+func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EphemeralContainerCommon) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EphemeralContainerCommon) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EphemeralContainerCommon.Merge(m, src)
+}
+func (m *EphemeralContainerCommon) XXX_Size() int {
+	return m.Size()
+}
+func (m *EphemeralContainerCommon) XXX_DiscardUnknown() {
+	xxx_messageInfo_EphemeralContainerCommon.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo
+
+func (m *EphemeralVolumeSource) Reset()      { *m = EphemeralVolumeSource{} }
+func (*EphemeralVolumeSource) ProtoMessage() {}
+func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{53}
+}
+func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EphemeralVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EphemeralVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EphemeralVolumeSource.Merge(m, src)
+}
+func (m *EphemeralVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *EphemeralVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_EphemeralVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo
+
+func (m *Event) Reset()      { *m = Event{} }
+func (*Event) ProtoMessage() {}
+func (*Event) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{54}
+}
+func (m *Event) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Event) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Event.Merge(m, src)
+}
+func (m *Event) XXX_Size() int {
+	return m.Size()
+}
+func (m *Event) XXX_DiscardUnknown() {
+	xxx_messageInfo_Event.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Event proto.InternalMessageInfo
+
+func (m *EventList) Reset()      { *m = EventList{} }
+func (*EventList) ProtoMessage() {}
+func (*EventList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{55}
+}
+func (m *EventList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EventList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventList.Merge(m, src)
+}
+func (m *EventList) XXX_Size() int {
+	return m.Size()
+}
+func (m *EventList) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventList proto.InternalMessageInfo
+
+func (m *EventSeries) Reset()      { *m = EventSeries{} }
+func (*EventSeries) ProtoMessage() {}
+func (*EventSeries) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{56}
+}
+func (m *EventSeries) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EventSeries) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventSeries.Merge(m, src)
+}
+func (m *EventSeries) XXX_Size() int {
+	return m.Size()
+}
+func (m *EventSeries) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventSeries.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventSeries proto.InternalMessageInfo
+
+func (m *EventSource) Reset()      { *m = EventSource{} }
+func (*EventSource) ProtoMessage() {}
+func (*EventSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{57}
+}
+func (m *EventSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EventSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventSource.Merge(m, src)
+}
+func (m *EventSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *EventSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventSource proto.InternalMessageInfo
+
+func (m *ExecAction) Reset()      { *m = ExecAction{} }
+func (*ExecAction) ProtoMessage() {}
+func (*ExecAction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{58}
+}
+func (m *ExecAction) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ExecAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ExecAction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExecAction.Merge(m, src)
+}
+func (m *ExecAction) XXX_Size() int {
+	return m.Size()
+}
+func (m *ExecAction) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExecAction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExecAction proto.InternalMessageInfo
+
+func (m *FCVolumeSource) Reset()      { *m = FCVolumeSource{} }
+func (*FCVolumeSource) ProtoMessage() {}
+func (*FCVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{59}
+}
+func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *FCVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *FCVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FCVolumeSource.Merge(m, src)
+}
+func (m *FCVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *FCVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_FCVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo
+
+func (m *FlexPersistentVolumeSource) Reset()      { *m = FlexPersistentVolumeSource{} }
+func (*FlexPersistentVolumeSource) ProtoMessage() {}
+func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{60}
+}
+func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *FlexPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *FlexPersistentVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlexPersistentVolumeSource.Merge(m, src)
+}
+func (m *FlexPersistentVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *FlexPersistentVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlexPersistentVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo
+
+func (m *FlexVolumeSource) Reset()      { *m = FlexVolumeSource{} }
+func (*FlexVolumeSource) ProtoMessage() {}
+func (*FlexVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{61}
+}
+func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *FlexVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *FlexVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlexVolumeSource.Merge(m, src)
+}
+func (m *FlexVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *FlexVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlexVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo
+
+func (m *FlockerVolumeSource) Reset()      { *m = FlockerVolumeSource{} }
+func (*FlockerVolumeSource) ProtoMessage() {}
+func (*FlockerVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{62}
+}
+func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *FlockerVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *FlockerVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlockerVolumeSource.Merge(m, src)
+}
+func (m *FlockerVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *FlockerVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlockerVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo
+
+func (m *GCEPersistentDiskVolumeSource) Reset()      { *m = GCEPersistentDiskVolumeSource{} }
+func (*GCEPersistentDiskVolumeSource) ProtoMessage() {}
+func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{63}
+}
+func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GCEPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GCEPersistentDiskVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GCEPersistentDiskVolumeSource.Merge(m, src)
+}
+func (m *GCEPersistentDiskVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *GCEPersistentDiskVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_GCEPersistentDiskVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo
+
+func (m *GRPCAction) Reset()      { *m = GRPCAction{} }
+func (*GRPCAction) ProtoMessage() {}
+func (*GRPCAction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{64}
+}
+func (m *GRPCAction) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GRPCAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GRPCAction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GRPCAction.Merge(m, src)
+}
+func (m *GRPCAction) XXX_Size() int {
+	return m.Size()
+}
+func (m *GRPCAction) XXX_DiscardUnknown() {
+	xxx_messageInfo_GRPCAction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GRPCAction proto.InternalMessageInfo
+
+func (m *GitRepoVolumeSource) Reset()      { *m = GitRepoVolumeSource{} }
+func (*GitRepoVolumeSource) ProtoMessage() {}
+func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{65}
+}
+func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GitRepoVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GitRepoVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GitRepoVolumeSource.Merge(m, src)
+}
+func (m *GitRepoVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *GitRepoVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_GitRepoVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo
+
+func (m *GlusterfsPersistentVolumeSource) Reset()      { *m = GlusterfsPersistentVolumeSource{} }
+func (*GlusterfsPersistentVolumeSource) ProtoMessage() {}
+func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{66}
+}
+func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GlusterfsPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GlusterfsPersistentVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GlusterfsPersistentVolumeSource.Merge(m, src)
+}
+func (m *GlusterfsPersistentVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *GlusterfsPersistentVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_GlusterfsPersistentVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo
+
+func (m *GlusterfsVolumeSource) Reset()      { *m = GlusterfsVolumeSource{} }
+func (*GlusterfsVolumeSource) ProtoMessage() {}
+func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{67}
+}
+func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GlusterfsVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GlusterfsVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GlusterfsVolumeSource.Merge(m, src)
+}
+func (m *GlusterfsVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *GlusterfsVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_GlusterfsVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo
+
+func (m *HTTPGetAction) Reset()      { *m = HTTPGetAction{} }
+func (*HTTPGetAction) ProtoMessage() {}
+func (*HTTPGetAction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{68}
+}
+func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HTTPGetAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HTTPGetAction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HTTPGetAction.Merge(m, src)
+}
+func (m *HTTPGetAction) XXX_Size() int {
+	return m.Size()
+}
+func (m *HTTPGetAction) XXX_DiscardUnknown() {
+	xxx_messageInfo_HTTPGetAction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo
+
+func (m *HTTPHeader) Reset()      { *m = HTTPHeader{} }
+func (*HTTPHeader) ProtoMessage() {}
+func (*HTTPHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{69}
+}
+func (m *HTTPHeader) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HTTPHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HTTPHeader.Merge(m, src)
+}
+func (m *HTTPHeader) XXX_Size() int {
+	return m.Size()
+}
+func (m *HTTPHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_HTTPHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo
+
+func (m *HostAlias) Reset()      { *m = HostAlias{} }
+func (*HostAlias) ProtoMessage() {}
+func (*HostAlias) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{70}
+}
+func (m *HostAlias) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HostAlias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HostAlias) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HostAlias.Merge(m, src)
+}
+func (m *HostAlias) XXX_Size() int {
+	return m.Size()
+}
+func (m *HostAlias) XXX_DiscardUnknown() {
+	xxx_messageInfo_HostAlias.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HostAlias proto.InternalMessageInfo
+
+func (m *HostIP) Reset()      { *m = HostIP{} }
+func (*HostIP) ProtoMessage() {}
+func (*HostIP) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{71}
+}
+func (m *HostIP) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HostIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HostIP) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HostIP.Merge(m, src)
+}
+func (m *HostIP) XXX_Size() int {
+	return m.Size()
+}
+func (m *HostIP) XXX_DiscardUnknown() {
+	xxx_messageInfo_HostIP.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HostIP proto.InternalMessageInfo
+
+func (m *HostPathVolumeSource) Reset()      { *m = HostPathVolumeSource{} }
+func (*HostPathVolumeSource) ProtoMessage() {}
+func (*HostPathVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{72}
+}
+func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HostPathVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HostPathVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HostPathVolumeSource.Merge(m, src)
+}
+func (m *HostPathVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *HostPathVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_HostPathVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo
+
+func (m *ISCSIPersistentVolumeSource) Reset()      { *m = ISCSIPersistentVolumeSource{} }
+func (*ISCSIPersistentVolumeSource) ProtoMessage() {}
+func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{73}
+}
+func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ISCSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ISCSIPersistentVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ISCSIPersistentVolumeSource.Merge(m, src)
+}
+func (m *ISCSIPersistentVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *ISCSIPersistentVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_ISCSIPersistentVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo
+
+func (m *ISCSIVolumeSource) Reset()      { *m = ISCSIVolumeSource{} }
+func (*ISCSIVolumeSource) ProtoMessage() {}
+func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{74}
+}
+func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ISCSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ISCSIVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ISCSIVolumeSource.Merge(m, src)
+}
+func (m *ISCSIVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *ISCSIVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_ISCSIVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo
+
+func (m *ImageVolumeSource) Reset()      { *m = ImageVolumeSource{} }
+func (*ImageVolumeSource) ProtoMessage() {}
+func (*ImageVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{75}
+}
+func (m *ImageVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ImageVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageVolumeSource.Merge(m, src)
+}
+func (m *ImageVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageVolumeSource proto.InternalMessageInfo
+
+func (m *KeyToPath) Reset()      { *m = KeyToPath{} }
+func (*KeyToPath) ProtoMessage() {}
+func (*KeyToPath) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{76}
+}
+func (m *KeyToPath) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *KeyToPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *KeyToPath) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KeyToPath.Merge(m, src)
+}
+func (m *KeyToPath) XXX_Size() int {
+	return m.Size()
+}
+func (m *KeyToPath) XXX_DiscardUnknown() {
+	xxx_messageInfo_KeyToPath.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KeyToPath proto.InternalMessageInfo
+
+func (m *Lifecycle) Reset()      { *m = Lifecycle{} }
+func (*Lifecycle) ProtoMessage() {}
+func (*Lifecycle) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{77}
+}
+func (m *Lifecycle) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Lifecycle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Lifecycle) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Lifecycle.Merge(m, src)
+}
+func (m *Lifecycle) XXX_Size() int {
+	return m.Size()
+}
+func (m *Lifecycle) XXX_DiscardUnknown() {
+	xxx_messageInfo_Lifecycle.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Lifecycle proto.InternalMessageInfo
+
+func (m *LifecycleHandler) Reset()      { *m = LifecycleHandler{} }
+func (*LifecycleHandler) ProtoMessage() {}
+func (*LifecycleHandler) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{78}
+}
+func (m *LifecycleHandler) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LifecycleHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LifecycleHandler) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LifecycleHandler.Merge(m, src)
+}
+func (m *LifecycleHandler) XXX_Size() int {
+	return m.Size()
+}
+func (m *LifecycleHandler) XXX_DiscardUnknown() {
+	xxx_messageInfo_LifecycleHandler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LifecycleHandler proto.InternalMessageInfo
+
+func (m *LimitRange) Reset()      { *m = LimitRange{} }
+func (*LimitRange) ProtoMessage() {}
+func (*LimitRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{79}
+}
+func (m *LimitRange) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LimitRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LimitRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LimitRange.Merge(m, src)
+}
+func (m *LimitRange) XXX_Size() int {
+	return m.Size()
+}
+func (m *LimitRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_LimitRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LimitRange proto.InternalMessageInfo
+
+func (m *LimitRangeItem) Reset()      { *m = LimitRangeItem{} }
+func (*LimitRangeItem) ProtoMessage() {}
+func (*LimitRangeItem) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{80}
+}
+func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LimitRangeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LimitRangeItem) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LimitRangeItem.Merge(m, src)
+}
+func (m *LimitRangeItem) XXX_Size() int {
+	return m.Size()
+}
+func (m *LimitRangeItem) XXX_DiscardUnknown() {
+	xxx_messageInfo_LimitRangeItem.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo
+
+func (m *LimitRangeList) Reset()      { *m = LimitRangeList{} }
+func (*LimitRangeList) ProtoMessage() {}
+func (*LimitRangeList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{81}
+}
+func (m *LimitRangeList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LimitRangeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LimitRangeList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LimitRangeList.Merge(m, src)
+}
+func (m *LimitRangeList) XXX_Size() int {
+	return m.Size()
+}
+func (m *LimitRangeList) XXX_DiscardUnknown() {
+	xxx_messageInfo_LimitRangeList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo
+
+func (m *LimitRangeSpec) Reset()      { *m = LimitRangeSpec{} }
+func (*LimitRangeSpec) ProtoMessage() {}
+func (*LimitRangeSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{82}
+}
+func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LimitRangeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LimitRangeSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LimitRangeSpec.Merge(m, src)
+}
+func (m *LimitRangeSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *LimitRangeSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_LimitRangeSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo
+
+func (m *LinuxContainerUser) Reset()      { *m = LinuxContainerUser{} }
+func (*LinuxContainerUser) ProtoMessage() {}
+func (*LinuxContainerUser) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{83}
+}
+func (m *LinuxContainerUser) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LinuxContainerUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LinuxContainerUser) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LinuxContainerUser.Merge(m, src)
+}
+func (m *LinuxContainerUser) XXX_Size() int {
+	return m.Size()
+}
+func (m *LinuxContainerUser) XXX_DiscardUnknown() {
+	xxx_messageInfo_LinuxContainerUser.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LinuxContainerUser proto.InternalMessageInfo
+
+func (m *List) Reset()      { *m = List{} }
+func (*List) ProtoMessage() {}
+func (*List) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{84}
+}
+func (m *List) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *List) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_List.Merge(m, src)
+}
+func (m *List) XXX_Size() int {
+	return m.Size()
+}
+func (m *List) XXX_DiscardUnknown() {
+	xxx_messageInfo_List.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_List proto.InternalMessageInfo
+
+func (m *LoadBalancerIngress) Reset()      { *m = LoadBalancerIngress{} }
+func (*LoadBalancerIngress) ProtoMessage() {}
+func (*LoadBalancerIngress) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{85}
+}
+func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LoadBalancerIngress) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LoadBalancerIngress.Merge(m, src)
+}
+func (m *LoadBalancerIngress) XXX_Size() int {
+	return m.Size()
+}
+func (m *LoadBalancerIngress) XXX_DiscardUnknown() {
+	xxx_messageInfo_LoadBalancerIngress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo
+
+func (m *LoadBalancerStatus) Reset()      { *m = LoadBalancerStatus{} }
+func (*LoadBalancerStatus) ProtoMessage() {}
+func (*LoadBalancerStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{86}
+}
+func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LoadBalancerStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LoadBalancerStatus.Merge(m, src)
+}
+func (m *LoadBalancerStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *LoadBalancerStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_LoadBalancerStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo
+
+func (m *LocalObjectReference) Reset()      { *m = LocalObjectReference{} }
+func (*LocalObjectReference) ProtoMessage() {}
+func (*LocalObjectReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{87}
+}
+func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LocalObjectReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LocalObjectReference.Merge(m, src)
+}
+func (m *LocalObjectReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *LocalObjectReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_LocalObjectReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo
+
+func (m *LocalVolumeSource) Reset()      { *m = LocalVolumeSource{} }
+func (*LocalVolumeSource) ProtoMessage() {}
+func (*LocalVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{88}
+}
+func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LocalVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *LocalVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LocalVolumeSource.Merge(m, src)
+}
+func (m *LocalVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *LocalVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_LocalVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo
+
+func (m *ModifyVolumeStatus) Reset()      { *m = ModifyVolumeStatus{} }
+func (*ModifyVolumeStatus) ProtoMessage() {}
+func (*ModifyVolumeStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{89}
+}
+func (m *ModifyVolumeStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ModifyVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ModifyVolumeStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ModifyVolumeStatus.Merge(m, src)
+}
+func (m *ModifyVolumeStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ModifyVolumeStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ModifyVolumeStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ModifyVolumeStatus proto.InternalMessageInfo
+
+func (m *NFSVolumeSource) Reset()      { *m = NFSVolumeSource{} }
+func (*NFSVolumeSource) ProtoMessage() {}
+func (*NFSVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{90}
+}
+func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NFSVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NFSVolumeSource.Merge(m, src)
+}
+func (m *NFSVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *NFSVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_NFSVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo
+
+func (m *Namespace) Reset()      { *m = Namespace{} }
+func (*Namespace) ProtoMessage() {}
+func (*Namespace) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{91}
+}
+func (m *Namespace) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Namespace) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Namespace.Merge(m, src)
+}
+func (m *Namespace) XXX_Size() int {
+	return m.Size()
+}
+func (m *Namespace) XXX_DiscardUnknown() {
+	xxx_messageInfo_Namespace.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Namespace proto.InternalMessageInfo
+
+func (m *NamespaceCondition) Reset()      { *m = NamespaceCondition{} }
+func (*NamespaceCondition) ProtoMessage() {}
+func (*NamespaceCondition) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{92}
+}
+func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NamespaceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NamespaceCondition) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamespaceCondition.Merge(m, src)
+}
+func (m *NamespaceCondition) XXX_Size() int {
+	return m.Size()
+}
+func (m *NamespaceCondition) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamespaceCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo
+
+func (m *NamespaceList) Reset()      { *m = NamespaceList{} }
+func (*NamespaceList) ProtoMessage() {}
+func (*NamespaceList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{93}
+}
+func (m *NamespaceList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NamespaceList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamespaceList.Merge(m, src)
+}
+func (m *NamespaceList) XXX_Size() int {
+	return m.Size()
+}
+func (m *NamespaceList) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamespaceList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamespaceList proto.InternalMessageInfo
+
+func (m *NamespaceSpec) Reset()      { *m = NamespaceSpec{} }
+func (*NamespaceSpec) ProtoMessage() {}
+func (*NamespaceSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{94}
+}
+func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NamespaceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NamespaceSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamespaceSpec.Merge(m, src)
+}
+func (m *NamespaceSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *NamespaceSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamespaceSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo
+
+func (m *NamespaceStatus) Reset()      { *m = NamespaceStatus{} }
+func (*NamespaceStatus) ProtoMessage() {}
+func (*NamespaceStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{95}
+}
+func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NamespaceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NamespaceStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamespaceStatus.Merge(m, src)
+}
+func (m *NamespaceStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *NamespaceStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamespaceStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo
+
+func (m *Node) Reset()      { *m = Node{} }
+func (*Node) ProtoMessage() {}
+func (*Node) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{96}
+}
+func (m *Node) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Node) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Node.Merge(m, src)
+}
+func (m *Node) XXX_Size() int {
+	return m.Size()
+}
+func (m *Node) XXX_DiscardUnknown() {
+	xxx_messageInfo_Node.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Node proto.InternalMessageInfo
+
+func (m *NodeAddress) Reset()      { *m = NodeAddress{} }
+func (*NodeAddress) ProtoMessage() {}
+func (*NodeAddress) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{97}
+}
+func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeAddress) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeAddress.Merge(m, src)
+}
+func (m *NodeAddress) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeAddress) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeAddress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeAddress proto.InternalMessageInfo
+
+func (m *NodeAffinity) Reset()      { *m = NodeAffinity{} }
+func (*NodeAffinity) ProtoMessage() {}
+func (*NodeAffinity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{98}
+}
+func (m *NodeAffinity) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeAffinity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeAffinity.Merge(m, src)
+}
+func (m *NodeAffinity) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeAffinity) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeAffinity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo
+
+func (m *NodeCondition) Reset()      { *m = NodeCondition{} }
+func (*NodeCondition) ProtoMessage() {}
+func (*NodeCondition) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{99}
+}
+func (m *NodeCondition) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeCondition) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeCondition.Merge(m, src)
+}
+func (m *NodeCondition) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeCondition) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeCondition proto.InternalMessageInfo
+
+func (m *NodeConfigSource) Reset()      { *m = NodeConfigSource{} }
+func (*NodeConfigSource) ProtoMessage() {}
+func (*NodeConfigSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{100}
+}
+func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeConfigSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeConfigSource.Merge(m, src)
+}
+func (m *NodeConfigSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeConfigSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeConfigSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo
+
+func (m *NodeConfigStatus) Reset()      { *m = NodeConfigStatus{} }
+func (*NodeConfigStatus) ProtoMessage() {}
+func (*NodeConfigStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{101}
+}
+func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeConfigStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeConfigStatus.Merge(m, src)
+}
+func (m *NodeConfigStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeConfigStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeConfigStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo
+
+func (m *NodeDaemonEndpoints) Reset()      { *m = NodeDaemonEndpoints{} }
+func (*NodeDaemonEndpoints) ProtoMessage() {}
+func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{102}
+}
+func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeDaemonEndpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeDaemonEndpoints) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeDaemonEndpoints.Merge(m, src)
+}
+func (m *NodeDaemonEndpoints) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeDaemonEndpoints.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo
+
+func (m *NodeFeatures) Reset()      { *m = NodeFeatures{} }
+func (*NodeFeatures) ProtoMessage() {}
+func (*NodeFeatures) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{103}
+}
+func (m *NodeFeatures) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeFeatures) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeFeatures.Merge(m, src)
+}
+func (m *NodeFeatures) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeFeatures) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeFeatures.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeFeatures proto.InternalMessageInfo
+
+func (m *NodeList) Reset()      { *m = NodeList{} }
+func (*NodeList) ProtoMessage() {}
+func (*NodeList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{104}
+}
+func (m *NodeList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeList.Merge(m, src)
+}
+func (m *NodeList) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeList) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeList proto.InternalMessageInfo
+
+func (m *NodeProxyOptions) Reset()      { *m = NodeProxyOptions{} }
+func (*NodeProxyOptions) ProtoMessage() {}
+func (*NodeProxyOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{105}
+}
+func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeProxyOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeProxyOptions.Merge(m, src)
+}
+func (m *NodeProxyOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeProxyOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeProxyOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo
+
+func (m *NodeRuntimeHandler) Reset()      { *m = NodeRuntimeHandler{} }
+func (*NodeRuntimeHandler) ProtoMessage() {}
+func (*NodeRuntimeHandler) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{106}
+}
+func (m *NodeRuntimeHandler) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeRuntimeHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeRuntimeHandler) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeRuntimeHandler.Merge(m, src)
+}
+func (m *NodeRuntimeHandler) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeRuntimeHandler) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeRuntimeHandler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeRuntimeHandler proto.InternalMessageInfo
+
+func (m *NodeRuntimeHandlerFeatures) Reset()      { *m = NodeRuntimeHandlerFeatures{} }
+func (*NodeRuntimeHandlerFeatures) ProtoMessage() {}
+func (*NodeRuntimeHandlerFeatures) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{107}
+}
+func (m *NodeRuntimeHandlerFeatures) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeRuntimeHandlerFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeRuntimeHandlerFeatures) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeRuntimeHandlerFeatures.Merge(m, src)
+}
+func (m *NodeRuntimeHandlerFeatures) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeRuntimeHandlerFeatures) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeRuntimeHandlerFeatures.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeRuntimeHandlerFeatures proto.InternalMessageInfo
+
+func (m *NodeSelector) Reset()      { *m = NodeSelector{} }
+func (*NodeSelector) ProtoMessage() {}
+func (*NodeSelector) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{108}
+}
+func (m *NodeSelector) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeSelector) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeSelector.Merge(m, src)
+}
+func (m *NodeSelector) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeSelector) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeSelector proto.InternalMessageInfo
+
+func (m *NodeSelectorRequirement) Reset()      { *m = NodeSelectorRequirement{} }
+func (*NodeSelectorRequirement) ProtoMessage() {}
+func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{109}
+}
+func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeSelectorRequirement) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeSelectorRequirement.Merge(m, src)
+}
+func (m *NodeSelectorRequirement) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeSelectorRequirement) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeSelectorRequirement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo
+
+func (m *NodeSelectorTerm) Reset()      { *m = NodeSelectorTerm{} }
+func (*NodeSelectorTerm) ProtoMessage() {}
+func (*NodeSelectorTerm) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{110}
+}
+func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeSelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeSelectorTerm) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeSelectorTerm.Merge(m, src)
+}
+func (m *NodeSelectorTerm) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeSelectorTerm) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeSelectorTerm.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo
+
+func (m *NodeSpec) Reset()      { *m = NodeSpec{} }
+func (*NodeSpec) ProtoMessage() {}
+func (*NodeSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{111}
+}
+func (m *NodeSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeSpec.Merge(m, src)
+}
+func (m *NodeSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeSpec proto.InternalMessageInfo
+
+func (m *NodeStatus) Reset()      { *m = NodeStatus{} }
+func (*NodeStatus) ProtoMessage() {}
+func (*NodeStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{112}
+}
+func (m *NodeStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeStatus.Merge(m, src)
+}
+func (m *NodeStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
+
+func (m *NodeSystemInfo) Reset()      { *m = NodeSystemInfo{} }
+func (*NodeSystemInfo) ProtoMessage() {}
+func (*NodeSystemInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{113}
+}
+func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NodeSystemInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *NodeSystemInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NodeSystemInfo.Merge(m, src)
+}
+func (m *NodeSystemInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *NodeSystemInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_NodeSystemInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo
+
+func (m *ObjectFieldSelector) Reset()      { *m = ObjectFieldSelector{} }
+func (*ObjectFieldSelector) ProtoMessage() {}
+func (*ObjectFieldSelector) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{114}
+}
+func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ObjectFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ObjectFieldSelector) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ObjectFieldSelector.Merge(m, src)
+}
+func (m *ObjectFieldSelector) XXX_Size() int {
+	return m.Size()
+}
+func (m *ObjectFieldSelector) XXX_DiscardUnknown() {
+	xxx_messageInfo_ObjectFieldSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo
+
+func (m *ObjectReference) Reset()      { *m = ObjectReference{} }
+func (*ObjectReference) ProtoMessage() {}
+func (*ObjectReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{115}
+}
+func (m *ObjectReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ObjectReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ObjectReference.Merge(m, src)
+}
+func (m *ObjectReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *ObjectReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_ObjectReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ObjectReference proto.InternalMessageInfo
+
+func (m *PersistentVolume) Reset()      { *m = PersistentVolume{} }
+func (*PersistentVolume) ProtoMessage() {}
+func (*PersistentVolume) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{116}
+}
+func (m *PersistentVolume) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PersistentVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PersistentVolume) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PersistentVolume.Merge(m, src)
+}
+func (m *PersistentVolume) XXX_Size() int {
+	return m.Size()
+}
+func (m *PersistentVolume) XXX_DiscardUnknown() {
+	xxx_messageInfo_PersistentVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo
+
+func (m *PersistentVolumeClaim) Reset()      { *m = PersistentVolumeClaim{} }
+func (*PersistentVolumeClaim) ProtoMessage() {}
+func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{117}
+}
+func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PersistentVolumeClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PersistentVolumeClaim) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PersistentVolumeClaim.Merge(m, src)
+}
+func (m *PersistentVolumeClaim) XXX_Size() int {
+	return m.Size()
+}
+func (m *PersistentVolumeClaim) XXX_DiscardUnknown() {
+	xxx_messageInfo_PersistentVolumeClaim.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo
+
+func (m *PersistentVolumeClaimCondition) Reset()      { *m = PersistentVolumeClaimCondition{} }
+func (*PersistentVolumeClaimCondition) ProtoMessage() {}
+func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{118}
+}
+func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PersistentVolumeClaimCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PersistentVolumeClaimCondition) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PersistentVolumeClaimCondition.Merge(m, src)
+}
+func (m *PersistentVolumeClaimCondition) XXX_Size() int {
+	return m.Size()
+}
+func (m *PersistentVolumeClaimCondition) XXX_DiscardUnknown() {
+	xxx_messageInfo_PersistentVolumeClaimCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo
+
+func (m *PersistentVolumeClaimList) Reset()      { *m = PersistentVolumeClaimList{} }
+func (*PersistentVolumeClaimList) ProtoMessage() {}
+func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{119}
+}
+func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PersistentVolumeClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PersistentVolumeClaimList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PersistentVolumeClaimList.Merge(m, src)
+}
+func (m *PersistentVolumeClaimList) XXX_Size() int {
+	return m.Size()
+}
+func (m *PersistentVolumeClaimList) XXX_DiscardUnknown() {
+	xxx_messageInfo_PersistentVolumeClaimList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo
+
+func (m *PersistentVolumeClaimSpec) Reset()      { *m = PersistentVolumeClaimSpec{} }
+func (*PersistentVolumeClaimSpec) ProtoMessage() {}
+func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{120}
+}
+func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PersistentVolumeClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PersistentVolumeClaimSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PersistentVolumeClaimSpec.Merge(m, src)
+}
+func (m *PersistentVolumeClaimSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *PersistentVolumeClaimSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_PersistentVolumeClaimSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo
+
+func (m *PersistentVolumeClaimStatus) Reset()      { *m = PersistentVolumeClaimStatus{} }
+func (*PersistentVolumeClaimStatus) ProtoMessage() {}
+func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{121}
+}
+func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PersistentVolumeClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PersistentVolumeClaimStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PersistentVolumeClaimStatus.Merge(m, src)
+}
+func (m *PersistentVolumeClaimStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *PersistentVolumeClaimStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_PersistentVolumeClaimStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo
+
+func (m *PersistentVolumeClaimTemplate) Reset()      { *m = PersistentVolumeClaimTemplate{} }
+func (*PersistentVolumeClaimTemplate) ProtoMessage() {}
+func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{122}
+}
+func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PersistentVolumeClaimTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PersistentVolumeClaimTemplate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PersistentVolumeClaimTemplate.Merge(m, src)
+}
+func (m *PersistentVolumeClaimTemplate) XXX_Size() int {
+	return m.Size()
+}
+func (m *PersistentVolumeClaimTemplate) XXX_DiscardUnknown() {
+	xxx_messageInfo_PersistentVolumeClaimTemplate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo
+
+func (m *PersistentVolumeClaimVolumeSource) Reset()      { *m = PersistentVolumeClaimVolumeSource{} }
+func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
+func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{123}
+}
+func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PersistentVolumeClaimVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PersistentVolumeClaimVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PersistentVolumeClaimVolumeSource.Merge(m, src)
+}
+func (m *PersistentVolumeClaimVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *PersistentVolumeClaimVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_PersistentVolumeClaimVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo
+
+func (m *PersistentVolumeList) Reset()      { *m = PersistentVolumeList{} }
+func (*PersistentVolumeList) ProtoMessage() {}
+func (*PersistentVolumeList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{124}
+}
+func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PersistentVolumeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PersistentVolumeList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PersistentVolumeList.Merge(m, src)
+}
+func (m *PersistentVolumeList) XXX_Size() int {
+	return m.Size()
+}
+func (m *PersistentVolumeList) XXX_DiscardUnknown() {
+	xxx_messageInfo_PersistentVolumeList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo
+
+func (m *PersistentVolumeSource) Reset()      { *m = PersistentVolumeSource{} }
+func (*PersistentVolumeSource) ProtoMessage() {}
+func (*PersistentVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{125}
+}
+func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PersistentVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PersistentVolumeSource.Merge(m, src)
+}
+func (m *PersistentVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *PersistentVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_PersistentVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo
+
+func (m *PersistentVolumeSpec) Reset()      { *m = PersistentVolumeSpec{} }
+func (*PersistentVolumeSpec) ProtoMessage() {}
+func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{126}
+}
+func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PersistentVolumeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PersistentVolumeSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PersistentVolumeSpec.Merge(m, src)
+}
+func (m *PersistentVolumeSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *PersistentVolumeSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_PersistentVolumeSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo
+
+func (m *PersistentVolumeStatus) Reset()      { *m = PersistentVolumeStatus{} }
+func (*PersistentVolumeStatus) ProtoMessage() {}
+func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{127}
+}
+func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PersistentVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PersistentVolumeStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PersistentVolumeStatus.Merge(m, src)
+}
+func (m *PersistentVolumeStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *PersistentVolumeStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_PersistentVolumeStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo
+
+func (m *PhotonPersistentDiskVolumeSource) Reset()      { *m = PhotonPersistentDiskVolumeSource{} }
+func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {}
+func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{128}
+}
+func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PhotonPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PhotonPersistentDiskVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PhotonPersistentDiskVolumeSource.Merge(m, src)
+}
+func (m *PhotonPersistentDiskVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *PhotonPersistentDiskVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_PhotonPersistentDiskVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo
+
+func (m *Pod) Reset()      { *m = Pod{} }
+func (*Pod) ProtoMessage() {}
+func (*Pod) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{129}
+}
+func (m *Pod) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Pod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Pod) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Pod.Merge(m, src)
+}
+func (m *Pod) XXX_Size() int {
+	return m.Size()
+}
+func (m *Pod) XXX_DiscardUnknown() {
+	xxx_messageInfo_Pod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Pod proto.InternalMessageInfo
+
+func (m *PodAffinity) Reset()      { *m = PodAffinity{} }
+func (*PodAffinity) ProtoMessage() {}
+func (*PodAffinity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{130}
+}
+func (m *PodAffinity) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodAffinity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodAffinity.Merge(m, src)
+}
+func (m *PodAffinity) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodAffinity) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodAffinity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodAffinity proto.InternalMessageInfo
+
+func (m *PodAffinityTerm) Reset()      { *m = PodAffinityTerm{} }
+func (*PodAffinityTerm) ProtoMessage() {}
+func (*PodAffinityTerm) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{131}
+}
+func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodAffinityTerm) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodAffinityTerm.Merge(m, src)
+}
+func (m *PodAffinityTerm) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodAffinityTerm) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodAffinityTerm.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo
+
+func (m *PodAntiAffinity) Reset()      { *m = PodAntiAffinity{} }
+func (*PodAntiAffinity) ProtoMessage() {}
+func (*PodAntiAffinity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{132}
+}
+func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodAntiAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodAntiAffinity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodAntiAffinity.Merge(m, src)
+}
+func (m *PodAntiAffinity) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodAntiAffinity) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodAntiAffinity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo
+
+func (m *PodAttachOptions) Reset()      { *m = PodAttachOptions{} }
+func (*PodAttachOptions) ProtoMessage() {}
+func (*PodAttachOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{133}
+}
+func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodAttachOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodAttachOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodAttachOptions.Merge(m, src)
+}
+func (m *PodAttachOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodAttachOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodAttachOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo
+
+func (m *PodCondition) Reset()      { *m = PodCondition{} }
+func (*PodCondition) ProtoMessage() {}
+func (*PodCondition) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{134}
+}
+func (m *PodCondition) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodCondition) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodCondition.Merge(m, src)
+}
+func (m *PodCondition) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodCondition) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodCondition proto.InternalMessageInfo
+
+func (m *PodDNSConfig) Reset()      { *m = PodDNSConfig{} }
+func (*PodDNSConfig) ProtoMessage() {}
+func (*PodDNSConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{135}
+}
+func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodDNSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodDNSConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodDNSConfig.Merge(m, src)
+}
+func (m *PodDNSConfig) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodDNSConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodDNSConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo
+
+func (m *PodDNSConfigOption) Reset()      { *m = PodDNSConfigOption{} }
+func (*PodDNSConfigOption) ProtoMessage() {}
+func (*PodDNSConfigOption) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{136}
+}
+func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodDNSConfigOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodDNSConfigOption) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodDNSConfigOption.Merge(m, src)
+}
+func (m *PodDNSConfigOption) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodDNSConfigOption) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodDNSConfigOption.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo
+
+func (m *PodExecOptions) Reset()      { *m = PodExecOptions{} }
+func (*PodExecOptions) ProtoMessage() {}
+func (*PodExecOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{137}
+}
+func (m *PodExecOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodExecOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodExecOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodExecOptions.Merge(m, src)
+}
+func (m *PodExecOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodExecOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodExecOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo
+
+func (m *PodIP) Reset()      { *m = PodIP{} }
+func (*PodIP) ProtoMessage() {}
+func (*PodIP) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{138}
+}
+func (m *PodIP) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodIP) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodIP.Merge(m, src)
+}
+func (m *PodIP) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodIP) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodIP.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodIP proto.InternalMessageInfo
+
+func (m *PodList) Reset()      { *m = PodList{} }
+func (*PodList) ProtoMessage() {}
+func (*PodList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{139}
+}
+func (m *PodList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodList.Merge(m, src)
+}
+func (m *PodList) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodList) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodList proto.InternalMessageInfo
+
+func (m *PodLogOptions) Reset()      { *m = PodLogOptions{} }
+func (*PodLogOptions) ProtoMessage() {}
+func (*PodLogOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{140}
+}
+func (m *PodLogOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodLogOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodLogOptions.Merge(m, src)
+}
+func (m *PodLogOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodLogOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodLogOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo
+
+func (m *PodOS) Reset()      { *m = PodOS{} }
+func (*PodOS) ProtoMessage() {}
+func (*PodOS) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{141}
+}
+func (m *PodOS) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodOS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodOS) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodOS.Merge(m, src)
+}
+func (m *PodOS) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodOS) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodOS.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodOS proto.InternalMessageInfo
+
+func (m *PodPortForwardOptions) Reset()      { *m = PodPortForwardOptions{} }
+func (*PodPortForwardOptions) ProtoMessage() {}
+func (*PodPortForwardOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{142}
+}
+func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodPortForwardOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodPortForwardOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodPortForwardOptions.Merge(m, src)
+}
+func (m *PodPortForwardOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodPortForwardOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodPortForwardOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo
+
+func (m *PodProxyOptions) Reset()      { *m = PodProxyOptions{} }
+func (*PodProxyOptions) ProtoMessage() {}
+func (*PodProxyOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{143}
+}
+func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodProxyOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodProxyOptions.Merge(m, src)
+}
+func (m *PodProxyOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodProxyOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodProxyOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo
+
+func (m *PodReadinessGate) Reset()      { *m = PodReadinessGate{} }
+func (*PodReadinessGate) ProtoMessage() {}
+func (*PodReadinessGate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{144}
+}
+func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodReadinessGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodReadinessGate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodReadinessGate.Merge(m, src)
+}
+func (m *PodReadinessGate) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodReadinessGate) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodReadinessGate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo
+
+func (m *PodResourceClaim) Reset()      { *m = PodResourceClaim{} }
+func (*PodResourceClaim) ProtoMessage() {}
+func (*PodResourceClaim) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{145}
+}
+func (m *PodResourceClaim) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodResourceClaim) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodResourceClaim.Merge(m, src)
+}
+func (m *PodResourceClaim) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodResourceClaim) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodResourceClaim.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodResourceClaim proto.InternalMessageInfo
+
+func (m *PodResourceClaimStatus) Reset()      { *m = PodResourceClaimStatus{} }
+func (*PodResourceClaimStatus) ProtoMessage() {}
+func (*PodResourceClaimStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{146}
+}
+func (m *PodResourceClaimStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodResourceClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodResourceClaimStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodResourceClaimStatus.Merge(m, src)
+}
+func (m *PodResourceClaimStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodResourceClaimStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodResourceClaimStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodResourceClaimStatus proto.InternalMessageInfo
+
+func (m *PodSchedulingGate) Reset()      { *m = PodSchedulingGate{} }
+func (*PodSchedulingGate) ProtoMessage() {}
+func (*PodSchedulingGate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{147}
+}
+func (m *PodSchedulingGate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodSchedulingGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodSchedulingGate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodSchedulingGate.Merge(m, src)
+}
+func (m *PodSchedulingGate) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodSchedulingGate) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodSchedulingGate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodSchedulingGate proto.InternalMessageInfo
+
+func (m *PodSecurityContext) Reset()      { *m = PodSecurityContext{} }
+func (*PodSecurityContext) ProtoMessage() {}
+func (*PodSecurityContext) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{148}
+}
+func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodSecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodSecurityContext) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodSecurityContext.Merge(m, src)
+}
+func (m *PodSecurityContext) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodSecurityContext) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodSecurityContext.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo
+
+func (m *PodSignature) Reset()      { *m = PodSignature{} }
+func (*PodSignature) ProtoMessage() {}
+func (*PodSignature) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{149}
+}
+func (m *PodSignature) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodSignature) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodSignature.Merge(m, src)
+}
+func (m *PodSignature) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodSignature) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodSignature.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodSignature proto.InternalMessageInfo
+
+func (m *PodSpec) Reset()      { *m = PodSpec{} }
+func (*PodSpec) ProtoMessage() {}
+func (*PodSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{150}
+}
+func (m *PodSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodSpec.Merge(m, src)
+}
+func (m *PodSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodSpec proto.InternalMessageInfo
+
+func (m *PodStatus) Reset()      { *m = PodStatus{} }
+func (*PodStatus) ProtoMessage() {}
+func (*PodStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{151}
+}
+func (m *PodStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodStatus.Merge(m, src)
+}
+func (m *PodStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodStatus proto.InternalMessageInfo
+
+func (m *PodStatusResult) Reset()      { *m = PodStatusResult{} }
+func (*PodStatusResult) ProtoMessage() {}
+func (*PodStatusResult) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{152}
+}
+func (m *PodStatusResult) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodStatusResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodStatusResult) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodStatusResult.Merge(m, src)
+}
+func (m *PodStatusResult) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodStatusResult) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodStatusResult.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo
+
+func (m *PodTemplate) Reset()      { *m = PodTemplate{} }
+func (*PodTemplate) ProtoMessage() {}
+func (*PodTemplate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{153}
+}
+func (m *PodTemplate) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodTemplate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodTemplate.Merge(m, src)
+}
+func (m *PodTemplate) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodTemplate) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodTemplate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodTemplate proto.InternalMessageInfo
+
+func (m *PodTemplateList) Reset()      { *m = PodTemplateList{} }
+func (*PodTemplateList) ProtoMessage() {}
+func (*PodTemplateList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{154}
+}
+func (m *PodTemplateList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodTemplateList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodTemplateList.Merge(m, src)
+}
+func (m *PodTemplateList) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodTemplateList) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodTemplateList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo
+
+func (m *PodTemplateSpec) Reset()      { *m = PodTemplateSpec{} }
+func (*PodTemplateSpec) ProtoMessage() {}
+func (*PodTemplateSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{155}
+}
+func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PodTemplateSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodTemplateSpec.Merge(m, src)
+}
+func (m *PodTemplateSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodTemplateSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodTemplateSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo
+
+func (m *PortStatus) Reset()      { *m = PortStatus{} }
+func (*PortStatus) ProtoMessage() {}
+func (*PortStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{156}
+}
+func (m *PortStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PortStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PortStatus.Merge(m, src)
+}
+func (m *PortStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *PortStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_PortStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PortStatus proto.InternalMessageInfo
+
+func (m *PortworxVolumeSource) Reset()      { *m = PortworxVolumeSource{} }
+func (*PortworxVolumeSource) ProtoMessage() {}
+func (*PortworxVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{157}
+}
+func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PortworxVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PortworxVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PortworxVolumeSource.Merge(m, src)
+}
+func (m *PortworxVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *PortworxVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_PortworxVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo
+
+func (m *Preconditions) Reset()      { *m = Preconditions{} }
+func (*Preconditions) ProtoMessage() {}
+func (*Preconditions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{158}
+}
+func (m *Preconditions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Preconditions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Preconditions.Merge(m, src)
+}
+func (m *Preconditions) XXX_Size() int {
+	return m.Size()
+}
+func (m *Preconditions) XXX_DiscardUnknown() {
+	xxx_messageInfo_Preconditions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Preconditions proto.InternalMessageInfo
+
+func (m *PreferAvoidPodsEntry) Reset()      { *m = PreferAvoidPodsEntry{} }
+func (*PreferAvoidPodsEntry) ProtoMessage() {}
+func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{159}
+}
+func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PreferAvoidPodsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PreferAvoidPodsEntry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PreferAvoidPodsEntry.Merge(m, src)
+}
+func (m *PreferAvoidPodsEntry) XXX_Size() int {
+	return m.Size()
+}
+func (m *PreferAvoidPodsEntry) XXX_DiscardUnknown() {
+	xxx_messageInfo_PreferAvoidPodsEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo
+
+func (m *PreferredSchedulingTerm) Reset()      { *m = PreferredSchedulingTerm{} }
+func (*PreferredSchedulingTerm) ProtoMessage() {}
+func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{160}
+}
+func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PreferredSchedulingTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PreferredSchedulingTerm) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PreferredSchedulingTerm.Merge(m, src)
+}
+func (m *PreferredSchedulingTerm) XXX_Size() int {
+	return m.Size()
+}
+func (m *PreferredSchedulingTerm) XXX_DiscardUnknown() {
+	xxx_messageInfo_PreferredSchedulingTerm.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo
+
+func (m *Probe) Reset()      { *m = Probe{} }
+func (*Probe) ProtoMessage() {}
+func (*Probe) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{161}
+}
+func (m *Probe) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Probe) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Probe) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Probe.Merge(m, src)
+}
+func (m *Probe) XXX_Size() int {
+	return m.Size()
+}
+func (m *Probe) XXX_DiscardUnknown() {
+	xxx_messageInfo_Probe.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Probe proto.InternalMessageInfo
+
+func (m *ProbeHandler) Reset()      { *m = ProbeHandler{} }
+func (*ProbeHandler) ProtoMessage() {}
+func (*ProbeHandler) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{162}
+}
+func (m *ProbeHandler) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ProbeHandler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ProbeHandler) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProbeHandler.Merge(m, src)
+}
+func (m *ProbeHandler) XXX_Size() int {
+	return m.Size()
+}
+func (m *ProbeHandler) XXX_DiscardUnknown() {
+	xxx_messageInfo_ProbeHandler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProbeHandler proto.InternalMessageInfo
+
+func (m *ProjectedVolumeSource) Reset()      { *m = ProjectedVolumeSource{} }
+func (*ProjectedVolumeSource) ProtoMessage() {}
+func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{163}
+}
+func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ProjectedVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ProjectedVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProjectedVolumeSource.Merge(m, src)
+}
+func (m *ProjectedVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *ProjectedVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_ProjectedVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo
+
+func (m *QuobyteVolumeSource) Reset()      { *m = QuobyteVolumeSource{} }
+func (*QuobyteVolumeSource) ProtoMessage() {}
+func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{164}
+}
+func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *QuobyteVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *QuobyteVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_QuobyteVolumeSource.Merge(m, src)
+}
+func (m *QuobyteVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *QuobyteVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_QuobyteVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo
+
+func (m *RBDPersistentVolumeSource) Reset()      { *m = RBDPersistentVolumeSource{} }
+func (*RBDPersistentVolumeSource) ProtoMessage() {}
+func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{165}
+}
+func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RBDPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RBDPersistentVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RBDPersistentVolumeSource.Merge(m, src)
+}
+func (m *RBDPersistentVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *RBDPersistentVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_RBDPersistentVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo
+
+func (m *RBDVolumeSource) Reset()      { *m = RBDVolumeSource{} }
+func (*RBDVolumeSource) ProtoMessage() {}
+func (*RBDVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{166}
+}
+func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RBDVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RBDVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RBDVolumeSource.Merge(m, src)
+}
+func (m *RBDVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *RBDVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_RBDVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo
+
+func (m *RangeAllocation) Reset()      { *m = RangeAllocation{} }
+func (*RangeAllocation) ProtoMessage() {}
+func (*RangeAllocation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{167}
+}
+func (m *RangeAllocation) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RangeAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *RangeAllocation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RangeAllocation.Merge(m, src)
+}
+func (m *RangeAllocation) XXX_Size() int {
+	return m.Size()
+}
+func (m *RangeAllocation) XXX_DiscardUnknown() {
+	xxx_messageInfo_RangeAllocation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo
+
+func (m *ReplicationController) Reset()      { *m = ReplicationController{} }
+func (*ReplicationController) ProtoMessage() {}
+func (*ReplicationController) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{168}
+}
+func (m *ReplicationController) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ReplicationController) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ReplicationController) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReplicationController.Merge(m, src)
+}
+func (m *ReplicationController) XXX_Size() int {
+	return m.Size()
+}
+func (m *ReplicationController) XXX_DiscardUnknown() {
+	xxx_messageInfo_ReplicationController.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicationController proto.InternalMessageInfo
+
+func (m *ReplicationControllerCondition) Reset()      { *m = ReplicationControllerCondition{} }
+func (*ReplicationControllerCondition) ProtoMessage() {}
+func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{169}
+}
+func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ReplicationControllerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ReplicationControllerCondition) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReplicationControllerCondition.Merge(m, src)
+}
+func (m *ReplicationControllerCondition) XXX_Size() int {
+	return m.Size()
+}
+func (m *ReplicationControllerCondition) XXX_DiscardUnknown() {
+	xxx_messageInfo_ReplicationControllerCondition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo
+
+func (m *ReplicationControllerList) Reset()      { *m = ReplicationControllerList{} }
+func (*ReplicationControllerList) ProtoMessage() {}
+func (*ReplicationControllerList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{170}
+}
+func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ReplicationControllerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ReplicationControllerList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReplicationControllerList.Merge(m, src)
+}
+func (m *ReplicationControllerList) XXX_Size() int {
+	return m.Size()
+}
+func (m *ReplicationControllerList) XXX_DiscardUnknown() {
+	xxx_messageInfo_ReplicationControllerList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo
+
+func (m *ReplicationControllerSpec) Reset()      { *m = ReplicationControllerSpec{} }
+func (*ReplicationControllerSpec) ProtoMessage() {}
+func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{171}
+}
+func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ReplicationControllerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ReplicationControllerSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReplicationControllerSpec.Merge(m, src)
+}
+func (m *ReplicationControllerSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *ReplicationControllerSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_ReplicationControllerSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo
+
+func (m *ReplicationControllerStatus) Reset()      { *m = ReplicationControllerStatus{} }
+func (*ReplicationControllerStatus) ProtoMessage() {}
+func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{172}
+}
+func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ReplicationControllerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ReplicationControllerStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReplicationControllerStatus.Merge(m, src)
+}
+func (m *ReplicationControllerStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ReplicationControllerStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ReplicationControllerStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo
+
+func (m *ResourceClaim) Reset()      { *m = ResourceClaim{} }
+func (*ResourceClaim) ProtoMessage() {}
+func (*ResourceClaim) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{173}
+}
+func (m *ResourceClaim) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceClaim) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceClaim.Merge(m, src)
+}
+func (m *ResourceClaim) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceClaim) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceClaim.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo
+
+func (m *ResourceFieldSelector) Reset()      { *m = ResourceFieldSelector{} }
+func (*ResourceFieldSelector) ProtoMessage() {}
+func (*ResourceFieldSelector) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{174}
+}
+func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceFieldSelector) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceFieldSelector.Merge(m, src)
+}
+func (m *ResourceFieldSelector) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceFieldSelector) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceFieldSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo
+
+func (m *ResourceHealth) Reset()      { *m = ResourceHealth{} }
+func (*ResourceHealth) ProtoMessage() {}
+func (*ResourceHealth) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{175}
+}
+func (m *ResourceHealth) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceHealth) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceHealth.Merge(m, src)
+}
+func (m *ResourceHealth) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceHealth) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceHealth.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceHealth proto.InternalMessageInfo
+
+func (m *ResourceQuota) Reset()      { *m = ResourceQuota{} }
+func (*ResourceQuota) ProtoMessage() {}
+func (*ResourceQuota) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{176}
+}
+func (m *ResourceQuota) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceQuota) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceQuota.Merge(m, src)
+}
+func (m *ResourceQuota) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceQuota) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceQuota.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo
+
+func (m *ResourceQuotaList) Reset()      { *m = ResourceQuotaList{} }
+func (*ResourceQuotaList) ProtoMessage() {}
+func (*ResourceQuotaList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{177}
+}
+func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceQuotaList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceQuotaList.Merge(m, src)
+}
+func (m *ResourceQuotaList) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceQuotaList) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceQuotaList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo
+
+func (m *ResourceQuotaSpec) Reset()      { *m = ResourceQuotaSpec{} }
+func (*ResourceQuotaSpec) ProtoMessage() {}
+func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{178}
+}
+func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceQuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceQuotaSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceQuotaSpec.Merge(m, src)
+}
+func (m *ResourceQuotaSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceQuotaSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceQuotaSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo
+
+func (m *ResourceQuotaStatus) Reset()      { *m = ResourceQuotaStatus{} }
+func (*ResourceQuotaStatus) ProtoMessage() {}
+func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{179}
+}
+func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceQuotaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceQuotaStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceQuotaStatus.Merge(m, src)
+}
+func (m *ResourceQuotaStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceQuotaStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceQuotaStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo
+
+func (m *ResourceRequirements) Reset()      { *m = ResourceRequirements{} }
+func (*ResourceRequirements) ProtoMessage() {}
+func (*ResourceRequirements) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{180}
+}
+func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceRequirements) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceRequirements.Merge(m, src)
+}
+func (m *ResourceRequirements) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceRequirements) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceRequirements.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo
+
+func (m *ResourceStatus) Reset()      { *m = ResourceStatus{} }
+func (*ResourceStatus) ProtoMessage() {}
+func (*ResourceStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{181}
+}
+func (m *ResourceStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResourceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ResourceStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceStatus.Merge(m, src)
+}
+func (m *ResourceStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResourceStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo
+
+func (m *SELinuxOptions) Reset()      { *m = SELinuxOptions{} }
+func (*SELinuxOptions) ProtoMessage() {}
+func (*SELinuxOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{182}
+}
+func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SELinuxOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SELinuxOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SELinuxOptions.Merge(m, src)
+}
+func (m *SELinuxOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *SELinuxOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_SELinuxOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo
+
+func (m *ScaleIOPersistentVolumeSource) Reset()      { *m = ScaleIOPersistentVolumeSource{} }
+func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
+func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{183}
+}
+func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ScaleIOPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ScaleIOPersistentVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ScaleIOPersistentVolumeSource.Merge(m, src)
+}
+func (m *ScaleIOPersistentVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *ScaleIOPersistentVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_ScaleIOPersistentVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo
+
+func (m *ScaleIOVolumeSource) Reset()      { *m = ScaleIOVolumeSource{} }
+func (*ScaleIOVolumeSource) ProtoMessage() {}
+func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{184}
+}
+func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ScaleIOVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ScaleIOVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ScaleIOVolumeSource.Merge(m, src)
+}
+func (m *ScaleIOVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *ScaleIOVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_ScaleIOVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo
+
+func (m *ScopeSelector) Reset()      { *m = ScopeSelector{} }
+func (*ScopeSelector) ProtoMessage() {}
+func (*ScopeSelector) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{185}
+}
+func (m *ScopeSelector) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ScopeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ScopeSelector) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ScopeSelector.Merge(m, src)
+}
+func (m *ScopeSelector) XXX_Size() int {
+	return m.Size()
+}
+func (m *ScopeSelector) XXX_DiscardUnknown() {
+	xxx_messageInfo_ScopeSelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo
+
+func (m *ScopedResourceSelectorRequirement) Reset()      { *m = ScopedResourceSelectorRequirement{} }
+func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
+func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{186}
+}
+func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ScopedResourceSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ScopedResourceSelectorRequirement) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ScopedResourceSelectorRequirement.Merge(m, src)
+}
+func (m *ScopedResourceSelectorRequirement) XXX_Size() int {
+	return m.Size()
+}
+func (m *ScopedResourceSelectorRequirement) XXX_DiscardUnknown() {
+	xxx_messageInfo_ScopedResourceSelectorRequirement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo
+
+func (m *SeccompProfile) Reset()      { *m = SeccompProfile{} }
+func (*SeccompProfile) ProtoMessage() {}
+func (*SeccompProfile) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{187}
+}
+func (m *SeccompProfile) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SeccompProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SeccompProfile) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SeccompProfile.Merge(m, src)
+}
+func (m *SeccompProfile) XXX_Size() int {
+	return m.Size()
+}
+func (m *SeccompProfile) XXX_DiscardUnknown() {
+	xxx_messageInfo_SeccompProfile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo
+
+func (m *Secret) Reset()      { *m = Secret{} }
+func (*Secret) ProtoMessage() {}
+func (*Secret) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{188}
+}
+func (m *Secret) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Secret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Secret) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Secret.Merge(m, src)
+}
+func (m *Secret) XXX_Size() int {
+	return m.Size()
+}
+func (m *Secret) XXX_DiscardUnknown() {
+	xxx_messageInfo_Secret.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Secret proto.InternalMessageInfo
+
+func (m *SecretEnvSource) Reset()      { *m = SecretEnvSource{} }
+func (*SecretEnvSource) ProtoMessage() {}
+func (*SecretEnvSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{189}
+}
+func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecretEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecretEnvSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecretEnvSource.Merge(m, src)
+}
+func (m *SecretEnvSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecretEnvSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecretEnvSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo
+
+func (m *SecretKeySelector) Reset()      { *m = SecretKeySelector{} }
+func (*SecretKeySelector) ProtoMessage() {}
+func (*SecretKeySelector) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{190}
+}
+func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecretKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecretKeySelector) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecretKeySelector.Merge(m, src)
+}
+func (m *SecretKeySelector) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecretKeySelector) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecretKeySelector.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo
+
+func (m *SecretList) Reset()      { *m = SecretList{} }
+func (*SecretList) ProtoMessage() {}
+func (*SecretList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{191}
+}
+func (m *SecretList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecretList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecretList.Merge(m, src)
+}
+func (m *SecretList) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecretList) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecretList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretList proto.InternalMessageInfo
+
+func (m *SecretProjection) Reset()      { *m = SecretProjection{} }
+func (*SecretProjection) ProtoMessage() {}
+func (*SecretProjection) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{192}
+}
+func (m *SecretProjection) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecretProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecretProjection) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecretProjection.Merge(m, src)
+}
+func (m *SecretProjection) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecretProjection) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecretProjection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretProjection proto.InternalMessageInfo
+
+func (m *SecretReference) Reset()      { *m = SecretReference{} }
+func (*SecretReference) ProtoMessage() {}
+func (*SecretReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{193}
+}
+func (m *SecretReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecretReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecretReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecretReference.Merge(m, src)
+}
+func (m *SecretReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecretReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecretReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretReference proto.InternalMessageInfo
+
+func (m *SecretVolumeSource) Reset()      { *m = SecretVolumeSource{} }
+func (*SecretVolumeSource) ProtoMessage() {}
+func (*SecretVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{194}
+}
+func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecretVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecretVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecretVolumeSource.Merge(m, src)
+}
+func (m *SecretVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecretVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecretVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo
+
+func (m *SecurityContext) Reset()      { *m = SecurityContext{} }
+func (*SecurityContext) ProtoMessage() {}
+func (*SecurityContext) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{195}
+}
+func (m *SecurityContext) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SecurityContext) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecurityContext.Merge(m, src)
+}
+func (m *SecurityContext) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecurityContext) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecurityContext.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecurityContext proto.InternalMessageInfo
+
+func (m *SerializedReference) Reset()      { *m = SerializedReference{} }
+func (*SerializedReference) ProtoMessage() {}
+func (*SerializedReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{196}
+}
+func (m *SerializedReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SerializedReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SerializedReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SerializedReference.Merge(m, src)
+}
+func (m *SerializedReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *SerializedReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_SerializedReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SerializedReference proto.InternalMessageInfo
+
+func (m *Service) Reset()      { *m = Service{} }
+func (*Service) ProtoMessage() {}
+func (*Service) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{197}
+}
+func (m *Service) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Service) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Service.Merge(m, src)
+}
+func (m *Service) XXX_Size() int {
+	return m.Size()
+}
+func (m *Service) XXX_DiscardUnknown() {
+	xxx_messageInfo_Service.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Service proto.InternalMessageInfo
+
+func (m *ServiceAccount) Reset()      { *m = ServiceAccount{} }
+func (*ServiceAccount) ProtoMessage() {}
+func (*ServiceAccount) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{198}
+}
+func (m *ServiceAccount) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServiceAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ServiceAccount) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceAccount.Merge(m, src)
+}
+func (m *ServiceAccount) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServiceAccount) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceAccount.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo
+
+func (m *ServiceAccountList) Reset()      { *m = ServiceAccountList{} }
+func (*ServiceAccountList) ProtoMessage() {}
+func (*ServiceAccountList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{199}
+}
+func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServiceAccountList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ServiceAccountList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceAccountList.Merge(m, src)
+}
+func (m *ServiceAccountList) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServiceAccountList) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceAccountList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
+
+func (m *ServiceAccountTokenProjection) Reset()      { *m = ServiceAccountTokenProjection{} }
+func (*ServiceAccountTokenProjection) ProtoMessage() {}
+func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{200}
+}
+func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServiceAccountTokenProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ServiceAccountTokenProjection) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceAccountTokenProjection.Merge(m, src)
+}
+func (m *ServiceAccountTokenProjection) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServiceAccountTokenProjection) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceAccountTokenProjection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
+
+func (m *ServiceList) Reset()      { *m = ServiceList{} }
+func (*ServiceList) ProtoMessage() {}
+func (*ServiceList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{201}
+}
+func (m *ServiceList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ServiceList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceList.Merge(m, src)
+}
+func (m *ServiceList) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServiceList) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceList proto.InternalMessageInfo
+
+func (m *ServicePort) Reset()      { *m = ServicePort{} }
+func (*ServicePort) ProtoMessage() {}
+func (*ServicePort) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{202}
+}
+func (m *ServicePort) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServicePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ServicePort) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServicePort.Merge(m, src)
+}
+func (m *ServicePort) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServicePort) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServicePort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServicePort proto.InternalMessageInfo
+
+func (m *ServiceProxyOptions) Reset()      { *m = ServiceProxyOptions{} }
+func (*ServiceProxyOptions) ProtoMessage() {}
+func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{203}
+}
+func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServiceProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ServiceProxyOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceProxyOptions.Merge(m, src)
+}
+func (m *ServiceProxyOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServiceProxyOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceProxyOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
+
+func (m *ServiceSpec) Reset()      { *m = ServiceSpec{} }
+func (*ServiceSpec) ProtoMessage() {}
+func (*ServiceSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{204}
+}
+func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ServiceSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceSpec.Merge(m, src)
+}
+func (m *ServiceSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServiceSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
+
+func (m *ServiceStatus) Reset()      { *m = ServiceStatus{} }
+func (*ServiceStatus) ProtoMessage() {}
+func (*ServiceStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{205}
+}
+func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ServiceStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceStatus.Merge(m, src)
+}
+func (m *ServiceStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ServiceStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
+
+func (m *SessionAffinityConfig) Reset()      { *m = SessionAffinityConfig{} }
+func (*SessionAffinityConfig) ProtoMessage() {}
+func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{206}
+}
+func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SessionAffinityConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SessionAffinityConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SessionAffinityConfig.Merge(m, src)
+}
+func (m *SessionAffinityConfig) XXX_Size() int {
+	return m.Size()
+}
+func (m *SessionAffinityConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_SessionAffinityConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
+
+func (m *SleepAction) Reset()      { *m = SleepAction{} }
+func (*SleepAction) ProtoMessage() {}
+func (*SleepAction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{207}
+}
+func (m *SleepAction) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SleepAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SleepAction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SleepAction.Merge(m, src)
+}
+func (m *SleepAction) XXX_Size() int {
+	return m.Size()
+}
+func (m *SleepAction) XXX_DiscardUnknown() {
+	xxx_messageInfo_SleepAction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SleepAction proto.InternalMessageInfo
+
+func (m *StorageOSPersistentVolumeSource) Reset()      { *m = StorageOSPersistentVolumeSource{} }
+func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
+func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{208}
+}
+func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StorageOSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *StorageOSPersistentVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StorageOSPersistentVolumeSource.Merge(m, src)
+}
+func (m *StorageOSPersistentVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *StorageOSPersistentVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_StorageOSPersistentVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
+
+func (m *StorageOSVolumeSource) Reset()      { *m = StorageOSVolumeSource{} }
+func (*StorageOSVolumeSource) ProtoMessage() {}
+func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{209}
+}
+func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StorageOSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *StorageOSVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StorageOSVolumeSource.Merge(m, src)
+}
+func (m *StorageOSVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *StorageOSVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_StorageOSVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
+
+func (m *Sysctl) Reset()      { *m = Sysctl{} }
+func (*Sysctl) ProtoMessage() {}
+func (*Sysctl) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{210}
+}
+func (m *Sysctl) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Sysctl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Sysctl) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Sysctl.Merge(m, src)
+}
+func (m *Sysctl) XXX_Size() int {
+	return m.Size()
+}
+func (m *Sysctl) XXX_DiscardUnknown() {
+	xxx_messageInfo_Sysctl.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Sysctl proto.InternalMessageInfo
+
+func (m *TCPSocketAction) Reset()      { *m = TCPSocketAction{} }
+func (*TCPSocketAction) ProtoMessage() {}
+func (*TCPSocketAction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{211}
+}
+func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TCPSocketAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TCPSocketAction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TCPSocketAction.Merge(m, src)
+}
+func (m *TCPSocketAction) XXX_Size() int {
+	return m.Size()
+}
+func (m *TCPSocketAction) XXX_DiscardUnknown() {
+	xxx_messageInfo_TCPSocketAction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
+
+func (m *Taint) Reset()      { *m = Taint{} }
+func (*Taint) ProtoMessage() {}
+func (*Taint) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{212}
+}
+func (m *Taint) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Taint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Taint) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Taint.Merge(m, src)
+}
+func (m *Taint) XXX_Size() int {
+	return m.Size()
+}
+func (m *Taint) XXX_DiscardUnknown() {
+	xxx_messageInfo_Taint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Taint proto.InternalMessageInfo
+
+func (m *Toleration) Reset()      { *m = Toleration{} }
+func (*Toleration) ProtoMessage() {}
+func (*Toleration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{213}
+}
+func (m *Toleration) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Toleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Toleration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Toleration.Merge(m, src)
+}
+func (m *Toleration) XXX_Size() int {
+	return m.Size()
+}
+func (m *Toleration) XXX_DiscardUnknown() {
+	xxx_messageInfo_Toleration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Toleration proto.InternalMessageInfo
+
+func (m *TopologySelectorLabelRequirement) Reset()      { *m = TopologySelectorLabelRequirement{} }
+func (*TopologySelectorLabelRequirement) ProtoMessage() {}
+func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{214}
+}
+func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TopologySelectorLabelRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TopologySelectorLabelRequirement) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TopologySelectorLabelRequirement.Merge(m, src)
+}
+func (m *TopologySelectorLabelRequirement) XXX_Size() int {
+	return m.Size()
+}
+func (m *TopologySelectorLabelRequirement) XXX_DiscardUnknown() {
+	xxx_messageInfo_TopologySelectorLabelRequirement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo
+
+func (m *TopologySelectorTerm) Reset()      { *m = TopologySelectorTerm{} }
+func (*TopologySelectorTerm) ProtoMessage() {}
+func (*TopologySelectorTerm) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{215}
+}
+func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TopologySelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TopologySelectorTerm) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TopologySelectorTerm.Merge(m, src)
+}
+func (m *TopologySelectorTerm) XXX_Size() int {
+	return m.Size()
+}
+func (m *TopologySelectorTerm) XXX_DiscardUnknown() {
+	xxx_messageInfo_TopologySelectorTerm.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo
+
+func (m *TopologySpreadConstraint) Reset()      { *m = TopologySpreadConstraint{} }
+func (*TopologySpreadConstraint) ProtoMessage() {}
+func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{216}
+}
+func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TopologySpreadConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TopologySpreadConstraint) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TopologySpreadConstraint.Merge(m, src)
+}
+func (m *TopologySpreadConstraint) XXX_Size() int {
+	return m.Size()
+}
+func (m *TopologySpreadConstraint) XXX_DiscardUnknown() {
+	xxx_messageInfo_TopologySpreadConstraint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo
+
+func (m *TypedLocalObjectReference) Reset()      { *m = TypedLocalObjectReference{} }
+func (*TypedLocalObjectReference) ProtoMessage() {}
+func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{217}
+}
+func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TypedLocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TypedLocalObjectReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TypedLocalObjectReference.Merge(m, src)
+}
+func (m *TypedLocalObjectReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *TypedLocalObjectReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_TypedLocalObjectReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
+
+func (m *TypedObjectReference) Reset()      { *m = TypedObjectReference{} }
+func (*TypedObjectReference) ProtoMessage() {}
+func (*TypedObjectReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{218}
+}
+func (m *TypedObjectReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TypedObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *TypedObjectReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TypedObjectReference.Merge(m, src)
+}
+func (m *TypedObjectReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *TypedObjectReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_TypedObjectReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TypedObjectReference proto.InternalMessageInfo
+
+func (m *Volume) Reset()      { *m = Volume{} }
+func (*Volume) ProtoMessage() {}
+func (*Volume) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{219}
+}
+func (m *Volume) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Volume) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Volume.Merge(m, src)
+}
+func (m *Volume) XXX_Size() int {
+	return m.Size()
+}
+func (m *Volume) XXX_DiscardUnknown() {
+	xxx_messageInfo_Volume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Volume proto.InternalMessageInfo
+
+func (m *VolumeDevice) Reset()      { *m = VolumeDevice{} }
+func (*VolumeDevice) ProtoMessage() {}
+func (*VolumeDevice) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{220}
+}
+func (m *VolumeDevice) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *VolumeDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *VolumeDevice) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeDevice.Merge(m, src)
+}
+func (m *VolumeDevice) XXX_Size() int {
+	return m.Size()
+}
+func (m *VolumeDevice) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeDevice.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo
+
+func (m *VolumeMount) Reset()      { *m = VolumeMount{} }
+func (*VolumeMount) ProtoMessage() {}
+func (*VolumeMount) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{221}
+}
+func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *VolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *VolumeMount) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeMount.Merge(m, src)
+}
+func (m *VolumeMount) XXX_Size() int {
+	return m.Size()
+}
+func (m *VolumeMount) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeMount.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
+
+func (m *VolumeMountStatus) Reset()      { *m = VolumeMountStatus{} }
+func (*VolumeMountStatus) ProtoMessage() {}
+func (*VolumeMountStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{222}
+}
+func (m *VolumeMountStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *VolumeMountStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *VolumeMountStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeMountStatus.Merge(m, src)
+}
+func (m *VolumeMountStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *VolumeMountStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeMountStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeMountStatus proto.InternalMessageInfo
+
+func (m *VolumeNodeAffinity) Reset()      { *m = VolumeNodeAffinity{} }
+func (*VolumeNodeAffinity) ProtoMessage() {}
+func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{223}
+}
+func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *VolumeNodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *VolumeNodeAffinity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeNodeAffinity.Merge(m, src)
+}
+func (m *VolumeNodeAffinity) XXX_Size() int {
+	return m.Size()
+}
+func (m *VolumeNodeAffinity) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeNodeAffinity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo
+
+func (m *VolumeProjection) Reset()      { *m = VolumeProjection{} }
+func (*VolumeProjection) ProtoMessage() {}
+func (*VolumeProjection) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{224}
+}
+func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *VolumeProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *VolumeProjection) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeProjection.Merge(m, src)
+}
+func (m *VolumeProjection) XXX_Size() int {
+	return m.Size()
+}
+func (m *VolumeProjection) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeProjection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
+
+func (m *VolumeResourceRequirements) Reset()      { *m = VolumeResourceRequirements{} }
+func (*VolumeResourceRequirements) ProtoMessage() {}
+func (*VolumeResourceRequirements) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{225}
+}
+func (m *VolumeResourceRequirements) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *VolumeResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *VolumeResourceRequirements) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeResourceRequirements.Merge(m, src)
+}
+func (m *VolumeResourceRequirements) XXX_Size() int {
+	return m.Size()
+}
+func (m *VolumeResourceRequirements) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeResourceRequirements.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeResourceRequirements proto.InternalMessageInfo
+
+func (m *VolumeSource) Reset()      { *m = VolumeSource{} }
+func (*VolumeSource) ProtoMessage() {}
+func (*VolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{226}
+}
+func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *VolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VolumeSource.Merge(m, src)
+}
+func (m *VolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *VolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_VolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
+
+func (m *VsphereVirtualDiskVolumeSource) Reset()      { *m = VsphereVirtualDiskVolumeSource{} }
+func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
+func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{227}
+}
+func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *VsphereVirtualDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *VsphereVirtualDiskVolumeSource) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_VsphereVirtualDiskVolumeSource.Merge(m, src)
+}
+func (m *VsphereVirtualDiskVolumeSource) XXX_Size() int {
+	return m.Size()
+}
+func (m *VsphereVirtualDiskVolumeSource) XXX_DiscardUnknown() {
+	xxx_messageInfo_VsphereVirtualDiskVolumeSource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
+
+func (m *WeightedPodAffinityTerm) Reset()      { *m = WeightedPodAffinityTerm{} }
+func (*WeightedPodAffinityTerm) ProtoMessage() {}
+func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{228}
+}
+func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WeightedPodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *WeightedPodAffinityTerm) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WeightedPodAffinityTerm.Merge(m, src)
+}
+func (m *WeightedPodAffinityTerm) XXX_Size() int {
+	return m.Size()
+}
+func (m *WeightedPodAffinityTerm) XXX_DiscardUnknown() {
+	xxx_messageInfo_WeightedPodAffinityTerm.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
+
+func (m *WindowsSecurityContextOptions) Reset()      { *m = WindowsSecurityContextOptions{} }
+func (*WindowsSecurityContextOptions) ProtoMessage() {}
+func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c07b07c062484ab, []int{229}
+}
+func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WindowsSecurityContextOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *WindowsSecurityContextOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WindowsSecurityContextOptions.Merge(m, src)
+}
+func (m *WindowsSecurityContextOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *WindowsSecurityContextOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_WindowsSecurityContextOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WindowsSecurityContextOptions proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource")
+	proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity")
+	proto.RegisterType((*AppArmorProfile)(nil), "k8s.io.api.core.v1.AppArmorProfile")
+	proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume")
+	proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods")
+	proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource")
+	proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource")
+	proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource")
+	proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding")
+	proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource.VolumeAttributesEntry")
+	proto.RegisterType((*CSIVolumeSource)(nil), "k8s.io.api.core.v1.CSIVolumeSource")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIVolumeSource.VolumeAttributesEntry")
+	proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities")
+	proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource")
+	proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource")
+	proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource")
+	proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource")
+	proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig")
+	proto.RegisterType((*ClusterTrustBundleProjection)(nil), "k8s.io.api.core.v1.ClusterTrustBundleProjection")
+	proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition")
+	proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus")
+	proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList")
+	proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap")
+	proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.ConfigMap.BinaryDataEntry")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ConfigMap.DataEntry")
+	proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource")
+	proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector")
+	proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList")
+	proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource")
+	proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection")
+	proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource")
+	proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container")
+	proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage")
+	proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort")
+	proto.RegisterType((*ContainerResizePolicy)(nil), "k8s.io.api.core.v1.ContainerResizePolicy")
+	proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState")
+	proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning")
+	proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated")
+	proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting")
+	proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ContainerStatus.AllocatedResourcesEntry")
+	proto.RegisterType((*ContainerUser)(nil), "k8s.io.api.core.v1.ContainerUser")
+	proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint")
+	proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection")
+	proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile")
+	proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource")
+	proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource")
+	proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress")
+	proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort")
+	proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset")
+	proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints")
+	proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList")
+	proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource")
+	proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar")
+	proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource")
+	proto.RegisterType((*EphemeralContainer)(nil), "k8s.io.api.core.v1.EphemeralContainer")
+	proto.RegisterType((*EphemeralContainerCommon)(nil), "k8s.io.api.core.v1.EphemeralContainerCommon")
+	proto.RegisterType((*EphemeralVolumeSource)(nil), "k8s.io.api.core.v1.EphemeralVolumeSource")
+	proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event")
+	proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList")
+	proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries")
+	proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource")
+	proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction")
+	proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource")
+	proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource.OptionsEntry")
+	proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexVolumeSource.OptionsEntry")
+	proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource")
+	proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource")
+	proto.RegisterType((*GRPCAction)(nil), "k8s.io.api.core.v1.GRPCAction")
+	proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource")
+	proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource")
+	proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource")
+	proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction")
+	proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader")
+	proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias")
+	proto.RegisterType((*HostIP)(nil), "k8s.io.api.core.v1.HostIP")
+	proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource")
+	proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource")
+	proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource")
+	proto.RegisterType((*ImageVolumeSource)(nil), "k8s.io.api.core.v1.ImageVolumeSource")
+	proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath")
+	proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle")
+	proto.RegisterType((*LifecycleHandler)(nil), "k8s.io.api.core.v1.LifecycleHandler")
+	proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange")
+	proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultEntry")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultRequestEntry")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxEntry")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxLimitRequestRatioEntry")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MinEntry")
+	proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList")
+	proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec")
+	proto.RegisterType((*LinuxContainerUser)(nil), "k8s.io.api.core.v1.LinuxContainerUser")
+	proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List")
+	proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress")
+	proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus")
+	proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference")
+	proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource")
+	proto.RegisterType((*ModifyVolumeStatus)(nil), "k8s.io.api.core.v1.ModifyVolumeStatus")
+	proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource")
+	proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace")
+	proto.RegisterType((*NamespaceCondition)(nil), "k8s.io.api.core.v1.NamespaceCondition")
+	proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList")
+	proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec")
+	proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus")
+	proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node")
+	proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress")
+	proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity")
+	proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition")
+	proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource")
+	proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus")
+	proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints")
+	proto.RegisterType((*NodeFeatures)(nil), "k8s.io.api.core.v1.NodeFeatures")
+	proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList")
+	proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions")
+	proto.RegisterType((*NodeRuntimeHandler)(nil), "k8s.io.api.core.v1.NodeRuntimeHandler")
+	proto.RegisterType((*NodeRuntimeHandlerFeatures)(nil), "k8s.io.api.core.v1.NodeRuntimeHandlerFeatures")
+	proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector")
+	proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement")
+	proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm")
+	proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec")
+	proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.AllocatableEntry")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.CapacityEntry")
+	proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo")
+	proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector")
+	proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference")
+	proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume")
+	proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim")
+	proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition")
+	proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList")
+	proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec")
+	proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus")
+	proto.RegisterMapType((map[ResourceName]ClaimResourceStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.AllocatedResourceStatusesEntry")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.AllocatedResourcesEntry")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.CapacityEntry")
+	proto.RegisterType((*PersistentVolumeClaimTemplate)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimTemplate")
+	proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource")
+	proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList")
+	proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource")
+	proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec.CapacityEntry")
+	proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus")
+	proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource")
+	proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod")
+	proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity")
+	proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm")
+	proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity")
+	proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions")
+	proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition")
+	proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig")
+	proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption")
+	proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions")
+	proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP")
+	proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList")
+	proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions")
+	proto.RegisterType((*PodOS)(nil), "k8s.io.api.core.v1.PodOS")
+	proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions")
+	proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions")
+	proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate")
+	proto.RegisterType((*PodResourceClaim)(nil), "k8s.io.api.core.v1.PodResourceClaim")
+	proto.RegisterType((*PodResourceClaimStatus)(nil), "k8s.io.api.core.v1.PodResourceClaimStatus")
+	proto.RegisterType((*PodSchedulingGate)(nil), "k8s.io.api.core.v1.PodSchedulingGate")
+	proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext")
+	proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature")
+	proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.PodSpec.NodeSelectorEntry")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PodSpec.OverheadEntry")
+	proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus")
+	proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult")
+	proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate")
+	proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList")
+	proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec")
+	proto.RegisterType((*PortStatus)(nil), "k8s.io.api.core.v1.PortStatus")
+	proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource")
+	proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions")
+	proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry")
+	proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm")
+	proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe")
+	proto.RegisterType((*ProbeHandler)(nil), "k8s.io.api.core.v1.ProbeHandler")
+	proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource")
+	proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource")
+	proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource")
+	proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource")
+	proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation")
+	proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController")
+	proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition")
+	proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList")
+	proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec.SelectorEntry")
+	proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus")
+	proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.core.v1.ResourceClaim")
+	proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector")
+	proto.RegisterType((*ResourceHealth)(nil), "k8s.io.api.core.v1.ResourceHealth")
+	proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota")
+	proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList")
+	proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec.HardEntry")
+	proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.HardEntry")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.UsedEntry")
+	proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.LimitsEntry")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.RequestsEntry")
+	proto.RegisterType((*ResourceStatus)(nil), "k8s.io.api.core.v1.ResourceStatus")
+	proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions")
+	proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource")
+	proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource")
+	proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector")
+	proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement")
+	proto.RegisterType((*SeccompProfile)(nil), "k8s.io.api.core.v1.SeccompProfile")
+	proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret")
+	proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.Secret.DataEntry")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.Secret.StringDataEntry")
+	proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource")
+	proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector")
+	proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList")
+	proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection")
+	proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference")
+	proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource")
+	proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext")
+	proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference")
+	proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service")
+	proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount")
+	proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList")
+	proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection")
+	proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList")
+	proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort")
+	proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions")
+	proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ServiceSpec.SelectorEntry")
+	proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus")
+	proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig")
+	proto.RegisterType((*SleepAction)(nil), "k8s.io.api.core.v1.SleepAction")
+	proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource")
+	proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource")
+	proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl")
+	proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction")
+	proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint")
+	proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration")
+	proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement")
+	proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm")
+	proto.RegisterType((*TopologySpreadConstraint)(nil), "k8s.io.api.core.v1.TopologySpreadConstraint")
+	proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference")
+	proto.RegisterType((*TypedObjectReference)(nil), "k8s.io.api.core.v1.TypedObjectReference")
+	proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume")
+	proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice")
+	proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount")
+	proto.RegisterType((*VolumeMountStatus)(nil), "k8s.io.api.core.v1.VolumeMountStatus")
+	proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity")
+	proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection")
+	proto.RegisterType((*VolumeResourceRequirements)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements.LimitsEntry")
+	proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.VolumeResourceRequirements.RequestsEntry")
+	proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource")
+	proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource")
+	proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm")
+	proto.RegisterType((*WindowsSecurityContextOptions)(nil), "k8s.io.api.core.v1.WindowsSecurityContextOptions")
+}
+
+func init() {
+	proto.RegisterFile("k8s.io/api/core/v1/generated.proto", fileDescriptor_6c07b07c062484ab)
+}
+
+var fileDescriptor_6c07b07c062484ab = []byte{
+	// 16114 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x90, 0x64, 0xd9,
+	0x59, 0x28, 0xa6, 0x9b, 0x59, 0xeb, 0x57, 0xfb, 0xa9, 0x5e, 0xaa, 0x6b, 0xba, 0x3b, 0x7b, 0xee,
+	0xcc, 0xf4, 0xf4, 0x6c, 0xd5, 0xea, 0x59, 0x34, 0xad, 0x99, 0xd1, 0x30, 0xb5, 0x76, 0xd7, 0x74,
+	0x57, 0x75, 0xce, 0xc9, 0xaa, 0x6e, 0x69, 0x34, 0x12, 0xba, 0x9d, 0x79, 0xaa, 0xea, 0xaa, 0x32,
+	0xef, 0xcd, 0xb9, 0xf7, 0x66, 0x75, 0x57, 0x5b, 0x04, 0x20, 0x8c, 0x40, 0x02, 0x47, 0x28, 0x08,
+	0x6c, 0x1c, 0x82, 0xe0, 0x07, 0x60, 0x16, 0xcb, 0x60, 0x64, 0x61, 0xc0, 0x88, 0xcd, 0x36, 0x8e,
+	0x00, 0xff, 0xc0, 0x98, 0x08, 0x4b, 0x84, 0x09, 0x17, 0x56, 0xe1, 0x08, 0x82, 0x1f, 0x06, 0x82,
+	0xf7, 0x7e, 0xbc, 0x57, 0xc1, 0x7b, 0xbc, 0x38, 0xeb, 0x3d, 0xe7, 0x2e, 0x99, 0x59, 0x3d, 0xdd,
+	0xa5, 0x91, 0x62, 0xfe, 0x65, 0x9e, 0xef, 0x3b, 0xdf, 0x39, 0xf7, 0xac, 0xdf, 0xf9, 0x56, 0xb0,
+	0xb7, 0x2f, 0x87, 0x33, 0xae, 0x7f, 0xd1, 0x69, 0xba, 0x17, 0xab, 0x7e, 0x40, 0x2e, 0xee, 0x5c,
+	0xba, 0xb8, 0x49, 0x3c, 0x12, 0x38, 0x11, 0xa9, 0xcd, 0x34, 0x03, 0x3f, 0xf2, 0x11, 0xe2, 0x38,
+	0x33, 0x4e, 0xd3, 0x9d, 0xa1, 0x38, 0x33, 0x3b, 0x97, 0xa6, 0x9f, 0xdb, 0x74, 0xa3, 0xad, 0xd6,
+	0xed, 0x99, 0xaa, 0xdf, 0xb8, 0xb8, 0xe9, 0x6f, 0xfa, 0x17, 0x19, 0xea, 0xed, 0xd6, 0x06, 0xfb,
+	0xc7, 0xfe, 0xb0, 0x5f, 0x9c, 0xc4, 0xf4, 0x8b, 0x71, 0x33, 0x0d, 0xa7, 0xba, 0xe5, 0x7a, 0x24,
+	0xd8, 0xbd, 0xd8, 0xdc, 0xde, 0x64, 0xed, 0x06, 0x24, 0xf4, 0x5b, 0x41, 0x95, 0x24, 0x1b, 0x6e,
+	0x5b, 0x2b, 0xbc, 0xd8, 0x20, 0x91, 0x93, 0xd1, 0xdd, 0xe9, 0x8b, 0x79, 0xb5, 0x82, 0x96, 0x17,
+	0xb9, 0x8d, 0x74, 0x33, 0x1f, 0xe9, 0x54, 0x21, 0xac, 0x6e, 0x91, 0x86, 0x93, 0xaa, 0xf7, 0x42,
+	0x5e, 0xbd, 0x56, 0xe4, 0xd6, 0x2f, 0xba, 0x5e, 0x14, 0x46, 0x41, 0xb2, 0x92, 0xfd, 0x2d, 0x0b,
+	0xce, 0xcd, 0xde, 0xaa, 0x2c, 0xd6, 0x9d, 0x30, 0x72, 0xab, 0x73, 0x75, 0xbf, 0xba, 0x5d, 0x89,
+	0xfc, 0x80, 0xdc, 0xf4, 0xeb, 0xad, 0x06, 0xa9, 0xb0, 0x81, 0x40, 0xcf, 0xc2, 0xc0, 0x0e, 0xfb,
+	0xbf, 0xbc, 0x30, 0x65, 0x9d, 0xb3, 0x2e, 0x0c, 0xce, 0x8d, 0xff, 0xe9, 0x5e, 0xe9, 0x43, 0xfb,
+	0x7b, 0xa5, 0x81, 0x9b, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x1e, 0xfa, 0x36, 0xc2, 0xb5, 0xdd, 0x26,
+	0x99, 0x2a, 0x30, 0xdc, 0x51, 0x81, 0xdb, 0xb7, 0x54, 0xa1, 0xa5, 0x58, 0x40, 0xd1, 0x45, 0x18,
+	0x6c, 0x3a, 0x41, 0xe4, 0x46, 0xae, 0xef, 0x4d, 0x15, 0xcf, 0x59, 0x17, 0x7a, 0xe7, 0x26, 0x04,
+	0xea, 0x60, 0x59, 0x02, 0x70, 0x8c, 0x43, 0xbb, 0x11, 0x10, 0xa7, 0x76, 0xc3, 0xab, 0xef, 0x4e,
+	0xf5, 0x9c, 0xb3, 0x2e, 0x0c, 0xc4, 0xdd, 0xc0, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x2b, 0x05, 0x18,
+	0x98, 0xdd, 0xd8, 0x70, 0x3d, 0x37, 0xda, 0x45, 0x37, 0x61, 0xd8, 0xf3, 0x6b, 0x44, 0xfe, 0x67,
+	0x5f, 0x31, 0xf4, 0xfc, 0xb9, 0x99, 0xf4, 0x52, 0x9a, 0x59, 0xd5, 0xf0, 0xe6, 0xc6, 0xf7, 0xf7,
+	0x4a, 0xc3, 0x7a, 0x09, 0x36, 0xe8, 0x20, 0x0c, 0x43, 0x4d, 0xbf, 0xa6, 0xc8, 0x16, 0x18, 0xd9,
+	0x52, 0x16, 0xd9, 0x72, 0x8c, 0x36, 0x37, 0xb6, 0xbf, 0x57, 0x1a, 0xd2, 0x0a, 0xb0, 0x4e, 0x04,
+	0xdd, 0x86, 0x31, 0xfa, 0xd7, 0x8b, 0x5c, 0x45, 0xb7, 0xc8, 0xe8, 0x3e, 0x96, 0x47, 0x57, 0x43,
+	0x9d, 0x9b, 0xdc, 0xdf, 0x2b, 0x8d, 0x25, 0x0a, 0x71, 0x92, 0xa0, 0xfd, 0x93, 0x16, 0x8c, 0xcd,
+	0x36, 0x9b, 0xb3, 0x41, 0xc3, 0x0f, 0xca, 0x81, 0xbf, 0xe1, 0xd6, 0x09, 0x7a, 0x19, 0x7a, 0x22,
+	0x3a, 0x6b, 0x7c, 0x86, 0x1f, 0x13, 0x43, 0xdb, 0x43, 0xe7, 0xea, 0x60, 0xaf, 0x34, 0x99, 0x40,
+	0x67, 0x53, 0xc9, 0x2a, 0xa0, 0x37, 0x60, 0xbc, 0xee, 0x57, 0x9d, 0xfa, 0x96, 0x1f, 0x46, 0x02,
+	0x2a, 0xa6, 0xfe, 0xd8, 0xfe, 0x5e, 0x69, 0xfc, 0x7a, 0x02, 0x86, 0x53, 0xd8, 0xf6, 0x3d, 0x18,
+	0x9d, 0x8d, 0x22, 0xa7, 0xba, 0x45, 0x6a, 0x7c, 0x41, 0xa1, 0x17, 0xa1, 0xc7, 0x73, 0x1a, 0xb2,
+	0x33, 0xe7, 0x64, 0x67, 0x56, 0x9d, 0x06, 0xed, 0xcc, 0xf8, 0xba, 0xe7, 0xbe, 0xdb, 0x12, 0x8b,
+	0x94, 0x96, 0x61, 0x86, 0x8d, 0x9e, 0x07, 0xa8, 0x91, 0x1d, 0xb7, 0x4a, 0xca, 0x4e, 0xb4, 0x25,
+	0xfa, 0x80, 0x44, 0x5d, 0x58, 0x50, 0x10, 0xac, 0x61, 0xd9, 0x77, 0x61, 0x70, 0x76, 0xc7, 0x77,
+	0x6b, 0x65, 0xbf, 0x16, 0xa2, 0x6d, 0x18, 0x6b, 0x06, 0x64, 0x83, 0x04, 0xaa, 0x68, 0xca, 0x3a,
+	0x57, 0xbc, 0x30, 0xf4, 0xfc, 0x85, 0xcc, 0xb1, 0x37, 0x51, 0x17, 0xbd, 0x28, 0xd8, 0x9d, 0x3b,
+	0x29, 0xda, 0x1b, 0x4b, 0x40, 0x71, 0x92, 0xb2, 0xfd, 0x27, 0x05, 0x38, 0x3e, 0x7b, 0xaf, 0x15,
+	0x90, 0x05, 0x37, 0xdc, 0x4e, 0x6e, 0xb8, 0x9a, 0x1b, 0x6e, 0xaf, 0xc6, 0x23, 0xa0, 0x56, 0xfa,
+	0x82, 0x28, 0xc7, 0x0a, 0x03, 0x3d, 0x07, 0xfd, 0xf4, 0xf7, 0x3a, 0x5e, 0x16, 0x9f, 0x3c, 0x29,
+	0x90, 0x87, 0x16, 0x9c, 0xc8, 0x59, 0xe0, 0x20, 0x2c, 0x71, 0xd0, 0x0a, 0x0c, 0x55, 0xd9, 0xf9,
+	0xb0, 0xb9, 0xe2, 0xd7, 0x08, 0x5b, 0x5b, 0x83, 0x73, 0xcf, 0x50, 0xf4, 0xf9, 0xb8, 0xf8, 0x60,
+	0xaf, 0x34, 0xc5, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47, 0xb6, 0xda, 0xee, 0x3d, 0x8c,
+	0x12, 0x64, 0x6c, 0xf5, 0x0b, 0xda, 0xce, 0xed, 0x65, 0x3b, 0x77, 0x38, 0x7b, 0xd7, 0xa2, 0x4b,
+	0xd0, 0xb3, 0xed, 0x7a, 0xb5, 0xa9, 0x3e, 0x46, 0xeb, 0x0c, 0x9d, 0xf3, 0x6b, 0xae, 0x57, 0x3b,
+	0xd8, 0x2b, 0x4d, 0x18, 0xdd, 0xa1, 0x85, 0x98, 0xa1, 0xda, 0xff, 0xc6, 0x82, 0x12, 0x83, 0x2d,
+	0xb9, 0x75, 0x52, 0x26, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, 0x9f, 0x07, 0x08, 0x49,
+	0x35, 0x20, 0x91, 0x36, 0xa4, 0x6a, 0x61, 0x54, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x7c, 0x0a, 0xb7,
+	0x9c, 0x80, 0xad, 0x2f, 0x31, 0xb0, 0xea, 0x7c, 0xaa, 0x48, 0x00, 0x8e, 0x71, 0x8c, 0xf3, 0xa9,
+	0xd8, 0xe9, 0x7c, 0x42, 0x1f, 0x83, 0xb1, 0xb8, 0xb1, 0xb0, 0xe9, 0x54, 0xe5, 0x00, 0xb2, 0x1d,
+	0x5c, 0x31, 0x41, 0x38, 0x89, 0x6b, 0xff, 0xb7, 0x96, 0x58, 0x3c, 0xf4, 0xab, 0xdf, 0xe7, 0xdf,
+	0x6a, 0xff, 0xae, 0x05, 0xfd, 0x73, 0xae, 0x57, 0x73, 0xbd, 0x4d, 0xf4, 0x19, 0x18, 0xa0, 0x57,
+	0x65, 0xcd, 0x89, 0x1c, 0x71, 0x0c, 0x7f, 0x58, 0xdb, 0x5b, 0xea, 0xe6, 0x9a, 0x69, 0x6e, 0x6f,
+	0xd2, 0x82, 0x70, 0x86, 0x62, 0xd3, 0xdd, 0x76, 0xe3, 0xf6, 0x67, 0x49, 0x35, 0x5a, 0x21, 0x91,
+	0x13, 0x7f, 0x4e, 0x5c, 0x86, 0x15, 0x55, 0x74, 0x0d, 0xfa, 0x22, 0x27, 0xd8, 0x24, 0x91, 0x38,
+	0x8f, 0x33, 0xcf, 0x4d, 0x5e, 0x13, 0xd3, 0x1d, 0x49, 0xbc, 0x2a, 0x89, 0x6f, 0xa9, 0x35, 0x56,
+	0x15, 0x0b, 0x12, 0xf6, 0x7f, 0xe8, 0x87, 0x53, 0xf3, 0x95, 0xe5, 0x9c, 0x75, 0x75, 0x1e, 0xfa,
+	0x6a, 0x81, 0xbb, 0x43, 0x02, 0x31, 0xce, 0x8a, 0xca, 0x02, 0x2b, 0xc5, 0x02, 0x8a, 0x2e, 0xc3,
+	0x30, 0xbf, 0x1f, 0xaf, 0x3a, 0x5e, 0x2d, 0x3e, 0x1e, 0x05, 0xf6, 0xf0, 0x4d, 0x0d, 0x86, 0x0d,
+	0xcc, 0x43, 0x2e, 0xaa, 0xf3, 0x89, 0xcd, 0x98, 0x77, 0xf7, 0x7e, 0xd1, 0x82, 0x71, 0xde, 0xcc,
+	0x6c, 0x14, 0x05, 0xee, 0xed, 0x56, 0x44, 0xc2, 0xa9, 0x5e, 0x76, 0xd2, 0xcd, 0x67, 0x8d, 0x56,
+	0xee, 0x08, 0xcc, 0xdc, 0x4c, 0x50, 0xe1, 0x87, 0xe0, 0x94, 0x68, 0x77, 0x3c, 0x09, 0xc6, 0xa9,
+	0x66, 0xd1, 0x8f, 0x58, 0x30, 0x5d, 0xf5, 0xbd, 0x28, 0xf0, 0xeb, 0x75, 0x12, 0x94, 0x5b, 0xb7,
+	0xeb, 0x6e, 0xb8, 0xc5, 0xd7, 0x29, 0x26, 0x1b, 0xec, 0x24, 0xc8, 0x99, 0x43, 0x85, 0x24, 0xe6,
+	0xf0, 0xec, 0xfe, 0x5e, 0x69, 0x7a, 0x3e, 0x97, 0x14, 0x6e, 0xd3, 0x0c, 0xda, 0x06, 0x44, 0x6f,
+	0xf6, 0x4a, 0xe4, 0x6c, 0x92, 0xb8, 0xf1, 0xfe, 0xee, 0x1b, 0x3f, 0xb1, 0xbf, 0x57, 0x42, 0xab,
+	0x29, 0x12, 0x38, 0x83, 0x2c, 0x7a, 0x17, 0x8e, 0xd1, 0xd2, 0xd4, 0xb7, 0x0e, 0x74, 0xdf, 0xdc,
+	0xd4, 0xfe, 0x5e, 0xe9, 0xd8, 0x6a, 0x06, 0x11, 0x9c, 0x49, 0x1a, 0xfd, 0x90, 0x05, 0xa7, 0xe2,
+	0xcf, 0x5f, 0xbc, 0xdb, 0x74, 0xbc, 0x5a, 0xdc, 0xf0, 0x60, 0xf7, 0x0d, 0xd3, 0x33, 0xf9, 0xd4,
+	0x7c, 0x1e, 0x25, 0x9c, 0xdf, 0x08, 0xf2, 0x60, 0x92, 0x76, 0x2d, 0xd9, 0x36, 0x74, 0xdf, 0xf6,
+	0xc9, 0xfd, 0xbd, 0xd2, 0xe4, 0x6a, 0x9a, 0x06, 0xce, 0x22, 0x3c, 0x3d, 0x0f, 0xc7, 0x33, 0x57,
+	0x27, 0x1a, 0x87, 0xe2, 0x36, 0xe1, 0x4c, 0xe0, 0x20, 0xa6, 0x3f, 0xd1, 0x31, 0xe8, 0xdd, 0x71,
+	0xea, 0x2d, 0xb1, 0x31, 0x31, 0xff, 0xf3, 0x4a, 0xe1, 0xb2, 0x65, 0xff, 0x6f, 0x45, 0x18, 0x9b,
+	0xaf, 0x2c, 0xdf, 0xd7, 0xae, 0xd7, 0xaf, 0xbd, 0x42, 0xdb, 0x6b, 0x2f, 0xbe, 0x44, 0x8b, 0xb9,
+	0x97, 0xe8, 0x0f, 0x66, 0x6c, 0xd9, 0x1e, 0xb6, 0x65, 0x3f, 0x9a, 0xb3, 0x65, 0x1f, 0xf0, 0x46,
+	0xdd, 0xc9, 0x59, 0xb5, 0xbd, 0x6c, 0x02, 0x33, 0x39, 0x24, 0xc6, 0xfb, 0x25, 0x8f, 0xda, 0x43,
+	0x2e, 0xdd, 0x07, 0x33, 0x8f, 0x55, 0x18, 0x9e, 0x77, 0x9a, 0xce, 0x6d, 0xb7, 0xee, 0x46, 0x2e,
+	0x09, 0xd1, 0x93, 0x50, 0x74, 0x6a, 0x35, 0xc6, 0xdd, 0x0d, 0xce, 0x1d, 0xdf, 0xdf, 0x2b, 0x15,
+	0x67, 0x6b, 0x94, 0xcd, 0x00, 0x85, 0xb5, 0x8b, 0x29, 0x06, 0x7a, 0x1a, 0x7a, 0x6a, 0x81, 0xdf,
+	0x9c, 0x2a, 0x30, 0x4c, 0xba, 0xcb, 0x7b, 0x16, 0x02, 0xbf, 0x99, 0x40, 0x65, 0x38, 0xf6, 0x1f,
+	0x17, 0xe0, 0xf4, 0x3c, 0x69, 0x6e, 0x2d, 0x55, 0x72, 0xee, 0x8b, 0x0b, 0x30, 0xd0, 0xf0, 0x3d,
+	0x37, 0xf2, 0x83, 0x50, 0x34, 0xcd, 0x56, 0xc4, 0x8a, 0x28, 0xc3, 0x0a, 0x8a, 0xce, 0x41, 0x4f,
+	0x33, 0x66, 0x62, 0x87, 0x25, 0x03, 0xcc, 0xd8, 0x57, 0x06, 0xa1, 0x18, 0xad, 0x90, 0x04, 0x62,
+	0xc5, 0x28, 0x8c, 0xf5, 0x90, 0x04, 0x98, 0x41, 0x62, 0x4e, 0x80, 0xf2, 0x08, 0xe2, 0x46, 0x48,
+	0x70, 0x02, 0x14, 0x82, 0x35, 0x2c, 0x54, 0x86, 0xc1, 0x30, 0x31, 0xb3, 0x5d, 0x6d, 0xcd, 0x11,
+	0xc6, 0x2a, 0xa8, 0x99, 0x8c, 0x89, 0x18, 0x37, 0x58, 0x5f, 0x47, 0x56, 0xe1, 0x1b, 0x05, 0x40,
+	0x7c, 0x08, 0xbf, 0xcb, 0x06, 0x6e, 0x3d, 0x3d, 0x70, 0xdd, 0x6f, 0x89, 0x07, 0x35, 0x7a, 0xff,
+	0xd6, 0x82, 0xd3, 0xf3, 0xae, 0x57, 0x23, 0x41, 0xce, 0x02, 0x7c, 0x38, 0x4f, 0xf9, 0xc3, 0x31,
+	0x29, 0xc6, 0x12, 0xeb, 0x79, 0x00, 0x4b, 0xcc, 0xfe, 0x47, 0x0b, 0x10, 0xff, 0xec, 0xf7, 0xdd,
+	0xc7, 0xae, 0xa7, 0x3f, 0xf6, 0x01, 0x2c, 0x0b, 0xfb, 0x3a, 0x8c, 0xce, 0xd7, 0x5d, 0xe2, 0x45,
+	0xcb, 0xe5, 0x79, 0xdf, 0xdb, 0x70, 0x37, 0xd1, 0x2b, 0x30, 0x1a, 0xb9, 0x0d, 0xe2, 0xb7, 0xa2,
+	0x0a, 0xa9, 0xfa, 0x1e, 0x7b, 0xb9, 0x5a, 0x17, 0x7a, 0xe7, 0xd0, 0xfe, 0x5e, 0x69, 0x74, 0xcd,
+	0x80, 0xe0, 0x04, 0xa6, 0xfd, 0xcb, 0xf4, 0xdc, 0xaa, 0xb7, 0xc2, 0x88, 0x04, 0x6b, 0x41, 0x2b,
+	0x8c, 0xe6, 0x5a, 0x94, 0xf7, 0x2c, 0x07, 0x3e, 0xed, 0x8e, 0xeb, 0x7b, 0xe8, 0xb4, 0xf1, 0x1c,
+	0x1f, 0x90, 0x4f, 0x71, 0xf1, 0xec, 0x9e, 0x01, 0x08, 0xdd, 0x4d, 0x8f, 0x04, 0xda, 0xf3, 0x61,
+	0x94, 0x6d, 0x15, 0x55, 0x8a, 0x35, 0x0c, 0x54, 0x87, 0x91, 0xba, 0x73, 0x9b, 0xd4, 0x2b, 0xa4,
+	0x4e, 0xaa, 0x91, 0x1f, 0x08, 0xf9, 0xc6, 0x0b, 0xdd, 0xbd, 0x03, 0xae, 0xeb, 0x55, 0xe7, 0x26,
+	0xf6, 0xf7, 0x4a, 0x23, 0x46, 0x11, 0x36, 0x89, 0xd3, 0xa3, 0xc3, 0x6f, 0xd2, 0xaf, 0x70, 0xea,
+	0xfa, 0xe3, 0xf3, 0x86, 0x28, 0xc3, 0x0a, 0xaa, 0x8e, 0x8e, 0x9e, 0xbc, 0xa3, 0xc3, 0xfe, 0x6b,
+	0xba, 0xd0, 0xfc, 0x46, 0xd3, 0xf7, 0x88, 0x17, 0xcd, 0xfb, 0x5e, 0x8d, 0x4b, 0xa6, 0x5e, 0x31,
+	0x44, 0x27, 0xe7, 0x13, 0xa2, 0x93, 0x13, 0xe9, 0x1a, 0x9a, 0xf4, 0xe4, 0xa3, 0xd0, 0x17, 0x46,
+	0x4e, 0xd4, 0x0a, 0xc5, 0xc0, 0x3d, 0x2a, 0x97, 0x5d, 0x85, 0x95, 0x1e, 0xec, 0x95, 0xc6, 0x54,
+	0x35, 0x5e, 0x84, 0x45, 0x05, 0xf4, 0x14, 0xf4, 0x37, 0x48, 0x18, 0x3a, 0x9b, 0x92, 0x6d, 0x18,
+	0x13, 0x75, 0xfb, 0x57, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x18, 0xf4, 0x92, 0x20, 0xf0, 0x03, 0xf1,
+	0x6d, 0x23, 0x02, 0xb1, 0x77, 0x91, 0x16, 0x62, 0x0e, 0xb3, 0xff, 0x0f, 0x0b, 0xc6, 0x54, 0x5f,
+	0x79, 0x5b, 0x47, 0xf0, 0x5c, 0x7b, 0x1b, 0xa0, 0x2a, 0x3f, 0x30, 0x64, 0xd7, 0xec, 0xd0, 0xf3,
+	0xe7, 0x33, 0x39, 0x9a, 0xd4, 0x30, 0xc6, 0x94, 0x55, 0x51, 0x88, 0x35, 0x6a, 0xf6, 0x1f, 0x58,
+	0x30, 0x99, 0xf8, 0xa2, 0xeb, 0x6e, 0x18, 0xa1, 0x77, 0x52, 0x5f, 0x35, 0xd3, 0xe5, 0xe2, 0x73,
+	0x43, 0xfe, 0x4d, 0x6a, 0xcf, 0xcb, 0x12, 0xed, 0x8b, 0xae, 0x42, 0xaf, 0x1b, 0x91, 0x86, 0xfc,
+	0x98, 0xc7, 0xda, 0x7e, 0x0c, 0xef, 0x55, 0x3c, 0x23, 0xcb, 0xb4, 0x26, 0xe6, 0x04, 0xec, 0x3f,
+	0x2e, 0xc2, 0x20, 0xdf, 0xdf, 0x2b, 0x4e, 0xf3, 0x08, 0xe6, 0xe2, 0x19, 0x18, 0x74, 0x1b, 0x8d,
+	0x56, 0xe4, 0xdc, 0x16, 0xf7, 0xde, 0x00, 0x3f, 0x83, 0x96, 0x65, 0x21, 0x8e, 0xe1, 0x68, 0x19,
+	0x7a, 0x58, 0x57, 0xf8, 0x57, 0x3e, 0x99, 0xfd, 0x95, 0xa2, 0xef, 0x33, 0x0b, 0x4e, 0xe4, 0x70,
+	0x96, 0x53, 0xed, 0x2b, 0x5a, 0x84, 0x19, 0x09, 0xe4, 0x00, 0xdc, 0x76, 0x3d, 0x27, 0xd8, 0xa5,
+	0x65, 0x53, 0x45, 0x46, 0xf0, 0xb9, 0xf6, 0x04, 0xe7, 0x14, 0x3e, 0x27, 0xab, 0x3e, 0x2c, 0x06,
+	0x60, 0x8d, 0xe8, 0xf4, 0xcb, 0x30, 0xa8, 0x90, 0x0f, 0xc3, 0x39, 0x4e, 0x7f, 0x0c, 0xc6, 0x12,
+	0x6d, 0x75, 0xaa, 0x3e, 0xac, 0x33, 0x9e, 0xbf, 0xc7, 0x8e, 0x0c, 0xd1, 0xeb, 0x45, 0x6f, 0x47,
+	0xdc, 0x4d, 0xf7, 0xe0, 0x58, 0x3d, 0xe3, 0xc8, 0x17, 0xf3, 0xda, 0xfd, 0x15, 0x71, 0x5a, 0x7c,
+	0xf6, 0xb1, 0x2c, 0x28, 0xce, 0x6c, 0xc3, 0x38, 0x11, 0x0b, 0xed, 0x4e, 0x44, 0x7a, 0xde, 0x1d,
+	0x53, 0x9d, 0xbf, 0x46, 0x76, 0xd5, 0xa1, 0xfa, 0x9d, 0xec, 0xfe, 0x19, 0x3e, 0xfa, 0xfc, 0xb8,
+	0x1c, 0x12, 0x04, 0x8a, 0xd7, 0xc8, 0x2e, 0x9f, 0x0a, 0xfd, 0xeb, 0x8a, 0x6d, 0xbf, 0xee, 0x6b,
+	0x16, 0x8c, 0xa8, 0xaf, 0x3b, 0x82, 0x73, 0x61, 0xce, 0x3c, 0x17, 0xce, 0xb4, 0x5d, 0xe0, 0x39,
+	0x27, 0xc2, 0x37, 0x0a, 0x70, 0x4a, 0xe1, 0xd0, 0x47, 0x14, 0xff, 0x23, 0x56, 0xd5, 0x45, 0x18,
+	0xf4, 0x94, 0x38, 0xd1, 0x32, 0xe5, 0x78, 0xb1, 0x30, 0x31, 0xc6, 0xa1, 0x57, 0x9e, 0x17, 0x5f,
+	0xda, 0xc3, 0xba, 0x9c, 0x5d, 0x5c, 0xee, 0x73, 0x50, 0x6c, 0xb9, 0x35, 0x71, 0xc1, 0x7c, 0x58,
+	0x8e, 0xf6, 0xfa, 0xf2, 0xc2, 0xc1, 0x5e, 0xe9, 0xd1, 0x3c, 0x95, 0x13, 0xbd, 0xd9, 0xc2, 0x99,
+	0xf5, 0xe5, 0x05, 0x4c, 0x2b, 0xa3, 0x59, 0x18, 0x93, 0x5a, 0xb5, 0x9b, 0x94, 0x2f, 0xf5, 0x3d,
+	0x71, 0x0f, 0x29, 0x61, 0x39, 0x36, 0xc1, 0x38, 0x89, 0x8f, 0x16, 0x60, 0x7c, 0xbb, 0x75, 0x9b,
+	0xd4, 0x49, 0xc4, 0x3f, 0xf8, 0x1a, 0xe1, 0xa2, 0xe4, 0xc1, 0xf8, 0x09, 0x7b, 0x2d, 0x01, 0xc7,
+	0xa9, 0x1a, 0xf6, 0xbf, 0xb2, 0xfb, 0x40, 0x8c, 0x9e, 0xc6, 0xdf, 0x7c, 0x27, 0x97, 0x73, 0x37,
+	0xab, 0xe2, 0x1a, 0xd9, 0x5d, 0xf3, 0x29, 0x1f, 0x92, 0xbd, 0x2a, 0x8c, 0x35, 0xdf, 0xd3, 0x76,
+	0xcd, 0xff, 0x56, 0x01, 0x8e, 0xab, 0x11, 0x30, 0xb8, 0xe5, 0xef, 0xf6, 0x31, 0xb8, 0x04, 0x43,
+	0x35, 0xb2, 0xe1, 0xb4, 0xea, 0x91, 0xd2, 0x6b, 0xf4, 0x72, 0x55, 0xdb, 0x42, 0x5c, 0x8c, 0x75,
+	0x9c, 0x43, 0x0c, 0xdb, 0xaf, 0x8f, 0xb0, 0x8b, 0x38, 0x72, 0xe8, 0x1a, 0x57, 0xbb, 0xc6, 0xca,
+	0xdd, 0x35, 0x8f, 0x41, 0xaf, 0xdb, 0xa0, 0x8c, 0x59, 0xc1, 0xe4, 0xb7, 0x96, 0x69, 0x21, 0xe6,
+	0x30, 0xf4, 0x04, 0xf4, 0x57, 0xfd, 0x46, 0xc3, 0xf1, 0x6a, 0xec, 0xca, 0x1b, 0x9c, 0x1b, 0xa2,
+	0xbc, 0xdb, 0x3c, 0x2f, 0xc2, 0x12, 0x46, 0x99, 0x6f, 0x27, 0xd8, 0xe4, 0xc2, 0x1e, 0xc1, 0x7c,
+	0xcf, 0x06, 0x9b, 0x21, 0x66, 0xa5, 0xf4, 0xad, 0x7a, 0xc7, 0x0f, 0xb6, 0x5d, 0x6f, 0x73, 0xc1,
+	0x0d, 0xc4, 0x96, 0x50, 0x77, 0xe1, 0x2d, 0x05, 0xc1, 0x1a, 0x16, 0x5a, 0x82, 0xde, 0xa6, 0x1f,
+	0x44, 0xe1, 0x54, 0x1f, 0x1b, 0xee, 0x47, 0x73, 0x0e, 0x22, 0xfe, 0xb5, 0x65, 0x3f, 0x88, 0xe2,
+	0x0f, 0xa0, 0xff, 0x42, 0xcc, 0xab, 0xa3, 0xeb, 0xd0, 0x4f, 0xbc, 0x9d, 0xa5, 0xc0, 0x6f, 0x4c,
+	0x4d, 0xe6, 0x53, 0x5a, 0xe4, 0x28, 0x7c, 0x99, 0xc5, 0x3c, 0xaa, 0x28, 0xc6, 0x92, 0x04, 0xfa,
+	0x28, 0x14, 0x89, 0xb7, 0x33, 0xd5, 0xcf, 0x28, 0x4d, 0xe7, 0x50, 0xba, 0xe9, 0x04, 0xf1, 0x99,
+	0xbf, 0xe8, 0xed, 0x60, 0x5a, 0x07, 0x7d, 0x02, 0x06, 0xe5, 0x81, 0x11, 0x0a, 0x29, 0x6a, 0xe6,
+	0x82, 0x95, 0xc7, 0x0c, 0x26, 0xef, 0xb6, 0xdc, 0x80, 0x34, 0x88, 0x17, 0x85, 0xf1, 0x09, 0x29,
+	0xa1, 0x21, 0x8e, 0xa9, 0xa1, 0x2a, 0x0c, 0x07, 0x24, 0x74, 0xef, 0x91, 0xb2, 0x5f, 0x77, 0xab,
+	0xbb, 0x53, 0x27, 0x59, 0xf7, 0x9e, 0x6a, 0x3b, 0x64, 0x58, 0xab, 0x10, 0x4b, 0xf9, 0xf5, 0x52,
+	0x6c, 0x10, 0x45, 0x6f, 0xc1, 0x48, 0x40, 0xc2, 0xc8, 0x09, 0x22, 0xd1, 0xca, 0x94, 0xd2, 0xca,
+	0x8d, 0x60, 0x1d, 0xc0, 0x9f, 0x13, 0x71, 0x33, 0x31, 0x04, 0x9b, 0x14, 0xd0, 0x27, 0xa4, 0xca,
+	0x61, 0xc5, 0x6f, 0x79, 0x51, 0x38, 0x35, 0xc8, 0xfa, 0x9d, 0xa9, 0x9b, 0xbe, 0x19, 0xe3, 0x25,
+	0x75, 0x12, 0xbc, 0x32, 0x36, 0x48, 0xa1, 0x4f, 0xc1, 0x08, 0xff, 0xcf, 0x55, 0xaa, 0xe1, 0xd4,
+	0x71, 0x46, 0xfb, 0x5c, 0x3e, 0x6d, 0x8e, 0x38, 0x77, 0x5c, 0x10, 0x1f, 0xd1, 0x4b, 0x43, 0x6c,
+	0x52, 0x43, 0x18, 0x46, 0xea, 0xee, 0x0e, 0xf1, 0x48, 0x18, 0x96, 0x03, 0xff, 0x36, 0x11, 0x12,
+	0xe2, 0x53, 0xd9, 0x2a, 0x58, 0xff, 0x36, 0x11, 0x8f, 0x40, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0x75,
+	0x18, 0xa5, 0x4f, 0x72, 0x37, 0x26, 0x3a, 0xd4, 0x89, 0x28, 0x7b, 0x38, 0x63, 0xa3, 0x12, 0x4e,
+	0x10, 0x41, 0x37, 0x60, 0x98, 0x8d, 0x79, 0xab, 0xc9, 0x89, 0x9e, 0xe8, 0x44, 0x94, 0x19, 0x14,
+	0x54, 0xb4, 0x2a, 0xd8, 0x20, 0x80, 0xde, 0x84, 0xc1, 0xba, 0xbb, 0x41, 0xaa, 0xbb, 0xd5, 0x3a,
+	0x99, 0x1a, 0x66, 0xd4, 0x32, 0x0f, 0xc3, 0xeb, 0x12, 0x89, 0xf3, 0xe7, 0xea, 0x2f, 0x8e, 0xab,
+	0xa3, 0x9b, 0x70, 0x22, 0x22, 0x41, 0xc3, 0xf5, 0x1c, 0x7a, 0x88, 0x89, 0x27, 0x21, 0xd3, 0x8c,
+	0x8f, 0xb0, 0xd5, 0x75, 0x56, 0xcc, 0xc6, 0x89, 0xb5, 0x4c, 0x2c, 0x9c, 0x53, 0x1b, 0xdd, 0x85,
+	0xa9, 0x0c, 0x08, 0x5f, 0xb7, 0xc7, 0x18, 0xe5, 0xd7, 0x04, 0xe5, 0xa9, 0xb5, 0x1c, 0xbc, 0x83,
+	0x36, 0x30, 0x9c, 0x4b, 0x1d, 0xdd, 0x80, 0x31, 0x76, 0x72, 0x96, 0x5b, 0xf5, 0xba, 0x68, 0x70,
+	0x94, 0x35, 0xf8, 0x84, 0xe4, 0x23, 0x96, 0x4d, 0xf0, 0xc1, 0x5e, 0x09, 0xe2, 0x7f, 0x38, 0x59,
+	0x1b, 0xdd, 0x66, 0x4a, 0xd8, 0x56, 0xe0, 0x46, 0xbb, 0x74, 0x57, 0x91, 0xbb, 0xd1, 0xd4, 0x58,
+	0x5b, 0x81, 0x94, 0x8e, 0xaa, 0x34, 0xb5, 0x7a, 0x21, 0x4e, 0x12, 0xa4, 0x57, 0x41, 0x18, 0xd5,
+	0x5c, 0x6f, 0x6a, 0x9c, 0xbf, 0xa7, 0xe4, 0x49, 0x5a, 0xa1, 0x85, 0x98, 0xc3, 0x98, 0x02, 0x96,
+	0xfe, 0xb8, 0x41, 0x6f, 0xdc, 0x09, 0x86, 0x18, 0x2b, 0x60, 0x25, 0x00, 0xc7, 0x38, 0x94, 0x09,
+	0x8e, 0xa2, 0xdd, 0x29, 0xc4, 0x50, 0xd5, 0x81, 0xb8, 0xb6, 0xf6, 0x09, 0x4c, 0xcb, 0xed, 0xdb,
+	0x30, 0xaa, 0x8e, 0x09, 0x36, 0x26, 0xa8, 0x04, 0xbd, 0x8c, 0xed, 0x13, 0xe2, 0xd3, 0x41, 0xda,
+	0x05, 0xc6, 0x12, 0x62, 0x5e, 0xce, 0xba, 0xe0, 0xde, 0x23, 0x73, 0xbb, 0x11, 0xe1, 0xb2, 0x88,
+	0xa2, 0xd6, 0x05, 0x09, 0xc0, 0x31, 0x8e, 0xfd, 0x1f, 0x39, 0xfb, 0x1c, 0xdf, 0x12, 0x5d, 0xdc,
+	0x8b, 0xcf, 0xc2, 0x00, 0x33, 0xfc, 0xf0, 0x03, 0xae, 0x9d, 0xed, 0x8d, 0x19, 0xe6, 0xab, 0xa2,
+	0x1c, 0x2b, 0x0c, 0xf4, 0x2a, 0x8c, 0x54, 0xf5, 0x06, 0xc4, 0xa5, 0xae, 0x8e, 0x11, 0xa3, 0x75,
+	0x6c, 0xe2, 0xa2, 0xcb, 0x30, 0xc0, 0x6c, 0x9c, 0xaa, 0x7e, 0x5d, 0x70, 0x9b, 0x92, 0x33, 0x19,
+	0x28, 0x8b, 0xf2, 0x03, 0xed, 0x37, 0x56, 0xd8, 0xe8, 0x3c, 0xf4, 0xd1, 0x2e, 0x2c, 0x97, 0xc5,
+	0x75, 0xaa, 0x24, 0x81, 0x57, 0x59, 0x29, 0x16, 0x50, 0xfb, 0x0f, 0x2c, 0xc6, 0x4b, 0xa5, 0xcf,
+	0x7c, 0x74, 0x95, 0x5d, 0x1a, 0xec, 0x06, 0xd1, 0xb4, 0xf0, 0x8f, 0x6b, 0x37, 0x81, 0x82, 0x1d,
+	0x24, 0xfe, 0x63, 0xa3, 0x26, 0x7a, 0x3b, 0x79, 0x33, 0x70, 0x86, 0xe2, 0x45, 0x39, 0x04, 0xc9,
+	0xdb, 0xe1, 0x91, 0xf8, 0x8a, 0xa3, 0xfd, 0x69, 0x77, 0x45, 0xd8, 0x3f, 0x55, 0xd0, 0x56, 0x49,
+	0x25, 0x72, 0x22, 0x82, 0xca, 0xd0, 0x7f, 0xc7, 0x71, 0x23, 0xd7, 0xdb, 0x14, 0x7c, 0x5f, 0xfb,
+	0x8b, 0x8e, 0x55, 0xba, 0xc5, 0x2b, 0x70, 0xee, 0x45, 0xfc, 0xc1, 0x92, 0x0c, 0xa5, 0x18, 0xb4,
+	0x3c, 0x8f, 0x52, 0x2c, 0x74, 0x4b, 0x11, 0xf3, 0x0a, 0x9c, 0xa2, 0xf8, 0x83, 0x25, 0x19, 0xf4,
+	0x0e, 0x80, 0x3c, 0x21, 0x48, 0x4d, 0xc8, 0x0e, 0x9f, 0xed, 0x4c, 0x74, 0x4d, 0xd5, 0xe1, 0xc2,
+	0xc9, 0xf8, 0x3f, 0xd6, 0xe8, 0xd9, 0x91, 0x36, 0xa7, 0x7a, 0x67, 0xd0, 0x27, 0xe9, 0x16, 0x75,
+	0x82, 0x88, 0xd4, 0x66, 0x23, 0x31, 0x38, 0x4f, 0x77, 0xf7, 0x38, 0x5c, 0x73, 0x1b, 0x44, 0xdf,
+	0xce, 0x82, 0x08, 0x8e, 0xe9, 0xd9, 0xbf, 0x53, 0x84, 0xa9, 0xbc, 0xee, 0xd2, 0x4d, 0x43, 0xee,
+	0xba, 0xd1, 0x3c, 0x65, 0x6b, 0x2d, 0x73, 0xd3, 0x2c, 0x8a, 0x72, 0xac, 0x30, 0xe8, 0xea, 0x0d,
+	0xdd, 0x4d, 0xf9, 0xb6, 0xef, 0x8d, 0x57, 0x6f, 0x85, 0x95, 0x62, 0x01, 0xa5, 0x78, 0x01, 0x71,
+	0x42, 0x61, 0x7c, 0xa7, 0xad, 0x72, 0xcc, 0x4a, 0xb1, 0x80, 0xea, 0x52, 0xc6, 0x9e, 0x0e, 0x52,
+	0x46, 0x63, 0x88, 0x7a, 0x1f, 0xec, 0x10, 0xa1, 0x4f, 0x03, 0x6c, 0xb8, 0x9e, 0x1b, 0x6e, 0x31,
+	0xea, 0x7d, 0x87, 0xa6, 0xae, 0x98, 0xe2, 0x25, 0x45, 0x05, 0x6b, 0x14, 0xd1, 0x4b, 0x30, 0xa4,
+	0x0e, 0x90, 0xe5, 0x05, 0xa6, 0xfa, 0xd7, 0x4c, 0xa9, 0xe2, 0xd3, 0x74, 0x01, 0xeb, 0x78, 0xf6,
+	0x67, 0x93, 0xeb, 0x45, 0xec, 0x00, 0x6d, 0x7c, 0xad, 0x6e, 0xc7, 0xb7, 0xd0, 0x7e, 0x7c, 0xed,
+	0x9f, 0x19, 0x84, 0x31, 0xa3, 0xb1, 0x56, 0xd8, 0xc5, 0x99, 0x7b, 0x85, 0x5e, 0x40, 0x4e, 0x44,
+	0xc4, 0xfe, 0xb3, 0x3b, 0x6f, 0x15, 0xfd, 0x92, 0xa2, 0x3b, 0x80, 0xd7, 0x47, 0x9f, 0x86, 0xc1,
+	0xba, 0x13, 0x32, 0x89, 0x25, 0x11, 0xfb, 0xae, 0x1b, 0x62, 0xf1, 0x83, 0xd0, 0x09, 0x23, 0xed,
+	0xd6, 0xe7, 0xb4, 0x63, 0x92, 0xf4, 0xa6, 0xa4, 0xfc, 0x95, 0xb4, 0xee, 0x54, 0x9d, 0xa0, 0x4c,
+	0xd8, 0x2e, 0xe6, 0x30, 0x74, 0x99, 0x1d, 0xad, 0x74, 0x55, 0xcc, 0x53, 0x6e, 0x94, 0x2d, 0xb3,
+	0x5e, 0x83, 0xc9, 0x56, 0x30, 0x6c, 0x60, 0xc6, 0x6f, 0xb2, 0xbe, 0x36, 0x6f, 0xb2, 0xa7, 0xa0,
+	0x9f, 0xfd, 0x50, 0x2b, 0x40, 0xcd, 0xc6, 0x32, 0x2f, 0xc6, 0x12, 0x9e, 0x5c, 0x30, 0x03, 0xdd,
+	0x2d, 0x18, 0xfa, 0xea, 0x13, 0x8b, 0x9a, 0x99, 0x5d, 0x0c, 0xf0, 0x53, 0x4e, 0x2c, 0x79, 0x2c,
+	0x61, 0xe8, 0x57, 0x2c, 0x40, 0x4e, 0x9d, 0xbe, 0x96, 0x69, 0xb1, 0x7a, 0xdc, 0x00, 0x63, 0xb5,
+	0x5f, 0xed, 0x38, 0xec, 0xad, 0x70, 0x66, 0x36, 0x55, 0x9b, 0x4b, 0x4a, 0x5f, 0x11, 0x5d, 0x44,
+	0x69, 0x04, 0xfd, 0x32, 0xba, 0xee, 0x86, 0xd1, 0xe7, 0xff, 0x26, 0x71, 0x39, 0x65, 0x74, 0x09,
+	0xad, 0xeb, 0x8f, 0xaf, 0xa1, 0x43, 0x3e, 0xbe, 0x46, 0x72, 0x1f, 0x5e, 0xdf, 0x9f, 0x78, 0xc0,
+	0x0c, 0xb3, 0x2f, 0x7f, 0xa2, 0xc3, 0x03, 0x46, 0x88, 0xd3, 0xbb, 0x79, 0xc6, 0x94, 0x85, 0x1e,
+	0x78, 0x84, 0x75, 0xb9, 0xfd, 0x23, 0x78, 0x3d, 0x24, 0xc1, 0xdc, 0x29, 0xa9, 0x26, 0x3e, 0xd0,
+	0x79, 0x0f, 0x4d, 0x6f, 0xfc, 0x43, 0x16, 0x4c, 0xa5, 0x07, 0x88, 0x77, 0x69, 0x6a, 0x94, 0xf5,
+	0xdf, 0x6e, 0x37, 0x32, 0xa2, 0xf3, 0xd2, 0xdc, 0x75, 0x6a, 0x36, 0x87, 0x16, 0xce, 0x6d, 0x65,
+	0xba, 0x05, 0x27, 0x73, 0xe6, 0x3d, 0x43, 0x6a, 0xbd, 0xa0, 0x4b, 0xad, 0x3b, 0xc8, 0x3a, 0x67,
+	0xe4, 0xcc, 0xcc, 0xbc, 0xd5, 0x72, 0xbc, 0xc8, 0x8d, 0x76, 0x75, 0x29, 0xb7, 0x07, 0xe6, 0x80,
+	0xa0, 0x4f, 0x41, 0x6f, 0xdd, 0xf5, 0x5a, 0x77, 0xc5, 0x4d, 0x79, 0x3e, 0xfb, 0x11, 0xe3, 0xb5,
+	0xee, 0x9a, 0x43, 0x5c, 0xa2, 0x1b, 0x92, 0x95, 0x1f, 0xec, 0x95, 0x50, 0x1a, 0x01, 0x73, 0xaa,
+	0xf6, 0xd3, 0x30, 0xba, 0xe0, 0x90, 0x86, 0xef, 0x2d, 0x7a, 0xb5, 0xa6, 0xef, 0x7a, 0x11, 0x9a,
+	0x82, 0x1e, 0xc6, 0x22, 0xf2, 0x0b, 0xb2, 0x87, 0x0e, 0x21, 0x66, 0x25, 0xf6, 0x26, 0x1c, 0x5f,
+	0xf0, 0xef, 0x78, 0x77, 0x9c, 0xa0, 0x36, 0x5b, 0x5e, 0xd6, 0xa4, 0x7e, 0xab, 0x52, 0xea, 0x64,
+	0xe5, 0xbf, 0xe9, 0xb5, 0x9a, 0x7c, 0x29, 0x2d, 0xb9, 0x75, 0x92, 0x23, 0x9b, 0xfd, 0x99, 0x82,
+	0xd1, 0x52, 0x8c, 0xaf, 0x34, 0x8b, 0x56, 0xae, 0x51, 0xc2, 0x5b, 0x30, 0xb0, 0xe1, 0x92, 0x7a,
+	0x0d, 0x93, 0x0d, 0x31, 0x1b, 0x4f, 0xe6, 0x9b, 0x2d, 0x2e, 0x51, 0x4c, 0xa5, 0x02, 0x65, 0x32,
+	0xab, 0x25, 0x51, 0x19, 0x2b, 0x32, 0x68, 0x1b, 0xc6, 0xe5, 0x9c, 0x49, 0xa8, 0x38, 0xb5, 0x9f,
+	0x6a, 0xb7, 0x08, 0x4d, 0xe2, 0xcc, 0x84, 0x1b, 0x27, 0xc8, 0xe0, 0x14, 0x61, 0x74, 0x1a, 0x7a,
+	0x1a, 0x94, 0x3f, 0xe9, 0x61, 0xc3, 0xcf, 0x84, 0x54, 0x4c, 0xde, 0xc6, 0x4a, 0xed, 0x9f, 0xb3,
+	0xe0, 0x64, 0x6a, 0x64, 0x84, 0xdc, 0xf1, 0x01, 0xcf, 0x42, 0x52, 0x0e, 0x58, 0xe8, 0x2c, 0x07,
+	0xb4, 0xff, 0x3b, 0x0b, 0x8e, 0x2d, 0x36, 0x9a, 0xd1, 0xee, 0x82, 0x6b, 0x5a, 0x10, 0xbc, 0x0c,
+	0x7d, 0x0d, 0x52, 0x73, 0x5b, 0x0d, 0x31, 0x73, 0x25, 0x79, 0x87, 0xaf, 0xb0, 0x52, 0x7a, 0x0e,
+	0x54, 0x22, 0x3f, 0x70, 0x36, 0x09, 0x2f, 0xc0, 0x02, 0x9d, 0x71, 0x42, 0xee, 0x3d, 0x72, 0xdd,
+	0x6d, 0xb8, 0xd1, 0xfd, 0xed, 0x2e, 0xa1, 0xfc, 0x97, 0x44, 0x70, 0x4c, 0xcf, 0xfe, 0x96, 0x05,
+	0x63, 0x72, 0xdd, 0xcf, 0xd6, 0x6a, 0x01, 0x09, 0x43, 0x34, 0x0d, 0x05, 0xb7, 0x29, 0x7a, 0x09,
+	0xa2, 0x97, 0x85, 0xe5, 0x32, 0x2e, 0xb8, 0x4d, 0xf9, 0xe8, 0x62, 0x6c, 0x42, 0xd1, 0xb4, 0x83,
+	0xb8, 0x2a, 0xca, 0xb1, 0xc2, 0x40, 0x17, 0x60, 0xc0, 0xf3, 0x6b, 0xfc, 0xdd, 0x22, 0x34, 0xe1,
+	0x14, 0x73, 0x55, 0x94, 0x61, 0x05, 0x45, 0x65, 0x18, 0xe4, 0x56, 0xb2, 0xf1, 0xa2, 0xed, 0xca,
+	0xd6, 0x96, 0x7d, 0xd9, 0x9a, 0xac, 0x89, 0x63, 0x22, 0xf6, 0x1f, 0x59, 0x30, 0x2c, 0xbf, 0xac,
+	0xcb, 0x17, 0x25, 0xdd, 0x5a, 0xf1, 0x6b, 0x32, 0xde, 0x5a, 0xf4, 0x45, 0xc8, 0x20, 0xc6, 0x43,
+	0xb0, 0x78, 0xa8, 0x87, 0xe0, 0x25, 0x18, 0x72, 0x9a, 0xcd, 0xb2, 0xf9, 0x8a, 0x64, 0x4b, 0x69,
+	0x36, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xb3, 0x05, 0x18, 0x95, 0x5f, 0x50, 0x69, 0xdd, 0x0e, 0x49,
+	0x84, 0xd6, 0x60, 0xd0, 0xe1, 0xb3, 0x44, 0xe4, 0x22, 0x7f, 0x2c, 0x5b, 0xba, 0x69, 0x4c, 0x69,
+	0xcc, 0x0e, 0xcf, 0xca, 0xda, 0x38, 0x26, 0x84, 0xea, 0x30, 0xe1, 0xf9, 0x11, 0x63, 0x8d, 0x14,
+	0xbc, 0x9d, 0xc2, 0x39, 0x49, 0xfd, 0x94, 0xa0, 0x3e, 0xb1, 0x9a, 0xa4, 0x82, 0xd3, 0x84, 0xd1,
+	0xa2, 0x94, 0x18, 0x17, 0xf3, 0x45, 0x7d, 0xfa, 0xc4, 0x65, 0x0b, 0x8c, 0xed, 0xdf, 0xb7, 0x60,
+	0x50, 0xa2, 0x1d, 0x85, 0x6d, 0xc1, 0x0a, 0xf4, 0x87, 0x6c, 0x12, 0xe4, 0xd0, 0xd8, 0xed, 0x3a,
+	0xce, 0xe7, 0x2b, 0xe6, 0xf8, 0xf8, 0xff, 0x10, 0x4b, 0x1a, 0x4c, 0x61, 0xa8, 0xba, 0xff, 0x3e,
+	0x51, 0x18, 0xaa, 0xfe, 0xe4, 0x5c, 0x4a, 0x7f, 0xc7, 0xfa, 0xac, 0x49, 0xe0, 0xe9, 0xc3, 0xa4,
+	0x19, 0x90, 0x0d, 0xf7, 0x6e, 0xf2, 0x61, 0x52, 0x66, 0xa5, 0x58, 0x40, 0xd1, 0x3b, 0x30, 0x5c,
+	0x95, 0x9a, 0xa2, 0x78, 0x87, 0x9f, 0x6f, 0xab, 0xb5, 0x54, 0x0a, 0x6e, 0x2e, 0xe9, 0x9c, 0xd7,
+	0xea, 0x63, 0x83, 0x9a, 0x69, 0x05, 0x56, 0xec, 0x64, 0x05, 0x16, 0xd3, 0xcd, 0xb7, 0x89, 0xfa,
+	0x79, 0x0b, 0xfa, 0xb8, 0x86, 0xa0, 0x3b, 0x05, 0x8d, 0xa6, 0xef, 0x8f, 0xc7, 0xee, 0x26, 0x2d,
+	0x14, 0x9c, 0x0d, 0x5a, 0x81, 0x41, 0xf6, 0x83, 0x69, 0x38, 0x8a, 0xf9, 0x3e, 0x63, 0xbc, 0x55,
+	0xbd, 0x83, 0x37, 0x65, 0x35, 0x1c, 0x53, 0xb0, 0x7f, 0xba, 0x48, 0x4f, 0xb7, 0x18, 0xd5, 0xb8,
+	0xf4, 0xad, 0x87, 0x77, 0xe9, 0x17, 0x1e, 0xd6, 0xa5, 0xbf, 0x09, 0x63, 0x55, 0xcd, 0x3a, 0x20,
+	0x9e, 0xc9, 0x0b, 0x6d, 0x17, 0x89, 0x66, 0x48, 0xc0, 0x65, 0xa8, 0xf3, 0x26, 0x11, 0x9c, 0xa4,
+	0x8a, 0x3e, 0x09, 0xc3, 0x7c, 0x9e, 0x45, 0x2b, 0xdc, 0x90, 0xee, 0x89, 0xfc, 0xf5, 0xa2, 0x37,
+	0xc1, 0x65, 0xee, 0x5a, 0x75, 0x6c, 0x10, 0xb3, 0xff, 0xc9, 0x02, 0xb4, 0xd8, 0xdc, 0x22, 0x0d,
+	0x12, 0x38, 0xf5, 0x58, 0xc9, 0xf7, 0x25, 0x0b, 0xa6, 0x48, 0xaa, 0x78, 0xde, 0x6f, 0x34, 0xc4,
+	0x93, 0x3e, 0x47, 0xea, 0xb4, 0x98, 0x53, 0x27, 0x66, 0xeb, 0xf3, 0x30, 0x70, 0x6e, 0x7b, 0x68,
+	0x05, 0x26, 0xf9, 0x2d, 0xa9, 0x00, 0x9a, 0xad, 0xdd, 0x23, 0x82, 0xf0, 0xe4, 0x5a, 0x1a, 0x05,
+	0x67, 0xd5, 0xb3, 0x7f, 0x7f, 0x04, 0x72, 0x7b, 0xf1, 0x81, 0x76, 0xf3, 0x03, 0xed, 0xe6, 0x07,
+	0xda, 0xcd, 0x0f, 0xb4, 0x9b, 0x1f, 0x68, 0x37, 0x3f, 0xd0, 0x6e, 0xbe, 0x4f, 0xb5, 0x9b, 0xff,
+	0xa5, 0x05, 0xc7, 0xd5, 0xf5, 0x65, 0x3c, 0xd8, 0x3f, 0x07, 0x93, 0x7c, 0xbb, 0xcd, 0xd7, 0x1d,
+	0xb7, 0xb1, 0x46, 0x1a, 0xcd, 0xba, 0x13, 0x49, 0x1b, 0xa6, 0x4b, 0x99, 0x2b, 0x37, 0xe1, 0x28,
+	0x61, 0x54, 0xe4, 0x1e, 0x67, 0x19, 0x00, 0x9c, 0xd5, 0x8c, 0xfd, 0x3b, 0x03, 0xd0, 0xbb, 0xb8,
+	0x43, 0xbc, 0xe8, 0x08, 0x9e, 0x36, 0x55, 0x18, 0x75, 0xbd, 0x1d, 0xbf, 0xbe, 0x43, 0x6a, 0x1c,
+	0x7e, 0x98, 0x17, 0xf8, 0x09, 0x41, 0x7a, 0x74, 0xd9, 0x20, 0x81, 0x13, 0x24, 0x1f, 0x86, 0x8e,
+	0xe8, 0x0a, 0xf4, 0xf1, 0xcb, 0x47, 0x28, 0x88, 0x32, 0xcf, 0x6c, 0x36, 0x88, 0xe2, 0x4a, 0x8d,
+	0xf5, 0x57, 0xfc, 0x72, 0x13, 0xd5, 0xd1, 0x67, 0x61, 0x74, 0xc3, 0x0d, 0xc2, 0x68, 0xcd, 0x6d,
+	0xd0, 0xab, 0xa1, 0xd1, 0xbc, 0x0f, 0x9d, 0x90, 0x1a, 0x87, 0x25, 0x83, 0x12, 0x4e, 0x50, 0x46,
+	0x9b, 0x30, 0x52, 0x77, 0xf4, 0xa6, 0xfa, 0x0f, 0xdd, 0x94, 0xba, 0x1d, 0xae, 0xeb, 0x84, 0xb0,
+	0x49, 0x97, 0x6e, 0xa7, 0x2a, 0x53, 0x6b, 0x0c, 0x30, 0x71, 0x86, 0xda, 0x4e, 0x5c, 0x9f, 0xc1,
+	0x61, 0x94, 0x41, 0x63, 0xee, 0x06, 0x83, 0x26, 0x83, 0xa6, 0x39, 0x15, 0x7c, 0x06, 0x06, 0x09,
+	0x1d, 0x42, 0x4a, 0x58, 0x5c, 0x30, 0x17, 0xbb, 0xeb, 0xeb, 0x8a, 0x5b, 0x0d, 0x7c, 0x53, 0x1b,
+	0xb7, 0x28, 0x29, 0xe1, 0x98, 0x28, 0x9a, 0x87, 0xbe, 0x90, 0x04, 0xae, 0x92, 0xf8, 0xb7, 0x99,
+	0x46, 0x86, 0xc6, 0x5d, 0x1a, 0xf9, 0x6f, 0x2c, 0xaa, 0xd2, 0xe5, 0xe5, 0x30, 0x51, 0x2c, 0xbb,
+	0x0c, 0xb4, 0xe5, 0x35, 0xcb, 0x4a, 0xb1, 0x80, 0xa2, 0x37, 0xa1, 0x3f, 0x20, 0x75, 0xa6, 0xee,
+	0x1d, 0xe9, 0x7e, 0x91, 0x73, 0xed, 0x31, 0xaf, 0x87, 0x25, 0x01, 0x74, 0x0d, 0x50, 0x40, 0x28,
+	0x83, 0xe7, 0x7a, 0x9b, 0xca, 0x08, 0x5f, 0x1c, 0xb4, 0x8a, 0x91, 0xc6, 0x31, 0x86, 0xf4, 0x66,
+	0xc5, 0x19, 0xd5, 0xd0, 0x15, 0x98, 0x50, 0xa5, 0xcb, 0x5e, 0x18, 0x39, 0xf4, 0x80, 0x1b, 0x63,
+	0xb4, 0x94, 0x7c, 0x05, 0x27, 0x11, 0x70, 0xba, 0x8e, 0xfd, 0x6b, 0x16, 0xf0, 0x71, 0x3e, 0x02,
+	0xa9, 0xc2, 0xeb, 0xa6, 0x54, 0xe1, 0x54, 0xee, 0xcc, 0xe5, 0x48, 0x14, 0x7e, 0xcd, 0x82, 0x21,
+	0x6d, 0x66, 0xe3, 0x35, 0x6b, 0xb5, 0x59, 0xb3, 0x2d, 0x18, 0xa7, 0x2b, 0xfd, 0xc6, 0xed, 0x90,
+	0x04, 0x3b, 0xa4, 0xc6, 0x16, 0x66, 0xe1, 0xfe, 0x16, 0xa6, 0x32, 0xf8, 0xbd, 0x9e, 0x20, 0x88,
+	0x53, 0x4d, 0xd8, 0x9f, 0x91, 0x5d, 0x55, 0xf6, 0xd1, 0x55, 0x35, 0xe7, 0x09, 0xfb, 0x68, 0x35,
+	0xab, 0x38, 0xc6, 0xa1, 0x5b, 0x6d, 0xcb, 0x0f, 0xa3, 0xa4, 0x7d, 0xf4, 0x55, 0x3f, 0x8c, 0x30,
+	0x83, 0xd8, 0x2f, 0x00, 0x2c, 0xde, 0x25, 0x55, 0xbe, 0x62, 0xf5, 0x47, 0x8f, 0x95, 0xff, 0xe8,
+	0xb1, 0xff, 0xd2, 0x82, 0xd1, 0xa5, 0x79, 0xe3, 0xe6, 0x9a, 0x01, 0xe0, 0x2f, 0xb5, 0x5b, 0xb7,
+	0x56, 0xa5, 0x91, 0x0e, 0xb7, 0x53, 0x50, 0xa5, 0x58, 0xc3, 0x40, 0xa7, 0xa0, 0x58, 0x6f, 0x79,
+	0x42, 0xec, 0xd9, 0x4f, 0xaf, 0xc7, 0xeb, 0x2d, 0x0f, 0xd3, 0x32, 0xcd, 0x93, 0xad, 0xd8, 0xb5,
+	0x27, 0x5b, 0xc7, 0x80, 0x3a, 0xa8, 0x04, 0xbd, 0x77, 0xee, 0xb8, 0x35, 0x1e, 0x27, 0x40, 0x18,
+	0x10, 0xdd, 0xba, 0xb5, 0xbc, 0x10, 0x62, 0x5e, 0x6e, 0x7f, 0xb9, 0x08, 0xd3, 0x4b, 0x75, 0x72,
+	0xf7, 0x3d, 0xc6, 0x4a, 0xe8, 0xd6, 0x0f, 0xef, 0x70, 0x02, 0xa4, 0xc3, 0xfa, 0x5a, 0x76, 0x1e,
+	0x8f, 0x0d, 0xe8, 0xe7, 0xe6, 0xc1, 0x32, 0x72, 0x42, 0xa6, 0x52, 0x36, 0x7f, 0x40, 0x66, 0xb8,
+	0x99, 0xb1, 0x50, 0xca, 0xaa, 0x0b, 0x53, 0x94, 0x62, 0x49, 0x7c, 0xfa, 0x15, 0x18, 0xd6, 0x31,
+	0x0f, 0xe5, 0xf5, 0xfc, 0xc3, 0x45, 0x18, 0xa7, 0x3d, 0x78, 0xa8, 0x13, 0xb1, 0x9e, 0x9e, 0x88,
+	0x07, 0xed, 0xf9, 0xda, 0x79, 0x36, 0xde, 0x49, 0xce, 0xc6, 0xa5, 0xbc, 0xd9, 0x38, 0xea, 0x39,
+	0xf8, 0x11, 0x0b, 0x26, 0x97, 0xea, 0x7e, 0x75, 0x3b, 0xe1, 0x9d, 0xfa, 0x12, 0x0c, 0xd1, 0xe3,
+	0x38, 0x34, 0x02, 0xb5, 0x18, 0xa1, 0x7b, 0x04, 0x08, 0xeb, 0x78, 0x5a, 0xb5, 0xf5, 0xf5, 0xe5,
+	0x85, 0xac, 0x88, 0x3f, 0x02, 0x84, 0x75, 0x3c, 0xfb, 0xcf, 0x2d, 0x38, 0x73, 0x65, 0x7e, 0x31,
+	0x5e, 0x8a, 0xa9, 0xa0, 0x43, 0xe7, 0xa1, 0xaf, 0x59, 0xd3, 0xba, 0x12, 0x8b, 0x85, 0x17, 0x58,
+	0x2f, 0x04, 0xf4, 0xfd, 0x12, 0xdf, 0x6b, 0x1d, 0xe0, 0x0a, 0x2e, 0xcf, 0x8b, 0x73, 0x57, 0x6a,
+	0x81, 0xac, 0x5c, 0x2d, 0xd0, 0x13, 0xd0, 0x4f, 0xef, 0x05, 0xb7, 0x2a, 0xfb, 0xcd, 0xcd, 0x2e,
+	0x78, 0x11, 0x96, 0x30, 0xfb, 0x57, 0x2d, 0x98, 0xbc, 0xe2, 0x46, 0xf4, 0xd2, 0x4e, 0x46, 0xd5,
+	0xa1, 0xb7, 0x76, 0xe8, 0x46, 0x7e, 0xb0, 0x9b, 0x8c, 0xaa, 0x83, 0x15, 0x04, 0x6b, 0x58, 0xfc,
+	0x83, 0x76, 0x5c, 0xe6, 0xef, 0x52, 0x30, 0xf5, 0x6e, 0x58, 0x94, 0x63, 0x85, 0x41, 0xc7, 0xab,
+	0xe6, 0x06, 0x4c, 0x64, 0xb9, 0x2b, 0x0e, 0x6e, 0x35, 0x5e, 0x0b, 0x12, 0x80, 0x63, 0x1c, 0xfb,
+	0x1f, 0x2c, 0x28, 0x5d, 0xe1, 0x5e, 0xbb, 0x1b, 0x61, 0xce, 0xa1, 0xfb, 0x02, 0x0c, 0x12, 0xa9,
+	0x20, 0x10, 0xbd, 0x56, 0x8c, 0xa8, 0xd2, 0x1c, 0xf0, 0xe0, 0x3e, 0x0a, 0xaf, 0x0b, 0x17, 0xfa,
+	0xc3, 0xf9, 0x40, 0x2f, 0x01, 0x22, 0x7a, 0x5b, 0x7a, 0xb4, 0x23, 0x16, 0x36, 0x65, 0x31, 0x05,
+	0xc5, 0x19, 0x35, 0xec, 0x9f, 0xb3, 0xe0, 0xb8, 0xfa, 0xe0, 0xf7, 0xdd, 0x67, 0xda, 0x5f, 0x2f,
+	0xc0, 0xc8, 0xd5, 0xb5, 0xb5, 0xf2, 0x15, 0x12, 0x69, 0xab, 0xb2, 0xbd, 0xda, 0x1f, 0x6b, 0xda,
+	0xcb, 0x76, 0x6f, 0xc4, 0x56, 0xe4, 0xd6, 0x67, 0x78, 0x0c, 0xbf, 0x99, 0x65, 0x2f, 0xba, 0x11,
+	0x54, 0xa2, 0xc0, 0xf5, 0x36, 0x33, 0x57, 0xba, 0xe4, 0x59, 0x8a, 0x79, 0x3c, 0x0b, 0x7a, 0x01,
+	0xfa, 0x58, 0x10, 0x41, 0x39, 0x09, 0x8f, 0xa8, 0x27, 0x16, 0x2b, 0x3d, 0xd8, 0x2b, 0x0d, 0xae,
+	0xe3, 0x65, 0xfe, 0x07, 0x0b, 0x54, 0xb4, 0x0e, 0x43, 0x5b, 0x51, 0xd4, 0xbc, 0x4a, 0x9c, 0x1a,
+	0x09, 0xe4, 0x29, 0x7b, 0x36, 0xeb, 0x94, 0xa5, 0x83, 0xc0, 0xd1, 0xe2, 0x83, 0x29, 0x2e, 0x0b,
+	0xb1, 0x4e, 0xc7, 0xae, 0x00, 0xc4, 0xb0, 0x07, 0xa4, 0xb8, 0xb1, 0xd7, 0x60, 0x90, 0x7e, 0xee,
+	0x6c, 0xdd, 0x75, 0xda, 0xab, 0xc6, 0x9f, 0x81, 0x41, 0xa9, 0xf8, 0x0e, 0x45, 0x88, 0x0f, 0x76,
+	0x23, 0x49, 0xbd, 0x78, 0x88, 0x63, 0xb8, 0xfd, 0x38, 0x08, 0x0b, 0xe0, 0x76, 0x24, 0xed, 0x0d,
+	0x38, 0xc6, 0x4c, 0x99, 0x9d, 0x68, 0xcb, 0x58, 0xa3, 0x9d, 0x17, 0xc3, 0xb3, 0xe2, 0x5d, 0xc7,
+	0xbf, 0x6c, 0x4a, 0x73, 0x21, 0x1f, 0x96, 0x14, 0xe3, 0x37, 0x9e, 0xfd, 0xf7, 0x3d, 0xf0, 0xc8,
+	0x72, 0x25, 0x3f, 0x36, 0xd5, 0x65, 0x18, 0xe6, 0xec, 0x22, 0x5d, 0x1a, 0x4e, 0x5d, 0xb4, 0xab,
+	0x24, 0xa0, 0x6b, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x0c, 0x14, 0xdd, 0x77, 0xbd, 0xa4, 0x83, 0xe5,
+	0xf2, 0x5b, 0xab, 0x98, 0x96, 0x53, 0x30, 0xe5, 0x3c, 0xf9, 0x91, 0xae, 0xc0, 0x8a, 0xfb, 0x7c,
+	0x1d, 0x46, 0xdd, 0xb0, 0x1a, 0xba, 0xcb, 0x1e, 0xdd, 0xa7, 0xda, 0x4e, 0x57, 0x32, 0x07, 0xda,
+	0x69, 0x05, 0xc5, 0x09, 0x6c, 0xed, 0x7e, 0xe9, 0xed, 0x9a, 0x7b, 0xed, 0x18, 0x19, 0x83, 0x1e,
+	0xff, 0x4d, 0xf6, 0x75, 0x21, 0x13, 0xc1, 0x8b, 0xe3, 0x9f, 0x7f, 0x70, 0x88, 0x25, 0x8c, 0x3e,
+	0xe8, 0xaa, 0x5b, 0x4e, 0x73, 0xb6, 0x15, 0x6d, 0x2d, 0xb8, 0x61, 0xd5, 0xdf, 0x21, 0xc1, 0x2e,
+	0x7b, 0x8b, 0x0f, 0xc4, 0x0f, 0x3a, 0x05, 0x98, 0xbf, 0x3a, 0x5b, 0xa6, 0x98, 0x38, 0x5d, 0x07,
+	0xcd, 0xc2, 0x98, 0x2c, 0xac, 0x90, 0x90, 0x5d, 0x01, 0x43, 0x8c, 0x8c, 0x72, 0x79, 0x14, 0xc5,
+	0x8a, 0x48, 0x12, 0xdf, 0x64, 0x70, 0xe1, 0x41, 0x30, 0xb8, 0x2f, 0xc3, 0x88, 0xeb, 0xb9, 0x91,
+	0xeb, 0x44, 0x3e, 0xd7, 0x1f, 0xf1, 0x67, 0x37, 0x13, 0x30, 0x2f, 0xeb, 0x00, 0x6c, 0xe2, 0xd9,
+	0xff, 0x5f, 0x0f, 0x4c, 0xb0, 0x69, 0xfb, 0x60, 0x85, 0x7d, 0x2f, 0xad, 0xb0, 0xf5, 0xf4, 0x0a,
+	0x7b, 0x10, 0x9c, 0xfb, 0x7d, 0x2f, 0xb3, 0x2f, 0x58, 0x30, 0xc1, 0x64, 0xdc, 0xc6, 0x32, 0xbb,
+	0x08, 0x83, 0x81, 0xe1, 0x8d, 0x3a, 0xa8, 0x2b, 0xb5, 0xa4, 0x63, 0x69, 0x8c, 0x83, 0xde, 0x00,
+	0x68, 0xc6, 0x32, 0xf4, 0x82, 0x11, 0x42, 0x14, 0x72, 0xc5, 0xe7, 0x5a, 0x1d, 0xfb, 0xb3, 0x30,
+	0xa8, 0xdc, 0x4d, 0xa5, 0xbf, 0xb9, 0x95, 0xe3, 0x6f, 0xde, 0x99, 0x8d, 0x90, 0xb6, 0x71, 0xc5,
+	0x4c, 0xdb, 0xb8, 0xaf, 0x5a, 0x10, 0x6b, 0x38, 0xd0, 0x5b, 0x30, 0xd8, 0xf4, 0x99, 0x41, 0x74,
+	0x20, 0xbd, 0x0c, 0x1e, 0x6f, 0xab, 0x22, 0xe1, 0x71, 0x02, 0x03, 0x3e, 0x1d, 0x65, 0x59, 0x15,
+	0xc7, 0x54, 0xd0, 0x35, 0xe8, 0x6f, 0x06, 0xa4, 0x12, 0xb1, 0x20, 0x56, 0xdd, 0x13, 0xe4, 0xcb,
+	0x97, 0x57, 0xc4, 0x92, 0x82, 0xfd, 0x1b, 0x05, 0x18, 0x4f, 0xa2, 0xa2, 0xd7, 0xa0, 0x87, 0xdc,
+	0x25, 0x55, 0xd1, 0xdf, 0x4c, 0x9e, 0x20, 0x96, 0x91, 0xf0, 0x01, 0xa0, 0xff, 0x31, 0xab, 0x85,
+	0xae, 0x42, 0x3f, 0x65, 0x08, 0xae, 0xa8, 0x80, 0x8d, 0x8f, 0xe6, 0x31, 0x15, 0x8a, 0xb3, 0xe2,
+	0x9d, 0x13, 0x45, 0x58, 0x56, 0x67, 0x06, 0x69, 0xd5, 0x66, 0x85, 0xbe, 0xb5, 0xa2, 0x76, 0x22,
+	0x81, 0xb5, 0xf9, 0x32, 0x47, 0x12, 0xd4, 0xb8, 0x41, 0x9a, 0x2c, 0xc4, 0x31, 0x11, 0xf4, 0x06,
+	0xf4, 0x86, 0x75, 0x42, 0x9a, 0xc2, 0xe2, 0x20, 0x53, 0xca, 0x59, 0xa1, 0x08, 0x82, 0x12, 0x93,
+	0x8a, 0xb0, 0x02, 0xcc, 0x2b, 0xda, 0xbf, 0x65, 0x01, 0x70, 0x0b, 0x3e, 0xc7, 0xdb, 0x24, 0x47,
+	0xa0, 0x18, 0x58, 0x80, 0x9e, 0xb0, 0x49, 0xaa, 0xed, 0xac, 0xfd, 0xe3, 0xfe, 0x54, 0x9a, 0xa4,
+	0x1a, 0xaf, 0x59, 0xfa, 0x0f, 0xb3, 0xda, 0xf6, 0x8f, 0x02, 0x8c, 0xc6, 0x68, 0xcb, 0x11, 0x69,
+	0xa0, 0xe7, 0x8c, 0x28, 0x37, 0xa7, 0x12, 0x51, 0x6e, 0x06, 0x19, 0xb6, 0x26, 0x83, 0xfe, 0x2c,
+	0x14, 0x1b, 0xce, 0x5d, 0x21, 0x64, 0x7c, 0xa6, 0x7d, 0x37, 0x28, 0xfd, 0x99, 0x15, 0xe7, 0x2e,
+	0x7f, 0x87, 0x3f, 0x23, 0xf7, 0xd8, 0x8a, 0x73, 0xb7, 0xa3, 0x45, 0x3a, 0x6d, 0x84, 0xb5, 0xe5,
+	0x7a, 0xc2, 0x38, 0xad, 0xab, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0x75, 0xd1, 0x96, 0xeb, 0xa1,
+	0x7b, 0xd0, 0x2f, 0x6c, 0x47, 0x45, 0xf8, 0xbd, 0x8b, 0x5d, 0xb4, 0x27, 0x4c, 0x4f, 0x79, 0x9b,
+	0x17, 0xa5, 0x9c, 0x41, 0x94, 0x76, 0x6c, 0x57, 0x36, 0x88, 0xfe, 0x2b, 0x0b, 0x46, 0xc5, 0x6f,
+	0x4c, 0xde, 0x6d, 0x91, 0x30, 0x12, 0x7c, 0xf8, 0x47, 0xba, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2,
+	0x11, 0x79, 0x65, 0x9a, 0xc0, 0x8e, 0x3d, 0x4a, 0xf4, 0x02, 0xfd, 0x86, 0x05, 0xc7, 0x1a, 0xce,
+	0x5d, 0xde, 0x22, 0x2f, 0xc3, 0x4e, 0xe4, 0xfa, 0xc2, 0x06, 0xe3, 0xb5, 0xee, 0xa6, 0x3f, 0x55,
+	0x9d, 0x77, 0x52, 0x2a, 0x5c, 0x8f, 0x65, 0xa1, 0x74, 0xec, 0x6a, 0x66, 0xbf, 0xa6, 0x37, 0x60,
+	0x40, 0xae, 0xb7, 0x87, 0x69, 0x18, 0xcf, 0xda, 0x11, 0x6b, 0xed, 0xa1, 0xb6, 0xf3, 0x59, 0x18,
+	0xd6, 0xd7, 0xd8, 0x43, 0x6d, 0xeb, 0x5d, 0x98, 0xcc, 0x58, 0x4b, 0x0f, 0xb5, 0xc9, 0x3b, 0x70,
+	0x2a, 0x77, 0x7d, 0x3c, 0x54, 0xc7, 0x86, 0xaf, 0x5b, 0xfa, 0x39, 0x78, 0x04, 0xda, 0x99, 0x79,
+	0x53, 0x3b, 0x73, 0xb6, 0xfd, 0xce, 0xc9, 0x51, 0xd1, 0xbc, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8,
+	0x4d, 0xe8, 0xab, 0xd3, 0x12, 0x69, 0x81, 0x6c, 0x77, 0xde, 0x91, 0x31, 0x5f, 0xcc, 0xca, 0x43,
+	0x2c, 0x28, 0xd8, 0x5f, 0xb1, 0x20, 0xc3, 0x35, 0x83, 0xf2, 0x49, 0x2d, 0xb7, 0xc6, 0x86, 0xa4,
+	0x18, 0xf3, 0x49, 0x2a, 0x08, 0xcc, 0x19, 0x28, 0x6e, 0xba, 0x35, 0xe1, 0x59, 0xac, 0xc0, 0x57,
+	0x28, 0x78, 0xd3, 0xad, 0xa1, 0x25, 0x40, 0x61, 0xab, 0xd9, 0xac, 0x33, 0xb3, 0x25, 0xa7, 0x7e,
+	0x25, 0xf0, 0x5b, 0x4d, 0x6e, 0x6e, 0x5c, 0xe4, 0x42, 0xa2, 0x4a, 0x0a, 0x8a, 0x33, 0x6a, 0xd8,
+	0xbf, 0x6b, 0x41, 0xcf, 0x11, 0x4c, 0x13, 0x36, 0xa7, 0xe9, 0xb9, 0x5c, 0xd2, 0x22, 0x6b, 0xc3,
+	0x0c, 0x76, 0xee, 0x2c, 0xde, 0x8d, 0x88, 0x17, 0x32, 0x86, 0x23, 0x73, 0xd6, 0xf6, 0x2c, 0x98,
+	0xbc, 0xee, 0x3b, 0xb5, 0x39, 0xa7, 0xee, 0x78, 0x55, 0x12, 0x2c, 0x7b, 0x9b, 0x87, 0xb2, 0xed,
+	0x2f, 0x74, 0xb4, 0xed, 0xbf, 0x0c, 0x7d, 0x6e, 0x53, 0x0b, 0xfb, 0x7e, 0x8e, 0xce, 0xee, 0x72,
+	0x59, 0x44, 0x7c, 0x47, 0x46, 0xe3, 0xac, 0x14, 0x0b, 0x7c, 0xba, 0x2c, 0xb9, 0x51, 0x5d, 0x4f,
+	0xfe, 0xb2, 0xa4, 0x6f, 0x9d, 0x64, 0x38, 0x33, 0xc3, 0xfc, 0x7b, 0x0b, 0x8c, 0x26, 0x84, 0x07,
+	0x23, 0x86, 0x7e, 0x97, 0x7f, 0xa9, 0x58, 0x9b, 0x4f, 0x66, 0xbf, 0x41, 0x52, 0x03, 0xa3, 0xf9,
+	0xe6, 0xf1, 0x02, 0x2c, 0x09, 0xd9, 0x97, 0x21, 0x33, 0xfc, 0x4c, 0x67, 0xf9, 0x92, 0xfd, 0x09,
+	0x98, 0x60, 0x35, 0x0f, 0x29, 0xbb, 0xb1, 0x13, 0x52, 0xf1, 0x8c, 0x08, 0xbe, 0xf6, 0xff, 0x6d,
+	0x01, 0x5a, 0xf1, 0x6b, 0xee, 0xc6, 0xae, 0x20, 0xce, 0xbf, 0xff, 0x5d, 0x28, 0xf1, 0xc7, 0x71,
+	0x32, 0xca, 0xed, 0x7c, 0xdd, 0x09, 0x43, 0x4d, 0x22, 0xff, 0xa4, 0x68, 0xb7, 0xb4, 0xd6, 0x1e,
+	0x1d, 0x77, 0xa2, 0x87, 0xde, 0x4a, 0x04, 0x1d, 0xfc, 0x68, 0x2a, 0xe8, 0xe0, 0x93, 0x99, 0x76,
+	0x31, 0xe9, 0xde, 0xcb, 0x60, 0x84, 0xf6, 0x17, 0x2d, 0x18, 0x5b, 0x4d, 0x44, 0x6d, 0x3d, 0xcf,
+	0x8c, 0x04, 0x32, 0x34, 0x4d, 0x15, 0x56, 0x8a, 0x05, 0xf4, 0x81, 0x4b, 0x62, 0xff, 0xd5, 0x82,
+	0x38, 0xdc, 0xd5, 0x11, 0xb0, 0xdc, 0xf3, 0x06, 0xcb, 0x9d, 0xf9, 0x7c, 0x51, 0xdd, 0xc9, 0xe3,
+	0xb8, 0xd1, 0x35, 0x35, 0x27, 0x6d, 0x5e, 0x2e, 0x31, 0x19, 0xbe, 0xcf, 0x46, 0xcd, 0x89, 0x53,
+	0xb3, 0xf1, 0xcd, 0x02, 0x20, 0x85, 0xdb, 0x75, 0xa0, 0xca, 0x74, 0x8d, 0x07, 0x13, 0xa8, 0x72,
+	0x07, 0x10, 0x33, 0x73, 0x09, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0x7b, 0x3e, 0x9c, 0x0d, 0xcd,
+	0xb4, 0xf4, 0x5c, 0xbd, 0x9e, 0xa2, 0x86, 0x33, 0x5a, 0xd0, 0xcc, 0x97, 0x7a, 0xbb, 0x35, 0x5f,
+	0xea, 0xeb, 0xe0, 0x82, 0xfd, 0x35, 0x0b, 0x46, 0xd4, 0x30, 0xbd, 0x4f, 0x5c, 0x40, 0x54, 0x7f,
+	0x72, 0xee, 0x95, 0xb2, 0xd6, 0x65, 0xc6, 0x0c, 0x7c, 0x1f, 0x73, 0xa5, 0x77, 0xea, 0xee, 0x3d,
+	0xa2, 0xe2, 0x29, 0x97, 0x84, 0x6b, 0xbc, 0x28, 0x3d, 0xd8, 0x2b, 0x8d, 0xa8, 0x7f, 0x3c, 0x82,
+	0x6b, 0x5c, 0xc5, 0xfe, 0x25, 0xba, 0xd9, 0xcd, 0xa5, 0x88, 0x5e, 0x82, 0xde, 0xe6, 0x96, 0x13,
+	0x92, 0x84, 0xab, 0x5c, 0x6f, 0x99, 0x16, 0x1e, 0xec, 0x95, 0x46, 0x55, 0x05, 0x56, 0x82, 0x39,
+	0x76, 0xf7, 0xe1, 0x3f, 0xd3, 0x8b, 0xb3, 0x63, 0xf8, 0xcf, 0x7f, 0xb2, 0xa0, 0x67, 0x95, 0xde,
+	0x5e, 0x0f, 0xff, 0x08, 0x78, 0xdd, 0x38, 0x02, 0x4e, 0xe7, 0x65, 0x16, 0xca, 0xdd, 0xfd, 0x4b,
+	0x89, 0xdd, 0x7f, 0x36, 0x97, 0x42, 0xfb, 0x8d, 0xdf, 0x80, 0x21, 0x96, 0xaf, 0x48, 0xb8, 0x05,
+	0xbe, 0x60, 0x6c, 0xf8, 0x52, 0x62, 0xc3, 0x8f, 0x69, 0xa8, 0xda, 0x4e, 0x7f, 0x0a, 0xfa, 0x85,
+	0x9f, 0x59, 0x32, 0x22, 0x81, 0xc0, 0xc5, 0x12, 0x6e, 0xff, 0x7c, 0x11, 0x8c, 0xfc, 0x48, 0xe8,
+	0xf7, 0x2d, 0x98, 0x09, 0xb8, 0xfd, 0x79, 0x6d, 0xa1, 0x15, 0xb8, 0xde, 0x66, 0xa5, 0xba, 0x45,
+	0x6a, 0xad, 0xba, 0xeb, 0x6d, 0x2e, 0x6f, 0x7a, 0xbe, 0x2a, 0x5e, 0xbc, 0x4b, 0xaa, 0x2d, 0xa6,
+	0x1b, 0xee, 0x90, 0x8c, 0x49, 0xf9, 0x71, 0x3c, 0xbf, 0xbf, 0x57, 0x9a, 0xc1, 0x87, 0xa2, 0x8d,
+	0x0f, 0xd9, 0x17, 0xf4, 0xe7, 0x16, 0x5c, 0xe4, 0x79, 0x7a, 0xba, 0xef, 0x7f, 0x1b, 0x09, 0x47,
+	0x59, 0x92, 0x8a, 0x89, 0xac, 0x91, 0xa0, 0x31, 0xf7, 0xb2, 0x18, 0xd0, 0x8b, 0xe5, 0xc3, 0xb5,
+	0x85, 0x0f, 0xdb, 0x39, 0xfb, 0x7f, 0x2e, 0xc2, 0x88, 0x08, 0x13, 0x29, 0xee, 0x80, 0x97, 0x8c,
+	0x25, 0xf1, 0x68, 0x62, 0x49, 0x4c, 0x18, 0xc8, 0x0f, 0xe6, 0xf8, 0x0f, 0x61, 0x82, 0x1e, 0xce,
+	0x57, 0x89, 0x13, 0x44, 0xb7, 0x89, 0xc3, 0xad, 0x12, 0x8b, 0x87, 0x3e, 0xfd, 0x95, 0x78, 0xfc,
+	0x7a, 0x92, 0x18, 0x4e, 0xd3, 0xff, 0x5e, 0xba, 0x73, 0x3c, 0x18, 0x4f, 0x45, 0xfa, 0x7c, 0x1b,
+	0x06, 0x95, 0x93, 0x94, 0x38, 0x74, 0xda, 0x07, 0xcc, 0x4d, 0x52, 0xe0, 0x42, 0xcf, 0xd8, 0x41,
+	0x2f, 0x26, 0x67, 0xff, 0x66, 0xc1, 0x68, 0x90, 0x4f, 0xe2, 0x2a, 0x0c, 0x38, 0x21, 0x0b, 0xe2,
+	0x5d, 0x6b, 0x27, 0x97, 0x4e, 0x35, 0xc3, 0x1c, 0xd5, 0x66, 0x45, 0x4d, 0xac, 0x68, 0xa0, 0xab,
+	0xdc, 0xf6, 0x73, 0x87, 0xb4, 0x13, 0x4a, 0xa7, 0xa8, 0x81, 0xb4, 0x0e, 0xdd, 0x21, 0x58, 0xd4,
+	0x47, 0x9f, 0xe2, 0xc6, 0xb9, 0xd7, 0x3c, 0xff, 0x8e, 0x77, 0xc5, 0xf7, 0x65, 0x48, 0xa0, 0xee,
+	0x08, 0x4e, 0x48, 0x93, 0x5c, 0x55, 0x1d, 0x9b, 0xd4, 0xba, 0x0b, 0x9d, 0xfd, 0x39, 0x60, 0x79,
+	0x49, 0xcc, 0x98, 0x04, 0x21, 0x22, 0x30, 0x26, 0x62, 0x90, 0xca, 0x32, 0x31, 0x76, 0x99, 0xcf,
+	0x6f, 0xb3, 0x76, 0xac, 0xc7, 0xb9, 0x66, 0x92, 0xc0, 0x49, 0x9a, 0xf6, 0x16, 0x3f, 0x84, 0x97,
+	0x88, 0x13, 0xb5, 0x02, 0x12, 0xa2, 0x8f, 0xc3, 0x54, 0xfa, 0x65, 0x2c, 0xd4, 0x21, 0x16, 0xe3,
+	0x9e, 0x4f, 0xef, 0xef, 0x95, 0xa6, 0x2a, 0x39, 0x38, 0x38, 0xb7, 0xb6, 0xfd, 0x2b, 0x16, 0x30,
+	0x4f, 0xf0, 0x23, 0xe0, 0x7c, 0x3e, 0x66, 0x72, 0x3e, 0x53, 0x79, 0xd3, 0x99, 0xc3, 0xf4, 0xbc,
+	0xc8, 0xd7, 0x70, 0x39, 0xf0, 0xef, 0xee, 0x0a, 0xdb, 0xad, 0xce, 0xcf, 0x38, 0xfb, 0xcb, 0x16,
+	0xb0, 0x24, 0x3e, 0x98, 0xbf, 0xda, 0xa5, 0x82, 0xa3, 0xb3, 0x59, 0xc2, 0xc7, 0x61, 0x60, 0x43,
+	0x0c, 0x7f, 0x86, 0xd0, 0xc9, 0xe8, 0xb0, 0x49, 0x5b, 0x4e, 0x9a, 0xf0, 0xe8, 0x14, 0xff, 0xb0,
+	0xa2, 0x66, 0xff, 0xf7, 0x16, 0x4c, 0xe7, 0x57, 0x43, 0xeb, 0x70, 0x32, 0x20, 0xd5, 0x56, 0x10,
+	0xd2, 0x2d, 0x21, 0x1e, 0x40, 0xc2, 0x29, 0x8a, 0x4f, 0xf5, 0x23, 0xfb, 0x7b, 0xa5, 0x93, 0x38,
+	0x1b, 0x05, 0xe7, 0xd5, 0x45, 0xaf, 0xc0, 0x68, 0x2b, 0xe4, 0x9c, 0x1f, 0x63, 0xba, 0x42, 0x11,
+	0x29, 0x9a, 0xf9, 0x0d, 0xad, 0x1b, 0x10, 0x9c, 0xc0, 0xb4, 0x7f, 0x80, 0x2f, 0x47, 0x15, 0x2c,
+	0xba, 0x01, 0x13, 0x9e, 0xf6, 0x9f, 0xde, 0x80, 0xf2, 0xa9, 0xff, 0x78, 0xa7, 0x5b, 0x9f, 0x5d,
+	0x97, 0x9a, 0xaf, 0x7a, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xc1, 0x82, 0x93, 0x3a, 0xa2, 0xe6,
+	0x0e, 0xd7, 0x49, 0x97, 0xb7, 0x00, 0x03, 0x7e, 0x93, 0x04, 0x4e, 0xe4, 0x07, 0xe2, 0x9a, 0xbb,
+	0x20, 0x57, 0xe8, 0x0d, 0x51, 0x7e, 0x20, 0x92, 0xd7, 0x48, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0xc8,
+	0x86, 0x3e, 0x26, 0x40, 0x0c, 0x85, 0xe3, 0x23, 0x3b, 0xb4, 0x98, 0x7d, 0x4a, 0x88, 0x05, 0xc4,
+	0xfe, 0x7b, 0x8b, 0xaf, 0x4f, 0xbd, 0xeb, 0xe8, 0x5d, 0x18, 0x6f, 0x38, 0x51, 0x75, 0x6b, 0xf1,
+	0x6e, 0x33, 0xe0, 0x2a, 0x5a, 0x39, 0x4e, 0xcf, 0x74, 0x1a, 0x27, 0xed, 0x23, 0x63, 0x03, 0xe9,
+	0x95, 0x04, 0x31, 0x9c, 0x22, 0x8f, 0x6e, 0xc3, 0x10, 0x2b, 0x63, 0x3e, 0xbd, 0x61, 0x3b, 0x5e,
+	0x26, 0xaf, 0x35, 0x65, 0xe2, 0xb3, 0x12, 0xd3, 0xc1, 0x3a, 0x51, 0xfb, 0xab, 0x45, 0x7e, 0x68,
+	0xb0, 0xb7, 0xc7, 0x53, 0xd0, 0xdf, 0xf4, 0x6b, 0xf3, 0xcb, 0x0b, 0x58, 0xcc, 0x82, 0xba, 0xf7,
+	0xca, 0xbc, 0x18, 0x4b, 0x38, 0xba, 0x00, 0x03, 0xe2, 0xa7, 0x54, 0xa9, 0xb3, 0x3d, 0x22, 0xf0,
+	0x42, 0xac, 0xa0, 0xe8, 0x79, 0x80, 0x66, 0xe0, 0xef, 0xb8, 0x35, 0x16, 0x89, 0xa9, 0x68, 0x5a,
+	0xe7, 0x95, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x55, 0x18, 0x69, 0x79, 0x21, 0xe7, 0x9f, 0xb4, 0x78,
+	0xf7, 0xca, 0x6e, 0x6c, 0x5d, 0x07, 0x62, 0x13, 0x17, 0xcd, 0x42, 0x5f, 0xe4, 0x30, 0x6b, 0xb3,
+	0xde, 0x7c, 0x23, 0xfa, 0x35, 0x8a, 0xa1, 0x67, 0x96, 0xa3, 0x15, 0xb0, 0xa8, 0x88, 0xde, 0x96,
+	0xee, 0xf5, 0xfc, 0x26, 0x12, 0xde, 0x2b, 0xdd, 0xdd, 0x5a, 0x9a, 0x73, 0xbd, 0xf0, 0x8a, 0x31,
+	0x68, 0xa1, 0x57, 0x00, 0xc8, 0xdd, 0x88, 0x04, 0x9e, 0x53, 0x57, 0x36, 0xa2, 0x8a, 0x91, 0x59,
+	0xf0, 0x57, 0xfd, 0x68, 0x3d, 0x24, 0x8b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x8f, 0x0e, 0x01, 0xc4,
+	0x0f, 0x0d, 0x74, 0x0f, 0x06, 0xaa, 0x4e, 0xd3, 0xa9, 0xf2, 0xb4, 0xa9, 0xc5, 0x3c, 0xaf, 0xe7,
+	0xb8, 0xc6, 0xcc, 0xbc, 0x40, 0xe7, 0xca, 0x1b, 0x19, 0x32, 0x7c, 0x40, 0x16, 0x77, 0x54, 0xd8,
+	0xa8, 0xf6, 0xd0, 0x17, 0x2c, 0x18, 0x12, 0x91, 0x8e, 0xd8, 0x0c, 0x15, 0xf2, 0xf5, 0x6d, 0x5a,
+	0xfb, 0xb3, 0x71, 0x0d, 0xde, 0x85, 0x17, 0xe4, 0x0a, 0xd5, 0x20, 0x1d, 0x7b, 0xa1, 0x37, 0x8c,
+	0x3e, 0x2c, 0xdf, 0xb6, 0x45, 0x63, 0x28, 0xd5, 0xdb, 0x76, 0x90, 0x5d, 0x35, 0xfa, 0xb3, 0x76,
+	0xdd, 0x78, 0xd6, 0xf6, 0xe4, 0xfb, 0x0f, 0x1b, 0xfc, 0x76, 0xa7, 0x17, 0x2d, 0x2a, 0xeb, 0xb1,
+	0x44, 0x7a, 0xf3, 0x9d, 0x5e, 0xb5, 0x87, 0x5d, 0x87, 0x38, 0x22, 0x9f, 0x85, 0xb1, 0x9a, 0xc9,
+	0xb5, 0x88, 0x95, 0xf8, 0x64, 0x1e, 0xdd, 0x04, 0x93, 0x13, 0xf3, 0x29, 0x09, 0x00, 0x4e, 0x12,
+	0x46, 0x65, 0x1e, 0x5a, 0x66, 0xd9, 0xdb, 0xf0, 0x85, 0x07, 0x95, 0x9d, 0x3b, 0x97, 0xbb, 0x61,
+	0x44, 0x1a, 0x14, 0x33, 0x66, 0x12, 0x56, 0x45, 0x5d, 0xac, 0xa8, 0xa0, 0x37, 0xa1, 0x8f, 0x79,
+	0x3d, 0x86, 0x53, 0x03, 0xf9, 0x6a, 0x0d, 0x33, 0x12, 0x6a, 0xbc, 0x21, 0xd9, 0xdf, 0x10, 0x0b,
+	0x0a, 0xe8, 0xaa, 0xf4, 0x29, 0x0e, 0x97, 0xbd, 0xf5, 0x90, 0x30, 0x9f, 0xe2, 0xc1, 0xb9, 0xc7,
+	0x63, 0x77, 0x61, 0x5e, 0x9e, 0x99, 0x7f, 0xd6, 0xa8, 0x49, 0xd9, 0x3e, 0xf1, 0x5f, 0xa6, 0xb5,
+	0x15, 0x71, 0xdb, 0x32, 0xbb, 0x67, 0xa6, 0xbe, 0x8d, 0x87, 0xf3, 0xa6, 0x49, 0x02, 0x27, 0x69,
+	0x52, 0x16, 0x9a, 0xef, 0x7a, 0xe1, 0x83, 0xd5, 0xe9, 0xec, 0xe0, 0x92, 0x03, 0x76, 0x1b, 0xf1,
+	0x12, 0x2c, 0xea, 0x23, 0x17, 0xc6, 0x02, 0x83, 0xbd, 0x90, 0xe1, 0xd6, 0xce, 0x77, 0xc7, 0xc4,
+	0x68, 0x81, 0xfc, 0x4d, 0x32, 0x38, 0x49, 0x17, 0xbd, 0xa9, 0x31, 0x4a, 0x23, 0xed, 0x5f, 0xfe,
+	0x9d, 0x58, 0xa3, 0xe9, 0x6d, 0x18, 0x31, 0x0e, 0x9b, 0x87, 0xaa, 0x82, 0xf4, 0x60, 0x3c, 0x79,
+	0xb2, 0x3c, 0x54, 0xcd, 0xe3, 0xdf, 0xf6, 0xc0, 0xa8, 0xb9, 0x13, 0xd0, 0x45, 0x18, 0x14, 0x44,
+	0x54, 0x46, 0x2b, 0xb5, 0xb9, 0x57, 0x24, 0x00, 0xc7, 0x38, 0x2c, 0x91, 0x19, 0xab, 0xae, 0xf9,
+	0x0a, 0xc4, 0x89, 0xcc, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x01, 0x7b, 0xdb, 0xf7, 0x23, 0x75, 0x8f,
+	0xaa, 0xed, 0x32, 0xc7, 0x4a, 0xb1, 0x80, 0xd2, 0xfb, 0x73, 0x9b, 0x04, 0x1e, 0xa9, 0x9b, 0x29,
+	0x1d, 0xd4, 0xfd, 0x79, 0x4d, 0x07, 0x62, 0x13, 0x97, 0x72, 0x01, 0x7e, 0xc8, 0xf6, 0x9f, 0x78,
+	0x26, 0xc7, 0xbe, 0x17, 0x15, 0x1e, 0x45, 0x42, 0xc2, 0xd1, 0x27, 0xe0, 0xa4, 0x0a, 0x9f, 0x28,
+	0x56, 0x97, 0x6c, 0xb1, 0xcf, 0x90, 0x6a, 0x9d, 0x9c, 0xcf, 0x46, 0xc3, 0x79, 0xf5, 0xd1, 0xeb,
+	0x30, 0x2a, 0x9e, 0x52, 0x92, 0x62, 0xbf, 0x69, 0x48, 0x78, 0xcd, 0x80, 0xe2, 0x04, 0xb6, 0x4c,
+	0x4a, 0xc1, 0xde, 0x18, 0x92, 0xc2, 0x40, 0x3a, 0x29, 0x85, 0x0e, 0xc7, 0xa9, 0x1a, 0x68, 0x16,
+	0xc6, 0x38, 0xeb, 0xe8, 0x7a, 0x9b, 0x7c, 0x4e, 0x84, 0x67, 0xa7, 0xda, 0x54, 0x37, 0x4c, 0x30,
+	0x4e, 0xe2, 0xa3, 0xcb, 0x30, 0xec, 0x04, 0xd5, 0x2d, 0x37, 0x22, 0x55, 0xba, 0x33, 0x98, 0x2d,
+	0x9f, 0x66, 0x89, 0x39, 0xab, 0xc1, 0xb0, 0x81, 0x69, 0xdf, 0x83, 0xc9, 0x8c, 0xf0, 0x32, 0x74,
+	0xe1, 0x38, 0x4d, 0x57, 0x7e, 0x53, 0xc2, 0xdd, 0x61, 0xb6, 0xbc, 0x2c, 0xbf, 0x46, 0xc3, 0xa2,
+	0xab, 0x93, 0x85, 0xa1, 0xd1, 0x92, 0x6f, 0xab, 0xd5, 0xb9, 0x24, 0x01, 0x38, 0xc6, 0xb1, 0xff,
+	0xb9, 0x00, 0x63, 0x19, 0x0a, 0x3a, 0x96, 0x00, 0x3a, 0xf1, 0xd2, 0x8a, 0xf3, 0x3d, 0x9b, 0x39,
+	0x4e, 0x0a, 0x87, 0xc8, 0x71, 0x52, 0xec, 0x94, 0xe3, 0xa4, 0xe7, 0xbd, 0xe4, 0x38, 0x31, 0x47,
+	0xac, 0xb7, 0xab, 0x11, 0xcb, 0xc8, 0x8b, 0xd2, 0x77, 0xc8, 0xbc, 0x28, 0xc6, 0xa0, 0xf7, 0x77,
+	0x31, 0xe8, 0x3f, 0x5d, 0x80, 0xf1, 0xa4, 0x6e, 0xef, 0x08, 0xe4, 0xe3, 0x6f, 0x1a, 0xf2, 0xf1,
+	0x0b, 0xdd, 0x78, 0xe2, 0xe7, 0xca, 0xca, 0x71, 0x42, 0x56, 0xfe, 0x74, 0x57, 0xd4, 0xda, 0xcb,
+	0xcd, 0x7f, 0xb1, 0x00, 0xc7, 0x33, 0x55, 0x9e, 0x47, 0x30, 0x36, 0x37, 0x8c, 0xb1, 0x79, 0xae,
+	0xeb, 0x28, 0x05, 0xb9, 0x03, 0x74, 0x2b, 0x31, 0x40, 0x17, 0xbb, 0x27, 0xd9, 0x7e, 0x94, 0xbe,
+	0x55, 0x84, 0xb3, 0x99, 0xf5, 0x62, 0xf1, 0xf2, 0x92, 0x21, 0x5e, 0x7e, 0x3e, 0x21, 0x5e, 0xb6,
+	0xdb, 0xd7, 0x7e, 0x30, 0xf2, 0x66, 0xe1, 0xad, 0xcf, 0x62, 0x8e, 0xdc, 0xa7, 0xac, 0xd9, 0xf0,
+	0xd6, 0x57, 0x84, 0xb0, 0x49, 0xf7, 0x7b, 0x49, 0xc6, 0xfc, 0x67, 0x16, 0x9c, 0xca, 0x9c, 0x9b,
+	0x23, 0x90, 0xf4, 0xad, 0x9a, 0x92, 0xbe, 0xa7, 0xba, 0x5e, 0xad, 0x39, 0xa2, 0xbf, 0x2f, 0xf6,
+	0xe5, 0x7c, 0x0b, 0x13, 0x40, 0xdc, 0x80, 0x21, 0xa7, 0x5a, 0x25, 0x61, 0xb8, 0xe2, 0xd7, 0x54,
+	0x3a, 0x84, 0xe7, 0xd8, 0xf3, 0x30, 0x2e, 0x3e, 0xd8, 0x2b, 0x4d, 0x27, 0x49, 0xc4, 0x60, 0xac,
+	0x53, 0x40, 0x9f, 0x82, 0x81, 0x50, 0x66, 0xb2, 0xec, 0xb9, 0xff, 0x4c, 0x96, 0x8c, 0xc9, 0x55,
+	0x02, 0x16, 0x45, 0x12, 0x7d, 0xbf, 0x1e, 0xfd, 0xa9, 0x8d, 0x68, 0x91, 0x77, 0xf2, 0x3e, 0x62,
+	0x40, 0x3d, 0x0f, 0xb0, 0xa3, 0x5e, 0x32, 0x49, 0xe1, 0x89, 0xf6, 0xc6, 0xd1, 0xb0, 0xd0, 0x1b,
+	0x30, 0x1e, 0xf2, 0xc0, 0xa7, 0xb1, 0x91, 0x0a, 0x5f, 0x8b, 0x2c, 0x76, 0x5c, 0x25, 0x01, 0xc3,
+	0x29, 0x6c, 0xb4, 0x24, 0x5b, 0x65, 0xe6, 0x48, 0x7c, 0x79, 0x9e, 0x8f, 0x5b, 0x14, 0x26, 0x49,
+	0xc7, 0x92, 0x93, 0xc0, 0x86, 0x5f, 0xab, 0x89, 0x3e, 0x05, 0x40, 0x17, 0x91, 0x10, 0xa2, 0xf4,
+	0xe7, 0x1f, 0xa1, 0xf4, 0x6c, 0xa9, 0x65, 0x7a, 0x32, 0x30, 0x37, 0xfb, 0x05, 0x45, 0x04, 0x6b,
+	0x04, 0x91, 0x03, 0x23, 0xf1, 0xbf, 0x38, 0x47, 0xfb, 0x85, 0xdc, 0x16, 0x92, 0xc4, 0x99, 0x82,
+	0x61, 0x41, 0x27, 0x81, 0x4d, 0x8a, 0xe8, 0x93, 0x70, 0x6a, 0x27, 0xd7, 0xf2, 0x87, 0x73, 0x82,
+	0x2c, 0xe9, 0x7a, 0xbe, 0xbd, 0x4f, 0x7e, 0x7d, 0xfb, 0x7f, 0x07, 0x78, 0xa4, 0xcd, 0x49, 0x8f,
+	0x66, 0x4d, 0xad, 0xfd, 0x33, 0x49, 0xc9, 0xc6, 0x74, 0x66, 0x65, 0x43, 0xd4, 0x91, 0xd8, 0x50,
+	0x85, 0xf7, 0xbc, 0xa1, 0x7e, 0xc2, 0xd2, 0x64, 0x4e, 0xdc, 0xa6, 0xfb, 0x63, 0x87, 0xbc, 0xc1,
+	0x1e, 0xa0, 0x10, 0x6a, 0x23, 0x43, 0x92, 0xf3, 0x7c, 0xd7, 0xdd, 0xe9, 0x5e, 0xb4, 0xf3, 0xf5,
+	0xec, 0x80, 0xef, 0x5c, 0xc8, 0x73, 0xe5, 0xb0, 0xdf, 0x7f, 0x54, 0xc1, 0xdf, 0xbf, 0x69, 0xc1,
+	0xa9, 0x54, 0x31, 0xef, 0x03, 0x09, 0x45, 0xb4, 0xbb, 0xd5, 0xf7, 0xdc, 0x79, 0x49, 0x90, 0x7f,
+	0xc3, 0x55, 0xf1, 0x0d, 0xa7, 0x72, 0xf1, 0x92, 0x5d, 0xff, 0xd2, 0xdf, 0x94, 0x26, 0x59, 0x03,
+	0x26, 0x22, 0xce, 0xef, 0x3a, 0x6a, 0xc2, 0xb9, 0x6a, 0x2b, 0x08, 0xe2, 0xc5, 0x9a, 0xb1, 0x39,
+	0xf9, 0x5b, 0xef, 0xf1, 0xfd, 0xbd, 0xd2, 0xb9, 0xf9, 0x0e, 0xb8, 0xb8, 0x23, 0x35, 0xe4, 0x01,
+	0x6a, 0xa4, 0xec, 0xeb, 0xd8, 0x01, 0x90, 0x23, 0x87, 0x49, 0x5b, 0xe3, 0x71, 0x4b, 0xd9, 0x0c,
+	0x2b, 0xbd, 0x0c, 0xca, 0x47, 0x2b, 0x3d, 0xf9, 0xce, 0xc4, 0xa5, 0x9f, 0xbe, 0x0e, 0x67, 0xdb,
+	0x2f, 0xa6, 0x43, 0x85, 0x72, 0xf8, 0x4b, 0x0b, 0xce, 0xb4, 0x8d, 0x17, 0xf6, 0x5d, 0xf8, 0x58,
+	0xb0, 0x3f, 0x6f, 0xc1, 0xa3, 0x99, 0x35, 0x92, 0x4e, 0x78, 0x55, 0x5a, 0xa8, 0x99, 0xa3, 0xc6,
+	0x91, 0x73, 0x24, 0x00, 0xc7, 0x38, 0x86, 0xc5, 0x66, 0xa1, 0xa3, 0xc5, 0xe6, 0x1f, 0x59, 0x90,
+	0xba, 0xea, 0x8f, 0x80, 0xf3, 0x5c, 0x36, 0x39, 0xcf, 0xc7, 0xbb, 0x19, 0xcd, 0x1c, 0xa6, 0xf3,
+	0x1f, 0xc7, 0xe0, 0x44, 0x8e, 0x27, 0xf6, 0x0e, 0x4c, 0x6c, 0x56, 0x89, 0x19, 0x7a, 0xa3, 0x5d,
+	0x48, 0xba, 0xb6, 0x71, 0x3a, 0xe6, 0x8e, 0xef, 0xef, 0x95, 0x26, 0x52, 0x28, 0x38, 0xdd, 0x04,
+	0xfa, 0xbc, 0x05, 0xc7, 0x9c, 0x3b, 0xe1, 0x22, 0x7d, 0x41, 0xb8, 0xd5, 0xb9, 0xba, 0x5f, 0xdd,
+	0xa6, 0x8c, 0x99, 0xdc, 0x56, 0x2f, 0x66, 0x0a, 0xa3, 0x6f, 0x55, 0x52, 0xf8, 0x46, 0xf3, 0x53,
+	0xfb, 0x7b, 0xa5, 0x63, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0x84, 0x45, 0xc6, 0x2f, 0x27, 0xda, 0x6a,
+	0x17, 0x1c, 0x26, 0xcb, 0x65, 0x9e, 0xb3, 0xc4, 0x12, 0x82, 0x15, 0x1d, 0xf4, 0x19, 0x18, 0xdc,
+	0x94, 0x71, 0x20, 0x32, 0x58, 0xee, 0x78, 0x20, 0xdb, 0x47, 0xc7, 0xe0, 0x26, 0x30, 0x0a, 0x09,
+	0xc7, 0x44, 0xd1, 0xeb, 0x50, 0xf4, 0x36, 0x42, 0x11, 0xa2, 0x2e, 0xdb, 0x12, 0xd7, 0xb4, 0x75,
+	0xe6, 0x21, 0x98, 0x56, 0x97, 0x2a, 0x98, 0x56, 0x44, 0x57, 0xa1, 0x18, 0xdc, 0xae, 0x09, 0x4d,
+	0x4a, 0xe6, 0x26, 0xc5, 0x73, 0x0b, 0x39, 0xbd, 0x62, 0x94, 0xf0, 0xdc, 0x02, 0xa6, 0x24, 0x50,
+	0x19, 0x7a, 0x99, 0xfb, 0xb2, 0x60, 0x6d, 0x33, 0x9f, 0xf2, 0x6d, 0xc2, 0x00, 0x70, 0x8f, 0x44,
+	0x86, 0x80, 0x39, 0x21, 0xb4, 0x06, 0x7d, 0x55, 0xd7, 0xab, 0x91, 0x40, 0xf0, 0xb2, 0x1f, 0xce,
+	0xd4, 0x99, 0x30, 0x8c, 0x1c, 0x9a, 0x5c, 0x85, 0xc0, 0x30, 0xb0, 0xa0, 0xc5, 0xa8, 0x92, 0xe6,
+	0xd6, 0x86, 0xbc, 0xb1, 0xb2, 0xa9, 0x92, 0xe6, 0xd6, 0x52, 0xa5, 0x2d, 0x55, 0x86, 0x81, 0x05,
+	0x2d, 0xf4, 0x0a, 0x14, 0x36, 0xaa, 0xc2, 0x35, 0x39, 0x53, 0x79, 0x62, 0x46, 0xd1, 0x9a, 0xeb,
+	0xdb, 0xdf, 0x2b, 0x15, 0x96, 0xe6, 0x71, 0x61, 0xa3, 0x8a, 0x56, 0xa1, 0x7f, 0x83, 0xc7, 0xdd,
+	0x11, 0xfa, 0x91, 0x27, 0xb3, 0x43, 0x02, 0xa5, 0x42, 0xf3, 0x70, 0xef, 0x52, 0x01, 0xc0, 0x92,
+	0x08, 0x4b, 0x40, 0xa5, 0xe2, 0x07, 0x89, 0xf0, 0xa5, 0x33, 0x87, 0x8b, 0xf9, 0xc4, 0x9f, 0x1a,
+	0x71, 0x14, 0x22, 0xac, 0x51, 0xa4, 0xab, 0xda, 0xb9, 0xd7, 0x0a, 0x58, 0x6e, 0x0b, 0xa1, 0x1a,
+	0xc9, 0x5c, 0xd5, 0xb3, 0x12, 0xa9, 0xdd, 0xaa, 0x56, 0x48, 0x38, 0x26, 0x8a, 0xb6, 0x61, 0x64,
+	0x27, 0x6c, 0x6e, 0x11, 0xb9, 0xa5, 0x59, 0xd8, 0xbb, 0x1c, 0x6e, 0xf6, 0xa6, 0x40, 0x74, 0x83,
+	0xa8, 0xe5, 0xd4, 0x53, 0xa7, 0x10, 0x7b, 0xd6, 0xdc, 0xd4, 0x89, 0x61, 0x93, 0x36, 0x1d, 0xfe,
+	0x77, 0x5b, 0xfe, 0xed, 0xdd, 0x88, 0x88, 0xa8, 0xa3, 0x99, 0xc3, 0xff, 0x16, 0x47, 0x49, 0x0f,
+	0xbf, 0x00, 0x60, 0x49, 0x04, 0xdd, 0x14, 0xc3, 0xc3, 0x4e, 0xcf, 0xf1, 0xfc, 0x90, 0xe6, 0xb3,
+	0x12, 0x29, 0x67, 0x50, 0xd8, 0x69, 0x19, 0x93, 0x62, 0xa7, 0x64, 0x73, 0xcb, 0x8f, 0x7c, 0x2f,
+	0x71, 0x42, 0x4f, 0xe4, 0x9f, 0x92, 0xe5, 0x0c, 0xfc, 0xf4, 0x29, 0x99, 0x85, 0x85, 0x33, 0xdb,
+	0x42, 0x35, 0x18, 0x6d, 0xfa, 0x41, 0x74, 0xc7, 0x0f, 0xe4, 0xfa, 0x42, 0x6d, 0x04, 0xa5, 0x06,
+	0xa6, 0x68, 0x91, 0x19, 0xe6, 0x98, 0x10, 0x9c, 0xa0, 0x89, 0x3e, 0x0e, 0xfd, 0x61, 0xd5, 0xa9,
+	0x93, 0xe5, 0x1b, 0x53, 0x93, 0xf9, 0xd7, 0x4f, 0x85, 0xa3, 0xe4, 0xac, 0x2e, 0x1e, 0x36, 0x89,
+	0xa3, 0x60, 0x49, 0x0e, 0x2d, 0x41, 0x2f, 0x4b, 0xec, 0xcc, 0x42, 0xe4, 0xe6, 0x44, 0x66, 0x4f,
+	0xb9, 0xd5, 0xf0, 0xb3, 0x89, 0x15, 0x63, 0x5e, 0x9d, 0xee, 0x01, 0x21, 0x29, 0xf0, 0xc3, 0xa9,
+	0xe3, 0xf9, 0x7b, 0x40, 0x08, 0x18, 0x6e, 0x54, 0xda, 0xed, 0x01, 0x85, 0x84, 0x63, 0xa2, 0xf4,
+	0x64, 0xa6, 0xa7, 0xe9, 0x89, 0x36, 0x26, 0x93, 0xb9, 0x67, 0x29, 0x3b, 0x99, 0xe9, 0x49, 0x4a,
+	0x49, 0xd8, 0x7f, 0x30, 0x90, 0xe6, 0x59, 0x98, 0x84, 0xe9, 0x3f, 0xb7, 0x52, 0x36, 0x13, 0x1f,
+	0xe9, 0x56, 0xe0, 0xfd, 0x00, 0x1f, 0xae, 0x9f, 0xb7, 0xe0, 0x44, 0x33, 0xf3, 0x43, 0x04, 0x03,
+	0xd0, 0x9d, 0xdc, 0x9c, 0x7f, 0xba, 0x0a, 0xa7, 0x9c, 0x0d, 0xc7, 0x39, 0x2d, 0x25, 0x85, 0x03,
+	0xc5, 0xf7, 0x2c, 0x1c, 0x58, 0x81, 0x81, 0x2a, 0x7f, 0xc9, 0xc9, 0x34, 0x00, 0x5d, 0x05, 0x03,
+	0x65, 0xac, 0x84, 0x78, 0x02, 0x6e, 0x60, 0x45, 0x02, 0xfd, 0xa4, 0x05, 0x67, 0x92, 0x5d, 0xc7,
+	0x84, 0x81, 0x85, 0xc1, 0x24, 0x17, 0x6b, 0x2d, 0x89, 0xef, 0x4f, 0xf1, 0xff, 0x06, 0xf2, 0x41,
+	0x27, 0x04, 0xdc, 0xbe, 0x31, 0xb4, 0x90, 0x21, 0x57, 0xeb, 0x33, 0x35, 0x8a, 0x5d, 0xc8, 0xd6,
+	0x5e, 0x84, 0xe1, 0x86, 0xdf, 0xf2, 0x22, 0x61, 0xf7, 0x28, 0x8c, 0xa7, 0x98, 0xd1, 0xd0, 0x8a,
+	0x56, 0x8e, 0x0d, 0xac, 0x84, 0x44, 0x6e, 0xe0, 0xbe, 0x25, 0x72, 0xef, 0xc0, 0xb0, 0xa7, 0xb9,
+	0x04, 0xb4, 0x7b, 0xc1, 0x0a, 0xe9, 0xa2, 0x86, 0xcd, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0x5a, 0x7b,
+	0x69, 0x19, 0xbc, 0x37, 0x69, 0xd9, 0x91, 0x3e, 0x89, 0xed, 0x5f, 0x2f, 0x64, 0xbc, 0x18, 0xb8,
+	0x54, 0xee, 0x35, 0x53, 0x2a, 0x77, 0x3e, 0x29, 0x95, 0x4b, 0xa9, 0xaa, 0x0c, 0x81, 0x5c, 0xf7,
+	0x19, 0x25, 0xbb, 0x0e, 0xf0, 0xfc, 0xc3, 0x16, 0x9c, 0x64, 0xba, 0x0f, 0xda, 0xc0, 0x7b, 0xd6,
+	0x77, 0x30, 0x93, 0xd4, 0xeb, 0xd9, 0xe4, 0x70, 0x5e, 0x3b, 0x76, 0x1d, 0xce, 0x75, 0xba, 0x77,
+	0x99, 0x85, 0x6f, 0x4d, 0x19, 0x47, 0xc4, 0x16, 0xbe, 0xb5, 0xe5, 0x05, 0xcc, 0x20, 0xdd, 0x86,
+	0x2f, 0xb4, 0xff, 0x7f, 0x0b, 0x8a, 0x65, 0xbf, 0x76, 0x04, 0x2f, 0xfa, 0x8f, 0x19, 0x2f, 0xfa,
+	0x47, 0xb2, 0x6f, 0xfc, 0x5a, 0xae, 0xb2, 0x6f, 0x31, 0xa1, 0xec, 0x3b, 0x93, 0x47, 0xa0, 0xbd,
+	0x6a, 0xef, 0x97, 0x8a, 0x30, 0x54, 0xf6, 0x6b, 0x6a, 0x9f, 0xfd, 0xaf, 0xf7, 0xe3, 0xc8, 0x93,
+	0x9b, 0x7d, 0x4a, 0xa3, 0xcc, 0x2c, 0x7a, 0x65, 0xdc, 0x89, 0xef, 0x32, 0x7f, 0x9e, 0x5b, 0xc4,
+	0xdd, 0xdc, 0x8a, 0x48, 0x2d, 0xf9, 0x39, 0x47, 0xe7, 0xcf, 0xf3, 0xed, 0x22, 0x8c, 0x25, 0x5a,
+	0x47, 0x75, 0x18, 0xa9, 0xeb, 0xaa, 0x24, 0xb1, 0x4e, 0xef, 0x4b, 0x0b, 0x25, 0xfc, 0x21, 0xb4,
+	0x22, 0x6c, 0x12, 0x47, 0x33, 0x00, 0x9e, 0x6e, 0x15, 0xae, 0x02, 0x15, 0x6b, 0x16, 0xe1, 0x1a,
+	0x06, 0x7a, 0x09, 0x86, 0x22, 0xbf, 0xe9, 0xd7, 0xfd, 0xcd, 0xdd, 0x6b, 0x44, 0x46, 0xb6, 0x54,
+	0x46, 0xc3, 0x6b, 0x31, 0x08, 0xeb, 0x78, 0xe8, 0x2e, 0x4c, 0x28, 0x22, 0x95, 0x07, 0xa0, 0x5e,
+	0x63, 0x62, 0x93, 0xd5, 0x24, 0x45, 0x9c, 0x6e, 0x04, 0xbd, 0x02, 0xa3, 0xcc, 0x7a, 0x99, 0xd5,
+	0xbf, 0x46, 0x76, 0x65, 0xc4, 0x63, 0xc6, 0x61, 0xaf, 0x18, 0x10, 0x9c, 0xc0, 0x44, 0xf3, 0x30,
+	0xd1, 0x70, 0xc3, 0x44, 0xf5, 0x3e, 0x56, 0x9d, 0x75, 0x60, 0x25, 0x09, 0xc4, 0x69, 0x7c, 0xfb,
+	0x57, 0xc5, 0x1c, 0x7b, 0x91, 0xfb, 0xc1, 0x76, 0x7c, 0x7f, 0x6f, 0xc7, 0x6f, 0x59, 0x30, 0x4e,
+	0x5b, 0x67, 0x26, 0x99, 0x92, 0x91, 0x52, 0x39, 0x31, 0xac, 0x36, 0x39, 0x31, 0xce, 0xd3, 0x63,
+	0xbb, 0xe6, 0xb7, 0x22, 0x21, 0x1d, 0xd5, 0xce, 0x65, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x20,
+	0x10, 0x7e, 0xef, 0x3a, 0x1e, 0x09, 0x02, 0x2c, 0xa0, 0x32, 0x65, 0x46, 0x4f, 0x76, 0xca, 0x0c,
+	0x1e, 0xf9, 0x5c, 0x58, 0xc1, 0x09, 0x96, 0x56, 0x8b, 0x7c, 0x2e, 0xcd, 0xe3, 0x62, 0x1c, 0xfb,
+	0xeb, 0x45, 0x18, 0x2e, 0xfb, 0xb5, 0xd8, 0xb0, 0xe3, 0x45, 0xc3, 0xb0, 0xe3, 0x5c, 0xc2, 0xb0,
+	0x63, 0x5c, 0xc7, 0xfd, 0xc0, 0x8c, 0xe3, 0x3b, 0x65, 0xc6, 0xf1, 0x87, 0x16, 0x9b, 0xb5, 0x85,
+	0xd5, 0x0a, 0xb7, 0xf0, 0x45, 0x97, 0x60, 0x88, 0x9d, 0x70, 0x2c, 0xd0, 0x82, 0xb4, 0x76, 0x60,
+	0x29, 0x2c, 0x57, 0xe3, 0x62, 0xac, 0xe3, 0xa0, 0x0b, 0x30, 0x10, 0x12, 0x27, 0xa8, 0x6e, 0xa9,
+	0xe3, 0x5d, 0x98, 0x26, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x38, 0xe8, 0x76, 0x31, 0xdf, 0x5c,
+	0x58, 0xef, 0x0f, 0xdf, 0x22, 0xf9, 0x91, 0xb6, 0xed, 0x5b, 0x80, 0xd2, 0xf8, 0x5d, 0xf8, 0x5f,
+	0x95, 0xcc, 0xb0, 0xb0, 0x83, 0xa9, 0x90, 0xb0, 0xff, 0x62, 0xc1, 0x68, 0xd9, 0xaf, 0xd1, 0xad,
+	0xfb, 0xbd, 0xb4, 0x4f, 0xf5, 0x8c, 0x03, 0x7d, 0x6d, 0x32, 0x0e, 0x3c, 0x06, 0xbd, 0x65, 0xbf,
+	0xd6, 0x21, 0x74, 0xed, 0x7f, 0x63, 0x41, 0x7f, 0xd9, 0xaf, 0x1d, 0x81, 0xe2, 0xe5, 0x35, 0x53,
+	0xf1, 0x72, 0x32, 0x67, 0xdd, 0xe4, 0xe8, 0x5a, 0xfe, 0xa4, 0x07, 0x46, 0x68, 0x3f, 0xfd, 0x4d,
+	0x39, 0x95, 0xc6, 0xb0, 0x59, 0x5d, 0x0c, 0x1b, 0x7d, 0x06, 0xf8, 0xf5, 0xba, 0x7f, 0x27, 0x39,
+	0xad, 0x4b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x16, 0x06, 0x9a, 0x01, 0xd9, 0x71, 0x7d, 0xc1, 0x5f,
+	0x6b, 0x6a, 0xac, 0xb2, 0x28, 0xc7, 0x0a, 0x83, 0x3e, 0xbc, 0x43, 0xd7, 0xa3, 0xbc, 0x44, 0xd5,
+	0xf7, 0x6a, 0x5c, 0x37, 0x51, 0x14, 0x69, 0xb1, 0xb4, 0x72, 0x6c, 0x60, 0xa1, 0x5b, 0x30, 0xc8,
+	0xfe, 0xb3, 0x63, 0xa7, 0xf7, 0xd0, 0xc7, 0x8e, 0x48, 0x14, 0x2c, 0x08, 0xe0, 0x98, 0x16, 0x7a,
+	0x1e, 0x20, 0x92, 0xa9, 0x65, 0x42, 0x11, 0xc2, 0x54, 0xbd, 0x45, 0x54, 0xd2, 0x99, 0x10, 0x6b,
+	0x58, 0xe8, 0x19, 0x18, 0x8c, 0x1c, 0xb7, 0x7e, 0xdd, 0xf5, 0x98, 0xfe, 0x9e, 0xf6, 0x5f, 0xe4,
+	0xeb, 0x15, 0x85, 0x38, 0x86, 0x53, 0x5e, 0x90, 0xc5, 0x84, 0x9a, 0xdb, 0x8d, 0x44, 0x6a, 0xba,
+	0x22, 0xe7, 0x05, 0xaf, 0xab, 0x52, 0xac, 0x61, 0xa0, 0x2d, 0x38, 0xed, 0x7a, 0x2c, 0x85, 0x14,
+	0xa9, 0x6c, 0xbb, 0xcd, 0xb5, 0xeb, 0x95, 0x9b, 0x24, 0x70, 0x37, 0x76, 0xe7, 0x9c, 0xea, 0x36,
+	0xf1, 0x64, 0x42, 0xfc, 0xc7, 0x45, 0x17, 0x4f, 0x2f, 0xb7, 0xc1, 0xc5, 0x6d, 0x29, 0x21, 0x9b,
+	0x6e, 0xc7, 0x80, 0x38, 0x0d, 0x21, 0x13, 0xe0, 0xe9, 0x67, 0x58, 0x09, 0x16, 0x10, 0xfb, 0x05,
+	0xb6, 0x27, 0x6e, 0x54, 0xd0, 0xd3, 0xc6, 0xf1, 0x72, 0x42, 0x3f, 0x5e, 0x0e, 0xf6, 0x4a, 0x7d,
+	0x37, 0x2a, 0x5a, 0x7c, 0xa0, 0xcb, 0x70, 0xbc, 0xec, 0xd7, 0xca, 0x7e, 0x10, 0x2d, 0xf9, 0xc1,
+	0x1d, 0x27, 0xa8, 0xc9, 0x25, 0x58, 0x92, 0x11, 0x92, 0xe8, 0x19, 0xdb, 0xcb, 0x4f, 0x20, 0x23,
+	0xfa, 0xd1, 0x0b, 0x8c, 0xab, 0x3b, 0xa4, 0x43, 0x6a, 0x95, 0xf1, 0x17, 0x2a, 0x51, 0xdb, 0x15,
+	0x27, 0x22, 0xe8, 0x06, 0x8c, 0x54, 0xf5, 0xab, 0x56, 0x54, 0x7f, 0x4a, 0x5e, 0x76, 0xc6, 0x3d,
+	0x9c, 0x79, 0x37, 0x9b, 0xf5, 0xed, 0x6f, 0x5a, 0xa2, 0x15, 0x2e, 0xad, 0xe0, 0x76, 0xaf, 0x9d,
+	0xcf, 0xdc, 0x79, 0x98, 0x08, 0xf4, 0x2a, 0x9a, 0xfd, 0xd8, 0x71, 0x9e, 0xf9, 0x26, 0x01, 0xc4,
+	0x69, 0x7c, 0xf4, 0x49, 0x38, 0x65, 0x14, 0x4a, 0x55, 0xba, 0x96, 0x7f, 0x9a, 0xc9, 0x73, 0x70,
+	0x1e, 0x12, 0xce, 0xaf, 0x6f, 0xff, 0x20, 0x9c, 0x48, 0x7e, 0x97, 0x90, 0xb0, 0xdc, 0xe7, 0xd7,
+	0x15, 0x0e, 0xf7, 0x75, 0xf6, 0x4b, 0x30, 0x41, 0x9f, 0xde, 0x8a, 0x8d, 0x64, 0xf3, 0xd7, 0x39,
+	0x08, 0xd5, 0x6f, 0x0e, 0xb0, 0x6b, 0x30, 0x91, 0x7d, 0x0d, 0x7d, 0x1a, 0x46, 0x43, 0xc2, 0x22,
+	0xaf, 0x49, 0xc9, 0x5e, 0x1b, 0x6f, 0xf2, 0xca, 0xa2, 0x8e, 0xc9, 0x5f, 0x2f, 0x66, 0x19, 0x4e,
+	0x50, 0x43, 0x0d, 0x18, 0xbd, 0xe3, 0x7a, 0x35, 0xff, 0x4e, 0x28, 0xe9, 0x0f, 0xe4, 0xab, 0x09,
+	0x6e, 0x71, 0xcc, 0x44, 0x1f, 0x8d, 0xe6, 0x6e, 0x19, 0xc4, 0x70, 0x82, 0x38, 0x3d, 0x6a, 0x82,
+	0x96, 0x37, 0x1b, 0xae, 0x87, 0x24, 0x10, 0x71, 0xe1, 0xd8, 0x51, 0x83, 0x65, 0x21, 0x8e, 0xe1,
+	0xf4, 0xa8, 0x61, 0x7f, 0x98, 0x3b, 0x3a, 0x3b, 0xcb, 0xc4, 0x51, 0x83, 0x55, 0x29, 0xd6, 0x30,
+	0xe8, 0x51, 0xcc, 0xfe, 0xad, 0xfa, 0x1e, 0xf6, 0xfd, 0x48, 0x1e, 0xde, 0x2c, 0x55, 0xa5, 0x56,
+	0x8e, 0x0d, 0xac, 0x9c, 0x28, 0x74, 0x3d, 0x87, 0x8d, 0x42, 0x87, 0xa2, 0x36, 0x1e, 0xf8, 0x3c,
+	0x1a, 0xf2, 0xe5, 0x76, 0x1e, 0xf8, 0x07, 0xf7, 0xe5, 0x9d, 0x4f, 0x79, 0x81, 0x0d, 0x31, 0x40,
+	0xbd, 0x3c, 0xcc, 0x1e, 0x53, 0x64, 0x56, 0xf8, 0xe8, 0x48, 0x18, 0x5a, 0x84, 0xfe, 0x70, 0x37,
+	0xac, 0x46, 0xf5, 0xb0, 0x5d, 0x3a, 0xd2, 0x0a, 0x43, 0xd1, 0xb2, 0x61, 0xf3, 0x2a, 0x58, 0xd6,
+	0x45, 0x55, 0x98, 0x14, 0x14, 0xe7, 0xb7, 0x1c, 0x4f, 0x25, 0x49, 0xe4, 0x16, 0x8b, 0x97, 0xf6,
+	0xf7, 0x4a, 0x93, 0xa2, 0x65, 0x1d, 0x7c, 0xb0, 0x57, 0xa2, 0x5b, 0x32, 0x03, 0x82, 0xb3, 0xa8,
+	0xf1, 0x25, 0x5f, 0xad, 0xfa, 0x8d, 0x66, 0x39, 0xf0, 0x37, 0xdc, 0x3a, 0x69, 0xa7, 0x0c, 0xae,
+	0x18, 0x98, 0x62, 0xc9, 0x1b, 0x65, 0x38, 0x41, 0x0d, 0xdd, 0x86, 0x31, 0xa7, 0xd9, 0x9c, 0x0d,
+	0x1a, 0x7e, 0x20, 0x1b, 0x18, 0xca, 0xd7, 0x2a, 0xcc, 0x9a, 0xa8, 0x3c, 0x47, 0x62, 0xa2, 0x10,
+	0x27, 0x09, 0xd2, 0x81, 0x12, 0x1b, 0xcd, 0x18, 0xa8, 0x91, 0x78, 0xa0, 0xc4, 0xbe, 0xcc, 0x18,
+	0xa8, 0x0c, 0x08, 0xce, 0xa2, 0x66, 0xff, 0x00, 0x63, 0xfc, 0x2b, 0xee, 0xa6, 0xc7, 0x9c, 0xe3,
+	0x50, 0x03, 0x46, 0x9a, 0xec, 0xd8, 0x17, 0xf9, 0xcb, 0xc4, 0x51, 0xf1, 0x62, 0x97, 0xc2, 0xcb,
+	0x3b, 0x2c, 0x03, 0xab, 0x61, 0xc4, 0x5a, 0xd6, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0x8b, 0xd3, 0x8c,
+	0x75, 0xac, 0x70, 0x89, 0x64, 0xbf, 0x70, 0x55, 0x14, 0x32, 0x88, 0xe9, 0x7c, 0xd9, 0x7f, 0xbc,
+	0xbe, 0x84, 0xbb, 0x23, 0x96, 0x75, 0xd1, 0xa7, 0x60, 0x94, 0x3e, 0xe9, 0x15, 0xfb, 0x16, 0x4e,
+	0x1d, 0xcb, 0x8f, 0x81, 0xa5, 0xb0, 0xf4, 0xdc, 0x86, 0x7a, 0x65, 0x9c, 0x20, 0x86, 0xde, 0x62,
+	0x76, 0x9d, 0x92, 0x74, 0xa1, 0x1b, 0xd2, 0xba, 0x09, 0xa7, 0x24, 0xab, 0x11, 0x41, 0x2d, 0x98,
+	0x4c, 0x67, 0x70, 0x0e, 0xa7, 0xec, 0xfc, 0xb7, 0x51, 0x3a, 0x09, 0x73, 0x9c, 0x84, 0x2e, 0x0d,
+	0x0b, 0x71, 0x16, 0x7d, 0x74, 0x3d, 0x99, 0x5f, 0xb7, 0x68, 0x68, 0x0d, 0x52, 0x39, 0x76, 0x47,
+	0xda, 0xa6, 0xd6, 0xdd, 0x84, 0x33, 0x5a, 0x8a, 0xd2, 0x2b, 0x81, 0xc3, 0xec, 0x8a, 0x5c, 0x76,
+	0x1b, 0x69, 0x4c, 0xed, 0xa3, 0xfb, 0x7b, 0xa5, 0x33, 0x6b, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xe8,
+	0x06, 0x1c, 0xe7, 0x11, 0x5c, 0x16, 0x88, 0x53, 0xab, 0xbb, 0x9e, 0xe2, 0x9a, 0xf9, 0xd9, 0x75,
+	0x6a, 0x7f, 0xaf, 0x74, 0x7c, 0x36, 0x0b, 0x01, 0x67, 0xd7, 0x43, 0xaf, 0xc1, 0x60, 0xcd, 0x93,
+	0xa7, 0x6c, 0x9f, 0x91, 0x05, 0x76, 0x70, 0x61, 0xb5, 0xa2, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a,
+	0x68, 0x93, 0xab, 0xad, 0x94, 0xac, 0xb1, 0x3f, 0x15, 0xd8, 0x33, 0x29, 0x8e, 0x37, 0x42, 0x22,
+	0x70, 0x7d, 0xad, 0x72, 0xb9, 0x33, 0xa2, 0x25, 0x18, 0x84, 0xd1, 0x9b, 0x80, 0x44, 0xb6, 0xa1,
+	0xd9, 0x2a, 0x4b, 0x8e, 0xa7, 0xd9, 0x92, 0x2a, 0x11, 0x42, 0x25, 0x85, 0x81, 0x33, 0x6a, 0xa1,
+	0xab, 0xf4, 0x78, 0xd4, 0x4b, 0xc5, 0xf1, 0xab, 0x72, 0x8d, 0x2f, 0x90, 0x66, 0x40, 0x98, 0xf9,
+	0xa3, 0x49, 0x11, 0x27, 0xea, 0xa1, 0x1a, 0x9c, 0x76, 0x5a, 0x91, 0xcf, 0x34, 0x82, 0x26, 0xea,
+	0x9a, 0xbf, 0x4d, 0x3c, 0xa6, 0x8c, 0x1f, 0x60, 0x01, 0x43, 0x4f, 0xcf, 0xb6, 0xc1, 0xc3, 0x6d,
+	0xa9, 0xd0, 0xe7, 0x14, 0x1d, 0x0b, 0x4d, 0x59, 0x67, 0x78, 0x77, 0x73, 0x0d, 0xb6, 0xc4, 0x40,
+	0x2f, 0xc1, 0xd0, 0x96, 0x1f, 0x46, 0xab, 0x24, 0xba, 0xe3, 0x07, 0xdb, 0x22, 0xbd, 0x41, 0x9c,
+	0x52, 0x26, 0x06, 0x61, 0x1d, 0x0f, 0x3d, 0x05, 0xfd, 0xcc, 0x54, 0x6c, 0x79, 0x81, 0xdd, 0xb5,
+	0x03, 0xf1, 0x19, 0x73, 0x95, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xcb, 0xe5, 0x79, 0x76, 0x1c, 0x27,
+	0x50, 0x97, 0xcb, 0xf3, 0x58, 0xc2, 0xe9, 0x72, 0x0d, 0xb7, 0x9c, 0x80, 0x94, 0x03, 0xbf, 0x4a,
+	0x42, 0x2d, 0x91, 0xd1, 0x23, 0x3c, 0x79, 0x03, 0x5d, 0xae, 0x95, 0x2c, 0x04, 0x9c, 0x5d, 0x0f,
+	0x91, 0x74, 0x7a, 0xde, 0xd1, 0x7c, 0x55, 0x69, 0x9a, 0x1d, 0xec, 0x32, 0x43, 0xaf, 0x07, 0xe3,
+	0x2a, 0x31, 0x30, 0x4f, 0xd7, 0x10, 0x4e, 0x8d, 0xb1, 0xb5, 0xdd, 0x7d, 0xae, 0x07, 0xa5, 0x7c,
+	0x5e, 0x4e, 0x50, 0xc2, 0x29, 0xda, 0x46, 0x44, 0xda, 0xf1, 0x8e, 0x11, 0x69, 0x2f, 0xc2, 0x60,
+	0xd8, 0xba, 0x5d, 0xf3, 0x1b, 0x8e, 0xeb, 0x31, 0x8b, 0x1b, 0xed, 0xe1, 0x5e, 0x91, 0x00, 0x1c,
+	0xe3, 0xa0, 0x25, 0x18, 0x70, 0xa4, 0x66, 0x19, 0xe5, 0x07, 0xdb, 0x53, 0xfa, 0x64, 0x1e, 0x7f,
+	0x4a, 0xea, 0x92, 0x55, 0x5d, 0xf4, 0x2a, 0x8c, 0x88, 0x80, 0x1e, 0x22, 0x97, 0xfe, 0xa4, 0xe9,
+	0xbe, 0x5c, 0xd1, 0x81, 0xd8, 0xc4, 0x45, 0xeb, 0x30, 0x14, 0xf9, 0x75, 0xe6, 0x83, 0x4b, 0xb9,
+	0xe4, 0x13, 0xf9, 0x31, 0x71, 0xd7, 0x14, 0x9a, 0xae, 0xf3, 0x50, 0x55, 0xb1, 0x4e, 0x07, 0xad,
+	0xf1, 0xf5, 0xce, 0xd2, 0x16, 0x91, 0x50, 0x24, 0x63, 0x3f, 0x93, 0x67, 0x2e, 0xc9, 0xd0, 0xcc,
+	0xed, 0x20, 0x6a, 0x62, 0x9d, 0x0c, 0xba, 0x02, 0x13, 0xcd, 0xc0, 0xf5, 0xd9, 0x9a, 0x50, 0x9a,
+	0xf2, 0x29, 0x33, 0x49, 0x69, 0x39, 0x89, 0x80, 0xd3, 0x75, 0x58, 0x3c, 0x16, 0x51, 0x38, 0x75,
+	0x8a, 0x27, 0x5a, 0xe3, 0x72, 0x10, 0x5e, 0x86, 0x15, 0x14, 0xad, 0xb0, 0x93, 0x98, 0x8b, 0xf0,
+	0xa6, 0xa6, 0xf3, 0xbd, 0xfc, 0x75, 0x51, 0x1f, 0xe7, 0xfd, 0xd5, 0x5f, 0x1c, 0x53, 0x40, 0x35,
+	0x2d, 0xbf, 0x39, 0x7d, 0x41, 0x85, 0x53, 0xa7, 0xdb, 0xd8, 0xeb, 0x26, 0x9e, 0xcb, 0x31, 0x43,
+	0x60, 0x14, 0x87, 0x38, 0x41, 0x13, 0xbd, 0x01, 0xe3, 0x22, 0x58, 0x41, 0x3c, 0x4c, 0x67, 0x62,
+	0x9f, 0x26, 0x9c, 0x80, 0xe1, 0x14, 0x36, 0x4f, 0x74, 0xe6, 0xdc, 0xae, 0x13, 0x71, 0xf4, 0x5d,
+	0x77, 0xbd, 0xed, 0x70, 0xea, 0x2c, 0x3b, 0x1f, 0x44, 0xa2, 0xb3, 0x24, 0x14, 0x67, 0xd4, 0x40,
+	0x6b, 0x30, 0xde, 0x0c, 0x08, 0x69, 0xb0, 0x77, 0x92, 0xb8, 0xcf, 0x4a, 0x3c, 0x1c, 0x11, 0xed,
+	0x49, 0x39, 0x01, 0x3b, 0xc8, 0x28, 0xc3, 0x29, 0x0a, 0xe8, 0x0e, 0x0c, 0xf8, 0x3b, 0x24, 0xd8,
+	0x22, 0x4e, 0x6d, 0xea, 0x5c, 0x1b, 0x4f, 0x3b, 0x71, 0xb9, 0xdd, 0x10, 0xb8, 0x09, 0x43, 0x24,
+	0x59, 0xdc, 0xd9, 0x10, 0x49, 0x36, 0x86, 0xfe, 0x0b, 0x0b, 0x4e, 0x49, 0xd5, 0x5e, 0xa5, 0x49,
+	0x47, 0x7d, 0xde, 0xf7, 0xc2, 0x28, 0xe0, 0x01, 0x74, 0x1e, 0xcd, 0x0f, 0x2a, 0xb3, 0x96, 0x53,
+	0x49, 0x69, 0x11, 0x4e, 0xe5, 0x61, 0x84, 0x38, 0xbf, 0x45, 0xfa, 0xb2, 0x0f, 0x49, 0x24, 0x0f,
+	0xa3, 0xd9, 0x70, 0xe9, 0xad, 0x85, 0xd5, 0xa9, 0xc7, 0x78, 0xf4, 0x1f, 0xba, 0x19, 0x2a, 0x49,
+	0x20, 0x4e, 0xe3, 0xa3, 0x4b, 0x50, 0xf0, 0xc3, 0xa9, 0xc7, 0xdb, 0xa4, 0xc4, 0xf7, 0x6b, 0x37,
+	0x2a, 0xdc, 0x20, 0xf5, 0x46, 0x05, 0x17, 0xfc, 0x50, 0x26, 0x1b, 0xa3, 0xcf, 0xd9, 0x70, 0xea,
+	0x09, 0x2e, 0x73, 0x96, 0xc9, 0xc6, 0x58, 0x21, 0x8e, 0xe1, 0x68, 0x0b, 0xc6, 0x42, 0x43, 0x6c,
+	0x10, 0x4e, 0x9d, 0x67, 0x23, 0xf5, 0x44, 0xde, 0xa4, 0x19, 0xd8, 0x5a, 0x16, 0x20, 0x93, 0x0a,
+	0x4e, 0x92, 0xe5, 0xbb, 0x4b, 0x13, 0x5c, 0x84, 0x53, 0x4f, 0x76, 0xd8, 0x5d, 0x1a, 0xb2, 0xbe,
+	0xbb, 0x74, 0x1a, 0x38, 0x41, 0x13, 0xad, 0xeb, 0x6e, 0x8c, 0x17, 0xf2, 0x8d, 0x1b, 0x33, 0x1d,
+	0x18, 0x47, 0xf2, 0x9c, 0x17, 0xa7, 0xbf, 0x0f, 0x26, 0x52, 0x5c, 0xd8, 0x61, 0x7c, 0x3a, 0xa6,
+	0xb7, 0x61, 0xc4, 0x58, 0xe9, 0x0f, 0xd5, 0xe4, 0xe7, 0xcf, 0x06, 0x61, 0x50, 0x99, 0x62, 0xa0,
+	0x8b, 0xa6, 0x95, 0xcf, 0xa9, 0xa4, 0x95, 0xcf, 0x40, 0xd9, 0xaf, 0x19, 0x86, 0x3d, 0x6b, 0x19,
+	0xb1, 0x72, 0xf3, 0xce, 0xd5, 0xee, 0x1d, 0xcf, 0x34, 0xf5, 0x52, 0xb1, 0x6b, 0x73, 0xa1, 0x9e,
+	0xb6, 0x1a, 0xab, 0x2b, 0x30, 0xe1, 0xf9, 0x8c, 0xf5, 0x27, 0x35, 0xc9, 0xd7, 0x31, 0xf6, 0x6d,
+	0x50, 0x8f, 0xe5, 0x96, 0x40, 0xc0, 0xe9, 0x3a, 0xb4, 0x41, 0xce, 0x7f, 0x25, 0x55, 0x64, 0x9c,
+	0x3d, 0xc3, 0x02, 0x4a, 0x9f, 0x9c, 0xfc, 0x57, 0x38, 0x35, 0x9e, 0xff, 0xe4, 0xe4, 0x95, 0x92,
+	0x3c, 0x5e, 0x28, 0x79, 0x3c, 0xa6, 0x11, 0x6a, 0xfa, 0xb5, 0xe5, 0xb2, 0x78, 0x3d, 0x68, 0x51,
+	0xec, 0x6b, 0xcb, 0x65, 0xcc, 0x61, 0x68, 0x16, 0xfa, 0xd8, 0x0f, 0x19, 0x23, 0x27, 0x6f, 0xf7,
+	0x2f, 0x97, 0xb5, 0x1c, 0xaa, 0xac, 0x02, 0x16, 0x15, 0x99, 0xc4, 0x9f, 0x3e, 0xb9, 0x98, 0xc4,
+	0xbf, 0xff, 0x3e, 0x25, 0xfe, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0xbb, 0x70, 0xdc, 0x78, 0xe6, 0x2a,
+	0x4f, 0x3c, 0xc8, 0x37, 0x06, 0x48, 0x20, 0xcf, 0x9d, 0x11, 0x9d, 0x3e, 0xbe, 0x9c, 0x45, 0x09,
+	0x67, 0x37, 0x80, 0xea, 0x30, 0x51, 0x4d, 0xb5, 0x3a, 0xd0, 0x7d, 0xab, 0x6a, 0x5d, 0xa4, 0x5b,
+	0x4c, 0x13, 0x46, 0xaf, 0xc2, 0xc0, 0xbb, 0x3e, 0x37, 0xdc, 0x13, 0x2f, 0x1e, 0x19, 0x05, 0x66,
+	0xe0, 0xad, 0x1b, 0x15, 0x56, 0x7e, 0xb0, 0x57, 0x1a, 0x2a, 0xfb, 0x35, 0xf9, 0x17, 0xab, 0x0a,
+	0xe8, 0xc7, 0x2c, 0x98, 0x4e, 0xbf, 0xa3, 0x55, 0xa7, 0x47, 0xba, 0xef, 0xb4, 0x2d, 0x1a, 0x9d,
+	0x5e, 0xcc, 0x25, 0x87, 0xdb, 0x34, 0x85, 0x3e, 0x4a, 0xf7, 0x53, 0xe8, 0xde, 0x23, 0x22, 0x01,
+	0xfd, 0xa3, 0xf1, 0x7e, 0xa2, 0xa5, 0x07, 0x7b, 0xa5, 0x31, 0x7e, 0xe0, 0xba, 0xf7, 0x54, 0xbc,
+	0x7d, 0x5e, 0x01, 0xfd, 0x20, 0x1c, 0x0f, 0xd2, 0x72, 0x6d, 0x22, 0x79, 0xfb, 0xa7, 0xbb, 0x39,
+	0xbc, 0x93, 0x13, 0x8e, 0xb3, 0x08, 0xe2, 0xec, 0x76, 0xec, 0xdf, 0xb3, 0x98, 0x3e, 0x43, 0x74,
+	0x8b, 0x84, 0xad, 0x7a, 0x74, 0x04, 0xc6, 0x72, 0x8b, 0x86, 0x3d, 0xc1, 0x7d, 0x5b, 0xbb, 0xfd,
+	0x2f, 0x16, 0xb3, 0x76, 0x3b, 0x42, 0xbf, 0xbd, 0xb7, 0x60, 0x20, 0x12, 0xad, 0x89, 0xae, 0xe7,
+	0x59, 0xe6, 0xc8, 0x4e, 0x31, 0x8b, 0x3f, 0xf5, 0x76, 0x92, 0xa5, 0x58, 0x91, 0xb1, 0xff, 0x47,
+	0x3e, 0x03, 0x12, 0x72, 0x04, 0x6a, 0xdb, 0x05, 0x53, 0x6d, 0x5b, 0xea, 0xf0, 0x05, 0x39, 0xea,
+	0xdb, 0xff, 0xc1, 0xec, 0x37, 0x93, 0x19, 0xbe, 0xdf, 0xcd, 0x2c, 0xed, 0x2f, 0x5a, 0x00, 0x71,
+	0x82, 0x93, 0x2e, 0x12, 0x4e, 0x5f, 0xa6, 0xaf, 0x25, 0x3f, 0xf2, 0xab, 0x7e, 0x5d, 0xa8, 0x8d,
+	0x4e, 0xc7, 0x9a, 0x63, 0x5e, 0x7e, 0xa0, 0xfd, 0xc6, 0x0a, 0x1b, 0x95, 0x64, 0xc4, 0xe1, 0x62,
+	0x6c, 0xcb, 0x60, 0x44, 0x1b, 0xfe, 0x8a, 0x05, 0xc7, 0xb2, 0x9c, 0x40, 0xe8, 0xdb, 0x9b, 0x4b,
+	0x4f, 0x95, 0x09, 0xac, 0x9a, 0xcd, 0x9b, 0xa2, 0x1c, 0x2b, 0x8c, 0xae, 0x33, 0x79, 0x1f, 0x2e,
+	0xf9, 0xc6, 0x0d, 0x18, 0x29, 0x07, 0x44, 0xe3, 0x2f, 0x5e, 0x8f, 0xf3, 0x02, 0x0d, 0xce, 0x3d,
+	0x7b, 0xe8, 0xc8, 0x4a, 0xf6, 0x57, 0x0b, 0x70, 0x8c, 0x1b, 0x72, 0xcd, 0xee, 0xf8, 0x6e, 0xad,
+	0xec, 0xd7, 0x84, 0xeb, 0xee, 0xdb, 0x30, 0xdc, 0xd4, 0x44, 0xde, 0xed, 0x02, 0xc9, 0xeb, 0xa2,
+	0xf1, 0x58, 0x48, 0xa7, 0x97, 0x62, 0x83, 0x16, 0xaa, 0xc1, 0x30, 0xd9, 0x71, 0xab, 0xca, 0x1a,
+	0xa8, 0x70, 0xe8, 0x4b, 0x5a, 0xb5, 0xb2, 0xa8, 0xd1, 0xc1, 0x06, 0xd5, 0xae, 0xcd, 0xaf, 0x35,
+	0x16, 0xad, 0xa7, 0x83, 0x05, 0xd0, 0xcf, 0x5a, 0x70, 0x32, 0x27, 0xec, 0x3c, 0x6d, 0xee, 0x0e,
+	0x33, 0x99, 0x13, 0xcb, 0x56, 0x35, 0xc7, 0x0d, 0xe9, 0xb0, 0x80, 0xa2, 0x8f, 0x03, 0x34, 0xe3,
+	0x94, 0x9b, 0x1d, 0xe2, 0x73, 0x1b, 0x91, 0x7a, 0xb5, 0xa0, 0xab, 0x2a, 0x33, 0xa7, 0x46, 0xcb,
+	0xfe, 0x4a, 0x0f, 0xf4, 0x32, 0xc3, 0x2b, 0x54, 0x86, 0xfe, 0x2d, 0x1e, 0x13, 0xb0, 0xed, 0xbc,
+	0x51, 0x5c, 0x19, 0x64, 0x30, 0x9e, 0x37, 0xad, 0x14, 0x4b, 0x32, 0x68, 0x05, 0x26, 0x79, 0x3a,
+	0xd1, 0xfa, 0x02, 0xa9, 0x3b, 0xbb, 0x52, 0x9a, 0x5c, 0x60, 0x9f, 0xaa, 0xa4, 0xea, 0xcb, 0x69,
+	0x14, 0x9c, 0x55, 0x0f, 0xbd, 0x0e, 0xa3, 0xf4, 0x75, 0xef, 0xb7, 0x22, 0x49, 0x89, 0xe7, 0xef,
+	0x54, 0x0f, 0x9e, 0x35, 0x03, 0x8a, 0x13, 0xd8, 0xe8, 0x55, 0x18, 0x69, 0xa6, 0xe4, 0xe6, 0xbd,
+	0xb1, 0x80, 0xc9, 0x94, 0x95, 0x9b, 0xb8, 0xcc, 0x0f, 0xa4, 0xc5, 0xbc, 0x5e, 0xd6, 0xb6, 0x02,
+	0x12, 0x6e, 0xf9, 0xf5, 0x1a, 0xe3, 0x80, 0x7b, 0x35, 0x3f, 0x90, 0x04, 0x1c, 0xa7, 0x6a, 0x50,
+	0x2a, 0x1b, 0x8e, 0x5b, 0x6f, 0x05, 0x24, 0xa6, 0xd2, 0x67, 0x52, 0x59, 0x4a, 0xc0, 0x71, 0xaa,
+	0x46, 0x67, 0x85, 0x40, 0xff, 0x83, 0x51, 0x08, 0xd8, 0xbf, 0x5c, 0x00, 0x63, 0x6a, 0xbf, 0x87,
+	0xf3, 0x8a, 0xbe, 0x06, 0x3d, 0x9b, 0x41, 0xb3, 0x2a, 0x8c, 0x0c, 0x33, 0xbf, 0xec, 0x0a, 0x2e,
+	0xcf, 0xeb, 0x5f, 0x46, 0xff, 0x63, 0x56, 0x8b, 0xee, 0xf1, 0xe3, 0xe5, 0xc0, 0xa7, 0x97, 0x9c,
+	0x0c, 0x1b, 0xaa, 0xdc, 0xad, 0xfa, 0xe5, 0x1b, 0xbb, 0x4d, 0x80, 0x6d, 0xe1, 0x33, 0xc2, 0x29,
+	0x18, 0xf6, 0x78, 0x15, 0xf1, 0xc2, 0x96, 0x54, 0xd0, 0x25, 0x18, 0x12, 0xa9, 0x1e, 0x99, 0x57,
+	0x10, 0xdf, 0x4c, 0xcc, 0x7e, 0x70, 0x21, 0x2e, 0xc6, 0x3a, 0x8e, 0xfd, 0xe3, 0x05, 0x98, 0xcc,
+	0x70, 0xeb, 0xe4, 0xd7, 0xc8, 0xa6, 0x1b, 0x46, 0xc1, 0x6e, 0xf2, 0x72, 0xc2, 0xa2, 0x1c, 0x2b,
+	0x0c, 0x7a, 0x56, 0xf1, 0x8b, 0x2a, 0x79, 0x39, 0x09, 0xb7, 0x29, 0x01, 0x3d, 0xdc, 0xe5, 0x44,
+	0xaf, 0xed, 0x56, 0x48, 0x64, 0x2c, 0x7f, 0x75, 0x6d, 0x33, 0x63, 0x03, 0x06, 0xa1, 0x4f, 0xc0,
+	0x4d, 0xa5, 0x41, 0xd7, 0x9e, 0x80, 0x5c, 0x87, 0xce, 0x61, 0xb4, 0x73, 0x11, 0xf1, 0x1c, 0x2f,
+	0x12, 0x0f, 0xc5, 0x38, 0xc6, 0x33, 0x2b, 0xc5, 0x02, 0x6a, 0x7f, 0xb9, 0x08, 0xa7, 0x72, 0x1d,
+	0xbd, 0x69, 0xd7, 0x1b, 0xbe, 0xe7, 0x46, 0xbe, 0x32, 0xcc, 0xe4, 0x71, 0x9d, 0x49, 0x73, 0x6b,
+	0x45, 0x94, 0x63, 0x85, 0x81, 0xce, 0x43, 0x2f, 0x93, 0xb5, 0x27, 0xd3, 0xbc, 0xe1, 0xb9, 0x05,
+	0x1e, 0x31, 0x93, 0x83, 0xb5, 0x5b, 0xbd, 0xd8, 0xf6, 0x56, 0x7f, 0x8c, 0x72, 0x30, 0x7e, 0x3d,
+	0x79, 0xa1, 0xd0, 0xee, 0xfa, 0x7e, 0x1d, 0x33, 0x20, 0x7a, 0x42, 0x8c, 0x57, 0xc2, 0x12, 0x11,
+	0x3b, 0x35, 0x3f, 0xd4, 0x06, 0xed, 0x29, 0xe8, 0xdf, 0x26, 0xbb, 0x81, 0xeb, 0x6d, 0x26, 0x2d,
+	0x54, 0xaf, 0xf1, 0x62, 0x2c, 0xe1, 0x66, 0x56, 0xf3, 0xfe, 0x07, 0x91, 0xd5, 0x5c, 0x5f, 0x01,
+	0x03, 0x1d, 0xd9, 0x93, 0x9f, 0x28, 0xc2, 0x18, 0x9e, 0x5b, 0xf8, 0x60, 0x22, 0xd6, 0xd3, 0x13,
+	0xf1, 0x20, 0x92, 0x7f, 0x1f, 0x6e, 0x36, 0x7e, 0xdb, 0x82, 0x31, 0x96, 0x70, 0x52, 0x44, 0x69,
+	0x71, 0x7d, 0xef, 0x08, 0x9e, 0x02, 0x8f, 0x41, 0x6f, 0x40, 0x1b, 0x15, 0x33, 0xa8, 0xf6, 0x38,
+	0xeb, 0x09, 0xe6, 0x30, 0x74, 0x1a, 0x7a, 0x58, 0x17, 0xe8, 0xe4, 0x0d, 0xf3, 0x23, 0x78, 0xc1,
+	0x89, 0x1c, 0xcc, 0x4a, 0x59, 0xbc, 0x48, 0x4c, 0x9a, 0x75, 0x97, 0x77, 0x3a, 0xb6, 0x84, 0x78,
+	0x7f, 0x84, 0x80, 0xc9, 0xec, 0xda, 0x7b, 0x8b, 0x17, 0x99, 0x4d, 0xb2, 0xfd, 0x33, 0xfb, 0x1f,
+	0x0a, 0x70, 0x36, 0xb3, 0x5e, 0xd7, 0xf1, 0x22, 0xdb, 0xd7, 0x7e, 0x98, 0xe9, 0xe9, 0x8a, 0x47,
+	0x68, 0xff, 0xdf, 0xd3, 0x2d, 0xf7, 0xdf, 0xdb, 0x45, 0x18, 0xc7, 0xcc, 0x21, 0x7b, 0x9f, 0x84,
+	0x71, 0xcc, 0xec, 0x5b, 0x8e, 0x98, 0xe0, 0x5f, 0x0b, 0x39, 0xdf, 0xc2, 0x04, 0x06, 0x17, 0xe8,
+	0x39, 0xc3, 0x80, 0xa1, 0x7c, 0x84, 0xf3, 0x33, 0x86, 0x97, 0x61, 0x05, 0x45, 0xb3, 0x30, 0xd6,
+	0x70, 0x3d, 0x7a, 0xf8, 0xec, 0x9a, 0xac, 0xb8, 0x52, 0x91, 0xac, 0x98, 0x60, 0x9c, 0xc4, 0x47,
+	0xae, 0x16, 0xe2, 0x91, 0x7f, 0xdd, 0xab, 0x87, 0xda, 0x75, 0x33, 0xa6, 0x95, 0x88, 0x1a, 0xc5,
+	0x8c, 0x70, 0x8f, 0x2b, 0x9a, 0x9c, 0xa8, 0xd8, 0xbd, 0x9c, 0x68, 0x38, 0x5b, 0x46, 0x34, 0xfd,
+	0x2a, 0x8c, 0xdc, 0xb7, 0x6e, 0xc4, 0xfe, 0x56, 0x11, 0x1e, 0x69, 0xb3, 0xed, 0xf9, 0x59, 0x6f,
+	0xcc, 0x81, 0x76, 0xd6, 0xa7, 0xe6, 0xa1, 0x0c, 0xc7, 0x36, 0x5a, 0xf5, 0xfa, 0x2e, 0x73, 0x74,
+	0x23, 0x35, 0x89, 0x21, 0x78, 0x4a, 0x29, 0x1c, 0x39, 0xb6, 0x94, 0x81, 0x83, 0x33, 0x6b, 0xd2,
+	0x27, 0x16, 0xbd, 0x49, 0x76, 0x15, 0xa9, 0xc4, 0x13, 0x0b, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x2b,
+	0x30, 0xe1, 0xec, 0x38, 0x2e, 0x4f, 0xef, 0x21, 0x09, 0xf0, 0x37, 0x96, 0x92, 0x45, 0xcf, 0x26,
+	0x11, 0x70, 0xba, 0x0e, 0x7a, 0x13, 0x90, 0x7f, 0x9b, 0x39, 0xcf, 0xd4, 0xae, 0x10, 0x4f, 0x28,
+	0xf3, 0xd9, 0xdc, 0x15, 0xe3, 0x23, 0xe1, 0x46, 0x0a, 0x03, 0x67, 0xd4, 0x4a, 0x04, 0x1b, 0xec,
+	0xcb, 0x0f, 0x36, 0xd8, 0xfe, 0x5c, 0xec, 0x98, 0x19, 0xf1, 0x1d, 0x18, 0x39, 0xac, 0xb5, 0xf7,
+	0x53, 0xd0, 0x1f, 0x88, 0x9c, 0xf3, 0x09, 0xaf, 0x72, 0x99, 0x91, 0x5b, 0xc2, 0xed, 0xff, 0xc7,
+	0x02, 0x25, 0x4b, 0x36, 0xe3, 0x8a, 0xbf, 0xca, 0x4c, 0xd7, 0xb9, 0x14, 0x5c, 0x0b, 0x25, 0x76,
+	0x5c, 0x33, 0x5d, 0x8f, 0x81, 0xd8, 0xc4, 0xe5, 0xcb, 0x2d, 0x8c, 0x23, 0x58, 0x18, 0x0f, 0x08,
+	0xa1, 0x35, 0x54, 0x18, 0xe8, 0x13, 0xd0, 0x5f, 0x73, 0x77, 0xdc, 0x50, 0xc8, 0xd1, 0x0e, 0xad,
+	0xb7, 0x8b, 0xbf, 0x6f, 0x81, 0x93, 0xc1, 0x92, 0x9e, 0xfd, 0x53, 0x16, 0x28, 0x75, 0xe7, 0x55,
+	0xe2, 0xd4, 0xa3, 0x2d, 0xf4, 0x06, 0x80, 0xa4, 0xa0, 0x64, 0x6f, 0xd2, 0x08, 0x0b, 0xb0, 0x82,
+	0x1c, 0x18, 0xff, 0xb0, 0x56, 0x07, 0xbd, 0x0e, 0x7d, 0x5b, 0x8c, 0x96, 0xf8, 0xb6, 0xf3, 0x4a,
+	0xd5, 0xc5, 0x4a, 0x0f, 0xf6, 0x4a, 0xc7, 0xcc, 0x36, 0xe5, 0x2d, 0xc6, 0x6b, 0xd9, 0x3f, 0x51,
+	0x88, 0xe7, 0xf4, 0xad, 0x96, 0x1f, 0x39, 0x47, 0xc0, 0x89, 0x5c, 0x31, 0x38, 0x91, 0x27, 0xda,
+	0xe9, 0x73, 0x59, 0x97, 0x72, 0x39, 0x90, 0x1b, 0x09, 0x0e, 0xe4, 0xc9, 0xce, 0xa4, 0xda, 0x73,
+	0x1e, 0xff, 0x93, 0x05, 0x13, 0x06, 0xfe, 0x11, 0x5c, 0x80, 0x4b, 0xe6, 0x05, 0xf8, 0x68, 0xc7,
+	0x6f, 0xc8, 0xb9, 0xf8, 0x7e, 0xb4, 0x98, 0xe8, 0x3b, 0xbb, 0xf0, 0xde, 0x85, 0x9e, 0x2d, 0x27,
+	0xa8, 0x89, 0x77, 0xfd, 0xc5, 0xae, 0xc6, 0x7a, 0xe6, 0xaa, 0x13, 0x08, 0x03, 0x8e, 0x67, 0xe5,
+	0xa8, 0xd3, 0xa2, 0x8e, 0xc6, 0x1b, 0xac, 0x29, 0x74, 0x19, 0xfa, 0xc2, 0xaa, 0xdf, 0x54, 0x7e,
+	0x80, 0x2c, 0x5d, 0x78, 0x85, 0x95, 0x1c, 0xec, 0x95, 0x90, 0xd9, 0x1c, 0x2d, 0xc6, 0x02, 0x1f,
+	0xbd, 0x0d, 0x23, 0xec, 0x97, 0xb2, 0xa6, 0x2c, 0xe6, 0x4b, 0x60, 0x2a, 0x3a, 0x22, 0x37, 0x35,
+	0x36, 0x8a, 0xb0, 0x49, 0x6a, 0x7a, 0x13, 0x06, 0xd5, 0x67, 0x3d, 0x54, 0x6d, 0xfd, 0xff, 0x59,
+	0x84, 0xc9, 0x8c, 0x35, 0x87, 0x42, 0x63, 0x26, 0x2e, 0x75, 0xb9, 0x54, 0xdf, 0xe3, 0x5c, 0x84,
+	0xec, 0x01, 0x58, 0x13, 0x6b, 0xab, 0xeb, 0x46, 0xd7, 0x43, 0x92, 0x6c, 0x94, 0x16, 0x75, 0x6e,
+	0x94, 0x36, 0x76, 0x64, 0x43, 0x4d, 0x1b, 0x52, 0x3d, 0x7d, 0xa8, 0x73, 0xfa, 0x87, 0x3d, 0x70,
+	0x2c, 0xcb, 0xc4, 0x04, 0x7d, 0x0e, 0xfa, 0x98, 0xa3, 0x9a, 0x14, 0x9c, 0xbd, 0xd8, 0xad, 0x71,
+	0xca, 0x0c, 0xf3, 0x75, 0x13, 0xa1, 0x69, 0x67, 0xe4, 0x71, 0xc4, 0x0b, 0x3b, 0x0e, 0xb3, 0x68,
+	0x93, 0x85, 0x8c, 0x12, 0xb7, 0xa7, 0x3c, 0x3e, 0x3e, 0xd2, 0x75, 0x07, 0xc4, 0xfd, 0x1b, 0x26,
+	0x2c, 0xb5, 0x64, 0x71, 0x67, 0x4b, 0x2d, 0xd9, 0x32, 0x5a, 0x86, 0xbe, 0x2a, 0x37, 0x01, 0x2a,
+	0x76, 0x3e, 0xc2, 0xb8, 0xfd, 0x8f, 0x3a, 0x80, 0x85, 0xdd, 0x8f, 0x20, 0x30, 0xed, 0xc2, 0x90,
+	0x36, 0x30, 0x0f, 0x75, 0xf1, 0x6c, 0xd3, 0x8b, 0x4f, 0x1b, 0x82, 0x87, 0xba, 0x80, 0x7e, 0x46,
+	0xbb, 0xfb, 0xc5, 0x79, 0xf0, 0x61, 0x83, 0x77, 0x3a, 0x9d, 0x70, 0x1f, 0x4c, 0xec, 0x2b, 0xc6,
+	0x4b, 0x55, 0xcc, 0x98, 0xee, 0xb9, 0xa9, 0xa1, 0xcc, 0x0b, 0xbf, 0x7d, 0x1c, 0x77, 0xfb, 0x67,
+	0x2d, 0x48, 0x38, 0x78, 0x29, 0x71, 0xa7, 0x95, 0x2b, 0xee, 0x3c, 0x07, 0x3d, 0x81, 0x5f, 0x27,
+	0xc9, 0xd4, 0xfb, 0xd8, 0xaf, 0x13, 0xcc, 0x20, 0x14, 0x23, 0x8a, 0x85, 0x58, 0xc3, 0xfa, 0x03,
+	0x5d, 0x3c, 0xbd, 0x1f, 0x83, 0xde, 0x3a, 0xd9, 0x21, 0xf5, 0x64, 0x86, 0xd4, 0xeb, 0xb4, 0x10,
+	0x73, 0x98, 0xfd, 0xdb, 0x3d, 0x70, 0xa6, 0x6d, 0x64, 0x39, 0xca, 0x60, 0x6e, 0x3a, 0x11, 0xb9,
+	0xe3, 0xec, 0x26, 0x33, 0x03, 0x5e, 0xe1, 0xc5, 0x58, 0xc2, 0x99, 0xb3, 0x35, 0xcf, 0x94, 0x93,
+	0x10, 0x0e, 0x8b, 0x04, 0x39, 0x02, 0x6a, 0x0a, 0x1b, 0x8b, 0x0f, 0x42, 0xd8, 0xf8, 0x3c, 0x40,
+	0x18, 0xd6, 0xb9, 0x1d, 0x67, 0x4d, 0x78, 0x71, 0xc7, 0x19, 0x95, 0x2a, 0xd7, 0x05, 0x04, 0x6b,
+	0x58, 0x68, 0x01, 0xc6, 0x9b, 0x81, 0x1f, 0x71, 0x59, 0xfb, 0x02, 0x37, 0x75, 0xee, 0x35, 0x83,
+	0x7a, 0x95, 0x13, 0x70, 0x9c, 0xaa, 0x81, 0x5e, 0x82, 0x21, 0x11, 0xe8, 0xab, 0xec, 0xfb, 0x75,
+	0x21, 0xde, 0x53, 0xd6, 0xbf, 0x95, 0x18, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x13, 0xe0, 0xf7, 0x67,
+	0x56, 0xe3, 0x42, 0x7c, 0x0d, 0x2f, 0x91, 0x14, 0x60, 0xa0, 0xab, 0xa4, 0x00, 0xb1, 0xc0, 0x73,
+	0xb0, 0x6b, 0x7d, 0x32, 0x74, 0x14, 0x11, 0x7e, 0xad, 0x07, 0x26, 0xc5, 0xc2, 0x79, 0xd8, 0xcb,
+	0x65, 0x3d, 0xbd, 0x5c, 0x1e, 0x84, 0x48, 0xf4, 0x83, 0x35, 0x73, 0xd4, 0x6b, 0xe6, 0x27, 0x2d,
+	0x30, 0x79, 0x48, 0xf4, 0x9f, 0xe5, 0xa6, 0x56, 0x7d, 0x29, 0x97, 0x27, 0x8d, 0x23, 0x86, 0xbf,
+	0xb7, 0x24, 0xab, 0xf6, 0xff, 0x65, 0xc1, 0xa3, 0x1d, 0x29, 0xa2, 0x45, 0x18, 0x64, 0x8c, 0xae,
+	0xf6, 0x2e, 0x7e, 0x52, 0xb9, 0x42, 0x48, 0x40, 0x0e, 0xdf, 0x1d, 0xd7, 0x44, 0x8b, 0xa9, 0x1c,
+	0xb6, 0x4f, 0x65, 0xe4, 0xb0, 0x3d, 0x6e, 0x0c, 0xcf, 0x7d, 0x26, 0xb1, 0xfd, 0x12, 0xbd, 0x71,
+	0x4c, 0x7f, 0xca, 0x8f, 0x18, 0xe2, 0x5c, 0x3b, 0x21, 0xce, 0x45, 0x26, 0xb6, 0x76, 0x87, 0xbc,
+	0x01, 0xe3, 0x2c, 0x02, 0x28, 0x73, 0xcc, 0x11, 0x8e, 0x98, 0x85, 0xd8, 0xf8, 0xfe, 0x7a, 0x02,
+	0x86, 0x53, 0xd8, 0xf6, 0xdf, 0x15, 0xa1, 0x8f, 0x6f, 0xbf, 0x23, 0x78, 0xf8, 0x3e, 0x03, 0x83,
+	0x6e, 0xa3, 0xd1, 0xe2, 0x69, 0x49, 0x7b, 0x63, 0x53, 0xee, 0x65, 0x59, 0x88, 0x63, 0x38, 0x5a,
+	0x12, 0x9a, 0x84, 0x36, 0x41, 0xc6, 0x79, 0xc7, 0x67, 0x16, 0x9c, 0xc8, 0xe1, 0x5c, 0x9c, 0xba,
+	0x67, 0x63, 0x9d, 0x03, 0xfa, 0x34, 0x40, 0x18, 0x05, 0xae, 0xb7, 0x49, 0xcb, 0x44, 0x26, 0x8a,
+	0xa7, 0xdb, 0x50, 0xab, 0x28, 0x64, 0x4e, 0x33, 0x3e, 0x73, 0x14, 0x00, 0x6b, 0x14, 0xd1, 0x8c,
+	0x71, 0xd3, 0x4f, 0x27, 0xe6, 0x0e, 0x38, 0xd5, 0x78, 0xce, 0xa6, 0x5f, 0x86, 0x41, 0x45, 0xbc,
+	0x93, 0x5c, 0x71, 0x58, 0x67, 0xd8, 0x3e, 0x06, 0x63, 0x89, 0xbe, 0x1d, 0x4a, 0x2c, 0xf9, 0x3b,
+	0x16, 0x8c, 0xf1, 0xce, 0x2c, 0x7a, 0x3b, 0xe2, 0x36, 0xb8, 0x07, 0xc7, 0xea, 0x19, 0xa7, 0xb2,
+	0x98, 0xfe, 0xee, 0x4f, 0x71, 0x25, 0x86, 0xcc, 0x82, 0xe2, 0xcc, 0x36, 0xd0, 0x05, 0xba, 0xe3,
+	0xe8, 0xa9, 0xeb, 0xd4, 0x45, 0x34, 0x91, 0x61, 0xbe, 0xdb, 0x78, 0x19, 0x56, 0x50, 0xfb, 0xaf,
+	0x2c, 0x98, 0xe0, 0x3d, 0xbf, 0x46, 0x76, 0xd5, 0xd9, 0xf4, 0x9d, 0xec, 0xbb, 0x48, 0x88, 0x5d,
+	0xc8, 0x49, 0x88, 0xad, 0x7f, 0x5a, 0xb1, 0xed, 0xa7, 0x7d, 0xd5, 0x02, 0xb1, 0x42, 0x8e, 0x40,
+	0xd2, 0xf2, 0x7d, 0xa6, 0xa4, 0x65, 0x3a, 0x7f, 0x13, 0xe4, 0x88, 0x58, 0xfe, 0xc5, 0x82, 0x71,
+	0x8e, 0x10, 0x5b, 0x41, 0x7c, 0x47, 0xe7, 0x61, 0xce, 0xfc, 0xa2, 0x4c, 0xb3, 0xd6, 0x6b, 0x64,
+	0x77, 0xcd, 0x2f, 0x3b, 0xd1, 0x56, 0xf6, 0x47, 0x19, 0x93, 0xd5, 0xd3, 0x76, 0xb2, 0x6a, 0x72,
+	0x03, 0x19, 0x89, 0x17, 0x3b, 0x08, 0x80, 0x0f, 0x9b, 0x78, 0xd1, 0xfe, 0x7b, 0x0b, 0x10, 0x6f,
+	0xc6, 0x60, 0xdc, 0x28, 0x3b, 0xc4, 0x4a, 0xb5, 0x8b, 0x2e, 0x3e, 0x9a, 0x14, 0x04, 0x6b, 0x58,
+	0x0f, 0x64, 0x78, 0x12, 0xa6, 0x2c, 0xc5, 0xce, 0xa6, 0x2c, 0x87, 0x18, 0xd1, 0xaf, 0xf6, 0x43,
+	0xd2, 0x15, 0x13, 0xdd, 0x84, 0xe1, 0xaa, 0xd3, 0x74, 0x6e, 0xbb, 0x75, 0x37, 0x72, 0x49, 0xd8,
+	0xce, 0xce, 0x6d, 0x5e, 0xc3, 0x13, 0xc6, 0x07, 0x5a, 0x09, 0x36, 0xe8, 0xa0, 0x19, 0x80, 0x66,
+	0xe0, 0xee, 0xb8, 0x75, 0xb2, 0xc9, 0x04, 0x42, 0x2c, 0x7e, 0x11, 0x37, 0xba, 0x93, 0xa5, 0x58,
+	0xc3, 0xc8, 0x08, 0x1b, 0x52, 0x7c, 0xc8, 0x61, 0x43, 0xe0, 0xc8, 0xc2, 0x86, 0xf4, 0x1c, 0x2a,
+	0x6c, 0xc8, 0xc0, 0xa1, 0xc3, 0x86, 0xf4, 0x76, 0x15, 0x36, 0x04, 0xc3, 0x09, 0xc9, 0x7b, 0xd2,
+	0xff, 0x4b, 0x6e, 0x9d, 0x88, 0x07, 0x07, 0x0f, 0xba, 0x34, 0xbd, 0xbf, 0x57, 0x3a, 0x81, 0x33,
+	0x31, 0x70, 0x4e, 0x4d, 0xf4, 0x71, 0x98, 0x72, 0xea, 0x75, 0xff, 0x8e, 0x9a, 0xd4, 0xc5, 0xb0,
+	0xea, 0xd4, 0xb9, 0x72, 0xa9, 0x9f, 0x51, 0x3d, 0xbd, 0xbf, 0x57, 0x9a, 0x9a, 0xcd, 0xc1, 0xc1,
+	0xb9, 0xb5, 0xd1, 0x6b, 0x30, 0xd8, 0x0c, 0xfc, 0xea, 0x8a, 0xe6, 0x2f, 0x7e, 0x96, 0x0e, 0x60,
+	0x59, 0x16, 0x1e, 0xec, 0x95, 0x46, 0xd4, 0x1f, 0x76, 0xe1, 0xc7, 0x15, 0x32, 0x22, 0x72, 0x0c,
+	0x3d, 0xec, 0x88, 0x1c, 0xc3, 0x0f, 0x38, 0x22, 0x87, 0xbd, 0x0d, 0x93, 0x15, 0x12, 0xb8, 0x4e,
+	0xdd, 0xbd, 0x47, 0x79, 0x72, 0x79, 0x06, 0xae, 0xc1, 0x60, 0x90, 0x38, 0xf5, 0xbb, 0x0a, 0x2e,
+	0xae, 0xc9, 0x65, 0xe4, 0x29, 0x1f, 0x13, 0xb2, 0xff, 0xbd, 0x05, 0xfd, 0xc2, 0xbd, 0xf3, 0x08,
+	0x38, 0xd3, 0x59, 0x43, 0x25, 0x53, 0xca, 0x9e, 0x14, 0xd6, 0x99, 0x5c, 0x65, 0xcc, 0x72, 0x42,
+	0x19, 0xf3, 0x68, 0x3b, 0x22, 0xed, 0xd5, 0x30, 0xff, 0x75, 0x91, 0xbe, 0x10, 0x8c, 0x40, 0x03,
+	0x0f, 0x7f, 0x08, 0x56, 0xa1, 0x3f, 0x14, 0x8e, 0xee, 0x85, 0x7c, 0x5f, 0x9e, 0xe4, 0x24, 0xc6,
+	0x36, 0x90, 0xc2, 0xb5, 0x5d, 0x12, 0xc9, 0xf4, 0xa0, 0x2f, 0x3e, 0x44, 0x0f, 0xfa, 0x4e, 0xa1,
+	0x18, 0x7a, 0x1e, 0x44, 0x28, 0x06, 0xfb, 0x1b, 0xec, 0x76, 0xd6, 0xcb, 0x8f, 0x80, 0x71, 0xbb,
+	0x62, 0xde, 0xe3, 0x76, 0x9b, 0x95, 0x25, 0x3a, 0x95, 0xc3, 0xc0, 0xfd, 0x96, 0x05, 0x67, 0x32,
+	0xbe, 0x4a, 0xe3, 0xe6, 0x9e, 0x85, 0x01, 0xa7, 0x55, 0x73, 0xd5, 0x5e, 0xd6, 0xb4, 0xc5, 0xb3,
+	0xa2, 0x1c, 0x2b, 0x0c, 0x34, 0x0f, 0x13, 0xe4, 0x6e, 0xd3, 0xe5, 0x6a, 0x78, 0xdd, 0x74, 0xbc,
+	0xc8, 0x7d, 0x82, 0x17, 0x93, 0x40, 0x9c, 0xc6, 0x57, 0xe1, 0xdc, 0x8a, 0xb9, 0xe1, 0xdc, 0x7e,
+	0xdd, 0x82, 0x21, 0xe5, 0xea, 0xfd, 0xd0, 0x47, 0xfb, 0x0d, 0x73, 0xb4, 0x1f, 0x69, 0x33, 0xda,
+	0x39, 0xc3, 0xfc, 0x97, 0x05, 0xd5, 0xdf, 0xb2, 0x1f, 0x44, 0x5d, 0x70, 0x89, 0xf7, 0xef, 0xf6,
+	0x72, 0x09, 0x86, 0x9c, 0x66, 0x53, 0x02, 0xa4, 0xfd, 0x22, 0x4b, 0x15, 0x11, 0x17, 0x63, 0x1d,
+	0x47, 0x79, 0xe1, 0x14, 0x73, 0xbd, 0x70, 0x6a, 0x00, 0x91, 0x13, 0x6c, 0x92, 0x88, 0x96, 0x09,
+	0x73, 0xeb, 0xfc, 0xf3, 0xa6, 0x15, 0xb9, 0xf5, 0x19, 0xd7, 0x8b, 0xc2, 0x28, 0x98, 0x59, 0xf6,
+	0xa2, 0x1b, 0x01, 0x7f, 0xa6, 0x6a, 0x41, 0x13, 0x15, 0x2d, 0xac, 0xd1, 0x95, 0x61, 0x4d, 0x58,
+	0x1b, 0xbd, 0xa6, 0x21, 0xcc, 0xaa, 0x28, 0xc7, 0x0a, 0xc3, 0x7e, 0x99, 0xdd, 0x3e, 0x6c, 0x4c,
+	0x0f, 0x17, 0x0c, 0xf0, 0x1f, 0x86, 0xd5, 0x6c, 0x30, 0x95, 0xf0, 0x82, 0x1e, 0x72, 0xb0, 0xfd,
+	0x61, 0x4f, 0x1b, 0xd6, 0xfd, 0x59, 0xe3, 0xb8, 0x84, 0xe8, 0x93, 0x29, 0xe3, 0xa6, 0xe7, 0x3a,
+	0xdc, 0x1a, 0x87, 0x30, 0x67, 0x62, 0x79, 0xe3, 0x58, 0x56, 0xad, 0xe5, 0xb2, 0xd8, 0x17, 0x5a,
+	0xde, 0x38, 0x01, 0xc0, 0x31, 0x0e, 0x65, 0xd8, 0xd4, 0x9f, 0x70, 0x0a, 0xc5, 0xe1, 0xc5, 0x15,
+	0x76, 0x88, 0x35, 0x0c, 0x74, 0x51, 0x08, 0x2d, 0xb8, 0xee, 0xe1, 0x91, 0x84, 0xd0, 0x42, 0x0e,
+	0x97, 0x26, 0x69, 0xba, 0x04, 0x43, 0xe4, 0x6e, 0x44, 0x02, 0xcf, 0xa9, 0xd3, 0x16, 0x7a, 0xe3,
+	0x88, 0xb8, 0x8b, 0x71, 0x31, 0xd6, 0x71, 0xd0, 0x1a, 0x8c, 0x85, 0x5c, 0x96, 0xa7, 0x92, 0x5a,
+	0x70, 0x99, 0xe8, 0xd3, 0xca, 0xc9, 0xde, 0x04, 0x1f, 0xb0, 0x22, 0x7e, 0x3a, 0xc9, 0xd0, 0x23,
+	0x49, 0x12, 0xe8, 0x75, 0x18, 0xad, 0xfb, 0x4e, 0x6d, 0xce, 0xa9, 0x3b, 0x5e, 0x95, 0x8d, 0xcf,
+	0x80, 0x11, 0x7f, 0x72, 0xf4, 0xba, 0x01, 0xc5, 0x09, 0x6c, 0xca, 0x20, 0xea, 0x25, 0x22, 0x11,
+	0x8b, 0xe3, 0x6d, 0x92, 0x70, 0x6a, 0x90, 0x7d, 0x15, 0x63, 0x10, 0xaf, 0xe7, 0xe0, 0xe0, 0xdc,
+	0xda, 0xe8, 0x32, 0x0c, 0xcb, 0xcf, 0xd7, 0x22, 0xf5, 0xc4, 0x0e, 0x4d, 0x1a, 0x0c, 0x1b, 0x98,
+	0x28, 0x84, 0xe3, 0xf2, 0xff, 0x5a, 0xe0, 0x6c, 0x6c, 0xb8, 0x55, 0x11, 0xbe, 0x82, 0x3b, 0x7f,
+	0x7f, 0x4c, 0x7a, 0x9a, 0x2e, 0x66, 0x21, 0x1d, 0xec, 0x95, 0x4e, 0x8b, 0x51, 0xcb, 0x84, 0xe3,
+	0x6c, 0xda, 0x68, 0x05, 0x26, 0xb9, 0x0d, 0xcc, 0xfc, 0x16, 0xa9, 0x6e, 0xcb, 0x0d, 0xc7, 0xb8,
+	0x46, 0xcd, 0xf1, 0xe7, 0x6a, 0x1a, 0x05, 0x67, 0xd5, 0x43, 0xef, 0xc0, 0x54, 0xb3, 0x75, 0xbb,
+	0xee, 0x86, 0x5b, 0xab, 0x7e, 0xc4, 0x4c, 0xc8, 0x66, 0x6b, 0xb5, 0x80, 0x84, 0xdc, 0x37, 0x98,
+	0x5d, 0xbd, 0x32, 0xba, 0x52, 0x39, 0x07, 0x0f, 0xe7, 0x52, 0x40, 0xf7, 0xe0, 0x78, 0x62, 0x21,
+	0x88, 0x30, 0x29, 0xa3, 0xf9, 0x29, 0xad, 0x2a, 0x59, 0x15, 0x44, 0xc4, 0xa1, 0x2c, 0x10, 0xce,
+	0x6e, 0x02, 0xbd, 0x02, 0xe0, 0x36, 0x97, 0x9c, 0x86, 0x5b, 0xa7, 0xcf, 0xd1, 0x49, 0xb6, 0x46,
+	0xe8, 0xd3, 0x04, 0x96, 0xcb, 0xb2, 0x94, 0x9e, 0xcd, 0xe2, 0xdf, 0x2e, 0xd6, 0xb0, 0xd1, 0x75,
+	0x18, 0x15, 0xff, 0x76, 0xc5, 0x94, 0x4e, 0xa8, 0xec, 0xa7, 0xa3, 0xb2, 0x86, 0x9a, 0xc7, 0x44,
+	0x09, 0x4e, 0xd4, 0x45, 0x9b, 0x70, 0x46, 0xa6, 0x5e, 0xd5, 0xd7, 0xa7, 0x9c, 0x83, 0x90, 0xe5,
+	0x91, 0x1a, 0xe0, 0x3e, 0x45, 0xb3, 0xed, 0x10, 0x71, 0x7b, 0x3a, 0xf4, 0x5e, 0xd7, 0x97, 0x39,
+	0xf7, 0x18, 0x3f, 0x1e, 0x47, 0xf1, 0xbc, 0x9e, 0x04, 0xe2, 0x34, 0x3e, 0xf2, 0xe1, 0xb8, 0xeb,
+	0x65, 0xad, 0xea, 0x13, 0x8c, 0xd0, 0x47, 0xb9, 0xb3, 0x7c, 0xfb, 0x15, 0x9d, 0x09, 0xc7, 0xd9,
+	0x74, 0xd1, 0x32, 0x4c, 0x46, 0xbc, 0x60, 0xc1, 0x0d, 0x79, 0x9a, 0x1a, 0xfa, 0xec, 0x3b, 0xc9,
+	0x9a, 0x3b, 0x49, 0x57, 0xf3, 0x5a, 0x1a, 0x8c, 0xb3, 0xea, 0xbc, 0x37, 0x03, 0xd0, 0x6f, 0x5a,
+	0xb4, 0xb6, 0xc6, 0xe8, 0xa3, 0xcf, 0xc0, 0xb0, 0x3e, 0x3e, 0x82, 0x69, 0x39, 0x9f, 0xcd, 0x07,
+	0x6b, 0xc7, 0x0b, 0x7f, 0x26, 0xa8, 0x23, 0x44, 0x87, 0x61, 0x83, 0x22, 0xaa, 0x66, 0x04, 0xb9,
+	0xb8, 0xd8, 0x1d, 0x53, 0xd4, 0xbd, 0xfd, 0x23, 0x81, 0xec, 0x9d, 0x83, 0xae, 0xc3, 0x40, 0xb5,
+	0xee, 0x12, 0x2f, 0x5a, 0x2e, 0xb7, 0x0b, 0xae, 0x3a, 0x2f, 0x70, 0xc4, 0x56, 0x14, 0xd9, 0xa5,
+	0x78, 0x19, 0x56, 0x14, 0xec, 0xcb, 0x30, 0x54, 0xa9, 0x13, 0xd2, 0xe4, 0x7e, 0x5c, 0xe8, 0x29,
+	0xf6, 0x30, 0x61, 0xac, 0xa5, 0xc5, 0x58, 0x4b, 0xfd, 0xcd, 0xc1, 0x98, 0x4a, 0x09, 0xb7, 0xff,
+	0xb8, 0x00, 0xa5, 0x0e, 0x49, 0xce, 0x12, 0xfa, 0x36, 0xab, 0x2b, 0x7d, 0xdb, 0x2c, 0x8c, 0xc5,
+	0xff, 0x74, 0x51, 0x9e, 0x32, 0x86, 0xbe, 0x69, 0x82, 0x71, 0x12, 0xbf, 0x6b, 0xbf, 0x16, 0x5d,
+	0x65, 0xd7, 0xd3, 0xd1, 0x33, 0xcb, 0x50, 0xd5, 0xf7, 0x76, 0xff, 0xf6, 0xce, 0x55, 0xbb, 0xda,
+	0xdf, 0x28, 0xc0, 0x71, 0x35, 0x84, 0xdf, 0xbb, 0x03, 0xb7, 0x9e, 0x1e, 0xb8, 0x07, 0xa0, 0xb4,
+	0xb6, 0x6f, 0x40, 0x1f, 0x8f, 0xf8, 0xda, 0x05, 0xcf, 0xff, 0x98, 0x19, 0x7c, 0x5f, 0xb1, 0x99,
+	0x46, 0x00, 0xfe, 0x1f, 0xb3, 0x60, 0x2c, 0xe1, 0x20, 0x89, 0xb0, 0xe6, 0x45, 0x7f, 0x3f, 0x7c,
+	0x79, 0x16, 0xc7, 0x7f, 0x0e, 0x7a, 0xb6, 0x7c, 0x65, 0xa4, 0xac, 0x30, 0xae, 0xfa, 0x61, 0x84,
+	0x19, 0xc4, 0xfe, 0x6b, 0x0b, 0x7a, 0xd7, 0x1c, 0xd7, 0x8b, 0xa4, 0xf6, 0xc3, 0xca, 0xd1, 0x7e,
+	0x74, 0xf3, 0x5d, 0xe8, 0x25, 0xe8, 0x23, 0x1b, 0x1b, 0xa4, 0x1a, 0x89, 0x59, 0x95, 0xd1, 0x34,
+	0xfa, 0x16, 0x59, 0x29, 0x65, 0x42, 0x59, 0x63, 0xfc, 0x2f, 0x16, 0xc8, 0xe8, 0x16, 0x0c, 0x46,
+	0x6e, 0x83, 0xcc, 0xd6, 0x6a, 0xc2, 0x26, 0xe0, 0x3e, 0x42, 0xc0, 0xac, 0x49, 0x02, 0x38, 0xa6,
+	0x65, 0x7f, 0xb9, 0x00, 0x10, 0x47, 0x98, 0xeb, 0xf4, 0x89, 0x73, 0x29, 0x6d, 0xf1, 0xf9, 0x0c,
+	0x6d, 0x31, 0x8a, 0x09, 0x66, 0xa8, 0x8a, 0xd5, 0x30, 0x15, 0xbb, 0x1a, 0xa6, 0x9e, 0xc3, 0x0c,
+	0xd3, 0x3c, 0x4c, 0xc4, 0x11, 0xf2, 0xcc, 0x00, 0xa1, 0xec, 0xfe, 0x5e, 0x4b, 0x02, 0x71, 0x1a,
+	0xdf, 0x26, 0x70, 0x4e, 0x05, 0x0a, 0x13, 0x77, 0x21, 0x73, 0x25, 0xd0, 0xb5, 0xef, 0x1d, 0xc6,
+	0x29, 0x56, 0x87, 0x17, 0x72, 0xd5, 0xe1, 0xbf, 0x60, 0xc1, 0xb1, 0x64, 0x3b, 0xcc, 0xef, 0xfe,
+	0x8b, 0x16, 0x1c, 0x8f, 0x73, 0xfc, 0xa4, 0x4d, 0x10, 0x5e, 0x6c, 0x1b, 0xfc, 0x2c, 0xa7, 0xc7,
+	0x71, 0xd8, 0x96, 0x95, 0x2c, 0xd2, 0x38, 0xbb, 0x45, 0xfb, 0xdf, 0xf5, 0xc0, 0x54, 0x5e, 0xd4,
+	0x34, 0xe6, 0x69, 0xe4, 0xdc, 0xad, 0x6c, 0x93, 0x3b, 0xc2, 0x9f, 0x23, 0xf6, 0x34, 0xe2, 0xc5,
+	0x58, 0xc2, 0x93, 0x69, 0x9d, 0x0a, 0x5d, 0xa6, 0x75, 0xda, 0x82, 0x89, 0x3b, 0x5b, 0xc4, 0x5b,
+	0xf7, 0x42, 0x27, 0x72, 0xc3, 0x0d, 0x97, 0x29, 0xd0, 0xf9, 0xba, 0x79, 0x45, 0x7a, 0x5d, 0xdc,
+	0x4a, 0x22, 0x1c, 0xec, 0x95, 0xce, 0x18, 0x05, 0x71, 0x97, 0xf9, 0x41, 0x82, 0xd3, 0x44, 0xd3,
+	0x59, 0xb1, 0x7a, 0x1e, 0x72, 0x56, 0xac, 0x86, 0x2b, 0xcc, 0x6e, 0xa4, 0x1b, 0x09, 0x7b, 0xb6,
+	0xae, 0xa8, 0x52, 0xac, 0x61, 0xa0, 0x4f, 0x01, 0xd2, 0xd3, 0x1a, 0x1a, 0x41, 0x6b, 0x9f, 0xdb,
+	0xdf, 0x2b, 0xa1, 0xd5, 0x14, 0xf4, 0x60, 0xaf, 0x34, 0x49, 0x4b, 0x97, 0x3d, 0xfa, 0xfc, 0x8d,
+	0x23, 0xfd, 0x65, 0x10, 0x42, 0xb7, 0x60, 0x9c, 0x96, 0xb2, 0x1d, 0x25, 0x23, 0xe2, 0xf2, 0x27,
+	0xeb, 0x33, 0xfb, 0x7b, 0xa5, 0xf1, 0xd5, 0x04, 0x2c, 0x8f, 0x74, 0x8a, 0x48, 0x46, 0x72, 0xac,
+	0x81, 0x6e, 0x93, 0x63, 0xd9, 0x5f, 0xb4, 0xe0, 0x14, 0xbd, 0xe0, 0x6a, 0xd7, 0x73, 0xb4, 0xe8,
+	0x4e, 0xd3, 0xe5, 0x7a, 0x1a, 0x71, 0xd5, 0x30, 0x59, 0x5d, 0x79, 0x99, 0x6b, 0x69, 0x14, 0x94,
+	0x9e, 0xf0, 0xdb, 0xae, 0x57, 0x4b, 0x9e, 0xf0, 0xd7, 0x5c, 0xaf, 0x86, 0x19, 0x44, 0x5d, 0x59,
+	0xc5, 0xdc, 0x08, 0xfb, 0x5f, 0xa3, 0x7b, 0x95, 0xf6, 0xe5, 0x3b, 0xda, 0x0d, 0xf4, 0x8c, 0xae,
+	0x53, 0x15, 0xe6, 0x93, 0xb9, 0xfa, 0xd4, 0x2f, 0x58, 0x20, 0xbc, 0xdf, 0xbb, 0xb8, 0x93, 0xdf,
+	0x86, 0xe1, 0x9d, 0x74, 0xca, 0xd7, 0x73, 0xf9, 0xe1, 0x00, 0x44, 0xa2, 0x57, 0xc5, 0xa2, 0x1b,
+	0xe9, 0x5d, 0x0d, 0x5a, 0x76, 0x0d, 0x04, 0x74, 0x81, 0x30, 0xad, 0x46, 0xe7, 0xde, 0x3c, 0x0f,
+	0x50, 0x63, 0xb8, 0x2c, 0x0f, 0x7c, 0xc1, 0xe4, 0xb8, 0x16, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0xaf,
+	0x16, 0x61, 0x48, 0xa6, 0x18, 0x6d, 0x79, 0xdd, 0xc8, 0x1e, 0x75, 0xc6, 0xa9, 0xd0, 0x91, 0x71,
+	0x7a, 0x07, 0x26, 0x02, 0x52, 0x6d, 0x05, 0xa1, 0xbb, 0x43, 0x24, 0x58, 0x6c, 0x92, 0x19, 0x9e,
+	0xe0, 0x21, 0x01, 0x3c, 0x60, 0x21, 0xb2, 0x12, 0x85, 0x4c, 0x69, 0x9c, 0x26, 0x84, 0x2e, 0xc2,
+	0x20, 0x13, 0xbd, 0x97, 0x63, 0x81, 0xb0, 0x12, 0x7c, 0xad, 0x48, 0x00, 0x8e, 0x71, 0xd8, 0xe3,
+	0xa0, 0x75, 0x9b, 0xa1, 0x27, 0x3c, 0xc1, 0x2b, 0xbc, 0x18, 0x4b, 0x38, 0xfa, 0x38, 0x8c, 0xf3,
+	0x7a, 0x81, 0xdf, 0x74, 0x36, 0xb9, 0x4a, 0xb0, 0x57, 0x85, 0xd7, 0x19, 0x5f, 0x49, 0xc0, 0x0e,
+	0xf6, 0x4a, 0xc7, 0x92, 0x65, 0xac, 0xdb, 0x29, 0x2a, 0xcc, 0xf2, 0x8f, 0x37, 0x42, 0xef, 0x8c,
+	0x94, 0xc1, 0x60, 0x0c, 0xc2, 0x3a, 0x9e, 0xfd, 0xcf, 0x16, 0x4c, 0x68, 0x53, 0xd5, 0x75, 0x8e,
+	0x0d, 0x63, 0x90, 0x0a, 0x5d, 0x0c, 0xd2, 0xe1, 0xa2, 0x3d, 0x64, 0xce, 0x70, 0xcf, 0x03, 0x9a,
+	0x61, 0xfb, 0x33, 0x80, 0xd2, 0xf9, 0x6b, 0xd1, 0x9b, 0xdc, 0x90, 0xdf, 0x0d, 0x48, 0xad, 0x9d,
+	0xc2, 0x5f, 0x8f, 0x9c, 0x23, 0x3d, 0x57, 0x79, 0x2d, 0xac, 0xea, 0xdb, 0x3f, 0xde, 0x03, 0xe3,
+	0xc9, 0x58, 0x1d, 0xe8, 0x2a, 0xf4, 0x71, 0x2e, 0x5d, 0x90, 0x6f, 0x63, 0x4f, 0xa6, 0x45, 0xf8,
+	0xe0, 0xf9, 0x6f, 0x38, 0x77, 0x2f, 0xea, 0xa3, 0x77, 0x60, 0xa8, 0xe6, 0xdf, 0xf1, 0xee, 0x38,
+	0x41, 0x6d, 0xb6, 0xbc, 0x2c, 0x4e, 0x88, 0x4c, 0x01, 0xd4, 0x42, 0x8c, 0xa6, 0x47, 0x0d, 0x61,
+	0xb6, 0x13, 0x31, 0x08, 0xeb, 0xe4, 0xd0, 0x1a, 0x4b, 0xc9, 0xb4, 0xe1, 0x6e, 0xae, 0x38, 0xcd,
+	0x76, 0x5e, 0x5d, 0xf3, 0x12, 0x49, 0xa3, 0x3c, 0x22, 0xf2, 0x36, 0x71, 0x00, 0x8e, 0x09, 0xa1,
+	0xcf, 0xc1, 0x64, 0x98, 0xa3, 0x12, 0xcb, 0x4b, 0x67, 0xde, 0x4e, 0x4b, 0xc4, 0x85, 0x29, 0x59,
+	0xca, 0xb3, 0xac, 0x66, 0xd0, 0x5d, 0x40, 0x42, 0xf4, 0xbc, 0x16, 0xb4, 0xc2, 0x68, 0xae, 0xe5,
+	0xd5, 0xea, 0x32, 0x65, 0xd3, 0x87, 0xb3, 0xe5, 0x04, 0x49, 0x6c, 0xad, 0x6d, 0x16, 0x12, 0x38,
+	0x8d, 0x81, 0x33, 0xda, 0xb0, 0xbf, 0xd0, 0x03, 0xd3, 0x32, 0x61, 0x74, 0x86, 0xf7, 0xca, 0xe7,
+	0xad, 0x84, 0xfb, 0xca, 0x2b, 0xf9, 0x07, 0xfd, 0x43, 0x73, 0x62, 0xf9, 0x52, 0xda, 0x89, 0xe5,
+	0xb5, 0x43, 0x76, 0xe3, 0x81, 0xb9, 0xb2, 0x7c, 0xcf, 0xfa, 0x9f, 0xec, 0x1f, 0x03, 0xe3, 0x6a,
+	0x46, 0x98, 0xc7, 0x5b, 0x2f, 0x4b, 0xd5, 0x51, 0xce, 0xf3, 0xff, 0xaa, 0xc0, 0x31, 0x2e, 0xfb,
+	0x61, 0x19, 0x95, 0x9d, 0x9d, 0xb3, 0x8a, 0x0e, 0xa5, 0x49, 0x1a, 0xcd, 0x68, 0x77, 0xc1, 0x0d,
+	0x44, 0x8f, 0x33, 0x69, 0x2e, 0x0a, 0x9c, 0x34, 0x4d, 0x09, 0xc1, 0x8a, 0x0e, 0xda, 0x81, 0x89,
+	0x4d, 0x16, 0xf1, 0x49, 0xcb, 0xdd, 0x2c, 0xce, 0x85, 0xcc, 0x7d, 0x7b, 0x65, 0x7e, 0x31, 0x3f,
+	0xd1, 0x33, 0x7f, 0xfc, 0xa5, 0x50, 0x70, 0xba, 0x09, 0xba, 0x35, 0x8e, 0x39, 0x77, 0xc2, 0xc5,
+	0xba, 0x13, 0x46, 0x6e, 0x75, 0xae, 0xee, 0x57, 0xb7, 0x2b, 0x91, 0x1f, 0xc8, 0x04, 0x8f, 0x99,
+	0x6f, 0xaf, 0xd9, 0x5b, 0x95, 0x14, 0xbe, 0xd1, 0xfc, 0xd4, 0xfe, 0x5e, 0xe9, 0x58, 0x16, 0x16,
+	0xce, 0x6c, 0x0b, 0xad, 0x42, 0xff, 0xa6, 0x1b, 0x61, 0xd2, 0xf4, 0xc5, 0x69, 0x91, 0x79, 0x14,
+	0x5e, 0xe1, 0x28, 0x46, 0x4b, 0x2c, 0x22, 0x95, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xa9, 0x2e, 0x81,
+	0xbe, 0x7c, 0x01, 0x6c, 0xda, 0xf6, 0x2e, 0xf3, 0x1a, 0x78, 0x1d, 0x8a, 0xde, 0x46, 0xd8, 0x2e,
+	0x16, 0xcf, 0xea, 0x92, 0x21, 0x3f, 0x9b, 0xeb, 0xa7, 0x4f, 0xe3, 0xd5, 0xa5, 0x0a, 0xa6, 0x15,
+	0x99, 0xdb, 0x6b, 0x58, 0x0d, 0x5d, 0x91, 0x2c, 0x2a, 0xd3, 0x0b, 0x78, 0xb9, 0x32, 0x5f, 0x59,
+	0x36, 0x68, 0xb0, 0xa8, 0x86, 0xac, 0x18, 0xf3, 0xea, 0xe8, 0x26, 0x0c, 0x6e, 0xf2, 0x83, 0x6f,
+	0x23, 0x14, 0x49, 0xe3, 0x33, 0x2f, 0xa3, 0x2b, 0x12, 0xc9, 0xa0, 0xc7, 0xae, 0x0c, 0x05, 0xc2,
+	0x31, 0x29, 0xf4, 0x05, 0x0b, 0x8e, 0x27, 0xb3, 0xee, 0x33, 0x67, 0x35, 0x61, 0xa6, 0x96, 0xe9,
+	0x00, 0x50, 0xce, 0xaa, 0x60, 0x34, 0xc8, 0xd4, 0x2f, 0x99, 0x68, 0x38, 0xbb, 0x39, 0x3a, 0xd0,
+	0xc1, 0xed, 0x5a, 0xbb, 0xfc, 0x42, 0x89, 0xc0, 0x44, 0x7c, 0xa0, 0xf1, 0xdc, 0x02, 0xa6, 0x15,
+	0xd1, 0x1a, 0xc0, 0x46, 0x9d, 0x88, 0x88, 0x8f, 0xc2, 0x28, 0x2a, 0xf3, 0xf6, 0x5f, 0x52, 0x58,
+	0x82, 0x0e, 0x7b, 0x89, 0xc6, 0xa5, 0x58, 0xa3, 0x43, 0x97, 0x52, 0xd5, 0xf5, 0x6a, 0x24, 0x60,
+	0xca, 0xad, 0x9c, 0xa5, 0x34, 0xcf, 0x30, 0xd2, 0x4b, 0x89, 0x97, 0x63, 0x41, 0x81, 0xd1, 0x22,
+	0xcd, 0xad, 0x8d, 0xb0, 0x5d, 0x26, 0x8b, 0x79, 0xd2, 0xdc, 0x4a, 0x2c, 0x28, 0x4e, 0x8b, 0x95,
+	0x63, 0x41, 0x81, 0x6e, 0x99, 0x0d, 0xba, 0x81, 0x48, 0x30, 0x35, 0x96, 0xbf, 0x65, 0x96, 0x38,
+	0x4a, 0x7a, 0xcb, 0x08, 0x00, 0x96, 0x44, 0xd0, 0xa7, 0x4d, 0x6e, 0x67, 0x9c, 0xd1, 0x7c, 0xa6,
+	0x03, 0xb7, 0x63, 0xd0, 0x6d, 0xcf, 0xef, 0xbc, 0x02, 0x85, 0x8d, 0x2a, 0x53, 0x8a, 0xe5, 0xe8,
+	0x0c, 0x96, 0xe6, 0x0d, 0x6a, 0x2c, 0x32, 0xfc, 0xd2, 0x3c, 0x2e, 0x6c, 0x54, 0xe9, 0xd2, 0x77,
+	0xee, 0xb5, 0x02, 0xb2, 0xe4, 0xd6, 0x89, 0xc8, 0x6a, 0x91, 0xb9, 0xf4, 0x67, 0x25, 0x52, 0x7a,
+	0xe9, 0x2b, 0x10, 0x8e, 0x49, 0x51, 0xba, 0x31, 0x0f, 0x36, 0x99, 0x4f, 0x57, 0xb1, 0x5a, 0x69,
+	0xba, 0x99, 0x5c, 0xd8, 0x36, 0x8c, 0xec, 0x84, 0xcd, 0x2d, 0x22, 0x4f, 0x45, 0xa6, 0xae, 0xcb,
+	0x89, 0x54, 0x71, 0x53, 0x20, 0xba, 0x41, 0xd4, 0x72, 0xea, 0xa9, 0x83, 0x9c, 0x89, 0x56, 0x6e,
+	0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x2e, 0x84, 0x77, 0x79, 0x38, 0x39, 0xa6, 0xb8, 0xcb, 0x59, 0x08,
+	0x19, 0x11, 0xe7, 0xf8, 0x42, 0x10, 0x00, 0x2c, 0x89, 0xa8, 0xc1, 0x66, 0x17, 0xd0, 0x89, 0x0e,
+	0x83, 0x9d, 0xea, 0x6f, 0x3c, 0xd8, 0xec, 0xc2, 0x89, 0x49, 0xb1, 0x8b, 0xa6, 0xb9, 0xe5, 0x47,
+	0xbe, 0x97, 0xb8, 0xe4, 0x4e, 0xe6, 0x5f, 0x34, 0xe5, 0x0c, 0xfc, 0xf4, 0x45, 0x93, 0x85, 0x85,
+	0x33, 0xdb, 0xa2, 0x1f, 0xd7, 0x94, 0x91, 0x01, 0x45, 0xe6, 0x8d, 0xa7, 0x72, 0x02, 0x6b, 0xa6,
+	0xc3, 0x07, 0xf2, 0x8f, 0x53, 0x20, 0x1c, 0x93, 0x42, 0x35, 0x18, 0x6d, 0x1a, 0x11, 0x67, 0x59,
+	0x06, 0x91, 0x1c, 0xbe, 0x20, 0x2b, 0x36, 0x2d, 0x97, 0x10, 0x99, 0x10, 0x9c, 0xa0, 0xc9, 0x2c,
+	0xf7, 0xb8, 0xab, 0x1f, 0x4b, 0x30, 0x92, 0x33, 0xd5, 0x19, 0xde, 0x80, 0x7c, 0xaa, 0x05, 0x00,
+	0x4b, 0x22, 0x74, 0x34, 0x84, 0x83, 0x9a, 0x1f, 0xb2, 0x3c, 0x3d, 0x79, 0x0a, 0xf6, 0x2c, 0x35,
+	0x91, 0x0c, 0xb3, 0x2e, 0x40, 0x38, 0x26, 0x45, 0x4f, 0x72, 0x7a, 0xe1, 0x9d, 0xce, 0x3f, 0xc9,
+	0x93, 0xd7, 0x1d, 0x3b, 0xc9, 0xe9, 0x65, 0x57, 0x14, 0x57, 0x9d, 0x8a, 0x0a, 0xce, 0x72, 0x8c,
+	0xe4, 0xf4, 0x4b, 0x85, 0x15, 0x4f, 0xf7, 0x4b, 0x81, 0x70, 0x4c, 0x8a, 0x5d, 0xc5, 0x2c, 0x34,
+	0xdd, 0xd9, 0x36, 0x57, 0x31, 0x45, 0xc8, 0xb8, 0x8a, 0xb5, 0xd0, 0x75, 0xf6, 0x8f, 0x17, 0xe0,
+	0x6c, 0xfb, 0x7d, 0x1b, 0xeb, 0xd0, 0xca, 0xb1, 0xcd, 0x52, 0x42, 0x87, 0xc6, 0x25, 0x3a, 0x31,
+	0x56, 0xd7, 0x01, 0x87, 0xaf, 0xc0, 0x84, 0x72, 0x47, 0xac, 0xbb, 0xd5, 0x5d, 0x2d, 0xb1, 0xa8,
+	0x0a, 0xcd, 0x53, 0x49, 0x22, 0xe0, 0x74, 0x1d, 0x34, 0x0b, 0x63, 0x46, 0xe1, 0xf2, 0x82, 0x78,
+	0xfe, 0xc7, 0xd9, 0x31, 0x4c, 0x30, 0x4e, 0xe2, 0xdb, 0xbf, 0x66, 0xc1, 0xc9, 0x9c, 0x3c, 0xf3,
+	0x5d, 0xc7, 0xd3, 0xdd, 0x80, 0xb1, 0xa6, 0x59, 0xb5, 0x43, 0x08, 0x70, 0x23, 0x9b, 0xbd, 0xea,
+	0x6b, 0x02, 0x80, 0x93, 0x44, 0xed, 0x5f, 0x29, 0xc0, 0x99, 0xb6, 0xf6, 0xf5, 0x08, 0xc3, 0x89,
+	0xcd, 0x46, 0xe8, 0xcc, 0x07, 0xa4, 0x46, 0xbc, 0xc8, 0x75, 0xea, 0x95, 0x26, 0xa9, 0x6a, 0x5a,
+	0x50, 0x66, 0xa8, 0x7e, 0x65, 0xa5, 0x32, 0x9b, 0xc6, 0xc0, 0x39, 0x35, 0xd1, 0x12, 0xa0, 0x34,
+	0x44, 0xcc, 0x30, 0x7b, 0xe2, 0xa6, 0xe9, 0xe1, 0x8c, 0x1a, 0xe8, 0x65, 0x18, 0x51, 0x76, 0xfb,
+	0xda, 0x8c, 0xb3, 0x0b, 0x02, 0xeb, 0x00, 0x6c, 0xe2, 0xa1, 0x4b, 0x3c, 0x6d, 0x92, 0x48, 0xb0,
+	0x25, 0x54, 0xa6, 0x63, 0x32, 0x27, 0x92, 0x28, 0xc6, 0x3a, 0xce, 0xdc, 0xe5, 0x3f, 0xfd, 0xf6,
+	0xd9, 0x0f, 0xfd, 0xc5, 0xb7, 0xcf, 0x7e, 0xe8, 0xaf, 0xbe, 0x7d, 0xf6, 0x43, 0x3f, 0xb4, 0x7f,
+	0xd6, 0xfa, 0xd3, 0xfd, 0xb3, 0xd6, 0x5f, 0xec, 0x9f, 0xb5, 0xfe, 0x6a, 0xff, 0xac, 0xf5, 0xff,
+	0xee, 0x9f, 0xb5, 0xbe, 0xfc, 0xb7, 0x67, 0x3f, 0xf4, 0x36, 0x8a, 0x23, 0x54, 0x5f, 0xa4, 0xb3,
+	0x73, 0x71, 0xe7, 0xd2, 0x7f, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x60, 0x45, 0x7a, 0xd6, 0xa3, 0x24,
+	0x01, 0x00,
+}
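+
+// (The byte array above is the gzip-compressed FileDescriptorProto for this
+// package, registered with the proto runtime at init time; it is generated
+// data, not hand-written code.)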
+
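+// The Marshal/MarshalTo/MarshalToSizedBuffer triplets below follow the
+// standard gogo/protobuf codegen pattern: Marshal allocates exactly Size()
+// bytes, and MarshalToSizedBuffer fills the buffer from the end toward the
+// front so each length prefix lands directly in front of its payload
+// without a second sizing pass.
+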
+func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AWSElasticBlockStoreVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
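+	// Fields are written in descending field-number order, payload first,
+	// then the one-byte wire tag: 0x20 = field 4 varint (readOnly),
+	// 0x18 = field 3 varint (partition), 0x12 = field 2 bytes (fsType),
+	// 0xa = field 1 bytes (volumeID).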
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Partition))
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.VolumeID)
+	copy(dAtA[i:], m.VolumeID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Affinity) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Affinity) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Affinity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
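+	// Optional sub-messages are emitted only when non-nil: each is
+	// marshaled into the space before i, then prefixed with its varint
+	// size and tag (0x1a, 0x12, 0xa = fields 3, 2, 1, length-delimited).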
+	if m.PodAntiAffinity != nil {
+		{
+			size, err := m.PodAntiAffinity.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.PodAffinity != nil {
+		{
+			size, err := m.PodAffinity.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.NodeAffinity != nil {
+		{
+			size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AppArmorProfile) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AppArmorProfile) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AppArmorProfile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.LocalhostProfile != nil {
+		i -= len(*m.LocalhostProfile)
+		copy(dAtA[i:], *m.LocalhostProfile)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LocalhostProfile)))
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *AttachedVolume) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AttachedVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.DevicePath)
+	copy(dAtA[i:], m.DevicePath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
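+// Illustrative round-trip using the generated methods (a sketch; the
+// matching Unmarshal methods are generated further down in this file):
+//
+//	src := &AttachedVolume{Name: "vol-1", DevicePath: "/dev/xvdf"}
+//	raw, err := src.Marshal() // len(raw) == src.Size()
+//	if err != nil {
+//		panic(err)
+//	}
+//	dst := &AttachedVolume{}
+//	if err := dst.Unmarshal(raw); err != nil {
+//		panic(err)
+//	}
+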
+func (m *AvoidPods) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AvoidPods) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.PreferAvoidPods) > 0 {
+		for iNdEx := len(m.PreferAvoidPods) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.PreferAvoidPods[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AzureDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Kind != nil {
+		i -= len(*m.Kind)
+		copy(dAtA[i:], *m.Kind)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind)))
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.ReadOnly != nil {
+		i--
+		if *m.ReadOnly {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.FSType != nil {
+		i -= len(*m.FSType)
+		copy(dAtA[i:], *m.FSType)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.CachingMode != nil {
+		i -= len(*m.CachingMode)
+		copy(dAtA[i:], *m.CachingMode)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.DataDiskURI)
+	copy(dAtA[i:], m.DataDiskURI)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.DiskName)
+	copy(dAtA[i:], m.DiskName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AzureFilePersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SecretNamespace != nil {
+		i -= len(*m.SecretNamespace)
+		copy(dAtA[i:], *m.SecretNamespace)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace)))
+		i--
+		dAtA[i] = 0x22
+	}
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.ShareName)
+	copy(dAtA[i:], m.ShareName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.SecretName)
+	copy(dAtA[i:], m.SecretName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AzureFileVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.ShareName)
+	copy(dAtA[i:], m.ShareName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.SecretName)
+	copy(dAtA[i:], m.SecretName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Binding) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Binding) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Binding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
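+	// Non-pointer embedded messages (target = field 2, metadata = field 1)
+	// are emitted unconditionally, in reverse field order.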
+	{
+		size, err := m.Target.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.NodeExpandSecretRef != nil {
+		{
+			size, err := m.NodeExpandSecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.ControllerExpandSecretRef != nil {
+		{
+			size, err := m.ControllerExpandSecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.NodePublishSecretRef != nil {
+		{
+			size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	if m.NodeStageSecretRef != nil {
+		{
+			size, err := m.NodeStageSecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.ControllerPublishSecretRef != nil {
+		{
+			size, err := m.ControllerPublishSecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if len(m.VolumeAttributes) > 0 {
+		keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes))
+		for k := range m.VolumeAttributes {
+			keysForVolumeAttributes = append(keysForVolumeAttributes, string(k))
+		}
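+		// Map keys are sorted so serialization is deterministic; each
+		// entry is a nested message (key = tag 0xa, value = tag 0x12)
+		// written under the volumeAttributes tag 0x2a.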
+		github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes)
+		for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForVolumeAttributes[iNdEx])
+			copy(dAtA[i:], keysForVolumeAttributes[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x22
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.VolumeHandle)
+	copy(dAtA[i:], m.VolumeHandle)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Driver)
+	copy(dAtA[i:], m.Driver)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.NodePublishSecretRef != nil {
+		{
+			size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if len(m.VolumeAttributes) > 0 {
+		keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes))
+		for k := range m.VolumeAttributes {
+			keysForVolumeAttributes = append(keysForVolumeAttributes, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes)
+		for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForVolumeAttributes[iNdEx])
+			copy(dAtA[i:], keysForVolumeAttributes[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if m.FSType != nil {
+		i -= len(*m.FSType)
+		copy(dAtA[i:], *m.FSType)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.ReadOnly != nil {
+		i--
+		if *m.ReadOnly {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	i -= len(m.Driver)
+	copy(dAtA[i:], m.Driver)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Capabilities) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Capabilities) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
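+	// Repeated strings are written back-to-front so they decode in their
+	// original order: drop = field 2 (tag 0x12), add = field 1 (tag 0xa).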
+	if len(m.Drop) > 0 {
+		for iNdEx := len(m.Drop) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Drop[iNdEx])
+			copy(dAtA[i:], m.Drop[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Drop[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Add) > 0 {
+		for iNdEx := len(m.Add) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Add[iNdEx])
+			copy(dAtA[i:], m.Add[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Add[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CephFSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x30
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	i -= len(m.SecretFile)
+	copy(dAtA[i:], m.SecretFile)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.User)
+	copy(dAtA[i:], m.User)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x12
+	if len(m.Monitors) > 0 {
+		for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Monitors[iNdEx])
+			copy(dAtA[i:], m.Monitors[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CephFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x30
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	i -= len(m.SecretFile)
+	copy(dAtA[i:], m.SecretFile)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.User)
+	copy(dAtA[i:], m.User)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x12
+	if len(m.Monitors) > 0 {
+		for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Monitors[iNdEx])
+			copy(dAtA[i:], m.Monitors[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CinderPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.VolumeID)
+	copy(dAtA[i:], m.VolumeID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CinderVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.VolumeID)
+	copy(dAtA[i:], m.VolumeID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClientIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.TimeoutSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
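+// Pointer-typed fields such as Optional and SignerName model optional
+// protobuf fields: they are skipped entirely when nil, so an absent field
+// stays distinguishable from its zero value. Booleans are written as a
+// single varint byte, 1 or 0.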
+func (m *ClusterTrustBundleProjection) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterTrustBundleProjection) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterTrustBundleProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Optional != nil {
+		i--
+		if *m.Optional {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x22
+	if m.LabelSelector != nil {
+		{
+			size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.SignerName != nil {
+		i -= len(*m.SignerName)
+		copy(dAtA[i:], *m.SignerName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SignerName)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Name != nil {
+		i -= len(*m.Name)
+		copy(dAtA[i:], *m.Name)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ComponentCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ComponentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Error)
+	copy(dAtA[i:], m.Error)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ComponentStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ComponentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ComponentStatusList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
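+// ConfigMap's Data and BinaryData maps are encoded as repeated key/value
+// entry messages. Keys are sorted via gogo/protobuf's sortkeys package so
+// the output is deterministic; each entry is written backwards (value under
+// tag 0x12, key under tag 0xa) and then length-prefixed using baseI-i.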
+func (m *ConfigMap) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigMap) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Immutable != nil {
+		i--
+		if *m.Immutable {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if len(m.BinaryData) > 0 {
+		keysForBinaryData := make([]string, 0, len(m.BinaryData))
+		for k := range m.BinaryData {
+			keysForBinaryData = append(keysForBinaryData, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData)
+		for iNdEx := len(keysForBinaryData) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.BinaryData[string(keysForBinaryData[iNdEx])]
+			baseI := i
+			if v != nil {
+				i -= len(v)
+				copy(dAtA[i:], v)
+				i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+				i--
+				dAtA[i] = 0x12
+			}
+			i -= len(keysForBinaryData[iNdEx])
+			copy(dAtA[i:], keysForBinaryData[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBinaryData[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Data) > 0 {
+		keysForData := make([]string, 0, len(m.Data))
+		for k := range m.Data {
+			keysForData = append(keysForData, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForData)
+		for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Data[string(keysForData[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForData[iNdEx])
+			copy(dAtA[i:], keysForData[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigMapEnvSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Optional != nil {
+		i--
+		if *m.Optional {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	{
+		size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigMapKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Optional != nil {
+		i--
+		if *m.Optional {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfigMapList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigMapList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigMapNodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.KubeletConfigKey)
+	copy(dAtA[i:], m.KubeletConfigKey)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.ResourceVersion)
+	copy(dAtA[i:], m.ResourceVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigMapProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Optional != nil {
+		i--
+		if *m.Optional {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfigMapVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfigMapVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Optional != nil {
+		i--
+		if *m.Optional {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.DefaultMode != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
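+// Container uses field numbers above 15, whose tags no longer fit in one
+// byte: the tag for field 24 with wire type 2 is the varint 0xc2 0x01, which
+// the generator stores as two separate writes (0x1, then 0xc2) while moving
+// backwards through the buffer.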
+func (m *Container) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Container) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.RestartPolicy != nil {
+		i -= len(*m.RestartPolicy)
+		copy(dAtA[i:], *m.RestartPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RestartPolicy)))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xc2
+	}
+	if len(m.ResizePolicy) > 0 {
+		for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ResizePolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0xba
+		}
+	}
+	if m.StartupProbe != nil {
+		{
+			size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xb2
+	}
+	if len(m.VolumeDevices) > 0 {
+		for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0xaa
+		}
+	}
+	i -= len(m.TerminationMessagePolicy)
+	copy(dAtA[i:], m.TerminationMessagePolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy)))
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0xa2
+	if len(m.EnvFrom) > 0 {
+		for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0x9a
+		}
+	}
+	i--
+	if m.TTY {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x90
+	i--
+	if m.StdinOnce {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x88
+	i--
+	if m.Stdin {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x80
+	if m.SecurityContext != nil {
+		{
+			size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x7a
+	}
+	i -= len(m.ImagePullPolicy)
+	copy(dAtA[i:], m.ImagePullPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy)))
+	i--
+	dAtA[i] = 0x72
+	i -= len(m.TerminationMessagePath)
+	copy(dAtA[i:], m.TerminationMessagePath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath)))
+	i--
+	dAtA[i] = 0x6a
+	if m.Lifecycle != nil {
+		{
+			size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x62
+	}
+	if m.ReadinessProbe != nil {
+		{
+			size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
+	if m.LivenessProbe != nil {
+		{
+			size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if len(m.VolumeMounts) > 0 {
+		for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x4a
+		}
+	}
+	{
+		size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x42
+	if len(m.Env) > 0 {
+		for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if len(m.Ports) > 0 {
+		for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	i -= len(m.WorkingDir)
+	copy(dAtA[i:], m.WorkingDir)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir)))
+	i--
+	dAtA[i] = 0x2a
+	if len(m.Args) > 0 {
+		for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Args[iNdEx])
+			copy(dAtA[i:], m.Args[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.Command) > 0 {
+		for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Command[iNdEx])
+			copy(dAtA[i:], m.Command[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.Image)
+	copy(dAtA[i:], m.Image)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ContainerImage) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerImage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes))
+	i--
+	dAtA[i] = 0x10
+	if len(m.Names) > 0 {
+		for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Names[iNdEx])
+			copy(dAtA[i:], m.Names[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Names[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ContainerPort) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.HostIP)
+	copy(dAtA[i:], m.HostIP)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Protocol)
+	copy(dAtA[i:], m.Protocol)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol)))
+	i--
+	dAtA[i] = 0x22
+	i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort))
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ContainerResizePolicy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerResizePolicy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerResizePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.RestartPolicy)
+	copy(dAtA[i:], m.RestartPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.ResourceName)
+	copy(dAtA[i:], m.ResourceName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ContainerState) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Terminated != nil {
+		{
+			size, err := m.Terminated.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Running != nil {
+		{
+			size, err := m.Running.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Waiting != nil {
+		{
+			size, err := m.Waiting.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerStateRunning) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerStateTerminated) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ContainerID)
+	copy(dAtA[i:], m.ContainerID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID)))
+	i--
+	dAtA[i] = 0x3a
+	{
+		size, err := m.FinishedAt.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x32
+	{
+		size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x1a
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Signal))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerStateWaiting) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
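+// ContainerStatus.AllocatedResources is a map with message values: each
+// sorted entry nests a marshaled resource.Quantity under tag 0x12 and its
+// string key under tag 0xa, and the whole entry is length-prefixed under
+// field 10 (tag 0x52).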
+func (m *ContainerStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.AllocatedResourcesStatus) > 0 {
+		for iNdEx := len(m.AllocatedResourcesStatus) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.AllocatedResourcesStatus[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x72
+		}
+	}
+	if m.User != nil {
+		{
+			size, err := m.User.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x6a
+	}
+	if len(m.VolumeMounts) > 0 {
+		for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x62
+		}
+	}
+	if m.Resources != nil {
+		{
+			size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
+	if len(m.AllocatedResources) > 0 {
+		keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources))
+		for k := range m.AllocatedResources {
+			keysForAllocatedResources = append(keysForAllocatedResources, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources)
+		for iNdEx := len(keysForAllocatedResources) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.AllocatedResources[ResourceName(keysForAllocatedResources[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAllocatedResources[iNdEx])
+			copy(dAtA[i:], keysForAllocatedResources[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResources[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x52
+		}
+	}
+	if m.Started != nil {
+		i--
+		if *m.Started {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x48
+	}
+	i -= len(m.ContainerID)
+	copy(dAtA[i:], m.ContainerID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID)))
+	i--
+	dAtA[i] = 0x42
+	i -= len(m.ImageID)
+	copy(dAtA[i:], m.ImageID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID)))
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.Image)
+	copy(dAtA[i:], m.Image)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image)))
+	i--
+	dAtA[i] = 0x32
+	i = encodeVarintGenerated(dAtA, i, uint64(m.RestartCount))
+	i--
+	dAtA[i] = 0x28
+	i--
+	if m.Ready {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	{
+		size, err := m.LastTerminationState.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.State.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ContainerUser) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerUser) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ContainerUser) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Linux != nil {
+		{
+			size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DaemonEndpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Port))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DownwardAPIProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DownwardAPIVolumeFile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Mode != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.ResourceFieldRef != nil {
+		{
+			size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.FieldRef != nil {
+		{
+			size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DownwardAPIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.DefaultMode != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode))
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EmptyDirVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SizeLimit != nil {
+		{
+			size, err := m.SizeLimit.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Medium)
+	copy(dAtA[i:], m.Medium)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *EndpointAddress) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.NodeName != nil {
+		i -= len(*m.NodeName)
+		copy(dAtA[i:], *m.NodeName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName)))
+		i--
+		dAtA[i] = 0x22
+	}
+	i -= len(m.Hostname)
+	copy(dAtA[i:], m.Hostname)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname)))
+	i--
+	dAtA[i] = 0x1a
+	if m.TargetRef != nil {
+		{
+			size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.IP)
+	copy(dAtA[i:], m.IP)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *EndpointPort) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.AppProtocol != nil {
+		i -= len(*m.AppProtocol)
+		copy(dAtA[i:], *m.AppProtocol)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol)))
+		i--
+		dAtA[i] = 0x22
+	}
+	i -= len(m.Protocol)
+	copy(dAtA[i:], m.Protocol)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol)))
+	i--
+	dAtA[i] = 0x1a
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Port))
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *EndpointSubset) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointSubset) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Ports) > 0 {
+		for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.NotReadyAddresses) > 0 {
+		for iNdEx := len(m.NotReadyAddresses) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.NotReadyAddresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Addresses) > 0 {
+		for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Endpoints) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Endpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Subsets) > 0 {
+		for iNdEx := len(m.Subsets) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Subsets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *EndpointsList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointsList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *EnvFromSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EnvFromSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.ConfigMapRef != nil {
+		{
+			size, err := m.ConfigMapRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Prefix)
+	copy(dAtA[i:], m.Prefix)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *EnvVar) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EnvVar) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ValueFrom != nil {
+		{
+			size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *EnvVarSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EnvVarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SecretKeyRef != nil {
+		{
+			size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.ConfigMapKeyRef != nil {
+		{
+			size, err := m.ConfigMapKeyRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.ResourceFieldRef != nil {
+		{
+			size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.FieldRef != nil {
+		{
+			size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *EphemeralContainer) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EphemeralContainer) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EphemeralContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.TargetContainerName)
+	copy(dAtA[i:], m.TargetContainerName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetContainerName)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.EphemeralContainerCommon.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
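+// EphemeralContainerCommon mirrors Container field-for-field (the upstream
+// API types are kept in sync by design), so this marshaler produces
+// wire-compatible output, including the same two-byte tags for fields
+// above 15.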
+func (m *EphemeralContainerCommon) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EphemeralContainerCommon) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.RestartPolicy != nil {
+		i -= len(*m.RestartPolicy)
+		copy(dAtA[i:], *m.RestartPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RestartPolicy)))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xc2
+	}
+	if len(m.ResizePolicy) > 0 {
+		for iNdEx := len(m.ResizePolicy) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ResizePolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0xba
+		}
+	}
+	if m.StartupProbe != nil {
+		{
+			size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xb2
+	}
+	if len(m.VolumeDevices) > 0 {
+		for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0xaa
+		}
+	}
+	i -= len(m.TerminationMessagePolicy)
+	copy(dAtA[i:], m.TerminationMessagePolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy)))
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0xa2
+	if len(m.EnvFrom) > 0 {
+		for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0x9a
+		}
+	}
+	i--
+	if m.TTY {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x90
+	i--
+	if m.StdinOnce {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x88
+	i--
+	if m.Stdin {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x80
+	if m.SecurityContext != nil {
+		{
+			size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x7a
+	}
+	i -= len(m.ImagePullPolicy)
+	copy(dAtA[i:], m.ImagePullPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy)))
+	i--
+	dAtA[i] = 0x72
+	i -= len(m.TerminationMessagePath)
+	copy(dAtA[i:], m.TerminationMessagePath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath)))
+	i--
+	dAtA[i] = 0x6a
+	if m.Lifecycle != nil {
+		{
+			size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x62
+	}
+	if m.ReadinessProbe != nil {
+		{
+			size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
+	if m.LivenessProbe != nil {
+		{
+			size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if len(m.VolumeMounts) > 0 {
+		for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x4a
+		}
+	}
+	{
+		size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x42
+	if len(m.Env) > 0 {
+		for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if len(m.Ports) > 0 {
+		for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	i -= len(m.WorkingDir)
+	copy(dAtA[i:], m.WorkingDir)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir)))
+	i--
+	dAtA[i] = 0x2a
+	if len(m.Args) > 0 {
+		for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Args[iNdEx])
+			copy(dAtA[i:], m.Args[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.Command) > 0 {
+		for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Command[iNdEx])
+			copy(dAtA[i:], m.Command[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.Image)
+	copy(dAtA[i:], m.Image)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *EphemeralVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EphemeralVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EphemeralVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.VolumeClaimTemplate != nil {
+		{
+			size, err := m.VolumeClaimTemplate.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Event) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Event) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ReportingInstance)
+	copy(dAtA[i:], m.ReportingInstance)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance)))
+	i--
+	dAtA[i] = 0x7a
+	i -= len(m.ReportingController)
+	copy(dAtA[i:], m.ReportingController)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController)))
+	i--
+	dAtA[i] = 0x72
+	if m.Related != nil {
+		{
+			size, err := m.Related.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x6a
+	}
+	i -= len(m.Action)
+	copy(dAtA[i:], m.Action)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action)))
+	i--
+	dAtA[i] = 0x62
+	if m.Series != nil {
+		{
+			size, err := m.Series.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
+	{
+		size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x52
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0x4a
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
+	i--
+	dAtA[i] = 0x40
+	{
+		size, err := m.LastTimestamp.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x3a
+	{
+		size, err := m.FirstTimestamp.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x32
+	{
+		size, err := m.Source.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.InvolvedObject.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
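+
+// Note the two shapes above: non-nullable message fields such as
+// ObjectMeta, InvolvedObject, and Source are marshaled unconditionally,
+// while nullable ones such as Series and Related are guarded by nil
+// checks, reflecting gogoproto's nullable=false annotations on the
+// embedded types.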
+
+func (m *EventList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EventList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *EventSeries) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Count))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *EventSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EventSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Host)
+	copy(dAtA[i:], m.Host)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Component)
+	copy(dAtA[i:], m.Component)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ExecAction) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExecAction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Command) > 0 {
+		for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Command[iNdEx])
+			copy(dAtA[i:], m.Command[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
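+
+// Repeated fields such as Command iterate from the last element to the
+// first; combined with the backward write, the elements decode in their
+// original slice order. Each element carries its own key byte (0xa
+// here: field 1 with the length-delimited wire type).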
+
+func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FCVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.WWIDs) > 0 {
+		for iNdEx := len(m.WWIDs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.WWIDs[iNdEx])
+			copy(dAtA[i:], m.WWIDs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.WWIDs[iNdEx])))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x1a
+	if m.Lun != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun))
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.TargetWWNs) > 0 {
+		for iNdEx := len(m.TargetWWNs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.TargetWWNs[iNdEx])
+			copy(dAtA[i:], m.TargetWWNs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetWWNs[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
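+
+// Booleans such as ReadOnly are encoded as a single varint payload byte
+// (0 or 1) keyed by 0x20, i.e. field 4 with the varint wire type.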
+
+func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FlexPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Options) > 0 {
+		keysForOptions := make([]string, 0, len(m.Options))
+		for k := range m.Options {
+			keysForOptions = append(keysForOptions, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
+		for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Options[string(keysForOptions[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForOptions[iNdEx])
+			copy(dAtA[i:], keysForOptions[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Driver)
+	copy(dAtA[i:], m.Driver)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
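+
+// Map fields such as Options are emitted as repeated key/value entry
+// messages (key = field 1, value = field 2). The keys are sorted first
+// so the output is deterministic, and baseI records where each entry
+// ends so its length prefix can be computed as baseI-i before the outer
+// tag (0x2a, field 5) is written.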
+
+func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FlexVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Options) > 0 {
+		keysForOptions := make([]string, 0, len(m.Options))
+		for k := range m.Options {
+			keysForOptions = append(keysForOptions, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
+		for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Options[string(keysForOptions[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForOptions[iNdEx])
+			copy(dAtA[i:], keysForOptions[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Driver)
+	copy(dAtA[i:], m.Driver)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FlockerVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.DatasetUUID)
+	copy(dAtA[i:], m.DatasetUUID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.DatasetName)
+	copy(dAtA[i:], m.DatasetName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GCEPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Partition))
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.PDName)
+	copy(dAtA[i:], m.PDName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GRPCAction) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GRPCAction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GRPCAction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Service != nil {
+		i -= len(*m.Service)
+		copy(dAtA[i:], *m.Service)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Service)))
+		i--
+		dAtA[i] = 0x12
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Port))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GitRepoVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Directory)
+	copy(dAtA[i:], m.Directory)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Revision)
+	copy(dAtA[i:], m.Revision)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Repository)
+	copy(dAtA[i:], m.Repository)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GlusterfsPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.EndpointsNamespace != nil {
+		i -= len(*m.EndpointsNamespace)
+		copy(dAtA[i:], *m.EndpointsNamespace)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace)))
+		i--
+		dAtA[i] = 0x22
+	}
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.EndpointsName)
+	copy(dAtA[i:], m.EndpointsName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GlusterfsVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.EndpointsName)
+	copy(dAtA[i:], m.EndpointsName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HTTPGetAction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.HTTPHeaders) > 0 {
+		for iNdEx := len(m.HTTPHeaders) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.HTTPHeaders[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	i -= len(m.Scheme)
+	copy(dAtA[i:], m.Scheme)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Host)
+	copy(dAtA[i:], m.Host)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host)))
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Port.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HTTPHeader) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HostAlias) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HostAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Hostnames) > 0 {
+		for iNdEx := len(m.Hostnames) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Hostnames[iNdEx])
+			copy(dAtA[i:], m.Hostnames[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostnames[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.IP)
+	copy(dAtA[i:], m.IP)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HostIP) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HostIP) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HostIP) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.IP)
+	copy(dAtA[i:], m.IP)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HostPathVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Type != nil {
+		i -= len(*m.Type)
+		copy(dAtA[i:], *m.Type)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type)))
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ISCSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.InitiatorName != nil {
+		i -= len(*m.InitiatorName)
+		copy(dAtA[i:], *m.InitiatorName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName)))
+		i--
+		dAtA[i] = 0x62
+	}
+	i--
+	if m.SessionCHAPAuth {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x58
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	i--
+	if m.DiscoveryCHAPAuth {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x40
+	if len(m.Portals) > 0 {
+		for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Portals[iNdEx])
+			copy(dAtA[i:], m.Portals[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx])))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x30
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.ISCSIInterface)
+	copy(dAtA[i:], m.ISCSIInterface)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface)))
+	i--
+	dAtA[i] = 0x22
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Lun))
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.IQN)
+	copy(dAtA[i:], m.IQN)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.TargetPortal)
+	copy(dAtA[i:], m.TargetPortal)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ISCSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.InitiatorName != nil {
+		i -= len(*m.InitiatorName)
+		copy(dAtA[i:], *m.InitiatorName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName)))
+		i--
+		dAtA[i] = 0x62
+	}
+	i--
+	if m.SessionCHAPAuth {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x58
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	i--
+	if m.DiscoveryCHAPAuth {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x40
+	if len(m.Portals) > 0 {
+		for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Portals[iNdEx])
+			copy(dAtA[i:], m.Portals[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx])))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x30
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.ISCSIInterface)
+	copy(dAtA[i:], m.ISCSIInterface)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface)))
+	i--
+	dAtA[i] = 0x22
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Lun))
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.IQN)
+	copy(dAtA[i:], m.IQN)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.TargetPortal)
+	copy(dAtA[i:], m.TargetPortal)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.PullPolicy)
+	copy(dAtA[i:], m.PullPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PullPolicy)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Reference)
+	copy(dAtA[i:], m.Reference)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reference)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *KeyToPath) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KeyToPath) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Mode != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode))
+		i--
+		dAtA[i] = 0x18
+	}
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Lifecycle) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.PreStop != nil {
+		{
+			size, err := m.PreStop.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.PostStart != nil {
+		{
+			size, err := m.PostStart.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LifecycleHandler) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LifecycleHandler) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LifecycleHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Sleep != nil {
+		{
+			size, err := m.Sleep.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.TCPSocket != nil {
+		{
+			size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.HTTPGet != nil {
+		{
+			size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Exec != nil {
+		{
+			size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LimitRange) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LimitRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LimitRangeItem) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.MaxLimitRequestRatio) > 0 {
+		keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio))
+		for k := range m.MaxLimitRequestRatio {
+			keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio)
+		for iNdEx := len(keysForMaxLimitRequestRatio) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.MaxLimitRequestRatio[ResourceName(keysForMaxLimitRequestRatio[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForMaxLimitRequestRatio[iNdEx])
+			copy(dAtA[i:], keysForMaxLimitRequestRatio[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMaxLimitRequestRatio[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if len(m.DefaultRequest) > 0 {
+		keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest))
+		for k := range m.DefaultRequest {
+			keysForDefaultRequest = append(keysForDefaultRequest, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest)
+		for iNdEx := len(keysForDefaultRequest) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.DefaultRequest[ResourceName(keysForDefaultRequest[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForDefaultRequest[iNdEx])
+			copy(dAtA[i:], keysForDefaultRequest[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefaultRequest[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if len(m.Default) > 0 {
+		keysForDefault := make([]string, 0, len(m.Default))
+		for k := range m.Default {
+			keysForDefault = append(keysForDefault, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForDefault)
+		for iNdEx := len(keysForDefault) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Default[ResourceName(keysForDefault[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForDefault[iNdEx])
+			copy(dAtA[i:], keysForDefault[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefault[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.Min) > 0 {
+		keysForMin := make([]string, 0, len(m.Min))
+		for k := range m.Min {
+			keysForMin = append(keysForMin, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForMin)
+		for iNdEx := len(keysForMin) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Min[ResourceName(keysForMin[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForMin[iNdEx])
+			copy(dAtA[i:], keysForMin[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMin[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Max) > 0 {
+		keysForMax := make([]string, 0, len(m.Max))
+		for k := range m.Max {
+			keysForMax = append(keysForMax, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForMax)
+		for iNdEx := len(keysForMax) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Max[ResourceName(keysForMax[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForMax[iNdEx])
+			copy(dAtA[i:], keysForMax[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMax[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
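+
+// LimitRangeItem applies the same map encoding to five ResourceList
+// fields (outer tags 0x12 through 0x32, fields 2-6); here the map
+// values are resource.Quantity messages, so each value is itself
+// marshaled with a length prefix via (&v).MarshalToSizedBuffer.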
+
+func (m *LimitRangeList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LimitRangeList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LimitRangeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Limits) > 0 {
+		for iNdEx := len(m.Limits) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Limits[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LinuxContainerUser) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LinuxContainerUser) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LinuxContainerUser) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.SupplementalGroups) > 0 {
+		for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- {
+			i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx]))
+			i--
+			dAtA[i] = 0x18
+		}
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.GID))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintGenerated(dAtA, i, uint64(m.UID))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *List) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *List) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Ports) > 0 {
+		for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if m.IPMode != nil {
+		i -= len(*m.IPMode)
+		copy(dAtA[i:], *m.IPMode)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPMode)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.Hostname)
+	copy(dAtA[i:], m.Hostname)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.IP)
+	copy(dAtA[i:], m.IP)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Ingress) > 0 {
+		for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LocalVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.FSType != nil {
+		i -= len(*m.FSType)
+		copy(dAtA[i:], *m.FSType)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType)))
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ModifyVolumeStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ModifyVolumeStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ModifyVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.TargetVolumeAttributesClassName)
+	copy(dAtA[i:], m.TargetVolumeAttributesClassName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetVolumeAttributesClassName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Server)
+	copy(dAtA[i:], m.Server)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Namespace) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Namespace) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamespaceCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamespaceCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamespaceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x2a
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamespaceList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamespaceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Finalizers) > 0 {
+		for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Finalizers[iNdEx])
+			copy(dAtA[i:], m.Finalizers[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamespaceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Phase)
+	copy(dAtA[i:], m.Phase)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Node) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Node) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Node) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeAddress) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Address)
+	copy(dAtA[i:], m.Address)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeAffinity) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+		for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		{
+			size, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x2a
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.LastHeartbeatTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ConfigMap != nil {
+		{
+			size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Error)
+	copy(dAtA[i:], m.Error)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error)))
+	i--
+	dAtA[i] = 0x22
+	if m.LastKnownGood != nil {
+		{
+			size, err := m.LastKnownGood.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Active != nil {
+		{
+			size, err := m.Active.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Assigned != nil {
+		{
+			size, err := m.Assigned.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeDaemonEndpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.KubeletEndpoint.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeFeatures) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeFeatures) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeFeatures) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SupplementalGroupsPolicy != nil {
+		i--
+		if *m.SupplementalGroupsPolicy {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeRuntimeHandler) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeRuntimeHandler) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeRuntimeHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Features != nil {
+		{
+			size, err := m.Features.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeRuntimeHandlerFeatures) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeRuntimeHandlerFeatures) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeRuntimeHandlerFeatures) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.UserNamespaces != nil {
+		i--
+		if *m.UserNamespaces {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.RecursiveReadOnlyMounts != nil {
+		i--
+		if *m.RecursiveReadOnlyMounts {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeSelector) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.NodeSelectorTerms) > 0 {
+		for iNdEx := len(m.NodeSelectorTerms) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.NodeSelectorTerms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Values) > 0 {
+		for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Values[iNdEx])
+			copy(dAtA[i:], m.Values[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.Operator)
+	copy(dAtA[i:], m.Operator)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeSelectorTerm) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeSelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.MatchFields) > 0 {
+		for iNdEx := len(m.MatchFields) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.MatchFields[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.MatchExpressions) > 0 {
+		for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.PodCIDRs) > 0 {
+		for iNdEx := len(m.PodCIDRs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.PodCIDRs[iNdEx])
+			copy(dAtA[i:], m.PodCIDRs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDRs[iNdEx])))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if m.ConfigSource != nil {
+		{
+			size, err := m.ConfigSource.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if len(m.Taints) > 0 {
+		for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	i--
+	if m.Unschedulable {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	i -= len(m.ProviderID)
+	copy(dAtA[i:], m.ProviderID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.DoNotUseExternalID)
+	copy(dAtA[i:], m.DoNotUseExternalID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUseExternalID)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.PodCIDR)
+	copy(dAtA[i:], m.PodCIDR)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Features != nil {
+		{
+			size, err := m.Features.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x6a
+	}
+	if len(m.RuntimeHandlers) > 0 {
+		for iNdEx := len(m.RuntimeHandlers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.RuntimeHandlers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x62
+		}
+	}
+	if m.Config != nil {
+		{
+			size, err := m.Config.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
+	if len(m.VolumesAttached) > 0 {
+		for iNdEx := len(m.VolumesAttached) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.VolumesAttached[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x52
+		}
+	}
+	if len(m.VolumesInUse) > 0 {
+		for iNdEx := len(m.VolumesInUse) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.VolumesInUse[iNdEx])
+			copy(dAtA[i:], m.VolumesInUse[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumesInUse[iNdEx])))
+			i--
+			dAtA[i] = 0x4a
+		}
+	}
+	if len(m.Images) > 0 {
+		for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	{
+		size, err := m.NodeInfo.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x3a
+	{
+		size, err := m.DaemonEndpoints.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x32
+	if len(m.Addresses) > 0 {
+		for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i -= len(m.Phase)
+	copy(dAtA[i:], m.Phase)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Allocatable) > 0 {
+		keysForAllocatable := make([]string, 0, len(m.Allocatable))
+		for k := range m.Allocatable {
+			keysForAllocatable = append(keysForAllocatable, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable)
+		for iNdEx := len(keysForAllocatable) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Allocatable[ResourceName(keysForAllocatable[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAllocatable[iNdEx])
+			copy(dAtA[i:], keysForAllocatable[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatable[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Capacity) > 0 {
+		keysForCapacity := make([]string, 0, len(m.Capacity))
+		for k := range m.Capacity {
+			keysForCapacity = append(keysForCapacity, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+		for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Capacity[ResourceName(keysForCapacity[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForCapacity[iNdEx])
+			copy(dAtA[i:], keysForCapacity[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NodeSystemInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Architecture)
+	copy(dAtA[i:], m.Architecture)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture)))
+	i--
+	dAtA[i] = 0x52
+	i -= len(m.OperatingSystem)
+	copy(dAtA[i:], m.OperatingSystem)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem)))
+	i--
+	dAtA[i] = 0x4a
+	i -= len(m.KubeProxyVersion)
+	copy(dAtA[i:], m.KubeProxyVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion)))
+	i--
+	dAtA[i] = 0x42
+	i -= len(m.KubeletVersion)
+	copy(dAtA[i:], m.KubeletVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletVersion)))
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.ContainerRuntimeVersion)
+	copy(dAtA[i:], m.ContainerRuntimeVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.OSImage)
+	copy(dAtA[i:], m.OSImage)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.KernelVersion)
+	copy(dAtA[i:], m.KernelVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.BootID)
+	copy(dAtA[i:], m.BootID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.BootID)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.SystemUUID)
+	copy(dAtA[i:], m.SystemUUID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.MachineID)
+	copy(dAtA[i:], m.MachineID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ObjectFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.FieldPath)
+	copy(dAtA[i:], m.FieldPath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.APIVersion)
+	copy(dAtA[i:], m.APIVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ObjectReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.FieldPath)
+	copy(dAtA[i:], m.FieldPath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath)))
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.ResourceVersion)
+	copy(dAtA[i:], m.ResourceVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.APIVersion)
+	copy(dAtA[i:], m.APIVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Kind)
+	copy(dAtA[i:], m.Kind)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PersistentVolume) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PersistentVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PersistentVolumeClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PersistentVolumeClaimCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x2a
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PersistentVolumeClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PersistentVolumeClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.VolumeAttributesClassName != nil {
+		i -= len(*m.VolumeAttributesClassName)
+		copy(dAtA[i:], *m.VolumeAttributesClassName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeAttributesClassName)))
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.DataSourceRef != nil {
+		{
+			size, err := m.DataSourceRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	if m.DataSource != nil {
+		{
+			size, err := m.DataSource.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.VolumeMode != nil {
+		i -= len(*m.VolumeMode)
+		copy(dAtA[i:], *m.VolumeMode)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode)))
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.StorageClassName != nil {
+		i -= len(*m.StorageClassName)
+		copy(dAtA[i:], *m.StorageClassName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName)))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.Selector != nil {
+		{
+			size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	i -= len(m.VolumeName)
+	copy(dAtA[i:], m.VolumeName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName)))
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	if len(m.AccessModes) > 0 {
+		for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.AccessModes[iNdEx])
+			copy(dAtA[i:], m.AccessModes[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ModifyVolumeStatus != nil {
+		{
+			size, err := m.ModifyVolumeStatus.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.CurrentVolumeAttributesClassName != nil {
+		i -= len(*m.CurrentVolumeAttributesClassName)
+		copy(dAtA[i:], *m.CurrentVolumeAttributesClassName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CurrentVolumeAttributesClassName)))
+		i--
+		dAtA[i] = 0x42
+	}
+	if len(m.AllocatedResourceStatuses) > 0 {
+		keysForAllocatedResourceStatuses := make([]string, 0, len(m.AllocatedResourceStatuses))
+		for k := range m.AllocatedResourceStatuses {
+			keysForAllocatedResourceStatuses = append(keysForAllocatedResourceStatuses, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResourceStatuses)
+		for iNdEx := len(keysForAllocatedResourceStatuses) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.AllocatedResourceStatuses[ResourceName(keysForAllocatedResourceStatuses[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAllocatedResourceStatuses[iNdEx])
+			copy(dAtA[i:], keysForAllocatedResourceStatuses[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResourceStatuses[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if len(m.AllocatedResources) > 0 {
+		keysForAllocatedResources := make([]string, 0, len(m.AllocatedResources))
+		for k := range m.AllocatedResources {
+			keysForAllocatedResources = append(keysForAllocatedResources, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources)
+		for iNdEx := len(keysForAllocatedResources) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.AllocatedResources[ResourceName(keysForAllocatedResources[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAllocatedResources[iNdEx])
+			copy(dAtA[i:], keysForAllocatedResources[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatedResources[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.Capacity) > 0 {
+		keysForCapacity := make([]string, 0, len(m.Capacity))
+		for k := range m.Capacity {
+			keysForCapacity = append(keysForCapacity, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+		for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Capacity[ResourceName(keysForCapacity[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForCapacity[iNdEx])
+			copy(dAtA[i:], keysForCapacity[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.AccessModes) > 0 {
+		for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.AccessModes[iNdEx])
+			copy(dAtA[i:], m.AccessModes[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Phase)
+	copy(dAtA[i:], m.Phase)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PersistentVolumeClaimTemplate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PersistentVolumeClaimTemplate) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PersistentVolumeClaimTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PersistentVolumeClaimVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.ClaimName)
+	copy(dAtA[i:], m.ClaimName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PersistentVolumeList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.CSI != nil {
+		{
+			size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xb2
+	}
+	if m.StorageOS != nil {
+		{
+			size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xaa
+	}
+	if m.Local != nil {
+		{
+			size, err := m.Local.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.ScaleIO != nil {
+		{
+			size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x9a
+	}
+	if m.PortworxVolume != nil {
+		{
+			size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x92
+	}
+	if m.PhotonPersistentDisk != nil {
+		{
+			size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x8a
+	}
+	if m.AzureDisk != nil {
+		{
+			size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x82
+	}
+	if m.Quobyte != nil {
+		{
+			size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x7a
+	}
+	if m.VsphereVolume != nil {
+		{
+			size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x72
+	}
+	if m.AzureFile != nil {
+		{
+			size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x6a
+	}
+	if m.FlexVolume != nil {
+		{
+			size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x62
+	}
+	if m.Flocker != nil {
+		{
+			size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
+	if m.FC != nil {
+		{
+			size, err := m.FC.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.CephFS != nil {
+		{
+			size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.Cinder != nil {
+		{
+			size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	if m.ISCSI != nil {
+		{
+			size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.RBD != nil {
+		{
+			size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.NFS != nil {
+		{
+			size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.Glusterfs != nil {
+		{
+			size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.HostPath != nil {
+		{
+			size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.AWSElasticBlockStore != nil {
+		{
+			size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.GCEPersistentDisk != nil {
+		{
+			size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PersistentVolumeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.VolumeAttributesClassName != nil {
+		i -= len(*m.VolumeAttributesClassName)
+		copy(dAtA[i:], *m.VolumeAttributesClassName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeAttributesClassName)))
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.NodeAffinity != nil {
+		{
+			size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.VolumeMode != nil {
+		i -= len(*m.VolumeMode)
+		copy(dAtA[i:], *m.VolumeMode)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode)))
+		i--
+		dAtA[i] = 0x42
+	}
+	if len(m.MountOptions) > 0 {
+		for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.MountOptions[iNdEx])
+			copy(dAtA[i:], m.MountOptions[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx])))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	i -= len(m.StorageClassName)
+	copy(dAtA[i:], m.StorageClassName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.PersistentVolumeReclaimPolicy)
+	copy(dAtA[i:], m.PersistentVolumeReclaimPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy)))
+	i--
+	dAtA[i] = 0x2a
+	if m.ClaimRef != nil {
+		{
+			size, err := m.ClaimRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.AccessModes) > 0 {
+		for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.AccessModes[iNdEx])
+			copy(dAtA[i:], m.AccessModes[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	{
+		size, err := m.PersistentVolumeSource.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	if len(m.Capacity) > 0 {
+		keysForCapacity := make([]string, 0, len(m.Capacity))
+		for k := range m.Capacity {
+			keysForCapacity = append(keysForCapacity, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+		for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Capacity[ResourceName(keysForCapacity[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForCapacity[iNdEx])
+			copy(dAtA[i:], keysForCapacity[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PersistentVolumeStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PersistentVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.LastPhaseTransitionTime != nil {
+		{
+			size, err := m.LastPhaseTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Phase)
+	copy(dAtA[i:], m.Phase)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PhotonPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PhotonPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.PdID)
+	copy(dAtA[i:], m.PdID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Pod) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Pod) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Pod) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodAffinity) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+		for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 {
+		for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.MismatchLabelKeys) > 0 {
+		for iNdEx := len(m.MismatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.MismatchLabelKeys[iNdEx])
+			copy(dAtA[i:], m.MismatchLabelKeys[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.MismatchLabelKeys[iNdEx])))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if len(m.MatchLabelKeys) > 0 {
+		for iNdEx := len(m.MatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.MatchLabelKeys[iNdEx])
+			copy(dAtA[i:], m.MatchLabelKeys[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.MatchLabelKeys[iNdEx])))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if m.NamespaceSelector != nil {
+		{
+			size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	i -= len(m.TopologyKey)
+	copy(dAtA[i:], m.TopologyKey)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Namespaces) > 0 {
+		for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Namespaces[iNdEx])
+			copy(dAtA[i:], m.Namespaces[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.LabelSelector != nil {
+		{
+			size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodAntiAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+		for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 {
+		for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodAttachOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Container)
+	copy(dAtA[i:], m.Container)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container)))
+	i--
+	dAtA[i] = 0x2a
+	i--
+	if m.TTY {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	i--
+	if m.Stderr {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i--
+	if m.Stdout {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i--
+	if m.Stdin {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *PodCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x2a
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodDNSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Options) > 0 {
+		for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Searches) > 0 {
+		for iNdEx := len(m.Searches) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Searches[iNdEx])
+			copy(dAtA[i:], m.Searches[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Searches[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Nameservers) > 0 {
+		for iNdEx := len(m.Nameservers) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Nameservers[iNdEx])
+			copy(dAtA[i:], m.Nameservers[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Nameservers[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodDNSConfigOption) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Value != nil {
+		i -= len(*m.Value)
+		copy(dAtA[i:], *m.Value)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value)))
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodExecOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodExecOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Command) > 0 {
+		for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Command[iNdEx])
+			copy(dAtA[i:], m.Command[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx])))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	i -= len(m.Container)
+	copy(dAtA[i:], m.Container)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container)))
+	i--
+	dAtA[i] = 0x2a
+	i--
+	if m.TTY {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	i--
+	if m.Stderr {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i--
+	if m.Stdout {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i--
+	if m.Stdin {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *PodIP) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodIP) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodIP) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.IP)
+	copy(dAtA[i:], m.IP)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodLogOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Stream != nil {
+		i -= len(*m.Stream)
+		copy(dAtA[i:], *m.Stream)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Stream)))
+		i--
+		dAtA[i] = 0x52
+	}
+	i--
+	if m.InsecureSkipTLSVerifyBackend {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x48
+	if m.LimitBytes != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes))
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.TailLines != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines))
+		i--
+		dAtA[i] = 0x38
+	}
+	i--
+	if m.Timestamps {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x30
+	if m.SinceTime != nil {
+		{
+			size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.SinceSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds))
+		i--
+		dAtA[i] = 0x20
+	}
+	i--
+	if m.Previous {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i--
+	if m.Follow {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Container)
+	copy(dAtA[i:], m.Container)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodOS) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodOS) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodOS) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodPortForwardOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
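+	// A repeated scalar is written unpacked here: every element carries its
+	// own one-byte key, 0x8 = (1<<3)|0 (field 1, varint).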
+	if len(m.Ports) > 0 {
+		for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
+			i = encodeVarintGenerated(dAtA, i, uint64(m.Ports[iNdEx]))
+			i--
+			dAtA[i] = 0x8
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodReadinessGate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ConditionType)
+	copy(dAtA[i:], m.ConditionType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodResourceClaim) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodResourceClaim) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ResourceClaimTemplateName != nil {
+		i -= len(*m.ResourceClaimTemplateName)
+		copy(dAtA[i:], *m.ResourceClaimTemplateName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimTemplateName)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.ResourceClaimName != nil {
+		i -= len(*m.ResourceClaimName)
+		copy(dAtA[i:], *m.ResourceClaimName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodResourceClaimStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodResourceClaimStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ResourceClaimName != nil {
+		i -= len(*m.ResourceClaimName)
+		copy(dAtA[i:], *m.ResourceClaimName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceClaimName)))
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodSchedulingGate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodSchedulingGate) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodSchedulingGate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SELinuxChangePolicy != nil {
+		i -= len(*m.SELinuxChangePolicy)
+		copy(dAtA[i:], *m.SELinuxChangePolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SELinuxChangePolicy)))
+		i--
+		dAtA[i] = 0x6a
+	}
+	if m.SupplementalGroupsPolicy != nil {
+		i -= len(*m.SupplementalGroupsPolicy)
+		copy(dAtA[i:], *m.SupplementalGroupsPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SupplementalGroupsPolicy)))
+		i--
+		dAtA[i] = 0x62
+	}
+	if m.AppArmorProfile != nil {
+		{
+			size, err := m.AppArmorProfile.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
+	if m.SeccompProfile != nil {
+		{
+			size, err := m.SeccompProfile.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.FSGroupChangePolicy != nil {
+		i -= len(*m.FSGroupChangePolicy)
+		copy(dAtA[i:], *m.FSGroupChangePolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSGroupChangePolicy)))
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.WindowsOptions != nil {
+		{
+			size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	if len(m.Sysctls) > 0 {
+		for iNdEx := len(m.Sysctls) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Sysctls[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if m.RunAsGroup != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup))
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.FSGroup != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup))
+		i--
+		dAtA[i] = 0x28
+	}
+	if len(m.SupplementalGroups) > 0 {
+		for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- {
+			i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx]))
+			i--
+			dAtA[i] = 0x20
+		}
+	}
+	if m.RunAsNonRoot != nil {
+		i--
+		if *m.RunAsNonRoot {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.RunAsUser != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.SELinuxOptions != nil {
+		{
+			size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PodSignature) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.PodController != nil {
+		{
+			size, err := m.PodController.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PodSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
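+	// Field numbers 16 and above need a multi-byte varint key. The two key
+	// bytes are written in reverse, so the wire sees 0xc2 0x02, which decodes
+	// to (40<<3)|2: field 40 (the Resources field), wire type 2.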
+	if m.Resources != nil {
+		{
+			size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2
+		i--
+		dAtA[i] = 0xc2
+	}
+	if len(m.ResourceClaims) > 0 {
+		for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x2
+			i--
+			dAtA[i] = 0xba
+		}
+	}
+	if len(m.SchedulingGates) > 0 {
+		for iNdEx := len(m.SchedulingGates) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.SchedulingGates[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x2
+			i--
+			dAtA[i] = 0xb2
+		}
+	}
+	if m.HostUsers != nil {
+		i--
+		if *m.HostUsers {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x2
+		i--
+		dAtA[i] = 0xa8
+	}
+	if m.OS != nil {
+		{
+			size, err := m.OS.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.SetHostnameAsFQDN != nil {
+		i--
+		if *m.SetHostnameAsFQDN {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x2
+		i--
+		dAtA[i] = 0x98
+	}
+	if len(m.EphemeralContainers) > 0 {
+		for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x2
+			i--
+			dAtA[i] = 0x92
+		}
+	}
+	if len(m.TopologySpreadConstraints) > 0 {
+		for iNdEx := len(m.TopologySpreadConstraints) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.TopologySpreadConstraints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x2
+			i--
+			dAtA[i] = 0x8a
+		}
+	}
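+	// Map fields are encoded as repeated key/value entry messages. Keys are
+	// sorted first so the output is deterministic; each entry is framed with
+	// its own length (baseI-i) before the outer field key is written.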
+	if len(m.Overhead) > 0 {
+		keysForOverhead := make([]string, 0, len(m.Overhead))
+		for k := range m.Overhead {
+			keysForOverhead = append(keysForOverhead, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead)
+		for iNdEx := len(keysForOverhead) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Overhead[ResourceName(keysForOverhead[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForOverhead[iNdEx])
+			copy(dAtA[i:], keysForOverhead[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOverhead[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x2
+			i--
+			dAtA[i] = 0x82
+		}
+	}
+	if m.PreemptionPolicy != nil {
+		i -= len(*m.PreemptionPolicy)
+		copy(dAtA[i:], *m.PreemptionPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy)))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xfa
+	}
+	if m.EnableServiceLinks != nil {
+		i--
+		if *m.EnableServiceLinks {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xf0
+	}
+	if m.RuntimeClassName != nil {
+		i -= len(*m.RuntimeClassName)
+		copy(dAtA[i:], *m.RuntimeClassName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName)))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xea
+	}
+	if len(m.ReadinessGates) > 0 {
+		for iNdEx := len(m.ReadinessGates) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ReadinessGates[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0xe2
+		}
+	}
+	if m.ShareProcessNamespace != nil {
+		i--
+		if *m.ShareProcessNamespace {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xd8
+	}
+	if m.DNSConfig != nil {
+		{
+			size, err := m.DNSConfig.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xd2
+	}
+	if m.Priority != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xc8
+	}
+	i -= len(m.PriorityClassName)
+	copy(dAtA[i:], m.PriorityClassName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName)))
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0xc2
+	if len(m.HostAliases) > 0 {
+		for iNdEx := len(m.HostAliases) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.HostAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0xba
+		}
+	}
+	if len(m.Tolerations) > 0 {
+		for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0xb2
+		}
+	}
+	if m.AutomountServiceAccountToken != nil {
+		i--
+		if *m.AutomountServiceAccountToken {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xa8
+	}
+	if len(m.InitContainers) > 0 {
+		for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0xa2
+		}
+	}
+	i -= len(m.SchedulerName)
+	copy(dAtA[i:], m.SchedulerName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName)))
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x9a
+	if m.Affinity != nil {
+		{
+			size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x92
+	}
+	i -= len(m.Subdomain)
+	copy(dAtA[i:], m.Subdomain)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain)))
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x8a
+	i -= len(m.Hostname)
+	copy(dAtA[i:], m.Hostname)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname)))
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x82
+	if len(m.ImagePullSecrets) > 0 {
+		for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x7a
+		}
+	}
+	if m.SecurityContext != nil {
+		{
+			size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x72
+	}
+	i--
+	if m.HostIPC {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x68
+	i--
+	if m.HostPID {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x60
+	i--
+	if m.HostNetwork {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x58
+	i -= len(m.NodeName)
+	copy(dAtA[i:], m.NodeName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
+	i--
+	dAtA[i] = 0x52
+	i -= len(m.DeprecatedServiceAccount)
+	copy(dAtA[i:], m.DeprecatedServiceAccount)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount)))
+	i--
+	dAtA[i] = 0x4a
+	i -= len(m.ServiceAccountName)
+	copy(dAtA[i:], m.ServiceAccountName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName)))
+	i--
+	dAtA[i] = 0x42
+	if len(m.NodeSelector) > 0 {
+		keysForNodeSelector := make([]string, 0, len(m.NodeSelector))
+		for k := range m.NodeSelector {
+			keysForNodeSelector = append(keysForNodeSelector, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector)
+		for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.NodeSelector[string(keysForNodeSelector[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForNodeSelector[iNdEx])
+			copy(dAtA[i:], keysForNodeSelector[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	i -= len(m.DNSPolicy)
+	copy(dAtA[i:], m.DNSPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy)))
+	i--
+	dAtA[i] = 0x32
+	if m.ActiveDeadlineSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.TerminationGracePeriodSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds))
+		i--
+		dAtA[i] = 0x20
+	}
+	i -= len(m.RestartPolicy)
+	copy(dAtA[i:], m.RestartPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Containers) > 0 {
+		for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Volumes) > 0 {
+		for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PodStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.HostIPs) > 0 {
+		for iNdEx := len(m.HostIPs) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.HostIPs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0x82
+		}
+	}
+	if len(m.ResourceClaimStatuses) > 0 {
+		for iNdEx := len(m.ResourceClaimStatuses) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ResourceClaimStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x7a
+		}
+	}
+	i -= len(m.Resize)
+	copy(dAtA[i:], m.Resize)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resize)))
+	i--
+	dAtA[i] = 0x72
+	if len(m.EphemeralContainerStatuses) > 0 {
+		for iNdEx := len(m.EphemeralContainerStatuses) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.EphemeralContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x6a
+		}
+	}
+	if len(m.PodIPs) > 0 {
+		for iNdEx := len(m.PodIPs) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.PodIPs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x62
+		}
+	}
+	i -= len(m.NominatedNodeName)
+	copy(dAtA[i:], m.NominatedNodeName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.NominatedNodeName)))
+	i--
+	dAtA[i] = 0x5a
+	if len(m.InitContainerStatuses) > 0 {
+		for iNdEx := len(m.InitContainerStatuses) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.InitContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x52
+		}
+	}
+	i -= len(m.QOSClass)
+	copy(dAtA[i:], m.QOSClass)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass)))
+	i--
+	dAtA[i] = 0x4a
+	if len(m.ContainerStatuses) > 0 {
+		for iNdEx := len(m.ContainerStatuses) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if m.StartTime != nil {
+		{
+			size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	i -= len(m.PodIP)
+	copy(dAtA[i:], m.PodIP)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.HostIP)
+	copy(dAtA[i:], m.HostIP)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Phase)
+	copy(dAtA[i:], m.Phase)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodStatusResult) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodStatusResult) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodTemplate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodTemplateList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PodTemplateSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PodTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PortStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PortStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PortStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Error != nil {
+		i -= len(*m.Error)
+		copy(dAtA[i:], *m.Error)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.Protocol)
+	copy(dAtA[i:], m.Protocol)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol)))
+	i--
+	dAtA[i] = 0x12
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Port))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PortworxVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PortworxVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.VolumeID)
+	copy(dAtA[i:], m.VolumeID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Preconditions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.UID != nil {
+		i -= len(*m.UID)
+		copy(dAtA[i:], *m.UID)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PreferAvoidPodsEntry) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PreferAvoidPodsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.EvictionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.PodSignature.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PreferredSchedulingTerm) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PreferredSchedulingTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Preference.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Weight))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *Probe) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Probe) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Probe) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.TerminationGracePeriodSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds))
+		i--
+		dAtA[i] = 0x38
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.FailureThreshold))
+	i--
+	dAtA[i] = 0x30
+	i = encodeVarintGenerated(dAtA, i, uint64(m.SuccessThreshold))
+	i--
+	dAtA[i] = 0x28
+	i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds))
+	i--
+	dAtA[i] = 0x20
+	i = encodeVarintGenerated(dAtA, i, uint64(m.TimeoutSeconds))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds))
+	i--
+	dAtA[i] = 0x10
+	{
+		size, err := m.ProbeHandler.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ProbeHandler) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ProbeHandler) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProbeHandler) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.GRPC != nil {
+		{
+			size, err := m.GRPC.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.TCPSocket != nil {
+		{
+			size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.HTTPGet != nil {
+		{
+			size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Exec != nil {
+		{
+			size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ProjectedVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProjectedVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.DefaultMode != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode))
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.Sources) > 0 {
+		for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *QuobyteVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *QuobyteVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Tenant)
+	copy(dAtA[i:], m.Tenant)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tenant)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Group)
+	copy(dAtA[i:], m.Group)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.User)
+	copy(dAtA[i:], m.User)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+	i--
+	dAtA[i] = 0x22
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.Volume)
+	copy(dAtA[i:], m.Volume)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volume)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Registry)
+	copy(dAtA[i:], m.Registry)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RBDPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x40
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	i -= len(m.Keyring)
+	copy(dAtA[i:], m.Keyring)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.RadosUser)
+	copy(dAtA[i:], m.RadosUser)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.RBDPool)
+	copy(dAtA[i:], m.RBDPool)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.RBDImage)
+	copy(dAtA[i:], m.RBDImage)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage)))
+	i--
+	dAtA[i] = 0x12
+	if len(m.CephMonitors) > 0 {
+		for iNdEx := len(m.CephMonitors) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.CephMonitors[iNdEx])
+			copy(dAtA[i:], m.CephMonitors[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.CephMonitors[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RBDVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x40
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	i -= len(m.Keyring)
+	copy(dAtA[i:], m.Keyring)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.RadosUser)
+	copy(dAtA[i:], m.RadosUser)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.RBDPool)
+	copy(dAtA[i:], m.RBDPool)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.RBDImage)
+	copy(dAtA[i:], m.RBDImage)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage)))
+	i--
+	dAtA[i] = 0x12
+	if len(m.CephMonitors) > 0 {
+		for iNdEx := len(m.CephMonitors) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.CephMonitors[iNdEx])
+			copy(dAtA[i:], m.CephMonitors[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.CephMonitors[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RangeAllocation) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RangeAllocation) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Data != nil {
+		i -= len(m.Data)
+		copy(dAtA[i:], m.Data)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.Range)
+	copy(dAtA[i:], m.Range)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ReplicationController) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicationController) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ReplicationControllerCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicationControllerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ReplicationControllerList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicationControllerList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ReplicationControllerSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicationControllerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+	i--
+	dAtA[i] = 0x20
+	if m.Template != nil {
+		{
+			size, err := m.Template.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Selector) > 0 {
+		keysForSelector := make([]string, 0, len(m.Selector))
+		for k := range m.Selector {
+			keysForSelector = append(keysForSelector, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+		for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Selector[string(keysForSelector[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForSelector[iNdEx])
+			copy(dAtA[i:], keysForSelector[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Replicas != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ReplicationControllerStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ReplicationControllerStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ReplicationControllerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas))
+	i--
+	dAtA[i] = 0x28
+	i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas))
+	i--
+	dAtA[i] = 0x20
+	i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceClaim) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceClaim) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Request)
+	copy(dAtA[i:], m.Request)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Divisor.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Resource)
+	copy(dAtA[i:], m.Resource)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.ContainerName)
+	copy(dAtA[i:], m.ContainerName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceHealth) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceHealth) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceHealth) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Health)
+	copy(dAtA[i:], m.Health)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Health)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.ResourceID)
+	copy(dAtA[i:], m.ResourceID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceQuota) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceQuotaList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceQuotaSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceQuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ScopeSelector != nil {
+		{
+			size, err := m.ScopeSelector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Scopes) > 0 {
+		for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Scopes[iNdEx])
+			copy(dAtA[i:], m.Scopes[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Hard) > 0 {
+		keysForHard := make([]string, 0, len(m.Hard))
+		for k := range m.Hard {
+			keysForHard = append(keysForHard, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForHard)
+		for iNdEx := len(keysForHard) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Hard[ResourceName(keysForHard[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForHard[iNdEx])
+			copy(dAtA[i:], keysForHard[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHard[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceQuotaStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceQuotaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Used) > 0 {
+		keysForUsed := make([]string, 0, len(m.Used))
+		for k := range m.Used {
+			keysForUsed = append(keysForUsed, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForUsed)
+		for iNdEx := len(keysForUsed) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Used[ResourceName(keysForUsed[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForUsed[iNdEx])
+			copy(dAtA[i:], keysForUsed[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsed[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Hard) > 0 {
+		keysForHard := make([]string, 0, len(m.Hard))
+		for k := range m.Hard {
+			keysForHard = append(keysForHard, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForHard)
+		for iNdEx := len(keysForHard) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Hard[ResourceName(keysForHard[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForHard[iNdEx])
+			copy(dAtA[i:], keysForHard[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHard[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Claims) > 0 {
+		for iNdEx := len(m.Claims) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Claims[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Requests) > 0 {
+		keysForRequests := make([]string, 0, len(m.Requests))
+		for k := range m.Requests {
+			keysForRequests = append(keysForRequests, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+		for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Requests[ResourceName(keysForRequests[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForRequests[iNdEx])
+			copy(dAtA[i:], keysForRequests[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Limits) > 0 {
+		keysForLimits := make([]string, 0, len(m.Limits))
+		for k := range m.Limits {
+			keysForLimits = append(keysForLimits, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForLimits)
+		for iNdEx := len(keysForLimits) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Limits[ResourceName(keysForLimits[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForLimits[iNdEx])
+			copy(dAtA[i:], keysForLimits[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLimits[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Resources) > 0 {
+		for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SELinuxOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Level)
+	copy(dAtA[i:], m.Level)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Role)
+	copy(dAtA[i:], m.Role)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.User)
+	copy(dAtA[i:], m.User)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ScaleIOPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x50
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x4a
+	i -= len(m.VolumeName)
+	copy(dAtA[i:], m.VolumeName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName)))
+	i--
+	dAtA[i] = 0x42
+	i -= len(m.StorageMode)
+	copy(dAtA[i:], m.StorageMode)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode)))
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.StoragePool)
+	copy(dAtA[i:], m.StoragePool)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.ProtectionDomain)
+	copy(dAtA[i:], m.ProtectionDomain)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain)))
+	i--
+	dAtA[i] = 0x2a
+	i--
+	if m.SSLEnabled {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.System)
+	copy(dAtA[i:], m.System)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.System)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Gateway)
+	copy(dAtA[i:], m.Gateway)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ScaleIOVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x50
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x4a
+	i -= len(m.VolumeName)
+	copy(dAtA[i:], m.VolumeName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName)))
+	i--
+	dAtA[i] = 0x42
+	i -= len(m.StorageMode)
+	copy(dAtA[i:], m.StorageMode)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode)))
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.StoragePool)
+	copy(dAtA[i:], m.StoragePool)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.ProtectionDomain)
+	copy(dAtA[i:], m.ProtectionDomain)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain)))
+	i--
+	dAtA[i] = 0x2a
+	i--
+	if m.SSLEnabled {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	i -= len(m.System)
+	copy(dAtA[i:], m.System)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.System)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Gateway)
+	copy(dAtA[i:], m.Gateway)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ScopeSelector) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ScopeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.MatchExpressions) > 0 {
+		for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ScopedResourceSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Values) > 0 {
+		for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Values[iNdEx])
+			copy(dAtA[i:], m.Values[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.Operator)
+	copy(dAtA[i:], m.Operator)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.ScopeName)
+	copy(dAtA[i:], m.ScopeName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SeccompProfile) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SeccompProfile) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SeccompProfile) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.LocalhostProfile != nil {
+		i -= len(*m.LocalhostProfile)
+		copy(dAtA[i:], *m.LocalhostProfile)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LocalhostProfile)))
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
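+// Secret.Data below is a map[string][]byte; unlike string-valued maps, the
+// value field of each entry is emitted only when the byte slice is non-nil,
+// which is why the generated loop guards the value write with `if v != nil`.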
+func (m *Secret) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Secret) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Immutable != nil {
+		i--
+		if *m.Immutable {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
+	if len(m.StringData) > 0 {
+		keysForStringData := make([]string, 0, len(m.StringData))
+		for k := range m.StringData {
+			keysForStringData = append(keysForStringData, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForStringData)
+		for iNdEx := len(keysForStringData) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.StringData[string(keysForStringData[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForStringData[iNdEx])
+			copy(dAtA[i:], keysForStringData[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStringData[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Data) > 0 {
+		keysForData := make([]string, 0, len(m.Data))
+		for k := range m.Data {
+			keysForData = append(keysForData, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForData)
+		for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Data[string(keysForData[iNdEx])]
+			baseI := i
+			if v != nil {
+				i -= len(v)
+				copy(dAtA[i:], v)
+				i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+				i--
+				dAtA[i] = 0x12
+			}
+			i -= len(keysForData[iNdEx])
+			copy(dAtA[i:], keysForData[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretEnvSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Optional != nil {
+		i--
+		if *m.Optional {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	{
+		size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Optional != nil {
+		i--
+		if *m.Optional {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SecretList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SecretList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SecretProjection) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Optional != nil {
+		i--
+		if *m.Optional {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SecretReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecretVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Optional != nil {
+		i--
+		if *m.Optional {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.DefaultMode != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.SecretName)
+	copy(dAtA[i:], m.SecretName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
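+// SecurityContext below illustrates optional (pointer) fields: each is
+// nil-guarded, so unset fields contribute no bytes at all. Booleans are
+// written as a single 0 or 1 byte under a varint-typed tag (wire type 0),
+// e.g. 0x10 for field 2 (privileged) and 0x38 for field 7
+// (allowPrivilegeEscalation).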
+func (m *SecurityContext) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.AppArmorProfile != nil {
+		{
+			size, err := m.AppArmorProfile.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x62
+	}
+	if m.SeccompProfile != nil {
+		{
+			size, err := m.SeccompProfile.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
+	if m.WindowsOptions != nil {
+		{
+			size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.ProcMount != nil {
+		i -= len(*m.ProcMount)
+		copy(dAtA[i:], *m.ProcMount)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount)))
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.RunAsGroup != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup))
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.AllowPrivilegeEscalation != nil {
+		i--
+		if *m.AllowPrivilegeEscalation {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x38
+	}
+	if m.ReadOnlyRootFilesystem != nil {
+		i--
+		if *m.ReadOnlyRootFilesystem {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.RunAsNonRoot != nil {
+		i--
+		if *m.RunAsNonRoot {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.RunAsUser != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.SELinuxOptions != nil {
+		{
+			size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Privileged != nil {
+		i--
+		if *m.Privileged {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Capabilities != nil {
+		{
+			size, err := m.Capabilities.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SerializedReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SerializedReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Reference.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Service) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Service) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ServiceAccount) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceAccount) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.AutomountServiceAccountToken != nil {
+		i--
+		if *m.AutomountServiceAccountToken {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if len(m.ImagePullSecrets) > 0 {
+		for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Secrets) > 0 {
+		for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceAccountList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceAccountTokenProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x1a
+	if m.ExpirationSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds))
+		i--
+		dAtA[i] = 0x10
+	}
+	i -= len(m.Audience)
+	copy(dAtA[i:], m.Audience)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ServiceList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ServicePort) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServicePort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.AppProtocol != nil {
+		i -= len(*m.AppProtocol)
+		copy(dAtA[i:], *m.AppProtocol)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol)))
+		i--
+		dAtA[i] = 0x32
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort))
+	i--
+	dAtA[i] = 0x28
+	{
+		size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x22
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Port))
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.Protocol)
+	copy(dAtA[i:], m.Protocol)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServiceProxyOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
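+// ServiceSpec below has field numbers above 15, whose tags no longer fit in
+// a single byte. The tag varint is then split across two assignments in
+// reverse buffer order: for trafficDistribution (field 23, wire type 2) the
+// tag is (23<<3)|2 = 186, i.e. 0xba 0x01 on the wire, which the
+// backward-filling code emits as dAtA[i] = 0x1 followed by dAtA[i] = 0xba.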
+func (m *ServiceSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.TrafficDistribution != nil {
+		i -= len(*m.TrafficDistribution)
+		copy(dAtA[i:], *m.TrafficDistribution)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TrafficDistribution)))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xba
+	}
+	if m.InternalTrafficPolicy != nil {
+		i -= len(*m.InternalTrafficPolicy)
+		copy(dAtA[i:], *m.InternalTrafficPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InternalTrafficPolicy)))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xb2
+	}
+	if m.LoadBalancerClass != nil {
+		i -= len(*m.LoadBalancerClass)
+		copy(dAtA[i:], *m.LoadBalancerClass)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LoadBalancerClass)))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xaa
+	}
+	if m.AllocateLoadBalancerNodePorts != nil {
+		i--
+		if *m.AllocateLoadBalancerNodePorts {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xa0
+	}
+	if len(m.IPFamilies) > 0 {
+		for iNdEx := len(m.IPFamilies) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.IPFamilies[iNdEx])
+			copy(dAtA[i:], m.IPFamilies[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPFamilies[iNdEx])))
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0x9a
+		}
+	}
+	if len(m.ClusterIPs) > 0 {
+		for iNdEx := len(m.ClusterIPs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ClusterIPs[iNdEx])
+			copy(dAtA[i:], m.ClusterIPs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIPs[iNdEx])))
+			i--
+			dAtA[i] = 0x1
+			i--
+			dAtA[i] = 0x92
+		}
+	}
+	if m.IPFamilyPolicy != nil {
+		i -= len(*m.IPFamilyPolicy)
+		copy(dAtA[i:], *m.IPFamilyPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamilyPolicy)))
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x8a
+	}
+	if m.SessionAffinityConfig != nil {
+		{
+			size, err := m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x72
+	}
+	i--
+	if m.PublishNotReadyAddresses {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x68
+	i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort))
+	i--
+	dAtA[i] = 0x60
+	i -= len(m.ExternalTrafficPolicy)
+	copy(dAtA[i:], m.ExternalTrafficPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy)))
+	i--
+	dAtA[i] = 0x5a
+	i -= len(m.ExternalName)
+	copy(dAtA[i:], m.ExternalName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName)))
+	i--
+	dAtA[i] = 0x52
+	if len(m.LoadBalancerSourceRanges) > 0 {
+		for iNdEx := len(m.LoadBalancerSourceRanges) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.LoadBalancerSourceRanges[iNdEx])
+			copy(dAtA[i:], m.LoadBalancerSourceRanges[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerSourceRanges[iNdEx])))
+			i--
+			dAtA[i] = 0x4a
+		}
+	}
+	i -= len(m.LoadBalancerIP)
+	copy(dAtA[i:], m.LoadBalancerIP)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP)))
+	i--
+	dAtA[i] = 0x42
+	i -= len(m.SessionAffinity)
+	copy(dAtA[i:], m.SessionAffinity)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity)))
+	i--
+	dAtA[i] = 0x3a
+	if len(m.ExternalIPs) > 0 {
+		for iNdEx := len(m.ExternalIPs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ExternalIPs[iNdEx])
+			copy(dAtA[i:], m.ExternalIPs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalIPs[iNdEx])))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.ClusterIP)
+	copy(dAtA[i:], m.ClusterIP)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Selector) > 0 {
+		keysForSelector := make([]string, 0, len(m.Selector))
+		for k := range m.Selector {
+			keysForSelector = append(keysForSelector, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+		for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Selector[string(keysForSelector[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForSelector[iNdEx])
+			copy(dAtA[i:], keysForSelector[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Ports) > 0 {
+		for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ServiceStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ClientIP != nil {
+		{
+			size, err := m.ClientIP.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SleepAction) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SleepAction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SleepAction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StorageOSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.VolumeNamespace)
+	copy(dAtA[i:], m.VolumeNamespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.VolumeName)
+	copy(dAtA[i:], m.VolumeName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StorageOSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SecretRef != nil {
+		{
+			size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.VolumeNamespace)
+	copy(dAtA[i:], m.VolumeNamespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.VolumeName)
+	copy(dAtA[i:], m.VolumeName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Sysctl) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Sysctl) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TCPSocketAction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Host)
+	copy(dAtA[i:], m.Host)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.Port.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Taint) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Taint) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Taint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.TimeAdded != nil {
+		{
+			size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	i -= len(m.Effect)
+	copy(dAtA[i:], m.Effect)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Toleration) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Toleration) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Toleration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.TolerationSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds))
+		i--
+		dAtA[i] = 0x28
+	}
+	i -= len(m.Effect)
+	copy(dAtA[i:], m.Effect)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Value)
+	copy(dAtA[i:], m.Value)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Operator)
+	copy(dAtA[i:], m.Operator)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TopologySelectorLabelRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Values) > 0 {
+		for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Values[iNdEx])
+			copy(dAtA[i:], m.Values[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Key)
+	copy(dAtA[i:], m.Key)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TopologySelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.MatchLabelExpressions) > 0 {
+		for iNdEx := len(m.MatchLabelExpressions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.MatchLabelExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *TopologySpreadConstraint) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TopologySpreadConstraint) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TopologySpreadConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.MatchLabelKeys) > 0 {
+		for iNdEx := len(m.MatchLabelKeys) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.MatchLabelKeys[iNdEx])
+			copy(dAtA[i:], m.MatchLabelKeys[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.MatchLabelKeys[iNdEx])))
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if m.NodeTaintsPolicy != nil {
+		i -= len(*m.NodeTaintsPolicy)
+		copy(dAtA[i:], *m.NodeTaintsPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeTaintsPolicy)))
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.NodeAffinityPolicy != nil {
+		i -= len(*m.NodeAffinityPolicy)
+		copy(dAtA[i:], *m.NodeAffinityPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeAffinityPolicy)))
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.MinDomains != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.MinDomains))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.LabelSelector != nil {
+		{
+			size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	i -= len(m.WhenUnsatisfiable)
+	copy(dAtA[i:], m.WhenUnsatisfiable)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenUnsatisfiable)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.TopologyKey)
+	copy(dAtA[i:], m.TopologyKey)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey)))
+	i--
+	dAtA[i] = 0x12
+	i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSkew))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TypedLocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Kind)
+	copy(dAtA[i:], m.Kind)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+	i--
+	dAtA[i] = 0x12
+	if m.APIGroup != nil {
+		i -= len(*m.APIGroup)
+		copy(dAtA[i:], *m.APIGroup)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *TypedObjectReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TypedObjectReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TypedObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Namespace != nil {
+		i -= len(*m.Namespace)
+		copy(dAtA[i:], *m.Namespace)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Namespace)))
+		i--
+		dAtA[i] = 0x22
+	}
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Kind)
+	copy(dAtA[i:], m.Kind)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+	i--
+	dAtA[i] = 0x12
+	if m.APIGroup != nil {
+		i -= len(*m.APIGroup)
+		copy(dAtA[i:], *m.APIGroup)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Volume) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Volume) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.VolumeSource.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *VolumeDevice) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VolumeDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.DevicePath)
+	copy(dAtA[i:], m.DevicePath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *VolumeMount) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.RecursiveReadOnly != nil {
+		i -= len(*m.RecursiveReadOnly)
+		copy(dAtA[i:], *m.RecursiveReadOnly)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RecursiveReadOnly)))
+		i--
+		dAtA[i] = 0x3a
+	}
+	i -= len(m.SubPathExpr)
+	copy(dAtA[i:], m.SubPathExpr)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr)))
+	i--
+	dAtA[i] = 0x32
+	if m.MountPropagation != nil {
+		i -= len(*m.MountPropagation)
+		copy(dAtA[i:], *m.MountPropagation)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation)))
+		i--
+		dAtA[i] = 0x2a
+	}
+	i -= len(m.SubPath)
+	copy(dAtA[i:], m.SubPath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.MountPath)
+	copy(dAtA[i:], m.MountPath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath)))
+	i--
+	dAtA[i] = 0x1a
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *VolumeMountStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *VolumeMountStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VolumeMountStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.RecursiveReadOnly != nil {
+		i -= len(*m.RecursiveReadOnly)
+		copy(dAtA[i:], *m.RecursiveReadOnly)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RecursiveReadOnly)))
+		i--
+		dAtA[i] = 0x22
+	}
+	i--
+	if m.ReadOnly {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i -= len(m.MountPath)
+	copy(dAtA[i:], m.MountPath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VolumeNodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Required != nil {
+		{
+			size, err := m.Required.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *VolumeProjection) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ClusterTrustBundle != nil {
+		{
+			size, err := m.ClusterTrustBundle.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.ServiceAccountToken != nil {
+		{
+			size, err := m.ServiceAccountToken.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.ConfigMap != nil {
+		{
+			size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.DownwardAPI != nil {
+		{
+			size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Secret != nil {
+		{
+			size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *VolumeResourceRequirements) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *VolumeResourceRequirements) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VolumeResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
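+	// Map fields are emitted as one length-delimited entry per key. Keys are
+	// sorted, then iterated in reverse so the backwards-filled buffer ends up
+	// in ascending key order; each entry is built value first (field 2), then
+	// key (field 1), then its length prefix and the outer field tag.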
+	if len(m.Requests) > 0 {
+		keysForRequests := make([]string, 0, len(m.Requests))
+		for k := range m.Requests {
+			keysForRequests = append(keysForRequests, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+		for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Requests[ResourceName(keysForRequests[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForRequests[iNdEx])
+			copy(dAtA[i:], keysForRequests[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Limits) > 0 {
+		keysForLimits := make([]string, 0, len(m.Limits))
+		for k := range m.Limits {
+			keysForLimits = append(keysForLimits, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForLimits)
+		for iNdEx := len(keysForLimits) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Limits[ResourceName(keysForLimits[iNdEx])]
+			baseI := i
+			{
+				size, err := (&v).MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForLimits[iNdEx])
+			copy(dAtA[i:], keysForLimits[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLimits[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *VolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
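+	// Each optional volume type is emitted only when set. Field numbers 16
+	// and above take a two-byte varint tag; since the buffer fills backwards,
+	// the bytes are written in reverse (0x1 then 0xf2 for field 30, Image).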
+	if m.Image != nil {
+		{
+			size, err := m.Image.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xf2
+	}
+	if m.Ephemeral != nil {
+		{
+			size, err := m.Ephemeral.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xea
+	}
+	if m.CSI != nil {
+		{
+			size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xe2
+	}
+	if m.StorageOS != nil {
+		{
+			size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xda
+	}
+	if m.Projected != nil {
+		{
+			size, err := m.Projected.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xd2
+	}
+	if m.ScaleIO != nil {
+		{
+			size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xca
+	}
+	if m.PortworxVolume != nil {
+		{
+			size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xc2
+	}
+	if m.PhotonPersistentDisk != nil {
+		{
+			size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xba
+	}
+	if m.AzureDisk != nil {
+		{
+			size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xb2
+	}
+	if m.Quobyte != nil {
+		{
+			size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xaa
+	}
+	if m.VsphereVolume != nil {
+		{
+			size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.ConfigMap != nil {
+		{
+			size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x9a
+	}
+	if m.AzureFile != nil {
+		{
+			size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x92
+	}
+	if m.FC != nil {
+		{
+			size, err := m.FC.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x8a
+	}
+	if m.DownwardAPI != nil {
+		{
+			size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x82
+	}
+	if m.Flocker != nil {
+		{
+			size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x7a
+	}
+	if m.CephFS != nil {
+		{
+			size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x72
+	}
+	if m.Cinder != nil {
+		{
+			size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x6a
+	}
+	if m.FlexVolume != nil {
+		{
+			size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x62
+	}
+	if m.RBD != nil {
+		{
+			size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
+	if m.PersistentVolumeClaim != nil {
+		{
+			size, err := m.PersistentVolumeClaim.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.Glusterfs != nil {
+		{
+			size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.ISCSI != nil {
+		{
+			size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	if m.NFS != nil {
+		{
+			size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.Secret != nil {
+		{
+			size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.GitRepo != nil {
+		{
+			size, err := m.GitRepo.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.AWSElasticBlockStore != nil {
+		{
+			size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.GCEPersistentDisk != nil {
+		{
+			size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.EmptyDir != nil {
+		{
+			size, err := m.EmptyDir.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.HostPath != nil {
+		{
+			size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VsphereVirtualDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.StoragePolicyID)
+	copy(dAtA[i:], m.StoragePolicyID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.StoragePolicyName)
+	copy(dAtA[i:], m.StoragePolicyName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.FSType)
+	copy(dAtA[i:], m.FSType)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.VolumePath)
+	copy(dAtA[i:], m.VolumePath)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WeightedPodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.PodAffinityTerm.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Weight))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *WindowsSecurityContextOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WindowsSecurityContextOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WindowsSecurityContextOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.HostProcess != nil {
+		i--
+		if *m.HostProcess {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.RunAsUserName != nil {
+		i -= len(*m.RunAsUserName)
+		copy(dAtA[i:], *m.RunAsUserName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RunAsUserName)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.GMSACredentialSpec != nil {
+		i -= len(*m.GMSACredentialSpec)
+		copy(dAtA[i:], *m.GMSACredentialSpec)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpec)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.GMSACredentialSpecName != nil {
+		i -= len(*m.GMSACredentialSpecName)
+		copy(dAtA[i:], *m.GMSACredentialSpecName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpecName)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
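+// encodeVarintGenerated writes v as a protobuf base-128 varint so that its
+// last byte sits at offset-1, and returns the index of its first byte;
+// sovGenerated reports how many bytes the varint occupies.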
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
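+
+// The Size methods below mirror the Marshal methods above, computing the
+// exact encoded length of each message: per populated field, one tag byte
+// (two when the field number exceeds 15), a length varint for
+// length-delimited fields, and the payload itself.
+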
+func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.VolumeID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Partition))
+	n += 2
+	return n
+}
+
+func (m *Affinity) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.NodeAffinity != nil {
+		l = m.NodeAffinity.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.PodAffinity != nil {
+		l = m.PodAffinity.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.PodAntiAffinity != nil {
+		l = m.PodAntiAffinity.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *AppArmorProfile) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.LocalhostProfile != nil {
+		l = len(*m.LocalhostProfile)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *AttachedVolume) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.DevicePath)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *AvoidPods) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.PreferAvoidPods) > 0 {
+		for _, e := range m.PreferAvoidPods {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *AzureDiskVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.DiskName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.DataDiskURI)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.CachingMode != nil {
+		l = len(*m.CachingMode)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.FSType != nil {
+		l = len(*m.FSType)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ReadOnly != nil {
+		n += 2
+	}
+	if m.Kind != nil {
+		l = len(*m.Kind)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *AzureFilePersistentVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.SecretName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ShareName)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if m.SecretNamespace != nil {
+		l = len(*m.SecretNamespace)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *AzureFileVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.SecretName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ShareName)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	return n
+}
+
+func (m *Binding) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Target.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *CSIPersistentVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Driver)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.VolumeHandle)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.VolumeAttributes) > 0 {
+		for k, v := range m.VolumeAttributes {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.ControllerPublishSecretRef != nil {
+		l = m.ControllerPublishSecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.NodeStageSecretRef != nil {
+		l = m.NodeStageSecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.NodePublishSecretRef != nil {
+		l = m.NodePublishSecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ControllerExpandSecretRef != nil {
+		l = m.ControllerExpandSecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.NodeExpandSecretRef != nil {
+		l = m.NodeExpandSecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *CSIVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Driver)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ReadOnly != nil {
+		n += 2
+	}
+	if m.FSType != nil {
+		l = len(*m.FSType)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.VolumeAttributes) > 0 {
+		for k, v := range m.VolumeAttributes {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.NodePublishSecretRef != nil {
+		l = m.NodePublishSecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *Capabilities) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Add) > 0 {
+		for _, s := range m.Add {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Drop) > 0 {
+		for _, s := range m.Drop {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *CephFSPersistentVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Monitors) > 0 {
+		for _, s := range m.Monitors {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.User)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.SecretFile)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	return n
+}
+
+func (m *CephFSVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Monitors) > 0 {
+		for _, s := range m.Monitors {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.User)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.SecretFile)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	return n
+}
+
+func (m *CinderPersistentVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.VolumeID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *CinderVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.VolumeID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ClientIPConfig) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.TimeoutSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
+	}
+	return n
+}
+
+func (m *ClusterTrustBundleProjection) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Name != nil {
+		l = len(*m.Name)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.SignerName != nil {
+		l = len(*m.SignerName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.LabelSelector != nil {
+		l = m.LabelSelector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Optional != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *ComponentCondition) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Error)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ComponentStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ComponentStatusList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ConfigMap) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Data) > 0 {
+		for k, v := range m.Data {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.BinaryData) > 0 {
+		for k, v := range m.BinaryData {
+			_ = k
+			_ = v
+			l = 0
+			if v != nil {
+				l = 1 + len(v) + sovGenerated(uint64(len(v)))
+			}
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.Immutable != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *ConfigMapEnvSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.LocalObjectReference.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Optional != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *ConfigMapKeySelector) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.LocalObjectReference.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Optional != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *ConfigMapList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ConfigMapNodeConfigSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ResourceVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.KubeletConfigKey)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ConfigMapProjection) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.LocalObjectReference.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Optional != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *ConfigMapVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.LocalObjectReference.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.DefaultMode != nil {
+		n += 1 + sovGenerated(uint64(*m.DefaultMode))
+	}
+	if m.Optional != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *Container) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Image)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Command) > 0 {
+		for _, s := range m.Command {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Args) > 0 {
+		for _, s := range m.Args {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.WorkingDir)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Ports) > 0 {
+		for _, e := range m.Ports {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Env) > 0 {
+		for _, e := range m.Env {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.Resources.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.VolumeMounts) > 0 {
+		for _, e := range m.VolumeMounts {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.LivenessProbe != nil {
+		l = m.LivenessProbe.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ReadinessProbe != nil {
+		l = m.ReadinessProbe.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Lifecycle != nil {
+		l = m.Lifecycle.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.TerminationMessagePath)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ImagePullPolicy)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SecurityContext != nil {
+		l = m.SecurityContext.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 3
+	n += 3
+	n += 3
+	if len(m.EnvFrom) > 0 {
+		for _, e := range m.EnvFrom {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.TerminationMessagePolicy)
+	n += 2 + l + sovGenerated(uint64(l))
+	if len(m.VolumeDevices) > 0 {
+		for _, e := range m.VolumeDevices {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.StartupProbe != nil {
+		l = m.StartupProbe.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if len(m.ResizePolicy) > 0 {
+		for _, e := range m.ResizePolicy {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.RestartPolicy != nil {
+		l = len(*m.RestartPolicy)
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ContainerImage) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Names) > 0 {
+		for _, s := range m.Names {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	n += 1 + sovGenerated(uint64(m.SizeBytes))
+	return n
+}
+
+func (m *ContainerPort) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.HostPort))
+	n += 1 + sovGenerated(uint64(m.ContainerPort))
+	l = len(m.Protocol)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.HostIP)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ContainerResizePolicy) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ResourceName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.RestartPolicy)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ContainerState) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Waiting != nil {
+		l = m.Waiting.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Running != nil {
+		l = m.Running.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Terminated != nil {
+		l = m.Terminated.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ContainerStateRunning) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.StartedAt.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ContainerStateTerminated) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.ExitCode))
+	n += 1 + sovGenerated(uint64(m.Signal))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.StartedAt.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.FinishedAt.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ContainerID)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ContainerStateWaiting) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ContainerStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.State.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastTerminationState.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	n += 1 + sovGenerated(uint64(m.RestartCount))
+	l = len(m.Image)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ImageID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ContainerID)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Started != nil {
+		n += 2
+	}
+	if len(m.AllocatedResources) > 0 {
+		for k, v := range m.AllocatedResources {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.Resources != nil {
+		l = m.Resources.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.VolumeMounts) > 0 {
+		for _, e := range m.VolumeMounts {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.User != nil {
+		l = m.User.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.AllocatedResourcesStatus) > 0 {
+		for _, e := range m.AllocatedResourcesStatus {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ContainerUser) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Linux != nil {
+		l = m.Linux.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *DaemonEndpoint) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.Port))
+	return n
+}
+
+func (m *DownwardAPIProjection) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DownwardAPIVolumeFile) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.FieldRef != nil {
+		l = m.FieldRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ResourceFieldRef != nil {
+		l = m.ResourceFieldRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Mode != nil {
+		n += 1 + sovGenerated(uint64(*m.Mode))
+	}
+	return n
+}
+
+func (m *DownwardAPIVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.DefaultMode != nil {
+		n += 1 + sovGenerated(uint64(*m.DefaultMode))
+	}
+	return n
+}
+
+func (m *EmptyDirVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Medium)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SizeLimit != nil {
+		l = m.SizeLimit.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *EndpointAddress) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.IP)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.TargetRef != nil {
+		l = m.TargetRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.Hostname)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.NodeName != nil {
+		l = len(*m.NodeName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *EndpointPort) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Port))
+	l = len(m.Protocol)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.AppProtocol != nil {
+		l = len(*m.AppProtocol)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *EndpointSubset) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Addresses) > 0 {
+		for _, e := range m.Addresses {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.NotReadyAddresses) > 0 {
+		for _, e := range m.NotReadyAddresses {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Ports) > 0 {
+		for _, e := range m.Ports {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *Endpoints) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Subsets) > 0 {
+		for _, e := range m.Subsets {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *EndpointsList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *EnvFromSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Prefix)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ConfigMapRef != nil {
+		l = m.ConfigMapRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *EnvVar) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Value)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ValueFrom != nil {
+		l = m.ValueFrom.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *EnvVarSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.FieldRef != nil {
+		l = m.FieldRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ResourceFieldRef != nil {
+		l = m.ResourceFieldRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ConfigMapKeyRef != nil {
+		l = m.ConfigMapKeyRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.SecretKeyRef != nil {
+		l = m.SecretKeyRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *EphemeralContainer) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.EphemeralContainerCommon.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.TargetContainerName)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *EphemeralContainerCommon) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Image)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Command) > 0 {
+		for _, s := range m.Command {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Args) > 0 {
+		for _, s := range m.Args {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.WorkingDir)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Ports) > 0 {
+		for _, e := range m.Ports {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Env) > 0 {
+		for _, e := range m.Env {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.Resources.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.VolumeMounts) > 0 {
+		for _, e := range m.VolumeMounts {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.LivenessProbe != nil {
+		l = m.LivenessProbe.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ReadinessProbe != nil {
+		l = m.ReadinessProbe.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Lifecycle != nil {
+		l = m.Lifecycle.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.TerminationMessagePath)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ImagePullPolicy)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SecurityContext != nil {
+		l = m.SecurityContext.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 3
+	n += 3
+	n += 3
+	if len(m.EnvFrom) > 0 {
+		for _, e := range m.EnvFrom {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.TerminationMessagePolicy)
+	n += 2 + l + sovGenerated(uint64(l))
+	if len(m.VolumeDevices) > 0 {
+		for _, e := range m.VolumeDevices {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.StartupProbe != nil {
+		l = m.StartupProbe.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if len(m.ResizePolicy) > 0 {
+		for _, e := range m.ResizePolicy {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.RestartPolicy != nil {
+		l = len(*m.RestartPolicy)
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *EphemeralVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.VolumeClaimTemplate != nil {
+		l = m.VolumeClaimTemplate.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *Event) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.InvolvedObject.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Source.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.FirstTimestamp.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastTimestamp.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Count))
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.EventTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Series != nil {
+		l = m.Series.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.Action)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Related != nil {
+		l = m.Related.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.ReportingController)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ReportingInstance)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *EventList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *EventSeries) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.Count))
+	l = m.LastObservedTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *EventSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Component)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Host)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ExecAction) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Command) > 0 {
+		for _, s := range m.Command {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *FCVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.TargetWWNs) > 0 {
+		for _, s := range m.TargetWWNs {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Lun != nil {
+		n += 1 + sovGenerated(uint64(*m.Lun))
+	}
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if len(m.WWIDs) > 0 {
+		for _, s := range m.WWIDs {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *FlexPersistentVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Driver)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	if len(m.Options) > 0 {
+		for k, v := range m.Options {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *FlexVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Driver)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	if len(m.Options) > 0 {
+		for k, v := range m.Options {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *FlockerVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.DatasetName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.DatasetUUID)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *GCEPersistentDiskVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.PDName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Partition))
+	n += 2
+	return n
+}
+
+func (m *GRPCAction) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.Port))
+	if m.Service != nil {
+		l = len(*m.Service)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *GitRepoVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Repository)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Revision)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Directory)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *GlusterfsPersistentVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.EndpointsName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if m.EndpointsNamespace != nil {
+		l = len(*m.EndpointsNamespace)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *GlusterfsVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.EndpointsName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	return n
+}
+
+func (m *HTTPGetAction) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Port.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Host)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Scheme)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.HTTPHeaders) > 0 {
+		for _, e := range m.HTTPHeaders {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *HTTPHeader) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Value)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *HostAlias) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.IP)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Hostnames) > 0 {
+		for _, s := range m.Hostnames {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *HostIP) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.IP)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *HostPathVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Type != nil {
+		l = len(*m.Type)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ISCSIPersistentVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.TargetPortal)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.IQN)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Lun))
+	l = len(m.ISCSIInterface)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if len(m.Portals) > 0 {
+		for _, s := range m.Portals {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	n += 2
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	if m.InitiatorName != nil {
+		l = len(*m.InitiatorName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ISCSIVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.TargetPortal)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.IQN)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Lun))
+	l = len(m.ISCSIInterface)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if len(m.Portals) > 0 {
+		for _, s := range m.Portals {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	n += 2
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	if m.InitiatorName != nil {
+		l = len(*m.InitiatorName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ImageVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Reference)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.PullPolicy)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *KeyToPath) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Mode != nil {
+		n += 1 + sovGenerated(uint64(*m.Mode))
+	}
+	return n
+}
+
+func (m *Lifecycle) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.PostStart != nil {
+		l = m.PostStart.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.PreStop != nil {
+		l = m.PreStop.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *LifecycleHandler) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Exec != nil {
+		l = m.Exec.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.HTTPGet != nil {
+		l = m.HTTPGet.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.TCPSocket != nil {
+		l = m.TCPSocket.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Sleep != nil {
+		l = m.Sleep.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *LimitRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *LimitRangeItem) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Max) > 0 {
+		for k, v := range m.Max {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Min) > 0 {
+		for k, v := range m.Min {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Default) > 0 {
+		for k, v := range m.Default {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.DefaultRequest) > 0 {
+		for k, v := range m.DefaultRequest {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.MaxLimitRequestRatio) > 0 {
+		for k, v := range m.MaxLimitRequestRatio {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *LimitRangeList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *LimitRangeSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Limits) > 0 {
+		for _, e := range m.Limits {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *LinuxContainerUser) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.UID))
+	n += 1 + sovGenerated(uint64(m.GID))
+	if len(m.SupplementalGroups) > 0 {
+		for _, e := range m.SupplementalGroups {
+			n += 1 + sovGenerated(uint64(e))
+		}
+	}
+	return n
+}
+
+func (m *List) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *LoadBalancerIngress) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.IP)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Hostname)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.IPMode != nil {
+		l = len(*m.IPMode)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Ports) > 0 {
+		for _, e := range m.Ports {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *LoadBalancerStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Ingress) > 0 {
+		for _, e := range m.Ingress {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *LocalObjectReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *LocalVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.FSType != nil {
+		l = len(*m.FSType)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ModifyVolumeStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.TargetVolumeAttributesClassName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *NFSVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Server)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	return n
+}
+
+func (m *Namespace) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *NamespaceCondition) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastTransitionTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *NamespaceList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *NamespaceSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Finalizers) > 0 {
+		for _, s := range m.Finalizers {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *NamespaceStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Phase)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *Node) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *NodeAddress) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Address)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *NodeAffinity) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+		for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *NodeCondition) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastHeartbeatTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastTransitionTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *NodeConfigSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ConfigMap != nil {
+		l = m.ConfigMap.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *NodeConfigStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Assigned != nil {
+		l = m.Assigned.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Active != nil {
+		l = m.Active.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.LastKnownGood != nil {
+		l = m.LastKnownGood.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.Error)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *NodeDaemonEndpoints) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.KubeletEndpoint.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *NodeFeatures) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.SupplementalGroupsPolicy != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *NodeList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *NodeProxyOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
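+	// Byte-slice map values are omitted from the entry when nil, so their
+	// contribution is computed conditionally.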
+	return n
+}
+
+func (m *NodeRuntimeHandler) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Features != nil {
+		l = m.Features.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *NodeRuntimeHandlerFeatures) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RecursiveReadOnlyMounts != nil {
+		n += 2
+	}
+	if m.UserNamespaces != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *NodeSelector) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.NodeSelectorTerms) > 0 {
+		for _, e := range m.NodeSelectorTerms {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *NodeSelectorRequirement) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Operator)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Values) > 0 {
+		for _, s := range m.Values {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *NodeSelectorTerm) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.MatchExpressions) > 0 {
+		for _, e := range m.MatchExpressions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.MatchFields) > 0 {
+		for _, e := range m.MatchFields {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *NodeSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.PodCIDR)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.DoNotUse_ExternalID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ProviderID)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if len(m.Taints) > 0 {
+		for _, e := range m.Taints {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.ConfigSource != nil {
+		l = m.ConfigSource.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.PodCIDRs) > 0 {
+		for _, s := range m.PodCIDRs {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *NodeStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
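+	// Maps are serialized as repeated key/value entry messages: the inner
+	// entry size is computed first, then wrapped with the outer field tag
+	// and a varint length prefix.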
+	if len(m.Capacity) > 0 {
+		for k, v := range m.Capacity {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Allocatable) > 0 {
+		for k, v := range m.Allocatable {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	l = len(m.Phase)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Addresses) > 0 {
+		for _, e := range m.Addresses {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.DaemonEndpoints.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.NodeInfo.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Images) > 0 {
+		for _, e := range m.Images {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.VolumesInUse) > 0 {
+		for _, s := range m.VolumesInUse {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.VolumesAttached) > 0 {
+		for _, e := range m.VolumesAttached {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Config != nil {
+		l = m.Config.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.RuntimeHandlers) > 0 {
+		for _, e := range m.RuntimeHandlers {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Features != nil {
+		l = m.Features.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *NodeSystemInfo) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.MachineID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.SystemUUID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.BootID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.KernelVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.OSImage)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ContainerRuntimeVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.KubeletVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.KubeProxyVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.OperatingSystem)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Architecture)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ObjectFieldSelector) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.APIVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FieldPath)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ObjectReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Kind)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.APIVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ResourceVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FieldPath)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PersistentVolume) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PersistentVolumeClaim) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PersistentVolumeClaimCondition) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastProbeTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastTransitionTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PersistentVolumeClaimList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PersistentVolumeClaimSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.AccessModes) > 0 {
+		for _, s := range m.AccessModes {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.Resources.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.VolumeName)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Selector != nil {
+		l = m.Selector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.StorageClassName != nil {
+		l = len(*m.StorageClassName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.VolumeMode != nil {
+		l = len(*m.VolumeMode)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.DataSource != nil {
+		l = m.DataSource.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.DataSourceRef != nil {
+		l = m.DataSourceRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.VolumeAttributesClassName != nil {
+		l = len(*m.VolumeAttributesClassName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PersistentVolumeClaimStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Phase)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.AccessModes) > 0 {
+		for _, s := range m.AccessModes {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Capacity) > 0 {
+		for k, v := range m.Capacity {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.AllocatedResources) > 0 {
+		for k, v := range m.AllocatedResources {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.AllocatedResourceStatuses) > 0 {
+		for k, v := range m.AllocatedResourceStatuses {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.CurrentVolumeAttributesClassName != nil {
+		l = len(*m.CurrentVolumeAttributesClassName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ModifyVolumeStatus != nil {
+		l = m.ModifyVolumeStatus.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PersistentVolumeClaimTemplate) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PersistentVolumeClaimVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ClaimName)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	return n
+}
+
+func (m *PersistentVolumeList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PersistentVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.GCEPersistentDisk != nil {
+		l = m.GCEPersistentDisk.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.AWSElasticBlockStore != nil {
+		l = m.AWSElasticBlockStore.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.HostPath != nil {
+		l = m.HostPath.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Glusterfs != nil {
+		l = m.Glusterfs.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.NFS != nil {
+		l = m.NFS.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RBD != nil {
+		l = m.RBD.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ISCSI != nil {
+		l = m.ISCSI.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Cinder != nil {
+		l = m.Cinder.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.CephFS != nil {
+		l = m.CephFS.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.FC != nil {
+		l = m.FC.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Flocker != nil {
+		l = m.Flocker.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.FlexVolume != nil {
+		l = m.FlexVolume.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.AzureFile != nil {
+		l = m.AzureFile.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.VsphereVolume != nil {
+		l = m.VsphereVolume.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Quobyte != nil {
+		l = m.Quobyte.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
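+	// From AzureDisk (field 16) onward the tag needs two bytes, hence
+	// "2 +" in place of "1 +".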
+	if m.AzureDisk != nil {
+		l = m.AzureDisk.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.PhotonPersistentDisk != nil {
+		l = m.PhotonPersistentDisk.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.PortworxVolume != nil {
+		l = m.PortworxVolume.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.ScaleIO != nil {
+		l = m.ScaleIO.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.Local != nil {
+		l = m.Local.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.StorageOS != nil {
+		l = m.StorageOS.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.CSI != nil {
+		l = m.CSI.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PersistentVolumeSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Capacity) > 0 {
+		for k, v := range m.Capacity {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	l = m.PersistentVolumeSource.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.AccessModes) > 0 {
+		for _, s := range m.AccessModes {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.ClaimRef != nil {
+		l = m.ClaimRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.PersistentVolumeReclaimPolicy)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.StorageClassName)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.MountOptions) > 0 {
+		for _, s := range m.MountOptions {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.VolumeMode != nil {
+		l = len(*m.VolumeMode)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.NodeAffinity != nil {
+		l = m.NodeAffinity.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.VolumeAttributesClassName != nil {
+		l = len(*m.VolumeAttributesClassName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PersistentVolumeStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Phase)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.LastPhaseTransitionTime != nil {
+		l = m.LastPhaseTransitionTime.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PhotonPersistentDiskVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.PdID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Pod) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PodAffinity) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 {
+		for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+		for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PodAffinityTerm) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.LabelSelector != nil {
+		l = m.LabelSelector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Namespaces) > 0 {
+		for _, s := range m.Namespaces {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.TopologyKey)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.NamespaceSelector != nil {
+		l = m.NamespaceSelector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.MatchLabelKeys) > 0 {
+		for _, s := range m.MatchLabelKeys {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.MismatchLabelKeys) > 0 {
+		for _, s := range m.MismatchLabelKeys {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PodAntiAffinity) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 {
+		for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
+		for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PodAttachOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
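+	// Stdin, Stdout, Stderr, TTY: four bool fields at one tag byte plus
+	// one value byte each.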
+	n += 2
+	n += 2
+	n += 2
+	n += 2
+	l = len(m.Container)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PodCondition) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastProbeTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastTransitionTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PodDNSConfig) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Nameservers) > 0 {
+		for _, s := range m.Nameservers {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Searches) > 0 {
+		for _, s := range m.Searches {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Options) > 0 {
+		for _, e := range m.Options {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PodDNSConfigOption) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Value != nil {
+		l = len(*m.Value)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PodExecOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 2
+	n += 2
+	n += 2
+	n += 2
+	l = len(m.Container)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Command) > 0 {
+		for _, s := range m.Command {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PodIP) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.IP)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PodList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PodLogOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Container)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	n += 2
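+	// Integer fields cost one tag byte plus the varint width of the value
+	// itself.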
+	if m.SinceSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.SinceSeconds))
+	}
+	if m.SinceTime != nil {
+		l = m.SinceTime.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	if m.TailLines != nil {
+		n += 1 + sovGenerated(uint64(*m.TailLines))
+	}
+	if m.LimitBytes != nil {
+		n += 1 + sovGenerated(uint64(*m.LimitBytes))
+	}
+	n += 2
+	if m.Stream != nil {
+		l = len(*m.Stream)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PodOS) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PodPortForwardOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Ports) > 0 {
+		for _, e := range m.Ports {
+			n += 1 + sovGenerated(uint64(e))
+		}
+	}
+	return n
+}
+
+func (m *PodProxyOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PodReadinessGate) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ConditionType)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PodResourceClaim) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ResourceClaimName != nil {
+		l = len(*m.ResourceClaimName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ResourceClaimTemplateName != nil {
+		l = len(*m.ResourceClaimTemplateName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PodResourceClaimStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ResourceClaimName != nil {
+		l = len(*m.ResourceClaimName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PodSchedulingGate) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PodSecurityContext) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.SELinuxOptions != nil {
+		l = m.SELinuxOptions.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RunAsUser != nil {
+		n += 1 + sovGenerated(uint64(*m.RunAsUser))
+	}
+	if m.RunAsNonRoot != nil {
+		n += 2
+	}
+	if len(m.SupplementalGroups) > 0 {
+		for _, e := range m.SupplementalGroups {
+			n += 1 + sovGenerated(uint64(e))
+		}
+	}
+	if m.FSGroup != nil {
+		n += 1 + sovGenerated(uint64(*m.FSGroup))
+	}
+	if m.RunAsGroup != nil {
+		n += 1 + sovGenerated(uint64(*m.RunAsGroup))
+	}
+	if len(m.Sysctls) > 0 {
+		for _, e := range m.Sysctls {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.WindowsOptions != nil {
+		l = m.WindowsOptions.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.FSGroupChangePolicy != nil {
+		l = len(*m.FSGroupChangePolicy)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.SeccompProfile != nil {
+		l = m.SeccompProfile.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.AppArmorProfile != nil {
+		l = m.AppArmorProfile.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.SupplementalGroupsPolicy != nil {
+		l = len(*m.SupplementalGroupsPolicy)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.SELinuxChangePolicy != nil {
+		l = len(*m.SELinuxChangePolicy)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PodSignature) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.PodController != nil {
+		l = m.PodController.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PodSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Volumes) > 0 {
+		for _, e := range m.Volumes {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Containers) > 0 {
+		for _, e := range m.Containers {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.RestartPolicy)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.TerminationGracePeriodSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds))
+	}
+	if m.ActiveDeadlineSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds))
+	}
+	l = len(m.DNSPolicy)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.NodeSelector) > 0 {
+		for k, v := range m.NodeSelector {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	l = len(m.ServiceAccountName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.DeprecatedServiceAccount)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.NodeName)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	n += 2
+	n += 2
+	if m.SecurityContext != nil {
+		l = m.SecurityContext.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.ImagePullSecrets) > 0 {
+		for _, e := range m.ImagePullSecrets {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.Hostname)
+	n += 2 + l + sovGenerated(uint64(l))
+	l = len(m.Subdomain)
+	n += 2 + l + sovGenerated(uint64(l))
+	if m.Affinity != nil {
+		l = m.Affinity.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.SchedulerName)
+	n += 2 + l + sovGenerated(uint64(l))
+	if len(m.InitContainers) > 0 {
+		for _, e := range m.InitContainers {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
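+	// Optional bools numbered 16 or higher: two tag bytes plus one value
+	// byte.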
+	if m.AutomountServiceAccountToken != nil {
+		n += 3
+	}
+	if len(m.Tolerations) > 0 {
+		for _, e := range m.Tolerations {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.HostAliases) > 0 {
+		for _, e := range m.HostAliases {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.PriorityClassName)
+	n += 2 + l + sovGenerated(uint64(l))
+	if m.Priority != nil {
+		n += 2 + sovGenerated(uint64(*m.Priority))
+	}
+	if m.DNSConfig != nil {
+		l = m.DNSConfig.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.ShareProcessNamespace != nil {
+		n += 3
+	}
+	if len(m.ReadinessGates) > 0 {
+		for _, e := range m.ReadinessGates {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.RuntimeClassName != nil {
+		l = len(*m.RuntimeClassName)
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.EnableServiceLinks != nil {
+		n += 3
+	}
+	if m.PreemptionPolicy != nil {
+		l = len(*m.PreemptionPolicy)
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Overhead) > 0 {
+		for k, v := range m.Overhead {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.TopologySpreadConstraints) > 0 {
+		for _, e := range m.TopologySpreadConstraints {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.EphemeralContainers) > 0 {
+		for _, e := range m.EphemeralContainers {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.SetHostnameAsFQDN != nil {
+		n += 3
+	}
+	if m.OS != nil {
+		l = m.OS.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.HostUsers != nil {
+		n += 3
+	}
+	if len(m.SchedulingGates) > 0 {
+		for _, e := range m.SchedulingGates {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.ResourceClaims) > 0 {
+		for _, e := range m.ResourceClaims {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Resources != nil {
+		l = m.Resources.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PodStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Phase)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.HostIP)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.PodIP)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.StartTime != nil {
+		l = m.StartTime.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.ContainerStatuses) > 0 {
+		for _, e := range m.ContainerStatuses {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.QOSClass)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.InitContainerStatuses) > 0 {
+		for _, e := range m.InitContainerStatuses {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.NominatedNodeName)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.PodIPs) > 0 {
+		for _, e := range m.PodIPs {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.EphemeralContainerStatuses) > 0 {
+		for _, e := range m.EphemeralContainerStatuses {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.Resize)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.ResourceClaimStatuses) > 0 {
+		for _, e := range m.ResourceClaimStatuses {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.HostIPs) > 0 {
+		for _, e := range m.HostIPs {
+			l = e.Size()
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PodStatusResult) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PodTemplate) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Template.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PodTemplateList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PodTemplateSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PortStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.Port))
+	l = len(m.Protocol)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Error != nil {
+		l = len(*m.Error)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PortworxVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.VolumeID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	return n
+}
+
+func (m *Preconditions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.UID != nil {
+		l = len(*m.UID)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PreferAvoidPodsEntry) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.PodSignature.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.EvictionTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *PreferredSchedulingTerm) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.Weight))
+	l = m.Preference.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Probe) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ProbeHandler.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.InitialDelaySeconds))
+	n += 1 + sovGenerated(uint64(m.TimeoutSeconds))
+	n += 1 + sovGenerated(uint64(m.PeriodSeconds))
+	n += 1 + sovGenerated(uint64(m.SuccessThreshold))
+	n += 1 + sovGenerated(uint64(m.FailureThreshold))
+	if m.TerminationGracePeriodSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds))
+	}
+	return n
+}
+
+func (m *ProbeHandler) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Exec != nil {
+		l = m.Exec.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.HTTPGet != nil {
+		l = m.HTTPGet.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.TCPSocket != nil {
+		l = m.TCPSocket.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.GRPC != nil {
+		l = m.GRPC.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ProjectedVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Sources) > 0 {
+		for _, e := range m.Sources {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.DefaultMode != nil {
+		n += 1 + sovGenerated(uint64(*m.DefaultMode))
+	}
+	return n
+}
+
+func (m *QuobyteVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Registry)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Volume)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	l = len(m.User)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Group)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Tenant)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *RBDPersistentVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.CephMonitors) > 0 {
+		for _, s := range m.CephMonitors {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.RBDImage)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.RBDPool)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.RadosUser)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Keyring)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	return n
+}
+
+func (m *RBDVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.CephMonitors) > 0 {
+		for _, s := range m.CephMonitors {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.RBDImage)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.RBDPool)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.RadosUser)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Keyring)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	return n
+}
+
+func (m *RangeAllocation) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Range)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ReplicationController) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ReplicationControllerCondition) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Status)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.LastTransitionTime.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Message)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ReplicationControllerList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ReplicationControllerSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Replicas != nil {
+		n += 1 + sovGenerated(uint64(*m.Replicas))
+	}
+	if len(m.Selector) > 0 {
+		for k, v := range m.Selector {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.Template != nil {
+		l = m.Template.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 1 + sovGenerated(uint64(m.MinReadySeconds))
+	return n
+}
+
+func (m *ReplicationControllerStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.Replicas))
+	n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas))
+	n += 1 + sovGenerated(uint64(m.ObservedGeneration))
+	n += 1 + sovGenerated(uint64(m.ReadyReplicas))
+	n += 1 + sovGenerated(uint64(m.AvailableReplicas))
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ResourceClaim) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Request)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ResourceFieldSelector) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ContainerName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Resource)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Divisor.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ResourceHealth) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ResourceID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Health)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ResourceQuota) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ResourceQuotaList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ResourceQuotaSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Hard) > 0 {
+		for k, v := range m.Hard {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Scopes) > 0 {
+		for _, s := range m.Scopes {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.ScopeSelector != nil {
+		l = m.ScopeSelector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ResourceQuotaStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Hard) > 0 {
+		for k, v := range m.Hard {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Used) > 0 {
+		for k, v := range m.Used {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *ResourceRequirements) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Limits) > 0 {
+		for k, v := range m.Limits {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Requests) > 0 {
+		for k, v := range m.Requests {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Claims) > 0 {
+		for _, e := range m.Claims {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ResourceStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Resources) > 0 {
+		for _, e := range m.Resources {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *SELinuxOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.User)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Role)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Level)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ScaleIOPersistentVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Gateway)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.System)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	l = len(m.ProtectionDomain)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.StoragePool)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.StorageMode)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.VolumeName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	return n
+}
+
+func (m *ScaleIOVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Gateway)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.System)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	l = len(m.ProtectionDomain)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.StoragePool)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.StorageMode)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.VolumeName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	return n
+}
+
+func (m *ScopeSelector) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.MatchExpressions) > 0 {
+		for _, e := range m.MatchExpressions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ScopedResourceSelectorRequirement) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ScopeName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Operator)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Values) > 0 {
+		for _, s := range m.Values {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *SeccompProfile) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.LocalhostProfile != nil {
+		l = len(*m.LocalhostProfile)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *Secret) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Data) > 0 {
+		for k, v := range m.Data {
+			_ = k
+			_ = v
+			l = 0
+			if v != nil {
+				l = 1 + len(v) + sovGenerated(uint64(len(v)))
+			}
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.StringData) > 0 {
+		for k, v := range m.StringData {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.Immutable != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *SecretEnvSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.LocalObjectReference.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Optional != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *SecretKeySelector) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.LocalObjectReference.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Optional != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *SecretList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *SecretProjection) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.LocalObjectReference.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Optional != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *SecretReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *SecretVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.SecretName)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.DefaultMode != nil {
+		n += 1 + sovGenerated(uint64(*m.DefaultMode))
+	}
+	if m.Optional != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *SecurityContext) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Capabilities != nil {
+		l = m.Capabilities.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Privileged != nil {
+		n += 2
+	}
+	if m.SELinuxOptions != nil {
+		l = m.SELinuxOptions.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RunAsUser != nil {
+		n += 1 + sovGenerated(uint64(*m.RunAsUser))
+	}
+	if m.RunAsNonRoot != nil {
+		n += 2
+	}
+	if m.ReadOnlyRootFilesystem != nil {
+		n += 2
+	}
+	if m.AllowPrivilegeEscalation != nil {
+		n += 2
+	}
+	if m.RunAsGroup != nil {
+		n += 1 + sovGenerated(uint64(*m.RunAsGroup))
+	}
+	if m.ProcMount != nil {
+		l = len(*m.ProcMount)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.WindowsOptions != nil {
+		l = m.WindowsOptions.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.SeccompProfile != nil {
+		l = m.SeccompProfile.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.AppArmorProfile != nil {
+		l = m.AppArmorProfile.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *SerializedReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Reference.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Service) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ServiceAccount) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Secrets) > 0 {
+		for _, e := range m.Secrets {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.ImagePullSecrets) > 0 {
+		for _, e := range m.ImagePullSecrets {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.AutomountServiceAccountToken != nil {
+		n += 2
+	}
+	return n
+}
+
+func (m *ServiceAccountList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ServiceAccountTokenProjection) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Audience)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ExpirationSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.ExpirationSeconds))
+	}
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ServiceList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ServicePort) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Protocol)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Port))
+	l = m.TargetPort.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.NodePort))
+	if m.AppProtocol != nil {
+		l = len(*m.AppProtocol)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ServiceProxyOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ServiceSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Ports) > 0 {
+		for _, e := range m.Ports {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Selector) > 0 {
+		for k, v := range m.Selector {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	l = len(m.ClusterIP)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.ExternalIPs) > 0 {
+		for _, s := range m.ExternalIPs {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.SessionAffinity)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.LoadBalancerIP)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.LoadBalancerSourceRanges) > 0 {
+		for _, s := range m.LoadBalancerSourceRanges {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.ExternalName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ExternalTrafficPolicy)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.HealthCheckNodePort))
+	n += 2
+	if m.SessionAffinityConfig != nil {
+		l = m.SessionAffinityConfig.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.IPFamilyPolicy != nil {
+		l = len(*m.IPFamilyPolicy)
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if len(m.ClusterIPs) > 0 {
+		for _, s := range m.ClusterIPs {
+			l = len(s)
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.IPFamilies) > 0 {
+		for _, s := range m.IPFamilies {
+			l = len(s)
+			n += 2 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.AllocateLoadBalancerNodePorts != nil {
+		n += 3
+	}
+	if m.LoadBalancerClass != nil {
+		l = len(*m.LoadBalancerClass)
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.InternalTrafficPolicy != nil {
+		l = len(*m.InternalTrafficPolicy)
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.TrafficDistribution != nil {
+		l = len(*m.TrafficDistribution)
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ServiceStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.LoadBalancer.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Conditions) > 0 {
+		for _, e := range m.Conditions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *SessionAffinityConfig) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ClientIP != nil {
+		l = m.ClientIP.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *SleepAction) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.Seconds))
+	return n
+}
+
+func (m *StorageOSPersistentVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.VolumeName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.VolumeNamespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *StorageOSVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.VolumeName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.VolumeNamespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if m.SecretRef != nil {
+		l = m.SecretRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *Sysctl) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Value)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *TCPSocketAction) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Port.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Host)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Taint) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Value)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Effect)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.TimeAdded != nil {
+		l = m.TimeAdded.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *Toleration) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Operator)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Value)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Effect)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.TolerationSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.TolerationSeconds))
+	}
+	return n
+}
+
+func (m *TopologySelectorLabelRequirement) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Values) > 0 {
+		for _, s := range m.Values {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *TopologySelectorTerm) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.MatchLabelExpressions) > 0 {
+		for _, e := range m.MatchLabelExpressions {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *TopologySpreadConstraint) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.MaxSkew))
+	l = len(m.TopologyKey)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.WhenUnsatisfiable)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.LabelSelector != nil {
+		l = m.LabelSelector.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.MinDomains != nil {
+		n += 1 + sovGenerated(uint64(*m.MinDomains))
+	}
+	if m.NodeAffinityPolicy != nil {
+		l = len(*m.NodeAffinityPolicy)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.NodeTaintsPolicy != nil {
+		l = len(*m.NodeTaintsPolicy)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.MatchLabelKeys) > 0 {
+		for _, s := range m.MatchLabelKeys {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *TypedLocalObjectReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.APIGroup != nil {
+		l = len(*m.APIGroup)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.Kind)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *TypedObjectReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.APIGroup != nil {
+		l = len(*m.APIGroup)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.Kind)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Namespace != nil {
+		l = len(*m.Namespace)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *Volume) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.VolumeSource.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *VolumeDevice) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.DevicePath)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *VolumeMount) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	l = len(m.MountPath)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.SubPath)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.MountPropagation != nil {
+		l = len(*m.MountPropagation)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.SubPathExpr)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.RecursiveReadOnly != nil {
+		l = len(*m.RecursiveReadOnly)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *VolumeMountStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.MountPath)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if m.RecursiveReadOnly != nil {
+		l = len(*m.RecursiveReadOnly)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *VolumeNodeAffinity) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Required != nil {
+		l = m.Required.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *VolumeProjection) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Secret != nil {
+		l = m.Secret.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.DownwardAPI != nil {
+		l = m.DownwardAPI.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ConfigMap != nil {
+		l = m.ConfigMap.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ServiceAccountToken != nil {
+		l = m.ServiceAccountToken.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ClusterTrustBundle != nil {
+		l = m.ClusterTrustBundle.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *VolumeResourceRequirements) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Limits) > 0 {
+		for k, v := range m.Limits {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Requests) > 0 {
+		for k, v := range m.Requests {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *VolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.HostPath != nil {
+		l = m.HostPath.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.EmptyDir != nil {
+		l = m.EmptyDir.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.GCEPersistentDisk != nil {
+		l = m.GCEPersistentDisk.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.AWSElasticBlockStore != nil {
+		l = m.AWSElasticBlockStore.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.GitRepo != nil {
+		l = m.GitRepo.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Secret != nil {
+		l = m.Secret.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.NFS != nil {
+		l = m.NFS.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ISCSI != nil {
+		l = m.ISCSI.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Glusterfs != nil {
+		l = m.Glusterfs.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.PersistentVolumeClaim != nil {
+		l = m.PersistentVolumeClaim.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RBD != nil {
+		l = m.RBD.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.FlexVolume != nil {
+		l = m.FlexVolume.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Cinder != nil {
+		l = m.Cinder.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.CephFS != nil {
+		l = m.CephFS.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Flocker != nil {
+		l = m.Flocker.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.DownwardAPI != nil {
+		l = m.DownwardAPI.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.FC != nil {
+		l = m.FC.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.AzureFile != nil {
+		l = m.AzureFile.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.ConfigMap != nil {
+		l = m.ConfigMap.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.VsphereVolume != nil {
+		l = m.VsphereVolume.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.Quobyte != nil {
+		l = m.Quobyte.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.AzureDisk != nil {
+		l = m.AzureDisk.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.PhotonPersistentDisk != nil {
+		l = m.PhotonPersistentDisk.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.PortworxVolume != nil {
+		l = m.PortworxVolume.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.ScaleIO != nil {
+		l = m.ScaleIO.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.Projected != nil {
+		l = m.Projected.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.StorageOS != nil {
+		l = m.StorageOS.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.CSI != nil {
+		l = m.CSI.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.Ephemeral != nil {
+		l = m.Ephemeral.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	if m.Image != nil {
+		l = m.Image.Size()
+		n += 2 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *VsphereVirtualDiskVolumeSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.VolumePath)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.FSType)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.StoragePolicyName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.StoragePolicyID)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *WeightedPodAffinityTerm) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovGenerated(uint64(m.Weight))
+	l = m.PodAffinityTerm.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *WindowsSecurityContextOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.GMSACredentialSpecName != nil {
+		l = len(*m.GMSACredentialSpecName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.GMSACredentialSpec != nil {
+		l = len(*m.GMSACredentialSpec)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RunAsUserName != nil {
+		l = len(*m.RunAsUserName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.HostProcess != nil {
+		n += 2
+	}
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *AWSElasticBlockStoreVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`,
+		`VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`Partition:` + fmt.Sprintf("%v", this.Partition) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Affinity) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Affinity{`,
+		`NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "NodeAffinity", "NodeAffinity", 1) + `,`,
+		`PodAffinity:` + strings.Replace(this.PodAffinity.String(), "PodAffinity", "PodAffinity", 1) + `,`,
+		`PodAntiAffinity:` + strings.Replace(this.PodAntiAffinity.String(), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AppArmorProfile) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AppArmorProfile{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`LocalhostProfile:` + valueToStringGenerated(this.LocalhostProfile) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AttachedVolume) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AttachedVolume{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AvoidPods) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForPreferAvoidPods := "[]PreferAvoidPodsEntry{"
+	for _, f := range this.PreferAvoidPods {
+		repeatedStringForPreferAvoidPods += strings.Replace(strings.Replace(f.String(), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForPreferAvoidPods += "}"
+	s := strings.Join([]string{`&AvoidPods{`,
+		`PreferAvoidPods:` + repeatedStringForPreferAvoidPods + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AzureDiskVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AzureDiskVolumeSource{`,
+		`DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`,
+		`DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`,
+		`CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`,
+		`FSType:` + valueToStringGenerated(this.FSType) + `,`,
+		`ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`,
+		`Kind:` + valueToStringGenerated(this.Kind) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AzureFilePersistentVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`,
+		`SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`,
+		`ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AzureFileVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AzureFileVolumeSource{`,
+		`SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`,
+		`ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Binding) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Binding{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CSIPersistentVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes))
+	for k := range this.VolumeAttributes {
+		keysForVolumeAttributes = append(keysForVolumeAttributes, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes)
+	mapStringForVolumeAttributes := "map[string]string{"
+	for _, k := range keysForVolumeAttributes {
+		mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k])
+	}
+	mapStringForVolumeAttributes += "}"
+	s := strings.Join([]string{`&CSIPersistentVolumeSource{`,
+		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+		`VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`VolumeAttributes:` + mapStringForVolumeAttributes + `,`,
+		`ControllerPublishSecretRef:` + strings.Replace(this.ControllerPublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`,
+		`NodeStageSecretRef:` + strings.Replace(this.NodeStageSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`,
+		`NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`,
+		`ControllerExpandSecretRef:` + strings.Replace(this.ControllerExpandSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`,
+		`NodeExpandSecretRef:` + strings.Replace(this.NodeExpandSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CSIVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes))
+	for k := range this.VolumeAttributes {
+		keysForVolumeAttributes = append(keysForVolumeAttributes, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes)
+	mapStringForVolumeAttributes := "map[string]string{"
+	for _, k := range keysForVolumeAttributes {
+		mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k])
+	}
+	mapStringForVolumeAttributes += "}"
+	s := strings.Join([]string{`&CSIVolumeSource{`,
+		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+		`ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`,
+		`FSType:` + valueToStringGenerated(this.FSType) + `,`,
+		`VolumeAttributes:` + mapStringForVolumeAttributes + `,`,
+		`NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Capabilities) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Capabilities{`,
+		`Add:` + fmt.Sprintf("%v", this.Add) + `,`,
+		`Drop:` + fmt.Sprintf("%v", this.Drop) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CephFSPersistentVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CephFSPersistentVolumeSource{`,
+		`Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`User:` + fmt.Sprintf("%v", this.User) + `,`,
+		`SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CephFSVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CephFSVolumeSource{`,
+		`Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`User:` + fmt.Sprintf("%v", this.User) + `,`,
+		`SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CinderPersistentVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CinderPersistentVolumeSource{`,
+		`VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CinderVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CinderVolumeSource{`,
+		`VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ClientIPConfig) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ClientIPConfig{`,
+		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ClusterTrustBundleProjection) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ClusterTrustBundleProjection{`,
+		`Name:` + valueToStringGenerated(this.Name) + `,`,
+		`SignerName:` + valueToStringGenerated(this.SignerName) + `,`,
+		`LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`Optional:` + valueToStringGenerated(this.Optional) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ComponentCondition) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ComponentCondition{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`Error:` + fmt.Sprintf("%v", this.Error) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ComponentStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForConditions := "[]ComponentCondition{"
+	for _, f := range this.Conditions {
+		repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForConditions += "}"
+	s := strings.Join([]string{`&ComponentStatus{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Conditions:` + repeatedStringForConditions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ComponentStatusList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ComponentStatus{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ComponentStatusList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ConfigMap) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForData := make([]string, 0, len(this.Data))
+	for k := range this.Data {
+		keysForData = append(keysForData, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForData)
+	mapStringForData := "map[string]string{"
+	for _, k := range keysForData {
+		mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k])
+	}
+	mapStringForData += "}"
+	keysForBinaryData := make([]string, 0, len(this.BinaryData))
+	for k := range this.BinaryData {
+		keysForBinaryData = append(keysForBinaryData, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData)
+	mapStringForBinaryData := "map[string][]byte{"
+	for _, k := range keysForBinaryData {
+		mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k])
+	}
+	mapStringForBinaryData += "}"
+	s := strings.Join([]string{`&ConfigMap{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Data:` + mapStringForData + `,`,
+		`BinaryData:` + mapStringForBinaryData + `,`,
+		`Immutable:` + valueToStringGenerated(this.Immutable) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ConfigMapEnvSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ConfigMapEnvSource{`,
+		`LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`,
+		`Optional:` + valueToStringGenerated(this.Optional) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ConfigMapKeySelector) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ConfigMapKeySelector{`,
+		`LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Optional:` + valueToStringGenerated(this.Optional) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ConfigMapList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ConfigMap{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ConfigMapList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ConfigMapNodeConfigSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ConfigMapNodeConfigSource{`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
+		`KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ConfigMapProjection) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]KeyToPath{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ConfigMapProjection{`,
+		`LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`Optional:` + valueToStringGenerated(this.Optional) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ConfigMapVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]KeyToPath{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ConfigMapVolumeSource{`,
+		`LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`,
+		`Optional:` + valueToStringGenerated(this.Optional) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Container) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForPorts := "[]ContainerPort{"
+	for _, f := range this.Ports {
+		repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForPorts += "}"
+	repeatedStringForEnv := "[]EnvVar{"
+	for _, f := range this.Env {
+		repeatedStringForEnv += strings.Replace(strings.Replace(f.String(), "EnvVar", "EnvVar", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForEnv += "}"
+	repeatedStringForVolumeMounts := "[]VolumeMount{"
+	for _, f := range this.VolumeMounts {
+		repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForVolumeMounts += "}"
+	repeatedStringForEnvFrom := "[]EnvFromSource{"
+	for _, f := range this.EnvFrom {
+		repeatedStringForEnvFrom += strings.Replace(strings.Replace(f.String(), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForEnvFrom += "}"
+	repeatedStringForVolumeDevices := "[]VolumeDevice{"
+	for _, f := range this.VolumeDevices {
+		repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForVolumeDevices += "}"
+	repeatedStringForResizePolicy := "[]ContainerResizePolicy{"
+	for _, f := range this.ResizePolicy {
+		repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForResizePolicy += "}"
+	s := strings.Join([]string{`&Container{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
+		`Command:` + fmt.Sprintf("%v", this.Command) + `,`,
+		`Args:` + fmt.Sprintf("%v", this.Args) + `,`,
+		`WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`,
+		`Ports:` + repeatedStringForPorts + `,`,
+		`Env:` + repeatedStringForEnv + `,`,
+		`Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`,
+		`VolumeMounts:` + repeatedStringForVolumeMounts + `,`,
+		`LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`,
+		`ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`,
+		`Lifecycle:` + strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1) + `,`,
+		`TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`,
+		`ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`,
+		`SecurityContext:` + strings.Replace(this.SecurityContext.String(), "SecurityContext", "SecurityContext", 1) + `,`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`,
+		`TTY:` + fmt.Sprintf("%v", this.TTY) + `,`,
+		`EnvFrom:` + repeatedStringForEnvFrom + `,`,
+		`TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`,
+		`VolumeDevices:` + repeatedStringForVolumeDevices + `,`,
+		`StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`,
+		`ResizePolicy:` + repeatedStringForResizePolicy + `,`,
+		`RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerImage) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerImage{`,
+		`Names:` + fmt.Sprintf("%v", this.Names) + `,`,
+		`SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerPort) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerPort{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`,
+		`ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`,
+		`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
+		`HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerResizePolicy) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerResizePolicy{`,
+		`ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`,
+		`RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerState) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerState{`,
+		`Waiting:` + strings.Replace(this.Waiting.String(), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`,
+		`Running:` + strings.Replace(this.Running.String(), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`,
+		`Terminated:` + strings.Replace(this.Terminated.String(), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerStateRunning) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerStateRunning{`,
+		`StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerStateTerminated) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerStateTerminated{`,
+		`ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`,
+		`Signal:` + fmt.Sprintf("%v", this.Signal) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`FinishedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FinishedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerStateWaiting) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerStateWaiting{`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForVolumeMounts := "[]VolumeMountStatus{"
+	for _, f := range this.VolumeMounts {
+		repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMountStatus", "VolumeMountStatus", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForVolumeMounts += "}"
+	repeatedStringForAllocatedResourcesStatus := "[]ResourceStatus{"
+	for _, f := range this.AllocatedResourcesStatus {
+		repeatedStringForAllocatedResourcesStatus += strings.Replace(strings.Replace(f.String(), "ResourceStatus", "ResourceStatus", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForAllocatedResourcesStatus += "}"
+	keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources))
+	for k := range this.AllocatedResources {
+		keysForAllocatedResources = append(keysForAllocatedResources, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources)
+	mapStringForAllocatedResources := "ResourceList{"
+	for _, k := range keysForAllocatedResources {
+		mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)])
+	}
+	mapStringForAllocatedResources += "}"
+	s := strings.Join([]string{`&ContainerStatus{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`,
+		`LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`,
+		`Ready:` + fmt.Sprintf("%v", this.Ready) + `,`,
+		`RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`,
+		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
+		`ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`Started:` + valueToStringGenerated(this.Started) + `,`,
+		`AllocatedResources:` + mapStringForAllocatedResources + `,`,
+		`Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`,
+		`VolumeMounts:` + repeatedStringForVolumeMounts + `,`,
+		`User:` + strings.Replace(this.User.String(), "ContainerUser", "ContainerUser", 1) + `,`,
+		`AllocatedResourcesStatus:` + repeatedStringForAllocatedResourcesStatus + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerUser) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerUser{`,
+		`Linux:` + strings.Replace(this.Linux.String(), "LinuxContainerUser", "LinuxContainerUser", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DaemonEndpoint) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DaemonEndpoint{`,
+		`Port:` + fmt.Sprintf("%v", this.Port) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DownwardAPIProjection) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]DownwardAPIVolumeFile{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&DownwardAPIProjection{`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DownwardAPIVolumeFile) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DownwardAPIVolumeFile{`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`FieldRef:` + strings.Replace(this.FieldRef.String(), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`,
+		`ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`,
+		`Mode:` + valueToStringGenerated(this.Mode) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DownwardAPIVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]DownwardAPIVolumeFile{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&DownwardAPIVolumeSource{`,
+		`Items:` + repeatedStringForItems + `,`,
+		`DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EmptyDirVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&EmptyDirVolumeSource{`,
+		`Medium:` + fmt.Sprintf("%v", this.Medium) + `,`,
+		`SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "resource.Quantity", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EndpointAddress) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&EndpointAddress{`,
+		`IP:` + fmt.Sprintf("%v", this.IP) + `,`,
+		`TargetRef:` + strings.Replace(this.TargetRef.String(), "ObjectReference", "ObjectReference", 1) + `,`,
+		`Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`,
+		`NodeName:` + valueToStringGenerated(this.NodeName) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EndpointPort) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&EndpointPort{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Port:` + fmt.Sprintf("%v", this.Port) + `,`,
+		`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
+		`AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EndpointSubset) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForAddresses := "[]EndpointAddress{"
+	for _, f := range this.Addresses {
+		repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForAddresses += "}"
+	repeatedStringForNotReadyAddresses := "[]EndpointAddress{"
+	for _, f := range this.NotReadyAddresses {
+		repeatedStringForNotReadyAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForNotReadyAddresses += "}"
+	repeatedStringForPorts := "[]EndpointPort{"
+	for _, f := range this.Ports {
+		repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForPorts += "}"
+	s := strings.Join([]string{`&EndpointSubset{`,
+		`Addresses:` + repeatedStringForAddresses + `,`,
+		`NotReadyAddresses:` + repeatedStringForNotReadyAddresses + `,`,
+		`Ports:` + repeatedStringForPorts + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Endpoints) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForSubsets := "[]EndpointSubset{"
+	for _, f := range this.Subsets {
+		repeatedStringForSubsets += strings.Replace(strings.Replace(f.String(), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForSubsets += "}"
+	s := strings.Join([]string{`&Endpoints{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Subsets:` + repeatedStringForSubsets + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EndpointsList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]Endpoints{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Endpoints", "Endpoints", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&EndpointsList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EnvFromSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&EnvFromSource{`,
+		`Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`,
+		`ConfigMapRef:` + strings.Replace(this.ConfigMapRef.String(), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretEnvSource", "SecretEnvSource", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EnvVar) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&EnvVar{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`ValueFrom:` + strings.Replace(this.ValueFrom.String(), "EnvVarSource", "EnvVarSource", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EnvVarSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&EnvVarSource{`,
+		`FieldRef:` + strings.Replace(this.FieldRef.String(), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`,
+		`ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`,
+		`ConfigMapKeyRef:` + strings.Replace(this.ConfigMapKeyRef.String(), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`,
+		`SecretKeyRef:` + strings.Replace(this.SecretKeyRef.String(), "SecretKeySelector", "SecretKeySelector", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EphemeralContainer) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&EphemeralContainer{`,
+		`EphemeralContainerCommon:` + strings.Replace(strings.Replace(this.EphemeralContainerCommon.String(), "EphemeralContainerCommon", "EphemeralContainerCommon", 1), `&`, ``, 1) + `,`,
+		`TargetContainerName:` + fmt.Sprintf("%v", this.TargetContainerName) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EphemeralContainerCommon) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForPorts := "[]ContainerPort{"
+	for _, f := range this.Ports {
+		repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForPorts += "}"
+	repeatedStringForEnv := "[]EnvVar{"
+	for _, f := range this.Env {
+		repeatedStringForEnv += strings.Replace(strings.Replace(f.String(), "EnvVar", "EnvVar", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForEnv += "}"
+	repeatedStringForVolumeMounts := "[]VolumeMount{"
+	for _, f := range this.VolumeMounts {
+		repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForVolumeMounts += "}"
+	repeatedStringForEnvFrom := "[]EnvFromSource{"
+	for _, f := range this.EnvFrom {
+		repeatedStringForEnvFrom += strings.Replace(strings.Replace(f.String(), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForEnvFrom += "}"
+	repeatedStringForVolumeDevices := "[]VolumeDevice{"
+	for _, f := range this.VolumeDevices {
+		repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForVolumeDevices += "}"
+	repeatedStringForResizePolicy := "[]ContainerResizePolicy{"
+	for _, f := range this.ResizePolicy {
+		repeatedStringForResizePolicy += strings.Replace(strings.Replace(f.String(), "ContainerResizePolicy", "ContainerResizePolicy", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForResizePolicy += "}"
+	s := strings.Join([]string{`&EphemeralContainerCommon{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
+		`Command:` + fmt.Sprintf("%v", this.Command) + `,`,
+		`Args:` + fmt.Sprintf("%v", this.Args) + `,`,
+		`WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`,
+		`Ports:` + repeatedStringForPorts + `,`,
+		`Env:` + repeatedStringForEnv + `,`,
+		`Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`,
+		`VolumeMounts:` + repeatedStringForVolumeMounts + `,`,
+		`LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`,
+		`ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`,
+		`Lifecycle:` + strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1) + `,`,
+		`TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`,
+		`ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`,
+		`SecurityContext:` + strings.Replace(this.SecurityContext.String(), "SecurityContext", "SecurityContext", 1) + `,`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`,
+		`TTY:` + fmt.Sprintf("%v", this.TTY) + `,`,
+		`EnvFrom:` + repeatedStringForEnvFrom + `,`,
+		`TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`,
+		`VolumeDevices:` + repeatedStringForVolumeDevices + `,`,
+		`StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`,
+		`ResizePolicy:` + repeatedStringForResizePolicy + `,`,
+		`RestartPolicy:` + valueToStringGenerated(this.RestartPolicy) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EphemeralVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&EphemeralVolumeSource{`,
+		`VolumeClaimTemplate:` + strings.Replace(this.VolumeClaimTemplate.String(), "PersistentVolumeClaimTemplate", "PersistentVolumeClaimTemplate", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Event) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Event{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`,
+		`FirstTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FirstTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`LastTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`Count:` + fmt.Sprintf("%v", this.Count) + `,`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`EventTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EventTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`,
+		`Series:` + strings.Replace(this.Series.String(), "EventSeries", "EventSeries", 1) + `,`,
+		`Action:` + fmt.Sprintf("%v", this.Action) + `,`,
+		`Related:` + strings.Replace(this.Related.String(), "ObjectReference", "ObjectReference", 1) + `,`,
+		`ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`,
+		`ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EventList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]Event{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&EventList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EventSeries) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&EventSeries{`,
+		`Count:` + fmt.Sprintf("%v", this.Count) + `,`,
+		`LastObservedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastObservedTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EventSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&EventSource{`,
+		`Component:` + fmt.Sprintf("%v", this.Component) + `,`,
+		`Host:` + fmt.Sprintf("%v", this.Host) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ExecAction) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ExecAction{`,
+		`Command:` + fmt.Sprintf("%v", this.Command) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *FCVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&FCVolumeSource{`,
+		`TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`,
+		`Lun:` + valueToStringGenerated(this.Lun) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *FlexPersistentVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForOptions := make([]string, 0, len(this.Options))
+	for k := range this.Options {
+		keysForOptions = append(keysForOptions, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
+	mapStringForOptions := "map[string]string{"
+	for _, k := range keysForOptions {
+		mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k])
+	}
+	mapStringForOptions += "}"
+	s := strings.Join([]string{`&FlexPersistentVolumeSource{`,
+		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`Options:` + mapStringForOptions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *FlexVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForOptions := make([]string, 0, len(this.Options))
+	for k := range this.Options {
+		keysForOptions = append(keysForOptions, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForOptions)
+	mapStringForOptions := "map[string]string{"
+	for _, k := range keysForOptions {
+		mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k])
+	}
+	mapStringForOptions += "}"
+	s := strings.Join([]string{`&FlexVolumeSource{`,
+		`Driver:` + fmt.Sprintf("%v", this.Driver) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`Options:` + mapStringForOptions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *FlockerVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&FlockerVolumeSource{`,
+		`DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`,
+		`DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GCEPersistentDiskVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`,
+		`PDName:` + fmt.Sprintf("%v", this.PDName) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`Partition:` + fmt.Sprintf("%v", this.Partition) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GRPCAction) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GRPCAction{`,
+		`Port:` + fmt.Sprintf("%v", this.Port) + `,`,
+		`Service:` + valueToStringGenerated(this.Service) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GitRepoVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GitRepoVolumeSource{`,
+		`Repository:` + fmt.Sprintf("%v", this.Repository) + `,`,
+		`Revision:` + fmt.Sprintf("%v", this.Revision) + `,`,
+		`Directory:` + fmt.Sprintf("%v", this.Directory) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GlusterfsPersistentVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GlusterfsPersistentVolumeSource{`,
+		`EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`EndpointsNamespace:` + valueToStringGenerated(this.EndpointsNamespace) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GlusterfsVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GlusterfsVolumeSource{`,
+		`EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *HTTPGetAction) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForHTTPHeaders := "[]HTTPHeader{"
+	for _, f := range this.HTTPHeaders {
+		repeatedStringForHTTPHeaders += strings.Replace(strings.Replace(f.String(), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForHTTPHeaders += "}"
+	s := strings.Join([]string{`&HTTPGetAction{`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`,
+		`Host:` + fmt.Sprintf("%v", this.Host) + `,`,
+		`Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`,
+		`HTTPHeaders:` + repeatedStringForHTTPHeaders + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *HTTPHeader) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&HTTPHeader{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *HostAlias) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&HostAlias{`,
+		`IP:` + fmt.Sprintf("%v", this.IP) + `,`,
+		`Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *HostIP) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&HostIP{`,
+		`IP:` + fmt.Sprintf("%v", this.IP) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *HostPathVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&HostPathVolumeSource{`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`Type:` + valueToStringGenerated(this.Type) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ISCSIPersistentVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`,
+		`TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`,
+		`IQN:` + fmt.Sprintf("%v", this.IQN) + `,`,
+		`Lun:` + fmt.Sprintf("%v", this.Lun) + `,`,
+		`ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`Portals:` + fmt.Sprintf("%v", this.Portals) + `,`,
+		`DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`,
+		`SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`,
+		`InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ISCSIVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ISCSIVolumeSource{`,
+		`TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`,
+		`IQN:` + fmt.Sprintf("%v", this.IQN) + `,`,
+		`Lun:` + fmt.Sprintf("%v", this.Lun) + `,`,
+		`ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`Portals:` + fmt.Sprintf("%v", this.Portals) + `,`,
+		`DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`,
+		`SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`,
+		`InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ImageVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ImageVolumeSource{`,
+		`Reference:` + fmt.Sprintf("%v", this.Reference) + `,`,
+		`PullPolicy:` + fmt.Sprintf("%v", this.PullPolicy) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *KeyToPath) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&KeyToPath{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`Mode:` + valueToStringGenerated(this.Mode) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Lifecycle) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Lifecycle{`,
+		`PostStart:` + strings.Replace(this.PostStart.String(), "LifecycleHandler", "LifecycleHandler", 1) + `,`,
+		`PreStop:` + strings.Replace(this.PreStop.String(), "LifecycleHandler", "LifecycleHandler", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LifecycleHandler) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LifecycleHandler{`,
+		`Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`,
+		`HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`,
+		`TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`,
+		`Sleep:` + strings.Replace(this.Sleep.String(), "SleepAction", "SleepAction", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LimitRange) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LimitRange{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LimitRangeItem) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForMax := make([]string, 0, len(this.Max))
+	for k := range this.Max {
+		keysForMax = append(keysForMax, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForMax)
+	mapStringForMax := "ResourceList{"
+	for _, k := range keysForMax {
+		mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)])
+	}
+	mapStringForMax += "}"
+	keysForMin := make([]string, 0, len(this.Min))
+	for k := range this.Min {
+		keysForMin = append(keysForMin, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForMin)
+	mapStringForMin := "ResourceList{"
+	for _, k := range keysForMin {
+		mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)])
+	}
+	mapStringForMin += "}"
+	keysForDefault := make([]string, 0, len(this.Default))
+	for k := range this.Default {
+		keysForDefault = append(keysForDefault, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForDefault)
+	mapStringForDefault := "ResourceList{"
+	for _, k := range keysForDefault {
+		mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)])
+	}
+	mapStringForDefault += "}"
+	keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest))
+	for k := range this.DefaultRequest {
+		keysForDefaultRequest = append(keysForDefaultRequest, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest)
+	mapStringForDefaultRequest := "ResourceList{"
+	for _, k := range keysForDefaultRequest {
+		mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)])
+	}
+	mapStringForDefaultRequest += "}"
+	keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio))
+	for k := range this.MaxLimitRequestRatio {
+		keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio)
+	mapStringForMaxLimitRequestRatio := "ResourceList{"
+	for _, k := range keysForMaxLimitRequestRatio {
+		mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)])
+	}
+	mapStringForMaxLimitRequestRatio += "}"
+	s := strings.Join([]string{`&LimitRangeItem{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Max:` + mapStringForMax + `,`,
+		`Min:` + mapStringForMin + `,`,
+		`Default:` + mapStringForDefault + `,`,
+		`DefaultRequest:` + mapStringForDefaultRequest + `,`,
+		`MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LimitRangeList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]LimitRange{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LimitRange", "LimitRange", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&LimitRangeList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LimitRangeSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForLimits := "[]LimitRangeItem{"
+	for _, f := range this.Limits {
+		repeatedStringForLimits += strings.Replace(strings.Replace(f.String(), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForLimits += "}"
+	s := strings.Join([]string{`&LimitRangeSpec{`,
+		`Limits:` + repeatedStringForLimits + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LinuxContainerUser) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LinuxContainerUser{`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`GID:` + fmt.Sprintf("%v", this.GID) + `,`,
+		`SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *List) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]RawExtension{"
+	for _, f := range this.Items {
+		repeatedStringForItems += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&List{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LoadBalancerIngress) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForPorts := "[]PortStatus{"
+	for _, f := range this.Ports {
+		repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "PortStatus", "PortStatus", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForPorts += "}"
+	s := strings.Join([]string{`&LoadBalancerIngress{`,
+		`IP:` + fmt.Sprintf("%v", this.IP) + `,`,
+		`Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`,
+		`IPMode:` + valueToStringGenerated(this.IPMode) + `,`,
+		`Ports:` + repeatedStringForPorts + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LoadBalancerStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForIngress := "[]LoadBalancerIngress{"
+	for _, f := range this.Ingress {
+		repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForIngress += "}"
+	s := strings.Join([]string{`&LoadBalancerStatus{`,
+		`Ingress:` + repeatedStringForIngress + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LocalObjectReference) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LocalObjectReference{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LocalVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LocalVolumeSource{`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`FSType:` + valueToStringGenerated(this.FSType) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ModifyVolumeStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ModifyVolumeStatus{`,
+		`TargetVolumeAttributesClassName:` + fmt.Sprintf("%v", this.TargetVolumeAttributesClassName) + `,`,
+		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NFSVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NFSVolumeSource{`,
+		`Server:` + fmt.Sprintf("%v", this.Server) + `,`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Namespace) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Namespace{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NamespaceCondition) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NamespaceCondition{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+		`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NamespaceList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]Namespace{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&NamespaceList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NamespaceSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NamespaceSpec{`,
+		`Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NamespaceStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForConditions := "[]NamespaceCondition{"
+	for _, f := range this.Conditions {
+		repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "NamespaceCondition", "NamespaceCondition", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForConditions += "}"
+	s := strings.Join([]string{`&NamespaceStatus{`,
+		`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
+		`Conditions:` + repeatedStringForConditions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Node) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Node{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeAddress) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeAddress{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Address:` + fmt.Sprintf("%v", this.Address) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeAffinity) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]PreferredSchedulingTerm{"
+	for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution {
+		repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}"
+	s := strings.Join([]string{`&NodeAffinity{`,
+		`RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(this.RequiredDuringSchedulingIgnoredDuringExecution.String(), "NodeSelector", "NodeSelector", 1) + `,`,
+		`PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeCondition) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeCondition{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+		`LastHeartbeatTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastHeartbeatTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeConfigSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeConfigSource{`,
+		`ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeConfigStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeConfigStatus{`,
+		`Assigned:` + strings.Replace(this.Assigned.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`,
+		`Active:` + strings.Replace(this.Active.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`,
+		`LastKnownGood:` + strings.Replace(this.LastKnownGood.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`,
+		`Error:` + fmt.Sprintf("%v", this.Error) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeDaemonEndpoints) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeDaemonEndpoints{`,
+		`KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeFeatures) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeFeatures{`,
+		`SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]Node{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Node", "Node", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&NodeList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeProxyOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeProxyOptions{`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeRuntimeHandler) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeRuntimeHandler{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Features:` + strings.Replace(this.Features.String(), "NodeRuntimeHandlerFeatures", "NodeRuntimeHandlerFeatures", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeRuntimeHandlerFeatures) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeRuntimeHandlerFeatures{`,
+		`RecursiveReadOnlyMounts:` + valueToStringGenerated(this.RecursiveReadOnlyMounts) + `,`,
+		`UserNamespaces:` + valueToStringGenerated(this.UserNamespaces) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeSelector) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForNodeSelectorTerms := "[]NodeSelectorTerm{"
+	for _, f := range this.NodeSelectorTerms {
+		repeatedStringForNodeSelectorTerms += strings.Replace(strings.Replace(f.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForNodeSelectorTerms += "}"
+	s := strings.Join([]string{`&NodeSelector{`,
+		`NodeSelectorTerms:` + repeatedStringForNodeSelectorTerms + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeSelectorRequirement) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeSelectorRequirement{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
+		`Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeSelectorTerm) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForMatchExpressions := "[]NodeSelectorRequirement{"
+	for _, f := range this.MatchExpressions {
+		repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForMatchExpressions += "}"
+	repeatedStringForMatchFields := "[]NodeSelectorRequirement{"
+	for _, f := range this.MatchFields {
+		repeatedStringForMatchFields += strings.Replace(strings.Replace(f.String(), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForMatchFields += "}"
+	s := strings.Join([]string{`&NodeSelectorTerm{`,
+		`MatchExpressions:` + repeatedStringForMatchExpressions + `,`,
+		`MatchFields:` + repeatedStringForMatchFields + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForTaints := "[]Taint{"
+	for _, f := range this.Taints {
+		repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "Taint", "Taint", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForTaints += "}"
+	s := strings.Join([]string{`&NodeSpec{`,
+		`PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`,
+		`DoNotUseExternalID:` + fmt.Sprintf("%v", this.DoNotUseExternalID) + `,`,
+		`ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`,
+		`Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`,
+		`Taints:` + repeatedStringForTaints + `,`,
+		`ConfigSource:` + strings.Replace(this.ConfigSource.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`,
+		`PodCIDRs:` + fmt.Sprintf("%v", this.PodCIDRs) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForConditions := "[]NodeCondition{"
+	for _, f := range this.Conditions {
+		repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForConditions += "}"
+	repeatedStringForAddresses := "[]NodeAddress{"
+	for _, f := range this.Addresses {
+		repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForAddresses += "}"
+	repeatedStringForImages := "[]ContainerImage{"
+	for _, f := range this.Images {
+		repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForImages += "}"
+	repeatedStringForVolumesAttached := "[]AttachedVolume{"
+	for _, f := range this.VolumesAttached {
+		repeatedStringForVolumesAttached += strings.Replace(strings.Replace(f.String(), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForVolumesAttached += "}"
+	repeatedStringForRuntimeHandlers := "[]NodeRuntimeHandler{"
+	for _, f := range this.RuntimeHandlers {
+		repeatedStringForRuntimeHandlers += strings.Replace(strings.Replace(f.String(), "NodeRuntimeHandler", "NodeRuntimeHandler", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForRuntimeHandlers += "}"
+	keysForCapacity := make([]string, 0, len(this.Capacity))
+	for k := range this.Capacity {
+		keysForCapacity = append(keysForCapacity, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+	mapStringForCapacity := "ResourceList{"
+	for _, k := range keysForCapacity {
+		mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)])
+	}
+	mapStringForCapacity += "}"
+	keysForAllocatable := make([]string, 0, len(this.Allocatable))
+	for k := range this.Allocatable {
+		keysForAllocatable = append(keysForAllocatable, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable)
+	mapStringForAllocatable := "ResourceList{"
+	for _, k := range keysForAllocatable {
+		mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)])
+	}
+	mapStringForAllocatable += "}"
+	s := strings.Join([]string{`&NodeStatus{`,
+		`Capacity:` + mapStringForCapacity + `,`,
+		`Allocatable:` + mapStringForAllocatable + `,`,
+		`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
+		`Conditions:` + repeatedStringForConditions + `,`,
+		`Addresses:` + repeatedStringForAddresses + `,`,
+		`DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`,
+		`NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`,
+		`Images:` + repeatedStringForImages + `,`,
+		`VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`,
+		`VolumesAttached:` + repeatedStringForVolumesAttached + `,`,
+		`Config:` + strings.Replace(this.Config.String(), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`,
+		`RuntimeHandlers:` + repeatedStringForRuntimeHandlers + `,`,
+		`Features:` + strings.Replace(this.Features.String(), "NodeFeatures", "NodeFeatures", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NodeSystemInfo) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeSystemInfo{`,
+		`MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`,
+		`SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`,
+		`BootID:` + fmt.Sprintf("%v", this.BootID) + `,`,
+		`KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`,
+		`OSImage:` + fmt.Sprintf("%v", this.OSImage) + `,`,
+		`ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`,
+		`KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`,
+		`KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`,
+		`OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`,
+		`Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ObjectFieldSelector) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ObjectFieldSelector{`,
+		`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+		`FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ObjectReference) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ObjectReference{`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+		`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
+		`FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PersistentVolume) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PersistentVolume{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PersistentVolumeClaim) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PersistentVolumeClaim{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PersistentVolumeClaimCondition) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PersistentVolumeClaimCondition{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+		`LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PersistentVolumeClaimList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]PersistentVolumeClaim{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&PersistentVolumeClaimList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PersistentVolumeClaimSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PersistentVolumeClaimSpec{`,
+		`AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`,
+		`Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "VolumeResourceRequirements", "VolumeResourceRequirements", 1), `&`, ``, 1) + `,`,
+		`VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`,
+		`Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+		`StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`,
+		`VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`,
+		`DataSource:` + strings.Replace(this.DataSource.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`,
+		`DataSourceRef:` + strings.Replace(this.DataSourceRef.String(), "TypedObjectReference", "TypedObjectReference", 1) + `,`,
+		`VolumeAttributesClassName:` + valueToStringGenerated(this.VolumeAttributesClassName) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PersistentVolumeClaimStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForConditions := "[]PersistentVolumeClaimCondition{"
+	for _, f := range this.Conditions {
+		repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PersistentVolumeClaimCondition", "PersistentVolumeClaimCondition", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForConditions += "}"
+	keysForCapacity := make([]string, 0, len(this.Capacity))
+	for k := range this.Capacity {
+		keysForCapacity = append(keysForCapacity, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+	mapStringForCapacity := "ResourceList{"
+	for _, k := range keysForCapacity {
+		mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)])
+	}
+	mapStringForCapacity += "}"
+	keysForAllocatedResources := make([]string, 0, len(this.AllocatedResources))
+	for k := range this.AllocatedResources {
+		keysForAllocatedResources = append(keysForAllocatedResources, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResources)
+	mapStringForAllocatedResources := "ResourceList{"
+	for _, k := range keysForAllocatedResources {
+		mapStringForAllocatedResources += fmt.Sprintf("%v: %v,", k, this.AllocatedResources[ResourceName(k)])
+	}
+	mapStringForAllocatedResources += "}"
+	keysForAllocatedResourceStatuses := make([]string, 0, len(this.AllocatedResourceStatuses))
+	for k := range this.AllocatedResourceStatuses {
+		keysForAllocatedResourceStatuses = append(keysForAllocatedResourceStatuses, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatedResourceStatuses)
+	mapStringForAllocatedResourceStatuses := "map[ResourceName]ClaimResourceStatus{"
+	for _, k := range keysForAllocatedResourceStatuses {
+		mapStringForAllocatedResourceStatuses += fmt.Sprintf("%v: %v,", k, this.AllocatedResourceStatuses[ResourceName(k)])
+	}
+	mapStringForAllocatedResourceStatuses += "}"
+	s := strings.Join([]string{`&PersistentVolumeClaimStatus{`,
+		`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
+		`AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`,
+		`Capacity:` + mapStringForCapacity + `,`,
+		`Conditions:` + repeatedStringForConditions + `,`,
+		`AllocatedResources:` + mapStringForAllocatedResources + `,`,
+		`AllocatedResourceStatuses:` + mapStringForAllocatedResourceStatuses + `,`,
+		`CurrentVolumeAttributesClassName:` + valueToStringGenerated(this.CurrentVolumeAttributesClassName) + `,`,
+		`ModifyVolumeStatus:` + strings.Replace(this.ModifyVolumeStatus.String(), "ModifyVolumeStatus", "ModifyVolumeStatus", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PersistentVolumeClaimTemplate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PersistentVolumeClaimTemplate{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PersistentVolumeClaimVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`,
+		`ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PersistentVolumeList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]PersistentVolume{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&PersistentVolumeList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PersistentVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PersistentVolumeSource{`,
+		`GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`,
+		`AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`,
+		`HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`,
+		`Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsPersistentVolumeSource", "GlusterfsPersistentVolumeSource", 1) + `,`,
+		`NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`,
+		`RBD:` + strings.Replace(this.RBD.String(), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`,
+		`ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`,
+		`Cinder:` + strings.Replace(this.Cinder.String(), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`,
+		`CephFS:` + strings.Replace(this.CephFS.String(), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`,
+		`FC:` + strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`,
+		`Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`,
+		`FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`,
+		`AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`,
+		`VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`,
+		`Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`,
+		`AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
+		`PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`,
+		`PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`,
+		`ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`,
+		`Local:` + strings.Replace(this.Local.String(), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`,
+		`StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`,
+		`CSI:` + strings.Replace(this.CSI.String(), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PersistentVolumeSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForCapacity := make([]string, 0, len(this.Capacity))
+	for k := range this.Capacity {
+		keysForCapacity = append(keysForCapacity, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity)
+	mapStringForCapacity := "ResourceList{"
+	for _, k := range keysForCapacity {
+		mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)])
+	}
+	mapStringForCapacity += "}"
+	s := strings.Join([]string{`&PersistentVolumeSpec{`,
+		`Capacity:` + mapStringForCapacity + `,`,
+		`PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`,
+		`AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`,
+		`ClaimRef:` + strings.Replace(this.ClaimRef.String(), "ObjectReference", "ObjectReference", 1) + `,`,
+		`PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`,
+		`StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`,
+		`MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`,
+		`VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`,
+		`NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`,
+		`VolumeAttributesClassName:` + valueToStringGenerated(this.VolumeAttributesClassName) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PersistentVolumeStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PersistentVolumeStatus{`,
+		`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`LastPhaseTransitionTime:` + strings.Replace(fmt.Sprintf("%v", this.LastPhaseTransitionTime), "Time", "v1.Time", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PhotonPersistentDiskVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`,
+		`PdID:` + fmt.Sprintf("%v", this.PdID) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Pod) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Pod{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodAffinity) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution := "[]PodAffinityTerm{"
+	for _, f := range this.RequiredDuringSchedulingIgnoredDuringExecution {
+		repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += "}"
+	repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]WeightedPodAffinityTerm{"
+	for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution {
+		repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}"
+	s := strings.Join([]string{`&PodAffinity{`,
+		`RequiredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution + `,`,
+		`PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodAffinityTerm) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodAffinityTerm{`,
+		`LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+		`Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`,
+		`TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`,
+		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+		`MatchLabelKeys:` + fmt.Sprintf("%v", this.MatchLabelKeys) + `,`,
+		`MismatchLabelKeys:` + fmt.Sprintf("%v", this.MismatchLabelKeys) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodAntiAffinity) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution := "[]PodAffinityTerm{"
+	for _, f := range this.RequiredDuringSchedulingIgnoredDuringExecution {
+		repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += "}"
+	repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]WeightedPodAffinityTerm{"
+	for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution {
+		repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}"
+	s := strings.Join([]string{`&PodAntiAffinity{`,
+		`RequiredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution + `,`,
+		`PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodAttachOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodAttachOptions{`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
+		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
+		`TTY:` + fmt.Sprintf("%v", this.TTY) + `,`,
+		`Container:` + fmt.Sprintf("%v", this.Container) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodCondition) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodCondition{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+		`LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodDNSConfig) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForOptions := "[]PodDNSConfigOption{"
+	for _, f := range this.Options {
+		repeatedStringForOptions += strings.Replace(strings.Replace(f.String(), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForOptions += "}"
+	s := strings.Join([]string{`&PodDNSConfig{`,
+		`Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`,
+		`Searches:` + fmt.Sprintf("%v", this.Searches) + `,`,
+		`Options:` + repeatedStringForOptions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodDNSConfigOption) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodDNSConfigOption{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Value:` + valueToStringGenerated(this.Value) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodExecOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodExecOptions{`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
+		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
+		`TTY:` + fmt.Sprintf("%v", this.TTY) + `,`,
+		`Container:` + fmt.Sprintf("%v", this.Container) + `,`,
+		`Command:` + fmt.Sprintf("%v", this.Command) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodIP) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodIP{`,
+		`IP:` + fmt.Sprintf("%v", this.IP) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]Pod{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Pod", "Pod", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&PodList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodLogOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodLogOptions{`,
+		`Container:` + fmt.Sprintf("%v", this.Container) + `,`,
+		`Follow:` + fmt.Sprintf("%v", this.Follow) + `,`,
+		`Previous:` + fmt.Sprintf("%v", this.Previous) + `,`,
+		`SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`,
+		`SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v1.Time", 1) + `,`,
+		`Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`,
+		`TailLines:` + valueToStringGenerated(this.TailLines) + `,`,
+		`LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`,
+		`InsecureSkipTLSVerifyBackend:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerifyBackend) + `,`,
+		`Stream:` + valueToStringGenerated(this.Stream) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodOS) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodOS{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodPortForwardOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodPortForwardOptions{`,
+		`Ports:` + fmt.Sprintf("%v", this.Ports) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodProxyOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodProxyOptions{`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodReadinessGate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodReadinessGate{`,
+		`ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodResourceClaim) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodResourceClaim{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`,
+		`ResourceClaimTemplateName:` + valueToStringGenerated(this.ResourceClaimTemplateName) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodResourceClaimStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodResourceClaimStatus{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`ResourceClaimName:` + valueToStringGenerated(this.ResourceClaimName) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodSchedulingGate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodSchedulingGate{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodSecurityContext) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForSysctls := "[]Sysctl{"
+	for _, f := range this.Sysctls {
+		repeatedStringForSysctls += strings.Replace(strings.Replace(f.String(), "Sysctl", "Sysctl", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForSysctls += "}"
+	s := strings.Join([]string{`&PodSecurityContext{`,
+		`SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`,
+		`RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`,
+		`RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`,
+		`SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`,
+		`FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`,
+		`RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`,
+		`Sysctls:` + repeatedStringForSysctls + `,`,
+		`WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`,
+		`FSGroupChangePolicy:` + valueToStringGenerated(this.FSGroupChangePolicy) + `,`,
+		`SeccompProfile:` + strings.Replace(this.SeccompProfile.String(), "SeccompProfile", "SeccompProfile", 1) + `,`,
+		`AppArmorProfile:` + strings.Replace(this.AppArmorProfile.String(), "AppArmorProfile", "AppArmorProfile", 1) + `,`,
+		`SupplementalGroupsPolicy:` + valueToStringGenerated(this.SupplementalGroupsPolicy) + `,`,
+		`SELinuxChangePolicy:` + valueToStringGenerated(this.SELinuxChangePolicy) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodSignature) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodSignature{`,
+		`PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "v1.OwnerReference", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForVolumes := "[]Volume{"
+	for _, f := range this.Volumes {
+		repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "Volume", "Volume", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForVolumes += "}"
+	repeatedStringForContainers := "[]Container{"
+	for _, f := range this.Containers {
+		repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForContainers += "}"
+	repeatedStringForImagePullSecrets := "[]LocalObjectReference{"
+	for _, f := range this.ImagePullSecrets {
+		repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForImagePullSecrets += "}"
+	repeatedStringForInitContainers := "[]Container{"
+	for _, f := range this.InitContainers {
+		repeatedStringForInitContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForInitContainers += "}"
+	repeatedStringForTolerations := "[]Toleration{"
+	for _, f := range this.Tolerations {
+		repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForTolerations += "}"
+	repeatedStringForHostAliases := "[]HostAlias{"
+	for _, f := range this.HostAliases {
+		repeatedStringForHostAliases += strings.Replace(strings.Replace(f.String(), "HostAlias", "HostAlias", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForHostAliases += "}"
+	repeatedStringForReadinessGates := "[]PodReadinessGate{"
+	for _, f := range this.ReadinessGates {
+		repeatedStringForReadinessGates += strings.Replace(strings.Replace(f.String(), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForReadinessGates += "}"
+	repeatedStringForTopologySpreadConstraints := "[]TopologySpreadConstraint{"
+	for _, f := range this.TopologySpreadConstraints {
+		repeatedStringForTopologySpreadConstraints += strings.Replace(strings.Replace(f.String(), "TopologySpreadConstraint", "TopologySpreadConstraint", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForTopologySpreadConstraints += "}"
+	repeatedStringForEphemeralContainers := "[]EphemeralContainer{"
+	for _, f := range this.EphemeralContainers {
+		repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForEphemeralContainers += "}"
+	repeatedStringForSchedulingGates := "[]PodSchedulingGate{"
+	for _, f := range this.SchedulingGates {
+		repeatedStringForSchedulingGates += strings.Replace(strings.Replace(f.String(), "PodSchedulingGate", "PodSchedulingGate", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForSchedulingGates += "}"
+	repeatedStringForResourceClaims := "[]PodResourceClaim{"
+	for _, f := range this.ResourceClaims {
+		repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "PodResourceClaim", "PodResourceClaim", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForResourceClaims += "}"
+	keysForNodeSelector := make([]string, 0, len(this.NodeSelector))
+	for k := range this.NodeSelector {
+		keysForNodeSelector = append(keysForNodeSelector, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector)
+	mapStringForNodeSelector := "map[string]string{"
+	for _, k := range keysForNodeSelector {
+		mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k])
+	}
+	mapStringForNodeSelector += "}"
+	keysForOverhead := make([]string, 0, len(this.Overhead))
+	for k := range this.Overhead {
+		keysForOverhead = append(keysForOverhead, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead)
+	mapStringForOverhead := "ResourceList{"
+	for _, k := range keysForOverhead {
+		mapStringForOverhead += fmt.Sprintf("%v: %v,", k, this.Overhead[ResourceName(k)])
+	}
+	mapStringForOverhead += "}"
+	s := strings.Join([]string{`&PodSpec{`,
+		`Volumes:` + repeatedStringForVolumes + `,`,
+		`Containers:` + repeatedStringForContainers + `,`,
+		`RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`,
+		`TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`,
+		`ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`,
+		`DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`,
+		`NodeSelector:` + mapStringForNodeSelector + `,`,
+		`ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`,
+		`DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`,
+		`NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`,
+		`HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`,
+		`HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`,
+		`HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`,
+		`SecurityContext:` + strings.Replace(this.SecurityContext.String(), "PodSecurityContext", "PodSecurityContext", 1) + `,`,
+		`ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`,
+		`Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`,
+		`Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`,
+		`Affinity:` + strings.Replace(this.Affinity.String(), "Affinity", "Affinity", 1) + `,`,
+		`SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`,
+		`InitContainers:` + repeatedStringForInitContainers + `,`,
+		`AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`,
+		`Tolerations:` + repeatedStringForTolerations + `,`,
+		`HostAliases:` + repeatedStringForHostAliases + `,`,
+		`PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`,
+		`Priority:` + valueToStringGenerated(this.Priority) + `,`,
+		`DNSConfig:` + strings.Replace(this.DNSConfig.String(), "PodDNSConfig", "PodDNSConfig", 1) + `,`,
+		`ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`,
+		`ReadinessGates:` + repeatedStringForReadinessGates + `,`,
+		`RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`,
+		`EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`,
+		`PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`,
+		`Overhead:` + mapStringForOverhead + `,`,
+		`TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`,
+		`EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`,
+		`SetHostnameAsFQDN:` + valueToStringGenerated(this.SetHostnameAsFQDN) + `,`,
+		`OS:` + strings.Replace(this.OS.String(), "PodOS", "PodOS", 1) + `,`,
+		`HostUsers:` + valueToStringGenerated(this.HostUsers) + `,`,
+		`SchedulingGates:` + repeatedStringForSchedulingGates + `,`,
+		`ResourceClaims:` + repeatedStringForResourceClaims + `,`,
+		`Resources:` + strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForConditions := "[]PodCondition{"
+	for _, f := range this.Conditions {
+		repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PodCondition", "PodCondition", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForConditions += "}"
+	repeatedStringForContainerStatuses := "[]ContainerStatus{"
+	for _, f := range this.ContainerStatuses {
+		repeatedStringForContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForContainerStatuses += "}"
+	repeatedStringForInitContainerStatuses := "[]ContainerStatus{"
+	for _, f := range this.InitContainerStatuses {
+		repeatedStringForInitContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForInitContainerStatuses += "}"
+	repeatedStringForPodIPs := "[]PodIP{"
+	for _, f := range this.PodIPs {
+		repeatedStringForPodIPs += strings.Replace(strings.Replace(f.String(), "PodIP", "PodIP", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForPodIPs += "}"
+	repeatedStringForEphemeralContainerStatuses := "[]ContainerStatus{"
+	for _, f := range this.EphemeralContainerStatuses {
+		repeatedStringForEphemeralContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForEphemeralContainerStatuses += "}"
+	repeatedStringForResourceClaimStatuses := "[]PodResourceClaimStatus{"
+	for _, f := range this.ResourceClaimStatuses {
+		repeatedStringForResourceClaimStatuses += strings.Replace(strings.Replace(f.String(), "PodResourceClaimStatus", "PodResourceClaimStatus", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForResourceClaimStatuses += "}"
+	repeatedStringForHostIPs := "[]HostIP{"
+	for _, f := range this.HostIPs {
+		repeatedStringForHostIPs += strings.Replace(strings.Replace(f.String(), "HostIP", "HostIP", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForHostIPs += "}"
+	s := strings.Join([]string{`&PodStatus{`,
+		`Phase:` + fmt.Sprintf("%v", this.Phase) + `,`,
+		`Conditions:` + repeatedStringForConditions + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`,
+		`PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`,
+		`StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`,
+		`ContainerStatuses:` + repeatedStringForContainerStatuses + `,`,
+		`QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`,
+		`InitContainerStatuses:` + repeatedStringForInitContainerStatuses + `,`,
+		`NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`,
+		`PodIPs:` + repeatedStringForPodIPs + `,`,
+		`EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`,
+		`Resize:` + fmt.Sprintf("%v", this.Resize) + `,`,
+		`ResourceClaimStatuses:` + repeatedStringForResourceClaimStatuses + `,`,
+		`HostIPs:` + repeatedStringForHostIPs + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodStatusResult) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodStatusResult{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodTemplate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodTemplate{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodTemplateList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]PodTemplate{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&PodTemplateList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodTemplateSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodTemplateSpec{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PortStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PortStatus{`,
+		`Port:` + fmt.Sprintf("%v", this.Port) + `,`,
+		`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
+		`Error:` + valueToStringGenerated(this.Error) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PortworxVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PortworxVolumeSource{`,
+		`VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Preconditions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Preconditions{`,
+		`UID:` + valueToStringGenerated(this.UID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PreferAvoidPodsEntry) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PreferAvoidPodsEntry{`,
+		`PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`,
+		`EvictionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvictionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PreferredSchedulingTerm) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PreferredSchedulingTerm{`,
+		`Weight:` + fmt.Sprintf("%v", this.Weight) + `,`,
+		`Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Probe) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Probe{`,
+		`ProbeHandler:` + strings.Replace(strings.Replace(this.ProbeHandler.String(), "ProbeHandler", "ProbeHandler", 1), `&`, ``, 1) + `,`,
+		`InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`,
+		`TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`,
+		`PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`,
+		`SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`,
+		`FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`,
+		`TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ProbeHandler) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ProbeHandler{`,
+		`Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`,
+		`HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`,
+		`TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`,
+		`GRPC:` + strings.Replace(this.GRPC.String(), "GRPCAction", "GRPCAction", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ProjectedVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForSources := "[]VolumeProjection{"
+	for _, f := range this.Sources {
+		repeatedStringForSources += strings.Replace(strings.Replace(f.String(), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForSources += "}"
+	s := strings.Join([]string{`&ProjectedVolumeSource{`,
+		`Sources:` + repeatedStringForSources + `,`,
+		`DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *QuobyteVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&QuobyteVolumeSource{`,
+		`Registry:` + fmt.Sprintf("%v", this.Registry) + `,`,
+		`Volume:` + fmt.Sprintf("%v", this.Volume) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`User:` + fmt.Sprintf("%v", this.User) + `,`,
+		`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+		`Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *RBDPersistentVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&RBDPersistentVolumeSource{`,
+		`CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`,
+		`RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`,
+		`RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`,
+		`Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *RBDVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&RBDVolumeSource{`,
+		`CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`,
+		`RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`,
+		`RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`,
+		`Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *RangeAllocation) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&RangeAllocation{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Range:` + fmt.Sprintf("%v", this.Range) + `,`,
+		`Data:` + valueToStringGenerated(this.Data) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ReplicationController) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ReplicationController{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ReplicationControllerCondition) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ReplicationControllerCondition{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+		`LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ReplicationControllerList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ReplicationController{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ReplicationControllerList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ReplicationControllerSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForSelector := make([]string, 0, len(this.Selector))
+	for k := range this.Selector {
+		keysForSelector = append(keysForSelector, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+	mapStringForSelector := "map[string]string{"
+	for _, k := range keysForSelector {
+		mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k])
+	}
+	mapStringForSelector += "}"
+	s := strings.Join([]string{`&ReplicationControllerSpec{`,
+		`Replicas:` + valueToStringGenerated(this.Replicas) + `,`,
+		`Selector:` + mapStringForSelector + `,`,
+		`Template:` + strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`,
+		`MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
+		`}`,
+	}, "")
+	return s
+}
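+// Map-typed fields (Selector, Hard, Limits, Requests, ...) are printed by
+// first collecting and sorting the keys with github_com_gogo_protobuf_sortkeys
+// (the mangled import name the generator assigns to
+// github.com/gogo/protobuf/sortkeys), so that String output is deterministic
+// even though Go map iteration order is not.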
+func (this *ReplicationControllerStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForConditions := "[]ReplicationControllerCondition{"
+	for _, f := range this.Conditions {
+		repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForConditions += "}"
+	s := strings.Join([]string{`&ReplicationControllerStatus{`,
+		`Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+		`FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`,
+		`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
+		`ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`,
+		`AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
+		`Conditions:` + repeatedStringForConditions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceClaim) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResourceClaim{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Request:` + fmt.Sprintf("%v", this.Request) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceFieldSelector) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResourceFieldSelector{`,
+		`ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`,
+		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
+		`Divisor:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Divisor), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceHealth) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResourceHealth{`,
+		`ResourceID:` + fmt.Sprintf("%v", this.ResourceID) + `,`,
+		`Health:` + fmt.Sprintf("%v", this.Health) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceQuota) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResourceQuota{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceQuotaList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ResourceQuota{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ResourceQuotaList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceQuotaSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForHard := make([]string, 0, len(this.Hard))
+	for k := range this.Hard {
+		keysForHard = append(keysForHard, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForHard)
+	mapStringForHard := "ResourceList{"
+	for _, k := range keysForHard {
+		mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)])
+	}
+	mapStringForHard += "}"
+	s := strings.Join([]string{`&ResourceQuotaSpec{`,
+		`Hard:` + mapStringForHard + `,`,
+		`Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`,
+		`ScopeSelector:` + strings.Replace(this.ScopeSelector.String(), "ScopeSelector", "ScopeSelector", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceQuotaStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForHard := make([]string, 0, len(this.Hard))
+	for k := range this.Hard {
+		keysForHard = append(keysForHard, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForHard)
+	mapStringForHard := "ResourceList{"
+	for _, k := range keysForHard {
+		mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)])
+	}
+	mapStringForHard += "}"
+	keysForUsed := make([]string, 0, len(this.Used))
+	for k := range this.Used {
+		keysForUsed = append(keysForUsed, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForUsed)
+	mapStringForUsed := "ResourceList{"
+	for _, k := range keysForUsed {
+		mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)])
+	}
+	mapStringForUsed += "}"
+	s := strings.Join([]string{`&ResourceQuotaStatus{`,
+		`Hard:` + mapStringForHard + `,`,
+		`Used:` + mapStringForUsed + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceRequirements) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForClaims := "[]ResourceClaim{"
+	for _, f := range this.Claims {
+		repeatedStringForClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaim", "ResourceClaim", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForClaims += "}"
+	keysForLimits := make([]string, 0, len(this.Limits))
+	for k := range this.Limits {
+		keysForLimits = append(keysForLimits, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLimits)
+	mapStringForLimits := "ResourceList{"
+	for _, k := range keysForLimits {
+		mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)])
+	}
+	mapStringForLimits += "}"
+	keysForRequests := make([]string, 0, len(this.Requests))
+	for k := range this.Requests {
+		keysForRequests = append(keysForRequests, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+	mapStringForRequests := "ResourceList{"
+	for _, k := range keysForRequests {
+		mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)])
+	}
+	mapStringForRequests += "}"
+	s := strings.Join([]string{`&ResourceRequirements{`,
+		`Limits:` + mapStringForLimits + `,`,
+		`Requests:` + mapStringForRequests + `,`,
+		`Claims:` + repeatedStringForClaims + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForResources := "[]ResourceHealth{"
+	for _, f := range this.Resources {
+		repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "ResourceHealth", "ResourceHealth", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForResources += "}"
+	s := strings.Join([]string{`&ResourceStatus{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Resources:` + repeatedStringForResources + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SELinuxOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SELinuxOptions{`,
+		`User:` + fmt.Sprintf("%v", this.User) + `,`,
+		`Role:` + fmt.Sprintf("%v", this.Role) + `,`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Level:` + fmt.Sprintf("%v", this.Level) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ScaleIOPersistentVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`,
+		`Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`,
+		`System:` + fmt.Sprintf("%v", this.System) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`,
+		`SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`,
+		`ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`,
+		`StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`,
+		`StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`,
+		`VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ScaleIOVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ScaleIOVolumeSource{`,
+		`Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`,
+		`System:` + fmt.Sprintf("%v", this.System) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`,
+		`SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`,
+		`ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`,
+		`StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`,
+		`StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`,
+		`VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ScopeSelector) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForMatchExpressions := "[]ScopedResourceSelectorRequirement{"
+	for _, f := range this.MatchExpressions {
+		repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForMatchExpressions += "}"
+	s := strings.Join([]string{`&ScopeSelector{`,
+		`MatchExpressions:` + repeatedStringForMatchExpressions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ScopedResourceSelectorRequirement) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`,
+		`ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`,
+		`Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
+		`Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SeccompProfile) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SeccompProfile{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`LocalhostProfile:` + valueToStringGenerated(this.LocalhostProfile) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Secret) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForData := make([]string, 0, len(this.Data))
+	for k := range this.Data {
+		keysForData = append(keysForData, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForData)
+	mapStringForData := "map[string][]byte{"
+	for _, k := range keysForData {
+		mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k])
+	}
+	mapStringForData += "}"
+	keysForStringData := make([]string, 0, len(this.StringData))
+	for k := range this.StringData {
+		keysForStringData = append(keysForStringData, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForStringData)
+	mapStringForStringData := "map[string]string{"
+	for _, k := range keysForStringData {
+		mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k])
+	}
+	mapStringForStringData += "}"
+	s := strings.Join([]string{`&Secret{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Data:` + mapStringForData + `,`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`StringData:` + mapStringForStringData + `,`,
+		`Immutable:` + valueToStringGenerated(this.Immutable) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SecretEnvSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SecretEnvSource{`,
+		`LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`,
+		`Optional:` + valueToStringGenerated(this.Optional) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SecretKeySelector) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SecretKeySelector{`,
+		`LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Optional:` + valueToStringGenerated(this.Optional) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SecretList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]Secret{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Secret", "Secret", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&SecretList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SecretProjection) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]KeyToPath{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&SecretProjection{`,
+		`LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`Optional:` + valueToStringGenerated(this.Optional) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SecretReference) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SecretReference{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SecretVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]KeyToPath{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&SecretVolumeSource{`,
+		`SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`,
+		`Optional:` + valueToStringGenerated(this.Optional) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SecurityContext) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SecurityContext{`,
+		`Capabilities:` + strings.Replace(this.Capabilities.String(), "Capabilities", "Capabilities", 1) + `,`,
+		`Privileged:` + valueToStringGenerated(this.Privileged) + `,`,
+		`SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`,
+		`RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`,
+		`RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`,
+		`ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`,
+		`AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`,
+		`RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`,
+		`ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`,
+		`WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`,
+		`SeccompProfile:` + strings.Replace(this.SeccompProfile.String(), "SeccompProfile", "SeccompProfile", 1) + `,`,
+		`AppArmorProfile:` + strings.Replace(this.AppArmorProfile.String(), "AppArmorProfile", "AppArmorProfile", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SerializedReference) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SerializedReference{`,
+		`Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Service) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Service{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServiceAccount) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForSecrets := "[]ObjectReference{"
+	for _, f := range this.Secrets {
+		repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForSecrets += "}"
+	repeatedStringForImagePullSecrets := "[]LocalObjectReference{"
+	for _, f := range this.ImagePullSecrets {
+		repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForImagePullSecrets += "}"
+	s := strings.Join([]string{`&ServiceAccount{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Secrets:` + repeatedStringForSecrets + `,`,
+		`ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`,
+		`AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServiceAccountList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ServiceAccount{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ServiceAccountList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServiceAccountTokenProjection) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ServiceAccountTokenProjection{`,
+		`Audience:` + fmt.Sprintf("%v", this.Audience) + `,`,
+		`ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServiceList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]Service{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Service", "Service", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ServiceList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServicePort) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ServicePort{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
+		`Port:` + fmt.Sprintf("%v", this.Port) + `,`,
+		`TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`,
+		`NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`,
+		`AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServiceProxyOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ServiceProxyOptions{`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServiceSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForPorts := "[]ServicePort{"
+	for _, f := range this.Ports {
+		repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ServicePort", "ServicePort", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForPorts += "}"
+	keysForSelector := make([]string, 0, len(this.Selector))
+	for k := range this.Selector {
+		keysForSelector = append(keysForSelector, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForSelector)
+	mapStringForSelector := "map[string]string{"
+	for _, k := range keysForSelector {
+		mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k])
+	}
+	mapStringForSelector += "}"
+	s := strings.Join([]string{`&ServiceSpec{`,
+		`Ports:` + repeatedStringForPorts + `,`,
+		`Selector:` + mapStringForSelector + `,`,
+		`ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`,
+		`SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`,
+		`LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`,
+		`LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`,
+		`ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`,
+		`ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`,
+		`HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`,
+		`PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`,
+		`SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`,
+		`IPFamilyPolicy:` + valueToStringGenerated(this.IPFamilyPolicy) + `,`,
+		`ClusterIPs:` + fmt.Sprintf("%v", this.ClusterIPs) + `,`,
+		`IPFamilies:` + fmt.Sprintf("%v", this.IPFamilies) + `,`,
+		`AllocateLoadBalancerNodePorts:` + valueToStringGenerated(this.AllocateLoadBalancerNodePorts) + `,`,
+		`LoadBalancerClass:` + valueToStringGenerated(this.LoadBalancerClass) + `,`,
+		`InternalTrafficPolicy:` + valueToStringGenerated(this.InternalTrafficPolicy) + `,`,
+		`TrafficDistribution:` + valueToStringGenerated(this.TrafficDistribution) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServiceStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForConditions := "[]Condition{"
+	for _, f := range this.Conditions {
+		repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForConditions += "}"
+	s := strings.Join([]string{`&ServiceStatus{`,
+		`LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`,
+		`Conditions:` + repeatedStringForConditions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SessionAffinityConfig) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SessionAffinityConfig{`,
+		`ClientIP:` + strings.Replace(this.ClientIP.String(), "ClientIPConfig", "ClientIPConfig", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SleepAction) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SleepAction{`,
+		`Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StorageOSPersistentVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`,
+		`VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`,
+		`VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "ObjectReference", "ObjectReference", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StorageOSVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StorageOSVolumeSource{`,
+		`VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`,
+		`VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Sysctl) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Sysctl{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TCPSocketAction) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TCPSocketAction{`,
+		`Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`,
+		`Host:` + fmt.Sprintf("%v", this.Host) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Taint) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Taint{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`Effect:` + fmt.Sprintf("%v", this.Effect) + `,`,
+		`TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "v1.Time", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Toleration) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Toleration{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Operator:` + fmt.Sprintf("%v", this.Operator) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`Effect:` + fmt.Sprintf("%v", this.Effect) + `,`,
+		`TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TopologySelectorLabelRequirement) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TopologySelectorLabelRequirement{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Values:` + fmt.Sprintf("%v", this.Values) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TopologySelectorTerm) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForMatchLabelExpressions := "[]TopologySelectorLabelRequirement{"
+	for _, f := range this.MatchLabelExpressions {
+		repeatedStringForMatchLabelExpressions += strings.Replace(strings.Replace(f.String(), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForMatchLabelExpressions += "}"
+	s := strings.Join([]string{`&TopologySelectorTerm{`,
+		`MatchLabelExpressions:` + repeatedStringForMatchLabelExpressions + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TopologySpreadConstraint) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TopologySpreadConstraint{`,
+		`MaxSkew:` + fmt.Sprintf("%v", this.MaxSkew) + `,`,
+		`TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`,
+		`WhenUnsatisfiable:` + fmt.Sprintf("%v", this.WhenUnsatisfiable) + `,`,
+		`LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+		`MinDomains:` + valueToStringGenerated(this.MinDomains) + `,`,
+		`NodeAffinityPolicy:` + valueToStringGenerated(this.NodeAffinityPolicy) + `,`,
+		`NodeTaintsPolicy:` + valueToStringGenerated(this.NodeTaintsPolicy) + `,`,
+		`MatchLabelKeys:` + fmt.Sprintf("%v", this.MatchLabelKeys) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TypedLocalObjectReference) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TypedLocalObjectReference{`,
+		`APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TypedObjectReference) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TypedObjectReference{`,
+		`APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Namespace:` + valueToStringGenerated(this.Namespace) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Volume) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Volume{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *VolumeDevice) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&VolumeDevice{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *VolumeMount) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&VolumeMount{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`,
+		`SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`,
+		`MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`,
+		`SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`,
+		`RecursiveReadOnly:` + valueToStringGenerated(this.RecursiveReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *VolumeMountStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&VolumeMountStatus{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`,
+		`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`,
+		`RecursiveReadOnly:` + valueToStringGenerated(this.RecursiveReadOnly) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *VolumeNodeAffinity) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&VolumeNodeAffinity{`,
+		`Required:` + strings.Replace(this.Required.String(), "NodeSelector", "NodeSelector", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *VolumeProjection) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&VolumeProjection{`,
+		`Secret:` + strings.Replace(this.Secret.String(), "SecretProjection", "SecretProjection", 1) + `,`,
+		`DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`,
+		`ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`,
+		`ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`,
+		`ClusterTrustBundle:` + strings.Replace(this.ClusterTrustBundle.String(), "ClusterTrustBundleProjection", "ClusterTrustBundleProjection", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *VolumeResourceRequirements) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLimits := make([]string, 0, len(this.Limits))
+	for k := range this.Limits {
+		keysForLimits = append(keysForLimits, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLimits)
+	mapStringForLimits := "ResourceList{"
+	for _, k := range keysForLimits {
+		mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)])
+	}
+	mapStringForLimits += "}"
+	keysForRequests := make([]string, 0, len(this.Requests))
+	for k := range this.Requests {
+		keysForRequests = append(keysForRequests, string(k))
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForRequests)
+	mapStringForRequests := "ResourceList{"
+	for _, k := range keysForRequests {
+		mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)])
+	}
+	mapStringForRequests += "}"
+	s := strings.Join([]string{`&VolumeResourceRequirements{`,
+		`Limits:` + mapStringForLimits + `,`,
+		`Requests:` + mapStringForRequests + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *VolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&VolumeSource{`,
+		`HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`,
+		`EmptyDir:` + strings.Replace(this.EmptyDir.String(), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`,
+		`GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`,
+		`AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`,
+		`GitRepo:` + strings.Replace(this.GitRepo.String(), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`,
+		`Secret:` + strings.Replace(this.Secret.String(), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`,
+		`NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`,
+		`ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`,
+		`Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`,
+		`PersistentVolumeClaim:` + strings.Replace(this.PersistentVolumeClaim.String(), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`,
+		`RBD:` + strings.Replace(this.RBD.String(), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`,
+		`FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`,
+		`Cinder:` + strings.Replace(this.Cinder.String(), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`,
+		`CephFS:` + strings.Replace(this.CephFS.String(), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`,
+		`Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`,
+		`DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`,
+		`FC:` + strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`,
+		`AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`,
+		`ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`,
+		`VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`,
+		`Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`,
+		`AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`,
+		`PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`,
+		`PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`,
+		`ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`,
+		`Projected:` + strings.Replace(this.Projected.String(), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`,
+		`StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`,
+		`CSI:` + strings.Replace(this.CSI.String(), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`,
+		`Ephemeral:` + strings.Replace(this.Ephemeral.String(), "EphemeralVolumeSource", "EphemeralVolumeSource", 1) + `,`,
+		`Image:` + strings.Replace(this.Image.String(), "ImageVolumeSource", "ImageVolumeSource", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *VsphereVirtualDiskVolumeSource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`,
+		`VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`,
+		`FSType:` + fmt.Sprintf("%v", this.FSType) + `,`,
+		`StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`,
+		`StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WeightedPodAffinityTerm) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WeightedPodAffinityTerm{`,
+		`Weight:` + fmt.Sprintf("%v", this.Weight) + `,`,
+		`PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WindowsSecurityContextOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WindowsSecurityContextOptions{`,
+		`GMSACredentialSpecName:` + valueToStringGenerated(this.GMSACredentialSpecName) + `,`,
+		`GMSACredentialSpec:` + valueToStringGenerated(this.GMSACredentialSpec) + `,`,
+		`RunAsUserName:` + valueToStringGenerated(this.RunAsUserName) + `,`,
+		`HostProcess:` + valueToStringGenerated(this.HostProcess) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
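+// valueToStringGenerated renders optional (pointer-typed) scalar fields:
+// reflect.ValueOf(v).IsNil() detects an unset field, and reflect.Indirect
+// dereferences the pointer for display. The Unmarshal methods that follow are
+// hand-rolled protobuf decoders: each walks the wire format directly, reading
+// a varint tag, splitting it into fieldNum = tag >> 3 and wireType = tag & 0x7,
+// dispatching on the field number, and skipping unknown fields via
+// skipGenerated for forward compatibility.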
+func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType)
+			}
+			m.Partition = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Partition |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
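+// The inner loops above implement base-128 varint decoding: each byte
+// contributes its low 7 bits, and a set high bit (>= 0x80) marks a
+// continuation. As a minimal standalone sketch of the same loop (readVarint
+// is a hypothetical helper for illustration, not part of this file):
+//
+//	func readVarint(b []byte) (v uint64, n int, err error) {
+//		for shift := uint(0); shift < 64; shift += 7 { // at most 10 bytes
+//			if n >= len(b) {
+//				return 0, 0, io.ErrUnexpectedEOF
+//			}
+//			c := b[n]
+//			n++
+//			v |= uint64(c&0x7F) << shift
+//			if c < 0x80 { // high bit clear: last byte of the varint
+//				return v, n, nil
+//			}
+//		}
+//		return 0, 0, errors.New("varint overflows uint64")
+//	}
+//
+// String fields (wire type 2) are decoded by reading a varint length and then
+// slicing that many bytes; the postIndex < 0 check guards against integer
+// overflow when an adversarial input supplies a huge length.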
+func (m *Affinity) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Affinity: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NodeAffinity == nil {
+				m.NodeAffinity = &NodeAffinity{}
+			}
+			if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PodAffinity == nil {
+				m.PodAffinity = &PodAffinity{}
+			}
+			if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PodAntiAffinity == nil {
+				m.PodAntiAffinity = &PodAntiAffinity{}
+			}
+			if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
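+// Embedded messages such as NodeAffinity are also length-delimited (wire
+// type 2): the decoder reads a varint byte length, lazily allocates the
+// target pointer if it is still nil, and recursively calls Unmarshal on the
+// sub-slice dAtA[iNdEx:postIndex].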
+func (m *AppArmorProfile) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AppArmorProfile: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AppArmorProfile: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = AppArmorProfileType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LocalhostProfile", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.LocalhostProfile = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
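+// Optional scalar fields map to pointers: LocalhostProfile is decoded into a
+// freshly allocated *string, so an unset field (nil) stays distinguishable
+// from an explicitly empty one.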
+func (m *AttachedVolume) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DevicePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AvoidPods) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
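+			// Repeated message field: each wire occurrence appends one
+			// entry and unmarshals the nested message in place.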
+			m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{})
+			if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DiskName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DataDiskURI = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex])
+			m.CachingMode = &s
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.FSType = &s
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
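+			// Proto bools travel as varints; the value is stored through
+			// a pointer so that "unset" can be told apart from false.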
+			b := bool(v != 0)
+			m.ReadOnly = &b
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := AzureDataDiskKind(dAtA[iNdEx:postIndex])
+			m.Kind = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SecretName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ShareName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.SecretNamespace = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SecretName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ShareName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Binding) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Binding: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Driver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeHandle = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
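+			// Map field: each entry arrives as a nested message whose
+			// field 1 is the key and field 2 is the value; the map itself
+			// is allocated lazily on the first entry.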
+			if m.VolumeAttributes == nil {
+				m.VolumeAttributes = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.VolumeAttributes[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
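+			// Optional nested message: allocated on first occurrence, then
+			// decoding is delegated to the nested type's Unmarshal.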
+			if m.ControllerPublishSecretRef == nil {
+				m.ControllerPublishSecretRef = &SecretReference{}
+			}
+			if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NodeStageSecretRef == nil {
+				m.NodeStageSecretRef = &SecretReference{}
+			}
+			if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NodePublishSecretRef == nil {
+				m.NodePublishSecretRef = &SecretReference{}
+			}
+			if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ControllerExpandSecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ControllerExpandSecretRef == nil {
+				m.ControllerExpandSecretRef = &SecretReference{}
+			}
+			if err := m.ControllerExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeExpandSecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NodeExpandSecretRef == nil {
+				m.NodeExpandSecretRef = &SecretReference{}
+			}
+			if err := m.NodeExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Driver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.ReadOnly = &b
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.FSType = &s
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.VolumeAttributes == nil {
+				m.VolumeAttributes = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.VolumeAttributes[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NodePublishSecretRef == nil {
+				m.NodePublishSecretRef = &LocalObjectReference{}
+			}
+			if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Capabilities) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Capabilities: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
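+			// Repeated named-string field: each occurrence appends one
+			// Capability to the slice.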
+			m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.User = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SecretFile = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &SecretReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.User = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SecretFile = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &LocalObjectReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &SecretReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &LocalObjectReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ClientIPConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TimeoutSeconds = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ClusterTrustBundleProjection) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ClusterTrustBundleProjection: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ClusterTrustBundleProjection: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.Name = &s
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.SignerName = &s
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LabelSelector == nil {
+				m.LabelSelector = &v1.LabelSelector{}
+			}
+			if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Optional = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ComponentCondition) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = ComponentConditionType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Error = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ComponentStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, ComponentCondition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ComponentStatusList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, ComponentStatus{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfigMap) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Data == nil {
+				m.Data = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Data[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.BinaryData == nil {
+				m.BinaryData = make(map[string][]byte)
+			}
+			var mapkey string
+			mapvalue := []byte{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapbyteLen uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapbyteLen |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intMapbyteLen := int(mapbyteLen)
+					if intMapbyteLen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postbytesIndex := iNdEx + intMapbyteLen
+					if postbytesIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postbytesIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = make([]byte, mapbyteLen)
+					copy(mapvalue, dAtA[iNdEx:postbytesIndex])
+					iNdEx = postbytesIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.BinaryData[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Immutable", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Immutable = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Optional = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Optional = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfigMapList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, ConfigMap{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.KubeletConfigKey = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, KeyToPath{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Optional = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, KeyToPath{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.DefaultMode = &v
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Optional = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Container) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Container: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Image = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Args = append(m.Args, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.WorkingDir = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ports = append(m.Ports, ContainerPort{})
+			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Env = append(m.Env, EnvVar{})
+			if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeMounts = append(m.VolumeMounts, VolumeMount{})
+			if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LivenessProbe == nil {
+				m.LivenessProbe = &Probe{}
+			}
+			if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ReadinessProbe == nil {
+				m.ReadinessProbe = &Probe{}
+			}
+			if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Lifecycle == nil {
+				m.Lifecycle = &Lifecycle{}
+			}
+			if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TerminationMessagePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecurityContext == nil {
+				m.SecurityContext = &SecurityContext{}
+			}
+			if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 16:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stdin = bool(v != 0)
+		case 17:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.StdinOnce = bool(v != 0)
+		case 18:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TTY = bool(v != 0)
+		case 19:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.EnvFrom = append(m.EnvFrom, EnvFromSource{})
+			if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 20:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 21:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{})
+			if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 22:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.StartupProbe == nil {
+				m.StartupProbe = &Probe{}
+			}
+			if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 23:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{})
+			if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 24:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := ContainerRestartPolicy(dAtA[iNdEx:postIndex])
+			m.RestartPolicy = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerImage) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Names = append(m.Names, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType)
+			}
+			m.SizeBytes = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.SizeBytes |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerPort) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType)
+			}
+			m.HostPort = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.HostPort |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType)
+			}
+			m.ContainerPort = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ContainerPort |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Protocol = Protocol(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.HostIP = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerResizePolicy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerResizePolicy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerResizePolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceName = ResourceName(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RestartPolicy = ResourceResizeRestartPolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerState) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerState: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Waiting == nil {
+				m.Waiting = &ContainerStateWaiting{}
+			}
+			if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Running == nil {
+				m.Running = &ContainerStateRunning{}
+			}
+			if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Terminated == nil {
+				m.Terminated = &ContainerStateTerminated{}
+			}
+			if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType)
+			}
+			m.ExitCode = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ExitCode |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType)
+			}
+			m.Signal = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Signal |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field State", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Ready = bool(v != 0)
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType)
+			}
+			m.RestartCount = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RestartCount |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Image = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImageID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Started = &b
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AllocatedResources == nil {
+				m.AllocatedResources = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.AllocatedResources[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Resources == nil {
+				m.Resources = &ResourceRequirements{}
+			}
+			if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeMounts = append(m.VolumeMounts, VolumeMountStatus{})
+			if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.User == nil {
+				m.User = &ContainerUser{}
+			}
+			if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourcesStatus", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AllocatedResourcesStatus = append(m.AllocatedResourcesStatus, ResourceStatus{})
+			if err := m.AllocatedResourcesStatus[len(m.AllocatedResourcesStatus)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerUser) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerUser: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerUser: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Linux == nil {
+				m.Linux = &LinuxContainerUser{}
+			}
+			if err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+			}
+			m.Port = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Port |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, DownwardAPIVolumeFile{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.FieldRef == nil {
+				m.FieldRef = &ObjectFieldSelector{}
+			}
+			if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ResourceFieldRef == nil {
+				m.ResourceFieldRef = &ResourceFieldSelector{}
+			}
+			if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Mode = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, DownwardAPIVolumeFile{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.DefaultMode = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Medium = StorageMedium(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SizeLimit == nil {
+				m.SizeLimit = &resource.Quantity{}
+			}
+			if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EndpointAddress) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IP = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.TargetRef == nil {
+				m.TargetRef = &ObjectReference{}
+			}
+			if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Hostname = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.NodeName = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EndpointPort) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+			}
+			m.Port = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Port |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Protocol = Protocol(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.AppProtocol = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EndpointSubset) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Addresses = append(m.Addresses, EndpointAddress{})
+			if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{})
+			if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ports = append(m.Ports, EndpointPort{})
+			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Endpoints) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Endpoints: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Subsets = append(m.Subsets, EndpointSubset{})
+			if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EndpointsList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, Endpoints{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EnvFromSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Prefix = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ConfigMapRef == nil {
+				m.ConfigMapRef = &ConfigMapEnvSource{}
+			}
+			if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &SecretEnvSource{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EnvVar) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EnvVar: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ValueFrom == nil {
+				m.ValueFrom = &EnvVarSource{}
+			}
+			if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EnvVarSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.FieldRef == nil {
+				m.FieldRef = &ObjectFieldSelector{}
+			}
+			if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ResourceFieldRef == nil {
+				m.ResourceFieldRef = &ResourceFieldSelector{}
+			}
+			if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ConfigMapKeyRef == nil {
+				m.ConfigMapKeyRef = &ConfigMapKeySelector{}
+			}
+			if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretKeyRef == nil {
+				m.SecretKeyRef = &SecretKeySelector{}
+			}
+			if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EphemeralContainer) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EphemeralContainer: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EphemeralContainer: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerCommon", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.EphemeralContainerCommon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TargetContainerName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TargetContainerName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EphemeralContainerCommon: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EphemeralContainerCommon: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Image = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Args = append(m.Args, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.WorkingDir = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ports = append(m.Ports, ContainerPort{})
+			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Env = append(m.Env, EnvVar{})
+			if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeMounts = append(m.VolumeMounts, VolumeMount{})
+			if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LivenessProbe == nil {
+				m.LivenessProbe = &Probe{}
+			}
+			if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ReadinessProbe == nil {
+				m.ReadinessProbe = &Probe{}
+			}
+			if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Lifecycle == nil {
+				m.Lifecycle = &Lifecycle{}
+			}
+			if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TerminationMessagePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecurityContext == nil {
+				m.SecurityContext = &SecurityContext{}
+			}
+			if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 16:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stdin = bool(v != 0)
+		case 17:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.StdinOnce = bool(v != 0)
+		case 18:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TTY = bool(v != 0)
+		case 19:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.EnvFrom = append(m.EnvFrom, EnvFromSource{})
+			if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 20:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 21:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{})
+			if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 22:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.StartupProbe == nil {
+				m.StartupProbe = &Probe{}
+			}
+			if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 23:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResizePolicy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResizePolicy = append(m.ResizePolicy, ContainerResizePolicy{})
+			if err := m.ResizePolicy[len(m.ResizePolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 24:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := ContainerRestartPolicy(dAtA[iNdEx:postIndex])
+			m.RestartPolicy = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EphemeralVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EphemeralVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EphemeralVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplate", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.VolumeClaimTemplate == nil {
+				m.VolumeClaimTemplate = &PersistentVolumeClaimTemplate{}
+			}
+			if err := m.VolumeClaimTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Event) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Event: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InvolvedObject", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.InvolvedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.FirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+			}
+			m.Count = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Count |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Series == nil {
+				m.Series = &EventSeries{}
+			}
+			if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Action = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Related == nil {
+				m.Related = &ObjectReference{}
+			}
+			if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ReportingController = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ReportingInstance = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EventList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EventList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, Event{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EventSeries) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EventSeries: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+			}
+			m.Count = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Count |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EventSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EventSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Component = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Host = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExecAction) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExecAction: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *FCVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TargetWWNs = append(m.TargetWWNs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Lun = &v
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: FlexPersistentVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: FlexPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Driver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &SecretReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Options == nil {
+				m.Options = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Options[mapkey] = mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: FlexVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: FlexVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Driver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &LocalObjectReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Options == nil {
+				m.Options = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Options[mapkey] = mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DatasetName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DatasetUUID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DatasetUUID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PDName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType)
+			}
+			m.Partition = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Partition |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GRPCAction) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GRPCAction: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GRPCAction: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+			}
+			m.Port = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Port |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.Service = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Repository = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Revision = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Directory = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.EndpointsName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EndpointsNamespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.EndpointsNamespace = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.EndpointsName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
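+// HTTPGetAction mixes scalar and message fields: Port is a length-delimited
+// submessage decoded by its own Unmarshal (an intstr.IntOrString), and each
+// occurrence of field 5 appends one element to the repeated HTTPHeaders slice.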
+func (m *HTTPGetAction) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Host = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Scheme = URIScheme(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HTTPHeaders", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.HTTPHeaders = append(m.HTTPHeaders, HTTPHeader{})
+			if err := m.HTTPHeaders[len(m.HTTPHeaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HTTPHeader) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HTTPHeader: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HostAlias) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HostAlias: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HostAlias: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IP = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hostnames", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Hostnames = append(m.Hostnames, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HostIP) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HostIP: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HostIP: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IP = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
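+// In HostPathVolumeSource, the optional Type field is decoded into a local
+// variable and stored through a pointer, so an absent field (nil) stays
+// distinguishable from an explicitly empty HostPathType.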
+func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := HostPathType(dAtA[iNdEx:postIndex])
+			m.Type = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
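+// ISCSIPersistentVolumeSource decodes varint booleans (ReadOnly and the CHAP
+// flags) as v != 0 and appends one Portals entry per occurrence of field 7.
+// The switch covers tags 1-8 and 10-12; a tag of 9 falls through to the
+// default branch and is skipped like any unknown field.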
+func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TargetPortal = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IQN = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType)
+			}
+			m.Lun = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Lun |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ISCSIInterface = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.DiscoveryCHAPAuth = bool(v != 0)
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &SecretReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.SessionCHAPAuth = bool(v != 0)
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.InitiatorName = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TargetPortal = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IQN = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType)
+			}
+			m.Lun = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Lun |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ISCSIInterface = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.DiscoveryCHAPAuth = bool(v != 0)
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &LocalObjectReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.SessionCHAPAuth = bool(v != 0)
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.InitiatorName = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
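+// ImageVolumeSource converts the raw bytes of field 2 directly into the
+// named string type PullPolicy; no validation of the value happens here.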
+func (m *ImageVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reference = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PullPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PullPolicy = PullPolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *KeyToPath) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Mode = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Lifecycle) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PostStart == nil {
+				m.PostStart = &LifecycleHandler{}
+			}
+			if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PreStop == nil {
+				m.PreStop = &LifecycleHandler{}
+			}
+			if err := m.PreStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
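+// LifecycleHandler allocates each handler (Exec, HTTPGet, TCPSocket, Sleep)
+// lazily on first occurrence and then delegates to that type's Unmarshal,
+// so a repeated occurrence of the same field is merged into the
+// already-allocated struct rather than replacing it.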
+func (m *LifecycleHandler) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LifecycleHandler: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LifecycleHandler: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Exec == nil {
+				m.Exec = &ExecAction{}
+			}
+			if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.HTTPGet == nil {
+				m.HTTPGet = &HTTPGetAction{}
+			}
+			if err := m.HTTPGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.TCPSocket == nil {
+				m.TCPSocket = &TCPSocketAction{}
+			}
+			if err := m.TCPSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sleep", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Sleep == nil {
+				m.Sleep = &SleepAction{}
+			}
+			if err := m.Sleep.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LimitRange) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LimitRange: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LimitRange: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
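+// LimitRangeItem's ResourceList fields (Max, Min, Default) are protobuf map
+// fields: each occurrence of the field carries one map entry, itself a
+// mini-message with the key at tag 1 (a string) and the value at tag 2
+// (a resource.Quantity submessage), decoded by the inner loop below.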
+func (m *LimitRangeItem) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LimitRangeItem: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LimitRangeItem: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = LimitType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Max == nil {
+				m.Max = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Max[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Min == nil {
+				m.Min = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Min[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Default == nil {
+				m.Default = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Default[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DefaultRequest", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DefaultRequest == nil {
+				m.DefaultRequest = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.DefaultRequest[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MaxLimitRequestRatio", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.MaxLimitRequestRatio == nil {
+				m.MaxLimitRequestRatio = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		default:
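+			// Unknown field numbers are skipped rather than rejected, which
+			// keeps older decoders forward compatible with newer schemas.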
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
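+	// Defensive final check: reject input whose last field claimed more
+	// bytes than the buffer actually holds.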
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LimitRangeList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
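+		// The tag varint packs the field number into the high bits and the
+		// wire type into the low three; wire type 4 (end-group) is obsolete
+		// and never valid for these messages.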
+		if wireType == 4 {
+			return fmt.Errorf("proto: LimitRangeList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
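+			// Append a zero value first, then unmarshal into the new slice
+			// slot in place, avoiding a copy of the decoded element.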
+			m.Items = append(m.Items, LimitRange{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Limits = append(m.Limits, LimitRangeItem{})
+			if err := m.Limits[len(m.Limits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LinuxContainerUser) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LinuxContainerUser: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LinuxContainerUser: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			m.UID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.UID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GID", wireType)
+			}
+			m.GID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.GID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
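+			// Repeated int64s may arrive unpacked (wire type 0, one varint per
+			// tag) or packed (wire type 2, one length-delimited run of
+			// varints); a conforming parser accepts both encodings.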
+			if wireType == 0 {
+				var v int64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= int64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.SupplementalGroups = append(m.SupplementalGroups, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthGenerated
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthGenerated
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
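+				// Pre-count the varints in the packed run (each final byte has
+				// its MSB clear) so the slice can be allocated exactly once.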
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.SupplementalGroups) == 0 {
+					m.SupplementalGroups = make([]int64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v int64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= int64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.SupplementalGroups = append(m.SupplementalGroups, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType)
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *List) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: List: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
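+			// List items are heterogeneous, so each is retained as a
+			// runtime.RawExtension and decoded into a concrete type later.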
+			m.Items = append(m.Items, runtime.RawExtension{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LoadBalancerIngress: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IP = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Hostname = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IPMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
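+			// IPMode is optional, so the decoded string is bound to a local
+			// and its address stored, leaving the field nil when absent.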
+			s := LoadBalancerIPMode(dAtA[iNdEx:postIndex])
+			m.IPMode = &s
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ports = append(m.Ports, PortStatus{})
+			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LoadBalancerStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ingress = append(m.Ingress, LoadBalancerIngress{})
+			if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LocalObjectReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LocalObjectReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LocalVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LocalVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.FSType = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ModifyVolumeStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ModifyVolumeStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ModifyVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TargetVolumeAttributesClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TargetVolumeAttributesClassName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = PersistentVolumeClaimModifyVolumeStatus(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Server = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
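+			// Protobuf encodes bool as a varint; any nonzero value is true.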
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Namespace) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Namespace: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NamespaceCondition) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NamespaceCondition: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NamespaceCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = NamespaceConditionType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
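+		// Field number 3 appears unused in NamespaceCondition; if it ever
+		// shows up on the wire it falls through to the default skip path.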
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NamespaceList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NamespaceList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NamespaceList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, Namespace{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NamespaceSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NamespaceSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NamespaceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
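+			// Repeated strings are never packed: each finalizer arrives as
+			// its own length-delimited field and is appended individually.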
+			m.Finalizers = append(m.Finalizers, FinalizerName(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NamespaceStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NamespaceStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NamespaceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Phase = NamespacePhase(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, NamespaceCondition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Node) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Node: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeAddress) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeAddress: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeAddress: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = NodeAddressType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Address = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeAffinity) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeAffinity: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+				m.RequiredDuringSchedulingIgnoredDuringExecution = &NodeSelector{}
+			}
+			if err := m.RequiredDuringSchedulingIgnoredDuringExecution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, PreferredSchedulingTerm{})
+			if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeCondition) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeCondition: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = NodeConditionType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastHeartbeatTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastHeartbeatTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeConfigSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeConfigSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
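+			// Optional nested messages are allocated lazily: the pointer is
+			// created only once the field actually appears on the wire, and
+			// its own Unmarshal is then handed the length-delimited sub-slice.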
+			if m.ConfigMap == nil {
+				m.ConfigMap = &ConfigMapNodeConfigSource{}
+			}
+			if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeConfigStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Assigned", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Assigned == nil {
+				m.Assigned = &NodeConfigSource{}
+			}
+			if err := m.Assigned.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Active == nil {
+				m.Active = &NodeConfigSource{}
+			}
+			if err := m.Active.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastKnownGood", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LastKnownGood == nil {
+				m.LastKnownGood = &NodeConfigSource{}
+			}
+			if err := m.LastKnownGood.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Error = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KubeletEndpoint", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.KubeletEndpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeFeatures) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeFeatures: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeFeatures: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroupsPolicy", wireType)
+			}
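+			// Protobuf bools arrive as wire-type-0 varints; the decoded value
+			// is stored through a fresh *bool so an absent field stays nil and
+			// remains distinguishable from an explicit false.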
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.SupplementalGroupsPolicy = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
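+			// Repeated message fields are encoded as one length-delimited
+			// record per element: each record appends a zero value and is
+			// unmarshaled in place at the tail of the slice.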
+			m.Items = append(m.Items, Node{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeProxyOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeRuntimeHandler) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeRuntimeHandler: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeRuntimeHandler: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Features == nil {
+				m.Features = &NodeRuntimeHandlerFeatures{}
+			}
+			if err := m.Features.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeRuntimeHandlerFeatures) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeRuntimeHandlerFeatures: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeRuntimeHandlerFeatures: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RecursiveReadOnlyMounts", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.RecursiveReadOnlyMounts = &b
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UserNamespaces", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.UserNamespaces = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeSelector) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeSelector: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeSelectorTerms", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NodeSelectorTerms = append(m.NodeSelectorTerms, NodeSelectorTerm{})
+			if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeSelectorRequirement: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Operator = NodeSelectorOperator(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeSelectorTerm: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeSelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MatchExpressions = append(m.MatchExpressions, NodeSelectorRequirement{})
+			if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchFields", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MatchFields = append(m.MatchFields, NodeSelectorRequirement{})
+			if err := m.MatchFields[len(m.MatchFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodCIDR", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PodCIDR = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DoNotUseExternalID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DoNotUseExternalID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProviderID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ProviderID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Unschedulable", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Unschedulable = bool(v != 0)
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Taints = append(m.Taints, Taint{})
+			if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigSource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ConfigSource == nil {
+				m.ConfigSource = &NodeConfigSource{}
+			}
+			if err := m.ConfigSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodCIDRs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PodCIDRs = append(m.PodCIDRs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NodeStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Capacity == nil {
+				m.Capacity = make(ResourceList)
+			}
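+			// Proto map fields are repeated key/value entry messages: within
+			// each entry, field 1 holds the ResourceName key and field 2 the
+			// resource.Quantity value, decoded by the inner loop below.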
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Capacity[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Allocatable == nil {
+				m.Allocatable = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Allocatable[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Phase = NodePhase(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, NodeCondition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Addresses = append(m.Addresses, NodeAddress{})
+			if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DaemonEndpoints", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.DaemonEndpoints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Images = append(m.Images, ContainerImage{})
+			if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumesInUse", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumesInUse = append(m.VolumesInUse, UniqueVolumeName(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumesAttached", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumesAttached = append(m.VolumesAttached, AttachedVolume{})
+			if err := m.VolumesAttached[len(m.VolumesAttached)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Config == nil {
+				m.Config = &NodeConfigStatus{}
+			}
+			if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandlers", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RuntimeHandlers = append(m.RuntimeHandlers, NodeRuntimeHandler{})
+			if err := m.RuntimeHandlers[len(m.RuntimeHandlers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Features", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Features == nil {
+				m.Features = &NodeFeatures{}
+			}
+			if err := m.Features.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
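+// Unmarshal decodes a protobuf-encoded NodeSystemInfo in place. Every field
+// here is length-delimited (wire type 2): a varint tag, a varint byte length,
+// then the raw UTF-8 payload, which is copied into the matching string field
+// (MachineID, SystemUUID, BootID, the version strings, OS, and Architecture).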
+func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NodeSystemInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NodeSystemInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MachineID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MachineID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SystemUUID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SystemUUID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BootID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BootID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KernelVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.KernelVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OSImage", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.OSImage = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerRuntimeVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerRuntimeVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KubeletVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.KubeletVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KubeProxyVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.KubeProxyVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OperatingSystem", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.OperatingSystem = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Architecture = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
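+// Unmarshal decodes a protobuf-encoded ObjectFieldSelector: two
+// length-delimited string fields, APIVersion (tag 1) and FieldPath (tag 2);
+// any unknown field is skipped via skipGenerated.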
+func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ObjectFieldSelector: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ObjectFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.APIVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FieldPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
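+// Unmarshal decodes a protobuf-encoded ObjectReference. Tags 1-7 are all
+// length-delimited strings (Kind, Namespace, Name, UID, APIVersion,
+// ResourceVersion, FieldPath); the UID bytes are converted to the
+// apimachinery UID type rather than a plain string.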
+func (m *ObjectReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kind = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.APIVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FieldPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
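+// Unmarshal decodes a protobuf-encoded PersistentVolume by delegating each
+// embedded message (ObjectMeta, Spec, Status) to that message's own
+// generated Unmarshal over its length-delimited sub-slice of dAtA.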
+func (m *PersistentVolume) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolume: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolume: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
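+// Unmarshal decodes a protobuf-encoded PersistentVolumeClaim; like
+// PersistentVolume above, it dispatches ObjectMeta, Spec, and Status to the
+// generated Unmarshal of each embedded message.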
+func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeClaim: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeClaim: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
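+// Unmarshal decodes a protobuf-encoded PersistentVolumeClaimCondition:
+// string fields for Type, Status, Reason, and Message, plus embedded Time
+// messages for LastProbeTime and LastTransitionTime, each decoded by its
+// own Unmarshal.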
+func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeClaimCondition: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeClaimCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = PersistentVolumeClaimConditionType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
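+// Unmarshal decodes a protobuf-encoded PersistentVolumeClaimList: a ListMeta
+// message (tag 1) followed by zero or more repeated Items entries (tag 2),
+// each appended to m.Items and decoded in place.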
+func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeClaimList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeClaimList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, PersistentVolumeClaim{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
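+// Unmarshal decodes a protobuf-encoded PersistentVolumeClaimSpec. Repeated
+// AccessModes arrive as one string per tag-1 entry; optional scalar fields
+// (StorageClassName, VolumeMode, VolumeAttributesClassName) are stored via
+// pointers to freshly copied values, and nil sub-messages (Selector,
+// DataSource, DataSourceRef) are allocated on first use before decoding.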
+func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeClaimSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Selector == nil {
+				m.Selector = &v1.LabelSelector{}
+			}
+			if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.StorageClassName = &s
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := PersistentVolumeMode(dAtA[iNdEx:postIndex])
+			m.VolumeMode = &s
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DataSource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DataSource == nil {
+				m.DataSource = &TypedLocalObjectReference{}
+			}
+			if err := m.DataSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DataSourceRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DataSourceRef == nil {
+				m.DataSourceRef = &TypedObjectReference{}
+			}
+			if err := m.DataSourceRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributesClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.VolumeAttributesClassName = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
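+// Unmarshal decodes a protobuf-encoded PersistentVolumeClaimStatus. The
+// ResourceList maps (Capacity at tag 3, AllocatedResources at tag 5) are
+// decoded as repeated map-entry messages: inner tag 1 carries the
+// ResourceName key, inner tag 2 a resource.Quantity value. Note the switch
+// has no case 6; that field number is unused by this type, so decoding
+// jumps from case 5 to case 7 (AllocatedResourceStatuses).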
+func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeClaimStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Phase = PersistentVolumeClaimPhase(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Capacity == nil {
+				m.Capacity = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Capacity[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, PersistentVolumeClaimCondition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AllocatedResources == nil {
+				m.AllocatedResources = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.AllocatedResources[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllocatedResourceStatuses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AllocatedResourceStatuses == nil {
+				m.AllocatedResourceStatuses = make(map[ResourceName]ClaimResourceStatus)
+			}
+			var mapkey ResourceName
+			var mapvalue ClaimResourceStatus
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = ClaimResourceStatus(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.AllocatedResourceStatuses[ResourceName(mapkey)] = ((ClaimResourceStatus)(mapvalue))
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CurrentVolumeAttributesClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.CurrentVolumeAttributesClassName = &s
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ModifyVolumeStatus", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ModifyVolumeStatus == nil {
+				m.ModifyVolumeStatus = &ModifyVolumeStatus{}
+			}
+			if err := m.ModifyVolumeStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
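+// Unmarshal decodes the protobuf wire format: each loop iteration reads a
+// varint tag, splits it into a field number and wire type, and dispatches to
+// the matching struct field; unrecognized fields are skipped via skipGenerated.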
+func (m *PersistentVolumeClaimTemplate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeClaimTemplate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeClaimTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
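+// Unmarshal decodes a PersistentVolumeClaimVolumeSource: ClaimName arrives as
+// a length-delimited string (wire type 2) and ReadOnly as a varint-encoded
+// bool (wire type 0).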
+func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClaimName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ClaimName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
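+// Unmarshal decodes a PersistentVolumeList; each repeated Items entry is
+// appended as a zero value and then unmarshaled in place.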
+func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, PersistentVolume{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
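+// Unmarshal decodes a PersistentVolumeSource; every field is an optional
+// message, so each case lazily allocates the plugin-specific pointer before
+// delegating to that type's Unmarshal.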
+func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GCEPersistentDisk == nil {
+				m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{}
+			}
+			if err := m.GCEPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AWSElasticBlockStore == nil {
+				m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{}
+			}
+			if err := m.AWSElasticBlockStore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.HostPath == nil {
+				m.HostPath = &HostPathVolumeSource{}
+			}
+			if err := m.HostPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Glusterfs == nil {
+				m.Glusterfs = &GlusterfsPersistentVolumeSource{}
+			}
+			if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NFS == nil {
+				m.NFS = &NFSVolumeSource{}
+			}
+			if err := m.NFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RBD == nil {
+				m.RBD = &RBDPersistentVolumeSource{}
+			}
+			if err := m.RBD.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ISCSI == nil {
+				m.ISCSI = &ISCSIPersistentVolumeSource{}
+			}
+			if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Cinder == nil {
+				m.Cinder = &CinderPersistentVolumeSource{}
+			}
+			if err := m.Cinder.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CephFS == nil {
+				m.CephFS = &CephFSPersistentVolumeSource{}
+			}
+			if err := m.CephFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.FC == nil {
+				m.FC = &FCVolumeSource{}
+			}
+			if err := m.FC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Flocker == nil {
+				m.Flocker = &FlockerVolumeSource{}
+			}
+			if err := m.Flocker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.FlexVolume == nil {
+				m.FlexVolume = &FlexPersistentVolumeSource{}
+			}
+			if err := m.FlexVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AzureFile == nil {
+				m.AzureFile = &AzureFilePersistentVolumeSource{}
+			}
+			if err := m.AzureFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.VsphereVolume == nil {
+				m.VsphereVolume = &VsphereVirtualDiskVolumeSource{}
+			}
+			if err := m.VsphereVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Quobyte == nil {
+				m.Quobyte = &QuobyteVolumeSource{}
+			}
+			if err := m.Quobyte.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 16:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AzureDisk == nil {
+				m.AzureDisk = &AzureDiskVolumeSource{}
+			}
+			if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 17:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PhotonPersistentDisk == nil {
+				m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{}
+			}
+			if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 18:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PortworxVolume == nil {
+				m.PortworxVolume = &PortworxVolumeSource{}
+			}
+			if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 19:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ScaleIO == nil {
+				m.ScaleIO = &ScaleIOPersistentVolumeSource{}
+			}
+			if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 20:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Local", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Local == nil {
+				m.Local = &LocalVolumeSource{}
+			}
+			if err := m.Local.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 21:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StorageOS", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.StorageOS == nil {
+				m.StorageOS = &StorageOSPersistentVolumeSource{}
+			}
+			if err := m.StorageOS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 22:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CSI == nil {
+				m.CSI = &CSIPersistentVolumeSource{}
+			}
+			if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
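+// Unmarshal decodes a PersistentVolumeSpec. Map fields such as Capacity are
+// encoded as repeated entry messages whose key is field 1 and whose
+// resource.Quantity value is field 2; each entry is decoded by the inner loop
+// before being stored in the map.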
+func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Capacity == nil {
+				m.Capacity = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Capacity[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeSource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.PersistentVolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClaimRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ClaimRef == nil {
+				m.ClaimRef = &ObjectReference{}
+			}
+			if err := m.ClaimRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeReclaimPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StorageClassName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := PersistentVolumeMode(dAtA[iNdEx:postIndex])
+			m.VolumeMode = &s
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NodeAffinity == nil {
+				m.NodeAffinity = &VolumeNodeAffinity{}
+			}
+			if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributesClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.VolumeAttributesClassName = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
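+// Unmarshal decodes a PersistentVolumeStatus; string-backed enum fields such
+// as Phase are converted directly from the byte slice, and the optional
+// LastPhaseTransitionTime message is allocated on first use.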
+func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Phase = PersistentVolumePhase(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastPhaseTransitionTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LastPhaseTransitionTime == nil {
+				m.LastPhaseTransitionTime = &v1.Time{}
+			}
+			if err := m.LastPhaseTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PdID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PdID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Pod) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Pod: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodAffinity) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodAffinity: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodAffinity: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{})
+			if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{})
+			if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodAffinityTerm: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LabelSelector == nil {
+				m.LabelSelector = &v1.LabelSelector{}
+			}
+			if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TopologyKey = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NamespaceSelector == nil {
+				m.NamespaceSelector = &v1.LabelSelector{}
+			}
+			if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelKeys", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MatchLabelKeys = append(m.MatchLabelKeys, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MismatchLabelKeys", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MismatchLabelKeys = append(m.MismatchLabelKeys, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{})
+			if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{})
+			if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodAttachOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stdin = bool(v != 0)
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stdout = bool(v != 0)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stderr = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TTY = bool(v != 0)
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Container = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodCondition) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodCondition: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = PodConditionType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodDNSConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodDNSConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodDNSConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Nameservers", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Nameservers = append(m.Nameservers, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Searches", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Searches = append(m.Searches, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Options = append(m.Options, PodDNSConfigOption{})
+			if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.Value = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodExecOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stdin = bool(v != 0)
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stdout = bool(v != 0)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stderr = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TTY = bool(v != 0)
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Container = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodIP) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodIP: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodIP: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IP = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, Pod{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodLogOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodLogOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodLogOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Container = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Follow = bool(v != 0)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Previous = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.SinceSeconds = &v
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SinceTime == nil {
+				m.SinceTime = &v1.Time{}
+			}
+			if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Timestamps = bool(v != 0)
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TailLines = &v
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.LimitBytes = &v
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerifyBackend", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.InsecureSkipTLSVerifyBackend = bool(v != 0)
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.Stream = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodOS) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodOS: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodOS: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = OSName(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodPortForwardOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodPortForwardOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType == 0 {
+				var v int32
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= int32(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.Ports = append(m.Ports, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthGenerated
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthGenerated
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.Ports) == 0 {
+					m.Ports = make([]int32, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v int32
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= int32(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Ports = append(m.Ports, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodProxyOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodProxyOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodReadinessGate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodReadinessGate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodReadinessGate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConditionType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ConditionType = PodConditionType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodResourceClaim) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodResourceClaim: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.ResourceClaimName = &s
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimTemplateName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.ResourceClaimTemplateName = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodResourceClaimStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodResourceClaimStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodResourceClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.ResourceClaimName = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodSchedulingGate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodSchedulingGate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodSchedulingGate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodSecurityContext) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodSecurityContext: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
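+			// Embedded message: allocate the target on first use, then
+			// recursively unmarshal the delimited sub-slice.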
+			if m.SELinuxOptions == nil {
+				m.SELinuxOptions = &SELinuxOptions{}
+			}
+			if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.RunAsUser = &v
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
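+			// The wire format has no bool; any non-zero varint decodes as true.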
+			b := bool(v != 0)
+			m.RunAsNonRoot = &b
+		case 4:
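+			// Repeated int64 may arrive unpacked (one varint per element, wire
+			// type 0) or packed (one length-delimited run of varints, wire
+			// type 2); both encodings must be accepted.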
+			if wireType == 0 {
+				var v int64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= int64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.SupplementalGroups = append(m.SupplementalGroups, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthGenerated
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthGenerated
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
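+				// Every varint ends on a byte below 0x80, so counting those
+				// bytes sizes the slice exactly.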
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.SupplementalGroups) == 0 {
+					m.SupplementalGroups = make([]int64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v int64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= int64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.SupplementalGroups = append(m.SupplementalGroups, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType)
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.FSGroup = &v
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.RunAsGroup = &v
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sysctls", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Sysctls = append(m.Sysctls, Sysctl{})
+			if err := m.Sysctls[len(m.Sysctls)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.WindowsOptions == nil {
+				m.WindowsOptions = &WindowsSecurityContextOptions{}
+			}
+			if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSGroupChangePolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
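+			// Typed string alias: convert the raw bytes directly into the
+			// named type.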
+			s := PodFSGroupChangePolicy(dAtA[iNdEx:postIndex])
+			m.FSGroupChangePolicy = &s
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SeccompProfile", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SeccompProfile == nil {
+				m.SeccompProfile = &SeccompProfile{}
+			}
+			if err := m.SeccompProfile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AppArmorProfile", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AppArmorProfile == nil {
+				m.AppArmorProfile = &AppArmorProfile{}
+			}
+			if err := m.AppArmorProfile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroupsPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := SupplementalGroupsPolicy(dAtA[iNdEx:postIndex])
+			m.SupplementalGroupsPolicy = &s
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SELinuxChangePolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := PodSELinuxChangePolicy(dAtA[iNdEx:postIndex])
+			m.SELinuxChangePolicy = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodSignature) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodSignature: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodSignature: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodController", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PodController == nil {
+				m.PodController = &v1.OwnerReference{}
+			}
+			if err := m.PodController.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
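+			// Repeated message: append a zero value and unmarshal into it in
+			// place, avoiding an extra copy.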
+			m.Volumes = append(m.Volumes, Volume{})
+			if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Containers = append(m.Containers, Container{})
+			if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RestartPolicy = RestartPolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TerminationGracePeriodSeconds = &v
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ActiveDeadlineSeconds = &v
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DNSPolicy = DNSPolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NodeSelector == nil {
+				m.NodeSelector = make(map[string]string)
+			}
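+			// Each occurrence of this field carries one map entry message:
+			// the key in field 1, the value in field 2.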
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.NodeSelector[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ServiceAccountName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedServiceAccount", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DeprecatedServiceAccount = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NodeName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.HostNetwork = bool(v != 0)
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.HostPID = bool(v != 0)
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.HostIPC = bool(v != 0)
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecurityContext == nil {
+				m.SecurityContext = &PodSecurityContext{}
+			}
+			if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{})
+			if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 16:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Hostname = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 17:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Subdomain = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 18:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Affinity == nil {
+				m.Affinity = &Affinity{}
+			}
+			if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 19:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SchedulerName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 20:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.InitContainers = append(m.InitContainers, Container{})
+			if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 21:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.AutomountServiceAccountToken = &b
+		case 22:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Tolerations = append(m.Tolerations, Toleration{})
+			if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 23:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostAliases", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.HostAliases = append(m.HostAliases, HostAlias{})
+			if err := m.HostAliases[len(m.HostAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 24:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PriorityClassName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 25:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Priority = &v
+		case 26:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DNSConfig == nil {
+				m.DNSConfig = &PodDNSConfig{}
+			}
+			if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 27:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ShareProcessNamespace", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.ShareProcessNamespace = &b
+		case 28:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadinessGates", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ReadinessGates = append(m.ReadinessGates, PodReadinessGate{})
+			if err := m.ReadinessGates[len(m.ReadinessGates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 29:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.RuntimeClassName = &s
+			iNdEx = postIndex
+		case 30:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EnableServiceLinks", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.EnableServiceLinks = &b
+		case 31:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := PreemptionPolicy(dAtA[iNdEx:postIndex])
+			m.PreemptionPolicy = &s
+			iNdEx = postIndex
+		case 32:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Overhead == nil {
+				m.Overhead = make(ResourceList)
+			}
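+			// ResourceList entry: field 1 holds the ResourceName key, field 2
+			// a resource.Quantity message value.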
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Overhead[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 33:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TopologySpreadConstraints", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, TopologySpreadConstraint{})
+			if err := m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 34:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{})
+			if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 35:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SetHostnameAsFQDN", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.SetHostnameAsFQDN = &b
+		case 36:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.OS == nil {
+				m.OS = &PodOS{}
+			}
+			if err := m.OS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 37:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostUsers", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.HostUsers = &b
+		case 38:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SchedulingGates", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SchedulingGates = append(m.SchedulingGates, PodSchedulingGate{})
+			if err := m.SchedulingGates[len(m.SchedulingGates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 39:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceClaims = append(m.ResourceClaims, PodResourceClaim{})
+			if err := m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 40:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Resources == nil {
+				m.Resources = &ResourceRequirements{}
+			}
+			if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Phase = PodPhase(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, PodCondition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.HostIP = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PodIP = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.StartTime == nil {
+				m.StartTime = &v1.Time{}
+			}
+			if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatuses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{})
+			if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field QOSClass", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.QOSClass = PodQOSClass(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InitContainerStatuses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.InitContainerStatuses = append(m.InitContainerStatuses, ContainerStatus{})
+			if err := m.InitContainerStatuses[len(m.InitContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NominatedNodeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NominatedNodeName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodIPs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PodIPs = append(m.PodIPs, PodIP{})
+			if err := m.PodIPs[len(m.PodIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerStatuses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.EphemeralContainerStatuses = append(m.EphemeralContainerStatuses, ContainerStatus{})
+			if err := m.EphemeralContainerStatuses[len(m.EphemeralContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resize", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Resize = PodResizeStatus(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaimStatuses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceClaimStatuses = append(m.ResourceClaimStatuses, PodResourceClaimStatus{})
+			if err := m.ResourceClaimStatuses[len(m.ResourceClaimStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 16:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostIPs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.HostIPs = append(m.HostIPs, HostIP{})
+			if err := m.HostIPs[len(m.HostIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodStatusResult) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodStatusResult: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodStatusResult: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodTemplate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodTemplate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodTemplate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodTemplateList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodTemplateList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodTemplateList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, PodTemplate{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PortStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PortStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PortStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+			}
+			m.Port = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Port |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Protocol = Protocol(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.Error = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PortworxVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PortworxVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Preconditions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Preconditions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			m.UID = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PreferAvoidPodsEntry: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PreferAvoidPodsEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodSignature", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.PodSignature.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EvictionTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.EvictionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PreferredSchedulingTerm: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PreferredSchedulingTerm: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType)
+			}
+			m.Weight = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Weight |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Preference", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Preference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Probe) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Probe: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Probe: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProbeHandler", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ProbeHandler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InitialDelaySeconds", wireType)
+			}
+			m.InitialDelaySeconds = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.InitialDelaySeconds |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
+			}
+			m.TimeoutSeconds = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TimeoutSeconds |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType)
+			}
+			m.PeriodSeconds = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PeriodSeconds |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SuccessThreshold", wireType)
+			}
+			m.SuccessThreshold = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.SuccessThreshold |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FailureThreshold", wireType)
+			}
+			m.FailureThreshold = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.FailureThreshold |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TerminationGracePeriodSeconds = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ProbeHandler) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ProbeHandler: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ProbeHandler: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Exec == nil {
+				m.Exec = &ExecAction{}
+			}
+			if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.HTTPGet == nil {
+				m.HTTPGet = &HTTPGetAction{}
+			}
+			if err := m.HTTPGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.TCPSocket == nil {
+				m.TCPSocket = &TCPSocketAction{}
+			}
+			if err := m.TCPSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GRPC", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GRPC == nil {
+				m.GRPC = &GRPCAction{}
+			}
+			if err := m.GRPC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ProjectedVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ProjectedVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Sources = append(m.Sources, VolumeProjection{})
+			if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.DefaultMode = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: QuobyteVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: QuobyteVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Registry = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Volume = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.User = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Group = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Tenant = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RBDPersistentVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RBDPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CephMonitors = append(m.CephMonitors, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RBDImage = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RBDPool = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RadosUser = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Keyring = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
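+			// Allocate the nested message lazily so an absent field stays
+			// nil, then decode the sub-message bounded by postIndex.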
+			if m.SecretRef == nil {
+				m.SecretRef = &SecretReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
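+			// Booleans travel as varints on the wire; any nonzero value
+			// decodes to true.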
+			m.ReadOnly = bool(v != 0)
+		default:
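+			// Unknown field: rewind to the start of the tag and let
+			// skipGenerated compute the field's full size, so messages
+			// written by newer API versions still decode.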
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
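+// Unmarshal decodes an RBDVolumeSource from protobuf wire format. Each
+// loop iteration reads a varint-encoded tag, splits it into the field
+// number (upper bits) and wire type (low three bits), and dispatches on
+// the field number; wire type 2 carries length-delimited strings and
+// nested messages.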
+func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RBDVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RBDVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CephMonitors = append(m.CephMonitors, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RBDImage = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RBDPool = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RadosUser = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Keyring = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &LocalObjectReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
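+// Unmarshal decodes a RangeAllocation. Wire type 4 (the deprecated
+// end-group marker) and non-positive field numbers are rejected up
+// front, since either indicates a corrupt buffer.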
+func (m *RangeAllocation) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RangeAllocation: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RangeAllocation: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Range = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
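+			// Copy the raw bytes into m.Data, reusing any existing backing
+			// array, and normalize a nil slice to an empty one so the field
+			// round-trips as present rather than absent.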
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ReplicationController) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ReplicationController: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ReplicationController: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ReplicationControllerCondition: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ReplicationControllerCondition: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = ReplicationControllerConditionType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Status = ConditionStatus(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ReplicationControllerList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ReplicationControllerList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, ReplicationController{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ReplicationControllerSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ReplicationControllerSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
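+			// Store through a pointer so an unset replica count can be
+			// distinguished from an explicit zero.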
+			m.Replicas = &v
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Selector == nil {
+				m.Selector = make(map[string]string)
+			}
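+			// Protobuf encodes each map entry as a nested message whose
+			// field 1 is the key and field 2 is the value; parse one
+			// entry's fields until its end offset (postIndex) is reached.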
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Selector[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Template == nil {
+				m.Template = &PodTemplateSpec{}
+			}
+			if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+			}
+			m.MinReadySeconds = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MinReadySeconds |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ReplicationControllerStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ReplicationControllerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+			}
+			m.Replicas = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Replicas |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType)
+			}
+			m.FullyLabeledReplicas = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.FullyLabeledReplicas |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType)
+			}
+			m.ObservedGeneration = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ObservedGeneration |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType)
+			}
+			m.ReadyReplicas = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ReadyReplicas |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType)
+			}
+			m.AvailableReplicas = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.AvailableReplicas |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, ReplicationControllerCondition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceClaim) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceClaim: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceClaim: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Request = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Resource = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Divisor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceHealth) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceHealth: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceHealth: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceID = ResourceID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Health", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Health = ResourceHealthStatus(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceQuota) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceQuota: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceQuotaList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, ResourceQuota{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceQuotaSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Hard == nil {
+				m.Hard = make(ResourceList)
+			}
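+			// Map values here are resource.Quantity messages rather than
+			// strings, so the entry's field 2 is itself unmarshalled as a
+			// nested message before being stored by value.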
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Hard[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Scopes = append(m.Scopes, ResourceQuotaScope(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ScopeSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ScopeSelector == nil {
+				m.ScopeSelector = &ScopeSelector{}
+			}
+			if err := m.ScopeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceQuotaStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceQuotaStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Hard == nil {
+				m.Hard = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Hard[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Used", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Used == nil {
+				m.Used = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Used[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceRequirements) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Limits == nil {
+				m.Limits = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Limits[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Requests == nil {
+				m.Requests = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Requests[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Claims", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Claims = append(m.Claims, ResourceClaim{})
+			if err := m.Claims[len(m.Claims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = ResourceName(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Resources = append(m.Resources, ResourceHealth{})
+			if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SELinuxOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.User = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Level = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ScaleIOPersistentVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ScaleIOPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Gateway = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field System", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.System = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &SecretReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SSLEnabled", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.SSLEnabled = bool(v != 0)
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ProtectionDomain = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StoragePool = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StorageMode = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ScaleIOVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ScaleIOVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Gateway = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field System", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.System = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &LocalObjectReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SSLEnabled", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.SSLEnabled = bool(v != 0)
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ProtectionDomain = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StoragePool = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StorageMode = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ScopeSelector) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ScopeSelector: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ScopeSelector: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MatchExpressions = append(m.MatchExpressions, ScopedResourceSelectorRequirement{})
+			if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ScopedResourceSelectorRequirement: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ScopedResourceSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ScopeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ScopeName = ResourceQuotaScope(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Operator = ScopeSelectorOperator(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SeccompProfile) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SeccompProfile: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SeccompProfile: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = SeccompProfileType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LocalhostProfile", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.LocalhostProfile = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Secret) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Secret: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Data == nil {
+				m.Data = make(map[string][]byte)
+			}
+			var mapkey string
+			mapvalue := []byte{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapbyteLen uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapbyteLen |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intMapbyteLen := int(mapbyteLen)
+					if intMapbyteLen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postbytesIndex := iNdEx + intMapbyteLen
+					if postbytesIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postbytesIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = make([]byte, mapbyteLen)
+					copy(mapvalue, dAtA[iNdEx:postbytesIndex])
+					iNdEx = postbytesIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Data[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = SecretType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.StringData == nil {
+				m.StringData = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.StringData[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Immutable", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Immutable = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SecretEnvSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SecretEnvSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SecretEnvSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
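+			// Length-delimited field: validate the declared length against the
+			// remaining buffer before slicing into it.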
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
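+			// Bools are varint-encoded: any non-zero value decodes to true. The
+			// result is stored by pointer because the field is optional.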
+			b := bool(v != 0)
+			m.Optional = &b
+		default:
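+			// Unknown field number: skip its payload so data written by newer
+			// schema revisions still parses.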
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
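+	// The main loop exits once iNdEx >= len(dAtA); an index past the end of the
+	// buffer means the final field overran its declared data.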
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SecretKeySelector) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SecretKeySelector: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SecretKeySelector: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Optional = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SecretList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SecretList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
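+			// Repeated message field: append a zero value, then unmarshal
+			// directly into the newly added element.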
+			m.Items = append(m.Items, Secret{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SecretProjection) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SecretProjection: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SecretProjection: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, KeyToPath{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Optional = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SecretReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SecretReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SecretReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SecretVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SecretVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SecretName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, KeyToPath{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
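+			// Optional scalar: decode into a local and store its address so an
+			// unset field remains distinguishable from an explicit zero.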
+			m.DefaultMode = &v
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Optional = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SecurityContext) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SecurityContext: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SecurityContext: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Capabilities == nil {
+				m.Capabilities = &Capabilities{}
+			}
+			if err := m.Capabilities.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Privileged = &b
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SELinuxOptions == nil {
+				m.SELinuxOptions = &SELinuxOptions{}
+			}
+			if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.RunAsUser = &v
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.RunAsNonRoot = &b
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.ReadOnlyRootFilesystem = &b
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.AllowPrivilegeEscalation = &b
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.RunAsGroup = &v
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProcMount", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
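+			// String-typed enum: convert the raw bytes and take the address of
+			// the local, since ProcMount is a pointer field.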
+			s := ProcMountType(dAtA[iNdEx:postIndex])
+			m.ProcMount = &s
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.WindowsOptions == nil {
+				m.WindowsOptions = &WindowsSecurityContextOptions{}
+			}
+			if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SeccompProfile", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SeccompProfile == nil {
+				m.SeccompProfile = &SeccompProfile{}
+			}
+			if err := m.SeccompProfile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AppArmorProfile", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AppArmorProfile == nil {
+				m.AppArmorProfile = &AppArmorProfile{}
+			}
+			if err := m.AppArmorProfile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SerializedReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Service) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Service: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ServiceAccount) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secrets = append(m.Secrets, ObjectReference{})
+			if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{})
+			if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.AutomountServiceAccountToken = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ServiceAccountList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, ServiceAccount{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ServiceAccountTokenProjection: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ServiceAccountTokenProjection: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Audience", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Audience = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ExpirationSeconds = &v
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ServiceList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ServiceList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, Service{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ServicePort) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ServicePort: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Protocol = Protocol(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+			}
+			m.Port = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Port |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType)
+			}
+			m.NodePort = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.NodePort |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.AppProtocol = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ServiceProxyOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ServiceProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ServiceSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ports = append(m.Ports, ServicePort{})
+			if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Selector == nil {
+				m.Selector = make(map[string]string)
+			}
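+			// Map fields arrive as repeated entry messages (key = field 1,
+			// value = field 2); each occurrence of the outer field carries one entry.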
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Selector[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ClusterIP = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = ServiceType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SessionAffinity = ServiceAffinity(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.LoadBalancerIP = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExternalName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExternalTrafficPolicy = ServiceExternalTrafficPolicy(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType)
+			}
+			m.HealthCheckNodePort = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.HealthCheckNodePort |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PublishNotReadyAddresses", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.PublishNotReadyAddresses = bool(v != 0)
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinityConfig", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SessionAffinityConfig == nil {
+				m.SessionAffinityConfig = &SessionAffinityConfig{}
+			}
+			if err := m.SessionAffinityConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 17:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IPFamilyPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := IPFamilyPolicy(dAtA[iNdEx:postIndex])
+			m.IPFamilyPolicy = &s
+			iNdEx = postIndex
+		case 18:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClusterIPs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ClusterIPs = append(m.ClusterIPs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 19:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IPFamilies", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IPFamilies = append(m.IPFamilies, IPFamily(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 20:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AllocateLoadBalancerNodePorts", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.AllocateLoadBalancerNodePorts = &b
+		case 21:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerClass", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.LoadBalancerClass = &s
+			iNdEx = postIndex
+		case 22:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InternalTrafficPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := ServiceInternalTrafficPolicy(dAtA[iNdEx:postIndex])
+			m.InternalTrafficPolicy = &s
+			iNdEx = postIndex
+		case 23:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TrafficDistribution", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.TrafficDistribution = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
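+// The Unmarshal methods below all follow the same gogo/protobuf shape:
+// each loop iteration decodes a varint tag, splits it into a field number
+// (wire >> 3) and a wire type (wire & 0x7), then dispatches on the field
+// number. Length-delimited fields (wire type 2) are bounds-checked against
+// postIndex before slicing dAtA, and unknown field numbers are skipped via
+// skipGenerated so payloads from newer producers still decode.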
+func (m *ServiceStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, v1.Condition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SessionAffinityConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SessionAffinityConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClientIP", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ClientIP == nil {
+				m.ClientIP = &ClientIPConfig{}
+			}
+			if err := m.ClientIP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
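+// SleepAction is the minimal case: a single varint field (wire type 0).
+// Seconds is rebuilt 7 bits per byte, least-significant group first, until
+// a byte with the high (continuation) bit clear ends the sequence.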
+func (m *SleepAction) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SleepAction: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SleepAction: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
+			}
+			m.Seconds = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Seconds |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StorageOSPersistentVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StorageOSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeNamespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &ObjectReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StorageOSVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StorageOSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeNamespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SecretRef == nil {
+				m.SecretRef = &LocalObjectReference{}
+			}
+			if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Sysctl) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Sysctl: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Sysctl: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TCPSocketAction) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TCPSocketAction: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TCPSocketAction: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Host = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
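+// Taint's optional TimeAdded message is allocated lazily: the nil check in
+// case 4 creates the *v1.Time only when the field is present on the wire,
+// preserving the distinction between an absent and a zero-valued timestamp.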
+func (m *Taint) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Taint: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Taint: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Effect = TaintEffect(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.TimeAdded == nil {
+				m.TimeAdded = &v1.Time{}
+			}
+			if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Toleration) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Toleration: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Operator = TolerationOperator(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Effect = TaintEffect(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TolerationSeconds = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TopologySelectorLabelRequirement: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TopologySelectorLabelRequirement: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Values = append(m.Values, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TopologySelectorTerm: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TopologySelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelExpressions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MatchLabelExpressions = append(m.MatchLabelExpressions, TopologySelectorLabelRequirement{})
+			if err := m.MatchLabelExpressions[len(m.MatchLabelExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TopologySpreadConstraint: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TopologySpreadConstraint: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MaxSkew", wireType)
+			}
+			m.MaxSkew = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MaxSkew |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TopologyKey = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WhenUnsatisfiable", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.WhenUnsatisfiable = UnsatisfiableConstraintAction(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LabelSelector == nil {
+				m.LabelSelector = &v1.LabelSelector{}
+			}
+			if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MinDomains", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.MinDomains = &v
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinityPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := NodeInclusionPolicy(dAtA[iNdEx:postIndex])
+			m.NodeAffinityPolicy = &s
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeTaintsPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := NodeInclusionPolicy(dAtA[iNdEx:postIndex])
+			m.NodeTaintsPolicy = &s
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelKeys", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MatchLabelKeys = append(m.MatchLabelKeys, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
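+// Optional string fields such as APIGroup are decoded into a local and
+// stored by address (m.APIGroup = &s), so a field that never appears on
+// the wire stays nil instead of collapsing to the empty string.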
+func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TypedLocalObjectReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TypedLocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.APIGroup = &s
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kind = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
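
Editor's note: every Unmarshal method in this vendored file repeats the same hand-rolled protobuf decoder, so it is worth spelling the core move out once. A field tag is a base-128 varint whose low three bits carry the wire type and whose remaining bits carry the field number; the 7-bits-per-byte loop below is a minimal standalone sketch of the inline loops above (illustrative only, not part of the diff or the vendored file).

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inline loops in the generated code: each byte
// contributes its low 7 bits, and a clear high bit ends the value.
func decodeVarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits") // the ErrIntOverflowGenerated case
		}
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF") // the io.ErrUnexpectedEOF case
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of the varint
			return v, i, nil
		}
	}
}

func main() {
	// 0x96 0x01 encodes 150: 0x16 | (0x01 << 7).
	v, next, err := decodeVarint([]byte{0x96, 0x01}, 0)
	fmt.Println(v, next, err) // 150 2 <nil>

	// Splitting a tag exactly as the generated code does:
	tag, _, _ := decodeVarint([]byte{0x0A}, 0)
	fmt.Println(tag>>3, tag&0x7) // field 1, wire type 2
}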
+func (m *TypedObjectReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TypedObjectReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TypedObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.APIGroup = &s
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kind = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.Namespace = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Volume) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Volume: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.VolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
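
Both field shapes seen in Volume.Unmarshal above — the Name string and the nested VolumeSource message — arrive as wire type 2: a varint length followed by that many payload bytes. The generated code computes postIndex, bounds-checks it, then either copies the bytes into a string or hands them to the sub-message's own Unmarshal. A hedged sketch of that framing (illustrative only, not part of the diff):

package main

import (
	"errors"
	"fmt"
)

// readLengthDelimited returns the payload of a wire-type-2 field whose
// length varint starts at data[i], plus the index just past it. The checks
// mirror the postIndex guards in the generated code.
func readLengthDelimited(data []byte, i int) ([]byte, int, error) {
	var n int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return nil, 0, errors.New("length varint overflow")
		}
		if i >= len(data) {
			return nil, 0, errors.New("unexpected EOF in length")
		}
		b := data[i]
		i++
		n |= int(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if n < 0 { // overflowed int: the ErrInvalidLengthGenerated case
		return nil, 0, errors.New("invalid length")
	}
	post := i + n
	if post < 0 || post > len(data) {
		return nil, 0, errors.New("length runs past buffer")
	}
	return data[i:post], post, nil
}

func main() {
	// Length 5, then "hello"; a nested message body is framed identically
	// and would be handed to its own Unmarshal.
	payload, next, err := readLengthDelimited([]byte{0x05, 'h', 'e', 'l', 'l', 'o'}, 0)
	fmt.Printf("%q %d %v\n", payload, next, err) // "hello" 6 <nil>
}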
+func (m *VolumeDevice) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: VolumeDevice: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: VolumeDevice: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DevicePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *VolumeMount) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: VolumeMount: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MountPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SubPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MountPropagation", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := MountPropagationMode(dAtA[iNdEx:postIndex])
+			m.MountPropagation = &s
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SubPathExpr", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SubPathExpr = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RecursiveReadOnly", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := RecursiveReadOnlyMode(dAtA[iNdEx:postIndex])
+			m.RecursiveReadOnly = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
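
Two smaller idioms appear in VolumeMount.Unmarshal above. Booleans such as ReadOnly travel as wire-type-0 varints, so the decoder maps any nonzero value to true; and optional string-backed enums such as MountPropagation and RecursiveReadOnly are decoded into a local and stored through a pointer, keeping "field absent" distinct from the empty string. A minimal sketch with a stand-in type (illustrative only, not part of the diff):

package main

import "fmt"

// Stand-in for the k8s.io/api/core/v1 string-backed enum.
type MountPropagationMode string

func main() {
	var v uint64 = 1      // decoded varint for the ReadOnly field
	readOnly := v != 0    // exactly what bool(v != 0) does above
	fmt.Println(readOnly) // true

	s := MountPropagationMode("Bidirectional")
	var mp *MountPropagationMode = &s // nil would mean the field was never sent
	fmt.Println(mp != nil, *mp)
}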
+func (m *VolumeMountStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: VolumeMountStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: VolumeMountStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MountPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RecursiveReadOnly", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := RecursiveReadOnlyMode(dAtA[iNdEx:postIndex])
+			m.RecursiveReadOnly = &s
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: VolumeNodeAffinity: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: VolumeNodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Required == nil {
+				m.Required = &NodeSelector{}
+			}
+			if err := m.Required.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
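
The default branch repeated in every switch above is what makes these decoders forward compatible: on an unknown field number the index rewinds to preIndex and skipGenerated reports the full width of the field, tag included, so payloads produced by newer API versions can be stepped over. The skipField below is only an illustration of that contract for the two wire types used in this file, not the vendored skipGenerated itself:

package main

import (
	"errors"
	"fmt"
)

// skipField returns how many bytes the field starting at data[i] occupies,
// tag varint included, for wire type 0 (varint) and 2 (length-delimited).
func skipField(data []byte, i int) (int, error) {
	start := i
	readVarint := func() (uint64, error) {
		var v uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, errors.New("varint overflow")
			}
			if i >= len(data) {
				return 0, errors.New("unexpected EOF")
			}
			b := data[i]
			i++
			v |= uint64(b&0x7F) << shift
			if b < 0x80 {
				return v, nil
			}
		}
	}
	tag, err := readVarint()
	if err != nil {
		return 0, err
	}
	switch tag & 0x7 {
	case 0: // varint value: consume one more varint
		if _, err := readVarint(); err != nil {
			return 0, err
		}
	case 2: // length-delimited: consume the length, then the payload
		n, err := readVarint()
		if err != nil {
			return 0, err
		}
		if i+int(n) > len(data) {
			return 0, errors.New("length runs past buffer")
		}
		i += int(n)
	default:
		return 0, fmt.Errorf("unhandled wire type %d", tag&0x7)
	}
	return i - start, nil
}

func main() {
	// Unknown field 99 (tag 99<<3|0, varint-encoded as 0x98 0x06) with value 1.
	n, err := skipField([]byte{0x98, 0x06, 0x01}, 0)
	fmt.Println(n, err) // 3 <nil>
}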
+func (m *VolumeProjection) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: VolumeProjection: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: VolumeProjection: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Secret == nil {
+				m.Secret = &SecretProjection{}
+			}
+			if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DownwardAPI == nil {
+				m.DownwardAPI = &DownwardAPIProjection{}
+			}
+			if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ConfigMap == nil {
+				m.ConfigMap = &ConfigMapProjection{}
+			}
+			if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountToken", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ServiceAccountToken == nil {
+				m.ServiceAccountToken = &ServiceAccountTokenProjection{}
+			}
+			if err := m.ServiceAccountToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClusterTrustBundle", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ClusterTrustBundle == nil {
+				m.ClusterTrustBundle = &ClusterTrustBundleProjection{}
+			}
+			if err := m.ClusterTrustBundle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
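
VolumeProjection.Unmarshal above also shows how these APIs model a union without a protobuf oneof: each candidate source is an optional message pointer, allocated only when its field number actually appears on the wire, so consumers recover "which source is set" by nil-checking. A sketch with hypothetical stand-in types (illustrative only, not part of the diff):

package main

import "fmt"

// Stand-ins for the k8s.io/api projection types.
type SecretProjection struct{ Name string }
type ConfigMapProjection struct{ Name string }

type VolumeProjection struct {
	Secret    *SecretProjection
	ConfigMap *ConfigMapProjection
}

// describe resolves the union the same way callers of the decoded struct do.
func describe(p VolumeProjection) string {
	switch {
	case p.Secret != nil:
		return "secret " + p.Secret.Name
	case p.ConfigMap != nil:
		return "configMap " + p.ConfigMap.Name
	default:
		return "empty projection"
	}
}

func main() {
	fmt.Println(describe(VolumeProjection{Secret: &SecretProjection{Name: "tls"}}))
}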
+func (m *VolumeResourceRequirements) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: VolumeResourceRequirements: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: VolumeResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Limits == nil {
+				m.Limits = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Limits[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Requests == nil {
+				m.Requests = make(ResourceList)
+			}
+			var mapkey ResourceName
+			mapvalue := &resource.Quantity{}
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &resource.Quantity{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Requests[ResourceName(mapkey)] = *mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
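
The most intricate pattern in this file is the map decoding in VolumeResourceRequirements.Unmarshal above: a protobuf map such as ResourceList is encoded as repeated entry messages with the key in field 1 and the value in field 2, and the generated code decodes each entry inline before writing it into the Go map. The same shape in miniature, assuming single-byte tags and lengths and treating the Quantity value as an opaque nested message (illustrative only, not part of the diff):

package main

import "fmt"

func main() {
	// One Limits entry: key "cpu" (field 1, wire type 2), then a value
	// message (field 2, wire type 2) whose body we keep as raw bytes.
	entry := []byte{
		0x0A, 0x03, 'c', 'p', 'u', // field 1: map key
		0x12, 0x03, 0x0A, 0x01, '2', // field 2: map value (nested message)
	}
	var key string
	var value []byte
	for i := 0; i < len(entry); {
		tag, n := int(entry[i]), int(entry[i+1]) // single-byte tag + length
		body := entry[i+2 : i+2+n]
		i += 2 + n
		switch tag >> 3 {
		case 1:
			key = string(body)
		case 2:
			value = body // a real decoder calls Quantity.Unmarshal here
		}
	}
	limits := map[string][]byte{key: value}
	fmt.Printf("%s -> % x\n", key, limits[key]) // cpu -> 0a 01 32
}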
+func (m *VolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: VolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: VolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.HostPath == nil {
+				m.HostPath = &HostPathVolumeSource{}
+			}
+			if err := m.HostPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EmptyDir", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.EmptyDir == nil {
+				m.EmptyDir = &EmptyDirVolumeSource{}
+			}
+			if err := m.EmptyDir.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GCEPersistentDisk == nil {
+				m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{}
+			}
+			if err := m.GCEPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AWSElasticBlockStore == nil {
+				m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{}
+			}
+			if err := m.AWSElasticBlockStore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitRepo", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GitRepo == nil {
+				m.GitRepo = &GitRepoVolumeSource{}
+			}
+			if err := m.GitRepo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Secret == nil {
+				m.Secret = &SecretVolumeSource{}
+			}
+			if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NFS == nil {
+				m.NFS = &NFSVolumeSource{}
+			}
+			if err := m.NFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ISCSI == nil {
+				m.ISCSI = &ISCSIVolumeSource{}
+			}
+			if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Glusterfs == nil {
+				m.Glusterfs = &GlusterfsVolumeSource{}
+			}
+			if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaim", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PersistentVolumeClaim == nil {
+				m.PersistentVolumeClaim = &PersistentVolumeClaimVolumeSource{}
+			}
+			if err := m.PersistentVolumeClaim.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RBD == nil {
+				m.RBD = &RBDVolumeSource{}
+			}
+			if err := m.RBD.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.FlexVolume == nil {
+				m.FlexVolume = &FlexVolumeSource{}
+			}
+			if err := m.FlexVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Cinder == nil {
+				m.Cinder = &CinderVolumeSource{}
+			}
+			if err := m.Cinder.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CephFS == nil {
+				m.CephFS = &CephFSVolumeSource{}
+			}
+			if err := m.CephFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Flocker == nil {
+				m.Flocker = &FlockerVolumeSource{}
+			}
+			if err := m.Flocker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 16:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DownwardAPI == nil {
+				m.DownwardAPI = &DownwardAPIVolumeSource{}
+			}
+			if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 17:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.FC == nil {
+				m.FC = &FCVolumeSource{}
+			}
+			if err := m.FC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 18:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AzureFile == nil {
+				m.AzureFile = &AzureFileVolumeSource{}
+			}
+			if err := m.AzureFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 19:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ConfigMap == nil {
+				m.ConfigMap = &ConfigMapVolumeSource{}
+			}
+			if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 20:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.VsphereVolume == nil {
+				m.VsphereVolume = &VsphereVirtualDiskVolumeSource{}
+			}
+			if err := m.VsphereVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 21:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Quobyte == nil {
+				m.Quobyte = &QuobyteVolumeSource{}
+			}
+			if err := m.Quobyte.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 22:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AzureDisk == nil {
+				m.AzureDisk = &AzureDiskVolumeSource{}
+			}
+			if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 23:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PhotonPersistentDisk == nil {
+				m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{}
+			}
+			if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 24:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PortworxVolume == nil {
+				m.PortworxVolume = &PortworxVolumeSource{}
+			}
+			if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 25:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ScaleIO == nil {
+				m.ScaleIO = &ScaleIOVolumeSource{}
+			}
+			if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 26:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Projected", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Projected == nil {
+				m.Projected = &ProjectedVolumeSource{}
+			}
+			if err := m.Projected.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 27:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StorageOS", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.StorageOS == nil {
+				m.StorageOS = &StorageOSVolumeSource{}
+			}
+			if err := m.StorageOS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 28:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CSI == nil {
+				m.CSI = &CSIVolumeSource{}
+			}
+			if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 29:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ephemeral", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Ephemeral == nil {
+				m.Ephemeral = &EphemeralVolumeSource{}
+			}
+			if err := m.Ephemeral.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 30:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Image == nil {
+				m.Image = &ImageVolumeSource{}
+			}
+			if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
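All of the Unmarshal methods above lean on the same base-128 varint scheme: each byte carries 7 payload bits in its low bits, and a set high bit means another byte follows. A minimal, self-contained sketch of that decoder (the decodeVarint helper is hypothetical, not part of the generated file):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inline loops above: accumulate 7 bits per byte,
// stop on the first byte whose high bit is clear, and reject values that
// would need more than 64 bits.
func decodeVarint(dAtA []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if n >= len(dAtA) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := dAtA[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return v, n, nil
		}
	}
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02}) // these two bytes encode 300
	fmt.Println(v, n, err)                        // 300 2 <nil>
}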
+func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumePath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FSType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StoragePolicyName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StoragePolicyName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StoragePolicyID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StoragePolicyID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
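String fields such as VolumePath and FSType above arrive with wire type 2 (length-delimited): a varint byte count followed by exactly that many raw bytes, which is why the method computes postIndex and bounds-checks it before slicing. A hedged sketch of the same step, reusing the hypothetical decodeVarint from the earlier example:

// decodeString reads a length-delimited field body: first the varint
// length, then that many bytes, with the same overflow and EOF checks
// the generated code performs via intStringLen/postIndex.
func decodeString(dAtA []byte) (s string, n int, err error) {
	l, n, err := decodeVarint(dAtA)
	if err != nil {
		return "", 0, err
	}
	end := n + int(l)
	if int(l) < 0 || end < n || end > len(dAtA) {
		return "", 0, errors.New("invalid length")
	}
	return string(dAtA[n:end]), end, nil
}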
+func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WeightedPodAffinityTerm: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WeightedPodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType)
+			}
+			m.Weight = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Weight |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodAffinityTerm", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.PodAffinityTerm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
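The key varint decoded at the top of each loop packs two values together, which the `fieldNum := int32(wire >> 3)` and `wireType := int(wire & 0x7)` lines then split apart. For WeightedPodAffinityTerm this works out as follows (an illustrative sketch, not generated code):

// Weight is field 1 with wire type 0 (varint): key = (1<<3)|0 = 0x08.
// PodAffinityTerm is field 2 with wire type 2 (length-delimited):
// key = (2<<3)|2 = 0x12.
func splitKey(wire uint64) (fieldNum int32, wireType int) {
	return int32(wire >> 3), int(wire & 0x7)
}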
+func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WindowsSecurityContextOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WindowsSecurityContextOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpecName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.GMSACredentialSpecName = &s
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpec", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.GMSACredentialSpec = &s
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RunAsUserName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.RunAsUserName = &s
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HostProcess", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.HostProcess = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
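The HostProcess branch (case 4) above shows how proto2 optional scalars round-trip into Go: the bool is decoded as a varint (any nonzero value is true) and stored through a pointer, so an absent field (nil) stays distinguishable from an explicit false. A minimal sketch of that convention:

// decodeOptionalBool mirrors the case 4 branch above: v is the decoded
// varint payload; the pointer lets callers tell "unset" from "false".
func decodeOptionalBool(v uint64) *bool {
	b := v != 0
	return &b
}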
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
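skipGenerated is what makes the `default:` branches above tolerant of unknown fields: it consumes exactly one complete field, using the wire type to decide how many bytes to skip and the depth counter to balance start/end-group markers. A worked example under assumed input, in the same package as skipGenerated:

func exampleSkip() {
	// Field 2, wire type 2, length 3: one key byte, one length byte,
	// and three payload bytes, so skipGenerated consumes 5 bytes.
	n, err := skipGenerated([]byte{0x12, 0x03, 'a', 'b', 'c'})
	fmt.Println(n, err) // 5 <nil>
}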
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/generated.proto b/hack/tools/vendor/k8s.io/api/core/v1/generated.proto
new file mode 100644
index 0000000000..08706987c5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/generated.proto
@@ -0,0 +1,6920 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package k8s.io.api.core.v1;
+
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/api/core/v1";
+
+// Represents a Persistent Disk resource in AWS.
+//
+// An AWS EBS disk must exist before mounting to a container. The disk
+// must also be in the same AWS zone as the kubelet. An AWS EBS disk
+// can only be mounted as read/write once. AWS EBS volumes support
+// ownership management and SELinux relabeling.
+message AWSElasticBlockStoreVolumeSource {
+  // volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+  optional string volumeID = 1;
+
+  // fsType is the filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 2;
+
+  // partition is the partition in the volume that you want to mount.
+  // If omitted, the default is to mount by volume name.
+  // Examples: For volume /dev/sda1, you specify the partition as "1".
+  // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+  // +optional
+  optional int32 partition = 3;
+
+  // readOnly value true will force the readOnly setting in VolumeMounts.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+  // +optional
+  optional bool readOnly = 4;
+}
+
+// Affinity is a group of affinity scheduling rules.
+message Affinity {
+  // Describes node affinity scheduling rules for the pod.
+  // +optional
+  optional NodeAffinity nodeAffinity = 1;
+
+  // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+  // +optional
+  optional PodAffinity podAffinity = 2;
+
+  // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+  // +optional
+  optional PodAntiAffinity podAntiAffinity = 3;
+}
+
+// AppArmorProfile defines a pod or container's AppArmor settings.
+// +union
+message AppArmorProfile {
+  // type indicates which kind of AppArmor profile will be applied.
+  // Valid options are:
+  //   Localhost - a profile pre-loaded on the node.
+  //   RuntimeDefault - the container runtime's default profile.
+  //   Unconfined - no AppArmor enforcement.
+  // +unionDiscriminator
+  optional string type = 1;
+
+  // localhostProfile indicates a profile loaded on the node that should be used.
+  // The profile must be preconfigured on the node to work.
+  // Must match the loaded name of the profile.
+  // Must be set if and only if type is "Localhost".
+  // +optional
+  optional string localhostProfile = 2;
+}
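The comments above state an invariant that a short sketch makes explicit: localhostProfile must be set if and only if type is "Localhost". A hedged Go validation sketch (hypothetical helper, not part of this API; assumes fmt is imported):

// validateAppArmorProfile enforces the documented union rule:
// localhostProfile is required for type "Localhost" and forbidden otherwise.
func validateAppArmorProfile(profileType string, localhostProfile *string) error {
	if (profileType == "Localhost") != (localhostProfile != nil) {
		return fmt.Errorf("localhostProfile must be set if and only if type is \"Localhost\"")
	}
	return nil
}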
+
+// AttachedVolume describes a volume attached to a node
+message AttachedVolume {
+  // Name of the attached volume
+  optional string name = 1;
+
+  // DevicePath represents the device path where the volume should be available
+  optional string devicePath = 2;
+}
+
+// AvoidPods describes pods that should avoid this node. This is the value for a
+// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
+// will eventually become a field of NodeStatus.
+message AvoidPods {
+  // Bounded-sized list of signatures of pods that should avoid this node, sorted
+  // in timestamp order from oldest to newest. Size of the slice is unspecified.
+  // +optional
+  // +listType=atomic
+  repeated PreferAvoidPodsEntry preferAvoidPods = 1;
+}
+
+// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+message AzureDiskVolumeSource {
+  // diskName is the Name of the data disk in the blob storage
+  optional string diskName = 1;
+
+  // diskURI is the URI of data disk in the blob storage
+  optional string diskURI = 2;
+
+  // cachingMode is the Host Caching mode: None, Read Only, Read Write.
+  // +optional
+  // +default=ref(AzureDataDiskCachingReadWrite)
+  optional string cachingMode = 3;
+
+  // fsType is Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // +optional
+  // +default="ext4"
+  optional string fsType = 4;
+
+  // readOnly Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  // +default=false
+  optional bool readOnly = 5;
+
+  // kind expected values are Shared: multiple blob disks per storage account; Dedicated: single blob disk per storage account; Managed: azure managed data disk (only in managed availability set). Defaults to Shared.
+  // +default=ref(AzureSharedBlobDisk)
+  optional string kind = 6;
+}
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+message AzureFilePersistentVolumeSource {
+  // secretName is the name of secret that contains Azure Storage Account Name and Key
+  optional string secretName = 1;
+
+  // shareName is the azure Share Name
+  optional string shareName = 2;
+
+  // readOnly defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 3;
+
+  // secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key
+  // default is the same as the Pod
+  // +optional
+  optional string secretNamespace = 4;
+}
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+message AzureFileVolumeSource {
+  // secretName is the name of secret that contains Azure Storage Account Name and Key
+  optional string secretName = 1;
+
+  // shareName is the azure share Name
+  optional string shareName = 2;
+
+  // readOnly defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 3;
+}
+
+// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
+message Binding {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The target object that you want to bind to the standard object.
+  optional ObjectReference target = 2;
+}
+
+// Represents storage that is managed by an external CSI volume driver
+message CSIPersistentVolumeSource {
+  // driver is the name of the driver to use for this volume.
+  // Required.
+  optional string driver = 1;
+
+  // volumeHandle is the unique volume name returned by the CSI volume
+  // plugin’s CreateVolume to refer to the volume on all subsequent calls.
+  // Required.
+  optional string volumeHandle = 2;
+
+  // readOnly value to pass to ControllerPublishVolumeRequest.
+  // Defaults to false (read/write).
+  // +optional
+  optional bool readOnly = 3;
+
+  // fsType to mount. Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs".
+  // +optional
+  optional string fsType = 4;
+
+  // volumeAttributes of the volume to publish.
+  // +optional
+  map<string, string> volumeAttributes = 5;
+
+  // controllerPublishSecretRef is a reference to the secret object containing
+  // sensitive information to pass to the CSI driver to complete the CSI
+  // ControllerPublishVolume and ControllerUnpublishVolume calls.
+  // This field is optional, and may be empty if no secret is required. If the
+  // secret object contains more than one secret, all secrets are passed.
+  // +optional
+  optional SecretReference controllerPublishSecretRef = 6;
+
+  // nodeStageSecretRef is a reference to the secret object containing sensitive
+  // information to pass to the CSI driver to complete the CSI NodeStageVolume
+  // and NodeUnstageVolume calls.
+  // This field is optional, and may be empty if no secret is required. If the
+  // secret object contains more than one secret, all secrets are passed.
+  // +optional
+  optional SecretReference nodeStageSecretRef = 7;
+
+  // nodePublishSecretRef is a reference to the secret object containing
+  // sensitive information to pass to the CSI driver to complete the CSI
+  // NodePublishVolume and NodeUnpublishVolume calls.
+  // This field is optional, and may be empty if no secret is required. If the
+  // secret object contains more than one secret, all secrets are passed.
+  // +optional
+  optional SecretReference nodePublishSecretRef = 8;
+
+  // controllerExpandSecretRef is a reference to the secret object containing
+  // sensitive information to pass to the CSI driver to complete the CSI
+  // ControllerExpandVolume call.
+  // This field is optional, and may be empty if no secret is required. If the
+  // secret object contains more than one secret, all secrets are passed.
+  // +optional
+  optional SecretReference controllerExpandSecretRef = 9;
+
+  // nodeExpandSecretRef is a reference to the secret object containing
+  // sensitive information to pass to the CSI driver to complete the CSI
+  // NodeExpandVolume call.
+  // This field is optional, may be omitted if no secret is required. If the
+  // secret object contains more than one secret, all secrets are passed.
+  // +optional
+  optional SecretReference nodeExpandSecretRef = 10;
+}
+
+// Represents a source location of a volume to mount, managed by an external CSI driver
+message CSIVolumeSource {
+  // driver is the name of the CSI driver that handles this volume.
+  // Consult with your admin for the correct name as registered in the cluster.
+  optional string driver = 1;
+
+  // readOnly specifies a read-only configuration for the volume.
+  // Defaults to false (read/write).
+  // +optional
+  optional bool readOnly = 2;
+
+  // fsType to mount. Ex. "ext4", "xfs", "ntfs".
+  // If not provided, the empty value is passed to the associated CSI driver
+  // which will determine the default filesystem to apply.
+  // +optional
+  optional string fsType = 3;
+
+  // volumeAttributes stores driver-specific properties that are passed to the CSI
+  // driver. Consult your driver's documentation for supported values.
+  // +optional
+  map<string, string> volumeAttributes = 4;
+
+  // nodePublishSecretRef is a reference to the secret object containing
+  // sensitive information to pass to the CSI driver to complete the CSI
+  // NodePublishVolume and NodeUnpublishVolume calls.
+  // This field is optional, and may be empty if no secret is required. If the
+  // secret object contains more than one secret, all secret references are passed.
+  // +optional
+  optional LocalObjectReference nodePublishSecretRef = 5;
+}
+
+// Adds and removes POSIX capabilities from running containers.
+message Capabilities {
+  // Added capabilities
+  // +optional
+  // +listType=atomic
+  repeated string add = 1;
+
+  // Removed capabilities
+  // +optional
+  // +listType=atomic
+  repeated string drop = 2;
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+message CephFSPersistentVolumeSource {
+  // monitors is Required: Monitors is a collection of Ceph monitors
+  // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+  // +listType=atomic
+  repeated string monitors = 1;
+
+  // path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+  // +optional
+  optional string path = 2;
+
+  // user is Optional: User is the rados user name, default is admin
+  // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional string user = 3;
+
+  // secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+  // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional string secretFile = 4;
+
+  // secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+  // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional SecretReference secretRef = 5;
+
+  // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional bool readOnly = 6;
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+message CephFSVolumeSource {
+  // monitors is Required: Monitors is a collection of Ceph monitors
+  // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+  // +listType=atomic
+  repeated string monitors = 1;
+
+  // path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+  // +optional
+  optional string path = 2;
+
+  // user is optional: User is the rados user name, default is admin
+  // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional string user = 3;
+
+  // secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+  // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional string secretFile = 4;
+
+  // secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+  // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional LocalObjectReference secretRef = 5;
+
+  // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+  // +optional
+  optional bool readOnly = 6;
+}
+
+// Represents a cinder volume resource in Openstack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
+message CinderPersistentVolumeSource {
+  // volumeID used to identify the volume in cinder.
+  // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+  optional string volumeID = 1;
+
+  // fsType is the filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+  // +optional
+  optional string fsType = 2;
+
+  // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+  // +optional
+  optional bool readOnly = 3;
+
+  // secretRef is Optional: points to a secret object containing parameters used to connect
+  // to OpenStack.
+  // +optional
+  optional SecretReference secretRef = 4;
+}
+
+// Represents a cinder volume resource in Openstack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
+message CinderVolumeSource {
+  // volumeID used to identify the volume in cinder.
+  // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+  optional string volumeID = 1;
+
+  // fsType is the filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+  // +optional
+  optional string fsType = 2;
+
+  // readOnly defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+  // +optional
+  optional bool readOnly = 3;
+
+  // secretRef is optional: points to a secret object containing parameters used to connect
+  // to OpenStack.
+  // +optional
+  optional LocalObjectReference secretRef = 4;
+}
+
+// ClientIPConfig represents the configurations of Client IP based session affinity.
+message ClientIPConfig {
+  // timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+  // The value must be > 0 && <= 86400 (for 1 day) if ServiceAffinity == "ClientIP".
+  // Default value is 10800 (for 3 hours).
+  // +optional
+  optional int32 timeoutSeconds = 1;
+}
+
+// ClusterTrustBundleProjection describes how to select a set of
+// ClusterTrustBundle objects and project their contents into the pod
+// filesystem.
+message ClusterTrustBundleProjection {
+  // Select a single ClusterTrustBundle by object name.  Mutually-exclusive
+  // with signerName and labelSelector.
+  // +optional
+  optional string name = 1;
+
+  // Select all ClusterTrustBundles that match this signer name.
+  // Mutually-exclusive with name.  The contents of all selected
+  // ClusterTrustBundles will be unified and deduplicated.
+  // +optional
+  optional string signerName = 2;
+
+  // Select all ClusterTrustBundles that match this label selector.  Only has
+  // effect if signerName is set.  Mutually-exclusive with name.  If unset,
+  // interpreted as "match nothing".  If set but empty, interpreted as "match
+  // everything".
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 3;
+
+  // If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+  // aren't available.  If using name, then the named ClusterTrustBundle is
+  // allowed not to exist.  If using signerName, then the combination of
+  // signerName and labelSelector is allowed to match zero
+  // ClusterTrustBundles.
+  // +optional
+  optional bool optional = 5;
+
+  // Relative path from the volume root to write the bundle.
+  optional string path = 4;
+}
+
+// Information about the condition of a component.
+message ComponentCondition {
+  // Type of condition for a component.
+  // Valid value: "Healthy"
+  optional string type = 1;
+
+  // Status of the condition for a component.
+  // Valid values for "Healthy": "True", "False", or "Unknown".
+  optional string status = 2;
+
+  // Message about the condition for a component.
+  // For example, information about a health check.
+  // +optional
+  optional string message = 3;
+
+  // Condition error code for a component.
+  // For example, a health check error code.
+  // +optional
+  optional string error = 4;
+}
+
+// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
+// Deprecated: This API is deprecated in v1.19+
+message ComponentStatus {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // List of component conditions observed
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  repeated ComponentCondition conditions = 2;
+}
+
+// Status of all the conditions for the component as a list of ComponentStatus objects.
+// Deprecated: This API is deprecated in v1.19+
+message ComponentStatusList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of ComponentStatus objects.
+  repeated ComponentStatus items = 2;
+}
+
+// ConfigMap holds configuration data for pods to consume.
+message ConfigMap {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Immutable, if set to true, ensures that data stored in the ConfigMap cannot
+  // be updated (only object metadata can be modified).
+  // If not set to true, the field can be modified at any time.
+  // Defaulted to nil.
+  // +optional
+  optional bool immutable = 4;
+
+  // Data contains the configuration data.
+  // Each key must consist of alphanumeric characters, '-', '_' or '.'.
+  // Values with non-UTF-8 byte sequences must use the BinaryData field.
+  // The keys stored in Data must not overlap with the keys in
+  // the BinaryData field, this is enforced during validation process.
+  // +optional
+  map<string, string> data = 2;
+
+  // BinaryData contains the binary data.
+  // Each key must consist of alphanumeric characters, '-', '_' or '.'.
+  // BinaryData can contain byte sequences that are not in the UTF-8 range.
+  // The keys stored in BinaryData must not overlap with the ones in
+  // the Data field, this is enforced during validation process.
+  // Using this field will require 1.10+ apiserver and
+  // kubelet.
+  // +optional
+  map<string, bytes> binaryData = 3;
+}
+
+// ConfigMapEnvSource selects a ConfigMap to populate the environment
+// variables with.
+//
+// The contents of the target ConfigMap's Data field will represent the
+// key-value pairs as environment variables.
+message ConfigMapEnvSource {
+  // The ConfigMap to select from.
+  optional LocalObjectReference localObjectReference = 1;
+
+  // Specify whether the ConfigMap must be defined
+  // +optional
+  optional bool optional = 2;
+}
+
+// Selects a key from a ConfigMap.
+// +structType=atomic
+message ConfigMapKeySelector {
+  // The ConfigMap to select from.
+  optional LocalObjectReference localObjectReference = 1;
+
+  // The key to select.
+  optional string key = 2;
+
+  // Specify whether the ConfigMap or its key must be defined
+  // +optional
+  optional bool optional = 3;
+}
+
+// ConfigMapList is a resource containing a list of ConfigMap objects.
+message ConfigMapList {
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of ConfigMaps.
+  repeated ConfigMap items = 2;
+}
+
+// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
+// This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
+message ConfigMapNodeConfigSource {
+  // Namespace is the metadata.namespace of the referenced ConfigMap.
+  // This field is required in all cases.
+  optional string namespace = 1;
+
+  // Name is the metadata.name of the referenced ConfigMap.
+  // This field is required in all cases.
+  optional string name = 2;
+
+  // UID is the metadata.UID of the referenced ConfigMap.
+  // This field is forbidden in Node.Spec, and required in Node.Status.
+  // +optional
+  optional string uid = 3;
+
+  // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
+  // This field is forbidden in Node.Spec, and required in Node.Status.
+  // +optional
+  optional string resourceVersion = 4;
+
+  // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure
+  // This field is required in all cases.
+  optional string kubeletConfigKey = 5;
+}
+
+// Adapts a ConfigMap into a projected volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names,
+// unless the items element is populated with specific mappings of keys to paths.
+// Note that this is identical to a configmap volume source without the default
+// mode.
+message ConfigMapProjection {
+  optional LocalObjectReference localObjectReference = 1;
+
+  // items if unspecified, each key-value pair in the Data field of the referenced
+  // ConfigMap will be projected into the volume as a file whose name is the
+  // key and content is the value. If specified, the listed keys will be
+  // projected into the specified paths, and unlisted keys will not be
+  // present. If a key is specified which is not present in the ConfigMap,
+  // the volume setup will error unless it is marked optional. Paths must be
+  // relative and may not contain the '..' path or start with '..'.
+  // +optional
+  // +listType=atomic
+  repeated KeyToPath items = 2;
+
+  // optional specify whether the ConfigMap or its keys must be defined
+  // +optional
+  optional bool optional = 4;
+}
+
+// Adapts a ConfigMap into a volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// volume as files using the keys in the Data field as the file names, unless
+// the items element is populated with specific mappings of keys to paths.
+// ConfigMap volumes support ownership management and SELinux relabeling.
+message ConfigMapVolumeSource {
+  optional LocalObjectReference localObjectReference = 1;
+
+  // items if unspecified, each key-value pair in the Data field of the referenced
+  // ConfigMap will be projected into the volume as a file whose name is the
+  // key and content is the value. If specified, the listed keys will be
+  // projected into the specified paths, and unlisted keys will not be
+  // present. If a key is specified which is not present in the ConfigMap,
+  // the volume setup will error unless it is marked optional. Paths must be
+  // relative and may not contain the '..' path or start with '..'.
+  // +optional
+  // +listType=atomic
+  repeated KeyToPath items = 2;
+
+  // defaultMode is optional: mode bits used to set permissions on created files by default.
+  // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+  // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+  // Defaults to 0644.
+  // Directories within the path are not affected by this setting.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and the result can be other mode bits set.
+  // +optional
+  optional int32 defaultMode = 3;
+
+  // optional specify whether the ConfigMap or its keys must be defined
+  // +optional
+  optional bool optional = 4;
+}
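Because YAML accepts octal literals while JSON requires decimal, the same defaultMode can be written two ways; in Go client code both spellings are literally the same integer. A short sketch using the Go types generated from this package ("app-config" is a hypothetical ConfigMap name):

import corev1 "k8s.io/api/core/v1"

func exampleConfigMapVolume() corev1.ConfigMapVolumeSource {
	mode := int32(0644) // octal 0644 == decimal 420: the same int32 value
	return corev1.ConfigMapVolumeSource{
		LocalObjectReference: corev1.LocalObjectReference{Name: "app-config"},
		DefaultMode:          &mode,
	}
}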
+
+// A single application container that you want to run within a pod.
+message Container {
+  // Name of the container specified as a DNS_LABEL.
+  // Each container in a pod must have a unique name (DNS_LABEL).
+  // Cannot be updated.
+  optional string name = 1;
+
+  // Container image name.
+  // More info: https://kubernetes.io/docs/concepts/containers/images
+  // This field is optional to allow higher level config management to default or override
+  // container images in workload controllers like Deployments and StatefulSets.
+  // +optional
+  optional string image = 2;
+
+  // Entrypoint array. Not executed within a shell.
+  // The container image's ENTRYPOINT is used if this is not provided.
+  // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+  // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+  // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+  // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+  // of whether the variable exists or not. Cannot be updated.
+  // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+  // +optional
+  // +listType=atomic
+  repeated string command = 3;
+
+  // Arguments to the entrypoint.
+  // The container image's CMD is used if this is not provided.
+  // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+  // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+  // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+  // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+  // of whether the variable exists or not. Cannot be updated.
+  // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+  // +optional
+  // +listType=atomic
+  repeated string args = 4;
+
+  // Container's working directory.
+  // If not specified, the container runtime's default will be used, which
+  // might be configured in the container image.
+  // Cannot be updated.
+  // +optional
+  optional string workingDir = 5;
+
+  // List of ports to expose from the container. Not specifying a port here
+  // DOES NOT prevent that port from being exposed. Any port which is
+  // listening on the default "0.0.0.0" address inside a container will be
+  // accessible from the network.
+  // Modifying this array with strategic merge patch may corrupt the data.
+  // For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+  // Cannot be updated.
+  // +optional
+  // +patchMergeKey=containerPort
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=containerPort
+  // +listMapKey=protocol
+  repeated ContainerPort ports = 6;
+
+  // List of sources to populate environment variables in the container.
+  // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+  // will be reported as an event when the container is starting. When a key exists in multiple
+  // sources, the value associated with the last source will take precedence.
+  // Values defined by an Env with a duplicate key will take precedence.
+  // Cannot be updated.
+  // +optional
+  // +listType=atomic
+  repeated EnvFromSource envFrom = 19;
+
+  // List of environment variables to set in the container.
+  // Cannot be updated.
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=name
+  repeated EnvVar env = 7;
+
+  // Compute Resources required by this container.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+  // +optional
+  optional ResourceRequirements resources = 8;
+
+  // Resources resize policy for the container.
+  // +featureGate=InPlacePodVerticalScaling
+  // +optional
+  // +listType=atomic
+  repeated ContainerResizePolicy resizePolicy = 23;
+
+  // RestartPolicy defines the restart behavior of individual containers in a pod.
+  // This field may only be set for init containers, and the only allowed value is "Always".
+  // For non-init containers or when this field is not specified,
+  // the restart behavior is defined by the Pod's restart policy and the container type.
+  // Setting the RestartPolicy as "Always" for the init container will have the following effect:
+  // this init container will be continually restarted on
+  // exit until all regular containers have terminated. Once all regular
+  // containers have completed, all init containers with restartPolicy "Always"
+  // will be shut down. This lifecycle differs from normal init containers and
+  // is often referred to as a "sidecar" container. Although this init
+  // container still starts in the init container sequence, it does not wait
+  // for the container to complete before proceeding to the next init
+  // container. Instead, the next init container starts immediately after this
+  // init container is started, or after any startupProbe has successfully
+  // completed.
+  // +featureGate=SidecarContainers
+  // +optional
+  optional string restartPolicy = 24;
+
+  // Pod volumes to mount into the container's filesystem.
+  // Cannot be updated.
+  // +optional
+  // +patchMergeKey=mountPath
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=mountPath
+  repeated VolumeMount volumeMounts = 9;
+
+  // volumeDevices is the list of block devices to be used by the container.
+  // +patchMergeKey=devicePath
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=devicePath
+  // +optional
+  repeated VolumeDevice volumeDevices = 21;
+
+  // Periodic probe of container liveness.
+  // Container will be restarted if the probe fails.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+  // +optional
+  optional Probe livenessProbe = 10;
+
+  // Periodic probe of container service readiness.
+  // Container will be removed from service endpoints if the probe fails.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+  // +optional
+  optional Probe readinessProbe = 11;
+
+  // StartupProbe indicates that the Pod has successfully initialized.
+  // If specified, no other probes are executed until this completes successfully.
+  // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+  // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+  // when it might take a long time to load data or warm a cache, than during steady-state operation.
+  // This cannot be updated.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+  // +optional
+  optional Probe startupProbe = 22;
+
+  // Actions that the management system should take in response to container lifecycle events.
+  // Cannot be updated.
+  // +optional
+  optional Lifecycle lifecycle = 12;
+
+  // Optional: Path at which the file to which the container's termination message
+  // will be written is mounted into the container's filesystem.
+  // Message written is intended to be brief final status, such as an assertion failure message.
+  // Will be truncated by the node if greater than 4096 bytes. The total message length across
+  // all containers will be limited to 12kb.
+  // Defaults to /dev/termination-log.
+  // Cannot be updated.
+  // +optional
+  optional string terminationMessagePath = 13;
+
+  // Indicate how the termination message should be populated. File will use the contents of
+  // terminationMessagePath to populate the container status message on both success and failure.
+  // FallbackToLogsOnError will use the last chunk of container log output if the termination
+  // message file is empty and the container exited with an error.
+  // The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+  // Defaults to File.
+  // Cannot be updated.
+  // +optional
+  optional string terminationMessagePolicy = 20;
+
+  // Image pull policy.
+  // One of Always, Never, IfNotPresent.
+  // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+  // +optional
+  optional string imagePullPolicy = 14;
+
+  // SecurityContext defines the security options the container should be run with.
+  // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+  // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  // +optional
+  optional SecurityContext securityContext = 15;
+
+  // Whether this container should allocate a buffer for stdin in the container runtime. If this
+  // is not set, reads from stdin in the container will always result in EOF.
+  // Default is false.
+  // +optional
+  optional bool stdin = 16;
+
+  // Whether the container runtime should close the stdin channel after it has been opened by
+  // a single attach. When stdin is true the stdin stream will remain open across multiple attach
+  // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+  // first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+  // at which time stdin is closed and remains closed until the container is restarted. If this
+  // flag is false, a container process that reads from stdin will never receive an EOF.
+  // Default is false
+  // +optional
+  optional bool stdinOnce = 17;
+
+  // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+  // Default is false.
+  // +optional
+  optional bool tty = 18;
+}
+
+// Describe a container image
+message ContainerImage {
+  // Names by which this image is known.
+  // e.g. ["kubernetes.example/hyperkube:v1.0.7", "cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7"]
+  // +optional
+  // +listType=atomic
+  repeated string names = 1;
+
+  // The size of the image in bytes.
+  // +optional
+  optional int64 sizeBytes = 2;
+}
+
+// ContainerPort represents a network port in a single container.
+message ContainerPort {
+  // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+  // named port in a pod must have a unique name. Name for the port that can be
+  // referred to by services.
+  // +optional
+  optional string name = 1;
+
+  // Number of port to expose on the host.
+  // If specified, this must be a valid port number, 0 < x < 65536.
+  // If HostNetwork is specified, this must match ContainerPort.
+  // Most containers do not need this.
+  // +optional
+  optional int32 hostPort = 2;
+
+  // Number of port to expose on the pod's IP address.
+  // This must be a valid port number, 0 < x < 65536.
+  optional int32 containerPort = 3;
+
+  // Protocol for port. Must be UDP, TCP, or SCTP.
+  // Defaults to "TCP".
+  // +optional
+  // +default="TCP"
+  optional string protocol = 4;
+
+  // What host IP to bind the external port to.
+  // +optional
+  optional string hostIP = 5;
+}
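Both containerPort and hostPort carry the same documented range, 0 < x < 65536. A hedged one-liner capturing that check (hypothetical helper, not part of this API):

// validPort mirrors the documented constraint for containerPort/hostPort.
func validPort(p int32) bool {
	return p > 0 && p < 65536
}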
+
+// ContainerResizePolicy represents resource resize policy for the container.
+message ContainerResizePolicy {
+  // Name of the resource to which this resource resize policy applies.
+  // Supported values: cpu, memory.
+  optional string resourceName = 1;
+
+  // Restart policy to apply when specified resource is resized.
+  // If not specified, it defaults to NotRequired.
+  optional string restartPolicy = 2;
+}
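+
+// An illustrative ContainerResizePolicy allowing cpu to be resized in place
+// without restarting the container:
+//
+// 	{"resourceName": "cpu", "restartPolicy": "NotRequired"}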
+
+// ContainerState holds a possible state of container.
+// Only one of its members may be specified.
+// If none of them is specified, the default one is ContainerStateWaiting.
+message ContainerState {
+  // Details about a waiting container
+  // +optional
+  optional ContainerStateWaiting waiting = 1;
+
+  // Details about a running container
+  // +optional
+  optional ContainerStateRunning running = 2;
+
+  // Details about a terminated container
+  // +optional
+  optional ContainerStateTerminated terminated = 3;
+}
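+
+// An illustrative ContainerState with only the waiting member set, per the
+// one-of semantics above (reason/message values are examples only):
+//
+// 	{"waiting": {"reason": "ImagePullBackOff", "message": "Back-off pulling image"}}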
+
+// ContainerStateRunning is a running state of a container.
+message ContainerStateRunning {
+  // Time at which the container was last (re-)started
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1;
+}
+
+// ContainerStateTerminated is a terminated state of a container.
+message ContainerStateTerminated {
+  // Exit status from the last termination of the container
+  optional int32 exitCode = 1;
+
+  // Signal from the last termination of the container
+  // +optional
+  optional int32 signal = 2;
+
+  // (brief) reason from the last termination of the container
+  // +optional
+  optional string reason = 3;
+
+  // Message regarding the last termination of the container
+  // +optional
+  optional string message = 4;
+
+  // Time at which previous execution of the container started
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5;
+
+  // Time at which the container last terminated
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6;
+
+  // Container's ID in the format '<type>://<container_id>'
+  // +optional
+  optional string containerID = 7;
+}
+
+// ContainerStateWaiting is a waiting state of a container.
+message ContainerStateWaiting {
+  // (brief) reason the container is not yet running.
+  // +optional
+  optional string reason = 1;
+
+  // Message regarding why the container is not yet running.
+  // +optional
+  optional string message = 2;
+}
+
+// ContainerStatus contains details for the current status of this container.
+message ContainerStatus {
+  // Name is a DNS_LABEL representing the unique name of the container.
+  // Each container in a pod must have a unique name across all container types.
+  // Cannot be updated.
+  optional string name = 1;
+
+  // State holds details about the container's current condition.
+  // +optional
+  optional ContainerState state = 2;
+
+  // LastTerminationState holds the last termination state of the container to
+  // help debug container crashes and restarts. This field is not
+  // populated if the container is still running and RestartCount is 0.
+  // +optional
+  optional ContainerState lastState = 3;
+
+  // Ready specifies whether the container is currently passing its readiness check.
+  // The value will change as readiness probes keep executing. If no readiness
+  // probes are specified, this field defaults to true once the container is
+  // fully started (see Started field).
+  //
+  // The value is typically used to determine whether a container is ready to
+  // accept traffic.
+  optional bool ready = 4;
+
+  // RestartCount holds the number of times the container has been restarted.
+  // Kubelet makes an effort to always increment the value, but there
+  // are cases when the state may be lost due to node restarts and then the value
+  // may be reset to 0. The value is never negative.
+  optional int32 restartCount = 5;
+
+  // Image is the name of container image that the container is running.
+  // The container image may not match the image used in the PodSpec,
+  // as it may have been resolved by the runtime.
+  // More info: https://kubernetes.io/docs/concepts/containers/images.
+  optional string image = 6;
+
+  // ImageID is the image ID of the container's image. The image ID may not
+  // match the image ID of the image used in the PodSpec, as it may have been
+  // resolved by the runtime.
+  optional string imageID = 7;
+
+  // ContainerID is the ID of the container in the format '<type>://<container_id>'.
+  // Where type is a container runtime identifier, returned from Version call of CRI API
+  // (for example "containerd").
+  // +optional
+  optional string containerID = 8;
+
+  // Started indicates whether the container has finished its postStart lifecycle hook
+  // and passed its startup probe.
+  // Initialized as false, becomes true after startupProbe is considered
+  // successful. Resets to false when the container is restarted, or if kubelet
+  // loses state temporarily. In both cases, startup probes will run again.
+  // Is always true when no startupProbe is defined and container is running and
+  // has passed the postStart lifecycle hook. The null value must be treated the
+  // same as false.
+  // +optional
+  optional bool started = 9;
+
+  // AllocatedResources represents the compute resources allocated for this container by the
+  // node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission
+  // and after successfully admitting desired pod resize.
+  // +featureGate=InPlacePodVerticalScalingAllocatedStatus
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 10;
+
+  // Resources represents the compute resource requests and limits that have been successfully
+  // enacted on the running container after it has been started or has been successfully resized.
+  // +featureGate=InPlacePodVerticalScaling
+  // +optional
+  optional ResourceRequirements resources = 11;
+
+  // Status of volume mounts.
+  // +optional
+  // +patchMergeKey=mountPath
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=mountPath
+  // +featureGate=RecursiveReadOnlyMounts
+  repeated VolumeMountStatus volumeMounts = 12;
+
+  // User represents user identity information initially attached to the first process of the container
+  // +featureGate=SupplementalGroupsPolicy
+  // +optional
+  optional ContainerUser user = 13;
+
+  // AllocatedResourcesStatus represents the status of various resources
+  // allocated for this Pod.
+  // +featureGate=ResourceHealthStatus
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=name
+  repeated ResourceStatus allocatedResourcesStatus = 14;
+}
+
+// ContainerUser represents user identity information
+message ContainerUser {
+  // Linux holds user identity information initially attached to the first process of the containers in Linux.
+  // Note that the actual running identity can be changed if the process has enough privilege to do so.
+  // +optional
+  optional LinuxContainerUser linux = 1;
+}
+
+// DaemonEndpoint contains information about a single Daemon endpoint.
+message DaemonEndpoint {
+  // Port number of the given endpoint.
+  optional int32 Port = 1;
+}
+
+// Represents downward API info for projecting into a projected volume.
+// Note that this is identical to a downwardAPI volume source without the default
+// mode.
+message DownwardAPIProjection {
+  // Items is a list of DownwardAPIVolume file
+  // +optional
+  // +listType=atomic
+  repeated DownwardAPIVolumeFile items = 1;
+}
+
+// DownwardAPIVolumeFile represents information to create the file containing the pod field
+message DownwardAPIVolumeFile {
+  // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
+  optional string path = 1;
+
+  // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.
+  // +optional
+  optional ObjectFieldSelector fieldRef = 2;
+
+  // Selects a resource of the container: only resources limits and requests
+  // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+  // +optional
+  optional ResourceFieldSelector resourceFieldRef = 3;
+
+  // Optional: mode bits used to set permissions on this file, must be an octal value
+  // between 0000 and 0777 or a decimal value between 0 and 511.
+  // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+  // If not specified, the volume defaultMode will be used.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and the result can be other mode bits set.
+  // +optional
+  optional int32 mode = 4;
+}
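+
+// An illustrative DownwardAPIVolumeFile projecting the pod's labels into a
+// relative file "labels" with 0644 permissions (420 decimal):
+//
+// 	{"path": "labels", "fieldRef": {"fieldPath": "metadata.labels"}, "mode": 420}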
+
+// DownwardAPIVolumeSource represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
+message DownwardAPIVolumeSource {
+  // Items is a list of downward API volume file
+  // +optional
+  // +listType=atomic
+  repeated DownwardAPIVolumeFile items = 1;
+
+  // Optional: mode bits used to set permissions on created files by default.
+  // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+  // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+  // Defaults to 0644.
+  // Directories within the path are not affected by this setting.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and the result can be other mode bits set.
+  // +optional
+  optional int32 defaultMode = 2;
+}
+
+// Represents an empty directory for a pod.
+// Empty directory volumes support ownership management and SELinux relabeling.
+message EmptyDirVolumeSource {
+  // medium represents what type of storage medium should back this directory.
+  // The default is "" which means to use the node's default medium.
+  // Must be an empty string (default) or Memory.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+  // +optional
+  optional string medium = 1;
+
+  // sizeLimit is the total amount of local storage required for this EmptyDir volume.
+  // The size limit is also applicable for memory medium.
+  // The maximum usage on memory medium EmptyDir would be the minimum value between
+  // the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+  // The default is nil which means that the limit is undefined.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+  // +optional
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2;
+}
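+
+// An illustrative memory-backed EmptyDirVolumeSource capped at 256Mi; the
+// size value is an example only:
+//
+// 	{"medium": "Memory", "sizeLimit": "256Mi"}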
+
+// EndpointAddress is a tuple that describes single IP address.
+// +structType=atomic
+message EndpointAddress {
+  // The IP of this endpoint.
+  // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10),
+  // or link-local multicast (224.0.0.0/24 or ff02::/16).
+  optional string ip = 1;
+
+  // The Hostname of this endpoint
+  // +optional
+  optional string hostname = 3;
+
+  // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
+  // +optional
+  optional string nodeName = 4;
+
+  // Reference to object providing the endpoint.
+  // +optional
+  optional ObjectReference targetRef = 2;
+}
+
+// EndpointPort is a tuple that describes a single port.
+// +structType=atomic
+message EndpointPort {
+  // The name of this port.  This must match the 'name' field in the
+  // corresponding ServicePort.
+  // Must be a DNS_LABEL.
+  // Optional only if one port is defined.
+  // +optional
+  optional string name = 1;
+
+  // The port number of the endpoint.
+  optional int32 port = 2;
+
+  // The IP protocol for this port.
+  // Must be UDP, TCP, or SCTP.
+  // Default is TCP.
+  // +optional
+  optional string protocol = 3;
+
+  // The application protocol for this port.
+  // This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+  // This field follows standard Kubernetes label syntax.
+  // Valid values are either:
+  //
+  // * Un-prefixed protocol names - reserved for IANA standard service names (as per
+  // RFC-6335 and https://www.iana.org/assignments/service-names).
+  //
+  // * Kubernetes-defined prefixed names:
+  //   * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+  //   * 'kubernetes.io/ws'  - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+  //   * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+  //
+  // * Other protocols should use implementation-defined prefixed names such as
+  // mycompany.com/my-custom-protocol.
+  // +optional
+  optional string appProtocol = 4;
+}
+
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+//
+// 	{
+// 	  Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+// 	  Ports:     [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+// 	}
+//
+// The resulting set of endpoints can be viewed as:
+//
+// 	a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+// 	b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+message EndpointSubset {
+  // IP addresses which offer the related ports that are marked as ready. These endpoints
+  // should be considered safe for load balancers and clients to utilize.
+  // +optional
+  // +listType=atomic
+  repeated EndpointAddress addresses = 1;
+
+  // IP addresses which offer the related ports but are not currently marked as ready
+  // because they have not yet finished starting, have recently failed a readiness check,
+  // or have recently failed a liveness check.
+  // +optional
+  // +listType=atomic
+  repeated EndpointAddress notReadyAddresses = 2;
+
+  // Port numbers available on the related IP addresses.
+  // +optional
+  // +listType=atomic
+  repeated EndpointPort ports = 3;
+}
+
+// Endpoints is a collection of endpoints that implement the actual service. Example:
+//
+// 	 Name: "mysvc",
+// 	 Subsets: [
+// 	   {
+// 	     Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+// 	     Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+// 	   },
+// 	   {
+// 	     Addresses: [{"ip": "10.10.3.3"}],
+// 	     Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
+// 	   },
+// 	]
+message Endpoints {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Sets of addresses and ports that comprise a service.
+  // The set of all endpoints is the union of all subsets. Addresses are placed into
+  // subsets according to the IPs they share. A single address with multiple ports,
+  // some of which are ready and some of which are not (because they come from
+  // different containers) will result in the address being displayed in different
+  // subsets for the different ports. No address will appear in both Addresses and
+  // NotReadyAddresses in the same subset.
+  // +optional
+  // +listType=atomic
+  repeated EndpointSubset subsets = 2;
+}
+
+// EndpointsList is a list of endpoints.
+message EndpointsList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of endpoints.
+  repeated Endpoints items = 2;
+}
+
+// EnvFromSource represents the source of a set of ConfigMaps
+message EnvFromSource {
+  // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
+  // +optional
+  optional string prefix = 1;
+
+  // The ConfigMap to select from
+  // +optional
+  optional ConfigMapEnvSource configMapRef = 2;
+
+  // The Secret to select from
+  // +optional
+  optional SecretEnvSource secretRef = 3;
+}
+
+// EnvVar represents an environment variable present in a Container.
+message EnvVar {
+  // Name of the environment variable. Must be a C_IDENTIFIER.
+  optional string name = 1;
+
+  // Variable references $(VAR_NAME) are expanded
+  // using the previously defined environment variables in the container and
+  // any service environment variables. If a variable cannot be resolved,
+  // the reference in the input string will be unchanged. Double $$ are reduced
+  // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+  // "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+  // Escaped references will never be expanded, regardless of whether the variable
+  // exists or not.
+  // Defaults to "".
+  // +optional
+  optional string value = 2;
+
+  // Source for the environment variable's value. Cannot be used if value is not empty.
+  // +optional
+  optional EnvVarSource valueFrom = 3;
+}
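+
+// Illustrative EnvVars for the expansion rules above (USER_NAME is an
+// assumed, previously defined variable): the first value is expanded,
+// while the escaped second value yields the literal string "$(USER_NAME)":
+//
+// 	{"name": "GREETING", "value": "hello $(USER_NAME)"}
+// 	{"name": "LITERAL", "value": "$$(USER_NAME)"}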
+
+// EnvVarSource represents a source for the value of an EnvVar.
+message EnvVarSource {
+  // Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+  // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+  // +optional
+  optional ObjectFieldSelector fieldRef = 1;
+
+  // Selects a resource of the container: only resources limits and requests
+  // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+  // +optional
+  optional ResourceFieldSelector resourceFieldRef = 2;
+
+  // Selects a key of a ConfigMap.
+  // +optional
+  optional ConfigMapKeySelector configMapKeyRef = 3;
+
+  // Selects a key of a secret in the pod's namespace
+  // +optional
+  optional SecretKeySelector secretKeyRef = 4;
+}
+
+// An EphemeralContainer is a temporary container that you may add to an existing Pod for
+// user-initiated activities such as debugging. Ephemeral containers have no resource or
+// scheduling guarantees, and they will not be restarted when they exit or when a Pod is
+// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the
+// Pod to exceed its resource allocation.
+//
+// To add an ephemeral container, use the ephemeralcontainers subresource of an existing
+// Pod. Ephemeral containers may not be removed or restarted.
+message EphemeralContainer {
+  // Ephemeral containers have all of the fields of Container, plus additional fields
+  // specific to ephemeral containers. Fields in common with Container are in the
+  // following inlined struct so that an EphemeralContainer may easily be converted
+  // to a Container.
+  optional EphemeralContainerCommon ephemeralContainerCommon = 1;
+
+  // If set, the name of the container from PodSpec that this ephemeral container targets.
+  // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+  // If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+  //
+  // The container runtime must implement support for this feature. If the runtime does not
+  // support namespace targeting then the result of setting this field is undefined.
+  // +optional
+  optional string targetContainerName = 2;
+}
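+
+// An illustrative EphemeralContainer (EphemeralContainerCommon is inlined
+// in serialization); "app" is an assumed container name in the target Pod:
+//
+// 	{"name": "debugger", "image": "busybox", "command": ["sh"],
+// 	 "stdin": true, "tty": true, "targetContainerName": "app"}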
+
+// EphemeralContainerCommon is a copy of all fields in Container to be inlined in
+// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer
+// to Container and allows separate documentation for the fields of EphemeralContainer.
+// When a new field is added to Container it must be added here as well.
+message EphemeralContainerCommon {
+  // Name of the ephemeral container specified as a DNS_LABEL.
+  // This name must be unique among all containers, init containers and ephemeral containers.
+  optional string name = 1;
+
+  // Container image name.
+  // More info: https://kubernetes.io/docs/concepts/containers/images
+  optional string image = 2;
+
+  // Entrypoint array. Not executed within a shell.
+  // The image's ENTRYPOINT is used if this is not provided.
+  // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+  // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+  // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+  // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+  // of whether the variable exists or not. Cannot be updated.
+  // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+  // +optional
+  // +listType=atomic
+  repeated string command = 3;
+
+  // Arguments to the entrypoint.
+  // The image's CMD is used if this is not provided.
+  // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+  // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+  // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+  // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+  // of whether the variable exists or not. Cannot be updated.
+  // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+  // +optional
+  // +listType=atomic
+  repeated string args = 4;
+
+  // Container's working directory.
+  // If not specified, the container runtime's default will be used, which
+  // might be configured in the container image.
+  // Cannot be updated.
+  // +optional
+  optional string workingDir = 5;
+
+  // Ports are not allowed for ephemeral containers.
+  // +optional
+  // +patchMergeKey=containerPort
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=containerPort
+  // +listMapKey=protocol
+  repeated ContainerPort ports = 6;
+
+  // List of sources to populate environment variables in the container.
+  // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+  // will be reported as an event when the container is starting. When a key exists in multiple
+  // sources, the value associated with the last source will take precedence.
+  // Values defined by an Env with a duplicate key will take precedence.
+  // Cannot be updated.
+  // +optional
+  // +listType=atomic
+  repeated EnvFromSource envFrom = 19;
+
+  // List of environment variables to set in the container.
+  // Cannot be updated.
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=name
+  repeated EnvVar env = 7;
+
+  // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
+  // already allocated to the pod.
+  // +optional
+  optional ResourceRequirements resources = 8;
+
+  // Resources resize policy for the container.
+  // +featureGate=InPlacePodVerticalScaling
+  // +optional
+  // +listType=atomic
+  repeated ContainerResizePolicy resizePolicy = 23;
+
+  // Restart policy for the container to manage the restart behavior of each
+  // container within a pod.
+  // This may only be set for init containers. You cannot set this field on
+  // ephemeral containers.
+  // +featureGate=SidecarContainers
+  // +optional
+  optional string restartPolicy = 24;
+
+  // Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+  // Cannot be updated.
+  // +optional
+  // +patchMergeKey=mountPath
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=mountPath
+  repeated VolumeMount volumeMounts = 9;
+
+  // volumeDevices is the list of block devices to be used by the container.
+  // +patchMergeKey=devicePath
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=devicePath
+  // +optional
+  repeated VolumeDevice volumeDevices = 21;
+
+  // Probes are not allowed for ephemeral containers.
+  // +optional
+  optional Probe livenessProbe = 10;
+
+  // Probes are not allowed for ephemeral containers.
+  // +optional
+  optional Probe readinessProbe = 11;
+
+  // Probes are not allowed for ephemeral containers.
+  // +optional
+  optional Probe startupProbe = 22;
+
+  // Lifecycle is not allowed for ephemeral containers.
+  // +optional
+  optional Lifecycle lifecycle = 12;
+
+  // Optional: Path at which the file to which the container's termination message
+  // will be written is mounted into the container's filesystem.
+  // Message written is intended to be brief final status, such as an assertion failure message.
+  // Will be truncated by the node if greater than 4096 bytes. The total message length across
+  // all containers will be limited to 12kb.
+  // Defaults to /dev/termination-log.
+  // Cannot be updated.
+  // +optional
+  optional string terminationMessagePath = 13;
+
+  // Indicate how the termination message should be populated. File will use the contents of
+  // terminationMessagePath to populate the container status message on both success and failure.
+  // FallbackToLogsOnError will use the last chunk of container log output if the termination
+  // message file is empty and the container exited with an error.
+  // The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+  // Defaults to File.
+  // Cannot be updated.
+  // +optional
+  optional string terminationMessagePolicy = 20;
+
+  // Image pull policy.
+  // One of Always, Never, IfNotPresent.
+  // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+  // +optional
+  optional string imagePullPolicy = 14;
+
+  // Optional: SecurityContext defines the security options the ephemeral container should be run with.
+  // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+  // +optional
+  optional SecurityContext securityContext = 15;
+
+  // Whether this container should allocate a buffer for stdin in the container runtime. If this
+  // is not set, reads from stdin in the container will always result in EOF.
+  // Default is false.
+  // +optional
+  optional bool stdin = 16;
+
+  // Whether the container runtime should close the stdin channel after it has been opened by
+  // a single attach. When stdin is true the stdin stream will remain open across multiple attach
+  // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+  // first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+  // at which time stdin is closed and remains closed until the container is restarted. If this
+  // flag is false, a container process that reads from stdin will never receive an EOF.
+  // Default is false.
+  // +optional
+  optional bool stdinOnce = 17;
+
+  // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+  // Default is false.
+  // +optional
+  optional bool tty = 18;
+}
+
+// Represents an ephemeral volume that is handled by a normal storage driver.
+message EphemeralVolumeSource {
+  // Will be used to create a stand-alone PVC to provision the volume.
+  // The pod in which this EphemeralVolumeSource is embedded will be the
+  // owner of the PVC, i.e. the PVC will be deleted together with the
+  // pod.  The name of the PVC will be `<pod name>-<volume name>` where
+  // `<volume name>` is the name from the `PodSpec.Volumes` array
+  // entry. Pod validation will reject the pod if the concatenated name
+  // is not valid for a PVC (for example, too long).
+  //
+  // An existing PVC with that name that is not owned by the pod
+  // will *not* be used for the pod to avoid using an unrelated
+  // volume by mistake. Starting the pod is then blocked until
+  // the unrelated PVC is removed. If such a pre-created PVC is
+  // meant to be used by the pod, the PVC has to be updated with an
+  // owner reference to the pod once the pod exists. Normally
+  // this should not be necessary, but it may be useful when
+  // manually reconstructing a broken cluster.
+  //
+  // This field is read-only and no changes will be made by Kubernetes
+  // to the PVC after it has been created.
+  //
+  // Required, must not be nil.
+  optional PersistentVolumeClaimTemplate volumeClaimTemplate = 1;
+}
+
+// Event is a report of an event somewhere in the cluster.  Events
+// have a limited retention time and triggers and messages may evolve
+// with time.  Event consumers should not rely on the timing of an event
+// with a given Reason reflecting a consistent underlying trigger, or the
+// continued existence of events with that Reason.  Events should be
+// treated as informative, best-effort, supplemental data.
+message Event {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The object that this event is about.
+  optional ObjectReference involvedObject = 2;
+
+  // This should be a short, machine understandable string that gives the reason
+  // for the transition into the object's current status.
+  // TODO: provide exact specification for format.
+  // +optional
+  optional string reason = 3;
+
+  // A human-readable description of the status of this operation.
+  // TODO: decide on maximum length.
+  // +optional
+  optional string message = 4;
+
+  // The component reporting this event. Should be a short machine understandable string.
+  // +optional
+  optional EventSource source = 5;
+
+  // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6;
+
+  // The time at which the most recent occurrence of this event was recorded.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7;
+
+  // The number of times this event has occurred.
+  // +optional
+  optional int32 count = 8;
+
+  // Type of this event (Normal, Warning), new types could be added in the future
+  // +optional
+  optional string type = 9;
+
+  // Time when this Event was first observed.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10;
+
+  // Data about the Event series this event represents or nil if it's a singleton Event.
+  // +optional
+  optional EventSeries series = 11;
+
+  // What action was taken/failed with regard to the Regarding object.
+  // +optional
+  optional string action = 12;
+
+  // Optional secondary object for more complex actions.
+  // +optional
+  optional ObjectReference related = 13;
+
+  // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+  // +optional
+  optional string reportingComponent = 14;
+
+  // ID of the controller instance, e.g. `kubelet-xyzf`.
+  // +optional
+  optional string reportingInstance = 15;
+}
+
+// EventList is a list of events.
+message EventList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of events
+  repeated Event items = 2;
+}
+
+// EventSeries contains information on a series of events, i.e. something that was/is happening
+// continuously for some time.
+message EventSeries {
+  // Number of occurrences in this series up to the last heartbeat time
+  optional int32 count = 1;
+
+  // Time of the last occurrence observed
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
+}
+
+// EventSource contains information for an event.
+message EventSource {
+  // Component from which the event is generated.
+  // +optional
+  optional string component = 1;
+
+  // Node name on which the event is generated.
+  // +optional
+  optional string host = 2;
+}
+
+// ExecAction describes a "run in container" action.
+message ExecAction {
+  // Command is the command line to execute inside the container, the working directory for the
+  // command is root ('/') in the container's filesystem. The command is simply exec'd; it is
+  // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+  // a shell, you need to explicitly call out to that shell.
+  // Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+  // +optional
+  // +listType=atomic
+  repeated string command = 1;
+}
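+
+// An illustrative ExecAction; the shell is invoked explicitly, since the
+// command is exec'd directly as described above (command is an example only):
+//
+// 	{"command": ["/bin/sh", "-c", "cat /tmp/healthy"]}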
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
+message FCVolumeSource {
+  // targetWWNs is Optional: FC target worldwide names (WWNs)
+  // +optional
+  // +listType=atomic
+  repeated string targetWWNs = 1;
+
+  // lun is Optional: FC target lun number
+  // +optional
+  optional int32 lun = 2;
+
+  // fsType is the filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 3;
+
+  // readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 4;
+
+  // wwids Optional: FC volume world wide identifiers (wwids)
+  // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+  // +optional
+  // +listType=atomic
+  repeated string wwids = 5;
+}
+
+// FlexPersistentVolumeSource represents a generic persistent volume resource that is
+// provisioned/attached using an exec based plugin.
+message FlexPersistentVolumeSource {
+  // driver is the name of the driver to use for this volume.
+  optional string driver = 1;
+
+  // fsType is the Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+  // +optional
+  optional string fsType = 2;
+
+  // secretRef is Optional: SecretRef is reference to the secret object containing
+  // sensitive information to pass to the plugin scripts. This may be
+  // empty if no secret object is specified. If the secret object
+  // contains more than one secret, all secrets are passed to the plugin
+  // scripts.
+  // +optional
+  optional SecretReference secretRef = 3;
+
+  // readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 4;
+
+  // options is Optional: this field holds extra command options if any.
+  // +optional
+  map<string, string> options = 5;
+}
+
+// FlexVolume represents a generic volume resource that is
+// provisioned/attached using an exec based plugin.
+message FlexVolumeSource {
+  // driver is the name of the driver to use for this volume.
+  optional string driver = 1;
+
+  // fsType is the filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+  // +optional
+  optional string fsType = 2;
+
+  // secretRef is Optional: secretRef is reference to the secret object containing
+  // sensitive information to pass to the plugin scripts. This may be
+  // empty if no secret object is specified. If the secret object
+  // contains more than one secret, all secrets are passed to the plugin
+  // scripts.
+  // +optional
+  optional LocalObjectReference secretRef = 3;
+
+  // readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 4;
+
+  // options is Optional: this field holds extra command options if any.
+  // +optional
+  map<string, string> options = 5;
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// One and only one of datasetName and datasetUUID should be set.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+message FlockerVolumeSource {
+  // datasetName is the name of the dataset, stored as metadata -> name on the dataset for Flocker.
+  // It should be considered deprecated.
+  // +optional
+  optional string datasetName = 1;
+
+  // datasetUUID is the UUID of the dataset. This is the unique identifier of a Flocker dataset.
+  // +optional
+  optional string datasetUUID = 2;
+}
+
+// Represents a Persistent Disk resource in Google Compute Engine.
+//
+// A GCE PD must exist before mounting to a container. The disk must
+// also be in the same GCE project and zone as the kubelet. A GCE PD
+// can only be mounted as read/write once or read-only many times. GCE
+// PDs support ownership management and SELinux relabeling.
+message GCEPersistentDiskVolumeSource {
+  // pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+  optional string pdName = 1;
+
+  // fsType is filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 2;
+
+  // partition is the partition in the volume that you want to mount.
+  // If omitted, the default is to mount by volume name.
+  // Examples: For volume /dev/sda1, you specify the partition as "1".
+  // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+  // +optional
+  optional int32 partition = 3;
+
+  // readOnly here will force the ReadOnly setting in VolumeMounts.
+  // Defaults to false.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+  // +optional
+  optional bool readOnly = 4;
+}
+
+// GRPCAction specifies an action involving a GRPC service.
+message GRPCAction {
+  // Port number of the gRPC service. Number must be in the range 1 to 65535.
+  optional int32 port = 1;
+
+  // Service is the name of the service to place in the gRPC HealthCheckRequest
+  // (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+  //
+  // If this is not specified, the default behavior is defined by gRPC.
+  // +optional
+  // +default=""
+  optional string service = 2;
+}
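+
+// An illustrative GRPCAction probing port 9090; the service name placed in
+// the HealthCheckRequest is an example only:
+//
+// 	{"port": 9090, "service": "example.Service"}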
+
+// Represents a volume that is populated with the contents of a git repository.
+// Git repo volumes do not support ownership management.
+// Git repo volumes support SELinux relabeling.
+//
+// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+// into the Pod's container.
+message GitRepoVolumeSource {
+  // repository is the URL
+  optional string repository = 1;
+
+  // revision is the commit hash for the specified revision.
+  // +optional
+  optional string revision = 2;
+
+  // directory is the target directory name.
+  // Must not contain or start with '..'.  If '.' is supplied, the volume directory will be the
+  // git repository.  Otherwise, if specified, the volume will contain the git repository in
+  // the subdirectory with the given name.
+  // +optional
+  optional string directory = 3;
+}
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+message GlusterfsPersistentVolumeSource {
+  // endpoints is the endpoint name that details Glusterfs topology.
+  // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+  optional string endpoints = 1;
+
+  // path is the Glusterfs volume path.
+  // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+  optional string path = 2;
+
+  // readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+  // Defaults to false.
+  // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+  // +optional
+  optional bool readOnly = 3;
+
+  // endpointsNamespace is the namespace that contains the Glusterfs endpoint.
+  // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC.
+  // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+  // +optional
+  optional string endpointsNamespace = 4;
+}
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+message GlusterfsVolumeSource {
+  // endpoints is the endpoint name that details Glusterfs topology.
+  // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+  optional string endpoints = 1;
+
+  // path is the Glusterfs volume path.
+  // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+  optional string path = 2;
+
+  // readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+  // Defaults to false.
+  // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+  // +optional
+  optional bool readOnly = 3;
+}
+
+// HTTPGetAction describes an action based on HTTP Get requests.
+message HTTPGetAction {
+  // Path to access on the HTTP server.
+  // +optional
+  optional string path = 1;
+
+  // Name or number of the port to access on the container.
+  // Number must be in the range 1 to 65535.
+  // Name must be an IANA_SVC_NAME.
+  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2;
+
+  // Host name to connect to, defaults to the pod IP. You probably want to set
+  // "Host" in httpHeaders instead.
+  // +optional
+  optional string host = 3;
+
+  // Scheme to use for connecting to the host.
+  // Defaults to HTTP.
+  // +optional
+  optional string scheme = 4;
+
+  // Custom headers to set in the request. HTTP allows repeated headers.
+  // +optional
+  // +listType=atomic
+  repeated HTTPHeader httpHeaders = 5;
+}
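+
+// An illustrative HTTPGetAction against the pod IP (host omitted), with one
+// custom header; path, port, and header values are examples only:
+//
+// 	{"path": "/healthz", "port": 8080, "scheme": "HTTP",
+// 	 "httpHeaders": [{"name": "X-Probe", "value": "liveness"}]}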
+
+// HTTPHeader describes a custom header to be used in HTTP probes
+message HTTPHeader {
+  // The header field name.
+  // This will be canonicalized upon output, so case-variant names will be understood as the same header.
+  optional string name = 1;
+
+  // The header field value
+  optional string value = 2;
+}
+
+// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+// pod's hosts file.
+message HostAlias {
+  // IP address of the host file entry.
+  // +required
+  optional string ip = 1;
+
+  // Hostnames for the above IP address.
+  // +listType=atomic
+  repeated string hostnames = 2;
+}
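+
+// An illustrative HostAlias mapping two hostnames to one IP in the pod's
+// hosts file (values are examples only):
+//
+// 	{"ip": "10.0.0.10", "hostnames": ["db.local", "db"]}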
+
+// HostIP represents a single IP address allocated to the host.
+message HostIP {
+  // IP is the IP address assigned to the host
+  // +required
+  optional string ip = 1;
+}
+
+// Represents a host path mapped into a pod.
+// Host path volumes do not support ownership management or SELinux relabeling.
+message HostPathVolumeSource {
+  // path of the directory on the host.
+  // If the path is a symlink, it will follow the link to the real path.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+  optional string path = 1;
+
+  // type for HostPath Volume
+  // Defaults to ""
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+  // +optional
+  optional string type = 2;
+}
+
+// ISCSIPersistentVolumeSource represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+message ISCSIPersistentVolumeSource {
+  // targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+  // is other than default (typically TCP ports 860 and 3260).
+  optional string targetPortal = 1;
+
+  // iqn is Target iSCSI Qualified Name.
+  optional string iqn = 2;
+
+  // lun is iSCSI Target Lun number.
+  optional int32 lun = 3;
+
+  // iscsiInterface is the interface Name that uses an iSCSI transport.
+  // Defaults to 'default' (tcp).
+  // +optional
+  // +default="default"
+  optional string iscsiInterface = 4;
+
+  // fsType is the filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 5;
+
+  // readOnly here will force the ReadOnly setting in VolumeMounts.
+  // Defaults to false.
+  // +optional
+  optional bool readOnly = 6;
+
+  // portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port
+  // is other than default (typically TCP ports 860 and 3260).
+  // +optional
+  // +listType=atomic
+  repeated string portals = 7;
+
+  // chapAuthDiscovery defines whether iSCSI Discovery CHAP authentication is supported
+  // +optional
+  optional bool chapAuthDiscovery = 8;
+
+  // chapAuthSession defines whether iSCSI Session CHAP authentication is supported
+  // +optional
+  optional bool chapAuthSession = 11;
+
+  // secretRef is the CHAP Secret for iSCSI target and initiator authentication
+  // +optional
+  optional SecretReference secretRef = 10;
+
+  // initiatorName is the custom iSCSI Initiator Name.
+  // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+  // <target portal>:<volume name> will be created for the connection.
+  // +optional
+  optional string initiatorName = 12;
+}
+
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+message ISCSIVolumeSource {
+  // targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+  // is other than default (typically TCP ports 860 and 3260).
+  optional string targetPortal = 1;
+
+  // iqn is the target iSCSI Qualified Name.
+  optional string iqn = 2;
+
+  // lun represents iSCSI Target Lun number.
+  optional int32 lun = 3;
+
+  // iscsiInterface is the interface Name that uses an iSCSI transport.
+  // Defaults to 'default' (tcp).
+  // +optional
+  // +default="default"
+  optional string iscsiInterface = 4;
+
+  // fsType is the filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 5;
+
+  // readOnly here will force the ReadOnly setting in VolumeMounts.
+  // Defaults to false.
+  // +optional
+  optional bool readOnly = 6;
+
+  // portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+  // is other than default (typically TCP ports 860 and 3260).
+  // +optional
+  // +listType=atomic
+  repeated string portals = 7;
+
+  // chapAuthDiscovery defines whether iSCSI Discovery CHAP authentication is supported
+  // +optional
+  optional bool chapAuthDiscovery = 8;
+
+  // chapAuthSession defines whether iSCSI Session CHAP authentication is supported
+  // +optional
+  optional bool chapAuthSession = 11;
+
+  // secretRef is the CHAP Secret for iSCSI target and initiator authentication
+  // +optional
+  optional LocalObjectReference secretRef = 10;
+
+  // initiatorName is the custom iSCSI Initiator Name.
+  // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+  // <target portal>:<volume name> will be created for the connection.
+  // +optional
+  optional string initiatorName = 12;
+}
+
+// ImageVolumeSource represents an image volume resource.
+message ImageVolumeSource {
+  // Required: Image or artifact reference to be used.
+  // Behaves in the same way as pod.spec.containers[*].image.
+  // Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+  // More info: https://kubernetes.io/docs/concepts/containers/images
+  // This field is optional to allow higher level config management to default or override
+  // container images in workload controllers like Deployments and StatefulSets.
+  // +optional
+  optional string reference = 1;
+
+  // Policy for pulling OCI objects. Possible values are:
+  // Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+  // Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+  // IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+  // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+  // +optional
+  optional string pullPolicy = 2;
+}
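+
+// An illustrative ImageVolumeSource mounting an OCI artifact, pulled only
+// when absent on disk; the reference is an example only:
+//
+// 	{"reference": "registry.example/data/config:v1", "pullPolicy": "IfNotPresent"}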
+
+// Maps a string key to a path within a volume.
+message KeyToPath {
+  // key is the key to project.
+  optional string key = 1;
+
+  // path is the relative path of the file to map the key to.
+  // May not be an absolute path.
+  // May not contain the path element '..'.
+  // May not start with the string '..'.
+  optional string path = 2;
+
+  // mode is Optional: mode bits used to set permissions on this file.
+  // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+  // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+  // If not specified, the volume defaultMode will be used.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and the result can be other mode bits set.
+  // +optional
+  optional int32 mode = 3;
+}
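+
+// An illustrative KeyToPath projecting key "config.yaml" to a relative path
+// with 0600 permissions (384 decimal); values are examples only:
+//
+// 	{"key": "config.yaml", "path": "app/config.yaml", "mode": 384}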
+
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
+message Lifecycle {
+  // PostStart is called immediately after a container is created. If the handler fails,
+  // the container is terminated and restarted according to its restart policy.
+  // Other management of the container blocks until the hook completes.
+  // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+  // +optional
+  optional LifecycleHandler postStart = 1;
+
+  // PreStop is called immediately before a container is terminated due to an
+  // API request or management event such as liveness/startup probe failure,
+  // preemption, resource contention, etc. The handler is not called if the
+  // container crashes or exits. The Pod's termination grace period countdown begins before the
+  // PreStop hook is executed. Regardless of the outcome of the handler, the
+  // container will eventually terminate within the Pod's termination grace
+  // period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+  // or until the termination grace period is reached.
+  // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+  // +optional
+  optional LifecycleHandler preStop = 2;
+}
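+
+// An illustrative Lifecycle with only a preStop hook that runs a drain
+// script before termination; the command is an example only:
+//
+// 	{"preStop": {"exec": {"command": ["/bin/sh", "-c", "/drain.sh"]}}}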
+
+// LifecycleHandler defines a specific action that should be taken in a lifecycle
+// hook. One and only one of the fields, except TCPSocket, must be specified.
+message LifecycleHandler {
+  // Exec specifies a command to execute in the container.
+  // +optional
+  optional ExecAction exec = 1;
+
+  // HTTPGet specifies an HTTP GET request to perform.
+  // +optional
+  optional HTTPGetAction httpGet = 2;
+
+  // Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+  // for backward compatibility. There is no validation of this field and
+  // lifecycle hooks will fail at runtime when it is specified.
+  // +optional
+  optional TCPSocketAction tcpSocket = 3;
+
+  // Sleep represents a duration that the container should sleep.
+  // +featureGate=PodLifecycleSleepAction
+  // +optional
+  optional SleepAction sleep = 4;
+}
+
+// LimitRange sets resource usage limits for each kind of resource in a Namespace.
+message LimitRange {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the limits enforced.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional LimitRangeSpec spec = 2;
+}
+
+// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
+message LimitRangeItem {
+  // Type of resource that this limit applies to.
+  optional string type = 1;
+
+  // Max usage constraints on this kind by resource name.
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> max = 2;
+
+  // Min usage constraints on this kind by resource name.
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> min = 3;
+
+  // Default resource requirement limit value by resource name if resource limit is omitted.
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> default = 4;
+
+  // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> defaultRequest = 5;
+
+  // MaxLimitRequestRatio, if specified, requires that the named resource have a request and limit that are both non-zero, where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> maxLimitRequestRatio = 6;
+}
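+
+// An illustrative per-Container LimitRangeItem; quantities are examples only:
+//
+// 	{"type": "Container",
+// 	 "max": {"cpu": "2"},
+// 	 "default": {"cpu": "500m"},
+// 	 "defaultRequest": {"cpu": "250m"}}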
+
+// LimitRangeList is a list of LimitRange items.
+message LimitRangeList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of LimitRange objects.
+  // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+  repeated LimitRange items = 2;
+}
+
+// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
+message LimitRangeSpec {
+  // Limits is the list of LimitRangeItem objects that are enforced.
+  // +listType=atomic
+  repeated LimitRangeItem limits = 1;
+}
+
+// LinuxContainerUser represents user identity information in Linux containers
+message LinuxContainerUser {
+  // UID is the primary uid initially attached to the first process in the container
+  optional int64 uid = 1;
+
+  // GID is the primary gid initially attached to the first process in the container
+  optional int64 gid = 2;
+
+  // SupplementalGroups are the supplemental groups initially attached to the first process in the container
+  // +optional
+  // +listType=atomic
+  repeated int64 supplementalGroups = 3;
+}
+
+// List holds a list of objects, which may not be known by the server.
+message List {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of objects
+  repeated .k8s.io.apimachinery.pkg.runtime.RawExtension items = 2;
+}
+
+// LoadBalancerIngress represents the status of a load-balancer ingress point:
+// traffic intended for the service should be sent to an ingress point.
+message LoadBalancerIngress {
+  // IP is set for load-balancer ingress points that are IP based
+  // (typically GCE or OpenStack load-balancers)
+  // +optional
+  optional string ip = 1;
+
+  // Hostname is set for load-balancer ingress points that are DNS based
+  // (typically AWS load-balancers)
+  // +optional
+  optional string hostname = 2;
+
+  // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified.
+  // Setting this to "VIP" indicates that traffic is delivered to the node with
+  // the destination set to the load-balancer's IP and port.
+  // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with
+  // the destination set to the node's IP and node port or the pod's IP and port.
+  // Service implementations may use this information to adjust traffic routing.
+  // +optional
+  optional string ipMode = 3;
+
+  // Ports is a list of records of service ports
+  // If used, every port defined in the service should have an entry in it
+  // +listType=atomic
+  // +optional
+  repeated PortStatus ports = 4;
+}
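+
+// Editor's note: a hedged illustration, not part of the generated file. A
+// Service load-balancer status using ip together with ipMode could read:
+//
+//   status:
+//     loadBalancer:
+//       ingress:
+//       - ip: 192.0.2.10        # example address (TEST-NET-1)
+//         ipMode: Proxy         # traffic is redirected toward node/pod IPs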
+
+// LoadBalancerStatus represents the status of a load-balancer.
+message LoadBalancerStatus {
+  // Ingress is a list containing ingress points for the load-balancer.
+  // Traffic intended for the service should be sent to these ingress points.
+  // +optional
+  // +listType=atomic
+  repeated LoadBalancerIngress ingress = 1;
+}
+
+// LocalObjectReference contains enough information to let you locate the
+// referenced object inside the same namespace.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+//  1. Invalid usage help.  It is impossible to add specific help for individual usage.  In most embedded usages, there are particular
+//     restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+//     Those cannot be well described when embedded.
+//  2. Inconsistent validation.  Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+//  3. We cannot easily change it.  Because this type is embedded in many locations, updates to this type
+//     will affect numerous schemas.  Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
+// +structType=atomic
+message LocalObjectReference {
+  // Name of the referent.
+  // This field is effectively required, but due to backwards compatibility is
+  // allowed to be empty. Instances of this type with an empty value here are
+  // almost certainly wrong.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+  // +optional
+  // +default=""
+  // +kubebuilder:default=""
+  // TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+  optional string name = 1;
+}
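+
+// Editor's note: a hedged illustration, not part of the generated file.
+// LocalObjectReference typically surfaces as a bare name in manifests, e.g.:
+//
+//   imagePullSecrets:
+//   - name: registry-creds      # hypothetical Secret in the same namespace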
+
+// Local represents directly-attached storage with node affinity
+message LocalVolumeSource {
+  // path is the full path to the volume on the node.
+  // It can be either a directory or block device (disk, partition, ...).
+  optional string path = 1;
+
+  // fsType is the filesystem type to mount.
+  // It applies only when the Path is a block device.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified.
+  // +optional
+  optional string fsType = 2;
+}
+
+// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation
+message ModifyVolumeStatus {
+  // targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled
+  optional string targetVolumeAttributesClassName = 1;
+
+  // status is the status of the ControllerModifyVolume operation. It can be in any of following states:
+  //  - Pending
+  //    Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as
+  //    the specified VolumeAttributesClass not existing.
+  //  - InProgress
+  //    InProgress indicates that the volume is being modified.
+  //  - Infeasible
+  //   Infeasible indicates that the request has been rejected as invalid by the CSI driver. To
+  // 	  resolve the error, a valid VolumeAttributesClass needs to be specified.
+  // Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.
+  optional string status = 2;
+}
+
+// Represents an NFS mount that lasts the lifetime of a pod.
+// NFS volumes do not support ownership management or SELinux relabeling.
+message NFSVolumeSource {
+  // server is the hostname or IP address of the NFS server.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+  optional string server = 1;
+
+  // path that is exported by the NFS server.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+  optional string path = 2;
+
+  // readOnly here will force the NFS export to be mounted with read-only permissions.
+  // Defaults to false.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+  // +optional
+  optional bool readOnly = 3;
+}
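+
+// Editor's note: a hedged illustration, not part of the generated file. An
+// NFS volume in a pod spec maps onto these fields as:
+//
+//   volumes:
+//   - name: shared-data         # hypothetical volume name
+//     nfs:
+//       server: nfs.example.com # hostname or IP of the NFS server
+//       path: /exports/data
+//       readOnly: true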
+
+// Namespace provides a scope for Names.
+// Use of multiple namespaces is optional.
+message Namespace {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the behavior of the Namespace.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional NamespaceSpec spec = 2;
+
+  // Status describes the current status of a Namespace.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional NamespaceStatus status = 3;
+}
+
+// NamespaceCondition contains details about state of namespace.
+message NamespaceCondition {
+  // Type of namespace controller condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+  // Unique, one-word, CamelCase reason for the condition's last transition.
+  // +optional
+  optional string reason = 5;
+
+  // Human-readable message indicating details about last transition.
+  // +optional
+  optional string message = 6;
+}
+
+// NamespaceList is a list of Namespaces.
+message NamespaceList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of Namespace objects.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+  repeated Namespace items = 2;
+}
+
+// NamespaceSpec describes the attributes on a Namespace.
+message NamespaceSpec {
+  // Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
+  // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
+  // +optional
+  // +listType=atomic
+  repeated string finalizers = 1;
+}
+
+// NamespaceStatus is information about the current status of a Namespace.
+message NamespaceStatus {
+  // Phase is the current lifecycle phase of the namespace.
+  // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
+  // +optional
+  optional string phase = 1;
+
+  // Represents the latest available observations of a namespace's current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  repeated NamespaceCondition conditions = 2;
+}
+
+// Node is a worker node in Kubernetes.
+// Each node will have a unique identifier in the cache (i.e. in etcd).
+message Node {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the behavior of a node.
+  // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional NodeSpec spec = 2;
+
+  // Most recently observed status of the node.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional NodeStatus status = 3;
+}
+
+// NodeAddress contains information for the node's address.
+message NodeAddress {
+  // Node address type, one of Hostname, ExternalIP or InternalIP.
+  optional string type = 1;
+
+  // The node address.
+  optional string address = 2;
+}
+
+// Node affinity is a group of node affinity scheduling rules.
+message NodeAffinity {
+  // If the affinity requirements specified by this field are not met at
+  // scheduling time, the pod will not be scheduled onto the node.
+  // If the affinity requirements specified by this field cease to be met
+  // at some point during pod execution (e.g. due to an update), the system
+  // may or may not try to eventually evict the pod from its node.
+  // +optional
+  optional NodeSelector requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+  // The scheduler will prefer to schedule pods to nodes that satisfy
+  // the affinity expressions specified by this field, but it may choose
+  // a node that violates one or more of the expressions. The node that is
+  // most preferred is the one with the greatest sum of weights, i.e.
+  // for each node that meets all of the scheduling requirements (resource
+  // request, requiredDuringScheduling affinity expressions, etc.),
+  // compute a sum by iterating through the elements of this field and adding
+  // "weight" to the sum if the node matches the corresponding matchExpressions; the
+  // node(s) with the highest sum are the most preferred.
+  // +optional
+  // +listType=atomic
+  repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
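+
+// Editor's note: a hedged illustration, not part of the generated file. The
+// weight-summing behavior described above is exercised by a spec such as:
+//
+//   affinity:
+//     nodeAffinity:
+//       preferredDuringSchedulingIgnoredDuringExecution:
+//       - weight: 80            # added to a node's score if the term matches
+//         preference:
+//           matchExpressions:
+//           - key: disktype     # hypothetical label
+//             operator: In
+//             values: ["ssd"]
+//       - weight: 20
+//         preference:
+//           matchExpressions:
+//           - key: topology.kubernetes.io/zone
+//             operator: In
+//             values: ["zone-a"]
+//   # A node matching both terms scores 100; matching only the first, 80.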
+
+// NodeCondition contains condition information for a node.
+message NodeCondition {
+  // Type of node condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time we got an update on a given condition.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+  // (brief) reason for the condition's last transition.
+  // +optional
+  optional string reason = 5;
+
+  // Human readable message indicating details about last transition.
+  // +optional
+  optional string message = 6;
+}
+
+// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
+// This API is deprecated since 1.22
+message NodeConfigSource {
+  // ConfigMap is a reference to a Node's ConfigMap
+  optional ConfigMapNodeConfigSource configMap = 2;
+}
+
+// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
+message NodeConfigStatus {
+  // Assigned reports the checkpointed config the node will try to use.
+  // When Node.Spec.ConfigSource is updated, the node checkpoints the associated
+  // config payload to local disk, along with a record indicating intended
+  // config. The node refers to this record to choose its config checkpoint, and
+  // reports this record in Assigned. Assigned only updates in the status after
+  // the record has been checkpointed to disk. When the Kubelet is restarted,
+  // it tries to make the Assigned config the Active config by loading and
+  // validating the checkpointed payload identified by Assigned.
+  // +optional
+  optional NodeConfigSource assigned = 1;
+
+  // Active reports the checkpointed config the node is actively using.
+  // Active will represent either the current version of the Assigned config,
+  // or the current LastKnownGood config, depending on whether attempting to use the
+  // Assigned config results in an error.
+  // +optional
+  optional NodeConfigSource active = 2;
+
+  // LastKnownGood reports the checkpointed config the node will fall back to
+  // when it encounters an error attempting to use the Assigned config.
+  // The Assigned config becomes the LastKnownGood config when the node determines
+  // that the Assigned config is stable and correct.
+  // This is currently implemented as a 10-minute soak period starting when the local
+  // record of Assigned config is updated. If the Assigned config is Active at the end
+  // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
+  // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
+  // because the local default config is always assumed good.
+  // You should not make assumptions about the node's method of determining config stability
+  // and correctness, as this may change or become configurable in the future.
+  // +optional
+  optional NodeConfigSource lastKnownGood = 3;
+
+  // Error describes any problems reconciling the Spec.ConfigSource to the Active config.
+  // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
+  // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
+  // to load or validate the Assigned config, etc.
+  // Errors may occur at different points while syncing config. Earlier errors (e.g. download or
+  // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
+  // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
+  // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
+  // by fixing the config assigned in Spec.ConfigSource.
+  // You can find additional information for debugging by searching the error message in the Kubelet log.
+  // Error is a human-readable description of the error state; machines can check whether or not Error
+  // is empty, but should not rely on the stability of the Error text across Kubelet versions.
+  // +optional
+  optional string error = 4;
+}
+
+// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
+message NodeDaemonEndpoints {
+  // Endpoint on which Kubelet is listening.
+  // +optional
+  optional DaemonEndpoint kubeletEndpoint = 1;
+}
+
+// NodeFeatures describes the set of features implemented by the CRI implementation.
+// The features contained in NodeFeatures should depend only on the CRI implementation,
+// independent of runtime handlers.
+message NodeFeatures {
+  // SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.
+  // +optional
+  optional bool supplementalGroupsPolicy = 1;
+}
+
+// NodeList is the whole list of all Nodes which have been registered with the master.
+message NodeList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of nodes
+  repeated Node items = 2;
+}
+
+// NodeProxyOptions is the query options to a Node's proxy call.
+message NodeProxyOptions {
+  // Path is the URL path to use for the current proxy request to node.
+  // +optional
+  optional string path = 1;
+}
+
+// NodeRuntimeHandler is a set of runtime handler information.
+message NodeRuntimeHandler {
+  // Runtime handler name.
+  // Empty for the default runtime handler.
+  // +optional
+  optional string name = 1;
+
+  // Supported features.
+  // +optional
+  optional NodeRuntimeHandlerFeatures features = 2;
+}
+
+// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.
+message NodeRuntimeHandlerFeatures {
+  // RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
+  // +featureGate=RecursiveReadOnlyMounts
+  // +optional
+  optional bool recursiveReadOnlyMounts = 1;
+
+  // UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.
+  // +featureGate=UserNamespacesSupport
+  // +optional
+  optional bool userNamespaces = 2;
+}
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+// +structType=atomic
+message NodeSelector {
+  // Required. A list of node selector terms. The terms are ORed.
+  // +listType=atomic
+  repeated NodeSelectorTerm nodeSelectorTerms = 1;
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+message NodeSelectorRequirement {
+  // The label key that the selector applies to.
+  optional string key = 1;
+
+  // Represents a key's relationship to a set of values.
+  // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+  optional string operator = 2;
+
+  // An array of string values. If the operator is In or NotIn,
+  // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+  // the values array must be empty. If the operator is Gt or Lt, the values
+  // array must have a single element, which will be interpreted as an integer.
+  // This array is replaced during a strategic merge patch.
+  // +optional
+  // +listType=atomic
+  repeated string values = 3;
+}
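+
+// Editor's note: a hedged illustration, not part of the generated file. The
+// operator/values contract above, including the single-integer Gt/Lt form:
+//
+//   matchExpressions:
+//   - key: kubernetes.io/arch
+//     operator: In                 # values must be non-empty
+//     values: ["amd64", "arm64"]
+//   - key: example.com/cpu-count   # hypothetical label
+//     operator: Gt                 # values must hold exactly one integer
+//     values: ["8"]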
+
+// A null or empty node selector term matches no objects. The requirements of
+// them are ANDed.
+// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+// +structType=atomic
+message NodeSelectorTerm {
+  // A list of node selector requirements by node's labels.
+  // +optional
+  // +listType=atomic
+  repeated NodeSelectorRequirement matchExpressions = 1;
+
+  // A list of node selector requirements by node's fields.
+  // +optional
+  // +listType=atomic
+  repeated NodeSelectorRequirement matchFields = 2;
+}
+
+// NodeSpec describes the attributes that a node is created with.
+message NodeSpec {
+  // PodCIDR represents the pod IP range assigned to the node.
+  // +optional
+  optional string podCIDR = 1;
+
+  // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this
+  // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for
+  // each of IPv4 and IPv6.
+  // +optional
+  // +patchStrategy=merge
+  // +listType=set
+  repeated string podCIDRs = 7;
+
+  // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
+  // +optional
+  optional string providerID = 3;
+
+  // Unschedulable controls node schedulability of new pods. By default, node is schedulable.
+  // More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
+  // +optional
+  optional bool unschedulable = 4;
+
+  // If specified, the node's taints.
+  // +optional
+  // +listType=atomic
+  repeated Taint taints = 5;
+
+  // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed.
+  // +optional
+  optional NodeConfigSource configSource = 6;
+
+  // Deprecated. Not all kubelets will set this field. Remove field after 1.13.
+  // see: https://issues.k8s.io/61966
+  // +optional
+  optional string externalID = 2;
+}
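+
+// Editor's note: a hedged illustration, not part of the generated file. For a
+// dual-stack node, the podCIDR/podCIDRs constraint reads as:
+//
+//   spec:
+//     podCIDR: 10.244.1.0/24
+//     podCIDRs:                 # 0th entry must equal podCIDR
+//     - 10.244.1.0/24
+//     - fd00:10:244:1::/64      # at most one CIDR per IP family
+//     providerID: aws:///us-east-1a/i-0abc123   # hypothetical value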
+
+// NodeStatus is information about the current status of a node.
+message NodeStatus {
+  // Capacity represents the total resources of a node.
+  // More info: https://kubernetes.io/docs/reference/node/node-status/#capacity
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+
+  // Allocatable represents the resources of a node that are available for scheduling.
+  // Defaults to Capacity.
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> allocatable = 2;
+
+  // NodePhase is the recently observed lifecycle phase of the node.
+  // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
+  // The field is never populated, and is now deprecated.
+  // +optional
+  optional string phase = 3;
+
+  // Conditions is an array of current observed node conditions.
+  // More info: https://kubernetes.io/docs/reference/node/node-status/#condition
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  repeated NodeCondition conditions = 4;
+
+  // List of addresses reachable to the node.
+  // Queried from cloud provider, if available.
+  // More info: https://kubernetes.io/docs/reference/node/node-status/#addresses
+  // Note: This field is declared as mergeable, but the merge key is not sufficiently
+  // unique, which can cause data corruption when it is merged. Callers should instead
+  // use a full-replacement patch. See https://pr.k8s.io/79391 for an example.
+  // Consumers should assume that addresses can change during the
+  // lifetime of a Node. However, there are some exceptions where this may not
+  // be possible, such as Pods that inherit a Node's address in its own status or
+  // consumers of the downward API (status.hostIP).
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  repeated NodeAddress addresses = 5;
+
+  // Endpoints of daemons running on the Node.
+  // +optional
+  optional NodeDaemonEndpoints daemonEndpoints = 6;
+
+  // Set of ids/uuids to uniquely identify the node.
+  // More info: https://kubernetes.io/docs/reference/node/node-status/#info
+  // +optional
+  optional NodeSystemInfo nodeInfo = 7;
+
+  // List of container images on this node
+  // +optional
+  // +listType=atomic
+  repeated ContainerImage images = 8;
+
+  // List of attachable volumes in use (mounted) by the node.
+  // +optional
+  // +listType=atomic
+  repeated string volumesInUse = 9;
+
+  // List of volumes that are attached to the node.
+  // +optional
+  // +listType=atomic
+  repeated AttachedVolume volumesAttached = 10;
+
+  // Status of the config assigned to the node via the dynamic Kubelet config feature.
+  // +optional
+  optional NodeConfigStatus config = 11;
+
+  // The available runtime handlers.
+  // +featureGate=RecursiveReadOnlyMounts
+  // +featureGate=UserNamespacesSupport
+  // +optional
+  // +listType=atomic
+  repeated NodeRuntimeHandler runtimeHandlers = 12;
+
+  // Features describes the set of features implemented by the CRI implementation.
+  // +featureGate=SupplementalGroupsPolicy
+  // +optional
+  optional NodeFeatures features = 13;
+}
+
+// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
+message NodeSystemInfo {
+  // MachineID reported by the node. For unique machine identification
+  // in the cluster this field is preferred. Learn more from man(5)
+  // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
+  optional string machineID = 1;
+
+  // SystemUUID reported by the node. For unique machine identification
+  // MachineID is preferred. This field is specific to Red Hat hosts
+  // https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid
+  optional string systemUUID = 2;
+
+  // Boot ID reported by the node.
+  optional string bootID = 3;
+
+  // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
+  optional string kernelVersion = 4;
+
+  // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
+  optional string osImage = 5;
+
+  // ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).
+  optional string containerRuntimeVersion = 6;
+
+  // Kubelet Version reported by the node.
+  optional string kubeletVersion = 7;
+
+  // Deprecated: KubeProxy Version reported by the node.
+  optional string kubeProxyVersion = 8;
+
+  // The Operating System reported by the node
+  optional string operatingSystem = 9;
+
+  // The Architecture reported by the node
+  optional string architecture = 10;
+}
+
+// ObjectFieldSelector selects an APIVersioned field of an object.
+// +structType=atomic
+message ObjectFieldSelector {
+  // Version of the schema the FieldPath is written in terms of, defaults to "v1".
+  // +optional
+  optional string apiVersion = 1;
+
+  // Path of the field to select in the specified API version.
+  optional string fieldPath = 2;
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+//  1. Ignored fields.  It includes many fields which are not generally honored.  For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.
+//  2. Invalid usage help.  It is impossible to add specific help for individual usage.  In most embedded usages, there are particular
+//     restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+//     Those cannot be well described when embedded.
+//  3. Inconsistent validation.  Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+//  4. The fields are both imprecise and overly precise.  Kind is not a precise mapping to a URL. This can produce ambiguity
+//     during interpretation and require a REST mapping.  In most cases, the dependency is on the group,resource tuple
+//     and the version of the actual struct is irrelevant.
+//  5. We cannot easily change it.  Because this type is embedded in many locations, updates to this type
+//     will affect numerous schemas.  Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +structType=atomic
+message ObjectReference {
+  // Kind of the referent.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional string kind = 1;
+
+  // Namespace of the referent.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+  // +optional
+  optional string namespace = 2;
+
+  // Name of the referent.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+  // +optional
+  optional string name = 3;
+
+  // UID of the referent.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+  // +optional
+  optional string uid = 4;
+
+  // API version of the referent.
+  // +optional
+  optional string apiVersion = 5;
+
+  // Specific resourceVersion to which this reference is made, if any.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+  // +optional
+  optional string resourceVersion = 6;
+
+  // If referring to a piece of an object instead of an entire object, this string
+  // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+  // For example, if the object reference is to a container within a pod, this would take on a value like:
+  // "spec.containers{name}" (where "name" refers to the name of the container that triggered
+  // the event) or if no container name is specified "spec.containers[2]" (container with
+  // index 2 in this pod). This syntax is chosen only to have some well-defined way of
+  // referencing a part of an object.
+  // TODO: this design is not final and this field is subject to change in the future.
+  // +optional
+  optional string fieldPath = 7;
+}
+
+// PersistentVolume (PV) is a storage resource provisioned by an administrator.
+// It is analogous to a node.
+// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
+message PersistentVolume {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // spec defines a specification of a persistent volume owned by the cluster.
+  // Provisioned by an administrator.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
+  // +optional
+  optional PersistentVolumeSpec spec = 2;
+
+  // status represents the current information/status for the persistent volume.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
+  // +optional
+  optional PersistentVolumeStatus status = 3;
+}
+
+// PersistentVolumeClaim is a user's request for and claim to a persistent volume
+message PersistentVolumeClaim {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // spec defines the desired characteristics of a volume requested by a pod author.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+  // +optional
+  optional PersistentVolumeClaimSpec spec = 2;
+
+  // status represents the current information/status of a persistent volume claim.
+  // Read-only.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+  // +optional
+  optional PersistentVolumeClaimStatus status = 3;
+}
+
+// PersistentVolumeClaimCondition contains details about state of pvc
+message PersistentVolumeClaimCondition {
+  // Type is the type of the condition.
+  // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about
+  optional string type = 1;
+
+  // Status is the status of the condition.
+  // Can be True, False, Unknown.
+  // More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required
+  optional string status = 2;
+
+  // lastProbeTime is the time we probed the condition.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
+
+  // lastTransitionTime is the time the condition transitioned from one status to another.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+  // reason is a unique, short, machine-understandable string that gives the reason
+  // for the condition's last transition. If it reports "Resizing", the underlying
+  // persistent volume is being resized.
+  // +optional
+  optional string reason = 5;
+
+  // message is the human-readable message indicating details about last transition.
+  // +optional
+  optional string message = 6;
+}
+
+// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
+message PersistentVolumeClaimList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is a list of persistent volume claims.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+  repeated PersistentVolumeClaim items = 2;
+}
+
+// PersistentVolumeClaimSpec describes the common attributes of storage devices
+// and allows a Source for provider-specific attributes
+message PersistentVolumeClaimSpec {
+  // accessModes contains the desired access modes the volume should have.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+  // +optional
+  // +listType=atomic
+  repeated string accessModes = 1;
+
+  // selector is a label query over volumes to consider for binding.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
+
+  // resources represents the minimum resources the volume should have.
+  // If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+  // that are lower than previous value but must still be higher than capacity recorded in the
+  // status field of the claim.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+  // +optional
+  optional VolumeResourceRequirements resources = 2;
+
+  // volumeName is the binding reference to the PersistentVolume backing this claim.
+  // +optional
+  optional string volumeName = 3;
+
+  // storageClassName is the name of the StorageClass required by the claim.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+  // +optional
+  optional string storageClassName = 5;
+
+  // volumeMode defines what type of volume is required by the claim.
+  // Value of Filesystem is implied when not included in claim spec.
+  // +optional
+  optional string volumeMode = 6;
+
+  // dataSource field can be used to specify either:
+  // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+  // * An existing PVC (PersistentVolumeClaim)
+  // If the provisioner or an external controller can support the specified data source,
+  // it will create a new volume based on the contents of the specified data source.
+  // When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+  // and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+  // If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+  // +optional
+  optional TypedLocalObjectReference dataSource = 7;
+
+  // dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+  // volume is desired. This may be any object from a non-empty API group (non
+  // core object) or a PersistentVolumeClaim object.
+  // When this field is specified, volume binding will only succeed if the type of
+  // the specified object matches some installed volume populator or dynamic
+  // provisioner.
+  // This field will replace the functionality of the dataSource field and as such
+  // if both fields are non-empty, they must have the same value. For backwards
+  // compatibility, when namespace isn't specified in dataSourceRef,
+  // both fields (dataSource and dataSourceRef) will be set to the same
+  // value automatically if one of them is empty and the other is non-empty.
+  // When namespace is specified in dataSourceRef,
+  // dataSource isn't set to the same value and must be empty.
+  // There are three important differences between dataSource and dataSourceRef:
+  // * While dataSource only allows two specific types of objects, dataSourceRef
+  //   allows any non-core object, as well as PersistentVolumeClaim objects.
+  // * While dataSource ignores disallowed values (dropping them), dataSourceRef
+  //   preserves all values, and generates an error if a disallowed value is
+  //   specified.
+  // * While dataSource only allows local objects, dataSourceRef allows objects
+  //   in any namespaces.
+  // (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+  // (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+  // +optional
+  optional TypedObjectReference dataSourceRef = 8;
+
+  // volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+  // If specified, the CSI driver will create or update the volume with the attributes defined
+  // in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+  // it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+  // will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+  // If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+  // will be set by the persistentvolume controller if it exists.
+  // If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+  // set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+  // exists.
+  // More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+  // (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+  // +featureGate=VolumeAttributesClass
+  // +optional
+  optional string volumeAttributesClassName = 9;
+}
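+
+// Editor's note: a hedged illustration, not part of the generated file.
+// Cloning a PVC from a VolumeSnapshot via dataSourceRef (dataSource is then
+// mirrored automatically while no namespace is set):
+//
+//   spec:
+//     accessModes: ["ReadWriteOnce"]
+//     resources:
+//       requests:
+//         storage: 10Gi
+//     storageClassName: fast     # hypothetical class
+//     dataSourceRef:
+//       apiGroup: snapshot.storage.k8s.io
+//       kind: VolumeSnapshot
+//       name: nightly-snap       # hypothetical snapshot name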
+
+// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
+message PersistentVolumeClaimStatus {
+  // phase represents the current phase of PersistentVolumeClaim.
+  // +optional
+  optional string phase = 1;
+
+  // accessModes contains the actual access modes the volume backing the PVC has.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+  // +optional
+  // +listType=atomic
+  repeated string accessModes = 2;
+
+  // capacity represents the actual resources of the underlying volume.
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 3;
+
+  // conditions is the current set of Conditions of the persistent volume claim. If the underlying
+  // persistent volume is being resized, the Condition will be set to 'Resizing'.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  repeated PersistentVolumeClaimCondition conditions = 4;
+
+  // allocatedResources tracks the resources allocated to a PVC including its capacity.
+  // Key names follow standard Kubernetes label syntax. Valid values are either:
+  // 	* Un-prefixed keys:
+  // 		- storage - the capacity of the volume.
+  // 	* Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
+  // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered
+  // reserved and hence may not be used.
+  //
+  // Capacity reported here may be larger than the actual capacity when a volume expansion operation
+  // is requested.
+  // For storage quota, the larger value from allocatedResources and PVC.spec.resources is used.
+  // If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation.
+  // If a volume expansion capacity request is lowered, allocatedResources is only
+  // lowered if there are no expansion operations in progress and if the actual volume capacity
+  // is equal or lower than the requested capacity.
+  //
+  // A controller that receives PVC update with previously unknown resourceName
+  // should ignore the update for the purpose it was designed. For example - a controller that
+  // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
+  // resources associated with PVC.
+  //
+  // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+  // +featureGate=RecoverVolumeExpansionFailure
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> allocatedResources = 5;
+
+  // allocatedResourceStatuses stores status of resource being resized for the given PVC.
+  // Key names follow standard Kubernetes label syntax. Valid values are either:
+  // 	* Un-prefixed keys:
+  // 		- storage - the capacity of the volume.
+  // 	* Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
+  // Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered
+  // reserved and hence may not be used.
+  //
+  // ClaimResourceStatus can be in any of following states:
+  // 	- ControllerResizeInProgress:
+  // 		State set when resize controller starts resizing the volume in control-plane.
+  // 	- ControllerResizeFailed:
+  // 		State set when resize has failed in resize controller with a terminal error.
+  // 	- NodeResizePending:
+  // 		State set when resize controller has finished resizing the volume but further resizing of
+  // 		volume is needed on the node.
+  // 	- NodeResizeInProgress:
+  // 		State set when kubelet starts resizing the volume.
+  // 	- NodeResizeFailed:
+  // 		State set when resizing has failed in kubelet with a terminal error. Transient errors don't set
+  // 		NodeResizeFailed.
+  // For example: if expanding a PVC for more capacity - this field can be one of the following states:
+  // 	- pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress"
+  //      - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed"
+  //      - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending"
+  //      - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress"
+  //      - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed"
+  // When this field is not set, it means that no resize operation is in progress for the given PVC.
+  //
+  // A controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus
+  // should ignore the update for the purpose it was designed. For example - a controller that
+  // only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid
+  // resources associated with PVC.
+  //
+  // This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+  // +featureGate=RecoverVolumeExpansionFailure
+  // +mapType=granular
+  // +optional
+  map<string, string> allocatedResourceStatuses = 7;
+
+  // currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
+  // When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim
+  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
+  // +featureGate=VolumeAttributesClass
+  // +optional
+  optional string currentVolumeAttributesClassName = 8;
+
+  // ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
+  // When this is unset, there is no ModifyVolume operation being attempted.
+  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
+  // +featureGate=VolumeAttributesClass
+  // +optional
+  optional ModifyVolumeStatus modifyVolumeStatus = 9;
+}
+
+// PersistentVolumeClaimTemplate is used to produce
+// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
+message PersistentVolumeClaimTemplate {
+  // May contain labels and annotations that will be copied into the PVC
+  // when creating it. No other fields are allowed and will be rejected during
+  // validation.
+  //
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The specification for the PersistentVolumeClaim. The entire content is
+  // copied unchanged into the PVC that gets created from this
+  // template. The same fields as in a PersistentVolumeClaim
+  // are also valid here.
+  optional PersistentVolumeClaimSpec spec = 2;
+}
+
+// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
+// This volume finds the bound PV and mounts that volume for the pod. A
+// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
+// type of volume that is owned by someone else (the system).
+message PersistentVolumeClaimVolumeSource {
+  // claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+  optional string claimName = 1;
+
+  // readOnly will force the ReadOnly setting in VolumeMounts.
+  // Default false.
+  // +optional
+  optional bool readOnly = 2;
+}
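+
+// Editor's note: a hedged illustration, not part of the generated file. A pod
+// mounting a bound claim through this source:
+//
+//   volumes:
+//   - name: data                # hypothetical volume name
+//     persistentVolumeClaim:
+//       claimName: my-claim     # PVC in the pod's namespace
+//       readOnly: false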
+
+// PersistentVolumeList is a list of PersistentVolume items.
+message PersistentVolumeList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is a list of persistent volumes.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
+  repeated PersistentVolume items = 2;
+}
+
+// PersistentVolumeSource is similar to VolumeSource but meant for the
+// administrator who creates PVs. Exactly one of its members must be set.
+message PersistentVolumeSource {
+  // gcePersistentDisk represents a GCE Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+  // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+  // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+  // +optional
+  optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1;
+
+  // awsElasticBlockStore represents an AWS Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod.
+  // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+  // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+  // +optional
+  optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2;
+
+  // hostPath represents a directory on the host.
+  // Provisioned by a developer or tester.
+  // This is useful for single-node development and testing only!
+  // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+  // +optional
+  optional HostPathVolumeSource hostPath = 3;
+
+  // glusterfs represents a Glusterfs volume that is attached to a host and
+  // exposed to the pod. Provisioned by an admin.
+  // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+  // More info: https://examples.k8s.io/volumes/glusterfs/README.md
+  // +optional
+  optional GlusterfsPersistentVolumeSource glusterfs = 4;
+
+  // nfs represents an NFS mount on the host. Provisioned by an admin.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+  // +optional
+  optional NFSVolumeSource nfs = 5;
+
+  // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+  // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md
+  // +optional
+  optional RBDPersistentVolumeSource rbd = 6;
+
+  // iscsi represents an ISCSI Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+  // +optional
+  optional ISCSIPersistentVolumeSource iscsi = 7;
+
+  // cinder represents a cinder volume attached and mounted on kubelets host machine.
+  // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+  // are redirected to the cinder.csi.openstack.org CSI driver.
+  // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+  // +optional
+  optional CinderPersistentVolumeSource cinder = 8;
+
+  // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+  // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
+  // +optional
+  optional CephFSPersistentVolumeSource cephfs = 9;
+
+  // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+  // +optional
+  optional FCVolumeSource fc = 10;
+
+  // flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running.
+  // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
+  // +optional
+  optional FlockerVolumeSource flocker = 11;
+
+  // flexVolume represents a generic volume resource that is
+  // provisioned/attached using an exec based plugin.
+  // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+  // +optional
+  optional FlexPersistentVolumeSource flexVolume = 12;
+
+  // azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+  // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+  // are redirected to the file.csi.azure.com CSI driver.
+  // +optional
+  optional AzureFilePersistentVolumeSource azureFile = 13;
+
+  // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+  // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+  // are redirected to the csi.vsphere.vmware.com CSI driver.
+  // +optional
+  optional VsphereVirtualDiskVolumeSource vsphereVolume = 14;
+
+  // quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+  // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+  // +optional
+  optional QuobyteVolumeSource quobyte = 15;
+
+  // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+  // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+  // are redirected to the disk.csi.azure.com CSI driver.
+  // +optional
+  optional AzureDiskVolumeSource azureDisk = 16;
+
+  // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+  // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
+  optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17;
+
+  // portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+  // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+  // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+  // is on.
+  // +optional
+  optional PortworxVolumeSource portworxVolume = 18;
+
+  // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+  // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
+  // +optional
+  optional ScaleIOPersistentVolumeSource scaleIO = 19;
+
+  // local represents directly-attached storage with node affinity
+  // +optional
+  optional LocalVolumeSource local = 20;
+
+  // storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod.
+  // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
+  // More info: https://examples.k8s.io/volumes/storageos/README.md
+  // +optional
+  optional StorageOSPersistentVolumeSource storageos = 21;
+
+  // csi represents storage that is handled by an external CSI driver.
+  // +optional
+  optional CSIPersistentVolumeSource csi = 22;
+}
+
+// PersistentVolumeSpec is the specification of a persistent volume.
+message PersistentVolumeSpec {
+  // capacity is the description of the persistent volume's resources and capacity.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+
+  // persistentVolumeSource is the actual volume backing the persistent volume.
+  optional PersistentVolumeSource persistentVolumeSource = 2;
+
+  // accessModes contains all ways the volume can be mounted.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
+  // +optional
+  // +listType=atomic
+  repeated string accessModes = 3;
+
+  // claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+  // Expected to be non-nil when bound.
+  // claim.VolumeName is the authoritative bind between PV and PVC.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
+  // +optional
+  // +structType=granular
+  optional ObjectReference claimRef = 4;
+
+  // persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim.
+  // Valid options are Retain (default for manually created PersistentVolumes), Delete (default
+  // for dynamically provisioned PersistentVolumes), and Recycle (deprecated).
+  // Recycle must be supported by the volume plugin underlying this PersistentVolume.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
+  // +optional
+  optional string persistentVolumeReclaimPolicy = 5;
+
+  // storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value
+  // means that this volume does not belong to any StorageClass.
+  // +optional
+  optional string storageClassName = 6;
+
+  // mountOptions is the list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
+  // simply fail if one is invalid.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
+  // +optional
+  // +listType=atomic
+  repeated string mountOptions = 7;
+
+  // volumeMode defines if a volume is intended to be used with a formatted filesystem
+  // or to remain in raw block state. Value of Filesystem is implied when not included in spec.
+  // +optional
+  optional string volumeMode = 8;
+
+  // nodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+  // This field influences the scheduling of pods that use this volume.
+  // +optional
+  optional VolumeNodeAffinity nodeAffinity = 9;
+
+  // Name of VolumeAttributesClass to which this persistent volume belongs. Empty value
+  // is not allowed. When this field is not set, it indicates that this volume does not belong to any
+  // VolumeAttributesClass. This field is mutable and can be changed by the CSI driver
+  // after a volume has been updated successfully to a new class.
+  // For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
+  // PersistentVolumeClaims during the binding process.
+  // This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
+  // +featureGate=VolumeAttributesClass
+  // +optional
+  optional string volumeAttributesClassName = 10;
+}
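+
+// Editor's note: a hedged illustration, not part of the generated file. A
+// local PersistentVolume tying together capacity, the local source defined
+// earlier, and the nodeAffinity constraint:
+//
+//   spec:
+//     capacity:
+//       storage: 100Gi
+//     accessModes: ["ReadWriteOnce"]
+//     persistentVolumeReclaimPolicy: Retain
+//     storageClassName: local-storage   # hypothetical class
+//     local:
+//       path: /mnt/disks/ssd0
+//     nodeAffinity:
+//       required:
+//         nodeSelectorTerms:
+//         - matchExpressions:
+//           - key: kubernetes.io/hostname
+//             operator: In
+//             values: ["node-1"]        # hypothetical node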
+
+// PersistentVolumeStatus is the current status of a persistent volume.
+message PersistentVolumeStatus {
+  // phase indicates if a volume is available, bound to a claim, or released by a claim.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
+  // +optional
+  optional string phase = 1;
+
+  // message is a human-readable message indicating details about why the volume is in this state.
+  // +optional
+  optional string message = 2;
+
+  // reason is a brief CamelCase string that describes any failure and is meant
+  // for machine parsing and tidy display in the CLI.
+  // +optional
+  optional string reason = 3;
+
+  // lastPhaseTransitionTime is the time the phase transitioned from one to another
+  // and automatically resets to the current time every time a volume phase transitions.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastPhaseTransitionTime = 4;
+}
+
+// Represents a Photon Controller persistent disk resource.
+message PhotonPersistentDiskVolumeSource {
+  // pdID is the ID that identifies the Photon Controller persistent disk.
+  optional string pdID = 1;
+
+  // fsType is the filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  optional string fsType = 2;
+}
+
+// Pod is a collection of containers that can run on a host. This resource is created
+// by clients and scheduled onto hosts.
+message Pod {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the pod.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional PodSpec spec = 2;
+
+  // Most recently observed status of the pod.
+  // This data may not be up to date.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional PodStatus status = 3;
+}
+
+// Pod affinity is a group of inter pod affinity scheduling rules.
+message PodAffinity {
+  // If the affinity requirements specified by this field are not met at
+  // scheduling time, the pod will not be scheduled onto the node.
+  // If the affinity requirements specified by this field cease to be met
+  // at some point during pod execution (e.g. due to a pod label update), the
+  // system may or may not try to eventually evict the pod from its node.
+  // When there are multiple elements, the lists of nodes corresponding to each
+  // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+  // +optional
+  // +listType=atomic
+  repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+  // The scheduler will prefer to schedule pods to nodes that satisfy
+  // the affinity expressions specified by this field, but it may choose
+  // a node that violates one or more of the expressions. The node that is
+  // most preferred is the one with the greatest sum of weights, i.e.
+  // for each node that meets all of the scheduling requirements (resource
+  // request, requiredDuringScheduling affinity expressions, etc.),
+  // compute a sum by iterating through the elements of this field and adding
+  // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+  // node(s) with the highest sum are the most preferred.
+  // +optional
+  // +listType=atomic
+  repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running
+message PodAffinityTerm {
+  // A label query over a set of resources, in this case pods.
+  // If it's null, this PodAffinityTerm matches with no Pods.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1;
+
+  // namespaces specifies a static list of namespace names that the term applies to.
+  // The term is applied to the union of the namespaces listed in this field
+  // and the ones selected by namespaceSelector.
+  // null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+  // +optional
+  // +listType=atomic
+  repeated string namespaces = 2;
+
+  // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+  // the labelSelector in the specified namespaces, where co-located is defined as running on a node
+  // whose value of the label with key topologyKey matches that of any node on which any of the
+  // selected pods is running.
+  // Empty topologyKey is not allowed.
+  optional string topologyKey = 3;
+
+  // A label query over the set of namespaces that the term applies to.
+  // The term is applied to the union of the namespaces selected by this field
+  // and the ones listed in the namespaces field.
+  // null selector and null or empty namespaces list means "this pod's namespace".
+  // An empty selector ({}) matches all namespaces.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4;
+
+  // MatchLabelKeys is a set of pod label keys to select which pods will
+  // be taken into consideration. The keys are used to lookup values from the
+  // incoming pod labels; those key-value labels are merged with `labelSelector` as `key in (value)`
+  // to select the group of existing pods that will be taken into consideration
+  // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+  // pod labels will be ignored. The default value is empty.
+  // The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+  // Also, matchLabelKeys cannot be set when labelSelector isn't set.
+  // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+  //
+  // +listType=atomic
+  // +optional
+  repeated string matchLabelKeys = 5;
+
+  // MismatchLabelKeys is a set of pod label keys to select which pods will
+  // be taken into consideration. The keys are used to lookup values from the
+  // incoming pod labels; those key-value labels are merged with `labelSelector` as `key notin (value)`
+  // to select the group of existing pods that will be taken into consideration
+  // for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+  // pod labels will be ignored. The default value is empty.
+  // The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+  // Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+  // This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+  //
+  // +listType=atomic
+  // +optional
+  repeated string mismatchLabelKeys = 6;
+}
+
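+// For illustration only: a PodAffinityTerm that, in the generated Go types, selects
+// pods labeled app=web and treats nodes sharing the same hostname label as co-located
+// (assuming corev1 = "k8s.io/api/core/v1" and metav1 =
+// "k8s.io/apimachinery/pkg/apis/meta/v1"; the label values are hypothetical):
+//
+//   term := corev1.PodAffinityTerm{
+//       LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
+//       TopologyKey:   "kubernetes.io/hostname",
+//   }
+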
+// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
+message PodAntiAffinity {
+  // If the anti-affinity requirements specified by this field are not met at
+  // scheduling time, the pod will not be scheduled onto the node.
+  // If the anti-affinity requirements specified by this field cease to be met
+  // at some point during pod execution (e.g. due to a pod label update), the
+  // system may or may not try to eventually evict the pod from its node.
+  // When there are multiple elements, the lists of nodes corresponding to each
+  // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+  // +optional
+  // +listType=atomic
+  repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+  // The scheduler will prefer to schedule pods to nodes that satisfy
+  // the anti-affinity expressions specified by this field, but it may choose
+  // a node that violates one or more of the expressions. The node that is
+  // most preferred is the one with the greatest sum of weights, i.e.
+  // for each node that meets all of the scheduling requirements (resource
+  // request, requiredDuringScheduling anti-affinity expressions, etc.),
+  // compute a sum by iterating through the elements of this field and adding
+  // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+  // node(s) with the highest sum are the most preferred.
+  // +optional
+  // +listType=atomic
+  repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
+
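+// For illustration only: a preferred (soft) anti-affinity rule built from a term like
+// the one sketched above; the scheduler weighs this at 100 but may still violate it:
+//
+//   anti := corev1.PodAntiAffinity{
+//       PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{
+//           Weight:          100,
+//           PodAffinityTerm: term,
+//       }},
+//   }
+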
+// PodAttachOptions is the query options to a Pod's remote attach call.
+// ---
+// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+message PodAttachOptions {
+  // Stdin if true, redirects the standard input stream of the pod for this call.
+  // Defaults to false.
+  // +optional
+  optional bool stdin = 1;
+
+  // Stdout if true indicates that stdout is to be redirected for the attach call.
+  // Defaults to true.
+  // +optional
+  optional bool stdout = 2;
+
+  // Stderr if true indicates that stderr is to be redirected for the attach call.
+  // Defaults to true.
+  // +optional
+  optional bool stderr = 3;
+
+  // TTY if true indicates that a tty will be allocated for the attach call.
+  // This is passed through the container runtime so the tty
+  // is allocated on the worker node by the container runtime.
+  // Defaults to false.
+  // +optional
+  optional bool tty = 4;
+
+  // The container in which to execute the command.
+  // Defaults to only container if there is only one container in the pod.
+  // +optional
+  optional string container = 5;
+}
+
+// PodCondition contains details for the current condition of this pod.
+message PodCondition {
+  // Type is the type of the condition.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+  optional string type = 1;
+
+  // Status is the status of the condition.
+  // Can be True, False, Unknown.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+  optional string status = 2;
+
+  // Last time we probed the condition.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+  // Unique, one-word, CamelCase reason for the condition's last transition.
+  // +optional
+  optional string reason = 5;
+
+  // Human-readable message indicating details about last transition.
+  // +optional
+  optional string message = 6;
+}
+
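+// For illustration only: checking the Ready condition of a pod via the generated Go
+// types (assuming corev1 = "k8s.io/api/core/v1" and an existing *corev1.Pod named pod):
+//
+//   for _, c := range pod.Status.Conditions {
+//       if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
+//           // the pod is ready: all containers are ready and all readiness gates pass
+//       }
+//   }
+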
+// PodDNSConfig defines the DNS parameters of a pod in addition to
+// those generated from DNSPolicy.
+message PodDNSConfig {
+  // A list of DNS name server IP addresses.
+  // This will be appended to the base nameservers generated from DNSPolicy.
+  // Duplicated nameservers will be removed.
+  // +optional
+  // +listType=atomic
+  repeated string nameservers = 1;
+
+  // A list of DNS search domains for host-name lookup.
+  // This will be appended to the base search paths generated from DNSPolicy.
+  // Duplicated search paths will be removed.
+  // +optional
+  // +listType=atomic
+  repeated string searches = 2;
+
+  // A list of DNS resolver options.
+  // This will be merged with the base options generated from DNSPolicy.
+  // Duplicated entries will be removed. Resolution options given in Options
+  // will override those that appear in the base DNSPolicy.
+  // +optional
+  // +listType=atomic
+  repeated PodDNSConfigOption options = 3;
+}
+
+// PodDNSConfigOption defines DNS resolver options of a pod.
+message PodDNSConfigOption {
+  // Name is this DNS resolver option's name.
+  // Required.
+  optional string name = 1;
+
+  // Value is this DNS resolver option's value.
+  // +optional
+  optional string value = 2;
+}
+
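+// For illustration only: a PodDNSConfig adding one resolver and an ndots option, in the
+// generated Go types (assuming ptr = "k8s.io/utils/ptr"; the addresses and search path
+// are hypothetical):
+//
+//   dns := corev1.PodDNSConfig{
+//       Nameservers: []string{"10.0.0.10"},
+//       Searches:    []string{"svc.cluster.local"},
+//       Options:     []corev1.PodDNSConfigOption{{Name: "ndots", Value: ptr.To("2")}},
+//   }
+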
+// PodExecOptions is the query options to a Pod's remote exec call.
+// ---
+// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
+message PodExecOptions {
+  // Redirect the standard input stream of the pod for this call.
+  // Defaults to false.
+  // +optional
+  optional bool stdin = 1;
+
+  // Redirect the standard output stream of the pod for this call.
+  // +optional
+  optional bool stdout = 2;
+
+  // Redirect the standard error stream of the pod for this call.
+  // +optional
+  optional bool stderr = 3;
+
+  // TTY if true indicates that a tty will be allocated for the exec call.
+  // Defaults to false.
+  // +optional
+  optional bool tty = 4;
+
+  // Container in which to execute the command.
+  // Defaults to only container if there is only one container in the pod.
+  // +optional
+  optional string container = 5;
+
+  // Command is the remote command to execute. argv array. Not executed within a shell.
+  // +listType=atomic
+  repeated string command = 6;
+}
+
+// PodIP represents a single IP address allocated to the pod.
+message PodIP {
+  // IP is the IP address assigned to the pod
+  // +required
+  optional string ip = 1;
+}
+
+// PodList is a list of Pods.
+message PodList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of pods.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md
+  repeated Pod items = 2;
+}
+
+// PodLogOptions is the query options for a Pod's logs REST call.
+message PodLogOptions {
+  // The container for which to stream logs. Defaults to only container if there is one container in the pod.
+  // +optional
+  optional string container = 1;
+
+  // Follow the log stream of the pod. Defaults to false.
+  // +optional
+  optional bool follow = 2;
+
+  // Return previous terminated container logs. Defaults to false.
+  // +optional
+  optional bool previous = 3;
+
+  // A relative time in seconds before the current time from which to show logs. If this value
+  // precedes the time a pod was started, only logs since the pod start will be returned.
+  // If this value is in the future, no logs will be returned.
+  // Only one of sinceSeconds or sinceTime may be specified.
+  // +optional
+  optional int64 sinceSeconds = 4;
+
+  // An RFC3339 timestamp from which to show logs. If this value
+  // precedes the time a pod was started, only logs since the pod start will be returned.
+  // If this value is in the future, no logs will be returned.
+  // Only one of sinceSeconds or sinceTime may be specified.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5;
+
+  // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+  // of log output. Defaults to false.
+  // +optional
+  optional bool timestamps = 6;
+
+  // If set, the number of lines from the end of the logs to show. If not specified,
+  // logs are shown from the creation of the container or sinceSeconds or sinceTime.
+  // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All".
+  // +optional
+  optional int64 tailLines = 7;
+
+  // If set, the number of bytes to read from the server before terminating the
+  // log output. This may not display a complete final line of logging, and may return
+  // slightly more or slightly less than the specified limit.
+  // +optional
+  optional int64 limitBytes = 8;
+
+  // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
+  // serving certificate of the backend it is connecting to.  This will make the HTTPS connection between the apiserver
+  // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
+  // kubelet.  If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the
+  // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept
+  // the actual log data coming from the real kubelet).
+  // +optional
+  optional bool insecureSkipTLSVerifyBackend = 9;
+
+  // Specify which container log stream to return to the client.
+  // Acceptable values are "All", "Stdout" and "Stderr". If not specified, "All" is used, and both stdout and stderr
+  // are returned interleaved.
+  // Note that when "TailLines" is specified, "Stream" can only be set to nil or "All".
+  // +featureGate=PodLogsQuerySplitStreams
+  // +optional
+  optional string stream = 10;
+}
+
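+// For illustration only: PodLogOptions for following the last 100 lines of one
+// container's logs with timestamps, in the generated Go types ("app" is a hypothetical
+// container name; ptr = "k8s.io/utils/ptr"):
+//
+//   opts := corev1.PodLogOptions{
+//       Container:  "app",
+//       Follow:     true,
+//       TailLines:  ptr.To[int64](100),
+//       Timestamps: true,
+//   }
+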
+// PodOS defines the OS parameters of a pod.
+message PodOS {
+  // Name is the name of the operating system. The currently supported values are linux and windows.
+  // Additional values may be defined in the future and can be one of:
+  // https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+  // Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+  optional string name = 1;
+}
+
+// PodPortForwardOptions is the query options to a Pod's port forward call
+// when using WebSockets.
+// The `port` query parameter must specify the port or
+// ports (comma separated) to forward over.
+// Port forwarding over SPDY does not use these options. It requires the port
+  // to be passed in the `port` header as part of the request.
+message PodPortForwardOptions {
+  // List of ports to forward
+  // Required when using WebSockets
+  // +optional
+  // +listType=atomic
+  repeated int32 ports = 1;
+}
+
+// PodProxyOptions is the query options to a Pod's proxy call.
+message PodProxyOptions {
+  // Path is the URL path to use for the current proxy request to pod.
+  // +optional
+  optional string path = 1;
+}
+
+// PodReadinessGate contains the reference to a pod condition
+message PodReadinessGate {
+  // ConditionType refers to a condition in the pod's condition list with matching type.
+  optional string conditionType = 1;
+}
+
+// PodResourceClaim references exactly one ResourceClaim, either directly
+// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim
+// for the pod.
+//
+// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
+// Containers that need access to the ResourceClaim reference it with this name.
+message PodResourceClaim {
+  // Name uniquely identifies this resource claim inside the pod.
+  // This must be a DNS_LABEL.
+  optional string name = 1;
+
+  // ResourceClaimName is the name of a ResourceClaim object in the same
+  // namespace as this pod.
+  //
+  // Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+  // be set.
+  optional string resourceClaimName = 3;
+
+  // ResourceClaimTemplateName is the name of a ResourceClaimTemplate
+  // object in the same namespace as this pod.
+  //
+  // The template will be used to create a new ResourceClaim, which will
+  // be bound to this pod. When this pod is deleted, the ResourceClaim
+  // will also be deleted. The pod name and resource name, along with a
+  // generated component, will be used to form a unique name for the
+  // ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
+  //
+  // This field is immutable and no changes will be made to the
+  // corresponding ResourceClaim by the control plane after creating the
+  // ResourceClaim.
+  //
+  // Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+  // be set.
+  optional string resourceClaimTemplateName = 4;
+}
+
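+// For illustration only: a PodResourceClaim naming a template, so the control plane
+// creates (and later garbage-collects) a per-pod ResourceClaim; exactly one of the two
+// name fields may be set ("gpu" and "gpu-template" are hypothetical; ptr =
+// "k8s.io/utils/ptr"):
+//
+//   claim := corev1.PodResourceClaim{
+//       Name:                      "gpu",
+//       ResourceClaimTemplateName: ptr.To("gpu-template"),
+//   }
+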
+// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
+// which references a ResourceClaimTemplate. It stores the generated name for
+// the corresponding ResourceClaim.
+message PodResourceClaimStatus {
+  // Name uniquely identifies this resource claim inside the pod.
+  // This must match the name of an entry in pod.spec.resourceClaims,
+  // which implies that the string must be a DNS_LABEL.
+  optional string name = 1;
+
+  // ResourceClaimName is the name of the ResourceClaim that was
+  // generated for the Pod in the namespace of the Pod. If this is
+  // unset, then generating a ResourceClaim was not necessary. The
+  // pod.spec.resourceClaims entry can be ignored in this case.
+  //
+  // +optional
+  optional string resourceClaimName = 2;
+}
+
+// PodSchedulingGate is associated to a Pod to guard its scheduling.
+message PodSchedulingGate {
+  // Name of the scheduling gate.
+  // Each scheduling gate must have a unique name field.
+  optional string name = 1;
+}
+
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext.  Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
+message PodSecurityContext {
+  // The SELinux context to be applied to all containers.
+  // If unspecified, the container runtime will allocate a random SELinux context for each
+  // container.  May also be set in SecurityContext.  If set in
+  // both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+  // takes precedence for that container.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional SELinuxOptions seLinuxOptions = 1;
+
+  // The Windows specific settings applied to all containers.
+  // If unspecified, the options within a container's SecurityContext will be used.
+  // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // Note that this field cannot be set when spec.os.name is linux.
+  // +optional
+  optional WindowsSecurityContextOptions windowsOptions = 8;
+
+  // The UID to run the entrypoint of the container process.
+  // Defaults to user specified in image metadata if unspecified.
+  // May also be set in SecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence
+  // for that container.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional int64 runAsUser = 2;
+
+  // The GID to run the entrypoint of the container process.
+  // Uses runtime default if unset.
+  // May also be set in SecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence
+  // for that container.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional int64 runAsGroup = 6;
+
+  // Indicates that the container must run as a non-root user.
+  // If true, the Kubelet will validate the image at runtime to ensure that it
+  // does not run as UID 0 (root) and fail to start the container if it does.
+  // If unset or false, no such validation will be performed.
+  // May also be set in SecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // +optional
+  optional bool runAsNonRoot = 3;
+
+  // A list of groups applied to the first process run in each container, in
+  // addition to the container's primary GID and fsGroup (if specified).  If
+  // the SupplementalGroupsPolicy feature is enabled, the
+  // supplementalGroupsPolicy field determines whether these are in addition
+  // to or instead of any group memberships defined in the container image.
+  // If unspecified, no additional groups are added, though group memberships
+  // defined in the container image may still be used, depending on the
+  // supplementalGroupsPolicy field.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  // +listType=atomic
+  repeated int64 supplementalGroups = 4;
+
+  // Defines how supplemental groups of the first container processes are calculated.
+  // Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
+  // (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
+  // and the container runtime must implement support for this feature.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34
+  // +featureGate=SupplementalGroupsPolicy
+  // +optional
+  optional string supplementalGroupsPolicy = 12;
+
+  // A special supplemental group that applies to all containers in a pod.
+  // Some volume types allow the Kubelet to change the ownership of that volume
+  // to be owned by the pod:
+  //
+  // 1. The owning GID will be the FSGroup
+  // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+  // 3. The permission bits are OR'd with rw-rw----
+  //
+  // If unset, the Kubelet will not modify the ownership and permissions of any volume.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional int64 fsGroup = 5;
+
+  // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+  // sysctls (by the container runtime) might fail to launch.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  // +listType=atomic
+  repeated Sysctl sysctls = 7;
+
+  // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+  // before being exposed inside Pod. This field will only apply to
+  // volume types which support fsGroup based ownership (and permissions).
+  // It will have no effect on ephemeral volume types such as secret, configmaps
+  // and emptydir.
+  // Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional string fsGroupChangePolicy = 9;
+
+  // The seccomp options to use by the containers in this pod.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional SeccompProfile seccompProfile = 10;
+
+  // appArmorProfile is the AppArmor options to use by the containers in this pod.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional AppArmorProfile appArmorProfile = 11;
+
+  // seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+  // It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux.
+  // Valid values are "MountOption" and "Recursive".
+  //
+  // "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+  // This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+  //
+  // "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+  // This requires all Pods that share the same volume to use the same SELinux label.
+  // It is not possible to share the same volume among privileged and unprivileged Pods.
+  // Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+  // whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+  // CSIDriver instance. Other volumes are always re-labelled recursively.
+  // "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+  //
+  // If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+  // If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+  // and "Recursive" for all other volumes.
+  //
+  // This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+  //
+  // All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +featureGate=SELinuxChangePolicy
+  // +optional
+  optional string seLinuxChangePolicy = 13;
+}
+
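+// For illustration only: a pod-level security context enforcing non-root execution with
+// a RuntimeDefault seccomp profile, in the generated Go types (the UID/GID values are
+// hypothetical; ptr = "k8s.io/utils/ptr"):
+//
+//   sc := corev1.PodSecurityContext{
+//       RunAsNonRoot:   ptr.To(true),
+//       RunAsUser:      ptr.To[int64](1000),
+//       FSGroup:        ptr.To[int64](2000),
+//       SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
+//   }
+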
+// Describes the class of pods that should avoid this node.
+// Exactly one field should be set.
+message PodSignature {
+  // Reference to controller whose pods should avoid this node.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1;
+}
+
+// PodSpec is a description of a pod.
+message PodSpec {
+  // List of volumes that can be mounted by containers belonging to the pod.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge,retainKeys
+  // +listType=map
+  // +listMapKey=name
+  repeated Volume volumes = 1;
+
+  // List of initialization containers belonging to the pod.
+  // Init containers are executed in order prior to containers being started. If any
+  // init container fails, the pod is considered to have failed and is handled according
+  // to its restartPolicy. The name for an init container or normal container must be
+  // unique among all containers.
+  // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+  // The resourceRequirements of an init container are taken into account during scheduling
+  // by finding the highest request/limit for each resource type, and then using the max
+  // of that value or the sum of the normal containers. Limits are applied to init containers
+  // in a similar fashion.
+  // Init containers cannot currently be added or removed.
+  // Cannot be updated.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=name
+  repeated Container initContainers = 20;
+
+  // List of containers belonging to the pod.
+  // Containers cannot currently be added or removed.
+  // There must be at least one container in a Pod.
+  // Cannot be updated.
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=name
+  repeated Container containers = 2;
+
+  // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
+  // pod to perform user-initiated actions such as debugging. This list cannot be specified when
+  // creating a pod, and it cannot be modified by updating the pod spec. In order to add an
+  // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=name
+  repeated EphemeralContainer ephemeralContainers = 34;
+
+  // Restart policy for all containers within the pod.
+  // One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.
+  // Default to Always.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+  // +optional
+  optional string restartPolicy = 3;
+
+  // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+  // Value must be non-negative integer. The value zero indicates stop immediately via
+  // the kill signal (no opportunity to shut down).
+  // If this value is nil, the default grace period will be used instead.
+  // The grace period is the duration in seconds after the processes running in the pod are sent
+  // a termination signal and the time when the processes are forcibly halted with a kill signal.
+  // Set this value longer than the expected cleanup time for your process.
+  // Defaults to 30 seconds.
+  // +optional
+  optional int64 terminationGracePeriodSeconds = 4;
+
+  // Optional duration in seconds the pod may be active on the node relative to
+  // StartTime before the system will actively try to mark it failed and kill associated containers.
+  // Value must be a positive integer.
+  // +optional
+  optional int64 activeDeadlineSeconds = 5;
+
+  // Set DNS policy for the pod.
+  // Defaults to "ClusterFirst".
+  // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+  // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+  // To have DNS options set along with hostNetwork, you have to specify DNS policy
+  // explicitly to 'ClusterFirstWithHostNet'.
+  // +optional
+  optional string dnsPolicy = 6;
+
+  // NodeSelector is a selector which must be true for the pod to fit on a node.
+  // Selector which must match a node's labels for the pod to be scheduled on that node.
+  // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  // +optional
+  // +mapType=atomic
+  map<string, string> nodeSelector = 7;
+
+  // ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+  // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+  // +optional
+  optional string serviceAccountName = 8;
+
+  // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+  // Deprecated: Use serviceAccountName instead.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional string serviceAccount = 9;
+
+  // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+  // +optional
+  optional bool automountServiceAccountToken = 21;
+
+  // NodeName indicates in which node this pod is scheduled.
+  // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
+  // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
+  // This field should not be used to express a desire for the pod to be scheduled on a specific node.
+  // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
+  // +optional
+  optional string nodeName = 10;
+
+  // Host networking requested for this pod. Use the host's network namespace.
+  // If this option is set, the ports that will be used must be specified.
+  // Default to false.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional bool hostNetwork = 11;
+
+  // Use the host's pid namespace.
+  // Optional: Default to false.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional bool hostPID = 12;
+
+  // Use the host's ipc namespace.
+  // Optional: Default to false.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional bool hostIPC = 13;
+
+  // Share a single process namespace between all of the containers in a pod.
+  // When this is set containers will be able to view and signal processes from other containers
+  // in the same pod, and the first process in each container will not be assigned PID 1.
+  // HostPID and ShareProcessNamespace cannot both be set.
+  // Optional: Default to false.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional bool shareProcessNamespace = 27;
+
+  // SecurityContext holds pod-level security attributes and common container settings.
+  // Optional: Defaults to empty.  See type description for default values of each field.
+  // +optional
+  optional PodSecurityContext securityContext = 14;
+
+  // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+  // If specified, these secrets will be passed to individual puller implementations for them to use.
+  // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=name
+  repeated LocalObjectReference imagePullSecrets = 15;
+
+  // Specifies the hostname of the Pod.
+  // If not specified, the pod's hostname will be set to a system-defined value.
+  // +optional
+  optional string hostname = 16;
+
+  // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+  // If not specified, the pod will not have a domainname at all.
+  // +optional
+  optional string subdomain = 17;
+
+  // If specified, the pod's scheduling constraints
+  // +optional
+  optional Affinity affinity = 18;
+
+  // If specified, the pod will be dispatched by specified scheduler.
+  // If not specified, the pod will be dispatched by default scheduler.
+  // +optional
+  optional string schedulerName = 19;
+
+  // If specified, the pod's tolerations.
+  // +optional
+  // +listType=atomic
+  repeated Toleration tolerations = 22;
+
+  // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+  // file if specified.
+  // +optional
+  // +patchMergeKey=ip
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=ip
+  repeated HostAlias hostAliases = 23;
+
+  // If specified, indicates the pod's priority. "system-node-critical" and
+  // "system-cluster-critical" are two special keywords which indicate the
+  // highest priorities with the former being the highest priority. Any other
+  // name must be defined by creating a PriorityClass object with that name.
+  // If not specified, the pod priority will be default or zero if there is no
+  // default.
+  // +optional
+  optional string priorityClassName = 24;
+
+  // The priority value. Various system components use this field to find the
+  // priority of the pod. When Priority Admission Controller is enabled, it
+  // prevents users from setting this field. The admission controller populates
+  // this field from PriorityClassName.
+  // The higher the value, the higher the priority.
+  // +optional
+  optional int32 priority = 25;
+
+  // Specifies the DNS parameters of a pod.
+  // Parameters specified here will be merged into the generated DNS
+  // configuration based on DNSPolicy.
+  // +optional
+  optional PodDNSConfig dnsConfig = 26;
+
+  // If specified, all readiness gates will be evaluated for pod readiness.
+  // A pod is ready when all its containers are ready AND
+  // all conditions specified in the readiness gates have status equal to "True"
+  // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
+  // +optional
+  // +listType=atomic
+  repeated PodReadinessGate readinessGates = 28;
+
+  // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+  // to run this pod.  If no RuntimeClass resource matches the named class, the pod will not be run.
+  // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+  // empty definition that uses the default runtime handler.
+  // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
+  // +optional
+  optional string runtimeClassName = 29;
+
+  // EnableServiceLinks indicates whether information about services should be injected into pod's
+  // environment variables, matching the syntax of Docker links.
+  // Optional: Defaults to true.
+  // +optional
+  optional bool enableServiceLinks = 30;
+
+  // PreemptionPolicy is the Policy for preempting pods with lower priority.
+  // One of Never, PreemptLowerPriority.
+  // Defaults to PreemptLowerPriority if unset.
+  // +optional
+  optional string preemptionPolicy = 31;
+
+  // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+  // This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+  // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+  // The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+  // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+  // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+  // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> overhead = 32;
+
+  // TopologySpreadConstraints describes how a group of pods ought to spread across topology
+  // domains. Scheduler will schedule pods in a way which abides by the constraints.
+  // All topologySpreadConstraints are ANDed.
+  // +optional
+  // +patchMergeKey=topologyKey
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=topologyKey
+  // +listMapKey=whenUnsatisfiable
+  repeated TopologySpreadConstraint topologySpreadConstraints = 33;
+
+  // If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+  // In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+  // In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+  // If a pod does not have FQDN, this has no effect.
+  // Default to false.
+  // +optional
+  optional bool setHostnameAsFQDN = 35;
+
+  // Specifies the OS of the containers in the pod.
+  // Some pod and container fields are restricted if this is set.
+  //
+  // If the OS field is set to linux, the following fields must be unset:
+  // - securityContext.windowsOptions
+  //
+  // If the OS field is set to windows, the following fields must be unset:
+  // - spec.hostPID
+  // - spec.hostIPC
+  // - spec.hostUsers
+  // - spec.securityContext.appArmorProfile
+  // - spec.securityContext.seLinuxOptions
+  // - spec.securityContext.seccompProfile
+  // - spec.securityContext.fsGroup
+  // - spec.securityContext.fsGroupChangePolicy
+  // - spec.securityContext.sysctls
+  // - spec.shareProcessNamespace
+  // - spec.securityContext.runAsUser
+  // - spec.securityContext.runAsGroup
+  // - spec.securityContext.supplementalGroups
+  // - spec.securityContext.supplementalGroupsPolicy
+  // - spec.containers[*].securityContext.appArmorProfile
+  // - spec.containers[*].securityContext.seLinuxOptions
+  // - spec.containers[*].securityContext.seccompProfile
+  // - spec.containers[*].securityContext.capabilities
+  // - spec.containers[*].securityContext.readOnlyRootFilesystem
+  // - spec.containers[*].securityContext.privileged
+  // - spec.containers[*].securityContext.allowPrivilegeEscalation
+  // - spec.containers[*].securityContext.procMount
+  // - spec.containers[*].securityContext.runAsUser
+  // - spec.containers[*].securityContext.runAsGroup
+  // +optional
+  optional PodOS os = 36;
+
+  // Use the host's user namespace.
+  // Optional: Default to true.
+  // If set to true or not present, the pod will be run in the host user namespace, useful
+  // for when the pod needs a feature only available to the host user namespace, such as
+  // loading a kernel module with CAP_SYS_MODULE.
+  // When set to false, a new userns is created for the pod. Setting false is useful for
+  // mitigating container breakout vulnerabilities while still allowing users to run their
+  // containers as root without actually having root privileges on the host.
+  // This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+  // +k8s:conversion-gen=false
+  // +optional
+  optional bool hostUsers = 37;
+
+  // SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
+  // If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
+  // scheduler will not attempt to schedule the pod.
+  //
+  // SchedulingGates can only be set at pod creation time, and be removed only afterwards.
+  //
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=name
+  // +optional
+  repeated PodSchedulingGate schedulingGates = 38;
+
+  // ResourceClaims defines which ResourceClaims must be allocated
+  // and reserved before the Pod is allowed to start. The resources
+  // will be made available to those containers which consume them
+  // by name.
+  //
+  // This is an alpha field and requires enabling the
+  // DynamicResourceAllocation feature gate.
+  //
+  // This field is immutable.
+  //
+  // +patchMergeKey=name
+  // +patchStrategy=merge,retainKeys
+  // +listType=map
+  // +listMapKey=name
+  // +featureGate=DynamicResourceAllocation
+  // +optional
+  repeated PodResourceClaim resourceClaims = 39;
+
+  // Resources is the total amount of CPU and Memory resources required by all
+  // containers in the pod. It supports specifying Requests and Limits for
+  // "cpu" and "memory" resource names only. ResourceClaims are not supported.
+  //
+  // This field enables fine-grained control over resource allocation for the
+  // entire pod, allowing resource sharing among containers in a pod.
+  // TODO: For beta graduation, expand this comment with a detailed explanation.
+  //
+  // This is an alpha field and requires enabling the PodLevelResources feature
+  // gate.
+  //
+  // +featureGate=PodLevelResources
+  // +optional
+  optional ResourceRequirements resources = 40;
+}
+
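+// For illustration only: a minimal PodSpec exercising a few of the fields above, in the
+// generated Go types (the image and selector values are hypothetical):
+//
+//   spec := corev1.PodSpec{
+//       Containers:    []corev1.Container{{Name: "app", Image: "registry.example/app:v1"}},
+//       RestartPolicy: corev1.RestartPolicyAlways,
+//       DNSPolicy:     corev1.DNSClusterFirst,
+//       NodeSelector:  map[string]string{"kubernetes.io/os": "linux"},
+//   }
+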
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system, especially if the node that hosts the pod cannot contact the control
+// plane.
+message PodStatus {
+  // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
+  // The conditions array, the reason and message fields, and the individual container status
+  // arrays contain more detail about the pod's status.
+  // There are five possible phase values:
+  //
+  // Pending: The pod has been accepted by the Kubernetes system, but one or more of the
+  // container images has not been created. This includes time before being scheduled as
+  // well as time spent downloading images over the network, which could take a while.
+  // Running: The pod has been bound to a node, and all of the containers have been created.
+  // At least one container is still running, or is in the process of starting or restarting.
+  // Succeeded: All containers in the pod have terminated in success, and will not be restarted.
+  // Failed: All containers in the pod have terminated, and at least one container has
+  // terminated in failure. The container either exited with non-zero status or was terminated
+  // by the system.
+  // Unknown: For some reason the state of the pod could not be obtained, typically due to an
+  // error in communicating with the host of the pod.
+  //
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
+  // +optional
+  optional string phase = 1;
+
+  // Current service state of pod.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  repeated PodCondition conditions = 2;
+
+  // A human readable message indicating details about why the pod is in this condition.
+  // +optional
+  optional string message = 3;
+
+  // A brief CamelCase message indicating details about why the pod is in this state.
+  // e.g. 'Evicted'
+  // +optional
+  optional string reason = 4;
+
+  // nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be
+  // scheduled right away as preemption victims receive their graceful termination periods.
+  // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
+  // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
+  // give the resources on this node to a higher priority pod that is created after preemption.
+  // As a result, this field may be different than PodSpec.nodeName when the pod is
+  // scheduled.
+  // +optional
+  optional string nominatedNodeName = 11;
+
+  // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet.
+  // A pod can be assigned to a node whose kubelet has a problem, in which case hostIP will
+  // not be updated even though a node is assigned to the pod.
+  // +optional
+  optional string hostIP = 5;
+
+  // hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must
+  // match the hostIP field. This list is empty if the pod has not started yet.
+  // A pod can be assigned to a node whose kubelet has a problem, in which case hostIPs will
+  // not be updated even though a node is assigned to this pod.
+  // +optional
+  // +patchStrategy=merge
+  // +patchMergeKey=ip
+  // +listType=atomic
+  repeated HostIP hostIPs = 16;
+
+  // podIP address allocated to the pod. Routable at least within the cluster.
+  // Empty if not yet allocated.
+  // +optional
+  optional string podIP = 6;
+
+  // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must
+  // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list
+  // is empty if no IPs have been allocated yet.
+  // +optional
+  // +patchStrategy=merge
+  // +patchMergeKey=ip
+  // +listType=map
+  // +listMapKey=ip
+  repeated PodIP podIPs = 12;
+
+  // RFC 3339 date and time at which the object was acknowledged by the Kubelet.
+  // This is before the Kubelet pulled the container image(s) for the pod.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7;
+
+  // Statuses of init containers in this pod. The most recent successful non-restartable
+  // init container will have ready = true, the most recently started container will have
+  // startTime set.
+  // Each init container in the pod should have at most one status in this list,
+  // and all statuses should be for containers in the pod.
+  // However this is not enforced.
+  // If a status for a non-existent container is present in the list, or the list has duplicate names,
+  // the behavior of various Kubernetes components is not defined and those statuses might be
+  // ignored.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status
+  // +listType=atomic
+  repeated ContainerStatus initContainerStatuses = 10;
+
+  // Statuses of containers in this pod.
+  // Each container in the pod should have at most one status in this list,
+  // and all statuses should be for containers in the pod.
+  // However this is not enforced.
+  // If a status for a non-existent container is present in the list, or the list has duplicate names,
+  // the behavior of various Kubernetes components is not defined and those statuses might be
+  // ignored.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+  // +optional
+  // +listType=atomic
+  repeated ContainerStatus containerStatuses = 8;
+
+  // The Quality of Service (QOS) classification assigned to the pod based on resource requirements.
+  // See PodQOSClass type for available QOS classes.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes
+  // +optional
+  optional string qosClass = 9;
+
+  // Statuses for any ephemeral containers that have run in this pod.
+  // Each ephemeral container in the pod should have at most one status in this list,
+  // and all statuses should be for containers in the pod.
+  // However this is not enforced.
+  // If a status for a non-existent container is present in the list, or the list has duplicate names,
+  // the behavior of various Kubernetes components is not defined and those statuses might be
+  // ignored.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+  // +optional
+  // +listType=atomic
+  repeated ContainerStatus ephemeralContainerStatuses = 13;
+
+  // Status of resources resize desired for pod's containers.
+  // It is empty if no resources resize is pending.
+  // Any changes to container resources will automatically set this to "Proposed".
+  // +featureGate=InPlacePodVerticalScaling
+  // +optional
+  optional string resize = 14;
+
+  // Status of resource claims.
+  // +patchMergeKey=name
+  // +patchStrategy=merge,retainKeys
+  // +listType=map
+  // +listMapKey=name
+  // +featureGate=DynamicResourceAllocation
+  // +optional
+  repeated PodResourceClaimStatus resourceClaimStatuses = 15;
+}
+
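+// For illustration only: a typical consumer of PodStatus treats phase as a coarse
+// summary and falls back to container statuses for detail (assuming an existing
+// *corev1.Pod named pod):
+//
+//   if pod.Status.Phase == corev1.PodRunning {
+//       for _, cs := range pod.Status.ContainerStatuses {
+//           if !cs.Ready {
+//               // at least one container is running but not yet ready
+//           }
+//       }
+//   }
+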
+// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
+message PodStatusResult {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Most recently observed status of the pod.
+  // This data may not be up to date.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional PodStatus status = 2;
+}
+
+// PodTemplate describes a template for creating copies of a predefined pod.
+message PodTemplate {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Template defines the pods that will be created from this pod template.
+  // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional PodTemplateSpec template = 2;
+}
+
+// PodTemplateList is a list of PodTemplates.
+message PodTemplateList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of pod templates.
+  repeated PodTemplate items = 2;
+}
+
+// PodTemplateSpec describes the data a pod should have when created from a template
+message PodTemplateSpec {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired behavior of the pod.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional PodSpec spec = 2;
+}
+
+// PortStatus represents the error condition of a service port
+message PortStatus {
+  // Port is the port number of the service port of which status is recorded here
+  optional int32 port = 1;
+
+  // Protocol is the protocol of the service port of which status is recorded here
+  // The supported values are: "TCP", "UDP", "SCTP"
+  optional string protocol = 2;
+
+  // Error is to record the problem with the service port
+  // The format of the error shall comply with the following rules:
+  // - built-in error values shall be specified in this file and those shall use
+  //   CamelCase names
+  // - cloud provider specific error values must have names that comply with the
+  //   format foo.example.com/CamelCase.
+  // ---
+  // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+  // +optional
+  // +kubebuilder:validation:Required
+  // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
+  // +kubebuilder:validation:MaxLength=316
+  optional string error = 3;
+}
+
+// PortworxVolumeSource represents a Portworx volume resource.
+message PortworxVolumeSource {
+  // volumeID uniquely identifies a Portworx volume
+  optional string volumeID = 1;
+
+  // fsType represents the filesystem type to mount
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+  optional string fsType = 2;
+
+  // readOnly defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 3;
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+// +k8s:openapi-gen=false
+message Preconditions {
+  // Specifies the target UID.
+  // +optional
+  optional string uid = 1;
+}
+
+// Describes a class of pods that should avoid this node.
+message PreferAvoidPodsEntry {
+  // The class of pods.
+  optional PodSignature podSignature = 1;
+
+  // Time at which this entry was added to the list.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2;
+
+  // (brief) reason why this entry was added to the list.
+  // +optional
+  optional string reason = 3;
+
+  // Human readable message indicating why this entry was added to the list.
+  // +optional
+  optional string message = 4;
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+message PreferredSchedulingTerm {
+  // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+  optional int32 weight = 1;
+
+  // A node selector term, associated with the corresponding weight.
+  optional NodeSelectorTerm preference = 2;
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+message Probe {
+  // The action taken to determine the health of a container
+  optional ProbeHandler handler = 1;
+
+  // Number of seconds after the container has started before liveness probes are initiated.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+  // +optional
+  optional int32 initialDelaySeconds = 2;
+
+  // Number of seconds after which the probe times out.
+  // Defaults to 1 second. Minimum value is 1.
+  // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+  // +optional
+  optional int32 timeoutSeconds = 3;
+
+  // How often (in seconds) to perform the probe.
+  // Default to 10 seconds. Minimum value is 1.
+  // +optional
+  optional int32 periodSeconds = 4;
+
+  // Minimum consecutive successes for the probe to be considered successful after having failed.
+  // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+  // +optional
+  optional int32 successThreshold = 5;
+
+  // Minimum consecutive failures for the probe to be considered failed after having succeeded.
+  // Defaults to 3. Minimum value is 1.
+  // +optional
+  optional int32 failureThreshold = 6;
+
+  // Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+  // The grace period is the duration in seconds after the processes running in the pod are sent
+  // a termination signal and the time when the processes are forcibly halted with a kill signal.
+  // Set this value longer than the expected cleanup time for your process.
+  // If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+  // value overrides the value provided by the pod spec.
+  // Value must be non-negative integer. The value zero indicates stop immediately via
+  // the kill signal (no opportunity to shut down).
+  // This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+  // Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+  // +optional
+  optional int64 terminationGracePeriodSeconds = 7;
+}
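+
+// Example (illustrative sketch, not part of the generated schema): building a
+// Probe via the corresponding Go type in k8s.io/api/core/v1, where the handler
+// is the embedded ProbeHandler field. The "/healthz" path and port 8080 are
+// placeholder assumptions; the numeric fields mirror the defaults noted above.
+//
+//   import (
+//       corev1 "k8s.io/api/core/v1"
+//       "k8s.io/apimachinery/pkg/util/intstr"
+//   )
+//
+//   probe := corev1.Probe{
+//       ProbeHandler: corev1.ProbeHandler{
+//           HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt32(8080)},
+//       },
+//       InitialDelaySeconds: 5,  // wait before the first probe
+//       TimeoutSeconds:      1,  // default per the comment above
+//       PeriodSeconds:       10, // default per the comment above
+//       FailureThreshold:    3,  // default per the comment above
+//   }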
+
+// ProbeHandler defines a specific action that should be taken in a probe.
+// One and only one of the fields must be specified.
+message ProbeHandler {
+  // Exec specifies a command to execute in the container.
+  // +optional
+  optional ExecAction exec = 1;
+
+  // HTTPGet specifies an HTTP GET request to perform.
+  // +optional
+  optional HTTPGetAction httpGet = 2;
+
+  // TCPSocket specifies a connection to a TCP port.
+  // +optional
+  optional TCPSocketAction tcpSocket = 3;
+
+  // GRPC specifies a GRPC HealthCheckRequest.
+  // +optional
+  optional GRPCAction grpc = 4;
+}
+
+// Represents a projected volume source
+message ProjectedVolumeSource {
+  // sources is the list of volume projections. Each entry in this list
+  // handles one source.
+  // +optional
+  // +listType=atomic
+  repeated VolumeProjection sources = 1;
+
+  // defaultMode are the mode bits used to set permissions on created files by default.
+  // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+  // YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+  // Directories within the path are not affected by this setting.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and the result can be other mode bits set.
+  // +optional
+  optional int32 defaultMode = 2;
+}
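+
+// Example (illustrative sketch): setting defaultMode from Go. Octal 0o640 and
+// decimal 416 are the same mode bits; JSON clients must send the decimal form,
+// as noted above. The empty Sources slice is a placeholder.
+//
+//   mode := int32(0o640) // == 416 decimal
+//   src := corev1.ProjectedVolumeSource{
+//       Sources:     []corev1.VolumeProjection{}, // one entry per projected source
+//       DefaultMode: &mode,
+//   }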
+
+// Represents a Quobyte mount that lasts the lifetime of a pod.
+// Quobyte volumes do not support ownership management or SELinux relabeling.
+message QuobyteVolumeSource {
+  // registry represents a single or multiple Quobyte Registry services
+  // specified as a string as host:port pair (multiple entries are separated with commas)
+  // which acts as the central registry for volumes
+  optional string registry = 1;
+
+  // volume is a string that references an already created Quobyte volume by name.
+  optional string volume = 2;
+
+  // readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+  // Defaults to false.
+  // +optional
+  optional bool readOnly = 3;
+
+  // user to map volume access to
+  // Defaults to serviceaccount user
+  // +optional
+  optional string user = 4;
+
+  // group to map volume access to
+  // Default is no group
+  // +optional
+  optional string group = 5;
+
+  // tenant owning the given Quobyte volume in the Backend
+  // Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+  // +optional
+  optional string tenant = 6;
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+message RBDPersistentVolumeSource {
+  // monitors is a collection of Ceph monitors.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  // +listType=atomic
+  repeated string monitors = 1;
+
+  // image is the rados image name.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  optional string image = 2;
+
+  // fsType is the filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 3;
+
+  // pool is the rados pool name.
+  // Default is rbd.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  // +default="rbd"
+  optional string pool = 4;
+
+  // user is the rados user name.
+  // Default is admin.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  // +default="admin"
+  optional string user = 5;
+
+  // keyring is the path to key ring for RBDUser.
+  // Default is /etc/ceph/keyring.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  // +default="/etc/ceph/keyring"
+  optional string keyring = 6;
+
+  // secretRef is name of the authentication secret for RBDUser. If provided
+  // overrides keyring.
+  // Default is nil.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional SecretReference secretRef = 7;
+
+  // readOnly here will force the ReadOnly setting in VolumeMounts.
+  // Defaults to false.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional bool readOnly = 8;
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+message RBDVolumeSource {
+  // monitors is a collection of Ceph monitors.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  // +listType=atomic
+  repeated string monitors = 1;
+
+  // image is the rados image name.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  optional string image = 2;
+
+  // fsType is the filesystem type of the volume that you want to mount.
+  // Tip: Ensure that the filesystem type is supported by the host operating system.
+  // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+  // TODO: how do we prevent errors in the filesystem from compromising the machine
+  // +optional
+  optional string fsType = 3;
+
+  // pool is the rados pool name.
+  // Default is rbd.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  // +default="rbd"
+  optional string pool = 4;
+
+  // user is the rados user name.
+  // Default is admin.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  // +default="admin"
+  optional string user = 5;
+
+  // keyring is the path to key ring for RBDUser.
+  // Default is /etc/ceph/keyring.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  // +default="/etc/ceph/keyring"
+  optional string keyring = 6;
+
+  // secretRef is name of the authentication secret for RBDUser. If provided
+  // overrides keyring.
+  // Default is nil.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional LocalObjectReference secretRef = 7;
+
+  // readOnly here will force the ReadOnly setting in VolumeMounts.
+  // Defaults to false.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+  // +optional
+  optional bool readOnly = 8;
+}
+
+// RangeAllocation is not a public type.
+message RangeAllocation {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Range is a string that identifies the range represented by 'data'.
+  optional string range = 2;
+
+  // Data is a bit array containing all allocated addresses in the previous segment.
+  optional bytes data = 3;
+}
+
+// ReplicationController represents the configuration of a replication controller.
+message ReplicationController {
+  // If the Labels of a ReplicationController are empty, they are defaulted to
+  // be the same as the Pod(s) that the replication controller manages.
+  // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the specification of the desired behavior of the replication controller.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional ReplicationControllerSpec spec = 2;
+
+  // Status is the most recently observed status of the replication controller.
+  // This data may be out of date by some window of time.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional ReplicationControllerStatus status = 3;
+}
+
+// ReplicationControllerCondition describes the state of a replication controller at a certain point.
+message ReplicationControllerCondition {
+  // Type of replication controller condition.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // The last time the condition transitioned from one status to another.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // The reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // A human readable message indicating details about the transition.
+  // +optional
+  optional string message = 5;
+}
+
+// ReplicationControllerList is a collection of replication controllers.
+message ReplicationControllerList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of replication controllers.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+  repeated ReplicationController items = 2;
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+message ReplicationControllerSpec {
+  // Replicas is the number of desired replicas.
+  // This is a pointer to distinguish between explicit zero and unspecified.
+  // Defaults to 1.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+  // +optional
+  optional int32 replicas = 1;
+
+  // Minimum number of seconds for which a newly created pod should be ready
+  // without any of its container crashing, for it to be considered available.
+  // Defaults to 0 (pod will be considered available as soon as it is ready)
+  // +optional
+  optional int32 minReadySeconds = 4;
+
+  // Selector is a label query over pods that should match the Replicas count.
+  // Label keys and values must match in order for a pod to be controlled by this
+  // replication controller; if Selector is empty, it is defaulted to the labels
+  // present on the Pod template.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+  // +optional
+  // +mapType=atomic
+  map<string, string> selector = 2;
+
+  // Template is the object that describes the pod that will be created if
+  // insufficient replicas are detected. This takes precedence over a TemplateRef.
+  // The only allowed template.spec.restartPolicy value is "Always".
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+  // +optional
+  optional PodTemplateSpec template = 3;
+}
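+
+// Example (illustrative sketch): a minimal spec in Go. The "app: demo" label is
+// a placeholder; per the comment above, an empty Selector would be defaulted
+// from the template labels, and template.spec.restartPolicy must be "Always".
+//
+//   replicas := int32(3)
+//   spec := corev1.ReplicationControllerSpec{
+//       Replicas: &replicas,
+//       Selector: map[string]string{"app": "demo"},
+//       Template: &corev1.PodTemplateSpec{
+//           ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "demo"}},
+//           Spec:       corev1.PodSpec{ /* containers elided */ },
+//       },
+//   }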
+
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+message ReplicationControllerStatus {
+  // Replicas is the most recently observed number of replicas.
+  // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+  optional int32 replicas = 1;
+
+  // The number of pods that have labels matching the labels of the pod template of the replication controller.
+  // +optional
+  optional int32 fullyLabeledReplicas = 2;
+
+  // The number of ready replicas for this replication controller.
+  // +optional
+  optional int32 readyReplicas = 4;
+
+  // The number of available replicas (ready for at least minReadySeconds) for this replication controller.
+  // +optional
+  optional int32 availableReplicas = 5;
+
+  // ObservedGeneration reflects the generation of the most recently observed replication controller.
+  // +optional
+  optional int64 observedGeneration = 3;
+
+  // Represents the latest available observations of a replication controller's current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  repeated ReplicationControllerCondition conditions = 6;
+}
+
+// ResourceClaim references one entry in PodSpec.ResourceClaims.
+message ResourceClaim {
+  // Name must match the name of one entry in pod.spec.resourceClaims of
+  // the Pod where this field is used. It makes that resource available
+  // inside a container.
+  optional string name = 1;
+
+  // Request is the name chosen for a request in the referenced claim.
+  // If empty, everything from the claim is made available, otherwise
+  // only the result of this request.
+  //
+  // +optional
+  optional string request = 2;
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+// +structType=atomic
+message ResourceFieldSelector {
+  // Container name: required for volumes, optional for env vars
+  // +optional
+  optional string containerName = 1;
+
+  // Required: resource to select
+  optional string resource = 2;
+
+  // Specifies the output format of the exposed resources, defaults to "1"
+  // +optional
+  optional .k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3;
+}
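+
+// Example (illustrative sketch): exposing a container's CPU limit through the
+// downward API. The container name "app" is a placeholder; the "1m" divisor
+// scales the exposed value to millicores.
+//
+//   import "k8s.io/apimachinery/pkg/api/resource"
+//
+//   sel := corev1.ResourceFieldSelector{
+//       ContainerName: "app", // required for volumes, optional for env vars
+//       Resource:      "limits.cpu",
+//       Divisor:       resource.MustParse("1m"),
+//   }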
+
+// ResourceHealth represents the health of a resource. It has the latest device health information.
+// This is a part of KEP https://kep.k8s.io/4680.
+message ResourceHealth {
+  // ResourceID is the unique identifier of the resource. See the ResourceID type for more information.
+  optional string resourceID = 1;
+
+  // Health of the resource.
+  // can be one of:
+  //  - Healthy: operates as normal
+  //  - Unhealthy: reported unhealthy. We consider this a temporary health issue
+  //               since we do not have a mechanism today to distinguish
+  //               temporary and permanent issues.
+  //  - Unknown: The status cannot be determined.
+  //             For example, Device Plugin got unregistered and hasn't been re-registered since.
+  //
+  // In future we may want to introduce the PermanentlyUnhealthy Status.
+  optional string health = 2;
+}
+
+// ResourceQuota sets aggregate quota restrictions enforced per namespace
+message ResourceQuota {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the desired quota.
+  // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional ResourceQuotaSpec spec = 2;
+
+  // Status defines the actual enforced quota and its current usage.
+  // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional ResourceQuotaStatus status = 3;
+}
+
+// ResourceQuotaList is a list of ResourceQuota items.
+message ResourceQuotaList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of ResourceQuota objects.
+  // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+  repeated ResourceQuota items = 2;
+}
+
+// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
+message ResourceQuotaSpec {
+  // hard is the set of desired hard limits for each named resource.
+  // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
+
+  // A collection of filters that must match each object tracked by a quota.
+  // If not specified, the quota matches all objects.
+  // +optional
+  // +listType=atomic
+  repeated string scopes = 2;
+
+  // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
+  // but expressed using ScopeSelectorOperator in combination with possible values.
+  // For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
+  // +optional
+  optional ScopeSelector scopeSelector = 3;
+}
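+
+// Example (illustrative sketch): hard limits in Go are a ResourceList, i.e. a
+// map of ResourceName to resource.Quantity. The quantities are placeholders.
+//
+//   spec := corev1.ResourceQuotaSpec{
+//       Hard: corev1.ResourceList{
+//           corev1.ResourceCPU:    resource.MustParse("4"),
+//           corev1.ResourceMemory: resource.MustParse("8Gi"),
+//           corev1.ResourcePods:   resource.MustParse("10"),
+//       },
+//   }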
+
+// ResourceQuotaStatus defines the enforced hard limits and observed use.
+message ResourceQuotaStatus {
+  // Hard is the set of enforced hard limits for each named resource.
+  // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> hard = 1;
+
+  // Used is the current observed total usage of the resource in the namespace.
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> used = 2;
+}
+
+// ResourceRequirements describes the compute resource requirements.
+message ResourceRequirements {
+  // Limits describes the maximum amount of compute resources allowed.
+  // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1;
+
+  // Requests describes the minimum amount of compute resources required.
+  // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+  // otherwise to an implementation-defined value. Requests cannot exceed Limits.
+  // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2;
+
+  // Claims lists the names of resources, defined in spec.resourceClaims,
+  // that are used by this container.
+  //
+  // This is an alpha field and requires enabling the
+  // DynamicResourceAllocation feature gate.
+  //
+  // This field is immutable. It can only be set for containers.
+  //
+  // +listType=map
+  // +listMapKey=name
+  // +featureGate=DynamicResourceAllocation
+  // +optional
+  repeated ResourceClaim claims = 3;
+}
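+
+// Example (illustrative sketch): requests and limits in Go. Per the comment
+// above, omitted requests default to the limits when limits are set explicitly;
+// the quantities are placeholders.
+//
+//   req := corev1.ResourceRequirements{
+//       Requests: corev1.ResourceList{
+//           corev1.ResourceCPU:    resource.MustParse("250m"),
+//           corev1.ResourceMemory: resource.MustParse("64Mi"),
+//       },
+//       Limits: corev1.ResourceList{
+//           corev1.ResourceCPU:    resource.MustParse("500m"),
+//           corev1.ResourceMemory: resource.MustParse("128Mi"),
+//       },
+//   }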
+
+// ResourceStatus represents the status of a single resource allocated to a Pod.
+message ResourceStatus {
+  // Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec.
+  // For DRA resources, the value must be "claim:<claim_name>/<request>".
+  // When this status is reported about a container, the "claim_name" and "request" must match one of the claims of this container.
+  // +required
+  optional string name = 1;
+
+  // List of unique resource health entries. Each element in the list contains a unique resource ID and its health.
+  // At a minimum, for the lifetime of a Pod, the resource ID must uniquely identify the resource allocated to the Pod on the Node.
+  // If another Pod on the same Node reports a status with the same resource ID, it must be the same resource the Pods share.
+  // See ResourceID type definition for a specific format it has in various use cases.
+  // +listType=map
+  // +listMapKey=resourceID
+  repeated ResourceHealth resources = 2;
+}
+
+// SELinuxOptions are the labels to be applied to the container
+message SELinuxOptions {
+  // User is a SELinux user label that applies to the container.
+  // +optional
+  optional string user = 1;
+
+  // Role is a SELinux role label that applies to the container.
+  // +optional
+  optional string role = 2;
+
+  // Type is a SELinux type label that applies to the container.
+  // +optional
+  optional string type = 3;
+
+  // Level is SELinux level label that applies to the container.
+  // +optional
+  optional string level = 4;
+}
+
+// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume
+message ScaleIOPersistentVolumeSource {
+  // gateway is the host address of the ScaleIO API Gateway.
+  optional string gateway = 1;
+
+  // system is the name of the storage system as configured in ScaleIO.
+  optional string system = 2;
+
+  // secretRef references the secret for the ScaleIO user and other
+  // sensitive information. If this is not provided, the Login operation will fail.
+  optional SecretReference secretRef = 3;
+
+  // sslEnabled is the flag to enable/disable SSL communication with Gateway, default false
+  // +optional
+  optional bool sslEnabled = 4;
+
+  // protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
+  // +optional
+  optional string protectionDomain = 5;
+
+  // storagePool is the ScaleIO Storage Pool associated with the protection domain.
+  // +optional
+  optional string storagePool = 6;
+
+  // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+  // Default is ThinProvisioned.
+  // +optional
+  // +default="ThinProvisioned"
+  optional string storageMode = 7;
+
+  // volumeName is the name of a volume already created in the ScaleIO system
+  // that is associated with this volume source.
+  optional string volumeName = 8;
+
+  // fsType is the filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs".
+  // Default is "xfs"
+  // +optional
+  // +default="xfs"
+  optional string fsType = 9;
+
+  // readOnly defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 10;
+}
+
+// ScaleIOVolumeSource represents a persistent ScaleIO volume
+message ScaleIOVolumeSource {
+  // gateway is the host address of the ScaleIO API Gateway.
+  optional string gateway = 1;
+
+  // system is the name of the storage system as configured in ScaleIO.
+  optional string system = 2;
+
+  // secretRef references the secret for the ScaleIO user and other
+  // sensitive information. If this is not provided, the Login operation will fail.
+  optional LocalObjectReference secretRef = 3;
+
+  // sslEnabled is the flag to enable/disable SSL communication with Gateway, default false
+  // +optional
+  optional bool sslEnabled = 4;
+
+  // protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
+  // +optional
+  optional string protectionDomain = 5;
+
+  // storagePool is the ScaleIO Storage Pool associated with the protection domain.
+  // +optional
+  optional string storagePool = 6;
+
+  // storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+  // Default is ThinProvisioned.
+  // +optional
+  // +default="ThinProvisioned"
+  optional string storageMode = 7;
+
+  // volumeName is the name of a volume already created in the ScaleIO system
+  // that is associated with this volume source.
+  optional string volumeName = 8;
+
+  // fsType is the filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs".
+  // Default is "xfs".
+  // +optional
+  // +default="xfs"
+  optional string fsType = 9;
+
+  // readOnly defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 10;
+}
+
+// A scope selector represents the AND of the selectors represented
+// by the scoped-resource selector requirements.
+// +structType=atomic
+message ScopeSelector {
+  // A list of scope selector requirements by scope of the resources.
+  // +optional
+  // +listType=atomic
+  repeated ScopedResourceSelectorRequirement matchExpressions = 1;
+}
+
+// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
+// that relates the scope name and values.
+message ScopedResourceSelectorRequirement {
+  // The name of the scope that the selector applies to.
+  optional string scopeName = 1;
+
+  // Represents a scope's relationship to a set of values.
+  // Valid operators are In, NotIn, Exists, DoesNotExist.
+  optional string operator = 2;
+
+  // An array of string values. If the operator is In or NotIn,
+  // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+  // the values array must be empty.
+  // This array is replaced during a strategic merge patch.
+  // +optional
+  // +listType=atomic
+  repeated string values = 3;
+}
+
+// SeccompProfile defines a pod/container's seccomp profile settings.
+// Only one profile source may be set.
+// +union
+message SeccompProfile {
+  // type indicates which kind of seccomp profile will be applied.
+  // Valid options are:
+  //
+  // Localhost - a profile defined in a file on the node should be used.
+  // RuntimeDefault - the container runtime default profile should be used.
+  // Unconfined - no profile should be applied.
+  // +unionDiscriminator
+  optional string type = 1;
+
+  // localhostProfile indicates a profile defined in a file on the node should be used.
+  // The profile must be preconfigured on the node to work.
+  // Must be a descending path, relative to the kubelet's configured seccomp profile location.
+  // Must be set if type is "Localhost". Must NOT be set for any other type.
+  // +optional
+  optional string localhostProfile = 2;
+}
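+
+// Example (illustrative sketch): because this is a union, localhostProfile is
+// set only when type is "Localhost". The profile path is a placeholder,
+// relative to the kubelet's configured seccomp profile location.
+//
+//   lp := "profiles/audit.json"
+//   sp := corev1.SeccompProfile{
+//       Type:             corev1.SeccompProfileTypeLocalhost,
+//       LocalhostProfile: &lp,
+//   }
+//   // For RuntimeDefault, leave LocalhostProfile nil:
+//   _ = corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault}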
+
+// Secret holds secret data of a certain type. The total bytes of the values in
+// the Data field must be less than MaxSecretSize bytes.
+message Secret {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Immutable, if set to true, ensures that data stored in the Secret cannot
+  // be updated (only object metadata can be modified).
+  // If not set to true, the field can be modified at any time.
+  // Defaulted to nil.
+  // +optional
+  optional bool immutable = 5;
+
+  // Data contains the secret data. Each key must consist of alphanumeric
+  // characters, '-', '_' or '.'. The serialized form of the secret data is a
+  // base64 encoded string, representing the arbitrary (possibly non-string)
+  // data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
+  // +optional
+  map<string, bytes> data = 2;
+
+  // stringData allows specifying non-binary secret data in string form.
+  // It is provided as a write-only input field for convenience.
+  // All keys and values are merged into the data field on write, overwriting any existing values.
+  // The stringData field is never output when reading from the API.
+  // +k8s:conversion-gen=false
+  // +optional
+  map<string, string> stringData = 4;
+
+  // Used to facilitate programmatic handling of secret data.
+  // More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types
+  // +optional
+  optional string type = 3;
+}
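+
+// Example (illustrative sketch): stringData is write-only convenience input
+// that is merged into data on write; data holds the raw bytes. The names and
+// values here are placeholders.
+//
+//   secret := corev1.Secret{
+//       ObjectMeta: metav1.ObjectMeta{Name: "demo-credentials"},
+//       Type:       corev1.SecretTypeOpaque,
+//       StringData: map[string]string{"username": "demo"},
+//       Data:       map[string][]byte{"password": []byte("s3cr3t")},
+//   }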
+
+// SecretEnvSource selects a Secret to populate the environment
+// variables with.
+//
+// The contents of the target Secret's Data field will represent the
+// key-value pairs as environment variables.
+message SecretEnvSource {
+  // The Secret to select from.
+  optional LocalObjectReference localObjectReference = 1;
+
+  // Specify whether the Secret must be defined
+  // +optional
+  optional bool optional = 2;
+}
+
+// SecretKeySelector selects a key of a Secret.
+// +structType=atomic
+message SecretKeySelector {
+  // The name of the secret in the pod's namespace to select from.
+  optional LocalObjectReference localObjectReference = 1;
+
+  // The key of the secret to select from.  Must be a valid secret key.
+  optional string key = 2;
+
+  // Specify whether the Secret or its key must be defined
+  // +optional
+  optional bool optional = 3;
+}
+
+// SecretList is a list of Secret.
+message SecretList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of secret objects.
+  // More info: https://kubernetes.io/docs/concepts/configuration/secret
+  repeated Secret items = 2;
+}
+
+// Adapts a secret into a projected volume.
+//
+// The contents of the target Secret's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names.
+// Note that this is identical to a secret volume source without the default
+// mode.
+message SecretProjection {
+  optional LocalObjectReference localObjectReference = 1;
+
+  // items if unspecified, each key-value pair in the Data field of the referenced
+  // Secret will be projected into the volume as a file whose name is the
+  // key and content is the value. If specified, the listed keys will be
+  // projected into the specified paths, and unlisted keys will not be
+  // present. If a key is specified which is not present in the Secret,
+  // the volume setup will error unless it is marked optional. Paths must be
+  // relative and may not contain the '..' path or start with '..'.
+  // +optional
+  // +listType=atomic
+  repeated KeyToPath items = 2;
+
+  // optional field specify whether the Secret or its key must be defined
+  // +optional
+  optional bool optional = 4;
+}
+
+// SecretReference represents a Secret Reference. It has enough information to retrieve secret
+// in any namespace
+// +structType=atomic
+message SecretReference {
+  // name is unique within a namespace to reference a secret resource.
+  // +optional
+  optional string name = 1;
+
+  // namespace defines the space within which the secret name must be unique.
+  // +optional
+  optional string namespace = 2;
+}
+
+// Adapts a Secret into a volume.
+//
+// The contents of the target Secret's Data field will be presented in a volume
+// as files using the keys in the Data field as the file names.
+// Secret volumes support ownership management and SELinux relabeling.
+message SecretVolumeSource {
+  // secretName is the name of the secret in the pod's namespace to use.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+  // +optional
+  optional string secretName = 1;
+
+  // items If unspecified, each key-value pair in the Data field of the referenced
+  // Secret will be projected into the volume as a file whose name is the
+  // key and content is the value. If specified, the listed keys will be
+  // projected into the specified paths, and unlisted keys will not be
+  // present. If a key is specified which is not present in the Secret,
+  // the volume setup will error unless it is marked optional. Paths must be
+  // relative and may not contain the '..' path or start with '..'.
+  // +optional
+  // +listType=atomic
+  repeated KeyToPath items = 2;
+
+  // defaultMode is Optional: mode bits used to set permissions on created files by default.
+  // Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+  // YAML accepts both octal and decimal values, JSON requires decimal values
+  // for mode bits. Defaults to 0644.
+  // Directories within the path are not affected by this setting.
+  // This might be in conflict with other options that affect the file
+  // mode, like fsGroup, and the result can be other mode bits set.
+  // +optional
+  optional int32 defaultMode = 3;
+
+  // optional field specify whether the Secret or its keys must be defined
+  // +optional
+  optional bool optional = 4;
+}
+
+// SecurityContext holds security configuration that will be applied to a container.
+// Some fields are present in both SecurityContext and PodSecurityContext.  When both
+// are set, the values in SecurityContext take precedence.
+message SecurityContext {
+  // The capabilities to add/drop when running containers.
+  // Defaults to the default set of capabilities granted by the container runtime.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional Capabilities capabilities = 1;
+
+  // Run container in privileged mode.
+  // Processes in privileged containers are essentially equivalent to root on the host.
+  // Defaults to false.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional bool privileged = 2;
+
+  // The SELinux context to be applied to the container.
+  // If unspecified, the container runtime will allocate a random SELinux context for each
+  // container.  May also be set in PodSecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional SELinuxOptions seLinuxOptions = 3;
+
+  // The Windows specific settings applied to all containers.
+  // If unspecified, the options from the PodSecurityContext will be used.
+  // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // Note that this field cannot be set when spec.os.name is linux.
+  // +optional
+  optional WindowsSecurityContextOptions windowsOptions = 10;
+
+  // The UID to run the entrypoint of the container process.
+  // Defaults to user specified in image metadata if unspecified.
+  // May also be set in PodSecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional int64 runAsUser = 4;
+
+  // The GID to run the entrypoint of the container process.
+  // Uses runtime default if unset.
+  // May also be set in PodSecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional int64 runAsGroup = 8;
+
+  // Indicates that the container must run as a non-root user.
+  // If true, the Kubelet will validate the image at runtime to ensure that it
+  // does not run as UID 0 (root) and fail to start the container if it does.
+  // If unset or false, no such validation will be performed.
+  // May also be set in PodSecurityContext.  If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // +optional
+  optional bool runAsNonRoot = 5;
+
+  // Whether this container has a read-only root filesystem.
+  // Default is false.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional bool readOnlyRootFilesystem = 6;
+
+  // AllowPrivilegeEscalation controls whether a process can gain more
+  // privileges than its parent process. This bool directly controls if
+  // the no_new_privs flag will be set on the container process.
+  // AllowPrivilegeEscalation is true always when the container is:
+  // 1) run as Privileged
+  // 2) has CAP_SYS_ADMIN
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional bool allowPrivilegeEscalation = 7;
+
+  // procMount denotes the type of proc mount to use for the containers.
+  // The default value is Default which uses the container runtime defaults for
+  // readonly paths and masked paths.
+  // This requires the ProcMountType feature flag to be enabled.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional string procMount = 9;
+
+  // The seccomp options to use by this container. If seccomp options are
+  // provided at both the pod & container level, the container options
+  // override the pod options.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional SeccompProfile seccompProfile = 11;
+
+  // appArmorProfile is the AppArmor options to use by this container. If set, this profile
+  // overrides the pod's appArmorProfile.
+  // Note that this field cannot be set when spec.os.name is windows.
+  // +optional
+  optional AppArmorProfile appArmorProfile = 12;
+}
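+
+// Example (illustrative sketch): a restrictive container security context in
+// Go; UID 1000 is a placeholder. Pointer fields distinguish "unset" from an
+// explicit false.
+//
+//   uid := int64(1000)
+//   yes, no := true, false
+//   sc := corev1.SecurityContext{
+//       RunAsUser:                &uid,
+//       RunAsNonRoot:             &yes,
+//       ReadOnlyRootFilesystem:   &yes,
+//       AllowPrivilegeEscalation: &no,
+//       Capabilities:             &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}},
+//   }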
+
+// SerializedReference is a reference to a serialized object.
+message SerializedReference {
+  // The reference to an object in the system.
+  // +optional
+  optional ObjectReference reference = 1;
+}
+
+// Service is a named abstraction of software service (for example, mysql) consisting of local port
+// (for example 3306) that the proxy listens on, and the selector that determines which pods
+// will answer requests sent through the proxy.
+message Service {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the behavior of a service.
+  // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional ServiceSpec spec = 2;
+
+  // Most recently observed status of the service.
+  // Populated by the system.
+  // Read-only.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional ServiceStatus status = 3;
+}
+
+// ServiceAccount binds together:
+// * a name, understood by users, and perhaps by peripheral systems, for an identity
+// * a principal that can be authenticated and authorized
+// * a set of secrets
+message ServiceAccount {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use.
+  // Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true".
+  // The "kubernetes.io/enforce-mountable-secrets" annotation is deprecated since v1.32.
+  // Prefer separate namespaces to isolate access to mounted secrets.
+  // This field should not be used to find auto-generated service account token secrets for use outside of pods.
+  // Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created.
+  // More info: https://kubernetes.io/docs/concepts/configuration/secret
+  // +optional
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=name
+  repeated ObjectReference secrets = 2;
+
+  // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+  // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+  // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+  // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  // +optional
+  // +listType=atomic
+  repeated LocalObjectReference imagePullSecrets = 3;
+
+  // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
+  // Can be overridden at the pod level.
+  // +optional
+  optional bool automountServiceAccountToken = 4;
+}
+
+// ServiceAccountList is a list of ServiceAccount objects
+message ServiceAccountList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of ServiceAccounts.
+  // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+  repeated ServiceAccount items = 2;
+}
+
+// ServiceAccountTokenProjection represents a projected service account token
+// volume. This projection can be used to insert a service account token into
+// the pods runtime filesystem for use against APIs (Kubernetes API Server or
+// otherwise).
+message ServiceAccountTokenProjection {
+  // audience is the intended audience of the token. A recipient of a token
+  // must identify itself with an identifier specified in the audience of the
+  // token, and otherwise should reject the token. The audience defaults to the
+  // identifier of the apiserver.
+  // +optional
+  optional string audience = 1;
+
+  // expirationSeconds is the requested duration of validity of the service
+  // account token. As the token approaches expiration, the kubelet volume
+  // plugin will proactively rotate the service account token. The kubelet will
+  // start trying to rotate the token if the token is older than 80 percent of
+  // its time to live or if the token is older than 24 hours. Defaults to 1 hour
+  // and must be at least 10 minutes.
+  // +optional
+  optional int64 expirationSeconds = 2;
+
+  // path is the path relative to the mount point of the file to project the
+  // token into.
+  optional string path = 3;
+}
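+
+// Example (illustrative sketch): projecting a bound service account token. The
+// audience string is an assumption; the one-hour expiry matches the default
+// described above, and "token" is the file name under the volume mount.
+//
+//   exp := int64(3600)
+//   proj := corev1.ServiceAccountTokenProjection{
+//       Audience:          "https://kubernetes.default.svc",
+//       ExpirationSeconds: &exp,
+//       Path:              "token",
+//   }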
+
+// ServiceList holds a list of services.
+message ServiceList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of services
+  repeated Service items = 2;
+}
+
+// ServicePort contains information on service's port.
+message ServicePort {
+  // The name of this port within the service. This must be a DNS_LABEL.
+  // All ports within a ServiceSpec must have unique names. When considering
+  // the endpoints for a Service, this must match the 'name' field in the
+  // EndpointPort.
+  // Optional if only one ServicePort is defined on this service.
+  // +optional
+  optional string name = 1;
+
+  // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+  // Default is TCP.
+  // +default="TCP"
+  // +optional
+  optional string protocol = 2;
+
+  // The application protocol for this port.
+  // This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+  // This field follows standard Kubernetes label syntax.
+  // Valid values are either:
+  //
+  // * Un-prefixed protocol names - reserved for IANA standard service names (as per
+  // RFC-6335 and https://www.iana.org/assignments/service-names).
+  //
+  // * Kubernetes-defined prefixed names:
+  //   * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+  //   * 'kubernetes.io/ws'  - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+  //   * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+  //
+  // * Other protocols should use implementation-defined prefixed names such as
+  // mycompany.com/my-custom-protocol.
+  // +optional
+  optional string appProtocol = 6;
+
+  // The port that will be exposed by this service.
+  optional int32 port = 3;
+
+  // Number or name of the port to access on the pods targeted by the service.
+  // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+  // If this is a string, it will be looked up as a named port in the
+  // target Pod's container ports. If this is not specified, the value
+  // of the 'port' field is used (an identity map).
+  // This field is ignored for services with clusterIP=None, and should be
+  // omitted or set equal to the 'port' field.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+  // +optional
+  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4;
+
+  // The port on each node on which this service is exposed when type is
+  // NodePort or LoadBalancer.  Usually assigned by the system. If a value is
+  // specified, in-range, and not in use it will be used, otherwise the
+  // operation will fail.  If not specified, a port will be allocated if this
+  // Service requires one.  If this field is specified when creating a
+  // Service which does not need it, creation will fail. This field will be
+  // wiped when updating a Service to no longer need it (e.g. changing type
+  // from NodePort to ClusterIP).
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+  // +optional
+  optional int32 nodePort = 5;
+}
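+
+// Example (illustrative sketch): targetPort takes either a number or the name
+// of a container port via IntOrString; the port name "http" is a placeholder
+// resolved against the target Pod's named container ports.
+//
+//   port := corev1.ServicePort{
+//       Name:       "http",
+//       Protocol:   corev1.ProtocolTCP,
+//       Port:       80,
+//       TargetPort: intstr.FromString("http"),
+//   }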
+
+// ServiceProxyOptions is the query options to a Service's proxy call.
+message ServiceProxyOptions {
+  // Path is the part of URLs that include service endpoints, suffixes,
+  // and parameters to use for the current proxy request to service.
+  // For example, the whole request URL is
+  // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
+  // Path is _search?q=user:kimchy.
+  // +optional
+  optional string path = 1;
+}
+
+// ServiceSpec describes the attributes that a user creates on a service.
+message ServiceSpec {
+  // The list of ports that are exposed by this service.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+  // +patchMergeKey=port
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=port
+  // +listMapKey=protocol
+  repeated ServicePort ports = 1;
+
+  // Route service traffic to pods with label keys and values matching this
+  // selector. If empty or not present, the service is assumed to have an
+  // external process managing its endpoints, which Kubernetes will not
+  // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+  // Ignored if type is ExternalName.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/
+  // +optional
+  // +mapType=atomic
+  map<string, string> selector = 2;
+
+  // clusterIP is the IP address of the service and is usually assigned
+  // randomly. If an address is specified manually, is in-range (as per
+  // system configuration), and is not in use, it will be allocated to the
+  // service; otherwise creation of the service will fail. This field may not
+  // be changed through updates unless the type field is also being changed
+  // to ExternalName (which requires this field to be blank) or the type
+  // field is being changed from ExternalName (in which case this field may
+  // optionally be specified, as described above).  Valid values are "None",
+  // empty string (""), or a valid IP address. Setting this to "None" makes a
+  // "headless service" (no virtual IP), which is useful when direct endpoint
+  // connections are preferred and proxying is not required.  Only applies to
+  // types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+  // when creating a Service of type ExternalName, creation will fail. This
+  // field will be wiped when updating a Service to type ExternalName.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+  // +optional
+  optional string clusterIP = 3;
+
+  // ClusterIPs is a list of IP addresses assigned to this service, and are
+  // usually assigned randomly.  If an address is specified manually, is
+  // in-range (as per system configuration), and is not in use, it will be
+  // allocated to the service; otherwise creation of the service will fail.
+  // This field may not be changed through updates unless the type field is
+  // also being changed to ExternalName (which requires this field to be
+  // empty) or the type field is being changed from ExternalName (in which
+  // case this field may optionally be specified, as described above).  Valid
+  // values are "None", empty string (""), or a valid IP address.  Setting
+  // this to "None" makes a "headless service" (no virtual IP), which is
+  // useful when direct endpoint connections are preferred and proxying is
+  // not required.  Only applies to types ClusterIP, NodePort, and
+  // LoadBalancer. If this field is specified when creating a Service of type
+  // ExternalName, creation will fail. This field will be wiped when updating
+  // a Service to type ExternalName.  If this field is not specified, it will
+  // be initialized from the clusterIP field.  If this field is specified,
+  // clients must ensure that clusterIPs[0] and clusterIP have the same
+  // value.
+  //
+  // This field may hold a maximum of two entries (dual-stack IPs, in either order).
+  // These IPs must correspond to the values of the ipFamilies field. Both
+  // clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+  // +listType=atomic
+  // +optional
+  repeated string clusterIPs = 18;
+
+  // type determines how the Service is exposed. Defaults to ClusterIP. Valid
+  // options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+  // "ClusterIP" allocates a cluster-internal IP address for load-balancing
+  // to endpoints. Endpoints are determined by the selector or if that is not
+  // specified, by manual construction of an Endpoints object or
+  // EndpointSlice objects. If clusterIP is "None", no virtual IP is
+  // allocated and the endpoints are published as a set of endpoints rather
+  // than a virtual IP.
+  // "NodePort" builds on ClusterIP and allocates a port on every node which
+  // routes to the same endpoints as the clusterIP.
+  // "LoadBalancer" builds on NodePort and creates an external load-balancer
+  // (if supported in the current cloud) which routes to the same endpoints
+  // as the clusterIP.
+  // "ExternalName" aliases this service to the specified externalName.
+  // Several other fields do not apply to ExternalName services.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+  // +optional
+  optional string type = 4;
+
+  // externalIPs is a list of IP addresses for which nodes in the cluster
+  // will also accept traffic for this service.  These IPs are not managed by
+  // Kubernetes.  The user is responsible for ensuring that traffic arrives
+  // at a node with this IP.  A common example is external load-balancers
+  // that are not part of the Kubernetes system.
+  // +optional
+  // +listType=atomic
+  repeated string externalIPs = 5;
+
+  // Supports "ClientIP" and "None". Used to maintain session affinity.
+  // Enable client IP based session affinity.
+  // Must be ClientIP or None.
+  // Defaults to None.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+  // +optional
+  optional string sessionAffinity = 7;
+
+  // Only applies to Service Type: LoadBalancer.
+  // This feature depends on whether the underlying cloud-provider supports specifying
+  // the loadBalancerIP when a load balancer is created.
+  // This field will be ignored if the cloud-provider does not support the feature.
+  // Deprecated: This field was under-specified and its meaning varies across implementations.
+  // Using it is non-portable and it may not support dual-stack.
+  // Users are encouraged to use implementation-specific annotations when available.
+  // +optional
+  optional string loadBalancerIP = 8;
+
+  // If specified and supported by the platform, traffic through the cloud-provider
+  // load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+  // cloud-provider does not support the feature.
+  // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+  // +optional
+  // +listType=atomic
+  repeated string loadBalancerSourceRanges = 9;
+
+  // externalName is the external reference that discovery mechanisms will
+  // return as an alias for this service (e.g. a DNS CNAME record). No
+  // proxying will be involved.  Must be a lowercase RFC-1123 hostname
+  // (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+  // +optional
+  optional string externalName = 10;
+
+  // externalTrafficPolicy describes how nodes distribute service traffic they
+  // receive on one of the Service's "externally-facing" addresses (NodePorts,
+  // ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+  // the service in a way that assumes that external load balancers will take care
+  // of balancing the service traffic between nodes, and so each node will deliver
+  // traffic only to the node-local endpoints of the service, without masquerading
+  // the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+  // be dropped.) The default value, "Cluster", uses the standard behavior of
+  // routing to all endpoints evenly (possibly modified by topology and other
+  // features). Note that traffic sent to an External IP or LoadBalancer IP from
+  // within the cluster will always get "Cluster" semantics, but clients sending to
+  // a NodePort from within the cluster may need to take traffic policy into account
+  // when picking a node.
+  // +optional
+  optional string externalTrafficPolicy = 11;
+
+  // healthCheckNodePort specifies the healthcheck nodePort for the service.
+  // This only applies when type is set to LoadBalancer and
+  // externalTrafficPolicy is set to Local. If a value is specified, is
+  // in-range, and is not in use, it will be used.  If not specified, a value
+  // will be automatically allocated.  External systems (e.g. load-balancers)
+  // can use this port to determine if a given node holds endpoints for this
+  // service or not.  If this field is specified when creating a Service
+  // which does not need it, creation will fail. This field will be wiped
+  // when updating a Service to no longer need it (e.g. changing type).
+  // This field cannot be updated once set.
+  // +optional
+  optional int32 healthCheckNodePort = 12;
+
+  // publishNotReadyAddresses indicates that any agent which deals with endpoints for this
+  // Service should disregard any indications of ready/not-ready.
+  // The primary use case for setting this field is for a StatefulSet's Headless Service to
+  // propagate SRV DNS records for its Pods for the purpose of peer discovery.
+  // The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
+  // Services interpret this to mean that all endpoints are considered "ready" even if the
+  // Pods themselves are not. Agents which consume only Kubernetes generated endpoints
+  // through the Endpoints or EndpointSlice resources can safely assume this behavior.
+  // +optional
+  optional bool publishNotReadyAddresses = 13;
+
+  // sessionAffinityConfig contains the configurations of session affinity.
+  // +optional
+  optional SessionAffinityConfig sessionAffinityConfig = 14;
+
+  // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+  // service. This field is usually assigned automatically based on cluster
+  // configuration and the ipFamilyPolicy field. If this field is specified
+  // manually, and the requested family is available in the cluster and
+  // allowed by ipFamilyPolicy, it will be used; otherwise creation of
+  // the service will fail. This field is conditionally mutable: it allows
+  // for adding or removing a secondary IP family, but it does not allow
+  // changing the primary IP family of the Service. Valid values are "IPv4"
+  // and "IPv6".  This field only applies to Services of types ClusterIP,
+  // NodePort, and LoadBalancer, and does apply to "headless" services.
+  // This field will be wiped when updating a Service to type ExternalName.
+  //
+  // This field may hold a maximum of two entries (dual-stack families, in
+  // either order).  These families must correspond to the values of the
+  // clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+  // governed by the ipFamilyPolicy field.
+  // +listType=atomic
+  // +optional
+  repeated string ipFamilies = 19;
+
+  // IPFamilyPolicy represents the dual-stack-ness requested or required by
+  // this Service. If there is no value provided, then this field will be set
+  // to SingleStack. Services can be "SingleStack" (a single IP family),
+  // "PreferDualStack" (two IP families on dual-stack configured clusters or
+  // a single IP family on single-stack clusters), or "RequireDualStack"
+  // (two IP families on dual-stack configured clusters, otherwise fail). The
+  // ipFamilies and clusterIPs fields depend on the value of this field. This
+  // field will be wiped when updating a service to type ExternalName.
+  // +optional
+  optional string ipFamilyPolicy = 17;
+
+  // allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+  // allocated for services with type LoadBalancer.  Default is "true". It
+  // may be set to "false" if the cluster load-balancer does not rely on
+  // NodePorts.  If the caller requests specific NodePorts (by specifying a
+  // value), those requests will be respected, regardless of this field.
+  // This field may only be set for services with type LoadBalancer and will
+  // be cleared if the type is changed to any other type.
+  // +optional
+  optional bool allocateLoadBalancerNodePorts = 20;
+
+  // loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+  // If specified, the value of this field must be a label-style identifier, with an optional prefix,
+  // e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+  // This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+  // balancer implementation is used, today this is typically done through the cloud provider integration,
+  // but should apply for any default implementation. If set, it is assumed that a load balancer
+  // implementation is watching for Services with a matching class. Any default load balancer
+  // implementation (e.g. cloud providers) should ignore Services that set this field.
+  // This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+  // Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+  // +optional
+  optional string loadBalancerClass = 21;
+
+  // InternalTrafficPolicy describes how nodes distribute service traffic they
+  // receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+  // only want to talk to endpoints of the service on the same node as the pod,
+  // dropping the traffic if there are no local endpoints. The default value,
+  // "Cluster", uses the standard behavior of routing to all endpoints evenly
+  // (possibly modified by topology and other features).
+  // +optional
+  optional string internalTrafficPolicy = 22;
+
+  // TrafficDistribution offers a way to express preferences for how traffic is
+  // distributed to Service endpoints. Implementations can use this field as a
+  // hint, but are not required to guarantee strict adherence. If the field is
+  // not set, the implementation will apply its default routing strategy. If set
+  // to "PreferClose", implementations should prioritize endpoints that are
+  // topologically close (e.g., same zone).
+  // This is a beta field and requires enabling ServiceTrafficDistribution feature.
+  // +featureGate=ServiceTrafficDistribution
+  // +optional
+  optional string trafficDistribution = 23;
+}
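+
+// Illustrative sketch, not part of the generated file: the fields above map onto the Go
+// type in k8s.io/api/core/v1. A hypothetical LoadBalancer Service pinning session
+// affinity and restricting client source ranges might be built as:
+//
+//     spec := corev1.ServiceSpec{
+//         Type:                     corev1.ServiceTypeLoadBalancer,
+//         SessionAffinity:          corev1.ServiceAffinityClientIP,
+//         ExternalTrafficPolicy:    corev1.ServiceExternalTrafficPolicyLocal,
+//         LoadBalancerSourceRanges: []string{"203.0.113.0/24"},
+//     }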
+
+// ServiceStatus represents the current status of a service.
+message ServiceStatus {
+  // LoadBalancer contains the current status of the load-balancer,
+  // if one is present.
+  // +optional
+  optional LoadBalancerStatus loadBalancer = 1;
+
+  // Current service state
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2;
+}
+
+// SessionAffinityConfig represents the configurations of session affinity.
+message SessionAffinityConfig {
+  // clientIP contains the configurations of Client IP based session affinity.
+  // +optional
+  optional ClientIPConfig clientIP = 1;
+}
+
+// SleepAction describes a "sleep" action.
+message SleepAction {
+  // Seconds is the number of seconds to sleep.
+  optional int64 seconds = 1;
+}
+
+// Represents a StorageOS persistent volume resource.
+message StorageOSPersistentVolumeSource {
+  // volumeName is the human-readable name of the StorageOS volume.  Volume
+  // names are only unique within a namespace.
+  optional string volumeName = 1;
+
+  // volumeNamespace specifies the scope of the volume within StorageOS.  If no
+  // namespace is specified then the Pod's namespace will be used.  This allows the
+  // Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+  // Set VolumeName to any name to override the default behaviour.
+  // Set to "default" if you are not using namespaces within StorageOS.
+  // Namespaces that do not pre-exist within StorageOS will be created.
+  // +optional
+  optional string volumeNamespace = 2;
+
+  // fsType is the filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // +optional
+  optional string fsType = 3;
+
+  // readOnly defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 4;
+
+  // secretRef specifies the secret to use for obtaining the StorageOS API
+  // credentials.  If not specified, default values will be attempted.
+  // +optional
+  optional ObjectReference secretRef = 5;
+}
+
+// Represents a StorageOS persistent volume resource.
+message StorageOSVolumeSource {
+  // volumeName is the human-readable name of the StorageOS volume.  Volume
+  // names are only unique within a namespace.
+  optional string volumeName = 1;
+
+  // volumeNamespace specifies the scope of the volume within StorageOS.  If no
+  // namespace is specified then the Pod's namespace will be used.  This allows the
+  // Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+  // Set VolumeName to any name to override the default behaviour.
+  // Set to "default" if you are not using namespaces within StorageOS.
+  // Namespaces that do not pre-exist within StorageOS will be created.
+  // +optional
+  optional string volumeNamespace = 2;
+
+  // fsType is the filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // +optional
+  optional string fsType = 3;
+
+  // readOnly defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 4;
+
+  // secretRef specifies the secret to use for obtaining the StorageOS API
+  // credentials.  If not specified, default values will be attempted.
+  // +optional
+  optional LocalObjectReference secretRef = 5;
+}
+
+// Sysctl defines a kernel parameter to be set
+message Sysctl {
+  // Name of a property to set
+  optional string name = 1;
+
+  // Value of a property to set
+  optional string value = 2;
+}
+
+// TCPSocketAction describes an action based on opening a socket
+message TCPSocketAction {
+  // Number or name of the port to access on the container.
+  // Number must be in the range 1 to 65535.
+  // Name must be an IANA_SVC_NAME.
+  optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1;
+
+  // Optional: Host name to connect to, defaults to the pod IP.
+  // +optional
+  optional string host = 2;
+}
+
+// The node this Taint is attached to has the "effect" on
+// any pod that does not tolerate the Taint.
+message Taint {
+  // Required. The taint key to be applied to a node.
+  optional string key = 1;
+
+  // The taint value corresponding to the taint key.
+  // +optional
+  optional string value = 2;
+
+  // Required. The effect of the taint on pods
+  // that do not tolerate the taint.
+  // Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+  optional string effect = 3;
+
+  // TimeAdded represents the time at which the taint was added.
+  // It is only written for NoExecute taints.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
+}
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+message Toleration {
+  // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+  // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+  // +optional
+  optional string key = 1;
+
+  // Operator represents a key's relationship to the value.
+  // Valid operators are Exists and Equal. Defaults to Equal.
+  // Exists is equivalent to wildcard for value, so that a pod can
+  // tolerate all taints of a particular category.
+  // +optional
+  optional string operator = 2;
+
+  // Value is the taint value the toleration matches to.
+  // If the operator is Exists, the value should be empty, otherwise just a regular string.
+  // +optional
+  optional string value = 3;
+
+  // Effect indicates the taint effect to match. Empty means match all taint effects.
+  // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+  // +optional
+  optional string effect = 4;
+
+  // TolerationSeconds represents the period of time the toleration (which must be
+  // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+  // it is not set, which means tolerate the taint forever (do not evict). Zero and
+  // negative values will be treated as 0 (evict immediately) by the system.
+  // +optional
+  optional int64 tolerationSeconds = 5;
+}
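+
+// Illustrative sketch, not part of the generated file: a Toleration built from Go with
+// k8s.io/api/core/v1 that tolerates a hypothetical "example.com/gpu=true:NoExecute"
+// taint for five minutes before eviction:
+//
+//     seconds := int64(300)
+//     tol := corev1.Toleration{
+//         Key:               "example.com/gpu",
+//         Operator:          corev1.TolerationOpEqual,
+//         Value:             "true",
+//         Effect:            corev1.TaintEffectNoExecute,
+//         TolerationSeconds: &seconds,
+//     }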
+
+// A topology selector requirement is a selector that matches a given label.
+// This is an alpha feature and may change in the future.
+message TopologySelectorLabelRequirement {
+  // The label key that the selector applies to.
+  optional string key = 1;
+
+  // An array of string values. One value must match the label to be selected.
+  // Each entry in Values is ORed.
+  // +listType=atomic
+  repeated string values = 2;
+}
+
+// A topology selector term represents the result of label queries.
+// A null or empty topology selector term matches no objects.
+// Its requirements are ANDed.
+// It provides a subset of the functionality of NodeSelectorTerm.
+// This is an alpha feature and may change in the future.
+// +structType=atomic
+message TopologySelectorTerm {
+  // A list of topology selector requirements by labels.
+  // +optional
+  // +listType=atomic
+  repeated TopologySelectorLabelRequirement matchLabelExpressions = 1;
+}
+
+// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+message TopologySpreadConstraint {
+  // MaxSkew describes the degree to which pods may be unevenly distributed.
+  // When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+  // between the number of matching pods in the target topology and the global minimum.
+  // The global minimum is the minimum number of matching pods in an eligible domain
+  // or zero if the number of eligible domains is less than MinDomains.
+  // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+  // labelSelector spread as 2/2/1:
+  // In this case, the global minimum is 1.
+  // +-------+-------+-------+
+  // | zone1 | zone2 | zone3 |
+  // +-------+-------+-------+
+  // |  P P  |  P P  |   P   |
+  // +-------+-------+-------+
+  // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+  // scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+  // violate MaxSkew(1).
+  // - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+  // When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+  // to topologies that satisfy it.
+  // It's a required field. Default value is 1 and 0 is not allowed.
+  optional int32 maxSkew = 1;
+
+  // TopologyKey is the key of node labels. Nodes that have a label with this key
+  // and identical values are considered to be in the same topology.
+  // We consider each <key, value> as a "bucket", and try to put a balanced number
+  // of pods into each bucket.
+  // We define a domain as a particular instance of a topology.
+  // Also, we define an eligible domain as a domain whose nodes meet the requirements of
+  // nodeAffinityPolicy and nodeTaintsPolicy.
+  // e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+  // And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+  // It's a required field.
+  optional string topologyKey = 2;
+
+  // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+  // the spread constraint.
+  // - DoNotSchedule (default) tells the scheduler not to schedule it.
+  // - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+  //   but giving higher precedence to topologies that would help reduce the
+  //   skew.
+  // A constraint is considered "Unsatisfiable" for an incoming pod
+  // if and only if every possible node assignment for that pod would violate
+  // "MaxSkew" on some topology.
+  // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+  // labelSelector spread as 3/1/1:
+  // +-------+-------+-------+
+  // | zone1 | zone2 | zone3 |
+  // +-------+-------+-------+
+  // | P P P |   P   |   P   |
+  // +-------+-------+-------+
+  // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+  // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+  // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+  // won't make it *more* imbalanced.
+  // It's a required field.
+  optional string whenUnsatisfiable = 3;
+
+  // LabelSelector is used to find matching pods.
+  // Pods that match this label selector are counted to determine the number of pods
+  // in their corresponding topology domain.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4;
+
+  // MinDomains indicates a minimum number of eligible domains.
+  // When the number of eligible domains with matching topology keys is less than minDomains,
+  // Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+  // And when the number of eligible domains with matching topology keys is equal to
+  // or greater than minDomains, this value has no effect on scheduling.
+  // As a result, when the number of eligible domains is less than minDomains,
+  // scheduler won't schedule more than maxSkew Pods to those domains.
+  // If value is nil, the constraint behaves as if MinDomains is equal to 1.
+  // Valid values are integers greater than 0.
+  // When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+  //
+  // For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+  // labelSelector spread as 2/2/2:
+  // +-------+-------+-------+
+  // | zone1 | zone2 | zone3 |
+  // +-------+-------+-------+
+  // |  P P  |  P P  |  P P  |
+  // +-------+-------+-------+
+  // The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+  // In this situation, a new pod with the same labelSelector cannot be scheduled,
+  // because the computed skew will be 3 (3 - 0) if the new pod is scheduled to any
+  // of the three zones, which would violate MaxSkew.
+  // +optional
+  optional int32 minDomains = 5;
+
+  // NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+  // when calculating pod topology spread skew. Options are:
+  // - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+  // - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+  //
+  // If this value is nil, the behavior is equivalent to the Honor policy.
+  // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+  // +optional
+  optional string nodeAffinityPolicy = 6;
+
+  // NodeTaintsPolicy indicates how we will treat node taints when calculating
+  // pod topology spread skew. Options are:
+  // - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+  // has a toleration, are included.
+  // - Ignore: node taints are ignored. All nodes are included.
+  //
+  // If this value is nil, the behavior is equivalent to the Ignore policy.
+  // This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+  // +optional
+  optional string nodeTaintsPolicy = 7;
+
+  // MatchLabelKeys is a set of pod label keys to select the pods over which
+  // spreading will be calculated. The keys are used to lookup values from the
+  // incoming pod labels, those key-value labels are ANDed with labelSelector
+  // to select the group of existing pods over which spreading will be calculated
+  // for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+  // MatchLabelKeys cannot be set when LabelSelector isn't set.
+  // Keys that don't exist in the incoming pod labels will
+  // be ignored. A null or empty list means only match against labelSelector.
+  //
+  // This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+  // +listType=atomic
+  // +optional
+  repeated string matchLabelKeys = 8;
+}
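+
+// Illustrative sketch, not part of the generated file: the 3-zone example above,
+// expressed with k8s.io/api/core/v1 and a hypothetical "app: web" selector:
+//
+//     corev1.TopologySpreadConstraint{
+//         MaxSkew:           1,
+//         TopologyKey:       "topology.kubernetes.io/zone",
+//         WhenUnsatisfiable: corev1.DoNotSchedule,
+//         LabelSelector: &metav1.LabelSelector{
+//             MatchLabels: map[string]string{"app": "web"},
+//         },
+//     }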
+
+// TypedLocalObjectReference contains enough information to let you locate the
+// typed referenced object inside the same namespace.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+//  1. Invalid usage help.  It is impossible to add specific help for individual usage.  In most embedded usages, there are particular
+//     restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+//     Those cannot be well described when embedded.
+//  2. Inconsistent validation.  Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+//  3. The fields are both imprecise and overly precise.  Kind is not a precise mapping to a URL. This can produce ambiguity
+//     during interpretation and require a REST mapping.  In most cases, the dependency is on the group,resource tuple
+//     and the version of the actual struct is irrelevant.
+//  4. We cannot easily change it.  Because this type is embedded in many locations, updates to this type
+//     will affect numerous schemas.  Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
+// +structType=atomic
+message TypedLocalObjectReference {
+  // APIGroup is the group for the resource being referenced.
+  // If APIGroup is not specified, the specified Kind must be in the core API group.
+  // For any other third-party types, APIGroup is required.
+  // +optional
+  optional string apiGroup = 1;
+
+  // Kind is the type of resource being referenced
+  optional string kind = 2;
+
+  // Name is the name of resource being referenced
+  optional string name = 3;
+}
+
+// TypedObjectReference contains enough information to let you locate the typed referenced object
+message TypedObjectReference {
+  // APIGroup is the group for the resource being referenced.
+  // If APIGroup is not specified, the specified Kind must be in the core API group.
+  // For any other third-party types, APIGroup is required.
+  // +optional
+  optional string apiGroup = 1;
+
+  // Kind is the type of resource being referenced
+  optional string kind = 2;
+
+  // Name is the name of resource being referenced
+  optional string name = 3;
+
+  // Namespace is the namespace of resource being referenced
+  // Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+  // (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+  // +featureGate=CrossNamespaceVolumeDataSource
+  // +optional
+  optional string namespace = 4;
+}
+
+// Volume represents a named volume in a pod that may be accessed by any container in the pod.
+message Volume {
+  // name of the volume.
+  // Must be a DNS_LABEL and unique within the pod.
+  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+  optional string name = 1;
+
+  // volumeSource represents the location and type of the mounted volume.
+  // If not specified, the Volume is implied to be an EmptyDir.
+  // This implied behavior is deprecated and will be removed in a future version.
+  optional VolumeSource volumeSource = 2;
+}
+
+// volumeDevice describes a mapping of a raw block device within a container.
+message VolumeDevice {
+  // name must match the name of a persistentVolumeClaim in the pod
+  optional string name = 1;
+
+  // devicePath is the path inside of the container that the device will be mapped to.
+  optional string devicePath = 2;
+}
+
+// VolumeMount describes a mounting of a Volume within a container.
+message VolumeMount {
+  // This must match the Name of a Volume.
+  optional string name = 1;
+
+  // Mounted read-only if true, read-write otherwise (false or unspecified).
+  // Defaults to false.
+  // +optional
+  optional bool readOnly = 2;
+
+  // RecursiveReadOnly specifies whether read-only mounts should be handled
+  // recursively.
+  //
+  // If ReadOnly is false, this field has no meaning and must be unspecified.
+  //
+  // If ReadOnly is true, and this field is set to Disabled, the mount is not made
+  // recursively read-only.  If this field is set to IfPossible, the mount is made
+  // recursively read-only, if it is supported by the container runtime.  If this
+  // field is set to Enabled, the mount is made recursively read-only if it is
+  // supported by the container runtime, otherwise the pod will not be started and
+  // an error will be generated to indicate the reason.
+  //
+  // If this field is set to IfPossible or Enabled, MountPropagation must be set to
+  // None (or be unspecified, which defaults to None).
+  //
+  // If this field is not specified, it is treated as an equivalent of Disabled.
+  //
+  // +featureGate=RecursiveReadOnlyMounts
+  // +optional
+  optional string recursiveReadOnly = 7;
+
+  // Path within the container at which the volume should be mounted.  Must
+  // not contain ':'.
+  optional string mountPath = 3;
+
+  // Path within the volume from which the container's volume should be mounted.
+  // Defaults to "" (volume's root).
+  // +optional
+  optional string subPath = 4;
+
+  // mountPropagation determines how mounts are propagated from the host
+  // to container and the other way around.
+  // When not set, MountPropagationNone is used.
+  // This field is beta in 1.10.
+  // When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+  // (which defaults to None).
+  // +optional
+  optional string mountPropagation = 5;
+
+  // Expanded path within the volume from which the container's volume should be mounted.
+  // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+  // Defaults to "" (volume's root).
+  // SubPathExpr and SubPath are mutually exclusive.
+  // +optional
+  optional string subPathExpr = 6;
+}
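+
+// Illustrative sketch, not part of the generated file: a read-only mount of a single
+// file from a hypothetical "config" volume, via k8s.io/api/core/v1:
+//
+//     corev1.VolumeMount{
+//         Name:      "config",
+//         MountPath: "/etc/app/app.conf",
+//         SubPath:   "app.conf",
+//         ReadOnly:  true,
+//     }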
+
+// VolumeMountStatus shows status of volume mounts.
+message VolumeMountStatus {
+  // Name corresponds to the name of the original VolumeMount.
+  optional string name = 1;
+
+  // MountPath corresponds to the original VolumeMount.
+  optional string mountPath = 2;
+
+  // ReadOnly corresponds to the original VolumeMount.
+  // +optional
+  optional bool readOnly = 3;
+
+  // RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts).
+  // An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled,
+  // depending on the mount result.
+  // +featureGate=RecursiveReadOnlyMounts
+  // +optional
+  optional string recursiveReadOnly = 4;
+}
+
+// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+message VolumeNodeAffinity {
+  // required specifies hard node constraints that must be met.
+  optional NodeSelector required = 1;
+}
+
+// Projection that may be projected along with other supported volume types.
+// Exactly one of these fields must be set.
+message VolumeProjection {
+  // secret information about the secret data to project
+  // +optional
+  optional SecretProjection secret = 1;
+
+  // downwardAPI information about the downwardAPI data to project
+  // +optional
+  optional DownwardAPIProjection downwardAPI = 2;
+
+  // configMap information about the configMap data to project
+  // +optional
+  optional ConfigMapProjection configMap = 3;
+
+  // serviceAccountToken is information about the serviceAccountToken data to project
+  // +optional
+  optional ServiceAccountTokenProjection serviceAccountToken = 4;
+
+  // ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+  // of ClusterTrustBundle objects in an auto-updating file.
+  //
+  // Alpha, gated by the ClusterTrustBundleProjection feature gate.
+  //
+  // ClusterTrustBundle objects can either be selected by name, or by the
+  // combination of signer name and a label selector.
+  //
+  // Kubelet performs aggressive normalization of the PEM contents written
+  // into the pod filesystem.  Esoteric PEM features such as inter-block
+  // comments and block headers are stripped.  Certificates are deduplicated.
+  // The ordering of certificates within the file is arbitrary, and Kubelet
+  // may change the order over time.
+  //
+  // +featureGate=ClusterTrustBundleProjection
+  // +optional
+  optional ClusterTrustBundleProjection clusterTrustBundle = 5;
+}
+
+// VolumeResourceRequirements describes the storage resource requirements for a volume.
+message VolumeResourceRequirements {
+  // Limits describes the maximum amount of compute resources allowed.
+  // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> limits = 1;
+
+  // Requests describes the minimum amount of compute resources required.
+  // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+  // otherwise to an implementation-defined value. Requests cannot exceed Limits.
+  // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+  // +optional
+  map<string, .k8s.io.apimachinery.pkg.api.resource.Quantity> requests = 2;
+}
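+
+// Illustrative sketch, not part of the generated file: requesting storage for a
+// PersistentVolumeClaim with k8s.io/api/core/v1 and k8s.io/apimachinery/pkg/api/resource:
+//
+//     corev1.VolumeResourceRequirements{
+//         Requests: corev1.ResourceList{
+//             corev1.ResourceStorage: resource.MustParse("8Gi"),
+//         },
+//     }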
+
+// Represents the source of a volume to mount.
+// Only one of its members may be specified.
+message VolumeSource {
+  // hostPath represents a pre-existing file or directory on the host
+  // machine that is directly exposed to the container. This is generally
+  // used for system agents or other privileged things that are allowed
+  // to see the host machine. Most containers will NOT need this.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+  // ---
+  // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+  // mount host directories as read/write.
+  // +optional
+  optional HostPathVolumeSource hostPath = 1;
+
+  // emptyDir represents a temporary directory that shares a pod's lifetime.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+  // +optional
+  optional EmptyDirVolumeSource emptyDir = 2;
+
+  // gcePersistentDisk represents a GCE Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod.
+  // Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+  // gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+  // +optional
+  optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3;
+
+  // awsElasticBlockStore represents an AWS Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod.
+  // Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+  // awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+  // +optional
+  optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4;
+
+  // gitRepo represents a git repository at a particular revision.
+  // Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
+  // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+  // into the Pod's container.
+  // +optional
+  optional GitRepoVolumeSource gitRepo = 5;
+
+  // secret represents a secret that should populate this volume.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+  // +optional
+  optional SecretVolumeSource secret = 6;
+
+  // nfs represents an NFS mount on the host that shares a pod's lifetime
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+  // +optional
+  optional NFSVolumeSource nfs = 7;
+
+  // iscsi represents an ISCSI Disk resource that is attached to a
+  // kubelet's host machine and then exposed to the pod.
+  // More info: https://examples.k8s.io/volumes/iscsi/README.md
+  // +optional
+  optional ISCSIVolumeSource iscsi = 8;
+
+  // glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+  // Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+  // More info: https://examples.k8s.io/volumes/glusterfs/README.md
+  // +optional
+  optional GlusterfsVolumeSource glusterfs = 9;
+
+  // persistentVolumeClaimVolumeSource represents a reference to a
+  // PersistentVolumeClaim in the same namespace.
+  // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+  // +optional
+  optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10;
+
+  // rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+  // Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+  // More info: https://examples.k8s.io/volumes/rbd/README.md
+  // +optional
+  optional RBDVolumeSource rbd = 11;
+
+  // flexVolume represents a generic volume resource that is
+  // provisioned/attached using an exec based plugin.
+  // Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+  // +optional
+  optional FlexVolumeSource flexVolume = 12;
+
+  // cinder represents a cinder volume attached and mounted on kubelets host machine.
+  // Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+  // are redirected to the cinder.csi.openstack.org CSI driver.
+  // More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+  // +optional
+  optional CinderVolumeSource cinder = 13;
+
+  // cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+  // Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
+  // +optional
+  optional CephFSVolumeSource cephfs = 14;
+
+  // flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
+  // Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
+  // +optional
+  optional FlockerVolumeSource flocker = 15;
+
+  // downwardAPI represents downward API about the pod that should populate this volume
+  // +optional
+  optional DownwardAPIVolumeSource downwardAPI = 16;
+
+  // fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+  // +optional
+  optional FCVolumeSource fc = 17;
+
+  // azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+  // Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+  // are redirected to the file.csi.azure.com CSI driver.
+  // +optional
+  optional AzureFileVolumeSource azureFile = 18;
+
+  // configMap represents a configMap that should populate this volume
+  // +optional
+  optional ConfigMapVolumeSource configMap = 19;
+
+  // vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
+  // Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+  // are redirected to the csi.vsphere.vmware.com CSI driver.
+  // +optional
+  optional VsphereVirtualDiskVolumeSource vsphereVolume = 20;
+
+  // quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+  // Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+  // +optional
+  optional QuobyteVolumeSource quobyte = 21;
+
+  // azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+  // Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+  // are redirected to the disk.csi.azure.com CSI driver.
+  // +optional
+  optional AzureDiskVolumeSource azureDisk = 22;
+
+  // photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
+  // Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
+  optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23;
+
+  // projected items for all-in-one resources: secrets, configmaps, and downward API
+  optional ProjectedVolumeSource projected = 26;
+
+  // portworxVolume represents a portworx volume attached and mounted on kubelets host machine.
+  // Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+  // are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+  // is on.
+  // +optional
+  optional PortworxVolumeSource portworxVolume = 24;
+
+  // scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+  // Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
+  // +optional
+  optional ScaleIOVolumeSource scaleIO = 25;
+
+  // storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+  // Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
+  // +optional
+  optional StorageOSVolumeSource storageos = 27;
+
+  // csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.
+  // +optional
+  optional CSIVolumeSource csi = 28;
+
+  // ephemeral represents a volume that is handled by a cluster storage driver.
+  // The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+  // and deleted when the pod is removed.
+  //
+  // Use this if:
+  // a) the volume is only needed while the pod runs,
+  // b) features of normal volumes like restoring from snapshot or capacity
+  //    tracking are needed,
+  // c) the storage driver is specified through a storage class, and
+  // d) the storage driver supports dynamic volume provisioning through
+  //    a PersistentVolumeClaim (see EphemeralVolumeSource for more
+  //    information on the connection between this volume type
+  //    and PersistentVolumeClaim).
+  //
+  // Use PersistentVolumeClaim or one of the vendor-specific
+  // APIs for volumes that persist for longer than the lifecycle
+  // of an individual pod.
+  //
+  // Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+  // be used that way - see the documentation of the driver for
+  // more information.
+  //
+  // A pod can use both types of ephemeral volumes and
+  // persistent volumes at the same time.
+  //
+  // +optional
+  optional EphemeralVolumeSource ephemeral = 29;
+
+  // image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+  // The volume is resolved at pod startup depending on which PullPolicy value is provided:
+  //
+  // - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+  // - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+  // - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+  //
+  // The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+  // A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+  // The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+  // The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+  // The volume will be mounted read-only (ro) and non-executable files (noexec).
+  // Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+  // The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+  // +featureGate=ImageVolume
+  // +optional
+  optional ImageVolumeSource image = 30;
+}
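+
+// Illustrative sketch, not part of the generated file: an image-backed volume (gated by
+// the ImageVolume feature) declared with k8s.io/api/core/v1, pulling a hypothetical
+// OCI artifact:
+//
+//     corev1.Volume{
+//         Name: "models",
+//         VolumeSource: corev1.VolumeSource{
+//             Image: &corev1.ImageVolumeSource{
+//                 Reference:  "registry.example.com/models:v1",
+//                 PullPolicy: corev1.PullIfNotPresent,
+//             },
+//         },
+//     }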
+
+// Represents a vSphere volume resource.
+message VsphereVirtualDiskVolumeSource {
+  // volumePath is the path that identifies vSphere volume vmdk
+  optional string volumePath = 1;
+
+  // fsType is filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // +optional
+  optional string fsType = 2;
+
+  // storagePolicyName is the storage Policy Based Management (SPBM) profile name.
+  // +optional
+  optional string storagePolicyName = 3;
+
+  // storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+  // +optional
+  optional string storagePolicyID = 4;
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+message WeightedPodAffinityTerm {
+  // weight associated with matching the corresponding podAffinityTerm,
+  // in the range 1-100.
+  optional int32 weight = 1;
+
+  // Required. A pod affinity term, associated with the corresponding weight.
+  optional PodAffinityTerm podAffinityTerm = 2;
+}
+
+// WindowsSecurityContextOptions contain Windows-specific options and credentials.
+message WindowsSecurityContextOptions {
+  // GMSACredentialSpecName is the name of the GMSA credential spec to use.
+  // +optional
+  optional string gmsaCredentialSpecName = 1;
+
+  // GMSACredentialSpec is where the GMSA admission webhook
+  // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+  // GMSA credential spec named by the GMSACredentialSpecName field.
+  // +optional
+  optional string gmsaCredentialSpec = 2;
+
+  // The UserName in Windows to run the entrypoint of the container process.
+  // Defaults to the user specified in image metadata if unspecified.
+  // May also be set in PodSecurityContext. If set in both SecurityContext and
+  // PodSecurityContext, the value specified in SecurityContext takes precedence.
+  // +optional
+  optional string runAsUserName = 3;
+
+  // HostProcess determines if a container should be run as a 'Host Process' container.
+  // All of a Pod's containers must have the same effective HostProcess value
+  // (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+  // In addition, if HostProcess is true then HostNetwork must also be set to true.
+  // +optional
+  optional bool hostProcess = 4;
+}
+
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/lifecycle.go b/hack/tools/vendor/k8s.io/api/core/v1/lifecycle.go
new file mode 100644
index 0000000000..21ca90e815
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/lifecycle.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// APILifecycleIntroduced returns the release in which the API struct was introduced as int versions of major and minor for comparison.
+func (in *ComponentStatus) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleDeprecated returns the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+func (in *ComponentStatus) APILifecycleDeprecated() (major, minor int) {
+	return 1, 19
+}
+
+// APILifecycleIntroduced returns the release in which the API struct was introduced as int versions of major and minor for comparison.
+func (in *ComponentStatusList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleDeprecated returns the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
+func (in *ComponentStatusList) APILifecycleDeprecated() (major, minor int) {
+	return 1, 19
+}
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/objectreference.go b/hack/tools/vendor/k8s.io/api/core/v1/objectreference.go
new file mode 100644
index 0000000000..609cadc7aa
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/objectreference.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SetGroupVersionKind allows clients to preemptively get a reference to an API object and pass it to places that
+// intend only to get a reference to that object. This simplifies the event recording interface.
+func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+
+func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind {
+	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
+
+func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj }
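+
+// Illustrative usage, not part of the upstream file: stamp a reference with its
+// group/version/kind before handing it to, e.g., an event recorder:
+//
+//	ref := &ObjectReference{Namespace: "default", Name: "my-pod"}
+//	ref.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "Pod"})
+//	gvk := ref.GroupVersionKind() // "v1", Kind=Pod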
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/register.go b/hack/tools/vendor/k8s.io/api/core/v1/register.go
new file mode 100644
index 0000000000..1aac0cb41e
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/register.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name use in this package
+const GroupName = ""
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Pod{},
+		&PodList{},
+		&PodStatusResult{},
+		&PodTemplate{},
+		&PodTemplateList{},
+		&ReplicationController{},
+		&ReplicationControllerList{},
+		&Service{},
+		&ServiceProxyOptions{},
+		&ServiceList{},
+		&Endpoints{},
+		&EndpointsList{},
+		&Node{},
+		&NodeList{},
+		&NodeProxyOptions{},
+		&Binding{},
+		&Event{},
+		&EventList{},
+		&List{},
+		&LimitRange{},
+		&LimitRangeList{},
+		&ResourceQuota{},
+		&ResourceQuotaList{},
+		&Namespace{},
+		&NamespaceList{},
+		&Secret{},
+		&SecretList{},
+		&ServiceAccount{},
+		&ServiceAccountList{},
+		&PersistentVolume{},
+		&PersistentVolumeList{},
+		&PersistentVolumeClaim{},
+		&PersistentVolumeClaimList{},
+		&PodAttachOptions{},
+		&PodLogOptions{},
+		&PodExecOptions{},
+		&PodPortForwardOptions{},
+		&PodProxyOptions{},
+		&ComponentStatus{},
+		&ComponentStatusList{},
+		&SerializedReference{},
+		&RangeAllocation{},
+		&ConfigMap{},
+		&ConfigMapList{},
+	)
+
+	// Add common types
+	scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{})
+
+	// Add the watch version that applies
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
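+
+// Illustrative usage, not part of the upstream file: register these types into a fresh
+// scheme, e.g. when building codecs for a client or test:
+//
+//	scheme := runtime.NewScheme()
+//	if err := AddToScheme(scheme); err != nil {
+//		panic(err)
+//	}
+//	// scheme now recognizes core/v1 kinds such as Pod, Service, and ConfigMap.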
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/resource.go b/hack/tools/vendor/k8s.io/api/core/v1/resource.go
new file mode 100644
index 0000000000..4e249d03ab
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/resource.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+// Returns string version of ResourceName.
+func (rn ResourceName) String() string {
+	return string(rn)
+}
+
+// Cpu returns the Cpu limit if specified.
+func (rl *ResourceList) Cpu() *resource.Quantity {
+	return rl.Name(ResourceCPU, resource.DecimalSI)
+}
+
+// Memory returns the Memory limit if specified.
+func (rl *ResourceList) Memory() *resource.Quantity {
+	return rl.Name(ResourceMemory, resource.BinarySI)
+}
+
+// Storage returns the Storage limit if specified.
+func (rl *ResourceList) Storage() *resource.Quantity {
+	return rl.Name(ResourceStorage, resource.BinarySI)
+}
+
+// Pods returns the pods quantity if specified.
+func (rl *ResourceList) Pods() *resource.Quantity {
+	return rl.Name(ResourcePods, resource.DecimalSI)
+}
+
+// StorageEphemeral returns the ephemeral storage quantity if specified.
+func (rl *ResourceList) StorageEphemeral() *resource.Quantity {
+	return rl.Name(ResourceEphemeralStorage, resource.BinarySI)
+}
+
+// Name returns the quantity for the named resource if specified; otherwise it returns a zero quantity with the default format.
+func (rl *ResourceList) Name(name ResourceName, defaultFormat resource.Format) *resource.Quantity {
+	if val, ok := (*rl)[name]; ok {
+		return &val
+	}
+	return &resource.Quantity{Format: defaultFormat}
+}
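+
+// Illustrative usage, not part of the upstream file: reading typed quantities out of a
+// hypothetical limits list:
+//
+//	limits := ResourceList{
+//		ResourceCPU:    resource.MustParse("500m"),
+//		ResourceMemory: resource.MustParse("128Mi"),
+//	}
+//	cpu := limits.Cpu()    // 500m
+//	mem := limits.Memory() // 128Mi
+//	_ = limits.Storage()   // zero quantity in BinarySI format, since storage is unset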
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/taint.go b/hack/tools/vendor/k8s.io/api/core/v1/taint.go
new file mode 100644
index 0000000000..db71bd2fd0
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/taint.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import "fmt"
+
+// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect;
+// if the two taints have the same key:effect, they are regarded as matching.
+func (t *Taint) MatchTaint(taintToMatch *Taint) bool {
+	return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
+}
+
+// taint.ToString() converts taint struct to string in format '<key>=<value>:<effect>', '<key>=<value>:', '<key>:<effect>', or '<key>'.
+func (t *Taint) ToString() string {
+	if len(t.Effect) == 0 {
+		if len(t.Value) == 0 {
+			return fmt.Sprintf("%v", t.Key)
+		}
+		return fmt.Sprintf("%v=%v:", t.Key, t.Value)
+	}
+	if len(t.Value) == 0 {
+		return fmt.Sprintf("%v:%v", t.Key, t.Effect)
+	}
+	return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
+}
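+
+// Illustrative usage, not part of the upstream file:
+//
+//	t := Taint{Key: "node.kubernetes.io/unreachable", Effect: TaintEffectNoExecute}
+//	t.ToString()                                       // "node.kubernetes.io/unreachable:NoExecute"
+//	t.MatchTaint(&Taint{Key: t.Key, Effect: t.Effect}) // true: same key and effect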
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/toleration.go b/hack/tools/vendor/k8s.io/api/core/v1/toleration.go
new file mode 100644
index 0000000000..e803d518b5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/toleration.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>;
+// if the two tolerations have the same <key,effect,operator,value> combination, they are regarded as matching.
+// TODO: uniqueness check for tolerations in api validations.
+func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
+	return t.Key == tolerationToMatch.Key &&
+		t.Effect == tolerationToMatch.Effect &&
+		t.Operator == tolerationToMatch.Operator &&
+		t.Value == tolerationToMatch.Value
+}
+
+// ToleratesTaint checks if the toleration tolerates the taint.
+// The matching follows the rules below:
+//
+//  1. Empty toleration.effect means to match all taint effects,
+//     otherwise taint effect must equal to toleration.effect.
+//  2. If toleration.operator is 'Exists', it means to match all taint values.
+//  3. Empty toleration.key means to match all taint keys.
+//     If toleration.key is empty, toleration.operator must be 'Exists';
+//     this combination means to match all taint values and all taint keys.
+func (t *Toleration) ToleratesTaint(taint *Taint) bool {
+	if len(t.Effect) > 0 && t.Effect != taint.Effect {
+		return false
+	}
+
+	if len(t.Key) > 0 && t.Key != taint.Key {
+		return false
+	}
+
+	// TODO: Use proper defaulting when Toleration becomes a field of PodSpec
+	switch t.Operator {
+	// empty operator means Equal
+	case "", TolerationOpEqual:
+		return t.Value == taint.Value
+	case TolerationOpExists:
+		return true
+	default:
+		return false
+	}
+}
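+
+// Illustrative usage, not part of the upstream file: an empty-key toleration with
+// operator Exists matches every taint, per rule 3 above:
+//
+//	tol := Toleration{Operator: TolerationOpExists}
+//	tol.ToleratesTaint(&Taint{Key: "example.com/maintenance", Effect: TaintEffectNoSchedule}) // true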
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/types.go b/hack/tools/vendor/k8s.io/api/core/v1/types.go
new file mode 100644
index 0000000000..fb2c1c7453
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/types.go
@@ -0,0 +1,7966 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+const (
+	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
+	NamespaceDefault string = "default"
+	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
+	NamespaceAll string = ""
+	// NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats)
+	NamespaceNodeLease string = "kube-node-lease"
+)
+
+// Volume represents a named volume in a pod that may be accessed by any container in the pod.
+type Volume struct {
+	// name of the volume.
+	// Must be a DNS_LABEL and unique within the pod.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// volumeSource represents the location and type of the mounted volume.
+	// If not specified, the Volume is implied to be an EmptyDir.
+	// This implied behavior is deprecated and will be removed in a future version.
+	VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"`
+}
+
+// Represents the source of a volume to mount.
+// Only one of its members may be specified.
+type VolumeSource struct {
+	// hostPath represents a pre-existing file or directory on the host
+	// machine that is directly exposed to the container. This is generally
+	// used for system agents or other privileged things that are allowed
+	// to see the host machine. Most containers will NOT need this.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+	// ---
+	// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+	// mount host directories as read/write.
+	// +optional
+	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"`
+	// emptyDir represents a temporary directory that shares a pod's lifetime.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+	// +optional
+	EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
+	// gcePersistentDisk represents a GCE Disk resource that is attached to a
+	// kubelet's host machine and then exposed to the pod.
+	// Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+	// gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+	// +optional
+	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
+	// awsElasticBlockStore represents an AWS Disk resource that is attached to a
+	// kubelet's host machine and then exposed to the pod.
+	// Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+	// awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+	// +optional
+	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
+	// gitRepo represents a git repository at a particular revision.
+	// Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
+	// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+	// into the Pod's container.
+	// +optional
+	GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"`
+	// secret represents a secret that should populate this volume.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+	// +optional
+	Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"`
+	// nfs represents an NFS mount on the host that shares a pod's lifetime
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+	// +optional
+	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
+	// iscsi represents an ISCSI Disk resource that is attached to a
+	// kubelet's host machine and then exposed to the pod.
+	// More info: https://examples.k8s.io/volumes/iscsi/README.md
+	// +optional
+	ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
+	// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+	// Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+	// More info: https://examples.k8s.io/volumes/glusterfs/README.md
+	// +optional
+	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
+	// persistentVolumeClaimVolumeSource represents a reference to a
+	// PersistentVolumeClaim in the same namespace.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+	// +optional
+	PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
+	// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+	// Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md
+	// +optional
+	RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
+	// flexVolume represents a generic volume resource that is
+	// provisioned/attached using an exec based plugin.
+	// Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+	// +optional
+	FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
+	// cinder represents a cinder volume attached and mounted on the kubelet's host machine.
+	// Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+	// are redirected to the cinder.csi.openstack.org CSI driver.
+	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+	// +optional
+	Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
+	// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+	// Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
+	// +optional
+	CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
+	// flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service running.
+	// Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
+	// +optional
+	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
+	// downwardAPI represents downward API about the pod that should populate this volume
+	// +optional
+	DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"`
+	// fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+	// +optional
+	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
+	// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+	// Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+	// are redirected to the file.csi.azure.com CSI driver.
+	// +optional
+	AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
+	// configMap represents a configMap that should populate this volume
+	// +optional
+	ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
+	// vsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine.
+	// Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+	// are redirected to the csi.vsphere.vmware.com CSI driver.
+	// +optional
+	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
+	// quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+	// Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+	// +optional
+	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
+	// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+	// Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+	// are redirected to the disk.csi.azure.com CSI driver.
+	// +optional
+	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
+	// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on the kubelet's host machine.
+	// Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
+	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
+	// projected items for all-in-one resources: secrets, configmaps, and downward API
+	Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
+	// portworxVolume represents a portworx volume attached and mounted on the kubelet's host machine.
+	// Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+	// are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+	// is on.
+	// +optional
+	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
+	// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+	// Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
+	// +optional
+	ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
+	// storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
+	// Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
+	// +optional
+	StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
+	// csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.
+	// +optional
+	CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"`
+	// ephemeral represents a volume that is handled by a cluster storage driver.
+	// The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+	// and deleted when the pod is removed.
+	//
+	// Use this if:
+	// a) the volume is only needed while the pod runs,
+	// b) features of normal volumes like restoring from snapshot or capacity
+	//    tracking are needed,
+	// c) the storage driver is specified through a storage class, and
+	// d) the storage driver supports dynamic volume provisioning through
+	//    a PersistentVolumeClaim (see EphemeralVolumeSource for more
+	//    information on the connection between this volume type
+	//    and PersistentVolumeClaim).
+	//
+	// Use PersistentVolumeClaim or one of the vendor-specific
+	// APIs for volumes that persist for longer than the lifecycle
+	// of an individual pod.
+	//
+	// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+	// be used that way - see the documentation of the driver for
+	// more information.
+	//
+	// A pod can use both types of ephemeral volumes and
+	// persistent volumes at the same time.
+	//
+	// +optional
+	Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"`
+	// image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine.
+	// The volume is resolved at pod startup depending on which PullPolicy value is provided:
+	//
+	// - Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+	// - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+	// - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+	//
+	// The volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation.
+	// A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message.
+	// The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field.
+	// The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images.
+	// The volume will be mounted read-only (ro) and with non-executable files (noexec).
+	// Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath).
+	// The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.
+	// +featureGate=ImageVolume
+	// +optional
+	Image *ImageVolumeSource `json:"image,omitempty" protobuf:"bytes,30,opt,name=image"`
+}
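+
+// exampleScratchVolume is an illustrative sketch, not part of the vendored
+// upstream file: it sets exactly one VolumeSource member, as the struct
+// comment above requires. The volume name is hypothetical.
+func exampleScratchVolume() Volume {
+	return Volume{
+		Name: "scratch",
+		VolumeSource: VolumeSource{
+			// Only one member may be set; emptyDir lives and dies with the pod.
+			EmptyDir: &EmptyDirVolumeSource{},
+		},
+	}
+}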
+
+// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
+// This volume finds the bound PV and mounts that volume for the pod. A
+// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
+// type of volume that is owned by someone else (the system).
+type PersistentVolumeClaimVolumeSource struct {
+	// claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+	ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"`
+	// readOnly will force the ReadOnly setting in VolumeMounts.
+	// Default false.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
+}
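+
+// examplePVCVolume is an illustrative sketch, not part of the vendored
+// upstream file: it shows the wrapper pattern described above, exposing a
+// hypothetical claim "data-pvc" as a pod volume.
+func examplePVCVolume() Volume {
+	return Volume{
+		Name: "data",
+		VolumeSource: VolumeSource{
+			PersistentVolumeClaim: &PersistentVolumeClaimVolumeSource{
+				ClaimName: "data-pvc",
+				ReadOnly:  false, // the default; true forces read-only mounts
+			},
+		},
+	}
+}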
+
+// PersistentVolumeSource is similar to VolumeSource but meant for the
+// administrator who creates PVs. Exactly one of its members must be set.
+type PersistentVolumeSource struct {
+	// gcePersistentDisk represents a GCE Disk resource that is attached to a
+	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+	// Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree
+	// gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+	// +optional
+	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
+	// awsElasticBlockStore represents an AWS Disk resource that is attached to a
+	// kubelet's host machine and then exposed to the pod.
+	// Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree
+	// awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+	// +optional
+	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
+	// hostPath represents a directory on the host.
+	// Provisioned by a developer or tester.
+	// This is useful for single-node development and testing only!
+	// On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+	// +optional
+	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
+	// glusterfs represents a Glusterfs volume that is attached to a host and
+	// exposed to the pod. Provisioned by an admin.
+	// Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported.
+	// More info: https://examples.k8s.io/volumes/glusterfs/README.md
+	// +optional
+	Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
+	// nfs represents an NFS mount on the host. Provisioned by an admin.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+	// +optional
+	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
+	// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+	// Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md
+	// +optional
+	RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
+	// iscsi represents an ISCSI Disk resource that is attached to a
+	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
+	// +optional
+	ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
+	// cinder represents a cinder volume attached and mounted on the kubelet's host machine.
+	// Deprecated: Cinder is deprecated. All operations for the in-tree cinder type
+	// are redirected to the cinder.csi.openstack.org CSI driver.
+	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+	// +optional
+	Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
+	// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
+	// Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.
+	// +optional
+	CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
+	// fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+	// +optional
+	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
+	// flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service running.
+	// Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.
+	// +optional
+	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
+	// flexVolume represents a generic volume resource that is
+	// provisioned/attached using an exec based plugin.
+	// Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.
+	// +optional
+	FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
+	// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+	// Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type
+	// are redirected to the file.csi.azure.com CSI driver.
+	// +optional
+	AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
+	// vsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine.
+	// Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type
+	// are redirected to the csi.vsphere.vmware.com CSI driver.
+	// +optional
+	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
+	// quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
+	// Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.
+	// +optional
+	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
+	// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+	// Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type
+	// are redirected to the disk.csi.azure.com CSI driver.
+	// +optional
+	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
+	// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on the kubelet's host machine.
+	// Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.
+	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
+	// portworxVolume represents a portworx volume attached and mounted on the kubelet's host machine.
+	// Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type
+	// are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate
+	// is on.
+	// +optional
+	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
+	// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+	// Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.
+	// +optional
+	ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
+	// local represents directly-attached storage with node affinity
+	// +optional
+	Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
+	// storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod.
+	// Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.
+	// More info: https://examples.k8s.io/volumes/storageos/README.md
+	// +optional
+	StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
+	// csi represents storage that is handled by an external CSI driver.
+	// +optional
+	CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
+}
+
+const (
+	// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
+	// It's currently still used and will be held for backwards compatibility
+	BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
+
+	// MountOptionAnnotation defines mount option annotation used in PVs
+	MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// PersistentVolume (PV) is a storage resource provisioned by an administrator.
+// It is analogous to a node.
+// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
+type PersistentVolume struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec defines a specification of a persistent volume owned by the cluster.
+	// Provisioned by an administrator.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
+	// +optional
+	Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// status represents the current information/status for the persistent volume.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
+	// +optional
+	Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PersistentVolumeSpec is the specification of a persistent volume.
+type PersistentVolumeSpec struct {
+	// capacity is the description of the persistent volume's resources and capacity.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
+	// +optional
+	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+	// persistentVolumeSource is the actual volume backing the persistent volume.
+	PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"`
+	// accessModes contains all ways the volume can be mounted.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
+	// +optional
+	// +listType=atomic
+	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
+	// claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+	// Expected to be non-nil when bound.
+	// claim.VolumeName is the authoritative bind between PV and PVC.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
+	// +optional
+	// +structType=granular
+	ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"`
+	// persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim.
+	// Valid options are Retain (default for manually created PersistentVolumes), Delete (default
+	// for dynamically provisioned PersistentVolumes), and Recycle (deprecated).
+	// Recycle must be supported by the volume plugin underlying this PersistentVolume.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
+	// +optional
+	PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
+	// storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value
+	// means that this volume does not belong to any StorageClass.
+	// +optional
+	StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"`
+	// mountOptions is the list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
+	// simply fail if one is invalid.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
+	// +optional
+	// +listType=atomic
+	MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
+	// volumeMode defines if a volume is intended to be used with a formatted filesystem
+	// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
+	// +optional
+	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
+	// nodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+	// This field influences the scheduling of pods that use this volume.
+	// +optional
+	NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"`
+	// Name of VolumeAttributesClass to which this persistent volume belongs. Empty value
+	// is not allowed. When this field is not set, it indicates that this volume does not belong to any
+	// VolumeAttributesClass. This field is mutable and can be changed by the CSI driver
+	// after a volume has been updated successfully to a new class.
+	// For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound
+	// PersistentVolumeClaims during the binding process.
+	// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
+	// +featureGate=VolumeAttributesClass
+	// +optional
+	VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,10,opt,name=volumeAttributesClassName"`
+}
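+
+// exampleHostPathPV is an illustrative sketch, not part of the vendored
+// upstream file: it fills the PersistentVolumeSpec fields documented above
+// for a single-node test volume. Names, path, and size are hypothetical.
+func exampleHostPathPV() PersistentVolume {
+	fs := PersistentVolumeFilesystem
+	return PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{Name: "pv-example"},
+		Spec: PersistentVolumeSpec{
+			Capacity:               ResourceList{ResourceStorage: resource.MustParse("10Gi")},
+			PersistentVolumeSource: PersistentVolumeSource{HostPath: &HostPathVolumeSource{Path: "/tmp/pv-example"}},
+			AccessModes:            []PersistentVolumeAccessMode{ReadWriteOnce},
+			// Retain is the default for manually created PVs.
+			PersistentVolumeReclaimPolicy: PersistentVolumeReclaimRetain,
+			StorageClassName:              "manual",
+			VolumeMode:                    &fs,
+		},
+	}
+}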
+
+// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+type VolumeNodeAffinity struct {
+	// required specifies hard node constraints that must be met.
+	Required *NodeSelector `json:"required,omitempty" protobuf:"bytes,1,opt,name=required"`
+}
+
+// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
+// +enum
+type PersistentVolumeReclaimPolicy string
+
+const (
+	// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
+	// The volume plugin must support Recycling.
+	PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
+	// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
+	// The volume plugin must support Deletion.
+	PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
+	// PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
+	// The default policy is Retain.
+	PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
+)
+
+// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.
+// +enum
+type PersistentVolumeMode string
+
+const (
+	// PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device.
+	PersistentVolumeBlock PersistentVolumeMode = "Block"
+	// PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem.
+	PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem"
+)
+
+// PersistentVolumeStatus is the current status of a persistent volume.
+type PersistentVolumeStatus struct {
+	// phase indicates if a volume is available, bound to a claim, or released by a claim.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
+	// +optional
+	Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"`
+	// message is a human-readable message indicating details about why the volume is in this state.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+	// reason is a brief CamelCase string that describes any failure and is meant
+	// for machine parsing and tidy display in the CLI.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+	// lastPhaseTransitionTime is the time the phase transitioned from one to another
+	// and automatically resets to the current time every time a volume phase transitions.
+	// +optional
+	LastPhaseTransitionTime *metav1.Time `json:"lastPhaseTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastPhaseTransitionTime"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// PersistentVolumeList is a list of PersistentVolume items.
+type PersistentVolumeList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// items is a list of persistent volumes.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
+	Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// PersistentVolumeClaim is a user's request for and claim to a persistent volume
+type PersistentVolumeClaim struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec defines the desired characteristics of a volume requested by a pod author.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+	// +optional
+	Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// status represents the current information/status of a persistent volume claim.
+	// Read-only.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+	// +optional
+	Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
+type PersistentVolumeClaimList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// items is a list of persistent volume claims.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+	Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// PersistentVolumeClaimSpec describes the common attributes of storage devices
+// and allows a Source for provider-specific attributes
+type PersistentVolumeClaimSpec struct {
+	// accessModes contains the desired access modes the volume should have.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+	// +optional
+	// +listType=atomic
+	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
+	// selector is a label query over volumes to consider for binding.
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
+	// resources represents the minimum resources the volume should have.
+	// If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+	// that are lower than previous value but must still be higher than capacity recorded in the
+	// status field of the claim.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+	// +optional
+	Resources VolumeResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
+	// volumeName is the binding reference to the PersistentVolume backing this claim.
+	// +optional
+	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
+	// storageClassName is the name of the StorageClass required by the claim.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+	// +optional
+	StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
+	// volumeMode defines what type of volume is required by the claim.
+	// Value of Filesystem is implied when not included in claim spec.
+	// +optional
+	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
+	// dataSource field can be used to specify either:
+	// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+	// * An existing PVC (PersistentVolumeClaim)
+	// If the provisioner or an external controller can support the specified data source,
+	// it will create a new volume based on the contents of the specified data source.
+	// When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+	// and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+	// If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+	// +optional
+	DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"`
+	// dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+	// volume is desired. This may be any object from a non-empty API group (non
+	// core object) or a PersistentVolumeClaim object.
+	// When this field is specified, volume binding will only succeed if the type of
+	// the specified object matches some installed volume populator or dynamic
+	// provisioner.
+	// This field will replace the functionality of the dataSource field and as such
+	// if both fields are non-empty, they must have the same value. For backwards
+	// compatibility, when namespace isn't specified in dataSourceRef,
+	// both fields (dataSource and dataSourceRef) will be set to the same
+	// value automatically if one of them is empty and the other is non-empty.
+	// When namespace is specified in dataSourceRef,
+	// dataSource isn't set to the same value and must be empty.
+	// There are three important differences between dataSource and dataSourceRef:
+	// * While dataSource only allows two specific types of objects, dataSourceRef
+	//   allows any non-core object, as well as PersistentVolumeClaim objects.
+	// * While dataSource ignores disallowed values (dropping them), dataSourceRef
+	//   preserves all values, and generates an error if a disallowed value is
+	//   specified.
+	// * While dataSource only allows local objects, dataSourceRef allows objects
+	//   in any namespaces.
+	// (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+	// (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+	// +optional
+	DataSourceRef *TypedObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"`
+	// volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+	// If specified, the CSI driver will create or update the volume with the attributes defined
+	// in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName;
+	// it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+	// will be applied to the claim, but it is not allowed to reset this field to an empty string once it is set.
+	// If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+	// will be set by the persistentvolume controller if it exists.
+	// If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+	// set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+	// exists.
+	// More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+	// (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).
+	// +featureGate=VolumeAttributesClass
+	// +optional
+	VolumeAttributesClassName *string `json:"volumeAttributesClassName,omitempty" protobuf:"bytes,9,opt,name=volumeAttributesClassName"`
+}
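+
+// exampleClaim is an illustrative sketch, not part of the vendored upstream
+// file: it requests storage through the PersistentVolumeClaimSpec fields
+// documented above. The class name and size are hypothetical.
+func exampleClaim() PersistentVolumeClaim {
+	class := "standard"
+	return PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{Name: "data-pvc", Namespace: "demo"},
+		Spec: PersistentVolumeClaimSpec{
+			AccessModes:      []PersistentVolumeAccessMode{ReadWriteOnce},
+			StorageClassName: &class,
+			Resources: VolumeResourceRequirements{
+				Requests: ResourceList{ResourceStorage: resource.MustParse("5Gi")},
+			},
+		},
+	}
+}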
+
+// TypedObjectReference contains enough information to let you locate the typed referenced object
+type TypedObjectReference struct {
+	// APIGroup is the group for the resource being referenced.
+	// If APIGroup is not specified, the specified Kind must be in the core API group.
+	// For any other third-party types, APIGroup is required.
+	// +optional
+	APIGroup *string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
+	// Kind is the type of resource being referenced
+	Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+	// Name is the name of resource being referenced
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+	// Namespace is the namespace of resource being referenced
+	// Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+	// (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+	// +featureGate=CrossNamespaceVolumeDataSource
+	// +optional
+	Namespace *string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
+}
+
+// PersistentVolumeClaimConditionType defines the condition of PV claim.
+// Valid values are:
+//   - "Resizing", "FileSystemResizePending"
+//
+// If RecoverVolumeExpansionFailure feature gate is enabled, then following additional values can be expected:
+//   - "ControllerResizeError", "NodeResizeError"
+//
+// If VolumeAttributesClass feature gate is enabled, then following additional values can be expected:
+//   - "ModifyVolumeError", "ModifyingVolume"
+type PersistentVolumeClaimConditionType string
+
+// These are valid conditions of PVC
+const (
+	// PersistentVolumeClaimResizing - a user-triggered resize of the PVC has been started
+	PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
+	// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on the node
+	PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
+
+	// PersistentVolumeClaimControllerResizeError indicates an error while resizing the volume in the controller
+	PersistentVolumeClaimControllerResizeError PersistentVolumeClaimConditionType = "ControllerResizeError"
+	// PersistentVolumeClaimNodeResizeError indicates an error while resizing the volume on the node.
+	PersistentVolumeClaimNodeResizeError PersistentVolumeClaimConditionType = "NodeResizeError"
+
+	// Applying the target VolumeAttributesClass encountered an error
+	PersistentVolumeClaimVolumeModifyVolumeError PersistentVolumeClaimConditionType = "ModifyVolumeError"
+	// Volume is being modified
+	PersistentVolumeClaimVolumeModifyingVolume PersistentVolumeClaimConditionType = "ModifyingVolume"
+)
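+
+// exampleFindResizing is an illustrative sketch, not part of the vendored
+// upstream file: it scans a claim's conditions for the Resizing type defined
+// above.
+func exampleFindResizing(conds []PersistentVolumeClaimCondition) *PersistentVolumeClaimCondition {
+	for i := range conds {
+		if conds[i].Type == PersistentVolumeClaimResizing {
+			return &conds[i]
+		}
+	}
+	return nil // no resize is underway for this claim
+}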
+
+// +enum
+// When a controller receives a PersistentVolumeClaim update with a ClaimResourceStatus for a resource
+// that it does not recognize, it should ignore that update and let other controllers
+// handle it.
+type ClaimResourceStatus string
+
+const (
+	// State set when resize controller starts resizing the volume in control-plane.
+	PersistentVolumeClaimControllerResizeInProgress ClaimResourceStatus = "ControllerResizeInProgress"
+
+	// State set when resize has failed in resize controller with a terminal unrecoverable error.
+	// Transient errors such as timeout should not set this status and should leave allocatedResourceStatus
+	// unmodified, so that the resize controller can resume the volume expansion.
+	PersistentVolumeClaimControllerResizeInfeasible ClaimResourceStatus = "ControllerResizeInfeasible"
+
+	// State set when resize controller has finished resizing the volume but further resizing of volume
+	// is needed on the node.
+	PersistentVolumeClaimNodeResizePending ClaimResourceStatus = "NodeResizePending"
+	// State set when kubelet starts resizing the volume.
+	PersistentVolumeClaimNodeResizeInProgress ClaimResourceStatus = "NodeResizeInProgress"
+	// State set when resizing has failed in kubelet with a terminal unrecoverable error. Transient errors
+	// shouldn't set this status.
+	PersistentVolumeClaimNodeResizeInfeasible ClaimResourceStatus = "NodeResizeInfeasible"
+)
+
+// +enum
+// New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately
+type PersistentVolumeClaimModifyVolumeStatus string
+
+const (
+	// Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as
+	// the specified VolumeAttributesClass not existing
+	PersistentVolumeClaimModifyVolumePending PersistentVolumeClaimModifyVolumeStatus = "Pending"
+	// InProgress indicates that the volume is being modified
+	PersistentVolumeClaimModifyVolumeInProgress PersistentVolumeClaimModifyVolumeStatus = "InProgress"
+	// Infeasible indicates that the request has been rejected as invalid by the CSI driver. To
+	// resolve the error, a valid VolumeAttributesClass needs to be specified
+	PersistentVolumeClaimModifyVolumeInfeasible PersistentVolumeClaimModifyVolumeStatus = "Infeasible"
+)
+
+// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation
+type ModifyVolumeStatus struct {
+	// targetVolumeAttributesClassName is the name of the VolumeAttributesClass that the PVC is currently being reconciled with
+	TargetVolumeAttributesClassName string `json:"targetVolumeAttributesClassName,omitempty" protobuf:"bytes,1,opt,name=targetVolumeAttributesClassName"`
+	// status is the status of the ControllerModifyVolume operation. It can be in any of following states:
+	//  - Pending
+	//    Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as
+	//    the specified VolumeAttributesClass not existing.
+	//  - InProgress
+	//    InProgress indicates that the volume is being modified.
+	//  - Infeasible
+	//    Infeasible indicates that the request has been rejected as invalid by the CSI driver. To
+	//    resolve the error, a valid VolumeAttributesClass needs to be specified.
+	// Note: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.
+	Status PersistentVolumeClaimModifyVolumeStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=PersistentVolumeClaimModifyVolumeStatus"`
+}
+
+// PersistentVolumeClaimCondition contains details about the state of a PVC.
+type PersistentVolumeClaimCondition struct {
+	// Type is the type of the condition.
+	// More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about
+	Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
+	// Status is the status of the condition.
+	// Can be True, False, Unknown.
+	// More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// lastProbeTime is the time we probed the condition.
+	// +optional
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+	// lastTransitionTime is the time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// reason is a unique, short, machine-understandable string that gives the reason
+	// for the condition's last transition. If it reports "Resizing", that means the underlying
+	// persistent volume is being resized.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// message is the human-readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
+type PersistentVolumeClaimStatus struct {
+	// phase represents the current phase of PersistentVolumeClaim.
+	// +optional
+	Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"`
+	// accessModes contains the actual access modes the volume backing the PVC has.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+	// +optional
+	// +listType=atomic
+	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
+	// capacity represents the actual resources of the underlying volume.
+	// +optional
+	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+	// conditions is the current set of conditions for the persistent volume claim. If the underlying
+	// persistent volume is being resized, the 'Resizing' condition will be set.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
+	// allocatedResources tracks the resources allocated to a PVC including its capacity.
+	// Key names follow standard Kubernetes label syntax. Valid values are either:
+	// 	* Un-prefixed keys:
+	//		- storage - the capacity of the volume.
+	//	* Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
+	// Apart from the above values, keys that are unprefixed or have the kubernetes.io prefix are considered
+	// reserved and hence may not be used.
+	//
+	// Capacity reported here may be larger than the actual capacity when a volume expansion operation
+	// is requested.
+	// For storage quota, the larger value from allocatedResources and PVC.spec.resources is used.
+	// If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation.
+	// If a volume expansion capacity request is lowered, allocatedResources is only
+	// lowered if there are no expansion operations in progress and if the actual volume capacity
+	// is equal or lower than the requested capacity.
+	//
+	// A controller that receives a PVC update with a previously unknown resourceName
+	// should ignore the update for the purpose it was designed for. For example, a controller that
+	// is only responsible for resizing the volume's capacity should ignore PVC updates that change other valid
+	// resources associated with the PVC.
+	//
+	// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+	// +featureGate=RecoverVolumeExpansionFailure
+	// +optional
+	AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,5,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"`
+
+	// resizestatus is tombstoned since the field was replaced by allocatedResourceStatus
+	// ResizeStatus *PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty" protobuf:"bytes,6,opt,name=resizeStatus,casttype=PersistentVolumeClaimResizeStatus"`
+
+	// allocatedResourceStatuses stores status of resource being resized for the given PVC.
+	// Key names follow standard Kubernetes label syntax. Valid values are either:
+	// 	* Un-prefixed keys:
+	//		- storage - the capacity of the volume.
+	//	* Custom resources must use implementation-defined prefixed names such as "example.com/my-custom-resource"
+	// Apart from the above values, keys that are unprefixed or have the kubernetes.io prefix are considered
+	// reserved and hence may not be used.
+	//
+	// ClaimResourceStatus can be in any of following states:
+	//	- ControllerResizeInProgress:
+	//		State set when resize controller starts resizing the volume in control-plane.
+	// 	- ControllerResizeFailed:
+	//		State set when resize has failed in resize controller with a terminal error.
+	//	- NodeResizePending:
+	//		State set when resize controller has finished resizing the volume but further resizing of
+	//		volume is needed on the node.
+	//	- NodeResizeInProgress:
+	//		State set when kubelet starts resizing the volume.
+	//	- NodeResizeFailed:
+	//		State set when resizing has failed in kubelet with a terminal error. Transient errors don't set
+	//		NodeResizeFailed.
+	// For example: if expanding a PVC for more capacity - this field can be one of the following states:
+	// 	- pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeInProgress"
+	//      - pvc.status.allocatedResourceStatus['storage'] = "ControllerResizeFailed"
+	//      - pvc.status.allocatedResourceStatus['storage'] = "NodeResizePending"
+	//      - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeInProgress"
+	//      - pvc.status.allocatedResourceStatus['storage'] = "NodeResizeFailed"
+	// When this field is not set, it means that no resize operation is in progress for the given PVC.
+	//
+	// A controller that receives a PVC update with a previously unknown resourceName or ClaimResourceStatus
+	// should ignore the update for the purpose it was designed for. For example, a controller that
+	// is only responsible for resizing the volume's capacity should ignore PVC updates that change other valid
+	// resources associated with the PVC.
+	//
+	// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
+	// +featureGate=RecoverVolumeExpansionFailure
+	// +mapType=granular
+	// +optional
+	AllocatedResourceStatuses map[ResourceName]ClaimResourceStatus `json:"allocatedResourceStatuses,omitempty" protobuf:"bytes,7,rep,name=allocatedResourceStatuses"`
+	// currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using.
+	// When unset, there is no VolumeAttributesClass applied to this PersistentVolumeClaim.
+	// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
+	// +featureGate=VolumeAttributesClass
+	// +optional
+	CurrentVolumeAttributesClassName *string `json:"currentVolumeAttributesClassName,omitempty" protobuf:"bytes,8,opt,name=currentVolumeAttributesClassName"`
+	// ModifyVolumeStatus represents the status object of ControllerModifyVolume operation.
+	// When this is unset, there is no ModifyVolume operation being attempted.
+	// This is a beta field and requires enabling VolumeAttributesClass feature (off by default).
+	// +featureGate=VolumeAttributesClass
+	// +optional
+	ModifyVolumeStatus *ModifyVolumeStatus `json:"modifyVolumeStatus,omitempty" protobuf:"bytes,9,opt,name=modifyVolumeStatus"`
+}
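+
+// exampleResizeInProgress is an illustrative sketch, not part of the
+// vendored upstream file: per the allocatedResourceStatuses comment above,
+// a missing entry means no resize is in progress, and unknown statuses are
+// ignored rather than treated as errors.
+func exampleResizeInProgress(status PersistentVolumeClaimStatus) bool {
+	s, ok := status.AllocatedResourceStatuses[ResourceStorage]
+	if !ok {
+		return false // no resize operation recorded for storage
+	}
+	switch s {
+	case PersistentVolumeClaimControllerResizeInProgress, PersistentVolumeClaimNodeResizeInProgress:
+		return true
+	default:
+		return false // pending, terminal, or unknown status
+	}
+}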
+
+// +enum
+type PersistentVolumeAccessMode string
+
+const (
+	// can be mounted in read/write mode to exactly 1 host
+	ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
+	// can be mounted in read-only mode to many hosts
+	ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
+	// can be mounted in read/write mode to many hosts
+	ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
+	// can be mounted in read/write mode to exactly 1 pod
+	// cannot be used in combination with other access modes
+	ReadWriteOncePod PersistentVolumeAccessMode = "ReadWriteOncePod"
+)
+
+// +enum
+type PersistentVolumePhase string
+
+const (
+	// used for PersistentVolumes that are not available
+	VolumePending PersistentVolumePhase = "Pending"
+	// used for PersistentVolumes that are not yet bound
+	// Available volumes are held by the binder and matched to PersistentVolumeClaims
+	VolumeAvailable PersistentVolumePhase = "Available"
+	// used for PersistentVolumes that are bound
+	VolumeBound PersistentVolumePhase = "Bound"
+	// used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
+	// released volumes must be recycled before becoming available again
+	// this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
+	VolumeReleased PersistentVolumePhase = "Released"
+	// used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
+	VolumeFailed PersistentVolumePhase = "Failed"
+)
+
+// +enum
+type PersistentVolumeClaimPhase string
+
+const (
+	// used for PersistentVolumeClaims that are not yet bound
+	ClaimPending PersistentVolumeClaimPhase = "Pending"
+	// used for PersistentVolumeClaims that are bound
+	ClaimBound PersistentVolumeClaimPhase = "Bound"
+	// used for PersistentVolumeClaims that lost their underlying
+	// PersistentVolume. The claim was bound to a PersistentVolume and this
+	// volume does not exist any longer and all data on it was lost.
+	ClaimLost PersistentVolumeClaimPhase = "Lost"
+)
+
+// +enum
+type HostPathType string
+
+const (
+	// For backwards compatibility, leave it empty if unset
+	HostPathUnset HostPathType = ""
+	// If nothing exists at the given path, an empty directory will be created there
+	// as needed with file mode 0755, having the same group and ownership as the kubelet.
+	HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
+	// A directory must exist at the given path
+	HostPathDirectory HostPathType = "Directory"
+	// If nothing exists at the given path, an empty file will be created there
+	// as needed with file mode 0644 and the same group and ownership as the kubelet.
+	HostPathFileOrCreate HostPathType = "FileOrCreate"
+	// A file must exist at the given path
+	HostPathFile HostPathType = "File"
+	// A UNIX socket must exist at the given path
+	HostPathSocket HostPathType = "Socket"
+	// A character device must exist at the given path
+	HostPathCharDev HostPathType = "CharDevice"
+	// A block device must exist at the given path
+	HostPathBlockDev HostPathType = "BlockDevice"
+)
+
+// Represents a host path mapped into a pod.
+// Host path volumes do not support ownership management or SELinux relabeling.
+type HostPathVolumeSource struct {
+	// path of the directory on the host.
+	// If the path is a symlink, it will follow the link to the real path.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
+	// type for HostPath Volume
+	// Defaults to ""
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+	// +optional
+	Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
+}
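+
+// A minimal sketch (illustrative, not upstream documentation): a pod volume
+// backed by a host directory that is created on demand. The volume name and
+// host path are hypothetical placeholders; corev1 is assumed to be this
+// package's import alias:
+//
+//	hpType := corev1.HostPathDirectoryOrCreate
+//	vol := corev1.Volume{
+//		Name: "host-logs",
+//		VolumeSource: corev1.VolumeSource{
+//			HostPath: &corev1.HostPathVolumeSource{
+//				Path: "/var/log/myapp",
+//				Type: &hpType,
+//			},
+//		},
+//	}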
+
+// Represents an empty directory for a pod.
+// Empty directory volumes support ownership management and SELinux relabeling.
+type EmptyDirVolumeSource struct {
+	// medium represents what type of storage medium should back this directory.
+	// The default is "" which means to use the node's default medium.
+	// Must be an empty string (default) or Memory.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+	// +optional
+	Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"`
+	// sizeLimit is the total amount of local storage required for this EmptyDir volume.
+	// The size limit is also applicable for memory medium.
+	// The maximum usage on memory medium EmptyDir would be the minimum value between
+	// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+	// The default is nil which means that the limit is undefined.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+	// +optional
+	SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
+}
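+
+// A minimal sketch (illustrative, not upstream documentation): a
+// memory-backed emptyDir capped at 256Mi. It assumes corev1 is this package
+// and that resource is k8s.io/apimachinery/pkg/api/resource, as imported by
+// this file:
+//
+//	limit := resource.MustParse("256Mi")
+//	src := corev1.EmptyDirVolumeSource{
+//		Medium:    corev1.StorageMediumMemory,
+//		SizeLimit: &limit,
+//	}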
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+type GlusterfsVolumeSource struct {
+	// endpoints is the endpoint name that details Glusterfs topology.
+	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
+
+	// path is the Glusterfs volume path.
+	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+
+	// readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+	// Defaults to false.
+	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+}
+
+// Represents a Glusterfs mount that lasts the lifetime of a pod.
+// Glusterfs volumes do not support ownership management or SELinux relabeling.
+type GlusterfsPersistentVolumeSource struct {
+	// endpoints is the endpoint name that details Glusterfs topology.
+	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
+
+	// path is the Glusterfs volume path.
+	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+
+	// readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+	// Defaults to false.
+	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+
+	// endpointsNamespace is the namespace that contains the Glusterfs endpoint.
+	// If this field is empty, the endpoint namespace defaults to the same namespace as the bound PVC.
+	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+	// +optional
+	EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"`
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+type RBDVolumeSource struct {
+	// monitors is a collection of Ceph monitors.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	// +listType=atomic
+	CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
+	// image is the rados image name.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
+	// fsType is the filesystem type of the volume that you want to mount.
+	// Tip: Ensure that the filesystem type is supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+	// pool is the rados pool name.
+	// Default is rbd.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	// +default="rbd"
+	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
+	// user is the rados user name.
+	// Default is admin.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	// +default="admin"
+	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
+	// keyring is the path to the key ring for RBDUser.
+	// Default is /etc/ceph/keyring.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	// +default="/etc/ceph/keyring"
+	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
+	// secretRef is the name of the authentication secret for RBDUser. If provided,
+	// it overrides keyring.
+	// Default is nil.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
+	// readOnly here will force the ReadOnly setting in VolumeMounts.
+	// Defaults to false.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
+}
+
+// Represents a Rados Block Device mount that lasts the lifetime of a pod.
+// RBD volumes support ownership management and SELinux relabeling.
+type RBDPersistentVolumeSource struct {
+	// monitors is a collection of Ceph monitors.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	// +listType=atomic
+	CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
+	// image is the rados image name.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
+	// fsType is the filesystem type of the volume that you want to mount.
+	// Tip: Ensure that the filesystem type is supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+	// pool is the rados pool name.
+	// Default is rbd.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	// +default="rbd"
+	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
+	// user is the rados user name.
+	// Default is admin.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	// +default="admin"
+	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
+	// keyring is the path to the key ring for RBDUser.
+	// Default is /etc/ceph/keyring.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	// +default="/etc/ceph/keyring"
+	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
+	// secretRef is the name of the authentication secret for RBDUser. If provided,
+	// it overrides keyring.
+	// Default is nil.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
+	// readOnly here will force the ReadOnly setting in VolumeMounts.
+	// Defaults to false.
+	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
+}
+
+// Represents a cinder volume resource in OpenStack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
+type CinderVolumeSource struct {
+	// volumeID used to identify the volume in cinder.
+	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
+	// fsType is the filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// readOnly defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+	// secretRef is optional: points to a secret object containing parameters used to connect
+	// to OpenStack.
+	// +optional
+	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"`
+}
+
+// Represents a cinder volume resource in OpenStack.
+// A Cinder volume must exist before mounting to a container.
+// The volume must also be in the same region as the kubelet.
+// Cinder volumes support ownership management and SELinux relabeling.
+type CinderPersistentVolumeSource struct {
+	// volumeID used to identify the volume in cinder.
+	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
+	// fsType is the filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+	// secretRef is Optional: points to a secret object containing parameters used to connect
+	// to OpenStack.
+	// +optional
+	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"`
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod.
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+type CephFSVolumeSource struct {
+	// monitors is Required: a collection of Ceph monitors
+	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+	// +listType=atomic
+	Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
+	// path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
+	// user is optional: the rados user name, default is admin
+	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
+	// secretFile is Optional: the path to the key ring for User, default is /etc/ceph/user.secret
+	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
+	// secretRef is Optional: a reference to the authentication secret for User, default is empty.
+	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
+	// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
+}
+
+// SecretReference represents a Secret Reference. It has enough information to retrieve a secret
+// in any namespace
+// +structType=atomic
+type SecretReference struct {
+	// name is unique within a namespace to reference a secret resource.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+	// namespace defines the space within which the secret name must be unique.
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod.
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+type CephFSPersistentVolumeSource struct {
+	// monitors is Required: a collection of Ceph monitors
+	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+	// +listType=atomic
+	Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
+	// path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
+	// user is Optional: the rados user name, default is admin
+	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
+	// secretFile is Optional: the path to the key ring for User, default is /etc/ceph/user.secret
+	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
+	// secretRef is Optional: a reference to the authentication secret for User, default is empty.
+	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
+	// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// One and only one of datasetName and datasetUUID should be set.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+type FlockerVolumeSource struct {
+	// datasetName is the name of the dataset stored as metadata -> name on the dataset for Flocker.
+	// It should be considered deprecated.
+	// +optional
+	DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"`
+	// datasetUUID is the UUID of the dataset. This is the unique identifier of a Flocker dataset
+	// +optional
+	DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"`
+}
+
+// StorageMedium defines ways that storage can be allocated to a volume.
+type StorageMedium string
+
+const (
+	StorageMediumDefault         StorageMedium = ""           // use whatever the default is for the node, assume anything we don't explicitly handle is this
+	StorageMediumMemory          StorageMedium = "Memory"     // use memory (e.g. tmpfs on linux)
+	StorageMediumHugePages       StorageMedium = "HugePages"  // use hugepages
+	StorageMediumHugePagesPrefix StorageMedium = "HugePages-" // prefix for full medium notation HugePages-
+)
+
+// Protocol defines network protocols supported for things like container ports.
+// +enum
+type Protocol string
+
+const (
+	// ProtocolTCP is the TCP protocol.
+	ProtocolTCP Protocol = "TCP"
+	// ProtocolUDP is the UDP protocol.
+	ProtocolUDP Protocol = "UDP"
+	// ProtocolSCTP is the SCTP protocol.
+	ProtocolSCTP Protocol = "SCTP"
+)
+
+// Represents a Persistent Disk resource in Google Compute Engine.
+//
+// A GCE PD must exist before mounting to a container. The disk must
+// also be in the same GCE project and zone as the kubelet. A GCE PD
+// can only be mounted as read/write once or read-only many times. GCE
+// PDs support ownership management and SELinux relabeling.
+type GCEPersistentDiskVolumeSource struct {
+	// pdName is the unique name of the PD resource in GCE. Used to identify the disk in GCE.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+	PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"`
+	// fsType is the filesystem type of the volume that you want to mount.
+	// Tip: Ensure that the filesystem type is supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// partition is the partition in the volume that you want to mount.
+	// If omitted, the default is to mount by volume name.
+	// Examples: For volume /dev/sda1, you specify the partition as "1".
+	// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+	// +optional
+	Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
+	// readOnly here will force the ReadOnly setting in VolumeMounts.
+	// Defaults to false.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+}
+
+// Represents a Quobyte mount that lasts the lifetime of a pod.
+// Quobyte volumes do not support ownership management or SELinux relabeling.
+type QuobyteVolumeSource struct {
+	// registry represents one or more Quobyte Registry services,
+	// specified as a string of host:port pairs (multiple entries are separated with commas),
+	// which act as the central registry for volumes
+	Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"`
+
+	// volume is a string that references an already created Quobyte volume by name.
+	Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"`
+
+	// readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+	// Defaults to false.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+
+	// user to map volume access to
+	// Defaults to the service account user
+	// +optional
+	User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"`
+
+	// group to map volume access to
+	// Default is no group
+	// +optional
+	Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"`
+
+	// tenant owning the given Quobyte volume in the backend.
+	// Used with dynamically provisioned Quobyte volumes; the value is set by the plugin
+	// +optional
+	Tenant string `json:"tenant,omitempty" protobuf:"bytes,6,opt,name=tenant"`
+}
+
+// FlexPersistentVolumeSource represents a generic persistent volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexPersistentVolumeSource struct {
+	// driver is the name of the driver to use for this volume.
+	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+	// fsType is the Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// secretRef is Optional: a reference to the secret object containing
+	// sensitive information to pass to the plugin scripts. This may be
+	// empty if no secret object is specified. If the secret object
+	// contains more than one secret, all secrets are passed to the plugin
+	// scripts.
+	// +optional
+	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
+	// readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+	// options is Optional: this field holds extra command options if any.
+	// +optional
+	Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
+}
+
+// FlexVolume represents a generic volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexVolumeSource struct {
+	// driver is the name of the driver to use for this volume.
+	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+	// fsType is the filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// secretRef is Optional: a reference to the secret object containing
+	// sensitive information to pass to the plugin scripts. This may be
+	// empty if no secret object is specified. If the secret object
+	// contains more than one secret, all secrets are passed to the plugin
+	// scripts.
+	// +optional
+	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
+	// readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+	// options is Optional: this field holds extra command options if any.
+	// +optional
+	Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
+}
+
+// Represents a Persistent Disk resource in AWS.
+//
+// An AWS EBS disk must exist before mounting to a container. The disk
+// must also be in the same AWS zone as the kubelet. An AWS EBS disk
+// can only be mounted as read/write once. AWS EBS volumes support
+// ownership management and SELinux relabeling.
+type AWSElasticBlockStoreVolumeSource struct {
+	// volumeID is the unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
+	// fsType is the filesystem type of the volume that you want to mount.
+	// Tip: Ensure that the filesystem type is supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// partition is the partition in the volume that you want to mount.
+	// If omitted, the default is to mount by volume name.
+	// Examples: For volume /dev/sda1, you specify the partition as "1".
+	// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+	// +optional
+	Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
+	// readOnly value true will force the readOnly setting in VolumeMounts.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+}
+
+// Represents a volume that is populated with the contents of a git repository.
+// Git repo volumes do not support ownership management.
+// Git repo volumes support SELinux relabeling.
+//
+// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+// into the Pod's container.
+type GitRepoVolumeSource struct {
+	// repository is the URL of the git repository
+	Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"`
+	// revision is the commit hash for the specified revision.
+	// +optional
+	Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
+	// directory is the target directory name.
+	// Must not contain or start with '..'.  If '.' is supplied, the volume directory will be the
+	// git repository.  Otherwise, if specified, the volume will contain the git repository in
+	// the subdirectory with the given name.
+	// +optional
+	Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"`
+}
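+
+// Because GitRepo is deprecated, a hedged sketch of the replacement pattern
+// described above may help: an emptyDir volume that an init container clones
+// into. The image and repository URL are hypothetical placeholders; corev1 is
+// assumed to be this package:
+//
+//	repoVol := corev1.Volume{
+//		Name:         "repo",
+//		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
+//	}
+//	cloneInit := corev1.Container{
+//		Name:         "git-clone",
+//		Image:        "alpine/git", // hypothetical clone image
+//		Args:         []string{"clone", "https://example.com/repo.git", "/repo"},
+//		VolumeMounts: []corev1.VolumeMount{{Name: "repo", MountPath: "/repo"}},
+//	}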
+
+// Adapts a Secret into a volume.
+//
+// The contents of the target Secret's Data field will be presented in a volume
+// as files using the keys in the Data field as the file names.
+// Secret volumes support ownership management and SELinux relabeling.
+type SecretVolumeSource struct {
+	// secretName is the name of the secret in the pod's namespace to use.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+	// +optional
+	SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"`
+	// items if unspecified, each key-value pair in the Data field of the referenced
+	// Secret will be projected into the volume as a file whose name is the
+	// key and content is the value. If specified, the listed keys will be
+	// projected into the specified paths, and unlisted keys will not be
+	// present. If a key is specified which is not present in the Secret,
+	// the volume setup will error unless it is marked optional. Paths must be
+	// relative and may not contain the '..' path or start with '..'.
+	// +optional
+	// +listType=atomic
+	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+	// defaultMode is Optional: mode bits used to set permissions on created files by default.
+	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+	// YAML accepts both octal and decimal values, JSON requires decimal values
+	// for mode bits. Defaults to 0644.
+	// Directories within the path are not affected by this setting.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and as a result other mode bits may be set.
+	// +optional
+	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"`
+	// optional field specifies whether the Secret or its keys must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
+}
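+
+// A minimal sketch (illustrative, not upstream documentation): projecting a
+// single key of a secret to a relative path with restrictive permissions. The
+// secret name and key are hypothetical; corev1 is assumed to be this package:
+//
+//	mode := int32(0400)
+//	src := corev1.SecretVolumeSource{
+//		SecretName:  "app-credentials",
+//		Items:       []corev1.KeyToPath{{Key: "token", Path: "auth/token"}},
+//		DefaultMode: &mode,
+//	}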
+
+const (
+	SecretVolumeSourceDefaultMode int32 = 0644
+)
+
+// Adapts a secret into a projected volume.
+//
+// The contents of the target Secret's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names.
+// Note that this is identical to a secret volume source without the default
+// mode.
+type SecretProjection struct {
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// items if unspecified, each key-value pair in the Data field of the referenced
+	// Secret will be projected into the volume as a file whose name is the
+	// key and content is the value. If specified, the listed keys will be
+	// projected into the specified paths, and unlisted keys will not be
+	// present. If a key is specified which is not present in the Secret,
+	// the volume setup will error unless it is marked optional. Paths must be
+	// relative and may not contain the '..' path or start with '..'.
+	// +optional
+	// +listType=atomic
+	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+	// optional field specifies whether the Secret or its key must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
+}
+
+// Represents an NFS mount that lasts the lifetime of a pod.
+// NFS volumes do not support ownership management or SELinux relabeling.
+type NFSVolumeSource struct {
+	// server is the hostname or IP address of the NFS server.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+	Server string `json:"server" protobuf:"bytes,1,opt,name=server"`
+
+	// path that is exported by the NFS server.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+
+	// readOnly here will force the NFS export to be mounted with read-only permissions.
+	// Defaults to false.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+}
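+
+// A minimal sketch (illustrative, not upstream documentation): mounting an
+// NFS export read-only. The server and path are hypothetical placeholders;
+// corev1 is assumed to be this package:
+//
+//	src := corev1.NFSVolumeSource{
+//		Server:   "nfs.example.com",
+//		Path:     "/exports/data",
+//		ReadOnly: true,
+//	}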
+
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIVolumeSource struct {
+	// targetPortal is the iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+	// is other than default (typically TCP ports 860 and 3260).
+	TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
+	// iqn is the target iSCSI Qualified Name.
+	IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
+	// lun represents iSCSI Target Lun number.
+	Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
+	// iscsiInterface is the interface Name that uses an iSCSI transport.
+	// Defaults to 'default' (tcp).
+	// +optional
+	// +default="default"
+	ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
+	// fsType is the filesystem type of the volume that you want to mount.
+	// Tip: Ensure that the filesystem type is supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
+	// readOnly here will force the ReadOnly setting in VolumeMounts.
+	// Defaults to false.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
+	// portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+	// is other than default (typically TCP ports 860 and 3260).
+	// +optional
+	// +listType=atomic
+	Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
+	// chapAuthDiscovery defines whether to support iSCSI Discovery CHAP authentication
+	// +optional
+	DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
+	// chapAuthSession defines whether to support iSCSI Session CHAP authentication
+	// +optional
+	SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
+	// secretRef is the CHAP Secret for iSCSI target and initiator authentication
+	// +optional
+	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
+	// initiatorName is the custom iSCSI Initiator Name.
+	// If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
+	// <target portal>:<volume name> will be created for the connection.
+	// +optional
+	InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
+}
+
+// ISCSIPersistentVolumeSource represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIPersistentVolumeSource struct {
+	// targetPortal is the iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+	// is other than default (typically TCP ports 860 and 3260).
+	TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
+	// iqn is the target iSCSI Qualified Name.
+	IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
+	// lun is the iSCSI Target Lun number.
+	Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
+	// iscsiInterface is the interface Name that uses an iSCSI transport.
+	// Defaults to 'default' (tcp).
+	// +optional
+	// +default="default"
+	ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
+	// fsType is the filesystem type of the volume that you want to mount.
+	// Tip: Ensure that the filesystem type is supported by the host operating system.
+	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
+	// readOnly here will force the ReadOnly setting in VolumeMounts.
+	// Defaults to false.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
+	// portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port
+	// is other than default (typically TCP ports 860 and 3260).
+	// +optional
+	// +listType=atomic
+	Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
+	// chapAuthDiscovery defines whether to support iSCSI Discovery CHAP authentication
+	// +optional
+	DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
+	// chapAuthSession defines whether to support iSCSI Session CHAP authentication
+	// +optional
+	SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
+	// secretRef is the CHAP Secret for iSCSI target and initiator authentication
+	// +optional
+	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
+	// initiatorName is the custom iSCSI Initiator Name.
+	// If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
+	// <target portal>:<volume name> will be created for the connection.
+	// +optional
+	InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
+}
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
+type FCVolumeSource struct {
+	// targetWWNs is Optional: FC target worldwide names (WWNs)
+	// +optional
+	// +listType=atomic
+	TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"`
+	// lun is Optional: FC target lun number
+	// +optional
+	Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"`
+	// fsType is the filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+	// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+	// wwids is Optional: FC volume world wide identifiers (wwids)
+	// Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+	// +optional
+	// +listType=atomic
+	WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"`
+}
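+
+// A minimal sketch (illustrative, not upstream documentation) of one valid
+// form: wwids alone, without targetWWNs/lun, since the two identification
+// styles are mutually exclusive. The WWID value is a hypothetical placeholder;
+// corev1 is assumed to be this package:
+//
+//	src := corev1.FCVolumeSource{
+//		WWIDs: []string{"3600508b400105e210000900000490000"},
+//	}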
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFileVolumeSource struct {
+	// secretName is the name of the secret that contains Azure Storage Account Name and Key
+	SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
+	// shareName is the azure share name
+	ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
+	// readOnly defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+}
+
+// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFilePersistentVolumeSource struct {
+	// secretName is the name of the secret that contains Azure Storage Account Name and Key
+	SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
+	// shareName is the azure share name
+	ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
+	// readOnly defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+	// secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key
+	// defaults to the same namespace as the Pod
+	// +optional
+	SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"`
+}
+
+// Represents a vSphere volume resource.
+type VsphereVirtualDiskVolumeSource struct {
+	// volumePath is the path that identifies the vSphere volume vmdk
+	VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
+	// fsType is the filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// storagePolicyName is the storage Policy Based Management (SPBM) profile name.
+	// +optional
+	StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"`
+	// storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+	// +optional
+	StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"`
+}
+
+// Represents a Photon Controller persistent disk resource.
+type PhotonPersistentDiskVolumeSource struct {
+	// pdID is the ID that identifies the Photon Controller persistent disk
+	PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"`
+	// fsType is the filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+}
+
+// +enum
+type AzureDataDiskCachingMode string
+
+// +enum
+type AzureDataDiskKind string
+
+const (
+	AzureDataDiskCachingNone      AzureDataDiskCachingMode = "None"
+	AzureDataDiskCachingReadOnly  AzureDataDiskCachingMode = "ReadOnly"
+	AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
+
+	AzureSharedBlobDisk    AzureDataDiskKind = "Shared"
+	AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
+	AzureManagedDisk       AzureDataDiskKind = "Managed"
+)
+
+// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+type AzureDiskVolumeSource struct {
+	// diskName is the name of the data disk in the blob storage
+	DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"`
+	// diskURI is the URI of the data disk in the blob storage
+	DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
+	// cachingMode is the Host Caching mode: None, Read Only, Read Write.
+	// +optional
+	// +default=ref(AzureDataDiskCachingReadWrite)
+	CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
+	// fsType is Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// +optional
+	// +default="ext4"
+	FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
+	// readOnly Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	// +default=false
+	ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
+	// kind expected values are Shared (multiple blob disks per storage account),
+	// Dedicated (single blob disk per storage account), and Managed (azure managed
+	// data disk, only in managed availability set). Defaults to Shared.
+	// +default=ref(AzureSharedBlobDisk)
+	Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
+}
+
+// PortworxVolumeSource represents a Portworx volume resource.
+type PortworxVolumeSource struct {
+	// volumeID uniquely identifies a Portworx volume
+	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
+	// fsType represents the filesystem type to mount
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+	// readOnly defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+}
+
+// ScaleIOVolumeSource represents a persistent ScaleIO volume
+type ScaleIOVolumeSource struct {
+	// gateway is the host address of the ScaleIO API Gateway.
+	Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
+	// system is the name of the storage system as configured in ScaleIO.
+	System string `json:"system" protobuf:"bytes,2,opt,name=system"`
+	// secretRef references the secret for the ScaleIO user and other
+	// sensitive information. If this is not provided, the Login operation will fail.
+	SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
+	// sslEnabled is the flag to enable/disable SSL communication with Gateway, default false
+	// +optional
+	SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
+	// protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
+	// +optional
+	ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
+	// storagePool is the ScaleIO Storage Pool associated with the protection domain.
+	// +optional
+	StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
+	// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+	// Default is ThinProvisioned.
+	// +optional
+	// +default="ThinProvisioned"
+	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
+	// volumeName is the name of a volume already created in the ScaleIO system
+	// that is associated with this volume source.
+	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
+	// fsType is the filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs".
+	// Default is "xfs".
+	// +optional
+	// +default="xfs"
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
+	// readOnly Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
+}
+
+// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume
+type ScaleIOPersistentVolumeSource struct {
+	// gateway is the host address of the ScaleIO API Gateway.
+	Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
+	// system is the name of the storage system as configured in ScaleIO.
+	System string `json:"system" protobuf:"bytes,2,opt,name=system"`
+	// secretRef references the secret for the ScaleIO user and other
+	// sensitive information. If this is not provided, the Login operation will fail.
+	SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
+	// sslEnabled is the flag to enable/disable SSL communication with Gateway, default false
+	// +optional
+	SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
+	// protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
+	// +optional
+	ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
+	// storagePool is the ScaleIO Storage Pool associated with the protection domain.
+	// +optional
+	StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
+	// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+	// Default is ThinProvisioned.
+	// +optional
+	// +default="ThinProvisioned"
+	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
+	// volumeName is the name of a volume already created in the ScaleIO system
+	// that is associated with this volume source.
+	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
+	// fsType is the filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs".
+	// Default is "xfs"
+	// +optional
+	// +default="xfs"
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
+	// readOnly defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
+}
+
+// Represents a StorageOS persistent volume resource.
+type StorageOSVolumeSource struct {
+	// volumeName is the human-readable name of the StorageOS volume.  Volume
+	// names are only unique within a namespace.
+	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
+	// volumeNamespace specifies the scope of the volume within StorageOS.  If no
+	// namespace is specified then the Pod's namespace will be used.  This allows the
+	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+	// Set VolumeName to any name to override the default behaviour.
+	// Set to "default" if you are not using namespaces within StorageOS.
+	// Namespaces that do not pre-exist within StorageOS will be created.
+	// +optional
+	VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
+	// fsType is the filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+	// readOnly defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+	// secretRef specifies the secret to use for obtaining the StorageOS API
+	// credentials.  If not specified, default values will be attempted.
+	// +optional
+	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
+}
+
+// Represents a StorageOS persistent volume resource.
+type StorageOSPersistentVolumeSource struct {
+	// volumeName is the human-readable name of the StorageOS volume.  Volume
+	// names are only unique within a namespace.
+	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
+	// volumeNamespace specifies the scope of the volume within StorageOS.  If no
+	// namespace is specified then the Pod's namespace will be used.  This allows the
+	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+	// Set VolumeName to any name to override the default behaviour.
+	// Set to "default" if you are not using namespaces within StorageOS.
+	// Namespaces that do not pre-exist within StorageOS will be created.
+	// +optional
+	VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
+	// fsType is the filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+	// readOnly defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+	// secretRef specifies the secret to use for obtaining the StorageOS API
+	// credentials.  If not specified, default values will be attempted.
+	// +optional
+	SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
+}
+
+// Adapts a ConfigMap into a volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// volume as files using the keys in the Data field as the file names, unless
+// the items element is populated with specific mappings of keys to paths.
+// ConfigMap volumes support ownership management and SELinux relabeling.
+type ConfigMapVolumeSource struct {
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// items if unspecified, each key-value pair in the Data field of the referenced
+	// ConfigMap will be projected into the volume as a file whose name is the
+	// key and content is the value. If specified, the listed keys will be
+	// projected into the specified paths, and unlisted keys will not be
+	// present. If a key is specified which is not present in the ConfigMap,
+	// the volume setup will error unless it is marked optional. Paths must be
+	// relative and may not contain the '..' path or start with '..'.
+	// +optional
+	// +listType=atomic
+	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+	// defaultMode is optional: mode bits used to set permissions on created files by default.
+	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+	// Defaults to 0644.
+	// Directories within the path are not affected by this setting.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and as a result other mode bits may be set.
+	// +optional
+	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"`
+	// optional specifies whether the ConfigMap or its keys must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
+}
+
+const (
+	ConfigMapVolumeSourceDefaultMode int32 = 0644
+)
+
+// Adapts a ConfigMap into a projected volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names,
+// unless the items element is populated with specific mappings of keys to paths.
+// Note that this is identical to a configmap volume source without the default
+// mode.
+type ConfigMapProjection struct {
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// items if unspecified, each key-value pair in the Data field of the referenced
+	// ConfigMap will be projected into the volume as a file whose name is the
+	// key and content is the value. If specified, the listed keys will be
+	// projected into the specified paths, and unlisted keys will not be
+	// present. If a key is specified which is not present in the ConfigMap,
+	// the volume setup will error unless it is marked optional. Paths must be
+	// relative and may not contain the '..' path or start with '..'.
+	// +optional
+	// +listType=atomic
+	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
+	// optional specifies whether the ConfigMap or its keys must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
+}
+
+// ServiceAccountTokenProjection represents a projected service account token
+// volume. This projection can be used to insert a service account token into
+// the pod's runtime filesystem for use against APIs (Kubernetes API Server or
+// otherwise).
+type ServiceAccountTokenProjection struct {
+	// audience is the intended audience of the token. A recipient of a token
+	// must identify itself with an identifier specified in the audience of the
+	// token, and otherwise should reject the token. The audience defaults to the
+	// identifier of the apiserver.
+	// +optional
+	Audience string `json:"audience,omitempty" protobuf:"bytes,1,rep,name=audience"`
+	// expirationSeconds is the requested duration of validity of the service
+	// account token. As the token approaches expiration, the kubelet volume
+	// plugin will proactively rotate the service account token. The kubelet will
+	// start trying to rotate the token if the token is older than 80 percent of
+	// its time to live or if the token is older than 24 hours. Defaults to 1 hour
+	// and must be at least 10 minutes.
+	// +optional
+	ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"`
+	// path is the path relative to the mount point of the file to project the
+	// token into.
+	Path string `json:"path" protobuf:"bytes,3,opt,name=path"`
+}
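+
+// A minimal sketch (illustrative, not upstream documentation): requesting a
+// one-hour token for a hypothetical audience; corev1 is assumed to be this
+// package. The kubelet rotates the token well before the requested expiry:
+//
+//	expiry := int64(3600)
+//	proj := corev1.ServiceAccountTokenProjection{
+//		Audience:          "https://vault.example.com",
+//		ExpirationSeconds: &expiry,
+//		Path:              "token",
+//	}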
+
+// ClusterTrustBundleProjection describes how to select a set of
+// ClusterTrustBundle objects and project their contents into the pod
+// filesystem.
+type ClusterTrustBundleProjection struct {
+	// Select a single ClusterTrustBundle by object name.  Mutually-exclusive
+	// with signerName and labelSelector.
+	// +optional
+	Name *string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"`
+
+	// Select all ClusterTrustBundles that match this signer name.
+	// Mutually-exclusive with name.  The contents of all selected
+	// ClusterTrustBundles will be unified and deduplicated.
+	// +optional
+	SignerName *string `json:"signerName,omitempty" protobuf:"bytes,2,rep,name=signerName"`
+
+	// Select all ClusterTrustBundles that match this label selector.  Only has
+	// effect if signerName is set.  Mutually-exclusive with name.  If unset,
+	// interpreted as "match nothing".  If set but empty, interpreted as "match
+	// everything".
+	// +optional
+	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,3,rep,name=labelSelector"`
+
+	// If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+	// aren't available.  If using name, then the named ClusterTrustBundle is
+	// allowed not to exist.  If using signerName, then the combination of
+	// signerName and labelSelector is allowed to match zero
+	// ClusterTrustBundles.
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,5,opt,name=optional"`
+
+	// Relative path from the volume root to write the bundle.
+	Path string `json:"path" protobuf:"bytes,4,rep,name=path"`
+}
+
+// Represents a projected volume source
+type ProjectedVolumeSource struct {
+	// sources is the list of volume projections. Each entry in this list
+	// handles one source.
+	// +optional
+	// +listType=atomic
+	Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
+	// defaultMode are the mode bits used to set permissions on created files by default.
+	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+	// Directories within the path are not affected by this setting.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
+}
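+
+// Editor's sketch, not part of the upstream API: a ProjectedVolumeSource
+// merging a ConfigMap key mapping and a service account token into one
+// volume. Names such as "app-config" and the audience are hypothetical.
+var (
+	exampleProjectedMode int32 = 0440 // octal in Go source; JSON would use 288
+	exampleTokenTTL      int64 = 3600 // seconds; must be at least 600
+
+	exampleProjectedVolume = ProjectedVolumeSource{
+		DefaultMode: &exampleProjectedMode,
+		Sources: []VolumeProjection{
+			{ConfigMap: &ConfigMapProjection{
+				LocalObjectReference: LocalObjectReference{Name: "app-config"},
+				// Without Items, every key in the ConfigMap becomes a file.
+				Items: []KeyToPath{{Key: "config.yaml", Path: "cfg/config.yaml"}},
+			}},
+			{ServiceAccountToken: &ServiceAccountTokenProjection{
+				Audience:          "https://example.invalid",
+				ExpirationSeconds: &exampleTokenTTL,
+				Path:              "token",
+			}},
+		},
+	}
+)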
+
+// Projection that may be projected along with other supported volume types.
+// Exactly one of these fields must be set.
+type VolumeProjection struct {
+	// secret information about the secret data to project
+	// +optional
+	Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
+	// downwardAPI information about the downwardAPI data to project
+	// +optional
+	DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"`
+	// configMap information about the configMap data to project
+	// +optional
+	ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
+	// serviceAccountToken is information about the serviceAccountToken data to project
+	// +optional
+	ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"`
+
+	// ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+	// of ClusterTrustBundle objects in an auto-updating file.
+	//
+	// Alpha, gated by the ClusterTrustBundleProjection feature gate.
+	//
+	// ClusterTrustBundle objects can either be selected by name, or by the
+	// combination of signer name and a label selector.
+	//
+	// Kubelet performs aggressive normalization of the PEM contents written
+	// into the pod filesystem.  Esoteric PEM features such as inter-block
+	// comments and block headers are stripped.  Certificates are deduplicated.
+	// The ordering of certificates within the file is arbitrary, and Kubelet
+	// may change the order over time.
+	//
+	// +featureGate=ClusterTrustBundleProjection
+	// +optional
+	ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"`
+}
+
+const (
+	ProjectedVolumeSourceDefaultMode int32 = 0644
+)
+
+// Maps a string key to a path within a volume.
+type KeyToPath struct {
+	// key is the key to project.
+	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+
+	// path is the relative path of the file to map the key to.
+	// May not be an absolute path.
+	// May not contain the path element '..'.
+	// May not start with the string '..'.
+	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
+	// mode is Optional: mode bits used to set permissions on this file.
+	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+	// If not specified, the volume defaultMode will be used.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
+}
+
+// Local represents directly-attached storage with node affinity
+type LocalVolumeSource struct {
+	// path is the full path to the volume on the node.
+	// It can be either a directory or block device (disk, partition, ...).
+	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
+
+	// fsType is the filesystem type to mount.
+	// It applies only when the Path is a block device.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified.
+	// +optional
+	FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+}
+
+// Represents storage that is managed by an external CSI volume driver
+type CSIPersistentVolumeSource struct {
+	// driver is the name of the driver to use for this volume.
+	// Required.
+	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+
+	// volumeHandle is the unique volume name returned by the CSI volume
+	// plugin’s CreateVolume to refer to the volume on all subsequent calls.
+	// Required.
+	VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"`
+
+	// readOnly value to pass to ControllerPublishVolumeRequest.
+	// Defaults to false (read/write).
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+
+	// fsType to mount. Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs".
+	// +optional
+	FSType string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
+
+	// volumeAttributes of the volume to publish.
+	// +optional
+	VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,5,rep,name=volumeAttributes"`
+
+	// controllerPublishSecretRef is a reference to the secret object containing
+	// sensitive information to pass to the CSI driver to complete the CSI
+	// ControllerPublishVolume and ControllerUnpublishVolume calls.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secrets are passed.
+	// +optional
+	ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"`
+
+	// nodeStageSecretRef is a reference to the secret object containing sensitive
+	// information to pass to the CSI driver to complete the CSI NodeStageVolume
+	// and NodeUnstageVolume calls.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secrets are passed.
+	// +optional
+	NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"`
+
+	// nodePublishSecretRef is a reference to the secret object containing
+	// sensitive information to pass to the CSI driver to complete the CSI
+	// NodePublishVolume and NodeUnpublishVolume calls.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secrets are passed.
+	// +optional
+	NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"`
+
+	// controllerExpandSecretRef is a reference to the secret object containing
+	// sensitive information to pass to the CSI driver to complete the CSI
+	// ControllerExpandVolume call.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secrets are passed.
+	// +optional
+	ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"`
+
+	// nodeExpandSecretRef is a reference to the secret object containing
+	// sensitive information to pass to the CSI driver to complete the CSI
+	// NodeExpandVolume call.
+	// This field is optional, may be omitted if no secret is required. If the
+	// secret object contains more than one secret, all secrets are passed.
+	// +optional
+	NodeExpandSecretRef *SecretReference `json:"nodeExpandSecretRef,omitempty" protobuf:"bytes,10,opt,name=nodeExpandSecretRef"`
+}
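+
+// Editor's sketch, not upstream code: a pre-provisioned PersistentVolume
+// backed by a CSI driver. The driver name, volume handle, and secret
+// reference are hypothetical values.
+var exampleCSIPV = CSIPersistentVolumeSource{
+	Driver:       "ebs.csi.aws.com",
+	VolumeHandle: "vol-0123456789abcdef0",
+	FSType:       "ext4",
+	ControllerPublishSecretRef: &SecretReference{
+		Name:      "csi-publish-secret",
+		Namespace: "kube-system",
+	},
+}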
+
+// Represents a source location of a volume to mount, managed by an external CSI driver
+type CSIVolumeSource struct {
+	// driver is the name of the CSI driver that handles this volume.
+	// Consult with your admin for the correct name as registered in the cluster.
+	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+
+	// readOnly specifies a read-only configuration for the volume.
+	// Defaults to false (read/write).
+	// +optional
+	ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
+
+	// fsType to mount. Ex. "ext4", "xfs", "ntfs".
+	// If not provided, the empty value is passed to the associated CSI driver
+	// which will determine the default filesystem to apply.
+	// +optional
+	FSType *string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
+
+	// volumeAttributes stores driver-specific properties that are passed to the CSI
+	// driver. Consult your driver's documentation for supported values.
+	// +optional
+	VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,4,rep,name=volumeAttributes"`
+
+	// nodePublishSecretRef is a reference to the secret object containing
+	// sensitive information to pass to the CSI driver to complete the CSI
+	// NodePublishVolume and NodeUnpublishVolume calls.
+	// This field is optional, and may be empty if no secret is required. If the
+	// secret object contains more than one secret, all secret references are passed.
+	// +optional
+	NodePublishSecretRef *LocalObjectReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,5,opt,name=nodePublishSecretRef"`
+}
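+
+// Editor's sketch: the inline (pod-scoped) counterpart. Unlike
+// CSIPersistentVolumeSource, secrets here are LocalObjectReferences resolved
+// in the pod's own namespace. Driver and attribute values are hypothetical.
+var (
+	exampleCSIInlineReadOnly = true
+
+	exampleCSIInline = CSIVolumeSource{
+		Driver:   "secrets-store.csi.k8s.io",
+		ReadOnly: &exampleCSIInlineReadOnly,
+		VolumeAttributes: map[string]string{
+			"secretProviderClass": "app-secrets",
+		},
+	}
+)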
+
+// Represents an ephemeral volume that is handled by a normal storage driver.
+type EphemeralVolumeSource struct {
+	// Will be used to create a stand-alone PVC to provision the volume.
+	// The pod in which this EphemeralVolumeSource is embedded will be the
+	// owner of the PVC, i.e. the PVC will be deleted together with the
+	// pod.  The name of the PVC will be `<pod name>-<volume name>` where
+	// `<volume name>` is the name from the `PodSpec.Volumes` array
+	// entry. Pod validation will reject the pod if the concatenated name
+	// is not valid for a PVC (for example, too long).
+	//
+	// An existing PVC with that name that is not owned by the pod
+	// will *not* be used for the pod to avoid using an unrelated
+	// volume by mistake. Starting the pod is then blocked until
+	// the unrelated PVC is removed. If such a pre-created PVC is
+	// meant to be used by the pod, the PVC has to be updated with an
+	// owner reference to the pod once the pod exists. Normally
+	// this should not be necessary, but it may be useful when
+	// manually reconstructing a broken cluster.
+	//
+	// This field is read-only and no changes will be made by Kubernetes
+	// to the PVC after it has been created.
+	//
+	// Required, must not be nil.
+	VolumeClaimTemplate *PersistentVolumeClaimTemplate `json:"volumeClaimTemplate,omitempty" protobuf:"bytes,1,opt,name=volumeClaimTemplate"`
+
+	// ReadOnly is tombstoned to show why 2 is a reserved protobuf tag.
+	// ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
+}
+
+// PersistentVolumeClaimTemplate is used to produce
+// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
+type PersistentVolumeClaimTemplate struct {
+	// May contain labels and annotations that will be copied into the PVC
+	// when creating it. No other fields are allowed and will be rejected during
+	// validation.
+	//
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The specification for the PersistentVolumeClaim. The entire content is
+	// copied unchanged into the PVC that gets created from this
+	// template. The same fields as in a PersistentVolumeClaim
+	// are also valid here.
+	Spec PersistentVolumeClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
+}
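+
+// Editor's sketch: a generic ephemeral volume. The PVC produced from this
+// template is owned by the pod and deleted with it. Storage class and size
+// are hypothetical, and the sketch assumes the PersistentVolumeClaimSpec
+// defined elsewhere in this file.
+var (
+	exampleEphemeralClass = "fast-ssd"
+
+	exampleEphemeral = EphemeralVolumeSource{
+		VolumeClaimTemplate: &PersistentVolumeClaimTemplate{
+			Spec: PersistentVolumeClaimSpec{
+				AccessModes:      []PersistentVolumeAccessMode{ReadWriteOnce},
+				StorageClassName: &exampleEphemeralClass,
+				Resources: VolumeResourceRequirements{
+					Requests: ResourceList{ResourceStorage: resource.MustParse("1Gi")},
+				},
+			},
+		},
+	}
+)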
+
+// ContainerPort represents a network port in a single container.
+type ContainerPort struct {
+	// If specified, this must be an IANA_SVC_NAME and unique within the pod;
+	// services can refer to the port by this name.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+	// Number of port to expose on the host.
+	// If specified, this must be a valid port number, 0 < x < 65536.
+	// If HostNetwork is specified, this must match ContainerPort.
+	// Most containers do not need this.
+	// +optional
+	HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"`
+	// Number of port to expose on the pod's IP address.
+	// This must be a valid port number, 0 < x < 65536.
+	ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"`
+	// Protocol for port. Must be UDP, TCP, or SCTP.
+	// Defaults to "TCP".
+	// +optional
+	// +default="TCP"
+	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
+	// What host IP to bind the external port to.
+	// +optional
+	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
+}
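+
+// Editor's sketch: a named port; the name lets Services and probes refer to
+// it symbolically instead of by number.
+var exampleHTTPPort = ContainerPort{
+	Name:          "http", // must be an IANA_SVC_NAME
+	ContainerPort: 8080,
+	Protocol:      ProtocolTCP,
+}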
+
+// VolumeMount describes a mounting of a Volume within a container.
+type VolumeMount struct {
+	// This must match the Name of a Volume.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// Mounted read-only if true, read-write otherwise (false or unspecified).
+	// Defaults to false.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
+	// RecursiveReadOnly specifies whether read-only mounts should be handled
+	// recursively.
+	//
+	// If ReadOnly is false, this field has no meaning and must be unspecified.
+	//
+	// If ReadOnly is true, and this field is set to Disabled, the mount is not made
+	// recursively read-only.  If this field is set to IfPossible, the mount is made
+	// recursively read-only, if it is supported by the container runtime.  If this
+	// field is set to Enabled, the mount is made recursively read-only if it is
+	// supported by the container runtime, otherwise the pod will not be started and
+	// an error will be generated to indicate the reason.
+	//
+	// If this field is set to IfPossible or Enabled, MountPropagation must be set to
+	// None (or be unspecified, which defaults to None).
+	//
+	// If this field is not specified, it is treated as an equivalent of Disabled.
+	//
+	// +featureGate=RecursiveReadOnlyMounts
+	// +optional
+	RecursiveReadOnly *RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty" protobuf:"bytes,7,opt,name=recursiveReadOnly,casttype=RecursiveReadOnlyMode"`
+	// Path within the container at which the volume should be mounted.  Must
+	// not contain ':'.
+	MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"`
+	// Path within the volume from which the container's volume should be mounted.
+	// Defaults to "" (volume's root).
+	// +optional
+	SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"`
+	// mountPropagation determines how mounts are propagated from the host
+	// to container and the other way around.
+	// When not set, MountPropagationNone is used.
+	// This field is beta in 1.10.
+	// When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+	// (which defaults to None).
+	// +optional
+	MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"`
+	// Expanded path within the volume from which the container's volume should be mounted.
+	// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+	// Defaults to "" (volume's root).
+	// SubPathExpr and SubPath are mutually exclusive.
+	// +optional
+	SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"`
+}
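+
+// Editor's sketch: mounting a single file out of a volume via SubPath,
+// read-only. SubPathExpr would be used instead if the path needed
+// environment expansion; the two fields are mutually exclusive.
+var exampleConfigMount = VolumeMount{
+	Name:      "app-config", // must match a Volume name in the pod
+	ReadOnly:  true,
+	MountPath: "/etc/app/config.yaml",
+	SubPath:   "config.yaml",
+}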
+
+// MountPropagationMode describes mount propagation.
+// +enum
+type MountPropagationMode string
+
+const (
+	// MountPropagationNone means that the volume in a container will
+	// not receive new mounts from the host or other containers, and filesystems
+	// mounted inside the container won't be propagated to the host or other
+	// containers.
+	// Note that this mode corresponds to "private" in Linux terminology.
+	MountPropagationNone MountPropagationMode = "None"
+	// MountPropagationHostToContainer means that the volume in a container will
+	// receive new mounts from the host or other containers, but filesystems
+	// mounted inside the container won't be propagated to the host or other
+	// containers.
+	// Note that this mode is recursively applied to all mounts in the volume
+	// ("rslave" in Linux terminology).
+	MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
+	// MountPropagationBidirectional means that the volume in a container will
+	// receive new mounts from the host or other containers, and its own mounts
+	// will be propagated from the container to the host or other containers.
+	// Note that this mode is recursively applied to all mounts in the volume
+	// ("rshared" in Linux terminology).
+	MountPropagationBidirectional MountPropagationMode = "Bidirectional"
+)
+
+// RecursiveReadOnlyMode describes recursive-readonly mode.
+type RecursiveReadOnlyMode string
+
+const (
+	// RecursiveReadOnlyDisabled disables recursive-readonly mode.
+	RecursiveReadOnlyDisabled RecursiveReadOnlyMode = "Disabled"
+	// RecursiveReadOnlyIfPossible enables recursive-readonly mode if possible.
+	RecursiveReadOnlyIfPossible RecursiveReadOnlyMode = "IfPossible"
+	// RecursiveReadOnlyEnabled enables recursive-readonly mode, or raises an error.
+	RecursiveReadOnlyEnabled RecursiveReadOnlyMode = "Enabled"
+)
+
+// volumeDevice describes a mapping of a raw block device within a container.
+type VolumeDevice struct {
+	// name must match the name of a persistentVolumeClaim in the pod
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// devicePath is the path inside of the container that the device will be mapped to.
+	DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"`
+}
+
+// EnvVar represents an environment variable present in a Container.
+type EnvVar struct {
+	// Name of the environment variable. Must be a C_IDENTIFIER.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+	// Optional: no more than one of the following may be specified.
+
+	// Variable references $(VAR_NAME) are expanded
+	// using the previously defined environment variables in the container and
+	// any service environment variables. If a variable cannot be resolved,
+	// the reference in the input string will be unchanged. Double $$ are reduced
+	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+	// "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+	// Escaped references will never be expanded, regardless of whether the variable
+	// exists or not.
+	// Defaults to "".
+	// +optional
+	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+	// Source for the environment variable's value. Cannot be used if value is not empty.
+	// +optional
+	ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
+}
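+
+// Editor's sketch of the expansion rules documented above: HEALTH_URL
+// references an earlier variable, while the doubled $$ in TEMPLATE escapes
+// expansion and yields a literal "$(BASE_URL)". Values are hypothetical.
+var exampleEnv = []EnvVar{
+	{Name: "BASE_URL", Value: "https://example.invalid"},
+	{Name: "HEALTH_URL", Value: "$(BASE_URL)/healthz"}, // expanded at runtime
+	{Name: "TEMPLATE", Value: "$$(BASE_URL)"},          // never expanded
+}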
+
+// EnvVarSource represents a source for the value of an EnvVar.
+type EnvVarSource struct {
+	// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+	// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+	// +optional
+	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
+	// Selects a resource of the container: only resources limits and requests
+	// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+	// +optional
+	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"`
+	// Selects a key of a ConfigMap.
+	// +optional
+	ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"`
+	// Selects a key of a secret in the pod's namespace
+	// +optional
+	SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
+}
+
+// ObjectFieldSelector selects an APIVersioned field of an object.
+// +structType=atomic
+type ObjectFieldSelector struct {
+	// Version of the schema the FieldPath is written in terms of, defaults to "v1".
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
+	// Path of the field to select in the specified API version.
+	FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"`
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+// +structType=atomic
+type ResourceFieldSelector struct {
+	// Container name: required for volumes, optional for env vars
+	// +optional
+	ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
+	// Required: resource to select
+	Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
+	// Specifies the output format of the exposed resources, defaults to "1"
+	// +optional
+	Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"`
+}
+
+// Selects a key from a ConfigMap.
+// +structType=atomic
+type ConfigMapKeySelector struct {
+	// The ConfigMap to select from.
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// The key to select.
+	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
+	// Specify whether the ConfigMap or its key must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
+}
+
+// SecretKeySelector selects a key of a Secret.
+// +structType=atomic
+type SecretKeySelector struct {
+	// The name of the secret in the pod's namespace to select from.
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// The key of the secret to select from.  Must be a valid secret key.
+	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
+	// Specify whether the Secret or its key must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
+}
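+
+// Editor's sketch: one EnvVar per EnvVarSource variant; at most one variant
+// may be set in a single source. All object and key names are hypothetical.
+var exampleEnvSources = []EnvVar{
+	{Name: "POD_NAME", ValueFrom: &EnvVarSource{
+		FieldRef: &ObjectFieldSelector{FieldPath: "metadata.name"},
+	}},
+	{Name: "CPU_LIMIT", ValueFrom: &EnvVarSource{
+		ResourceFieldRef: &ResourceFieldSelector{Resource: "limits.cpu"},
+	}},
+	{Name: "LOG_LEVEL", ValueFrom: &EnvVarSource{
+		ConfigMapKeyRef: &ConfigMapKeySelector{
+			LocalObjectReference: LocalObjectReference{Name: "app-config"},
+			Key:                  "log-level",
+		},
+	}},
+	{Name: "DB_PASSWORD", ValueFrom: &EnvVarSource{
+		SecretKeyRef: &SecretKeySelector{
+			LocalObjectReference: LocalObjectReference{Name: "db-credentials"},
+			Key:                  "password",
+		},
+	}},
+}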
+
+// EnvFromSource represents the source of a set of ConfigMaps or Secrets
+type EnvFromSource struct {
+	// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
+	// +optional
+	Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
+	// The ConfigMap to select from
+	// +optional
+	ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"`
+	// The Secret to select from
+	// +optional
+	SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
+}
+
+// ConfigMapEnvSource selects a ConfigMap to populate the environment
+// variables with.
+//
+// The contents of the target ConfigMap's Data field will represent the
+// key-value pairs as environment variables.
+type ConfigMapEnvSource struct {
+	// The ConfigMap to select from.
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// Specify whether the ConfigMap must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
+}
+
+// SecretEnvSource selects a Secret to populate the environment
+// variables with.
+//
+// The contents of the target Secret's Data field will represent the
+// key-value pairs as environment variables.
+type SecretEnvSource struct {
+	// The Secret to select from.
+	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
+	// Specify whether the Secret must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
+}
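+
+// Editor's sketch: bulk-importing a ConfigMap's keys as environment
+// variables, each prefixed with CFG_. As documented above, later sources and
+// explicit Env entries win on duplicate keys. The ConfigMap name is
+// hypothetical.
+var exampleEnvFrom = []EnvFromSource{{
+	Prefix: "CFG_",
+	ConfigMapRef: &ConfigMapEnvSource{
+		LocalObjectReference: LocalObjectReference{Name: "app-config"},
+	},
+}}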
+
+// HTTPHeader describes a custom header to be used in HTTP probes
+type HTTPHeader struct {
+	// The header field name.
+	// This will be canonicalized upon output, so case-variant names will be understood as the same header.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// The header field value
+	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
+}
+
+// HTTPGetAction describes an action based on HTTP Get requests.
+type HTTPGetAction struct {
+	// Path to access on the HTTP server.
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+	// Name or number of the port to access on the container.
+	// Number must be in the range 1 to 65535.
+	// Name must be an IANA_SVC_NAME.
+	Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"`
+	// Host name to connect to, defaults to the pod IP. You probably want to set
+	// "Host" in httpHeaders instead.
+	// +optional
+	Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"`
+	// Scheme to use for connecting to the host.
+	// Defaults to HTTP.
+	// +optional
+	Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"`
+	// Custom headers to set in the request. HTTP allows repeated headers.
+	// +optional
+	// +listType=atomic
+	HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"`
+}
+
+// URIScheme identifies the scheme used for connection to a host for Get actions
+// +enum
+type URIScheme string
+
+const (
+	// URISchemeHTTP means that the scheme used will be http://
+	URISchemeHTTP URIScheme = "HTTP"
+	// URISchemeHTTPS means that the scheme used will be https://
+	URISchemeHTTPS URIScheme = "HTTPS"
+)
+
+// TCPSocketAction describes an action based on opening a socket
+type TCPSocketAction struct {
+	// Number or name of the port to access on the container.
+	// Number must be in the range 1 to 65535.
+	// Name must be an IANA_SVC_NAME.
+	Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"`
+	// Optional: Host name to connect to, defaults to the pod IP.
+	// +optional
+	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
+}
+
+// GRPCAction specifies an action involving a GRPC service.
+type GRPCAction struct {
+	// Port number of the gRPC service. Number must be in the range 1 to 65535.
+	Port int32 `json:"port" protobuf:"bytes,1,opt,name=port"`
+
+	// Service is the name of the service to place in the gRPC HealthCheckRequest
+	// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+	//
+	// If this is not specified, the default behavior is defined by gRPC.
+	// +optional
+	// +default=""
+	Service *string `json:"service" protobuf:"bytes,2,opt,name=service"`
+}
+
+// ExecAction describes a "run in container" action.
+type ExecAction struct {
+	// Command is the command line to execute inside the container; the working directory for the
+	// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+	// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+	// a shell, you need to explicitly call out to that shell.
+	// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+	// +optional
+	// +listType=atomic
+	Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
+}
+
+// SleepAction describes a "sleep" action.
+type SleepAction struct {
+	// Seconds is the number of seconds to sleep.
+	Seconds int64 `json:"seconds" protobuf:"bytes,1,opt,name=seconds"`
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+type Probe struct {
+	// The action taken to determine the health of a container
+	ProbeHandler `json:",inline" protobuf:"bytes,1,opt,name=handler"`
+	// Number of seconds after the container has started before liveness probes are initiated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"`
+	// Number of seconds after which the probe times out.
+	// Defaults to 1 second. Minimum value is 1.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
+	// How often (in seconds) to perform the probe.
+	// Default to 10 seconds. Minimum value is 1.
+	// +optional
+	PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"`
+	// Minimum consecutive successes for the probe to be considered successful after having failed.
+	// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+	// +optional
+	SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"`
+	// Minimum consecutive failures for the probe to be considered failed after having succeeded.
+	// Defaults to 3. Minimum value is 1.
+	// +optional
+	FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"`
+	// Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+	// The grace period is the duration in seconds after the processes running in the pod are sent
+	// a termination signal and the time when the processes are forcibly halted with a kill signal.
+	// Set this value longer than the expected cleanup time for your process.
+	// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+	// value overrides the value provided by the pod spec.
+	// Value must be a non-negative integer. The value zero indicates stop immediately via
+	// the kill signal (no opportunity to shut down).
+	// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+	// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+	// +optional
+	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,7,opt,name=terminationGracePeriodSeconds"`
+}
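+
+// Editor's sketch: a readiness probe built from the HTTPGet handler, with
+// the documented defaults spelled out. The port refers to a named
+// ContainerPort via intstr; the path and port name are hypothetical.
+var exampleReadiness = Probe{
+	ProbeHandler: ProbeHandler{
+		HTTPGet: &HTTPGetAction{
+			Path:   "/healthz",
+			Port:   intstr.FromString("http"),
+			Scheme: URISchemeHTTP,
+		},
+	},
+	InitialDelaySeconds: 5,
+	PeriodSeconds:       10, // the default
+	TimeoutSeconds:      1,  // the default
+	FailureThreshold:    3,  // the default
+}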
+
+// PullPolicy describes a policy for if/when to pull a container image
+// +enum
+type PullPolicy string
+
+const (
+	// PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
+	PullAlways PullPolicy = "Always"
+	// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present
+	PullNever PullPolicy = "Never"
+	// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
+	PullIfNotPresent PullPolicy = "IfNotPresent"
+)
+
+// ResourceResizeRestartPolicy specifies how to handle container resource resize.
+type ResourceResizeRestartPolicy string
+
+// These are the valid resource resize restart policy values:
+const (
+	// 'NotRequired' means Kubernetes will try to resize the container
+	// without restarting it, if possible. Kubernetes may however choose to
+	// restart the container if it is unable to actuate resize without a
+	// restart, e.g. when the runtime doesn't support restart-free resizing.
+	NotRequired ResourceResizeRestartPolicy = "NotRequired"
+	// 'RestartContainer' means Kubernetes will resize the container in-place
+	// by stopping and starting the container when new resources are applied.
+	// This is needed for legacy applications, e.g. Java apps using the
+	// -XmxN flag, which are unable to use resized memory without restarting.
+	RestartContainer ResourceResizeRestartPolicy = "RestartContainer"
+)
+
+// ContainerResizePolicy represents resource resize policy for the container.
+type ContainerResizePolicy struct {
+	// Name of the resource to which this resource resize policy applies.
+	// Supported values: cpu, memory.
+	ResourceName ResourceName `json:"resourceName" protobuf:"bytes,1,opt,name=resourceName,casttype=ResourceName"`
+	// Restart policy to apply when specified resource is resized.
+	// If not specified, it defaults to NotRequired.
+	RestartPolicy ResourceResizeRestartPolicy `json:"restartPolicy" protobuf:"bytes,2,opt,name=restartPolicy,casttype=ResourceResizeRestartPolicy"`
+}
+
+// PreemptionPolicy describes a policy for if/when to preempt a pod.
+// +enum
+type PreemptionPolicy string
+
+const (
+	// PreemptLowerPriority means that pod can preempt other pods with lower priority.
+	PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority"
+	// PreemptNever means that pod never preempts other pods with lower priority.
+	PreemptNever PreemptionPolicy = "Never"
+)
+
+// TerminationMessagePolicy describes how termination messages are retrieved from a container.
+// +enum
+type TerminationMessagePolicy string
+
+const (
+	// TerminationMessageReadFile is the default behavior and will set the container status message to
+	// the contents of the container's terminationMessagePath when the container exits.
+	TerminationMessageReadFile TerminationMessagePolicy = "File"
+	// TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
+	// for the container status message when the container exits with an error and the
+	// terminationMessagePath has no contents.
+	TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
+)
+
+// Capability represents a POSIX capability type
+type Capability string
+
+// Adds and removes POSIX capabilities from running containers.
+type Capabilities struct {
+	// Added capabilities
+	// +optional
+	// +listType=atomic
+	Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"`
+	// Removed capabilities
+	// +optional
+	// +listType=atomic
+	Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"`
+}
+
+// ResourceRequirements describes the compute resource requirements.
+type ResourceRequirements struct {
+	// Limits describes the maximum amount of compute resources allowed.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +optional
+	Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
+	// Requests describes the minimum amount of compute resources required.
+	// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+	// otherwise to an implementation-defined value. Requests cannot exceed Limits.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +optional
+	Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
+	// Claims lists the names of resources, defined in spec.resourceClaims,
+	// that are used by this container.
+	//
+	// This is an alpha field and requires enabling the
+	// DynamicResourceAllocation feature gate.
+	//
+	// This field is immutable. It can only be set for containers.
+	//
+	// +listType=map
+	// +listMapKey=name
+	// +featureGate=DynamicResourceAllocation
+	// +optional
+	Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"`
+}
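+
+// Editor's sketch: requests sized below limits, parsed with the resource
+// package this file already imports. Quantities are illustrative only.
+var exampleResources = ResourceRequirements{
+	Requests: ResourceList{
+		ResourceCPU:    resource.MustParse("250m"),
+		ResourceMemory: resource.MustParse("128Mi"),
+	},
+	Limits: ResourceList{
+		ResourceCPU:    resource.MustParse("500m"),
+		ResourceMemory: resource.MustParse("256Mi"),
+	},
+}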
+
+// VolumeResourceRequirements describes the storage resource requirements for a volume.
+type VolumeResourceRequirements struct {
+	// Limits describes the maximum amount of compute resources allowed.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +optional
+	Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
+	// Requests describes the minimum amount of compute resources required.
+	// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+	// otherwise to an implementation-defined value. Requests cannot exceed Limits.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +optional
+	Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
+
+	// Claims got added by accident when volumes shared the ResourceRequirements struct
+	// with containers. Stripping the field got added in 1.27 and was backported to 1.26.
+	// Starting with Kubernetes 1.28, this field is not part of the volume API anymore.
+	//
+	// Future extensions must not use "claims" or field number 3.
+	// Claims []ResourceClaim `json:"claims,omitempty" protobuf:"bytes,3,opt,name=claims"`
+}
+
+// ResourceClaim references one entry in PodSpec.ResourceClaims.
+type ResourceClaim struct {
+	// Name must match the name of one entry in pod.spec.resourceClaims of
+	// the Pod where this field is used. It makes that resource available
+	// inside a container.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+	// Request is the name chosen for a request in the referenced claim.
+	// If empty, everything from the claim is made available, otherwise
+	// only the result of this request.
+	//
+	// +optional
+	Request string `json:"request,omitempty" protobuf:"bytes,2,opt,name=request"`
+}
+
+const (
+	// TerminationMessagePathDefault means the default path to capture the application termination message running in a container
+	TerminationMessagePathDefault string = "/dev/termination-log"
+)
+
+// A single application container that you want to run within a pod.
+type Container struct {
+	// Name of the container specified as a DNS_LABEL.
+	// Each container in a pod must have a unique name (DNS_LABEL).
+	// Cannot be updated.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// Container image name.
+	// More info: https://kubernetes.io/docs/concepts/containers/images
+	// This field is optional to allow higher level config management to default or override
+	// container images in workload controllers like Deployments and StatefulSets.
+	// +optional
+	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
+	// Entrypoint array. Not executed within a shell.
+	// The container image's ENTRYPOINT is used if this is not provided.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+	// of whether the variable exists or not. Cannot be updated.
+	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+	// +optional
+	// +listType=atomic
+	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
+	// Arguments to the entrypoint.
+	// The container image's CMD is used if this is not provided.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+	// of whether the variable exists or not. Cannot be updated.
+	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+	// +optional
+	// +listType=atomic
+	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
+	// Container's working directory.
+	// If not specified, the container runtime's default will be used, which
+	// might be configured in the container image.
+	// Cannot be updated.
+	// +optional
+	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
+	// List of ports to expose from the container. Not specifying a port here
+	// DOES NOT prevent that port from being exposed. Any port which is
+	// listening on the default "0.0.0.0" address inside a container will be
+	// accessible from the network.
+	// Modifying this array with strategic merge patch may corrupt the data.
+	// For more information, see https://github.com/kubernetes/kubernetes/issues/108255.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=containerPort
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=containerPort
+	// +listMapKey=protocol
+	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
+	// List of sources to populate environment variables in the container.
+	// Each key defined within a source must be a C_IDENTIFIER. All invalid keys
+	// will be reported as an event when the container is starting. When a key exists in multiple
+	// sources, the value associated with the last source will take precedence.
+	// Values defined by an Env with a duplicate key will take precedence.
+	// Cannot be updated.
+	// +optional
+	// +listType=atomic
+	EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
+	// List of environment variables to set in the container.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=name
+	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
+	// Compute Resources required by this container.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +optional
+	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
+	// Resources resize policy for the container.
+	// +featureGate=InPlacePodVerticalScaling
+	// +optional
+	// +listType=atomic
+	ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
+	// RestartPolicy defines the restart behavior of individual containers in a pod.
+	// This field may only be set for init containers, and the only allowed value is "Always".
+	// For non-init containers or when this field is not specified,
+	// the restart behavior is defined by the Pod's restart policy and the container type.
+	// Setting the RestartPolicy as "Always" for the init container will have the following effect:
+	// this init container will be continually restarted on
+	// exit until all regular containers have terminated. Once all regular
+	// containers have completed, all init containers with restartPolicy "Always"
+	// will be shut down. This lifecycle differs from normal init containers and
+	// is often referred to as a "sidecar" container. Although this init
+	// container still starts in the init container sequence, it does not wait
+	// for the container to complete before proceeding to the next init
+	// container. Instead, the next init container starts immediately after this
+	// init container is started, or after any startupProbe has successfully
+	// completed.
+	// +featureGate=SidecarContainers
+	// +optional
+	RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
+	// Pod volumes to mount into the container's filesystem.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=mountPath
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=mountPath
+	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
+	// volumeDevices is the list of block devices to be used by the container.
+	// +patchMergeKey=devicePath
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=devicePath
+	// +optional
+	VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
+	// Periodic probe of container liveness.
+	// Container will be restarted if the probe fails.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
+	// Periodic probe of container service readiness.
+	// Container will be removed from service endpoints if the probe fails.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
+	// StartupProbe indicates that the Pod has successfully initialized.
+	// If specified, no other probes are executed until this completes successfully.
+	// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+	// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+	// when it might take a long time to load data or warm a cache, than during steady-state operation.
+	// This cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
+	// Actions that the management system should take in response to container lifecycle events.
+	// Cannot be updated.
+	// +optional
+	Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
+	// Optional: Path at which the file to which the container's termination message
+	// will be written is mounted into the container's filesystem.
+	// Message written is intended to be brief final status, such as an assertion failure message.
+	// Will be truncated by the node if greater than 4096 bytes. The total message length across
+	// all containers will be limited to 12kb.
+	// Defaults to /dev/termination-log.
+	// Cannot be updated.
+	// +optional
+	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
+	// Indicate how the termination message should be populated. File will use the contents of
+	// terminationMessagePath to populate the container status message on both success and failure.
+	// FallbackToLogsOnError will use the last chunk of container log output if the termination
+	// message file is empty and the container exited with an error.
+	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+	// Defaults to File.
+	// Cannot be updated.
+	// +optional
+	TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
+	// Image pull policy.
+	// One of Always, Never, IfNotPresent.
+	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+	// +optional
+	ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
+	// SecurityContext defines the security options the container should be run with.
+	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+	// +optional
+	SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
+
+	// Variables for interactive containers; these have very specialized use-cases
+	// (e.g. debugging) and shouldn't be used for general-purpose containers.
+
+	// Whether this container should allocate a buffer for stdin in the container runtime. If this
+	// is not set, reads from stdin in the container will always result in EOF.
+	// Default is false.
+	// +optional
+	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
+	// Whether the container runtime should close the stdin channel after it has been opened by
+	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
+	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+	// at which time stdin is closed and remains closed until the container is restarted. If this
+	// flag is false, a container process that reads from stdin will never receive an EOF.
+	// Default is false
+	// +optional
+	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
+	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+	// Default is false.
+	// +optional
+	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
+}
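+
+// Editor's sketch: a minimal Container exercising the fields defined above.
+// The image reference is hypothetical; every omitted field keeps its
+// documented default (for example TerminationMessagePath).
+var exampleContainer = Container{
+	Name:  "web",
+	Image: "registry.example.invalid/web:1.2.3",
+	Ports: []ContainerPort{{Name: "http", ContainerPort: 8080, Protocol: ProtocolTCP}},
+	Resources: ResourceRequirements{
+		Requests: ResourceList{ResourceCPU: resource.MustParse("100m")},
+	},
+	ImagePullPolicy: PullIfNotPresent,
+}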
+
+// ProbeHandler defines a specific action that should be taken in a probe.
+// One and only one of the fields must be specified.
+type ProbeHandler struct {
+	// Exec specifies a command to execute in the container.
+	// +optional
+	Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
+	// HTTPGet specifies an HTTP GET request to perform.
+	// +optional
+	HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
+	// TCPSocket specifies a connection to a TCP port.
+	// +optional
+	TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
+	// GRPC specifies a GRPC HealthCheckRequest.
+	// +optional
+	GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"`
+}
+
+// LifecycleHandler defines a specific action that should be taken in a lifecycle
+// hook. One and only one of the fields, except TCPSocket, must be specified.
+type LifecycleHandler struct {
+	// Exec specifies a command to execute in the container.
+	// +optional
+	Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
+	// HTTPGet specifies an HTTP GET request to perform.
+	// +optional
+	HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
+	// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+	// for backward compatibility. There is no validation of this field and
+	// lifecycle hooks will fail at runtime when it is specified.
+	// +optional
+	TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
+	// Sleep represents a duration that the container should sleep.
+	// +featureGate=PodLifecycleSleepAction
+	// +optional
+	Sleep *SleepAction `json:"sleep,omitempty" protobuf:"bytes,4,opt,name=sleep"`
+}
+
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
+type Lifecycle struct {
+	// PostStart is called immediately after a container is created. If the handler fails,
+	// the container is terminated and restarted according to its restart policy.
+	// Other management of the container blocks until the hook completes.
+	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+	// +optional
+	PostStart *LifecycleHandler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
+	// PreStop is called immediately before a container is terminated due to an
+	// API request or management event such as liveness/startup probe failure,
+	// preemption, resource contention, etc. The handler is not called if the
+	// container crashes or exits. The Pod's termination grace period countdown begins before the
+	// PreStop hook is executed. Regardless of the outcome of the handler, the
+	// container will eventually terminate within the Pod's termination grace
+	// period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+	// or until the termination grace period is reached.
+	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+	// +optional
+	PreStop *LifecycleHandler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
+}
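+
+// Editor's sketch: a preStop hook that pauses before SIGTERM handling so
+// endpoints can drain. Sleep requires the PodLifecycleSleepAction feature
+// gate; the Exec form below is a portable equivalent (command hypothetical).
+var (
+	exampleLifecycleSleep = Lifecycle{
+		PreStop: &LifecycleHandler{Sleep: &SleepAction{Seconds: 5}},
+	}
+
+	exampleLifecycleExec = Lifecycle{
+		PreStop: &LifecycleHandler{
+			Exec: &ExecAction{Command: []string{"/bin/sh", "-c", "sleep 5"}},
+		},
+	}
+)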
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// ContainerStateWaiting is a waiting state of a container.
+type ContainerStateWaiting struct {
+	// (brief) reason the container is not yet running.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"`
+	// Message regarding why the container is not yet running.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+}
+
+// ContainerStateRunning is a running state of a container.
+type ContainerStateRunning struct {
+	// Time at which the container was last (re-)started
+	// +optional
+	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"`
+}
+
+// ContainerStateTerminated is a terminated state of a container.
+type ContainerStateTerminated struct {
+	// Exit status from the last termination of the container
+	ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"`
+	// Signal from the last termination of the container
+	// +optional
+	Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"`
+	// (brief) reason from the last termination of the container
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+	// Message regarding the last termination of the container
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+	// Time at which previous execution of the container started
+	// +optional
+	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"`
+	// Time at which the container last terminated
+	// +optional
+	FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"`
+	// Container's ID in the format '<type>://<container_id>'
+	// +optional
+	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"`
+}
+
+// ContainerState holds a possible state of container.
+// Only one of its members may be specified.
+// If none of them is specified, the default one is ContainerStateWaiting.
+type ContainerState struct {
+	// Details about a waiting container
+	// +optional
+	Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"`
+	// Details about a running container
+	// +optional
+	Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"`
+	// Details about a terminated container
+	// +optional
+	Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"`
+}
+
+// ContainerStatus contains details for the current status of this container.
+type ContainerStatus struct {
+	// Name is a DNS_LABEL representing the unique name of the container.
+	// Each container in a pod must have a unique name across all container types.
+	// Cannot be updated.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// State holds details about the container's current condition.
+	// +optional
+	State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"`
+	// LastTerminationState holds the last termination state of the container to
+	// help debug container crashes and restarts. This field is not
+	// populated if the container is still running and RestartCount is 0.
+	// +optional
+	LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"`
+	// Ready specifies whether the container is currently passing its readiness check.
+	// The value will change as readiness probes keep executing. If no readiness
+	// probes are specified, this field defaults to true once the container is
+	// fully started (see Started field).
+	//
+	// The value is typically used to determine whether a container is ready to
+	// accept traffic.
+	Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"`
+	// RestartCount holds the number of times the container has been restarted.
+	// Kubelet makes an effort to always increment the value, but there
+	// are cases when the state may be lost due to node restarts and then the value
+	// may be reset to 0. The value is never negative.
+	RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"`
+	// Image is the name of container image that the container is running.
+	// The container image may not match the image used in the PodSpec,
+	// as it may have been resolved by the runtime.
+	// More info: https://kubernetes.io/docs/concepts/containers/images.
+	Image string `json:"image" protobuf:"bytes,6,opt,name=image"`
+	// ImageID is the image ID of the container's image. The image ID may not
+	// match the image ID of the image used in the PodSpec, as it may have been
+	// resolved by the runtime.
+	ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"`
+	// ContainerID is the ID of the container in the format '<type>://<container_id>'.
+	// Where type is a container runtime identifier, returned from Version call of CRI API
+	// (for example "containerd").
+	// +optional
+	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
+	// Started indicates whether the container has finished its postStart lifecycle hook
+	// and passed its startup probe.
+	// Initialized as false, becomes true after startupProbe is considered
+	// successful. Resets to false when the container is restarted, or if kubelet
+	// loses state temporarily. In both cases, startup probes will run again.
+	// Is always true when no startupProbe is defined and the container is running and
+	// has passed the postStart lifecycle hook. The null value must be treated the
+	// same as false.
+	// +optional
+	Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"`
+	// AllocatedResources represents the compute resources allocated for this container by the
+	// node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission
+	// and after successfully admitting desired pod resize.
+	// +featureGate=InPlacePodVerticalScalingAllocatedStatus
+	// +optional
+	AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,10,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"`
+	// Resources represents the compute resource requests and limits that have been successfully
+	// enacted on the running container after it has been started or has been successfully resized.
+	// +featureGate=InPlacePodVerticalScaling
+	// +optional
+	Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,11,opt,name=resources"`
+	// Status of volume mounts.
+	// +optional
+	// +patchMergeKey=mountPath
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=mountPath
+	// +featureGate=RecursiveReadOnlyMounts
+	VolumeMounts []VolumeMountStatus `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,12,rep,name=volumeMounts"`
+	// User represents user identity information initially attached to the first process of the container
+	// +featureGate=SupplementalGroupsPolicy
+	// +optional
+	User *ContainerUser `json:"user,omitempty" protobuf:"bytes,13,opt,name=user,casttype=ContainerUser"`
+	// AllocatedResourcesStatus represents the status of various resources
+	// allocated for this Pod.
+	// +featureGate=ResourceHealthStatus
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=name
+	AllocatedResourcesStatus []ResourceStatus `json:"allocatedResourcesStatus,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,14,rep,name=allocatedResourcesStatus"`
+}
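+
+// exampleContainerReadiness is an illustrative helper, not part of the
+// upstream API: a minimal sketch of how ContainerStatus is typically
+// consumed, looking a container up by name and reporting whether it is
+// currently passing its readiness check. The function name is hypothetical.
+func exampleContainerReadiness(statuses []ContainerStatus, name string) bool {
+	for _, s := range statuses {
+		if s.Name == name {
+			// Ready already reflects the documented default: true once the
+			// container is fully started when no readiness probe is defined.
+			return s.Ready
+		}
+	}
+	// No status entry yet: the container has not been reported by the kubelet.
+	return false
+}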
+
+// ResourceStatus represents the status of a single resource allocated to a Pod.
+type ResourceStatus struct {
+	// Name of the resource. Must be unique within the pod and, in the case of a non-DRA resource, match one of the resources from the pod spec.
+	// For DRA resources, the value must be "claim:<claim_name>/<request>".
+	// When this status is reported about a container, the "claim_name" and "request" must match one of the claims of this container.
+	// +required
+	Name ResourceName `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// List of unique resources health. Each element in the list contains a unique resource ID and its health.
+	// At a minimum, for the lifetime of a Pod, the resource ID must uniquely identify the resource allocated to the Pod on the Node.
+	// If another Pod on the same Node reports a status with the same resource ID, it must be the same resource they share.
+	// See ResourceID type definition for a specific format it has in various use cases.
+	// +listType=map
+	// +listMapKey=resourceID
+	Resources []ResourceHealth `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
+}
+
+type ResourceHealthStatus string
+
+const (
+	ResourceHealthStatusHealthy   ResourceHealthStatus = "Healthy"
+	ResourceHealthStatusUnhealthy ResourceHealthStatus = "Unhealthy"
+	ResourceHealthStatusUnknown   ResourceHealthStatus = "Unknown"
+)
+
+// ResourceID is calculated based on the source of this resource health information.
+// For DevicePlugin:
+//
+//	DeviceID, where DeviceID is from the Device structure of DevicePlugin's ListAndWatchResponse type: https://github.com/kubernetes/kubernetes/blob/eda1c780543a27c078450e2f17d674471e00f494/staging/src/k8s.io/kubelet/pkg/apis/deviceplugin/v1alpha/api.proto#L61-L73
+//
+// DevicePlugin ID is usually a constant for the lifetime of a Node and typically can be used to uniquely identify the device on the node.
+// For DRA:
+//
+//	<driver name>/<pool name>/<device name>: such a device can be looked up in the information published by that DRA driver to learn more about it. It is designed to be globally unique in a cluster.
+type ResourceID string
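+
+// Illustrative ResourceID values for the two sources described above; these
+// identifiers are hypothetical and only demonstrate the expected shapes, they
+// are not part of the upstream API.
+const (
+	// DevicePlugin source: the DeviceID reported in ListAndWatchResponse.
+	exampleDevicePluginResourceID ResourceID = "GPU-fe80-0001"
+	// DRA source: <driver name>/<pool name>/<device name>.
+	exampleDRAResourceID ResourceID = "gpu.example.com/pool-a/device-0"
+)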
+
+// ResourceHealth represents the health of a resource. It has the latest device health information.
+// This is a part of KEP https://kep.k8s.io/4680.
+type ResourceHealth struct {
+	// ResourceID is the unique identifier of the resource. See the ResourceID type for more information.
+	ResourceID ResourceID `json:"resourceID" protobuf:"bytes,1,opt,name=resourceID"`
+	// Health of the resource.
+	// can be one of:
+	//  - Healthy: operates as normal
+	//  - Unhealthy: reported unhealthy. We consider this a temporary health issue
+	//               since we do not have a mechanism today to distinguish
+	//               temporary and permanent issues.
+	//  - Unknown: The status cannot be determined.
+	//             For example, Device Plugin got unregistered and hasn't been re-registered since.
+	//
+	// In future we may want to introduce the PermanentlyUnhealthy Status.
+	Health ResourceHealthStatus `json:"health,omitempty" protobuf:"bytes,2,name=health"`
+}
+
+// ContainerUser represents user identity information
+type ContainerUser struct {
+	// Linux holds user identity information initially attached to the first process of the containers in Linux.
+	// Note that the actual running identity can be changed if the process has enough privilege to do so.
+	// +optional
+	Linux *LinuxContainerUser `json:"linux,omitempty" protobuf:"bytes,1,opt,name=linux,casttype=LinuxContainerUser"`
+
+	// Windows holds user identity information initially attached to the first process of the containers in Windows
+	// This is just reserved for future use.
+	// Windows *WindowsContainerUser
+}
+
+// LinuxContainerUser represents user identity information in Linux containers
+type LinuxContainerUser struct {
+	// UID is the primary uid initially attached to the first process in the container
+	UID int64 `json:"uid" protobuf:"varint,1,name=uid"`
+	// GID is the primary gid initially attached to the first process in the container
+	GID int64 `json:"gid" protobuf:"varint,2,name=gid"`
+	// SupplementalGroups are the supplemental groups initially attached to the first process in the container
+	// +optional
+	// +listType=atomic
+	SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,3,rep,name=supplementalGroups"`
+}
+
+// PodPhase is a label for the condition of a pod at the current time.
+// +enum
+type PodPhase string
+
+// These are the valid statuses of pods.
+const (
+	// PodPending means the pod has been accepted by the system, but one or more of the containers
+	// has not been started. This includes time before being bound to a node, as well as time spent
+	// pulling images onto the host.
+	PodPending PodPhase = "Pending"
+	// PodRunning means the pod has been bound to a node and all of the containers have been started.
+	// At least one container is still running or is in the process of being restarted.
+	PodRunning PodPhase = "Running"
+	// PodSucceeded means that all containers in the pod have voluntarily terminated
+	// with a container exit code of 0, and the system is not going to restart any of these containers.
+	PodSucceeded PodPhase = "Succeeded"
+	// PodFailed means that all containers in the pod have terminated, and at least one container has
+	// terminated in a failure (exited with a non-zero exit code or was stopped by the system).
+	PodFailed PodPhase = "Failed"
+	// PodUnknown means that for some reason the state of the pod could not be obtained, typically due
+	// to an error in communicating with the host of the pod.
+	// Deprecated: It isn't being set since 2015 (74da3b14b0c0f658b3bb8d2def5094686d0e9095)
+	PodUnknown PodPhase = "Unknown"
+)
+
+// PodConditionType is a valid value for PodCondition.Type
+type PodConditionType string
+
+// These are built-in conditions of pod. An application may use a custom condition not listed here.
+const (
+	// ContainersReady indicates whether all containers in the pod are ready.
+	ContainersReady PodConditionType = "ContainersReady"
+	// PodInitialized means that all init containers in the pod have started successfully.
+	PodInitialized PodConditionType = "Initialized"
+	// PodReady means the pod is able to service requests and should be added to the
+	// load balancing pools of all matching services.
+	PodReady PodConditionType = "Ready"
+	// PodScheduled represents status of the scheduling process for this pod.
+	PodScheduled PodConditionType = "PodScheduled"
+	// DisruptionTarget indicates the pod is about to be terminated due to a
+	// disruption (such as preemption, eviction API or garbage-collection).
+	DisruptionTarget PodConditionType = "DisruptionTarget"
+	// PodReadyToStartContainers pod sandbox is successfully configured and
+	// the pod is ready to launch containers.
+	PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers"
+)
+
+// These are reasons for a pod's transition to a condition.
+const (
+	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
+	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
+	PodReasonUnschedulable = "Unschedulable"
+
+	// PodReasonSchedulingGated reason in PodScheduled PodCondition means that the scheduler
+	// skips scheduling the pod because one or more scheduling gates are still present.
+	PodReasonSchedulingGated = "SchedulingGated"
+
+	// PodReasonSchedulerError reason in PodScheduled PodCondition means that some internal error happens
+	// during scheduling, for example due to nodeAffinity parsing errors.
+	PodReasonSchedulerError = "SchedulerError"
+
+	// PodReasonTerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination
+	// is initiated by kubelet
+	PodReasonTerminationByKubelet = "TerminationByKubelet"
+
+	// PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the
+	// disruption was initiated by scheduler's preemption.
+	PodReasonPreemptionByScheduler = "PreemptionByScheduler"
+)
+
+// PodCondition contains details for the current condition of this pod.
+type PodCondition struct {
+	// Type is the type of the condition.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+	Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
+	// Status is the status of the condition.
+	// Can be True, False, Unknown.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// Last time we probed the condition.
+	// +optional
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// Unique, one-word, CamelCase reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// Human-readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
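+
+// exampleIsPodConditionTrue is an illustrative helper, not part of the
+// upstream API: a minimal sketch of the common pattern for reading a pod's
+// condition list, e.g. when evaluating a PodReadinessGate. A missing
+// condition is treated as not true. The function name is hypothetical.
+func exampleIsPodConditionTrue(conditions []PodCondition, t PodConditionType) bool {
+	for _, c := range conditions {
+		if c.Type == t {
+			return c.Status == ConditionTrue
+		}
+	}
+	return false
+}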
+
+// PodResizeStatus shows the status of a desired resize of a pod's containers.
+type PodResizeStatus string
+
+const (
+	// Pod resources resize has been requested and will be evaluated by node.
+	PodResizeStatusProposed PodResizeStatus = "Proposed"
+	// Pod resources resize has been accepted by node and is being actuated.
+	PodResizeStatusInProgress PodResizeStatus = "InProgress"
+	// Node cannot resize the pod at this time and will keep retrying.
+	PodResizeStatusDeferred PodResizeStatus = "Deferred"
+	// Requested pod resize is not feasible and will not be re-evaluated.
+	PodResizeStatusInfeasible PodResizeStatus = "Infeasible"
+)
+
+// VolumeMountStatus shows status of volume mounts.
+type VolumeMountStatus struct {
+	// Name corresponds to the name of the original VolumeMount.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// MountPath corresponds to the original VolumeMount.
+	MountPath string `json:"mountPath" protobuf:"bytes,2,opt,name=mountPath"`
+	// ReadOnly corresponds to the original VolumeMount.
+	// +optional
+	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+	// RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts).
+	// An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled,
+	// depending on the mount result.
+	// +featureGate=RecursiveReadOnlyMounts
+	// +optional
+	RecursiveReadOnly *RecursiveReadOnlyMode `json:"recursiveReadOnly,omitempty" protobuf:"bytes,4,opt,name=recursiveReadOnly,casttype=RecursiveReadOnlyMode"`
+}
+
+// RestartPolicy describes how the container should be restarted.
+// Only one of the following restart policies may be specified.
+// If none of the following policies is specified, the default one
+// is RestartPolicyAlways.
+// +enum
+type RestartPolicy string
+
+const (
+	RestartPolicyAlways    RestartPolicy = "Always"
+	RestartPolicyOnFailure RestartPolicy = "OnFailure"
+	RestartPolicyNever     RestartPolicy = "Never"
+)
+
+// ContainerRestartPolicy is the restart policy for a single container.
+// This may only be set for init containers and only allowed value is "Always".
+type ContainerRestartPolicy string
+
+const (
+	ContainerRestartPolicyAlways ContainerRestartPolicy = "Always"
+)
+
+// DNSPolicy defines how a pod's DNS will be configured.
+// +enum
+type DNSPolicy string
+
+const (
+	// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
+	// first, if it is available, then fall back on the default
+	// (as determined by kubelet) DNS settings.
+	DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
+
+	// DNSClusterFirst indicates that the pod should use cluster DNS
+	// first unless hostNetwork is true, if it is available, then
+	// fall back on the default (as determined by kubelet) DNS settings.
+	DNSClusterFirst DNSPolicy = "ClusterFirst"
+
+	// DNSDefault indicates that the pod should use the default (as
+	// determined by kubelet) DNS settings.
+	DNSDefault DNSPolicy = "Default"
+
+	// DNSNone indicates that the pod should use empty DNS settings. DNS
+	// parameters such as nameservers and search paths should be defined via
+	// DNSConfig.
+	DNSNone DNSPolicy = "None"
+)
+
+const (
+	// DefaultTerminationGracePeriodSeconds indicates the default duration in
+	// seconds a pod needs to terminate gracefully.
+	DefaultTerminationGracePeriodSeconds = 30
+)
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+// +structType=atomic
+type NodeSelector struct {
+	// Required. A list of node selector terms. The terms are ORed.
+	// +listType=atomic
+	NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
+}
+
+// A null or empty node selector term matches no objects. Its requirements
+// are ANDed.
+// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+// +structType=atomic
+type NodeSelectorTerm struct {
+	// A list of node selector requirements by node's labels.
+	// +optional
+	// +listType=atomic
+	MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
+	// A list of node selector requirements by node's fields.
+	// +optional
+	// +listType=atomic
+	MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" protobuf:"bytes,2,rep,name=matchFields"`
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+	// The label key that the selector applies to.
+	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+	// Represents a key's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+	Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"`
+	// An array of string values. If the operator is In or NotIn,
+	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+	// the values array must be empty. If the operator is Gt or Lt, the values
+	// array must have a single element, which will be interpreted as an integer.
+	// This array is replaced during a strategic merge patch.
+	// +optional
+	// +listType=atomic
+	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A node selector operator is the set of operators that can be used in
+// a node selector requirement.
+// +enum
+type NodeSelectorOperator string
+
+const (
+	NodeSelectorOpIn           NodeSelectorOperator = "In"
+	NodeSelectorOpNotIn        NodeSelectorOperator = "NotIn"
+	NodeSelectorOpExists       NodeSelectorOperator = "Exists"
+	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
+	NodeSelectorOpGt           NodeSelectorOperator = "Gt"
+	NodeSelectorOpLt           NodeSelectorOperator = "Lt"
+)
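+
+// exampleNodeSelector is an illustrative value, not part of the upstream API,
+// showing the semantics documented above: the two terms are ORed, while the
+// requirements inside each term are ANDed. The "example.com/spot" label is
+// hypothetical.
+var exampleNodeSelector = NodeSelector{
+	NodeSelectorTerms: []NodeSelectorTerm{
+		{
+			// Term 1: amd64 nodes that do not carry the spot label.
+			MatchExpressions: []NodeSelectorRequirement{
+				{Key: "kubernetes.io/arch", Operator: NodeSelectorOpIn, Values: []string{"amd64"}},
+				{Key: "example.com/spot", Operator: NodeSelectorOpDoesNotExist},
+			},
+		},
+		{
+			// Term 2, ORed with term 1: any arm64 node.
+			MatchExpressions: []NodeSelectorRequirement{
+				{Key: "kubernetes.io/arch", Operator: NodeSelectorOpIn, Values: []string{"arm64"}},
+			},
+		},
+	},
+}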
+
+// A topology selector term represents the result of label queries.
+// A null or empty topology selector term matches no objects.
+// Its requirements are ANDed.
+// It provides a subset of the functionality of NodeSelectorTerm.
+// This is an alpha feature and may change in the future.
+// +structType=atomic
+type TopologySelectorTerm struct {
+	// Usage: Fields of type []TopologySelectorTerm must be listType=atomic.
+
+	// A list of topology selector requirements by labels.
+	// +optional
+	// +listType=atomic
+	MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" protobuf:"bytes,1,rep,name=matchLabelExpressions"`
+}
+
+// A topology selector requirement is a selector that matches given label.
+// This is an alpha feature and may change in the future.
+type TopologySelectorLabelRequirement struct {
+	// The label key that the selector applies to.
+	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+	// An array of string values. One value must match the label to be selected.
+	// Each entry in Values is ORed.
+	// +listType=atomic
+	Values []string `json:"values" protobuf:"bytes,2,rep,name=values"`
+}
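+
+// exampleTopologySelectorTerm is an illustrative value, not part of the
+// upstream API: the single requirement selects either of two hypothetical
+// zones, since entries in Values are ORed.
+var exampleTopologySelectorTerm = TopologySelectorTerm{
+	MatchLabelExpressions: []TopologySelectorLabelRequirement{{
+		Key:    "topology.kubernetes.io/zone",
+		Values: []string{"zone-a", "zone-b"},
+	}},
+}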
+
+// Affinity is a group of affinity scheduling rules.
+type Affinity struct {
+	// Describes node affinity scheduling rules for the pod.
+	// +optional
+	NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"`
+	// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+	// +optional
+	PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"`
+	// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+	// +optional
+	PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"`
+}
+
+// Pod affinity is a group of inter pod affinity scheduling rules.
+type PodAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system will try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm  `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system may or may not try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	// +listType=atomic
+	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	// +listType=atomic
+	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
+}
+
+// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
+type PodAntiAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the anti-affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the anti-affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system will try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm  `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+
+	// If the anti-affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the anti-affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to a pod label update), the
+	// system may or may not try to eventually evict the pod from its node.
+	// When there are multiple elements, the lists of nodes corresponding to each
+	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
+	// +optional
+	// +listType=atomic
+	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the anti-affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling anti-affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	// +listType=atomic
+	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
+type WeightedPodAffinityTerm struct {
+	// weight associated with matching the corresponding podAffinityTerm,
+	// in the range 1-100.
+	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
+	// Required. A pod affinity term, associated with the corresponding weight.
+	PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running.
+type PodAffinityTerm struct {
+	// A label query over a set of resources, in this case pods.
+	// If it's null, this PodAffinityTerm matches with no Pods.
+	// +optional
+	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
+	// namespaces specifies a static list of namespace names that the term applies to.
+	// The term is applied to the union of the namespaces listed in this field
+	// and the ones selected by namespaceSelector.
+	// null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+	// +optional
+	// +listType=atomic
+	Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"`
+	// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+	// the labelSelector in the specified namespaces, where co-located is defined as running on a node
+	// whose value of the label with key topologyKey matches that of any node on which any of the
+	// selected pods is running.
+	// Empty topologyKey is not allowed.
+	TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"`
+	// A label query over the set of namespaces that the term applies to.
+	// The term is applied to the union of the namespaces selected by this field
+	// and the ones listed in the namespaces field.
+	// null selector and null or empty namespaces list means "this pod's namespace".
+	// An empty selector ({}) matches all namespaces.
+	// +optional
+	NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,4,opt,name=namespaceSelector"`
+	// MatchLabelKeys is a set of pod label keys to select which pods will
+	// be taken into consideration. The keys are used to lookup values from the
+	// incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)`
+	// to select the group of existing pods that will be taken into consideration
+	// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+	// pod labels will be ignored. The default value is empty.
+	// The same key is forbidden to exist in both matchLabelKeys and labelSelector.
+	// Also, matchLabelKeys cannot be set when labelSelector isn't set.
+	// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+	//
+	// +listType=atomic
+	// +optional
+	MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,5,opt,name=matchLabelKeys"`
+	// MismatchLabelKeys is a set of pod label keys to select which pods will
+	// be taken into consideration. The keys are used to lookup values from the
+	// incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)`
+	// to select the group of existing pods that will be taken into consideration
+	// for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+	// pod labels will be ignored. The default value is empty.
+	// The same key is forbidden to exist in both mismatchLabelKeys and labelSelector.
+	// Also, mismatchLabelKeys cannot be set when labelSelector isn't set.
+	// This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).
+	//
+	// +listType=atomic
+	// +optional
+	MismatchLabelKeys []string `json:"mismatchLabelKeys,omitempty" protobuf:"bytes,6,opt,name=mismatchLabelKeys"`
+}
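+
+// examplePodAntiAffinityTerm is an illustrative value, not part of the
+// upstream API: a typical anti-affinity term that spreads replicas of a
+// hypothetical "app: web" workload by forbidding co-location on a node with
+// the same hostname label value.
+var examplePodAntiAffinityTerm = PodAffinityTerm{
+	LabelSelector: &metav1.LabelSelector{
+		MatchLabels: map[string]string{"app": "web"},
+	},
+	// Co-location is evaluated per node because the key is the hostname label.
+	TopologyKey: "kubernetes.io/hostname",
+}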
+
+// Node affinity is a group of node affinity scheduling rules.
+type NodeAffinity struct {
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to an update), the system
+	// will try to eventually evict the pod from its node.
+	// +optional
+	// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
+
+	// If the affinity requirements specified by this field are not met at
+	// scheduling time, the pod will not be scheduled onto the node.
+	// If the affinity requirements specified by this field cease to be met
+	// at some point during pod execution (e.g. due to an update), the system
+	// may or may not try to eventually evict the pod from its node.
+	// +optional
+	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"`
+	// The scheduler will prefer to schedule pods to nodes that satisfy
+	// the affinity expressions specified by this field, but it may choose
+	// a node that violates one or more of the expressions. The node that is
+	// most preferred is the one with the greatest sum of weights, i.e.
+	// for each node that meets all of the scheduling requirements (resource
+	// request, requiredDuringScheduling affinity expressions, etc.),
+	// compute a sum by iterating through the elements of this field and adding
+	// "weight" to the sum if the node matches the corresponding matchExpressions; the
+	// node(s) with the highest sum are the most preferred.
+	// +optional
+	// +listType=atomic
+	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+type PreferredSchedulingTerm struct {
+	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
+	// A node selector term, associated with the corresponding weight.
+	Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
+}
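+
+// exampleNodeAffinity is an illustrative value, not part of the upstream API,
+// combining a hard requirement with a weighted preference as described above:
+// the pod may only land on amd64 nodes, and nodes carrying the hypothetical
+// "example.com/gpu" label add 80 to their preference score.
+var exampleNodeAffinity = NodeAffinity{
+	RequiredDuringSchedulingIgnoredDuringExecution: &NodeSelector{
+		NodeSelectorTerms: []NodeSelectorTerm{{
+			MatchExpressions: []NodeSelectorRequirement{
+				{Key: "kubernetes.io/arch", Operator: NodeSelectorOpIn, Values: []string{"amd64"}},
+			},
+		}},
+	},
+	PreferredDuringSchedulingIgnoredDuringExecution: []PreferredSchedulingTerm{{
+		Weight: 80,
+		Preference: NodeSelectorTerm{
+			MatchExpressions: []NodeSelectorRequirement{
+				{Key: "example.com/gpu", Operator: NodeSelectorOpExists},
+			},
+		},
+	}},
+}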
+
+// The node this Taint is attached to has the "effect" on
+// any pod that does not tolerate the Taint.
+type Taint struct {
+	// Required. The taint key to be applied to a node.
+	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+	// The taint value corresponding to the taint key.
+	// +optional
+	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+	// Required. The effect of the taint on pods
+	// that do not tolerate the taint.
+	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+	Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
+	// TimeAdded represents the time at which the taint was added.
+	// It is only written for NoExecute taints.
+	// +optional
+	TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
+}
+
+// +enum
+type TaintEffect string
+
+const (
+	// Do not allow new pods to schedule onto the node unless they tolerate the taint,
+	// but allow all pods submitted to Kubelet without going through the scheduler
+	// to start, and allow all already-running pods to continue running.
+	// Enforced by the scheduler.
+	TaintEffectNoSchedule TaintEffect = "NoSchedule"
+	// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
+	// new pods onto the node, rather than prohibiting new pods from scheduling
+	// onto the node entirely. Enforced by the scheduler.
+	TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
+	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+	// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
+	// Kubelet without going through the scheduler to start.
+	// Enforced by Kubelet and the scheduler.
+	// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
+
+	// Evict any already-running pods that do not tolerate the taint.
+	// Currently enforced by NodeController.
+	TaintEffectNoExecute TaintEffect = "NoExecute"
+)
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+type Toleration struct {
+	// Key is the taint key that the toleration applies to. Empty means match all taint keys.
+	// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+	// +optional
+	Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
+	// Operator represents a key's relationship to the value.
+	// Valid operators are Exists and Equal. Defaults to Equal.
+	// Exists is equivalent to wildcard for value, so that a pod can
+	// tolerate all taints of a particular category.
+	// +optional
+	Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
+	// Value is the taint value the toleration matches to.
+	// If the operator is Exists, the value should be empty, otherwise just a regular string.
+	// +optional
+	Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
+	// Effect indicates the taint effect to match. Empty means match all taint effects.
+	// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+	// +optional
+	Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
+	// TolerationSeconds represents the period of time the toleration (which must be
+	// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+	// it is not set, which means tolerate the taint forever (do not evict). Zero and
+	// negative values will be treated as 0 (evict immediately) by the system.
+	// +optional
+	TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"`
+}
+
+// A toleration operator is the set of operators that can be used in a toleration.
+// +enum
+type TolerationOperator string
+
+const (
+	TolerationOpExists TolerationOperator = "Exists"
+	TolerationOpEqual  TolerationOperator = "Equal"
+)
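+
+// exampleTaint and exampleToleration are illustrative values, not part of the
+// upstream API, showing how a toleration matches a taint on the
+// <key,value,effect> triple: with the Equal operator, key and value must
+// match, and a non-empty effect must match as well. The key and value are
+// hypothetical.
+var (
+	exampleTaint = Taint{
+		Key:    "example.com/dedicated",
+		Value:  "batch",
+		Effect: TaintEffectNoExecute,
+	}
+	// Matches exampleTaint and delays eviction for 300 seconds.
+	exampleToleration = Toleration{
+		Key:               "example.com/dedicated",
+		Operator:          TolerationOpEqual,
+		Value:             "batch",
+		Effect:            TaintEffectNoExecute,
+		TolerationSeconds: func(s int64) *int64 { return &s }(300),
+	}
+)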
+
+// PodReadinessGate contains the reference to a pod condition
+type PodReadinessGate struct {
+	// ConditionType refers to a condition in the pod's condition list with matching type.
+	ConditionType PodConditionType `json:"conditionType" protobuf:"bytes,1,opt,name=conditionType,casttype=PodConditionType"`
+}
+
+// PodSpec is a description of a pod.
+type PodSpec struct {
+	// List of volumes that can be mounted by containers belonging to the pod.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge,retainKeys
+	// +listType=map
+	// +listMapKey=name
+	Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
+	// List of initialization containers belonging to the pod.
+	// Init containers are executed in order prior to containers being started. If any
+	// init container fails, the pod is considered to have failed and is handled according
+	// to its restartPolicy. The name for an init container or normal container must be
+	// unique among all containers.
+	// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+	// The resourceRequirements of an init container are taken into account during scheduling
+	// by finding the highest request/limit for each resource type, and then using the max of
+	// that value or the sum of the normal containers. Limits are applied to init containers
+	// in a similar fashion.
+	// Init containers cannot currently be added or removed.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=name
+	InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
+	// List of containers belonging to the pod.
+	// Containers cannot currently be added or removed.
+	// There must be at least one container in a Pod.
+	// Cannot be updated.
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=name
+	Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
+	// List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
+	// pod to perform user-initiated actions such as debugging. This list cannot be specified when
+	// creating a pod, and it cannot be modified by updating the pod spec. In order to add an
+	// ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=name
+	EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"`
+	// Restart policy for all containers within the pod.
+	// One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.
+	// Default to Always.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+	// +optional
+	RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
+	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+	// Value must be a non-negative integer. The value zero indicates stop immediately via
+	// the kill signal (no opportunity to shut down).
+	// If this value is nil, the default grace period will be used instead.
+	// The grace period is the duration in seconds between when the processes running in the pod are sent
+	// a termination signal and when the processes are forcibly halted with a kill signal.
+	// Set this value longer than the expected cleanup time for your process.
+	// Defaults to 30 seconds.
+	// +optional
+	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
+	// Optional duration in seconds the pod may be active on the node relative to
+	// StartTime before the system will actively try to mark it failed and kill associated containers.
+	// Value must be a positive integer.
+	// +optional
+	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
+	// Set DNS policy for the pod.
+	// Defaults to "ClusterFirst".
+	// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+	// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+	// To have DNS options set along with hostNetwork, you have to specify DNS policy
+	// explicitly to 'ClusterFirstWithHostNet'.
+	// +optional
+	DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
+	// NodeSelector is a selector which must be true for the pod to fit on a node,
+	// i.e. it must match that node's labels for the pod to be scheduled on that node.
+	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	// +optional
+	// +mapType=atomic
+	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
+
+	// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+	// +optional
+	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
+	// DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+	// Deprecated: Use serviceAccountName instead.
+	// +k8s:conversion-gen=false
+	// +optional
+	DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
+	// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+	// +optional
+	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
+
+	// NodeName indicates the node on which this pod is scheduled.
+	// If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName.
+	// Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod.
+	// This field should not be used to express a desire for the pod to be scheduled on a specific node.
+	// https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename
+	// +optional
+	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
+	// Host networking requested for this pod. Use the host's network namespace.
+	// If this option is set, the ports that will be used must be specified.
+	// Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
+	// Use the host's pid namespace.
+	// Optional: Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
+	// Use the host's ipc namespace.
+	// Optional: Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
+	// Share a single process namespace between all of the containers in a pod.
+	// When this is set, containers will be able to view and signal processes from other containers
+	// in the same pod, and the first process in each container will not be assigned PID 1.
+	// HostPID and ShareProcessNamespace cannot both be set.
+	// Optional: Default to false.
+	// +k8s:conversion-gen=false
+	// +optional
+	ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
+	// SecurityContext holds pod-level security attributes and common container settings.
+	// Optional: Defaults to empty.  See type description for default values of each field.
+	// +optional
+	SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
+	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+	// If specified, these secrets will be passed to individual puller implementations for them to use.
+	// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=name
+	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
+	// Specifies the hostname of the Pod
+	// If not specified, the pod's hostname will be set to a system-defined value.
+	// +optional
+	Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
+	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+	// If not specified, the pod will not have a domainname at all.
+	// +optional
+	Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
+	// If specified, the pod's scheduling constraints
+	// +optional
+	Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
+	// If specified, the pod will be dispatched by specified scheduler.
+	// If not specified, the pod will be dispatched by default scheduler.
+	// +optional
+	SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
+	// If specified, the pod's tolerations.
+	// +optional
+	// +listType=atomic
+	Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
+	// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+	// file if specified.
+	// +optional
+	// +patchMergeKey=ip
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=ip
+	HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"`
+	// If specified, indicates the pod's priority. "system-node-critical" and
+	// "system-cluster-critical" are two special keywords which indicate the
+	// highest priorities with the former being the highest priority. Any other
+	// name must be defined by creating a PriorityClass object with that name.
+	// If not specified, the pod priority will be default or zero if there is no
+	// default.
+	// +optional
+	PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"`
+	// The priority value. Various system components use this field to find the
+	// priority of the pod. When Priority Admission Controller is enabled, it
+	// prevents users from setting this field. The admission controller populates
+	// this field from PriorityClassName.
+	// The higher the value, the higher the priority.
+	// +optional
+	Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"`
+	// Specifies the DNS parameters of a pod.
+	// Parameters specified here will be merged to the generated DNS
+	// configuration based on DNSPolicy.
+	// +optional
+	DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
+	// If specified, all readiness gates will be evaluated for pod readiness.
+	// A pod is ready when all its containers are ready AND
+	// all conditions specified in the readiness gates have status equal to "True".
+	// More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
+	// +optional
+	// +listType=atomic
+	ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"`
+	// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+	// to run this pod.  If no RuntimeClass resource matches the named class, the pod will not be run.
+	// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+	// empty definition that uses the default runtime handler.
+	// More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
+	// +optional
+	RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"`
+	// EnableServiceLinks indicates whether information about services should be injected into pod's
+	// environment variables, matching the syntax of Docker links.
+	// Optional: Defaults to true.
+	// +optional
+	EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"`
+	// PreemptionPolicy is the Policy for preempting pods with lower priority.
+	// One of Never, PreemptLowerPriority.
+	// Defaults to PreemptLowerPriority if unset.
+	// +optional
+	PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"`
+	// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+	// This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+	// the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+	// The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+	// set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+	// defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+	// More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+	// +optional
+	Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"`
+	// TopologySpreadConstraints describes how a group of pods ought to spread across topology
+	// domains. Scheduler will schedule pods in a way which abides by the constraints.
+	// All topologySpreadConstraints are ANDed.
+	// +optional
+	// +patchMergeKey=topologyKey
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=topologyKey
+	// +listMapKey=whenUnsatisfiable
+	TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"`
+	// If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+	// In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+	// In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+	// If a pod does not have FQDN, this has no effect.
+	// Default to false.
+	// +optional
+	SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty" protobuf:"varint,35,opt,name=setHostnameAsFQDN"`
+	// Specifies the OS of the containers in the pod.
+	// Some pod and container fields are restricted if this is set.
+	//
+	// If the OS field is set to linux, the following fields must be unset:
+	// - securityContext.windowsOptions
+	//
+	// If the OS field is set to windows, the following fields must be unset:
+	// - spec.hostPID
+	// - spec.hostIPC
+	// - spec.hostUsers
+	// - spec.securityContext.appArmorProfile
+	// - spec.securityContext.seLinuxOptions
+	// - spec.securityContext.seccompProfile
+	// - spec.securityContext.fsGroup
+	// - spec.securityContext.fsGroupChangePolicy
+	// - spec.securityContext.sysctls
+	// - spec.shareProcessNamespace
+	// - spec.securityContext.runAsUser
+	// - spec.securityContext.runAsGroup
+	// - spec.securityContext.supplementalGroups
+	// - spec.securityContext.supplementalGroupsPolicy
+	// - spec.containers[*].securityContext.appArmorProfile
+	// - spec.containers[*].securityContext.seLinuxOptions
+	// - spec.containers[*].securityContext.seccompProfile
+	// - spec.containers[*].securityContext.capabilities
+	// - spec.containers[*].securityContext.readOnlyRootFilesystem
+	// - spec.containers[*].securityContext.privileged
+	// - spec.containers[*].securityContext.allowPrivilegeEscalation
+	// - spec.containers[*].securityContext.procMount
+	// - spec.containers[*].securityContext.runAsUser
+	// - spec.containers[*].securityContext.runAsGroup
+	// +optional
+	OS *PodOS `json:"os,omitempty" protobuf:"bytes,36,opt,name=os"`
+
+	// Use the host's user namespace.
+	// Optional: Default to true.
+	// If set to true or not present, the pod will be run in the host user namespace; this is useful
+	// when the pod needs a feature only available to the host user namespace, such as
+	// loading a kernel module with CAP_SYS_MODULE.
+	// When set to false, a new userns is created for the pod. Setting false is useful for
+	// mitigating container breakout vulnerabilities, while still allowing users to run their
+	// containers as root without actually having root privileges on the host.
+	// This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+	// +k8s:conversion-gen=false
+	// +optional
+	HostUsers *bool `json:"hostUsers,omitempty" protobuf:"bytes,37,opt,name=hostUsers"`
+
+	// SchedulingGates is an opaque list of values that, if specified, will block scheduling of the pod.
+	// If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
+	// scheduler will not attempt to schedule the pod.
+	//
+	// SchedulingGates can only be set at pod creation time, and can only be removed afterwards.
+	//
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	SchedulingGates []PodSchedulingGate `json:"schedulingGates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,38,opt,name=schedulingGates"`
+	// ResourceClaims defines which ResourceClaims must be allocated
+	// and reserved before the Pod is allowed to start. The resources
+	// will be made available to those containers which consume them
+	// by name.
+	//
+	// This is an alpha field and requires enabling the
+	// DynamicResourceAllocation feature gate.
+	//
+	// This field is immutable.
+	//
+	// +patchMergeKey=name
+	// +patchStrategy=merge,retainKeys
+	// +listType=map
+	// +listMapKey=name
+	// +featureGate=DynamicResourceAllocation
+	// +optional
+	ResourceClaims []PodResourceClaim `json:"resourceClaims,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,39,rep,name=resourceClaims"`
+	// Resources is the total amount of CPU and Memory resources required by all
+	// containers in the pod. It supports specifying Requests and Limits for
+	// "cpu" and "memory" resource names only. ResourceClaims are not supported.
+	//
+	// This field enables fine-grained control over resource allocation for the
+	// entire pod, allowing resource sharing among containers in a pod.
+	// TODO: For beta graduation, expand this comment with a detailed explanation.
+	//
+	// This is an alpha field and requires enabling the PodLevelResources feature
+	// gate.
+	//
+	// +featureGate=PodLevelResources
+	// +optional
+	Resources *ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,40,opt,name=resources"`
+}
+
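+// Editorial sketch, not part of the upstream API: one way a client might
+// populate the newer pod-level fields above. The literal values are
+// illustrative assumptions.
+var (
+	exampleSetFQDN   = true
+	exampleHostUsers = false
+
+	examplePodSpec = PodSpec{
+		SetHostnameAsFQDN: &exampleSetFQDN,     // expose the FQDN as the hostname
+		OS:                &PodOS{Name: Linux}, // restricts the windows-only fields listed above
+		HostUsers:         &exampleHostUsers,   // run the pod in a fresh user namespace
+	}
+)
+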
+// PodResourceClaim references exactly one ResourceClaim, either directly
+// or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim
+// for the pod.
+//
+// It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
+// Containers that need access to the ResourceClaim reference it with this name.
+type PodResourceClaim struct {
+	// Name uniquely identifies this resource claim inside the pod.
+	// This must be a DNS_LABEL.
+	Name string `json:"name" protobuf:"bytes,1,name=name"`
+
+	// Source is tombstoned since Kubernetes 1.31 where it got replaced with
+	// the inlined fields below.
+	//
+	// Source ClaimSource `json:"source,omitempty" protobuf:"bytes,2,name=source"`
+
+	// ResourceClaimName is the name of a ResourceClaim object in the same
+	// namespace as this pod.
+	//
+	// Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+	// be set.
+	ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,3,opt,name=resourceClaimName"`
+
+	// ResourceClaimTemplateName is the name of a ResourceClaimTemplate
+	// object in the same namespace as this pod.
+	//
+	// The template will be used to create a new ResourceClaim, which will
+	// be bound to this pod. When this pod is deleted, the ResourceClaim
+	// will also be deleted. The pod name and resource name, along with a
+	// generated component, will be used to form a unique name for the
+	// ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
+	//
+	// This field is immutable and no changes will be made to the
+	// corresponding ResourceClaim by the control plane after creating the
+	// ResourceClaim.
+	//
+	// Exactly one of ResourceClaimName and ResourceClaimTemplateName must
+	// be set.
+	ResourceClaimTemplateName *string `json:"resourceClaimTemplateName,omitempty" protobuf:"bytes,4,opt,name=resourceClaimTemplateName"`
+}
+
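+// Editorial sketch, not part of the upstream API: referencing a
+// ResourceClaimTemplate by name from a pod. "gpu-claim-template" and the
+// claim name "gpu" are assumed identifiers.
+var (
+	exampleClaimTemplate = "gpu-claim-template"
+
+	examplePodResourceClaim = PodResourceClaim{
+		Name:                      "gpu", // the name containers use to reference the claim
+		ResourceClaimTemplateName: &exampleClaimTemplate,
+	}
+)
+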
+// PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim
+// which references a ResourceClaimTemplate. It stores the generated name for
+// the corresponding ResourceClaim.
+type PodResourceClaimStatus struct {
+	// Name uniquely identifies this resource claim inside the pod.
+	// This must match the name of an entry in pod.spec.resourceClaims,
+	// which implies that the string must be a DNS_LABEL.
+	Name string `json:"name" protobuf:"bytes,1,name=name"`
+
+	// ResourceClaimName is the name of the ResourceClaim that was
+	// generated for the Pod in the namespace of the Pod. If this is
+	// unset, then generating a ResourceClaim was not necessary. The
+	// pod.spec.resourceClaims entry can be ignored in this case.
+	//
+	// +optional
+	ResourceClaimName *string `json:"resourceClaimName,omitempty" protobuf:"bytes,2,opt,name=resourceClaimName"`
+}
+
+// OSName is the set of operating systems that can be used in the OS field.
+type OSName string
+
+// These are the valid values for OSName.
+const (
+	Linux   OSName = "linux"
+	Windows OSName = "windows"
+)
+
+// PodOS defines the OS parameters of a pod.
+type PodOS struct {
+	// Name is the name of the operating system. The currently supported values are linux and windows.
+	// Additional values may be defined in the future and can be one of:
+	// https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+	// Clients should expect to handle additional values and treat unrecognized values in this field as os: null.
+	Name OSName `json:"name" protobuf:"bytes,1,opt,name=name"`
+}
+
+// PodSchedulingGate is associated to a Pod to guard its scheduling.
+type PodSchedulingGate struct {
+	// Name of the scheduling gate.
+	// Each scheduling gate must have a unique name field.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+}
+
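+// Editorial sketch, not part of the upstream API: a pod created with a
+// non-empty schedulingGates list stays SchedulingGated until an external
+// controller clears the gate by removing the entry from the pod spec.
+// The gate name is an assumption.
+var exampleGatedSpec = PodSpec{
+	SchedulingGates: []PodSchedulingGate{{Name: "example.com/quota-check"}},
+}
+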
+// +enum
+type UnsatisfiableConstraintAction string
+
+const (
+	// DoNotSchedule instructs the scheduler not to schedule the pod
+	// when constraints are not satisfied.
+	DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule"
+	// ScheduleAnyway instructs the scheduler to schedule the pod
+	// even if constraints are not satisfied.
+	ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway"
+)
+
+// NodeInclusionPolicy defines the type of node inclusion policy
+// +enum
+type NodeInclusionPolicy string
+
+const (
+	// NodeInclusionPolicyIgnore means ignore this scheduling directive when calculating pod topology spread skew.
+	NodeInclusionPolicyIgnore NodeInclusionPolicy = "Ignore"
+	// NodeInclusionPolicyHonor means use this scheduling directive when calculating pod topology spread skew.
+	NodeInclusionPolicyHonor NodeInclusionPolicy = "Honor"
+)
+
+// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+type TopologySpreadConstraint struct {
+	// MaxSkew describes the degree to which pods may be unevenly distributed.
+	// When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+	// between the number of matching pods in the target topology and the global minimum.
+	// The global minimum is the minimum number of matching pods in an eligible domain
+	// or zero if the number of eligible domains is less than MinDomains.
+	// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+	// labelSelector spread as 2/2/1:
+	// In this case, the global minimum is 1.
+	// +-------+-------+-------+
+	// | zone1 | zone2 | zone3 |
+	// +-------+-------+-------+
+	// |  P P  |  P P  |   P   |
+	// +-------+-------+-------+
+	// - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+	// scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+	// violate MaxSkew(1).
+	// - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+	// When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+	// to topologies that satisfy it.
+	// It's a required field. Default value is 1 and 0 is not allowed.
+	MaxSkew int32 `json:"maxSkew" protobuf:"varint,1,opt,name=maxSkew"`
+	// TopologyKey is the key of node labels. Nodes that have a label with this key
+	// and identical values are considered to be in the same topology.
+	// We consider each <key, value> as a "bucket", and try to put a balanced number
+	// of pods into each bucket.
+	// We define a domain as a particular instance of a topology.
+	// Also, we define an eligible domain as a domain whose nodes meet the requirements of
+	// nodeAffinityPolicy and nodeTaintsPolicy.
+	// e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+	// And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+	// It's a required field.
+	TopologyKey string `json:"topologyKey" protobuf:"bytes,2,opt,name=topologyKey"`
+	// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+	// the spread constraint.
+	// - DoNotSchedule (default) tells the scheduler not to schedule it.
+	// - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+	//   but giving higher precedence to topologies that would help reduce the
+	//   skew.
+	// A constraint is considered "Unsatisfiable" for an incoming pod
+	// if and only if every possible node assignment for that pod would violate
+	// "MaxSkew" on some topology.
+	// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+	// labelSelector spread as 3/1/1:
+	// +-------+-------+-------+
+	// | zone1 | zone2 | zone3 |
+	// +-------+-------+-------+
+	// | P P P |   P   |   P   |
+	// +-------+-------+-------+
+	// If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+	// to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+	// MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+	// won't make it *more* imbalanced.
+	// It's a required field.
+	WhenUnsatisfiable UnsatisfiableConstraintAction `json:"whenUnsatisfiable" protobuf:"bytes,3,opt,name=whenUnsatisfiable,casttype=UnsatisfiableConstraintAction"`
+	// LabelSelector is used to find matching pods.
+	// Pods that match this label selector are counted to determine the number of pods
+	// in their corresponding topology domain.
+	// +optional
+	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,4,opt,name=labelSelector"`
+	// MinDomains indicates a minimum number of eligible domains.
+	// When the number of eligible domains with matching topology keys is less than minDomains,
+	// Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+	// And when the number of eligible domains with matching topology keys is equal to or greater than minDomains,
+	// this value has no effect on scheduling.
+	// As a result, when the number of eligible domains is less than minDomains,
+	// the scheduler won't schedule more than maxSkew Pods to those domains.
+	// If value is nil, the constraint behaves as if MinDomains is equal to 1.
+	// Valid values are integers greater than 0.
+	// When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+	//
+	// For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+	// labelSelector spread as 2/2/2:
+	// +-------+-------+-------+
+	// | zone1 | zone2 | zone3 |
+	// +-------+-------+-------+
+	// |  P P  |  P P  |  P P  |
+	// +-------+-------+-------+
+	// The number of domains is less than 5 (MinDomains), so the "global minimum" is treated as 0.
+	// In this situation, a new pod with the same labelSelector cannot be scheduled,
+	// because the computed skew will be 3 (3 - 0) if the new Pod is scheduled to any of the three zones;
+	// this would violate MaxSkew.
+	// +optional
+	MinDomains *int32 `json:"minDomains,omitempty" protobuf:"varint,5,opt,name=minDomains"`
+	// NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+	// when calculating pod topology spread skew. Options are:
+	// - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+	// - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+	//
+	// If this value is nil, the behavior is equivalent to the Honor policy.
+	// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+	// +optional
+	NodeAffinityPolicy *NodeInclusionPolicy `json:"nodeAffinityPolicy,omitempty" protobuf:"bytes,6,opt,name=nodeAffinityPolicy"`
+	// NodeTaintsPolicy indicates how we will treat node taints when calculating
+	// pod topology spread skew. Options are:
+	// - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+	// has a toleration, are included.
+	// - Ignore: node taints are ignored. All nodes are included.
+	//
+	// If this value is nil, the behavior is equivalent to the Ignore policy.
+	// This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+	// +optional
+	NodeTaintsPolicy *NodeInclusionPolicy `json:"nodeTaintsPolicy,omitempty" protobuf:"bytes,7,opt,name=nodeTaintsPolicy"`
+	// MatchLabelKeys is a set of pod label keys to select the pods over which
+	// spreading will be calculated. The keys are used to lookup values from the
+	// incoming pod labels, those key-value labels are ANDed with labelSelector
+	// to select the group of existing pods over which spreading will be calculated
+	// for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+	// MatchLabelKeys cannot be set when LabelSelector isn't set.
+	// Keys that don't exist in the incoming pod labels will
+	// be ignored. A null or empty list means only match against labelSelector.
+	//
+	// This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+	// +listType=atomic
+	// +optional
+	MatchLabelKeys []string `json:"matchLabelKeys,omitempty" protobuf:"bytes,8,opt,name=matchLabelKeys"`
+}
+
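+// Editorial sketch, not part of the upstream API: spreading one app's pods
+// evenly across zones, mirroring the 2/2/1 example in the MaxSkew comment
+// above. The label key/value and MinDomains value are assumptions.
+var (
+	exampleMinDomains int32 = 3
+
+	exampleSpread = TopologySpreadConstraint{
+		MaxSkew:           1,
+		TopologyKey:       "topology.kubernetes.io/zone",
+		WhenUnsatisfiable: DoNotSchedule, // MinDomains requires DoNotSchedule
+		MinDomains:        &exampleMinDomains,
+		LabelSelector: &metav1.LabelSelector{
+			MatchLabels: map[string]string{"app": "web"},
+		},
+	}
+)
+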
+const (
+	// The default value for enableServiceLinks attribute.
+	DefaultEnableServiceLinks = true
+)
+
+// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+// pod's hosts file.
+type HostAlias struct {
+	// IP address of the host file entry.
+	// +required
+	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
+	// Hostnames for the above IP address.
+	// +listType=atomic
+	Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
+}
+
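+// Editorial sketch, not part of the upstream API: injecting an extra
+// /etc/hosts entry into the pod. The address and hostname are assumptions.
+var exampleHostAlias = HostAlias{
+	IP:        "10.0.0.10",
+	Hostnames: []string{"internal.example.com"},
+}
+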
+// PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
+// when volume is mounted.
+// +enum
+type PodFSGroupChangePolicy string
+
+const (
+	// FSGroupChangeOnRootMismatch indicates that volume's ownership and permissions will be changed
+	// only when permission and ownership of root directory does not match with expected
+	// permissions on the volume. This can help shorten the time it takes to change
+	// ownership and permissions of a volume.
+	FSGroupChangeOnRootMismatch PodFSGroupChangePolicy = "OnRootMismatch"
+	// FSGroupChangeAlways indicates that volume's ownership and permissions
+	// should always be changed whenever volume is mounted inside a Pod. This is the default
+	// behavior.
+	FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
+)
+
+// SupplementalGroupsPolicy defines how supplemental groups
+// of the first container processes are calculated.
+// +enum
+type SupplementalGroupsPolicy string
+
+const (
+	// SupplementalGroupsPolicyMerge means that the container's provided
+	// SupplementalGroups and FsGroup (specified in SecurityContext) will be
+	// merged with the primary user's groups as defined in the container image
+	// (in /etc/group).
+	SupplementalGroupsPolicyMerge SupplementalGroupsPolicy = "Merge"
+	// SupplementalGroupsPolicyStrict means that the container's provided
+	// SupplementalGroups and FsGroup (specified in SecurityContext) will be
+	// used instead of any groups defined in the container image.
+	SupplementalGroupsPolicyStrict SupplementalGroupsPolicy = "Strict"
+)
+
+// PodSELinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+type PodSELinuxChangePolicy string
+
+const (
+	// Recursive relabeling of all Pod volumes by the container runtime.
+	// This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+	SELinuxChangePolicyRecursive PodSELinuxChangePolicy = "Recursive"
+	// MountOption mounts all eligible Pod volumes with `-o context` mount option.
+	// This requires all Pods that share the same volume to use the same SELinux label.
+	// It is not possible to share the same volume among privileged and unprivileged Pods.
+	// Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+	// whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+	// CSIDriver instance. Other volumes are always re-labelled recursively.
+	SELinuxChangePolicyMountOption PodSELinuxChangePolicy = "MountOption"
+)
+
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext.  Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
+type PodSecurityContext struct {
+	// The SELinux context to be applied to all containers.
+	// If unspecified, the container runtime will allocate a random SELinux context for each
+	// container.  May also be set in SecurityContext.  If set in
+	// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+	// takes precedence for that container.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
+	// The Windows specific settings applied to all containers.
+	// If unspecified, the options within a container's SecurityContext will be used.
+	// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// Note that this field cannot be set when spec.os.name is linux.
+	// +optional
+	WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"`
+	// The UID to run the entrypoint of the container process.
+	// Defaults to user specified in image metadata if unspecified.
+	// May also be set in SecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence
+	// for that container.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"`
+	// The GID to run the entrypoint of the container process.
+	// Uses runtime default if unset.
+	// May also be set in SecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence
+	// for that container.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,6,opt,name=runAsGroup"`
+	// Indicates that the container must run as a non-root user.
+	// If true, the Kubelet will validate the image at runtime to ensure that it
+	// does not run as UID 0 (root) and fail to start the container if it does.
+	// If unset or false, no such validation will be performed.
+	// May also be set in SecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
+	// A list of groups applied to the first process run in each container, in
+	// addition to the container's primary GID and fsGroup (if specified).  If
+	// the SupplementalGroupsPolicy feature is enabled, the
+	// supplementalGroupsPolicy field determines whether these are in addition
+	// to or instead of any group memberships defined in the container image.
+	// If unspecified, no additional groups are added, though group memberships
+	// defined in the container image may still be used, depending on the
+	// supplementalGroupsPolicy field.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	// +listType=atomic
+	SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
+	// Defines how supplemental groups of the first container processes are calculated.
+	// Valid values are "Merge" and "Strict". If not specified, "Merge" is used.
+	// (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled
+	// and the container runtime must implement support for this feature.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// TODO: update the default value to "Merge" when spec.os.name is not windows in v1.34
+	// +featureGate=SupplementalGroupsPolicy
+	// +optional
+	SupplementalGroupsPolicy *SupplementalGroupsPolicy `json:"supplementalGroupsPolicy,omitempty" protobuf:"bytes,12,opt,name=supplementalGroupsPolicy"`
+	// A special supplemental group that applies to all containers in a pod.
+	// Some volume types allow the Kubelet to change the ownership of that volume
+	// to be owned by the pod:
+	//
+	// 1. The owning GID will be the FSGroup
+	// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+	// 3. The permission bits are OR'd with rw-rw----
+	//
+	// If unset, the Kubelet will not modify the ownership and permissions of any volume.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"`
+	// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+	// sysctls (by the container runtime) might fail to launch.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	// +listType=atomic
+	Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"`
+	// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+	// before being exposed inside Pod. This field will only apply to
+	// volume types which support fsGroup based ownership (and permissions).
+	// It will have no effect on ephemeral volume types such as: secret, configmaps
+	// and emptydir.
+	// Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"`
+	// The seccomp options to use by the containers in this pod.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,10,opt,name=seccompProfile"`
+	// appArmorProfile is the AppArmor options to use by the containers in this pod.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	AppArmorProfile *AppArmorProfile `json:"appArmorProfile,omitempty" protobuf:"bytes,11,opt,name=appArmorProfile"`
+	// seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod.
+	// It has no effect on nodes that do not support SELinux or on volumes that do not support SELinux.
+	// Valid values are "MountOption" and "Recursive".
+	//
+	// "Recursive" means relabeling of all files on all Pod volumes by the container runtime.
+	// This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.
+	//
+	// "MountOption" mounts all eligible Pod volumes with `-o context` mount option.
+	// This requires all Pods that share the same volume to use the same SELinux label.
+	// It is not possible to share the same volume among privileged and unprivileged Pods.
+	// Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes
+	// whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their
+	// CSIDriver instance. Other volumes are always re-labelled recursively.
+	// "MountOption" value is allowed only when SELinuxMount feature gate is enabled.
+	//
+	// If not specified and SELinuxMount feature gate is enabled, "MountOption" is used.
+	// If not specified and SELinuxMount feature gate is disabled, "MountOption" is used for ReadWriteOncePod volumes
+	// and "Recursive" for all other volumes.
+	//
+	// This field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.
+	//
+	// All Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +featureGate=SELinuxChangePolicy
+	// +optional
+	SELinuxChangePolicy *PodSELinuxChangePolicy `json:"seLinuxChangePolicy,omitempty" protobuf:"bytes,13,opt,name=seLinuxChangePolicy"`
+}
+
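+// Editorial sketch, not part of the upstream API: a restrictive pod-level
+// security context combining several of the fields above. The UID/GID
+// values are illustrative assumptions.
+var (
+	exampleUID      int64 = 1000
+	exampleFSGroup  int64 = 2000
+	exampleNonRoot        = true
+	exampleFSPolicy       = FSGroupChangeOnRootMismatch // skip relabeling when ownership already matches
+
+	exampleSecurityContext = PodSecurityContext{
+		RunAsUser:           &exampleUID,
+		RunAsNonRoot:        &exampleNonRoot,
+		FSGroup:             &exampleFSGroup,
+		FSGroupChangePolicy: &exampleFSPolicy,
+		SeccompProfile:      &SeccompProfile{Type: SeccompProfileTypeRuntimeDefault},
+	}
+)
+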
+// SeccompProfile defines a pod/container's seccomp profile settings.
+// Only one profile source may be set.
+// +union
+type SeccompProfile struct {
+	// type indicates which kind of seccomp profile will be applied.
+	// Valid options are:
+	//
+	// Localhost - a profile defined in a file on the node should be used.
+	// RuntimeDefault - the container runtime default profile should be used.
+	// Unconfined - no profile should be applied.
+	// +unionDiscriminator
+	Type SeccompProfileType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=SeccompProfileType"`
+	// localhostProfile indicates a profile defined in a file on the node should be used.
+	// The profile must be preconfigured on the node to work.
+	// Must be a descending path, relative to the kubelet's configured seccomp profile location.
+	// Must be set if type is "Localhost". Must NOT be set for any other type.
+	// +optional
+	LocalhostProfile *string `json:"localhostProfile,omitempty" protobuf:"bytes,2,opt,name=localhostProfile"`
+}
+
+// SeccompProfileType defines the supported seccomp profile types.
+// +enum
+type SeccompProfileType string
+
+const (
+	// SeccompProfileTypeUnconfined indicates no seccomp profile is applied (A.K.A. unconfined).
+	SeccompProfileTypeUnconfined SeccompProfileType = "Unconfined"
+	// SeccompProfileTypeRuntimeDefault represents the default container runtime seccomp profile.
+	SeccompProfileTypeRuntimeDefault SeccompProfileType = "RuntimeDefault"
+	// SeccompProfileTypeLocalhost indicates a profile defined in a file on the node should be used.
+	// The file's location relative to <kubelet-root-dir>/seccomp.
+	SeccompProfileTypeLocalhost SeccompProfileType = "Localhost"
+)
+
+// AppArmorProfile defines a pod or container's AppArmor settings.
+// +union
+type AppArmorProfile struct {
+	// type indicates which kind of AppArmor profile will be applied.
+	// Valid options are:
+	//   Localhost - a profile pre-loaded on the node.
+	//   RuntimeDefault - the container runtime's default profile.
+	//   Unconfined - no AppArmor enforcement.
+	// +unionDiscriminator
+	Type AppArmorProfileType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=AppArmorProfileType"`
+
+	// localhostProfile indicates a profile loaded on the node that should be used.
+	// The profile must be preconfigured on the node to work.
+	// Must match the loaded name of the profile.
+	// Must be set if and only if type is "Localhost".
+	// +optional
+	LocalhostProfile *string `json:"localhostProfile,omitempty" protobuf:"bytes,2,opt,name=localhostProfile"`
+}
+
+// +enum
+type AppArmorProfileType string
+
+const (
+	// AppArmorProfileTypeUnconfined indicates that no AppArmor profile should be enforced.
+	AppArmorProfileTypeUnconfined AppArmorProfileType = "Unconfined"
+	// AppArmorProfileTypeRuntimeDefault indicates that the container runtime's default AppArmor
+	// profile should be used.
+	AppArmorProfileTypeRuntimeDefault AppArmorProfileType = "RuntimeDefault"
+	// AppArmorProfileTypeLocalhost indicates that a profile pre-loaded on the node should be used.
+	AppArmorProfileTypeLocalhost AppArmorProfileType = "Localhost"
+)
+
+// PodQOSClass defines the supported qos classes of Pods.
+// +enum
+type PodQOSClass string
+
+const (
+	// PodQOSGuaranteed is the Guaranteed qos class.
+	PodQOSGuaranteed PodQOSClass = "Guaranteed"
+	// PodQOSBurstable is the Burstable qos class.
+	PodQOSBurstable PodQOSClass = "Burstable"
+	// PodQOSBestEffort is the BestEffort qos class.
+	PodQOSBestEffort PodQOSClass = "BestEffort"
+)
+
+// PodDNSConfig defines the DNS parameters of a pod in addition to
+// those generated from DNSPolicy.
+type PodDNSConfig struct {
+	// A list of DNS name server IP addresses.
+	// This will be appended to the base nameservers generated from DNSPolicy.
+	// Duplicated nameservers will be removed.
+	// +optional
+	// +listType=atomic
+	Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"`
+	// A list of DNS search domains for host-name lookup.
+	// This will be appended to the base search paths generated from DNSPolicy.
+	// Duplicated search paths will be removed.
+	// +optional
+	// +listType=atomic
+	Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"`
+	// A list of DNS resolver options.
+	// This will be merged with the base options generated from DNSPolicy.
+	// Duplicated entries will be removed. Resolution options given in Options
+	// will override those that appear in the base DNSPolicy.
+	// +optional
+	// +listType=atomic
+	Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"`
+}
+
+// PodDNSConfigOption defines DNS resolver options of a pod.
+type PodDNSConfigOption struct {
+	// Name is this DNS resolver option's name.
+	// Required.
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+	// Value is this DNS resolver option's value.
+	// +optional
+	Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+}
+
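+// Editorial sketch, not part of the upstream API: supplementing the
+// resolver configuration generated from DNSPolicy. The addresses, search
+// path, and option values are assumptions.
+var (
+	exampleNdots = "2"
+
+	exampleDNSConfig = PodDNSConfig{
+		Nameservers: []string{"10.0.0.53"},
+		Searches:    []string{"svc.cluster.local"},
+		Options:     []PodDNSConfigOption{{Name: "ndots", Value: &exampleNdots}},
+	}
+)
+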
+// PodIP represents a single IP address allocated to the pod.
+type PodIP struct {
+	// IP is the IP address assigned to the pod
+	// +required
+	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
+}
+
+// HostIP represents a single IP address allocated to the host.
+type HostIP struct {
+	// IP is the IP address assigned to the host
+	// +required
+	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
+}
+
+// EphemeralContainerCommon is a copy of all fields in Container to be inlined in
+// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer
+// to Container and allows separate documentation for the fields of EphemeralContainer.
+// When a new field is added to Container it must be added here as well.
+type EphemeralContainerCommon struct {
+	// Name of the ephemeral container specified as a DNS_LABEL.
+	// This name must be unique among all containers, init containers and ephemeral containers.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// Container image name.
+	// More info: https://kubernetes.io/docs/concepts/containers/images
+	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
+	// Entrypoint array. Not executed within a shell.
+	// The image's ENTRYPOINT is used if this is not provided.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+	// of whether the variable exists or not. Cannot be updated.
+	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+	// +optional
+	// +listType=atomic
+	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
+	// Arguments to the entrypoint.
+	// The image's CMD is used if this is not provided.
+	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+	// of whether the variable exists or not. Cannot be updated.
+	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+	// +optional
+	// +listType=atomic
+	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
+	// Container's working directory.
+	// If not specified, the container runtime's default will be used, which
+	// might be configured in the container image.
+	// Cannot be updated.
+	// +optional
+	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
+	// Ports are not allowed for ephemeral containers.
+	// +optional
+	// +patchMergeKey=containerPort
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=containerPort
+	// +listMapKey=protocol
+	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
+	// List of sources to populate environment variables in the container.
+	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+	// will be reported as an event when the container is starting. When a key exists in multiple
+	// sources, the value associated with the last source will take precedence.
+	// Values defined by an Env with a duplicate key will take precedence.
+	// Cannot be updated.
+	// +optional
+	// +listType=atomic
+	EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
+	// List of environment variables to set in the container.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=name
+	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
+	// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
+	// already allocated to the pod.
+	// +optional
+	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
+	// Resources resize policy for the container.
+	// +featureGate=InPlacePodVerticalScaling
+	// +optional
+	// +listType=atomic
+	ResizePolicy []ContainerResizePolicy `json:"resizePolicy,omitempty" protobuf:"bytes,23,rep,name=resizePolicy"`
+	// Restart policy for the container to manage the restart behavior of each
+	// container within a pod.
+	// This may only be set for init containers. You cannot set this field on
+	// ephemeral containers.
+	// +featureGate=SidecarContainers
+	// +optional
+	RestartPolicy *ContainerRestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,24,opt,name=restartPolicy,casttype=ContainerRestartPolicy"`
+	// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+	// Cannot be updated.
+	// +optional
+	// +patchMergeKey=mountPath
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=mountPath
+	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
+	// volumeDevices is the list of block devices to be used by the container.
+	// +patchMergeKey=devicePath
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=devicePath
+	// +optional
+	VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
+	// Probes are not allowed for ephemeral containers.
+	// +optional
+	LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
+	// Probes are not allowed for ephemeral containers.
+	// +optional
+	ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
+	// Probes are not allowed for ephemeral containers.
+	// +optional
+	StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
+	// Lifecycle is not allowed for ephemeral containers.
+	// +optional
+	Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
+	// Optional: Path at which the file to which the container's termination message
+	// will be written is mounted into the container's filesystem.
+	// Message written is intended to be brief final status, such as an assertion failure message.
+	// Will be truncated by the node if greater than 4096 bytes. The total message length across
+	// all containers will be limited to 12kb.
+	// Defaults to /dev/termination-log.
+	// Cannot be updated.
+	// +optional
+	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
+	// Indicate how the termination message should be populated. File will use the contents of
+	// terminationMessagePath to populate the container status message on both success and failure.
+	// FallbackToLogsOnError will use the last chunk of container log output if the termination
+	// message file is empty and the container exited with an error.
+	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+	// Defaults to File.
+	// Cannot be updated.
+	// +optional
+	TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
+	// Image pull policy.
+	// One of Always, Never, IfNotPresent.
+	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+	// +optional
+	ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
+	// Optional: SecurityContext defines the security options the ephemeral container should be run with.
+	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+	// +optional
+	SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
+
+	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
+	// and shouldn't be used for general purpose containers.
+
+	// Whether this container should allocate a buffer for stdin in the container runtime. If this
+	// is not set, reads from stdin in the container will always result in EOF.
+	// Default is false.
+	// +optional
+	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
+	// Whether the container runtime should close the stdin channel after it has been opened by
+	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
+	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+	// at which time stdin is closed and remains closed until the container is restarted. If this
+	// flag is false, a container process that reads from stdin will never receive an EOF.
+	// Default is false.
+	// +optional
+	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
+	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+	// Default is false.
+	// +optional
+	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
+}
+
+// EphemeralContainerCommon converts to Container. All fields must be kept in sync between
+// these two types.
+var _ = Container(EphemeralContainerCommon{})
+
+// An EphemeralContainer is a temporary container that you may add to an existing Pod for
+// user-initiated activities such as debugging. Ephemeral containers have no resource or
+// scheduling guarantees, and they will not be restarted when they exit or when a Pod is
+// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the
+// Pod to exceed its resource allocation.
+//
+// To add an ephemeral container, use the ephemeralcontainers subresource of an existing
+// Pod. Ephemeral containers may not be removed or restarted.
+type EphemeralContainer struct {
+	// Ephemeral containers have all of the fields of Container, plus additional fields
+	// specific to ephemeral containers. Fields in common with Container are in the
+	// following inlined struct so that an EphemeralContainer may easily be converted
+	// to a Container.
+	EphemeralContainerCommon `json:",inline" protobuf:"bytes,1,req"`
+
+	// If set, the name of the container from PodSpec that this ephemeral container targets.
+	// The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+	// If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+	//
+	// The container runtime must implement support for this feature. If the runtime does not
+	// support namespace targeting then the result of setting this field is undefined.
+	// +optional
+	TargetContainerName string `json:"targetContainerName,omitempty" protobuf:"bytes,2,opt,name=targetContainerName"`
+}
+
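+// Editorial sketch, not part of the upstream API: a debug container to be
+// added via the ephemeralcontainers subresource, sharing the namespaces of
+// an existing "app" container. The image and names are assumptions.
+var exampleDebugContainer = EphemeralContainer{
+	EphemeralContainerCommon: EphemeralContainerCommon{
+		Name:  "debugger",
+		Image: "busybox",
+		Stdin: true, // keep stdin open for interactive debugging
+		TTY:   true,
+	},
+	TargetContainerName: "app",
+}
+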
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system, especially if the node that hosts the pod cannot contact the control
+// plane.
+type PodStatus struct {
+	// The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
+	// The conditions array, the reason and message fields, and the individual container status
+	// arrays contain more detail about the pod's status.
+	// There are five possible phase values:
+	//
+	// Pending: The pod has been accepted by the Kubernetes system, but one or more of the
+	// container images has not been created. This includes time before being scheduled as
+	// well as time spent downloading images over the network, which could take a while.
+	// Running: The pod has been bound to a node, and all of the containers have been created.
+	// At least one container is still running, or is in the process of starting or restarting.
+	// Succeeded: All containers in the pod have terminated in success, and will not be restarted.
+	// Failed: All containers in the pod have terminated, and at least one container has
+	// terminated in failure. The container either exited with non-zero status or was terminated
+	// by the system.
+	// Unknown: For some reason the state of the pod could not be obtained, typically due to an
+	// error in communicating with the host of the pod.
+	//
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
+	// +optional
+	Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"`
+	// Current service state of pod.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+	// A human readable message indicating details about why the pod is in this condition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
+	// A brief CamelCase message indicating details about why the pod is in this state.
+	// e.g. 'Evicted'
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be
+	// scheduled right away as preemption victims receive their graceful termination periods.
+	// This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
+	// to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
+	// give the resources on this node to a higher priority pod that is created after preemption.
+	// As a result, this field may be different than PodSpec.nodeName when the pod is
+	// scheduled.
+	// +optional
+	NominatedNodeName string `json:"nominatedNodeName,omitempty" protobuf:"bytes,11,opt,name=nominatedNodeName"`
+
+	// hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet.
+	// A pod can be assigned to a node whose kubelet has a problem, which in turn means that HostIP will
+	// not be updated even if a node is assigned to the pod.
+	// +optional
+	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
+
+	// hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must
+	// match the hostIP field. This list is empty if the pod has not started yet.
+	// A pod can be assigned to a node whose kubelet has a problem, which in turn means that HostIPs will
+	// not be updated even if a node is assigned to this pod.
+	// +optional
+	// +patchStrategy=merge
+	// +patchMergeKey=ip
+	// +listType=atomic
+	HostIPs []HostIP `json:"hostIPs,omitempty" protobuf:"bytes,16,rep,name=hostIPs" patchStrategy:"merge" patchMergeKey:"ip"`
+
+	// podIP address allocated to the pod. Routable at least within the cluster.
+	// Empty if not yet allocated.
+	// +optional
+	PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
+
+	// podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must
+	// match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list
+	// is empty if no IPs have been allocated yet.
+	// +optional
+	// +patchStrategy=merge
+	// +patchMergeKey=ip
+	// +listType=map
+	// +listMapKey=ip
+	PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"`
+
+	// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
+	// This is before the Kubelet pulled the container image(s) for the pod.
+	// +optional
+	StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
+
+	// Statuses of init containers in this pod. The most recent successful non-restartable
+	// init container will have ready = true; the most recently started container will have
+	// startTime set.
+	// Each init container in the pod should have at most one status in this list,
+	// and all statuses should be for containers in the pod.
+	// However this is not enforced.
+	// If a status for a non-existent container is present in the list, or the list has duplicate names,
+	// the behavior of various Kubernetes components is not defined and those statuses might be
+	// ignored.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status
+	// +listType=atomic
+	InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
+
+	// Statuses of containers in this pod.
+	// Each container in the pod should have at most one status in this list,
+	// and all statuses should be for containers in the pod.
+	// However this is not enforced.
+	// If a status for a non-existent container is present in the list, or the list has duplicate names,
+	// the behavior of various Kubernetes components is not defined and those statuses might be
+	// ignored.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+	// +optional
+	// +listType=atomic
+	ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
+
+	// The Quality of Service (QOS) classification assigned to the pod based on resource requirements
+	// See PodQOSClass type for available QOS classes
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes
+	// +optional
+	QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"`
+
+	// Statuses for any ephemeral containers that have run in this pod.
+	// Each ephemeral container in the pod should have at most one status in this list,
+	// and all statuses should be for containers in the pod.
+	// However this is not enforced.
+	// If a status for a non-existent container is present in the list, or the list has duplicate names,
+	// the behavior of various Kubernetes components is not defined and those statuses might be
+	// ignored.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
+	// +optional
+	// +listType=atomic
+	EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"`
+
+	// Status of resources resize desired for pod's containers.
+	// It is empty if no resources resize is pending.
+	// Any changes to container resources will automatically set this to "Proposed".
+	// +featureGate=InPlacePodVerticalScaling
+	// +optional
+	Resize PodResizeStatus `json:"resize,omitempty" protobuf:"bytes,14,opt,name=resize,casttype=PodResizeStatus"`
+
+	// Status of resource claims.
+	// +patchMergeKey=name
+	// +patchStrategy=merge,retainKeys
+	// +listType=map
+	// +listMapKey=name
+	// +featureGate=DynamicResourceAllocation
+	// +optional
+	ResourceClaimStatuses []PodResourceClaimStatus `json:"resourceClaimStatuses,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,15,rep,name=resourceClaimStatuses"`
+}
+
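+// Editorial sketch, not part of the upstream API: a helper showing how the
+// conditions list above is typically consumed. PodReady and ConditionTrue
+// are the core/v1 constants defined elsewhere in this package.
+func examplePodIsReady(status PodStatus) bool {
+	for _, c := range status.Conditions {
+		if c.Type == PodReady && c.Status == ConditionTrue {
+			return true
+		}
+	}
+	return false
+}
+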
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// PodStatusResult is a wrapper for PodStatus returned by the kubelet that can be encoded/decoded
+type PodStatusResult struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	// Most recently observed status of the pod.
+	// This data may not be up to date.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
+}
+
+// +genclient
+// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
+// +genclient:method=UpdateResize,verb=update,subresource=resize
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// Pod is a collection of containers that can run on a host. This resource is created
+// by clients and scheduled onto hosts.
+type Pod struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of the pod.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Most recently observed status of the pod.
+	// This data may not be up to date.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// PodList is a list of Pods.
+type PodList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of pods.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md
+	Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// PodTemplateSpec describes the data a pod should have when created from a template
+type PodTemplateSpec struct {
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired behavior of the pod.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// PodTemplate describes a template for creating copies of a predefined pod.
+type PodTemplate struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Template defines the pods that will be created from this pod template.
+	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// PodTemplateList is a list of PodTemplates.
+type PodTemplateList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of pod templates
+	Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+type ReplicationControllerSpec struct {
+	// Replicas is the number of desired replicas.
+	// This is a pointer to distinguish between explicit zero and unspecified.
+	// Defaults to 1.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its container crashing, for it to be considered available.
+	// Defaults to 0 (pod will be considered available as soon as it is ready)
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+	// Selector is a label query over pods that should match the Replicas count.
+	// If Selector is empty, it is defaulted to the labels present on the Pod template.
+	// Label keys and values must match in order for a pod to be controlled by this
+	// replication controller.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	// +mapType=atomic
+	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
+
+	// TemplateRef is a reference to an object that describes the pod that will be created if
+	// insufficient replicas are detected.
+	// +optional
+	// TemplateRef *ObjectReference `json:"templateRef,omitempty"`
+
+	// Template is the object that describes the pod that will be created if
+	// insufficient replicas are detected. This takes precedence over a TemplateRef.
+	// The only allowed template.spec.restartPolicy value is "Always".
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	// +optional
+	Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
+}
+
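+// Editorial sketch, not part of the upstream API: a minimal
+// ReplicationControllerSpec. The selector must match the template's
+// labels; the names and replica count are assumptions.
+var (
+	exampleReplicas int32 = 3
+
+	exampleRCSpec = ReplicationControllerSpec{
+		Replicas: &exampleReplicas,
+		Selector: map[string]string{"app": "web"},
+		Template: &PodTemplateSpec{
+			ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
+		},
+	}
+)
+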
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+type ReplicationControllerStatus struct {
+	// Replicas is the most recently observed number of replicas.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+	// The number of pods that have labels matching the labels of the pod template of the replication controller.
+	// +optional
+	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
+
+	// The number of ready replicas for this replication controller.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
+
+	// The number of available replicas (ready for at least minReadySeconds) for this replication controller.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
+
+	// ObservedGeneration reflects the generation of the most recently observed replication controller.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+
+	// Represents the latest available observations of a replication controller's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+}
+
+type ReplicationControllerConditionType string
+
+// These are valid conditions of a replication controller.
+const (
+	// ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
+	// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
+	// etc., or fails to be deleted due to the kubelet being down or finalizers failing.
+	ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
+)
+
+// ReplicationControllerCondition describes the state of a replication controller at a certain point.
+type ReplicationControllerCondition struct {
+	// Type of replication controller condition.
+	Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// The last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// ReplicationController represents the configuration of a replication controller.
+type ReplicationController struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// If the Labels of a ReplicationController are empty, they are defaulted to
+	// be the same as the Pod(s) that the replication controller manages.
+	// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the specification of the desired behavior of the replication controller.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is the most recently observed status of the replication controller.
+	// This data may be out of date by some window of time.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// ReplicationControllerList is a collection of replication controllers.
+type ReplicationControllerList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of replication controllers.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+	Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Session Affinity Type string
+// +enum
+type ServiceAffinity string
+
+const (
+	// ServiceAffinityClientIP enables client-IP-based session affinity.
+	ServiceAffinityClientIP ServiceAffinity = "ClientIP"
+
+	// ServiceAffinityNone - no session affinity.
+	ServiceAffinityNone ServiceAffinity = "None"
+)
+
+const DefaultClientIPServiceAffinitySeconds int32 = 10800
+
+// SessionAffinityConfig represents the configurations of session affinity.
+type SessionAffinityConfig struct {
+	// clientIP contains the configurations of Client IP based session affinity.
+	// +optional
+	ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"`
+}
+
+// ClientIPConfig represents the configurations of Client IP based session affinity.
+type ClientIPConfig struct {
+	// timeoutSeconds specifies the seconds of ClientIP type session sticky time.
+	// The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP".
+	// Default value is 10800 (for 3 hours).
+	// +optional
+	TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
+}
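+
+// A minimal sketch (illustrative values) of enabling ClientIP session affinity
+// with a one-hour sticky time instead of the 10800-second default:
+//
+//	timeout := int32(3600)
+//	cfg := SessionAffinityConfig{
+//		ClientIP: &ClientIPConfig{TimeoutSeconds: &timeout},
+//	}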
+
+// Service Type string describes ingress methods for a service
+// +enum
+type ServiceType string
+
+const (
+	// ServiceTypeClusterIP means a service will only be accessible inside the
+	// cluster, via the cluster IP.
+	ServiceTypeClusterIP ServiceType = "ClusterIP"
+
+	// ServiceTypeNodePort means a service will be exposed on one port of
+	// every node, in addition to 'ClusterIP' type.
+	ServiceTypeNodePort ServiceType = "NodePort"
+
+	// ServiceTypeLoadBalancer means a service will be exposed via an
+	// external load balancer (if the cloud provider supports it), in addition
+	// to 'NodePort' type.
+	ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
+
+	// ServiceTypeExternalName means a service consists of only a reference to
+	// an external name that kubedns or equivalent will return as a CNAME
+	// record, with no exposing or proxying of any pods involved.
+	ServiceTypeExternalName ServiceType = "ExternalName"
+)
+
+// ServiceInternalTrafficPolicy describes how nodes distribute service traffic they
+// receive on the ClusterIP.
+// +enum
+type ServiceInternalTrafficPolicy string
+
+const (
+	// ServiceInternalTrafficPolicyCluster routes traffic to all endpoints.
+	ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicy = "Cluster"
+
+	// ServiceInternalTrafficPolicyLocal routes traffic only to endpoints on the same
+	// node as the client pod (dropping the traffic if there are no local endpoints).
+	ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicy = "Local"
+)
+
+// for backwards compat
+// +enum
+type ServiceInternalTrafficPolicyType = ServiceInternalTrafficPolicy
+
+// ServiceExternalTrafficPolicy describes how nodes distribute service traffic they
+// receive on one of the Service's "externally-facing" addresses (NodePorts, ExternalIPs,
+// and LoadBalancer IPs).
+// +enum
+type ServiceExternalTrafficPolicy string
+
+const (
+	// ServiceExternalTrafficPolicyCluster routes traffic to all endpoints.
+	ServiceExternalTrafficPolicyCluster ServiceExternalTrafficPolicy = "Cluster"
+
+	// ServiceExternalTrafficPolicyLocal preserves the source IP of the traffic by
+	// routing only to endpoints on the same node as the traffic was received on
+	// (dropping the traffic if there are no local endpoints).
+	ServiceExternalTrafficPolicyLocal ServiceExternalTrafficPolicy = "Local"
+)
+
+// for backwards compat
+// +enum
+type ServiceExternalTrafficPolicyType = ServiceExternalTrafficPolicy
+
+const (
+	ServiceExternalTrafficPolicyTypeLocal   = ServiceExternalTrafficPolicyLocal
+	ServiceExternalTrafficPolicyTypeCluster = ServiceExternalTrafficPolicyCluster
+)
+
+// These are valid values for the TrafficDistribution field of a Service.
+const (
+	// Indicates a preference for routing traffic to endpoints that are
+	// topologically proximate to the client. The interpretation of "topologically
+	// proximate" may vary across implementations and could encompass endpoints
+	// within the same node, rack, zone, or even region. Setting this value gives
+	// implementations permission to make different tradeoffs, e.g. optimizing for
+	// proximity rather than equal distribution of load. Users should not set this
+	// value if such tradeoffs are not acceptable.
+	ServiceTrafficDistributionPreferClose = "PreferClose"
+)
+
+// These are the valid conditions of a service.
+const (
+	// LoadBalancerPortsError represents the condition of the requested ports
+	// on the cloud load balancer instance.
+	LoadBalancerPortsError = "LoadBalancerPortsError"
+	// LoadBalancerPortsErrorReason reason in ServiceStatus condition LoadBalancerPortsError
+	// means the LoadBalancer was not able to be configured correctly.
+	LoadBalancerPortsErrorReason = "LoadBalancerMixedProtocolNotSupported"
+)
+
+// ServiceStatus represents the current status of a service.
+type ServiceStatus struct {
+	// LoadBalancer contains the current status of the load-balancer,
+	// if one is present.
+	// +optional
+	LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
+	// Current service state
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+}
+
+// LoadBalancerStatus represents the status of a load-balancer.
+type LoadBalancerStatus struct {
+	// Ingress is a list containing ingress points for the load-balancer.
+	// Traffic intended for the service should be sent to these ingress points.
+	// +optional
+	// +listType=atomic
+	Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
+}
+
+// LoadBalancerIngress represents the status of a load-balancer ingress point:
+// traffic intended for the service should be sent to an ingress point.
+type LoadBalancerIngress struct {
+	// IP is set for load-balancer ingress points that are IP based
+	// (typically GCE or OpenStack load-balancers)
+	// +optional
+	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
+
+	// Hostname is set for load-balancer ingress points that are DNS based
+	// (typically AWS load-balancers)
+	// +optional
+	Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
+
+	// IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified.
+	// Setting this to "VIP" indicates that traffic is delivered to the node with
+	// the destination set to the load-balancer's IP and port.
+	// Setting this to "Proxy" indicates that traffic is delivered to the node or pod with
+	// the destination set to the node's IP and node port or the pod's IP and port.
+	// Service implementations may use this information to adjust traffic routing.
+	// +optional
+	IPMode *LoadBalancerIPMode `json:"ipMode,omitempty" protobuf:"bytes,3,opt,name=ipMode"`
+
+	// Ports is a list of records of service ports.
+	// If used, every port defined in the service should have an entry in it.
+	// +listType=atomic
+	// +optional
+	Ports []PortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"`
+}
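+
+// A minimal sketch of picking a routable address from a LoadBalancerStatus,
+// preferring DNS-based ingress points over IP-based ones (the helper name is
+// illustrative, not part of this package):
+//
+//	func ingressAddress(st LoadBalancerStatus) string {
+//		for _, ing := range st.Ingress {
+//			if ing.Hostname != "" {
+//				return ing.Hostname
+//			}
+//			if ing.IP != "" {
+//				return ing.IP
+//			}
+//		}
+//		return ""
+//	}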
+
+// IPFamily represents the IP Family (IPv4 or IPv6). This type is used
+// to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
+// +enum
+type IPFamily string
+
+const (
+	// IPv4Protocol indicates that this IP is IPv4 protocol
+	IPv4Protocol IPFamily = "IPv4"
+	// IPv6Protocol indicates that this IP is IPv6 protocol
+	IPv6Protocol IPFamily = "IPv6"
+	// IPFamilyUnknown indicates that this IP is unknown protocol
+	IPFamilyUnknown IPFamily = ""
+)
+
+// IPFamilyPolicy represents the dual-stack-ness requested or required by a Service
+// +enum
+type IPFamilyPolicy string
+
+const (
+	// IPFamilyPolicySingleStack indicates that this service is required to have a single IPFamily.
+	// The IPFamily assigned is based on the default IPFamily used by the cluster
+	// or as identified by service.spec.ipFamilies field
+	IPFamilyPolicySingleStack IPFamilyPolicy = "SingleStack"
+	// IPFamilyPolicyPreferDualStack indicates that this service prefers dual-stack when
+	// the cluster is configured for dual-stack. If the cluster is not configured
+	// for dual-stack the service will be assigned a single IPFamily. If the IPFamily is not
+	// set in service.spec.ipFamilies then the service will be assigned the default IPFamily
+	// configured on the cluster
+	IPFamilyPolicyPreferDualStack IPFamilyPolicy = "PreferDualStack"
+	// IPFamilyPolicyRequireDualStack indicates that this service requires dual-stack. Using
+	// IPFamilyPolicyRequireDualStack on a single stack cluster will result in validation errors. The
+	// IPFamilies (and their order) assigned  to this service is based on service.spec.ipFamilies. If
+	// service.spec.ipFamilies was not provided then it will be assigned according to how they are
+	// configured on the cluster. If service.spec.ipFamilies has only one entry then the alternative
+	// IPFamily will be added by apiserver
+	IPFamilyPolicyRequireDualStack IPFamilyPolicy = "RequireDualStack"
+)
+
+// for backwards compat
+// +enum
+type IPFamilyPolicyType = IPFamilyPolicy
+
+// ServiceSpec describes the attributes that a user creates on a service.
+type ServiceSpec struct {
+	// The list of ports that are exposed by this service.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+	// +patchMergeKey=port
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=port
+	// +listMapKey=protocol
+	Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
+
+	// Route service traffic to pods with label keys and values matching this
+	// selector. If empty or not present, the service is assumed to have an
+	// external process managing its endpoints, which Kubernetes will not
+	// modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+	// Ignored if type is ExternalName.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/
+	// +optional
+	// +mapType=atomic
+	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
+
+	// clusterIP is the IP address of the service and is usually assigned
+	// randomly. If an address is specified manually, is in-range (as per
+	// system configuration), and is not in use, it will be allocated to the
+	// service; otherwise creation of the service will fail. This field may not
+	// be changed through updates unless the type field is also being changed
+	// to ExternalName (which requires this field to be blank) or the type
+	// field is being changed from ExternalName (in which case this field may
+	// optionally be specified, as described above).  Valid values are "None",
+	// empty string (""), or a valid IP address. Setting this to "None" makes a
+	// "headless service" (no virtual IP), which is useful when direct endpoint
+	// connections are preferred and proxying is not required.  Only applies to
+	// types ClusterIP, NodePort, and LoadBalancer. If this field is specified
+	// when creating a Service of type ExternalName, creation will fail. This
+	// field will be wiped when updating a Service to type ExternalName.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+	// +optional
+	ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"`
+
+	// ClusterIPs is a list of IP addresses assigned to this service, and are
+	// usually assigned randomly.  If an address is specified manually, is
+	// in-range (as per system configuration), and is not in use, it will be
+	// allocated to the service; otherwise creation of the service will fail.
+	// This field may not be changed through updates unless the type field is
+	// also being changed to ExternalName (which requires this field to be
+	// empty) or the type field is being changed from ExternalName (in which
+	// case this field may optionally be specified, as described above).  Valid
+	// values are "None", empty string (""), or a valid IP address.  Setting
+	// this to "None" makes a "headless service" (no virtual IP), which is
+	// useful when direct endpoint connections are preferred and proxying is
+	// not required.  Only applies to types ClusterIP, NodePort, and
+	// LoadBalancer. If this field is specified when creating a Service of type
+	// ExternalName, creation will fail. This field will be wiped when updating
+	// a Service to type ExternalName.  If this field is not specified, it will
+	// be initialized from the clusterIP field.  If this field is specified,
+	// clients must ensure that clusterIPs[0] and clusterIP have the same
+	// value.
+	//
+	// This field may hold a maximum of two entries (dual-stack IPs, in either order).
+	// These IPs must correspond to the values of the ipFamilies field. Both
+	// clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+	// +listType=atomic
+	// +optional
+	ClusterIPs []string `json:"clusterIPs,omitempty" protobuf:"bytes,18,opt,name=clusterIPs"`
+
+	// type determines how the Service is exposed. Defaults to ClusterIP. Valid
+	// options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+	// "ClusterIP" allocates a cluster-internal IP address for load-balancing
+	// to endpoints. Endpoints are determined by the selector or if that is not
+	// specified, by manual construction of an Endpoints object or
+	// EndpointSlice objects. If clusterIP is "None", no virtual IP is
+	// allocated and the endpoints are published as a set of endpoints rather
+	// than a virtual IP.
+	// "NodePort" builds on ClusterIP and allocates a port on every node which
+	// routes to the same endpoints as the clusterIP.
+	// "LoadBalancer" builds on NodePort and creates an external load-balancer
+	// (if supported in the current cloud) which routes to the same endpoints
+	// as the clusterIP.
+	// "ExternalName" aliases this service to the specified externalName.
+	// Several other fields do not apply to ExternalName services.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+	// +optional
+	Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
+
+	// externalIPs is a list of IP addresses for which nodes in the cluster
+	// will also accept traffic for this service.  These IPs are not managed by
+	// Kubernetes.  The user is responsible for ensuring that traffic arrives
+	// at a node with this IP.  A common example is external load-balancers
+	// that are not part of the Kubernetes system.
+	// +optional
+	// +listType=atomic
+	ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"`
+
+	// Supports "ClientIP" and "None". Used to maintain session affinity.
+	// Enable client IP based session affinity.
+	// Must be ClientIP or None.
+	// Defaults to None.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+	// +optional
+	SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"`
+
+	// Only applies to Service Type: LoadBalancer.
+	// This feature depends on whether the underlying cloud-provider supports specifying
+	// the loadBalancerIP when a load balancer is created.
+	// This field will be ignored if the cloud-provider does not support the feature.
+	// Deprecated: This field was under-specified and its meaning varies across implementations.
+	// Using it is non-portable and it may not support dual-stack.
+	// Users are encouraged to use implementation-specific annotations when available.
+	// +optional
+	LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
+
+	// If specified and supported by the platform, traffic through the cloud-provider
+	// load-balancer will be restricted to the specified client IPs. This field will be
+	// ignored if the cloud-provider does not support the feature.
+	// More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
+	// +optional
+	// +listType=atomic
+	LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
+
+	// externalName is the external reference that discovery mechanisms will
+	// return as an alias for this service (e.g. a DNS CNAME record). No
+	// proxying will be involved.  Must be a lowercase RFC-1123 hostname
+	// (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
+	// +optional
+	ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"`
+
+	// externalTrafficPolicy describes how nodes distribute service traffic they
+	// receive on one of the Service's "externally-facing" addresses (NodePorts,
+	// ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure
+	// the service in a way that assumes that external load balancers will take care
+	// of balancing the service traffic between nodes, and so each node will deliver
+	// traffic only to the node-local endpoints of the service, without masquerading
+	// the client source IP. (Traffic mistakenly sent to a node with no endpoints will
+	// be dropped.) The default value, "Cluster", uses the standard behavior of
+	// routing to all endpoints evenly (possibly modified by topology and other
+	// features). Note that traffic sent to an External IP or LoadBalancer IP from
+	// within the cluster will always get "Cluster" semantics, but clients sending to
+	// a NodePort from within the cluster may need to take traffic policy into account
+	// when picking a node.
+	// +optional
+	ExternalTrafficPolicy ServiceExternalTrafficPolicy `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"`
+
+	// healthCheckNodePort specifies the healthcheck nodePort for the service.
+	// This only applies when type is set to LoadBalancer and
+	// externalTrafficPolicy is set to Local. If a value is specified, is
+	// in-range, and is not in use, it will be used.  If not specified, a value
+	// will be automatically allocated.  External systems (e.g. load-balancers)
+	// can use this port to determine if a given node holds endpoints for this
+	// service or not.  If this field is specified when creating a Service
+	// which does not need it, creation will fail. This field will be wiped
+	// when updating a Service to no longer need it (e.g. changing type).
+	// This field cannot be updated once set.
+	// +optional
+	HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"`
+
+	// publishNotReadyAddresses indicates that any agent which deals with endpoints for this
+	// Service should disregard any indications of ready/not-ready.
+	// The primary use case for setting this field is for a StatefulSet's Headless Service to
+	// propagate SRV DNS records for its Pods for the purpose of peer discovery.
+	// The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
+	// Services interpret this to mean that all endpoints are considered "ready" even if the
+	// Pods themselves are not. Agents which consume only Kubernetes generated endpoints
+	// through the Endpoints or EndpointSlice resources can safely assume this behavior.
+	// +optional
+	PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
+
+	// sessionAffinityConfig contains the configurations of session affinity.
+	// +optional
+	SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
+
+	// TopologyKeys is tombstoned to show why 16 is reserved protobuf tag.
+	// TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"`
+
+	// IPFamily is tombstoned to show why 15 is a reserved protobuf tag.
+	// IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"`
+
+	// IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
+	// service. This field is usually assigned automatically based on cluster
+	// configuration and the ipFamilyPolicy field. If this field is specified
+	// manually, the requested family is available in the cluster, and
+	// ipFamilyPolicy allows it, then it will be used; otherwise creation of
+	// the service will fail. This field is conditionally mutable: it allows
+	// for adding or removing a secondary IP family, but it does not allow
+	// changing the primary IP family of the Service. Valid values are "IPv4"
+	// and "IPv6".  This field only applies to Services of types ClusterIP,
+	// NodePort, and LoadBalancer, and does apply to "headless" services.
+	// This field will be wiped when updating a Service to type ExternalName.
+	//
+	// This field may hold a maximum of two entries (dual-stack families, in
+	// either order).  These families must correspond to the values of the
+	// clusterIPs field, if specified. Both clusterIPs and ipFamilies are
+	// governed by the ipFamilyPolicy field.
+	// +listType=atomic
+	// +optional
+	IPFamilies []IPFamily `json:"ipFamilies,omitempty" protobuf:"bytes,19,opt,name=ipFamilies,casttype=IPFamily"`
+
+	// IPFamilyPolicy represents the dual-stack-ness requested or required by
+	// this Service. If there is no value provided, then this field will be set
+	// to SingleStack. Services can be "SingleStack" (a single IP family),
+	// "PreferDualStack" (two IP families on dual-stack configured clusters or
+	// a single IP family on single-stack clusters), or "RequireDualStack"
+	// (two IP families on dual-stack configured clusters, otherwise fail). The
+	// ipFamilies and clusterIPs fields depend on the value of this field. This
+	// field will be wiped when updating a service to type ExternalName.
+	// +optional
+	IPFamilyPolicy *IPFamilyPolicy `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicy"`
+
+	// allocateLoadBalancerNodePorts defines if NodePorts will be automatically
+	// allocated for services with type LoadBalancer.  Default is "true". It
+	// may be set to "false" if the cluster load-balancer does not rely on
+	// NodePorts.  If the caller requests specific NodePorts (by specifying a
+	// value), those requests will be respected, regardless of this field.
+	// This field may only be set for services with type LoadBalancer and will
+	// be cleared if the type is changed to any other type.
+	// +optional
+	AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty" protobuf:"bytes,20,opt,name=allocateLoadBalancerNodePorts"`
+
+	// loadBalancerClass is the class of the load balancer implementation this Service belongs to.
+	// If specified, the value of this field must be a label-style identifier, with an optional prefix,
+	// e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
+	// This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
+	// balancer implementation is used, today this is typically done through the cloud provider integration,
+	// but should apply for any default implementation. If set, it is assumed that a load balancer
+	// implementation is watching for Services with a matching class. Any default load balancer
+	// implementation (e.g. cloud providers) should ignore Services that set this field.
+	// This field can only be set when creating or updating a Service to type 'LoadBalancer'.
+	// Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
+	// +optional
+	LoadBalancerClass *string `json:"loadBalancerClass,omitempty" protobuf:"bytes,21,opt,name=loadBalancerClass"`
+
+	// InternalTrafficPolicy describes how nodes distribute service traffic they
+	// receive on the ClusterIP. If set to "Local", the proxy will assume that pods
+	// only want to talk to endpoints of the service on the same node as the pod,
+	// dropping the traffic if there are no local endpoints. The default value,
+	// "Cluster", uses the standard behavior of routing to all endpoints evenly
+	// (possibly modified by topology and other features).
+	// +optional
+	InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"`
+
+	// TrafficDistribution offers a way to express preferences for how traffic is
+	// distributed to Service endpoints. Implementations can use this field as a
+	// hint, but are not required to guarantee strict adherence. If the field is
+	// not set, the implementation will apply its default routing strategy. If set
+	// to "PreferClose", implementations should prioritize endpoints that are
+	// topologically close (e.g., same zone).
+	// This is a beta field and requires enabling ServiceTrafficDistribution feature.
+	// +featureGate=ServiceTrafficDistribution
+	// +optional
+	TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"`
+}
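+
+// A minimal sketch (illustrative values) of a ClusterIP ServiceSpec that
+// prefers dual-stack where the cluster supports it; ports, selector, and the
+// ipFamilyPolicy pointer are the fields most callers set explicitly:
+//
+//	policy := IPFamilyPolicyPreferDualStack
+//	spec := ServiceSpec{
+//		Type:           ServiceTypeClusterIP,
+//		Selector:       map[string]string{"app": "example"},
+//		IPFamilyPolicy: &policy,
+//		Ports: []ServicePort{
+//			{Name: "http", Port: 80, Protocol: ProtocolTCP},
+//		},
+//	}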
+
+// ServicePort contains information on service's port.
+type ServicePort struct {
+	// The name of this port within the service. This must be a DNS_LABEL.
+	// All ports within a ServiceSpec must have unique names. When considering
+	// the endpoints for a Service, this must match the 'name' field in the
+	// EndpointPort.
+	// Optional if only one ServicePort is defined on this service.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+	// The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+	// Default is TCP.
+	// +default="TCP"
+	// +optional
+	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
+
+	// The application protocol for this port.
+	// This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+	// This field follows standard Kubernetes label syntax.
+	// Valid values are either:
+	//
+	// * Un-prefixed protocol names - reserved for IANA standard service names (as per
+	// RFC-6335 and https://www.iana.org/assignments/service-names).
+	//
+	// * Kubernetes-defined prefixed names:
+	//   * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+	//   * 'kubernetes.io/ws'  - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+	//   * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+	//
+	// * Other protocols should use implementation-defined prefixed names such as
+	// mycompany.com/my-custom-protocol.
+	// +optional
+	AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"`
+
+	// The port that will be exposed by this service.
+	Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
+
+	// Number or name of the port to access on the pods targeted by the service.
+	// Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
+	// If this is a string, it will be looked up as a named port in the
+	// target Pod's container ports. If this is not specified, the value
+	// of the 'port' field is used (an identity map).
+	// This field is ignored for services with clusterIP=None, and should be
+	// omitted or set equal to the 'port' field.
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
+	// +optional
+	TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
+
+	// The port on each node on which this service is exposed when type is
+	// NodePort or LoadBalancer.  Usually assigned by the system. If a value is
+	// specified, in-range, and not in use, it will be used; otherwise the
+	// operation will fail.  If not specified, a port will be allocated if this
+	// Service requires one.  If this field is specified when creating a
+	// Service which does not need it, creation will fail. This field will be
+	// wiped when updating a Service to no longer need it (e.g. changing type
+	// from NodePort to ClusterIP).
+	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+	// +optional
+	NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"`
+}
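+
+// A minimal sketch of a ServicePort that forwards to a named container port,
+// letting pods renumber their ports without touching the Service (values are
+// illustrative):
+//
+//	port := ServicePort{
+//		Name:       "http",
+//		Port:       80,
+//		TargetPort: intstr.FromString("http"),
+//	}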
+
+// +genclient
+// +genclient:skipVerbs=deleteCollection
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// Service is a named abstraction of a software service (for example, mysql),
+// consisting of a local port (for example 3306) that the proxy listens on, and the
+// selector that determines which pods will answer requests sent through the proxy.
+type Service struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the behavior of a service.
+	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Most recently observed status of the service.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+const (
+	// ClusterIPNone - do not assign a cluster IP
+	// no proxying required and no environment variables should be created for pods
+	ClusterIPNone = "None"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// ServiceList holds a list of services.
+type ServiceList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of services
+	Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// ServiceAccount binds together:
+// * a name, understood by users, and perhaps by peripheral systems, for an identity
+// * a principal that can be authenticated and authorized
+// * a set of secrets
+type ServiceAccount struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use.
+	// Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true".
+	// The "kubernetes.io/enforce-mountable-secrets" annotation is deprecated since v1.32.
+	// Prefer separate namespaces to isolate access to mounted secrets.
+	// This field should not be used to find auto-generated service account token secrets for use outside of pods.
+	// Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created.
+	// More info: https://kubernetes.io/docs/concepts/configuration/secret
+	// +optional
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=name
+	Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"`
+
+	// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+	// in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+	// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+	// More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+	// +optional
+	// +listType=atomic
+	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"`
+
+	// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
+	// Can be overridden at the pod level.
+	// +optional
+	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"`
+}
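+
+// A minimal sketch (illustrative names) of a ServiceAccount that opts its pods
+// out of automatic token mounting and pins a pull secret:
+//
+//	automount := false
+//	sa := ServiceAccount{
+//		ObjectMeta:                   metav1.ObjectMeta{Name: "build-bot", Namespace: "ci"},
+//		AutomountServiceAccountToken: &automount,
+//		ImagePullSecrets:             []LocalObjectReference{{Name: "registry-creds"}},
+//	}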
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// ServiceAccountList is a list of ServiceAccount objects
+type ServiceAccountList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of ServiceAccounts.
+	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+	Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// Endpoints is a collection of endpoints that implement the actual service. Example:
+//
+//	 Name: "mysvc",
+//	 Subsets: [
+//	   {
+//	     Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//	     Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//	   },
+//	   {
+//	     Addresses: [{"ip": "10.10.3.3"}],
+//	     Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
+//	   },
+//	]
+type Endpoints struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Subsets is the sets of addresses and ports that comprise a service.
+	// The set of all endpoints is the union of all subsets. Addresses are placed into
+	// subsets according to the IPs they share. A single address with multiple ports,
+	// some of which are ready and some of which are not (because they come from
+	// different containers), will result in the address being displayed in different
+	// subsets for the different ports. No address will appear in both Addresses and
+	// NotReadyAddresses in the same subset.
+	// +optional
+	// +listType=atomic
+	Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"`
+}
+
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+//
+//	{
+//	  Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//	  Ports:     [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//	}
+//
+// The resulting set of endpoints can be viewed as:
+//
+//	a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+//	b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+type EndpointSubset struct {
+	// IP addresses which offer the related ports that are marked as ready. These endpoints
+	// should be considered safe for load balancers and clients to utilize.
+	// +optional
+	// +listType=atomic
+	Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"`
+	// IP addresses which offer the related ports but are not currently marked as ready
+	// because they have not yet finished starting, have recently failed a readiness check,
+	// or have recently failed a liveness check.
+	// +optional
+	// +listType=atomic
+	NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"`
+	// Port numbers available on the related IP addresses.
+	// +optional
+	// +listType=atomic
+	Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
+}
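+
+// A minimal sketch of expanding one subset into its Cartesian product of
+// ready "ip:port" pairs, matching the example above (the helper name is
+// illustrative and assumes "fmt" is imported):
+//
+//	func expandSubset(ss EndpointSubset) []string {
+//		var out []string
+//		for _, addr := range ss.Addresses {
+//			for _, p := range ss.Ports {
+//				out = append(out, fmt.Sprintf("%s:%d", addr.IP, p.Port))
+//			}
+//		}
+//		return out
+//	}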
+
+// EndpointAddress is a tuple that describes a single IP address.
+// +structType=atomic
+type EndpointAddress struct {
+	// The IP of this endpoint.
+	// May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10),
+	// or link-local multicast (224.0.0.0/24 or ff02::/16).
+	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
+	// The Hostname of this endpoint
+	// +optional
+	Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
+	// Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
+	// +optional
+	NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"`
+	// Reference to object providing the endpoint.
+	// +optional
+	TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"`
+}
+
+// EndpointPort is a tuple that describes a single port.
+// +structType=atomic
+type EndpointPort struct {
+	// The name of this port.  This must match the 'name' field in the
+	// corresponding ServicePort.
+	// Must be a DNS_LABEL.
+	// Optional only if one port is defined.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+	// The port number of the endpoint.
+	Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
+
+	// The IP protocol for this port.
+	// Must be UDP, TCP, or SCTP.
+	// Default is TCP.
+	// +optional
+	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
+
+	// The application protocol for this port.
+	// This is used as a hint for implementations to offer richer behavior for protocols that they understand.
+	// This field follows standard Kubernetes label syntax.
+	// Valid values are either:
+	//
+	// * Un-prefixed protocol names - reserved for IANA standard service names (as per
+	// RFC-6335 and https://www.iana.org/assignments/service-names).
+	//
+	// * Kubernetes-defined prefixed names:
+	//   * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-
+	//   * 'kubernetes.io/ws'  - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455
+	//   * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455
+	//
+	// * Other protocols should use implementation-defined prefixed names such as
+	// mycompany.com/my-custom-protocol.
+	// +optional
+	AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// EndpointsList is a list of endpoints.
+type EndpointsList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of endpoints.
+	Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// NodeSpec describes the attributes that a node is created with.
+type NodeSpec struct {
+	// PodCIDR represents the pod IP range assigned to the node.
+	// +optional
+	PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
+
+	// podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this
+	// field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for
+	// each of IPv4 and IPv6.
+	// +optional
+	// +patchStrategy=merge
+	// +listType=set
+	PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"`
+
+	// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
+	// +optional
+	ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
+	// Unschedulable controls node schedulability of new pods. By default, a node is schedulable.
+	// More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
+	// +optional
+	Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
+	// If specified, the node's taints.
+	// +optional
+	// +listType=atomic
+	Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
+
+	// Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed.
+	// +optional
+	ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
+
+	// Deprecated. Not all kubelets will set this field. Remove field after 1.13.
+	// see: https://issues.k8s.io/61966
+	// +optional
+	DoNotUseExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
+}
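+
+// A minimal sketch (illustrative CIDRs) of a dual-stack NodeSpec; note that
+// podCIDRs[0] must match the podCIDR field:
+//
+//	spec := NodeSpec{
+//		PodCIDR:  "10.244.1.0/24",
+//		PodCIDRs: []string{"10.244.1.0/24", "fd00:10:244:1::/64"},
+//	}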
+
+// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
+// This API is deprecated since 1.22
+type NodeConfigSource struct {
+	// For historical context, regarding the below kind, apiVersion, and configMapRef deprecation tags:
+	// 1. kind/apiVersion were used by the kubelet to persist this struct to disk (they had no protobuf tags)
+	// 2. configMapRef and proto tag 1 were used by the API to refer to a configmap,
+	//    but used a generic ObjectReference type that didn't really have the fields we needed
+	// All uses/persistence of the NodeConfigSource struct prior to 1.11 were gated by alpha feature flags,
+	// so there was no persisted data for these fields that needed to be migrated/handled.
+
+	// +k8s:deprecated=kind
+	// +k8s:deprecated=apiVersion
+	// +k8s:deprecated=configMapRef,protobuf=1
+
+	// ConfigMap is a reference to a Node's ConfigMap
+	ConfigMap *ConfigMapNodeConfigSource `json:"configMap,omitempty" protobuf:"bytes,2,opt,name=configMap"`
+}
+
+// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
+// This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
+type ConfigMapNodeConfigSource struct {
+	// Namespace is the metadata.namespace of the referenced ConfigMap.
+	// This field is required in all cases.
+	Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
+
+	// Name is the metadata.name of the referenced ConfigMap.
+	// This field is required in all cases.
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+
+	// UID is the metadata.UID of the referenced ConfigMap.
+	// This field is forbidden in Node.Spec, and required in Node.Status.
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"`
+
+	// ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
+	// This field is forbidden in Node.Spec, and required in Node.Status.
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
+
+	// KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure
+	// This field is required in all cases.
+	KubeletConfigKey string `json:"kubeletConfigKey" protobuf:"bytes,5,opt,name=kubeletConfigKey"`
+}
+
+// DaemonEndpoint contains information about a single Daemon endpoint.
+type DaemonEndpoint struct {
+	/*
+		The port tag was not properly in quotes in earlier releases, so it must be
+		uppercased for backwards compat (since it was falling back to var name of
+		'Port').
+	*/
+
+	// Port number of the given endpoint.
+	Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"`
+}
+
+// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
+type NodeDaemonEndpoints struct {
+	// Endpoint on which Kubelet is listening.
+	// +optional
+	KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
+}
+
+// NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.
+type NodeRuntimeHandlerFeatures struct {
+	// RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.
+	// +featureGate=RecursiveReadOnlyMounts
+	// +optional
+	RecursiveReadOnlyMounts *bool `json:"recursiveReadOnlyMounts,omitempty" protobuf:"varint,1,opt,name=recursiveReadOnlyMounts"`
+	// UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.
+	// +featureGate=UserNamespacesSupport
+	// +optional
+	UserNamespaces *bool `json:"userNamespaces,omitempty" protobuf:"varint,2,opt,name=userNamespaces"`
+}
+
+// NodeRuntimeHandler is a set of runtime handler information.
+type NodeRuntimeHandler struct {
+	// Runtime handler name.
+	// Empty for the default runtime handler.
+	// +optional
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// Supported features.
+	// +optional
+	Features *NodeRuntimeHandlerFeatures `json:"features,omitempty" protobuf:"bytes,2,opt,name=features"`
+}
+
+// NodeFeatures describes the set of features implemented by the CRI implementation.
+// The features contained in NodeFeatures should depend only on the CRI implementation,
+// independent of runtime handlers.
+type NodeFeatures struct {
+	// SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.
+	// +optional
+	SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty" protobuf:"varint,1,opt,name=supplementalGroupsPolicy"`
+}
+
+// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
+type NodeSystemInfo struct {
+	// MachineID reported by the node. For unique machine identification
+	// in the cluster this field is preferred. Learn more from man(5)
+	// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
+	MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"`
+	// SystemUUID reported by the node. For unique machine identification
+	// MachineID is preferred. This field is specific to Red Hat hosts
+	// https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid
+	SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"`
+	// Boot ID reported by the node.
+	BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
+	// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
+	KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"`
+	// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
+	OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"`
+	// ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).
+	ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
+	// Kubelet Version reported by the node.
+	KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
+	// Deprecated: KubeProxy Version reported by the node.
+	KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
+	// The Operating System reported by the node
+	OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
+	// The Architecture reported by the node
+	Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
+}
+
+// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
+type NodeConfigStatus struct {
+	// Assigned reports the checkpointed config the node will try to use.
+	// When Node.Spec.ConfigSource is updated, the node checkpoints the associated
+	// config payload to local disk, along with a record indicating intended
+	// config. The node refers to this record to choose its config checkpoint, and
+	// reports this record in Assigned. Assigned only updates in the status after
+	// the record has been checkpointed to disk. When the Kubelet is restarted,
+	// it tries to make the Assigned config the Active config by loading and
+	// validating the checkpointed payload identified by Assigned.
+	// +optional
+	Assigned *NodeConfigSource `json:"assigned,omitempty" protobuf:"bytes,1,opt,name=assigned"`
+	// Active reports the checkpointed config the node is actively using.
+	// Active will represent either the current version of the Assigned config,
+	// or the current LastKnownGood config, depending on whether attempting to use the
+	// Assigned config results in an error.
+	// +optional
+	Active *NodeConfigSource `json:"active,omitempty" protobuf:"bytes,2,opt,name=active"`
+	// LastKnownGood reports the checkpointed config the node will fall back to
+	// when it encounters an error attempting to use the Assigned config.
+	// The Assigned config becomes the LastKnownGood config when the node determines
+	// that the Assigned config is stable and correct.
+	// This is currently implemented as a 10-minute soak period starting when the local
+	// record of Assigned config is updated. If the Assigned config is Active at the end
+	// of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
+	// reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
+	// because the local default config is always assumed good.
+	// You should not make assumptions about the node's method of determining config stability
+	// and correctness, as this may change or become configurable in the future.
+	// +optional
+	LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" protobuf:"bytes,3,opt,name=lastKnownGood"`
+	// Error describes any problems reconciling the Spec.ConfigSource to the Active config.
+	// Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
+	// record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
+	// to load or validate the Assigned config, etc.
+	// Errors may occur at different points while syncing config. Earlier errors (e.g. download or
+	// checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
+	// Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
+	// a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
+	// by fixing the config assigned in Spec.ConfigSource.
+	// You can find additional information for debugging by searching the error message in the Kubelet log.
+	// Error is a human-readable description of the error state; machines can check whether or not Error
+	// is empty, but should not rely on the stability of the Error text across Kubelet versions.
+	// +optional
+	Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
+}
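+
+// The checkpoint lifecycle above (Assigned -> Active, with LastKnownGood as
+// the fallback) can be observed from the status alone. The helper below is an
+// illustrative sketch, not part of the upstream API: it treats a config
+// rollout as settled when the Active checkpoint matches the Assigned one and
+// no reconcile error is reported, comparing ConfigMap references by UID and
+// ResourceVersion.
+func nodeConfigInSync(s *NodeConfigStatus) bool {
+	if s == nil || s.Error != "" || s.Assigned == nil || s.Active == nil {
+		return false
+	}
+	assigned, active := s.Assigned.ConfigMap, s.Active.ConfigMap
+	return assigned != nil && active != nil &&
+		assigned.UID == active.UID &&
+		assigned.ResourceVersion == active.ResourceVersion
+}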
+
+// NodeStatus is information about the current status of a node.
+type NodeStatus struct {
+	// Capacity represents the total resources of a node.
+	// More info: https://kubernetes.io/docs/reference/node/node-status/#capacity
+	// +optional
+	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+	// Allocatable represents the resources of a node that are available for scheduling.
+	// Defaults to Capacity.
+	// +optional
+	Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"`
+	// NodePhase is the recently observed lifecycle phase of the node.
+	// More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
+	// The field is never populated, and is now deprecated.
+	// +optional
+	Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
+	// Conditions is an array of current observed node conditions.
+	// More info: https://kubernetes.io/docs/reference/node/node-status/#condition
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
+	// List of addresses at which the node is reachable.
+	// Queried from cloud provider, if available.
+	// More info: https://kubernetes.io/docs/reference/node/node-status/#addresses
+	// Note: This field is declared as mergeable, but the merge key is not sufficiently
+	// unique, which can cause data corruption when it is merged. Callers should instead
+	// use a full-replacement patch. See https://pr.k8s.io/79391 for an example.
+	// Consumers should assume that addresses can change during the
+	// lifetime of a Node. However, there are some exceptions where this may not
+	// be possible, such as Pods that inherit a Node's address in its own status or
+	// consumers of the downward API (status.hostIP).
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
+	// Endpoints of daemons running on the Node.
+	// +optional
+	DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
+	// Set of ids/uuids to uniquely identify the node.
+	// More info: https://kubernetes.io/docs/reference/node/node-status/#info
+	// +optional
+	NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
+	// List of container images on this node
+	// +optional
+	// +listType=atomic
+	Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
+	// List of attachable volumes in use (mounted) by the node.
+	// +optional
+	// +listType=atomic
+	VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
+	// List of volumes that are attached to the node.
+	// +optional
+	// +listType=atomic
+	VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
+	// Status of the config assigned to the node via the dynamic Kubelet config feature.
+	// +optional
+	Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
+	// The available runtime handlers.
+	// +featureGate=RecursiveReadOnlyMounts
+	// +featureGate=UserNamespacesSupport
+	// +optional
+	// +listType=atomic
+	RuntimeHandlers []NodeRuntimeHandler `json:"runtimeHandlers,omitempty" protobuf:"bytes,12,rep,name=runtimeHandlers"`
+	// Features describes the set of features implemented by the CRI implementation.
+	// +featureGate=SupplementalGroupsPolicy
+	// +optional
+	Features *NodeFeatures `json:"features,omitempty" protobuf:"bytes,13,rep,name=features"`
+}
+
+type UniqueVolumeName string
+
+// AttachedVolume describes a volume attached to a node
+type AttachedVolume struct {
+	// Name of the attached volume
+	Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"`
+
+	// DevicePath represents the device path where the volume should be available
+	DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"`
+}
+
+// AvoidPods describes pods that should avoid this node. This is the value for a
+// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
+// will eventually become a field of NodeStatus.
+type AvoidPods struct {
+	// Bounded-size list of signatures of pods that should avoid this node, sorted
+	// in timestamp order from oldest to newest. The maximum size of the list is unspecified.
+	// +optional
+	// +listType=atomic
+	PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"`
+}
+
+// Describes a class of pods that should avoid this node.
+type PreferAvoidPodsEntry struct {
+	// The class of pods.
+	PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"`
+	// Time at which this entry was added to the list.
+	// +optional
+	EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"`
+	// (brief) reason why this entry was added to the list.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+	// Human readable message indicating why this entry was added to the list.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+}
+
+// Describes the class of pods that should avoid this node.
+// Exactly one field should be set.
+type PodSignature struct {
+	// Reference to controller whose pods should avoid this node.
+	// +optional
+	PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"`
+}
+
+// Describe a container image
+type ContainerImage struct {
+	// Names by which this image is known.
+	// e.g. ["kubernetes.example/hyperkube:v1.0.7", "cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7"]
+	// +optional
+	// +listType=atomic
+	Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
+	// The size of the image in bytes.
+	// +optional
+	SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"`
+}
+
+// +enum
+type NodePhase string
+
+// These are the valid phases of node.
+const (
+	// NodePending means the node has been created/added by the system, but not configured.
+	NodePending NodePhase = "Pending"
+	// NodeRunning means the node has been configured and has Kubernetes components running.
+	NodeRunning NodePhase = "Running"
+	// NodeTerminated means the node has been removed from the cluster.
+	NodeTerminated NodePhase = "Terminated"
+)
+
+type NodeConditionType string
+
+// These are valid but not exhaustive conditions of a node. A cloud provider may set a condition not listed here.
+// Relevant events contain "NodeReady", "NodeNotReady", "NodeSchedulable", and "NodeNotSchedulable".
+const (
+	// NodeReady means kubelet is healthy and ready to accept pods.
+	NodeReady NodeConditionType = "Ready"
+	// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
+	NodeMemoryPressure NodeConditionType = "MemoryPressure"
+	// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
+	NodeDiskPressure NodeConditionType = "DiskPressure"
+	// NodePIDPressure means the kubelet is under pressure due to insufficient available PID.
+	NodePIDPressure NodeConditionType = "PIDPressure"
+	// NodeNetworkUnavailable means that network for the node is not correctly configured.
+	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
+)
+
+// NodeCondition contains condition information for a node.
+type NodeCondition struct {
+	// Type of node condition.
+	Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// Last time we got an update on a given condition.
+	// +optional
+	LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// (brief) reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// Human readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
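+
+// As a usage sketch (hypothetical helper, not part of the upstream API):
+// consumers conventionally treat a node as healthy only when the Ready
+// condition is present with Status ConditionTrue; a missing Ready condition
+// is treated the same as Unknown.
+func nodeIsReady(status *NodeStatus) bool {
+	for _, c := range status.Conditions {
+		if c.Type == NodeReady {
+			return c.Status == ConditionTrue
+		}
+	}
+	return false
+}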
+
+type NodeAddressType string
+
+// These are the built-in address types of a node. A cloud provider may set a type not listed here.
+const (
+	// NodeHostName identifies a name of the node. Although every node can be assumed
+	// to have a NodeAddress of this type, its exact syntax and semantics are not
+	// defined, and are not consistent between different clusters.
+	NodeHostName NodeAddressType = "Hostname"
+
+	// NodeInternalIP identifies an IP address which is assigned to one of the node's
+	// network interfaces. Every node should have at least one address of this type.
+	//
+	// An internal IP is normally expected to be reachable from every other node, but
+	// may not be visible to hosts outside the cluster. By default it is assumed that
+	// kube-apiserver can reach node internal IPs, though it is possible to configure
+	// clusters where this is not the case.
+	//
+	// NodeInternalIP is the default type of node IP, and does not necessarily imply
+	// that the IP is ONLY reachable internally. If a node has multiple internal IPs,
+	// no specific semantics are assigned to the additional IPs.
+	NodeInternalIP NodeAddressType = "InternalIP"
+
+	// NodeExternalIP identifies an IP address which is, in some way, intended to be
+	// more usable from outside the cluster than an internal IP, though no specific
+	// semantics are defined. It may be a globally routable IP, though it is not
+	// required to be.
+	//
+	// External IPs may be assigned directly to an interface on the node, like a
+	// NodeInternalIP, or alternatively, packets sent to the external IP may be NAT'ed
+	// to an internal node IP rather than being delivered directly (making the IP less
+	// efficient for node-to-node traffic than a NodeInternalIP).
+	NodeExternalIP NodeAddressType = "ExternalIP"
+
+	// NodeInternalDNS identifies a DNS name which resolves to an IP address which has
+	// the characteristics of a NodeInternalIP. The IP it resolves to may or may not
+	// be a listed NodeInternalIP address.
+	NodeInternalDNS NodeAddressType = "InternalDNS"
+
+	// NodeExternalDNS identifies a DNS name which resolves to an IP address which has
+	// the characteristics of a NodeExternalIP. The IP it resolves to may or may not
+	// be a listed NodeExternalIP address.
+	NodeExternalDNS NodeAddressType = "ExternalDNS"
+)
+
+// NodeAddress contains information for the node's address.
+type NodeAddress struct {
+	// Node address type, one of Hostname, ExternalIP or InternalIP.
+	Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"`
+	// The node address.
+	Address string `json:"address" protobuf:"bytes,2,opt,name=address"`
+}
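+
+// A minimal lookup sketch (hypothetical helper, not part of the upstream
+// API): the address list may contain several entries per type, so callers
+// usually take the first match for the type they care about, e.g.
+// firstNodeAddress(node.Status.Addresses, NodeInternalIP).
+func firstNodeAddress(addrs []NodeAddress, t NodeAddressType) (string, bool) {
+	for _, a := range addrs {
+		if a.Type == t {
+			return a.Address, true
+		}
+	}
+	return "", false
+}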
+
+// ResourceName is the name identifying various resources in a ResourceList.
+type ResourceName string
+
+// Resource names must be no more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
+// with the -, _, and . characters allowed anywhere, except the first or last character.
+// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
+// camel case, separating compound words.
+// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
+const (
+	// CPU, in cores. (500m = .5 cores)
+	ResourceCPU ResourceName = "cpu"
+	// Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceMemory ResourceName = "memory"
+	// Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
+	ResourceStorage ResourceName = "storage"
+	// Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceEphemeralStorage ResourceName = "ephemeral-storage"
+)
+
+const (
+	// Default namespace prefix.
+	ResourceDefaultNamespacePrefix = "kubernetes.io/"
+	// Name prefix for huge page resources (alpha).
+	ResourceHugePagesPrefix = "hugepages-"
+	// Name prefix for storage resource limits
+	ResourceAttachableVolumesPrefix = "attachable-volumes-"
+)
+
+// ResourceList is a set of (resource name, quantity) pairs.
+type ResourceList map[ResourceName]resource.Quantity
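+
+// An illustrative ResourceList literal (values are hypothetical): quantities
+// use the canonical suffix forms parsed by resource.MustParse, so "3500m" is
+// 3.5 cores and "7Gi" is 7 * 1024 * 1024 * 1024 bytes.
+var exampleAllocatable = ResourceList{
+	ResourceCPU:              resource.MustParse("3500m"),
+	ResourceMemory:           resource.MustParse("7Gi"),
+	ResourceEphemeralStorage: resource.MustParse("80Gi"),
+}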
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// Node is a worker node in Kubernetes.
+// Each node will have a unique identifier in the cache (i.e. in etcd).
+type Node struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the behavior of a node.
+	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Most recently observed status of the node.
+	// Populated by the system.
+	// Read-only.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// NodeList is the whole list of all Nodes which have been registered with the master.
+type NodeList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of nodes
+	Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// FinalizerName is the name identifying a finalizer during namespace lifecycle.
+type FinalizerName string
+
+// These are finalizer values internal to Kubernetes; they must be qualified names unless defined here or
+// in metav1.
+const (
+	FinalizerKubernetes FinalizerName = "kubernetes"
+)
+
+// NamespaceSpec describes the attributes on a Namespace.
+type NamespaceSpec struct {
+	// Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
+	// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
+	// +optional
+	// +listType=atomic
+	Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"`
+}
+
+// NamespaceStatus is information about the current status of a Namespace.
+type NamespaceStatus struct {
+	// Phase is the current lifecycle phase of the namespace.
+	// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
+	// +optional
+	Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"`
+
+	// Represents the latest available observations of a namespace's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+}
+
+// +enum
+type NamespacePhase string
+
+// These are the valid phases of a namespace.
+const (
+	// NamespaceActive means the namespace is available for use in the system
+	NamespaceActive NamespacePhase = "Active"
+	// NamespaceTerminating means the namespace is undergoing graceful termination
+	NamespaceTerminating NamespacePhase = "Terminating"
+)
+
+const (
+	// NamespaceTerminatingCause is returned as a defaults.cause item when a change is
+	// forbidden due to the namespace being terminated.
+	NamespaceTerminatingCause metav1.CauseType = "NamespaceTerminating"
+)
+
+type NamespaceConditionType string
+
+// These are built-in conditions of a namespace.
+const (
+	// NamespaceDeletionDiscoveryFailure contains information about namespace deleter errors during resource discovery.
+	NamespaceDeletionDiscoveryFailure NamespaceConditionType = "NamespaceDeletionDiscoveryFailure"
+	// NamespaceDeletionContentFailure contains information about namespace deleter errors during deletion of resources.
+	NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure"
+	// NamespaceDeletionGVParsingFailure contains information about namespace deleter errors parsing GV for legacy types.
+	NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure"
+	// NamespaceContentRemaining contains information about resources remaining in a namespace.
+	NamespaceContentRemaining NamespaceConditionType = "NamespaceContentRemaining"
+	// NamespaceFinalizersRemaining contains information about which finalizers are on resources remaining in a namespace.
+	NamespaceFinalizersRemaining NamespaceConditionType = "NamespaceFinalizersRemaining"
+)
+
+// NamespaceCondition contains details about state of namespace.
+type NamespaceCondition struct {
+	// Type of namespace controller condition.
+	Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// Unique, one-word, CamelCase reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// Human-readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:skipVerbs=deleteCollection
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// Namespace provides a scope for Names.
+// Use of multiple namespaces is optional.
+type Namespace struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the behavior of the Namespace.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status describes the current status of a Namespace.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
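+
+// An illustrative Namespace literal (the name is hypothetical): the
+// kubernetes finalizer is what keeps a namespace in the Terminating phase
+// until the namespace controller has emptied it.
+var exampleNamespace = Namespace{
+	ObjectMeta: metav1.ObjectMeta{Name: "staging"},
+	Spec: NamespaceSpec{
+		Finalizers: []FinalizerName{FinalizerKubernetes},
+	},
+}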
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// NamespaceList is a list of Namespaces.
+type NamespaceList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of Namespace objects in the list.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+	Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
+type Binding struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The target object that you want to bind to the standard object.
+	Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"`
+}
+
+// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
+// +k8s:openapi-gen=false
+type Preconditions struct {
+	// Specifies the target UID.
+	// +optional
+	UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+}
+
+const (
+	// LogStreamStdout is the stream type for stdout.
+	LogStreamStdout = "Stdout"
+	// LogStreamStderr is the stream type for stderr.
+	LogStreamStderr = "Stderr"
+	// LogStreamAll represents the combined stdout and stderr.
+	LogStreamAll = "All"
+)
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// PodLogOptions is the query options for a Pod's logs REST call.
+type PodLogOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// The container for which to stream logs. Defaults to the only container if there is one container in the pod.
+	// +optional
+	Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
+	// Follow the log stream of the pod. Defaults to false.
+	// +optional
+	Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
+	// Return previous terminated container logs. Defaults to false.
+	// +optional
+	Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
+	// A relative time in seconds before the current time from which to show logs. If this value
+	// precedes the time a pod was started, only logs since the pod start will be returned.
+	// If this value is in the future, no logs will be returned.
+	// Only one of sinceSeconds or sinceTime may be specified.
+	// +optional
+	SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
+	// An RFC3339 timestamp from which to show logs. If this value
+	// precedes the time a pod was started, only logs since the pod start will be returned.
+	// If this value is in the future, no logs will be returned.
+	// Only one of sinceSeconds or sinceTime may be specified.
+	// +optional
+	SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
+	// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+	// of log output. Defaults to false.
+	// +optional
+	Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
+	// If set, the number of lines from the end of the logs to show. If not specified,
+	// logs are shown from the creation of the container or sinceSeconds or sinceTime.
+	// Note that when "TailLines" is specified, "Stream" can only be set to nil or "All".
+	// +optional
+	TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
+	// If set, the number of bytes to read from the server before terminating the
+	// log output. This may not display a complete final line of logging, and may return
+	// slightly more or slightly less than the specified limit.
+	// +optional
+	LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
+
+	// insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
+	// serving certificate of the backend it is connecting to.  This will make the HTTPS connection between the apiserver
+	// and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
+	// kubelet.  If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the
+	// connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept
+	// the actual log data coming from the real kubelet).
+	// +optional
+	InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"`
+
+	// Specify which container log stream to return to the client.
+	// Acceptable values are "All", "Stdout" and "Stderr". If not specified, "All" is used, and both stdout and stderr
+	// are returned interleaved.
+	// Note that when "TailLines" is specified, "Stream" can only be set to nil or "All".
+	// +featureGate=PodLogsQuerySplitStreams
+	// +optional
+	Stream *string `json:"stream,omitempty" protobuf:"varint,10,opt,name=stream"`
+}
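+
+// An illustrative PodLogOptions literal (names and values are hypothetical):
+// TailLines is a pointer so that "unset" and "0" remain distinguishable, and
+// per the field docs only one of SinceSeconds or SinceTime may be set.
+var exampleLogTail int64 = 100
+
+var exampleLogOptions = PodLogOptions{
+	Container:  "app",
+	Timestamps: true,
+	TailLines:  &exampleLogTail,
+}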
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.1
+
+// PodAttachOptions is the query options to a Pod's remote attach call.
+// ---
+// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
+// and also when we cut V2, we should export a "StreamOptions" or some such that contains Stdin, Stdout, Stderr and TTY
+type PodAttachOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Stdin, if true, redirects the standard input stream of the pod for this call.
+	// Defaults to false.
+	// +optional
+	Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
+
+	// Stdout if true indicates that stdout is to be redirected for the attach call.
+	// Defaults to true.
+	// +optional
+	Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
+
+	// Stderr if true indicates that stderr is to be redirected for the attach call.
+	// Defaults to true.
+	// +optional
+	Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
+
+	// TTY if true indicates that a tty will be allocated for the attach call.
+	// This is passed through the container runtime so the tty
+	// is allocated on the worker node by the container runtime.
+	// Defaults to false.
+	// +optional
+	TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
+
+	// The container in which to execute the command.
+	// Defaults to the only container if there is only one container in the pod.
+	// +optional
+	Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// PodExecOptions is the query options to a Pod's remote exec call.
+// ---
+// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY
+type PodExecOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Redirect the standard input stream of the pod for this call.
+	// Defaults to false.
+	// +optional
+	Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
+
+	// Redirect the standard output stream of the pod for this call.
+	// +optional
+	Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
+
+	// Redirect the standard error stream of the pod for this call.
+	// +optional
+	Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
+
+	// TTY if true indicates that a tty will be allocated for the exec call.
+	// Defaults to false.
+	// +optional
+	TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
+
+	// Container in which to execute the command.
+	// Defaults to the only container if there is only one container in the pod.
+	// +optional
+	Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
+
+	// Command is the remote command to execute. argv array. Not executed within a shell.
+	// +listType=atomic
+	Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
+}
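+
+// An illustrative PodExecOptions literal (names are hypothetical): Command is
+// an argv array executed without a shell, so shell syntax must be spelled out
+// explicitly via an interpreter.
+var exampleExecOptions = PodExecOptions{
+	Container: "app",
+	Command:   []string{"/bin/sh", "-c", "echo hello"},
+	Stdout:    true,
+	Stderr:    true,
+}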
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.6
+
+// PodPortForwardOptions is the query options to a Pod's port forward call
+// when using WebSockets.
+// The `port` query parameter must specify the port or
+// ports (comma separated) to forward over.
+// Port forwarding over SPDY does not use these options. It requires the port
+// to be passed in the `port` header as part of the request.
+type PodPortForwardOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// List of ports to forward
+	// Required when using WebSockets
+	// +optional
+	// +listType=atomic
+	Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// PodProxyOptions is the query options to a Pod's proxy call.
+type PodProxyOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Path is the URL path to use for the current proxy request to pod.
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.2
+
+// NodeProxyOptions is the query options to a Node's proxy call.
+type NodeProxyOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Path is the URL path to use for the current proxy request to node.
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+}
+
+// +k8s:conversion-gen:explicit-from=net/url.Values
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.2
+
+// ServiceProxyOptions is the query options to a Service's proxy call.
+type ServiceProxyOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Path is the part of URLs that include service endpoints, suffixes,
+	// and parameters to use for the current proxy request to service.
+	// For example, the whole request URL is
+	// http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
+	// Path is _search?q=user:kimchy.
+	// +optional
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+//  1. Ignored fields.  It includes many fields which are not generally honored.  For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.
+//  2. Invalid usage help.  It is impossible to add specific help for individual usage.  In most embedded usages, there are particular
+//     restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+//     Those cannot be well described when embedded.
+//  3. Inconsistent validation.  Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+//  4. The fields are both imprecise and overly precise.  Kind is not a precise mapping to a URL. This can produce ambiguity
+//     during interpretation and require a REST mapping.  In most cases, the dependency is on the group,resource tuple
+//     and the version of the actual struct is irrelevant.
+//  5. We cannot easily change it.  Because this type is embedded in many locations, updates to this type
+//     will affect numerous schemas.  Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +structType=atomic
+type ObjectReference struct {
+	// Kind of the referent.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
+	// Namespace of the referent.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
+	// Name of the referent.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
+	// UID of the referent.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+	// API version of the referent.
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"`
+	// Specific resourceVersion to which this reference is made, if any.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
+
+	// If referring to a piece of an object instead of an entire object, this string
+	// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
+	// For example, if the object reference is to a container within a pod, this would take on a value like:
+	// "spec.containers{name}" (where "name" refers to the name of the container that triggered
+	// the event) or if no container name is specified "spec.containers[2]" (container with
+	// index 2 in this pod). This syntax is chosen only to have some well-defined way of
+	// referencing a part of an object.
+	// TODO: this design is not final and this field is subject to change in the future.
+	// +optional
+	FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"`
+}
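+
+// An illustrative ObjectReference literal (names are hypothetical): per the
+// caveats above, most consumers honor only the group/version, kind,
+// namespace, and name fields; FieldPath is shown in the event-style container
+// form.
+var examplePodReference = ObjectReference{
+	APIVersion: "v1",
+	Kind:       "Pod",
+	Namespace:  "default",
+	Name:       "web-0",
+	FieldPath:  "spec.containers{app}",
+}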
+
+// LocalObjectReference contains enough information to let you locate the
+// referenced object inside the same namespace.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+//  1. Invalid usage help.  It is impossible to add specific help for individual usage.  In most embedded usages, there are particular
+//     restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+//     Those cannot be well described when embedded.
+//  2. Inconsistent validation.  Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+//  3. We cannot easily change it.  Because this type is embedded in many locations, updates to this type
+//     will affect numerous schemas.  Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
+// +structType=atomic
+type LocalObjectReference struct {
+	// Name of the referent.
+	// This field is effectively required, but due to backwards compatibility is
+	// allowed to be empty. Instances of this type with an empty value here are
+	// almost certainly wrong.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+	// +optional
+	// +default=""
+	// +kubebuilder:default=""
+	// TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896.
+	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+}
+
+// TypedLocalObjectReference contains enough information to let you locate the
+// typed referenced object inside the same namespace.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+//  1. Invalid usage help.  It is impossible to add specific help for individual usage.  In most embedded usages, there are particular
+//     restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+//     Those cannot be well described when embedded.
+//  2. Inconsistent validation.  Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+//  3. The fields are both imprecise and overly precise.  Kind is not a precise mapping to a URL. This can produce ambiguity
+//     during interpretation and require a REST mapping.  In most cases, the dependency is on the group,resource tuple
+//     and the version of the actual struct is irrelevant.
+//  4. We cannot easily change it.  Because this type is embedded in many locations, updates to this type
+//     will affect numerous schemas.  Don't make new APIs embed an underspecified API type they do not control.
+//
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
+// +structType=atomic
+type TypedLocalObjectReference struct {
+	// APIGroup is the group for the resource being referenced.
+	// If APIGroup is not specified, the specified Kind must be in the core API group.
+	// For any other third-party types, APIGroup is required.
+	// +optional
+	APIGroup *string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
+	// Kind is the type of resource being referenced
+	Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+	// Name is the name of resource being referenced
+	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+}
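+
+// An illustrative TypedLocalObjectReference literal (names are hypothetical),
+// in the shape used by PersistentVolumeClaim dataSource references: APIGroup
+// is a pointer precisely so that the core API group can be selected by
+// leaving it nil.
+var exampleSnapshotGroup = "snapshot.storage.k8s.io"
+
+var exampleDataSource = TypedLocalObjectReference{
+	APIGroup: &exampleSnapshotGroup,
+	Kind:     "VolumeSnapshot",
+	Name:     "nightly-backup",
+}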
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// SerializedReference is a reference to serialized object.
+type SerializedReference struct {
+	metav1.TypeMeta `json:",inline"`
+	// The reference to an object in the system.
+	// +optional
+	Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
+}
+
+// EventSource contains information for an event.
+type EventSource struct {
+	// Component from which the event is generated.
+	// +optional
+	Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
+	// Node name on which the event is generated.
+	// +optional
+	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
+}
+
+// Valid values for event types (new types could be added in the future)
+const (
+	// Information only and will not cause any problems
+	EventTypeNormal string = "Normal"
+	// These events are to warn that something might go wrong
+	EventTypeWarning string = "Warning"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// Event is a report of an event somewhere in the cluster.  Events
+// have a limited retention time and triggers and messages may evolve
+// with time.  Event consumers should not rely on the timing of an event
+// with a given Reason reflecting a consistent underlying trigger, or the
+// continued existence of events with that Reason.  Events should be
+// treated as informative, best-effort, supplemental data.
+type Event struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+
+	// The object that this event is about.
+	InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"`
+
+	// This should be a short, machine understandable string that gives the reason
+	// for the transition into the object's current status.
+	// TODO: provide exact specification for format.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+
+	// A human-readable description of the status of this operation.
+	// TODO: decide on maximum length.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+
+	// The component reporting this event. Should be a short machine understandable string.
+	// +optional
+	Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"`
+
+	// The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
+	// +optional
+	FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"`
+
+	// The time at which the most recent occurrence of this event was recorded.
+	// +optional
+	LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"`
+
+	// The number of times this event has occurred.
+	// +optional
+	Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"`
+
+	// Type of this event (Normal, Warning), new types could be added in the future
+	// +optional
+	Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"`
+
+	// Time when this Event was first observed.
+	// +optional
+	EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"`
+
+	// Data about the Event series this event represents or nil if it's a singleton Event.
+	// +optional
+	Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"`
+
+	// What action was taken/failed regarding the Regarding object.
+	// +optional
+	Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"`
+
+	// Optional secondary object for more complex actions.
+	// +optional
+	Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"`
+
+	// Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+	// +optional
+	ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"`
+
+	// ID of the controller instance, e.g. `kubelet-xyzf`.
+	// +optional
+	ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"`
+}
+
+// EventSeries contains information on a series of events, i.e. a thing that was/is happening
+// continuously for some time.
+type EventSeries struct {
+	// Number of occurrences in this series up to the last heartbeat time
+	Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"`
+	// Time of the last occurrence observed
+	LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"`
+
+	// +k8s:deprecated=state,protobuf=3
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// EventList is a list of events.
+type EventList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of events
+	Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// List holds a list of objects, which may not be known by the server.
+type List metav1.List
+
+// LimitType is a type of object that is limited. It can be Pod, Container, PersistentVolumeClaim or
+// a fully qualified resource name.
+type LimitType string
+
+const (
+	// Limit that applies to all pods in a namespace
+	LimitTypePod LimitType = "Pod"
+	// Limit that applies to all containers in a namespace
+	LimitTypeContainer LimitType = "Container"
+	// Limit that applies to all persistent volume claims in a namespace
+	LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
+)
+
+// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
+type LimitRangeItem struct {
+	// Type of resource that this limit applies to.
+	Type LimitType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
+	// Max usage constraints on this kind by resource name.
+	// +optional
+	Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"`
+	// Min usage constraints on this kind by resource name.
+	// +optional
+	Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"`
+	// Default resource requirement limit value by resource name if resource limit is omitted.
+	// +optional
+	Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"`
+	// DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
+	// +optional
+	DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"`
+	// MaxLimitRequestRatio, if specified, requires that the named resource have a request and limit that are both non-zero, where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
+	// +optional
+	MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"`
+}
+
+// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
+type LimitRangeSpec struct {
+	// Limits is the list of LimitRangeItem objects that are enforced.
+	// +listType=atomic
+	Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"`
+}
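+
+// An illustrative LimitRangeSpec literal (values are hypothetical):
+// DefaultRequest and Default are injected into containers that omit requests
+// or limits, while Max is a hard admission-time ceiling.
+var exampleContainerLimits = LimitRangeSpec{
+	Limits: []LimitRangeItem{{
+		Type:           LimitTypeContainer,
+		DefaultRequest: ResourceList{ResourceCPU: resource.MustParse("100m")},
+		Default:        ResourceList{ResourceCPU: resource.MustParse("500m")},
+		Max:            ResourceList{ResourceCPU: resource.MustParse("2")},
+	}},
+}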
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// LimitRange sets resource usage limits for each kind of resource in a Namespace.
+type LimitRange struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the limits enforced.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// LimitRangeList is a list of LimitRange items.
+type LimitRangeList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of LimitRange objects.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// The following identify resource constants for Kubernetes object types
+const (
+	// Pods, number
+	ResourcePods ResourceName = "pods"
+	// Services, number
+	ResourceServices ResourceName = "services"
+	// ReplicationControllers, number
+	ResourceReplicationControllers ResourceName = "replicationcontrollers"
+	// ResourceQuotas, number
+	ResourceQuotas ResourceName = "resourcequotas"
+	// ResourceSecrets, number
+	ResourceSecrets ResourceName = "secrets"
+	// ResourceConfigMaps, number
+	ResourceConfigMaps ResourceName = "configmaps"
+	// ResourcePersistentVolumeClaims, number
+	ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
+	// ResourceServicesNodePorts, number
+	ResourceServicesNodePorts ResourceName = "services.nodeports"
+	// ResourceServicesLoadBalancers, number
+	ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
+	// CPU request, in cores. (500m = .5 cores)
+	ResourceRequestsCPU ResourceName = "requests.cpu"
+	// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceRequestsMemory ResourceName = "requests.memory"
+	// Storage request, in bytes
+	ResourceRequestsStorage ResourceName = "requests.storage"
+	// Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage"
+	// CPU limit, in cores. (500m = .5 cores)
+	ResourceLimitsCPU ResourceName = "limits.cpu"
+	// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceLimitsMemory ResourceName = "limits.memory"
+	// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
+	// resource.k8s.io devices requested with a certain DeviceClass, number
+	ResourceClaimsPerClass string = ".deviceclass.resource.k8s.io/devices"
+)
+
+// The following identify resource prefixes for Kubernetes object types
+const (
+	// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
+	// As burst is not supported for HugePages, only the request is counted against the quota; the limit is ignored.
+	ResourceRequestsHugePagesPrefix = "requests.hugepages-"
+	// Default resource requests prefix
+	DefaultResourceRequestsPrefix = "requests."
+)
+
+// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
+// +enum
+type ResourceQuotaScope string
+
+const (
+	// Match all pod objects where spec.activeDeadlineSeconds >=0
+	ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
+	// Match all pod objects where spec.activeDeadlineSeconds is nil
+	ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
+	// Match all pod objects that have best effort quality of service
+	ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
+	// Match all pod objects that do not have best effort quality of service
+	ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
+	// Match all pod objects that have a priority class mentioned
+	ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass"
+	// Match all pod objects that have cross-namespace pod (anti)affinity mentioned.
+	ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity"
+)
+
+// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
+type ResourceQuotaSpec struct {
+	// hard is the set of desired hard limits for each named resource.
+	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+	// +optional
+	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
+	// A collection of filters that must match each object tracked by a quota.
+	// If not specified, the quota matches all objects.
+	// +optional
+	// +listType=atomic
+	Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"`
+	// scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
+	// but expressed using ScopeSelectorOperator in combination with possible values.
+	// For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
+	// +optional
+	ScopeSelector *ScopeSelector `json:"scopeSelector,omitempty" protobuf:"bytes,3,opt,name=scopeSelector"`
+}
+
+// A scope selector represents the AND of the selectors represented
+// by the scoped-resource selector requirements.
+// +structType=atomic
+type ScopeSelector struct {
+	// A list of scope selector requirements by scope of the resources.
+	// +optional
+	// +listType=atomic
+	MatchExpressions []ScopedResourceSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
+}
+
+// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
+// that relates the scope name and values.
+type ScopedResourceSelectorRequirement struct {
+	// The name of the scope that the selector applies to.
+	ScopeName ResourceQuotaScope `json:"scopeName" protobuf:"bytes,1,opt,name=scopeName"`
+	// Represents a scope's relationship to a set of values.
+	// Valid operators are In, NotIn, Exists, DoesNotExist.
+	Operator ScopeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=ScopedResourceSelectorOperator"`
+	// An array of string values. If the operator is In or NotIn,
+	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
+	// the values array must be empty.
+	// This array is replaced during a strategic merge patch.
+	// +optional
+	// +listType=atomic
+	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
+}
+
+// A scope selector operator is the set of operators that can be used in
+// a scope selector requirement.
+// +enum
+type ScopeSelectorOperator string
+
+const (
+	ScopeSelectorOpIn           ScopeSelectorOperator = "In"
+	ScopeSelectorOpNotIn        ScopeSelectorOperator = "NotIn"
+	ScopeSelectorOpExists       ScopeSelectorOperator = "Exists"
+	ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist"
+)
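+
+// An illustrative ResourceQuotaSpec literal (values are hypothetical) tying
+// the pieces above together: Hard caps object counts and compute requests,
+// and ScopeSelector narrows the quota to pods whose priority class is in the
+// given set.
+var examplePriorityQuota = ResourceQuotaSpec{
+	Hard: ResourceList{
+		ResourcePods:        resource.MustParse("10"),
+		ResourceRequestsCPU: resource.MustParse("4"),
+	},
+	ScopeSelector: &ScopeSelector{
+		MatchExpressions: []ScopedResourceSelectorRequirement{{
+			ScopeName: ResourceQuotaScopePriorityClass,
+			Operator:  ScopeSelectorOpIn,
+			Values:    []string{"high"},
+		}},
+	},
+}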
+
+// ResourceQuotaStatus defines the enforced hard limits and observed use.
+type ResourceQuotaStatus struct {
+	// Hard is the set of enforced hard limits for each named resource.
+	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+	// +optional
+	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
+	// Used is the current observed total usage of the resource in the namespace.
+	// +optional
+	Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// ResourceQuota sets aggregate quota restrictions enforced per namespace
+type ResourceQuota struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec defines the desired quota.
+	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status defines the actual enforced quota and its current usage.
+	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
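+
+// For illustration only: a ResourceQuota capping the pod count in a namespace,
+// assuming the metav1 and resource packages already imported by this file;
+// ResourcePods is the standard "pods" quota resource name.
+//
+//	rq := ResourceQuota{
+//		ObjectMeta: metav1.ObjectMeta{Name: "pod-cap", Namespace: "team-a"},
+//		Spec: ResourceQuotaSpec{
+//			Hard: ResourceList{ResourcePods: resource.MustParse("10")},
+//		},
+//	}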
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// ResourceQuotaList is a list of ResourceQuota items.
+type ResourceQuotaList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of ResourceQuota objects.
+	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
+	Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// Secret holds secret data of a certain type. The total bytes of the values in
+// the Data field must be less than MaxSecretSize bytes.
+type Secret struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Immutable, if set to true, ensures that data stored in the Secret cannot
+	// be updated (only object metadata can be modified).
+	// If not set to true, the field can be modified at any time.
+	// Defaulted to nil.
+	// +optional
+	Immutable *bool `json:"immutable,omitempty" protobuf:"varint,5,opt,name=immutable"`
+
+	// Data contains the secret data. Each key must consist of alphanumeric
+	// characters, '-', '_' or '.'. The serialized form of the secret data is a
+	// base64 encoded string, representing the arbitrary (possibly non-string)
+	// data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
+	// +optional
+	Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
+
+	// stringData allows specifying non-binary secret data in string form.
+	// It is provided as a write-only input field for convenience.
+	// All keys and values are merged into the data field on write, overwriting any existing values.
+	// The stringData field is never output when reading from the API.
+	// +k8s:conversion-gen=false
+	// +optional
+	StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"`
+
+	// Used to facilitate programmatic handling of secret data.
+	// More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types
+	// +optional
+	Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"`
+}
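+
+// A brief usage sketch: stringData is write-only convenience input that the API
+// server merges into data (base64-encoded on the wire), overwriting duplicates,
+// so on write the stringData entry below wins over the data entry.
+//
+//	s := Secret{
+//		ObjectMeta: metav1.ObjectMeta{Name: "creds"},
+//		Type:       SecretTypeOpaque,
+//		Data:       map[string][]byte{"password": []byte("old")},
+//		StringData: map[string]string{"password": "new"}, // replaces Data["password"] on write
+//	}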
+
+const MaxSecretSize = 1 * 1024 * 1024
+
+type SecretType string
+
+const (
+	// SecretTypeOpaque is the default. It holds arbitrary user-defined data.
+	SecretTypeOpaque SecretType = "Opaque"
+
+	// SecretTypeServiceAccountToken contains a token that identifies a service account to the API
+	//
+	// Required fields:
+	// - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
+	// - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
+	// - Secret.Data["token"] - a token that identifies the service account to the API
+	SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
+
+	// ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
+	ServiceAccountNameKey = "kubernetes.io/service-account.name"
+	// ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
+	ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
+	// ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
+	ServiceAccountTokenKey = "token"
+	// ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
+	ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
+	// ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
+	ServiceAccountRootCAKey = "ca.crt"
+	// ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
+	ServiceAccountNamespaceKey = "namespace"
+
+	// SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
+	//
+	// Required fields:
+	// - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
+	SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
+
+	// DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
+	DockerConfigKey = ".dockercfg"
+
+	// SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
+	//
+	// Required fields:
+	// - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
+	SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
+
+	// DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
+	DockerConfigJsonKey = ".dockerconfigjson"
+
+	// SecretTypeBasicAuth contains data needed for basic authentication.
+	//
+	// At least one of the following fields is required:
+	// - Secret.Data["username"] - username used for authentication
+	// - Secret.Data["password"] - password or token needed for authentication
+	SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
+
+	// BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
+	BasicAuthUsernameKey = "username"
+	// BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
+	BasicAuthPasswordKey = "password"
+
+	// SecretTypeSSHAuth contains data needed for SSH authentication.
+	//
+	// Required field:
+	// - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
+	SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth"
+
+	// SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets
+	SSHAuthPrivateKey = "ssh-privatekey"
+	// SecretTypeTLS contains information about a TLS client or server secret. It
+	// is primarily used with TLS termination of the Ingress resource, but may be
+	// used in other types.
+	//
+	// Required fields:
+	// - Secret.Data["tls.key"] - TLS private key.
+	//   Secret.Data["tls.crt"] - TLS certificate.
+	// TODO: Consider supporting different formats, specifying CA/destinationCA.
+	SecretTypeTLS SecretType = "kubernetes.io/tls"
+
+	// TLSCertKey is the key for tls certificates in a TLS secret.
+	TLSCertKey = "tls.crt"
+	// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
+	TLSPrivateKeyKey = "tls.key"
+	// SecretTypeBootstrapToken is used during the automated bootstrap process (first
+	// implemented by kubeadm). It stores tokens that are used to sign well known
+	// ConfigMaps. They are used for authn.
+	SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token"
+)
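+
+// Sketch of a kubernetes.io/tls Secret using the key constants above; certPEM
+// and keyPEM are placeholder []byte values holding PEM-encoded material.
+//
+//	tlsSecret := Secret{
+//		Type: SecretTypeTLS,
+//		Data: map[string][]byte{
+//			TLSCertKey:       certPEM, // "tls.crt"
+//			TLSPrivateKeyKey: keyPEM,  // "tls.key"
+//		},
+//	}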
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// SecretList is a list of Secret.
+type SecretList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is a list of secret objects.
+	// More info: https://kubernetes.io/docs/concepts/configuration/secret
+	Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.2
+
+// ConfigMap holds configuration data for pods to consume.
+type ConfigMap struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Immutable, if set to true, ensures that data stored in the ConfigMap cannot
+	// be updated (only object metadata can be modified).
+	// If not set to true, the field can be modified at any time.
+	// Defaulted to nil.
+	// +optional
+	Immutable *bool `json:"immutable,omitempty" protobuf:"varint,4,opt,name=immutable"`
+
+	// Data contains the configuration data.
+	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
+	// Values with non-UTF-8 byte sequences must use the BinaryData field.
+	// The keys stored in Data must not overlap with the keys in
+	// the BinaryData field, this is enforced during validation process.
+	// +optional
+	Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
+
+	// BinaryData contains the binary data.
+	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
+	// BinaryData can contain byte sequences that are not in the UTF-8 range.
+	// The keys stored in BinaryData must not overlap with the ones in
+	// the Data field, this is enforced during validation process.
+	// Using this field will require 1.10+ apiserver and
+	// kubelet.
+	// +optional
+	BinaryData map[string][]byte `json:"binaryData,omitempty" protobuf:"bytes,3,rep,name=binaryData"`
+}
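+
+// Illustrative only: Data holds UTF-8 string values while BinaryData holds
+// arbitrary bytes, and a key may appear in at most one of the two maps;
+// iconBytes is a placeholder []byte value.
+//
+//	cm := ConfigMap{
+//		ObjectMeta: metav1.ObjectMeta{Name: "app-config"},
+//		Data:       map[string]string{"log.level": "debug"},
+//		BinaryData: map[string][]byte{"favicon.ico": iconBytes}, // key must not also appear in Data
+//	}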
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.2
+
+// ConfigMapList is a resource containing a list of ConfigMap objects.
+type ConfigMapList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of ConfigMaps.
+	Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// Type and constants for component health validation.
+type ComponentConditionType string
+
+// These are the valid conditions for the component.
+const (
+	ComponentHealthy ComponentConditionType = "Healthy"
+)
+
+// Information about the condition of a component.
+type ComponentCondition struct {
+	// Type of condition for a component.
+	// Valid value: "Healthy"
+	Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"`
+	// Status of the condition for a component.
+	// Valid values for "Healthy": "True", "False", or "Unknown".
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// Message about the condition for a component.
+	// For example, information about a health check.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
+	// Condition error code for a component.
+	// For example, a health check error code.
+	// +optional
+	Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
+// Deprecated: This API is deprecated in v1.19+
+type ComponentStatus struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of component conditions observed
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// Status of all the conditions for the component as a list of ComponentStatus objects.
+// Deprecated: This API is deprecated in v1.19+
+type ComponentStatusList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// List of ComponentStatus objects.
+	Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// DownwardAPIVolumeSource represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
+type DownwardAPIVolumeSource struct {
+	// Items is a list of downward API volume file
+	// +optional
+	// +listType=atomic
+	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
+	// Optional: mode bits used to set permissions on created files by default.
+	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+	// Defaults to 0644.
+	// Directories within the path are not affected by this setting.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
+}
+
+const (
+	DownwardAPIVolumeSourceDefaultMode int32 = 0644
+)
+
+// DownwardAPIVolumeFile represents information to create the file containing the pod field
+type DownwardAPIVolumeFile struct {
+	// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be UTF-8 encoded. The first item of the relative path must not start with '..'.
+	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
+	// Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.
+	// +optional
+	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"`
+	// Selects a resource of the container: only resources limits and requests
+	// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+	// +optional
+	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"`
+	// Optional: mode bits used to set permissions on this file, must be an octal value
+	// between 0000 and 0777 or a decimal value between 0 and 511.
+	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+	// If not specified, the volume defaultMode will be used.
+	// This might be in conflict with other options that affect the file
+	// mode, like fsGroup, and the result can be other mode bits set.
+	// +optional
+	Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"`
+}
+
+// Represents downward API info for projecting into a projected volume.
+// Note that this is identical to a downwardAPI volume source without the default
+// mode.
+type DownwardAPIProjection struct {
+	// Items is a list of DownwardAPIVolume file
+	// +optional
+	// +listType=atomic
+	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
+}
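+
+// A sketch (illustrative assumptions only) of projecting pod labels into a
+// file via the downward API; "metadata.labels" is one of the supported pod
+// fields, and modePtr is an assumed *int32 pointing at int32(0444).
+//
+//	src := DownwardAPIVolumeSource{
+//		Items: []DownwardAPIVolumeFile{{
+//			Path:     "labels",
+//			FieldRef: &ObjectFieldSelector{FieldPath: "metadata.labels"},
+//			Mode:     modePtr,
+//		}},
+//	}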
+
+// SecurityContext holds security configuration that will be applied to a container.
+// Some fields are present in both SecurityContext and PodSecurityContext.  When both
+// are set, the values in SecurityContext take precedence.
+type SecurityContext struct {
+	// The capabilities to add/drop when running containers.
+	// Defaults to the default set of capabilities granted by the container runtime.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"`
+	// Run container in privileged mode.
+	// Processes in privileged containers are essentially equivalent to root on the host.
+	// Defaults to false.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"`
+	// The SELinux context to be applied to the container.
+	// If unspecified, the container runtime will allocate a random SELinux context for each
+	// container.  May also be set in PodSecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"`
+	// The Windows specific settings applied to all containers.
+	// If unspecified, the options from the PodSecurityContext will be used.
+	// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// Note that this field cannot be set when spec.os.name is linux.
+	// +optional
+	WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"`
+	// The UID to run the entrypoint of the container process.
+	// Defaults to user specified in image metadata if unspecified.
+	// May also be set in PodSecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"`
+	// The GID to run the entrypoint of the container process.
+	// Uses runtime default if unset.
+	// May also be set in PodSecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,8,opt,name=runAsGroup"`
+	// Indicates that the container must run as a non-root user.
+	// If true, the Kubelet will validate the image at runtime to ensure that it
+	// does not run as UID 0 (root) and fail to start the container if it does.
+	// If unset or false, no such validation will be performed.
+	// May also be set in PodSecurityContext.  If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"`
+	// Whether this container has a read-only root filesystem.
+	// Default is false.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"`
+	// AllowPrivilegeEscalation controls whether a process can gain more
+	// privileges than its parent process. This bool directly controls if
+	// the no_new_privs flag will be set on the container process.
+	// AllowPrivilegeEscalation is always true when the container:
+	// 1) is run as Privileged, or
+	// 2) has CAP_SYS_ADMIN.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
+	// procMount denotes the type of proc mount to use for the containers.
+	// The default value is Default, which uses the container runtime defaults for
+	// readonly paths and masked paths.
+	// This requires the ProcMountType feature flag to be enabled.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	ProcMount *ProcMountType `json:"procMount,omitempty" protobuf:"bytes,9,opt,name=procMount"`
+	// The seccomp options to use by this container. If seccomp options are
+	// provided at both the pod & container level, the container options
+	// override the pod options.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,11,opt,name=seccompProfile"`
+	// appArmorProfile is the AppArmor options to use by this container. If set, this profile
+	// overrides the pod's appArmorProfile.
+	// Note that this field cannot be set when spec.os.name is windows.
+	// +optional
+	AppArmorProfile *AppArmorProfile `json:"appArmorProfile,omitempty" protobuf:"bytes,12,opt,name=appArmorProfile"`
+}
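+
+// Minimal sketch of a restrictive container-level SecurityContext; where the
+// same field is also set in PodSecurityContext, these values take precedence.
+// runAsNonRoot (true), allowEscalation (false), and roRootFS (true) are
+// assumed bool variables whose addresses are taken.
+//
+//	sc := SecurityContext{
+//		RunAsNonRoot:             &runAsNonRoot,
+//		AllowPrivilegeEscalation: &allowEscalation,
+//		ReadOnlyRootFilesystem:   &roRootFS,
+//		Capabilities:             &Capabilities{Drop: []Capability{"ALL"}},
+//	}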
+
+// +enum
+type ProcMountType string
+
+const (
+	// DefaultProcMount uses the container runtime defaults for readonly and masked
+	// paths for /proc.  Most container runtimes mask certain paths in /proc to avoid
+	// accidental security exposure of special devices or information.
+	DefaultProcMount ProcMountType = "Default"
+
+	// UnmaskedProcMount bypasses the default masking behavior of the container
+	// runtime and ensures the newly created /proc for the container stays intact
+	// with no modifications.
+	UnmaskedProcMount ProcMountType = "Unmasked"
+)
+
+// SELinuxOptions are the labels to be applied to the container
+type SELinuxOptions struct {
+	// User is a SELinux user label that applies to the container.
+	// +optional
+	User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"`
+	// Role is a SELinux role label that applies to the container.
+	// +optional
+	Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"`
+	// Type is a SELinux type label that applies to the container.
+	// +optional
+	Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"`
+	// Level is an SELinux level label that applies to the container.
+	// +optional
+	Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"`
+}
+
+// WindowsSecurityContextOptions contain Windows-specific options and credentials.
+type WindowsSecurityContextOptions struct {
+	// GMSACredentialSpecName is the name of the GMSA credential spec to use.
+	// +optional
+	GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"`
+
+	// GMSACredentialSpec is where the GMSA admission webhook
+	// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+	// GMSA credential spec named by the GMSACredentialSpecName field.
+	// +optional
+	GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"`
+
+	// The UserName in Windows to run the entrypoint of the container process.
+	// Defaults to the user specified in image metadata if unspecified.
+	// May also be set in PodSecurityContext. If set in both SecurityContext and
+	// PodSecurityContext, the value specified in SecurityContext takes precedence.
+	// +optional
+	RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"`
+
+	// HostProcess determines if a container should be run as a 'Host Process' container.
+	// All of a Pod's containers must have the same effective HostProcess value
+	// (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+	// In addition, if HostProcess is true then HostNetwork must also be set to true.
+	// +optional
+	HostProcess *bool `json:"hostProcess,omitempty" protobuf:"bytes,4,opt,name=hostProcess"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.0
+
+// RangeAllocation is not a public type.
+type RangeAllocation struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Range is a string that identifies the range represented by 'data'.
+	Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
+	// Data is a bit array containing all allocated addresses in the previous segment.
+	Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
+}
+
+const (
+	// DefaultSchedulerName defines the name of default scheduler.
+	DefaultSchedulerName = "default-scheduler"
+
+	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
+	// corresponding to every RequiredDuringScheduling affinity rule.
+	// When the --hard-pod-affinity-weight scheduler flag is not specified,
+	// DefaultHardPodAffinitySymmetricWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
+	DefaultHardPodAffinitySymmetricWeight int32 = 1
+)
+
+// Sysctl defines a kernel parameter to be set
+type Sysctl struct {
+	// Name of a property to set
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// Value of a property to set
+	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
+}
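+
+// Illustrative note: Sysctl values are applied through the pod security
+// context (PodSecurityContext.Sysctls), for example:
+//
+//	sysctls := []Sysctl{{Name: "net.core.somaxconn", Value: "1024"}}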
+
+const (
+	// Enable stdin for remote command execution
+	ExecStdinParam = "input"
+	// Enable stdout for remote command execution
+	ExecStdoutParam = "output"
+	// Enable stderr for remote command execution
+	ExecStderrParam = "error"
+	// Enable TTY for remote command execution
+	ExecTTYParam = "tty"
+	// Command to run for remote command execution
+	ExecCommandParam = "command"
+
+	// Name of header that specifies stream type
+	StreamType = "streamType"
+	// Value for streamType header for stdin stream
+	StreamTypeStdin = "stdin"
+	// Value for streamType header for stdout stream
+	StreamTypeStdout = "stdout"
+	// Value for streamType header for stderr stream
+	StreamTypeStderr = "stderr"
+	// Value for streamType header for data stream
+	StreamTypeData = "data"
+	// Value for streamType header for error stream
+	StreamTypeError = "error"
+	// Value for streamType header for terminal resize stream
+	StreamTypeResize = "resize"
+
+	// Name of header that specifies the port being forwarded
+	PortHeader = "port"
+	// Name of header that specifies a request ID used to associate the error
+	// and data streams for a single forwarded connection
+	PortForwardRequestIDHeader = "requestID"
+)
+
+const (
+	// MixedProtocolNotSupported error in PortStatus means that the cloud provider
+	// can't publish the port on the load balancer because mixed values of protocols
+	// on the same LoadBalancer type of Service are not supported by the cloud provider.
+	MixedProtocolNotSupported = "MixedProtocolNotSupported"
+)
+
+// PortStatus represents the error condition of a service port
+type PortStatus struct {
+	// Port is the port number of the service port whose status is recorded here.
+	Port int32 `json:"port" protobuf:"varint,1,opt,name=port"`
+	// Protocol is the protocol of the service port whose status is recorded here.
+	// The supported values are: "TCP", "UDP", "SCTP".
+	Protocol Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
+	// Error is to record the problem with the service port.
+	// The format of the error shall comply with the following rules:
+	// - built-in error values shall be specified in this file and those shall use
+	//   CamelCase names
+	// - cloud provider specific error values must have names that comply with the
+	//   format foo.example.com/CamelCase.
+	// ---
+	// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+	// +optional
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
+	// +kubebuilder:validation:MaxLength=316
+	Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"`
+}
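+
+// Sketch of a PortStatus recording a provider-specific error that follows the
+// (dns1123Subdomain/)?CamelCase format described above; the domain is a
+// placeholder.
+//
+//	errVal := "provider.example.com/UnsupportedProtocol"
+//	ps := PortStatus{Port: 443, Protocol: ProtocolSCTP, Error: &errVal}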
+
+// LoadBalancerIPMode represents the mode of the LoadBalancer ingress IP
+type LoadBalancerIPMode string
+
+const (
+	// LoadBalancerIPModeVIP indicates that traffic is delivered to the node with
+	// the destination set to the load-balancer's IP and port.
+	LoadBalancerIPModeVIP LoadBalancerIPMode = "VIP"
+	// LoadBalancerIPModeProxy indicates that traffic is delivered to the node or pod with
+	// the destination set to the node's IP and port or the pod's IP and port.
+	LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy"
+)
+
+// ImageVolumeSource represents an image volume resource.
+type ImageVolumeSource struct {
+	// Required: Image or artifact reference to be used.
+	// Behaves in the same way as pod.spec.containers[*].image.
+	// Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets.
+	// More info: https://kubernetes.io/docs/concepts/containers/images
+	// This field is optional to allow higher level config management to default or override
+	// container images in workload controllers like Deployments and StatefulSets.
+	// +optional
+	Reference string `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
+
+	// Policy for pulling OCI objects. Possible values are:
+	// Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails.
+	// Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present.
+	// IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.
+	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+	// +optional
+	PullPolicy PullPolicy `json:"pullPolicy,omitempty" protobuf:"bytes,2,opt,name=pullPolicy,casttype=PullPolicy"`
+}
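+
+// Illustrative sketch of an ImageVolumeSource pulling an OCI artifact; the
+// reference below is a placeholder, and PullIfNotPresent is one of the
+// PullPolicy constants defined earlier in this file.
+//
+//	vol := ImageVolumeSource{
+//		Reference:  "registry.example.com/models/llm:v1",
+//		PullPolicy: PullIfNotPresent,
+//	}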
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/hack/tools/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
new file mode 100644
index 0000000000..89ce3d2303
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
@@ -0,0 +1,2804 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-codegen.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AWSElasticBlockStoreVolumeSource = map[string]string{
+	"":          "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.",
+	"volumeID":  "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+	"fsType":    "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+	"partition": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).",
+	"readOnly":  "readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+}
+
+func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string {
+	return map_AWSElasticBlockStoreVolumeSource
+}
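+
+// A minimal sketch of how these generated maps are consumed: the empty-string
+// key documents the type itself and every other key documents a field.
+//
+//	docs := (AWSElasticBlockStoreVolumeSource{}).SwaggerDoc()
+//	typeDoc := docs[""]          // description of the type
+//	fieldDoc := docs["volumeID"] // description of the volumeID field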
+
+var map_Affinity = map[string]string{
+	"":                "Affinity is a group of affinity scheduling rules.",
+	"nodeAffinity":    "Describes node affinity scheduling rules for the pod.",
+	"podAffinity":     "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).",
+	"podAntiAffinity": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).",
+}
+
+func (Affinity) SwaggerDoc() map[string]string {
+	return map_Affinity
+}
+
+var map_AppArmorProfile = map[string]string{
+	"":                 "AppArmorProfile defines a pod or container's AppArmor settings.",
+	"type":             "type indicates which kind of AppArmor profile will be applied. Valid options are:\n  Localhost - a profile pre-loaded on the node.\n  RuntimeDefault - the container runtime's default profile.\n  Unconfined - no AppArmor enforcement.",
+	"localhostProfile": "localhostProfile indicates a profile loaded on the node that should be used. The profile must be preconfigured on the node to work. Must match the loaded name of the profile. Must be set if and only if type is \"Localhost\".",
+}
+
+func (AppArmorProfile) SwaggerDoc() map[string]string {
+	return map_AppArmorProfile
+}
+
+var map_AttachedVolume = map[string]string{
+	"":           "AttachedVolume describes a volume attached to a node",
+	"name":       "Name of the attached volume",
+	"devicePath": "DevicePath represents the device path where the volume should be available",
+}
+
+func (AttachedVolume) SwaggerDoc() map[string]string {
+	return map_AttachedVolume
+}
+
+var map_AvoidPods = map[string]string{
+	"":                "AvoidPods describes pods that should avoid this node. This is the value for a Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and will eventually become a field of NodeStatus.",
+	"preferAvoidPods": "Bounded-sized list of signatures of pods that should avoid this node, sorted in timestamp order from oldest to newest. Size of the slice is unspecified.",
+}
+
+func (AvoidPods) SwaggerDoc() map[string]string {
+	return map_AvoidPods
+}
+
+var map_AzureDiskVolumeSource = map[string]string{
+	"":            "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
+	"diskName":    "diskName is the Name of the data disk in the blob storage",
+	"diskURI":     "diskURI is the URI of data disk in the blob storage",
+	"cachingMode": "cachingMode is the Host Caching mode: None, Read Only, Read Write.",
+	"fsType":      "fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+	"readOnly":    "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"kind":        "kind expected values are Shared: multiple blob disks per storage account  Dedicated: single blob disk per storage account  Managed: azure managed data disk (only in managed availability set). defaults to shared",
+}
+
+func (AzureDiskVolumeSource) SwaggerDoc() map[string]string {
+	return map_AzureDiskVolumeSource
+}
+
+var map_AzureFilePersistentVolumeSource = map[string]string{
+	"":                "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+	"secretName":      "secretName is the name of secret that contains Azure Storage Account Name and Key",
+	"shareName":       "shareName is the azure Share Name",
+	"readOnly":        "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"secretNamespace": "secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod",
+}
+
+func (AzureFilePersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_AzureFilePersistentVolumeSource
+}
+
+var map_AzureFileVolumeSource = map[string]string{
+	"":           "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
+	"secretName": "secretName is the  name of secret that contains Azure Storage Account Name and Key",
+	"shareName":  "shareName is the azure share Name",
+	"readOnly":   "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+}
+
+func (AzureFileVolumeSource) SwaggerDoc() map[string]string {
+	return map_AzureFileVolumeSource
+}
+
+var map_Binding = map[string]string{
+	"":         "Binding ties one object to another; for example, a pod is bound to a node by a scheduler.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"target":   "The target object that you want to bind to the standard object.",
+}
+
+func (Binding) SwaggerDoc() map[string]string {
+	return map_Binding
+}
+
+var map_CSIPersistentVolumeSource = map[string]string{
+	"":                           "Represents storage that is managed by an external CSI volume driver",
+	"driver":                     "driver is the name of the driver to use for this volume. Required.",
+	"volumeHandle":               "volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.",
+	"readOnly":                   "readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).",
+	"fsType":                     "fsType to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".",
+	"volumeAttributes":           "volumeAttributes of the volume to publish.",
+	"controllerPublishSecretRef": "controllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
+	"nodeStageSecretRef":         "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
+	"nodePublishSecretRef":       "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
+	"controllerExpandSecretRef":  "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
+	"nodeExpandSecretRef":        "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
+}
+
+func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_CSIPersistentVolumeSource
+}
+
+var map_CSIVolumeSource = map[string]string{
+	"":                     "Represents a source location of a volume to mount, managed by an external CSI driver",
+	"driver":               "driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.",
+	"readOnly":             "readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).",
+	"fsType":               "fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.",
+	"volumeAttributes":     "volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.",
+	"nodePublishSecretRef": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and  may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.",
+}
+
+func (CSIVolumeSource) SwaggerDoc() map[string]string {
+	return map_CSIVolumeSource
+}
+
+var map_Capabilities = map[string]string{
+	"":     "Adds and removes POSIX capabilities from running containers.",
+	"add":  "Added capabilities",
+	"drop": "Removed capabilities",
+}
+
+func (Capabilities) SwaggerDoc() map[string]string {
+	return map_Capabilities
+}
+
+var map_CephFSPersistentVolumeSource = map[string]string{
+	"":           "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.",
+	"monitors":   "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+	"path":       "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /",
+	"user":       "user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+	"secretFile": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+	"secretRef":  "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+	"readOnly":   "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+}
+
+func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_CephFSPersistentVolumeSource
+}
+
+var map_CephFSVolumeSource = map[string]string{
+	"":           "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.",
+	"monitors":   "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+	"path":       "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /",
+	"user":       "user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+	"secretFile": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+	"secretRef":  "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+	"readOnly":   "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it",
+}
+
+func (CephFSVolumeSource) SwaggerDoc() map[string]string {
+	return map_CephFSVolumeSource
+}
+
+var map_CinderPersistentVolumeSource = map[string]string{
+	"":          "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
+	"volumeID":  "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+	"fsType":    "fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+	"readOnly":  "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+	"secretRef": "secretRef is Optional: points to a secret object containing parameters used to connect to OpenStack.",
+}
+
+func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_CinderPersistentVolumeSource
+}
+
+var map_CinderVolumeSource = map[string]string{
+	"":          "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
+	"volumeID":  "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+	"fsType":    "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+	"readOnly":  "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+	"secretRef": "secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.",
+}
+
+func (CinderVolumeSource) SwaggerDoc() map[string]string {
+	return map_CinderVolumeSource
+}
+
+var map_ClientIPConfig = map[string]string{
+	"":               "ClientIPConfig represents the configurations of Client IP based session affinity.",
+	"timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).",
+}
+
+func (ClientIPConfig) SwaggerDoc() map[string]string {
+	return map_ClientIPConfig
+}
+
+var map_ClusterTrustBundleProjection = map[string]string{
+	"":              "ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem.",
+	"name":          "Select a single ClusterTrustBundle by object name.  Mutually-exclusive with signerName and labelSelector.",
+	"signerName":    "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name.  The contents of all selected ClusterTrustBundles will be unified and deduplicated.",
+	"labelSelector": "Select all ClusterTrustBundles that match this label selector.  Only has effect if signerName is set.  Mutually-exclusive with name.  If unset, interpreted as \"match nothing\".  If set but empty, interpreted as \"match everything\".",
+	"optional":      "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available.  If using name, then the named ClusterTrustBundle is allowed not to exist.  If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.",
+	"path":          "Relative path from the volume root to write the bundle.",
+}
+
+func (ClusterTrustBundleProjection) SwaggerDoc() map[string]string {
+	return map_ClusterTrustBundleProjection
+}
+
+var map_ComponentCondition = map[string]string{
+	"":        "Information about the condition of a component.",
+	"type":    "Type of condition for a component. Valid value: \"Healthy\"",
+	"status":  "Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".",
+	"message": "Message about the condition for a component. For example, information about a health check.",
+	"error":   "Condition error code for a component. For example, a health check error code.",
+}
+
+func (ComponentCondition) SwaggerDoc() map[string]string {
+	return map_ComponentCondition
+}
+
+var map_ComponentStatus = map[string]string{
+	"":           "ComponentStatus (and ComponentStatusList) holds the cluster validation info. Deprecated: This API is deprecated in v1.19+",
+	"metadata":   "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"conditions": "List of component conditions observed",
+}
+
+func (ComponentStatus) SwaggerDoc() map[string]string {
+	return map_ComponentStatus
+}
+
+var map_ComponentStatusList = map[string]string{
+	"":         "Status of all the conditions for the component as a list of ComponentStatus objects. Deprecated: This API is deprecated in v1.19+",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of ComponentStatus objects.",
+}
+
+func (ComponentStatusList) SwaggerDoc() map[string]string {
+	return map_ComponentStatusList
+}
+
+var map_ConfigMap = map[string]string{
+	"":           "ConfigMap holds configuration data for pods to consume.",
+	"metadata":   "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"immutable":  "Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil.",
+	"data":       "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.",
+	"binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet.",
+}
+
+func (ConfigMap) SwaggerDoc() map[string]string {
+	return map_ConfigMap
+}
+
+var map_ConfigMapEnvSource = map[string]string{
+	"":         "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.",
+	"optional": "Specify whether the ConfigMap must be defined",
+}
+
+func (ConfigMapEnvSource) SwaggerDoc() map[string]string {
+	return map_ConfigMapEnvSource
+}
+
+var map_ConfigMapKeySelector = map[string]string{
+	"":         "Selects a key from a ConfigMap.",
+	"key":      "The key to select.",
+	"optional": "Specify whether the ConfigMap or its key must be defined",
+}
+
+func (ConfigMapKeySelector) SwaggerDoc() map[string]string {
+	return map_ConfigMapKeySelector
+}
+
+var map_ConfigMapList = map[string]string{
+	"":         "ConfigMapList is a resource containing a list of ConfigMap objects.",
+	"metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items":    "Items is the list of ConfigMaps.",
+}
+
+func (ConfigMapList) SwaggerDoc() map[string]string {
+	return map_ConfigMapList
+}
+
+var map_ConfigMapNodeConfigSource = map[string]string{
+	"":                 "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration",
+	"namespace":        "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.",
+	"name":             "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.",
+	"uid":              "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.",
+	"resourceVersion":  "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.",
+	"kubeletConfigKey": "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases.",
+}
+
+func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string {
+	return map_ConfigMapNodeConfigSource
+}
+
+var map_ConfigMapProjection = map[string]string{
+	"":         "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.",
+	"items":    "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+	"optional": "optional specify whether the ConfigMap or its keys must be defined",
+}
+
+func (ConfigMapProjection) SwaggerDoc() map[string]string {
+	return map_ConfigMapProjection
+}
+
+var map_ConfigMapVolumeSource = map[string]string{
+	"":            "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.",
+	"items":       "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+	"defaultMode": "defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+	"optional":    "optional specify whether the ConfigMap or its keys must be defined",
+}
+
+func (ConfigMapVolumeSource) SwaggerDoc() map[string]string {
+	return map_ConfigMapVolumeSource
+}
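+
+// Editorial sketch (not part of the generated file): how the
+// ConfigMapVolumeSource fields documented above fit together. Assumes
+// k8s.io/api/core/v1 imported as corev1; the ConfigMap name "app-config"
+// and key "config.yaml" are hypothetical.
+//
+//	mode := int32(0444)
+//	vol := corev1.Volume{
+//		Name: "config",
+//		VolumeSource: corev1.VolumeSource{
+//			ConfigMap: &corev1.ConfigMapVolumeSource{
+//				LocalObjectReference: corev1.LocalObjectReference{Name: "app-config"},
+//				// Project a single key to a relative path; unlisted keys are omitted.
+//				Items:       []corev1.KeyToPath{{Key: "config.yaml", Path: "config.yaml"}},
+//				DefaultMode: &mode, // 0444: read-only for all users
+//			},
+//		},
+//	}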
+
+var map_Container = map[string]string{
+	"":                         "A single application container that you want to run within a pod.",
+	"name":                     "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.",
+	"image":                    "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
+	"command":                  "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+	"args":                     "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+	"workingDir":               "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
+	"ports":                    "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.",
+	"envFrom":                  "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
+	"env":                      "List of environment variables to set in the container. Cannot be updated.",
+	"resources":                "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
+	"resizePolicy":             "Resize policy for the container's resources.",
+	"restartPolicy":            "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.",
+	"volumeMounts":             "Pod volumes to mount into the container's filesystem. Cannot be updated.",
+	"volumeDevices":            "volumeDevices is the list of block devices to be used by the container.",
+	"livenessProbe":            "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+	"readinessProbe":           "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+	"startupProbe":             "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+	"lifecycle":                "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
+	"terminationMessagePath":   "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
+	"terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
+	"imagePullPolicy":          "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
+	"securityContext":          "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+	"stdin":                    "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
+	"stdinOnce":                "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.",
+	"tty":                      "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
+}
+
+func (Container) SwaggerDoc() map[string]string {
+	return map_Container
+}
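+
+// Editorial sketch (not part of the generated file): a minimal corev1.Container
+// exercising several of the fields documented above. Assumes k8s.io/api/core/v1
+// imported as corev1; the name, image, and port values are hypothetical.
+//
+//	c := corev1.Container{
+//		Name:            "app",                         // DNS_LABEL, unique within the pod
+//		Image:           "registry.example/app:v1.2.3",
+//		Command:         []string{"/bin/app"},          // replaces the image ENTRYPOINT
+//		Args:            []string{"--listen=:8080"},    // replaces the image CMD
+//		Ports:           []corev1.ContainerPort{{Name: "http", ContainerPort: 8080, Protocol: corev1.ProtocolTCP}},
+//		ImagePullPolicy: corev1.PullIfNotPresent,
+//	}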
+
+var map_ContainerImage = map[string]string{
+	"":          "Describe a container image",
+	"names":     "Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"]",
+	"sizeBytes": "The size of the image in bytes.",
+}
+
+func (ContainerImage) SwaggerDoc() map[string]string {
+	return map_ContainerImage
+}
+
+var map_ContainerPort = map[string]string{
+	"":              "ContainerPort represents a network port in a single container.",
+	"name":          "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.",
+	"hostPort":      "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.",
+	"containerPort": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.",
+	"protocol":      "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".",
+	"hostIP":        "What host IP to bind the external port to.",
+}
+
+func (ContainerPort) SwaggerDoc() map[string]string {
+	return map_ContainerPort
+}
+
+var map_ContainerResizePolicy = map[string]string{
+	"":              "ContainerResizePolicy represents resource resize policy for the container.",
+	"resourceName":  "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.",
+	"restartPolicy": "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.",
+}
+
+func (ContainerResizePolicy) SwaggerDoc() map[string]string {
+	return map_ContainerResizePolicy
+}
+
+var map_ContainerState = map[string]string{
+	"":           "ContainerState holds a possible state of a container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.",
+	"waiting":    "Details about a waiting container",
+	"running":    "Details about a running container",
+	"terminated": "Details about a terminated container",
+}
+
+func (ContainerState) SwaggerDoc() map[string]string {
+	return map_ContainerState
+}
+
+var map_ContainerStateRunning = map[string]string{
+	"":          "ContainerStateRunning is a running state of a container.",
+	"startedAt": "Time at which the container was last (re-)started",
+}
+
+func (ContainerStateRunning) SwaggerDoc() map[string]string {
+	return map_ContainerStateRunning
+}
+
+var map_ContainerStateTerminated = map[string]string{
+	"":            "ContainerStateTerminated is a terminated state of a container.",
+	"exitCode":    "Exit status from the last termination of the container",
+	"signal":      "Signal from the last termination of the container",
+	"reason":      "(brief) reason from the last termination of the container",
+	"message":     "Message regarding the last termination of the container",
+	"startedAt":   "Time at which previous execution of the container started",
+	"finishedAt":  "Time at which the container last terminated",
+	"containerID": "Container's ID in the format '<type>://<container_id>'",
+}
+
+func (ContainerStateTerminated) SwaggerDoc() map[string]string {
+	return map_ContainerStateTerminated
+}
+
+var map_ContainerStateWaiting = map[string]string{
+	"":        "ContainerStateWaiting is a waiting state of a container.",
+	"reason":  "(brief) reason the container is not yet running.",
+	"message": "Message regarding why the container is not yet running.",
+}
+
+func (ContainerStateWaiting) SwaggerDoc() map[string]string {
+	return map_ContainerStateWaiting
+}
+
+var map_ContainerStatus = map[string]string{
+	"":                         "ContainerStatus contains details for the current status of this container.",
+	"name":                     "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.",
+	"state":                    "State holds details about the container's current condition.",
+	"lastState":                "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.",
+	"ready":                    "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.",
+	"restartCount":             "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. The value is never negative.",
+	"image":                    "Image is the name of the container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.",
+	"imageID":                  "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.",
+	"containerID":              "ContainerID is the ID of the container in the format '<type>://<container_id>'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").",
+	"started":                  "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.",
+	"allocatedResources":       "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.",
+	"resources":                "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.",
+	"volumeMounts":             "Status of volume mounts.",
+	"user":                     "User represents user identity information initially attached to the first process of the container",
+	"allocatedResourcesStatus": "AllocatedResourcesStatus represents the status of various resources allocated for this Pod.",
+}
+
+func (ContainerStatus) SwaggerDoc() map[string]string {
+	return map_ContainerStatus
+}
+
+var map_ContainerUser = map[string]string{
+	"":      "ContainerUser represents user identity information",
+	"linux": "Linux holds user identity information initially attached to the first process of the containers in Linux. Note that the actual running identity can be changed if the process has enough privilege to do so.",
+}
+
+func (ContainerUser) SwaggerDoc() map[string]string {
+	return map_ContainerUser
+}
+
+var map_DaemonEndpoint = map[string]string{
+	"":     "DaemonEndpoint contains information about a single Daemon endpoint.",
+	"Port": "Port number of the given endpoint.",
+}
+
+func (DaemonEndpoint) SwaggerDoc() map[string]string {
+	return map_DaemonEndpoint
+}
+
+var map_DownwardAPIProjection = map[string]string{
+	"":      "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.",
+	"items": "Items is a list of DownwardAPIVolume files",
+}
+
+func (DownwardAPIProjection) SwaggerDoc() map[string]string {
+	return map_DownwardAPIProjection
+}
+
+var map_DownwardAPIVolumeFile = map[string]string{
+	"":                 "DownwardAPIVolumeFile represents information to create the file containing the pod field",
+	"path":             "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'",
+	"fieldRef":         "Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.",
+	"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.",
+	"mode":             "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+}
+
+func (DownwardAPIVolumeFile) SwaggerDoc() map[string]string {
+	return map_DownwardAPIVolumeFile
+}
+
+var map_DownwardAPIVolumeSource = map[string]string{
+	"":            "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.",
+	"items":       "Items is a list of downward API volume files",
+	"defaultMode": "Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+}
+
+func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string {
+	return map_DownwardAPIVolumeSource
+}
+
+var map_EmptyDirVolumeSource = map[string]string{
+	"":          "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.",
+	"medium":    "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
+	"sizeLimit": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
+}
+
+func (EmptyDirVolumeSource) SwaggerDoc() map[string]string {
+	return map_EmptyDirVolumeSource
+}
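+
+// Editorial sketch (not part of the generated file): a memory-backed emptyDir
+// with a size limit, per the medium and sizeLimit docs above. Assumes
+// k8s.io/api/core/v1 as corev1 and k8s.io/apimachinery/pkg/api/resource as
+// resource; the 256Mi limit is an arbitrary example.
+//
+//	limit := resource.MustParse("256Mi")
+//	vol := corev1.Volume{
+//		Name: "scratch",
+//		VolumeSource: corev1.VolumeSource{
+//			EmptyDir: &corev1.EmptyDirVolumeSource{
+//				Medium:    corev1.StorageMediumMemory, // tmpfs; usage counts against memory limits
+//				SizeLimit: &limit,
+//			},
+//		},
+//	}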
+
+var map_EndpointAddress = map[string]string{
+	"":          "EndpointAddress is a tuple that describes a single IP address.",
+	"ip":        "The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).",
+	"hostname":  "The Hostname of this endpoint",
+	"nodeName":  "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.",
+	"targetRef": "Reference to object providing the endpoint.",
+}
+
+func (EndpointAddress) SwaggerDoc() map[string]string {
+	return map_EndpointAddress
+}
+
+var map_EndpointPort = map[string]string{
+	"":            "EndpointPort is a tuple that describes a single port.",
+	"name":        "The name of this port.  This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.",
+	"port":        "The port number of the endpoint.",
+	"protocol":    "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
+	"appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n  * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n  * 'kubernetes.io/ws'  - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n  * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
+}
+
+func (EndpointPort) SwaggerDoc() map[string]string {
+	return map_EndpointPort
+}
+
+var map_EndpointSubset = map[string]string{
+	"":                  "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t  Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t  Ports:     [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]",
+	"addresses":         "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.",
+	"notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.",
+	"ports":             "Port numbers available on the related IP addresses.",
+}
+
+func (EndpointSubset) SwaggerDoc() map[string]string {
+	return map_EndpointSubset
+}
+
+var map_Endpoints = map[string]string{
+	"":         "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t   {\n\t     Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t     Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t   },\n\t   {\n\t     Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t     Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t   },\n\t]",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"subsets":  "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.",
+}
+
+func (Endpoints) SwaggerDoc() map[string]string {
+	return map_Endpoints
+}
+
+var map_EndpointsList = map[string]string{
+	"":         "EndpointsList is a list of endpoints.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of endpoints.",
+}
+
+func (EndpointsList) SwaggerDoc() map[string]string {
+	return map_EndpointsList
+}
+
+var map_EnvFromSource = map[string]string{
+	"":             "EnvFromSource represents the source of a set of ConfigMaps",
+	"prefix":       "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.",
+	"configMapRef": "The ConfigMap to select from",
+	"secretRef":    "The Secret to select from",
+}
+
+func (EnvFromSource) SwaggerDoc() map[string]string {
+	return map_EnvFromSource
+}
+
+var map_EnvVar = map[string]string{
+	"":          "EnvVar represents an environment variable present in a Container.",
+	"name":      "Name of the environment variable. Must be a C_IDENTIFIER.",
+	"value":     "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
+	"valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.",
+}
+
+func (EnvVar) SwaggerDoc() map[string]string {
+	return map_EnvVar
+}
+
+var map_EnvVarSource = map[string]string{
+	"":                 "EnvVarSource represents a source for the value of an EnvVar.",
+	"fieldRef":         "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.",
+	"resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
+	"configMapKeyRef":  "Selects a key of a ConfigMap.",
+	"secretKeyRef":     "Selects a key of a secret in the pod's namespace",
+}
+
+func (EnvVarSource) SwaggerDoc() map[string]string {
+	return map_EnvVarSource
+}
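+
+// Editorial sketch (not part of the generated file): the three common EnvVar
+// shapes described above — a literal value, a downward-API fieldRef, and a
+// secretKeyRef. Assumes corev1 = k8s.io/api/core/v1; the secret name and key
+// are hypothetical.
+//
+//	env := []corev1.EnvVar{
+//		{Name: "STATIC_VALUE", Value: "literal"},
+//		{Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{
+//			FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"},
+//		}},
+//		{Name: "API_TOKEN", ValueFrom: &corev1.EnvVarSource{
+//			SecretKeyRef: &corev1.SecretKeySelector{
+//				LocalObjectReference: corev1.LocalObjectReference{Name: "app-secrets"},
+//				Key:                  "token",
+//			},
+//		}},
+//	}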
+
+var map_EphemeralContainer = map[string]string{
+	"":                    "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.",
+	"targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined.",
+}
+
+func (EphemeralContainer) SwaggerDoc() map[string]string {
+	return map_EphemeralContainer
+}
+
+var map_EphemeralContainerCommon = map[string]string{
+	"":                         "EphemeralContainerCommon is a copy of all fields in Container to be inlined in EphemeralContainer. This separate type allows easy conversion from EphemeralContainer to Container and allows separate documentation for the fields of EphemeralContainer. When a new field is added to Container it must be added here as well.",
+	"name":                     "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.",
+	"image":                    "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images",
+	"command":                  "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+	"args":                     "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+	"workingDir":               "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
+	"ports":                    "Ports are not allowed for ephemeral containers.",
+	"envFrom":                  "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
+	"env":                      "List of environment variables to set in the container. Cannot be updated.",
+	"resources":                "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
+	"resizePolicy":             "Resize policy for the container's resources.",
+	"restartPolicy":            "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers.",
+	"volumeMounts":             "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.",
+	"volumeDevices":            "volumeDevices is the list of block devices to be used by the container.",
+	"livenessProbe":            "Probes are not allowed for ephemeral containers.",
+	"readinessProbe":           "Probes are not allowed for ephemeral containers.",
+	"startupProbe":             "Probes are not allowed for ephemeral containers.",
+	"lifecycle":                "Lifecycle is not allowed for ephemeral containers.",
+	"terminationMessagePath":   "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
+	"terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
+	"imagePullPolicy":          "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
+	"securityContext":          "Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.",
+	"stdin":                    "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
+	"stdinOnce":                "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.",
+	"tty":                      "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
+}
+
+func (EphemeralContainerCommon) SwaggerDoc() map[string]string {
+	return map_EphemeralContainerCommon
+}
+
+var map_EphemeralVolumeSource = map[string]string{
+	"":                    "Represents an ephemeral volume that is handled by a normal storage driver.",
+	"volumeClaimTemplate": "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod.  The name of the PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to be updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.",
+}
+
+func (EphemeralVolumeSource) SwaggerDoc() map[string]string {
+	return map_EphemeralVolumeSource
+}
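+
+// Editorial sketch (not part of the generated file): a generic ephemeral volume
+// whose PVC is created and owned per the volumeClaimTemplate docs above; the
+// claim would be named `<pod name>-scratch-data`. Assumes corev1 =
+// k8s.io/api/core/v1 and resource = k8s.io/apimachinery/pkg/api/resource.
+//
+//	vol := corev1.Volume{
+//		Name: "scratch-data",
+//		VolumeSource: corev1.VolumeSource{
+//			Ephemeral: &corev1.EphemeralVolumeSource{
+//				VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
+//					Spec: corev1.PersistentVolumeClaimSpec{
+//						AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
+//						Resources: corev1.VolumeResourceRequirements{
+//							Requests: corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("1Gi")},
+//						},
+//					},
+//				},
+//			},
+//		},
+//	}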
+
+var map_Event = map[string]string{
+	"":                   "Event is a report of an event somewhere in the cluster.  Events have a limited retention time and triggers and messages may evolve with time.  Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason.  Events should be treated as informative, best-effort, supplemental data.",
+	"metadata":           "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"involvedObject":     "The object that this event is about.",
+	"reason":             "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.",
+	"message":            "A human-readable description of the status of this operation.",
+	"source":             "The component reporting this event. Should be a short machine understandable string.",
+	"firstTimestamp":     "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)",
+	"lastTimestamp":      "The time at which the most recent occurrence of this event was recorded.",
+	"count":              "The number of times this event has occurred.",
+	"type":               "Type of this event (Normal, Warning), new types could be added in the future",
+	"eventTime":          "Time when this Event was first observed.",
+	"series":             "Data about the Event series this event represents or nil if it's a singleton Event.",
+	"action":             "What action was taken/failed regarding the Regarding object.",
+	"related":            "Optional secondary object for more complex actions.",
+	"reportingComponent": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.",
+	"reportingInstance":  "ID of the controller instance, e.g. `kubelet-xyzf`.",
+}
+
+func (Event) SwaggerDoc() map[string]string {
+	return map_Event
+}
+
+var map_EventList = map[string]string{
+	"":         "EventList is a list of events.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of events",
+}
+
+func (EventList) SwaggerDoc() map[string]string {
+	return map_EventList
+}
+
+var map_EventSeries = map[string]string{
+	"":                 "EventSeries contains information on a series of events, i.e. a thing that was/is happening continuously for some time.",
+	"count":            "Number of occurrences in this series up to the last heartbeat time",
+	"lastObservedTime": "Time of the last occurrence observed",
+}
+
+func (EventSeries) SwaggerDoc() map[string]string {
+	return map_EventSeries
+}
+
+var map_EventSource = map[string]string{
+	"":          "EventSource contains information for an event.",
+	"component": "Component from which the event is generated.",
+	"host":      "Node name on which the event is generated.",
+}
+
+func (EventSource) SwaggerDoc() map[string]string {
+	return map_EventSource
+}
+
+var map_ExecAction = map[string]string{
+	"":        "ExecAction describes a \"run in container\" action.",
+	"command": "Command is the command line to execute inside the container, the working directory for the command  is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.",
+}
+
+func (ExecAction) SwaggerDoc() map[string]string {
+	return map_ExecAction
+}
+
+var map_FCVolumeSource = map[string]string{
+	"":           "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.",
+	"targetWWNs": "targetWWNs is Optional: FC target worldwide names (WWNs)",
+	"lun":        "lun is Optional: FC target lun number",
+	"fsType":     "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+	"readOnly":   "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"wwids":      "wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.",
+}
+
+func (FCVolumeSource) SwaggerDoc() map[string]string {
+	return map_FCVolumeSource
+}
+
+var map_FlexPersistentVolumeSource = map[string]string{
+	"":          "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.",
+	"driver":    "driver is the name of the driver to use for this volume.",
+	"fsType":    "fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.",
+	"secretRef": "secretRef is Optional: a reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.",
+	"readOnly":  "readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"options":   "options is Optional: this field holds extra command options if any.",
+}
+
+func (FlexPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_FlexPersistentVolumeSource
+}
+
+var map_FlexVolumeSource = map[string]string{
+	"":          "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
+	"driver":    "driver is the name of the driver to use for this volume.",
+	"fsType":    "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.",
+	"secretRef": "secretRef is Optional: a reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.",
+	"readOnly":  "readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"options":   "options is Optional: this field holds extra command options if any.",
+}
+
+func (FlexVolumeSource) SwaggerDoc() map[string]string {
+	return map_FlexVolumeSource
+}
+
+var map_FlockerVolumeSource = map[string]string{
+	"":            "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.",
+	"datasetName": "datasetName is the name of the dataset, stored as metadata -> name on the dataset for Flocker. It should be considered deprecated.",
+	"datasetUUID": "datasetUUID is the UUID of the dataset. This is the unique identifier of a Flocker dataset.",
+}
+
+func (FlockerVolumeSource) SwaggerDoc() map[string]string {
+	return map_FlockerVolumeSource
+}
+
+var map_GCEPersistentDiskVolumeSource = map[string]string{
+	"":          "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.",
+	"pdName":    "pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+	"fsType":    "fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+	"partition": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+	"readOnly":  "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+}
+
+func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string {
+	return map_GCEPersistentDiskVolumeSource
+}
+
+var map_GRPCAction = map[string]string{
+	"":        "GRPCAction specifies an action involving a GRPC service.",
+	"port":    "Port number of the gRPC service. Number must be in the range 1 to 65535.",
+	"service": "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.",
+}
+
+func (GRPCAction) SwaggerDoc() map[string]string {
+	return map_GRPCAction
+}
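+
+// Editorial sketch (not part of the generated file): a startup probe using the
+// GRPCAction documented above. Leaving Service nil defers to the gRPC default,
+// as noted. Assumes corev1 = k8s.io/api/core/v1; the port and thresholds are
+// hypothetical.
+//
+//	startup := &corev1.Probe{
+//		ProbeHandler: corev1.ProbeHandler{
+//			GRPC: &corev1.GRPCAction{Port: 9090},
+//		},
+//		FailureThreshold: 30,
+//		PeriodSeconds:    2,
+//	}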
+
+var map_GitRepoVolumeSource = map[string]string{
+	"":           "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
+	"repository": "repository is the URL",
+	"revision":   "revision is the commit hash for the specified revision.",
+	"directory":  "directory is the target directory name. Must not contain or start with '..'.  If '.' is supplied, the volume directory will be the git repository.  Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.",
+}
+
+func (GitRepoVolumeSource) SwaggerDoc() map[string]string {
+	return map_GitRepoVolumeSource
+}
+
+var map_GlusterfsPersistentVolumeSource = map[string]string{
+	"":                   "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
+	"endpoints":          "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
+	"path":               "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
+	"readOnly":           "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
+	"endpointsNamespace": "endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
+}
+
+func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_GlusterfsPersistentVolumeSource
+}
+
+var map_GlusterfsVolumeSource = map[string]string{
+	"":          "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
+	"endpoints": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
+	"path":      "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
+	"readOnly":  "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod",
+}
+
+func (GlusterfsVolumeSource) SwaggerDoc() map[string]string {
+	return map_GlusterfsVolumeSource
+}
+
+var map_HTTPGetAction = map[string]string{
+	"":            "HTTPGetAction describes an action based on HTTP Get requests.",
+	"path":        "Path to access on the HTTP server.",
+	"port":        "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.",
+	"host":        "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.",
+	"scheme":      "Scheme to use for connecting to the host. Defaults to HTTP.",
+	"httpHeaders": "Custom headers to set in the request. HTTP allows repeated headers.",
+}
+
+func (HTTPGetAction) SwaggerDoc() map[string]string {
+	return map_HTTPGetAction
+}
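+
+// Editorial sketch (not part of the generated file): a readiness probe built on
+// the HTTPGetAction documented above. Assumes corev1 = k8s.io/api/core/v1 and
+// intstr = k8s.io/apimachinery/pkg/util/intstr; path and port are hypothetical.
+//
+//	readiness := &corev1.Probe{
+//		ProbeHandler: corev1.ProbeHandler{
+//			HTTPGet: &corev1.HTTPGetAction{
+//				Path:   "/healthz",
+//				Port:   intstr.FromInt32(8080), // a named port also works
+//				Scheme: corev1.URISchemeHTTP,
+//			},
+//		},
+//		PeriodSeconds: 10,
+//	}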
+
+var map_HTTPHeader = map[string]string{
+	"":      "HTTPHeader describes a custom header to be used in HTTP probes",
+	"name":  "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.",
+	"value": "The header field value",
+}
+
+func (HTTPHeader) SwaggerDoc() map[string]string {
+	return map_HTTPHeader
+}
+
+var map_HostAlias = map[string]string{
+	"":          "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.",
+	"ip":        "IP address of the host file entry.",
+	"hostnames": "Hostnames for the above IP address.",
+}
+
+func (HostAlias) SwaggerDoc() map[string]string {
+	return map_HostAlias
+}
+
+var map_HostIP = map[string]string{
+	"":   "HostIP represents a single IP address allocated to the host.",
+	"ip": "IP is the IP address assigned to the host",
+}
+
+func (HostIP) SwaggerDoc() map[string]string {
+	return map_HostIP
+}
+
+var map_HostPathVolumeSource = map[string]string{
+	"":     "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.",
+	"path": "path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+	"type": "type for HostPath Volume. Defaults to \"\". More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+}
+
+func (HostPathVolumeSource) SwaggerDoc() map[string]string {
+	return map_HostPathVolumeSource
+}
+
+var map_ISCSIPersistentVolumeSource = map[string]string{
+	"":                  "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.",
+	"targetPortal":      "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
+	"iqn":               "iqn is Target iSCSI Qualified Name.",
+	"lun":               "lun is iSCSI Target Lun number.",
+	"iscsiInterface":    "iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).",
+	"fsType":            "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi",
+	"readOnly":          "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.",
+	"portals":           "portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
+	"chapAuthDiscovery": "chapAuthDiscovery defines whether iSCSI Discovery CHAP authentication is supported",
+	"chapAuthSession":   "chapAuthSession defines whether iSCSI Session CHAP authentication is supported",
+	"secretRef":         "secretRef is the CHAP Secret for iSCSI target and initiator authentication",
+	"initiatorName":     "initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection.",
+}
+
+func (ISCSIPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_ISCSIPersistentVolumeSource
+}
+
+var map_ISCSIVolumeSource = map[string]string{
+	"":                  "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.",
+	"targetPortal":      "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
+	"iqn":               "iqn is the target iSCSI Qualified Name.",
+	"lun":               "lun represents iSCSI Target Lun number.",
+	"iscsiInterface":    "iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).",
+	"fsType":            "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi",
+	"readOnly":          "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.",
+	"portals":           "portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
+	"chapAuthDiscovery": "chapAuthDiscovery defines whether iSCSI Discovery CHAP authentication is supported",
+	"chapAuthSession":   "chapAuthSession defines whether iSCSI Session CHAP authentication is supported",
+	"secretRef":         "secretRef is the CHAP Secret for iSCSI target and initiator authentication",
+	"initiatorName":     "initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface <target portal>:<volume name> will be created for the connection.",
+}
+
+func (ISCSIVolumeSource) SwaggerDoc() map[string]string {
+	return map_ISCSIVolumeSource
+}
+
+var map_ImageVolumeSource = map[string]string{
+	"":           "ImageVolumeSource represents an image volume resource.",
+	"reference":  "Required: Image or artifact reference to be used. Behaves in the same way as pod.spec.containers[*].image. Pull secrets will be assembled in the same way as for the container image by looking up node credentials, SA image pull secrets, and pod spec image pull secrets. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
+	"pullPolicy": "Policy for pulling OCI objects. Possible values are: Always: the kubelet always attempts to pull the reference. Container creation will fail if the pull fails. Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.",
+}
+
+func (ImageVolumeSource) SwaggerDoc() map[string]string {
+	return map_ImageVolumeSource
+}
+
+var map_KeyToPath = map[string]string{
+	"":     "Maps a string key to a path within a volume.",
+	"key":  "key is the key to project.",
+	"path": "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.",
+	"mode": "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+}
+
+func (KeyToPath) SwaggerDoc() map[string]string {
+	return map_KeyToPath
+}
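+
+// Editor's sketch (not generated content) of the octal/decimal note above:
+// 0o644 in Go source corresponds to the decimal 420 a JSON manifest must use.
+// The key and path are illustrative assumptions.
+//
+//	mode := int32(0o644)
+//	entry := KeyToPath{Key: "config.yaml", Path: "app/config.yaml", Mode: &mode}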
+
+var map_Lifecycle = map[string]string{
+	"":          "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
+	"postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
+	"preStop":   "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
+}
+
+func (Lifecycle) SwaggerDoc() map[string]string {
+	return map_Lifecycle
+}
+
+var map_LifecycleHandler = map[string]string{
+	"":          "LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.",
+	"exec":      "Exec specifies a command to execute in the container.",
+	"httpGet":   "HTTPGet specifies an HTTP GET request to perform.",
+	"tcpSocket": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for backward compatibility. There is no validation of this field and lifecycle hooks will fail at runtime when it is specified.",
+	"sleep":     "Sleep represents a duration that the container should sleep.",
+}
+
+func (LifecycleHandler) SwaggerDoc() map[string]string {
+	return map_LifecycleHandler
+}
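+
+// Editor's sketch (not generated content): a PreStop hook using the exec
+// action; the command is an illustrative assumption.
+//
+//	lc := Lifecycle{
+//		PreStop: &LifecycleHandler{
+//			Exec: &ExecAction{Command: []string{"/bin/sh", "-c", "sleep 5"}},
+//		},
+//	}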
+
+var map_LimitRange = map[string]string{
+	"":         "LimitRange sets resource usage limits for each kind of resource in a Namespace.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (LimitRange) SwaggerDoc() map[string]string {
+	return map_LimitRange
+}
+
+var map_LimitRangeItem = map[string]string{
+	"":                     "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.",
+	"type":                 "Type of resource that this limit applies to.",
+	"max":                  "Max usage constraints on this kind by resource name.",
+	"min":                  "Min usage constraints on this kind by resource name.",
+	"default":              "Default resource requirement limit value by resource name if resource limit is omitted.",
+	"defaultRequest":       "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.",
+	"maxLimitRequestRatio": "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.",
+}
+
+func (LimitRangeItem) SwaggerDoc() map[string]string {
+	return map_LimitRangeItem
+}
+
+var map_LimitRangeList = map[string]string{
+	"":         "LimitRangeList is a list of LimitRange items.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
+}
+
+func (LimitRangeList) SwaggerDoc() map[string]string {
+	return map_LimitRangeList
+}
+
+var map_LimitRangeSpec = map[string]string{
+	"":       "LimitRangeSpec defines a min/max usage limit for resources that match on kind.",
+	"limits": "Limits is the list of LimitRangeItem objects that are enforced.",
+}
+
+func (LimitRangeSpec) SwaggerDoc() map[string]string {
+	return map_LimitRangeSpec
+}
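+
+// Editor's sketch (not generated content): one LimitRangeItem enforcing a
+// default and a max CPU per container. resource.MustParse comes from
+// k8s.io/apimachinery/pkg/api/resource; the quantities are assumptions.
+//
+//	item := LimitRangeItem{
+//		Type:    LimitTypeContainer,
+//		Default: ResourceList{ResourceCPU: resource.MustParse("500m")},
+//		Max:     ResourceList{ResourceCPU: resource.MustParse("2")},
+//	}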
+
+var map_LinuxContainerUser = map[string]string{
+	"":                   "LinuxContainerUser represents user identity information in Linux containers",
+	"uid":                "UID is the primary uid initially attached to the first process in the container",
+	"gid":                "GID is the primary gid initially attached to the first process in the container",
+	"supplementalGroups": "SupplementalGroups are the supplemental groups initially attached to the first process in the container",
+}
+
+func (LinuxContainerUser) SwaggerDoc() map[string]string {
+	return map_LinuxContainerUser
+}
+
+var map_LoadBalancerIngress = map[string]string{
+	"":         "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
+	"ip":       "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
+	"hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)",
+	"ipMode":   "IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to \"VIP\" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to \"Proxy\" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. Service implementations may use this information to adjust traffic routing.",
+	"ports":    "Ports is a list of records of service ports If used, every port defined in the service should have an entry in it",
+}
+
+func (LoadBalancerIngress) SwaggerDoc() map[string]string {
+	return map_LoadBalancerIngress
+}
+
+var map_LoadBalancerStatus = map[string]string{
+	"":        "LoadBalancerStatus represents the status of a load-balancer.",
+	"ingress": "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.",
+}
+
+func (LoadBalancerStatus) SwaggerDoc() map[string]string {
+	return map_LoadBalancerStatus
+}
+
+var map_LocalObjectReference = map[string]string{
+	"":     "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.",
+	"name": "Name of the referent. This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+}
+
+func (LocalObjectReference) SwaggerDoc() map[string]string {
+	return map_LocalObjectReference
+}
+
+var map_LocalVolumeSource = map[string]string{
+	"":       "Local represents directly-attached storage with node affinity",
+	"path":   "path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).",
+	"fsType": "fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified.",
+}
+
+func (LocalVolumeSource) SwaggerDoc() map[string]string {
+	return map_LocalVolumeSource
+}
+
+var map_ModifyVolumeStatus = map[string]string{
+	"":                                "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation",
+	"targetVolumeAttributesClassName": "targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled",
+	"status":                          "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n   Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n   the specified VolumeAttributesClass not existing.\n - InProgress\n   InProgress indicates that the volume is being modified.\n - Infeasible\n  Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t  resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.",
+}
+
+func (ModifyVolumeStatus) SwaggerDoc() map[string]string {
+	return map_ModifyVolumeStatus
+}
+
+var map_NFSVolumeSource = map[string]string{
+	"":         "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.",
+	"server":   "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+	"path":     "path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+	"readOnly": "readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+}
+
+func (NFSVolumeSource) SwaggerDoc() map[string]string {
+	return map_NFSVolumeSource
+}
+
+var map_Namespace = map[string]string{
+	"":         "Namespace provides a scope for Names. Use of multiple namespaces is optional.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+	"status":   "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (Namespace) SwaggerDoc() map[string]string {
+	return map_Namespace
+}
+
+var map_NamespaceCondition = map[string]string{
+	"":                   "NamespaceCondition contains details about state of namespace.",
+	"type":               "Type of namespace controller condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "Unique, one-word, CamelCase reason for the condition's last transition.",
+	"message":            "Human-readable message indicating details about last transition.",
+}
+
+func (NamespaceCondition) SwaggerDoc() map[string]string {
+	return map_NamespaceCondition
+}
+
+var map_NamespaceList = map[string]string{
+	"":         "NamespaceList is a list of Namespaces.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
+}
+
+func (NamespaceList) SwaggerDoc() map[string]string {
+	return map_NamespaceList
+}
+
+var map_NamespaceSpec = map[string]string{
+	"":           "NamespaceSpec describes the attributes on a Namespace.",
+	"finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/",
+}
+
+func (NamespaceSpec) SwaggerDoc() map[string]string {
+	return map_NamespaceSpec
+}
+
+var map_NamespaceStatus = map[string]string{
+	"":           "NamespaceStatus is information about the current status of a Namespace.",
+	"phase":      "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/",
+	"conditions": "Represents the latest available observations of a namespace's current state.",
+}
+
+func (NamespaceStatus) SwaggerDoc() map[string]string {
+	return map_NamespaceStatus
+}
+
+var map_Node = map[string]string{
+	"":         "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+	"status":   "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (Node) SwaggerDoc() map[string]string {
+	return map_Node
+}
+
+var map_NodeAddress = map[string]string{
+	"":        "NodeAddress contains information for the node's address.",
+	"type":    "Node address type, one of Hostname, ExternalIP or InternalIP.",
+	"address": "The node address.",
+}
+
+func (NodeAddress) SwaggerDoc() map[string]string {
+	return map_NodeAddress
+}
+
+var map_NodeAffinity = map[string]string{
+	"": "Node affinity is a group of node affinity scheduling rules.",
+	"requiredDuringSchedulingIgnoredDuringExecution":  "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.",
+	"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.",
+}
+
+func (NodeAffinity) SwaggerDoc() map[string]string {
+	return map_NodeAffinity
+}
+
+var map_NodeCondition = map[string]string{
+	"":                   "NodeCondition contains condition information for a node.",
+	"type":               "Type of node condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastHeartbeatTime":  "Last time we got an update on a given condition.",
+	"lastTransitionTime": "Last time the condition transit from one status to another.",
+	"reason":             "(brief) reason for the condition's last transition.",
+	"message":            "Human readable message indicating details about last transition.",
+}
+
+func (NodeCondition) SwaggerDoc() map[string]string {
+	return map_NodeCondition
+}
+
+var map_NodeConfigSource = map[string]string{
+	"":          "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22",
+	"configMap": "ConfigMap is a reference to a Node's ConfigMap",
+}
+
+func (NodeConfigSource) SwaggerDoc() map[string]string {
+	return map_NodeConfigSource
+}
+
+var map_NodeConfigStatus = map[string]string{
+	"":              "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.",
+	"assigned":      "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.",
+	"active":        "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.",
+	"lastKnownGood": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.",
+	"error":         "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.",
+}
+
+func (NodeConfigStatus) SwaggerDoc() map[string]string {
+	return map_NodeConfigStatus
+}
+
+var map_NodeDaemonEndpoints = map[string]string{
+	"":                "NodeDaemonEndpoints lists ports opened by daemons running on the Node.",
+	"kubeletEndpoint": "Endpoint on which Kubelet is listening.",
+}
+
+func (NodeDaemonEndpoints) SwaggerDoc() map[string]string {
+	return map_NodeDaemonEndpoints
+}
+
+var map_NodeFeatures = map[string]string{
+	"":                         "NodeFeatures describes the set of features implemented by the CRI implementation. The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.",
+	"supplementalGroupsPolicy": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.",
+}
+
+func (NodeFeatures) SwaggerDoc() map[string]string {
+	return map_NodeFeatures
+}
+
+var map_NodeList = map[string]string{
+	"":         "NodeList is the whole list of all Nodes which have been registered with master.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of nodes",
+}
+
+func (NodeList) SwaggerDoc() map[string]string {
+	return map_NodeList
+}
+
+var map_NodeProxyOptions = map[string]string{
+	"":     "NodeProxyOptions is the query options to a Node's proxy call.",
+	"path": "Path is the URL path to use for the current proxy request to node.",
+}
+
+func (NodeProxyOptions) SwaggerDoc() map[string]string {
+	return map_NodeProxyOptions
+}
+
+var map_NodeRuntimeHandler = map[string]string{
+	"":         "NodeRuntimeHandler is a set of runtime handler information.",
+	"name":     "Runtime handler name. Empty for the default runtime handler.",
+	"features": "Supported features.",
+}
+
+func (NodeRuntimeHandler) SwaggerDoc() map[string]string {
+	return map_NodeRuntimeHandler
+}
+
+var map_NodeRuntimeHandlerFeatures = map[string]string{
+	"":                        "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.",
+	"recursiveReadOnlyMounts": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.",
+	"userNamespaces":          "UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.",
+}
+
+func (NodeRuntimeHandlerFeatures) SwaggerDoc() map[string]string {
+	return map_NodeRuntimeHandlerFeatures
+}
+
+var map_NodeSelector = map[string]string{
+	"":                  "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.",
+	"nodeSelectorTerms": "Required. A list of node selector terms. The terms are ORed.",
+}
+
+func (NodeSelector) SwaggerDoc() map[string]string {
+	return map_NodeSelector
+}
+
+var map_NodeSelectorRequirement = map[string]string{
+	"":         "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
+	"key":      "The label key that the selector applies to.",
+	"operator": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.",
+	"values":   "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.",
+}
+
+func (NodeSelectorRequirement) SwaggerDoc() map[string]string {
+	return map_NodeSelectorRequirement
+}
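+
+// Editor's sketch (not generated content): an In requirement over the
+// well-known architecture label; the values are illustrative.
+//
+//	req := NodeSelectorRequirement{
+//		Key:      "kubernetes.io/arch",
+//		Operator: NodeSelectorOpIn,
+//		Values:   []string{"amd64", "arm64"},
+//	}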
+
+var map_NodeSelectorTerm = map[string]string{
+	"":                 "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.",
+	"matchExpressions": "A list of node selector requirements by node's labels.",
+	"matchFields":      "A list of node selector requirements by node's fields.",
+}
+
+func (NodeSelectorTerm) SwaggerDoc() map[string]string {
+	return map_NodeSelectorTerm
+}
+
+var map_NodeSpec = map[string]string{
+	"":              "NodeSpec describes the attributes that a node is created with.",
+	"podCIDR":       "PodCIDR represents the pod IP range assigned to the node.",
+	"podCIDRs":      "podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.",
+	"providerID":    "ID of the node assigned by the cloud provider in the format: ://",
+	"unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration",
+	"taints":        "If specified, the node's taints.",
+	"configSource":  "Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed.",
+	"externalID":    "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966",
+}
+
+func (NodeSpec) SwaggerDoc() map[string]string {
+	return map_NodeSpec
+}
+
+var map_NodeStatus = map[string]string{
+	"":                "NodeStatus is information about the current status of a node.",
+	"capacity":        "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/reference/node/node-status/#capacity",
+	"allocatable":     "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
+	"phase":           "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
+	"conditions":      "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/reference/node/node-status/#condition",
+	"addresses":       "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/reference/node/node-status/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).",
+	"daemonEndpoints": "Endpoints of daemons running on the Node.",
+	"nodeInfo":        "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/reference/node/node-status/#info",
+	"images":          "List of container images on this node",
+	"volumesInUse":    "List of attachable volumes in use (mounted) by the node.",
+	"volumesAttached": "List of volumes that are attached to the node.",
+	"config":          "Status of the config assigned to the node via the dynamic Kubelet config feature.",
+	"runtimeHandlers": "The available runtime handlers.",
+	"features":        "Features describes the set of features implemented by the CRI implementation.",
+}
+
+func (NodeStatus) SwaggerDoc() map[string]string {
+	return map_NodeStatus
+}
+
+var map_NodeSystemInfo = map[string]string{
+	"":                        "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.",
+	"machineID":               "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html",
+	"systemUUID":              "SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid",
+	"bootID":                  "Boot ID reported by the node.",
+	"kernelVersion":           "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).",
+	"osImage":                 "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).",
+	"containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).",
+	"kubeletVersion":          "Kubelet Version reported by the node.",
+	"kubeProxyVersion":        "Deprecated: KubeProxy Version reported by the node.",
+	"operatingSystem":         "The Operating System reported by the node",
+	"architecture":            "The Architecture reported by the node",
+}
+
+func (NodeSystemInfo) SwaggerDoc() map[string]string {
+	return map_NodeSystemInfo
+}
+
+var map_ObjectFieldSelector = map[string]string{
+	"":           "ObjectFieldSelector selects an APIVersioned field of an object.",
+	"apiVersion": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".",
+	"fieldPath":  "Path of the field to select in the specified API version.",
+}
+
+func (ObjectFieldSelector) SwaggerDoc() map[string]string {
+	return map_ObjectFieldSelector
+}
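+
+// Editor's sketch (not generated content): the downward-API selector for a
+// pod's own name, as commonly used in an EnvVarSource.FieldRef.
+//
+//	sel := ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"}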
+
+var map_ObjectReference = map[string]string{
+	"":                "ObjectReference contains enough information to let you inspect or modify the referred object.",
+	"kind":            "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"namespace":       "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
+	"name":            "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+	"uid":             "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids",
+	"apiVersion":      "API version of the referent.",
+	"resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
+	"fieldPath":       "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.",
+}
+
+func (ObjectReference) SwaggerDoc() map[string]string {
+	return map_ObjectReference
+}
+
+var map_PersistentVolume = map[string]string{
+	"":         "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes",
+	"status":   "status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes",
+}
+
+func (PersistentVolume) SwaggerDoc() map[string]string {
+	return map_PersistentVolume
+}
+
+var map_PersistentVolumeClaim = map[string]string{
+	"":         "PersistentVolumeClaim is a user's request for and claim to a persistent volume",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+	"status":   "status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+}
+
+func (PersistentVolumeClaim) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaim
+}
+
+var map_PersistentVolumeClaimCondition = map[string]string{
+	"":                   "PersistentVolumeClaimCondition contains details about state of pvc",
+	"type":               "Type is the type of the condition. More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=set%20to%20%27ResizeStarted%27.-,PersistentVolumeClaimCondition,-contains%20details%20about",
+	"status":             "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/reference/kubernetes-api/config-and-storage-resources/persistent-volume-claim-v1/#:~:text=state%20of%20pvc-,conditions.status,-(string)%2C%20required",
+	"lastProbeTime":      "lastProbeTime is the time we probed the condition.",
+	"lastTransitionTime": "lastTransitionTime is the time the condition transitioned from one status to another.",
+	"reason":             "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"Resizing\" that means the underlying persistent volume is being resized.",
+	"message":            "message is the human-readable message indicating details about last transition.",
+}
+
+func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaimCondition
+}
+
+var map_PersistentVolumeClaimList = map[string]string{
+	"":         "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "items is a list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+}
+
+func (PersistentVolumeClaimList) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaimList
+}
+
+var map_PersistentVolumeClaimSpec = map[string]string{
+	"":                          "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes",
+	"accessModes":               "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
+	"selector":                  "selector is a label query over volumes to consider for binding.",
+	"resources":                 "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
+	"volumeName":                "volumeName is the binding reference to the PersistentVolume backing this claim.",
+	"storageClassName":          "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
+	"volumeMode":                "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
+	"dataSource":                "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.",
+	"dataSourceRef":             "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n  allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n  preserves all values, and generates an error if a disallowed value is\n  specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n  in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
+	"volumeAttributesClassName": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default).",
+}
+
+func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaimSpec
+}
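+
+// Editor's sketch (not generated content), assuming the VolumeResourceRequirements
+// type used at this API level; the class name and size are assumptions.
+//
+//	sc := "fast-ssd"
+//	spec := PersistentVolumeClaimSpec{
+//		AccessModes:      []PersistentVolumeAccessMode{ReadWriteOnce},
+//		StorageClassName: &sc,
+//		Resources: VolumeResourceRequirements{
+//			Requests: ResourceList{ResourceStorage: resource.MustParse("10Gi")},
+//		},
+//	}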
+
+var map_PersistentVolumeClaimStatus = map[string]string{
+	"":                                 "PersistentVolumeClaimStatus is the current status of a persistent volume claim.",
+	"phase":                            "phase represents the current phase of PersistentVolumeClaim.",
+	"accessModes":                      "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
+	"capacity":                         "capacity represents the actual resources of the underlying volume.",
+	"conditions":                       "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'Resizing'.",
+	"allocatedResources":               "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
+	"allocatedResourceStatuses":        "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n     - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.",
+	"currentVolumeAttributesClassName": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
+	"modifyVolumeStatus":               "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
+}
+
+func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaimStatus
+}
+
+var map_PersistentVolumeClaimTemplate = map[string]string{
+	"":         "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.",
+	"metadata": "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.",
+	"spec":     "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.",
+}
+
+func (PersistentVolumeClaimTemplate) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaimTemplate
+}
+
+var map_PersistentVolumeClaimVolumeSource = map[string]string{
+	"":          "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).",
+	"claimName": "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+	"readOnly":  "readOnly Will force the ReadOnly setting in VolumeMounts. Default false.",
+}
+
+func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeClaimVolumeSource
+}
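+
+// Editor's sketch (not generated content): mounting an existing claim via a
+// pod VolumeSource; the claim name is an illustrative assumption.
+//
+//	vs := VolumeSource{
+//		PersistentVolumeClaim: &PersistentVolumeClaimVolumeSource{
+//			ClaimName: "data-pvc",
+//		},
+//	}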
+
+var map_PersistentVolumeList = map[string]string{
+	"":         "PersistentVolumeList is a list of PersistentVolume items.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "items is a list of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes",
+}
+
+func (PersistentVolumeList) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeList
+}
+
+var map_PersistentVolumeSource = map[string]string{
+	"":                     "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.",
+	"gcePersistentDisk":    "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+	"awsElasticBlockStore": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+	"hostPath":             "hostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+	"glusterfs":            "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
+	"nfs":                  "nfs represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+	"rbd":                  "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md",
+	"iscsi":                "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
+	"cinder":               "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+	"cephfs":               "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.",
+	"fc":                   "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+	"flocker":              "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.",
+	"flexVolume":           "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.",
+	"azureFile":            "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.",
+	"vsphereVolume":        "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.",
+	"quobyte":              "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.",
+	"azureDisk":            "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.",
+	"photonPersistentDisk": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.",
+	"portworxVolume":       "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.",
+	"scaleIO":              "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.",
+	"local":                "local represents directly-attached storage with node affinity",
+	"storageos":            "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. More info: https://examples.k8s.io/volumes/storageos/README.md",
+	"csi":                  "csi represents storage that is handled by an external CSI driver.",
+}
+
+func (PersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeSource
+}
+
+var map_PersistentVolumeSpec = map[string]string{
+	"":                              "PersistentVolumeSpec is the specification of a persistent volume.",
+	"capacity":                      "capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity",
+	"accessModes":                   "accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes",
+	"claimRef":                      "claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding",
+	"persistentVolumeReclaimPolicy": "persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming",
+	"storageClassName":              "storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.",
+	"mountOptions":                  "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
+	"volumeMode":                    "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
+	"nodeAffinity":                  "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
+	"volumeAttributesClassName":     "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is a beta field and requires enabling VolumeAttributesClass feature (off by default).",
+}
+
+func (PersistentVolumeSpec) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeSpec
+}
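+
+// Editor's sketch (not generated content): a local PV pinned to one node via
+// nodeAffinity, which local volumes require; path and hostname are assumptions.
+//
+//	pv := PersistentVolumeSpec{
+//		Capacity:    ResourceList{ResourceStorage: resource.MustParse("100Gi")},
+//		AccessModes: []PersistentVolumeAccessMode{ReadWriteOnce},
+//		PersistentVolumeSource: PersistentVolumeSource{
+//			Local: &LocalVolumeSource{Path: "/mnt/disks/ssd0"},
+//		},
+//		NodeAffinity: &VolumeNodeAffinity{
+//			Required: &NodeSelector{NodeSelectorTerms: []NodeSelectorTerm{{
+//				MatchExpressions: []NodeSelectorRequirement{{
+//					Key: "kubernetes.io/hostname", Operator: NodeSelectorOpIn, Values: []string{"node-1"},
+//				}},
+//			}}},
+//		},
+//	}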
+
+var map_PersistentVolumeStatus = map[string]string{
+	"":                        "PersistentVolumeStatus is the current status of a persistent volume.",
+	"phase":                   "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase",
+	"message":                 "message is a human-readable message indicating details about why the volume is in this state.",
+	"reason":                  "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.",
+	"lastPhaseTransitionTime": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions.",
+}
+
+func (PersistentVolumeStatus) SwaggerDoc() map[string]string {
+	return map_PersistentVolumeStatus
+}
+
+var map_PhotonPersistentDiskVolumeSource = map[string]string{
+	"":       "Represents a Photon Controller persistent disk resource.",
+	"pdID":   "pdID is the ID that identifies Photon Controller persistent disk",
+	"fsType": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+}
+
+func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string {
+	return map_PhotonPersistentDiskVolumeSource
+}
+
+var map_Pod = map[string]string{
+	"":         "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+	"status":   "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (Pod) SwaggerDoc() map[string]string {
+	return map_Pod
+}
+
+var map_PodAffinity = map[string]string{
+	"": "Pod affinity is a group of inter pod affinity scheduling rules.",
+	"requiredDuringSchedulingIgnoredDuringExecution":  "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
+	"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
+}
+
+func (PodAffinity) SwaggerDoc() map[string]string {
+	return map_PodAffinity
+}
+
+var map_PodAffinityTerm = map[string]string{
+	"":                  "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key  matches that of any node on which a pod of the set of pods is running",
+	"labelSelector":     "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.",
+	"namespaces":        "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".",
+	"topologyKey":       "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
+	"namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.",
+	"matchLabelKeys":    "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
+	"mismatchLabelKeys": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default).",
+}
+
+func (PodAffinityTerm) SwaggerDoc() map[string]string {
+	return map_PodAffinityTerm
+}
+
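+// Illustrative sketch, not part of the generated output: the labelSelector and
+// topologyKey documented above combine into a term like the following, which
+// co-locates pods labeled app=cache on the same node (assumes the usual
+// metav1 import for LabelSelector; label values are arbitrary examples):
+//
+//	term := PodAffinityTerm{
+//		LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "cache"}},
+//		TopologyKey:   "kubernetes.io/hostname",
+//	}
+//	affinity := PodAffinity{RequiredDuringSchedulingIgnoredDuringExecution: []PodAffinityTerm{term}}
+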
+var map_PodAntiAffinity = map[string]string{
+	"": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.",
+	"requiredDuringSchedulingIgnoredDuringExecution":  "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.",
+	"preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.",
+}
+
+func (PodAntiAffinity) SwaggerDoc() map[string]string {
+	return map_PodAntiAffinity
+}
+
+var map_PodAttachOptions = map[string]string{
+	"":          "PodAttachOptions is the query options to a Pod's remote attach call.",
+	"stdin":     "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.",
+	"stdout":    "Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.",
+	"stderr":    "Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.",
+	"tty":       "TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.",
+	"container": "The container in which to execute the command. Defaults to only container if there is only one container in the pod.",
+}
+
+func (PodAttachOptions) SwaggerDoc() map[string]string {
+	return map_PodAttachOptions
+}
+
+var map_PodCondition = map[string]string{
+	"":                   "PodCondition contains details for the current condition of this pod.",
+	"type":               "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
+	"status":             "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
+	"lastProbeTime":      "Last time we probed the condition.",
+	"lastTransitionTime": "Last time the condition transitioned from one status to another.",
+	"reason":             "Unique, one-word, CamelCase reason for the condition's last transition.",
+	"message":            "Human-readable message indicating details about last transition.",
+}
+
+func (PodCondition) SwaggerDoc() map[string]string {
+	return map_PodCondition
+}
+
+var map_PodDNSConfig = map[string]string{
+	"":            "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.",
+	"nameservers": "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.",
+	"searches":    "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.",
+	"options":     "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.",
+}
+
+func (PodDNSConfig) SwaggerDoc() map[string]string {
+	return map_PodDNSConfig
+}
+
+var map_PodDNSConfigOption = map[string]string{
+	"":      "PodDNSConfigOption defines DNS resolver options of a pod.",
+	"name":  "Name is this DNS resolver option's name. Required.",
+	"value": "Value is this DNS resolver option's value.",
+}
+
+func (PodDNSConfigOption) SwaggerDoc() map[string]string {
+	return map_PodDNSConfigOption
+}
+
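+// Illustrative sketch, not part of the generated output: a PodDNSConfig that
+// appends one nameserver, one search domain, and an ndots resolver option
+// (PodDNSConfigOption.Value is a *string, hence the local variable; the
+// addresses and names are arbitrary examples):
+//
+//	ndots := "2"
+//	dnsConfig := PodDNSConfig{
+//		Nameservers: []string{"1.2.3.4"},
+//		Searches:    []string{"ns1.svc.cluster.local"},
+//		Options:     []PodDNSConfigOption{{Name: "ndots", Value: &ndots}},
+//	}
+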
+var map_PodExecOptions = map[string]string{
+	"":          "PodExecOptions is the query options to a Pod's remote exec call.",
+	"stdin":     "Redirect the standard input stream of the pod for this call. Defaults to false.",
+	"stdout":    "Redirect the standard output stream of the pod for this call.",
+	"stderr":    "Redirect the standard error stream of the pod for this call.",
+	"tty":       "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.",
+	"container": "Container in which to execute the command. Defaults to only container if there is only one container in the pod.",
+	"command":   "Command is the remote command to execute. argv array. Not executed within a shell.",
+}
+
+func (PodExecOptions) SwaggerDoc() map[string]string {
+	return map_PodExecOptions
+}
+
+var map_PodIP = map[string]string{
+	"":   "PodIP represents a single IP address allocated to the pod.",
+	"ip": "IP is the IP address assigned to the pod",
+}
+
+func (PodIP) SwaggerDoc() map[string]string {
+	return map_PodIP
+}
+
+var map_PodList = map[string]string{
+	"":         "PodList is a list of Pods.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md",
+}
+
+func (PodList) SwaggerDoc() map[string]string {
+	return map_PodList
+}
+
+var map_PodLogOptions = map[string]string{
+	"":                             "PodLogOptions is the query options for a Pod's logs REST call.",
+	"container":                    "The container for which to stream logs. Defaults to only container if there is one container in the pod.",
+	"follow":                       "Follow the log stream of the pod. Defaults to false.",
+	"previous":                     "Return previous terminated container logs. Defaults to false.",
+	"sinceSeconds":                 "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+	"sinceTime":                    "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+	"timestamps":                   "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
+	"tailLines":                    "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\".",
+	"limitBytes":                   "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
+	"insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to.  This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet.  If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).",
+	"stream":                       "Specify which container log stream to return to the client. Acceptable values are \"All\", \"Stdout\" and \"Stderr\". If not specified, \"All\" is used, and both stdout and stderr are returned interleaved. Note that when \"TailLines\" is specified, \"Stream\" can only be set to nil or \"All\".",
+}
+
+func (PodLogOptions) SwaggerDoc() map[string]string {
+	return map_PodLogOptions
+}
+
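+// Illustrative sketch, not part of the generated output: log options that
+// follow the stream of a hypothetical "app" container and tail the last 100
+// lines. TailLines is a pointer, and per the docs above only one of
+// sinceSeconds/sinceTime may be set:
+//
+//	tail := int64(100)
+//	logOpts := PodLogOptions{Container: "app", Follow: true, TailLines: &tail, Timestamps: true}
+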
+var map_PodOS = map[string]string{
+	"":     "PodOS defines the OS parameters of a pod.",
+	"name": "Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null",
+}
+
+func (PodOS) SwaggerDoc() map[string]string {
+	return map_PodOS
+}
+
+var map_PodPortForwardOptions = map[string]string{
+	"":      "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options. It requires the port to be passed in the `port` header as part of request.",
+	"ports": "List of ports to forward Required when using WebSockets",
+}
+
+func (PodPortForwardOptions) SwaggerDoc() map[string]string {
+	return map_PodPortForwardOptions
+}
+
+var map_PodProxyOptions = map[string]string{
+	"":     "PodProxyOptions is the query options to a Pod's proxy call.",
+	"path": "Path is the URL path to use for the current proxy request to pod.",
+}
+
+func (PodProxyOptions) SwaggerDoc() map[string]string {
+	return map_PodProxyOptions
+}
+
+var map_PodReadinessGate = map[string]string{
+	"":              "PodReadinessGate contains the reference to a pod condition",
+	"conditionType": "ConditionType refers to a condition in the pod's condition list with matching type.",
+}
+
+func (PodReadinessGate) SwaggerDoc() map[string]string {
+	return map_PodReadinessGate
+}
+
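+// Illustrative sketch, not part of the generated output: a readiness gate
+// keyed on a custom condition type that an external controller is expected to
+// set on the pod ("example.com/load-balancer-ready" is a made-up type):
+//
+//	gate := PodReadinessGate{ConditionType: PodConditionType("example.com/load-balancer-ready")}
+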
+var map_PodResourceClaim = map[string]string{
+	"":                          "PodResourceClaim references exactly one ResourceClaim, either directly or by naming a ResourceClaimTemplate which is then turned into a ResourceClaim for the pod.\n\nIt adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.",
+	"name":                      "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.",
+	"resourceClaimName":         "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.",
+	"resourceClaimTemplateName": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.\n\nExactly one of ResourceClaimName and ResourceClaimTemplateName must be set.",
+}
+
+func (PodResourceClaim) SwaggerDoc() map[string]string {
+	return map_PodResourceClaim
+}
+
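+// Illustrative sketch, not part of the generated output: exactly one of
+// ResourceClaimName and ResourceClaimTemplateName may be set, so a claim
+// backed by a (hypothetical) template looks like:
+//
+//	tmpl := "gpu-claim-template"
+//	claim := PodResourceClaim{Name: "gpu", ResourceClaimTemplateName: &tmpl}
+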
+var map_PodResourceClaimStatus = map[string]string{
+	"":                  "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.",
+	"name":              "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.",
+	"resourceClaimName": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.",
+}
+
+func (PodResourceClaimStatus) SwaggerDoc() map[string]string {
+	return map_PodResourceClaimStatus
+}
+
+var map_PodSchedulingGate = map[string]string{
+	"":     "PodSchedulingGate is associated to a Pod to guard its scheduling.",
+	"name": "Name of the scheduling gate. Each scheduling gate must have a unique name field.",
+}
+
+func (PodSchedulingGate) SwaggerDoc() map[string]string {
+	return map_PodSchedulingGate
+}
+
+var map_PodSecurityContext = map[string]string{
+	"":                         "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext.  Field values of container.securityContext take precedence over field values of PodSecurityContext.",
+	"seLinuxOptions":           "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container.  May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
+	"windowsOptions":           "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.",
+	"runAsUser":                "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
+	"runAsGroup":               "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.",
+	"runAsNonRoot":             "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+	"supplementalGroups":       "A list of groups applied to the first process run in each container, in addition to the container's primary GID and fsGroup (if specified).  If the SupplementalGroupsPolicy feature is enabled, the supplementalGroupsPolicy field determines whether these are in addition to or instead of any group memberships defined in the container image. If unspecified, no additional groups are added, though group memberships defined in the container image may still be used, depending on the supplementalGroupsPolicy field. Note that this field cannot be set when spec.os.name is windows.",
+	"supplementalGroupsPolicy": "Defines how supplemental groups of the first container processes are calculated. Valid values are \"Merge\" and \"Strict\". If not specified, \"Merge\" is used. (Alpha) Using the field requires the SupplementalGroupsPolicy feature gate to be enabled and the container runtime must implement support for this feature. Note that this field cannot be set when spec.os.name is windows.",
+	"fsGroup":                  "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
+	"sysctls":                  "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.",
+	"fsGroupChangePolicy":      "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.",
+	"seccompProfile":           "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
+	"appArmorProfile":          "appArmorProfile is the AppArmor options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.",
+	"seLinuxChangePolicy":      "seLinuxChangePolicy defines how the container's SELinux label is applied to all volumes used by the Pod. It has no effect on nodes that do not support SELinux or to volumes does not support SELinux. Valid values are \"MountOption\" and \"Recursive\".\n\n\"Recursive\" means relabeling of all files on all Pod volumes by the container runtime. This may be slow for large volumes, but allows mixing privileged and unprivileged Pods sharing the same volume on the same node.\n\n\"MountOption\" mounts all eligible Pod volumes with `-o context` mount option. This requires all Pods that share the same volume to use the same SELinux label. It is not possible to share the same volume among privileged and unprivileged Pods. Eligible volumes are in-tree FibreChannel and iSCSI volumes, and all CSI volumes whose CSI driver announces SELinux support by setting spec.seLinuxMount: true in their CSIDriver instance. Other volumes are always re-labelled recursively. \"MountOption\" value is allowed only when SELinuxMount feature gate is enabled.\n\nIf not specified and SELinuxMount feature gate is enabled, \"MountOption\" is used. If not specified and SELinuxMount feature gate is disabled, \"MountOption\" is used for ReadWriteOncePod volumes and \"Recursive\" for all other volumes.\n\nThis field affects only Pods that have SELinux label set, either in PodSecurityContext or in SecurityContext of all containers.\n\nAll Pods that use the same volume should use the same seLinuxChangePolicy, otherwise some pods can get stuck in ContainerCreating state. Note that this field cannot be set when spec.os.name is windows.",
+}
+
+func (PodSecurityContext) SwaggerDoc() map[string]string {
+	return map_PodSecurityContext
+}
+
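+// Illustrative sketch, not part of the generated output: a pod-level security
+// context enforcing a non-root UID and an fsGroup for volume ownership (all
+// three fields are pointers; the values here are arbitrary examples):
+//
+//	uid, nonRoot, fsGroup := int64(1000), true, int64(2000)
+//	podSC := PodSecurityContext{RunAsUser: &uid, RunAsNonRoot: &nonRoot, FSGroup: &fsGroup}
+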
+var map_PodSignature = map[string]string{
+	"":              "Describes the class of pods that should avoid this node. Exactly one field should be set.",
+	"podController": "Reference to controller whose pods should avoid this node.",
+}
+
+func (PodSignature) SwaggerDoc() map[string]string {
+	return map_PodSignature
+}
+
+var map_PodSpec = map[string]string{
+	"":                              "PodSpec is a description of a pod.",
+	"volumes":                       "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes",
+	"initContainers":                "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/",
+	"containers":                    "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.",
+	"ephemeralContainers":           "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.",
+	"restartPolicy":                 "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy",
+	"terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.",
+	"activeDeadlineSeconds":         "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.",
+	"dnsPolicy":                     "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.",
+	"nodeSelector":                  "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/",
+	"serviceAccountName":            "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
+	"serviceAccount":                "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.",
+	"automountServiceAccountToken":  "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.",
+	"nodeName":                      "NodeName indicates in which node this pod is scheduled. If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. This field should not be used to express a desire for the pod to be scheduled on a specific node. https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename",
+	"hostNetwork":                   "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.",
+	"hostPID":                       "Use the host's pid namespace. Optional: Default to false.",
+	"hostIPC":                       "Use the host's ipc namespace. Optional: Default to false.",
+	"shareProcessNamespace":         "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.",
+	"securityContext":               "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty.  See type description for default values of each field.",
+	"imagePullSecrets":              "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod",
+	"hostname":                      "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.",
+	"subdomain":                     "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all.",
+	"affinity":                      "If specified, the pod's scheduling constraints",
+	"schedulerName":                 "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.",
+	"tolerations":                   "If specified, the pod's tolerations.",
+	"hostAliases":                   "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified.",
+	"priorityClassName":             "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
+	"priority":                      "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
+	"dnsConfig":                     "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
+	"readinessGates":                "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates",
+	"runtimeClassName":              "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod.  If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class",
+	"enableServiceLinks":            "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
+	"preemptionPolicy":              "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.",
+	"overhead":                      "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md",
+	"topologySpreadConstraints":     "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
+	"setHostnameAsFQDN":             "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.",
+	"os":                            "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.appArmorProfile - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.securityContext.supplementalGroupsPolicy - spec.containers[*].securityContext.appArmorProfile - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup",
+	"hostUsers":                     "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.",
+	"schedulingGates":               "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.",
+	"resourceClaims":                "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.",
+	"resources":                     "Resources is the total amount of CPU and Memory resources required by all containers in the pod. It supports specifying Requests and Limits for \"cpu\" and \"memory\" resource names only. ResourceClaims are not supported.\n\nThis field enables fine-grained control over resource allocation for the entire pod, allowing resource sharing among containers in a pod.\n\nThis is an alpha field and requires enabling the PodLevelResources feature gate.",
+}
+
+func (PodSpec) SwaggerDoc() map[string]string {
+	return map_PodSpec
+}
+
+var map_PodStatus = map[string]string{
+	"":                           "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
+	"phase":                      "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase",
+	"conditions":                 "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions",
+	"message":                    "A human readable message indicating details about why the pod is in this condition.",
+	"reason":                     "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'",
+	"nominatedNodeName":          "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.",
+	"hostIP":                     "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will not be updated even if there is a node is assigned to pod",
+	"hostIPs":                    "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet which in turns means that HostIPs will not be updated even if there is a node is assigned to this pod.",
+	"podIP":                      "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.",
+	"podIPs":                     "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.",
+	"startTime":                  "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.",
+	"initContainerStatuses":      "Statuses of init containers in this pod. The most recent successful non-restartable init container will have ready = true, the most recently started container will have startTime set. Each init container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status",
+	"containerStatuses":          "Statuses of containers in this pod. Each container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
+	"qosClass":                   "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes",
+	"ephemeralContainerStatuses": "Statuses for any ephemeral containers that have run in this pod. Each ephemeral container in the pod should have at most one status in this list, and all statuses should be for containers in the pod. However this is not enforced. If a status for a non-existent container is present in the list, or the list has duplicate names, the behavior of various Kubernetes components is not defined and those statuses might be ignored. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status",
+	"resize":                     "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"",
+	"resourceClaimStatuses":      "Status of resource claims.",
+}
+
+func (PodStatus) SwaggerDoc() map[string]string {
+	return map_PodStatus
+}
+
+var map_PodStatusResult = map[string]string{
+	"":         "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"status":   "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (PodStatusResult) SwaggerDoc() map[string]string {
+	return map_PodStatusResult
+}
+
+var map_PodTemplate = map[string]string{
+	"":         "PodTemplate describes a template for creating copies of a predefined pod.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (PodTemplate) SwaggerDoc() map[string]string {
+	return map_PodTemplate
+}
+
+var map_PodTemplateList = map[string]string{
+	"":         "PodTemplateList is a list of PodTemplates.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of pod templates",
+}
+
+func (PodTemplateList) SwaggerDoc() map[string]string {
+	return map_PodTemplateList
+}
+
+var map_PodTemplateSpec = map[string]string{
+	"":         "PodTemplateSpec describes the data a pod should have when created from a template",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (PodTemplateSpec) SwaggerDoc() map[string]string {
+	return map_PodTemplateSpec
+}
+
+var map_PortStatus = map[string]string{
+	"":         "PortStatus represents the error condition of a service port",
+	"port":     "Port is the port number of the service port of which status is recorded here",
+	"protocol": "Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\"",
+	"error":    "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n  CamelCase names\n- cloud provider specific error values must have names that comply with the\n  format foo.example.com/CamelCase.",
+}
+
+func (PortStatus) SwaggerDoc() map[string]string {
+	return map_PortStatus
+}
+
+var map_PortworxVolumeSource = map[string]string{
+	"":         "PortworxVolumeSource represents a Portworx volume resource.",
+	"volumeID": "volumeID uniquely identifies a Portworx volume",
+	"fsType":   "fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+	"readOnly": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+}
+
+func (PortworxVolumeSource) SwaggerDoc() map[string]string {
+	return map_PortworxVolumeSource
+}
+
+var map_Preconditions = map[string]string{
+	"":    "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
+	"uid": "Specifies the target UID.",
+}
+
+func (Preconditions) SwaggerDoc() map[string]string {
+	return map_Preconditions
+}
+
+var map_PreferAvoidPodsEntry = map[string]string{
+	"":             "Describes a class of pods that should avoid this node.",
+	"podSignature": "The class of pods.",
+	"evictionTime": "Time at which this entry was added to the list.",
+	"reason":       "(brief) reason why this entry was added to the list.",
+	"message":      "Human readable message indicating why this entry was added to the list.",
+}
+
+func (PreferAvoidPodsEntry) SwaggerDoc() map[string]string {
+	return map_PreferAvoidPodsEntry
+}
+
+var map_PreferredSchedulingTerm = map[string]string{
+	"":           "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).",
+	"weight":     "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.",
+	"preference": "A node selector term, associated with the corresponding weight.",
+}
+
+func (PreferredSchedulingTerm) SwaggerDoc() map[string]string {
+	return map_PreferredSchedulingTerm
+}
+
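+// Illustrative sketch, not part of the generated output: a preferred term
+// with weight 50 (weights range 1-100) steering pods toward a zone; the key
+// and value are arbitrary examples:
+//
+//	pref := PreferredSchedulingTerm{
+//		Weight: 50,
+//		Preference: NodeSelectorTerm{MatchExpressions: []NodeSelectorRequirement{
+//			{Key: "topology.kubernetes.io/zone", Operator: NodeSelectorOpIn, Values: []string{"us-east-1a"}},
+//		}},
+//	}
+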
+var map_Probe = map[string]string{
+	"":                              "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
+	"initialDelaySeconds":           "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+	"timeoutSeconds":                "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+	"periodSeconds":                 "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.",
+	"successThreshold":              "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.",
+	"failureThreshold":              "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.",
+	"terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.",
+}
+
+func (Probe) SwaggerDoc() map[string]string {
+	return map_Probe
+}
+
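+// Illustrative sketch, not part of the generated output: an HTTP probe with
+// the thresholds documented above spelled out (assumes the usual intstr
+// import; the path and port are arbitrary examples):
+//
+//	probe := Probe{
+//		ProbeHandler:        ProbeHandler{HTTPGet: &HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)}},
+//		InitialDelaySeconds: 5,
+//		PeriodSeconds:       10,
+//		FailureThreshold:    3,
+//	}
+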
+var map_ProbeHandler = map[string]string{
+	"":          "ProbeHandler defines a specific action that should be taken in a probe. One and only one of the fields must be specified.",
+	"exec":      "Exec specifies a command to execute in the container.",
+	"httpGet":   "HTTPGet specifies an HTTP GET request to perform.",
+	"tcpSocket": "TCPSocket specifies a connection to a TCP port.",
+	"grpc":      "GRPC specifies a GRPC HealthCheckRequest.",
+}
+
+func (ProbeHandler) SwaggerDoc() map[string]string {
+	return map_ProbeHandler
+}
+
+var map_ProjectedVolumeSource = map[string]string{
+	"":            "Represents a projected volume source",
+	"sources":     "sources is the list of volume projections. Each entry in this list handles one source.",
+	"defaultMode": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+}
+
+func (ProjectedVolumeSource) SwaggerDoc() map[string]string {
+	return map_ProjectedVolumeSource
+}
+
+var map_QuobyteVolumeSource = map[string]string{
+	"":         "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.",
+	"registry": "registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes",
+	"volume":   "volume is a string that references an already created Quobyte volume by name.",
+	"readOnly": "readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.",
+	"user":     "user to map volume access to Defaults to serivceaccount user",
+	"group":    "group to map volume access to Default is no group",
+	"tenant":   "tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin",
+}
+
+func (QuobyteVolumeSource) SwaggerDoc() map[string]string {
+	return map_QuobyteVolumeSource
+}
+
+var map_RBDPersistentVolumeSource = map[string]string{
+	"":          "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.",
+	"monitors":  "monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+	"image":     "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+	"fsType":    "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd",
+	"pool":      "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+	"user":      "user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+	"keyring":   "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+	"secretRef": "secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+	"readOnly":  "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+}
+
+func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_RBDPersistentVolumeSource
+}
+
+var map_RBDVolumeSource = map[string]string{
+	"":          "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.",
+	"monitors":  "monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+	"image":     "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+	"fsType":    "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd",
+	"pool":      "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+	"user":      "user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+	"keyring":   "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+	"secretRef": "secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+	"readOnly":  "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it",
+}
+
+func (RBDVolumeSource) SwaggerDoc() map[string]string {
+	return map_RBDVolumeSource
+}
+
+var map_RangeAllocation = map[string]string{
+	"":         "RangeAllocation is not a public type.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"range":    "Range is string that identifies the range represented by 'data'.",
+	"data":     "Data is a bit array containing all allocated addresses in the previous segment.",
+}
+
+func (RangeAllocation) SwaggerDoc() map[string]string {
+	return map_RangeAllocation
+}
+
+var map_ReplicationController = map[string]string{
+	"":         "ReplicationController represents the configuration of a replication controller.",
+	"metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+	"status":   "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (ReplicationController) SwaggerDoc() map[string]string {
+	return map_ReplicationController
+}
+
+var map_ReplicationControllerCondition = map[string]string{
+	"":                   "ReplicationControllerCondition describes the state of a replication controller at a certain point.",
+	"type":               "Type of replication controller condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "The last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (ReplicationControllerCondition) SwaggerDoc() map[string]string {
+	return map_ReplicationControllerCondition
+}
+
+var map_ReplicationControllerList = map[string]string{
+	"":         "ReplicationControllerList is a collection of replication controllers.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+}
+
+func (ReplicationControllerList) SwaggerDoc() map[string]string {
+	return map_ReplicationControllerList
+}
+
+var map_ReplicationControllerSpec = map[string]string{
+	"":                "ReplicationControllerSpec is the specification of a replication controller.",
+	"replicas":        "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller",
+	"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+	"selector":        "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+}
+
+func (ReplicationControllerSpec) SwaggerDoc() map[string]string {
+	return map_ReplicationControllerSpec
+}
+
+var map_ReplicationControllerStatus = map[string]string{
+	"":                     "ReplicationControllerStatus represents the current status of a replication controller.",
+	"replicas":             "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller",
+	"fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.",
+	"readyReplicas":        "The number of ready replicas for this replication controller.",
+	"availableReplicas":    "The number of available replicas (ready for at least minReadySeconds) for this replication controller.",
+	"observedGeneration":   "ObservedGeneration reflects the generation of the most recently observed replication controller.",
+	"conditions":           "Represents the latest available observations of a replication controller's current state.",
+}
+
+func (ReplicationControllerStatus) SwaggerDoc() map[string]string {
+	return map_ReplicationControllerStatus
+}
+
+var map_ResourceClaim = map[string]string{
+	"":        "ResourceClaim references one entry in PodSpec.ResourceClaims.",
+	"name":    "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.",
+	"request": "Request is the name chosen for a request in the referenced claim. If empty, everything from the claim is made available, otherwise only the result of this request.",
+}
+
+func (ResourceClaim) SwaggerDoc() map[string]string {
+	return map_ResourceClaim
+}
+
+var map_ResourceFieldSelector = map[string]string{
+	"":              "ResourceFieldSelector represents container resources (cpu, memory) and their output format",
+	"containerName": "Container name: required for volumes, optional for env vars",
+	"resource":      "Required: resource to select",
+	"divisor":       "Specifies the output format of the exposed resources, defaults to \"1\"",
+}
+
+func (ResourceFieldSelector) SwaggerDoc() map[string]string {
+	return map_ResourceFieldSelector
+}
+
+var map_ResourceHealth = map[string]string{
+	"":           "ResourceHealth represents the health of a resource. It has the latest device health information. This is a part of KEP https://kep.k8s.io/4680.",
+	"resourceID": "ResourceID is the unique identifier of the resource. See the ResourceID type for more information.",
+	"health":     "Health of the resource. can be one of:\n - Healthy: operates as normal\n - Unhealthy: reported unhealthy. We consider this a temporary health issue\n              since we do not have a mechanism today to distinguish\n              temporary and permanent issues.\n - Unknown: The status cannot be determined.\n            For example, Device Plugin got unregistered and hasn't been re-registered since.\n\nIn future we may want to introduce the PermanentlyUnhealthy Status.",
+}
+
+func (ResourceHealth) SwaggerDoc() map[string]string {
+	return map_ResourceHealth
+}
+
+var map_ResourceQuota = map[string]string{
+	"":         "ResourceQuota sets aggregate quota restrictions enforced per namespace",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+	"status":   "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (ResourceQuota) SwaggerDoc() map[string]string {
+	return map_ResourceQuota
+}
+
+var map_ResourceQuotaList = map[string]string{
+	"":         "ResourceQuotaList is a list of ResourceQuota items.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
+}
+
+func (ResourceQuotaList) SwaggerDoc() map[string]string {
+	return map_ResourceQuotaList
+}
+
+var map_ResourceQuotaSpec = map[string]string{
+	"":              "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.",
+	"hard":          "hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
+	"scopes":        "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.",
+	"scopeSelector": "scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.",
+}
+
+func (ResourceQuotaSpec) SwaggerDoc() map[string]string {
+	return map_ResourceQuotaSpec
+}
+
+var map_ResourceQuotaStatus = map[string]string{
+	"":     "ResourceQuotaStatus defines the enforced hard limits and observed use.",
+	"hard": "Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/",
+	"used": "Used is the current observed total usage of the resource in the namespace.",
+}
+
+func (ResourceQuotaStatus) SwaggerDoc() map[string]string {
+	return map_ResourceQuotaStatus
+}
+
+var map_ResourceRequirements = map[string]string{
+	"":         "ResourceRequirements describes the compute resource requirements.",
+	"limits":   "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
+	"requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
+	"claims":   "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.",
+}
+
+func (ResourceRequirements) SwaggerDoc() map[string]string {
+	return map_ResourceRequirements
+}
+
+var map_ResourceStatus = map[string]string{
+	"":          "ResourceStatus represents the status of a single resource allocated to a Pod.",
+	"name":      "Name of the resource. Must be unique within the pod and in case of non-DRA resource, match one of the resources from the pod spec. For DRA resources, the value must be \"claim:/\". When this status is reported about a container, the \"claim_name\" and \"request\" must match one of the claims of this container.",
+	"resources": "List of unique resources health. Each element in the list contains an unique resource ID and its health. At a minimum, for the lifetime of a Pod, resource ID must uniquely identify the resource allocated to the Pod on the Node. If other Pod on the same Node reports the status with the same resource ID, it must be the same resource they share. See ResourceID type definition for a specific format it has in various use cases.",
+}
+
+func (ResourceStatus) SwaggerDoc() map[string]string {
+	return map_ResourceStatus
+}
+
+var map_SELinuxOptions = map[string]string{
+	"":      "SELinuxOptions are the labels to be applied to the container",
+	"user":  "User is a SELinux user label that applies to the container.",
+	"role":  "Role is a SELinux role label that applies to the container.",
+	"type":  "Type is a SELinux type label that applies to the container.",
+	"level": "Level is SELinux level label that applies to the container.",
+}
+
+func (SELinuxOptions) SwaggerDoc() map[string]string {
+	return map_SELinuxOptions
+}
+
+var map_ScaleIOPersistentVolumeSource = map[string]string{
+	"":                 "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume",
+	"gateway":          "gateway is the host address of the ScaleIO API Gateway.",
+	"system":           "system is the name of the storage system as configured in ScaleIO.",
+	"secretRef":        "secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.",
+	"sslEnabled":       "sslEnabled is the flag to enable/disable SSL communication with Gateway, default false",
+	"protectionDomain": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.",
+	"storagePool":      "storagePool is the ScaleIO Storage Pool associated with the protection domain.",
+	"storageMode":      "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.",
+	"volumeName":       "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.",
+	"fsType":           "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\"",
+	"readOnly":         "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+}
+
+func (ScaleIOPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_ScaleIOPersistentVolumeSource
+}
+
+var map_ScaleIOVolumeSource = map[string]string{
+	"":                 "ScaleIOVolumeSource represents a persistent ScaleIO volume",
+	"gateway":          "gateway is the host address of the ScaleIO API Gateway.",
+	"system":           "system is the name of the storage system as configured in ScaleIO.",
+	"secretRef":        "secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.",
+	"sslEnabled":       "sslEnabled Flag enable/disable SSL communication with Gateway, default false",
+	"protectionDomain": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.",
+	"storagePool":      "storagePool is the ScaleIO Storage Pool associated with the protection domain.",
+	"storageMode":      "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.",
+	"volumeName":       "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.",
+	"fsType":           "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".",
+	"readOnly":         "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+}
+
+func (ScaleIOVolumeSource) SwaggerDoc() map[string]string {
+	return map_ScaleIOVolumeSource
+}
+
+var map_ScopeSelector = map[string]string{
+	"":                 "A scope selector represents the AND of the selectors represented by the scoped-resource selector requirements.",
+	"matchExpressions": "A list of scope selector requirements by scope of the resources.",
+}
+
+func (ScopeSelector) SwaggerDoc() map[string]string {
+	return map_ScopeSelector
+}
+
+var map_ScopedResourceSelectorRequirement = map[string]string{
+	"":          "A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.",
+	"scopeName": "The name of the scope that the selector applies to.",
+	"operator":  "Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.",
+	"values":    "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
+}
+
+func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string {
+	return map_ScopedResourceSelectorRequirement
+}
+
+var map_SeccompProfile = map[string]string{
+	"":                 "SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.",
+	"type":             "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.",
+	"localhostProfile": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type.",
+}
+
+func (SeccompProfile) SwaggerDoc() map[string]string {
+	return map_SeccompProfile
+}
+
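+// A minimal illustrative sketch (not generated content): constructing the
+// RuntimeDefault profile described by the type doc above. exampleSeccompProfile
+// is a hypothetical helper added for documentation purposes only.
+func exampleSeccompProfile() SeccompProfile {
+	// Only the type is set; LocalhostProfile must be set only when type is Localhost.
+	return SeccompProfile{Type: SeccompProfileTypeRuntimeDefault}
+}
+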
+var map_Secret = map[string]string{
+	"":           "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.",
+	"metadata":   "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"immutable":  "Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil.",
+	"data":       "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4",
+	"stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API.",
+	"type":       "Used to facilitate programmatic handling of secret data. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types",
+}
+
+func (Secret) SwaggerDoc() map[string]string {
+	return map_Secret
+}
+
+var map_SecretEnvSource = map[string]string{
+	"":         "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.",
+	"optional": "Specify whether the Secret must be defined",
+}
+
+func (SecretEnvSource) SwaggerDoc() map[string]string {
+	return map_SecretEnvSource
+}
+
+var map_SecretKeySelector = map[string]string{
+	"":         "SecretKeySelector selects a key of a Secret.",
+	"key":      "The key of the secret to select from.  Must be a valid secret key.",
+	"optional": "Specify whether the Secret or its key must be defined",
+}
+
+func (SecretKeySelector) SwaggerDoc() map[string]string {
+	return map_SecretKeySelector
+}
+
+var map_SecretList = map[string]string{
+	"":         "SecretList is a list of Secret.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret",
+}
+
+func (SecretList) SwaggerDoc() map[string]string {
+	return map_SecretList
+}
+
+var map_SecretProjection = map[string]string{
+	"":         "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.",
+	"items":    "items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+	"optional": "optional field specify whether the Secret or its key must be defined",
+}
+
+func (SecretProjection) SwaggerDoc() map[string]string {
+	return map_SecretProjection
+}
+
+var map_SecretReference = map[string]string{
+	"":          "SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace",
+	"name":      "name is unique within a namespace to reference a secret resource.",
+	"namespace": "namespace defines the space within which the secret name must be unique.",
+}
+
+func (SecretReference) SwaggerDoc() map[string]string {
+	return map_SecretReference
+}
+
+var map_SecretVolumeSource = map[string]string{
+	"":            "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.",
+	"secretName":  "secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
+	"items":       "items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
+	"defaultMode": "defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
+	"optional":    "optional field specify whether the Secret or its keys must be defined",
+}
+
+func (SecretVolumeSource) SwaggerDoc() map[string]string {
+	return map_SecretVolumeSource
+}
+
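+// Illustrative sketch (not generated content): the defaultMode doc above notes
+// that JSON requires decimal mode bits while YAML also accepts octal.
+// exampleSecretVolume is a hypothetical helper; the secret name "app-tls" is an
+// assumption for illustration.
+func exampleSecretVolume() SecretVolumeSource {
+	mode := int32(0o400) // octal 0400 == decimal 256: read-only for the owner
+	return SecretVolumeSource{SecretName: "app-tls", DefaultMode: &mode}
+}
+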
+var map_SecurityContext = map[string]string{
+	"":                         "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext.  When both are set, the values in SecurityContext take precedence.",
+	"capabilities":             "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.",
+	"privileged":               "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.",
+	"seLinuxOptions":           "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container.  May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.",
+	"windowsOptions":           "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.",
+	"runAsUser":                "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.",
+	"runAsGroup":               "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.",
+	"runAsNonRoot":             "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+	"readOnlyRootFilesystem":   "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.",
+	"allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.",
+	"procMount":                "procMount denotes the type of proc mount to use for the containers. The default value is Default which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.",
+	"seccompProfile":           "The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.",
+	"appArmorProfile":          "appArmorProfile is the AppArmor options to use by this container. If set, this profile overrides the pod's appArmorProfile. Note that this field cannot be set when spec.os.name is windows.",
+}
+
+func (SecurityContext) SwaggerDoc() map[string]string {
+	return map_SecurityContext
+}
+
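+// Illustrative sketch (not generated content): a restrictive container
+// SecurityContext combining several of the fields documented above.
+// exampleSecurityContext is a hypothetical helper, not an API of this package.
+func exampleSecurityContext() SecurityContext {
+	runAsNonRoot := true
+	readOnlyRootFS := true
+	return SecurityContext{
+		RunAsNonRoot:           &runAsNonRoot,
+		ReadOnlyRootFilesystem: &readOnlyRootFS,
+		// Drop all capabilities instead of relying on the runtime default set.
+		Capabilities: &Capabilities{Drop: []Capability{"ALL"}},
+	}
+}
+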
+var map_SerializedReference = map[string]string{
+	"":          "SerializedReference is a reference to serialized object.",
+	"reference": "The reference to an object in the system.",
+}
+
+func (SerializedReference) SwaggerDoc() map[string]string {
+	return map_SerializedReference
+}
+
+var map_Service = map[string]string{
+	"":         "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+	"status":   "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (Service) SwaggerDoc() map[string]string {
+	return map_Service
+}
+
+var map_ServiceAccount = map[string]string{
+	"":                             "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets",
+	"metadata":                     "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"secrets":                      "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". The \"kubernetes.io/enforce-mountable-secrets\" annotation is deprecated since v1.32. Prefer separate namespaces to isolate access to mounted secrets. This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret",
+	"imagePullSecrets":             "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod",
+	"automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.",
+}
+
+func (ServiceAccount) SwaggerDoc() map[string]string {
+	return map_ServiceAccount
+}
+
+var map_ServiceAccountList = map[string]string{
+	"":         "ServiceAccountList is a list of ServiceAccount objects",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
+}
+
+func (ServiceAccountList) SwaggerDoc() map[string]string {
+	return map_ServiceAccountList
+}
+
+var map_ServiceAccountTokenProjection = map[string]string{
+	"":                  "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).",
+	"audience":          "audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.",
+	"expirationSeconds": "expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.",
+	"path":              "path is the path relative to the mount point of the file to project the token into.",
+}
+
+func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string {
+	return map_ServiceAccountTokenProjection
+}
+
+var map_ServiceList = map[string]string{
+	"":         "ServiceList holds a list of services.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of services",
+}
+
+func (ServiceList) SwaggerDoc() map[string]string {
+	return map_ServiceList
+}
+
+var map_ServicePort = map[string]string{
+	"":            "ServicePort contains information on service's port.",
+	"name":        "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.",
+	"protocol":    "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.",
+	"appProtocol": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n  * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n  * 'kubernetes.io/ws'  - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n  * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.",
+	"port":        "The port that will be exposed by this service.",
+	"targetPort":  "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
+	"nodePort":    "The port on each node on which this service is exposed when type is NodePort or LoadBalancer.  Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail.  If not specified, a port will be allocated if this Service requires one.  If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
+}
+
+func (ServicePort) SwaggerDoc() map[string]string {
+	return map_ServicePort
+}
+
+var map_ServiceProxyOptions = map[string]string{
+	"":     "ServiceProxyOptions is the query options to a Service's proxy call.",
+	"path": "Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.",
+}
+
+func (ServiceProxyOptions) SwaggerDoc() map[string]string {
+	return map_ServiceProxyOptions
+}
+
+var map_ServiceSpec = map[string]string{
+	"":                              "ServiceSpec describes the attributes that a user creates on a service.",
+	"ports":                         "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+	"selector":                      "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/",
+	"clusterIP":                     "clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above).  Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required.  Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+	"clusterIPs":                    "ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly.  If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above).  Valid values are \"None\", empty string (\"\"), or a valid IP address.  Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required.  Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName.  If this field is not specified, it will be initialized from the clusterIP field.  If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\n\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+	"type":                          "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types",
+	"externalIPs":                   "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service.  These IPs are not managed by Kubernetes.  The user is responsible for ensuring that traffic arrives at a node with this IP.  A common example is external load-balancers that are not part of the Kubernetes system.",
+	"sessionAffinity":               "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
+	"loadBalancerIP":                "Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available.",
+	"loadBalancerSourceRanges":      "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/",
+	"externalName":                  "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved.  Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\".",
+	"externalTrafficPolicy":         "externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \"externally-facing\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \"Local\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \"Cluster\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.",
+	"healthCheckNodePort":           "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used.  If not specified, a value will be automatically allocated.  External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not.  If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set.",
+	"publishNotReadyAddresses":      "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.",
+	"sessionAffinityConfig":         "sessionAffinityConfig contains the configurations of session affinity.",
+	"ipFamilies":                    "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\".  This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order).  These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.",
+	"ipFamilyPolicy":                "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.",
+	"allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer.  Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts.  If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.",
+	"loadBalancerClass":             "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.",
+	"internalTrafficPolicy":         "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).",
+	"trafficDistribution":           "TrafficDistribution offers a way to express preferences for how traffic is distributed to Service endpoints. Implementations can use this field as a hint, but are not required to guarantee strict adherence. If the field is not set, the implementation will apply its default routing strategy. If set to \"PreferClose\", implementations should prioritize endpoints that are topologically close (e.g., same zone). This is a beta field and requires enabling ServiceTrafficDistribution feature.",
+}
+
+func (ServiceSpec) SwaggerDoc() map[string]string {
+	return map_ServiceSpec
+}
+
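+// Illustrative sketch (not generated content): a minimal ClusterIP ServiceSpec
+// using the fields documented above. exampleServiceSpec is a hypothetical
+// helper; the selector and port values are assumptions for illustration.
+func exampleServiceSpec() ServiceSpec {
+	return ServiceSpec{
+		Type:     ServiceTypeClusterIP,
+		Selector: map[string]string{"app": "web"},
+		// Port 80 is exposed on the service's cluster IP; with no targetPort
+		// set, the same port number is used on the backing pods.
+		Ports: []ServicePort{{Name: "http", Protocol: ProtocolTCP, Port: 80}},
+	}
+}
+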
+var map_ServiceStatus = map[string]string{
+	"":             "ServiceStatus represents the current status of a service.",
+	"loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.",
+	"conditions":   "Current service state",
+}
+
+func (ServiceStatus) SwaggerDoc() map[string]string {
+	return map_ServiceStatus
+}
+
+var map_SessionAffinityConfig = map[string]string{
+	"":         "SessionAffinityConfig represents the configurations of session affinity.",
+	"clientIP": "clientIP contains the configurations of Client IP based session affinity.",
+}
+
+func (SessionAffinityConfig) SwaggerDoc() map[string]string {
+	return map_SessionAffinityConfig
+}
+
+var map_SleepAction = map[string]string{
+	"":        "SleepAction describes a \"sleep\" action.",
+	"seconds": "Seconds is the number of seconds to sleep.",
+}
+
+func (SleepAction) SwaggerDoc() map[string]string {
+	return map_SleepAction
+}
+
+var map_StorageOSPersistentVolumeSource = map[string]string{
+	"":                "Represents a StorageOS persistent volume resource.",
+	"volumeName":      "volumeName is the human-readable name of the StorageOS volume.  Volume names are only unique within a namespace.",
+	"volumeNamespace": "volumeNamespace specifies the scope of the volume within StorageOS.  If no namespace is specified then the Pod's namespace will be used.  This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.",
+	"fsType":          "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+	"readOnly":        "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"secretRef":       "secretRef specifies the secret to use for obtaining the StorageOS API credentials.  If not specified, default values will be attempted.",
+}
+
+func (StorageOSPersistentVolumeSource) SwaggerDoc() map[string]string {
+	return map_StorageOSPersistentVolumeSource
+}
+
+var map_StorageOSVolumeSource = map[string]string{
+	"":                "Represents a StorageOS persistent volume resource.",
+	"volumeName":      "volumeName is the human-readable name of the StorageOS volume.  Volume names are only unique within a namespace.",
+	"volumeNamespace": "volumeNamespace specifies the scope of the volume within StorageOS.  If no namespace is specified then the Pod's namespace will be used.  This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.",
+	"fsType":          "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+	"readOnly":        "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
+	"secretRef":       "secretRef specifies the secret to use for obtaining the StorageOS API credentials.  If not specified, default values will be attempted.",
+}
+
+func (StorageOSVolumeSource) SwaggerDoc() map[string]string {
+	return map_StorageOSVolumeSource
+}
+
+var map_Sysctl = map[string]string{
+	"":      "Sysctl defines a kernel parameter to be set",
+	"name":  "Name of a property to set",
+	"value": "Value of a property to set",
+}
+
+func (Sysctl) SwaggerDoc() map[string]string {
+	return map_Sysctl
+}
+
+var map_TCPSocketAction = map[string]string{
+	"":     "TCPSocketAction describes an action based on opening a socket",
+	"port": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.",
+	"host": "Optional: Host name to connect to, defaults to the pod IP.",
+}
+
+func (TCPSocketAction) SwaggerDoc() map[string]string {
+	return map_TCPSocketAction
+}
+
+var map_Taint = map[string]string{
+	"":          "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.",
+	"key":       "Required. The taint key to be applied to a node.",
+	"value":     "The taint value corresponding to the taint key.",
+	"effect":    "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.",
+	"timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.",
+}
+
+func (Taint) SwaggerDoc() map[string]string {
+	return map_Taint
+}
+
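+// Illustrative sketch (not generated content): a NoSchedule taint as described
+// above. exampleTaint is a hypothetical helper; the key and value are
+// assumptions for illustration.
+func exampleTaint() Taint {
+	return Taint{Key: "dedicated", Value: "gpu", Effect: TaintEffectNoSchedule}
+}
+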
+var map_Toleration = map[string]string{
+	"":                  "The pod this Toleration is attached to tolerates any taint that matches the triple  using the matching operator .",
+	"key":               "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.",
+	"operator":          "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.",
+	"value":             "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.",
+	"effect":            "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.",
+	"tolerationSeconds": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.",
+}
+
+func (Toleration) SwaggerDoc() map[string]string {
+	return map_Toleration
+}
+
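+// Illustrative sketch (not generated content): a toleration matching the taint
+// sketched above. Per the field docs, operator Exists with an empty value
+// tolerates every taint with the given key. exampleToleration is a
+// hypothetical helper for illustration only.
+func exampleToleration() Toleration {
+	return Toleration{
+		Key:      "dedicated",
+		Operator: TolerationOpExists, // wildcard for value
+		Effect:   TaintEffectNoSchedule,
+	}
+}
+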
+var map_TopologySelectorLabelRequirement = map[string]string{
+	"":       "A topology selector requirement is a selector that matches given label. This is an alpha feature and may change in the future.",
+	"key":    "The label key that the selector applies to.",
+	"values": "An array of string values. One value must match the label to be selected. Each entry in Values is ORed.",
+}
+
+func (TopologySelectorLabelRequirement) SwaggerDoc() map[string]string {
+	return map_TopologySelectorLabelRequirement
+}
+
+var map_TopologySelectorTerm = map[string]string{
+	"":                      "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.",
+	"matchLabelExpressions": "A list of topology selector requirements by labels.",
+}
+
+func (TopologySelectorTerm) SwaggerDoc() map[string]string {
+	return map_TopologySelectorTerm
+}
+
+var map_TopologySpreadConstraint = map[string]string{
+	"":                   "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
+	"maxSkew":            "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. ",
+	"topologyKey":        "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each  as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.",
+	"whenUnsatisfiable":  "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n  but giving higher precedence to topologies that would help reduce the\n  skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ",
+	"labelSelector":      "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
+	"minDomains":         "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: ",
+	"nodeAffinityPolicy": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+	"nodeTaintsPolicy":   "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.",
+	"matchLabelKeys":     "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).",
+}
+
+func (TopologySpreadConstraint) SwaggerDoc() map[string]string {
+	return map_TopologySpreadConstraint
+}
+
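A minimal sketch of a constraint built from these fields (the app=web selector is hypothetical): with maxSkew=1 and DoNotSchedule, no zone may exceed the global minimum count of matching pods by more than one.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Spread pods labeled app=web evenly across zones.
	c := corev1.TopologySpreadConstraint{
		MaxSkew:           1,
		TopologyKey:       "topology.kubernetes.io/zone",
		WhenUnsatisfiable: corev1.DoNotSchedule,
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"},
		},
	}
	fmt.Printf("%+v\n", c)
}
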
+var map_TypedLocalObjectReference = map[string]string{
+	"":         "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.",
+	"apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.",
+	"kind":     "Kind is the type of resource being referenced",
+	"name":     "Name is the name of resource being referenced",
+}
+
+func (TypedLocalObjectReference) SwaggerDoc() map[string]string {
+	return map_TypedLocalObjectReference
+}
+
+var map_TypedObjectReference = map[string]string{
+	"":          "TypedObjectReference contains enough information to let you locate the typed referenced object",
+	"apiGroup":  "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.",
+	"kind":      "Kind is the type of resource being referenced",
+	"name":      "Name is the name of resource being referenced",
+	"namespace": "Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.",
+}
+
+func (TypedObjectReference) SwaggerDoc() map[string]string {
+	return map_TypedObjectReference
+}
+
+var map_Volume = map[string]string{
+	"":     "Volume represents a named volume in a pod that may be accessed by any container in the pod.",
+	"name": "name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+}
+
+func (Volume) SwaggerDoc() map[string]string {
+	return map_Volume
+}
+
+var map_VolumeDevice = map[string]string{
+	"":           "volumeDevice describes a mapping of a raw block device within a container.",
+	"name":       "name must match the name of a persistentVolumeClaim in the pod",
+	"devicePath": "devicePath is the path inside of the container that the device will be mapped to.",
+}
+
+func (VolumeDevice) SwaggerDoc() map[string]string {
+	return map_VolumeDevice
+}
+
+var map_VolumeMount = map[string]string{
+	"":                  "VolumeMount describes a mounting of a Volume within a container.",
+	"name":              "This must match the Name of a Volume.",
+	"readOnly":          "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.",
+	"recursiveReadOnly": "RecursiveReadOnly specifies whether read-only mounts should be handled recursively.\n\nIf ReadOnly is false, this field has no meaning and must be unspecified.\n\nIf ReadOnly is true, and this field is set to Disabled, the mount is not made recursively read-only.  If this field is set to IfPossible, the mount is made recursively read-only, if it is supported by the container runtime.  If this field is set to Enabled, the mount is made recursively read-only if it is supported by the container runtime, otherwise the pod will not be started and an error will be generated to indicate the reason.\n\nIf this field is set to IfPossible or Enabled, MountPropagation must be set to None (or be unspecified, which defaults to None).\n\nIf this field is not specified, it is treated as an equivalent of Disabled.",
+	"mountPath":         "Path within the container at which the volume should be mounted.  Must not contain ':'.",
+	"subPath":           "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).",
+	"mountPropagation":  "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified (which defaults to None).",
+	"subPathExpr":       "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.",
+}
+
+func (VolumeMount) SwaggerDoc() map[string]string {
+	return map_VolumeMount
+}
+
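To illustrate how the fields above interact, a minimal sketch of a read-only mount of a single file from a volume (the names "config", the paths, and the file name are hypothetical); note SubPath and SubPathExpr are mutually exclusive:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Mount only config.yaml from the "config" volume, read-only.
	m := corev1.VolumeMount{
		Name:      "config",
		MountPath: "/etc/app/config.yaml",
		SubPath:   "config.yaml",
		ReadOnly:  true,
	}
	fmt.Printf("%+v\n", m)
}
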
+var map_VolumeMountStatus = map[string]string{
+	"":                  "VolumeMountStatus shows status of volume mounts.",
+	"name":              "Name corresponds to the name of the original VolumeMount.",
+	"mountPath":         "MountPath corresponds to the original VolumeMount.",
+	"readOnly":          "ReadOnly corresponds to the original VolumeMount.",
+	"recursiveReadOnly": "RecursiveReadOnly must be set to Disabled, Enabled, or unspecified (for non-readonly mounts). An IfPossible value in the original VolumeMount must be translated to Disabled or Enabled, depending on the mount result.",
+}
+
+func (VolumeMountStatus) SwaggerDoc() map[string]string {
+	return map_VolumeMountStatus
+}
+
+var map_VolumeNodeAffinity = map[string]string{
+	"":         "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.",
+	"required": "required specifies hard node constraints that must be met.",
+}
+
+func (VolumeNodeAffinity) SwaggerDoc() map[string]string {
+	return map_VolumeNodeAffinity
+}
+
+var map_VolumeProjection = map[string]string{
+	"":                    "Projection that may be projected along with other supported volume types. Exactly one of these fields must be set.",
+	"secret":              "secret information about the secret data to project",
+	"downwardAPI":         "downwardAPI information about the downwardAPI data to project",
+	"configMap":           "configMap information about the configMap data to project",
+	"serviceAccountToken": "serviceAccountToken is information about the serviceAccountToken data to project",
+	"clusterTrustBundle":  "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem.  Esoteric PEM features such as inter-block comments and block headers are stripped.  Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.",
+}
+
+func (VolumeProjection) SwaggerDoc() map[string]string {
+	return map_VolumeProjection
+}
+
+var map_VolumeResourceRequirements = map[string]string{
+	"":         "VolumeResourceRequirements describes the storage resource requirements for a volume.",
+	"limits":   "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
+	"requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/",
+}
+
+func (VolumeResourceRequirements) SwaggerDoc() map[string]string {
+	return map_VolumeResourceRequirements
+}
+
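A minimal sketch of requesting storage for a volume via these fields (the 10Gi quantity is hypothetical); when both are set, Requests must not exceed Limits:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Request 10Gi of storage; Limits is left unset here.
	r := corev1.VolumeResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceStorage: resource.MustParse("10Gi"),
		},
	}
	fmt.Printf("%+v\n", r)
}
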
+var map_VolumeSource = map[string]string{
+	"":                      "Represents the source of a volume to mount. Only one of its members may be specified.",
+	"hostPath":              "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
+	"emptyDir":              "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
+	"gcePersistentDisk":     "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
+	"awsElasticBlockStore":  "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
+	"gitRepo":               "gitRepo represents a git repository at a particular revision. Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
+	"secret":                "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret",
+	"nfs":                   "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
+	"iscsi":                 "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md",
+	"glusterfs":             "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. More info: https://examples.k8s.io/volumes/glusterfs/README.md",
+	"persistentVolumeClaim": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+	"rbd":                   "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. More info: https://examples.k8s.io/volumes/rbd/README.md",
+	"flexVolume":            "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead.",
+	"cinder":                "cinder represents a cinder volume attached and mounted on kubelets host machine. Deprecated: Cinder is deprecated. All operations for the in-tree cinder type are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md",
+	"cephfs":                "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported.",
+	"flocker":               "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported.",
+	"downwardAPI":           "downwardAPI represents downward API about the pod that should populate this volume",
+	"fc":                    "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
+	"azureFile":             "azureFile represents an Azure File Service mount on the host and bind mount to the pod. Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type are redirected to the file.csi.azure.com CSI driver.",
+	"configMap":             "configMap represents a configMap that should populate this volume",
+	"vsphereVolume":         "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type are redirected to the csi.vsphere.vmware.com CSI driver.",
+	"quobyte":               "quobyte represents a Quobyte mount on the host that shares a pod's lifetime. Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported.",
+	"azureDisk":             "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type are redirected to the disk.csi.azure.com CSI driver.",
+	"photonPersistentDisk":  "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported.",
+	"projected":             "projected items for all in one resources secrets, configmaps, and downward API",
+	"portworxVolume":        "portworxVolume represents a portworx volume attached and mounted on kubelets host machine. Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate is on.",
+	"scaleIO":               "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported.",
+	"storageos":             "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported.",
+	"csi":                   "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.",
+	"ephemeral":             "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n   tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n   a PersistentVolumeClaim (see EphemeralVolumeSource for more\n   information on the connection between this volume type\n   and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
+	"image":                 "image represents an OCI object (a container image or artifact) pulled and mounted on the kubelet's host machine. The volume is resolved at pod startup depending on which PullPolicy value is provided:\n\n- Always: the kubelet always attempts to pull the reference. Container creation will fail If the pull fails. - Never: the kubelet never pulls the reference and only uses a local image or artifact. Container creation will fail if the reference isn't present. - IfNotPresent: the kubelet pulls if the reference isn't already present on disk. Container creation will fail if the reference isn't present and the pull fails.\n\nThe volume gets re-resolved if the pod gets deleted and recreated, which means that new remote content will become available on pod recreation. A failure to resolve or pull the image during pod startup will block containers from starting and may add significant latency. Failures will be retried using normal volume backoff and will be reported on the pod reason and message. The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type.",
+}
+
+func (VolumeSource) SwaggerDoc() map[string]string {
+	return map_VolumeSource
+}
+
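Exactly one VolumeSource member may be set, as the doc above notes; a minimal sketch using an in-memory emptyDir (the volume name "scratch" is hypothetical):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// A tmpfs-backed scratch volume; only the EmptyDir member is set.
	v := corev1.Volume{
		Name: "scratch",
		VolumeSource: corev1.VolumeSource{
			EmptyDir: &corev1.EmptyDirVolumeSource{
				Medium: corev1.StorageMediumMemory,
			},
		},
	}
	fmt.Printf("%+v\n", v)
}
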
+var map_VsphereVirtualDiskVolumeSource = map[string]string{
+	"":                  "Represents a vSphere volume resource.",
+	"volumePath":        "volumePath is the path that identifies vSphere volume vmdk",
+	"fsType":            "fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
+	"storagePolicyName": "storagePolicyName is the storage Policy Based Management (SPBM) profile name.",
+	"storagePolicyID":   "storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.",
+}
+
+func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string {
+	return map_VsphereVirtualDiskVolumeSource
+}
+
+var map_WeightedPodAffinityTerm = map[string]string{
+	"":                "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)",
+	"weight":          "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.",
+	"podAffinityTerm": "Required. A pod affinity term, associated with the corresponding weight.",
+}
+
+func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string {
+	return map_WeightedPodAffinityTerm
+}
+
+var map_WindowsSecurityContextOptions = map[string]string{
+	"":                       "WindowsSecurityContextOptions contain Windows-specific options and credentials.",
+	"gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use.",
+	"gmsaCredentialSpec":     "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.",
+	"runAsUserName":          "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+	"hostProcess":            "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.",
+}
+
+func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string {
+	return map_WindowsSecurityContextOptions
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/well_known_labels.go b/hack/tools/vendor/k8s.io/api/core/v1/well_known_labels.go
new file mode 100644
index 0000000000..8c3cb87b82
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/well_known_labels.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+const (
+	LabelHostname = "kubernetes.io/hostname"
+
+	// Label value is the network location of kube-apiserver stored as <ip:port>
+	// Stored in APIServer Identity lease objects to view what address is used for peer proxy
+	AnnotationPeerAdvertiseAddress = "kubernetes.io/peer-advertise-address"
+
+	LabelTopologyZone   = "topology.kubernetes.io/zone"
+	LabelTopologyRegion = "topology.kubernetes.io/region"
+
+	// These labels have been deprecated since 1.17, but will be supported for
+	// the foreseeable future, to accommodate things like long-lived PVs that
+	// use them.  New users should prefer the "topology.kubernetes.io/*"
+	// equivalents.
+	LabelFailureDomainBetaZone   = "failure-domain.beta.kubernetes.io/zone"   // deprecated
+	LabelFailureDomainBetaRegion = "failure-domain.beta.kubernetes.io/region" // deprecated
+
+	// Retained for compat when vendored.  Do not use these consts in new code.
+	LabelZoneFailureDomain       = LabelFailureDomainBetaZone   // deprecated
+	LabelZoneRegion              = LabelFailureDomainBetaRegion // deprecated
+	LabelZoneFailureDomainStable = LabelTopologyZone            // deprecated
+	LabelZoneRegionStable        = LabelTopologyRegion          // deprecated
+
+	LabelInstanceType       = "beta.kubernetes.io/instance-type"
+	LabelInstanceTypeStable = "node.kubernetes.io/instance-type"
+
+	LabelOSStable   = "kubernetes.io/os"
+	LabelArchStable = "kubernetes.io/arch"
+
+	// LabelWindowsBuild is used on Windows nodes to specify the Windows build number starting with v1.17.0.
+	// It's in the format MajorVersion.MinorVersion.BuildNumber (for ex: 10.0.17763)
+	LabelWindowsBuild = "node.kubernetes.io/windows-build"
+
+	// LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*)
+	LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io"
+	// LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*)
+	LabelNamespaceSuffixNode = "node.kubernetes.io"
+
+	// LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled
+	LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io"
+
+	// IsHeadlessService is added by Controller to an Endpoint denoting if its parent
+	// Service is Headless. The existence of this label can be used further by other
+	// controllers and kube-proxy to check if the Endpoint objects should be replicated when
+	// using Headless Services
+	IsHeadlessService = "service.kubernetes.io/headless"
+
+	// LabelNodeExcludeBalancers specifies that the node should not be considered as a target
+	// for external load-balancers which use nodes as a second hop (e.g. many cloud LBs which only
+	// understand nodes). For services that use externalTrafficPolicy=Local, this may mean that
+	// any backends on excluded nodes are not reachable by those external load-balancers.
+	// Implementations of this exclusion may vary based on provider.
+	LabelNodeExcludeBalancers = "node.kubernetes.io/exclude-from-external-load-balancers"
+	// LabelMetadataName is the label name which, in-tree, is used to automatically label namespaces, so they can be selected easily by tools which require definitive labels
+	LabelMetadataName = "kubernetes.io/metadata.name"
+)
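
A minimal sketch of using these exported constants instead of hand-typed label strings (the zone value "us-east-1a" is hypothetical):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Pin a pod to a zone and OS via the stable well-known labels.
	spec := corev1.PodSpec{
		NodeSelector: map[string]string{
			corev1.LabelTopologyZone: "us-east-1a",
			corev1.LabelOSStable:     "linux",
		},
	}
	fmt.Println(spec.NodeSelector)
}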
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/well_known_taints.go b/hack/tools/vendor/k8s.io/api/core/v1/well_known_taints.go
new file mode 100644
index 0000000000..a6d8c272b2
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/well_known_taints.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+const (
+	// TaintNodeNotReady will be added when node is not ready
+	// and removed when node becomes ready.
+	TaintNodeNotReady = "node.kubernetes.io/not-ready"
+
+	// TaintNodeUnreachable will be added when node becomes unreachable
+	// (corresponding to NodeReady status ConditionUnknown)
+	// and removed when node becomes reachable (NodeReady status ConditionTrue).
+	TaintNodeUnreachable = "node.kubernetes.io/unreachable"
+
+	// TaintNodeUnschedulable will be added when node becomes unschedulable
+	// and removed when node becomes schedulable.
+	TaintNodeUnschedulable = "node.kubernetes.io/unschedulable"
+
+	// TaintNodeMemoryPressure will be added when node has memory pressure
+	// and removed when node has enough memory.
+	TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure"
+
+	// TaintNodeDiskPressure will be added when node has disk pressure
+	// and removed when node has enough disk.
+	TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure"
+
+	// TaintNodeNetworkUnavailable will be added when node's network is unavailable
+	// and removed when network becomes ready.
+	TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable"
+
+	// TaintNodePIDPressure will be added when node has pid pressure
+	// and removed when node has enough pid.
+	TaintNodePIDPressure = "node.kubernetes.io/pid-pressure"
+
+	// TaintNodeOutOfService can be added when node is out of service in case of
+	// a non-graceful shutdown
+	TaintNodeOutOfService = "node.kubernetes.io/out-of-service"
+)
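
These taint keys pair naturally with tolerations; a minimal sketch that keeps a pod on a not-ready node for 60 seconds before eviction (the 60-second window is hypothetical):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Reference the well-known taint key by its exported constant;
	// with Operator=Exists the Value must be left empty.
	seconds := int64(60)
	tol := corev1.Toleration{
		Key:               corev1.TaintNodeNotReady,
		Operator:          corev1.TolerationOpExists,
		Effect:            corev1.TaintEffectNoExecute,
		TolerationSeconds: &seconds,
	}
	fmt.Printf("%+v\n", tol)
}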
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..3f669092ef
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
@@ -0,0 +1,6655 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	types "k8s.io/apimachinery/pkg/types"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource.
+func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSElasticBlockStoreVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Affinity) DeepCopyInto(out *Affinity) {
+	*out = *in
+	if in.NodeAffinity != nil {
+		in, out := &in.NodeAffinity, &out.NodeAffinity
+		*out = new(NodeAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PodAffinity != nil {
+		in, out := &in.PodAffinity, &out.PodAffinity
+		*out = new(PodAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PodAntiAffinity != nil {
+		in, out := &in.PodAntiAffinity, &out.PodAntiAffinity
+		*out = new(PodAntiAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity.
+func (in *Affinity) DeepCopy() *Affinity {
+	if in == nil {
+		return nil
+	}
+	out := new(Affinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AppArmorProfile) DeepCopyInto(out *AppArmorProfile) {
+	*out = *in
+	if in.LocalhostProfile != nil {
+		in, out := &in.LocalhostProfile, &out.LocalhostProfile
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppArmorProfile.
+func (in *AppArmorProfile) DeepCopy() *AppArmorProfile {
+	if in == nil {
+		return nil
+	}
+	out := new(AppArmorProfile)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume.
+func (in *AttachedVolume) DeepCopy() *AttachedVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(AttachedVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AvoidPods) DeepCopyInto(out *AvoidPods) {
+	*out = *in
+	if in.PreferAvoidPods != nil {
+		in, out := &in.PreferAvoidPods, &out.PreferAvoidPods
+		*out = make([]PreferAvoidPodsEntry, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods.
+func (in *AvoidPods) DeepCopy() *AvoidPods {
+	if in == nil {
+		return nil
+	}
+	out := new(AvoidPods)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) {
+	*out = *in
+	if in.CachingMode != nil {
+		in, out := &in.CachingMode, &out.CachingMode
+		*out = new(AzureDataDiskCachingMode)
+		**out = **in
+	}
+	if in.FSType != nil {
+		in, out := &in.FSType, &out.FSType
+		*out = new(string)
+		**out = **in
+	}
+	if in.ReadOnly != nil {
+		in, out := &in.ReadOnly, &out.ReadOnly
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Kind != nil {
+		in, out := &in.Kind, &out.Kind
+		*out = new(AzureDataDiskKind)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource.
+func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) {
+	*out = *in
+	if in.SecretNamespace != nil {
+		in, out := &in.SecretNamespace, &out.SecretNamespace
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource.
+func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureFilePersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource.
+func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureFileVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Binding) DeepCopyInto(out *Binding) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Target = in.Target
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding.
+func (in *Binding) DeepCopy() *Binding {
+	if in == nil {
+		return nil
+	}
+	out := new(Binding)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Binding) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) {
+	*out = *in
+	if in.VolumeAttributes != nil {
+		in, out := &in.VolumeAttributes, &out.VolumeAttributes
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.ControllerPublishSecretRef != nil {
+		in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	if in.NodeStageSecretRef != nil {
+		in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	if in.NodePublishSecretRef != nil {
+		in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	if in.ControllerExpandSecretRef != nil {
+		in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	if in.NodeExpandSecretRef != nil {
+		in, out := &in.NodeExpandSecretRef, &out.NodeExpandSecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource.
+func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CSIPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CSIVolumeSource) DeepCopyInto(out *CSIVolumeSource) {
+	*out = *in
+	if in.ReadOnly != nil {
+		in, out := &in.ReadOnly, &out.ReadOnly
+		*out = new(bool)
+		**out = **in
+	}
+	if in.FSType != nil {
+		in, out := &in.FSType, &out.FSType
+		*out = new(string)
+		**out = **in
+	}
+	if in.VolumeAttributes != nil {
+		in, out := &in.VolumeAttributes, &out.VolumeAttributes
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.NodePublishSecretRef != nil {
+		in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIVolumeSource.
+func (in *CSIVolumeSource) DeepCopy() *CSIVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CSIVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Capabilities) DeepCopyInto(out *Capabilities) {
+	*out = *in
+	if in.Add != nil {
+		in, out := &in.Add, &out.Add
+		*out = make([]Capability, len(*in))
+		copy(*out, *in)
+	}
+	if in.Drop != nil {
+		in, out := &in.Drop, &out.Drop
+		*out = make([]Capability, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities.
+func (in *Capabilities) DeepCopy() *Capabilities {
+	if in == nil {
+		return nil
+	}
+	out := new(Capabilities)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) {
+	*out = *in
+	if in.Monitors != nil {
+		in, out := &in.Monitors, &out.Monitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource.
+func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CephFSPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) {
+	*out = *in
+	if in.Monitors != nil {
+		in, out := &in.Monitors, &out.Monitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource.
+func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CephFSVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderPersistentVolumeSource.
+func (in *CinderPersistentVolumeSource) DeepCopy() *CinderPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CinderPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource.
+func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(CinderVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) {
+	*out = *in
+	if in.TimeoutSeconds != nil {
+		in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig.
+func (in *ClientIPConfig) DeepCopy() *ClientIPConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ClientIPConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterTrustBundleProjection) DeepCopyInto(out *ClusterTrustBundleProjection) {
+	*out = *in
+	if in.Name != nil {
+		in, out := &in.Name, &out.Name
+		*out = new(string)
+		**out = **in
+	}
+	if in.SignerName != nil {
+		in, out := &in.SignerName, &out.SignerName
+		*out = new(string)
+		**out = **in
+	}
+	if in.LabelSelector != nil {
+		in, out := &in.LabelSelector, &out.LabelSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTrustBundleProjection.
+func (in *ClusterTrustBundleProjection) DeepCopy() *ClusterTrustBundleProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(ClusterTrustBundleProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition.
+func (in *ComponentCondition) DeepCopy() *ComponentCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ComponentCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus.
+func (in *ComponentStatus) DeepCopy() *ComponentStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ComponentStatus) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ComponentStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList.
+func (in *ComponentStatusList) DeepCopy() *ComponentStatusList {
+	if in == nil {
+		return nil
+	}
+	out := new(ComponentStatusList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ComponentStatusList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMap) DeepCopyInto(out *ConfigMap) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Immutable != nil {
+		in, out := &in.Immutable, &out.Immutable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.BinaryData != nil {
+		in, out := &in.BinaryData, &out.BinaryData
+		*out = make(map[string][]byte, len(*in))
+		for key, val := range *in {
+			var outVal []byte
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make([]byte, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap.
+func (in *ConfigMap) DeepCopy() *ConfigMap {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMap)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConfigMap) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
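A minimal sketch of why these generated functions matter: DeepCopy duplicates reference-typed fields such as the Data map above, so mutating the copy cannot alias back into the original (the key/value strings are hypothetical):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	orig := &corev1.ConfigMap{Data: map[string]string{"key": "value"}}
	cp := orig.DeepCopy()
	cp.Data["key"] = "changed"
	// Prints "value changed": the original map is untouched.
	fmt.Println(orig.Data["key"], cp.Data["key"])
}
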
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource.
+func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapEnvSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector.
+func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapKeySelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ConfigMap, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList.
+func (in *ConfigMapList) DeepCopy() *ConfigMapList {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConfigMapList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapNodeConfigSource) DeepCopyInto(out *ConfigMapNodeConfigSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNodeConfigSource.
+func (in *ConfigMapNodeConfigSource) DeepCopy() *ConfigMapNodeConfigSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapNodeConfigSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KeyToPath, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection.
+func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KeyToPath, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource.
+func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigMapVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Container) DeepCopyInto(out *Container) {
+	*out = *in
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Args != nil {
+		in, out := &in.Args, &out.Args
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]ContainerPort, len(*in))
+		copy(*out, *in)
+	}
+	if in.EnvFrom != nil {
+		in, out := &in.EnvFrom, &out.EnvFrom
+		*out = make([]EnvFromSource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]EnvVar, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.ResizePolicy != nil {
+		in, out := &in.ResizePolicy, &out.ResizePolicy
+		*out = make([]ContainerResizePolicy, len(*in))
+		copy(*out, *in)
+	}
+	if in.RestartPolicy != nil {
+		in, out := &in.RestartPolicy, &out.RestartPolicy
+		*out = new(ContainerRestartPolicy)
+		**out = **in
+	}
+	if in.VolumeMounts != nil {
+		in, out := &in.VolumeMounts, &out.VolumeMounts
+		*out = make([]VolumeMount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumeDevices != nil {
+		in, out := &in.VolumeDevices, &out.VolumeDevices
+		*out = make([]VolumeDevice, len(*in))
+		copy(*out, *in)
+	}
+	if in.LivenessProbe != nil {
+		in, out := &in.LivenessProbe, &out.LivenessProbe
+		*out = new(Probe)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ReadinessProbe != nil {
+		in, out := &in.ReadinessProbe, &out.ReadinessProbe
+		*out = new(Probe)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StartupProbe != nil {
+		in, out := &in.StartupProbe, &out.StartupProbe
+		*out = new(Probe)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Lifecycle != nil {
+		in, out := &in.Lifecycle, &out.Lifecycle
+		*out = new(Lifecycle)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(SecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container.
+func (in *Container) DeepCopy() *Container {
+	if in == nil {
+		return nil
+	}
+	out := new(Container)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerImage) DeepCopyInto(out *ContainerImage) {
+	*out = *in
+	if in.Names != nil {
+		in, out := &in.Names, &out.Names
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage.
+func (in *ContainerImage) DeepCopy() *ContainerImage {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerImage)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerPort) DeepCopyInto(out *ContainerPort) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort.
+func (in *ContainerPort) DeepCopy() *ContainerPort {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerPort)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerResizePolicy) DeepCopyInto(out *ContainerResizePolicy) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResizePolicy.
+func (in *ContainerResizePolicy) DeepCopy() *ContainerResizePolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerResizePolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerState) DeepCopyInto(out *ContainerState) {
+	*out = *in
+	if in.Waiting != nil {
+		in, out := &in.Waiting, &out.Waiting
+		*out = new(ContainerStateWaiting)
+		**out = **in
+	}
+	if in.Running != nil {
+		in, out := &in.Running, &out.Running
+		*out = new(ContainerStateRunning)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Terminated != nil {
+		in, out := &in.Terminated, &out.Terminated
+		*out = new(ContainerStateTerminated)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState.
+func (in *ContainerState) DeepCopy() *ContainerState {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerState)
+	in.DeepCopyInto(out)
+	return out
+}
+
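+// A recurring line in these functions,
+//
+//	in, out := &in.Waiting, &out.Waiting
+//
+// deliberately shadows the receiver parameters with pointers to the one
+// field being copied. Pointees with no further references are then cloned by
+// plain allocation and assignment (*out = new(ContainerStateWaiting);
+// **out = **in), while pointees that themselves need a deep walk, such as
+// Running and Terminated above (each carries a metav1.Time), get
+// (*in).DeepCopyInto(*out).
+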
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) {
+	*out = *in
+	in.StartedAt.DeepCopyInto(&out.StartedAt)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning.
+func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerStateRunning)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) {
+	*out = *in
+	in.StartedAt.DeepCopyInto(&out.StartedAt)
+	in.FinishedAt.DeepCopyInto(&out.FinishedAt)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated.
+func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerStateTerminated)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting.
+func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerStateWaiting)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) {
+	*out = *in
+	in.State.DeepCopyInto(&out.State)
+	in.LastTerminationState.DeepCopyInto(&out.LastTerminationState)
+	if in.Started != nil {
+		in, out := &in.Started, &out.Started
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AllocatedResources != nil {
+		in, out := &in.AllocatedResources, &out.AllocatedResources
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = new(ResourceRequirements)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.VolumeMounts != nil {
+		in, out := &in.VolumeMounts, &out.VolumeMounts
+		*out = make([]VolumeMountStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.User != nil {
+		in, out := &in.User, &out.User
+		*out = new(ContainerUser)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AllocatedResourcesStatus != nil {
+		in, out := &in.AllocatedResourcesStatus, &out.AllocatedResourcesStatus
+		*out = make([]ResourceStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus.
+func (in *ContainerStatus) DeepCopy() *ContainerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
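+// ResourceList is a map[ResourceName]resource.Quantity, and Quantity carries
+// internal pointer state, so each value is copied through its own DeepCopy
+// rather than assigned:
+//
+//	*out = make(ResourceList, len(*in))
+//	for key, val := range *in {
+//		(*out)[key] = val.DeepCopy()
+//	}
+//
+// Contrast this with plain map[string]string fields (e.g. the volume Options
+// maps later in this file), where direct assignment already deep-copies.
+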
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerUser) DeepCopyInto(out *ContainerUser) {
+	*out = *in
+	if in.Linux != nil {
+		in, out := &in.Linux, &out.Linux
+		*out = new(LinuxContainerUser)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerUser.
+func (in *ContainerUser) DeepCopy() *ContainerUser {
+	if in == nil {
+		return nil
+	}
+	out := new(ContainerUser)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint.
+func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint {
+	if in == nil {
+		return nil
+	}
+	out := new(DaemonEndpoint)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) {
+	*out = *in
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DownwardAPIVolumeFile, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection.
+func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(DownwardAPIProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) {
+	*out = *in
+	if in.FieldRef != nil {
+		in, out := &in.FieldRef, &out.FieldRef
+		*out = new(ObjectFieldSelector)
+		**out = **in
+	}
+	if in.ResourceFieldRef != nil {
+		in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
+		*out = new(ResourceFieldSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile.
+func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile {
+	if in == nil {
+		return nil
+	}
+	out := new(DownwardAPIVolumeFile)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) {
+	*out = *in
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]DownwardAPIVolumeFile, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource.
+func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(DownwardAPIVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) {
+	*out = *in
+	if in.SizeLimit != nil {
+		in, out := &in.SizeLimit, &out.SizeLimit
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource.
+func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EmptyDirVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
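+// SizeLimit is a *resource.Quantity, and Quantity.DeepCopy returns a value
+// rather than a pointer, hence the temporary above:
+//
+//	x := (*in).DeepCopy()
+//	*out = &x
+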
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) {
+	*out = *in
+	if in.NodeName != nil {
+		in, out := &in.NodeName, &out.NodeName
+		*out = new(string)
+		**out = **in
+	}
+	if in.TargetRef != nil {
+		in, out := &in.TargetRef, &out.TargetRef
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress.
+func (in *EndpointAddress) DeepCopy() *EndpointAddress {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointAddress)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
+	*out = *in
+	if in.AppProtocol != nil {
+		in, out := &in.AppProtocol, &out.AppProtocol
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort.
+func (in *EndpointPort) DeepCopy() *EndpointPort {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointPort)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) {
+	*out = *in
+	if in.Addresses != nil {
+		in, out := &in.Addresses, &out.Addresses
+		*out = make([]EndpointAddress, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NotReadyAddresses != nil {
+		in, out := &in.NotReadyAddresses, &out.NotReadyAddresses
+		*out = make([]EndpointAddress, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]EndpointPort, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset.
+func (in *EndpointSubset) DeepCopy() *EndpointSubset {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointSubset)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Endpoints) DeepCopyInto(out *Endpoints) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Subsets != nil {
+		in, out := &in.Subsets, &out.Subsets
+		*out = make([]EndpointSubset, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints.
+func (in *Endpoints) DeepCopy() *Endpoints {
+	if in == nil {
+		return nil
+	}
+	out := new(Endpoints)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Endpoints) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
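+// Kinds served by the API, such as Endpoints, additionally satisfy
+// runtime.Object via DeepCopyObject; this is what lets type-agnostic
+// machinery (schemes, informers, client-go caches) clone objects it only
+// knows as runtime.Object. A sketch, with ep a hypothetical *Endpoints:
+//
+//	var obj runtime.Object = ep.DeepCopyObject()
+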
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointsList) DeepCopyInto(out *EndpointsList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Endpoints, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList.
+func (in *EndpointsList) DeepCopy() *EndpointsList {
+	if in == nil {
+		return nil
+	}
+	out := new(EndpointsList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EndpointsList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) {
+	*out = *in
+	if in.ConfigMapRef != nil {
+		in, out := &in.ConfigMapRef, &out.ConfigMapRef
+		*out = new(ConfigMapEnvSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretEnvSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource.
+func (in *EnvFromSource) DeepCopy() *EnvFromSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EnvFromSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EnvVar) DeepCopyInto(out *EnvVar) {
+	*out = *in
+	if in.ValueFrom != nil {
+		in, out := &in.ValueFrom, &out.ValueFrom
+		*out = new(EnvVarSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar.
+func (in *EnvVar) DeepCopy() *EnvVar {
+	if in == nil {
+		return nil
+	}
+	out := new(EnvVar)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) {
+	*out = *in
+	if in.FieldRef != nil {
+		in, out := &in.FieldRef, &out.FieldRef
+		*out = new(ObjectFieldSelector)
+		**out = **in
+	}
+	if in.ResourceFieldRef != nil {
+		in, out := &in.ResourceFieldRef, &out.ResourceFieldRef
+		*out = new(ResourceFieldSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ConfigMapKeyRef != nil {
+		in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef
+		*out = new(ConfigMapKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecretKeyRef != nil {
+		in, out := &in.SecretKeyRef, &out.SecretKeyRef
+		*out = new(SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource.
+func (in *EnvVarSource) DeepCopy() *EnvVarSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EnvVarSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EphemeralContainer) DeepCopyInto(out *EphemeralContainer) {
+	*out = *in
+	in.EphemeralContainerCommon.DeepCopyInto(&out.EphemeralContainerCommon)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainer.
+func (in *EphemeralContainer) DeepCopy() *EphemeralContainer {
+	if in == nil {
+		return nil
+	}
+	out := new(EphemeralContainer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) {
+	*out = *in
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Args != nil {
+		in, out := &in.Args, &out.Args
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]ContainerPort, len(*in))
+		copy(*out, *in)
+	}
+	if in.EnvFrom != nil {
+		in, out := &in.EnvFrom, &out.EnvFrom
+		*out = make([]EnvFromSource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]EnvVar, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.ResizePolicy != nil {
+		in, out := &in.ResizePolicy, &out.ResizePolicy
+		*out = make([]ContainerResizePolicy, len(*in))
+		copy(*out, *in)
+	}
+	if in.RestartPolicy != nil {
+		in, out := &in.RestartPolicy, &out.RestartPolicy
+		*out = new(ContainerRestartPolicy)
+		**out = **in
+	}
+	if in.VolumeMounts != nil {
+		in, out := &in.VolumeMounts, &out.VolumeMounts
+		*out = make([]VolumeMount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumeDevices != nil {
+		in, out := &in.VolumeDevices, &out.VolumeDevices
+		*out = make([]VolumeDevice, len(*in))
+		copy(*out, *in)
+	}
+	if in.LivenessProbe != nil {
+		in, out := &in.LivenessProbe, &out.LivenessProbe
+		*out = new(Probe)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ReadinessProbe != nil {
+		in, out := &in.ReadinessProbe, &out.ReadinessProbe
+		*out = new(Probe)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StartupProbe != nil {
+		in, out := &in.StartupProbe, &out.StartupProbe
+		*out = new(Probe)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Lifecycle != nil {
+		in, out := &in.Lifecycle, &out.Lifecycle
+		*out = new(Lifecycle)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(SecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainerCommon.
+func (in *EphemeralContainerCommon) DeepCopy() *EphemeralContainerCommon {
+	if in == nil {
+		return nil
+	}
+	out := new(EphemeralContainerCommon)
+	in.DeepCopyInto(out)
+	return out
+}
+
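+// EphemeralContainerCommon is kept field-for-field in sync with Container,
+// which is why the generated body above is identical to Container's.
+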
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EphemeralVolumeSource) DeepCopyInto(out *EphemeralVolumeSource) {
+	*out = *in
+	if in.VolumeClaimTemplate != nil {
+		in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
+		*out = new(PersistentVolumeClaimTemplate)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralVolumeSource.
+func (in *EphemeralVolumeSource) DeepCopy() *EphemeralVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EphemeralVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Event) DeepCopyInto(out *Event) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.InvolvedObject = in.InvolvedObject
+	out.Source = in.Source
+	in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp)
+	in.LastTimestamp.DeepCopyInto(&out.LastTimestamp)
+	in.EventTime.DeepCopyInto(&out.EventTime)
+	if in.Series != nil {
+		in, out := &in.Series, &out.Series
+		*out = new(EventSeries)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Related != nil {
+		in, out := &in.Related, &out.Related
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event.
+func (in *Event) DeepCopy() *Event {
+	if in == nil {
+		return nil
+	}
+	out := new(Event)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Event) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
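+// The practical reason for all of these helpers: objects returned by shared
+// informer caches are shared, and mutating them in place corrupts the cache.
+// A sketch, assuming a hypothetical cachedEvent *Event read from such a
+// cache:
+//
+//	ev := cachedEvent.DeepCopy()
+//	ev.Message = "updated" // safe: the cached copy is untouched
+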
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventList) DeepCopyInto(out *EventList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Event, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList.
+func (in *EventList) DeepCopy() *EventList {
+	if in == nil {
+		return nil
+	}
+	out := new(EventList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSeries) DeepCopyInto(out *EventSeries) {
+	*out = *in
+	in.LastObservedTime.DeepCopyInto(&out.LastObservedTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries.
+func (in *EventSeries) DeepCopy() *EventSeries {
+	if in == nil {
+		return nil
+	}
+	out := new(EventSeries)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSource) DeepCopyInto(out *EventSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource.
+func (in *EventSource) DeepCopy() *EventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(EventSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecAction) DeepCopyInto(out *ExecAction) {
+	*out = *in
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction.
+func (in *ExecAction) DeepCopy() *ExecAction {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecAction)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) {
+	*out = *in
+	if in.TargetWWNs != nil {
+		in, out := &in.TargetWWNs, &out.TargetWWNs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Lun != nil {
+		in, out := &in.Lun, &out.Lun
+		*out = new(int32)
+		**out = **in
+	}
+	if in.WWIDs != nil {
+		in, out := &in.WWIDs, &out.WWIDs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource.
+func (in *FCVolumeSource) DeepCopy() *FCVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FCVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource.
+func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FlexPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource.
+func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FlexVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
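+// The Options map above holds plain strings, so assignment inside the range
+// loop is already a deep copy; no per-value DeepCopy call is needed.
+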
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource.
+func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FlockerVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource.
+func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GCEPersistentDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GRPCAction) DeepCopyInto(out *GRPCAction) {
+	*out = *in
+	if in.Service != nil {
+		in, out := &in.Service, &out.Service
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCAction.
+func (in *GRPCAction) DeepCopy() *GRPCAction {
+	if in == nil {
+		return nil
+	}
+	out := new(GRPCAction)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource.
+func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GitRepoVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) {
+	*out = *in
+	if in.EndpointsNamespace != nil {
+		in, out := &in.EndpointsNamespace, &out.EndpointsNamespace
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource.
+func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GlusterfsPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource.
+func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GlusterfsVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) {
+	*out = *in
+	out.Port = in.Port
+	if in.HTTPHeaders != nil {
+		in, out := &in.HTTPHeaders, &out.HTTPHeaders
+		*out = make([]HTTPHeader, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction.
+func (in *HTTPGetAction) DeepCopy() *HTTPGetAction {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPGetAction)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader.
+func (in *HTTPHeader) DeepCopy() *HTTPHeader {
+	if in == nil {
+		return nil
+	}
+	out := new(HTTPHeader)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostAlias) DeepCopyInto(out *HostAlias) {
+	*out = *in
+	if in.Hostnames != nil {
+		in, out := &in.Hostnames, &out.Hostnames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias.
+func (in *HostAlias) DeepCopy() *HostAlias {
+	if in == nil {
+		return nil
+	}
+	out := new(HostAlias)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostIP) DeepCopyInto(out *HostIP) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostIP.
+func (in *HostIP) DeepCopy() *HostIP {
+	if in == nil {
+		return nil
+	}
+	out := new(HostIP)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) {
+	*out = *in
+	if in.Type != nil {
+		in, out := &in.Type, &out.Type
+		*out = new(HostPathType)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource.
+func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(HostPathVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) {
+	*out = *in
+	if in.Portals != nil {
+		in, out := &in.Portals, &out.Portals
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	if in.InitiatorName != nil {
+		in, out := &in.InitiatorName, &out.InitiatorName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource.
+func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ISCSIPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) {
+	*out = *in
+	if in.Portals != nil {
+		in, out := &in.Portals, &out.Portals
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	if in.InitiatorName != nil {
+		in, out := &in.InitiatorName, &out.InitiatorName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource.
+func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ISCSIVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageVolumeSource) DeepCopyInto(out *ImageVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageVolumeSource.
+func (in *ImageVolumeSource) DeepCopy() *ImageVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KeyToPath) DeepCopyInto(out *KeyToPath) {
+	*out = *in
+	if in.Mode != nil {
+		in, out := &in.Mode, &out.Mode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath.
+func (in *KeyToPath) DeepCopy() *KeyToPath {
+	if in == nil {
+		return nil
+	}
+	out := new(KeyToPath)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Lifecycle) DeepCopyInto(out *Lifecycle) {
+	*out = *in
+	if in.PostStart != nil {
+		in, out := &in.PostStart, &out.PostStart
+		*out = new(LifecycleHandler)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PreStop != nil {
+		in, out := &in.PreStop, &out.PreStop
+		*out = new(LifecycleHandler)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle.
+func (in *Lifecycle) DeepCopy() *Lifecycle {
+	if in == nil {
+		return nil
+	}
+	out := new(Lifecycle)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LifecycleHandler) DeepCopyInto(out *LifecycleHandler) {
+	*out = *in
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		*out = new(ExecAction)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HTTPGet != nil {
+		in, out := &in.HTTPGet, &out.HTTPGet
+		*out = new(HTTPGetAction)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TCPSocket != nil {
+		in, out := &in.TCPSocket, &out.TCPSocket
+		*out = new(TCPSocketAction)
+		**out = **in
+	}
+	if in.Sleep != nil {
+		in, out := &in.Sleep, &out.Sleep
+		*out = new(SleepAction)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHandler.
+func (in *LifecycleHandler) DeepCopy() *LifecycleHandler {
+	if in == nil {
+		return nil
+	}
+	out := new(LifecycleHandler)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitRange) DeepCopyInto(out *LimitRange) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange.
+func (in *LimitRange) DeepCopy() *LimitRange {
+	if in == nil {
+		return nil
+	}
+	out := new(LimitRange)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LimitRange) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) {
+	*out = *in
+	if in.Max != nil {
+		in, out := &in.Max, &out.Max
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Min != nil {
+		in, out := &in.Min, &out.Min
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Default != nil {
+		in, out := &in.Default, &out.Default
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.DefaultRequest != nil {
+		in, out := &in.DefaultRequest, &out.DefaultRequest
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.MaxLimitRequestRatio != nil {
+		in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem.
+func (in *LimitRangeItem) DeepCopy() *LimitRangeItem {
+	if in == nil {
+		return nil
+	}
+	out := new(LimitRangeItem)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]LimitRange, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList.
+func (in *LimitRangeList) DeepCopy() *LimitRangeList {
+	if in == nil {
+		return nil
+	}
+	out := new(LimitRangeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *LimitRangeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) {
+	*out = *in
+	if in.Limits != nil {
+		in, out := &in.Limits, &out.Limits
+		*out = make([]LimitRangeItem, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec.
+func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(LimitRangeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LinuxContainerUser) DeepCopyInto(out *LinuxContainerUser) {
+	*out = *in
+	if in.SupplementalGroups != nil {
+		in, out := &in.SupplementalGroups, &out.SupplementalGroups
+		*out = make([]int64, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LinuxContainerUser.
+func (in *LinuxContainerUser) DeepCopy() *LinuxContainerUser {
+	if in == nil {
+		return nil
+	}
+	out := new(LinuxContainerUser)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *List) DeepCopyInto(out *List) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]runtime.RawExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List.
+func (in *List) DeepCopy() *List {
+	if in == nil {
+		return nil
+	}
+	out := new(List)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *List) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) {
+	*out = *in
+	if in.IPMode != nil {
+		in, out := &in.IPMode, &out.IPMode
+		*out = new(LoadBalancerIPMode)
+		**out = **in
+	}
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]PortStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress.
+func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerIngress)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) {
+	*out = *in
+	if in.Ingress != nil {
+		in, out := &in.Ingress, &out.Ingress
+		*out = make([]LoadBalancerIngress, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus.
+func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference.
+func (in *LocalObjectReference) DeepCopy() *LocalObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) {
+	*out = *in
+	if in.FSType != nil {
+		in, out := &in.FSType, &out.FSType
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource.
+func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(LocalVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ModifyVolumeStatus) DeepCopyInto(out *ModifyVolumeStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModifyVolumeStatus.
+func (in *ModifyVolumeStatus) DeepCopy() *ModifyVolumeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ModifyVolumeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource.
+func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(NFSVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Namespace) DeepCopyInto(out *Namespace) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace.
+func (in *Namespace) DeepCopy() *Namespace {
+	if in == nil {
+		return nil
+	}
+	out := new(Namespace)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Namespace) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceCondition) DeepCopyInto(out *NamespaceCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceCondition.
+func (in *NamespaceCondition) DeepCopy() *NamespaceCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceList) DeepCopyInto(out *NamespaceList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Namespace, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList.
+func (in *NamespaceList) DeepCopy() *NamespaceList {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NamespaceList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) {
+	*out = *in
+	if in.Finalizers != nil {
+		in, out := &in.Finalizers, &out.Finalizers
+		*out = make([]FinalizerName, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec.
+func (in *NamespaceSpec) DeepCopy() *NamespaceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]NamespaceCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus.
+func (in *NamespaceStatus) DeepCopy() *NamespaceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NamespaceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Node) DeepCopyInto(out *Node) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
+func (in *Node) DeepCopy() *Node {
+	if in == nil {
+		return nil
+	}
+	out := new(Node)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Node) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
+func (in *NodeAddress) DeepCopy() *NodeAddress {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeAddress)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = new(NodeSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PreferredSchedulingTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity.
+func (in *NodeAffinity) DeepCopy() *NodeAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeCondition) DeepCopyInto(out *NodeCondition) {
+	*out = *in
+	in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition.
+func (in *NodeCondition) DeepCopy() *NodeCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) {
+	*out = *in
+	if in.ConfigMap != nil {
+		in, out := &in.ConfigMap, &out.ConfigMap
+		*out = new(ConfigMapNodeConfigSource)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource.
+func (in *NodeConfigSource) DeepCopy() *NodeConfigSource {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeConfigSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) {
+	*out = *in
+	if in.Assigned != nil {
+		in, out := &in.Assigned, &out.Assigned
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Active != nil {
+		in, out := &in.Active, &out.Active
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.LastKnownGood != nil {
+		in, out := &in.LastKnownGood, &out.LastKnownGood
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus.
+func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeConfigStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) {
+	*out = *in
+	out.KubeletEndpoint = in.KubeletEndpoint
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints.
+func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeDaemonEndpoints)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeFeatures) DeepCopyInto(out *NodeFeatures) {
+	*out = *in
+	if in.SupplementalGroupsPolicy != nil {
+		in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFeatures.
+func (in *NodeFeatures) DeepCopy() *NodeFeatures {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeFeatures)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeList) DeepCopyInto(out *NodeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Node, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList.
+func (in *NodeList) DeepCopy() *NodeList {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions.
+func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeProxyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeProxyOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeRuntimeHandler) DeepCopyInto(out *NodeRuntimeHandler) {
+	*out = *in
+	if in.Features != nil {
+		in, out := &in.Features, &out.Features
+		*out = new(NodeRuntimeHandlerFeatures)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRuntimeHandler.
+func (in *NodeRuntimeHandler) DeepCopy() *NodeRuntimeHandler {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeRuntimeHandler)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeRuntimeHandlerFeatures) DeepCopyInto(out *NodeRuntimeHandlerFeatures) {
+	*out = *in
+	if in.RecursiveReadOnlyMounts != nil {
+		in, out := &in.RecursiveReadOnlyMounts, &out.RecursiveReadOnlyMounts
+		*out = new(bool)
+		**out = **in
+	}
+	if in.UserNamespaces != nil {
+		in, out := &in.UserNamespaces, &out.UserNamespaces
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRuntimeHandlerFeatures.
+func (in *NodeRuntimeHandlerFeatures) DeepCopy() *NodeRuntimeHandlerFeatures {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeRuntimeHandlerFeatures)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelector) DeepCopyInto(out *NodeSelector) {
+	*out = *in
+	if in.NodeSelectorTerms != nil {
+		in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms
+		*out = make([]NodeSelectorTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector.
+func (in *NodeSelector) DeepCopy() *NodeSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement.
+func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelectorRequirement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) {
+	*out = *in
+	if in.MatchExpressions != nil {
+		in, out := &in.MatchExpressions, &out.MatchExpressions
+		*out = make([]NodeSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MatchFields != nil {
+		in, out := &in.MatchFields, &out.MatchFields
+		*out = make([]NodeSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm.
+func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelectorTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
+	*out = *in
+	if in.PodCIDRs != nil {
+		in, out := &in.PodCIDRs, &out.PodCIDRs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Taints != nil {
+		in, out := &in.Taints, &out.Taints
+		*out = make([]Taint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ConfigSource != nil {
+		in, out := &in.ConfigSource, &out.ConfigSource
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
+func (in *NodeSpec) DeepCopy() *NodeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Allocatable != nil {
+		in, out := &in.Allocatable, &out.Allocatable
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]NodeCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Addresses != nil {
+		in, out := &in.Addresses, &out.Addresses
+		*out = make([]NodeAddress, len(*in))
+		copy(*out, *in)
+	}
+	out.DaemonEndpoints = in.DaemonEndpoints
+	out.NodeInfo = in.NodeInfo
+	if in.Images != nil {
+		in, out := &in.Images, &out.Images
+		*out = make([]ContainerImage, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumesInUse != nil {
+		in, out := &in.VolumesInUse, &out.VolumesInUse
+		*out = make([]UniqueVolumeName, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumesAttached != nil {
+		in, out := &in.VolumesAttached, &out.VolumesAttached
+		*out = make([]AttachedVolume, len(*in))
+		copy(*out, *in)
+	}
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = new(NodeConfigStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RuntimeHandlers != nil {
+		in, out := &in.RuntimeHandlers, &out.RuntimeHandlers
+		*out = make([]NodeRuntimeHandler, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Features != nil {
+		in, out := &in.Features, &out.Features
+		*out = new(NodeFeatures)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
+func (in *NodeStatus) DeepCopy() *NodeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo.
+func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSystemInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector.
+func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectFieldSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
+func (in *ObjectReference) DeepCopy() *ObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ObjectReference) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume.
+func (in *PersistentVolume) DeepCopy() *PersistentVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolume) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim.
+func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaim)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) {
+	*out = *in
+	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition.
+func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PersistentVolumeClaim, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList.
+func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) {
+	*out = *in
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.StorageClassName != nil {
+		in, out := &in.StorageClassName, &out.StorageClassName
+		*out = new(string)
+		**out = **in
+	}
+	if in.VolumeMode != nil {
+		in, out := &in.VolumeMode, &out.VolumeMode
+		*out = new(PersistentVolumeMode)
+		**out = **in
+	}
+	if in.DataSource != nil {
+		in, out := &in.DataSource, &out.DataSource
+		*out = new(TypedLocalObjectReference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DataSourceRef != nil {
+		in, out := &in.DataSourceRef, &out.DataSourceRef
+		*out = new(TypedObjectReference)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.VolumeAttributesClassName != nil {
+		in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec.
+func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) {
+	*out = *in
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]PersistentVolumeClaimCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AllocatedResources != nil {
+		in, out := &in.AllocatedResources, &out.AllocatedResources
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.AllocatedResourceStatuses != nil {
+		in, out := &in.AllocatedResourceStatuses, &out.AllocatedResourceStatuses
+		*out = make(map[ResourceName]ClaimResourceStatus, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.CurrentVolumeAttributesClassName != nil {
+		in, out := &in.CurrentVolumeAttributesClassName, &out.CurrentVolumeAttributesClassName
+		*out = new(string)
+		**out = **in
+	}
+	if in.ModifyVolumeStatus != nil {
+		in, out := &in.ModifyVolumeStatus, &out.ModifyVolumeStatus
+		*out = new(ModifyVolumeStatus)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus.
+func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimTemplate) DeepCopyInto(out *PersistentVolumeClaimTemplate) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimTemplate.
+func (in *PersistentVolumeClaimTemplate) DeepCopy() *PersistentVolumeClaimTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource.
+func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PersistentVolume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList.
+func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
+	*out = *in
+	if in.GCEPersistentDisk != nil {
+		in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+		*out = new(GCEPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.AWSElasticBlockStore != nil {
+		in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+		*out = new(AWSElasticBlockStoreVolumeSource)
+		**out = **in
+	}
+	if in.HostPath != nil {
+		in, out := &in.HostPath, &out.HostPath
+		*out = new(HostPathVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Glusterfs != nil {
+		in, out := &in.Glusterfs, &out.Glusterfs
+		*out = new(GlusterfsPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NFS != nil {
+		in, out := &in.NFS, &out.NFS
+		*out = new(NFSVolumeSource)
+		**out = **in
+	}
+	if in.RBD != nil {
+		in, out := &in.RBD, &out.RBD
+		*out = new(RBDPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ISCSI != nil {
+		in, out := &in.ISCSI, &out.ISCSI
+		*out = new(ISCSIPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Cinder != nil {
+		in, out := &in.Cinder, &out.Cinder
+		*out = new(CinderPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CephFS != nil {
+		in, out := &in.CephFS, &out.CephFS
+		*out = new(CephFSPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FC != nil {
+		in, out := &in.FC, &out.FC
+		*out = new(FCVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Flocker != nil {
+		in, out := &in.Flocker, &out.Flocker
+		*out = new(FlockerVolumeSource)
+		**out = **in
+	}
+	if in.FlexVolume != nil {
+		in, out := &in.FlexVolume, &out.FlexVolume
+		*out = new(FlexPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AzureFile != nil {
+		in, out := &in.AzureFile, &out.AzureFile
+		*out = new(AzureFilePersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.VsphereVolume != nil {
+		in, out := &in.VsphereVolume, &out.VsphereVolume
+		*out = new(VsphereVirtualDiskVolumeSource)
+		**out = **in
+	}
+	if in.Quobyte != nil {
+		in, out := &in.Quobyte, &out.Quobyte
+		*out = new(QuobyteVolumeSource)
+		**out = **in
+	}
+	if in.AzureDisk != nil {
+		in, out := &in.AzureDisk, &out.AzureDisk
+		*out = new(AzureDiskVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PhotonPersistentDisk != nil {
+		in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
+		*out = new(PhotonPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.PortworxVolume != nil {
+		in, out := &in.PortworxVolume, &out.PortworxVolume
+		*out = new(PortworxVolumeSource)
+		**out = **in
+	}
+	if in.ScaleIO != nil {
+		in, out := &in.ScaleIO, &out.ScaleIO
+		*out = new(ScaleIOPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Local != nil {
+		in, out := &in.Local, &out.Local
+		*out = new(LocalVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StorageOS != nil {
+		in, out := &in.StorageOS, &out.StorageOS
+		*out = new(StorageOSPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CSI != nil {
+		in, out := &in.CSI, &out.CSI
+		*out = new(CSIPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource.
+func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource)
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClaimRef != nil {
+		in, out := &in.ClaimRef, &out.ClaimRef
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	if in.MountOptions != nil {
+		in, out := &in.MountOptions, &out.MountOptions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumeMode != nil {
+		in, out := &in.VolumeMode, &out.VolumeMode
+		*out = new(PersistentVolumeMode)
+		**out = **in
+	}
+	if in.NodeAffinity != nil {
+		in, out := &in.NodeAffinity, &out.NodeAffinity
+		*out = new(VolumeNodeAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.VolumeAttributesClassName != nil {
+		in, out := &in.VolumeAttributesClassName, &out.VolumeAttributesClassName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec.
+func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) {
+	*out = *in
+	if in.LastPhaseTransitionTime != nil {
+		in, out := &in.LastPhaseTransitionTime, &out.LastPhaseTransitionTime
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus.
+func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource.
+func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PhotonPersistentDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Pod) DeepCopyInto(out *Pod) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod.
+func (in *Pod) DeepCopy() *Pod {
+	if in == nil {
+		return nil
+	}
+	out := new(Pod)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Pod) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAffinity) DeepCopyInto(out *PodAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]WeightedPodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity.
+func (in *PodAffinity) DeepCopy() *PodAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) {
+	*out = *in
+	if in.LabelSelector != nil {
+		in, out := &in.LabelSelector, &out.LabelSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.NamespaceSelector != nil {
+		in, out := &in.NamespaceSelector, &out.NamespaceSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MatchLabelKeys != nil {
+		in, out := &in.MatchLabelKeys, &out.MatchLabelKeys
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.MismatchLabelKeys != nil {
+		in, out := &in.MismatchLabelKeys, &out.MismatchLabelKeys
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm.
+func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAffinityTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]WeightedPodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity.
+func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAntiAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions.
+func (in *PodAttachOptions) DeepCopy() *PodAttachOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAttachOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCondition) DeepCopyInto(out *PodCondition) {
+	*out = *in
+	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition.
+func (in *PodCondition) DeepCopy() *PodCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(PodCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) {
+	*out = *in
+	if in.Nameservers != nil {
+		in, out := &in.Nameservers, &out.Nameservers
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Searches != nil {
+		in, out := &in.Searches, &out.Searches
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make([]PodDNSConfigOption, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig.
+func (in *PodDNSConfig) DeepCopy() *PodDNSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDNSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) {
+	*out = *in
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption.
+func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDNSConfigOption)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions.
+func (in *PodExecOptions) DeepCopy() *PodExecOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodExecOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodExecOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodIP) DeepCopyInto(out *PodIP) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodIP.
+func (in *PodIP) DeepCopy() *PodIP {
+	if in == nil {
+		return nil
+	}
+	out := new(PodIP)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodList) DeepCopyInto(out *PodList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Pod, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList.
+func (in *PodList) DeepCopy() *PodList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.SinceSeconds != nil {
+		in, out := &in.SinceSeconds, &out.SinceSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.SinceTime != nil {
+		in, out := &in.SinceTime, &out.SinceTime
+		*out = (*in).DeepCopy()
+	}
+	if in.TailLines != nil {
+		in, out := &in.TailLines, &out.TailLines
+		*out = new(int64)
+		**out = **in
+	}
+	if in.LimitBytes != nil {
+		in, out := &in.LimitBytes, &out.LimitBytes
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Stream != nil {
+		in, out := &in.Stream, &out.Stream
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions.
+func (in *PodLogOptions) DeepCopy() *PodLogOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodLogOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodLogOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodOS) DeepCopyInto(out *PodOS) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodOS.
+func (in *PodOS) DeepCopy() *PodOS {
+	if in == nil {
+		return nil
+	}
+	out := new(PodOS)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]int32, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions.
+func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodPortForwardOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions.
+func (in *PodProxyOptions) DeepCopy() *PodProxyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodProxyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodProxyOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate.
+func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
+	if in == nil {
+		return nil
+	}
+	out := new(PodReadinessGate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodResourceClaim) DeepCopyInto(out *PodResourceClaim) {
+	*out = *in
+	if in.ResourceClaimName != nil {
+		in, out := &in.ResourceClaimName, &out.ResourceClaimName
+		*out = new(string)
+		**out = **in
+	}
+	if in.ResourceClaimTemplateName != nil {
+		in, out := &in.ResourceClaimTemplateName, &out.ResourceClaimTemplateName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaim.
+func (in *PodResourceClaim) DeepCopy() *PodResourceClaim {
+	if in == nil {
+		return nil
+	}
+	out := new(PodResourceClaim)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodResourceClaimStatus) DeepCopyInto(out *PodResourceClaimStatus) {
+	*out = *in
+	if in.ResourceClaimName != nil {
+		in, out := &in.ResourceClaimName, &out.ResourceClaimName
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodResourceClaimStatus.
+func (in *PodResourceClaimStatus) DeepCopy() *PodResourceClaimStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PodResourceClaimStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSchedulingGate) DeepCopyInto(out *PodSchedulingGate) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingGate.
+func (in *PodSchedulingGate) DeepCopy() *PodSchedulingGate {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSchedulingGate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
+	*out = *in
+	if in.SELinuxOptions != nil {
+		in, out := &in.SELinuxOptions, &out.SELinuxOptions
+		*out = new(SELinuxOptions)
+		**out = **in
+	}
+	if in.WindowsOptions != nil {
+		in, out := &in.WindowsOptions, &out.WindowsOptions
+		*out = new(WindowsSecurityContextOptions)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RunAsUser != nil {
+		in, out := &in.RunAsUser, &out.RunAsUser
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsGroup != nil {
+		in, out := &in.RunAsGroup, &out.RunAsGroup
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsNonRoot != nil {
+		in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SupplementalGroups != nil {
+		in, out := &in.SupplementalGroups, &out.SupplementalGroups
+		*out = make([]int64, len(*in))
+		copy(*out, *in)
+	}
+	if in.SupplementalGroupsPolicy != nil {
+		in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy
+		*out = new(SupplementalGroupsPolicy)
+		**out = **in
+	}
+	if in.FSGroup != nil {
+		in, out := &in.FSGroup, &out.FSGroup
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Sysctls != nil {
+		in, out := &in.Sysctls, &out.Sysctls
+		*out = make([]Sysctl, len(*in))
+		copy(*out, *in)
+	}
+	if in.FSGroupChangePolicy != nil {
+		in, out := &in.FSGroupChangePolicy, &out.FSGroupChangePolicy
+		*out = new(PodFSGroupChangePolicy)
+		**out = **in
+	}
+	if in.SeccompProfile != nil {
+		in, out := &in.SeccompProfile, &out.SeccompProfile
+		*out = new(SeccompProfile)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AppArmorProfile != nil {
+		in, out := &in.AppArmorProfile, &out.AppArmorProfile
+		*out = new(AppArmorProfile)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SELinuxChangePolicy != nil {
+		in, out := &in.SELinuxChangePolicy, &out.SELinuxChangePolicy
+		*out = new(PodSELinuxChangePolicy)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext.
+func (in *PodSecurityContext) DeepCopy() *PodSecurityContext {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSecurityContext)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSignature) DeepCopyInto(out *PodSignature) {
+	*out = *in
+	if in.PodController != nil {
+		in, out := &in.PodController, &out.PodController
+		*out = new(metav1.OwnerReference)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature.
+func (in *PodSignature) DeepCopy() *PodSignature {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSignature)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSpec) DeepCopyInto(out *PodSpec) {
+	*out = *in
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.InitContainers != nil {
+		in, out := &in.InitContainers, &out.InitContainers
+		*out = make([]Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Containers != nil {
+		in, out := &in.Containers, &out.Containers
+		*out = make([]Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.EphemeralContainers != nil {
+		in, out := &in.EphemeralContainers, &out.EphemeralContainers
+		*out = make([]EphemeralContainer, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TerminationGracePeriodSeconds != nil {
+		in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.ActiveDeadlineSeconds != nil {
+		in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.AutomountServiceAccountToken != nil {
+		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ShareProcessNamespace != nil {
+		in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(PodSecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImagePullSecrets != nil {
+		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+		*out = make([]LocalObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HostAliases != nil {
+		in, out := &in.HostAliases, &out.HostAliases
+		*out = make([]HostAlias, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(int32)
+		**out = **in
+	}
+	if in.DNSConfig != nil {
+		in, out := &in.DNSConfig, &out.DNSConfig
+		*out = new(PodDNSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ReadinessGates != nil {
+		in, out := &in.ReadinessGates, &out.ReadinessGates
+		*out = make([]PodReadinessGate, len(*in))
+		copy(*out, *in)
+	}
+	if in.RuntimeClassName != nil {
+		in, out := &in.RuntimeClassName, &out.RuntimeClassName
+		*out = new(string)
+		**out = **in
+	}
+	if in.EnableServiceLinks != nil {
+		in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
+		*out = new(bool)
+		**out = **in
+	}
+	if in.PreemptionPolicy != nil {
+		in, out := &in.PreemptionPolicy, &out.PreemptionPolicy
+		*out = new(PreemptionPolicy)
+		**out = **in
+	}
+	if in.Overhead != nil {
+		in, out := &in.Overhead, &out.Overhead
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.TopologySpreadConstraints != nil {
+		in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
+		*out = make([]TopologySpreadConstraint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.SetHostnameAsFQDN != nil {
+		in, out := &in.SetHostnameAsFQDN, &out.SetHostnameAsFQDN
+		*out = new(bool)
+		**out = **in
+	}
+	if in.OS != nil {
+		in, out := &in.OS, &out.OS
+		*out = new(PodOS)
+		**out = **in
+	}
+	if in.HostUsers != nil {
+		in, out := &in.HostUsers, &out.HostUsers
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SchedulingGates != nil {
+		in, out := &in.SchedulingGates, &out.SchedulingGates
+		*out = make([]PodSchedulingGate, len(*in))
+		copy(*out, *in)
+	}
+	if in.ResourceClaims != nil {
+		in, out := &in.ResourceClaims, &out.ResourceClaims
+		*out = make([]PodResourceClaim, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = new(ResourceRequirements)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec.
+func (in *PodSpec) DeepCopy() *PodSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodStatus) DeepCopyInto(out *PodStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]PodCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HostIPs != nil {
+		in, out := &in.HostIPs, &out.HostIPs
+		*out = make([]HostIP, len(*in))
+		copy(*out, *in)
+	}
+	if in.PodIPs != nil {
+		in, out := &in.PodIPs, &out.PodIPs
+		*out = make([]PodIP, len(*in))
+		copy(*out, *in)
+	}
+	if in.StartTime != nil {
+		in, out := &in.StartTime, &out.StartTime
+		*out = (*in).DeepCopy()
+	}
+	if in.InitContainerStatuses != nil {
+		in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
+		*out = make([]ContainerStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ContainerStatuses != nil {
+		in, out := &in.ContainerStatuses, &out.ContainerStatuses
+		*out = make([]ContainerStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.EphemeralContainerStatuses != nil {
+		in, out := &in.EphemeralContainerStatuses, &out.EphemeralContainerStatuses
+		*out = make([]ContainerStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ResourceClaimStatuses != nil {
+		in, out := &in.ResourceClaimStatuses, &out.ResourceClaimStatuses
+		*out = make([]PodResourceClaimStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus.
+func (in *PodStatus) DeepCopy() *PodStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PodStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult.
+func (in *PodStatusResult) DeepCopy() *PodStatusResult {
+	if in == nil {
+		return nil
+	}
+	out := new(PodStatusResult)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodStatusResult) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplate) DeepCopyInto(out *PodTemplate) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Template.DeepCopyInto(&out.Template)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate.
+func (in *PodTemplate) DeepCopy() *PodTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodTemplate) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PodTemplate, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList.
+func (in *PodTemplateList) DeepCopy() *PodTemplateList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplateList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodTemplateList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec.
+func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortStatus) DeepCopyInto(out *PortStatus) {
+	*out = *in
+	if in.Error != nil {
+		in, out := &in.Error, &out.Error
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortStatus.
+func (in *PortStatus) DeepCopy() *PortStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PortStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource.
+func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PortworxVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Preconditions) DeepCopyInto(out *Preconditions) {
+	*out = *in
+	if in.UID != nil {
+		in, out := &in.UID, &out.UID
+		*out = new(types.UID)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions.
+func (in *Preconditions) DeepCopy() *Preconditions {
+	if in == nil {
+		return nil
+	}
+	out := new(Preconditions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) {
+	*out = *in
+	in.PodSignature.DeepCopyInto(&out.PodSignature)
+	in.EvictionTime.DeepCopyInto(&out.EvictionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry.
+func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry {
+	if in == nil {
+		return nil
+	}
+	out := new(PreferAvoidPodsEntry)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) {
+	*out = *in
+	in.Preference.DeepCopyInto(&out.Preference)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm.
+func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(PreferredSchedulingTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Probe) DeepCopyInto(out *Probe) {
+	*out = *in
+	in.ProbeHandler.DeepCopyInto(&out.ProbeHandler)
+	if in.TerminationGracePeriodSeconds != nil {
+		in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe.
+func (in *Probe) DeepCopy() *Probe {
+	if in == nil {
+		return nil
+	}
+	out := new(Probe)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProbeHandler) DeepCopyInto(out *ProbeHandler) {
+	*out = *in
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		*out = new(ExecAction)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.HTTPGet != nil {
+		in, out := &in.HTTPGet, &out.HTTPGet
+		*out = new(HTTPGetAction)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TCPSocket != nil {
+		in, out := &in.TCPSocket, &out.TCPSocket
+		*out = new(TCPSocketAction)
+		**out = **in
+	}
+	if in.GRPC != nil {
+		in, out := &in.GRPC, &out.GRPC
+		*out = new(GRPCAction)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeHandler.
+func (in *ProbeHandler) DeepCopy() *ProbeHandler {
+	if in == nil {
+		return nil
+	}
+	out := new(ProbeHandler)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) {
+	*out = *in
+	if in.Sources != nil {
+		in, out := &in.Sources, &out.Sources
+		*out = make([]VolumeProjection, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource.
+func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ProjectedVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource.
+func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(QuobyteVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) {
+	*out = *in
+	if in.CephMonitors != nil {
+		in, out := &in.CephMonitors, &out.CephMonitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource.
+func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(RBDPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) {
+	*out = *in
+	if in.CephMonitors != nil {
+		in, out := &in.CephMonitors, &out.CephMonitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource.
+func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(RBDVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation.
+func (in *RangeAllocation) DeepCopy() *RangeAllocation {
+	if in == nil {
+		return nil
+	}
+	out := new(RangeAllocation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RangeAllocation) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationController) DeepCopyInto(out *ReplicationController) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController.
+func (in *ReplicationController) DeepCopy() *ReplicationController {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationController)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicationController) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition.
+func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ReplicationController, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList.
+func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicationControllerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Template != nil {
+		in, out := &in.Template, &out.Template
+		*out = new(PodTemplateSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec.
+func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ReplicationControllerCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus.
+func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaim.
+func (in *ResourceClaim) DeepCopy() *ResourceClaim {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceClaim)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) {
+	*out = *in
+	out.Divisor = in.Divisor.DeepCopy()
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector.
+func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceFieldSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceHealth) DeepCopyInto(out *ResourceHealth) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceHealth.
+func (in *ResourceHealth) DeepCopy() *ResourceHealth {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceHealth)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ResourceList) DeepCopyInto(out *ResourceList) {
+	{
+		in := &in
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList.
+func (in ResourceList) DeepCopy() ResourceList {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceList)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota.
+func (in *ResourceQuota) DeepCopy() *ResourceQuota {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuota)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceQuota) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ResourceQuota, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList.
+func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotaList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceQuotaList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) {
+	*out = *in
+	if in.Hard != nil {
+		in, out := &in.Hard, &out.Hard
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Scopes != nil {
+		in, out := &in.Scopes, &out.Scopes
+		*out = make([]ResourceQuotaScope, len(*in))
+		copy(*out, *in)
+	}
+	if in.ScopeSelector != nil {
+		in, out := &in.ScopeSelector, &out.ScopeSelector
+		*out = new(ScopeSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec.
+func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotaSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) {
+	*out = *in
+	if in.Hard != nil {
+		in, out := &in.Hard, &out.Hard
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Used != nil {
+		in, out := &in.Used, &out.Used
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus.
+func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotaStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
+	*out = *in
+	if in.Limits != nil {
+		in, out := &in.Limits, &out.Limits
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Requests != nil {
+		in, out := &in.Requests, &out.Requests
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Claims != nil {
+		in, out := &in.Claims, &out.Claims
+		*out = make([]ResourceClaim, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
+func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceRequirements)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ResourceHealth, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus.
+func (in *ResourceStatus) DeepCopy() *ResourceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions.
+func (in *SELinuxOptions) DeepCopy() *SELinuxOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(SELinuxOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource.
+func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleIOPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource.
+func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleIOVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScopeSelector) DeepCopyInto(out *ScopeSelector) {
+	*out = *in
+	if in.MatchExpressions != nil {
+		in, out := &in.MatchExpressions, &out.MatchExpressions
+		*out = make([]ScopedResourceSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSelector.
+func (in *ScopeSelector) DeepCopy() *ScopeSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ScopeSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScopedResourceSelectorRequirement) DeepCopyInto(out *ScopedResourceSelectorRequirement) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopedResourceSelectorRequirement.
+func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorRequirement {
+	if in == nil {
+		return nil
+	}
+	out := new(ScopedResourceSelectorRequirement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SeccompProfile) DeepCopyInto(out *SeccompProfile) {
+	*out = *in
+	if in.LocalhostProfile != nil {
+		in, out := &in.LocalhostProfile, &out.LocalhostProfile
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeccompProfile.
+func (in *SeccompProfile) DeepCopy() *SeccompProfile {
+	if in == nil {
+		return nil
+	}
+	out := new(SeccompProfile)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Secret) DeepCopyInto(out *Secret) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Immutable != nil {
+		in, out := &in.Immutable, &out.Immutable
+		*out = new(bool)
+		**out = **in
+	}
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = make(map[string][]byte, len(*in))
+		for key, val := range *in {
+			var outVal []byte
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make([]byte, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.StringData != nil {
+		in, out := &in.StringData, &out.StringData
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret.
+func (in *Secret) DeepCopy() *Secret {
+	if in == nil {
+		return nil
+	}
+	out := new(Secret)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Secret) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource.
+func (in *SecretEnvSource) DeepCopy() *SecretEnvSource {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretEnvSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector.
+func (in *SecretKeySelector) DeepCopy() *SecretKeySelector {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretKeySelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretList) DeepCopyInto(out *SecretList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Secret, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList.
+func (in *SecretList) DeepCopy() *SecretList {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SecretList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretProjection) DeepCopyInto(out *SecretProjection) {
+	*out = *in
+	out.LocalObjectReference = in.LocalObjectReference
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KeyToPath, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection.
+func (in *SecretProjection) DeepCopy() *SecretProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretReference) DeepCopyInto(out *SecretReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
+func (in *SecretReference) DeepCopy() *SecretReference {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) {
+	*out = *in
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]KeyToPath, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Optional != nil {
+		in, out := &in.Optional, &out.Optional
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource.
+func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityContext) DeepCopyInto(out *SecurityContext) {
+	*out = *in
+	if in.Capabilities != nil {
+		in, out := &in.Capabilities, &out.Capabilities
+		*out = new(Capabilities)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Privileged != nil {
+		in, out := &in.Privileged, &out.Privileged
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SELinuxOptions != nil {
+		in, out := &in.SELinuxOptions, &out.SELinuxOptions
+		*out = new(SELinuxOptions)
+		**out = **in
+	}
+	if in.WindowsOptions != nil {
+		in, out := &in.WindowsOptions, &out.WindowsOptions
+		*out = new(WindowsSecurityContextOptions)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RunAsUser != nil {
+		in, out := &in.RunAsUser, &out.RunAsUser
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsGroup != nil {
+		in, out := &in.RunAsGroup, &out.RunAsGroup
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsNonRoot != nil {
+		in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ReadOnlyRootFilesystem != nil {
+		in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem
+		*out = new(bool)
+		**out = **in
+	}
+	if in.AllowPrivilegeEscalation != nil {
+		in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation
+		*out = new(bool)
+		**out = **in
+	}
+	if in.ProcMount != nil {
+		in, out := &in.ProcMount, &out.ProcMount
+		*out = new(ProcMountType)
+		**out = **in
+	}
+	if in.SeccompProfile != nil {
+		in, out := &in.SeccompProfile, &out.SeccompProfile
+		*out = new(SeccompProfile)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AppArmorProfile != nil {
+		in, out := &in.AppArmorProfile, &out.AppArmorProfile
+		*out = new(AppArmorProfile)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext.
+func (in *SecurityContext) DeepCopy() *SecurityContext {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityContext)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SerializedReference) DeepCopyInto(out *SerializedReference) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.Reference = in.Reference
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference.
+func (in *SerializedReference) DeepCopy() *SerializedReference {
+	if in == nil {
+		return nil
+	}
+	out := new(SerializedReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SerializedReference) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Service) DeepCopyInto(out *Service) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
+func (in *Service) DeepCopy() *Service {
+	if in == nil {
+		return nil
+	}
+	out := new(Service)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Service) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Secrets != nil {
+		in, out := &in.Secrets, &out.Secrets
+		*out = make([]ObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.ImagePullSecrets != nil {
+		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+		*out = make([]LocalObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.AutomountServiceAccountToken != nil {
+		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount.
+func (in *ServiceAccount) DeepCopy() *ServiceAccount {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccount)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceAccount) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ServiceAccount, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList.
+func (in *ServiceAccountList) DeepCopy() *ServiceAccountList {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceAccountList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) {
+	*out = *in
+	if in.ExpirationSeconds != nil {
+		in, out := &in.ExpirationSeconds, &out.ExpirationSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection.
+func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceAccountTokenProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceList) DeepCopyInto(out *ServiceList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Service, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList.
+func (in *ServiceList) DeepCopy() *ServiceList {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServicePort) DeepCopyInto(out *ServicePort) {
+	*out = *in
+	if in.AppProtocol != nil {
+		in, out := &in.AppProtocol, &out.AppProtocol
+		*out = new(string)
+		**out = **in
+	}
+	out.TargetPort = in.TargetPort
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort.
+func (in *ServicePort) DeepCopy() *ServicePort {
+	if in == nil {
+		return nil
+	}
+	out := new(ServicePort)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions.
+func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceProxyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
+	*out = *in
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]ServicePort, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.ClusterIPs != nil {
+		in, out := &in.ClusterIPs, &out.ClusterIPs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ExternalIPs != nil {
+		in, out := &in.ExternalIPs, &out.ExternalIPs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.LoadBalancerSourceRanges != nil {
+		in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SessionAffinityConfig != nil {
+		in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig
+		*out = new(SessionAffinityConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.IPFamilies != nil {
+		in, out := &in.IPFamilies, &out.IPFamilies
+		*out = make([]IPFamily, len(*in))
+		copy(*out, *in)
+	}
+	if in.IPFamilyPolicy != nil {
+		in, out := &in.IPFamilyPolicy, &out.IPFamilyPolicy
+		*out = new(IPFamilyPolicy)
+		**out = **in
+	}
+	if in.AllocateLoadBalancerNodePorts != nil {
+		in, out := &in.AllocateLoadBalancerNodePorts, &out.AllocateLoadBalancerNodePorts
+		*out = new(bool)
+		**out = **in
+	}
+	if in.LoadBalancerClass != nil {
+		in, out := &in.LoadBalancerClass, &out.LoadBalancerClass
+		*out = new(string)
+		**out = **in
+	}
+	if in.InternalTrafficPolicy != nil {
+		in, out := &in.InternalTrafficPolicy, &out.InternalTrafficPolicy
+		*out = new(ServiceInternalTrafficPolicy)
+		**out = **in
+	}
+	if in.TrafficDistribution != nil {
+		in, out := &in.TrafficDistribution, &out.TrafficDistribution
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.
+func (in *ServiceSpec) DeepCopy() *ServiceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) {
+	*out = *in
+	in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]metav1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus.
+func (in *ServiceStatus) DeepCopy() *ServiceStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ServiceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) {
+	*out = *in
+	if in.ClientIP != nil {
+		in, out := &in.ClientIP, &out.ClientIP
+		*out = new(ClientIPConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig.
+func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(SessionAffinityConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SleepAction) DeepCopyInto(out *SleepAction) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SleepAction.
+func (in *SleepAction) DeepCopy() *SleepAction {
+	if in == nil {
+		return nil
+	}
+	out := new(SleepAction)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource.
+func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageOSPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) {
+	*out = *in
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource.
+func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(StorageOSVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Sysctl) DeepCopyInto(out *Sysctl) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl.
+func (in *Sysctl) DeepCopy() *Sysctl {
+	if in == nil {
+		return nil
+	}
+	out := new(Sysctl)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) {
+	*out = *in
+	out.Port = in.Port
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction.
+func (in *TCPSocketAction) DeepCopy() *TCPSocketAction {
+	if in == nil {
+		return nil
+	}
+	out := new(TCPSocketAction)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Taint) DeepCopyInto(out *Taint) {
+	*out = *in
+	if in.TimeAdded != nil {
+		in, out := &in.TimeAdded, &out.TimeAdded
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint.
+func (in *Taint) DeepCopy() *Taint {
+	if in == nil {
+		return nil
+	}
+	out := new(Taint)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Toleration) DeepCopyInto(out *Toleration) {
+	*out = *in
+	if in.TolerationSeconds != nil {
+		in, out := &in.TolerationSeconds, &out.TolerationSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration.
+func (in *Toleration) DeepCopy() *Toleration {
+	if in == nil {
+		return nil
+	}
+	out := new(Toleration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopologySelectorLabelRequirement) DeepCopyInto(out *TopologySelectorLabelRequirement) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorLabelRequirement.
+func (in *TopologySelectorLabelRequirement) DeepCopy() *TopologySelectorLabelRequirement {
+	if in == nil {
+		return nil
+	}
+	out := new(TopologySelectorLabelRequirement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopologySelectorTerm) DeepCopyInto(out *TopologySelectorTerm) {
+	*out = *in
+	if in.MatchLabelExpressions != nil {
+		in, out := &in.MatchLabelExpressions, &out.MatchLabelExpressions
+		*out = make([]TopologySelectorLabelRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorTerm.
+func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(TopologySelectorTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) {
+	*out = *in
+	if in.LabelSelector != nil {
+		in, out := &in.LabelSelector, &out.LabelSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MinDomains != nil {
+		in, out := &in.MinDomains, &out.MinDomains
+		*out = new(int32)
+		**out = **in
+	}
+	if in.NodeAffinityPolicy != nil {
+		in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy
+		*out = new(NodeInclusionPolicy)
+		**out = **in
+	}
+	if in.NodeTaintsPolicy != nil {
+		in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy
+		*out = new(NodeInclusionPolicy)
+		**out = **in
+	}
+	if in.MatchLabelKeys != nil {
+		in, out := &in.MatchLabelKeys, &out.MatchLabelKeys
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint.
+func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint {
+	if in == nil {
+		return nil
+	}
+	out := new(TopologySpreadConstraint)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) {
+	*out = *in
+	if in.APIGroup != nil {
+		in, out := &in.APIGroup, &out.APIGroup
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference.
+func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(TypedLocalObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TypedObjectReference) DeepCopyInto(out *TypedObjectReference) {
+	*out = *in
+	if in.APIGroup != nil {
+		in, out := &in.APIGroup, &out.APIGroup
+		*out = new(string)
+		**out = **in
+	}
+	if in.Namespace != nil {
+		in, out := &in.Namespace, &out.Namespace
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedObjectReference.
+func (in *TypedObjectReference) DeepCopy() *TypedObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(TypedObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Volume) DeepCopyInto(out *Volume) {
+	*out = *in
+	in.VolumeSource.DeepCopyInto(&out.VolumeSource)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume.
+func (in *Volume) DeepCopy() *Volume {
+	if in == nil {
+		return nil
+	}
+	out := new(Volume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice.
+func (in *VolumeDevice) DeepCopy() *VolumeDevice {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeDevice)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeMount) DeepCopyInto(out *VolumeMount) {
+	*out = *in
+	if in.RecursiveReadOnly != nil {
+		in, out := &in.RecursiveReadOnly, &out.RecursiveReadOnly
+		*out = new(RecursiveReadOnlyMode)
+		**out = **in
+	}
+	if in.MountPropagation != nil {
+		in, out := &in.MountPropagation, &out.MountPropagation
+		*out = new(MountPropagationMode)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount.
+func (in *VolumeMount) DeepCopy() *VolumeMount {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeMount)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeMountStatus) DeepCopyInto(out *VolumeMountStatus) {
+	*out = *in
+	if in.RecursiveReadOnly != nil {
+		in, out := &in.RecursiveReadOnly, &out.RecursiveReadOnly
+		*out = new(RecursiveReadOnlyMode)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountStatus.
+func (in *VolumeMountStatus) DeepCopy() *VolumeMountStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeMountStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) {
+	*out = *in
+	if in.Required != nil {
+		in, out := &in.Required, &out.Required
+		*out = new(NodeSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity.
+func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeNodeAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) {
+	*out = *in
+	if in.Secret != nil {
+		in, out := &in.Secret, &out.Secret
+		*out = new(SecretProjection)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.DownwardAPI != nil {
+		in, out := &in.DownwardAPI, &out.DownwardAPI
+		*out = new(DownwardAPIProjection)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ConfigMap != nil {
+		in, out := &in.ConfigMap, &out.ConfigMap
+		*out = new(ConfigMapProjection)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ServiceAccountToken != nil {
+		in, out := &in.ServiceAccountToken, &out.ServiceAccountToken
+		*out = new(ServiceAccountTokenProjection)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ClusterTrustBundle != nil {
+		in, out := &in.ClusterTrustBundle, &out.ClusterTrustBundle
+		*out = new(ClusterTrustBundleProjection)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection.
+func (in *VolumeProjection) DeepCopy() *VolumeProjection {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeProjection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeResourceRequirements) DeepCopyInto(out *VolumeResourceRequirements) {
+	*out = *in
+	if in.Limits != nil {
+		in, out := &in.Limits, &out.Limits
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Requests != nil {
+		in, out := &in.Requests, &out.Requests
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeResourceRequirements.
+func (in *VolumeResourceRequirements) DeepCopy() *VolumeResourceRequirements {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeResourceRequirements)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
+	*out = *in
+	if in.HostPath != nil {
+		in, out := &in.HostPath, &out.HostPath
+		*out = new(HostPathVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.EmptyDir != nil {
+		in, out := &in.EmptyDir, &out.EmptyDir
+		*out = new(EmptyDirVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.GCEPersistentDisk != nil {
+		in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+		*out = new(GCEPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.AWSElasticBlockStore != nil {
+		in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+		*out = new(AWSElasticBlockStoreVolumeSource)
+		**out = **in
+	}
+	if in.GitRepo != nil {
+		in, out := &in.GitRepo, &out.GitRepo
+		*out = new(GitRepoVolumeSource)
+		**out = **in
+	}
+	if in.Secret != nil {
+		in, out := &in.Secret, &out.Secret
+		*out = new(SecretVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NFS != nil {
+		in, out := &in.NFS, &out.NFS
+		*out = new(NFSVolumeSource)
+		**out = **in
+	}
+	if in.ISCSI != nil {
+		in, out := &in.ISCSI, &out.ISCSI
+		*out = new(ISCSIVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Glusterfs != nil {
+		in, out := &in.Glusterfs, &out.Glusterfs
+		*out = new(GlusterfsVolumeSource)
+		**out = **in
+	}
+	if in.PersistentVolumeClaim != nil {
+		in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim
+		*out = new(PersistentVolumeClaimVolumeSource)
+		**out = **in
+	}
+	if in.RBD != nil {
+		in, out := &in.RBD, &out.RBD
+		*out = new(RBDVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FlexVolume != nil {
+		in, out := &in.FlexVolume, &out.FlexVolume
+		*out = new(FlexVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Cinder != nil {
+		in, out := &in.Cinder, &out.Cinder
+		*out = new(CinderVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CephFS != nil {
+		in, out := &in.CephFS, &out.CephFS
+		*out = new(CephFSVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Flocker != nil {
+		in, out := &in.Flocker, &out.Flocker
+		*out = new(FlockerVolumeSource)
+		**out = **in
+	}
+	if in.DownwardAPI != nil {
+		in, out := &in.DownwardAPI, &out.DownwardAPI
+		*out = new(DownwardAPIVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FC != nil {
+		in, out := &in.FC, &out.FC
+		*out = new(FCVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AzureFile != nil {
+		in, out := &in.AzureFile, &out.AzureFile
+		*out = new(AzureFileVolumeSource)
+		**out = **in
+	}
+	if in.ConfigMap != nil {
+		in, out := &in.ConfigMap, &out.ConfigMap
+		*out = new(ConfigMapVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.VsphereVolume != nil {
+		in, out := &in.VsphereVolume, &out.VsphereVolume
+		*out = new(VsphereVirtualDiskVolumeSource)
+		**out = **in
+	}
+	if in.Quobyte != nil {
+		in, out := &in.Quobyte, &out.Quobyte
+		*out = new(QuobyteVolumeSource)
+		**out = **in
+	}
+	if in.AzureDisk != nil {
+		in, out := &in.AzureDisk, &out.AzureDisk
+		*out = new(AzureDiskVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PhotonPersistentDisk != nil {
+		in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
+		*out = new(PhotonPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.Projected != nil {
+		in, out := &in.Projected, &out.Projected
+		*out = new(ProjectedVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PortworxVolume != nil {
+		in, out := &in.PortworxVolume, &out.PortworxVolume
+		*out = new(PortworxVolumeSource)
+		**out = **in
+	}
+	if in.ScaleIO != nil {
+		in, out := &in.ScaleIO, &out.ScaleIO
+		*out = new(ScaleIOVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StorageOS != nil {
+		in, out := &in.StorageOS, &out.StorageOS
+		*out = new(StorageOSVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CSI != nil {
+		in, out := &in.CSI, &out.CSI
+		*out = new(CSIVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Ephemeral != nil {
+		in, out := &in.Ephemeral, &out.Ephemeral
+		*out = new(EphemeralVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Image != nil {
+		in, out := &in.Image, &out.Image
+		*out = new(ImageVolumeSource)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource.
+func (in *VolumeSource) DeepCopy() *VolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(VolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource.
+func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(VsphereVirtualDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) {
+	*out = *in
+	in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm.
+func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(WeightedPodAffinityTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContextOptions) {
+	*out = *in
+	if in.GMSACredentialSpecName != nil {
+		in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName
+		*out = new(string)
+		**out = **in
+	}
+	if in.GMSACredentialSpec != nil {
+		in, out := &in.GMSACredentialSpec, &out.GMSACredentialSpec
+		*out = new(string)
+		**out = **in
+	}
+	if in.RunAsUserName != nil {
+		in, out := &in.RunAsUserName, &out.RunAsUserName
+		*out = new(string)
+		**out = **in
+	}
+	if in.HostProcess != nil {
+		in, out := &in.HostProcess, &out.HostProcess
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsSecurityContextOptions.
+func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(WindowsSecurityContextOptions)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/hack/tools/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go b/hack/tools/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go
new file mode 100644
index 0000000000..6710a96d1c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go
@@ -0,0 +1,274 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
+
+package v1
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *Binding) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ConfigMap) APILifecycleIntroduced() (major, minor int) {
+	return 1, 2
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ConfigMapList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 2
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *Endpoints) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *EndpointsList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *Event) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *EventList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *LimitRange) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *LimitRangeList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *List) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *Namespace) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *NamespaceList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *Node) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *NodeList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *NodeProxyOptions) APILifecycleIntroduced() (major, minor int) {
+	return 1, 2
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PersistentVolume) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PersistentVolumeClaim) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PersistentVolumeClaimList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PersistentVolumeList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *Pod) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodAttachOptions) APILifecycleIntroduced() (major, minor int) {
+	return 1, 1
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodExecOptions) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodLogOptions) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodPortForwardOptions) APILifecycleIntroduced() (major, minor int) {
+	return 1, 6
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodProxyOptions) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodStatusResult) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodTemplate) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *PodTemplateList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *RangeAllocation) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ReplicationController) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ReplicationControllerList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceQuota) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ResourceQuotaList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *Secret) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *SecretList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *SerializedReference) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *Service) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ServiceAccount) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ServiceAccountList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ServiceList) APILifecycleIntroduced() (major, minor int) {
+	return 1, 0
+}
+
+// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
+func (in *ServiceProxyOptions) APILifecycleIntroduced() (major, minor int) {
+	return 1, 2
+}
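+
+// Editorial sketch, not generated code: the (major, minor) pairs returned by
+// the accessors above are meant for numeric comparison. The helper below is
+// hypothetical and only illustrates the intended use.
+func introducedBefore(obj interface{ APILifecycleIntroduced() (int, int) }, major, minor int) bool {
+	maj, mnr := obj.APILifecycleIntroduced()
+	return maj < major || (maj == major && mnr < minor)
+}
+
+// For example, introducedBefore(&PodPortForwardOptions{}, 1, 7) is true,
+// since PodPortForwardOptions was introduced in 1.6.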
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apihelpers/helpers.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apihelpers/helpers.go
new file mode 100644
index 0000000000..6ead5b1113
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apihelpers/helpers.go
@@ -0,0 +1,260 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apihelpers
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// IsProtectedCommunityGroup returns whether a group specified for a CRD is protected for the community and needs
+// to have the apiextensionsv1.KubeAPIApprovedAnnotation set.
+func IsProtectedCommunityGroup(group string) bool {
+	switch {
+	case group == "k8s.io" || strings.HasSuffix(group, ".k8s.io"):
+		return true
+	case group == "kubernetes.io" || strings.HasSuffix(group, ".kubernetes.io"):
+		return true
+	default:
+		return false
+	}
+}
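+
+// Editorial sketch (not vendored code): both the bare community domains and
+// their dotted subdomains are protected.
+func exampleProtectedGroups() []bool {
+	return []bool{
+		IsProtectedCommunityGroup("k8s.io"),            // true
+		IsProtectedCommunityGroup("apps.k8s.io"),       // true
+		IsProtectedCommunityGroup("sub.kubernetes.io"), // true
+		IsProtectedCommunityGroup("example.com"),       // false
+	}
+}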
+
+// APIApprovalState covers the various options for API approval annotation states
+type APIApprovalState int
+
+const (
+	// APIApprovalInvalid means the annotation doesn't have an expected value
+	APIApprovalInvalid APIApprovalState = iota
+	// APIApproved if the annotation has a URL (this means the API is approved)
+	APIApproved
+	// APIApprovalBypassed if the annotation starts with "unapproved" indicating that for whatever reason the API isn't approved, but we should allow its creation
+	APIApprovalBypassed
+	// APIApprovalMissing means the annotation is empty
+	APIApprovalMissing
+)
+
+// GetAPIApprovalState returns the state of the API approval and reason for that state
+func GetAPIApprovalState(annotations map[string]string) (state APIApprovalState, reason string) {
+	annotation := annotations[apiextensionsv1.KubeAPIApprovedAnnotation]
+
+	// we use the result of this parsing in the switch/case below
+	url, annotationURLParseErr := url.ParseRequestURI(annotation)
+	switch {
+	case len(annotation) == 0:
+		return APIApprovalMissing, fmt.Sprintf("protected groups must have approval annotation %q, see https://github.com/kubernetes/enhancements/pull/1111", apiextensionsv1.KubeAPIApprovedAnnotation)
+	case strings.HasPrefix(annotation, "unapproved"):
+		return APIApprovalBypassed, fmt.Sprintf("not approved: %q", annotation)
+	case annotationURLParseErr == nil && url != nil && len(url.Host) > 0 && len(url.Scheme) > 0:
+		return APIApproved, fmt.Sprintf("approved in %v", annotation)
+	default:
+		return APIApprovalInvalid, fmt.Sprintf("protected groups must have approval annotation %q with either a URL or a reason starting with \"unapproved\", see https://github.com/kubernetes/enhancements/pull/1111", apiextensionsv1.KubeAPIApprovedAnnotation)
+	}
+}
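+
+// Editorial sketch: representative annotation values and the states they map
+// to, per the switch above.
+func exampleApprovalStates() []APIApprovalState {
+	states := make([]APIApprovalState, 0, 4)
+	for _, a := range []string{
+		"",                              // APIApprovalMissing
+		"unapproved, experimental-only", // APIApprovalBypassed
+		"https://github.com/kubernetes/enhancements/pull/1111", // APIApproved
+		"approved", // APIApprovalInvalid: neither a parseable URL nor "unapproved..."
+	} {
+		state, _ := GetAPIApprovalState(map[string]string{apiextensionsv1.KubeAPIApprovedAnnotation: a})
+		states = append(states, state)
+	}
+	return states
+}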
+
+// SetCRDCondition sets the status condition. It either overwrites the existing one or creates a new one.
+func SetCRDCondition(crd *apiextensionsv1.CustomResourceDefinition, newCondition apiextensionsv1.CustomResourceDefinitionCondition) {
+	newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+
+	existingCondition := FindCRDCondition(crd, newCondition.Type)
+	if existingCondition == nil {
+		crd.Status.Conditions = append(crd.Status.Conditions, newCondition)
+		return
+	}
+
+	if existingCondition.Status != newCondition.Status || existingCondition.LastTransitionTime.IsZero() {
+		existingCondition.LastTransitionTime = newCondition.LastTransitionTime
+	}
+
+	existingCondition.Status = newCondition.Status
+	existingCondition.Reason = newCondition.Reason
+	existingCondition.Message = newCondition.Message
+}
+
+// RemoveCRDCondition removes the status condition.
+func RemoveCRDCondition(crd *apiextensionsv1.CustomResourceDefinition, conditionType apiextensionsv1.CustomResourceDefinitionConditionType) {
+	newConditions := []apiextensionsv1.CustomResourceDefinitionCondition{}
+	for _, condition := range crd.Status.Conditions {
+		if condition.Type != conditionType {
+			newConditions = append(newConditions, condition)
+		}
+	}
+	crd.Status.Conditions = newConditions
+}
+
+// FindCRDCondition returns the condition you're looking for or nil.
+func FindCRDCondition(crd *apiextensionsv1.CustomResourceDefinition, conditionType apiextensionsv1.CustomResourceDefinitionConditionType) *apiextensionsv1.CustomResourceDefinitionCondition {
+	for i := range crd.Status.Conditions {
+		if crd.Status.Conditions[i].Type == conditionType {
+			return &crd.Status.Conditions[i]
+		}
+	}
+
+	return nil
+}
+
+// IsCRDConditionTrue indicates if the condition is present and strictly true.
+func IsCRDConditionTrue(crd *apiextensionsv1.CustomResourceDefinition, conditionType apiextensionsv1.CustomResourceDefinitionConditionType) bool {
+	return IsCRDConditionPresentAndEqual(crd, conditionType, apiextensionsv1.ConditionTrue)
+}
+
+// IsCRDConditionFalse indicates if the condition is present and false.
+func IsCRDConditionFalse(crd *apiextensionsv1.CustomResourceDefinition, conditionType apiextensionsv1.CustomResourceDefinitionConditionType) bool {
+	return IsCRDConditionPresentAndEqual(crd, conditionType, apiextensionsv1.ConditionFalse)
+}
+
+// IsCRDConditionPresentAndEqual indicates if the condition is present and equal to the given status.
+func IsCRDConditionPresentAndEqual(crd *apiextensionsv1.CustomResourceDefinition, conditionType apiextensionsv1.CustomResourceDefinitionConditionType, status apiextensionsv1.ConditionStatus) bool {
+	for _, condition := range crd.Status.Conditions {
+		if condition.Type == conditionType {
+			return condition.Status == status
+		}
+	}
+	return false
+}
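+
+// Editorial sketch: a typical write-then-read flow with the helpers above.
+// SetCRDCondition stamps LastTransitionTime itself, so callers only supply
+// the type, status, reason, and message.
+func exampleEstablished(crd *apiextensionsv1.CustomResourceDefinition) bool {
+	SetCRDCondition(crd, apiextensionsv1.CustomResourceDefinitionCondition{
+		Type:   apiextensionsv1.Established,
+		Status: apiextensionsv1.ConditionTrue,
+		Reason: "InitialNamesAccepted",
+	})
+	return IsCRDConditionTrue(crd, apiextensionsv1.Established)
+}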
+
+// IsCRDConditionEquivalent returns true if the lhs and rhs are equivalent except for times.
+func IsCRDConditionEquivalent(lhs, rhs *apiextensionsv1.CustomResourceDefinitionCondition) bool {
+	if lhs == nil && rhs == nil {
+		return true
+	}
+	if lhs == nil || rhs == nil {
+		return false
+	}
+
+	return lhs.Message == rhs.Message && lhs.Reason == rhs.Reason && lhs.Status == rhs.Status && lhs.Type == rhs.Type
+}
+
+// CRDHasFinalizer returns true if the finalizer is in the list.
+func CRDHasFinalizer(crd *apiextensionsv1.CustomResourceDefinition, needle string) bool {
+	for _, finalizer := range crd.Finalizers {
+		if finalizer == needle {
+			return true
+		}
+	}
+
+	return false
+}
+
+// CRDRemoveFinalizer removes the finalizer if present.
+func CRDRemoveFinalizer(crd *apiextensionsv1.CustomResourceDefinition, needle string) {
+	newFinalizers := []string{}
+	for _, finalizer := range crd.Finalizers {
+		if finalizer != needle {
+			newFinalizers = append(newFinalizers, finalizer)
+		}
+	}
+	crd.Finalizers = newFinalizers
+}
+
+// HasServedCRDVersion returns true if the given version is in the list of the CRD's versions and the Served flag is set.
+func HasServedCRDVersion(crd *apiextensionsv1.CustomResourceDefinition, version string) bool {
+	for _, v := range crd.Spec.Versions {
+		if v.Name == version {
+			return v.Served
+		}
+	}
+	return false
+}
+
+// GetCRDStorageVersion returns the storage version for the given CRD.
+func GetCRDStorageVersion(crd *apiextensionsv1.CustomResourceDefinition) (string, error) {
+	for _, v := range crd.Spec.Versions {
+		if v.Storage {
+			return v.Name, nil
+		}
+	}
+	// This should not happen if the CRD is valid
+	return "", fmt.Errorf("invalid apiextensionsv1.CustomResourceDefinition, no storage version")
+}
+
+// IsStoredVersion returns whether the given version is the storage version of the CRD.
+func IsStoredVersion(crd *apiextensionsv1.CustomResourceDefinition, version string) bool {
+	for _, v := range crd.Status.StoredVersions {
+		if version == v {
+			return true
+		}
+	}
+	return false
+}
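+
+// Editorial sketch: spec.versions declares exactly one Storage=true version,
+// while status.storedVersions lists every version ever persisted; for a
+// healthy CRD the former appears in the latter.
+func exampleStorageIsStored(crd *apiextensionsv1.CustomResourceDefinition) (bool, error) {
+	v, err := GetCRDStorageVersion(crd)
+	if err != nil {
+		return false, err
+	}
+	return IsStoredVersion(crd, v), nil
+}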
+
+// GetSchemaForVersion returns the validation schema for the given version or nil.
+func GetSchemaForVersion(crd *apiextensionsv1.CustomResourceDefinition, version string) (*apiextensionsv1.CustomResourceValidation, error) {
+	for _, v := range crd.Spec.Versions {
+		if version == v.Name {
+			return v.Schema, nil
+		}
+	}
+	return nil, fmt.Errorf("version %s not found in apiextensionsv1.CustomResourceDefinition: %v", version, crd.Name)
+}
+
+// GetSubresourcesForVersion returns the subresources for the given version or nil.
+func GetSubresourcesForVersion(crd *apiextensionsv1.CustomResourceDefinition, version string) (*apiextensionsv1.CustomResourceSubresources, error) {
+	for _, v := range crd.Spec.Versions {
+		if version == v.Name {
+			return v.Subresources, nil
+		}
+	}
+	return nil, fmt.Errorf("version %s not found in apiextensionsv1.CustomResourceDefinition: %v", version, crd.Name)
+}
+
+// HasPerVersionSchema returns true if a CRD uses per-version schema.
+func HasPerVersionSchema(versions []apiextensionsv1.CustomResourceDefinitionVersion) bool {
+	for _, v := range versions {
+		if v.Schema != nil {
+			return true
+		}
+	}
+	return false
+}
+
+// HasPerVersionSubresources returns true if a CRD uses per-version subresources.
+func HasPerVersionSubresources(versions []apiextensionsv1.CustomResourceDefinitionVersion) bool {
+	for _, v := range versions {
+		if v.Subresources != nil {
+			return true
+		}
+	}
+	return false
+}
+
+// HasPerVersionColumns returns true if a CRD uses per-version columns.
+func HasPerVersionColumns(versions []apiextensionsv1.CustomResourceDefinitionVersion) bool {
+	for _, v := range versions {
+		if len(v.AdditionalPrinterColumns) > 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// HasVersionServed returns true if the given CRD serves the given version.
+func HasVersionServed(crd *apiextensionsv1.CustomResourceDefinition, version string) bool {
+	for _, v := range crd.Spec.Versions {
+		if !v.Served || v.Name != version {
+			continue
+		}
+		return true
+	}
+	return false
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go
index 321bec385c..6ade24a82f 100644
--- a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go
@@ -20,12 +20,42 @@ import (
 	"bytes"
 	"errors"
 
+	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
 	"k8s.io/apimachinery/pkg/util/json"
 )
 
 var jsTrue = []byte("true")
 var jsFalse = []byte("false")
 
+// The CBOR parsing-related constants and functions below are not exported so they can be
+// easily removed at a future date when the CBOR library provides equivalent functionality.
+
+type cborMajorType int
+
+const (
+	// https://www.rfc-editor.org/rfc/rfc8949.html#section-3.1
+	cborUnsignedInteger cborMajorType = 0
+	cborNegativeInteger cborMajorType = 1
+	cborByteString      cborMajorType = 2
+	cborTextString      cborMajorType = 3
+	cborArray           cborMajorType = 4
+	cborMap             cborMajorType = 5
+	cborTag             cborMajorType = 6
+	cborOther           cborMajorType = 7
+)
+
+const (
+	// from https://www.rfc-editor.org/rfc/rfc8949.html#name-jump-table-for-initial-byte.
+	// additionally, see https://www.rfc-editor.org/rfc/rfc8949.html#section-3.3-5.
+	cborFalseValue = 0xf4
+	cborTrueValue  = 0xf5
+	cborNullValue  = 0xf6
+)
+
+func cborType(b byte) cborMajorType {
+	return cborMajorType(b >> 5)
+}
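+
+// Editorial sketch: the major type is the high 3 bits of the initial byte
+// (RFC 8949, section 3.1). 0xA3 is 0b101_00011, a map with 3 entries;
+// 0x83 is 0b100_00011, an array of 3 elements.
+var (
+	_ = cborType(0xA3) == cborMap   // true
+	_ = cborType(0x83) == cborArray // true
+)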
+
 func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) {
 	if s.Schema != nil {
 		return json.Marshal(s.Schema)
@@ -59,6 +89,39 @@ func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
+func (s JSONSchemaPropsOrBool) MarshalCBOR() ([]byte, error) {
+	if s.Schema != nil {
+		return cbor.Marshal(s.Schema)
+	}
+	return cbor.Marshal(s.Allows)
+}
+
+func (s *JSONSchemaPropsOrBool) UnmarshalCBOR(data []byte) error {
+	switch {
+	case len(data) == 0:
+		// ideally we would avoid modifying *s here, but we are matching the behavior of UnmarshalJSON
+		*s = JSONSchemaPropsOrBool{}
+		return nil
+	case cborType(data[0]) == cborMap:
+		var p JSONSchemaProps
+		if err := cbor.Unmarshal(data, &p); err != nil {
+			return err
+		}
+		*s = JSONSchemaPropsOrBool{Allows: true, Schema: &p}
+		return nil
+	case data[0] == cborTrueValue:
+		*s = JSONSchemaPropsOrBool{Allows: true}
+		return nil
+	case data[0] == cborFalseValue:
+		*s = JSONSchemaPropsOrBool{Allows: false}
+		return nil
+	default:
+		// ideally, this case would not also capture a null input value,
+		// but we are matching the behavior of UnmarshalJSON
+		return errors.New("boolean or JSON schema expected")
+	}
+}
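+
+// Editorial sketch: single-byte CBOR simple values decode straight into
+// Allows, while a map initial byte triggers a full JSONSchemaProps decode.
+func exampleBoolOrSchema() (JSONSchemaPropsOrBool, error) {
+	var s JSONSchemaPropsOrBool
+	err := s.UnmarshalCBOR([]byte{cborTrueValue}) // -> Allows: true, Schema: nil
+	return s, err
+}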
+
 func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) {
 	if len(s.Property) > 0 {
 		return json.Marshal(s.Property)
@@ -91,6 +154,40 @@ func (s *JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
+func (s JSONSchemaPropsOrStringArray) MarshalCBOR() ([]byte, error) {
+	if len(s.Property) > 0 {
+		return cbor.Marshal(s.Property)
+	}
+	if s.Schema != nil {
+		return cbor.Marshal(s.Schema)
+	}
+	return cbor.Marshal(nil)
+}
+
+func (s *JSONSchemaPropsOrStringArray) UnmarshalCBOR(data []byte) error {
+	if len(data) > 0 && cborType(data[0]) == cborArray {
+		var a []string
+		if err := cbor.Unmarshal(data, &a); err != nil {
+			return err
+		}
+		*s = JSONSchemaPropsOrStringArray{Property: a}
+		return nil
+	}
+	if len(data) > 0 && cborType(data[0]) == cborMap {
+		var p JSONSchemaProps
+		if err := cbor.Unmarshal(data, &p); err != nil {
+			return err
+		}
+		*s = JSONSchemaPropsOrStringArray{Schema: &p}
+		return nil
+	}
+	// At this point we either have: empty data, a null value, or an
+	// unexpected type. In order to match the behavior of the existing
+	// UnmarshalJSON, no error is returned and *s is overwritten here.
+	*s = JSONSchemaPropsOrStringArray{}
+	return nil
+}
+
 func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) {
 	if len(s.JSONSchemas) > 0 {
 		return json.Marshal(s.JSONSchemas)
@@ -120,6 +217,37 @@ func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
+func (s JSONSchemaPropsOrArray) MarshalCBOR() ([]byte, error) {
+	if len(s.JSONSchemas) > 0 {
+		return cbor.Marshal(s.JSONSchemas)
+	}
+	return cbor.Marshal(s.Schema)
+}
+
+func (s *JSONSchemaPropsOrArray) UnmarshalCBOR(data []byte) error {
+	if len(data) > 0 && cborType(data[0]) == cborMap {
+		var p JSONSchemaProps
+		if err := cbor.Unmarshal(data, &p); err != nil {
+			return err
+		}
+		*s = JSONSchemaPropsOrArray{Schema: &p}
+		return nil
+	}
+	if len(data) > 0 && cborType(data[0]) == cborArray {
+		var a []JSONSchemaProps
+		if err := cbor.Unmarshal(data, &a); err != nil {
+			return err
+		}
+		*s = JSONSchemaPropsOrArray{JSONSchemas: a}
+		return nil
+	}
+	// At this point we either have: empty data, a null value, or an
+	// unexpected type. In order to match the behavior of the existing
+	// UnmarshalJSON, no error is returned and *s is overwritten here.
+	*s = JSONSchemaPropsOrArray{}
+	return nil
+}
+
 func (s JSON) MarshalJSON() ([]byte, error) {
 	if len(s.Raw) > 0 {
 		return s.Raw, nil
@@ -134,3 +262,34 @@ func (s *JSON) UnmarshalJSON(data []byte) error {
 	}
 	return nil
 }
+
+func (s JSON) MarshalCBOR() ([]byte, error) {
+	// Note that non-semantic whitespace is lost during the transcoding performed here.
+// We do not foresee this being a problem given the current known uses of this type.
+	// Other limitations that arise when roundtripping JSON via dynamic clients also apply
+	// here, for example: insignificant whitespace handling, number handling, and map key ordering.
+	if len(s.Raw) == 0 {
+		return []byte{cborNullValue}, nil
+	}
+	var u any
+	if err := json.Unmarshal(s.Raw, &u); err != nil {
+		return nil, err
+	}
+	return cbor.Marshal(u)
+}
+
+func (s *JSON) UnmarshalCBOR(data []byte) error {
+	if len(data) == 0 || data[0] == cborNullValue {
+		return nil
+	}
+	var u any
+	if err := cbor.Unmarshal(data, &u); err != nil {
+		return err
+	}
+	raw, err := json.Marshal(u)
+	if err != nil {
+		return err
+	}
+	s.Raw = raw
+	return nil
+}
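+
+// Editorial sketch: MarshalCBOR transcodes Raw through a generic Go value,
+// so a CBOR round trip preserves the data but normalizes the formatting.
+func exampleJSONRoundTrip() (JSON, error) {
+	in := JSON{Raw: []byte(`{"a": 1}`)}
+	b, err := in.MarshalCBOR()
+	if err != nil {
+		return JSON{}, err
+	}
+	var out JSON
+	err = out.UnmarshalCBOR(b) // out.Raw == []byte(`{"a":1}`), whitespace dropped
+	return out, err
+}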
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go
index 43b9038787..5e6e825329 100644
--- a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go
@@ -20,12 +20,40 @@ import (
 	"bytes"
 	"errors"
 
+	cbor "k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
 	"k8s.io/apimachinery/pkg/util/json"
 )
 
 var jsTrue = []byte("true")
 var jsFalse = []byte("false")
 
+// The CBOR parsing-related constants and functions below are not exported so they can be
+// easily removed at a future date when the CBOR library provides equivalent functionality.
+
+type cborMajorType int
+
+const (
+	// https://www.rfc-editor.org/rfc/rfc8949.html#section-3.1
+	cborUnsignedInteger cborMajorType = 0
+	cborNegativeInteger cborMajorType = 1
+	cborByteString      cborMajorType = 2
+	cborTextString      cborMajorType = 3
+	cborArray           cborMajorType = 4
+	cborMap             cborMajorType = 5
+	cborTag             cborMajorType = 6
+	cborOther           cborMajorType = 7
+)
+
+const (
+	cborFalseValue = 0xf4
+	cborTrueValue  = 0xf5
+	cborNullValue  = 0xf6
+)
+
+func cborType(b byte) cborMajorType {
+	return cborMajorType(b >> 5)
+}
+
 func (s JSONSchemaPropsOrBool) MarshalJSON() ([]byte, error) {
 	if s.Schema != nil {
 		return json.Marshal(s.Schema)
@@ -59,6 +87,39 @@ func (s *JSONSchemaPropsOrBool) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
+func (s JSONSchemaPropsOrBool) MarshalCBOR() ([]byte, error) {
+	if s.Schema != nil {
+		return cbor.Marshal(s.Schema)
+	}
+	return cbor.Marshal(s.Allows)
+}
+
+func (s *JSONSchemaPropsOrBool) UnmarshalCBOR(data []byte) error {
+	switch {
+	case len(data) == 0:
+		// ideally we would avoid modifying *s here, but we are matching the behavior of UnmarshalJSON
+		*s = JSONSchemaPropsOrBool{}
+		return nil
+	case cborType(data[0]) == cborMap:
+		var p JSONSchemaProps
+		if err := cbor.Unmarshal(data, &p); err != nil {
+			return err
+		}
+		*s = JSONSchemaPropsOrBool{Allows: true, Schema: &p}
+		return nil
+	case data[0] == cborTrueValue:
+		*s = JSONSchemaPropsOrBool{Allows: true}
+		return nil
+	case data[0] == cborFalseValue:
+		*s = JSONSchemaPropsOrBool{Allows: false}
+		return nil
+	default:
+		// ideally, this case would not also capture a null input value,
+		// but we are matching the behavior of UnmarshalJSON
+		return errors.New("boolean or JSON schema expected")
+	}
+}
+
 func (s JSONSchemaPropsOrStringArray) MarshalJSON() ([]byte, error) {
 	if len(s.Property) > 0 {
 		return json.Marshal(s.Property)
@@ -91,6 +152,40 @@ func (s *JSONSchemaPropsOrStringArray) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
+func (s JSONSchemaPropsOrStringArray) MarshalCBOR() ([]byte, error) {
+	if len(s.Property) > 0 {
+		return cbor.Marshal(s.Property)
+	}
+	if s.Schema != nil {
+		return cbor.Marshal(s.Schema)
+	}
+	return cbor.Marshal(nil)
+}
+
+func (s *JSONSchemaPropsOrStringArray) UnmarshalCBOR(data []byte) error {
+	if len(data) > 0 && cborType(data[0]) == cborArray {
+		var a []string
+		if err := cbor.Unmarshal(data, &a); err != nil {
+			return err
+		}
+		*s = JSONSchemaPropsOrStringArray{Property: a}
+		return nil
+	}
+	if len(data) > 0 && cborType(data[0]) == cborMap {
+		var p JSONSchemaProps
+		if err := cbor.Unmarshal(data, &p); err != nil {
+			return err
+		}
+		*s = JSONSchemaPropsOrStringArray{Schema: &p}
+		return nil
+	}
+	// At this point we either have: empty data, a null value, or an
+	// unexpected type. In order to match the behavior of the existing
+	// UnmarshalJSON, no error is returned and *s is overwritten here.
+	*s = JSONSchemaPropsOrStringArray{}
+	return nil
+}
+
 func (s JSONSchemaPropsOrArray) MarshalJSON() ([]byte, error) {
 	if len(s.JSONSchemas) > 0 {
 		return json.Marshal(s.JSONSchemas)
@@ -120,6 +215,37 @@ func (s *JSONSchemaPropsOrArray) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
+func (s JSONSchemaPropsOrArray) MarshalCBOR() ([]byte, error) {
+	if len(s.JSONSchemas) > 0 {
+		return cbor.Marshal(s.JSONSchemas)
+	}
+	return cbor.Marshal(s.Schema)
+}
+
+func (s *JSONSchemaPropsOrArray) UnmarshalCBOR(data []byte) error {
+	if len(data) > 0 && cborType(data[0]) == cborMap {
+		var p JSONSchemaProps
+		if err := cbor.Unmarshal(data, &p); err != nil {
+			return err
+		}
+		*s = JSONSchemaPropsOrArray{Schema: &p}
+		return nil
+	}
+	if len(data) > 0 && cborType(data[0]) == cborArray {
+		var a []JSONSchemaProps
+		if err := cbor.Unmarshal(data, &a); err != nil {
+			return err
+		}
+		*s = JSONSchemaPropsOrArray{JSONSchemas: a}
+		return nil
+	}
+	// At this point we either have: empty data, a null value, or an
+	// unexpected type. In order to match the behavior of the existing
+	// UnmarshalJSON, no error is returned and *s is overwritten here.
+	*s = JSONSchemaPropsOrArray{}
+	return nil
+}
+
 func (s JSON) MarshalJSON() ([]byte, error) {
 	if len(s.Raw) > 0 {
 		return s.Raw, nil
@@ -134,3 +260,34 @@ func (s *JSON) UnmarshalJSON(data []byte) error {
 	}
 	return nil
 }
+
+func (s JSON) MarshalCBOR() ([]byte, error) {
+	// Note that non-semantic whitespace is lost during the transcoding performed here.
+// We do not foresee this being a problem given the current known uses of this type.
+	// Other limitations that arise when roundtripping JSON via dynamic clients also apply
+	// here, for example: insignificant whitespace handling, number handling, and map key ordering.
+	if len(s.Raw) == 0 {
+		return []byte{cborNullValue}, nil
+	}
+	var u any
+	if err := json.Unmarshal(s.Raw, &u); err != nil {
+		return nil, err
+	}
+	return cbor.Marshal(u)
+}
+
+func (s *JSON) UnmarshalCBOR(data []byte) error {
+	if len(data) == 0 || data[0] == cborNullValue {
+		return nil
+	}
+	var u any
+	if err := cbor.Unmarshal(data, &u); err != nil {
+		return err
+	}
+	raw, err := json.Marshal(u)
+	if err != nil {
+		return err
+	}
+	s.Raw = raw
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/cel_validation.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/cel_validation.go
new file mode 100644
index 0000000000..a03615c28e
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/cel_validation.go
@@ -0,0 +1,336 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+	"math"
+	"sort"
+
+	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
+	structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/apiserver/pkg/cel"
+)
+
+// unbounded uses nil to represent an unbounded cardinality value.
+var unbounded *uint64 = nil
+
+// CELSchemaContext keeps track of data used by x-kubernetes-validations rules for a specific schema node.
+type CELSchemaContext struct {
+	// withinValidationRuleScope is true if the schema at the current level or above has x-kubernetes-validations rules. typeInfo
+	// should only be populated for schema nodes where this is true.
+	withinValidationRuleScope bool
+
+	// typeInfo is lazily loaded for schema nodes within validation rule scope and may be
+	// populated in one of two possible ways:
+	//   1. Using a typeInfoAccessor to access it from the parent's type info. This is a cheap operation and should be
+	//      used when a schema at a higher level already has type info.
+	//   2. Using a converter to construct type info from the jsonSchema. This is an expensive operation.
+	typeInfo *CELTypeInfo
+	// typeInfoErr is any cached error resulting from an attempt to lazily load typeInfo.
+	typeInfoErr error
+
+	// parent is the context of the parent schema node, or nil if this is the context for the root schema node.
+	parent *CELSchemaContext
+	// typeInfoAccessor provides a way to access the type info of this schema node from the parent CELSchemaContext.
+	// nil if not extraction is possible, or the parent is nil.
+	typeInfoAccessor typeInfoAccessor
+
+	// jsonSchema is the schema for this CELSchemaContext node. It must be non-nil.
+	jsonSchema *apiextensions.JSONSchemaProps
+	// converter converts a JSONSchemaProps to CELTypeInfo.
+	// Tests that check how many conversions are performed during CRD validation wrap defaultConverter
+	// with a converter that counts how many conversion operations are performed.
+	converter converter
+
+	// MaxCardinality represents a limit to the number of data elements that can exist for the current
+	// schema based on MaxProperties or MaxItems limits present on parent schemas. If all parent
+	// map and array schemas have MaxProperties or MaxItems limits declared, MaxCardinality is
+	// a uint64 pointer representing the product of these limits. If at least one parent map or list schema
+	// does not have a MaxProperties or MaxItems limit set, MaxCardinality is nil, indicating
+	// that the parent schemas offer no bound to the number of times a data element for the current
+	// schema can exist.
+	MaxCardinality *uint64
+	// TotalCost accumulates the x-kubernetes-validations estimated rule cost total for an entire custom resource
+	// definition. A single TotalCost is allocated for each CustomResourceDefinition and passed through the stack as the
+	// CustomResourceDefinition's OpenAPIv3 schema is recursively validated.
+	TotalCost *TotalCost
+}
+
+// TypeInfo returns the CELTypeInfo for this CELSchemaContext node.  Returns nil, nil if this CELSchemaContext is nil,
+// or if neither the current level nor any level above has x-kubernetes-validations rules. The returned type info is shared and
+// should not be modified by the caller.
+func (c *CELSchemaContext) TypeInfo() (*CELTypeInfo, error) {
+	if c == nil || !c.withinValidationRuleScope {
+		return nil, nil
+	}
+	if c.typeInfo != nil || c.typeInfoErr != nil {
+		return c.typeInfo, c.typeInfoErr // return already computed result if available
+	}
+
+	// If able to get the type info from the parent's type info, prefer this approach
+	// since it is more efficient.
+	if c.parent != nil {
+		parentTypeInfo, parentErr := c.parent.TypeInfo()
+		if parentErr != nil {
+			c.typeInfoErr = parentErr
+			return nil, parentErr
+		}
+		if parentTypeInfo != nil && c.typeInfoAccessor != nil {
+			c.typeInfo = c.typeInfoAccessor.accessTypeInfo(parentTypeInfo)
+			if c.typeInfo != nil {
+				return c.typeInfo, nil
+			}
+		}
+	}
+	// If unable to get the type info from the parent, convert the jsonSchema to type info.
+	// This is expensive for large schemas.
+	c.typeInfo, c.typeInfoErr = c.converter(c.jsonSchema, c.parent == nil || c.jsonSchema.XEmbeddedResource)
+	return c.typeInfo, c.typeInfoErr
+}
+
+// CELTypeInfo represents all the typeInfo needed by CEL to compile x-kubernetes-validations rules for a schema node.
+type CELTypeInfo struct {
+	// Schema is a structural schema for this CELSchemaContext node. It must be non-nil.
+	Schema *structuralschema.Structural
+	// DeclType is a CEL declaration representation of Schema of this CELSchemaContext node. It must be non-nil.
+	DeclType *cel.DeclType
+}
+
+// converter converts from JSON schema to a structural schema and a CEL declType, or returns an error if the conversion
+// fails. This should be defaultConverter except in tests where it is useful to wrap it with a converter that tracks
+// how many conversions have been performed.
+type converter func(schema *apiextensions.JSONSchemaProps, isRoot bool) (*CELTypeInfo, error)
+
+func defaultConverter(schema *apiextensions.JSONSchemaProps, isRoot bool) (*CELTypeInfo, error) {
+	structural, err := structuralschema.NewStructural(schema)
+	if err != nil {
+		return nil, err
+	}
+	declType := model.SchemaDeclType(structural, isRoot)
+	if declType == nil {
+		return nil, fmt.Errorf("unable to convert structural schema to CEL declarations")
+	}
+	return &CELTypeInfo{structural, declType}, nil
+}
+
+// RootCELContext constructs CELSchemaContext for the given root schema.
+func RootCELContext(schema *apiextensions.JSONSchemaProps) *CELSchemaContext {
+	rootCardinality := uint64(1)
+	r := &CELSchemaContext{
+		jsonSchema:                schema,
+		withinValidationRuleScope: len(schema.XValidations) > 0,
+		MaxCardinality:            &rootCardinality,
+		TotalCost:                 &TotalCost{},
+		converter:                 defaultConverter,
+	}
+	return r
+}
+
+// ChildPropertyContext returns nil if this CELSchemaContext is nil; otherwise it constructs and returns a
+// CELSchemaContext for propertyName.
+func (c *CELSchemaContext) ChildPropertyContext(propSchema *apiextensions.JSONSchemaProps, propertyName string) *CELSchemaContext {
+	if c == nil {
+		return nil
+	}
+	return c.childContext(propSchema, propertyTypeInfoAccessor{propertyName: propertyName})
+}
+
+// ChildAdditionalPropertiesContext returns nil if this CELSchemaContext is nil; otherwise it constructs and returns
+// a CELSchemaContext for the properties of an object if this CELSchemaContext is an object.
+// schema must be non-nil and have a non-nil schema.AdditionalProperties.
+func (c *CELSchemaContext) ChildAdditionalPropertiesContext(propsSchema *apiextensions.JSONSchemaProps) *CELSchemaContext {
+	if c == nil {
+		return nil
+	}
+	return c.childContext(propsSchema, additionalItemsTypeInfoAccessor{})
+}
+
+// ChildItemsContext returns nil if this CELSchemaContext is nil; otherwise it constructs and returns a CELSchemaContext
+// for the items of an array if this CELSchemaContext is an array.
+func (c *CELSchemaContext) ChildItemsContext(itemsSchema *apiextensions.JSONSchemaProps) *CELSchemaContext {
+	if c == nil {
+		return nil
+	}
+	return c.childContext(itemsSchema, itemsTypeInfoAccessor{})
+}
+
+// childContext constructs a new CELSchemaContext for the
+// given child schema of the current schema context.
+// accessor optionally provides a way to access CELTypeInfo of the child from the current schema context's CELTypeInfo.
+// childContext returns a CELSchemaContext where the MaxCardinality is multiplied by the
+// factor that the schema increases the cardinality of its children. If the CELSchemaContext's
+// MaxCardinality is unbounded (nil) or the factor by which the schema increases the cardinality
+// is unbounded, the resulting CELSchemaContext's MaxCardinality is also unbounded.
+func (c *CELSchemaContext) childContext(child *apiextensions.JSONSchemaProps, accessor typeInfoAccessor) *CELSchemaContext {
+	result := &CELSchemaContext{
+		parent:                    c,
+		typeInfoAccessor:          accessor,
+		withinValidationRuleScope: c.withinValidationRuleScope,
+		TotalCost:                 c.TotalCost,
+		MaxCardinality:            unbounded,
+		converter:                 c.converter,
+	}
+	if child != nil {
+		result.jsonSchema = child
+		if len(child.XValidations) > 0 {
+			result.withinValidationRuleScope = true
+		}
+	}
+	if c.jsonSchema == nil {
+		// nil schemas can be passed since we call ChildSchemaContext
+		// before ValidateCustomResourceDefinitionOpenAPISchema performs its nil check
+		return result
+	}
+	if c.MaxCardinality == unbounded {
+		return result
+	}
+	maxElements := extractMaxElements(c.jsonSchema)
+	if maxElements == unbounded {
+		return result
+	}
+	result.MaxCardinality = uint64ptr(multiplyWithOverflowGuard(*c.MaxCardinality, *maxElements))
+	return result
+}
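+
+// Editorial sketch of the overflow-guarded multiplication referenced above;
+// the real multiplyWithOverflowGuard is defined elsewhere in this package and
+// may differ. The key property is saturating at MaxUint64 instead of wrapping.
+func multiplySaturating(a, b uint64) uint64 {
+	if a != 0 && b > math.MaxUint64/a {
+		return math.MaxUint64
+	}
+	return a * b
+}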
+
+type typeInfoAccessor interface {
+	// accessTypeInfo looks up type information for a child schema from a non-nil parentTypeInfo and returns it,
+	// or returns nil if the child schema information is not accessible. For example, a nil
+	// return value is expected when a property name is unescapable in CEL.
+	// The caller MUST ensure the provided parentTypeInfo is non-nil.
+	accessTypeInfo(parentTypeInfo *CELTypeInfo) *CELTypeInfo
+}
+
+type propertyTypeInfoAccessor struct {
+	// propertyName is the property name in the parent schema that this schema is declared at.
+	propertyName string
+}
+
+func (c propertyTypeInfoAccessor) accessTypeInfo(parentTypeInfo *CELTypeInfo) *CELTypeInfo {
+	if parentTypeInfo.Schema.Properties != nil {
+		propSchema := parentTypeInfo.Schema.Properties[c.propertyName]
+		if escapedPropName, ok := cel.Escape(c.propertyName); ok {
+			if fieldDeclType, ok := parentTypeInfo.DeclType.Fields[escapedPropName]; ok {
+				return &CELTypeInfo{Schema: &propSchema, DeclType: fieldDeclType.Type}
+			} // else fields with unknown types are omitted from CEL validation entirely
+		} // fields with unescapable names are expected to be absent
+	}
+	return nil
+}
+
+type itemsTypeInfoAccessor struct{}
+
+func (c itemsTypeInfoAccessor) accessTypeInfo(parentTypeInfo *CELTypeInfo) *CELTypeInfo {
+	if parentTypeInfo.Schema.Items != nil {
+		itemsSchema := parentTypeInfo.Schema.Items
+		itemsDeclType := parentTypeInfo.DeclType.ElemType
+		return &CELTypeInfo{Schema: itemsSchema, DeclType: itemsDeclType}
+	}
+	return nil
+}
+
+type additionalItemsTypeInfoAccessor struct{}
+
+func (c additionalItemsTypeInfoAccessor) accessTypeInfo(parentTypeInfo *CELTypeInfo) *CELTypeInfo {
+	if parentTypeInfo.Schema.AdditionalProperties != nil {
+		propsSchema := parentTypeInfo.Schema.AdditionalProperties.Structural
+		valuesDeclType := parentTypeInfo.DeclType.ElemType
+		return &CELTypeInfo{Schema: propsSchema, DeclType: valuesDeclType}
+	}
+	return nil
+}
+
+// TotalCost tracks the total cost of evaluating all the x-kubernetes-validations rules of a CustomResourceDefinition.
+type TotalCost struct {
+	// Total accumulates the x-kubernetes-validations estimated rule cost total.
+	Total uint64
+	// MostExpensive accumulates the top 4 most expensive rules contributing to the Total. Only rules
+	// that cost at least 1% of the total cost limit are included.
+	MostExpensive []RuleCost
+}
+
+// ObserveExpressionCost accumulates the cost of evaluating an x-kubernetes-validations rule.
+func (c *TotalCost) ObserveExpressionCost(path *field.Path, cost uint64) {
+	if math.MaxUint64-c.Total < cost {
+		c.Total = math.MaxUint64
+	} else {
+		c.Total += cost
+	}
+
+	if cost < StaticEstimatedCRDCostLimit/100 { // ignore rules that contribute < 1% of total cost limit
+		return
+	}
+	c.MostExpensive = append(c.MostExpensive, RuleCost{Path: path, Cost: cost})
+	sort.Slice(c.MostExpensive, func(i, j int) bool {
+		// sort in descending order so the most expensive rule is first
+		return c.MostExpensive[i].Cost > c.MostExpensive[j].Cost
+	})
+	if len(c.MostExpensive) > 4 {
+		c.MostExpensive = c.MostExpensive[:4]
+	}
+}
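+
+// Editorial sketch: Total saturates rather than wrapping, and a rule enters
+// MostExpensive only if it costs at least StaticEstimatedCRDCostLimit/100.
+func exampleObserveCost() uint64 {
+	tc := &TotalCost{Total: math.MaxUint64 - 10}
+	tc.ObserveExpressionCost(field.NewPath("spec"), 100) // too cheap to be tracked
+	return tc.Total // math.MaxUint64: the addition saturated
+}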
+
+// RuleCost represents the cost of evaluating a single x-kubernetes-validations rule.
+type RuleCost struct {
+	Path *field.Path
+	Cost uint64
+}
+
+// extractMaxElements returns the factor by which the schema increases the cardinality
+// (number of possible data elements) of its children. If the schema is a map with
+// MaxProperties or an array with MaxItems, a pointer to the max value is returned.
+// If schema is a map or array and does not have MaxProperties or MaxItems,
+// unbounded (nil) is returned to indicate that there is no limit to the possible
+// number of data elements imposed by the current schema.  If the schema is an object, 1 is
+// returned to indicate that there is no increase to the number of possible data elements
+// for its children.  Primitives do not have children, but 1 is returned for simplicity.
+func extractMaxElements(schema *apiextensions.JSONSchemaProps) *uint64 {
+	switch schema.Type {
+	case "object":
+		if schema.AdditionalProperties != nil {
+			if schema.MaxProperties != nil {
+				maxProps := uint64(zeroIfNegative(*schema.MaxProperties))
+				return &maxProps
+			}
+			return unbounded
+		}
+		// return 1 to indicate that all fields of an object exist at most one for
+		// each occurrence of the object they are fields of
+		return uint64ptr(1)
+	case "array":
+		if schema.MaxItems != nil {
+			maxItems := uint64(zeroIfNegative(*schema.MaxItems))
+			return &maxItems
+		}
+		return unbounded
+	default:
+		return uint64ptr(1)
+	}
+}
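+
+// Editorial sketch: cardinality factors produced by extractMaxElements for a
+// few representative schemas.
+func exampleMaxElements() []*uint64 {
+	ten := int64(10)
+	return []*uint64{
+		extractMaxElements(&apiextensions.JSONSchemaProps{Type: "array", MaxItems: &ten}), // *10
+		extractMaxElements(&apiextensions.JSONSchemaProps{Type: "array"}),                 // unbounded (nil)
+		extractMaxElements(&apiextensions.JSONSchemaProps{Type: "object"}),                // *1: struct-like object
+	}
+}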
+
+func zeroIfNegative(v int64) int64 {
+	if v < 0 {
+		return 0
+	}
+	return v
+}
+
+func uint64ptr(i uint64) *uint64 {
+	return &i
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go
new file mode 100644
index 0000000000..fa3a1cbe91
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go
@@ -0,0 +1,1891 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"reflect"
+	"regexp"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+
+	celgo "github.com/google/cel-go/cel"
+	"k8s.io/apiextensions-apiserver/pkg/apihelpers"
+	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+	structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel"
+	structuraldefaulting "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting"
+	apiservervalidation "k8s.io/apiextensions-apiserver/pkg/apiserver/validation"
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	genericvalidation "k8s.io/apimachinery/pkg/api/validation"
+	"k8s.io/apimachinery/pkg/util/sets"
+	utilvalidation "k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	celconfig "k8s.io/apiserver/pkg/apis/cel"
+	apiservercel "k8s.io/apiserver/pkg/cel"
+	"k8s.io/apiserver/pkg/cel/environment"
+	"k8s.io/apiserver/pkg/util/webhook"
+)
+
+var (
+	printerColumnDatatypes                = sets.NewString("integer", "number", "string", "boolean", "date")
+	customResourceColumnDefinitionFormats = sets.NewString("int32", "int64", "float", "double", "byte", "date", "date-time", "password")
+	openapiV3Types                        = sets.NewString("string", "number", "integer", "boolean", "array", "object")
+)
+
+const (
+	// StaticEstimatedCostLimit represents the largest-allowed static CEL cost on a per-expression basis.
+	StaticEstimatedCostLimit = 10000000
+	// StaticEstimatedCRDCostLimit represents the largest-allowed total cost for the x-kubernetes-validations rules of a CRD.
+	StaticEstimatedCRDCostLimit = 100000000
+
+	MaxSelectableFields = 8
+)
+
+var supportedValidationReason = sets.NewString(
+	string(apiextensions.FieldValueRequired),
+	string(apiextensions.FieldValueForbidden),
+	string(apiextensions.FieldValueInvalid),
+	string(apiextensions.FieldValueDuplicate),
+)
+
+// ValidateCustomResourceDefinition statically validates a CustomResourceDefinition.
+// The context is passed to support cancellation during CEL validation of defaults.
+func ValidateCustomResourceDefinition(ctx context.Context, obj *apiextensions.CustomResourceDefinition) field.ErrorList {
+	nameValidationFn := func(name string, prefix bool) []string {
+		ret := genericvalidation.NameIsDNSSubdomain(name, prefix)
+		requiredName := obj.Spec.Names.Plural + "." + obj.Spec.Group
+		if name != requiredName {
+			ret = append(ret, `must be spec.names.plural+"."+spec.group`)
+		}
+		return ret
+	}
+
+	opts := validationOptions{
+		allowDefaults:                            true,
+		requireRecognizedConversionReviewVersion: true,
+		requireImmutableNames:                    false,
+		requireOpenAPISchema:                     true,
+		requireValidPropertyType:                 true,
+		requireStructuralSchema:                  true,
+		requirePrunedDefaults:                    true,
+		requireAtomicSetType:                     true,
+		requireMapListKeysMapSetValidation:       true,
+		// strictCost is always true to enforce cost limits.
+		celEnvironmentSet: environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true),
+		// allowInvalidCABundle is set to true since the CRD is not established yet.
+		allowInvalidCABundle: true,
+	}
+
+	allErrs := genericvalidation.ValidateObjectMeta(&obj.ObjectMeta, false, nameValidationFn, field.NewPath("metadata"))
+	allErrs = append(allErrs, validateCustomResourceDefinitionSpec(ctx, &obj.Spec, opts, field.NewPath("spec"))...)
+	allErrs = append(allErrs, ValidateCustomResourceDefinitionStatus(&obj.Status, field.NewPath("status"))...)
+	allErrs = append(allErrs, ValidateCustomResourceDefinitionStoredVersions(obj.Status.StoredVersions, obj.Spec.Versions, field.NewPath("status").Child("storedVersions"))...)
+	allErrs = append(allErrs, validateAPIApproval(obj, nil)...)
+	allErrs = append(allErrs, validatePreserveUnknownFields(obj, nil)...)
+	return allErrs
+}
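+
+// Editorial sketch: the object name check above means the only accepted
+// metadata.name is "<plural>.<group>", e.g. "widgets.example.com" for
+// plural "widgets" in group "example.com".
+func expectedCRDName(spec *apiextensions.CustomResourceDefinitionSpec) string {
+	return spec.Names.Plural + "." + spec.Group
+}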
+
+// validationOptions groups several validation options, to avoid passing multiple bool parameters to methods
+type validationOptions struct {
+	// allowDefaults permits the validation schema to contain default attributes
+	allowDefaults bool
+	// disallowDefaultsReason gives a reason as to why allowDefaults is false (for better user feedback)
+	disallowDefaultsReason string
+	// requireRecognizedConversionReviewVersion requires accepted webhook conversion versions to contain a recognized version
+	requireRecognizedConversionReviewVersion bool
+	// requireImmutableNames disables changing spec.names
+	requireImmutableNames bool
+	// requireOpenAPISchema requires an openapi V3 schema be specified
+	requireOpenAPISchema bool
+	// requireValidPropertyType requires property types specified in the validation schema to be valid openapi v3 types
+	requireValidPropertyType bool
+	// requireStructuralSchema indicates that any schemas present must be structural
+	requireStructuralSchema bool
+	// requirePrunedDefaults indicates that defaults must be pruned
+	requirePrunedDefaults bool
+	// requireAtomicSetType indicates that the items type for a x-kubernetes-list-type=set list must be atomic.
+	requireAtomicSetType bool
+	// requireMapListKeysMapSetValidation indicates that:
+	// 1. For x-kubernetes-list-type=map list, key fields are not nullable, and are required or have a default
+	// 2. For x-kubernetes-list-type=map or x-kubernetes-list-type=set list, the whole item must not be nullable.
+	requireMapListKeysMapSetValidation bool
+	// preexistingExpressions tracks which CEL expressions existed in an object before an update. May be nil for create.
+	preexistingExpressions preexistingExpressions
+	// versionsWithUnchangedSchemas tracks the names of versions whose schemas are unchanged when updating a CRD.
+	// Does not apply to creation or deletion.
+	// Some checks use this to avoid rejecting previously accepted versions due to a control plane upgrade/downgrade.
+	versionsWithUnchangedSchemas sets.Set[string]
+	// suppressPerExpressionCost indicates whether CEL per-expression cost limit should be suppressed.
+	// It will be automatically set during Versions validation if the version is in versionsWithUnchangedSchemas.
+	suppressPerExpressionCost bool
+
+	celEnvironmentSet *environment.EnvSet
+	// allowInvalidCABundle allows an invalid conversion webhook CABundle on update only if the existing CABundle is invalid.
+	// An invalid CABundle is also permitted on create and before a CRD is in an Established=True condition.
+	allowInvalidCABundle bool
+}
+
+type preexistingExpressions struct {
+	rules              sets.Set[string]
+	messageExpressions sets.Set[string]
+}
+
+func (pe preexistingExpressions) RuleEnv(envSet *environment.EnvSet, expression string) *celgo.Env {
+	if pe.rules.Has(expression) {
+		return envSet.StoredExpressionsEnv()
+	}
+	return envSet.NewExpressionsEnv()
+}
+
+func (pe preexistingExpressions) MessageExpressionEnv(envSet *environment.EnvSet, expression string) *celgo.Env {
+	if pe.messageExpressions.Has(expression) {
+		return envSet.StoredExpressionsEnv()
+	}
+	return envSet.NewExpressionsEnv()
+}
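+
+// Illustrative note (a sketch, not upstream code): the two methods above implement expression
+// ratcheting. A rule that already existed on the stored object compiles against the more
+// permissive StoredExpressionsEnv, so persisted CRDs keep validating across upgrades, while
+// new rules get the stricter NewExpressionsEnv:
+//
+//	pe := preexistingExpressions{rules: sets.New("self.x > 0")}
+//	env := pe.RuleEnv(envSet, "self.x > 0") // stored env: rule predates this update
+//	env = pe.RuleEnv(envSet, "self.y > 0")  // new env: rule is introduced by this update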
+
+func findPreexistingExpressions(spec *apiextensions.CustomResourceDefinitionSpec) preexistingExpressions {
+	expressions := preexistingExpressions{rules: sets.New[string](), messageExpressions: sets.New[string]()}
+	if spec.Validation != nil && spec.Validation.OpenAPIV3Schema != nil {
+		findPreexistingExpressionsInSchema(spec.Validation.OpenAPIV3Schema, expressions)
+	}
+	for _, v := range spec.Versions {
+		if v.Schema != nil && v.Schema.OpenAPIV3Schema != nil {
+			findPreexistingExpressionsInSchema(v.Schema.OpenAPIV3Schema, expressions)
+		}
+	}
+	return expressions
+}
+
+func findPreexistingExpressionsInSchema(schema *apiextensions.JSONSchemaProps, expressions preexistingExpressions) {
+	SchemaHas(schema, func(s *apiextensions.JSONSchemaProps) bool {
+		for _, v := range s.XValidations {
+			expressions.rules.Insert(v.Rule)
+			if len(v.MessageExpression) > 0 {
+				expressions.messageExpressions.Insert(v.MessageExpression)
+			}
+		}
+		return false
+	})
+}
+
+// findVersionsWithUnchangedSchemas compares each version present in the new CRD object against the old CRD object
+// and returns the set of names of versions whose schemas are unchanged.
+// This function does not check for duplicated versions, a top-level version missing from versions, or coexistence of
+// top-level and per-version schemas, as further validations check for these problems.
+func findVersionsWithUnchangedSchemas(obj, oldObject *apiextensions.CustomResourceDefinition) sets.Set[string] {
+	versionsWithUnchangedSchemas := sets.New[string]()
+	for _, version := range obj.Spec.Versions {
+		newSchema, err := apiextensions.GetSchemaForVersion(obj, version.Name)
+		if err != nil {
+			continue
+		}
+		oldSchema, err := apiextensions.GetSchemaForVersion(oldObject, version.Name)
+		if err != nil {
+			continue
+		}
+		if apiequality.Semantic.DeepEqual(newSchema, oldSchema) {
+			versionsWithUnchangedSchemas.Insert(version.Name)
+		}
+	}
+	return versionsWithUnchangedSchemas
+}
+
+// suppressExpressionCostForUnchangedSchema returns a copy of opts with suppressPerExpressionCost set to true if
+// the specified version's schema is unchanged.
+func suppressExpressionCostForUnchangedSchema(opts validationOptions, version string) validationOptions {
+	if opts.versionsWithUnchangedSchemas.Has(version) {
+		opts.suppressPerExpressionCost = true
+	}
+	return opts
+}
+
+// ValidateCustomResourceDefinitionUpdate statically validates an update to a CustomResourceDefinition.
+// The context is passed to support cancellation of CEL validation while defaults are validated.
+func ValidateCustomResourceDefinitionUpdate(ctx context.Context, obj, oldObj *apiextensions.CustomResourceDefinition) field.ErrorList {
+	opts := validationOptions{
+		allowDefaults:                            true,
+		requireRecognizedConversionReviewVersion: oldObj.Spec.Conversion == nil || hasValidConversionReviewVersionOrEmpty(oldObj.Spec.Conversion.ConversionReviewVersions),
+		requireImmutableNames:                    apiextensions.IsCRDConditionTrue(oldObj, apiextensions.Established),
+		requireOpenAPISchema:                     requireOpenAPISchema(&oldObj.Spec),
+		requireValidPropertyType:                 requireValidPropertyType(&oldObj.Spec),
+		requireStructuralSchema:                  requireStructuralSchema(&oldObj.Spec),
+		requirePrunedDefaults:                    requirePrunedDefaults(&oldObj.Spec),
+		requireAtomicSetType:                     requireAtomicSetType(&oldObj.Spec),
+		requireMapListKeysMapSetValidation:       requireMapListKeysMapSetValidation(&oldObj.Spec),
+		preexistingExpressions:                   findPreexistingExpressions(&oldObj.Spec),
+		versionsWithUnchangedSchemas:             findVersionsWithUnchangedSchemas(obj, oldObj),
+		// strictCost is always true to enforce cost limits.
+		celEnvironmentSet:    environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true),
+		allowInvalidCABundle: allowInvalidCABundle(oldObj),
+	}
+	return validateCustomResourceDefinitionUpdate(ctx, obj, oldObj, opts)
+}
+
+func validateCustomResourceDefinitionUpdate(ctx context.Context, obj, oldObj *apiextensions.CustomResourceDefinition, opts validationOptions) field.ErrorList {
+	allErrs := genericvalidation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))
+	allErrs = append(allErrs, validateCustomResourceDefinitionSpecUpdate(ctx, &obj.Spec, &oldObj.Spec, opts, field.NewPath("spec"))...)
+	allErrs = append(allErrs, ValidateCustomResourceDefinitionStatus(&obj.Status, field.NewPath("status"))...)
+	allErrs = append(allErrs, ValidateCustomResourceDefinitionStoredVersions(obj.Status.StoredVersions, obj.Spec.Versions, field.NewPath("status").Child("storedVersions"))...)
+	allErrs = append(allErrs, validateAPIApproval(obj, oldObj)...)
+	allErrs = append(allErrs, validatePreserveUnknownFields(obj, oldObj)...)
+	return allErrs
+}
+
+// ValidateCustomResourceDefinitionStoredVersions statically validates the status.storedVersions of a CustomResourceDefinition.
+func ValidateCustomResourceDefinitionStoredVersions(storedVersions []string, versions []apiextensions.CustomResourceDefinitionVersion, fldPath *field.Path) field.ErrorList {
+	if len(storedVersions) == 0 {
+		return field.ErrorList{field.Invalid(fldPath, storedVersions, "must have at least one stored version")}
+	}
+	allErrs := field.ErrorList{}
+	storedVersionsMap := map[string]int{}
+	for i, v := range storedVersions {
+		storedVersionsMap[v] = i
+	}
+	for _, v := range versions {
+		_, ok := storedVersionsMap[v.Name]
+		if v.Storage && !ok {
+			allErrs = append(allErrs, field.Invalid(fldPath, storedVersions, "must have the storage version "+v.Name))
+		}
+		if ok {
+			delete(storedVersionsMap, v.Name)
+		}
+	}
+
+	for v, i := range storedVersionsMap {
+		allErrs = append(allErrs, field.Invalid(fldPath.Index(i), v, "must appear in spec.versions"))
+	}
+
+	return allErrs
+}
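+
+// Illustrative sketch (assumed inputs, not upstream code): given spec.versions containing
+// v1beta1 (storage) and v1, both error branches above can fire:
+//
+//	errs := ValidateCustomResourceDefinitionStoredVersions(
+//		[]string{"v1alpha1"}, // absent from spec.versions -> "must appear in spec.versions"
+//		versions,             // storage version v1beta1   -> "must have the storage version v1beta1"
+//		field.NewPath("status", "storedVersions"))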
+
+// ValidateUpdateCustomResourceDefinitionStatus statically validates a status update to a CustomResourceDefinition.
+func ValidateUpdateCustomResourceDefinitionStatus(obj, oldObj *apiextensions.CustomResourceDefinition) field.ErrorList {
+	allErrs := genericvalidation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidateCustomResourceDefinitionStatus(&obj.Status, field.NewPath("status"))...)
+	return allErrs
+}
+
+// validateCustomResourceDefinitionVersion statically validates a single version of a CustomResourceDefinition.
+// The context is passed to support cancellation of CEL validation while defaults are validated.
+func validateCustomResourceDefinitionVersion(ctx context.Context, version *apiextensions.CustomResourceDefinitionVersion, fldPath *field.Path, statusEnabled bool, opts validationOptions) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for _, err := range validateDeprecationWarning(version.Deprecated, version.DeprecationWarning) {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("deprecationWarning"), version.DeprecationWarning, err))
+	}
+	opts = suppressExpressionCostForUnchangedSchema(opts, version.Name)
+	allErrs = append(allErrs, validateCustomResourceDefinitionValidation(ctx, version.Schema, statusEnabled, opts, fldPath.Child("schema"))...)
+	allErrs = append(allErrs, ValidateCustomResourceDefinitionSubresources(version.Subresources, fldPath.Child("subresources"))...)
+	for i := range version.AdditionalPrinterColumns {
+		allErrs = append(allErrs, ValidateCustomResourceColumnDefinition(&version.AdditionalPrinterColumns[i], fldPath.Child("additionalPrinterColumns").Index(i))...)
+	}
+
+	if len(version.SelectableFields) > 0 {
+		if version.Schema == nil || version.Schema.OpenAPIV3Schema == nil {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("selectableFields"), "", "selectableFields may only be set when version.schema.openAPIV3Schema is not included"))
+		} else {
+			schema, err := structuralschema.NewStructural(version.Schema.OpenAPIV3Schema)
+			if err != nil {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("schema.openAPIV3Schema"), "", err.Error()))
+			}
+			allErrs = append(allErrs, ValidateCustomResourceSelectableFields(version.SelectableFields, schema, fldPath.Child("selectableFields"))...)
+		}
+	}
+	return allErrs
+}
+
+func validateDeprecationWarning(deprecated bool, deprecationWarning *string) []string {
+	if !deprecated && deprecationWarning != nil {
+		return []string{"can only be set for deprecated versions"}
+	}
+	if deprecationWarning == nil {
+		return nil
+	}
+	var errors []string
+	if len(*deprecationWarning) > 256 {
+		errors = append(errors, "must be <= 256 characters long")
+	}
+	if len(*deprecationWarning) == 0 {
+		errors = append(errors, "must not be an empty string")
+	}
+	for i, r := range *deprecationWarning {
+		if !unicode.IsPrint(r) {
+			errors = append(errors, fmt.Sprintf("must only contain printable UTF-8 characters; non-printable character found at index %d", i))
+			break
+		}
+		if unicode.IsControl(r) {
+			errors = append(errors, fmt.Sprintf("must only contain printable UTF-8 characters; control character found at index %d", i))
+			break
+		}
+	}
+	if !utf8.ValidString(*deprecationWarning) {
+		errors = append(errors, "must only contain printable UTF-8 characters")
+	}
+	return errors
+}
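+
+// Illustrative sketch (not upstream code): a deprecation warning is rejected on a
+// non-deprecated version and otherwise checked for length and printability:
+//
+//	warning := "use v1 instead"
+//	validateDeprecationWarning(false, &warning) // ["can only be set for deprecated versions"]
+//	validateDeprecationWarning(true, &warning)  // nil: non-empty, <= 256 chars, printable UTF-8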
+
+// validateCustomResourceDefinitionSpec statically validates a CRD spec.
+// The context is passed to support cancellation of CEL validation while defaults are validated.
+func validateCustomResourceDefinitionSpec(ctx context.Context, spec *apiextensions.CustomResourceDefinitionSpec, opts validationOptions, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(spec.Group) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("group"), ""))
+	} else if errs := utilvalidation.IsDNS1123Subdomain(spec.Group); len(errs) > 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("group"), spec.Group, strings.Join(errs, ",")))
+	} else if len(strings.Split(spec.Group, ".")) < 2 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("group"), spec.Group, "should be a domain with at least one dot"))
+	}
+
+	allErrs = append(allErrs, validateEnumStrings(fldPath.Child("scope"), string(spec.Scope), []string{string(apiextensions.ClusterScoped), string(apiextensions.NamespaceScoped)}, true)...)
+
+	// enabling pruning requires structural schemas
+	if spec.PreserveUnknownFields == nil || !*spec.PreserveUnknownFields {
+		opts.requireStructuralSchema = true
+	}
+
+	if opts.requireOpenAPISchema {
+		// check that either a global schema or versioned schemas are set in all versions
+		if spec.Validation == nil || spec.Validation.OpenAPIV3Schema == nil {
+			for i, v := range spec.Versions {
+				if v.Schema == nil || v.Schema.OpenAPIV3Schema == nil {
+					allErrs = append(allErrs, field.Required(fldPath.Child("versions").Index(i).Child("schema").Child("openAPIV3Schema"), "schemas are required"))
+				}
+			}
+		}
+	} else if spec.PreserveUnknownFields == nil || !*spec.PreserveUnknownFields {
+		// check that either a global schema or versioned schemas are set in served versions
+		if spec.Validation == nil || spec.Validation.OpenAPIV3Schema == nil {
+			for i, v := range spec.Versions {
+				schemaPath := fldPath.Child("versions").Index(i).Child("schema", "openAPIV3Schema")
+				if v.Served && (v.Schema == nil || v.Schema.OpenAPIV3Schema == nil) {
+					allErrs = append(allErrs, field.Required(schemaPath, "because otherwise all fields are pruned"))
+				}
+			}
+		}
+	}
+	if opts.allowDefaults && specHasDefaults(spec) {
+		opts.requireStructuralSchema = true
+		if spec.PreserveUnknownFields == nil || *spec.PreserveUnknownFields {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("preserveUnknownFields"), true, "must be false in order to use defaults in the schema"))
+		}
+	}
+	if specHasKubernetesExtensions(spec) {
+		opts.requireStructuralSchema = true
+	}
+
+	storageFlagCount := 0
+	versionsMap := map[string]bool{}
+	uniqueNames := true
+	for i, version := range spec.Versions {
+		if version.Storage {
+			storageFlagCount++
+		}
+		if versionsMap[version.Name] {
+			uniqueNames = false
+		} else {
+			versionsMap[version.Name] = true
+		}
+		if errs := utilvalidation.IsDNS1035Label(version.Name); len(errs) > 0 {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("versions").Index(i).Child("name"), spec.Versions[i].Name, strings.Join(errs, ",")))
+		}
+		subresources := getSubresourcesForVersion(spec, version.Name)
+		allErrs = append(allErrs, validateCustomResourceDefinitionVersion(ctx, &version, fldPath.Child("versions").Index(i), hasStatusEnabled(subresources), opts)...)
+	}
+
+	// The top-level and per-version fields are mutually exclusive
+	if spec.Validation != nil && hasPerVersionSchema(spec.Versions) {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("validation"), "top-level and per-version schemas are mutually exclusive"))
+	}
+	if spec.Subresources != nil && hasPerVersionSubresources(spec.Versions) {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("subresources"), "top-level and per-version subresources are mutually exclusive"))
+	}
+	if len(spec.AdditionalPrinterColumns) > 0 && hasPerVersionColumns(spec.Versions) {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("additionalPrinterColumns"), "top-level and per-version additionalPrinterColumns are mutually exclusive"))
+	}
+
+	// Per-version fields may not all be set to identical values (top-level field should be used instead)
+	if hasIdenticalPerVersionSchema(spec.Versions) {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("versions"), spec.Versions, "per-version schemas may not all be set to identical values (top-level validation should be used instead)"))
+	}
+	if hasIdenticalPerVersionSubresources(spec.Versions) {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("versions"), spec.Versions, "per-version subresources may not all be set to identical values (top-level subresources should be used instead)"))
+	}
+	if hasIdenticalPerVersionColumns(spec.Versions) {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("versions"), spec.Versions, "per-version additionalPrinterColumns may not all be set to identical values (top-level additionalPrinterColumns should be used instead)"))
+	}
+
+	if !uniqueNames {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("versions"), spec.Versions, "must contain unique version names"))
+	}
+	if storageFlagCount != 1 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("versions"), spec.Versions, "must have exactly one version marked as storage version"))
+	}
+	if len(spec.Version) != 0 {
+		if errs := utilvalidation.IsDNS1035Label(spec.Version); len(errs) > 0 {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("version"), spec.Version, strings.Join(errs, ",")))
+		}
+		if len(spec.Versions) >= 1 && spec.Versions[0].Name != spec.Version {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("version"), spec.Version, "must match the first version in spec.versions"))
+		}
+	}
+
+	// in addition to the basic name restrictions, some names are required for spec, but not for status
+	if len(spec.Names.Plural) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("names", "plural"), ""))
+	}
+	if len(spec.Names.Singular) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("names", "singular"), ""))
+	}
+	if len(spec.Names.Kind) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("names", "kind"), ""))
+	}
+	if len(spec.Names.ListKind) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("names", "listKind"), ""))
+	}
+
+	allErrs = append(allErrs, ValidateCustomResourceDefinitionNames(&spec.Names, fldPath.Child("names"))...)
+	allErrs = append(allErrs, validateCustomResourceDefinitionValidation(ctx, spec.Validation, hasAnyStatusEnabled(spec), suppressExpressionCostForUnchangedSchema(opts, spec.Version), fldPath.Child("validation"))...)
+	allErrs = append(allErrs, ValidateCustomResourceDefinitionSubresources(spec.Subresources, fldPath.Child("subresources"))...)
+
+	for i := range spec.AdditionalPrinterColumns {
+		if errs := ValidateCustomResourceColumnDefinition(&spec.AdditionalPrinterColumns[i], fldPath.Child("additionalPrinterColumns").Index(i)); len(errs) > 0 {
+			allErrs = append(allErrs, errs...)
+		}
+	}
+
+	if len(spec.SelectableFields) > 0 {
+		if spec.Validation == nil {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("selectableFields"), "", "selectableFields may only be set when validations.schema is included"))
+		} else {
+			schema, err := structuralschema.NewStructural(spec.Validation.OpenAPIV3Schema)
+			if err != nil {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("schema.openAPIV3Schema"), "", err.Error()))
+			}
+
+			allErrs = append(allErrs, ValidateCustomResourceSelectableFields(spec.SelectableFields, schema, fldPath.Child("selectableFields"))...)
+		}
+	}
+
+	if (spec.Conversion != nil && spec.Conversion.Strategy != apiextensions.NoneConverter) && (spec.PreserveUnknownFields == nil || *spec.PreserveUnknownFields) {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("conversion").Child("strategy"), spec.Conversion.Strategy, "must be None if spec.preserveUnknownFields is true"))
+	}
+	allErrs = append(allErrs, validateCustomResourceConversion(spec.Conversion, opts.requireRecognizedConversionReviewVersion, fldPath.Child("conversion"), opts)...)
+
+	return allErrs
+}
+
+func validateEnumStrings(fldPath *field.Path, value string, accepted []string, required bool) field.ErrorList {
+	if value == "" {
+		if required {
+			return field.ErrorList{field.Required(fldPath, "")}
+		}
+		return field.ErrorList{}
+	}
+	for _, a := range accepted {
+		if a == value {
+			return field.ErrorList{}
+		}
+	}
+	return field.ErrorList{field.NotSupported(fldPath, value, accepted)}
+}
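+
+// Illustrative sketch (not upstream code): validateEnumStrings backs the scope and conversion
+// strategy checks above, e.g.:
+//
+//	validateEnumStrings(field.NewPath("spec", "scope"), "Namespaced",
+//		[]string{"Cluster", "Namespaced"}, true) // no errors: value is accepted
+//	validateEnumStrings(field.NewPath("spec", "scope"), "",
+//		[]string{"Cluster", "Namespaced"}, true) // Required error: empty but required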
+
+// acceptedConversionReviewVersions contains the list of ConversionReview versions the *prior* version of the API server understands.
+// 1.15: server understands v1beta1; accepted versions are ["v1beta1"]
+// 1.16: server understands v1, v1beta1; accepted versions are ["v1beta1"]
+// 1.17+: server understands v1, v1beta1; accepted versions are ["v1","v1beta1"]
+var acceptedConversionReviewVersions = sets.NewString(apiextensionsv1.SchemeGroupVersion.Version, apiextensionsv1beta1.SchemeGroupVersion.Version)
+
+func isAcceptedConversionReviewVersion(v string) bool {
+	return acceptedConversionReviewVersions.Has(v)
+}
+
+func validateConversionReviewVersions(versions []string, requireRecognizedVersion bool, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(versions) < 1 {
+		allErrs = append(allErrs, field.Required(fldPath, ""))
+	} else {
+		seen := map[string]bool{}
+		hasAcceptedVersion := false
+		for i, v := range versions {
+			if seen[v] {
+				allErrs = append(allErrs, field.Invalid(fldPath.Index(i), v, "duplicate version"))
+				continue
+			}
+			seen[v] = true
+			for _, errString := range utilvalidation.IsDNS1035Label(v) {
+				allErrs = append(allErrs, field.Invalid(fldPath.Index(i), v, errString))
+			}
+			if isAcceptedConversionReviewVersion(v) {
+				hasAcceptedVersion = true
+			}
+		}
+		if requireRecognizedVersion && !hasAcceptedVersion {
+			allErrs = append(allErrs, field.Invalid(
+				fldPath, versions,
+				fmt.Sprintf("must include at least one of %v",
+					strings.Join(acceptedConversionReviewVersions.List(), ", "))))
+		}
+	}
+	return allErrs
+}
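+
+// Illustrative sketch (not upstream code): when a recognized version is required, a list of
+// well-formed but unrecognized versions still fails:
+//
+//	validateConversionReviewVersions([]string{"v2"}, true, fldPath)
+//	// -> "must include at least one of v1, v1beta1"
+//	validateConversionReviewVersions([]string{"v2", "v1"}, true, fldPath) // no errors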
+
+// allowInvalidCABundle returns true if an invalid CABundle may be specified; it is allowed
+// only if the existing CABundle is invalid or if the CRD is not established yet.
+func allowInvalidCABundle(oldCRD *apiextensions.CustomResourceDefinition) bool {
+	if !apiextensions.IsCRDConditionTrue(oldCRD, apiextensions.Established) {
+		return true
+	}
+	oldConversion := oldCRD.Spec.Conversion
+	if oldConversion == nil || oldConversion.WebhookClientConfig == nil ||
+		len(oldConversion.WebhookClientConfig.CABundle) == 0 {
+		return false
+	}
+	return len(webhook.ValidateCABundle(field.NewPath("caBundle"), oldConversion.WebhookClientConfig.CABundle)) > 0
+}
+
+// hasValidConversionReviewVersionOrEmpty returns true if there is a valid version or if the list is empty.
+func hasValidConversionReviewVersionOrEmpty(versions []string) bool {
+	if len(versions) < 1 {
+		return true
+	}
+	for _, v := range versions {
+		if isAcceptedConversionReviewVersion(v) {
+			return true
+		}
+	}
+	return false
+}
+
+func validateCustomResourceConversion(conversion *apiextensions.CustomResourceConversion, requireRecognizedVersion bool, fldPath *field.Path, opts validationOptions) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if conversion == nil {
+		return allErrs
+	}
+	allErrs = append(allErrs, validateEnumStrings(fldPath.Child("strategy"), string(conversion.Strategy), []string{string(apiextensions.NoneConverter), string(apiextensions.WebhookConverter)}, true)...)
+	if conversion.Strategy == apiextensions.WebhookConverter {
+		if conversion.WebhookClientConfig == nil {
+			allErrs = append(allErrs, field.Required(fldPath.Child("webhookClientConfig"), "required when strategy is set to Webhook"))
+		} else {
+			cc := conversion.WebhookClientConfig
+			switch {
+			case (cc.URL == nil) == (cc.Service == nil):
+				allErrs = append(allErrs, field.Required(fldPath.Child("webhookClientConfig"), "exactly one of url or service is required"))
+			case cc.URL != nil:
+				allErrs = append(allErrs, webhook.ValidateWebhookURL(fldPath.Child("webhookClientConfig").Child("url"), *cc.URL, true)...)
+			case cc.Service != nil:
+				allErrs = append(allErrs, webhook.ValidateWebhookService(fldPath.Child("webhookClientConfig").Child("service"), cc.Service.Name, cc.Service.Namespace, cc.Service.Path, cc.Service.Port)...)
+			}
+			if len(cc.CABundle) > 0 && !opts.allowInvalidCABundle {
+				allErrs = append(allErrs, webhook.ValidateCABundle(fldPath.Child("webhookClientConfig").Child("caBundle"), cc.CABundle)...)
+			}
+		}
+		allErrs = append(allErrs, validateConversionReviewVersions(conversion.ConversionReviewVersions, requireRecognizedVersion, fldPath.Child("conversionReviewVersions"))...)
+	} else {
+		if conversion.WebhookClientConfig != nil {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("webhookClientConfig"), "should not be set when strategy is not set to Webhook"))
+		}
+		if len(conversion.ConversionReviewVersions) > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("conversionReviewVersions"), "should not be set when strategy is not set to Webhook"))
+		}
+	}
+	return allErrs
+}
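+
+// Illustrative sketch (assumed field values, not upstream code): a Webhook conversion must set
+// exactly one of url or service in its client config:
+//
+//	conversion := &apiextensions.CustomResourceConversion{
+//		Strategy: apiextensions.WebhookConverter,
+//		WebhookClientConfig: &apiextensions.WebhookClientConfig{
+//			Service: &apiextensions.ServiceReference{Namespace: "default", Name: "converter", Port: 443},
+//		},
+//		ConversionReviewVersions: []string{"v1"},
+//	}
+//	errs := validateCustomResourceConversion(conversion, true, fldPath, opts) // no errors expected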
+
+// validateCustomResourceDefinitionSpecUpdate statically validates an update to a CRD spec.
+// The context is passed to support cancellation of CEL validation while defaults are validated.
+func validateCustomResourceDefinitionSpecUpdate(ctx context.Context, spec, oldSpec *apiextensions.CustomResourceDefinitionSpec, opts validationOptions, fldPath *field.Path) field.ErrorList {
+	allErrs := validateCustomResourceDefinitionSpec(ctx, spec, opts, fldPath)
+
+	if opts.requireImmutableNames {
+		// these affect the storage and therefore cannot be changed
+		allErrs = append(allErrs, genericvalidation.ValidateImmutableField(spec.Scope, oldSpec.Scope, fldPath.Child("scope"))...)
+		allErrs = append(allErrs, genericvalidation.ValidateImmutableField(spec.Names.Kind, oldSpec.Names.Kind, fldPath.Child("names", "kind"))...)
+	}
+
+	// these affect the resource name, which is always immutable, so they can't be updated.
+	allErrs = append(allErrs, genericvalidation.ValidateImmutableField(spec.Group, oldSpec.Group, fldPath.Child("group"))...)
+	allErrs = append(allErrs, genericvalidation.ValidateImmutableField(spec.Names.Plural, oldSpec.Names.Plural, fldPath.Child("names", "plural"))...)
+
+	return allErrs
+}
+
+// getSubresourcesForVersion returns the subresources for the given version in the given CRD spec.
+// NOTE that this function assumes the version always exists, since it is used by the validation process
+// that iterates through the existing versions.
+func getSubresourcesForVersion(crd *apiextensions.CustomResourceDefinitionSpec, version string) *apiextensions.CustomResourceSubresources {
+	if !hasPerVersionSubresources(crd.Versions) {
+		return crd.Subresources
+	}
+	for _, v := range crd.Versions {
+		if version == v.Name {
+			return v.Subresources
+		}
+	}
+	return nil
+}
+
+// hasAnyStatusEnabled returns true if given CRD spec has at least one Status Subresource set
+// among the top-level and per-version Subresources.
+func hasAnyStatusEnabled(crd *apiextensions.CustomResourceDefinitionSpec) bool {
+	if hasStatusEnabled(crd.Subresources) {
+		return true
+	}
+	for _, v := range crd.Versions {
+		if hasStatusEnabled(v.Subresources) {
+			return true
+		}
+	}
+	return false
+}
+
+// hasStatusEnabled returns true if given CRD Subresources has non-nil Status set.
+func hasStatusEnabled(subresources *apiextensions.CustomResourceSubresources) bool {
+	if subresources != nil && subresources.Status != nil {
+		return true
+	}
+	return false
+}
+
+// hasPerVersionSchema returns true if a CRD uses per-version schema.
+func hasPerVersionSchema(versions []apiextensions.CustomResourceDefinitionVersion) bool {
+	for _, v := range versions {
+		if v.Schema != nil {
+			return true
+		}
+	}
+	return false
+}
+
+// hasPerVersionSubresources returns true if a CRD uses per-version subresources.
+func hasPerVersionSubresources(versions []apiextensions.CustomResourceDefinitionVersion) bool {
+	for _, v := range versions {
+		if v.Subresources != nil {
+			return true
+		}
+	}
+	return false
+}
+
+// hasPerVersionColumns returns true if a CRD uses per-version columns.
+func hasPerVersionColumns(versions []apiextensions.CustomResourceDefinitionVersion) bool {
+	for _, v := range versions {
+		if len(v.AdditionalPrinterColumns) > 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// hasIdenticalPerVersionSchema returns true if a CRD sets identical non-nil values
+// to all per-version schemas
+func hasIdenticalPerVersionSchema(versions []apiextensions.CustomResourceDefinitionVersion) bool {
+	if len(versions) == 0 {
+		return false
+	}
+	value := versions[0].Schema
+	for _, v := range versions {
+		if v.Schema == nil || !apiequality.Semantic.DeepEqual(v.Schema, value) {
+			return false
+		}
+	}
+	return true
+}
+
+// hasIdenticalPerVersionSubresources returns true if a CRD sets identical non-nil values
+// to all per-version subresources
+func hasIdenticalPerVersionSubresources(versions []apiextensions.CustomResourceDefinitionVersion) bool {
+	if len(versions) == 0 {
+		return false
+	}
+	value := versions[0].Subresources
+	for _, v := range versions {
+		if v.Subresources == nil || !apiequality.Semantic.DeepEqual(v.Subresources, value) {
+			return false
+		}
+	}
+	return true
+}
+
+// hasIdenticalPerVersionColumns returns true if a CRD sets identical non-nil values
+// to all per-version columns
+func hasIdenticalPerVersionColumns(versions []apiextensions.CustomResourceDefinitionVersion) bool {
+	if len(versions) == 0 {
+		return false
+	}
+	value := versions[0].AdditionalPrinterColumns
+	for _, v := range versions {
+		if len(v.AdditionalPrinterColumns) == 0 || !apiequality.Semantic.DeepEqual(v.AdditionalPrinterColumns, value) {
+			return false
+		}
+	}
+	return true
+}
+
+// ValidateCustomResourceDefinitionStatus statically validates the status of a CustomResourceDefinition.
+func ValidateCustomResourceDefinitionStatus(status *apiextensions.CustomResourceDefinitionStatus, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	allErrs = append(allErrs, ValidateCustomResourceDefinitionNames(&status.AcceptedNames, fldPath.Child("acceptedNames"))...)
+	return allErrs
+}
+
+// ValidateCustomResourceDefinitionNames statically validates a set of CustomResourceDefinition names.
+func ValidateCustomResourceDefinitionNames(names *apiextensions.CustomResourceDefinitionNames, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if errs := utilvalidation.IsDNS1035Label(names.Plural); len(names.Plural) > 0 && len(errs) > 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("plural"), names.Plural, strings.Join(errs, ",")))
+	}
+	if errs := utilvalidation.IsDNS1035Label(names.Singular); len(names.Singular) > 0 && len(errs) > 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("singular"), names.Singular, strings.Join(errs, ",")))
+	}
+	if errs := utilvalidation.IsDNS1035Label(strings.ToLower(names.Kind)); len(names.Kind) > 0 && len(errs) > 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), names.Kind, "may have mixed case, but should otherwise match: "+strings.Join(errs, ",")))
+	}
+	if errs := utilvalidation.IsDNS1035Label(strings.ToLower(names.ListKind)); len(names.ListKind) > 0 && len(errs) > 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("listKind"), names.ListKind, "may have mixed case, but should otherwise match: "+strings.Join(errs, ",")))
+	}
+
+	for i, shortName := range names.ShortNames {
+		if errs := utilvalidation.IsDNS1035Label(shortName); len(errs) > 0 {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("shortNames").Index(i), shortName, strings.Join(errs, ",")))
+		}
+	}
+
+	// kind and listKind may not be the same, or parsing becomes ambiguous
+	if len(names.Kind) > 0 && names.Kind == names.ListKind {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("listKind"), names.ListKind, "kind and listKind may not be the same"))
+	}
+
+	for i, category := range names.Categories {
+		if errs := utilvalidation.IsDNS1035Label(category); len(errs) > 0 {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("categories").Index(i), category, strings.Join(errs, ",")))
+		}
+	}
+
+	return allErrs
+}
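+
+// Illustrative sketch (not upstream code): the kind/listKind clash above is caught like so:
+//
+//	names := &apiextensions.CustomResourceDefinitionNames{
+//		Plural: "gadgets", Singular: "gadget", Kind: "Gadget", ListKind: "Gadget",
+//	}
+//	errs := ValidateCustomResourceDefinitionNames(names, field.NewPath("spec", "names"))
+//	// -> spec.names.listKind: "kind and listKind may not be the same"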
+
+// ValidateCustomResourceColumnDefinition statically validates a printer column.
+func ValidateCustomResourceColumnDefinition(col *apiextensions.CustomResourceColumnDefinition, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(col.Name) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
+	}
+
+	if len(col.Type) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("type"), fmt.Sprintf("must be one of %s", strings.Join(printerColumnDatatypes.List(), ","))))
+	} else if !printerColumnDatatypes.Has(col.Type) {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("type"), col.Type, fmt.Sprintf("must be one of %s", strings.Join(printerColumnDatatypes.List(), ","))))
+	}
+
+	if len(col.Format) > 0 && !customResourceColumnDefinitionFormats.Has(col.Format) {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("format"), col.Format, fmt.Sprintf("must be one of %s", strings.Join(customResourceColumnDefinitionFormats.List(), ","))))
+	}
+
+	if len(col.JSONPath) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("JSONPath"), ""))
+	} else if errs := validateSimpleJSONPath(col.JSONPath, fldPath.Child("JSONPath")); len(errs) > 0 {
+		allErrs = append(allErrs, errs...)
+	}
+
+	return allErrs
+}
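+
+// Illustrative sketch (not upstream code): a well-formed printer column needs a name, a
+// supported type, and a JSONPath:
+//
+//	col := &apiextensions.CustomResourceColumnDefinition{
+//		Name: "Replicas", Type: "integer", JSONPath: ".spec.replicas",
+//	}
+//	errs := ValidateCustomResourceColumnDefinition(col, fldPath)
+//	// no errors, assuming ".spec.replicas" parses as a simple JSON path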
+
+func ValidateCustomResourceSelectableFields(selectableFields []apiextensions.SelectableField, schema *structuralschema.Structural, fldPath *field.Path) (allErrs field.ErrorList) {
+	uniqueSelectableFields := sets.New[string]()
+	for i, selectableField := range selectableFields {
+		indexFldPath := fldPath.Index(i)
+		if len(selectableField.JSONPath) == 0 {
+			allErrs = append(allErrs, field.Required(indexFldPath.Child("jsonPath"), ""))
+			continue
+		}
+		// Leverage the field path validation originally built for use with CEL features
+		path, foundSchema, err := cel.ValidFieldPath(selectableField.JSONPath, schema, cel.WithFieldPathAllowArrayNotation(false))
+		if err != nil {
+			allErrs = append(allErrs, field.Invalid(indexFldPath.Child("jsonPath"), selectableField.JSONPath, fmt.Sprintf("is an invalid path: %v", err)))
+			continue
+		}
+		if path.Root().String() == "metadata" {
+			allErrs = append(allErrs, field.Invalid(indexFldPath.Child("jsonPath"), selectableField.JSONPath, "must not point to fields in metadata"))
+		}
+		if !allowedSelectableFieldSchema(foundSchema) {
+			allErrs = append(allErrs, field.Invalid(indexFldPath.Child("jsonPath"), selectableField.JSONPath, "must point to a field of type string, boolean or integer. Enum string fields and strings with formats are allowed."))
+		}
+		if uniqueSelectableFields.Has(path.String()) {
+			allErrs = append(allErrs, field.Duplicate(indexFldPath.Child("jsonPath"), selectableField.JSONPath))
+		} else {
+			uniqueSelectableFields.Insert(path.String())
+		}
+	}
+	uniqueSelectableFieldCount := uniqueSelectableFields.Len()
+	if uniqueSelectableFieldCount > MaxSelectableFields {
+		allErrs = append(allErrs, field.TooMany(fldPath, uniqueSelectableFieldCount, MaxSelectableFields))
+	}
+	return allErrs
+}
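+
+// Illustrative sketch (not upstream code): selectable fields must be unique scalar paths
+// outside metadata, and at most MaxSelectableFields unique paths are allowed:
+//
+//	fields := []apiextensions.SelectableField{
+//		{JSONPath: ".spec.color"},    // ok if spec.color is a string in the structural schema
+//		{JSONPath: ".metadata.name"}, // rejected: paths into metadata are not allowed
+//	}
+//	errs := ValidateCustomResourceSelectableFields(fields, structural, fldPath)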
+
+func allowedSelectableFieldSchema(schema *structuralschema.Structural) bool {
+	if schema == nil {
+		return false
+	}
+	switch schema.Type {
+	case "string", "boolean", "integer":
+		return true
+	default:
+		return false
+	}
+}
+
+// specStandardValidator applies validations for different OpenAPI specification versions.
+type specStandardValidator interface {
+	validate(spec *apiextensions.JSONSchemaProps, fldPath *field.Path) field.ErrorList
+	withForbiddenDefaults(reason string) specStandardValidator
+
+	// insideResourceMeta returns true when validating either TypeMeta or ObjectMeta, from an embedded resource or on the top-level.
+	insideResourceMeta() bool
+	withInsideResourceMeta() specStandardValidator
+
+	// forbidOldSelfValidations returns the path to the first ancestor of the visited path that can't be safely correlated between two revisions of an object, or nil if there is no such path
+	forbidOldSelfValidations() *field.Path
+	withForbidOldSelfValidations(path *field.Path) specStandardValidator
+}
+
+// validateCustomResourceDefinitionValidation statically validates a CustomResourceValidation.
+// The context is passed to support cancellation of CEL validation while defaults are validated.
+func validateCustomResourceDefinitionValidation(ctx context.Context, customResourceValidation *apiextensions.CustomResourceValidation, statusSubresourceEnabled bool, opts validationOptions, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if customResourceValidation == nil {
+		return allErrs
+	}
+
+	if schema := customResourceValidation.OpenAPIV3Schema; schema != nil {
+		// if the status subresource is enabled, only certain fields are allowed inside the root schema.
+		// these fields are chosen such that, if status is extracted as properties["status"], its validation is not lost.
+		if statusSubresourceEnabled {
+			v := reflect.ValueOf(schema).Elem()
+			for i := 0; i < v.NumField(); i++ {
+				// skip zero values
+				if value := v.Field(i).Interface(); reflect.DeepEqual(value, reflect.Zero(reflect.TypeOf(value)).Interface()) {
+					continue
+				}
+
+				fieldName := v.Type().Field(i).Name
+
+				// only "object" type is valid at root of the schema since validation schema for status is extracted as properties["status"]
+				if fieldName == "Type" {
+					if schema.Type != "object" {
+						allErrs = append(allErrs, field.Invalid(fldPath.Child("openAPIV3Schema.type"), schema.Type, fmt.Sprintf(`only "object" is allowed as the type at the root of the schema if the status subresource is enabled`)))
+						break
+					}
+					continue
+				}
+
+				if !allowedAtRootSchema(fieldName) {
+					allErrs = append(allErrs, field.Invalid(fldPath.Child("openAPIV3Schema"), *schema, fmt.Sprintf(`only %v fields are allowed at the root of the schema if the status subresource is enabled`, allowedFieldsAtRootSchema)))
+					break
+				}
+			}
+		}
+
+		if schema.Nullable {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("openAPIV3Schema.nullable"), fmt.Sprintf(`nullable cannot be true at the root`)))
+		}
+
+		openAPIV3Schema := &specStandardValidatorV3{
+			allowDefaults:            opts.allowDefaults,
+			disallowDefaultsReason:   opts.disallowDefaultsReason,
+			requireValidPropertyType: opts.requireValidPropertyType,
+		}
+
+		var celContext *CELSchemaContext
+		var structuralSchemaInitErrs field.ErrorList
+		if opts.requireStructuralSchema {
+			if ss, err := structuralschema.NewStructural(schema); err != nil {
+				// These validation errors overlap with OpenAPISchema validation errors so we keep track of them
+				// separately and only show them if OpenAPISchema validation does not report any errors.
+				structuralSchemaInitErrs = append(structuralSchemaInitErrs, field.Invalid(fldPath.Child("openAPIV3Schema"), "", err.Error()))
+			} else if validationErrors := structuralschema.ValidateStructural(fldPath.Child("openAPIV3Schema"), ss); len(validationErrors) > 0 {
+				allErrs = append(allErrs, validationErrors...)
+			} else if validationErrors, err := structuraldefaulting.ValidateDefaults(ctx, fldPath.Child("openAPIV3Schema"), ss, true, opts.requirePrunedDefaults); err != nil {
+				// this should never happen
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("openAPIV3Schema"), "", err.Error()))
+			} else if len(validationErrors) > 0 {
+				allErrs = append(allErrs, validationErrors...)
+			} else {
+				// Only initialize CEL rule validation context if the structural schemas are valid.
+				// A nil CELSchemaContext indicates that no CEL validation should be attempted.
+				celContext = RootCELContext(schema)
+			}
+		}
+		allErrs = append(allErrs, ValidateCustomResourceDefinitionOpenAPISchema(schema, fldPath.Child("openAPIV3Schema"), openAPIV3Schema, true, &opts, celContext).AllErrors()...)
+
+		if len(allErrs) == 0 && len(structuralSchemaInitErrs) > 0 {
+			// Structural schema initialization errors overlap with OpenAPISchema validation errors so we only show them
+			// if there are no OpenAPISchema validation errors.
+			allErrs = append(allErrs, structuralSchemaInitErrs...)
+		}
+
+		if celContext != nil && celContext.TotalCost != nil {
+			if celContext.TotalCost.Total > StaticEstimatedCRDCostLimit {
+				for _, expensive := range celContext.TotalCost.MostExpensive {
+					costErrorMsg := "contributed to estimated rule cost total exceeding cost limit for entire OpenAPIv3 schema"
+					allErrs = append(allErrs, field.Forbidden(expensive.Path, costErrorMsg))
+				}
+
+				costErrorMsg := getCostErrorMessage("x-kubernetes-validations estimated rule cost total for entire OpenAPIv3 schema", celContext.TotalCost.Total, StaticEstimatedCRDCostLimit)
+				allErrs = append(allErrs, field.Forbidden(fldPath.Child("openAPIV3Schema"), costErrorMsg))
+			}
+		}
+	}
+
+	// if validation passed otherwise, make sure we can actually construct a schema validator from this custom resource validation.
+	if len(allErrs) == 0 {
+		if _, _, err := apiservervalidation.NewSchemaValidator(customResourceValidation.OpenAPIV3Schema); err != nil {
+			allErrs = append(allErrs, field.Invalid(fldPath, "", fmt.Sprintf("error building validator: %v", err)))
+		}
+	}
+	return allErrs
+}
+
+var metaFields = sets.NewString("metadata", "kind", "apiVersion")
+
+// OpenAPISchemaErrorList tracks all validation errors reported by ValidateCustomResourceDefinitionOpenAPISchema,
+// with CEL-related errors kept separate from schema-related errors.
+type OpenAPISchemaErrorList struct {
+	SchemaErrors field.ErrorList
+	CELErrors    field.ErrorList
+}
+
+// AppendErrors appends all errors in the provided list to this list.
+func (o *OpenAPISchemaErrorList) AppendErrors(list *OpenAPISchemaErrorList) {
+	if o == nil || list == nil {
+		return
+	}
+	o.SchemaErrors = append(o.SchemaErrors, list.SchemaErrors...)
+	o.CELErrors = append(o.CELErrors, list.CELErrors...)
+}
+
+// AllErrors returns a list containing both schema and CEL errors.
+func (o *OpenAPISchemaErrorList) AllErrors() field.ErrorList {
+	if o == nil {
+		return field.ErrorList{}
+	}
+	return append(o.SchemaErrors, o.CELErrors...)
+}
+
+// ValidateCustomResourceDefinitionOpenAPISchema statically validates an OpenAPI v3 schema of a CustomResourceDefinition.
+func ValidateCustomResourceDefinitionOpenAPISchema(schema *apiextensions.JSONSchemaProps, fldPath *field.Path, ssv specStandardValidator, isRoot bool, opts *validationOptions, celContext *CELSchemaContext) *OpenAPISchemaErrorList {
+	allErrs := &OpenAPISchemaErrorList{SchemaErrors: field.ErrorList{}, CELErrors: field.ErrorList{}}
+
+	if schema == nil {
+		return allErrs
+	}
+	allErrs.SchemaErrors = append(allErrs.SchemaErrors, ssv.validate(schema, fldPath)...)
+
+	if schema.UniqueItems {
+		allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Forbidden(fldPath.Child("uniqueItems"), "uniqueItems cannot be set to true since the runtime complexity becomes quadratic"))
+	}
+
+	// additionalProperties and properties are mutually exclusive because otherwise they
+	// contradict Kubernetes' API convention to ignore unknown fields.
+	//
+	// In other words:
+	// - properties are for structs,
+	// - additionalProperties are for map[string]interface{}
+	//
+	// Note: when patternProperties is added to OpenAPI some day, this will have to be
+	//       restricted like additionalProperties.
+	if schema.AdditionalProperties != nil {
+		if len(schema.Properties) != 0 {
+			if !schema.AdditionalProperties.Allows || schema.AdditionalProperties.Schema != nil {
+				allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Forbidden(fldPath.Child("additionalProperties"), "additionalProperties and properties are mutual exclusive"))
+			}
+		}
+		// Note: we forbid additionalProperties at resource root, both embedded and top-level.
+		//       But further inside, additionalProperties is possible, e.g. for labels or annotations.
+		subSsv := ssv
+		if ssv.insideResourceMeta() {
+			// we have to forbid defaults inside additionalProperties because pruning without actual value is ambiguous
+			subSsv = ssv.withForbiddenDefaults("inside additionalProperties applying to object metadata")
+		}
+		allErrs.AppendErrors(ValidateCustomResourceDefinitionOpenAPISchema(schema.AdditionalProperties.Schema, fldPath.Child("additionalProperties"), subSsv, false, opts, celContext.ChildAdditionalPropertiesContext(schema.AdditionalProperties.Schema)))
+	}
+
+	if len(schema.Properties) != 0 {
+		for property, jsonSchema := range schema.Properties {
+			subSsv := ssv
+
+			if !cel.MapIsCorrelatable(schema.XMapType) {
+				subSsv = subSsv.withForbidOldSelfValidations(fldPath)
+			}
+
+			if (isRoot || schema.XEmbeddedResource) && metaFields.Has(property) {
+				// we recurse into the schema that applies to ObjectMeta.
+				subSsv = subSsv.withInsideResourceMeta()
+				if isRoot {
+					subSsv = subSsv.withForbiddenDefaults(fmt.Sprintf("in top-level %s", property))
+				}
+			}
+			propertySchema := jsonSchema
+			allErrs.AppendErrors(ValidateCustomResourceDefinitionOpenAPISchema(&propertySchema, fldPath.Child("properties").Key(property), subSsv, false, opts, celContext.ChildPropertyContext(&propertySchema, property)))
+		}
+	}
+
+	allErrs.AppendErrors(ValidateCustomResourceDefinitionOpenAPISchema(schema.Not, fldPath.Child("not"), ssv, false, opts, nil))
+
+	if len(schema.AllOf) != 0 {
+		for i, jsonSchema := range schema.AllOf {
+			allOfSchema := jsonSchema
+			allErrs.AppendErrors(ValidateCustomResourceDefinitionOpenAPISchema(&allOfSchema, fldPath.Child("allOf").Index(i), ssv, false, opts, nil))
+		}
+	}
+
+	if len(schema.OneOf) != 0 {
+		for i, jsonSchema := range schema.OneOf {
+			oneOfSchema := jsonSchema
+			allErrs.AppendErrors(ValidateCustomResourceDefinitionOpenAPISchema(&oneOfSchema, fldPath.Child("oneOf").Index(i), ssv, false, opts, nil))
+		}
+	}
+
+	if len(schema.AnyOf) != 0 {
+		for i, jsonSchema := range schema.AnyOf {
+			anyOfSchema := jsonSchema
+			allErrs.AppendErrors(ValidateCustomResourceDefinitionOpenAPISchema(&anyOfSchema, fldPath.Child("anyOf").Index(i), ssv, false, opts, nil))
+		}
+	}
+
+	if len(schema.Definitions) != 0 {
+		for definition, jsonSchema := range schema.Definitions {
+			definitionSchema := jsonSchema
+			allErrs.AppendErrors(ValidateCustomResourceDefinitionOpenAPISchema(&definitionSchema, fldPath.Child("definitions").Key(definition), ssv, false, opts, nil))
+		}
+	}
+
+	if schema.Items != nil {
+		subSsv := ssv
+
+		// we can only correlate old/new items for "map" and "set" lists, and correlation of
+		// "set" elements by identity is not supported for cel (x-kubernetes-validations)
+		// rules. an unset list type defaults to "atomic".
+		if schema.XListType == nil || *schema.XListType != "map" {
+			subSsv = subSsv.withForbidOldSelfValidations(fldPath)
+		}
+
+		allErrs.AppendErrors(ValidateCustomResourceDefinitionOpenAPISchema(schema.Items.Schema, fldPath.Child("items"), subSsv, false, opts, celContext.ChildItemsContext(schema.Items.Schema)))
+		if len(schema.Items.JSONSchemas) != 0 {
+			for i, jsonSchema := range schema.Items.JSONSchemas {
+				itemsSchema := jsonSchema
+				allErrs.AppendErrors(ValidateCustomResourceDefinitionOpenAPISchema(&itemsSchema, fldPath.Child("items").Index(i), subSsv, false, opts, celContext.ChildItemsContext(&itemsSchema)))
+			}
+		}
+	}
+
+	if schema.Dependencies != nil {
+		for dependency, jsonSchemaPropsOrStringArray := range schema.Dependencies {
+			allErrs.AppendErrors(ValidateCustomResourceDefinitionOpenAPISchema(jsonSchemaPropsOrStringArray.Schema, fldPath.Child("dependencies").Key(dependency), ssv, false, opts, nil))
+		}
+	}
+
+	if schema.XPreserveUnknownFields != nil && !*schema.XPreserveUnknownFields {
+		allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("x-kubernetes-preserve-unknown-fields"), *schema.XPreserveUnknownFields, "must be true or undefined"))
+	}
+
+	if schema.XMapType != nil && schema.Type != "object" {
+		if len(schema.Type) == 0 {
+			allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Required(fldPath.Child("type"), "must be object if x-kubernetes-map-type is specified"))
+		} else {
+			allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("type"), schema.Type, "must be object if x-kubernetes-map-type is specified"))
+		}
+	}
+
+	if schema.XMapType != nil && *schema.XMapType != "atomic" && *schema.XMapType != "granular" {
+		allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.NotSupported(fldPath.Child("x-kubernetes-map-type"), *schema.XMapType, []string{"atomic", "granular"}))
+	}
+
+	if schema.XListType != nil && schema.Type != "array" {
+		if len(schema.Type) == 0 {
+			allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Required(fldPath.Child("type"), "must be array if x-kubernetes-list-type is specified"))
+		} else {
+			allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("type"), schema.Type, "must be array if x-kubernetes-list-type is specified"))
+		}
+	} else if opts.requireAtomicSetType && schema.XListType != nil && *schema.XListType == "set" && schema.Items != nil && schema.Items.Schema != nil { // by structural schema items are present
+		is := schema.Items.Schema
+		switch is.Type {
+		case "array":
+			if is.XListType != nil && *is.XListType != "atomic" { // atomic is the implicit default behaviour if unset, hence != atomic is wrong
+				allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("items").Child("x-kubernetes-list-type"), is.XListType, "must be atomic as item of a list with x-kubernetes-list-type=set"))
+			}
+		case "object":
+			if is.XMapType == nil || *is.XMapType != "atomic" { // granular is the implicit default behaviour if unset, hence nil and != atomic are wrong
+				allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("items").Child("x-kubernetes-map-type"), is.XListType, "must be atomic as item of a list with x-kubernetes-list-type=set"))
+			}
+		}
+	}
+
+	if schema.XListType != nil && *schema.XListType != "atomic" && *schema.XListType != "set" && *schema.XListType != "map" {
+		allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.NotSupported(fldPath.Child("x-kubernetes-list-type"), *schema.XListType, []string{"atomic", "set", "map"}))
+	}
+
+	if len(schema.XListMapKeys) > 0 {
+		if schema.XListType == nil {
+			allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Required(fldPath.Child("x-kubernetes-list-type"), "must be map if x-kubernetes-list-map-keys is non-empty"))
+		} else if *schema.XListType != "map" {
+			allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("x-kubernetes-list-type"), *schema.XListType, "must be map if x-kubernetes-list-map-keys is non-empty"))
+		}
+	}
+
+	if schema.XListType != nil && *schema.XListType == "map" {
+		if len(schema.XListMapKeys) == 0 {
+			allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Required(fldPath.Child("x-kubernetes-list-map-keys"), "must not be empty if x-kubernetes-list-type is map"))
+		}
+
+		if schema.Items == nil {
+			allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Required(fldPath.Child("items"), "must have a schema if x-kubernetes-list-type is map"))
+		}
+
+		if schema.Items != nil && schema.Items.Schema == nil {
+			allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("items"), schema.Items, "must only have a single schema if x-kubernetes-list-type is map"))
+		}
+
+		if schema.Items != nil && schema.Items.Schema != nil && schema.Items.Schema.Type != "object" {
+			allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("items").Child("type"), schema.Items.Schema.Type, "must be object if parent array's x-kubernetes-list-type is map"))
+		}
+
+		if schema.Items != nil && schema.Items.Schema != nil && schema.Items.Schema.Type == "object" {
+			keys := map[string]struct{}{}
+			for _, k := range schema.XListMapKeys {
+				if s, ok := schema.Items.Schema.Properties[k]; ok {
+					if s.Type == "array" || s.Type == "object" {
+						allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("items").Child("properties").Key(k).Child("type"), schema.Items.Schema.Type, "must be a scalar type if parent array's x-kubernetes-list-type is map"))
+					}
+				} else {
+					allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("x-kubernetes-list-map-keys"), schema.XListMapKeys, "entries must all be names of item properties"))
+				}
+				if _, ok := keys[k]; ok {
+					allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("x-kubernetes-list-map-keys"), schema.XListMapKeys, "must not contain duplicate entries"))
+				}
+				keys[k] = struct{}{}
+			}
+		}
+	}
+
+	if opts.requireMapListKeysMapSetValidation {
+		allErrs.SchemaErrors = append(allErrs.SchemaErrors, validateMapListKeysMapSet(schema, fldPath)...)
+	}
+	if len(schema.XValidations) > 0 {
+		for i, rule := range schema.XValidations {
+			trimmedRule := strings.TrimSpace(rule.Rule)
+			trimmedMsg := strings.TrimSpace(rule.Message)
+			trimmedMsgExpr := strings.TrimSpace(rule.MessageExpression)
+			if len(trimmedRule) == 0 {
+				allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Required(fldPath.Child("x-kubernetes-validations").Index(i).Child("rule"), "rule is not specified"))
+			} else if len(rule.Message) > 0 && len(trimmedMsg) == 0 {
+				allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("x-kubernetes-validations").Index(i).Child("message"), rule.Message, "message must be non-empty if specified"))
+			} else if hasNewlines(trimmedMsg) {
+				allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("x-kubernetes-validations").Index(i).Child("message"), rule.Message, "message must not contain line breaks"))
+			} else if hasNewlines(trimmedRule) && len(trimmedMsg) == 0 {
+				allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Required(fldPath.Child("x-kubernetes-validations").Index(i).Child("message"), "message must be specified if rule contains line breaks"))
+			}
+			if len(rule.MessageExpression) > 0 && len(trimmedMsgExpr) == 0 {
+				allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Required(fldPath.Child("x-kubernetes-validations").Index(i).Child("messageExpression"), "messageExpression must be non-empty if specified"))
+			}
+			if rule.Reason != nil && !supportedValidationReason.Has(string(*rule.Reason)) {
+				allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.NotSupported(fldPath.Child("x-kubernetes-validations").Index(i).Child("reason"), *rule.Reason, supportedValidationReason.List()))
+			}
+			trimmedFieldPath := strings.TrimSpace(rule.FieldPath)
+			if len(rule.FieldPath) > 0 && len(trimmedFieldPath) == 0 {
+				allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("x-kubernetes-validations").Index(i).Child("fieldPath"), rule.FieldPath, "fieldPath must be non-empty if specified"))
+			}
+			if hasNewlines(rule.FieldPath) {
+				allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("x-kubernetes-validations").Index(i).Child("fieldPath"), rule.FieldPath, "fieldPath must not contain line breaks"))
+			}
+			if len(rule.FieldPath) > 0 {
+				if !pathValid(schema, rule.FieldPath) {
+					allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("x-kubernetes-validations").Index(i).Child("fieldPath"), rule.FieldPath, "fieldPath must be a valid path"))
+				}
+
+			}
+		}
+
+		// If any schema related validation errors have been found at this level or deeper, skip CEL expression validation.
+		// Invalid OpenAPISchemas are not always possible to convert into valid CEL DeclTypes, and can lead to CEL
+		// validation error messages that are not actionable (will go away once the schema errors are resolved) and that
+		// are difficult for CEL expression authors to understand.
+		if len(allErrs.SchemaErrors) == 0 && celContext != nil {
+			typeInfo, err := celContext.TypeInfo()
+			if err != nil {
+				allErrs.CELErrors = append(allErrs.CELErrors, field.InternalError(fldPath.Child("x-kubernetes-validations"), fmt.Errorf("internal error: failed to construct type information for x-kubernetes-validations rules: %s", err)))
+			} else if typeInfo == nil {
+				allErrs.CELErrors = append(allErrs.CELErrors, field.InternalError(fldPath.Child("x-kubernetes-validations"), fmt.Errorf("internal error: failed to retrieve type information for x-kubernetes-validations")))
+			} else {
+				compResults, err := cel.Compile(typeInfo.Schema, typeInfo.DeclType, celconfig.PerCallLimit, opts.celEnvironmentSet, opts.preexistingExpressions)
+				if err != nil {
+					allErrs.CELErrors = append(allErrs.CELErrors, field.InternalError(fldPath.Child("x-kubernetes-validations"), err))
+				} else {
+					for i, cr := range compResults {
+						expressionCost := getExpressionCost(cr, celContext)
+						if !opts.suppressPerExpressionCost && expressionCost > StaticEstimatedCostLimit {
+							costErrorMsg := getCostErrorMessage("estimated rule cost", expressionCost, StaticEstimatedCostLimit)
+							allErrs.CELErrors = append(allErrs.CELErrors, field.Forbidden(fldPath.Child("x-kubernetes-validations").Index(i).Child("rule"), costErrorMsg))
+						}
+						if celContext.TotalCost != nil {
+							celContext.TotalCost.ObserveExpressionCost(fldPath.Child("x-kubernetes-validations").Index(i).Child("rule"), expressionCost)
+						}
+						if cr.Error != nil {
+							if cr.Error.Type == apiservercel.ErrorTypeRequired {
+								allErrs.CELErrors = append(allErrs.CELErrors, field.Required(fldPath.Child("x-kubernetes-validations").Index(i).Child("rule"), cr.Error.Detail))
+							} else {
+								allErrs.CELErrors = append(allErrs.CELErrors, field.Invalid(fldPath.Child("x-kubernetes-validations").Index(i).Child("rule"), schema.XValidations[i], cr.Error.Detail))
+							}
+						}
+						if cr.MessageExpressionError != nil {
+							allErrs.CELErrors = append(allErrs.CELErrors, field.Invalid(fldPath.Child("x-kubernetes-validations").Index(i).Child("messageExpression"), schema.XValidations[i], cr.MessageExpressionError.Detail))
+						} else {
+							if cr.MessageExpression != nil {
+								if !opts.suppressPerExpressionCost && cr.MessageExpressionMaxCost > StaticEstimatedCostLimit {
+									costErrorMsg := getCostErrorMessage("estimated messageExpression cost", cr.MessageExpressionMaxCost, StaticEstimatedCostLimit)
+									allErrs.CELErrors = append(allErrs.CELErrors, field.Forbidden(fldPath.Child("x-kubernetes-validations").Index(i).Child("messageExpression"), costErrorMsg))
+								}
+								if celContext.TotalCost != nil {
+									celContext.TotalCost.ObserveExpressionCost(fldPath.Child("x-kubernetes-validations").Index(i).Child("messageExpression"), cr.MessageExpressionMaxCost)
+								}
+							}
+						}
+						if cr.UsesOldSelf {
+							if uncorrelatablePath := ssv.forbidOldSelfValidations(); uncorrelatablePath != nil {
+								allErrs.CELErrors = append(allErrs.CELErrors, field.Invalid(fldPath.Child("x-kubernetes-validations").Index(i).Child("rule"), schema.XValidations[i].Rule, fmt.Sprintf("oldSelf cannot be used on the uncorrelatable portion of the schema within %v", uncorrelatablePath)))
+							}
+						} else if schema.XValidations[i].OptionalOldSelf != nil {
+							allErrs.CELErrors = append(allErrs.CELErrors, field.Invalid(fldPath.Child("x-kubernetes-validations").Index(i).Child("optionalOldSelf"), *schema.XValidations[i].OptionalOldSelf, "may not be set if oldSelf is not used in rule"))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return allErrs
+}
+
+func pathValid(schema *apiextensions.JSONSchemaProps, path string) bool {
+	// To avoid duplicating code and to ease maintenance, use the ValidFieldPath func to check whether the path is valid.
+	if ss, err := structuralschema.NewStructural(schema); err == nil {
+		_, _, err := cel.ValidFieldPath(path, ss)
+		return err == nil
+	}
+	return true
+}
+
+// multiplyWithOverflowGuard returns the product of baseCost and cardinality unless that product
+// would exceed math.MaxUint, in which case math.MaxUint is returned.
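+// For example, multiplyWithOverflowGuard(2, 3) yields 6, while
+// multiplyWithOverflowGuard(math.MaxUint/2, 3) saturates to math.MaxUint instead
+// of wrapping around.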
+func multiplyWithOverflowGuard(baseCost, cardinality uint64) uint64 {
+	if baseCost == 0 {
+		// an empty rule can return 0, so guard for that here
+		return 0
+	} else if math.MaxUint/baseCost < cardinality {
+		return math.MaxUint
+	}
+	return baseCost * cardinality
+}
+
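+// getExpressionCost returns the worst-case total cost of a compiled rule: its
+// per-invocation max cost multiplied by the maximum number of times the rule
+// could be invoked, using the schema's cardinality bound when it is known.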
+func getExpressionCost(cr cel.CompilationResult, cardinalityCost *CELSchemaContext) uint64 {
+	if cardinalityCost.MaxCardinality != unbounded {
+		return multiplyWithOverflowGuard(cr.MaxCost, *cardinalityCost.MaxCardinality)
+	}
+	return multiplyWithOverflowGuard(cr.MaxCost, cr.MaxCardinality)
+}
+
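+// getCostErrorMessage formats a budget-exceeded message; for example,
+// getCostErrorMessage("estimated rule cost", 350, 100) yields a message stating
+// that the cost "exceeds budget by factor of 3.5x".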
+func getCostErrorMessage(costName string, expressionCost, costLimit uint64) string {
+	exceedFactor := float64(expressionCost) / float64(costLimit)
+	var factor string
+	if exceedFactor > 100.0 {
+		// if exceedFactor is greater than 2 orders of magnitude, the rule is likely O(n^2) or worse
+		// and will probably never validate without some set limits
+		// also in such cases the cost estimation is generally large enough to not add any value
+		factor = fmt.Sprintf("more than 100x")
+	} else if exceedFactor < 1.5 {
+		factor = fmt.Sprintf("%fx", exceedFactor) // avoid reporting "exceeds budge by a factor of 1.0x"
+	} else {
+		factor = fmt.Sprintf("%.1fx", exceedFactor)
+	}
+	return fmt.Sprintf("%s exceeds budget by factor of %s (try simplifying the rule, or adding maxItems, maxProperties, and maxLength where arrays, maps, and strings are declared)", costName, factor)
+}
+
+var newlineMatcher = regexp.MustCompile(`[\n\r]+`) // valid newline chars in CEL grammar
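+
+// hasNewlines reports whether s contains a newline or carriage return,
+// e.g. hasNewlines("a\nb") is true.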
+func hasNewlines(s string) bool {
+	return newlineMatcher.MatchString(s)
+}
+
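+// validateMapListKeysMapSet enforces the additional invariants of
+// x-kubernetes-list-type=map and x-kubernetes-list-type=set lists: items must not
+// be nullable, and every x-kubernetes-list-map-keys property must be non-nullable
+// and either required or defaulted.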
+func validateMapListKeysMapSet(schema *apiextensions.JSONSchemaProps, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if schema.Items == nil || schema.Items.Schema == nil {
+		return nil
+	}
+	if schema.XListType == nil {
+		return nil
+	}
+	if *schema.XListType != "set" && *schema.XListType != "map" {
+		return nil
+	}
+
+	// set and map list items cannot be nullable
+	if schema.Items.Schema.Nullable {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("items").Child("nullable"), "cannot be nullable when x-kubernetes-list-type is "+*schema.XListType))
+	}
+
+	switch *schema.XListType {
+	case "map":
+		// ensure all map keys are required or have a default
+		isRequired := make(map[string]bool, len(schema.Items.Schema.Required))
+		for _, required := range schema.Items.Schema.Required {
+			isRequired[required] = true
+		}
+
+		for _, k := range schema.XListMapKeys {
+			obj, ok := schema.Items.Schema.Properties[k]
+			if !ok {
+				// we validate that all XListMapKeys are existing properties in ValidateCustomResourceDefinitionOpenAPISchema, so skipping here is ok
+				continue
+			}
+
+			if !isRequired[k] && obj.Default == nil {
+				allErrs = append(allErrs, field.Required(fldPath.Child("items").Child("properties").Key(k).Child("default"), "this property is in x-kubernetes-list-map-keys, so it must have a default or be a required property"))
+			}
+
+			if obj.Nullable {
+				allErrs = append(allErrs, field.Forbidden(fldPath.Child("items").Child("properties").Key(k).Child("nullable"), "this property is in x-kubernetes-list-map-keys, so it cannot be nullable"))
+			}
+		}
+	case "set":
+		// no other set-specific validation
+	}
+
+	return allErrs
+}
+
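+// specStandardValidatorV3 validates a CRD schema against the subset of OpenAPI v3
+// allowed for CRDs, carrying context such as whether defaults are allowed and
+// whether validation is currently inside resource metadata.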
+type specStandardValidatorV3 struct {
+	allowDefaults                       bool
+	disallowDefaultsReason              string
+	isInsideResourceMeta                bool
+	requireValidPropertyType            bool
+	uncorrelatableOldSelfValidationPath *field.Path
+}
+
+func (v *specStandardValidatorV3) withForbiddenDefaults(reason string) specStandardValidator {
+	clone := *v
+	clone.disallowDefaultsReason = reason
+	clone.allowDefaults = false
+	return &clone
+}
+
+func (v *specStandardValidatorV3) withInsideResourceMeta() specStandardValidator {
+	clone := *v
+	clone.isInsideResourceMeta = true
+	return &clone
+}
+
+func (v *specStandardValidatorV3) insideResourceMeta() bool {
+	return v.isInsideResourceMeta
+}
+
+func (v *specStandardValidatorV3) withForbidOldSelfValidations(path *field.Path) specStandardValidator {
+	if v.uncorrelatableOldSelfValidationPath != nil {
+		// oldSelf validations are already forbidden; preserve the highest-level path
+		// that causes oldSelf validations to be forbidden.
+		return v
+	}
+	clone := *v
+	clone.uncorrelatableOldSelfValidationPath = path
+	return &clone
+}
+
+func (v *specStandardValidatorV3) forbidOldSelfValidations() *field.Path {
+	return v.uncorrelatableOldSelfValidationPath
+}
+
+// validate validates against OpenAPI Schema v3.
+func (v *specStandardValidatorV3) validate(schema *apiextensions.JSONSchemaProps, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if schema == nil {
+		return allErrs
+	}
+
+	//
+	// WARNING: if anything new is allowed below, NewStructural must be adapted to support it.
+	//
+
+	if v.requireValidPropertyType && len(schema.Type) > 0 && !openapiV3Types.Has(schema.Type) {
+		allErrs = append(allErrs, field.NotSupported(fldPath.Child("type"), schema.Type, openapiV3Types.List()))
+	}
+
+	if schema.Default != nil && !v.allowDefaults {
+		detail := "must not be set"
+		if len(v.disallowDefaultsReason) > 0 {
+			detail += " " + v.disallowDefaultsReason
+		}
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("default"), detail))
+	}
+
+	if schema.ID != "" {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("id"), "id is not supported"))
+	}
+
+	if schema.AdditionalItems != nil {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("additionalItems"), "additionalItems is not supported"))
+	}
+
+	if len(schema.PatternProperties) != 0 {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("patternProperties"), "patternProperties is not supported"))
+	}
+
+	if len(schema.Definitions) != 0 {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("definitions"), "definitions is not supported"))
+	}
+
+	if schema.Dependencies != nil {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("dependencies"), "dependencies is not supported"))
+	}
+
+	if schema.Ref != nil {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("$ref"), "$ref is not supported"))
+	}
+
+	if schema.Type == "null" {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("type"), "type cannot be set to null, use nullable as an alternative"))
+	}
+
+	if schema.Items != nil && len(schema.Items.JSONSchemas) != 0 {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("items"), "items must be a schema object and not an array"))
+	}
+
+	if v.isInsideResourceMeta && schema.XEmbeddedResource {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-embedded-resource"), "must not be used inside of resource meta"))
+	}
+
+	return allErrs
+}
+
+// ValidateCustomResourceDefinitionSubresources statically validates the given CustomResourceSubresources.
+func ValidateCustomResourceDefinitionSubresources(subresources *apiextensions.CustomResourceSubresources, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if subresources == nil {
+		return allErrs
+	}
+
+	if subresources.Scale != nil {
+		if len(subresources.Scale.SpecReplicasPath) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("scale.specReplicasPath"), ""))
+		} else {
+			// should be constrained json path under .spec
+			if errs := validateSimpleJSONPath(subresources.Scale.SpecReplicasPath, fldPath.Child("scale.specReplicasPath")); len(errs) > 0 {
+				allErrs = append(allErrs, errs...)
+			} else if !strings.HasPrefix(subresources.Scale.SpecReplicasPath, ".spec.") {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("scale.specReplicasPath"), subresources.Scale.SpecReplicasPath, "should be a json path under .spec"))
+			}
+		}
+
+		if len(subresources.Scale.StatusReplicasPath) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("scale.statusReplicasPath"), ""))
+		} else {
+			// should be constrained json path under .status
+			if errs := validateSimpleJSONPath(subresources.Scale.StatusReplicasPath, fldPath.Child("scale.statusReplicasPath")); len(errs) > 0 {
+				allErrs = append(allErrs, errs...)
+			} else if !strings.HasPrefix(subresources.Scale.StatusReplicasPath, ".status.") {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("scale.statusReplicasPath"), subresources.Scale.StatusReplicasPath, "should be a json path under .status"))
+			}
+		}
+
+		// if labelSelectorPath is present, it should be a constrained json path under .status
+		if subresources.Scale.LabelSelectorPath != nil && len(*subresources.Scale.LabelSelectorPath) > 0 {
+			if errs := validateSimpleJSONPath(*subresources.Scale.LabelSelectorPath, fldPath.Child("scale.labelSelectorPath")); len(errs) > 0 {
+				allErrs = append(allErrs, errs...)
+			} else if !strings.HasPrefix(*subresources.Scale.LabelSelectorPath, ".spec.") && !strings.HasPrefix(*subresources.Scale.LabelSelectorPath, ".status.") {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("scale.labelSelectorPath"), subresources.Scale.LabelSelectorPath, "should be a json path under either .spec or .status"))
+			}
+		}
+	}
+
+	return allErrs
+}
+
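+// validateSimpleJSONPath checks that s is a simple dot-notation JSON path such as
+// ".spec.replicas": non-empty and starting with '.'.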
+func validateSimpleJSONPath(s string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	switch {
+	case len(s) == 0:
+		allErrs = append(allErrs, field.Invalid(fldPath, s, "must not be empty"))
+	case s[0] != '.':
+		allErrs = append(allErrs, field.Invalid(fldPath, s, "must be a simple json path starting with ."))
+	case s != ".":
+		if cs := strings.Split(s[1:], "."); len(cs) < 1 {
+			allErrs = append(allErrs, field.Invalid(fldPath, s, "must be a json path in the dot notation"))
+		}
+	}
+
+	return allErrs
+}
+
+var allowedFieldsAtRootSchema = []string{"Description", "Type", "Format", "Title", "Maximum", "ExclusiveMaximum", "Minimum", "ExclusiveMinimum", "MaxLength", "MinLength", "Pattern", "MaxItems", "MinItems", "UniqueItems", "MultipleOf", "Required", "Items", "Properties", "ExternalDocs", "Example", "XPreserveUnknownFields", "XValidations"}
+
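+// allowedAtRootSchema reports whether the named JSONSchemaProps field may be set
+// on the root schema of a CRD version.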
+func allowedAtRootSchema(field string) bool {
+	for _, v := range allowedFieldsAtRootSchema {
+		if field == v {
+			return true
+		}
+	}
+	return false
+}
+
+// requireOpenAPISchema returns true if the request group version requires a schema
+func requireOpenAPISchema(oldCRDSpec *apiextensions.CustomResourceDefinitionSpec) bool {
+	if oldCRDSpec != nil && !allVersionsSpecifyOpenAPISchema(oldCRDSpec) {
+		// don't tighten validation on existing persisted data
+		return false
+	}
+	return true
+}
+func allVersionsSpecifyOpenAPISchema(spec *apiextensions.CustomResourceDefinitionSpec) bool {
+	if spec.Validation != nil && spec.Validation.OpenAPIV3Schema != nil {
+		return true
+	}
+	for _, v := range spec.Versions {
+		if v.Schema == nil || v.Schema.OpenAPIV3Schema == nil {
+			return false
+		}
+	}
+	return true
+}
+
+func specHasDefaults(spec *apiextensions.CustomResourceDefinitionSpec) bool {
+	return HasSchemaWith(spec, schemaHasDefaults)
+}
+
+func schemaHasDefaults(s *apiextensions.JSONSchemaProps) bool {
+	return SchemaHas(s, func(s *apiextensions.JSONSchemaProps) bool {
+		return s.Default != nil
+	})
+}
+
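+// HasSchemaWith returns true if pred holds for the spec's top-level validation
+// schema or for the schema of any of its versions.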
+func HasSchemaWith(spec *apiextensions.CustomResourceDefinitionSpec, pred func(s *apiextensions.JSONSchemaProps) bool) bool {
+	if spec.Validation != nil && spec.Validation.OpenAPIV3Schema != nil && pred(spec.Validation.OpenAPIV3Schema) {
+		return true
+	}
+	for _, v := range spec.Versions {
+		if v.Schema != nil && v.Schema.OpenAPIV3Schema != nil && pred(v.Schema.OpenAPIV3Schema) {
+			return true
+		}
+	}
+	return false
+}
+
+var schemaPool = sync.Pool{
+	New: func() any {
+		return new(apiextensions.JSONSchemaProps)
+	},
+}
+
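+// schemaHasRecurse copies the schema into a pooled JSONSchemaProps before
+// recursing, avoiding a fresh allocation of the large struct at every level of
+// the traversal; this is safe because SchemaHas predicates must not retain or
+// modify the schema.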
+func schemaHasRecurse(s *apiextensions.JSONSchemaProps, pred func(s *apiextensions.JSONSchemaProps) bool) bool {
+	if s == nil {
+		return false
+	}
+	schema := schemaPool.Get().(*apiextensions.JSONSchemaProps)
+	defer schemaPool.Put(schema)
+	*schema = *s
+	return SchemaHas(schema, pred)
+}
+
+// SchemaHas recursively traverses the Schema and calls the `pred`
+// predicate to see if the schema contains specific values.
+//
+// The predicate MUST NOT keep a copy of the json schema NOR modify the
+// schema.
+func SchemaHas(s *apiextensions.JSONSchemaProps, pred func(s *apiextensions.JSONSchemaProps) bool) bool {
+	if s == nil {
+		return false
+	}
+
+	if pred(s) {
+		return true
+	}
+
+	if s.Items != nil {
+		if s.Items != nil && schemaHasRecurse(s.Items.Schema, pred) {
+			return true
+		}
+		for i := range s.Items.JSONSchemas {
+			if schemaHasRecurse(&s.Items.JSONSchemas[i], pred) {
+				return true
+			}
+		}
+	}
+	for i := range s.AllOf {
+		if schemaHasRecurse(&s.AllOf[i], pred) {
+			return true
+		}
+	}
+	for i := range s.AnyOf {
+		if schemaHasRecurse(&s.AnyOf[i], pred) {
+			return true
+		}
+	}
+	for i := range s.OneOf {
+		if schemaHasRecurse(&s.OneOf[i], pred) {
+			return true
+		}
+	}
+	if schemaHasRecurse(s.Not, pred) {
+		return true
+	}
+	for _, s := range s.Properties {
+		if schemaHasRecurse(&s, pred) {
+			return true
+		}
+	}
+	if s.AdditionalProperties != nil {
+		if schemaHasRecurse(s.AdditionalProperties.Schema, pred) {
+			return true
+		}
+	}
+	for _, s := range s.PatternProperties {
+		if schemaHasRecurse(&s, pred) {
+			return true
+		}
+	}
+	if s.AdditionalItems != nil {
+		if schemaHasRecurse(s.AdditionalItems.Schema, pred) {
+			return true
+		}
+	}
+	for _, s := range s.Definitions {
+		if schemaHasRecurse(&s, pred) {
+			return true
+		}
+	}
+	for _, d := range s.Dependencies {
+		if schemaHasRecurse(d.Schema, pred) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func specHasKubernetesExtensions(spec *apiextensions.CustomResourceDefinitionSpec) bool {
+	if spec.Validation != nil && schemaHasKubernetesExtensions(spec.Validation.OpenAPIV3Schema) {
+		return true
+	}
+	for _, v := range spec.Versions {
+		if v.Schema != nil && schemaHasKubernetesExtensions(v.Schema.OpenAPIV3Schema) {
+			return true
+		}
+	}
+	return false
+}
+
+func schemaHasKubernetesExtensions(s *apiextensions.JSONSchemaProps) bool {
+	return SchemaHas(s, func(s *apiextensions.JSONSchemaProps) bool {
+		return s.XEmbeddedResource || s.XPreserveUnknownFields != nil || s.XIntOrString || len(s.XListMapKeys) > 0 || s.XListType != nil || len(s.XValidations) > 0
+	})
+}
+
+// requireStructuralSchema returns true if schemas specified must be structural
+func requireStructuralSchema(oldCRDSpec *apiextensions.CustomResourceDefinitionSpec) bool {
+	if oldCRDSpec != nil && specHasNonStructuralSchema(oldCRDSpec) {
+		// don't tighten validation on existing persisted data
+		return false
+	}
+	return true
+}
+
+func specHasNonStructuralSchema(spec *apiextensions.CustomResourceDefinitionSpec) bool {
+	if spec.Validation != nil && schemaIsNonStructural(spec.Validation.OpenAPIV3Schema) {
+		return true
+	}
+	for _, v := range spec.Versions {
+		if v.Schema != nil && schemaIsNonStructural(v.Schema.OpenAPIV3Schema) {
+			return true
+		}
+	}
+	return false
+}
+func schemaIsNonStructural(schema *apiextensions.JSONSchemaProps) bool {
+	if schema == nil {
+		return false
+	}
+	ss, err := structuralschema.NewStructural(schema)
+	if err != nil {
+		return true
+	}
+	return len(structuralschema.ValidateStructural(nil, ss)) > 0
+}
+
+// requirePrunedDefaults returns false if there are any unpruned defaults in oldCRDSpec, and true otherwise.
+func requirePrunedDefaults(oldCRDSpec *apiextensions.CustomResourceDefinitionSpec) bool {
+	if oldCRDSpec.Validation != nil {
+		if has, err := schemaHasUnprunedDefaults(oldCRDSpec.Validation.OpenAPIV3Schema); err == nil && has {
+			return false
+		}
+	}
+	for _, v := range oldCRDSpec.Versions {
+		if v.Schema == nil {
+			continue
+		}
+		if has, err := schemaHasUnprunedDefaults(v.Schema.OpenAPIV3Schema); err == nil && has {
+			return false
+		}
+	}
+	return true
+}
+func schemaHasUnprunedDefaults(schema *apiextensions.JSONSchemaProps) (bool, error) {
+	if schema == nil || !schemaHasDefaults(schema) {
+		return false, nil
+	}
+	ss, err := structuralschema.NewStructural(schema)
+	if err != nil {
+		return false, err
+	}
+	if errs := structuralschema.ValidateStructural(nil, ss); len(errs) > 0 {
+		return false, errs.ToAggregate()
+	}
+	pruned := ss.DeepCopy()
+	if err := structuraldefaulting.PruneDefaults(pruned); err != nil {
+		return false, err
+	}
+	return !reflect.DeepEqual(ss, pruned), nil
+}
+
+// requireAtomicSetType returns false if the old CRD spec has at least one x-kubernetes-list-type=set with a non-atomic items type, and true otherwise.
+func requireAtomicSetType(oldCRDSpec *apiextensions.CustomResourceDefinitionSpec) bool {
+	return !HasSchemaWith(oldCRDSpec, hasNonAtomicSetType)
+}
+
+// hasNonAtomicSetType recurses over the schema and returns whether any list of type "set" has non-atomic item types.
+func hasNonAtomicSetType(schema *apiextensions.JSONSchemaProps) bool {
+	return SchemaHas(schema, func(schema *apiextensions.JSONSchemaProps) bool {
+		if schema.XListType != nil && *schema.XListType == "set" && schema.Items != nil && schema.Items.Schema != nil { // we don't support schema.Items.JSONSchemas
+			is := schema.Items.Schema
+			switch is.Type {
+			case "array":
+				return is.XListType != nil && *is.XListType != "atomic" // atomic is the implicit default behaviour if unset, hence != atomic is wrong
+			case "object":
+				return is.XMapType == nil || *is.XMapType != "atomic" // granular is the implicit default behaviour if unset, hence nil and != atomic are wrong
+			default:
+				return false // scalar types are always atomic
+			}
+		}
+		return false
+	})
+}
+
+func requireMapListKeysMapSetValidation(oldCRDSpec *apiextensions.CustomResourceDefinitionSpec) bool {
+	return !HasSchemaWith(oldCRDSpec, hasInvalidMapListKeysMapSet)
+}
+
+func hasInvalidMapListKeysMapSet(schema *apiextensions.JSONSchemaProps) bool {
+	return SchemaHas(schema, func(schema *apiextensions.JSONSchemaProps) bool {
+		return len(validateMapListKeysMapSet(schema, field.NewPath(""))) > 0
+	})
+}
+
+// requireValidPropertyType returns true if valid openapi v3 types should be required for the given API version
+func requireValidPropertyType(oldCRDSpec *apiextensions.CustomResourceDefinitionSpec) bool {
+	if oldCRDSpec != nil && specHasInvalidTypes(oldCRDSpec) {
+		// don't tighten validation on existing persisted data
+		return false
+	}
+	return true
+}
+
+// validateAPIApproval returns a list of errors if the API approval annotation isn't valid
+func validateAPIApproval(newCRD, oldCRD *apiextensions.CustomResourceDefinition) field.ErrorList {
+	// check to see if we need to confirm API approval for the kube group.
+	if !apihelpers.IsProtectedCommunityGroup(newCRD.Spec.Group) {
+		// no-op for non-protected groups
+		return nil
+	}
+
+	// default to a state that allows missing values to continue to be missing
+	var oldApprovalState *apihelpers.APIApprovalState
+	if oldCRD != nil {
+		t, _ := apihelpers.GetAPIApprovalState(oldCRD.Annotations)
+		oldApprovalState = &t // +k8s:verify-mutation:reason=clone
+	}
+	newApprovalState, reason := apihelpers.GetAPIApprovalState(newCRD.Annotations)
+
+	// if the approval state hasn't changed, never fail on approval validation
+	// this is allowed so that a v1 client that is simply updating spec and not mutating this value doesn't get rejected.  Imagine a controller controlling a CRD spec.
+	if oldApprovalState != nil && *oldApprovalState == newApprovalState {
+		return nil
+	}
+
+	// in v1, we require valid approval strings
+	switch newApprovalState {
+	case apihelpers.APIApprovalInvalid:
+		return field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations").Key(apiextensionsv1beta1.KubeAPIApprovedAnnotation), newCRD.Annotations[apiextensionsv1beta1.KubeAPIApprovedAnnotation], reason)}
+	case apihelpers.APIApprovalMissing:
+		return field.ErrorList{field.Required(field.NewPath("metadata", "annotations").Key(apiextensionsv1beta1.KubeAPIApprovedAnnotation), reason)}
+	case apihelpers.APIApproved, apihelpers.APIApprovalBypassed:
+		// success
+		return nil
+	default:
+		return field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations").Key(apiextensionsv1beta1.KubeAPIApprovedAnnotation), newCRD.Annotations[apiextensionsv1beta1.KubeAPIApprovedAnnotation], reason)}
+	}
+}
+
+func validatePreserveUnknownFields(crd, oldCRD *apiextensions.CustomResourceDefinition) field.ErrorList {
+	if oldCRD != nil && oldCRD.Spec.PreserveUnknownFields != nil && *oldCRD.Spec.PreserveUnknownFields {
+		// no-op for compatibility with existing data
+		return nil
+	}
+
+	var errs field.ErrorList
+	if crd != nil && crd.Spec.PreserveUnknownFields != nil && *crd.Spec.PreserveUnknownFields {
+		// disallow changing spec.preserveUnknownFields=false to spec.preserveUnknownFields=true
+		errs = append(errs, field.Invalid(field.NewPath("spec").Child("preserveUnknownFields"), crd.Spec.PreserveUnknownFields, "cannot set to true, set x-kubernetes-preserve-unknown-fields to true in spec.versions[*].schema instead"))
+	}
+	return errs
+}
+
+func specHasInvalidTypes(spec *apiextensions.CustomResourceDefinitionSpec) bool {
+	if spec.Validation != nil && SchemaHasInvalidTypes(spec.Validation.OpenAPIV3Schema) {
+		return true
+	}
+	for _, v := range spec.Versions {
+		if v.Schema != nil && SchemaHasInvalidTypes(v.Schema.OpenAPIV3Schema) {
+			return true
+		}
+	}
+	return false
+}
+
+// SchemaHasInvalidTypes returns true if the schema contains a type that is not a valid OpenAPI v3 type.
+func SchemaHasInvalidTypes(s *apiextensions.JSONSchemaProps) bool {
+	return SchemaHas(s, func(s *apiextensions.JSONSchemaProps) bool {
+		return len(s.Type) > 0 && !openapiV3Types.Has(s.Type)
+	})
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go
new file mode 100644
index 0000000000..1f61a87fa4
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/compilation.go
@@ -0,0 +1,360 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/checker"
+	"github.com/google/cel-go/common/types"
+
+	apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+	"k8s.io/apimachinery/pkg/util/version"
+	celconfig "k8s.io/apiserver/pkg/apis/cel"
+	apiservercel "k8s.io/apiserver/pkg/cel"
+	"k8s.io/apiserver/pkg/cel/environment"
+	"k8s.io/apiserver/pkg/cel/library"
+	"k8s.io/apiserver/pkg/cel/metrics"
+)
+
+const (
+	// ScopedVarName is the variable name assigned to the locally scoped data element of a CEL validation
+	// expression.
+	ScopedVarName = "self"
+
+	// OldScopedVarName is the variable name assigned to the existing value of the locally scoped data element of a
+	// CEL validation expression.
+	OldScopedVarName = "oldSelf"
+)
+
+// CompilationResult represents the cel compilation result for one rule
+type CompilationResult struct {
+	Program cel.Program
+	Error   *apiservercel.Error
+	// If true, the compiled expression contains a reference to the identifier "oldSelf".
+	UsesOldSelf bool
+	// Represents the worst-case cost of the compiled expression in terms of CEL's cost units, as used by cel.EstimateCost.
+	MaxCost uint64
+	// MaxCardinality represents the worst-case number of times this validation rule could be invoked if contained under an
+	// unbounded map or list in an OpenAPIv3 schema.
+	MaxCardinality uint64
+	// MessageExpression represents the cel Program that should be evaluated to generate an error message if the rule
+	// fails to validate. If no MessageExpression was given, or if this expression failed to compile, this will be nil.
+	MessageExpression cel.Program
+	// MessageExpressionError represents an error encountered during compilation of MessageExpression. If no error was
+	// encountered, this will be nil.
+	MessageExpressionError *apiservercel.Error
+	// MessageExpressionMaxCost represents the worst-case cost of the compiled MessageExpression in terms of CEL's cost units,
+	// as used by cel.EstimateCost.
+	MessageExpressionMaxCost uint64
+	// NormalizedRuleFieldPath represents the relative fieldPath specified by user after normalization.
+	NormalizedRuleFieldPath string
+}
+
+// EnvLoader delegates the decision of which CEL environment to use for each expression.
+// Callers should return the appropriate CEL environment based on the guidelines from
+// environment.NewExpressions and environment.StoredExpressions.
+type EnvLoader interface {
+	// RuleEnv returns the appropriate environment from the EnvSet for the given CEL rule.
+	RuleEnv(envSet *environment.EnvSet, expression string) *cel.Env
+	// MessageExpressionEnv returns the appropriate environment from the EnvSet for the given
+	// CEL messageExpressions.
+	MessageExpressionEnv(envSet *environment.EnvSet, expression string) *cel.Env
+}
+
+// NewExpressionsEnvLoader creates an EnvLoader that always uses the NewExpressions environment type.
+func NewExpressionsEnvLoader() EnvLoader {
+	return alwaysNewEnvLoader{loadFn: func(envSet *environment.EnvSet) *cel.Env {
+		return envSet.NewExpressionsEnv()
+	}}
+}
+
+// StoredExpressionsEnvLoader creates an EnvLoader that always uses the StoredExpressions environment type.
+func StoredExpressionsEnvLoader() EnvLoader {
+	return alwaysNewEnvLoader{loadFn: func(envSet *environment.EnvSet) *cel.Env {
+		return envSet.StoredExpressionsEnv()
+	}}
+}
+
+type alwaysNewEnvLoader struct {
+	loadFn func(envSet *environment.EnvSet) *cel.Env
+}
+
+func (pe alwaysNewEnvLoader) RuleEnv(envSet *environment.EnvSet, _ string) *cel.Env {
+	return pe.loadFn(envSet)
+}
+
+func (pe alwaysNewEnvLoader) MessageExpressionEnv(envSet *environment.EnvSet, _ string) *cel.Env {
+	return pe.loadFn(envSet)
+}
+
+// Compile compiles all the XValidations rules (without recursing into the schema) and returns a slice containing a
+// CompilationResult for each ValidationRule, or an error. declType is expected to be a CEL DeclType corresponding
+// to the structural schema.
+// Each CompilationResult may contain:
+//   - non-nil Program, nil Error: The program was compiled successfully
+//   - nil Program, non-nil Error: Compilation resulted in an error
+//   - nil Program, nil Error: The provided rule was empty so compilation was not attempted
+//
+// perCallLimit was added for testing purposes only. Callers should always use const PerCallLimit from k8s.io/apiserver/pkg/apis/cel/config.go as input.
+// baseEnv is used as the base CEL environment, see common.BaseEnvironment.
+func Compile(s *schema.Structural, declType *apiservercel.DeclType, perCallLimit uint64, baseEnvSet *environment.EnvSet, envLoader EnvLoader) ([]CompilationResult, error) {
+	t := time.Now()
+	defer func() {
+		metrics.Metrics.ObserveCompilation(time.Since(t))
+	}()
+
+	if len(s.XValidations) == 0 {
+		return nil, nil
+	}
+	if declType == nil {
+		return nil, errors.New("failed to convert to declType for CEL validation rules")
+	}
+	celRules := s.XValidations
+
+	oldSelfEnvSet, optionalOldSelfEnvSet, err := prepareEnvSet(baseEnvSet, declType)
+	if err != nil {
+		return nil, err
+	}
+	estimator := newCostEstimator(declType)
+	// compResults is the return value which saves a list of compilation results in the same order as x-kubernetes-validations rules.
+	compResults := make([]CompilationResult, len(celRules))
+	maxCardinality := maxCardinality(declType.MinSerializedSize)
+	for i, rule := range celRules {
+		ruleEnvSet := oldSelfEnvSet
+		if rule.OptionalOldSelf != nil && *rule.OptionalOldSelf {
+			ruleEnvSet = optionalOldSelfEnvSet
+		}
+		compResults[i] = compileRule(s, rule, ruleEnvSet, envLoader, estimator, maxCardinality, perCallLimit)
+	}
+
+	return compResults, nil
+}
+
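+// prepareEnvSet extends the base environment set with the "self" variable bound
+// to the scoped declaration type, returning one variant in which "oldSelf" has
+// the same type and one in which "oldSelf" is wrapped in an optional type.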
+func prepareEnvSet(baseEnvSet *environment.EnvSet, declType *apiservercel.DeclType) (oldSelfEnvSet *environment.EnvSet, optionalOldSelfEnvSet *environment.EnvSet, err error) {
+	scopedType := declType.MaybeAssignTypeName(generateUniqueSelfTypeName())
+
+	oldSelfEnvSet, err = baseEnvSet.Extend(
+		environment.VersionedOptions{
+			// Feature epoch was actually 1.23, but we artificially set it to 1.0 because these
+			// options should always be present.
+			IntroducedVersion: version.MajorMinor(1, 0),
+			EnvOptions: []cel.EnvOption{
+				cel.Variable(ScopedVarName, scopedType.CelType()),
+			},
+			DeclTypes: []*apiservercel.DeclType{
+				scopedType,
+			},
+		},
+		environment.VersionedOptions{
+			IntroducedVersion: version.MajorMinor(1, 24),
+			EnvOptions: []cel.EnvOption{
+				cel.Variable(OldScopedVarName, scopedType.CelType()),
+			},
+		},
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	optionalOldSelfEnvSet, err = baseEnvSet.Extend(
+		environment.VersionedOptions{
+			// Feature epoch was actually 1.23, but we artificially set it to 1.0 because these
+			// options should always be present.
+			IntroducedVersion: version.MajorMinor(1, 0),
+			EnvOptions: []cel.EnvOption{
+				cel.Variable(ScopedVarName, scopedType.CelType()),
+			},
+			DeclTypes: []*apiservercel.DeclType{
+				scopedType,
+			},
+		},
+		environment.VersionedOptions{
+			IntroducedVersion: version.MajorMinor(1, 24),
+			EnvOptions: []cel.EnvOption{
+				cel.Variable(OldScopedVarName, types.NewOptionalType(scopedType.CelType())),
+			},
+		},
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return oldSelfEnvSet, optionalOldSelfEnvSet, nil
+}
+
+func compileRule(s *schema.Structural, rule apiextensions.ValidationRule, envSet *environment.EnvSet, envLoader EnvLoader, estimator *library.CostEstimator, maxCardinality uint64, perCallLimit uint64) (compilationResult CompilationResult) {
+	if len(strings.TrimSpace(rule.Rule)) == 0 {
+		// include a compilation result, but leave both program and error nil per documented return semantics of this
+		// function
+		return
+	}
+	ruleEnv := envLoader.RuleEnv(envSet, rule.Rule)
+	ast, issues := ruleEnv.Compile(rule.Rule)
+	if issues != nil {
+		compilationResult.Error = &apiservercel.Error{Type: apiservercel.ErrorTypeInvalid, Detail: "compilation failed: " + issues.String()}
+		return
+	}
+	if ast.OutputType() != cel.BoolType {
+		compilationResult.Error = &apiservercel.Error{Type: apiservercel.ErrorTypeInvalid, Detail: "cel expression must evaluate to a bool"}
+		return
+	}
+
+	checkedExpr, err := cel.AstToCheckedExpr(ast)
+	if err != nil {
+		// should be impossible since env.Compile returned no issues
+		compilationResult.Error = &apiservercel.Error{Type: apiservercel.ErrorTypeInternal, Detail: "unexpected compilation error: " + err.Error()}
+		return
+	}
+	for _, ref := range checkedExpr.ReferenceMap {
+		if ref.Name == OldScopedVarName {
+			compilationResult.UsesOldSelf = true
+			break
+		}
+	}
+
+	// TODO: Ideally we could configure the per expression limit at validation time and set it to the remaining overall budget, but we would either need a way to pass in a limit at evaluation time or move program creation to validation time
+	prog, err := ruleEnv.Program(ast,
+		cel.CostLimit(perCallLimit),
+		cel.CostTracking(estimator),
+		cel.InterruptCheckFrequency(celconfig.CheckFrequency),
+	)
+	if err != nil {
+		compilationResult.Error = &apiservercel.Error{Type: apiservercel.ErrorTypeInvalid, Detail: "program instantiation failed: " + err.Error()}
+		return
+	}
+	costEst, err := ruleEnv.EstimateCost(ast, estimator)
+	if err != nil {
+		compilationResult.Error = &apiservercel.Error{Type: apiservercel.ErrorTypeInternal, Detail: "cost estimation failed: " + err.Error()}
+		return
+	}
+	compilationResult.MaxCost = costEst.Max
+	compilationResult.MaxCardinality = maxCardinality
+	compilationResult.Program = prog
+	if rule.MessageExpression != "" {
+		messageEnv := envLoader.MessageExpressionEnv(envSet, rule.MessageExpression)
+		ast, issues := messageEnv.Compile(rule.MessageExpression)
+		if issues != nil {
+			compilationResult.MessageExpressionError = &apiservercel.Error{Type: apiservercel.ErrorTypeInvalid, Detail: "messageExpression compilation failed: " + issues.String()}
+			return
+		}
+		if ast.OutputType() != cel.StringType {
+			compilationResult.MessageExpressionError = &apiservercel.Error{Type: apiservercel.ErrorTypeInvalid, Detail: "messageExpression must evaluate to a string"}
+			return
+		}
+
+		_, err := cel.AstToCheckedExpr(ast)
+		if err != nil {
+			compilationResult.MessageExpressionError = &apiservercel.Error{Type: apiservercel.ErrorTypeInternal, Detail: "unexpected messageExpression compilation error: " + err.Error()}
+			return
+		}
+
+		msgProg, err := messageEnv.Program(ast,
+			cel.CostLimit(perCallLimit),
+			cel.CostTracking(estimator),
+			cel.InterruptCheckFrequency(celconfig.CheckFrequency),
+		)
+		if err != nil {
+			compilationResult.MessageExpressionError = &apiservercel.Error{Type: apiservercel.ErrorTypeInvalid, Detail: "messageExpression instantiation failed: " + err.Error()}
+			return
+		}
+		costEst, err := messageEnv.EstimateCost(ast, estimator)
+		if err != nil {
+			compilationResult.MessageExpressionError = &apiservercel.Error{Type: apiservercel.ErrorTypeInternal, Detail: "cost estimation failed for messageExpression: " + err.Error()}
+			return
+		}
+		compilationResult.MessageExpression = msgProg
+		compilationResult.MessageExpressionMaxCost = costEst.Max
+	}
+	if rule.FieldPath != "" {
+		validFieldPath, _, err := ValidFieldPath(rule.FieldPath, s)
+		if err == nil {
+			compilationResult.NormalizedRuleFieldPath = validFieldPath.String()
+		}
+	}
+	return
+}
+
+// generateUniqueSelfTypeName creates a placeholder type name to use in CEL programs for cases
+// where we do not wish to expose a stable type name to CEL validator rule authors. For this to effectively prevent
+// developers from depending on the generated name (i.e. using it in CEL programs), it must be changed each time a
+// CRD is created or updated.
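+// The generated name might look like "selfType123456789", for example.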
+func generateUniqueSelfTypeName() string {
+	return fmt.Sprintf("selfType%d", time.Now().Nanosecond())
+}
+
+func newCostEstimator(root *apiservercel.DeclType) *library.CostEstimator {
+	return &library.CostEstimator{SizeEstimator: &sizeEstimator{root: root}}
+}
+
+type sizeEstimator struct {
+	root *apiservercel.DeclType
+}
+
+func (c *sizeEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate {
+	if len(element.Path()) == 0 {
+		// Path() can return an empty list, early exit if it does since we can't
+		// provide size estimates when that happens
+		return nil
+	}
+	currentNode := c.root
+	// cut off "self" from path, since we always start there
+	for _, name := range element.Path()[1:] {
+		switch name {
+		case "@items", "@values":
+			if currentNode.ElemType == nil {
+				return nil
+			}
+			currentNode = currentNode.ElemType
+		case "@keys":
+			if currentNode.KeyType == nil {
+				return nil
+			}
+			currentNode = currentNode.KeyType
+		default:
+			field, ok := currentNode.Fields[name]
+			if !ok {
+				return nil
+			}
+			if field.Type == nil {
+				return nil
+			}
+			currentNode = field.Type
+		}
+	}
+	return &checker.SizeEstimate{Min: 0, Max: uint64(currentNode.MaxElements)}
+}
+
+func (c *sizeEstimator) EstimateCallCost(function, overloadID string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
+	return nil
+}
+
+// maxCardinality returns the maximum number of times data conforming to the minimum size given could possibly exist in
+// an object serialized to JSON. For cases where a schema is contained under map or array schemas of unbounded
+// size, this can be used as an estimate of the worst-case number of times data matching the schema could be repeated.
+// Note that this only assumes a single comma between data elements, so if the schema is contained under only maps,
+// this estimates a higher cardinality than would be possible. DeclType.MinSerializedSize is meant to be passed to
+// this function.
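+// For example, assuming a 3MiB request size limit, a schema whose minimum
+// serialized size is 5 bytes could appear at most 3145728/6 = 524288 times.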
+func maxCardinality(minSize int64) uint64 {
+	sz := minSize + 1 // assume at least one comma between elements
+	return uint64(celconfig.MaxRequestSizeBytes / sz)
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/maplist.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/maplist.go
new file mode 100644
index 0000000000..32cdf86fef
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/maplist.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model"
+	"k8s.io/apiserver/pkg/cel/common"
+)
+
+// makeMapList returns a queryable interface over the provided items of an
+// x-kubernetes-list-type=map list. If the provided schema is _not_ an array with
+// x-kubernetes-list-type=map, it returns an empty mapList.
+func makeMapList(sts *schema.Structural, items []interface{}) (rv common.MapList) {
+	return common.MakeMapList(&model.Structural{Structural: sts}, items)
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/adaptor.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/adaptor.go
new file mode 100644
index 0000000000..a66ab42937
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/adaptor.go
@@ -0,0 +1,331 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package model
+
+import (
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+	"k8s.io/apiserver/pkg/cel/common"
+)
+
+var _ common.Schema = (*Structural)(nil)
+var _ common.SchemaOrBool = (*StructuralOrBool)(nil)
+
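+// Structural adapts a *schema.Structural to the common.Schema interface so the
+// generic CEL schema logic in k8s.io/apiserver can operate on structural schemas.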
+type Structural struct {
+	Structural *schema.Structural
+}
+
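+// StructuralOrBool adapts a *schema.StructuralOrBool to the common.SchemaOrBool
+// interface.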
+type StructuralOrBool struct {
+	StructuralOrBool *schema.StructuralOrBool
+}
+
+func (sb *StructuralOrBool) Schema() common.Schema {
+	if sb.StructuralOrBool.Structural == nil {
+		return nil
+	}
+	return &Structural{Structural: sb.StructuralOrBool.Structural}
+}
+
+func (sb *StructuralOrBool) Allows() bool {
+	return sb.StructuralOrBool.Bool
+}
+
+func (s *Structural) Type() string {
+	return s.Structural.Type
+}
+
+func (s *Structural) Format() string {
+	if s.Structural.ValueValidation == nil {
+		return ""
+	}
+	return s.Structural.ValueValidation.Format
+}
+
+func (s *Structural) Pattern() string {
+	if s.Structural.ValueValidation == nil {
+		return ""
+	}
+	return s.Structural.ValueValidation.Pattern
+}
+
+func (s *Structural) Items() common.Schema {
+	if s.Structural.Items == nil {
+		return nil
+	}
+	return &Structural{Structural: s.Structural.Items}
+}
+
+func (s *Structural) Properties() map[string]common.Schema {
+	if s.Structural.Properties == nil {
+		return nil
+	}
+	res := make(map[string]common.Schema, len(s.Structural.Properties))
+	for n, prop := range s.Structural.Properties {
+		s := prop
+		res[n] = &Structural{Structural: &s}
+	}
+	return res
+}
+
+func (s *Structural) AdditionalProperties() common.SchemaOrBool {
+	if s.Structural.AdditionalProperties == nil {
+		return nil
+	}
+	return &StructuralOrBool{StructuralOrBool: s.Structural.AdditionalProperties}
+}
+
+func (s *Structural) Default() any {
+	return s.Structural.Default.Object
+}
+
+func (s *Structural) Minimum() *float64 {
+	if s.Structural.ValueValidation == nil {
+		return nil
+	}
+	return s.Structural.ValueValidation.Minimum
+}
+
+func (s *Structural) IsExclusiveMinimum() bool {
+	if s.Structural.ValueValidation == nil {
+		return false
+	}
+	return s.Structural.ValueValidation.ExclusiveMinimum
+}
+
+func (s *Structural) Maximum() *float64 {
+	if s.Structural.ValueValidation == nil {
+		return nil
+	}
+	return s.Structural.ValueValidation.Maximum
+}
+
+func (s *Structural) IsExclusiveMaximum() bool {
+	if s.Structural.ValueValidation == nil {
+		return false
+	}
+	return s.Structural.ValueValidation.ExclusiveMaximum
+}
+
+func (s *Structural) MultipleOf() *float64 {
+	if s.Structural.ValueValidation == nil {
+		return nil
+	}
+	return s.Structural.ValueValidation.MultipleOf
+}
+
+func (s *Structural) MinItems() *int64 {
+	if s.Structural.ValueValidation == nil {
+		return nil
+	}
+	return s.Structural.ValueValidation.MinItems
+}
+
+func (s *Structural) MaxItems() *int64 {
+	if s.Structural.ValueValidation == nil {
+		return nil
+	}
+	return s.Structural.ValueValidation.MaxItems
+}
+
+func (s *Structural) MinLength() *int64 {
+	if s.Structural.ValueValidation == nil {
+		return nil
+	}
+	return s.Structural.ValueValidation.MinLength
+}
+
+func (s *Structural) MaxLength() *int64 {
+	if s.Structural.ValueValidation == nil {
+		return nil
+	}
+	return s.Structural.ValueValidation.MaxLength
+}
+
+func (s *Structural) MinProperties() *int64 {
+	if s.Structural.ValueValidation == nil {
+		return nil
+	}
+	return s.Structural.ValueValidation.MinProperties
+}
+
+func (s *Structural) MaxProperties() *int64 {
+	if s.Structural.ValueValidation == nil {
+		return nil
+	}
+	return s.Structural.ValueValidation.MaxProperties
+}
+
+func (s *Structural) Required() []string {
+	if s.Structural.ValueValidation == nil {
+		return nil
+	}
+	return s.Structural.ValueValidation.Required
+}
+
+func (s *Structural) UniqueItems() bool {
+	// This field is forbidden in structural schemas,
+	// but x-kubernetes-list-type: set can be used to achieve the same effect.
+	return false
+}
+
+func (s *Structural) Enum() []any {
+	if s.Structural.ValueValidation == nil {
+		return nil
+	}
+	ret := make([]any, 0, len(s.Structural.ValueValidation.Enum))
+	for _, e := range s.Structural.ValueValidation.Enum {
+		ret = append(ret, e.Object)
+	}
+	return ret
+}
+
+func (s *Structural) Nullable() bool {
+	return s.Structural.Nullable
+}
+
+func (s *Structural) IsXIntOrString() bool {
+	return s.Structural.XIntOrString
+}
+
+func (s *Structural) IsXEmbeddedResource() bool {
+	return s.Structural.XEmbeddedResource
+}
+
+func (s *Structural) IsXPreserveUnknownFields() bool {
+	return s.Structural.XPreserveUnknownFields
+}
+
+func (s *Structural) XListType() string {
+	if s.Structural.XListType == nil {
+		return ""
+	}
+	return *s.Structural.XListType
+}
+
+func (s *Structural) XMapType() string {
+	if s.Structural.XMapType == nil {
+		return ""
+	}
+	return *s.Structural.XMapType
+}
+
+func (s *Structural) XListMapKeys() []string {
+	return s.Structural.XListMapKeys
+}
+
+func (s *Structural) AllOf() []common.Schema {
+	var res []common.Schema
+	for _, subSchema := range s.Structural.ValueValidation.AllOf {
+		subSchema := subSchema
+		res = append(res, nestedValueValidationToStructural(&subSchema))
+	}
+	return res
+}
+
+func (s *Structural) AnyOf() []common.Schema {
+	var res []common.Schema
+	for _, subSchema := range s.Structural.ValueValidation.AnyOf {
+		subSchema := subSchema
+		res = append(res, nestedValueValidationToStructural(&subSchema))
+	}
+	return res
+}
+
+func (s *Structural) OneOf() []common.Schema {
+	var res []common.Schema
+	for _, subSchema := range s.Structural.ValueValidation.OneOf {
+		subSchema := subSchema
+		res = append(res, nestedValueValidationToStructural(&subSchema))
+	}
+	return res
+}
+
+func (s *Structural) Not() common.Schema {
+	if s.Structural.ValueValidation.Not == nil {
+		return nil
+	}
+	return nestedValueValidationToStructural(s.Structural.ValueValidation.Not)
+}
+
+// nestedValueValidationToStructural converts a nested value validation to
+// an equivalent structural schema instance.
+//
+// This lets us avoid needing a separate adaptor for the nested value
+// validations, and doesn't cost too much since we are usually exploring the
+// entire schema anyway.
+func nestedValueValidationToStructural(nvv *schema.NestedValueValidation) *Structural {
+	var newItems *schema.Structural
+	if nvv.Items != nil {
+		newItems = nestedValueValidationToStructural(nvv.Items).Structural
+	}
+
+	var newProperties map[string]schema.Structural
+	for k, v := range nvv.Properties {
+		if newProperties == nil {
+			newProperties = make(map[string]schema.Structural)
+		}
+
+		v := v
+		newProperties[k] = *nestedValueValidationToStructural(&v).Structural
+	}
+
+	var newAdditionalProperties *schema.StructuralOrBool
+	if nvv.AdditionalProperties != nil {
+		newAdditionalProperties = &schema.StructuralOrBool{Structural: nestedValueValidationToStructural(nvv.AdditionalProperties).Structural}
+	}
+
+	return &Structural{
+		Structural: &schema.Structural{
+			Items:                newItems,
+			Properties:           newProperties,
+			AdditionalProperties: newAdditionalProperties,
+			ValueValidation:      &nvv.ValueValidation,
+			ValidationExtensions: nvv.ValidationExtensions,
+		},
+	}
+}
+
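+// StructuralValidationRule is a common.ValidationRule backed by the plain string
+// fields of a structural schema's x-kubernetes-validations entry.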
+type StructuralValidationRule struct {
+	rule, message, messageExpression, fieldPath string
+}
+
+func (s *StructuralValidationRule) Rule() string {
+	return s.rule
+}
+func (s *StructuralValidationRule) Message() string {
+	return s.message
+}
+func (s *StructuralValidationRule) FieldPath() string {
+	return s.fieldPath
+}
+func (s *StructuralValidationRule) MessageExpression() string {
+	return s.messageExpression
+}
+
+func (s *Structural) XValidations() []common.ValidationRule {
+	if len(s.Structural.XValidations) == 0 {
+		return nil
+	}
+	result := make([]common.ValidationRule, len(s.Structural.XValidations))
+	for i, v := range s.Structural.XValidations {
+		result[i] = &StructuralValidationRule{rule: v.Rule, message: v.Message, messageExpression: v.MessageExpression, fieldPath: v.FieldPath}
+	}
+	return result
+}
+
+func (s *Structural) WithTypeAndObjectMeta() common.Schema {
+	return &Structural{Structural: WithTypeAndObjectMeta(s.Structural)}
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/schemas.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/schemas.go
new file mode 100644
index 0000000000..f1c5ad9e9b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model/schemas.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package model
+
+import (
+	apiservercel "k8s.io/apiserver/pkg/cel"
+	"k8s.io/apiserver/pkg/cel/common"
+
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+)
+
+// SchemaDeclType converts the structural schema to a CEL declaration, or returns nil if the
+// structural schema should not be exposed in CEL expressions.
+// Set isResourceRoot to true for the root of a custom resource or embedded resource.
+//
+// Schemas with XPreserveUnknownFields are not exposed unless they are objects. Array and "maps" schemas
+// are not exposed if their items or additionalProperties schemas are not exposed. Object Properties are not exposed
+// if their schema is not exposed.
+//
+// The CEL declaration for objects with XPreserveUnknownFields does not expose unknown fields.
+func SchemaDeclType(s *schema.Structural, isResourceRoot bool) *apiservercel.DeclType {
+	return common.SchemaDeclType(&Structural{Structural: s}, isResourceRoot)
+}
+
+// WithTypeAndObjectMeta ensures the kind, apiVersion, metadata.name and
+// metadata.generateName properties are specified, making a shallow copy of the provided schema if needed.
+func WithTypeAndObjectMeta(s *schema.Structural) *schema.Structural {
+	if s.Properties != nil &&
+		s.Properties["kind"].Type == "string" &&
+		s.Properties["apiVersion"].Type == "string" &&
+		s.Properties["metadata"].Type == "object" &&
+		s.Properties["metadata"].Properties != nil &&
+		s.Properties["metadata"].Properties["name"].Type == "string" &&
+		s.Properties["metadata"].Properties["generateName"].Type == "string" {
+		return s
+	}
+	result := &schema.Structural{
+		AdditionalProperties: s.AdditionalProperties,
+		Generic:              s.Generic,
+		Extensions:           s.Extensions,
+		ValueValidation:      s.ValueValidation,
+		ValidationExtensions: s.ValidationExtensions,
+	}
+	props := make(map[string]schema.Structural, len(s.Properties))
+	for k, prop := range s.Properties {
+		props[k] = prop
+	}
+	stringType := schema.Structural{Generic: schema.Generic{Type: "string"}}
+	props["kind"] = stringType
+	props["apiVersion"] = stringType
+	props["metadata"] = schema.Structural{
+		Generic: schema.Generic{Type: "object"},
+		Properties: map[string]schema.Structural{
+			"name":         stringType,
+			"generateName": stringType,
+		},
+	}
+	result.Properties = props
+
+	return result
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation.go
new file mode 100644
index 0000000000..575fd5e2e9
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/validation.go
@@ -0,0 +1,905 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"math"
+	"reflect"
+	"regexp"
+	"strings"
+	"time"
+
+	celgo "github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/interpreter"
+
+	apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model"
+	"k8s.io/apiextensions-apiserver/pkg/features"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/apiserver/pkg/cel"
+	"k8s.io/apiserver/pkg/cel/common"
+	"k8s.io/apiserver/pkg/cel/environment"
+	"k8s.io/apiserver/pkg/cel/metrics"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/apiserver/pkg/warning"
+	"k8s.io/klog/v2"
+	"k8s.io/utils/ptr"
+
+	celconfig "k8s.io/apiserver/pkg/apis/cel"
+)
+
+// Validator parallels the structure of schema.Structural and includes the compiled CEL programs
+// for the x-kubernetes-validations of each schema node.
+type Validator struct {
+	Items                *Validator
+	Properties           map[string]Validator
+	AllOfValidators      []*Validator
+	AdditionalProperties *Validator
+
+	Schema *schema.Structural
+
+	uncompiledRules []apiextensions.ValidationRule
+	compiledRules   []CompilationResult
+
+	// Program compilation is pre-checked at CRD creation/update time, so we don't expect compilation to fail
+	// when the rules are recompiled and added to this type; if it does, it is an internal bug.
+	// But if somehow we get any compilation errors, we track them and then surface them as validation errors.
+	compilationErr error
+
+	// isResourceRoot is true if this validator node is for the root of a resource. Either the root of the
+	// custom resource being validated, or the root of an XEmbeddedResource object.
+	isResourceRoot bool
+
+	// celActivationFactory produces Activations, which resolve identifiers
+	// (e.g. self and oldSelf) to CEL values. One activation must be produced
+	// for each of the cases where oldSelf is optional and where it is non-optional.
+	celActivationFactory func(sts *schema.Structural, obj, oldObj interface{}) (activation interpreter.Activation, optionalOldSelfActivation interpreter.Activation)
+}
+
+// NewValidator compiles all the CEL programs defined in x-kubernetes-validations extensions
+// of the Structural schema and returns a custom resource validator that contains nested
+// validators for all items, properties and additionalProperties that transitively contain validator rules.
+// Returns nil if there are no validator rules in the Structural schema. May return a validator containing only errors.
+// perCallLimit is an input arg for testing purposes only. Callers should always use const PerCallLimit from k8s.io/apiserver/pkg/apis/cel/config.go as input.
+func NewValidator(s *schema.Structural, isResourceRoot bool, perCallLimit uint64) *Validator {
+	if !hasXValidations(s) {
+		return nil
+	}
+	return validator(s, s, isResourceRoot, model.SchemaDeclType(s, isResourceRoot), perCallLimit)
+}
+
+// validator creates a Validator for all x-kubernetes-validations at the level of the provided schema and lower and
+// returns the Validator if any x-kubernetes-validations exist in the schema, or nil if no x-kubernetes-validations
+// exist. declType is expected to be a CEL DeclType corresponding to the structural schema.
+// perCallLimit was added for testing purpose only. Callers should always use const PerCallLimit from k8s.io/apiserver/pkg/apis/cel/config.go as input.
+func validator(validationSchema, nodeSchema *schema.Structural, isResourceRoot bool, declType *cel.DeclType, perCallLimit uint64) *Validator {
+	compilationSchema := *nodeSchema
+	compilationSchema.XValidations = validationSchema.XValidations
+	compiledRules, err := Compile(&compilationSchema, declType, perCallLimit, environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true), StoredExpressionsEnvLoader())
+
+	var itemsValidator, additionalPropertiesValidator *Validator
+	var propertiesValidators map[string]Validator
+	var allOfValidators []*Validator
+	var elemType *cel.DeclType
+	if declType != nil {
+		elemType = declType.ElemType
+	} else {
+		elemType = declType
+	}
+
+	if validationSchema.Items != nil && nodeSchema.Items != nil {
+		itemsValidator = validator(validationSchema.Items, nodeSchema.Items, nodeSchema.Items.XEmbeddedResource, elemType, perCallLimit)
+	}
+
+	if len(validationSchema.Properties) > 0 {
+		propertiesValidators = make(map[string]Validator, len(validationSchema.Properties))
+		for k, validationProperty := range validationSchema.Properties {
+			nodeProperty, ok := nodeSchema.Properties[k]
+			if !ok {
+				// Can only add value validations for fields that are on the
+				// structural spine of the schema.
+				continue
+			}
+
+			var fieldType *cel.DeclType
+			if escapedPropName, ok := cel.Escape(k); ok {
+				if declType == nil {
+					continue
+				}
+				if f, ok := declType.Fields[escapedPropName]; ok {
+					fieldType = f.Type
+				} else {
+					// fields with unknown types are omitted from CEL validation entirely
+					continue
+				}
+			} else {
+				// field may be absent from declType if the property name is unescapable, in which case we should convert
+				// the field value type to a DeclType.
+				fieldType = model.SchemaDeclType(&nodeProperty, nodeProperty.XEmbeddedResource)
+				if fieldType == nil {
+					continue
+				}
+			}
+			if p := validator(&validationProperty, &nodeProperty, nodeProperty.XEmbeddedResource, fieldType, perCallLimit); p != nil {
+				propertiesValidators[k] = *p
+			}
+		}
+	}
+	if validationSchema.AdditionalProperties != nil && validationSchema.AdditionalProperties.Structural != nil &&
+		nodeSchema.AdditionalProperties != nil && nodeSchema.AdditionalProperties.Structural != nil {
+		additionalPropertiesValidator = validator(validationSchema.AdditionalProperties.Structural, nodeSchema.AdditionalProperties.Structural, nodeSchema.AdditionalProperties.Structural.XEmbeddedResource, elemType, perCallLimit)
+	}
+
+	if validationSchema.ValueValidation != nil && len(validationSchema.ValueValidation.AllOf) > 0 {
+		allOfValidators = make([]*Validator, 0, len(validationSchema.ValueValidation.AllOf))
+		for _, allOf := range validationSchema.ValueValidation.AllOf {
+			allOfValidator := validator(nestedToStructural(&allOf), nodeSchema, isResourceRoot, declType, perCallLimit)
+			if allOfValidator != nil {
+				allOfValidators = append(allOfValidators, allOfValidator)
+			}
+		}
+	}
+
+	if len(compiledRules) > 0 || err != nil || itemsValidator != nil || additionalPropertiesValidator != nil || len(propertiesValidators) > 0 || len(allOfValidators) > 0 {
+		activationFactory := validationActivationWithoutOldSelf
+		for _, rule := range compiledRules {
+			if rule.UsesOldSelf {
+				activationFactory = validationActivationWithOldSelf
+				break
+			}
+		}
+
+		return &Validator{
+			compiledRules:        compiledRules,
+			uncompiledRules:      validationSchema.XValidations,
+			compilationErr:       err,
+			isResourceRoot:       isResourceRoot,
+			Items:                itemsValidator,
+			AdditionalProperties: additionalPropertiesValidator,
+			Properties:           propertiesValidators,
+			AllOfValidators:      allOfValidators,
+			celActivationFactory: activationFactory,
+			Schema:               nodeSchema,
+		}
+	}
+
+	return nil
+}
+
+type options struct {
+	ratchetingOptions
+}
+
+type Option func(*options)
+
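+// WithRatcheting returns an Option that enables ratcheting for a Validate
+// call: rule failures on portions of the object that are unchanged (as
+// determined by the given correlation) are surfaced as warnings instead of
+// errors.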
+func WithRatcheting(correlation *common.CorrelatedObject) Option {
+	return func(o *options) {
+		o.currentCorrelation = correlation
+	}
+}
+
+// Validate validates all x-kubernetes-validations rules in Validator against obj and returns any errors.
+// If the validation rules exceed the costBudget, subsequent evaluations will be skipped, the list of errs returned will not be empty, and a negative remainingBudget will be returned.
+// Most callers can ignore the returned remainingBudget value unless another validate call is going to be made.
+// ctx is passed to support context cancellation during CEL validation.
+func (s *Validator) Validate(ctx context.Context, fldPath *field.Path, _ *schema.Structural, obj, oldObj interface{}, costBudget int64, opts ...Option) (errs field.ErrorList, remainingBudget int64) {
+	opt := options{}
+	for _, o := range opts {
+		o(&opt)
+	}
+
+	return s.validate(ctx, fldPath, obj, oldObj, opt.ratchetingOptions, costBudget)
+}
+
+// ratchetingOptions stores the current correlation object and the nearest
+// parent which was correlatable. The parent is stored so that we can check at
+// the point an error is thrown whether it should be ratcheted using simple
+// logic.
+// Key and Index should be used as normal to traverse to the next node.
+type ratchetingOptions struct {
+	// Current correlation object. If nil, then this node is from an uncorrelatable
+	// part of the schema
+	currentCorrelation *common.CorrelatedObject
+
+	// If currentCorrelation is nil, this is the nearest parent to this node
+	// which was correlatable. If the parent is deepequal to its old value,
+	// then errors thrown by this node are ratcheted
+	nearestParentCorrelation *common.CorrelatedObject
+}
+
+// shouldRatchetError returns true if the errors raised by the current node
+// should be ratcheted.
+//
+// Errors for the current node should be ratcheted if one of the following is true:
+//  1. The current node is correlatable, and it is equal to its old value
+//  2. The current node has a correlatable ancestor, and the ancestor is equal
+//     to its old value.
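+//
+// A minimal usage sketch (illustrative; corr is assumed to be a
+// CorrelatedObject for an unchanged node):
+//
+//	opts := ratchetingOptions{currentCorrelation: corr}
+//	if opts.shouldRatchetError() {
+//		// failures at this node are downgraded to warnings
+//	}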
+func (r ratchetingOptions) shouldRatchetError() bool {
+	if r.currentCorrelation != nil {
+		return r.currentCorrelation.CachedDeepEqual()
+	}
+
+	return r.nearestParentCorrelation.CachedDeepEqual()
+}
+
+// Finds the next node following the field in the tree and returns options using
+// that node. If none could be found, then retains a reference to the last
+// correlatable ancestor for ratcheting purposes
+func (r ratchetingOptions) key(field string) ratchetingOptions {
+	if r.currentCorrelation == nil {
+		return r
+	} else if r.nearestParentCorrelation == nil && (field == "apiVersion" || field == "kind") {
+		// We cannot ratchet changes to the apiVersion and kind fields since
+		// they aren't visible (both the old and new values are converted to
+		// the same type).
+		return ratchetingOptions{}
+	}
+
+	// nearestParentCorrelation is always non-nil except for the root node.
+	// The below line ensures that the next nearestParentCorrelation is set
+	// to a non-nil r.currentCorrelation
+	return ratchetingOptions{currentCorrelation: r.currentCorrelation.Key(field), nearestParentCorrelation: r.currentCorrelation}
+}
+
+// Finds the next node following the index in the tree and returns options using
+// that node. If none could be found, then retains a reference to the last
+// correlatable ancestor for ratcheting purposes
+func (r ratchetingOptions) index(idx int) ratchetingOptions {
+	if r.currentCorrelation == nil {
+		return r
+	}
+
+	return ratchetingOptions{currentCorrelation: r.currentCorrelation.Index(idx), nearestParentCorrelation: r.currentCorrelation}
+}
+
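+// nestedToStructural lifts a NestedValueValidation (for example, an allOf
+// clause) into a Structural schema so it can be compiled and validated like
+// any other schema node.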
+func nestedToStructural(nested *schema.NestedValueValidation) *schema.Structural {
+	if nested == nil {
+		return nil
+	}
+
+	structuralConversion := &schema.Structural{
+		ValueValidation:      &nested.ValueValidation,
+		ValidationExtensions: nested.ValidationExtensions,
+		Generic:              nested.ForbiddenGenerics,
+		Extensions:           nested.ForbiddenExtensions,
+		Items:                nestedToStructural(nested.Items),
+	}
+
+	if len(nested.Properties) > 0 {
+		structuralConversion.Properties = make(map[string]schema.Structural, len(nested.Properties))
+		for k, v := range nested.Properties {
+			structuralConversion.Properties[k] = *nestedToStructural(&v)
+		}
+	}
+
+	if nested.AdditionalProperties != nil {
+		structuralConversion.AdditionalProperties = &schema.StructuralOrBool{
+			Structural: nestedToStructural(nested.AdditionalProperties),
+		}
+	}
+
+	return structuralConversion
+}
+
+func (s *Validator) validate(ctx context.Context, fldPath *field.Path, obj, oldObj interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) {
+	t := time.Now()
+	defer func() {
+		metrics.Metrics.ObserveEvaluation(time.Since(t))
+	}()
+	remainingBudget = costBudget
+	if s == nil || obj == nil {
+		return nil, remainingBudget
+	}
+
+	errs, remainingBudget = s.validateExpressions(ctx, fldPath, obj, oldObj, correlation, remainingBudget)
+
+	if remainingBudget < 0 {
+		return errs, remainingBudget
+	}
+
+	// If the schema has allOf, recurse through those elements to see if there
+	// are any validation rules that need to be evaluated.
+	for _, allOfValidator := range s.AllOfValidators {
+		var allOfErrs field.ErrorList
+		// Pass options with nil currentCorrelation to mirror schema ratcheting
+		// behavior which does not ratchet allOf errors. This may change in the
+		// future for allOf.
+		allOfErrs, remainingBudget = allOfValidator.validate(ctx, fldPath, obj, oldObj, ratchetingOptions{nearestParentCorrelation: correlation.nearestParentCorrelation}, remainingBudget)
+		errs = append(errs, allOfErrs...)
+		if remainingBudget < 0 {
+			return errs, remainingBudget
+		}
+	}
+
+	switch obj := obj.(type) {
+	case []interface{}:
+		oldArray, _ := oldObj.([]interface{})
+		var arrayErrs field.ErrorList
+		arrayErrs, remainingBudget = s.validateArray(ctx, fldPath, obj, oldArray, correlation, remainingBudget)
+		errs = append(errs, arrayErrs...)
+		return errs, remainingBudget
+	case map[string]interface{}:
+		oldMap, _ := oldObj.(map[string]interface{})
+		var mapErrs field.ErrorList
+		mapErrs, remainingBudget = s.validateMap(ctx, fldPath, obj, oldMap, correlation, remainingBudget)
+		errs = append(errs, mapErrs...)
+		return errs, remainingBudget
+	}
+
+	return errs, remainingBudget
+}
+
+func (s *Validator) validateExpressions(ctx context.Context, fldPath *field.Path, obj, oldObj interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) {
+	sts := s.Schema
+
+	// guard against oldObj being a non-nil interface with a nil value
+	if oldObj != nil {
+		v := reflect.ValueOf(oldObj)
+		switch v.Kind() {
+		case reflect.Map, reflect.Pointer, reflect.Interface, reflect.Slice:
+			if v.IsNil() {
+				oldObj = nil // +k8s:verify-mutation:reason=clone
+			}
+		}
+	}
+
+	remainingBudget = costBudget
+	if obj == nil {
+		// We only validate non-null values. Rules that need to check for the state of a nullable value or the presence of an optional
+		// field must do so from the surrounding schema. E.g. if an array has nullable string items, a rule on the array
+		// schema can check if items are null, but a rule on the nullable string schema only validates the non-null strings.
+		return nil, remainingBudget
+	}
+	if s.compilationErr != nil {
+		errs = append(errs, field.Invalid(fldPath, sts.Type, fmt.Sprintf("rule compiler initialization error: %v", s.compilationErr)))
+		return errs, remainingBudget
+	}
+	if len(s.compiledRules) == 0 {
+		return nil, remainingBudget // nothing to do
+	}
+	if remainingBudget <= 0 {
+		errs = append(errs, field.Invalid(fldPath, sts.Type, fmt.Sprintf("validation failed due to running out of cost budget, no further validation rules will be run")))
+		return errs, -1
+	}
+	if s.isResourceRoot {
+		sts = model.WithTypeAndObjectMeta(sts)
+	}
+	activation, optionalOldSelfActivation := s.celActivationFactory(sts, obj, oldObj)
+	for i, compiled := range s.compiledRules {
+		rule := s.uncompiledRules[i]
+		if compiled.Error != nil {
+			errs = append(errs, field.Invalid(fldPath, sts.Type, fmt.Sprintf("rule compile error: %v", compiled.Error)))
+			continue
+		}
+		if compiled.Program == nil {
+			// rule is empty
+			continue
+		}
+
+		// If ratcheting is enabled, allow rule with oldSelf to evaluate
+		// when `optionalOldSelf` is set to true
+		optionalOldSelfRule := ptr.Deref(rule.OptionalOldSelf, false)
+		if compiled.UsesOldSelf && oldObj == nil {
+			// transition rules are evaluated only if there is a comparable existing value
+			// But if the rule uses optional oldSelf and gate is enabled we allow
+			// the rule to be evaluated
+			if !utilfeature.DefaultFeatureGate.Enabled(features.CRDValidationRatcheting) {
+				continue
+			}
+
+			if !optionalOldSelfRule {
+				continue
+			}
+		}
+
+		ruleActivation := activation
+		if optionalOldSelfRule {
+			ruleActivation = optionalOldSelfActivation
+		}
+
+		evalResult, evalDetails, err := compiled.Program.ContextEval(ctx, ruleActivation)
+		if evalDetails == nil {
+			errs = append(errs, field.InternalError(fldPath, fmt.Errorf("runtime cost could not be calculated for validation rule: %v, no further validation rules will be run", ruleErrorString(rule))))
+			return errs, -1
+		} else {
+			rtCost := evalDetails.ActualCost()
+			if rtCost == nil {
+				errs = append(errs, field.Invalid(fldPath, sts.Type, fmt.Sprintf("runtime cost could not be calculated for validation rule: %v, no further validation rules will be run", ruleErrorString(rule))))
+				return errs, -1
+			} else {
+				if *rtCost > math.MaxInt64 || int64(*rtCost) > remainingBudget {
+					errs = append(errs, field.Invalid(fldPath, sts.Type, fmt.Sprintf("validation failed due to running out of cost budget, no further validation rules will be run")))
+					return errs, -1
+				}
+				remainingBudget -= int64(*rtCost)
+			}
+		}
+		if err != nil {
+			// see types.Err for list of well defined error types
+			if strings.HasPrefix(err.Error(), "no such overload") {
+				// Most overload errors are caught by the compiler, which provides details on where exactly in the rule the
+				// error was found. Here, an overload error has occurred at runtime and no details are provided, so we
+				// append a more descriptive error message. This error can only occur when static type checking has
+				// been bypassed. int-or-string is typed as dynamic and so bypasses compiler type checking.
+				errs = append(errs, field.Invalid(fldPath, sts.Type, fmt.Sprintf("'%v': call arguments did not match a supported operator, function or macro signature for rule: %v", err, ruleErrorString(rule))))
+			} else if strings.HasPrefix(err.Error(), "operation cancelled: actual cost limit exceeded") {
+				errs = append(errs, field.Invalid(fldPath, sts.Type, fmt.Sprintf("'%v': no further validation rules will be run due to call cost exceeds limit for rule: %v", err, ruleErrorString(rule))))
+				return errs, -1
+			} else {
+				// no such key: {key}, index out of bounds: {index}, integer overflow, division by zero, ...
+				errs = append(errs, field.Invalid(fldPath, sts.Type, fmt.Sprintf("%v evaluating rule: %v", err, ruleErrorString(rule))))
+			}
+			continue
+		}
+		if evalResult != types.True {
+			currentFldPath := fldPath
+			if len(compiled.NormalizedRuleFieldPath) > 0 {
+				currentFldPath = currentFldPath.Child(compiled.NormalizedRuleFieldPath)
+			}
+
+			addErr := func(e *field.Error) {
+				if !compiled.UsesOldSelf && correlation.shouldRatchetError() {
+					warning.AddWarning(ctx, "", e.Error())
+				} else {
+					errs = append(errs, e)
+				}
+			}
+
+			if compiled.MessageExpression != nil {
+				messageExpression, newRemainingBudget, msgErr := evalMessageExpression(ctx, compiled.MessageExpression, rule.MessageExpression, activation, remainingBudget)
+				if msgErr != nil {
+					if msgErr.Type == cel.ErrorTypeInternal {
+						addErr(field.InternalError(currentFldPath, msgErr))
+						return errs, -1
+					} else if msgErr.Type == cel.ErrorTypeInvalid {
+						addErr(field.Invalid(currentFldPath, sts.Type, msgErr.Error()))
+						return errs, -1
+					} else {
+						klog.V(2).ErrorS(msgErr, "messageExpression evaluation failed")
+						addErr(fieldErrorForReason(currentFldPath, sts.Type, ruleMessageOrDefault(rule), rule.Reason))
+						remainingBudget = newRemainingBudget
+					}
+				} else {
+					addErr(fieldErrorForReason(currentFldPath, sts.Type, messageExpression, rule.Reason))
+					remainingBudget = newRemainingBudget
+				}
+			} else {
+				addErr(fieldErrorForReason(currentFldPath, sts.Type, ruleMessageOrDefault(rule), rule.Reason))
+			}
+		}
+	}
+	return errs, remainingBudget
+}
+
+var unescapeMatcher = regexp.MustCompile(`\\.`)
+
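+// unescapeSingleQuote resolves backslash escape sequences inside a
+// single-quoted string literal. For example (illustrative):
+//
+//	unescapeSingleQuote(`it\'s`) // "it's", nil
+//	unescapeSingleQuote(`a\\b`)  // `a\b`, nil
+//	unescapeSingleQuote(`x\q`)   // non-nil error (invalid escape char)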
+func unescapeSingleQuote(s string) (string, error) {
+	var err error
+	unescaped := unescapeMatcher.ReplaceAllStringFunc(s, func(matchStr string) string {
+		directive := matchStr[1]
+		switch directive {
+		case 'a':
+			return "\a"
+		case 'b':
+			return "\b"
+		case 'f':
+			return "\f"
+		case 'n':
+			return "\n"
+		case 'r':
+			return "\r"
+		case 't':
+			return "\t"
+		case 'v':
+			return "\v"
+		case '\'':
+			return "'"
+		case '\\':
+			return "\\"
+		default:
+			err = fmt.Errorf("invalid escape char %s", matchStr)
+			return ""
+		}
+	})
+	return unescaped, err
+}
+
+type validFieldPathOptions struct {
+	allowArrayNotation bool
+}
+
+// ValidFieldPathOption provides vararg options for ValidFieldPath.
+type ValidFieldPathOption func(*validFieldPathOptions)
+
+// WithFieldPathAllowArrayNotation sets whether array notation ('[]') is allowed
+// in field paths.
+// Defaults to true.
+func WithFieldPathAllowArrayNotation(allow bool) ValidFieldPathOption {
+	return func(options *validFieldPathOptions) {
+		options.allowArrayNotation = allow
+	}
+}
+
+// ValidFieldPath validates that jsonPath is a valid JSON Path containing only field and map accessors
+// that are valid for the given schema, and returns a field.Path representation of the validated jsonPath or an error.
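+//
+// For example (illustrative; assumes a schema with a named property
+// spec.replicas and a map-valued property spec.selector):
+//
+//	ValidFieldPath(`.spec.replicas`, s)        // -> spec.replicas
+//	ValidFieldPath(`.spec.selector['app']`, s) // -> spec.selector[app]
+//	ValidFieldPath(`.spec.missing`, s)         // -> error: does not refer to a valid field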
+func ValidFieldPath(jsonPath string, schema *schema.Structural, options ...ValidFieldPathOption) (validFieldPath *field.Path, foundSchema *schema.Structural, err error) {
+	opts := &validFieldPathOptions{allowArrayNotation: true}
+	for _, opt := range options {
+		opt(opts)
+	}
+
+	appendToPath := func(name string, isNamed bool) error {
+		if !isNamed {
+			validFieldPath = validFieldPath.Key(name)
+			schema = schema.AdditionalProperties.Structural
+		} else {
+			validFieldPath = validFieldPath.Child(name)
+			val, ok := schema.Properties[name]
+			if !ok {
+				return fmt.Errorf("does not refer to a valid field")
+			}
+			schema = &val
+		}
+		return nil
+	}
+
+	validFieldPath = nil
+
+	scanner := bufio.NewScanner(strings.NewReader(jsonPath))
+
+	// configure the scanner to split the string into tokens.
+	// The three delimiters ('.', '[', ']') will be returned as single char tokens.
+	// All other text between delimiters is returned as string tokens.
+	scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
+		if len(data) > 0 {
+			for i := 0; i < len(data); i++ {
+				// If in a single quoted string, look for the end of string
+				// ignoring delimiters.
+				if data[0] == '\'' {
+					if i > 0 && data[i] == '\'' && data[i-1] != '\\' {
+						// Return quoted string
+						return i + 1, data[:i+1], nil
+					}
+					continue
+				}
+				switch data[i] {
+				case '.', '[', ']': // delimiters
+					if i == 0 {
+						// Return the delimiter.
+						return 1, data[:1], nil
+					} else {
+						// Return identifier leading up to the delimiter.
+						// The next call to split will return the delimiter.
+						return i, data[:i], nil
+					}
+				}
+			}
+			if atEOF {
+				// Return the string.
+				return len(data), data, nil
+			}
+		}
+		return 0, nil, nil
+	})
+
+	var tok string
+	var isNamed bool
+	for scanner.Scan() {
+		tok = scanner.Text()
+		switch tok {
+		case "[":
+			if !opts.allowArrayNotation {
+				return nil, nil, fmt.Errorf("array notation is not allowed")
+			}
+			if !scanner.Scan() {
+				return nil, nil, fmt.Errorf("unexpected end of JSON path")
+			}
+			tok = scanner.Text()
+			if len(tok) < 2 || tok[0] != '\'' || tok[len(tok)-1] != '\'' {
+				return nil, nil, fmt.Errorf("expected single quoted string but got %s", tok)
+			}
+			unescaped, err := unescapeSingleQuote(tok[1 : len(tok)-1])
+			if err != nil {
+				return nil, nil, fmt.Errorf("invalid string literal: %w", err)
+			}
+
+			if schema.Properties != nil {
+				isNamed = true
+			} else if schema.AdditionalProperties != nil {
+				isNamed = false
+			} else {
+				return nil, nil, fmt.Errorf("does not refer to a valid field")
+			}
+			if err := appendToPath(unescaped, isNamed); err != nil {
+				return nil, nil, err
+			}
+			if !scanner.Scan() {
+				return nil, nil, fmt.Errorf("unexpected end of JSON path")
+			}
+			tok = scanner.Text()
+			if tok != "]" {
+				return nil, nil, fmt.Errorf("expected ] but got %s", tok)
+			}
+		case ".":
+			if !scanner.Scan() {
+				return nil, nil, fmt.Errorf("unexpected end of JSON path")
+			}
+			tok = scanner.Text()
+			if schema.Properties != nil {
+				isNamed = true
+			} else if schema.AdditionalProperties != nil {
+				isNamed = false
+			} else {
+				return nil, nil, fmt.Errorf("does not refer to a valid field")
+			}
+			if err := appendToPath(tok, isNamed); err != nil {
+				return nil, nil, err
+			}
+		default:
+			return nil, nil, fmt.Errorf("expected [ or . but got: %s", tok)
+		}
+	}
+
+	return validFieldPath, schema, nil
+}
+
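+// fieldErrorForReason maps an x-kubernetes-validations reason to the matching
+// field.Error constructor, defaulting to field.Invalid when the reason is nil
+// or unrecognized.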
+func fieldErrorForReason(fldPath *field.Path, value interface{}, detail string, reason *apiextensions.FieldValueErrorReason) *field.Error {
+	if reason == nil {
+		return field.Invalid(fldPath, value, detail)
+	}
+	switch *reason {
+	case apiextensions.FieldValueForbidden:
+		return field.Forbidden(fldPath, detail)
+	case apiextensions.FieldValueRequired:
+		return field.Required(fldPath, detail)
+	case apiextensions.FieldValueDuplicate:
+		return field.Duplicate(fldPath, value)
+	default:
+		return field.Invalid(fldPath, value, detail)
+	}
+}
+
+// evalMessageExpression evaluates the given message expression and returns the evaluated string form and the remaining budget, or an error if one
+// occurred during evaluation.
+func evalMessageExpression(ctx context.Context, expr celgo.Program, exprSrc string, activation interpreter.Activation, remainingBudget int64) (string, int64, *cel.Error) {
+	evalResult, evalDetails, err := expr.ContextEval(ctx, activation)
+	if evalDetails == nil {
+		return "", -1, &cel.Error{
+			Type:   cel.ErrorTypeInternal,
+			Detail: fmt.Sprintf("runtime cost could not be calculated for messageExpression: %q", exprSrc),
+		}
+	}
+	rtCost := evalDetails.ActualCost()
+	if rtCost == nil {
+		return "", -1, &cel.Error{
+			Type:   cel.ErrorTypeInternal,
+			Detail: fmt.Sprintf("runtime cost could not be calculated for messageExpression: %q", exprSrc),
+		}
+	} else if *rtCost > math.MaxInt64 || int64(*rtCost) > remainingBudget {
+		return "", -1, &cel.Error{
+			Type:   cel.ErrorTypeInvalid,
+			Detail: "messageExpression evaluation failed due to running out of cost budget, no further validation rules will be run",
+		}
+	}
+	if err != nil {
+		if strings.HasPrefix(err.Error(), "operation cancelled: actual cost limit exceeded") {
+			return "", -1, &cel.Error{
+				Type:   cel.ErrorTypeInvalid,
+				Detail: fmt.Sprintf("no further validation rules will be run due to call cost exceeds limit for messageExpression: %q", exprSrc),
+			}
+		}
+		return "", remainingBudget - int64(*rtCost), &cel.Error{
+			Detail: fmt.Sprintf("messageExpression evaluation failed due to: %v", err.Error()),
+		}
+	}
+	messageStr, ok := evalResult.Value().(string)
+	if !ok {
+		return "", remainingBudget - int64(*rtCost), &cel.Error{
+			Detail: "messageExpression failed to convert to string",
+		}
+	}
+	trimmedMsgStr := strings.TrimSpace(messageStr)
+	if len(trimmedMsgStr) > celconfig.MaxEvaluatedMessageExpressionSizeBytes {
+		return "", remainingBudget - int64(*rtCost), &cel.Error{
+			Detail: fmt.Sprintf("messageExpression beyond allowable length of %d", celconfig.MaxEvaluatedMessageExpressionSizeBytes),
+		}
+	} else if hasNewlines(trimmedMsgStr) {
+		return "", remainingBudget - int64(*rtCost), &cel.Error{
+			Detail: "messageExpression should not contain line breaks",
+		}
+	} else if len(trimmedMsgStr) == 0 {
+		return "", remainingBudget - int64(*rtCost), &cel.Error{
+			Detail: "messageExpression should evaluate to a non-empty string",
+		}
+	}
+	return trimmedMsgStr, remainingBudget - int64(*rtCost), nil
+}
+
+var newlineMatcher = regexp.MustCompile(`[\n]+`)
+
+func hasNewlines(s string) bool {
+	return newlineMatcher.MatchString(s)
+}
+
+func ruleMessageOrDefault(rule apiextensions.ValidationRule) string {
+	if len(rule.Message) == 0 {
+		return fmt.Sprintf("failed rule: %s", ruleErrorString(rule))
+	} else {
+		return strings.TrimSpace(rule.Message)
+	}
+}
+
+func ruleErrorString(rule apiextensions.ValidationRule) string {
+	if len(rule.Message) > 0 {
+		return strings.TrimSpace(rule.Message)
+	}
+	return strings.TrimSpace(rule.Rule)
+}
+
+type validationActivation struct {
+	self, oldSelf ref.Val
+	hasOldSelf    bool
+}
+
+func validationActivationWithOldSelf(sts *schema.Structural, obj, oldObj interface{}) (activation interpreter.Activation, optionalOldSelfActivation interpreter.Activation) {
+	va := &validationActivation{
+		self: UnstructuredToVal(obj, sts),
+	}
+	optionalVA := &validationActivation{
+		self:       va.self,
+		hasOldSelf: true, // this means the oldSelf variable is defined for CEL to reference, not that it has a value
+		oldSelf:    types.OptionalNone,
+	}
+
+	if oldObj != nil {
+		va.oldSelf = UnstructuredToVal(oldObj, sts) // +k8s:verify-mutation:reason=clone
+		va.hasOldSelf = true
+
+		optionalVA.oldSelf = types.OptionalOf(va.oldSelf) // +k8s:verify-mutation:reason=clone
+	}
+
+	return va, optionalVA
+}
+
+func validationActivationWithoutOldSelf(sts *schema.Structural, obj, _ interface{}) (interpreter.Activation, interpreter.Activation) {
+	res := &validationActivation{
+		self: UnstructuredToVal(obj, sts),
+	}
+	return res, res
+}
+
+func (a *validationActivation) ResolveName(name string) (interface{}, bool) {
+	switch name {
+	case ScopedVarName:
+		return a.self, true
+	case OldScopedVarName:
+		return a.oldSelf, a.hasOldSelf
+	default:
+		return nil, false
+	}
+}
+
+func (a *validationActivation) Parent() interpreter.Activation {
+	return nil
+}
+
+func (s *Validator) validateMap(ctx context.Context, fldPath *field.Path, obj, oldObj map[string]interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) {
+	remainingBudget = costBudget
+	if remainingBudget < 0 {
+		return errs, remainingBudget
+	}
+	if s == nil || obj == nil {
+		return nil, remainingBudget
+	}
+
+	correlatable := MapIsCorrelatable(s.Schema.XMapType)
+
+	if s.AdditionalProperties != nil {
+		for k, v := range obj {
+			var oldV interface{}
+			if correlatable {
+				oldV = oldObj[k] // +k8s:verify-mutation:reason=clone
+			}
+
+			var err field.ErrorList
+			err, remainingBudget = s.AdditionalProperties.validate(ctx, fldPath.Key(k), v, oldV, correlation.key(k), remainingBudget)
+			errs = append(errs, err...)
+			if remainingBudget < 0 {
+				return errs, remainingBudget
+			}
+		}
+	}
+	if s.Properties != nil {
+		for k, v := range obj {
+			sub, ok := s.Properties[k]
+			if ok {
+				var oldV interface{}
+				if correlatable {
+					oldV = oldObj[k] // +k8s:verify-mutation:reason=clone
+				}
+
+				var err field.ErrorList
+				err, remainingBudget = sub.validate(ctx, fldPath.Child(k), v, oldV, correlation.key(k), remainingBudget)
+				errs = append(errs, err...)
+				if remainingBudget < 0 {
+					return errs, remainingBudget
+				}
+			}
+		}
+	}
+
+	return errs, remainingBudget
+}
+
+func (s *Validator) validateArray(ctx context.Context, fldPath *field.Path, obj, oldObj []interface{}, correlation ratchetingOptions, costBudget int64) (errs field.ErrorList, remainingBudget int64) {
+	remainingBudget = costBudget
+	if remainingBudget < 0 {
+		return errs, remainingBudget
+	}
+
+	if s.Items != nil {
+		// only map-type lists support self-oldSelf correlation for cel rules. if this isn't a
+		// map-type list, then makeMapList returns an implementation that always returns nil
+		correlatableOldItems := makeMapList(s.Schema, oldObj)
+		for i := range obj {
+			var err field.ErrorList
+			err, remainingBudget = s.Items.validate(ctx, fldPath.Index(i), obj[i], correlatableOldItems.Get(obj[i]), correlation.index(i), remainingBudget)
+			errs = append(errs, err...)
+			if remainingBudget < 0 {
+				return errs, remainingBudget
+			}
+		}
+	}
+
+	return errs, remainingBudget
+}
+
+// MapIsCorrelatable returns true if the mapType can be used to correlate the data elements of a map after an update
+// with the data elements of the map from before the update.
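+//
+// For example (illustrative):
+//
+//	MapIsCorrelatable(nil)                  // true: granular is the default
+//	MapIsCorrelatable(ptr.To("granular"))   // true
+//	MapIsCorrelatable(ptr.To("atomic"))     // true
+//	MapIsCorrelatable(ptr.To("futureType")) // false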
+func MapIsCorrelatable(mapType *string) bool {
+	// if a third map type is introduced, assume it's not correlatable. granular is the default if unspecified.
+	return mapType == nil || *mapType == "granular" || *mapType == "atomic"
+}
+
+func hasXValidations(s *schema.Structural) bool {
+	if s == nil {
+		return false
+	}
+	if len(s.XValidations) > 0 {
+		return true
+	}
+	if hasXValidations(s.Items) {
+		return true
+	}
+	if s.AdditionalProperties != nil && hasXValidations(s.AdditionalProperties.Structural) {
+		return true
+	}
+	if s.Properties != nil {
+		for _, prop := range s.Properties {
+			if hasXValidations(&prop) {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/values.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/values.go
new file mode 100644
index 0000000000..8b879eaf20
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/values.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"github.com/google/cel-go/common/types/ref"
+
+	structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel/model"
+	celopenapi "k8s.io/apiserver/pkg/cel/common"
+)
+
+// UnstructuredToVal converts a Kubernetes unstructured data element to a CEL Val.
+// The root schema of a custom resource is expected to contain type meta and object meta schemas.
+// If embedded resources do not contain type meta and object meta schemas, they will be added automatically.
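+//
+// A minimal sketch (illustrative; s is assumed to be the Structural schema of
+// the object):
+//
+//	val := UnstructuredToVal(map[string]interface{}{"name": "example"}, s)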
+func UnstructuredToVal(unstructured interface{}, schema *structuralschema.Structural) ref.Val {
+	return celopenapi.UnstructuredToVal(unstructured, &model.Structural{Structural: schema})
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/complete.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/complete.go
new file mode 100644
index 0000000000..65bf3cef4c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/complete.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// validateStructuralCompleteness checks that all value validations in s have
+// a structural counterpart so that every value validation applies to a value
+// with a known schema:
+// - validations for specific properties must have that property (or additionalProperties under an option) structurally defined
+// - additionalProperties validations must have additionalProperties defined in the structural portion of the schema corresponding to that node
+// - items validations must also have a corresponding items structurally defined
+//
+// The "structural" portion of the schema refers to all nodes in the
+// schema traversable without following any NestedValueValidations.
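+//
+// For example (illustrative): a clause such as
+//
+//	allOf:
+//	- properties:
+//	    foo:
+//	      minLength: 1
+//
+// is only complete if properties.foo is also declared structurally at the
+// same node.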
+func validateStructuralCompleteness(s *Structural, fldPath *field.Path, opts ValidationOptions) field.ErrorList {
+	if s == nil {
+		return nil
+	}
+
+	return validateValueValidationCompleteness(s.ValueValidation, s, fldPath, fldPath, opts)
+}
+
+func validateValueValidationCompleteness(v *ValueValidation, s *Structural, sPath, vPath *field.Path, opts ValidationOptions) field.ErrorList {
+	if v == nil {
+		return nil
+	}
+	if s == nil {
+		return field.ErrorList{field.Required(sPath, fmt.Sprintf("because it is defined in %s", vPath.String()))}
+	}
+
+	allErrs := field.ErrorList{}
+
+	allErrs = append(allErrs, validateNestedValueValidationCompleteness(v.Not, s, sPath, vPath.Child("not"), opts)...)
+	for i := range v.AllOf {
+		allErrs = append(allErrs, validateNestedValueValidationCompleteness(&v.AllOf[i], s, sPath, vPath.Child("allOf").Index(i), opts)...)
+	}
+	for i := range v.AnyOf {
+		allErrs = append(allErrs, validateNestedValueValidationCompleteness(&v.AnyOf[i], s, sPath, vPath.Child("anyOf").Index(i), opts)...)
+	}
+	for i := range v.OneOf {
+		allErrs = append(allErrs, validateNestedValueValidationCompleteness(&v.OneOf[i], s, sPath, vPath.Child("oneOf").Index(i), opts)...)
+	}
+
+	return allErrs
+}
+
+func validateNestedValueValidationCompleteness(v *NestedValueValidation, s *Structural, sPath, vPath *field.Path, opts ValidationOptions) field.ErrorList {
+	if v == nil {
+		return nil
+	}
+	if s == nil {
+		return field.ErrorList{field.Required(sPath, fmt.Sprintf("because it is defined in %s", vPath.String()))}
+	}
+
+	allErrs := field.ErrorList{}
+
+	allErrs = append(allErrs, validateValueValidationCompleteness(&v.ValueValidation, s, sPath, vPath, opts)...)
+	allErrs = append(allErrs, validateNestedValueValidationCompleteness(v.Items, s.Items, sPath.Child("items"), vPath.Child("items"), opts)...)
+
+	var sAdditionalPropertiesSchema *Structural
+	if s.AdditionalProperties != nil {
+		sAdditionalPropertiesSchema = s.AdditionalProperties.Structural
+	}
+
+	for k, vFld := range v.Properties {
+		if sFld, ok := s.Properties[k]; !ok {
+			if sAdditionalPropertiesSchema == nil || !opts.AllowValidationPropertiesWithAdditionalProperties {
+				allErrs = append(allErrs, field.Required(sPath.Child("properties").Key(k), fmt.Sprintf("because it is defined in %s", vPath.Child("properties").Key(k))))
+			} else {
+				// Allow validations on specific properties if there exists an
+				// additionalProperties structural schema specified instead of
+				// direct properties
+				// NOTE: This does not allow `additionalProperties: true` structural
+				// schema to be combined with specific property validations.
+				allErrs = append(allErrs, validateNestedValueValidationCompleteness(&vFld, sAdditionalPropertiesSchema, sPath.Child("additionalProperties"), vPath.Child("properties").Key(k), opts)...)
+			}
+		} else {
+			allErrs = append(allErrs, validateNestedValueValidationCompleteness(&vFld, &sFld, sPath.Child("properties").Key(k), vPath.Child("properties").Key(k), opts)...)
+		}
+	}
+
+	if v.AdditionalProperties != nil && opts.AllowNestedAdditionalProperties {
+		allErrs = append(allErrs, validateNestedValueValidationCompleteness(v.AdditionalProperties, sAdditionalPropertiesSchema, sPath.Child("additionalProperties"), vPath.Child("additionalProperties"), opts)...)
+	}
+
+	return allErrs
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go
new file mode 100644
index 0000000000..78350c135f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/convert.go
@@ -0,0 +1,325 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+	"fmt"
+
+	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+)
+
+// NewStructural converts an OpenAPI v3 schema into a structural schema. A pre-validated JSONSchemaProps will
+// not fail on NewStructural. This means that we require that:
+//
+// - items is not an array of schemas
+// - the following fields are not set:
+//   - id
+//   - schema
+//   - $ref
+//   - patternProperties
+//   - dependencies
+//   - additionalItems
+//   - definitions.
+//
+// The following fields are not preserved:
+// - externalDocs
+// - example.
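+//
+// A minimal sketch (illustrative):
+//
+//	in := &apiextensions.JSONSchemaProps{
+//		Type: "object",
+//		Properties: map[string]apiextensions.JSONSchemaProps{
+//			"replicas": {Type: "integer"},
+//		},
+//	}
+//	out, err := NewStructural(in) // out.Properties["replicas"].Generic.Type == "integer"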
+func NewStructural(s *apiextensions.JSONSchemaProps) (*Structural, error) {
+	if s == nil {
+		return nil, nil
+	}
+
+	if err := validateUnsupportedFields(s); err != nil {
+		return nil, err
+	}
+
+	vv, err := newValueValidation(s)
+	if err != nil {
+		return nil, err
+	}
+
+	g, err := newGenerics(s)
+	if err != nil {
+		return nil, err
+	}
+
+	x, err := newExtensions(s)
+	if err != nil {
+		return nil, err
+	}
+
+	vx, err := newValidationExtensions(s)
+	if err != nil {
+		return nil, err
+	}
+
+	ss := &Structural{
+		Generic:              *g,
+		Extensions:           *x,
+		ValueValidation:      vv,
+		ValidationExtensions: *vx,
+	}
+
+	if s.Items != nil {
+		if len(s.Items.JSONSchemas) > 0 {
+			// we validate that it is not an array
+			return nil, fmt.Errorf("OpenAPIV3Schema 'items' must be a schema, but is an array")
+		}
+		item, err := NewStructural(s.Items.Schema)
+		if err != nil {
+			return nil, err
+		}
+		ss.Items = item
+	}
+
+	if len(s.Properties) > 0 {
+		ss.Properties = make(map[string]Structural, len(s.Properties))
+		for k, x := range s.Properties {
+			fld, err := NewStructural(&x)
+			if err != nil {
+				return nil, err
+			}
+			ss.Properties[k] = *fld
+		}
+	}
+
+	if s.AdditionalProperties != nil {
+		if s.AdditionalProperties.Schema != nil {
+			additionalPropertiesSchema, err := NewStructural(s.AdditionalProperties.Schema)
+			if err != nil {
+				return nil, err
+			}
+			ss.AdditionalProperties = &StructuralOrBool{Structural: additionalPropertiesSchema, Bool: true}
+		} else {
+			ss.AdditionalProperties = &StructuralOrBool{Bool: s.AdditionalProperties.Allows}
+		}
+	}
+
+	return ss, nil
+}
+
+func newGenerics(s *apiextensions.JSONSchemaProps) (*Generic, error) {
+	if s == nil {
+		return nil, nil
+	}
+	g := &Generic{
+		Type:        s.Type,
+		Description: s.Description,
+		Title:       s.Title,
+		Nullable:    s.Nullable,
+	}
+	if s.Default != nil {
+		g.Default = JSON{interface{}(*s.Default)}
+	}
+
+	return g, nil
+}
+
+func newValueValidation(s *apiextensions.JSONSchemaProps) (*ValueValidation, error) {
+	if s == nil {
+		return nil, nil
+	}
+	not, err := newNestedValueValidation(s.Not)
+	if err != nil {
+		return nil, err
+	}
+	v := &ValueValidation{
+		Format:           s.Format,
+		Maximum:          s.Maximum,
+		ExclusiveMaximum: s.ExclusiveMaximum,
+		Minimum:          s.Minimum,
+		ExclusiveMinimum: s.ExclusiveMinimum,
+		MaxLength:        s.MaxLength,
+		MinLength:        s.MinLength,
+		Pattern:          s.Pattern,
+		MaxItems:         s.MaxItems,
+		MinItems:         s.MinItems,
+		UniqueItems:      s.UniqueItems,
+		MultipleOf:       s.MultipleOf,
+		MaxProperties:    s.MaxProperties,
+		MinProperties:    s.MinProperties,
+		Required:         s.Required,
+		Not:              not,
+	}
+
+	for _, e := range s.Enum {
+		v.Enum = append(v.Enum, JSON{e})
+	}
+
+	for _, x := range s.AllOf {
+		clause, err := newNestedValueValidation(&x)
+		if err != nil {
+			return nil, err
+		}
+		v.AllOf = append(v.AllOf, *clause)
+	}
+
+	for _, x := range s.AnyOf {
+		clause, err := newNestedValueValidation(&x)
+		if err != nil {
+			return nil, err
+		}
+		v.AnyOf = append(v.AnyOf, *clause)
+	}
+
+	for _, x := range s.OneOf {
+		clause, err := newNestedValueValidation(&x)
+		if err != nil {
+			return nil, err
+		}
+		v.OneOf = append(v.OneOf, *clause)
+	}
+
+	return v, nil
+}
+
+func newNestedValueValidation(s *apiextensions.JSONSchemaProps) (*NestedValueValidation, error) {
+	if s == nil {
+		return nil, nil
+	}
+
+	if err := validateUnsupportedFields(s); err != nil {
+		return nil, err
+	}
+
+	vv, err := newValueValidation(s)
+	if err != nil {
+		return nil, err
+	}
+
+	g, err := newGenerics(s)
+	if err != nil {
+		return nil, err
+	}
+
+	x, err := newExtensions(s)
+	if err != nil {
+		return nil, err
+	}
+
+	vx, err := newValidationExtensions(s)
+	if err != nil {
+		return nil, err
+	}
+
+	v := &NestedValueValidation{
+		ValueValidation:      *vv,
+		ValidationExtensions: *vx,
+		ForbiddenGenerics:    *g,
+		ForbiddenExtensions:  *x,
+	}
+
+	if s.Items != nil {
+		if len(s.Items.JSONSchemas) > 0 {
+			// we validate that it is not an array
+			return nil, fmt.Errorf("OpenAPIV3Schema 'items' must be a schema, but is an array")
+		}
+		nvv, err := newNestedValueValidation(s.Items.Schema)
+		if err != nil {
+			return nil, err
+		}
+		v.Items = nvv
+	}
+	if s.Properties != nil {
+		v.Properties = make(map[string]NestedValueValidation, len(s.Properties))
+		for k, x := range s.Properties {
+			nvv, err := newNestedValueValidation(&x)
+			if err != nil {
+				return nil, err
+			}
+			v.Properties[k] = *nvv
+		}
+	}
+	if s.AdditionalProperties != nil {
+		if s.AdditionalProperties.Schema != nil {
+			additionalPropertiesSchema, err := newNestedValueValidation(s.AdditionalProperties.Schema)
+			if err != nil {
+				return nil, err
+			}
+			v.AdditionalProperties = additionalPropertiesSchema
+		} else if s.AdditionalProperties.Allows {
+			v.AdditionalProperties = &NestedValueValidation{}
+		}
+
+	}
+
+	return v, nil
+}
+
+func newExtensions(s *apiextensions.JSONSchemaProps) (*Extensions, error) {
+	if s == nil {
+		return nil, nil
+	}
+
+	ret := &Extensions{
+		XEmbeddedResource: s.XEmbeddedResource,
+		XIntOrString:      s.XIntOrString,
+		XListMapKeys:      s.XListMapKeys,
+		XListType:         s.XListType,
+		XMapType:          s.XMapType,
+	}
+
+	if s.XPreserveUnknownFields != nil {
+		if !*s.XPreserveUnknownFields {
+			return nil, fmt.Errorf("internal error: 'x-kubernetes-preserve-unknown-fields' must be true or undefined")
+		}
+		ret.XPreserveUnknownFields = true
+	}
+
+	return ret, nil
+}
+
+func newValidationExtensions(s *apiextensions.JSONSchemaProps) (*ValidationExtensions, error) {
+	if s == nil {
+		return nil, nil
+	}
+
+	ret := &ValidationExtensions{}
+	if err := apiextensionsv1.Convert_apiextensions_ValidationRules_To_v1_ValidationRules(&s.XValidations, &ret.XValidations, nil); err != nil {
+		return nil, err
+	}
+
+	return ret, nil
+}
+
+// validateUnsupportedFields checks that those fields rejected by validation are actually unset.
+func validateUnsupportedFields(s *apiextensions.JSONSchemaProps) error {
+	if len(s.ID) > 0 {
+		return fmt.Errorf("OpenAPIV3Schema 'id' is not supported")
+	}
+	if len(s.Schema) > 0 {
+		return fmt.Errorf("OpenAPIV3Schema 'schema' is not supported")
+	}
+	if s.Ref != nil && len(*s.Ref) > 0 {
+		return fmt.Errorf("OpenAPIV3Schema '$ref' is not supported")
+	}
+	if len(s.PatternProperties) > 0 {
+		return fmt.Errorf("OpenAPIV3Schema 'patternProperties' is not supported")
+	}
+	if len(s.Dependencies) > 0 {
+		return fmt.Errorf("OpenAPIV3Schema 'dependencies' is not supported")
+	}
+	if s.AdditionalItems != nil {
+		return fmt.Errorf("OpenAPIV3Schema 'additionalItems' is not supported")
+	}
+	if len(s.Definitions) > 0 {
+		return fmt.Errorf("OpenAPIV3Schema 'definitions' is not supported")
+	}
+
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/algorithm.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/algorithm.go
new file mode 100644
index 0000000000..8d6f55fb03
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/algorithm.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaulting
+
+import (
+	structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// isNonNullableNull returns true if the item is nil AND its schema is non-nullable
+func isNonNullableNull(x interface{}, s *structuralschema.Structural) bool {
+	return x == nil && s != nil && s.Generic.Nullable == false
+}
+
+// Default does defaulting of x depending on default values in s.
+// Default values from s are deep-copied.
+//
+// PruneNonNullableNullsWithoutDefaults has left the non-nullable nulls
+// that have a default here.
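+//
+// For example (illustrative; assumes s declares a property "mode" with
+// default "auto"):
+//
+//	x := map[string]interface{}{}
+//	Default(x, s) // x["mode"] == "auto"; a non-nullable x["mode"] = nil is also replaced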
+func Default(x interface{}, s *structuralschema.Structural) {
+	if s == nil {
+		return
+	}
+
+	switch x := x.(type) {
+	case map[string]interface{}:
+		for k, prop := range s.Properties {
+			if prop.Default.Object == nil {
+				continue
+			}
+			if _, found := x[k]; !found || isNonNullableNull(x[k], &prop) {
+				x[k] = runtime.DeepCopyJSONValue(prop.Default.Object)
+			}
+		}
+		for k := range x {
+			if prop, found := s.Properties[k]; found {
+				Default(x[k], &prop)
+			} else if s.AdditionalProperties != nil {
+				if isNonNullableNull(x[k], s.AdditionalProperties.Structural) {
+					x[k] = runtime.DeepCopyJSONValue(s.AdditionalProperties.Structural.Default.Object)
+				}
+				Default(x[k], s.AdditionalProperties.Structural)
+			}
+		}
+	case []interface{}:
+		for i := range x {
+			if isNonNullableNull(x[i], s.Items) {
+				x[i] = runtime.DeepCopyJSONValue(s.Items.Default.Object)
+			}
+			Default(x[i], s.Items)
+		}
+	default:
+		// scalars, do nothing
+	}
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/prune.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/prune.go
new file mode 100644
index 0000000000..45c3789aef
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/prune.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaulting
+
+import (
+	"fmt"
+	"reflect"
+
+	structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+	structuralobjectmeta "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta"
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema/pruning"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// PruneDefaults prunes default values according to the schema and according to
+// the ObjectMeta definition of the running server. It mutates the passed schema.
+func PruneDefaults(s *structuralschema.Structural) error {
+	p := pruner{s}
+	_, err := p.pruneDefaults(s, NewRootObjectFunc())
+	return err
+}
+
+type pruner struct {
+	rootSchema *structuralschema.Structural
+}
+
+func (p *pruner) pruneDefaults(s *structuralschema.Structural, f SurroundingObjectFunc) (changed bool, err error) {
+	if s == nil {
+		return false, nil
+	}
+
+	if s.Default.Object != nil {
+		orig := runtime.DeepCopyJSONValue(s.Default.Object)
+
+		obj, acc, err := f(s.Default.Object)
+		if err != nil {
+			return false, fmt.Errorf("failed to prune default value: %v", err)
+		}
+		if err := structuralobjectmeta.Coerce(nil, obj, p.rootSchema, true, true); err != nil {
+			return false, fmt.Errorf("failed to prune default value: %v", err)
+		}
+		pruning.Prune(obj, p.rootSchema, true)
+		s.Default.Object, _, err = acc(obj)
+		if err != nil {
+			return false, fmt.Errorf("failed to prune default value: %v", err)
+		}
+
+		changed = changed || !reflect.DeepEqual(orig, s.Default.Object)
+	}
+
+	if s.AdditionalProperties != nil && s.AdditionalProperties.Structural != nil {
+		c, err := p.pruneDefaults(s.AdditionalProperties.Structural, f.Child("*"))
+		if err != nil {
+			return false, err
+		}
+		changed = changed || c
+	}
+	if s.Items != nil {
+		c, err := p.pruneDefaults(s.Items, f.Index())
+		if err != nil {
+			return false, err
+		}
+		changed = changed || c
+	}
+	for k, subSchema := range s.Properties {
+		c, err := p.pruneDefaults(&subSchema, f.Child(k))
+		if err != nil {
+			return false, err
+		}
+		if c {
+			s.Properties[k] = subSchema
+			changed = true
+		}
+	}
+
+	return changed, nil
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/prunenulls.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/prunenulls.go
new file mode 100644
index 0000000000..bc8221ac7d
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/prunenulls.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaulting
+
+import structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+
+func isNonNullableNonDefaultableNull(x interface{}, s *structuralschema.Structural) bool {
+	return x == nil && s != nil && s.Generic.Nullable == false && s.Default.Object == nil
+}
+
+func getSchemaForField(field string, s *structuralschema.Structural) *structuralschema.Structural {
+	if s == nil {
+		return nil
+	}
+	schema, ok := s.Properties[field]
+	if ok {
+		return &schema
+	}
+	if s.AdditionalProperties != nil {
+		return s.AdditionalProperties.Structural
+	}
+	return nil
+}
+
+// PruneNonNullableNullsWithoutDefaults removes non-nullable
+// non-defaultable null values from object.
+//
+// Non-nullable nulls that have a default are left alone here and will
+// be defaulted later.
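+//
+// For example (illustrative; "a" is non-nullable without a default, "b" is
+// non-nullable with a default):
+//
+//	x := map[string]interface{}{"a": nil, "b": nil}
+//	PruneNonNullableNullsWithoutDefaults(x, s) // "a" is removed; "b" is kept for defaulting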
+func PruneNonNullableNullsWithoutDefaults(x interface{}, s *structuralschema.Structural) {
+	switch x := x.(type) {
+	case map[string]interface{}:
+		for k, v := range x {
+			schema := getSchemaForField(k, s)
+			if isNonNullableNonDefaultableNull(v, schema) {
+				delete(x, k)
+			} else {
+				PruneNonNullableNullsWithoutDefaults(v, schema)
+			}
+		}
+	case []interface{}:
+		var schema *structuralschema.Structural
+		if s != nil {
+			schema = s.Items
+		}
+		for i := range x {
+			PruneNonNullableNullsWithoutDefaults(x[i], schema)
+		}
+	default:
+		// scalars, do nothing
+	}
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/surroundingobject.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/surroundingobject.go
new file mode 100644
index 0000000000..fba2b75af5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/surroundingobject.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaulting
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// AccessorFunc returns a node x in obj on a fixed (implicitly encoded) JSON path
+// if that path exists in obj (found==true). If it does not exist, found is false.
+// If on the path the type of a field is wrong, an error is returned.
+type AccessorFunc func(obj map[string]interface{}) (x interface{}, found bool, err error)
+
+// SurroundingObjectFunc is a surrounding object builder with a given x at a leaf.
+// Which leaf is determined by the series of Index() and Child(k) calls.
+// It also returns the inverse of the builder, namely the accessor that extracts x
+// from the test object.
+//
+// With obj, acc, _ := someSurroundingObjectFunc(x) we get:
+//
+//	acc(obj) == x
+//	reflect.DeepEqual(acc(DeepCopy(obj)), x)
+//
+// where x is the original instance for slices and maps.
+//
+// If after computation of acc the node holding x in obj is mutated (e.g. pruned),
+// the accessor will return that mutated node value (e.g. the pruned x).
+//
+// Example (ignoring the last two return values):
+//
+//	NewRootObjectFunc()(x) == x
+//	NewRootObjectFunc().Index()(x) == [x]
+//	NewRootObjectFunc().Index().Child("foo")(x) == [{"foo": x}]
+//	NewRootObjectFunc().Index().Child("foo").Child("bar")(x) == [{"foo": {"bar":x}}]
+//	NewRootObjectFunc().Index().Child("foo").Child("bar").Index()(x) == [{"foo": {"bar":[x]}}]
+//
+// and:
+//
+//	NewRootObjectFunc(), then acc(x) == x
+//	NewRootObjectFunc().Index(), then acc([x]) == x
+//	NewRootObjectFunc().Index().Child("foo"), then acc([{"foo": x}]) == x
+//	NewRootObjectFunc().Index().Child("foo").Child("bar"), then acc([{"foo": {"bar":x}}]) == x
+//	NewRootObjectFunc().Index().Child("foo").Child("bar").Index(), then acc([{"foo": {"bar":[x]}}]) == x
+type SurroundingObjectFunc func(focus interface{}) (map[string]interface{}, AccessorFunc, error)
+
+// NewRootObjectFunc returns the identity function. The passed focus value
+// must be an object.
+func NewRootObjectFunc() SurroundingObjectFunc {
+	return func(x interface{}) (map[string]interface{}, AccessorFunc, error) {
+		obj, ok := x.(map[string]interface{})
+		if !ok {
+			return nil, nil, fmt.Errorf("object root default value must be of object type")
+		}
+		return obj, func(root map[string]interface{}) (interface{}, bool, error) {
+			return root, true, nil
+		}, nil
+	}
+}
+
+// WithTypeMeta returns a closure that sets the kind and apiVersion fields
+// from the given TypeMeta if they are not already present. This mutates the
+// object returned by f(x).
+func (f SurroundingObjectFunc) WithTypeMeta(meta metav1.TypeMeta) SurroundingObjectFunc {
+	return func(x interface{}) (map[string]interface{}, AccessorFunc, error) {
+		obj, acc, err := f(x)
+		if err != nil {
+			return nil, nil, err
+		}
+		if obj == nil {
+			obj = map[string]interface{}{}
+		}
+		if _, found := obj["kind"]; !found {
+			obj["kind"] = meta.Kind
+		}
+		if _, found := obj["apiVersion"]; !found {
+			obj["apiVersion"] = meta.APIVersion
+		}
+		return obj, acc, err
+	}
+}
+
+// Child returns a function x => f({k: x}) and the corresponding accessor.
+func (f SurroundingObjectFunc) Child(k string) SurroundingObjectFunc {
+	return func(x interface{}) (map[string]interface{}, AccessorFunc, error) {
+		obj, acc, err := f(map[string]interface{}{k: x})
+		if err != nil {
+			return nil, nil, err
+		}
+		return obj, func(obj map[string]interface{}) (interface{}, bool, error) {
+			x, found, err := acc(obj)
+			if err != nil {
+				return nil, false, fmt.Errorf(".%s%v", k, err)
+			}
+			if !found {
+				return nil, false, nil
+			}
+			if x, ok := x.(map[string]interface{}); !ok {
+				return nil, false, fmt.Errorf(".%s must be of object type", k)
+			} else if v, found := x[k]; !found {
+				return nil, false, nil
+			} else {
+				return v, true, nil
+			}
+		}, err
+	}
+}
+
+// Index returns a function x => f([x]) and the corresponding accessor.
+func (f SurroundingObjectFunc) Index() SurroundingObjectFunc {
+	return func(focus interface{}) (map[string]interface{}, AccessorFunc, error) {
+		obj, acc, err := f([]interface{}{focus})
+		if err != nil {
+			return nil, nil, err
+		}
+		return obj, func(obj map[string]interface{}) (interface{}, bool, error) {
+			x, found, err := acc(obj)
+			if err != nil {
+				return nil, false, fmt.Errorf("[]%v", err)
+			}
+			if !found {
+				return nil, false, nil
+			}
+			if x, ok := x.([]interface{}); !ok {
+				return nil, false, fmt.Errorf("[] must be of array type")
+			} else if len(x) == 0 {
+				return nil, false, nil
+			} else {
+				return x[0], true, nil
+			}
+		}, err
+	}
+}
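+
+// exampleSurroundingObject is an illustrative sketch, not part of the
+// vendored upstream code: it builds {"spec": {"replicas": 3}} around the
+// focus value 3 and reads the focus back through the accessor, which
+// inverts the builder.
+func exampleSurroundingObject() (interface{}, bool, error) {
+	f := NewRootObjectFunc().Child("spec").Child("replicas")
+	obj, acc, err := f(3) // obj == map[string]interface{}{"spec": map[string]interface{}{"replicas": 3}}
+	if err != nil {
+		return nil, false, err
+	}
+	return acc(obj) // == 3, true, nil
+}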
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/validation.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/validation.go
new file mode 100644
index 0000000000..e529d5a3eb
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/defaulting/validation.go
@@ -0,0 +1,199 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaulting
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema/cel"
+	schemaobjectmeta "k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta"
+	"k8s.io/apiextensions-apiserver/pkg/apiserver/schema/pruning"
+	apiservervalidation "k8s.io/apiextensions-apiserver/pkg/apiserver/validation"
+	apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	celconfig "k8s.io/apiserver/pkg/apis/cel"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+)
+
+// ValidateDefaults checks that default values validate and are properly pruned.
+// The context is passed to support cancellation during CEL validation.
+func ValidateDefaults(ctx context.Context, pth *field.Path, s *structuralschema.Structural, isResourceRoot, requirePrunedDefaults bool) (field.ErrorList, error) {
+	f := NewRootObjectFunc().WithTypeMeta(metav1.TypeMeta{APIVersion: "validation/v1", Kind: "Validation"})
+
+	if isResourceRoot {
+		if s == nil {
+			s = &structuralschema.Structural{}
+		}
+		if !s.XEmbeddedResource {
+			clone := *s
+			clone.XEmbeddedResource = true
+			s = &clone
+		}
+	}
+
+	allErr, err, _ := validate(ctx, pth, s, s, f, false, requirePrunedDefaults, celconfig.RuntimeCELCostBudget)
+	return allErr, err
+}
+
+// validate is the recursive step function of the validation. insideMeta is true if s specifies
+// TypeMeta or ObjectMeta. The SurroundingObjectFunc f is used to validate defaults of
+// TypeMeta or ObjectMeta fields.
+// The context is passed to support cancellation during CEL validation.
+func validate(ctx context.Context, pth *field.Path, s *structuralschema.Structural, rootSchema *structuralschema.Structural, f SurroundingObjectFunc, insideMeta, requirePrunedDefaults bool, costBudget int64) (allErrs field.ErrorList, err error, remainingCost int64) {
+	remainingCost = costBudget
+	if s == nil {
+		return nil, nil, remainingCost
+	}
+
+	if s.XEmbeddedResource {
+		insideMeta = false
+		f = NewRootObjectFunc().WithTypeMeta(metav1.TypeMeta{APIVersion: "validation/v1", Kind: "Validation"})
+		rootSchema = s
+	}
+
+	isResourceRoot := s == rootSchema
+
+	if s.Default.Object != nil {
+		validator := apiservervalidation.NewSchemaValidatorFromOpenAPI(s.ToKubeOpenAPI())
+
+		if insideMeta {
+			obj, _, err := f(runtime.DeepCopyJSONValue(s.Default.Object))
+			if err != nil {
+				// This should never happen: f(s.Default.Object) only returns an error if f is the
+				// root object func and the default value is not a map, and in that case we would
+				// not be inside metadata here.
+				return nil, fmt.Errorf("failed to validate default value inside metadata: %v", err), remainingCost
+			}
+
+			// check ObjectMeta/TypeMeta and everything else
+			if err := schemaobjectmeta.Coerce(nil, obj, rootSchema, true, false); err != nil {
+				allErrs = append(allErrs, field.Invalid(pth.Child("default"), s.Default.Object, fmt.Sprintf("must result in valid metadata: %v", err)))
+			} else if errs := schemaobjectmeta.Validate(nil, obj, rootSchema, true); len(errs) > 0 {
+				allErrs = append(allErrs, field.Invalid(pth.Child("default"), s.Default.Object, fmt.Sprintf("must result in valid metadata: %v", errs.ToAggregate())))
+			} else if errs := apiservervalidation.ValidateCustomResource(pth.Child("default"), s.Default.Object, validator); len(errs) > 0 {
+				allErrs = append(allErrs, errs...)
+			} else if celValidator := cel.NewValidator(s, isResourceRoot, celconfig.PerCallLimit); celValidator != nil {
+				celErrs, rmCost := celValidator.Validate(ctx, pth.Child("default"), s, s.Default.Object, s.Default.Object, remainingCost)
+				allErrs = append(allErrs, celErrs...)
+
+				if len(celErrs) == 0 && utilfeature.DefaultFeatureGate.Enabled(apiextensionsfeatures.CRDValidationRatcheting) {
+					// If ratcheting is enabled, some CEL rules may use optionalOldSelf.
+					// For such rules the above validation is not sufficient for
+					// determining whether the default value is a valid value to introduce
+					// via create or uncorrelated update.
+					//
+					// Validate an update from nil to the default value to ensure
+					// that the default value passes.
+					celErrs, rmCostWithoutOldObject := celValidator.Validate(ctx, pth.Child("default"), s, s.Default.Object, nil, remainingCost)
+					allErrs = append(allErrs, celErrs...)
+
+					// capture the cost of both types of runs and take whichever
+					// leaves less remaining cost
+					if rmCostWithoutOldObject < rmCost {
+						rmCost = rmCostWithoutOldObject
+					}
+				}
+
+				remainingCost = rmCost
+				if remainingCost < 0 {
+					return allErrs, nil, remainingCost
+				}
+			}
+		} else {
+			// check whether default is pruned
+			if requirePrunedDefaults {
+				pruned := runtime.DeepCopyJSONValue(s.Default.Object)
+				pruning.Prune(pruned, s, s.XEmbeddedResource)
+				if !reflect.DeepEqual(pruned, s.Default.Object) {
+					allErrs = append(allErrs, field.Invalid(pth.Child("default"), s.Default.Object, "must not have unknown fields"))
+				}
+			}
+
+			// check ObjectMeta/TypeMeta and everything else
+			if err := schemaobjectmeta.Coerce(pth.Child("default"), s.Default.Object, s, s.XEmbeddedResource, false); err != nil {
+				allErrs = append(allErrs, err)
+			} else if errs := schemaobjectmeta.Validate(pth.Child("default"), s.Default.Object, s, s.XEmbeddedResource); len(errs) > 0 {
+				allErrs = append(allErrs, errs...)
+			} else if errs := apiservervalidation.ValidateCustomResource(pth.Child("default"), s.Default.Object, validator); len(errs) > 0 {
+				allErrs = append(allErrs, errs...)
+			} else if celValidator := cel.NewValidator(s, isResourceRoot, celconfig.PerCallLimit); celValidator != nil {
+				celErrs, rmCost := celValidator.Validate(ctx, pth.Child("default"), s, s.Default.Object, s.Default.Object, remainingCost)
+				allErrs = append(allErrs, celErrs...)
+
+				if len(celErrs) == 0 && utilfeature.DefaultFeatureGate.Enabled(apiextensionsfeatures.CRDValidationRatcheting) {
+					// If ratcheting is enabled, some CEL rules may use optionalOldSelf.
+					// For such rules the above validation is not sufficient for
+					// determining whether the default value is a valid value to introduce
+					// via create or uncorrelated update.
+					//
+					// Validate an update from nil to the default value to ensure
+					// that the default value passes.
+					celErrs, rmCostWithoutOldObject := celValidator.Validate(ctx, pth.Child("default"), s, s.Default.Object, nil, remainingCost)
+					allErrs = append(allErrs, celErrs...)
+
+					// capture the cost of both types of runs and take whichever
+					// leaves less remaining cost
+					if rmCostWithoutOldObject < rmCost {
+						rmCost = rmCostWithoutOldObject
+					}
+				}
+
+				remainingCost = rmCost
+				if remainingCost < 0 {
+					return allErrs, nil, remainingCost
+				}
+			}
+		}
+	}
+
+	// do not follow additionalProperties because defaults are forbidden there
+
+	if s.Items != nil {
+		errs, err, rCost := validate(ctx, pth.Child("items"), s.Items, rootSchema, f.Index(), insideMeta, requirePrunedDefaults, remainingCost)
+		remainingCost = rCost
+		allErrs = append(allErrs, errs...)
+		if err != nil {
+			return nil, err, remainingCost
+		}
+		if remainingCost < 0 {
+			return allErrs, nil, remainingCost
+		}
+	}
+
+	for k, subSchema := range s.Properties {
+		subInsideMeta := insideMeta
+		if s.XEmbeddedResource && (k == "metadata" || k == "apiVersion" || k == "kind") {
+			subInsideMeta = true
+		}
+		errs, err, rCost := validate(ctx, pth.Child("properties").Key(k), &subSchema, rootSchema, f.Child(k), subInsideMeta, requirePrunedDefaults, remainingCost)
+		remainingCost = rCost
+		allErrs = append(allErrs, errs...)
+		if err != nil {
+			return nil, err, remainingCost
+		}
+		if remainingCost < 0 {
+			return allErrs, nil, remainingCost
+		}
+	}
+
+	return allErrs, nil, remainingCost
+}
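+
+// exampleValidateDefaults is an illustrative sketch, not part of the
+// vendored upstream code: it shows how a caller might check all default
+// values declared in a structural schema and fold the results into a
+// single error.
+func exampleValidateDefaults(ctx context.Context, s *structuralschema.Structural) error {
+	errs, err := ValidateDefaults(ctx, field.NewPath("openAPIV3Schema"), s, true, true)
+	if err != nil {
+		return err
+	}
+	return errs.ToAggregate()
+}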
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/kubeopenapi.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/kubeopenapi.go
new file mode 100644
index 0000000000..df78ba77e6
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/kubeopenapi.go
@@ -0,0 +1,161 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+	"k8s.io/kube-openapi/pkg/validation/spec"
+)
+
+// ToKubeOpenAPI converts a structural schema to a go-openapi schema. The conversion is faithful and roundtrippable.
+func (s *Structural) ToKubeOpenAPI() *spec.Schema {
+	if s == nil {
+		return nil
+	}
+
+	ret := &spec.Schema{}
+
+	if s.Items != nil {
+		ret.Items = &spec.SchemaOrArray{Schema: s.Items.ToKubeOpenAPI()}
+	}
+	if s.Properties != nil {
+		ret.Properties = make(map[string]spec.Schema, len(s.Properties))
+		for k, v := range s.Properties {
+			ret.Properties[k] = *v.ToKubeOpenAPI()
+		}
+	}
+	if s.AdditionalProperties != nil {
+		ret.AdditionalProperties = &spec.SchemaOrBool{
+			Allows: s.AdditionalProperties.Bool,
+			Schema: s.AdditionalProperties.Structural.ToKubeOpenAPI(),
+		}
+	}
+	s.Generic.toKubeOpenAPI(ret)
+	s.Extensions.toKubeOpenAPI(ret)
+	s.ValueValidation.toKubeOpenAPI(ret)
+	s.ValidationExtensions.toKubeOpenAPI(ret)
+	return ret
+}
+
+func (g *Generic) toKubeOpenAPI(ret *spec.Schema) {
+	if g == nil {
+		return
+	}
+
+	if len(g.Type) != 0 {
+		ret.Type = spec.StringOrArray{g.Type}
+	}
+	ret.Nullable = g.Nullable
+	ret.Description = g.Description
+	ret.Title = g.Title
+	ret.Default = g.Default.Object
+}
+
+func (x *Extensions) toKubeOpenAPI(ret *spec.Schema) {
+	if x == nil {
+		return
+	}
+
+	if x.XPreserveUnknownFields {
+		ret.VendorExtensible.AddExtension("x-kubernetes-preserve-unknown-fields", true)
+	}
+	if x.XEmbeddedResource {
+		ret.VendorExtensible.AddExtension("x-kubernetes-embedded-resource", true)
+	}
+	if x.XIntOrString {
+		ret.VendorExtensible.AddExtension("x-kubernetes-int-or-string", true)
+	}
+	if len(x.XListMapKeys) > 0 {
+		ret.VendorExtensible.AddExtension("x-kubernetes-list-map-keys", x.XListMapKeys)
+	}
+	if x.XListType != nil {
+		ret.VendorExtensible.AddExtension("x-kubernetes-list-type", *x.XListType)
+	}
+	if x.XMapType != nil {
+		ret.VendorExtensible.AddExtension("x-kubernetes-map-type", *x.XMapType)
+	}
+}
+
+func (x *ValidationExtensions) toKubeOpenAPI(ret *spec.Schema) {
+	if x == nil {
+		return
+	}
+
+	if len(x.XValidations) > 0 {
+		ret.VendorExtensible.AddExtension("x-kubernetes-validations", x.XValidations)
+	}
+}
+
+func (v *ValueValidation) toKubeOpenAPI(ret *spec.Schema) {
+	if v == nil {
+		return
+	}
+
+	ret.Format = v.Format
+	ret.Maximum = v.Maximum
+	ret.ExclusiveMaximum = v.ExclusiveMaximum
+	ret.Minimum = v.Minimum
+	ret.ExclusiveMinimum = v.ExclusiveMinimum
+	ret.MaxLength = v.MaxLength
+	ret.MinLength = v.MinLength
+	ret.Pattern = v.Pattern
+	ret.MaxItems = v.MaxItems
+	ret.MinItems = v.MinItems
+	ret.UniqueItems = v.UniqueItems
+	ret.MultipleOf = v.MultipleOf
+	if v.Enum != nil {
+		ret.Enum = make([]interface{}, 0, len(v.Enum))
+		for i := range v.Enum {
+			ret.Enum = append(ret.Enum, v.Enum[i].Object)
+		}
+	}
+	ret.MaxProperties = v.MaxProperties
+	ret.MinProperties = v.MinProperties
+	ret.Required = v.Required
+	for i := range v.AllOf {
+		ret.AllOf = append(ret.AllOf, *v.AllOf[i].toKubeOpenAPI())
+	}
+	for i := range v.AnyOf {
+		ret.AnyOf = append(ret.AnyOf, *v.AnyOf[i].toKubeOpenAPI())
+	}
+	for i := range v.OneOf {
+		ret.OneOf = append(ret.OneOf, *v.OneOf[i].toKubeOpenAPI())
+	}
+	ret.Not = v.Not.toKubeOpenAPI()
+}
+
+func (vv *NestedValueValidation) toKubeOpenAPI() *spec.Schema {
+	if vv == nil {
+		return nil
+	}
+
+	ret := &spec.Schema{}
+
+	vv.ValueValidation.toKubeOpenAPI(ret)
+	vv.ValidationExtensions.toKubeOpenAPI(ret)
+	if vv.Items != nil {
+		ret.Items = &spec.SchemaOrArray{Schema: vv.Items.toKubeOpenAPI()}
+	}
+	if vv.Properties != nil {
+		ret.Properties = make(map[string]spec.Schema, len(vv.Properties))
+		for k, v := range vv.Properties {
+			ret.Properties[k] = *v.toKubeOpenAPI()
+		}
+	}
+	vv.ForbiddenGenerics.toKubeOpenAPI(ret)   // normally empty. Exception: int-or-string
+	vv.ForbiddenExtensions.toKubeOpenAPI(ret) // shouldn't do anything
+	return ret
+}
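+
+// exampleKubeOpenAPIConversion is an illustrative sketch, not part of the
+// vendored upstream code: a nullable string schema converts to a
+// spec.Schema with Type ["string"] and Nullable set.
+func exampleKubeOpenAPIConversion() *spec.Schema {
+	s := &Structural{Generic: Generic{Type: "string", Nullable: true}}
+	return s.ToKubeOpenAPI()
+}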
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta/algorithm.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta/algorithm.go
new file mode 100644
index 0000000000..811c38377b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta/algorithm.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package objectmeta
+
+import (
+	"sort"
+
+	structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// CoerceOptions gives the ability to ReturnUnknownFieldPaths for fields
+// unrecognized by the schema or DropInvalidFields for fields that are a part
+// of the schema, but are malformed.
+type CoerceOptions struct {
+	// DropInvalidFields discards malformed serialized metadata fields that
+	// cannot be successfully decoded to the corresponding ObjectMeta field.
+	// This only applies to fields that are recognized as part of the schema,
+	// but of an invalid type (i.e. cause an error when unmarshaling, rather
+	// than being dropped or causing a strictErr).
+	DropInvalidFields bool
+	// ReturnUnknownFieldPaths will return the paths to fields that are not
+	// recognized as part of the schema.
+	ReturnUnknownFieldPaths bool
+}
+
+// CoerceWithOptions checks the types of embedded ObjectMeta and TypeMeta and prunes unknown fields inside the former.
+// It also coerces ObjectMeta and TypeMeta at the root if isResourceRoot is true.
+// If opts.ReturnUnknownFieldPaths is true, it returns the paths of any fields that are not part of the
+// schema and were dropped when unmarshaling.
+// If opts.DropInvalidFields is true, fields of the wrong type are dropped.
+func CoerceWithOptions(pth *field.Path, obj interface{}, s *structuralschema.Structural, isResourceRoot bool, opts CoerceOptions) (*field.Error, []string) {
+	if isResourceRoot {
+		if s == nil {
+			s = &structuralschema.Structural{}
+		}
+		if !s.XEmbeddedResource {
+			clone := *s
+			clone.XEmbeddedResource = true
+			s = &clone
+		}
+	}
+	c := coercer{DropInvalidFields: opts.DropInvalidFields, ReturnUnknownFieldPaths: opts.ReturnUnknownFieldPaths}
+	schemaOpts := &structuralschema.UnknownFieldPathOptions{
+		TrackUnknownFieldPaths: opts.ReturnUnknownFieldPaths,
+	}
+	fieldErr := c.coerce(pth, obj, s, schemaOpts)
+	sort.Strings(schemaOpts.UnknownFieldPaths)
+	return fieldErr, schemaOpts.UnknownFieldPaths
+}
+
+// Coerce calls CoerceWithOptions without returning unknown field paths.
+func Coerce(pth *field.Path, obj interface{}, s *structuralschema.Structural, isResourceRoot, dropInvalidFields bool) *field.Error {
+	fieldErr, _ := CoerceWithOptions(pth, obj, s, isResourceRoot, CoerceOptions{DropInvalidFields: dropInvalidFields})
+	return fieldErr
+}
+
+type coercer struct {
+	DropInvalidFields       bool
+	ReturnUnknownFieldPaths bool
+}
+
+func (c *coercer) coerce(pth *field.Path, x interface{}, s *structuralschema.Structural, opts *structuralschema.UnknownFieldPathOptions) *field.Error {
+	if s == nil {
+		return nil
+	}
+	origPathLen := len(opts.ParentPath)
+	defer func() {
+		opts.ParentPath = opts.ParentPath[:origPathLen]
+	}()
+	switch x := x.(type) {
+	case map[string]interface{}:
+		for k, v := range x {
+			if s.XEmbeddedResource {
+				switch k {
+				case "apiVersion", "kind":
+					if _, ok := v.(string); !ok && c.DropInvalidFields {
+						delete(x, k)
+					} else if !ok {
+						return field.Invalid(pth.Child(k), v, "must be a string")
+					}
+				case "metadata":
+					meta, found, unknownFields, err := GetObjectMetaWithOptions(x, ObjectMetaOptions{
+						DropMalformedFields:     c.DropInvalidFields,
+						ReturnUnknownFieldPaths: c.ReturnUnknownFieldPaths,
+						ParentPath:              pth,
+					})
+					opts.UnknownFieldPaths = append(opts.UnknownFieldPaths, unknownFields...)
+					if err != nil {
+						if !c.DropInvalidFields {
+							return field.Invalid(pth.Child("metadata"), v, err.Error())
+						}
+						// pass through on error if DropInvalidFields is true
+					} else if found {
+						if err := SetObjectMeta(x, meta); err != nil {
+							return field.Invalid(pth.Child("metadata"), v, err.Error())
+						}
+						if meta.CreationTimestamp.IsZero() {
+							unstructured.RemoveNestedField(x, "metadata", "creationTimestamp")
+						}
+					}
+				}
+			}
+			prop, ok := s.Properties[k]
+			if ok {
+				opts.AppendKey(k)
+				if err := c.coerce(pth.Child(k), v, &prop, opts); err != nil {
+					return err
+				}
+				opts.ParentPath = opts.ParentPath[:origPathLen]
+			} else if s.AdditionalProperties != nil {
+				opts.AppendKey(k)
+				if err := c.coerce(pth.Key(k), v, s.AdditionalProperties.Structural, opts); err != nil {
+					return err
+				}
+				opts.ParentPath = opts.ParentPath[:origPathLen]
+			}
+		}
+	case []interface{}:
+		for i, v := range x {
+			opts.AppendIndex(i)
+			if err := c.coerce(pth.Index(i), v, s.Items, opts); err != nil {
+				return err
+			}
+			opts.ParentPath = opts.ParentPath[:origPathLen]
+		}
+	default:
+		// scalars, do nothing
+	}
+
+	return nil
+}
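+
+// exampleCoerceUnknownPaths is an illustrative sketch, not part of the
+// vendored upstream code: it runs coercion with unknown-field tracking
+// enabled and returns the sorted paths of the metadata fields that were
+// dropped.
+func exampleCoerceUnknownPaths(obj map[string]interface{}, s *structuralschema.Structural) ([]string, *field.Error) {
+	fieldErr, unknownPaths := CoerceWithOptions(nil, obj, s, true, CoerceOptions{
+		ReturnUnknownFieldPaths: true,
+	})
+	return unknownPaths, fieldErr
+}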
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta/coerce.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta/coerce.go
new file mode 100644
index 0000000000..42b968b8ac
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta/coerce.go
@@ -0,0 +1,151 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package objectmeta
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	utiljson "k8s.io/apimachinery/pkg/util/json"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	kjson "sigs.k8s.io/json"
+)
+
+// GetObjectMeta calls GetObjectMetaWithOptions without returning unknown field paths.
+func GetObjectMeta(obj map[string]interface{}, dropMalformedFields bool) (*metav1.ObjectMeta, bool, error) {
+	meta, found, _, err := GetObjectMetaWithOptions(obj, ObjectMetaOptions{
+		DropMalformedFields: dropMalformedFields,
+	})
+	return meta, found, err
+}
+
+// ObjectMetaOptions provides the options for how GetObjectMeta should retrieve the object meta.
+type ObjectMetaOptions struct {
+	// DropMalformedFields discards malformed serialized metadata fields that
+	// cannot be successfully decoded to the corresponding ObjectMeta field.
+	// This only applies to fields that are recognized as part of the schema,
+	// but of an invalid type (i.e. cause an error when unmarshaling, rather
+	// than being dropped or causing a strictErr).
+	DropMalformedFields bool
+	// ReturnUnknownFieldPaths will return the paths to fields that are not
+	// recognized as part of the schema.
+	ReturnUnknownFieldPaths bool
+	// ParentPath provides the current path up to the given ObjectMeta.
+	// If nil, the metadata is assumed to be at the root of the object.
+	ParentPath *field.Path
+}
+
+// GetObjectMetaWithOptions does conversion of JSON to ObjectMeta.
+// It first tries json.Unmarshal into a metav1.ObjectMeta
+// type. If that does not work and opts.DropMalformedFields is true, it does a field-by-field best-effort conversion,
+// throwing away fields which lead to errors.
+// If opts.ReturnUnknownFieldPaths is true, it uses UnmarshalStrict instead, returning the paths of any unknown fields
+// it encounters (i.e. paths returned as strict errors from UnmarshalStrict).
+func GetObjectMetaWithOptions(obj map[string]interface{}, opts ObjectMetaOptions) (*metav1.ObjectMeta, bool, []string, error) {
+	metadata, found := obj["metadata"]
+	if !found {
+		return nil, false, nil, nil
+	}
+
+	// round-trip through JSON first, hoping that unmarshalling just works
+	objectMeta := &metav1.ObjectMeta{}
+	metadataBytes, err := utiljson.Marshal(metadata)
+	if err != nil {
+		return nil, false, nil, err
+	}
+	var unmarshalErr error
+	if opts.ReturnUnknownFieldPaths {
+		var strictErrs []error
+		strictErrs, unmarshalErr = kjson.UnmarshalStrict(metadataBytes, objectMeta)
+		if unmarshalErr == nil {
+			if len(strictErrs) > 0 {
+				unknownPaths := []string{}
+				prefix := opts.ParentPath.Child("metadata").String()
+				for _, err := range strictErrs {
+					if fieldPathErr, ok := err.(kjson.FieldError); ok {
+						unknownPaths = append(unknownPaths, prefix+"."+fieldPathErr.FieldPath())
+					}
+				}
+				return objectMeta, true, unknownPaths, nil
+			}
+			return objectMeta, true, nil, nil
+		}
+	} else {
+		if unmarshalErr = utiljson.Unmarshal(metadataBytes, objectMeta); unmarshalErr == nil {
+			// if successful, return
+			return objectMeta, true, nil, nil
+		}
+	}
+	if !opts.DropMalformedFields {
+		// if we're not trying to drop malformed fields, return the error
+		return nil, true, nil, unmarshalErr
+	}
+
+	metadataMap, ok := metadata.(map[string]interface{})
+	if !ok {
+		return nil, false, nil, fmt.Errorf("invalid metadata: expected object, got %T", metadata)
+	}
+
+	// Go field by field accumulating into the metadata object.
+	// This takes advantage of the fact that you can repeatedly unmarshal individual fields into a single struct,
+	// each iteration preserving the old key-values.
+	accumulatedObjectMeta := &metav1.ObjectMeta{}
+	testObjectMeta := &metav1.ObjectMeta{}
+	var unknownFields []string
+	for k, v := range metadataMap {
+		// serialize a single field
+		if singleFieldBytes, err := utiljson.Marshal(map[string]interface{}{k: v}); err == nil {
+			// do a test unmarshal
+			if utiljson.Unmarshal(singleFieldBytes, testObjectMeta) == nil {
+				// if that succeeds, unmarshal for real
+				if opts.ReturnUnknownFieldPaths {
+					strictErrs, _ := kjson.UnmarshalStrict(singleFieldBytes, accumulatedObjectMeta)
+					if len(strictErrs) > 0 {
+						prefix := opts.ParentPath.Child("metadata").String()
+						for _, err := range strictErrs {
+							if fieldPathErr, ok := err.(kjson.FieldError); ok {
+								unknownFields = append(unknownFields, prefix+"."+fieldPathErr.FieldPath())
+							}
+						}
+					}
+				} else {
+					utiljson.Unmarshal(singleFieldBytes, accumulatedObjectMeta)
+				}
+			}
+		}
+	}
+
+	return accumulatedObjectMeta, true, unknownFields, nil
+}
+
+// SetObjectMeta writes back ObjectMeta into a JSON data structure.
+func SetObjectMeta(obj map[string]interface{}, objectMeta *metav1.ObjectMeta) error {
+	if objectMeta == nil {
+		unstructured.RemoveNestedField(obj, "metadata")
+		return nil
+	}
+
+	metadata, err := runtime.DefaultUnstructuredConverter.ToUnstructured(objectMeta)
+	if err != nil {
+		return err
+	}
+
+	obj["metadata"] = metadata
+	return nil
+}
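+
+// exampleRoundTripObjectMeta is an illustrative sketch, not part of the
+// vendored upstream code: it decodes the metadata of an unstructured
+// object, mutates it, and writes it back. The "example" label is
+// hypothetical, for illustration only.
+func exampleRoundTripObjectMeta(obj map[string]interface{}) error {
+	meta, found, err := GetObjectMeta(obj, false)
+	if err != nil || !found {
+		return err
+	}
+	if meta.Labels == nil {
+		meta.Labels = map[string]string{}
+	}
+	meta.Labels["example"] = "true"
+	return SetObjectMeta(obj, meta)
+}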
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta/validation.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta/validation.go
new file mode 100644
index 0000000000..195150ae23
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/objectmeta/validation.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package objectmeta
+
+import (
+	"strings"
+
+	structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+	metavalidation "k8s.io/apimachinery/pkg/api/validation"
+	"k8s.io/apimachinery/pkg/api/validation/path"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilvalidation "k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// Validate validates embedded ObjectMeta and TypeMeta.
+// It also validates those at the root if isResourceRoot is true.
+func Validate(pth *field.Path, obj interface{}, s *structuralschema.Structural, isResourceRoot bool) field.ErrorList {
+	if isResourceRoot {
+		if s == nil {
+			s = &structuralschema.Structural{}
+		}
+		if !s.XEmbeddedResource {
+			clone := *s
+			clone.XEmbeddedResource = true
+			s = &clone
+		}
+	}
+	return validate(pth, obj, s)
+}
+
+func validate(pth *field.Path, x interface{}, s *structuralschema.Structural) field.ErrorList {
+	if s == nil {
+		return nil
+	}
+
+	var allErrs field.ErrorList
+
+	switch x := x.(type) {
+	case map[string]interface{}:
+		if s.XEmbeddedResource {
+			allErrs = append(allErrs, validateEmbeddedResource(pth, x, s)...)
+		}
+
+		for k, v := range x {
+			prop, ok := s.Properties[k]
+			if ok {
+				allErrs = append(allErrs, validate(pth.Child(k), v, &prop)...)
+			} else if s.AdditionalProperties != nil {
+				allErrs = append(allErrs, validate(pth.Key(k), v, s.AdditionalProperties.Structural)...)
+			}
+		}
+	case []interface{}:
+		for i, v := range x {
+			allErrs = append(allErrs, validate(pth.Index(i), v, s.Items)...)
+		}
+	default:
+		// scalars, do nothing
+	}
+
+	return allErrs
+}
+
+func validateEmbeddedResource(pth *field.Path, x map[string]interface{}, s *structuralschema.Structural) field.ErrorList {
+	var allErrs field.ErrorList
+
+	// require apiVersion and kind, but not metadata
+	if _, found := x["apiVersion"]; !found {
+		allErrs = append(allErrs, field.Required(pth.Child("apiVersion"), "must not be empty"))
+	}
+	if _, found := x["kind"]; !found {
+		allErrs = append(allErrs, field.Required(pth.Child("kind"), "must not be empty"))
+	}
+
+	for k, v := range x {
+		switch k {
+		case "apiVersion":
+			if apiVersion, ok := v.(string); !ok {
+				allErrs = append(allErrs, field.Invalid(pth.Child("apiVersion"), v, "must be a string"))
+			} else if len(apiVersion) == 0 {
+				allErrs = append(allErrs, field.Invalid(pth.Child("apiVersion"), apiVersion, "must not be empty"))
+			} else if _, err := schema.ParseGroupVersion(apiVersion); err != nil {
+				allErrs = append(allErrs, field.Invalid(pth.Child("apiVersion"), apiVersion, err.Error()))
+			}
+		case "kind":
+			if kind, ok := v.(string); !ok {
+				allErrs = append(allErrs, field.Invalid(pth.Child("kind"), v, "must be a string"))
+			} else if len(kind) == 0 {
+				allErrs = append(allErrs, field.Invalid(pth.Child("kind"), kind, "must not be empty"))
+			} else if errs := utilvalidation.IsDNS1035Label(strings.ToLower(kind)); len(errs) > 0 {
+				allErrs = append(allErrs, field.Invalid(pth.Child("kind"), kind, "may have mixed case, but should otherwise match: "+strings.Join(errs, ",")))
+			}
+		case "metadata":
+			meta, _, err := GetObjectMeta(x, false)
+			if err != nil {
+				allErrs = append(allErrs, field.Invalid(pth.Child("metadata"), v, err.Error()))
+			} else {
+				if len(meta.Name) == 0 {
+					meta.Name = "fakename" // we have to set something to avoid an error
+				}
+				allErrs = append(allErrs, metavalidation.ValidateObjectMeta(meta, len(meta.Namespace) > 0, path.ValidatePathSegmentName, pth.Child("metadata"))...)
+			}
+		}
+	}
+
+	return allErrs
+}
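+
+// exampleValidateEmbedded is an illustrative sketch, not part of the
+// vendored upstream code: it validates the ObjectMeta/TypeMeta of an
+// object at the resource root and aggregates the errors.
+func exampleValidateEmbedded(obj map[string]interface{}, s *structuralschema.Structural) error {
+	return Validate(nil, obj, s, true).ToAggregate()
+}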
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/options.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/options.go
new file mode 100644
index 0000000000..547e591751
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/options.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+	"strconv"
+	"strings"
+)
+
+// UnknownFieldPathOptions allows tracking the paths to unknown fields.
+type UnknownFieldPathOptions struct {
+	// TrackUnknownFieldPaths determines whether unknown field
+	// paths should be stored.
+	TrackUnknownFieldPaths bool
+	// ParentPath builds the path to unknown fields as the object
+	// is recursively traversed.
+	ParentPath []string
+	// UnknownFieldPaths is the list of all unknown fields identified.
+	UnknownFieldPaths []string
+}
+
+// RecordUnknownField appends the path of an unknown field to
+// UnknownFieldPaths, if TrackUnknownFieldPaths is true.
+func (o *UnknownFieldPathOptions) RecordUnknownField(field string) {
+	if !o.TrackUnknownFieldPaths {
+		return
+	}
+	l := len(o.ParentPath)
+	o.AppendKey(field)
+	o.UnknownFieldPaths = append(o.UnknownFieldPaths, strings.Join(o.ParentPath, ""))
+	o.ParentPath = o.ParentPath[:l]
+}
+
+// AppendKey adds a key (i.e. field) to the current parent
+// path, if TrackUnknownFieldPaths is true.
+func (o *UnknownFieldPathOptions) AppendKey(key string) {
+	if !o.TrackUnknownFieldPaths {
+		return
+	}
+	if len(o.ParentPath) > 0 {
+		o.ParentPath = append(o.ParentPath, ".")
+	}
+	o.ParentPath = append(o.ParentPath, key)
+}
+
+// AppendIndex adds an index to the most recent field of
+// the current parent path, if TrackUnknownFieldPaths is true.
+func (o *UnknownFieldPathOptions) AppendIndex(index int) {
+	if !o.TrackUnknownFieldPaths {
+		return
+	}
+	o.ParentPath = append(o.ParentPath, "[", strconv.Itoa(index), "]")
+}
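+
+// exampleTrackPaths is an illustrative sketch, not part of the vendored
+// upstream code: with tracking enabled, recording the field "unknown"
+// under spec.items[0] yields the path "spec.items[0].unknown".
+func exampleTrackPaths() []string {
+	opts := &UnknownFieldPathOptions{TrackUnknownFieldPaths: true}
+	opts.AppendKey("spec")
+	opts.AppendKey("items")
+	opts.AppendIndex(0)
+	opts.RecordUnknownField("unknown")
+	return opts.UnknownFieldPaths // == []string{"spec.items[0].unknown"}
+}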
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/pruning/algorithm.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/pruning/algorithm.go
new file mode 100644
index 0000000000..a929e52522
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/pruning/algorithm.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pruning
+
+import (
+	"sort"
+
+	structuralschema "k8s.io/apiextensions-apiserver/pkg/apiserver/schema"
+)
+
+// PruneWithOptions removes object fields in obj which are not specified in s. It skips TypeMeta
+// and ObjectMeta fields if XEmbeddedResource is set to true, or for the root if isResourceRoot=true,
+// i.e. it does not prune unknown metadata fields.
+// It returns the set of fields that it prunes if opts.TrackUnknownFieldPaths is true.
+func PruneWithOptions(obj interface{}, s *structuralschema.Structural, isResourceRoot bool, opts structuralschema.UnknownFieldPathOptions) []string {
+	if isResourceRoot {
+		if s == nil {
+			s = &structuralschema.Structural{}
+		}
+		if !s.XEmbeddedResource {
+			clone := *s
+			clone.XEmbeddedResource = true
+			s = &clone
+		}
+	}
+	prune(obj, s, &opts)
+	sort.Strings(opts.UnknownFieldPaths)
+	return opts.UnknownFieldPaths
+}
+
+// Prune is equivalent to
+// PruneWithOptions(obj, s, isResourceRoot, structuralschema.UnknownFieldPathOptions{})
+func Prune(obj interface{}, s *structuralschema.Structural, isResourceRoot bool) {
+	PruneWithOptions(obj, s, isResourceRoot, structuralschema.UnknownFieldPathOptions{})
+}
+
+var metaFields = map[string]bool{
+	"apiVersion": true,
+	"kind":       true,
+	"metadata":   true,
+}
+
+func prune(x interface{}, s *structuralschema.Structural, opts *structuralschema.UnknownFieldPathOptions) {
+	if s != nil && s.XPreserveUnknownFields {
+		skipPrune(x, s, opts)
+		return
+	}
+
+	origPathLen := len(opts.ParentPath)
+	defer func() {
+		opts.ParentPath = opts.ParentPath[:origPathLen]
+	}()
+	switch x := x.(type) {
+	case map[string]interface{}:
+		if s == nil {
+			for k := range x {
+				opts.RecordUnknownField(k)
+				delete(x, k)
+			}
+			return
+		}
+		for k, v := range x {
+			if s.XEmbeddedResource && metaFields[k] {
+				continue
+			}
+			prop, ok := s.Properties[k]
+			if ok {
+				opts.AppendKey(k)
+				prune(v, &prop, opts)
+				opts.ParentPath = opts.ParentPath[:origPathLen]
+			} else if s.AdditionalProperties != nil {
+				opts.AppendKey(k)
+				prune(v, s.AdditionalProperties.Structural, opts)
+				opts.ParentPath = opts.ParentPath[:origPathLen]
+			} else {
+				if !metaFields[k] || len(opts.ParentPath) > 0 {
+					opts.RecordUnknownField(k)
+				}
+				delete(x, k)
+			}
+		}
+	case []interface{}:
+		if s == nil {
+			for i, v := range x {
+				opts.AppendIndex(i)
+				prune(v, nil, opts)
+				opts.ParentPath = opts.ParentPath[:origPathLen]
+			}
+			return
+		}
+		for i, v := range x {
+			opts.AppendIndex(i)
+			prune(v, s.Items, opts)
+			opts.ParentPath = opts.ParentPath[:origPathLen]
+		}
+	default:
+		// scalars, do nothing
+	}
+}
+
+func skipPrune(x interface{}, s *structuralschema.Structural, opts *structuralschema.UnknownFieldPathOptions) {
+	if s == nil {
+		return
+	}
+	origPathLen := len(opts.ParentPath)
+	defer func() {
+		opts.ParentPath = opts.ParentPath[:origPathLen]
+	}()
+
+	switch x := x.(type) {
+	case map[string]interface{}:
+		for k, v := range x {
+			if s.XEmbeddedResource && metaFields[k] {
+				continue
+			}
+			if prop, ok := s.Properties[k]; ok {
+				opts.AppendKey(k)
+				prune(v, &prop, opts)
+				opts.ParentPath = opts.ParentPath[:origPathLen]
+			} else if s.AdditionalProperties != nil {
+				opts.AppendKey(k)
+				prune(v, s.AdditionalProperties.Structural, opts)
+				opts.ParentPath = opts.ParentPath[:origPathLen]
+			}
+		}
+	case []interface{}:
+		for i, v := range x {
+			opts.AppendIndex(i)
+			skipPrune(v, s.Items, opts)
+			opts.ParentPath = opts.ParentPath[:origPathLen]
+		}
+	default:
+		// scalars, do nothing
+	}
+}
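+
+// examplePrune is an illustrative sketch, not part of the vendored
+// upstream code: fields of obj not declared in s are removed in place
+// and, with tracking enabled, reported by path.
+func examplePrune(obj map[string]interface{}, s *structuralschema.Structural) []string {
+	return PruneWithOptions(obj, s, true, structuralschema.UnknownFieldPathOptions{
+		TrackUnknownFieldPaths: true,
+	})
+}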
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/skeleton.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/skeleton.go
new file mode 100644
index 0000000000..c7a9892e5f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/skeleton.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+// StripValueValidations returns a copy without value validations.
+func (s *Structural) StripValueValidations() *Structural {
+	s = s.DeepCopy()
+	v := Visitor{
+		Structural: func(s *Structural) bool {
+			changed := false
+			if s.ValueValidation != nil {
+				s.ValueValidation = nil
+				changed = true
+			}
+			return changed
+		},
+	}
+	v.Visit(s)
+	return s
+}
+
+// StripNullable returns a copy without nullable.
+func (s *Structural) StripNullable() *Structural {
+	s = s.DeepCopy()
+	v := Visitor{
+		Structural: func(s *Structural) bool {
+			changed := s.Nullable
+			s.Nullable = false
+			return changed
+		},
+	}
+	v.Visit(s)
+	return s
+}
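+
+// For illustration, not part of the vendored upstream code:
+// s.StripValueValidations().StripNullable() yields a skeleton with all
+// value validations removed and every Nullable flag cleared; s itself is
+// unchanged because both methods operate on a DeepCopy.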
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go
new file mode 100644
index 0000000000..5688c2ac40
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go
@@ -0,0 +1,211 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// +k8s:deepcopy-gen=true
+
+// Structural represents a structural schema.
+type Structural struct {
+	Items                *Structural
+	Properties           map[string]Structural
+	AdditionalProperties *StructuralOrBool
+
+	Generic
+	Extensions
+	ValidationExtensions
+
+	ValueValidation *ValueValidation
+}
+
+// +k8s:deepcopy-gen=true
+
+// StructuralOrBool is either a structural schema or a boolean.
+type StructuralOrBool struct {
+	Structural *Structural
+	Bool       bool
+}
+
+// +k8s:deepcopy-gen=true
+
+// Generic contains the generic schema fields not allowed in value validation.
+type Generic struct {
+	Description string
+	// type specifies the type of a value.
+	// It can be object, array, number, integer, boolean, string.
+	// It is optional only if x-kubernetes-preserve-unknown-fields
+	// or x-kubernetes-int-or-string is true.
+	Type     string
+	Title    string
+	Default  JSON
+	Nullable bool
+}
+
+// +k8s:deepcopy-gen=true
+
+// Extensions contains the Kubernetes OpenAPI v3 vendor extensions.
+type Extensions struct {
+	// x-kubernetes-preserve-unknown-fields stops the API server
+	// decoding step from pruning fields which are not specified
+	// in the validation schema. This affects fields recursively,
+	// but switches back to normal pruning behaviour if nested
+	// properties or additionalProperties are specified in the schema.
+	// False means that the pruning behaviour is inherited from the parent.
+	// False does not mean to activate pruning.
+	XPreserveUnknownFields bool
+
+	// x-kubernetes-embedded-resource defines that the value is an
+	// embedded Kubernetes runtime.Object, with TypeMeta and
+	// ObjectMeta. The type must be object. It is allowed to further
+	// restrict the embedded object. Both ObjectMeta and TypeMeta
+	// are validated automatically. x-kubernetes-preserve-unknown-fields
+	// must be true.
+	XEmbeddedResource bool
+
+	// x-kubernetes-int-or-string specifies that this value is
+	// either an integer or a string. If this is true, an empty
+	// type is allowed and type as child of anyOf is permitted
+	// if following one of the following patterns:
+	//
+	// 1) anyOf:
+	//    - type: integer
+	//    - type: string
+	// 2) allOf:
+	//    - anyOf:
+	//      - type: integer
+	//      - type: string
+	//    - ... zero or more
+	XIntOrString bool
+
+	// x-kubernetes-list-map-keys annotates lists with the x-kubernetes-list-type `map` by specifying the keys used
+	// as the index of the map.
+	//
+	// This tag MUST only be used on lists that have the "x-kubernetes-list-type"
+	// extension set to "map". Also, the values specified for this attribute must
+	// be a scalar typed field of the child structure (no nesting is supported).
+	XListMapKeys []string
+
+	// x-kubernetes-list-type annotates a list to further describe its topology.
+	// This extension must only be used on lists and may have 3 possible values:
+	//
+	// 1) `atomic`: the list is treated as a single entity, like a scalar.
+	//      Atomic lists will be entirely replaced when updated. This extension
+	//      may be used on any type of list (struct, scalar, ...).
+	// 2) `set`:
+	//      Sets are lists that must not have multiple items with the same value. Each
+	//      value must be a scalar (or another atomic type).
+	// 3) `map`:
+	//      These lists are like maps in that their elements have a non-index key
+	//      used to identify them. Order is preserved upon merge. The map tag
+	//      must only be used on a list with elements of type object.
+	XListType *string
+
+	// x-kubernetes-map-type annotates an object to further describe its topology.
+	// This extension must only be used when type is object and may have 2 possible values:
+	//
+	// 1) `granular`:
+	//      These maps are actual maps (key-value pairs) and each field is independent
+	//      of the others (they can each be manipulated by separate actors). This is
+	//      the default behaviour for all maps.
+	// 2) `atomic`: the map is treated as a single entity, like a scalar.
+	//      Atomic maps will be entirely replaced when updated.
+	// +optional
+	XMapType *string
+}
+
+// +k8s:deepcopy-gen=true
+
+// ValidationExtensions contains the Kubernetes OpenAPI v3 extensions that are
+// used for validation rather than structure.
+type ValidationExtensions struct {
+	// x-kubernetes-validations describes a list of validation rules for expression validation.
+	// Use the v1 struct since this gets serialized as an extension.
+	XValidations apiextensionsv1.ValidationRules
+}
+
+// +k8s:deepcopy-gen=true
+
+// ValueValidation contains all schema fields not contributing to the structure of the schema.
+type ValueValidation struct {
+	Format           string
+	Maximum          *float64
+	ExclusiveMaximum bool
+	Minimum          *float64
+	ExclusiveMinimum bool
+	MaxLength        *int64
+	MinLength        *int64
+	Pattern          string
+	MaxItems         *int64
+	MinItems         *int64
+	UniqueItems      bool
+	MultipleOf       *float64
+	Enum             []JSON
+	MaxProperties    *int64
+	MinProperties    *int64
+	Required         []string
+	AllOf            []NestedValueValidation
+	OneOf            []NestedValueValidation
+	AnyOf            []NestedValueValidation
+	Not              *NestedValueValidation
+}
+
+// +k8s:deepcopy-gen=true
+
+// NestedValueValidation contains value validations, items and properties usable when nested
+// under a logical junctor, plus catch-all structs for the generic and vendor extension schema fields.
+type NestedValueValidation struct {
+	ValueValidation
+	ValidationExtensions
+
+	Items                *NestedValueValidation
+	Properties           map[string]NestedValueValidation
+	AdditionalProperties *NestedValueValidation
+
+	// Anything set in the following will make the schema
+	// non-structural, with the exception of these two patterns if
+	// x-kubernetes-int-or-string is true:
+	//
+	// 1) anyOf:
+	//    - type: integer
+	//    - type: string
+	// 2) allOf:
+	//    - anyOf:
+	//      - type: integer
+	//      - type: string
+	//    - ... zero or more
+	ForbiddenGenerics   Generic
+	ForbiddenExtensions Extensions
+}
+
+// JSON wraps an arbitrary JSON value to be able to implement deepcopy.
+type JSON struct {
+	Object interface{}
+}
+
+// DeepCopy creates a deep copy of the wrapped JSON value.
+func (j JSON) DeepCopy() JSON {
+	return JSON{runtime.DeepCopyJSONValue(j.Object)}
+}
+
+// DeepCopyInto creates a deep copy of the wrapped JSON value and stores it in into.
+func (j JSON) DeepCopyInto(into *JSON) {
+	into.Object = runtime.DeepCopyJSONValue(j.Object)
+}
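+
+// exampleStructural is an illustrative sketch, not part of the vendored
+// upstream code: a minimal structural schema for an object with a single
+// required string field "name".
+var exampleStructural = &Structural{
+	Generic: Generic{Type: "object"},
+	Properties: map[string]Structural{
+		"name": {Generic: Generic{Type: "string"}},
+	},
+	ValueValidation: &ValueValidation{Required: []string{"name"}},
+}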
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/unfold.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/unfold.go
new file mode 100644
index 0000000000..5c69a4f300
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/unfold.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+// Unfold expands vendor extensions of a structural schema.
+// It mutates the receiver.
+func (s *Structural) Unfold() *Structural {
+	if s == nil {
+		return nil
+	}
+
+	mapper := Visitor{
+		Structural: func(s *Structural) bool {
+			if !s.XIntOrString {
+				return false
+			}
+
+			skipAnyOf := isIntOrStringAnyOfPattern(s)
+			skipFirstAllOfAnyOf := isIntOrStringAllOfPattern(s)
+			if skipAnyOf || skipFirstAllOfAnyOf {
+				return false
+			}
+
+			if s.ValueValidation == nil {
+				s.ValueValidation = &ValueValidation{}
+			}
+			if s.ValueValidation.AnyOf == nil {
+				s.ValueValidation.AnyOf = []NestedValueValidation{
+					{ForbiddenGenerics: Generic{Type: "integer"}},
+					{ForbiddenGenerics: Generic{Type: "string"}},
+				}
+			} else {
+				s.ValueValidation.AllOf = append([]NestedValueValidation{
+					{
+						ValueValidation: ValueValidation{
+							AnyOf: []NestedValueValidation{
+								{ForbiddenGenerics: Generic{Type: "integer"}},
+								{ForbiddenGenerics: Generic{Type: "string"}},
+							},
+						},
+					},
+				}, s.ValueValidation.AllOf...)
+			}
+
+			return true
+		},
+		NestedValueValidation: nil, // x-kubernetes-int-or-string cannot be set in nested value validation
+	}
+	mapper.Visit(s)
+
+	return s
+}
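+
+// For illustration, not part of the vendored upstream code: a schema with
+// only x-kubernetes-int-or-string set is unfolded to
+//
+//	anyOf:
+//	- type: integer
+//	- type: string
+//
+// whereas a schema that already carries an anyOf keeps it and instead has
+// the integer/string alternative prepended under allOf.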
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go
new file mode 100644
index 0000000000..a9caddb38d
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/validation.go
@@ -0,0 +1,372 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+	"sort"
+
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+var intOrStringAnyOf = []NestedValueValidation{
+	{ForbiddenGenerics: Generic{
+		Type: "integer",
+	}},
+	{ForbiddenGenerics: Generic{
+		Type: "string",
+	}},
+}
+
+type level int
+
+const (
+	rootLevel level = iota
+	itemLevel
+	fieldLevel
+)
+
+// ValidationOptions customizes how structural-schema invariants are checked.
+type ValidationOptions struct {
+	// AllowNestedAdditionalProperties allows additionalProperties to be specified in
+	// nested contexts (allOf, anyOf, oneOf, not).
+	AllowNestedAdditionalProperties bool
+
+	// AllowNestedXValidations allows x-kubernetes-validations to be specified in
+	// nested contexts (allOf, anyOf, oneOf, not).
+	AllowNestedXValidations bool
+
+	// AllowValidationPropertiesWithAdditionalProperties allows
+	// value validations on specific properties that are structurally
+	// defined by additionalProperties, i.e. additionalProperties in the main structural
+	// schema, but properties within an allOf.
+	AllowValidationPropertiesWithAdditionalProperties bool
+}
+
+// ValidateStructural checks that s is a structural schema with the invariants:
+//
+// * structurality: both `ForbiddenGenerics` and `ForbiddenExtensions` only have zero values, with the two exceptions for IntOrString.
+// * RawExtension: for every schema with `x-kubernetes-embedded-resource: true`, `x-kubernetes-preserve-unknown-fields: true` and `type: object` are set
+// * IntOrString: for `x-kubernetes-int-or-string: true` either `type` is empty under `anyOf` and `allOf` or the schema structure is one of these:
+//
+//  1. anyOf:
+//     - type: integer
+//     - type: string
+//  2. allOf:
+//     - anyOf:
+//     - type: integer
+//     - type: string
+//     - ... zero or more
+//
+// * every specified field or array in s is also specified outside of value validation.
+// * metadata at the root can only restrict the name and generateName, and not be specified at all in nested contexts.
+// * additionalProperties at the root is not allowed.
+func ValidateStructural(fldPath *field.Path, s *Structural) field.ErrorList {
+	return ValidateStructuralWithOptions(fldPath, s, ValidationOptions{
+		// Setting these to true would widen the schemas admitted for CRDs, so the
+		// first few releases keep them disabled here. Libraries and declarative
+		// validation for native types can still use them via ValidateStructuralWithOptions.
+		AllowNestedAdditionalProperties:                   false,
+		AllowNestedXValidations:                           false,
+		AllowValidationPropertiesWithAdditionalProperties: false,
+	})
+}
+
+// ValidateStructuralWithOptions is ValidateStructural with configurable options.
+func ValidateStructuralWithOptions(fldPath *field.Path, s *Structural, opts ValidationOptions) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	allErrs = append(allErrs, validateStructuralInvariants(s, rootLevel, fldPath, opts)...)
+	allErrs = append(allErrs, validateStructuralCompleteness(s, fldPath, opts)...)
+
+	// sort error messages. Otherwise, the errors slice will change every time due to
+	// maps in the types and randomized iteration.
+	sort.Slice(allErrs, func(i, j int) bool {
+		return allErrs[i].Error() < allErrs[j].Error()
+	})
+
+	return allErrs
+}
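+
+// exampleValidateStructural is an illustrative sketch, not part of the
+// vendored upstream code: it checks that a schema is structural and
+// aggregates any violations into a single error.
+func exampleValidateStructural(s *Structural) error {
+	return ValidateStructural(field.NewPath("openAPIV3Schema"), s).ToAggregate()
+}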
+
+// validateStructuralInvariants checks the invariants of a structural schema.
+func validateStructuralInvariants(s *Structural, lvl level, fldPath *field.Path, opts ValidationOptions) field.ErrorList {
+	if s == nil {
+		return nil
+	}
+
+	allErrs := field.ErrorList{}
+
+	if s.Type == "array" && s.Items == nil {
+		allErrs = append(allErrs, field.Required(fldPath.Child("items"), "must be specified"))
+	}
+	allErrs = append(allErrs, validateStructuralInvariants(s.Items, itemLevel, fldPath.Child("items"), opts)...)
+
+	for k, v := range s.Properties {
+		allErrs = append(allErrs, validateStructuralInvariants(&v, fieldLevel, fldPath.Child("properties").Key(k), opts)...)
+	}
+
+	if s.AdditionalProperties != nil {
+		if lvl == rootLevel {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("additionalProperties"), "must not be used at the root"))
+		}
+		if s.AdditionalProperties.Structural != nil {
+			allErrs = append(allErrs, validateStructuralInvariants(s.AdditionalProperties.Structural, fieldLevel, fldPath.Child("additionalProperties"), opts)...)
+		}
+	}
+
+	allErrs = append(allErrs, validateGeneric(&s.Generic, lvl, fldPath)...)
+	allErrs = append(allErrs, validateExtensions(&s.Extensions, fldPath)...)
+
+	// detect the two IntOrString exceptions:
+	// 1) anyOf:
+	//    - type: integer
+	//    - type: string
+	// 2) allOf:
+	//    - anyOf:
+	//      - type: integer
+	//      - type: string
+	//    - ... zero or more
+	skipAnyOf := isIntOrStringAnyOfPattern(s)
+	skipFirstAllOfAnyOf := isIntOrStringAllOfPattern(s)
+
+	allErrs = append(allErrs, validateValueValidation(s.ValueValidation, skipAnyOf, skipFirstAllOfAnyOf, lvl, fldPath, opts)...)
+
+	checkMetadata := (lvl == rootLevel) || s.XEmbeddedResource
+
+	if s.XEmbeddedResource && s.Type != "object" {
+		if len(s.Type) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("type"), "must be object if x-kubernetes-embedded-resource is true"))
+		} else {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("type"), s.Type, "must be object if x-kubernetes-embedded-resource is true"))
+		}
+	} else if len(s.Type) == 0 && !s.Extensions.XIntOrString && !s.Extensions.XPreserveUnknownFields {
+		switch lvl {
+		case rootLevel:
+			allErrs = append(allErrs, field.Required(fldPath.Child("type"), "must not be empty at the root"))
+		case itemLevel:
+			allErrs = append(allErrs, field.Required(fldPath.Child("type"), "must not be empty for specified array items"))
+		case fieldLevel:
+			allErrs = append(allErrs, field.Required(fldPath.Child("type"), "must not be empty for specified object fields"))
+		}
+	}
+	if s.XEmbeddedResource && s.AdditionalProperties != nil {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("additionalProperties"), "must not be used if x-kubernetes-embedded-resource is set"))
+	}
+
+	if lvl == rootLevel && len(s.Type) > 0 && s.Type != "object" {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("type"), s.Type, "must be object at the root"))
+	}
+
+	// restrict metadata schemas to name and generateName only
+	if kind, found := s.Properties["kind"]; found && checkMetadata {
+		if kind.Type != "string" {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("properties").Key("kind").Child("type"), kind.Type, "must be string"))
+		}
+	}
+	if apiVersion, found := s.Properties["apiVersion"]; found && checkMetadata {
+		if apiVersion.Type != "string" {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("properties").Key("apiVersion").Child("type"), apiVersion.Type, "must be string"))
+		}
+	}
+
+	if metadata, found := s.Properties["metadata"]; found {
+		allErrs = append(allErrs, validateStructuralMetadataInvariants(&metadata, checkMetadata, lvl, fldPath.Child("properties").Key("metadata"))...)
+	}
+
+	if s.XEmbeddedResource && !s.XPreserveUnknownFields && len(s.Properties) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("properties"), "must not be empty if x-kubernetes-embedded-resource is true without x-kubernetes-preserve-unknown-fields"))
+	}
+
+	return allErrs
+}
+
+func validateStructuralMetadataInvariants(s *Structural, checkMetadata bool, lvl level, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if checkMetadata && s.Type != "object" {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("type"), s.Type, "must be object"))
+	}
+
+	if lvl == rootLevel {
+		// metadata is a shallow copy. We can mutate it.
+		_, foundName := s.Properties["name"]
+		_, foundGenerateName := s.Properties["generateName"]
+		if foundName && foundGenerateName && len(s.Properties) == 2 {
+			s.Properties = nil
+		} else if (foundName || foundGenerateName) && len(s.Properties) == 1 {
+			s.Properties = nil
+		}
+		s.Type = ""
+		s.Default.Object = nil // this is checked in API validation (and also tested)
+		if s.ValueValidation == nil {
+			s.ValueValidation = &ValueValidation{}
+		}
+		if !reflect.DeepEqual(*s, Structural{ValueValidation: &ValueValidation{}}) {
+			// TODO: this is actually a field.Invalid error, but we cannot do JSON serialization of metadata here to get a proper message
+			allErrs = append(allErrs, field.Forbidden(fldPath, "must not specify anything other than name and generateName, but metadata is implicitly specified"))
+		}
+	}
+
+	return allErrs
+}
+
+func isIntOrStringAnyOfPattern(s *Structural) bool {
+	if s == nil || s.ValueValidation == nil {
+		return false
+	}
+	return len(s.ValueValidation.AnyOf) == 2 && reflect.DeepEqual(s.ValueValidation.AnyOf, intOrStringAnyOf)
+}
+
+func isIntOrStringAllOfPattern(s *Structural) bool {
+	if s == nil || s.ValueValidation == nil {
+		return false
+	}
+	return len(s.ValueValidation.AllOf) >= 1 && len(s.ValueValidation.AllOf[0].AnyOf) == 2 && reflect.DeepEqual(s.ValueValidation.AllOf[0].AnyOf, intOrStringAnyOf)
+}
+
+// validateGeneric checks the generic fields of a structural schema.
+func validateGeneric(g *Generic, lvl level, fldPath *field.Path) field.ErrorList {
+	if g == nil {
+		return nil
+	}
+
+	return nil
+}
+
+// validateExtensions checks Kubernetes vendor extensions of a structural schema.
+func validateExtensions(x *Extensions, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if x.XIntOrString && x.XPreserveUnknownFields {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("x-kubernetes-preserve-unknown-fields"), x.XPreserveUnknownFields, "must be false if x-kubernetes-int-or-string is true"))
+	}
+	if x.XIntOrString && x.XEmbeddedResource {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("x-kubernetes-embedded-resource"), x.XEmbeddedResource, "must be false if x-kubernetes-int-or-string is true"))
+	}
+
+	return allErrs
+}
+
+// validateValueValidation checks the value validation in a structural schema.
+func validateValueValidation(v *ValueValidation, skipAnyOf, skipFirstAllOfAnyOf bool, lvl level, fldPath *field.Path, opts ValidationOptions) field.ErrorList {
+	if v == nil {
+		return nil
+	}
+
+	allErrs := field.ErrorList{}
+
+	// We still unconditionally forbid XValidations in quantifiers; the only
+	// quantifier that is allowed to have them right now is AllOf. This is due to
+	// implementation constraints - the SchemaValidator would need to become
+	// aware of CEL to properly implement the other quantifiers.
+	optsWithCELDisabled := opts
+	optsWithCELDisabled.AllowNestedXValidations = false
+
+	if !skipAnyOf {
+		for i := range v.AnyOf {
+			allErrs = append(allErrs, validateNestedValueValidation(&v.AnyOf[i], false, false, lvl, fldPath.Child("anyOf").Index(i), optsWithCELDisabled)...)
+		}
+	}
+
+	for i := range v.AllOf {
+		skipAnyOf := false
+		if skipFirstAllOfAnyOf && i == 0 {
+			skipAnyOf = true
+		}
+		allErrs = append(allErrs, validateNestedValueValidation(&v.AllOf[i], skipAnyOf, false, lvl, fldPath.Child("allOf").Index(i), opts)...)
+	}
+
+	for i := range v.OneOf {
+		allErrs = append(allErrs, validateNestedValueValidation(&v.OneOf[i], false, false, lvl, fldPath.Child("oneOf").Index(i), optsWithCELDisabled)...)
+	}
+
+	allErrs = append(allErrs, validateNestedValueValidation(v.Not, false, false, lvl, fldPath.Child("not"), optsWithCELDisabled)...)
+
+	if len(v.Pattern) > 0 {
+		if _, err := regexp.Compile(v.Pattern); err != nil {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("pattern"), v.Pattern, fmt.Sprintf("must be a valid regular expression, but isn't: %v", err)))
+		}
+	}
+
+	return allErrs
+}
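+
+// Example (illustrative sketch, not part of the vendored upstream code): an
+// unparsable `pattern` is reported by the regexp compilation check above.
+//
+//	v := &ValueValidation{Pattern: "(["}
+//	errs := validateValueValidation(v, false, false, fieldLevel, field.NewPath("test"), ValidationOptions{})
+//	// errs now contains a field.Invalid for the broken regular expression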
+
+// validateNestedValueValidation checks the nested value validation under a logic junctor in a structural schema.
+func validateNestedValueValidation(v *NestedValueValidation, skipAnyOf, skipAllOfAnyOf bool, lvl level, fldPath *field.Path, opts ValidationOptions) field.ErrorList {
+	if v == nil {
+		return nil
+	}
+
+	allErrs := field.ErrorList{}
+
+	allErrs = append(allErrs, validateValueValidation(&v.ValueValidation, skipAnyOf, skipAllOfAnyOf, lvl, fldPath, opts)...)
+	allErrs = append(allErrs, validateNestedValueValidation(v.Items, false, false, lvl, fldPath.Child("items"), opts)...)
+
+	for k, fld := range v.Properties {
+		allErrs = append(allErrs, validateNestedValueValidation(&fld, false, false, fieldLevel, fldPath.Child("properties").Key(k), opts)...)
+	}
+
+	if len(v.ForbiddenGenerics.Type) > 0 {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("type"), "must be empty to be structural"))
+	}
+	if v.AdditionalProperties != nil && !opts.AllowNestedAdditionalProperties {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("additionalProperties"), "must be undefined to be structural"))
+	} else {
+		allErrs = append(allErrs, validateNestedValueValidation(v.AdditionalProperties, false, false, lvl, fldPath.Child("additionalProperties"), opts)...)
+	}
+	if v.ForbiddenGenerics.Default.Object != nil {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("default"), "must be undefined to be structural"))
+	}
+	if len(v.ForbiddenGenerics.Title) > 0 {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("title"), "must be empty to be structural"))
+	}
+	if len(v.ForbiddenGenerics.Description) > 0 {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("description"), "must be empty to be structural"))
+	}
+	if v.ForbiddenGenerics.Nullable {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("nullable"), "must be false to be structural"))
+	}
+
+	if v.ForbiddenExtensions.XPreserveUnknownFields {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-preserve-unknown-fields"), "must be false to be structural"))
+	}
+	if v.ForbiddenExtensions.XEmbeddedResource {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-embedded-resource"), "must be false to be structural"))
+	}
+	if v.ForbiddenExtensions.XIntOrString {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-int-or-string"), "must be false to be structural"))
+	}
+	if len(v.ForbiddenExtensions.XListMapKeys) > 0 {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-list-map-keys"), "must be empty to be structural"))
+	}
+	if v.ForbiddenExtensions.XListType != nil {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-list-type"), "must be undefined to be structural"))
+	}
+	if v.ForbiddenExtensions.XMapType != nil {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-map-type"), "must be undefined to be structural"))
+	}
+	if len(v.ValidationExtensions.XValidations) > 0 && !opts.AllowNestedXValidations {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("x-kubernetes-validations"), "must be empty to be structural"))
+	}
+
+	// forbid reasoning about metadata because it can lead to metadata restrictions we don't want
+	if _, found := v.Properties["metadata"]; found {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("properties").Key("metadata"), "must not be specified in a nested context"))
+	}
+
+	return allErrs
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/visitor.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/visitor.go
new file mode 100644
index 0000000000..37eb72ed01
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/visitor.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schema
+
+// Visitor recursively walks through a structural schema.
+type Visitor struct {
+	// Structural is called on each Structural node in the schema, before recursing into
+	// the subtrees. It is allowed to mutate s. Return true if something has been changed.
+	// +optional
+	Structural func(s *Structural) bool
+	// NestedValueValidation is called on each NestedValueValidation node in the schema,
+	// before recursing into subtrees. It is allowed to mutate vv. Return true if something
+	// has been changed.
+	// +optional
+	NestedValueValidation func(vv *NestedValueValidation) bool
+}
+
+// Visit recursively walks through the structural schema and calls the given callbacks
+// at each node of those types.
+func (m *Visitor) Visit(s *Structural) {
+	m.visitStructural(s)
+}
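+
+// Example (illustrative sketch, not part of the vendored upstream code):
+// strip every `default` from a structural schema in place (root is a
+// placeholder *Structural).
+//
+//	v := Visitor{
+//		Structural: func(s *Structural) bool {
+//			if s.Default.Object != nil {
+//				s.Default.Object = nil
+//				return true
+//			}
+//			return false
+//		},
+//	}
+//	v.Visit(root)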
+
+func (m *Visitor) visitStructural(s *Structural) bool {
+	ret := false
+	if m.Structural != nil {
+		ret = m.Structural(s)
+	}
+
+	if s.Items != nil {
+		m.visitStructural(s.Items)
+	}
+	for k, v := range s.Properties {
+		if changed := m.visitStructural(&v); changed {
+			ret = true
+			s.Properties[k] = v
+		}
+	}
+	if s.AdditionalProperties != nil && s.AdditionalProperties.Structural != nil {
+		m.visitStructural(s.AdditionalProperties.Structural)
+	}
+	if s.ValueValidation != nil {
+		for i := range s.ValueValidation.AllOf {
+			m.visitNestedValueValidation(&s.ValueValidation.AllOf[i])
+		}
+		for i := range s.ValueValidation.AnyOf {
+			m.visitNestedValueValidation(&s.ValueValidation.AnyOf[i])
+		}
+		for i := range s.ValueValidation.OneOf {
+			m.visitNestedValueValidation(&s.ValueValidation.OneOf[i])
+		}
+		if s.ValueValidation.Not != nil {
+			m.visitNestedValueValidation(s.ValueValidation.Not)
+		}
+	}
+
+	return ret
+}
+
+func (m *Visitor) visitNestedValueValidation(vv *NestedValueValidation) bool {
+	ret := false
+	if m.NestedValueValidation != nil {
+		ret = m.NestedValueValidation(vv)
+	}
+
+	if vv.Items != nil {
+		m.visitNestedValueValidation(vv.Items)
+	}
+	for k, v := range vv.Properties {
+		if changed := m.visitNestedValueValidation(&v); changed {
+			ret = true
+			vv.Properties[k] = v
+		}
+	}
+	if vv.AdditionalProperties != nil {
+		m.visitNestedValueValidation(vv.AdditionalProperties)
+	}
+	for i := range vv.ValueValidation.AllOf {
+		m.visitNestedValueValidation(&vv.ValueValidation.AllOf[i])
+	}
+	for i := range vv.ValueValidation.AnyOf {
+		m.visitNestedValueValidation(&vv.ValueValidation.AnyOf[i])
+	}
+	for i := range vv.ValueValidation.OneOf {
+		m.visitNestedValueValidation(&vv.ValueValidation.OneOf[i])
+	}
+	if vv.ValueValidation.Not != nil {
+		m.visitNestedValueValidation(vv.ValueValidation.Not)
+	}
+
+	return ret
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..9cec0262b5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/zz_generated.deepcopy.go
@@ -0,0 +1,295 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package schema
+
+import (
+	v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Extensions) DeepCopyInto(out *Extensions) {
+	*out = *in
+	if in.XListMapKeys != nil {
+		in, out := &in.XListMapKeys, &out.XListMapKeys
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.XListType != nil {
+		in, out := &in.XListType, &out.XListType
+		*out = new(string)
+		**out = **in
+	}
+	if in.XMapType != nil {
+		in, out := &in.XMapType, &out.XMapType
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extensions.
+func (in *Extensions) DeepCopy() *Extensions {
+	if in == nil {
+		return nil
+	}
+	out := new(Extensions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Generic) DeepCopyInto(out *Generic) {
+	*out = *in
+	out.Default = in.Default.DeepCopy()
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Generic.
+func (in *Generic) DeepCopy() *Generic {
+	if in == nil {
+		return nil
+	}
+	out := new(Generic)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NestedValueValidation) DeepCopyInto(out *NestedValueValidation) {
+	*out = *in
+	in.ValueValidation.DeepCopyInto(&out.ValueValidation)
+	in.ValidationExtensions.DeepCopyInto(&out.ValidationExtensions)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = new(NestedValueValidation)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Properties != nil {
+		in, out := &in.Properties, &out.Properties
+		*out = make(map[string]NestedValueValidation, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+	if in.AdditionalProperties != nil {
+		in, out := &in.AdditionalProperties, &out.AdditionalProperties
+		*out = new(NestedValueValidation)
+		(*in).DeepCopyInto(*out)
+	}
+	in.ForbiddenGenerics.DeepCopyInto(&out.ForbiddenGenerics)
+	in.ForbiddenExtensions.DeepCopyInto(&out.ForbiddenExtensions)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NestedValueValidation.
+func (in *NestedValueValidation) DeepCopy() *NestedValueValidation {
+	if in == nil {
+		return nil
+	}
+	out := new(NestedValueValidation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Structural) DeepCopyInto(out *Structural) {
+	*out = *in
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = new(Structural)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Properties != nil {
+		in, out := &in.Properties, &out.Properties
+		*out = make(map[string]Structural, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+	if in.AdditionalProperties != nil {
+		in, out := &in.AdditionalProperties, &out.AdditionalProperties
+		*out = new(StructuralOrBool)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Generic.DeepCopyInto(&out.Generic)
+	in.Extensions.DeepCopyInto(&out.Extensions)
+	in.ValidationExtensions.DeepCopyInto(&out.ValidationExtensions)
+	if in.ValueValidation != nil {
+		in, out := &in.ValueValidation, &out.ValueValidation
+		*out = new(ValueValidation)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Structural.
+func (in *Structural) DeepCopy() *Structural {
+	if in == nil {
+		return nil
+	}
+	out := new(Structural)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *StructuralOrBool) DeepCopyInto(out *StructuralOrBool) {
+	*out = *in
+	if in.Structural != nil {
+		in, out := &in.Structural, &out.Structural
+		*out = new(Structural)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StructuralOrBool.
+func (in *StructuralOrBool) DeepCopy() *StructuralOrBool {
+	if in == nil {
+		return nil
+	}
+	out := new(StructuralOrBool)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValidationExtensions) DeepCopyInto(out *ValidationExtensions) {
+	*out = *in
+	if in.XValidations != nil {
+		in, out := &in.XValidations, &out.XValidations
+		*out = make(v1.ValidationRules, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidationExtensions.
+func (in *ValidationExtensions) DeepCopy() *ValidationExtensions {
+	if in == nil {
+		return nil
+	}
+	out := new(ValidationExtensions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ValueValidation) DeepCopyInto(out *ValueValidation) {
+	*out = *in
+	if in.Maximum != nil {
+		in, out := &in.Maximum, &out.Maximum
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Minimum != nil {
+		in, out := &in.Minimum, &out.Minimum
+		*out = new(float64)
+		**out = **in
+	}
+	if in.MaxLength != nil {
+		in, out := &in.MaxLength, &out.MaxLength
+		*out = new(int64)
+		**out = **in
+	}
+	if in.MinLength != nil {
+		in, out := &in.MinLength, &out.MinLength
+		*out = new(int64)
+		**out = **in
+	}
+	if in.MaxItems != nil {
+		in, out := &in.MaxItems, &out.MaxItems
+		*out = new(int64)
+		**out = **in
+	}
+	if in.MinItems != nil {
+		in, out := &in.MinItems, &out.MinItems
+		*out = new(int64)
+		**out = **in
+	}
+	if in.MultipleOf != nil {
+		in, out := &in.MultipleOf, &out.MultipleOf
+		*out = new(float64)
+		**out = **in
+	}
+	if in.Enum != nil {
+		in, out := &in.Enum, &out.Enum
+		*out = make([]JSON, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MaxProperties != nil {
+		in, out := &in.MaxProperties, &out.MaxProperties
+		*out = new(int64)
+		**out = **in
+	}
+	if in.MinProperties != nil {
+		in, out := &in.MinProperties, &out.MinProperties
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Required != nil {
+		in, out := &in.Required, &out.Required
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.AllOf != nil {
+		in, out := &in.AllOf, &out.AllOf
+		*out = make([]NestedValueValidation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OneOf != nil {
+		in, out := &in.OneOf, &out.OneOf
+		*out = make([]NestedValueValidation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AnyOf != nil {
+		in, out := &in.AnyOf, &out.AnyOf
+		*out = make([]NestedValueValidation, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Not != nil {
+		in, out := &in.Not, &out.Not
+		*out = new(NestedValueValidation)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueValidation.
+func (in *ValueValidation) DeepCopy() *ValueValidation {
+	if in == nil {
+		return nil
+	}
+	out := new(ValueValidation)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/formats.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/formats.go
new file mode 100644
index 0000000000..f67c1c58e3
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/formats.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/kube-openapi/pkg/validation/spec"
+)
+
+var supportedFormats = sets.NewString(
+	"bsonobjectid", // bson object ID
+	"uri",          // an URI as parsed by Golang net/url.ParseRequestURI
+	"email",        // an email address as parsed by Golang net/mail.ParseAddress
+	"hostname",     // a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
+	"ipv4",         // an IPv4 IP as parsed by Golang net.ParseIP
+	"ipv6",         // an IPv6 IP as parsed by Golang net.ParseIP
+	"cidr",         // a CIDR as parsed by Golang net.ParseCIDR
+	"mac",          // a MAC address as parsed by Golang net.ParseMAC
+	"uuid",         // an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$
+	"uuid3",        // an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$
+	"uuid4",        // an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$
+	"uuid5",        // an UUID6 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$
+	"isbn",         // an ISBN10 or ISBN13 number string like "0321751043" or "978-0321751041"
+	"isbn10",       // an ISBN10 number string like "0321751043"
+	"isbn13",       // an ISBN13 number string like "978-0321751041"
+	"creditcard",   // a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in
+	"ssn",          // a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$
+	"hexcolor",     // an hexadecimal color code like "#FFFFFF", following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
+	"rgbcolor",     // an RGB color code like rgb like "rgb(255,255,2559"
+	"byte",         // base64 encoded binary data
+	"password",     // any kind of string
+	"date",         // a date string like "2006-01-02" as defined by full-date in RFC3339
+	"duration",     // a duration string like "22 ns" as parsed by Golang time.ParseDuration or compatible with Scala duration format
+	"datetime",     // a date time string like "2014-12-15T19:30:20.000Z" as defined by date-time in RFC3339
+)
+
+// StripUnsupportedFormatsPostProcess sets unsupported formats to empty string.
+func StripUnsupportedFormatsPostProcess(s *spec.Schema) error {
+	if len(s.Format) == 0 {
+		return nil
+	}
+
+	normalized := strings.Replace(s.Format, "-", "", -1) // go-openapi default format name normalization
+	if !supportedFormats.Has(normalized) {
+		s.Format = ""
+	}
+
+	return nil
+}
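+
+// Example (illustrative sketch, not part of the vendored upstream code):
+// "date-time" normalizes to "datetime" and is kept, while an unknown format
+// such as "k8s-short-name" is cleared.
+//
+//	s := &spec.Schema{SchemaProps: spec.SchemaProps{Format: "k8s-short-name"}}
+//	_ = StripUnsupportedFormatsPostProcess(s) // s.Format is now ""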
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/metrics.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/metrics.go
new file mode 100644
index 0000000000..7dcc7938e0
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/metrics.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"time"
+
+	"k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+)
+
+const (
+	namespace = "apiextensions_apiserver"
+	subsystem = "validation"
+)
+
+// Interface to stub for tests
+type ValidationMetrics interface {
+	ObserveRatchetingTime(d time.Duration)
+}
+
+var Metrics ValidationMetrics = &validationMetrics{
+	RatchetingTime: metrics.NewHistogram(&metrics.HistogramOpts{
+		Namespace:      namespace,
+		Subsystem:      subsystem,
+		Name:           "ratcheting_seconds",
+		Help:           "Time for comparison of old to new for the purposes of CRDValidationRatcheting during an UPDATE in seconds.",
+		StabilityLevel: metrics.ALPHA,
+		// Buckets start at 0.01ms, with the last bucket being [~2.5s, +Inf)
+		Buckets: metrics.ExponentialBuckets(0.00001, 4, 10),
+	}),
+}
+
+func init() {
+	legacyregistry.MustRegister(Metrics.(*validationMetrics).RatchetingTime)
+}
+
+type validationMetrics struct {
+	RatchetingTime *metrics.Histogram
+}
+
+// ObserveRatchetingTime records the time spent on ratcheting
+func (m *validationMetrics) ObserveRatchetingTime(d time.Duration) {
+	m.RatchetingTime.Observe(d.Seconds())
+}
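+
+// Example (illustrative sketch, not part of the vendored upstream code):
+// time a ratcheting comparison and record it.
+//
+//	start := time.Now()
+//	// ... compare old and new values ...
+//	Metrics.ObserveRatchetingTime(time.Since(start))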
+
+// Reset re-creates the metrics. It is meant for use in tests and returns the
+// reset metrics so the caller can re-register them.
+func (m *validationMetrics) Reset() []metrics.Registerable {
+	m.RatchetingTime = metrics.NewHistogram(m.RatchetingTime.HistogramOpts)
+	return []metrics.Registerable{m.RatchetingTime}
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/ratcheting.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/ratcheting.go
new file mode 100644
index 0000000000..9cd0168686
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/ratcheting.go
@@ -0,0 +1,222 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"reflect"
+
+	"k8s.io/apiserver/pkg/cel/common"
+	celopenapi "k8s.io/apiserver/pkg/cel/openapi"
+	"k8s.io/kube-openapi/pkg/validation/spec"
+	"k8s.io/kube-openapi/pkg/validation/strfmt"
+	"k8s.io/kube-openapi/pkg/validation/validate"
+)
+
+// schemaArgs are the arguments to the constructor for the OpenAPI schema
+// validator, NewSchemaValidator
+type schemaArgs struct {
+	schema       *spec.Schema
+	root         interface{}
+	path         string
+	knownFormats strfmt.Registry
+	options      []validate.Option
+}
+
+// RatchetingSchemaValidator wraps kube-openapi's SchemaValidator to provide a
+// ValidateUpdate function which allows ratcheting
+type RatchetingSchemaValidator struct {
+	schemaArgs
+}
+
+func NewRatchetingSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...validate.Option) *RatchetingSchemaValidator {
+	return &RatchetingSchemaValidator{
+		schemaArgs: schemaArgs{
+			schema:       schema,
+			root:         rootSchema,
+			path:         root,
+			knownFormats: formats,
+			options:      options,
+		},
+	}
+}
+
+func (r *RatchetingSchemaValidator) Validate(new interface{}, options ...ValidationOption) *validate.Result {
+	sv := validate.NewSchemaValidator(r.schema, r.root, r.path, r.knownFormats, r.options...)
+	return sv.Validate(new)
+}
+
+func (r *RatchetingSchemaValidator) ValidateUpdate(new, old interface{}, options ...ValidationOption) *validate.Result {
+	opts := NewValidationOptions(options...)
+
+	if !opts.Ratcheting {
+		sv := validate.NewSchemaValidator(r.schema, r.root, r.path, r.knownFormats, r.options...)
+		return sv.Validate(new)
+	}
+
+	correlation := opts.CorrelatedObject
+	if correlation == nil {
+		correlation = common.NewCorrelatedObject(new, old, &celopenapi.Schema{Schema: r.schema})
+	}
+
+	return newRatchetingValueValidator(
+		correlation,
+		r.schemaArgs,
+	).Validate(new)
+}
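+
+// Example (illustrative sketch, not part of the vendored upstream code): when
+// old and new are deeply equal, pre-existing violations are downgraded to
+// warnings (schema and obj are placeholders).
+//
+//	rsv := NewRatchetingSchemaValidator(schema, nil, "", strfmt.Default)
+//	res := rsv.ValidateUpdate(obj, obj, WithRatcheting(nil))
+//	// res.IsValid() is true; the original errors are carried as warnings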
+
+// ratchetingValueValidator represents an invocation of SchemaValidator.ValidateUpdate
+// with specific arguments for `old` and `new`
+//
+// It follows the openapi SchemaValidator down its traversal of the new value
+// by injecting validate.Option into each recursive invocation.
+//
+// A ratchetingValueValidator will be constructed and added to the tree for
+// each explored sub-index and sub-property during validation.
+//
+// Its main job is to keep the old/new values correlated as the traversal
+// continues, and to postprocess errors according to our ratcheting policy.
+//
+// ratchetingValueValidator is not thread safe.
+type ratchetingValueValidator struct {
+	// schemaArgs provides the arguments to use in the temporary SchemaValidator
+	// that is created during a call to Validate.
+	schemaArgs
+	correlation *common.CorrelatedObject
+}
+
+func newRatchetingValueValidator(correlation *common.CorrelatedObject, args schemaArgs) *ratchetingValueValidator {
+	return &ratchetingValueValidator{
+		schemaArgs:  args,
+		correlation: correlation,
+	}
+}
+
+// getValidateOption provides a kube-openapi validate.Option for SchemaValidator
+// that injects a ratchetingValueValidator to be used for all subkeys and subindices
+func (r *ratchetingValueValidator) getValidateOption() validate.Option {
+	return func(svo *validate.SchemaValidatorOptions) {
+		svo.NewValidatorForField = r.SubPropertyValidator
+		svo.NewValidatorForIndex = r.SubIndexValidator
+	}
+}
+
+// Validate validates the update from r.oldValue to r.value
+//
+// During evaluation, a temporary tree of ratchetingValueValidator is built for all
+// traversed field paths. It is necessary to build the tree to take advantage of
+// DeepEqual checks performed by lower levels of the object during validation without
+// greatly modifying `kube-openapi`'s implementation.
+//
+// The tree, and all cache storage/scratch space for the validation of a single
+// call to `Validate`, is thrown away at the end of the top-level call
+// to `Validate`.
+//
+// `Validate` will create a node in the tree for each of the explored children.
+// The node's main purpose is to store a lazily computed DeepEqual check between
+// the oldValue and the currently passed value. If the check is performed, it
+// will be stored in the node to be re-used by a parent node during a DeepEqual
+// comparison, if necessary.
+//
+// This call has a side effect of populating its `children` variable with
+// the explored nodes of the object tree.
+func (r *ratchetingValueValidator) Validate(new interface{}) *validate.Result {
+	opts := append([]validate.Option{
+		r.getValidateOption(),
+	}, r.options...)
+
+	s := validate.NewSchemaValidator(r.schema, r.root, r.path, r.knownFormats, opts...)
+
+	res := s.Validate(r.correlation.Value)
+
+	if res.IsValid() {
+		return res
+	}
+
+	// Current ratcheting rule is to ratchet errors if DeepEqual(old, new) is true.
+	if r.correlation.CachedDeepEqual() {
+		newRes := &validate.Result{}
+		newRes.MergeAsWarnings(res)
+		return newRes
+	}
+
+	return res
+}
+
+// SubPropertyValidator overrides the standard validator constructor for sub-properties by
+// returning our special ratcheting variant.
+//
+// If we can correlate an old value, we return a ratcheting validator to
+// use for the child.
+//
+// If the old value cannot be correlated, then default validation is used.
+func (r *ratchetingValueValidator) SubPropertyValidator(field string, schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...validate.Option) validate.ValueValidator {
+	childNode := r.correlation.Key(field)
+	if childNode == nil || (r.path == "" && isTypeMetaField(field)) {
+		// Defer to default validation if we cannot correlate the old value
+		// or if we are validating the root object and the field is a metadata
+		// field.
+		//
+		// We cannot ratchet changes to the APIVersion field since they aren't visible.
+		// (both old and new are converted to the same type)
+		return validate.NewSchemaValidator(schema, rootSchema, root, formats, options...)
+	}
+
+	return newRatchetingValueValidator(childNode, schemaArgs{
+		schema:       schema,
+		root:         rootSchema,
+		path:         root,
+		knownFormats: formats,
+		options:      options,
+	})
+}
+
+// SubIndexValidator overrides the standard validator constructor for sub-indices by
+// returning our special ratcheting variant.
+//
+// If we can correlate an old value, we return a ratcheting validator to
+// use for the child.
+//
+// If the old value cannot be correlated, then default validation is used.
+func (r *ratchetingValueValidator) SubIndexValidator(index int, schema *spec.Schema, rootSchema interface{}, root string, formats strfmt.Registry, options ...validate.Option) validate.ValueValidator {
+	childNode := r.correlation.Index(index)
+	if childNode == nil {
+		return validate.NewSchemaValidator(schema, rootSchema, root, formats, options...)
+	}
+
+	return newRatchetingValueValidator(childNode, schemaArgs{
+		schema:       schema,
+		root:         rootSchema,
+		path:         root,
+		knownFormats: formats,
+		options:      options,
+	})
+}
+
+var _ validate.ValueValidator = (&ratchetingValueValidator{})
+
+func (r ratchetingValueValidator) SetPath(path string) {
+	// Do nothing
+	// Unused by kube-openapi
+}
+
+func (r ratchetingValueValidator) Applies(source interface{}, valueKind reflect.Kind) bool {
+	return true
+}
+
+func isTypeMetaField(path string) bool {
+	return path == "kind" || path == "apiVersion"
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go
new file mode 100644
index 0000000000..85f1ffca7f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/validation/validation.go
@@ -0,0 +1,485 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"encoding/json"
+	"strings"
+
+	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apiextensions-apiserver/pkg/features"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/apiserver/pkg/cel/common"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	openapierrors "k8s.io/kube-openapi/pkg/validation/errors"
+	"k8s.io/kube-openapi/pkg/validation/spec"
+	"k8s.io/kube-openapi/pkg/validation/strfmt"
+	"k8s.io/kube-openapi/pkg/validation/validate"
+)
+
+type SchemaValidator interface {
+	SchemaCreateValidator
+	ValidateUpdate(new, old interface{}, options ...ValidationOption) *validate.Result
+}
+
+type SchemaCreateValidator interface {
+	Validate(value interface{}, options ...ValidationOption) *validate.Result
+}
+
+type ValidationOptions struct {
+	// Ratcheting indicates whether errors from unchanged portions of the schema
+	// should be ratcheted. This field is ignored for Validate.
+	Ratcheting bool
+
+	// Correlation between old and new arguments.
+	// If set, this is expected to be the correlation between the `new` and
+	// `old` arguments to ValidateUpdate, and values for `new` and `old` will
+	// be taken from the correlation.
+	//
+	// This field is ignored for Validate
+	//
+	// Used for ratcheting, but left as a separate field since it may be used
+	// for other purposes in the future.
+	CorrelatedObject *common.CorrelatedObject
+}
+
+type ValidationOption func(*ValidationOptions)
+
+func NewValidationOptions(opts ...ValidationOption) ValidationOptions {
+	options := ValidationOptions{}
+	for _, opt := range opts {
+		opt(&options)
+	}
+	return options
+}
+
+func WithRatcheting(correlation *common.CorrelatedObject) ValidationOption {
+	return func(options *ValidationOptions) {
+		options.Ratcheting = true
+		options.CorrelatedObject = correlation
+	}
+}
+
+// basicSchemaValidator wraps a kube-openapi SchemaValidator to
+// support ValidateUpdate. It implements ValidateUpdate by simply validating
+// the new value via kube-openapi, ignoring the old value
+type basicSchemaValidator struct {
+	*validate.SchemaValidator
+}
+
+func (s basicSchemaValidator) Validate(new interface{}, options ...ValidationOption) *validate.Result {
+	return s.SchemaValidator.Validate(new)
+}
+
+func (s basicSchemaValidator) ValidateUpdate(new, old interface{}, options ...ValidationOption) *validate.Result {
+	return s.Validate(new, options...)
+}
+
+// NewSchemaValidator creates an openapi schema validator for the given CRD validation.
+//
+// If feature `CRDValidationRatcheting` is disabled, this returns a validator which
+// validates all `Update`s and `Create`s as a `Create` - without considering the old value.
+//
+// If feature `CRDValidationRatcheting` is enabled - the validator returned
+// will support ratcheting unchanged correlatable fields across an update.
+func NewSchemaValidator(customResourceValidation *apiextensions.JSONSchemaProps) (SchemaValidator, *spec.Schema, error) {
+	// Convert CRD schema to openapi schema
+	openapiSchema := &spec.Schema{}
+	if customResourceValidation != nil {
+		// TODO: replace with NewStructural(...).ToGoOpenAPI
+		if err := ConvertJSONSchemaPropsWithPostProcess(customResourceValidation, openapiSchema, StripUnsupportedFormatsPostProcess); err != nil {
+			return nil, nil, err
+		}
+	}
+	return NewSchemaValidatorFromOpenAPI(openapiSchema), openapiSchema, nil
+}
+
+func NewSchemaValidatorFromOpenAPI(openapiSchema *spec.Schema) SchemaValidator {
+	if utilfeature.DefaultFeatureGate.Enabled(features.CRDValidationRatcheting) {
+		return NewRatchetingSchemaValidator(openapiSchema, nil, "", strfmt.Default)
+	}
+	return basicSchemaValidator{validate.NewSchemaValidator(openapiSchema, nil, "", strfmt.Default)}
+}
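+
+// Example (illustrative sketch, not part of the vendored upstream code):
+// build a validator straight from an already-converted openapi schema
+// (openapiSchema and obj are placeholders).
+//
+//	validator := NewSchemaValidatorFromOpenAPI(openapiSchema)
+//	res := validator.Validate(obj)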
+
+// ValidateCustomResourceUpdate validates the transition of Custom Resource from
+// `old` to `new` against the schema in the CustomResourceDefinition.
+// Both customResource and old represent JSON data structures.
+//
+// If feature `CRDValidationRatcheting` is disabled, this behaves identically to
+// ValidateCustomResource(customResource).
+func ValidateCustomResourceUpdate(fldPath *field.Path, customResource, old interface{}, validator SchemaValidator, options ...ValidationOption) field.ErrorList {
+	// Additional feature gate check for sanity
+	if !utilfeature.DefaultFeatureGate.Enabled(features.CRDValidationRatcheting) {
+		return ValidateCustomResource(nil, customResource, validator)
+	} else if validator == nil {
+		return nil
+	}
+
+	result := validator.ValidateUpdate(customResource, old, options...)
+	if result.IsValid() {
+		return nil
+	}
+
+	return kubeOpenAPIResultToFieldErrors(fldPath, result)
+}
+
+// ValidateCustomResource validates the Custom Resource against the schema in the CustomResourceDefinition.
+// CustomResource is a JSON data structure.
+func ValidateCustomResource(fldPath *field.Path, customResource interface{}, validator SchemaCreateValidator, options ...ValidationOption) field.ErrorList {
+	if validator == nil {
+		return nil
+	}
+
+	result := validator.Validate(customResource, options...)
+	if result.IsValid() {
+		return nil
+	}
+
+	return kubeOpenAPIResultToFieldErrors(fldPath, result)
+}
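+
+// Example (illustrative sketch, not part of the vendored upstream code):
+// validate a decoded custom resource against its CRD schema (crdSchema and
+// cr are placeholders).
+//
+//	validator, _, err := NewSchemaValidator(crdSchema)
+//	if err != nil {
+//		return err
+//	}
+//	if errs := ValidateCustomResource(field.NewPath("spec"), cr, validator); len(errs) > 0 {
+//		// reject the request
+//	}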
+
+func kubeOpenAPIResultToFieldErrors(fldPath *field.Path, result *validate.Result) field.ErrorList {
+	var allErrs field.ErrorList
+	for _, err := range result.Errors {
+		switch err := err.(type) {
+
+		case *openapierrors.Validation:
+			errPath := fldPath
+			if len(err.Name) > 0 && err.Name != "." {
+				errPath = errPath.Child(strings.TrimPrefix(err.Name, "."))
+			}
+
+			switch err.Code() {
+			case openapierrors.RequiredFailCode:
+				allErrs = append(allErrs, field.Required(errPath, ""))
+
+			case openapierrors.EnumFailCode:
+				values := []string{}
+				for _, allowedValue := range err.Values {
+					if s, ok := allowedValue.(string); ok {
+						values = append(values, s)
+					} else {
+						allowedJSON, _ := json.Marshal(allowedValue)
+						values = append(values, string(allowedJSON))
+					}
+				}
+				allErrs = append(allErrs, field.NotSupported(errPath, err.Value, values))
+
+			case openapierrors.TooLongFailCode:
+				max := int64(-1)
+				if i, ok := err.Valid.(int64); ok {
+					max = i
+				}
+				allErrs = append(allErrs, field.TooLong(errPath, "" /*unused*/, int(max)))
+
+			case openapierrors.MaxItemsFailCode:
+				actual := int64(-1)
+				if i, ok := err.Value.(int64); ok {
+					actual = i
+				}
+				max := int64(-1)
+				if i, ok := err.Valid.(int64); ok {
+					max = i
+				}
+				allErrs = append(allErrs, field.TooMany(errPath, int(actual), int(max)))
+
+			case openapierrors.TooManyPropertiesCode:
+				actual := int64(-1)
+				if i, ok := err.Value.(int64); ok {
+					actual = i
+				}
+				max := int64(-1)
+				if i, ok := err.Valid.(int64); ok {
+					max = i
+				}
+				allErrs = append(allErrs, field.TooMany(errPath, int(actual), int(max)))
+
+			case openapierrors.InvalidTypeCode:
+				value := interface{}("")
+				if err.Value != nil {
+					value = err.Value
+				}
+				allErrs = append(allErrs, field.TypeInvalid(errPath, value, err.Error()))
+
+			default:
+				value := interface{}("")
+				if err.Value != nil {
+					value = err.Value
+				}
+				allErrs = append(allErrs, field.Invalid(errPath, value, err.Error()))
+			}
+
+		default:
+			allErrs = append(allErrs, field.Invalid(fldPath, "", err.Error()))
+		}
+	}
+	return allErrs
+}
+
+// ConvertJSONSchemaProps converts the schema from apiextensions.JSONSchemaProps to go-openapi/spec.Schema.
+func ConvertJSONSchemaProps(in *apiextensions.JSONSchemaProps, out *spec.Schema) error {
+	return ConvertJSONSchemaPropsWithPostProcess(in, out, nil)
+}
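+
+// Example (illustrative sketch, not part of the vendored upstream code):
+// convert a CRD schema node and strip unsupported formats in one pass (in is
+// a placeholder *apiextensions.JSONSchemaProps).
+//
+//	out := &spec.Schema{}
+//	if err := ConvertJSONSchemaPropsWithPostProcess(in, out, StripUnsupportedFormatsPostProcess); err != nil {
+//		// handle conversion error
+//	}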
+
+// PostProcessFunc post-processes one node of a spec.Schema.
+type PostProcessFunc func(*spec.Schema) error
+
+// ConvertJSONSchemaPropsWithPostProcess converts the schema from apiextensions.JSONSchemaProps to go-openapi/spec.Schema
+// and runs a post-process step on each JSONSchemaProps node. postProcess is never called for nil schemas.
+func ConvertJSONSchemaPropsWithPostProcess(in *apiextensions.JSONSchemaProps, out *spec.Schema, postProcess PostProcessFunc) error {
+	if in == nil {
+		return nil
+	}
+
+	out.ID = in.ID
+	out.Schema = spec.SchemaURL(in.Schema)
+	out.Description = in.Description
+	if in.Type != "" {
+		out.Type = spec.StringOrArray([]string{in.Type})
+	}
+	if in.XIntOrString {
+		out.VendorExtensible.AddExtension("x-kubernetes-int-or-string", true)
+		out.Type = spec.StringOrArray{"integer", "string"}
+	}
+	out.Nullable = in.Nullable
+	out.Format = in.Format
+	out.Title = in.Title
+	out.Maximum = in.Maximum
+	out.ExclusiveMaximum = in.ExclusiveMaximum
+	out.Minimum = in.Minimum
+	out.ExclusiveMinimum = in.ExclusiveMinimum
+	out.MaxLength = in.MaxLength
+	out.MinLength = in.MinLength
+	out.Pattern = in.Pattern
+	out.MaxItems = in.MaxItems
+	out.MinItems = in.MinItems
+	out.UniqueItems = in.UniqueItems
+	out.MultipleOf = in.MultipleOf
+	out.MaxProperties = in.MaxProperties
+	out.MinProperties = in.MinProperties
+	out.Required = in.Required
+
+	if in.Default != nil {
+		out.Default = *(in.Default)
+	}
+	if in.Example != nil {
+		out.Example = *(in.Example)
+	}
+
+	if in.Enum != nil {
+		out.Enum = make([]interface{}, len(in.Enum))
+		for k, v := range in.Enum {
+			out.Enum[k] = v
+		}
+	}
+
+	if err := convertSliceOfJSONSchemaProps(&in.AllOf, &out.AllOf, postProcess); err != nil {
+		return err
+	}
+	if err := convertSliceOfJSONSchemaProps(&in.OneOf, &out.OneOf, postProcess); err != nil {
+		return err
+	}
+	if err := convertSliceOfJSONSchemaProps(&in.AnyOf, &out.AnyOf, postProcess); err != nil {
+		return err
+	}
+
+	if in.Not != nil {
+		in, out := &in.Not, &out.Not
+		*out = new(spec.Schema)
+		if err := ConvertJSONSchemaPropsWithPostProcess(*in, *out, postProcess); err != nil {
+			return err
+		}
+	}
+
+	var err error
+	out.Properties, err = convertMapOfJSONSchemaProps(in.Properties, postProcess)
+	if err != nil {
+		return err
+	}
+
+	out.PatternProperties, err = convertMapOfJSONSchemaProps(in.PatternProperties, postProcess)
+	if err != nil {
+		return err
+	}
+
+	out.Definitions, err = convertMapOfJSONSchemaProps(in.Definitions, postProcess)
+	if err != nil {
+		return err
+	}
+
+	if in.Ref != nil {
+		out.Ref, err = spec.NewRef(*in.Ref)
+		if err != nil {
+			return err
+		}
+	}
+
+	if in.AdditionalProperties != nil {
+		in, out := &in.AdditionalProperties, &out.AdditionalProperties
+		*out = new(spec.SchemaOrBool)
+		if err := convertJSONSchemaPropsorBool(*in, *out, postProcess); err != nil {
+			return err
+		}
+	}
+
+	if in.AdditionalItems != nil {
+		in, out := &in.AdditionalItems, &out.AdditionalItems
+		*out = new(spec.SchemaOrBool)
+		if err := convertJSONSchemaPropsorBool(*in, *out, postProcess); err != nil {
+			return err
+		}
+	}
+
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = new(spec.SchemaOrArray)
+		if err := convertJSONSchemaPropsOrArray(*in, *out, postProcess); err != nil {
+			return err
+		}
+	}
+
+	if in.Dependencies != nil {
+		in, out := &in.Dependencies, &out.Dependencies
+		*out = make(spec.Dependencies, len(*in))
+		for key, val := range *in {
+			newVal := new(spec.SchemaOrStringArray)
+			if err := convertJSONSchemaPropsOrStringArray(&val, newVal, postProcess); err != nil {
+				return err
+			}
+			(*out)[key] = *newVal
+		}
+	}
+
+	if in.ExternalDocs != nil {
+		out.ExternalDocs = &spec.ExternalDocumentation{}
+		out.ExternalDocs.Description = in.ExternalDocs.Description
+		out.ExternalDocs.URL = in.ExternalDocs.URL
+	}
+
+	if postProcess != nil {
+		if err := postProcess(out); err != nil {
+			return err
+		}
+	}
+
+	if in.XPreserveUnknownFields != nil {
+		out.VendorExtensible.AddExtension("x-kubernetes-preserve-unknown-fields", *in.XPreserveUnknownFields)
+	}
+	if in.XEmbeddedResource {
+		out.VendorExtensible.AddExtension("x-kubernetes-embedded-resource", true)
+	}
+	if len(in.XListMapKeys) != 0 {
+		out.VendorExtensible.AddExtension("x-kubernetes-list-map-keys", convertSliceToInterfaceSlice(in.XListMapKeys))
+	}
+	if in.XListType != nil {
+		out.VendorExtensible.AddExtension("x-kubernetes-list-type", *in.XListType)
+	}
+	if in.XMapType != nil {
+		out.VendorExtensible.AddExtension("x-kubernetes-map-type", *in.XMapType)
+	}
+	if len(in.XValidations) != 0 {
+		var serializationValidationRules apiextensionsv1.ValidationRules
+		if err := apiextensionsv1.Convert_apiextensions_ValidationRules_To_v1_ValidationRules(&in.XValidations, &serializationValidationRules, nil); err != nil {
+			return err
+		}
+		out.VendorExtensible.AddExtension("x-kubernetes-validations", convertSliceToInterfaceSlice(serializationValidationRules))
+	}
+	return nil
+}
+
+func convertSliceToInterfaceSlice[T any](in []T) []interface{} {
+	var res []interface{}
+	for _, v := range in {
+		res = append(res, v)
+	}
+	return res
+}
+
+func convertSliceOfJSONSchemaProps(in *[]apiextensions.JSONSchemaProps, out *[]spec.Schema, postProcess PostProcessFunc) error {
+	if in != nil {
+		for _, jsonSchemaProps := range *in {
+			schema := spec.Schema{}
+			if err := ConvertJSONSchemaPropsWithPostProcess(&jsonSchemaProps, &schema, postProcess); err != nil {
+				return err
+			}
+			*out = append(*out, schema)
+		}
+	}
+	return nil
+}
+
+func convertMapOfJSONSchemaProps(in map[string]apiextensions.JSONSchemaProps, postProcess PostProcessFunc) (map[string]spec.Schema, error) {
+	if in == nil {
+		return nil, nil
+	}
+
+	out := make(map[string]spec.Schema)
+	for k, jsonSchemaProps := range in {
+		schema := spec.Schema{}
+		if err := ConvertJSONSchemaPropsWithPostProcess(&jsonSchemaProps, &schema, postProcess); err != nil {
+			return nil, err
+		}
+		out[k] = schema
+	}
+	return out, nil
+}
+
+func convertJSONSchemaPropsOrArray(in *apiextensions.JSONSchemaPropsOrArray, out *spec.SchemaOrArray, postProcess PostProcessFunc) error {
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(spec.Schema)
+		if err := ConvertJSONSchemaPropsWithPostProcess(*in, *out, postProcess); err != nil {
+			return err
+		}
+	}
+	if in.JSONSchemas != nil {
+		in, out := &in.JSONSchemas, &out.Schemas
+		*out = make([]spec.Schema, len(*in))
+		for i := range *in {
+			if err := ConvertJSONSchemaPropsWithPostProcess(&(*in)[i], &(*out)[i], postProcess); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func convertJSONSchemaPropsorBool(in *apiextensions.JSONSchemaPropsOrBool, out *spec.SchemaOrBool, postProcess PostProcessFunc) error {
+	out.Allows = in.Allows
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(spec.Schema)
+		if err := ConvertJSONSchemaPropsWithPostProcess(*in, *out, postProcess); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func convertJSONSchemaPropsOrStringArray(in *apiextensions.JSONSchemaPropsOrStringArray, out *spec.SchemaOrStringArray, postProcess PostProcessFunc) error {
+	out.Property = in.Property
+	if in.Schema != nil {
+		in, out := &in.Schema, &out.Schema
+		*out = new(spec.Schema)
+		if err := ConvertJSONSchemaPropsWithPostProcess(*in, *out, postProcess); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS
new file mode 100644
index 0000000000..3e1dd9f081
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/features/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - feature-approvers
diff --git a/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
new file mode 100644
index 0000000000..c562263f32
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package features
+
+import (
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/version"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/component-base/featuregate"
+)
+
+const (
+	// Every feature gate should add a method here following this template:
+	//
+	// // owner: @username
+	// MyFeature() bool
+
+	// owner: @alexzielenski
+	//
+	// Ignores errors raised on unchanged fields of Custom Resources
+	// across UPDATE/PATCH requests.
+	CRDValidationRatcheting featuregate.Feature = "CRDValidationRatcheting"
+
+	// owner: @jpbetz
+	//
+	// CustomResourceDefinitions may include SelectableFields to declare which fields
+	// may be used as field selectors.
+	CustomResourceFieldSelectors featuregate.Feature = "CustomResourceFieldSelectors"
+)
+
+func init() {
+	runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates))
+}
+
+// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs.
+// To add a new feature, define a key for it above and add it below. The features will be
+// available throughout Kubernetes binaries.
+// To support the n-3 compatibility version, features may only be removed 3 releases after graduation.
+//
+// Entries are alphabetized.
+var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{
+	CRDValidationRatcheting: {
+		{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
+	},
+	CustomResourceFieldSelectors: {
+		{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA},
+	},
+}
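
A hedged sketch of how a caller might read the gates registered above, once init() has added them to the default mutable gate. It assumes the standard utilfeature accessor used by this file; at the current default version both gates resolve to true per the Beta/GA entries.

package main

import (
	"fmt"

	"k8s.io/apiextensions-apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func main() {
	// Resolved against the versioned specs registered in init().
	fmt.Println(utilfeature.DefaultFeatureGate.Enabled(features.CRDValidationRatcheting))
	fmt.Println(utilfeature.DefaultFeatureGate.Enabled(features.CustomResourceFieldSelectors))
}
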
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
index 57e0e71f67..6a3ab8f24e 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -54,6 +54,7 @@ var knownReasons = map[metav1.StatusReason]struct{}{
 	metav1.StatusReasonGone:                  {},
 	metav1.StatusReasonInvalid:               {},
 	metav1.StatusReasonServerTimeout:         {},
+	metav1.StatusReasonStoreReadError:        {},
 	metav1.StatusReasonTimeout:               {},
 	metav1.StatusReasonTooManyRequests:       {},
 	metav1.StatusReasonBadRequest:            {},
@@ -775,6 +776,12 @@ func IsUnexpectedObjectError(err error) bool {
 	return err != nil && (ok || errors.As(err, &uoe))
 }
 
+// IsStoreReadError determines if err is due to either failure to transform the
+// data from the storage, or failure to decode the object appropriately.
+func IsStoreReadError(err error) bool {
+	return ReasonForError(err) == metav1.StatusReasonStoreReadError
+}
+
 // SuggestsClientDelay returns true if this error suggests a client delay as well as the
 // suggested seconds to wait, or false if the error does not imply a wait. It does not
 // address whether the error *should* be retried, since some errors (like a 3xx) may
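
A short illustrative use of the new IsStoreReadError helper in an error-classification path. The classify function and its categories are assumptions for the example, not upstream API.

package main

import (
	"errors"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// classify is illustrative only: it separates storage-read failures, which
// rarely resolve on retry, from transient timeouts, which usually do.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case apierrors.IsStoreReadError(err):
		return "storage read error; do not blindly retry"
	case apierrors.IsTimeout(err) || apierrors.IsServerTimeout(err):
		return "transient; retry with backoff"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(classify(errors.New("plain error"))) // other
}
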
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/hack/tools/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
index 1e1330fff2..3bd8bf535e 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
@@ -10,5 +10,6 @@ reviewers:
   - mikedanese
   - liggitt
   - janetkuo
-  - ncdc
   - dims
+emeritus_reviewers:
+  - ncdc
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
index 50af8334f0..d0aada9dd7 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -20,7 +20,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"math"
+	math "math"
 	"math/big"
 	"strconv"
 	"strings"
@@ -460,9 +460,10 @@ func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
 	}
 }
 
-// AsApproximateFloat64 returns a float64 representation of the quantity which may
-// lose precision. If the value of the quantity is outside the range of a float64
-// +Inf/-Inf will be returned.
+// AsApproximateFloat64 returns a float64 representation of the quantity which
+// may lose precision. If precision matters more than performance, see
+// AsFloat64Slow. If the value of the quantity is outside the range of a
+// float64, +Inf/-Inf will be returned.
 func (q *Quantity) AsApproximateFloat64() float64 {
 	var base float64
 	var exponent int
@@ -480,6 +481,36 @@ func (q *Quantity) AsApproximateFloat64() float64 {
 	return base * math.Pow10(exponent)
 }
 
+// AsFloat64Slow returns a float64 representation of the quantity.  This is
+// more precise than AsApproximateFloat64 but significantly slower.  If the
+// value of the quantity is outside the range of a float64, +Inf/-Inf will be
+// returned.
+func (q *Quantity) AsFloat64Slow() float64 {
+	infDec := q.AsDec()
+
+	var absScale int64
+	if infDec.Scale() < 0 {
+		absScale = int64(-infDec.Scale())
+	} else {
+		absScale = int64(infDec.Scale())
+	}
+	pow10AbsScale := big.NewInt(10)
+	pow10AbsScale = pow10AbsScale.Exp(pow10AbsScale, big.NewInt(absScale), nil)
+
+	var resultBigFloat *big.Float
+	if infDec.Scale() < 0 {
+		resultBigInt := new(big.Int).Mul(infDec.UnscaledBig(), pow10AbsScale)
+		resultBigFloat = new(big.Float).SetInt(resultBigInt)
+	} else {
+		pow10AbsScaleFloat := new(big.Float).SetInt(pow10AbsScale)
+		resultBigFloat = new(big.Float).SetInt(infDec.UnscaledBig())
+		resultBigFloat = resultBigFloat.Quo(resultBigFloat, pow10AbsScaleFloat)
+	}
+
+	result, _ := resultBigFloat.Float64()
+	return result
+}
+
 // AsInt64 returns a representation of the current value as an int64 if a fast conversion
 // is possible. If false is returned, callers must use the inf.Dec form of this quantity.
 func (q *Quantity) AsInt64() (int64, bool) {
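
To make the trade-off between the two accessors concrete, a small sketch (illustrative; the exact output depends on the value parsed):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("0.1")
	// Fast path: base * math.Pow10(exponent) in float64 arithmetic, which
	// can compound rounding error across the two operations.
	fmt.Println(q.AsApproximateFloat64())
	// Slow path: exact math/big arithmetic, rounded to float64 once at the
	// end. For many inputs the results coincide; the slow path guarantees
	// the closest representable value.
	fmt.Println(q.AsFloat64Slow())
}
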
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
index 593d7ba8cf..54a2883a35 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
@@ -50,7 +50,7 @@ func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) fie
 		}
 	}
 	if err := ValidateAnnotationsSize(annotations); err != nil {
-		allErrs = append(allErrs, field.TooLong(fldPath, "", TotalAnnotationSizeLimitB))
+		allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, TotalAnnotationSizeLimitB))
 	}
 	return allErrs
 }
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go
new file mode 100644
index 0000000000..ffb9f56d80
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/api/validation/path/name.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package path
+
+import (
+	"fmt"
+	"strings"
+)
+
+// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store)
+var NameMayNotBe = []string{".", ".."}
+
+// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store)
+var NameMayNotContain = []string{"/", "%"}
+
+// IsValidPathSegmentName validates the name can be safely encoded as a path segment
+func IsValidPathSegmentName(name string) []string {
+	for _, illegalName := range NameMayNotBe {
+		if name == illegalName {
+			return []string{fmt.Sprintf(`may not be '%s'`, illegalName)}
+		}
+	}
+
+	var errors []string
+	for _, illegalContent := range NameMayNotContain {
+		if strings.Contains(name, illegalContent) {
+			errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
+		}
+	}
+
+	return errors
+}
+
+// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment
+// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid
+func IsValidPathSegmentPrefix(name string) []string {
+	var errors []string
+	for _, illegalContent := range NameMayNotContain {
+		if strings.Contains(name, illegalContent) {
+			errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
+		}
+	}
+
+	return errors
+}
+
+// ValidatePathSegmentName validates the name can be safely encoded as a path segment
+func ValidatePathSegmentName(name string, prefix bool) []string {
+	if prefix {
+		return IsValidPathSegmentPrefix(name)
+	}
+
+	return IsValidPathSegmentName(name)
+}
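
A quick illustrative run of the validators above, with inputs chosen to trip each rule:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/validation/path"
)

func main() {
	for _, name := range []string{"valid-name", "..", "a/b", "50%"} {
		if errs := path.ValidatePathSegmentName(name, false /* not a prefix */); len(errs) > 0 {
			fmt.Printf("%q rejected: %v\n", name, errs)
			continue
		}
		fmt.Printf("%q ok\n", name)
	}
}
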
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go
new file mode 100644
index 0000000000..29c6a48b6a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/defaults.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internalversion
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// SetListOptionsDefaults sets defaults on the provided ListOptions if applicable.
+//
+// TODO(#115478): once the watch-list fg is always on we register this function in the scheme (via AddTypeDefaultingFunc).
+// TODO(#115478): when the function is registered in the scheme remove all callers of this method.
+func SetListOptionsDefaults(obj *ListOptions, isWatchListFeatureEnabled bool) {
+	if !isWatchListFeatureEnabled {
+		return
+	}
+	if obj.SendInitialEvents != nil || len(obj.ResourceVersionMatch) != 0 {
+		return
+	}
+	legacy := obj.ResourceVersion == "" || obj.ResourceVersion == "0"
+	if obj.Watch && legacy {
+		turnOnInitialEvents := true
+		obj.SendInitialEvents = &turnOnInitialEvents
+		obj.ResourceVersionMatch = metav1.ResourceVersionMatchNotOlderThan
+	}
+}
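
A minimal sketch of the defaulting behavior above. Per the TODOs, the feature-enabled flag is passed explicitly by callers for now:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/internalversion"
)

func main() {
	// A legacy-style watch request (resourceVersion "0") is upgraded to a
	// streaming initial-events watch when the feature is enabled.
	opts := &internalversion.ListOptions{Watch: true, ResourceVersion: "0"}
	internalversion.SetListOptionsDefaults(opts, true /* isWatchListFeatureEnabled */)
	fmt.Println(*opts.SendInitialEvents, opts.ResourceVersionMatch) // true NotOlderThan
}
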
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go
new file mode 100644
index 0000000000..2741ee2c80
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/apimachinery/pkg/apis/meta/v1
+
+package internalversion // import "k8s.io/apimachinery/pkg/apis/meta/internalversion"
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go
new file mode 100644
index 0000000000..a59ac71268
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internalversion
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "meta.k8s.io"
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// addToGroupVersion registers common meta types into schemas.
+func addToGroupVersion(scheme *runtime.Scheme) error {
+	if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
+		return err
+	}
+	// ListOptions is the only options struct which needs conversion (it exposes labels and fields
+	// as selectors for convenience). The other types have only a single representation today.
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&ListOptions{},
+		&metav1.GetOptions{},
+		&metav1.DeleteOptions{},
+		&metav1.CreateOptions{},
+		&metav1.UpdateOptions{},
+	)
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&metav1.Table{},
+		&metav1.TableOptions{},
+		&metav1beta1.PartialObjectMetadata{},
+		&metav1beta1.PartialObjectMetadataList{},
+	)
+	if err := metav1beta1.AddMetaToScheme(scheme); err != nil {
+		return err
+	}
+	if err := metav1.AddMetaToScheme(scheme); err != nil {
+		return err
+	}
+	// Allow delete options to be decoded across all versions in this scheme (we may want to be more clever than this)
+	scheme.AddUnversionedTypes(SchemeGroupVersion,
+		&metav1.DeleteOptions{},
+		&metav1.CreateOptions{},
+		&metav1.UpdateOptions{})
+
+	metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion)
+	if err := metav1beta1.RegisterConversions(scheme); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Unlike other API groups, meta internal knows about all meta external versions, but keeps
+// the logic for conversion private.
+func init() {
+	localSchemeBuilder.Register(addToGroupVersion)
+}
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go
new file mode 100644
index 0000000000..a45fa2a8a5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go
@@ -0,0 +1,17 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme // import "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go
new file mode 100644
index 0000000000..585d7f44bd
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme
+
+import (
+	"k8s.io/apimachinery/pkg/apis/meta/internalversion"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// Scheme is the registry for any type that adheres to the meta API spec.
+var Scheme = runtime.NewScheme()
+
+// Codecs provides access to encoding and decoding for the scheme.
+var Codecs = serializer.NewCodecFactory(Scheme)
+
+// ParameterCodec handles versioning of objects that are converted to query parameters.
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+
+// Unlike other API groups, meta internal knows about all meta external versions, but keeps
+// the logic for conversion private.
+func init() {
+	utilruntime.Must(internalversion.AddToScheme(Scheme))
+}
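
For context, a sketch of a typical use of this scheme: encoding internal ListOptions as query parameters via ParameterCodec. It assumes the meta.k8s.io/v1 types and conversions registered by register.go above cover ListOptions, which they do by construction.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/internalversion"
	"k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	opts := &internalversion.ListOptions{
		LabelSelector: labels.SelectorFromSet(labels.Set{"app": "demo"}),
		Limit:         10,
	}
	// Convert to meta.k8s.io/v1 and render as URL query parameters.
	params, err := scheme.ParameterCodec.EncodeParameters(opts, metav1.SchemeGroupVersion)
	if err != nil {
		panic(err)
	}
	fmt.Println(params.Encode()) // e.g. labelSelector=app%3Ddemo&limit=10
}
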
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go
new file mode 100644
index 0000000000..00d2b8c689
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internalversion
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ListOptions is the query options to a standard REST list call.
+type ListOptions struct {
+	metav1.TypeMeta
+
+	// A selector based on labels
+	LabelSelector labels.Selector
+	// A selector based on fields
+	FieldSelector fields.Selector
+	// If true, watch for changes to this list
+	Watch bool
+	// allowWatchBookmarks requests watch events with type "BOOKMARK".
+	// Servers that do not implement bookmarks may ignore this flag and
+	// bookmarks are sent at the server's discretion. Clients should not
+	// assume bookmarks are returned at any specific interval, nor may they
+	// assume the server will send any BOOKMARK event during a session.
+	// If this is not a watch, this field is ignored.
+	// If the feature gate WatchBookmarks is not enabled in apiserver,
+	// this field is ignored.
+	AllowWatchBookmarks bool
+	// resourceVersion sets a constraint on what resource versions a request may be served from.
+	// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+	// details.
+	ResourceVersion string
+	// resourceVersionMatch determines how resourceVersion is applied to list calls.
+	// It is highly recommended that resourceVersionMatch be set for list calls where
+	// resourceVersion is set.
+	// See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for
+	// details.
+	ResourceVersionMatch metav1.ResourceVersionMatch
+
+	// Timeout for the list/watch call.
+	TimeoutSeconds *int64
+	// Limit specifies the maximum number of results to return from the server. The server may
+	// not support this field on all resource types, but if it does and more results remain it
+	// will set the continue field on the returned list object.
+	Limit int64
+	// Continue is a token returned by the server that lets a client retrieve chunks of results
+	// from the server by specifying limit. The server may reject requests for continuation tokens
+	// it does not recognize and will return a 410 error if the token can no longer be used because
+	// it has expired.
+	Continue string
+
+	// `sendInitialEvents=true` may be set together with `watch=true`.
+	// In that case, the watch stream will begin with synthetic events to
+	// produce the current state of objects in the collection. Once all such
+	// events have been sent, a synthetic "Bookmark" event will be sent.
+	// The bookmark will report the ResourceVersion (RV) corresponding to the
+	// set of objects, and be marked with `"k8s.io/initial-events-end": "true"` annotation.
+	// Afterwards, the watch stream will proceed as usual, sending watch events
+	// corresponding to changes (subsequent to the RV) to objects watched.
+	//
+	// When `sendInitialEvents` option is set, we require `resourceVersionMatch`
+	// option to also be set. The semantics of the watch request are as follows:
+	// - `resourceVersionMatch` = NotOlderThan
+	//   is interpreted as "data at least as new as the provided `resourceVersion`"
+	//   and the bookmark event is sent when the state is synced
+	//   to a `resourceVersion` at least as fresh as the one provided by the ListOptions.
+	//   If `resourceVersion` is unset, this is interpreted as "consistent read" and the
+	//   bookmark event is sent when the state is synced at least to the moment
+	//   when the request started being processed.
+	// - `resourceVersionMatch` set to any other value or unset
+	//   Invalid error is returned.
+	//
+	// Defaults to true if `resourceVersion=""` or `resourceVersion="0"` (for backward
+	// compatibility reasons) and to false otherwise.
+	SendInitialEvents *bool
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// List holds a list of objects, which may not be known by the server.
+type List struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []runtime.Object
+}
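
A small illustrative construction of the internal ListOptions, showing that it carries parsed selector types rather than the string forms used by the v1 struct:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/internalversion"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	sel, err := labels.Parse("app=demo,tier!=cache")
	if err != nil {
		panic(err)
	}
	opts := internalversion.ListOptions{
		LabelSelector: sel,
		FieldSelector: fields.OneTermEqualSelector("metadata.namespace", "default"),
		Watch:         true,
	}
	fmt.Println(opts.LabelSelector.String(), opts.FieldSelector.String())
}
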
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go
new file mode 100644
index 0000000000..a6552c276e
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go
@@ -0,0 +1,148 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package internalversion
+
+import (
+	unsafe "unsafe"
+
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*List)(nil), (*v1.List)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_internalversion_List_To_v1_List(a.(*List), b.(*v1.List), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.List)(nil), (*List)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_List_To_internalversion_List(a.(*v1.List), b.(*List), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ListOptions)(nil), (*v1.ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_internalversion_ListOptions_To_v1_ListOptions(a.(*ListOptions), b.(*v1.ListOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ListOptions)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ListOptions_To_internalversion_ListOptions(a.(*v1.ListOptions), b.(*ListOptions), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]runtime.RawExtension, len(*in))
+		for i := range *in {
+			if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Items = nil
+	}
+	return nil
+}
+
+// Convert_internalversion_List_To_v1_List is an autogenerated conversion function.
+func Convert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error {
+	return autoConvert_internalversion_List_To_v1_List(in, out, s)
+}
+
+func autoConvert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]runtime.Object, len(*in))
+		for i := range *in {
+			if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Items = nil
+	}
+	return nil
+}
+
+// Convert_v1_List_To_internalversion_List is an autogenerated conversion function.
+func Convert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error {
+	return autoConvert_v1_List_To_internalversion_List(in, out, s)
+}
+
+func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error {
+	if err := v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil {
+		return err
+	}
+	if err := v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil {
+		return err
+	}
+	out.Watch = in.Watch
+	out.AllowWatchBookmarks = in.AllowWatchBookmarks
+	out.ResourceVersion = in.ResourceVersion
+	out.ResourceVersionMatch = v1.ResourceVersionMatch(in.ResourceVersionMatch)
+	out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds))
+	out.Limit = in.Limit
+	out.Continue = in.Continue
+	out.SendInitialEvents = (*bool)(unsafe.Pointer(in.SendInitialEvents))
+	return nil
+}
+
+// Convert_internalversion_ListOptions_To_v1_ListOptions is an autogenerated conversion function.
+func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error {
+	return autoConvert_internalversion_ListOptions_To_v1_ListOptions(in, out, s)
+}
+
+func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error {
+	if err := v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil {
+		return err
+	}
+	if err := v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil {
+		return err
+	}
+	out.Watch = in.Watch
+	out.AllowWatchBookmarks = in.AllowWatchBookmarks
+	out.ResourceVersion = in.ResourceVersion
+	out.ResourceVersionMatch = v1.ResourceVersionMatch(in.ResourceVersionMatch)
+	out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds))
+	out.Limit = in.Limit
+	out.Continue = in.Continue
+	out.SendInitialEvents = (*bool)(unsafe.Pointer(in.SendInitialEvents))
+	return nil
+}
+
+// Convert_v1_ListOptions_To_internalversion_ListOptions is an autogenerated conversion function.
+func Convert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error {
+	return autoConvert_v1_ListOptions_To_internalversion_ListOptions(in, out, s)
+}
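
An illustrative round-trip through the generated conversion. Passing a nil conversion.Scope is an assumption that holds here only because these particular generated functions never touch it:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/internalversion"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	external := &metav1.ListOptions{LabelSelector: "app=demo", Watch: true}
	internal := &internalversion.ListOptions{}
	// nil scope: these generated functions never dereference it.
	if err := internalversion.Convert_v1_ListOptions_To_internalversion_ListOptions(external, internal, nil); err != nil {
		panic(err)
	}
	fmt.Println(internal.LabelSelector.String(), internal.Watch) // app=demo true
}
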
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..af66a2ac4c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go
@@ -0,0 +1,102 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package internalversion
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *List) DeepCopyInto(out *List) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]runtime.Object, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				(*out)[i] = (*in)[i].DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List.
+func (in *List) DeepCopy() *List {
+	if in == nil {
+		return nil
+	}
+	out := new(List)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *List) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ListOptions) DeepCopyInto(out *ListOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.LabelSelector != nil {
+		out.LabelSelector = in.LabelSelector.DeepCopySelector()
+	}
+	if in.FieldSelector != nil {
+		out.FieldSelector = in.FieldSelector.DeepCopySelector()
+	}
+	if in.TimeoutSeconds != nil {
+		in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.SendInitialEvents != nil {
+		in, out := &in.SendInitialEvents, &out.SendInitialEvents
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions.
+func (in *ListOptions) DeepCopy() *ListOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(ListOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ListOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
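
A minimal sketch showing that the generated deep copy clones pointer fields rather than sharing them:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/internalversion"
)

func main() {
	timeout := int64(30)
	orig := &internalversion.ListOptions{TimeoutSeconds: &timeout}
	copied := orig.DeepCopy()
	*copied.TimeoutSeconds = 60 // the pointer was cloned, not shared
	fmt.Println(*orig.TimeoutSeconds, *copied.TimeoutSeconds) // 30 60
}
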
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
index e7e5c152d0..ec414a84b9 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
@@ -11,6 +11,7 @@ reviewers:
   - luxas
   - janetkuo
   - justinsb
-  - ncdc
   - soltysh
   - dims
+emeritus_reviewers:
+  - ncdc
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
index 229ea2c2c2..9ee6c05918 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
@@ -1355,187 +1355,190 @@ func init() {
 }
 
 var fileDescriptor_a8431b6e0aeeb761 = []byte{
-	// 2873 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x5d, 0x6f, 0x23, 0x57,
-	0x35, 0x63, 0xc7, 0x89, 0x7d, 0x6c, 0xe7, 0xe3, 0x6e, 0x16, 0xbc, 0x41, 0xc4, 0xe9, 0xb4, 0xaa,
-	0xb6, 0xd0, 0x3a, 0xdd, 0xa5, 0x54, 0xdb, 0x2d, 0x2d, 0xc4, 0xf1, 0x66, 0x9b, 0x76, 0xd3, 0x44,
-	0x37, 0xbb, 0x0b, 0x94, 0x0a, 0x75, 0xe2, 0xb9, 0x71, 0x86, 0x8c, 0x67, 0xdc, 0x7b, 0xc7, 0x49,
-	0x0d, 0x0f, 0xf4, 0x01, 0x04, 0x48, 0xa8, 0x2a, 0x6f, 0x3c, 0xa1, 0x56, 0xf0, 0x03, 0x10, 0x4f,
-	0xbc, 0x83, 0x44, 0x1f, 0x8b, 0x78, 0xa9, 0x04, 0xb2, 0xba, 0xe1, 0x81, 0x47, 0xc4, 0x6b, 0x84,
-	0x04, 0xba, 0x1f, 0x33, 0x73, 0xc7, 0x1f, 0x9b, 0xf1, 0xee, 0x52, 0xf1, 0xe6, 0x39, 0xdf, 0xf7,
-	0xde, 0x73, 0xce, 0x3d, 0xe7, 0x5c, 0xc3, 0x73, 0x47, 0xd7, 0x58, 0xcd, 0xf1, 0xd7, 0xac, 0x8e,
-	0xd3, 0xb6, 0x9a, 0x87, 0x8e, 0x47, 0x68, 0x6f, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0xb6, 0xd6, 0x26,
-	0x81, 0xb5, 0x76, 0x7c, 0x65, 0xad, 0x45, 0x3c, 0x42, 0xad, 0x80, 0xd8, 0xb5, 0x0e, 0xf5, 0x03,
-	0x1f, 0x3d, 0x21, 0xb9, 0x6a, 0x3a, 0x57, 0xad, 0x73, 0xd4, 0xe2, 0x00, 0x56, 0xe3, 0x5c, 0xb5,
-	0xe3, 0x2b, 0xcb, 0xcf, 0xb4, 0x9c, 0xe0, 0xb0, 0xbb, 0x5f, 0x6b, 0xfa, 0xed, 0xb5, 0x96, 0xdf,
-	0xf2, 0xd7, 0x04, 0xf3, 0x7e, 0xf7, 0x40, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x5e, 0x1b,
-	0x67, 0x0a, 0xed, 0x7a, 0x81, 0xd3, 0x26, 0x83, 0x56, 0x2c, 0x3f, 0x7f, 0x1e, 0x03, 0x6b, 0x1e,
-	0x92, 0xb6, 0x35, 0xc8, 0x67, 0xfe, 0x29, 0x0b, 0xf9, 0xf5, 0xdd, 0xad, 0x9b, 0xd4, 0xef, 0x76,
-	0xd0, 0x2a, 0x4c, 0x7b, 0x56, 0x9b, 0x54, 0x8c, 0x55, 0xe3, 0x72, 0xa1, 0x5e, 0xfa, 0xa8, 0x5f,
-	0x9d, 0x3a, 0xed, 0x57, 0xa7, 0x5f, 0xb7, 0xda, 0x04, 0x0b, 0x0c, 0x72, 0x21, 0x7f, 0x4c, 0x28,
-	0x73, 0x7c, 0x8f, 0x55, 0x32, 0xab, 0xd9, 0xcb, 0xc5, 0xab, 0x2f, 0xd7, 0xd2, 0xac, 0xbf, 0x26,
-	0x14, 0xdc, 0x95, 0xac, 0x9b, 0x3e, 0x6d, 0x38, 0xac, 0xe9, 0x1f, 0x13, 0xda, 0xab, 0x2f, 0x28,
-	0x2d, 0x79, 0x85, 0x64, 0x38, 0xd2, 0x80, 0x7e, 0x64, 0xc0, 0x42, 0x87, 0x92, 0x03, 0x42, 0x29,
-	0xb1, 0x15, 0xbe, 0x92, 0x5d, 0x35, 0x1e, 0x81, 0xda, 0x8a, 0x52, 0xbb, 0xb0, 0x3b, 0x20, 0x1f,
-	0x0f, 0x69, 0x44, 0xbf, 0x36, 0x60, 0x99, 0x11, 0x7a, 0x4c, 0xe8, 0xba, 0x6d, 0x53, 0xc2, 0x58,
-	0xbd, 0xb7, 0xe1, 0x3a, 0xc4, 0x0b, 0x36, 0xb6, 0x1a, 0x98, 0x55, 0xa6, 0xc5, 0x3e, 0x7c, 0x3d,
-	0x9d, 0x41, 0x7b, 0xe3, 0xe4, 0xd4, 0x4d, 0x65, 0xd1, 0xf2, 0x58, 0x12, 0x86, 0xef, 0x63, 0x86,
-	0x79, 0x00, 0xa5, 0xf0, 0x20, 0x6f, 0x39, 0x2c, 0x40, 0x77, 0x61, 0xa6, 0xc5, 0x3f, 0x58, 0xc5,
-	0x10, 0x06, 0xd6, 0xd2, 0x19, 0x18, 0xca, 0xa8, 0xcf, 0x29, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a,
-	0x9a, 0xf9, 0xb3, 0x69, 0x28, 0xae, 0xef, 0x6e, 0x61, 0xc2, 0xfc, 0x2e, 0x6d, 0x92, 0x14, 0x4e,
-	0x73, 0x0d, 0x4a, 0xcc, 0xf1, 0x5a, 0x5d, 0xd7, 0xa2, 0x1c, 0x5a, 0x99, 0x11, 0x94, 0x4b, 0x8a,
-	0xb2, 0xb4, 0xa7, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x05, 0xe0, 0x12, 0x58, 0xc7, 0x6a, 0x12, 0xbb,
-	0x92, 0x59, 0x35, 0x2e, 0xe7, 0xeb, 0x48, 0xf1, 0xc1, 0xeb, 0x11, 0x06, 0x6b, 0x54, 0xe8, 0x71,
-	0xc8, 0x09, 0x4b, 0x2b, 0x79, 0xa1, 0xa6, 0xac, 0xc8, 0x73, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x53,
-	0x30, 0xab, 0xbc, 0xac, 0x52, 0x10, 0x64, 0xf3, 0x8a, 0x6c, 0x36, 0x74, 0x83, 0x10, 0xcf, 0xd7,
-	0x77, 0xe4, 0x78, 0xb6, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0x39, 0x9e, 0x8d, 0x05, 0x06, 0xdd, 0x82,
-	0xdc, 0x31, 0xa1, 0xfb, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x39, 0xdd, 0x46, 0xdf, 0xe5, 0x2c, 0xf5,
-	0x02, 0x37, 0x4d, 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x01, 0xb0, 0x43, 0x9f, 0x06, 0x62, 0x79, 0x95,
-	0xdc, 0x6a, 0xf6, 0x72, 0xa1, 0x3e, 0xc7, 0xd7, 0xbb, 0x17, 0x41, 0xb1, 0x46, 0xc1, 0xe9, 0x9b,
-	0x56, 0x40, 0x5a, 0x3e, 0x75, 0x08, 0xab, 0xcc, 0xc6, 0xf4, 0x1b, 0x11, 0x14, 0x6b, 0x14, 0xe8,
-	0x55, 0x40, 0x2c, 0xf0, 0xa9, 0xd5, 0x22, 0x6a, 0xa9, 0xaf, 0x58, 0xec, 0xb0, 0x02, 0x62, 0x75,
-	0xcb, 0x6a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0xbc, 0xe6, 0x0b,
-	0xc2, 0xef, 0xae, 0x41, 0xa9, 0xa5, 0x45, 0x9d, 0xf2, 0x8b, 0xe8, 0xb4, 0xf5, 0x88, 0xc4, 0x09,
-	0x4a, 0x44, 0xa0, 0x40, 0x95, 0xa4, 0x30, 0xbb, 0x5c, 0x49, 0xed, 0xb4, 0xa1, 0x0d, 0xb1, 0x26,
-	0x0d, 0xc8, 0x70, 0x2c, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0xcc, 0x37, 0xe8, 0xb2, 0x96, 0xd3,
-	0x0c, 0xb1, 0x7d, 0xa5, 0x31, 0xf9, 0xe8, 0x9c, 0x44, 0x90, 0xf9, 0xbf, 0x48, 0x04, 0xd7, 0xf3,
-	0xbf, 0xfc, 0xa0, 0x3a, 0xf5, 0xee, 0xdf, 0x56, 0xa7, 0xcc, 0x5f, 0x18, 0x50, 0x5a, 0xef, 0x74,
-	0xdc, 0xde, 0x4e, 0x27, 0x10, 0x0b, 0x30, 0x61, 0xc6, 0xa6, 0x3d, 0xdc, 0xf5, 0xd4, 0x42, 0x81,
-	0xc7, 0x77, 0x43, 0x40, 0xb0, 0xc2, 0xf0, 0xf8, 0x39, 0xf0, 0x69, 0x93, 0xa8, 0x70, 0x8b, 0xe2,
-	0x67, 0x93, 0x03, 0xb1, 0xc4, 0xf1, 0x43, 0x3e, 0x70, 0x88, 0x6b, 0x6f, 0x5b, 0x9e, 0xd5, 0x22,
-	0x54, 0x05, 0x47, 0xb4, 0xf5, 0x9b, 0x1a, 0x0e, 0x27, 0x28, 0xcd, 0xff, 0x64, 0xa0, 0xb0, 0xe1,
-	0x7b, 0xb6, 0x13, 0xa8, 0xe0, 0x0a, 0x7a, 0x9d, 0xa1, 0xe4, 0x71, 0xbb, 0xd7, 0x21, 0x58, 0x60,
-	0xd0, 0x0b, 0x30, 0xc3, 0x02, 0x2b, 0xe8, 0x32, 0x61, 0x4f, 0xa1, 0xfe, 0x58, 0x98, 0x96, 0xf6,
-	0x04, 0xf4, 0xac, 0x5f, 0x9d, 0x8f, 0xc4, 0x49, 0x10, 0x56, 0x0c, 0xdc, 0xd3, 0xfd, 0x7d, 0xb1,
-	0x51, 0xf6, 0x4d, 0x79, 0xed, 0x85, 0xf7, 0x47, 0x36, 0xf6, 0xf4, 0x9d, 0x21, 0x0a, 0x3c, 0x82,
-	0x0b, 0x1d, 0x03, 0x72, 0x2d, 0x16, 0xdc, 0xa6, 0x96, 0xc7, 0x84, 0xae, 0xdb, 0x4e, 0x9b, 0xa8,
-	0x80, 0xff, 0x52, 0xba, 0x13, 0xe7, 0x1c, 0xb1, 0xde, 0x5b, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8,
-	0x49, 0x98, 0xa1, 0xc4, 0x62, 0xbe, 0x57, 0xc9, 0x89, 0xe5, 0x47, 0x59, 0x19, 0x0b, 0x28, 0x56,
-	0x58, 0x9e, 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0xa6, 0xd7, 0x28, 0xa1, 0x6d, 0x4b, 0x30, 0x0e,
-	0xf1, 0xe6, 0x6f, 0x0d, 0x28, 0x6f, 0x50, 0x62, 0x05, 0x64, 0x12, 0xb7, 0x78, 0xe0, 0x13, 0x47,
-	0xeb, 0x30, 0x2f, 0xbe, 0xef, 0x5a, 0xae, 0x63, 0xcb, 0x33, 0x98, 0x16, 0xcc, 0x9f, 0x57, 0xcc,
-	0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x92, 0x85, 0x72, 0x83, 0xb8, 0x24, 0x36, 0x79,
-	0x13, 0x50, 0x8b, 0x5a, 0x4d, 0xb2, 0x4b, 0xa8, 0xe3, 0xdb, 0x7b, 0xa4, 0xe9, 0x7b, 0x36, 0x13,
-	0x6e, 0x94, 0xad, 0x7f, 0x8e, 0xef, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x5c, 0x28, 0x77,
-	0xa8, 0xf8, 0x2d, 0xf6, 0x5c, 0x7a, 0x59, 0xf1, 0xea, 0x57, 0xd2, 0x1d, 0xe9, 0xae, 0xce, 0x5a,
-	0x5f, 0x3c, 0xed, 0x57, 0xcb, 0x09, 0x10, 0x4e, 0x0a, 0x47, 0xdf, 0x80, 0x05, 0x9f, 0x76, 0x0e,
-	0x2d, 0xaf, 0x41, 0x3a, 0xc4, 0xb3, 0x89, 0x17, 0x30, 0xb1, 0x91, 0xf9, 0xfa, 0x12, 0xaf, 0x45,
-	0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x01, 0x8b, 0x1d, 0xea, 0x77, 0xac, 0x96, 0xd8, 0x98,
-	0x5d, 0xdf, 0x75, 0x9a, 0x3d, 0xb5, 0x9d, 0x4f, 0x9f, 0xf6, 0xab, 0x8b, 0xbb, 0x83, 0xc8, 0xb3,
-	0x7e, 0xf5, 0x82, 0xd8, 0x3a, 0x0e, 0x89, 0x91, 0x78, 0x58, 0x8c, 0xe6, 0x06, 0xb9, 0x71, 0x6e,
-	0x60, 0x6e, 0x41, 0xbe, 0xd1, 0x55, 0x31, 0xf1, 0x12, 0xe4, 0x6d, 0xf5, 0x5b, 0xed, 0x7c, 0x18,
-	0x9c, 0x11, 0xcd, 0x59, 0xbf, 0x5a, 0xe6, 0xe5, 0x67, 0x2d, 0x04, 0xe0, 0x88, 0xc5, 0xfc, 0x8d,
-	0x01, 0x15, 0x71, 0xf2, 0x7b, 0xc4, 0x25, 0xcd, 0xc0, 0xa7, 0x98, 0xbc, 0xdd, 0x75, 0x28, 0x69,
-	0x13, 0x2f, 0x40, 0x5f, 0x84, 0xec, 0x11, 0xe9, 0xa9, 0xbc, 0x50, 0x54, 0x62, 0xb3, 0xaf, 0x91,
-	0x1e, 0xe6, 0x70, 0x74, 0x03, 0xf2, 0x7e, 0x87, 0xc7, 0xa6, 0x4f, 0x55, 0x5e, 0x78, 0x2a, 0x54,
-	0xbd, 0xa3, 0xe0, 0x67, 0xfd, 0xea, 0xc5, 0x84, 0xf8, 0x10, 0x81, 0x23, 0x56, 0xbe, 0xe2, 0x63,
-	0xcb, 0xed, 0x12, 0x7e, 0x0a, 0xd1, 0x8a, 0xef, 0x0a, 0x08, 0x56, 0x18, 0xf3, 0x49, 0xc8, 0x0b,
-	0x31, 0xec, 0xee, 0x15, 0xb4, 0x00, 0x59, 0x6c, 0x9d, 0x08, 0xab, 0x4a, 0x98, 0xff, 0xd4, 0x92,
-	0xed, 0x0e, 0xc0, 0x4d, 0x12, 0x84, 0xfe, 0xb9, 0x0e, 0xf3, 0xe1, 0x8d, 0x93, 0xbc, 0x08, 0x23,
-	0xa7, 0xc7, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0xdf, 0x84, 0x82, 0xb8, 0x2c, 0x79, 0xa5, 0x11, 0x57,
-	0x35, 0xc6, 0x7d, 0xaa, 0x9a, 0xb0, 0x54, 0xc9, 0x8c, 0x2b, 0x55, 0x34, 0x73, 0x5d, 0x28, 0x4b,
-	0xde, 0xb0, 0x8e, 0x4b, 0xa5, 0xe1, 0x69, 0xc8, 0x87, 0x66, 0x2a, 0x2d, 0x51, 0xfd, 0x1e, 0x0a,
-	0xc2, 0x11, 0x85, 0xa6, 0xed, 0x10, 0x12, 0x17, 0x7f, 0x3a, 0x65, 0x5a, 0x91, 0x96, 0xb9, 0x7f,
-	0x91, 0xa6, 0x69, 0xfa, 0x21, 0x54, 0xc6, 0x15, 0xfd, 0x0f, 0x51, 0x9a, 0xa4, 0x37, 0xc5, 0x7c,
-	0xcf, 0x80, 0x05, 0x5d, 0x52, 0xfa, 0xe3, 0x4b, 0xaf, 0xe4, 0xfc, 0xa2, 0x54, 0xdb, 0x91, 0x5f,
-	0x19, 0xb0, 0x94, 0x58, 0xda, 0x44, 0x27, 0x3e, 0x81, 0x51, 0xba, 0x73, 0x64, 0x27, 0x70, 0x8e,
-	0xbf, 0x64, 0xa0, 0x7c, 0xcb, 0xda, 0x27, 0x6e, 0x18, 0xa9, 0xe8, 0x07, 0x50, 0x6c, 0x5b, 0x41,
-	0xf3, 0x50, 0x40, 0xc3, 0x06, 0xa6, 0x91, 0x2e, 0x27, 0x27, 0x24, 0xd5, 0xb6, 0x63, 0x31, 0x37,
-	0xbc, 0x80, 0xf6, 0xea, 0x17, 0x94, 0x49, 0x45, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0xeb, 0x14, 0xdf,
-	0x37, 0xde, 0xe9, 0xf0, 0xea, 0x6a, 0xf2, 0x66, 0x37, 0x61, 0x82, 0x96, 0xd5, 0xe2, 0xae, 0x73,
-	0x7b, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xfc, 0x32, 0x2c, 0x0c, 0x1a, 0xcf, 0xf3, 0x4f, 0x94, 0x15,
-	0x65, 0x22, 0x5c, 0x82, 0x9c, 0xc8, 0x53, 0xf2, 0x70, 0xb0, 0xfc, 0xb8, 0x9e, 0xb9, 0x66, 0x88,
-	0xf4, 0x3a, 0xce, 0x90, 0x47, 0x94, 0x5e, 0x13, 0xe2, 0x1f, 0x30, 0xbd, 0xfe, 0xde, 0x80, 0x69,
-	0xd1, 0x37, 0xbc, 0x09, 0x79, 0xbe, 0x7f, 0xb6, 0x15, 0x58, 0xc2, 0xae, 0xd4, 0x1d, 0x2b, 0xe7,
-	0xde, 0x26, 0x81, 0x15, 0x7b, 0x5b, 0x08, 0xc1, 0x91, 0x44, 0x84, 0x21, 0xe7, 0x04, 0xa4, 0x1d,
-	0x1e, 0xe4, 0x33, 0x63, 0x45, 0xab, 0x79, 0x49, 0x0d, 0x5b, 0x27, 0x37, 0xde, 0x09, 0x88, 0xc7,
-	0x0f, 0x23, 0x0e, 0x8d, 0x2d, 0x2e, 0x03, 0x4b, 0x51, 0xe6, 0xbf, 0x0c, 0x88, 0x54, 0x71, 0xe7,
-	0x67, 0xc4, 0x3d, 0xb8, 0xe5, 0x78, 0x47, 0x6a, 0x5b, 0x23, 0x73, 0xf6, 0x14, 0x1c, 0x47, 0x14,
-	0xa3, 0xae, 0x87, 0xcc, 0x64, 0xd7, 0x03, 0x57, 0xd8, 0xf4, 0xbd, 0xc0, 0xf1, 0xba, 0x43, 0xd1,
-	0xb6, 0xa1, 0xe0, 0x38, 0xa2, 0xe0, 0xf5, 0x12, 0x25, 0x6d, 0xcb, 0xf1, 0x1c, 0xaf, 0xc5, 0x17,
-	0xb1, 0xe1, 0x77, 0xbd, 0x40, 0x14, 0x0e, 0xaa, 0x5e, 0xc2, 0x43, 0x58, 0x3c, 0x82, 0xc3, 0xfc,
-	0xf7, 0x34, 0x14, 0xf9, 0x9a, 0xc3, 0x7b, 0xee, 0x45, 0x28, 0xbb, 0xba, 0x17, 0xa8, 0xb5, 0x5f,
-	0x54, 0xa6, 0x24, 0xe3, 0x1a, 0x27, 0x69, 0x39, 0xf3, 0x81, 0x7e, 0x43, 0xab, 0x3d, 0x88, 0x98,
-	0x93, 0xd5, 0x41, 0x92, 0x96, 0x67, 0xaf, 0x13, 0x1e, 0x1f, 0xaa, 0x80, 0x8a, 0x8e, 0xe8, 0x9b,
-	0x1c, 0x88, 0x25, 0x0e, 0x6d, 0xc3, 0x05, 0xcb, 0x75, 0xfd, 0x13, 0x01, 0xac, 0xfb, 0xfe, 0x51,
-	0xdb, 0xa2, 0x47, 0x4c, 0xf4, 0xfc, 0xf9, 0xfa, 0x17, 0x14, 0xcb, 0x85, 0xf5, 0x61, 0x12, 0x3c,
-	0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0x87, 0xb0, 0x34, 0x00, 0x12, 0x51, 0xae, 0x1a,
-	0xf0, 0xe7, 0x94, 0x9c, 0x25, 0x3c, 0x82, 0xe6, 0x6c, 0x0c, 0x1c, 0x8f, 0x94, 0x88, 0xae, 0xc3,
-	0x1c, 0xf7, 0x64, 0xbf, 0x1b, 0x84, 0xe5, 0x71, 0x4e, 0x1c, 0x37, 0x3a, 0xed, 0x57, 0xe7, 0x6e,
-	0x27, 0x30, 0x78, 0x80, 0x92, 0x6f, 0xae, 0xeb, 0xb4, 0x9d, 0xa0, 0x32, 0x2b, 0x58, 0xa2, 0xcd,
-	0xbd, 0xc5, 0x81, 0x58, 0xe2, 0x12, 0x1e, 0x98, 0x3f, 0xd7, 0x03, 0x37, 0x60, 0x91, 0x11, 0xcf,
-	0xde, 0xf2, 0x9c, 0xc0, 0xb1, 0xdc, 0x1b, 0xc7, 0xa2, 0xf8, 0x2d, 0x8a, 0x83, 0xb8, 0xc8, 0x2b,
-	0xd7, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x9c, 0x05, 0x24, 0xfb, 0x0a, 0x5b, 0x16, 0x65,
-	0x32, 0x2f, 0xf2, 0xee, 0x47, 0xf5, 0x25, 0xc6, 0x40, 0xf7, 0xa3, 0x5a, 0x92, 0x10, 0x8f, 0xb6,
-	0xa1, 0x20, 0xf3, 0x53, 0x1c, 0x73, 0x6b, 0x8a, 0xb8, 0xb0, 0x13, 0x22, 0xce, 0xfa, 0xd5, 0xe5,
-	0x84, 0x9a, 0x08, 0x23, 0x3a, 0xd3, 0x58, 0x02, 0xba, 0x0a, 0x60, 0x75, 0x1c, 0x7d, 0x36, 0x59,
-	0x88, 0x27, 0x54, 0xf1, 0x94, 0x01, 0x6b, 0x54, 0xe8, 0x15, 0x98, 0x0e, 0x1e, 0xac, 0x7b, 0xcc,
-	0x8b, 0xe6, 0x98, 0xf7, 0x8a, 0x42, 0x02, 0xd7, 0x2e, 0x82, 0x82, 0x71, 0xb3, 0x54, 0xe3, 0x17,
-	0x69, 0xdf, 0x8c, 0x30, 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xfc, 0x81, 0xaa, 0x67, 0xc5, 0xe9, 0xa6,
-	0xce, 0xb3, 0x61, 0x15, 0x2c, 0xc7, 0x23, 0xe1, 0x17, 0x8e, 0xa4, 0xa1, 0xaf, 0x42, 0x91, 0x75,
-	0xf7, 0xa3, 0x12, 0x40, 0xba, 0x44, 0x74, 0xdf, 0xee, 0xc5, 0x28, 0xac, 0xd3, 0x99, 0x6f, 0x43,
-	0x61, 0xdb, 0x69, 0x52, 0x5f, 0xf4, 0xbb, 0x4f, 0xc1, 0x2c, 0x4b, 0x34, 0x73, 0xd1, 0x49, 0x86,
-	0xae, 0x1a, 0xe2, 0xb9, 0x8f, 0x7a, 0x96, 0xe7, 0xcb, 0x96, 0x2d, 0x17, 0xfb, 0xe8, 0xeb, 0x1c,
-	0x88, 0x25, 0xee, 0xfa, 0x12, 0xaf, 0x32, 0x7e, 0xfa, 0x61, 0x75, 0xea, 0xfd, 0x0f, 0xab, 0x53,
-	0x1f, 0x7c, 0xa8, 0x2a, 0x8e, 0x3f, 0x00, 0xc0, 0xce, 0xfe, 0xf7, 0x48, 0x53, 0xe6, 0xee, 0x54,
-	0x23, 0xcc, 0x70, 0x72, 0x2e, 0x46, 0x98, 0x99, 0x81, 0xca, 0x51, 0xc3, 0xe1, 0x04, 0x25, 0x5a,
-	0x83, 0x42, 0x34, 0x9c, 0x54, 0xfe, 0xb1, 0x18, 0xfa, 0x5b, 0x34, 0xc1, 0xc4, 0x31, 0x4d, 0xe2,
-	0x22, 0x99, 0x3e, 0xf7, 0x22, 0xa9, 0x43, 0xb6, 0xeb, 0xd8, 0x6a, 0x38, 0xf0, 0x6c, 0x78, 0x91,
-	0xdf, 0xd9, 0x6a, 0x9c, 0xf5, 0xab, 0x8f, 0x8d, 0x7b, 0x13, 0x08, 0x7a, 0x1d, 0xc2, 0x6a, 0x77,
-	0xb6, 0x1a, 0x98, 0x33, 0x8f, 0xca, 0x6a, 0x33, 0x13, 0x66, 0xb5, 0xab, 0x00, 0xad, 0x78, 0xc4,
-	0x22, 0x93, 0x46, 0xe4, 0x88, 0xda, 0x68, 0x45, 0xa3, 0x42, 0x0c, 0x16, 0x9b, 0x94, 0x58, 0xe1,
-	0xa8, 0x83, 0x05, 0x56, 0x5b, 0x0e, 0x6d, 0x27, 0x8b, 0x89, 0x4b, 0x4a, 0xcd, 0xe2, 0xc6, 0xa0,
-	0x30, 0x3c, 0x2c, 0x1f, 0xf9, 0xb0, 0x68, 0xab, 0x6e, 0x38, 0x56, 0x5a, 0x98, 0x58, 0xa9, 0xc8,
-	0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x2e, 0x2c, 0x87, 0xc0, 0xe1, 0x91, 0x84, 0xc8,
-	0xfa, 0xd9, 0xfa, 0xca, 0x69, 0xbf, 0xba, 0xdc, 0x18, 0x4b, 0x85, 0xef, 0x23, 0x01, 0xd9, 0x30,
-	0xe3, 0xca, 0x2a, 0xb9, 0x28, 0x2a, 0x9b, 0xaf, 0xa5, 0x5b, 0x45, 0xec, 0xfd, 0x35, 0xbd, 0x3a,
-	0x8e, 0xc6, 0x4b, 0xaa, 0x30, 0x56, 0xb2, 0xd1, 0x3b, 0x50, 0xb4, 0x3c, 0xcf, 0x0f, 0x2c, 0x39,
-	0x24, 0x29, 0x09, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f, 0x65, 0x0c, 0x54, 0xe3, 0x1a, 0x06, 0xeb,
-	0xaa, 0xd0, 0x09, 0xcc, 0xfb, 0x27, 0x1e, 0xa1, 0x98, 0x1c, 0x10, 0x4a, 0xbc, 0x26, 0x61, 0x95,
-	0xb2, 0xd0, 0xfe, 0x5c, 0x4a, 0xed, 0x09, 0xe6, 0xd8, 0xa5, 0x93, 0x70, 0x86, 0x07, 0xb5, 0xa0,
-	0x1a, 0xcf, 0xad, 0x9e, 0xe5, 0x3a, 0xdf, 0x27, 0x94, 0x55, 0xe6, 0xe2, 0xb9, 0xfa, 0x66, 0x04,
-	0xc5, 0x1a, 0x05, 0xea, 0x42, 0xb9, 0xad, 0x5f, 0x19, 0x95, 0x45, 0x61, 0xe6, 0xb5, 0x74, 0x66,
-	0x0e, 0x5f, 0x6a, 0x71, 0x19, 0x94, 0xc0, 0xe1, 0xa4, 0x96, 0xe5, 0x17, 0xa0, 0xf8, 0x80, 0x1d,
-	0x02, 0xef, 0x30, 0x06, 0x0f, 0x64, 0xa2, 0x0e, 0xe3, 0x8f, 0x19, 0x98, 0x4b, 0x6e, 0xe3, 0xc0,
-	0x75, 0x98, 0x4b, 0x75, 0x1d, 0x86, 0xbd, 0xac, 0x31, 0xf6, 0x81, 0x25, 0xcc, 0xcf, 0xd9, 0xb1,
-	0xf9, 0x59, 0xa5, 0xc1, 0xe9, 0x87, 0x49, 0x83, 0x35, 0x00, 0x5e, 0xac, 0x50, 0xdf, 0x75, 0x09,
-	0x15, 0x19, 0x30, 0xaf, 0x1e, 0x52, 0x22, 0x28, 0xd6, 0x28, 0x78, 0x49, 0xbd, 0xef, 0xfa, 0xcd,
-	0x23, 0xb1, 0x05, 0x61, 0xf4, 0x8a, 0xdc, 0x97, 0x97, 0x25, 0x75, 0x7d, 0x08, 0x8b, 0x47, 0x70,
-	0x98, 0x3d, 0xb8, 0xb8, 0x6b, 0x51, 0x5e, 0xe4, 0xc4, 0x91, 0x22, 0x7a, 0x96, 0xb7, 0x86, 0x3a,
-	0xa2, 0x67, 0x27, 0x8d, 0xb8, 0x78, 0xf3, 0x63, 0x58, 0xdc, 0x15, 0x99, 0x7f, 0x35, 0xe0, 0xd2,
-	0x48, 0xdd, 0x9f, 0x41, 0x47, 0xf6, 0x56, 0xb2, 0x23, 0x7b, 0x31, 0xe5, 0xc4, 0x75, 0x94, 0xb5,
-	0x63, 0xfa, 0xb3, 0x59, 0xc8, 0xed, 0xf2, 0x4a, 0xd8, 0xfc, 0xd8, 0x80, 0x92, 0xf8, 0x35, 0xc9,
-	0xc0, 0xbb, 0x9a, 0x7c, 0x07, 0x29, 0x3c, 0xba, 0x37, 0x90, 0x47, 0x31, 0x11, 0x7f, 0xcf, 0x80,
-	0xe4, 0xa8, 0x19, 0xbd, 0x2c, 0x43, 0xc0, 0x88, 0x66, 0xc1, 0x13, 0xba, 0xff, 0x4b, 0xe3, 0x5a,
-	0xd2, 0x0b, 0xa9, 0xa6, 0x95, 0x4f, 0x43, 0x01, 0xfb, 0x7e, 0xb0, 0x6b, 0x05, 0x87, 0x8c, 0xef,
-	0x5d, 0x87, 0xff, 0x50, 0xdb, 0x2b, 0xf6, 0x4e, 0x60, 0xb0, 0x84, 0x9b, 0x3f, 0x37, 0xe0, 0xd2,
-	0xd8, 0xe7, 0x2d, 0x9e, 0x45, 0x9a, 0xd1, 0x97, 0x5a, 0x51, 0xe4, 0xc8, 0x31, 0x1d, 0xd6, 0xa8,
-	0x78, 0x2f, 0x99, 0x78, 0x13, 0x1b, 0xec, 0x25, 0x13, 0xda, 0x70, 0x92, 0xd6, 0xfc, 0x67, 0x06,
-	0xd4, 0x7b, 0xd2, 0xff, 0xd8, 0xe9, 0x9f, 0x1c, 0x78, 0xcd, 0x9a, 0x4b, 0xbe, 0x66, 0x45, 0x4f,
-	0x57, 0xda, 0x73, 0x4e, 0xf6, 0xfe, 0xcf, 0x39, 0xe8, 0xf9, 0xe8, 0x85, 0x48, 0xfa, 0xd0, 0x4a,
-	0xf2, 0x85, 0xe8, 0xac, 0x5f, 0x2d, 0x29, 0xe1, 0xc9, 0x17, 0xa3, 0x37, 0x60, 0xd6, 0x26, 0x81,
-	0xe5, 0xb8, 0xb2, 0x2f, 0x4c, 0xfd, 0xe6, 0x21, 0x85, 0x35, 0x24, 0x6b, 0xbd, 0xc8, 0x6d, 0x52,
-	0x1f, 0x38, 0x14, 0xc8, 0x13, 0x76, 0xd3, 0xb7, 0x65, 0x47, 0x92, 0x8b, 0x13, 0xf6, 0x86, 0x6f,
-	0x13, 0x2c, 0x30, 0xe6, 0xfb, 0x06, 0x14, 0xa5, 0xa4, 0x0d, 0xab, 0xcb, 0x08, 0xba, 0x12, 0xad,
-	0x42, 0x1e, 0xf7, 0x25, 0xfd, 0x29, 0xf0, 0xac, 0x5f, 0x2d, 0x08, 0x32, 0xd1, 0xcc, 0x8c, 0x78,
-	0xf2, 0xca, 0x9c, 0xb3, 0x47, 0x8f, 0x43, 0x4e, 0x04, 0x90, 0xda, 0xcc, 0xf8, 0x4d, 0x93, 0x03,
-	0xb1, 0xc4, 0x99, 0x9f, 0x66, 0xa0, 0x9c, 0x58, 0x5c, 0x8a, 0xbe, 0x20, 0x1a, 0xa1, 0x66, 0x52,
-	0x8c, 0xe5, 0xc7, 0xff, 0x83, 0x40, 0x5d, 0x5f, 0x33, 0x0f, 0x73, 0x7d, 0x7d, 0x1b, 0x66, 0x9a,
-	0x7c, 0x8f, 0xc2, 0x3f, 0xa4, 0x5c, 0x99, 0xe4, 0x38, 0xc5, 0xee, 0xc6, 0xde, 0x28, 0x3e, 0x19,
-	0x56, 0x02, 0xd1, 0x4d, 0x58, 0xa4, 0x24, 0xa0, 0xbd, 0xf5, 0x83, 0x80, 0x50, 0x7d, 0x98, 0x90,
-	0x8b, 0xab, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc, 0x7d, 0x28, 0xdd, 0xb6, 0xf6, 0xdd, 0xe8,
-	0x15, 0x0f, 0x43, 0xd9, 0xf1, 0x9a, 0x6e, 0xd7, 0x26, 0x32, 0xa1, 0x87, 0xd9, 0x2b, 0x0c, 0xda,
-	0x2d, 0x1d, 0x79, 0xd6, 0xaf, 0x5e, 0x48, 0x00, 0xe4, 0xb3, 0x15, 0x4e, 0x8a, 0x30, 0x5d, 0x98,
-	0xfe, 0x0c, 0x3b, 0xc9, 0xef, 0x40, 0x21, 0xae, 0xf5, 0x1f, 0xb1, 0x4a, 0xf3, 0x2d, 0xc8, 0x73,
-	0x8f, 0x0f, 0x7b, 0xd4, 0x73, 0xaa, 0xa4, 0x64, 0xed, 0x95, 0x49, 0x53, 0x7b, 0x89, 0xb7, 0xe0,
-	0x3b, 0x1d, 0xfb, 0x21, 0xdf, 0x82, 0x33, 0x0f, 0x73, 0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x82,
-	0xfc, 0xbf, 0x0c, 0xbf, 0x64, 0x64, 0x01, 0xa1, 0x5d, 0x32, 0xfa, 0xfd, 0xaf, 0xbd, 0x30, 0xfc,
-	0xd8, 0x00, 0x10, 0xa3, 0x3c, 0x31, 0x46, 0x4a, 0xf1, 0xaf, 0x83, 0x3b, 0x30, 0xe3, 0x4b, 0x8f,
-	0x94, 0xef, 0xc1, 0x13, 0xce, 0x8b, 0xa3, 0x40, 0x92, 0x3e, 0x89, 0x95, 0xb0, 0xfa, 0xab, 0x1f,
-	0xdd, 0x5b, 0x99, 0xfa, 0xf8, 0xde, 0xca, 0xd4, 0x27, 0xf7, 0x56, 0xa6, 0xde, 0x3d, 0x5d, 0x31,
-	0x3e, 0x3a, 0x5d, 0x31, 0x3e, 0x3e, 0x5d, 0x31, 0x3e, 0x39, 0x5d, 0x31, 0x3e, 0x3d, 0x5d, 0x31,
-	0xde, 0xff, 0xfb, 0xca, 0xd4, 0x1b, 0x4f, 0xa4, 0xf9, 0x1f, 0xe2, 0x7f, 0x03, 0x00, 0x00, 0xff,
-	0xff, 0xd3, 0xee, 0xe4, 0x1c, 0xae, 0x28, 0x00, 0x00,
+	// 2928 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0x4d, 0x6c, 0x24, 0x47,
+	0xd5, 0xee, 0xf9, 0xb1, 0x67, 0xde, 0x78, 0xfc, 0x53, 0xeb, 0xfd, 0xbe, 0x59, 0x23, 0x3c, 0x4e,
+	0x27, 0x8a, 0x36, 0x90, 0x8c, 0x77, 0x97, 0x25, 0xda, 0x6c, 0x48, 0xc0, 0xe3, 0x59, 0x6f, 0x9c,
+	0xac, 0x63, 0xab, 0xbc, 0xbb, 0x81, 0x10, 0xa1, 0x94, 0xa7, 0xcb, 0xe3, 0xc6, 0x3d, 0xdd, 0x93,
+	0xaa, 0x1e, 0x6f, 0x06, 0x0e, 0xe4, 0x00, 0x12, 0x48, 0x28, 0x0a, 0x37, 0x4e, 0x28, 0x11, 0x9c,
+	0x38, 0x21, 0x4e, 0xdc, 0x41, 0x22, 0xc7, 0x20, 0x2e, 0x91, 0x40, 0xa3, 0xac, 0x39, 0x70, 0x44,
+	0x5c, 0x2d, 0x24, 0x50, 0xfd, 0xf4, 0xdf, 0xfc, 0xac, 0x7b, 0x76, 0x97, 0x88, 0xdb, 0xf4, 0xfb,
+	0xaf, 0xaa, 0xf7, 0x5e, 0xbd, 0xf7, 0x6a, 0xe0, 0xea, 0xd1, 0x35, 0x5e, 0xb3, 0xbd, 0x35, 0xd2,
+	0xb1, 0xdb, 0xa4, 0x79, 0x68, 0xbb, 0x94, 0xf5, 0xd6, 0x3a, 0x47, 0x2d, 0x01, 0xe0, 0x6b, 0x6d,
+	0xea, 0x93, 0xb5, 0xe3, 0xcb, 0x6b, 0x2d, 0xea, 0x52, 0x46, 0x7c, 0x6a, 0xd5, 0x3a, 0xcc, 0xf3,
+	0x3d, 0xf4, 0x94, 0xe2, 0xaa, 0xc5, 0xb9, 0x6a, 0x9d, 0xa3, 0x96, 0x00, 0xf0, 0x9a, 0xe0, 0xaa,
+	0x1d, 0x5f, 0x5e, 0x7e, 0xae, 0x65, 0xfb, 0x87, 0xdd, 0xfd, 0x5a, 0xd3, 0x6b, 0xaf, 0xb5, 0xbc,
+	0x96, 0xb7, 0x26, 0x99, 0xf7, 0xbb, 0x07, 0xf2, 0x4b, 0x7e, 0xc8, 0x5f, 0x4a, 0xe8, 0xf2, 0xda,
+	0x38, 0x53, 0x58, 0xd7, 0xf5, 0xed, 0x36, 0x1d, 0xb4, 0x62, 0xf9, 0xf9, 0xb3, 0x18, 0x78, 0xf3,
+	0x90, 0xb6, 0xc9, 0x20, 0x9f, 0xf9, 0xc7, 0x2c, 0x14, 0xd6, 0x77, 0xb7, 0x6e, 0x32, 0xaf, 0xdb,
+	0x41, 0xab, 0x90, 0x73, 0x49, 0x9b, 0x56, 0x8c, 0x55, 0xe3, 0x62, 0xb1, 0x3e, 0xfb, 0x71, 0xbf,
+	0x3a, 0x75, 0xd2, 0xaf, 0xe6, 0x5e, 0x27, 0x6d, 0x8a, 0x25, 0x06, 0x39, 0x50, 0x38, 0xa6, 0x8c,
+	0xdb, 0x9e, 0xcb, 0x2b, 0x99, 0xd5, 0xec, 0xc5, 0xd2, 0x95, 0x97, 0x6b, 0x69, 0xd6, 0x5f, 0x93,
+	0x0a, 0xee, 0x2a, 0xd6, 0x4d, 0x8f, 0x35, 0x6c, 0xde, 0xf4, 0x8e, 0x29, 0xeb, 0xd5, 0x17, 0xb4,
+	0x96, 0x82, 0x46, 0x72, 0x1c, 0x6a, 0x40, 0x3f, 0x34, 0x60, 0xa1, 0xc3, 0xe8, 0x01, 0x65, 0x8c,
+	0x5a, 0x1a, 0x5f, 0xc9, 0xae, 0x1a, 0x8f, 0x41, 0x6d, 0x45, 0xab, 0x5d, 0xd8, 0x1d, 0x90, 0x8f,
+	0x87, 0x34, 0xa2, 0x5f, 0x1a, 0xb0, 0xcc, 0x29, 0x3b, 0xa6, 0x6c, 0xdd, 0xb2, 0x18, 0xe5, 0xbc,
+	0xde, 0xdb, 0x70, 0x6c, 0xea, 0xfa, 0x1b, 0x5b, 0x0d, 0xcc, 0x2b, 0x39, 0xb9, 0x0f, 0x5f, 0x4f,
+	0x67, 0xd0, 0xde, 0x38, 0x39, 0x75, 0x53, 0x5b, 0xb4, 0x3c, 0x96, 0x84, 0xe3, 0x07, 0x98, 0x61,
+	0x1e, 0xc0, 0x6c, 0x70, 0x90, 0xb7, 0x6c, 0xee, 0xa3, 0xbb, 0x30, 0xdd, 0x12, 0x1f, 0xbc, 0x62,
+	0x48, 0x03, 0x6b, 0xe9, 0x0c, 0x0c, 0x64, 0xd4, 0xe7, 0xb4, 0x3d, 0xd3, 0xf2, 0x93, 0x63, 0x2d,
+	0xcd, 0xfc, 0x49, 0x0e, 0x4a, 0xeb, 0xbb, 0x5b, 0x98, 0x72, 0xaf, 0xcb, 0x9a, 0x34, 0x85, 0xd3,
+	0x5c, 0x83, 0x59, 0x6e, 0xbb, 0xad, 0xae, 0x43, 0x98, 0x80, 0x56, 0xa6, 0x25, 0xe5, 0x92, 0xa6,
+	0x9c, 0xdd, 0x8b, 0xe1, 0x70, 0x82, 0x12, 0x5d, 0x01, 0x10, 0x12, 0x78, 0x87, 0x34, 0xa9, 0x55,
+	0xc9, 0xac, 0x1a, 0x17, 0x0b, 0x75, 0xa4, 0xf9, 0xe0, 0xf5, 0x10, 0x83, 0x63, 0x54, 0xe8, 0x49,
+	0xc8, 0x4b, 0x4b, 0x2b, 0x05, 0xa9, 0xa6, 0xac, 0xc9, 0xf3, 0x72, 0x19, 0x58, 0xe1, 0xd0, 0x33,
+	0x30, 0xa3, 0xbd, 0xac, 0x52, 0x94, 0x64, 0xf3, 0x9a, 0x6c, 0x26, 0x70, 0x83, 0x00, 0x2f, 0xd6,
+	0x77, 0x64, 0xbb, 0x96, 0xf4, 0xbb, 0xd8, 0xfa, 0x5e, 0xb3, 0x5d, 0x0b, 0x4b, 0x0c, 0xba, 0x05,
+	0xf9, 0x63, 0xca, 0xf6, 0x85, 0x27, 0x08, 0xd7, 0xfc, 0x72, 0xba, 0x8d, 0xbe, 0x2b, 0x58, 0xea,
+	0x45, 0x61, 0x9a, 0xfc, 0x89, 0x95, 0x10, 0x54, 0x03, 0xe0, 0x87, 0x1e, 0xf3, 0xe5, 0xf2, 0x2a,
+	0xf9, 0xd5, 0xec, 0xc5, 0x62, 0x7d, 0x4e, 0xac, 0x77, 0x2f, 0x84, 0xe2, 0x18, 0x85, 0xa0, 0x6f,
+	0x12, 0x9f, 0xb6, 0x3c, 0x66, 0x53, 0x5e, 0x99, 0x89, 0xe8, 0x37, 0x42, 0x28, 0x8e, 0x51, 0xa0,
+	0x57, 0x01, 0x71, 0xdf, 0x63, 0xa4, 0x45, 0xf5, 0x52, 0x5f, 0x21, 0xfc, 0xb0, 0x02, 0x72, 0x75,
+	0xcb, 0x7a, 0x75, 0x68, 0x6f, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x6b, 0xc0, 0x7c, 0xcc, 0x17,
+	0xa4, 0xdf, 0x5d, 0x83, 0xd9, 0x56, 0x2c, 0xea, 0xb4, 0x5f, 0x84, 0xa7, 0x1d, 0x8f, 0x48, 0x9c,
+	0xa0, 0x44, 0x14, 0x8a, 0x4c, 0x4b, 0x0a, 0xb2, 0xcb, 0xe5, 0xd4, 0x4e, 0x1b, 0xd8, 0x10, 0x69,
+	0x8a, 0x01, 0x39, 0x8e, 0x24, 0x9b, 0x7f, 0x37, 0xa4, 0x03, 0x07, 0xf9, 0x06, 0x5d, 0x8c, 0xe5,
+	0x34, 0x43, 0x6e, 0xdf, 0xec, 0x98, 0x7c, 0x74, 0x46, 0x22, 0xc8, 0xfc, 0x4f, 0x24, 0x82, 0xeb,
+	0x85, 0x9f, 0x7f, 0x58, 0x9d, 0x7a, 0xef, 0xaf, 0xab, 0x53, 0xe6, 0xcf, 0x0c, 0x98, 0x5d, 0xef,
+	0x74, 0x9c, 0xde, 0x4e, 0xc7, 0x97, 0x0b, 0x30, 0x61, 0xda, 0x62, 0x3d, 0xdc, 0x75, 0xf5, 0x42,
+	0x41, 0xc4, 0x77, 0x43, 0x42, 0xb0, 0xc6, 0x88, 0xf8, 0x39, 0xf0, 0x58, 0x93, 0xea, 0x70, 0x0b,
+	0xe3, 0x67, 0x53, 0x00, 0xb1, 0xc2, 0x89, 0x43, 0x3e, 0xb0, 0xa9, 0x63, 0x6d, 0x13, 0x97, 0xb4,
+	0x28, 0xd3, 0xc1, 0x11, 0x6e, 0xfd, 0x66, 0x0c, 0x87, 0x13, 0x94, 0xe6, 0xbf, 0x33, 0x50, 0xdc,
+	0xf0, 0x5c, 0xcb, 0xf6, 0x75, 0x70, 0xf9, 0xbd, 0xce, 0x50, 0xf2, 0xb8, 0xdd, 0xeb, 0x50, 0x2c,
+	0x31, 0xe8, 0x05, 0x98, 0xe6, 0x3e, 0xf1, 0xbb, 0x5c, 0xda, 0x53, 0xac, 0x3f, 0x11, 0xa4, 0xa5,
+	0x3d, 0x09, 0x3d, 0xed, 0x57, 0xe7, 0x43, 0x71, 0x0a, 0x84, 0x35, 0x83, 0xf0, 0x74, 0x6f, 0x5f,
+	0x6e, 0x94, 0x75, 0x53, 0x5d, 0x7b, 0xc1, 0xfd, 0x91, 0x8d, 0x3c, 0x7d, 0x67, 0x88, 0x02, 0x8f,
+	0xe0, 0x42, 0xc7, 0x80, 0x1c, 0xc2, 0xfd, 0xdb, 0x8c, 0xb8, 0x5c, 0xea, 0xba, 0x6d, 0xb7, 0xa9,
+	0x0e, 0xf8, 0x2f, 0xa5, 0x3b, 0x71, 0xc1, 0x11, 0xe9, 0xbd, 0x35, 0x24, 0x0d, 0x8f, 0xd0, 0x80,
+	0x9e, 0x86, 0x69, 0x46, 0x09, 0xf7, 0xdc, 0x4a, 0x5e, 0x2e, 0x3f, 0xcc, 0xca, 0x58, 0x42, 0xb1,
+	0xc6, 0x8a, 0x84, 0xd6, 0xa6, 0x9c, 0x93, 0x56, 0x90, 0x5e, 0xc3, 0x84, 0xb6, 0xad, 0xc0, 0x38,
+	0xc0, 0x9b, 0xbf, 0x31, 0xa0, 0xbc, 0xc1, 0x28, 0xf1, 0xe9, 0x24, 0x6e, 0xf1, 0xd0, 0x27, 0x8e,
+	0xd6, 0x61, 0x5e, 0x7e, 0xdf, 0x25, 0x8e, 0x6d, 0xa9, 0x33, 0xc8, 0x49, 0xe6, 0xff, 0xd7, 0xcc,
+	0xf3, 0x9b, 0x49, 0x34, 0x1e, 0xa4, 0x37, 0x7f, 0x9d, 0x83, 0x72, 0x83, 0x3a, 0x34, 0x32, 0x79,
+	0x13, 0x50, 0x8b, 0x91, 0x26, 0xdd, 0xa5, 0xcc, 0xf6, 0xac, 0x3d, 0xda, 0xf4, 0x5c, 0x8b, 0x4b,
+	0x37, 0xca, 0xd6, 0xff, 0x4f, 0xec, 0xef, 0xcd, 0x21, 0x2c, 0x1e, 0xc1, 0x81, 0x1c, 0x28, 0x77,
+	0x98, 0xfc, 0x2d, 0xf7, 0x5c, 0x79, 0x59, 0xe9, 0xca, 0x57, 0xd2, 0x1d, 0xe9, 0x6e, 0x9c, 0xb5,
+	0xbe, 0x78, 0xd2, 0xaf, 0x96, 0x13, 0x20, 0x9c, 0x14, 0x8e, 0xbe, 0x01, 0x0b, 0x1e, 0xeb, 0x1c,
+	0x12, 0xb7, 0x41, 0x3b, 0xd4, 0xb5, 0xa8, 0xeb, 0x73, 0xb9, 0x91, 0x85, 0xfa, 0x92, 0xa8, 0x45,
+	0x76, 0x06, 0x70, 0x78, 0x88, 0x1a, 0xbd, 0x09, 0x8b, 0x1d, 0xe6, 0x75, 0x48, 0x4b, 0x6e, 0xcc,
+	0xae, 0xe7, 0xd8, 0xcd, 0x9e, 0xde, 0xce, 0x67, 0x4f, 0xfa, 0xd5, 0xc5, 0xdd, 0x41, 0xe4, 0x69,
+	0xbf, 0x7a, 0x4e, 0x6e, 0x9d, 0x80, 0x44, 0x48, 0x3c, 0x2c, 0x26, 0xe6, 0x06, 0xf9, 0xb1, 0x6e,
+	0xf0, 0xa1, 0x01, 0x97, 0xec, 0x96, 0xeb, 0x31, 0x2a, 0xae, 0x08, 0x8a, 0x29, 0xb1, 0x6e, 0x30,
+	0xe6, 0xb1, 0x37, 0x6c, 0xff, 0x70, 0xc3, 0xe9, 0x72, 0x9f, 0xb2, 0x3a, 0xa3, 0xe4, 0xc8, 0x76,
+	0x5b, 0xbb, 0x9e, 0x4f, 0x5d, 0xdf, 0x26, 0x8e, 0xf4, 0xc8, 0x42, 0xfd, 0xea, 0x49, 0xbf, 0x7a,
+	0x69, 0x6b, 0x42, 0x5e, 0x3c, 0xb1, 0x36, 0x73, 0x0b, 0x0a, 0x8d, 0xae, 0x0e, 0xdb, 0x97, 0xa0,
+	0x60, 0xe9, 0xdf, 0xda, 0x39, 0x82, 0xfc, 0x11, 0xd2, 0x9c, 0xf6, 0xab, 0x65, 0x51, 0x21, 0xd7,
+	0x02, 0x00, 0x0e, 0x59, 0xcc, 0x5f, 0x19, 0x50, 0x91, 0xce, 0xb9, 0x47, 0x1d, 0xda, 0xf4, 0x3d,
+	0x86, 0xe9, 0x3b, 0x5d, 0x9b, 0xd1, 0x36, 0x75, 0x7d, 0xf4, 0x45, 0xc8, 0x1e, 0xd1, 0x9e, 0x4e,
+	0x5d, 0x25, 0x2d, 0x36, 0xfb, 0x1a, 0xed, 0x61, 0x01, 0x47, 0x37, 0xa0, 0xe0, 0x75, 0x44, 0xfa,
+	0xf0, 0x98, 0x4e, 0x5d, 0xcf, 0x04, 0xaa, 0x77, 0x34, 0xfc, 0xb4, 0x5f, 0x3d, 0x9f, 0x10, 0x1f,
+	0x20, 0x70, 0xc8, 0x2a, 0x0e, 0xe5, 0x98, 0x38, 0x5d, 0x2a, 0x1c, 0x25, 0x3c, 0x94, 0xbb, 0x12,
+	0x82, 0x35, 0xc6, 0x7c, 0x1a, 0x0a, 0x52, 0x0c, 0xbf, 0x7b, 0x19, 0x2d, 0x40, 0x16, 0x93, 0x7b,
+	0xd2, 0xaa, 0x59, 0x2c, 0x7e, 0xc6, 0xee, 0x83, 0x1d, 0x80, 0x9b, 0xd4, 0x0f, 0x42, 0x68, 0x1d,
+	0xe6, 0x83, 0x4b, 0x31, 0x79, 0x57, 0x87, 0x71, 0x89, 0x93, 0x68, 0x3c, 0x48, 0x6f, 0xbe, 0x05,
+	0x45, 0x79, 0x9f, 0x8b, 0x62, 0x28, 0x2a, 0xbc, 0x8c, 0x07, 0x14, 0x5e, 0x41, 0x35, 0x95, 0x19,
+	0x57, 0x4d, 0xc5, 0xcc, 0x75, 0xa0, 0xac, 0x78, 0x83, 0x52, 0x33, 0x95, 0x86, 0x67, 0xa1, 0x10,
+	0x98, 0xa9, 0xb5, 0x84, 0x2d, 0x46, 0x20, 0x08, 0x87, 0x14, 0x31, 0x6d, 0x87, 0x90, 0xa8, 0x4d,
+	0xd2, 0x29, 0x8b, 0xd5, 0x91, 0x99, 0x07, 0xd7, 0x91, 0x31, 0x4d, 0x3f, 0x80, 0xca, 0xb8, 0xbe,
+	0xe4, 0x11, 0xaa, 0xa7, 0xf4, 0xa6, 0x98, 0xef, 0x1b, 0xb0, 0x10, 0x97, 0x94, 0xfe, 0xf8, 0xd2,
+	0x2b, 0x39, 0xbb, 0x6e, 0x8e, 0xed, 0xc8, 0x2f, 0x0c, 0x58, 0x4a, 0x2c, 0x6d, 0xa2, 0x13, 0x9f,
+	0xc0, 0xa8, 0xb8, 0x73, 0x64, 0x27, 0x70, 0x8e, 0x3f, 0x67, 0xa0, 0x7c, 0x8b, 0xec, 0x53, 0x27,
+	0x88, 0x54, 0xf4, 0x7d, 0x28, 0xb5, 0x89, 0xdf, 0x3c, 0x94, 0xd0, 0xa0, 0xc7, 0x6a, 0xa4, 0xbb,
+	0x36, 0x12, 0x92, 0x6a, 0xdb, 0x91, 0x98, 0x1b, 0xae, 0xcf, 0x7a, 0xf5, 0x73, 0xda, 0xa4, 0x52,
+	0x0c, 0x83, 0xe3, 0xda, 0x64, 0x63, 0x2c, 0xbf, 0x6f, 0xbc, 0xdb, 0x11, 0x05, 0xe0, 0xe4, 0xfd,
+	0x78, 0xc2, 0x84, 0x58, 0x56, 0x8b, 0x1a, 0xe3, 0xed, 0x01, 0xf9, 0x78, 0x48, 0xe3, 0xf2, 0xcb,
+	0xb0, 0x30, 0x68, 0xbc, 0xc8, 0x3f, 0x61, 0x56, 0x54, 0x89, 0x70, 0x09, 0xf2, 0x32, 0x4f, 0xa9,
+	0xc3, 0xc1, 0xea, 0xe3, 0x7a, 0xe6, 0x9a, 0x21, 0xd3, 0xeb, 0x38, 0x43, 0x1e, 0x53, 0x7a, 0x4d,
+	0x88, 0x7f, 0xc8, 0xf4, 0xfa, 0x3b, 0x03, 0x72, 0xb2, 0xb5, 0x79, 0x0b, 0x0a, 0x62, 0xff, 0x2c,
+	0xe2, 0x13, 0x69, 0x57, 0xea, 0xa6, 0x5a, 0x70, 0x6f, 0x53, 0x9f, 0x44, 0xde, 0x16, 0x40, 0x70,
+	0x28, 0x11, 0x61, 0xc8, 0xdb, 0x3e, 0x6d, 0x07, 0x07, 0xf9, 0xdc, 0x58, 0xd1, 0x7a, 0xa4, 0x53,
+	0xc3, 0xe4, 0xde, 0x8d, 0x77, 0x7d, 0xea, 0x8a, 0xc3, 0x88, 0x42, 0x63, 0x4b, 0xc8, 0xc0, 0x4a,
+	0x94, 0xf9, 0x4f, 0x03, 0x42, 0x55, 0xc2, 0xf9, 0x39, 0x75, 0x0e, 0x6e, 0xd9, 0xee, 0x91, 0xde,
+	0xd6, 0xd0, 0x9c, 0x3d, 0x0d, 0xc7, 0x21, 0xc5, 0xa8, 0xeb, 0x21, 0x33, 0xd9, 0xf5, 0x20, 0x14,
+	0x36, 0x3d, 0xd7, 0xb7, 0xdd, 0xee, 0x50, 0xb4, 0x6d, 0x68, 0x38, 0x0e, 0x29, 0x44, 0x49, 0xc7,
+	0x68, 0x9b, 0xd8, 0xae, 0xed, 0xb6, 0xc4, 0x22, 0x36, 0xbc, 0xae, 0xeb, 0xcb, 0xda, 0x46, 0x97,
+	0x74, 0x78, 0x08, 0x8b, 0x47, 0x70, 0x98, 0xff, 0xca, 0x41, 0x49, 0xac, 0x39, 0xb8, 0xe7, 0x5e,
+	0x84, 0xb2, 0x13, 0xf7, 0x02, 0xbd, 0xf6, 0xf3, 0xda, 0x94, 0x64, 0x5c, 0xe3, 0x24, 0xad, 0x60,
+	0x3e, 0x88, 0xdf, 0xd0, 0x7a, 0x0f, 0x42, 0xe6, 0x64, 0x75, 0x90, 0xa4, 0x15, 0xd9, 0xeb, 0x9e,
+	0x88, 0x0f, 0x5d, 0xe3, 0x85, 0x47, 0xf4, 0x86, 0x00, 0x62, 0x85, 0x43, 0xdb, 0x70, 0x8e, 0x38,
+	0x8e, 0x77, 0x4f, 0x02, 0xeb, 0x9e, 0x77, 0xd4, 0x26, 0xec, 0x88, 0xcb, 0xb1, 0x44, 0xa1, 0xfe,
+	0x05, 0xcd, 0x72, 0x6e, 0x7d, 0x98, 0x04, 0x8f, 0xe2, 0x1b, 0x75, 0x6c, 0xb9, 0x09, 0x8f, 0xed,
+	0x10, 0x96, 0x06, 0x40, 0x32, 0xca, 0xf5, 0x8c, 0xe0, 0xaa, 0x96, 0xb3, 0x84, 0x47, 0xd0, 0x9c,
+	0x8e, 0x81, 0xe3, 0x91, 0x12, 0xd1, 0x75, 0x98, 0x13, 0x9e, 0xec, 0x75, 0xfd, 0xa0, 0x82, 0xcf,
+	0xcb, 0xe3, 0x46, 0x27, 0xfd, 0xea, 0xdc, 0xed, 0x04, 0x06, 0x0f, 0x50, 0x8a, 0xcd, 0x75, 0xec,
+	0xb6, 0xed, 0x57, 0x66, 0x24, 0x4b, 0xb8, 0xb9, 0xb7, 0x04, 0x10, 0x2b, 0x5c, 0xc2, 0x03, 0x0b,
+	0x67, 0x7a, 0xe0, 0x06, 0x2c, 0x72, 0xea, 0x5a, 0x5b, 0xae, 0x2d, 0x0a, 0xc9, 0x1b, 0xc7, 0xb2,
+	0x3e, 0x2f, 0xc9, 0x83, 0x38, 0x2f, 0x8a, 0xeb, 0xbd, 0x41, 0x24, 0x1e, 0xa6, 0x37, 0xff, 0x94,
+	0x05, 0xa4, 0x5a, 0x1f, 0x4b, 0x15, 0x65, 0x2a, 0x2f, 0x8a, 0x06, 0x4d, 0xb7, 0x4e, 0xc6, 0x40,
+	0x83, 0xa6, 0xbb, 0xa6, 0x00, 0x8f, 0xb6, 0xa1, 0xa8, 0xf2, 0x53, 0x14, 0x73, 0x6b, 0x9a, 0xb8,
+	0xb8, 0x13, 0x20, 0x4e, 0xfb, 0xd5, 0xe5, 0x84, 0x9a, 0x10, 0x23, 0x9b, 0xe7, 0x48, 0x02, 0xba,
+	0x02, 0x40, 0x3a, 0x76, 0x7c, 0x7c, 0x5a, 0x8c, 0x86, 0x68, 0xd1, 0x20, 0x04, 0xc7, 0xa8, 0xd0,
+	0x2b, 0x90, 0xf3, 0x1f, 0xae, 0xc1, 0x2d, 0xc8, 0xfe, 0x5d, 0xb4, 0xb3, 0x52, 0x82, 0xd0, 0x2e,
+	0x83, 0x82, 0x0b, 0xb3, 0x74, 0x6f, 0x1a, 0x6a, 0xdf, 0x0c, 0x31, 0x38, 0x46, 0x85, 0xbe, 0x09,
+	0x85, 0x03, 0x5d, 0xcf, 0xca, 0xd3, 0x4d, 0x9d, 0x67, 0x83, 0x2a, 0x58, 0x4d, 0x70, 0x82, 0x2f,
+	0x1c, 0x4a, 0x43, 0x5f, 0x85, 0x12, 0xef, 0xee, 0x87, 0x25, 0x80, 0x72, 0x89, 0xf0, 0xbe, 0xdd,
+	0x8b, 0x50, 0x38, 0x4e, 0x67, 0xbe, 0x03, 0xc5, 0x6d, 0xbb, 0xc9, 0x3c, 0xd9, 0x92, 0x3f, 0x03,
+	0x33, 0x3c, 0xd1, 0x6f, 0x86, 0x27, 0x19, 0xb8, 0x6a, 0x80, 0x17, 0x3e, 0xea, 0x12, 0xd7, 0x53,
+	0x5d, 0x65, 0x3e, 0xf2, 0xd1, 0xd7, 0x05, 0x10, 0x2b, 0xdc, 0xf5, 0x25, 0x51, 0x65, 0xfc, 0xf8,
+	0xa3, 0xea, 0xd4, 0x07, 0x1f, 0x55, 0xa7, 0x3e, 0xfc, 0x48, 0x57, 0x1c, 0xbf, 0x07, 0x80, 0x9d,
+	0xfd, 0xef, 0xd2, 0xa6, 0xca, 0xdd, 0xa9, 0xa6, 0xac, 0xc1, 0x70, 0x5f, 0x4e, 0x59, 0x33, 0x03,
+	0x95, 0x63, 0x0c, 0x87, 0x13, 0x94, 0x68, 0x0d, 0x8a, 0xe1, 0xfc, 0x54, 0xfb, 0xc7, 0x62, 0xe0,
+	0x6f, 0xe1, 0x90, 0x15, 0x47, 0x34, 0x89, 0x8b, 0x24, 0x77, 0xe6, 0x45, 0x52, 0x87, 0x6c, 0xd7,
+	0xb6, 0xf4, 0xfc, 0xe2, 0x52, 0x70, 0x91, 0xdf, 0xd9, 0x6a, 0x9c, 0xf6, 0xab, 0x4f, 0x8c, 0x7b,
+	0xb6, 0xf0, 0x7b, 0x1d, 0xca, 0x6b, 0x77, 0xb6, 0x1a, 0x58, 0x30, 0x8f, 0xca, 0x6a, 0xd3, 0x13,
+	0x66, 0xb5, 0x2b, 0x00, 0xad, 0x68, 0x0a, 0xa4, 0x92, 0x46, 0xe8, 0x88, 0xb1, 0xe9, 0x4f, 0x8c,
+	0x0a, 0x71, 0x58, 0x6c, 0x32, 0x4a, 0x82, 0x69, 0x0c, 0xf7, 0x49, 0x5b, 0xcd, 0x95, 0x27, 0x8b,
+	0x89, 0x0b, 0x5a, 0xcd, 0xe2, 0xc6, 0xa0, 0x30, 0x3c, 0x2c, 0x1f, 0x79, 0xb0, 0x68, 0xe9, 0x86,
+	0x3d, 0x52, 0x5a, 0x9c, 0x58, 0xa9, 0xcc, 0x58, 0x8d, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa, 0x0e,
+	0x2c, 0x07, 0xc0, 0xe1, 0xa9, 0x89, 0xcc, 0xfa, 0xd9, 0xfa, 0xca, 0x49, 0xbf, 0xba, 0xdc, 0x18,
+	0x4b, 0x85, 0x1f, 0x20, 0x01, 0x59, 0x30, 0xed, 0xa8, 0x2a, 0xb9, 0x24, 0x2b, 0x9b, 0xaf, 0xa5,
+	0x5b, 0x45, 0xe4, 0xfd, 0xb5, 0x78, 0x75, 0x1c, 0x4e, 0xc0, 0x74, 0x61, 0xac, 0x65, 0xa3, 0x77,
+	0xa1, 0x44, 0x5c, 0xd7, 0xf3, 0x89, 0x9a, 0xe3, 0xcc, 0x4a, 0x55, 0xeb, 0x13, 0xab, 0x5a, 0x8f,
+	0x64, 0x0c, 0x54, 0xe3, 0x31, 0x0c, 0x8e, 0xab, 0x42, 0xf7, 0x60, 0xde, 0xbb, 0xe7, 0x52, 0x86,
+	0xe9, 0x01, 0x65, 0xd4, 0x6d, 0x52, 0x5e, 0x29, 0x4b, 0xed, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47,
+	0x2e, 0x9d, 0x84, 0x73, 0x3c, 0xa8, 0x05, 0xd5, 0x44, 0x6e, 0x75, 0x89, 0x63, 0x7f, 0x8f, 0x32,
+	0x5e, 0x99, 0x8b, 0x46, 0xff, 0x9b, 0x21, 0x14, 0xc7, 0x28, 0x50, 0x17, 0xca, 0xed, 0xf8, 0x95,
+	0x51, 0x59, 0x94, 0x66, 0x5e, 0x4b, 0x67, 0xe6, 0xf0, 0xa5, 0x16, 0x95, 0x41, 0x09, 0x1c, 0x4e,
+	0x6a, 0x59, 0x7e, 0x01, 0x4a, 0x0f, 0xd9, 0x21, 0x88, 0x0e, 0x63, 0xf0, 0x40, 0x26, 0xea, 0x30,
+	0xfe, 0x90, 0x81, 0xb9, 0xe4, 0x36, 0x0e, 0x5c, 0x87, 0xf9, 0x54, 0xd7, 0x61, 0xd0, 0xcb, 0x1a,
+	0x63, 0xdf, 0x80, 0x82, 0xfc, 0x9c, 0x1d, 0x9b, 0x9f, 0x75, 0x1a, 0xcc, 0x3d, 0x4a, 0x1a, 0xac,
+	0x01, 0x88, 0x62, 0x85, 0x79, 0x8e, 0x43, 0x99, 0x1e, 0xab, 0xa9, 0xb7, 0x9e, 0x10, 0x8a, 0x63,
+	0x14, 0xa2, 0xa4, 0xde, 0x77, 0xbc, 0xe6, 0x91, 0xdc, 0x82, 0x20, 0x7a, 0x65, 0xee, 0x2b, 0xa8,
+	0x92, 0xba, 0x3e, 0x84, 0xc5, 0x23, 0x38, 0xcc, 0x1e, 0x9c, 0xdf, 0x25, 0x4c, 0x14, 0x39, 0x51,
+	0xa4, 0xc8, 0x9e, 0xe5, 0xed, 0xa1, 0x8e, 0xe8, 0xd2, 0xa4, 0x11, 0x17, 0x6d, 0x7e, 0x04, 0x8b,
+	0xba, 0x22, 0xf3, 0x2f, 0x06, 0x5c, 0x18, 0xa9, 0xfb, 0x73, 0xe8, 0xc8, 0xde, 0x4e, 0x76, 0x64,
+	0x2f, 0xa6, 0x1c, 0x0a, 0x8f, 0xb2, 0x76, 0x4c, 0x7f, 0x36, 0x03, 0xf9, 0x5d, 0x51, 0x09, 0x9b,
+	0x9f, 0x18, 0x30, 0x2b, 0x7f, 0x4d, 0x32, 0x93, 0xaf, 0x26, 0x9f, 0x6a, 0x8a, 0x8f, 0xef, 0x99,
+	0xe6, 0x71, 0x0c, 0xed, 0xdf, 0x37, 0x20, 0x39, 0x0d, 0x47, 0x2f, 0xab, 0x10, 0x30, 0xc2, 0x71,
+	0xf5, 0x84, 0xee, 0xff, 0xd2, 0xb8, 0x96, 0xf4, 0x5c, 0xaa, 0x69, 0xe5, 0xb3, 0x50, 0xc4, 0x9e,
+	0xe7, 0xef, 0x12, 0xff, 0x90, 0x8b, 0xbd, 0xeb, 0x88, 0x1f, 0x7a, 0x7b, 0xe5, 0xde, 0x49, 0x0c,
+	0x56, 0x70, 0xf3, 0xa7, 0x06, 0x5c, 0x18, 0xfb, 0x02, 0x27, 0xb2, 0x48, 0x33, 0xfc, 0xd2, 0x2b,
+	0x0a, 0x1d, 0x39, 0xa2, 0xc3, 0x31, 0x2a, 0xd1, 0x4b, 0x26, 0x9e, 0xed, 0x06, 0x7b, 0xc9, 0x84,
+	0x36, 0x9c, 0xa4, 0x35, 0xff, 0x91, 0x01, 0xfd, 0xe4, 0xf5, 0x5f, 0x76, 0xfa, 0xa7, 0x07, 0x1e,
+	0xdc, 0xe6, 0x92, 0x0f, 0x6e, 0xe1, 0xeb, 0x5a, 0xec, 0xc5, 0x29, 0xfb, 0xe0, 0x17, 0x27, 0xf4,
+	0x7c, 0xf8, 0x88, 0xa5, 0x7c, 0x68, 0x25, 0xf9, 0x88, 0x75, 0xda, 0xaf, 0xce, 0x6a, 0xe1, 0xc9,
+	0x47, 0xad, 0x37, 0x61, 0xc6, 0xa2, 0x3e, 0xb1, 0x1d, 0xd5, 0x17, 0xa6, 0x7e, 0x96, 0x51, 0xc2,
+	0x1a, 0x8a, 0xb5, 0x5e, 0x12, 0x36, 0xe9, 0x0f, 0x1c, 0x08, 0x14, 0x09, 0xbb, 0xe9, 0x59, 0xaa,
+	0x23, 0xc9, 0x47, 0x09, 0x7b, 0xc3, 0xb3, 0x28, 0x96, 0x18, 0xf3, 0x03, 0x03, 0x4a, 0x4a, 0xd2,
+	0x06, 0xe9, 0x72, 0x8a, 0x2e, 0x87, 0xab, 0x50, 0xc7, 0x7d, 0x21, 0xfe, 0x5a, 0x79, 0xda, 0xaf,
+	0x16, 0x25, 0x99, 0x6c, 0x66, 0x46, 0xbc, 0xca, 0x65, 0xce, 0xd8, 0xa3, 0x27, 0x21, 0x2f, 0x03,
+	0x48, 0x6f, 0x66, 0xf4, 0xec, 0x2a, 0x80, 0x58, 0xe1, 0xcc, 0xcf, 0x32, 0x50, 0x4e, 0x2c, 0x2e,
+	0x45, 0x5f, 0x10, 0x8e, 0x50, 0x33, 0x29, 0xc6, 0xf2, 0xe3, 0xff, 0xe4, 0xa0, 0xaf, 0xaf, 0xe9,
+	0x47, 0xb9, 0xbe, 0xbe, 0x05, 0xd3, 0x4d, 0xb1, 0x47, 0xc1, 0x7f, 0x66, 0x2e, 0x4f, 0x72, 0x9c,
+	0x72, 0x77, 0x23, 0x6f, 0x94, 0x9f, 0x1c, 0x6b, 0x81, 0xe8, 0x26, 0x2c, 0x32, 0xea, 0xb3, 0xde,
+	0xfa, 0x81, 0x4f, 0x59, 0x7c, 0x98, 0x90, 0x8f, 0xaa, 0x6f, 0x3c, 0x48, 0x80, 0x87, 0x79, 0xcc,
+	0x7d, 0x98, 0xbd, 0x4d, 0xf6, 0x9d, 0xf0, 0xa1, 0x11, 0x43, 0xd9, 0x76, 0x9b, 0x4e, 0xd7, 0xa2,
+	0x2a, 0xa1, 0x07, 0xd9, 0x2b, 0x08, 0xda, 0xad, 0x38, 0xf2, 0xb4, 0x5f, 0x3d, 0x97, 0x00, 0xa8,
+	0x97, 0x35, 0x9c, 0x14, 0x61, 0x3a, 0x90, 0xfb, 0x1c, 0x3b, 0xc9, 0x6f, 0x43, 0x31, 0xaa, 0xf5,
+	0x1f, 0xb3, 0x4a, 0xf3, 0x6d, 0x28, 0x08, 0x8f, 0x0f, 0x7a, 0xd4, 0x33, 0xaa, 0xa4, 0x64, 0xed,
+	0x95, 0x49, 0x53, 0x7b, 0xc9, 0xe7, 0xea, 0x3b, 0x1d, 0xeb, 0x11, 0x9f, 0xab, 0x33, 0x8f, 0x72,
+	0xf3, 0x65, 0x27, 0xbc, 0xf9, 0xae, 0x80, 0xfa, 0x4b, 0x8f, 0xb8, 0x64, 0x54, 0x01, 0x11, 0xbb,
+	0x64, 0xe2, 0xf7, 0x7f, 0xec, 0x85, 0xe1, 0x47, 0x06, 0x80, 0x1c, 0xe5, 0xc9, 0x31, 0x52, 0x8a,
+	0x3f, 0x46, 0xdc, 0x81, 0x69, 0x4f, 0x79, 0xa4, 0x7a, 0xb2, 0x9e, 0x70, 0x5e, 0x1c, 0x06, 0x92,
+	0xf2, 0x49, 0xac, 0x85, 0xd5, 0x5f, 0xfd, 0xf8, 0xfe, 0xca, 0xd4, 0x27, 0xf7, 0x57, 0xa6, 0x3e,
+	0xbd, 0xbf, 0x32, 0xf5, 0xde, 0xc9, 0x8a, 0xf1, 0xf1, 0xc9, 0x8a, 0xf1, 0xc9, 0xc9, 0x8a, 0xf1,
+	0xe9, 0xc9, 0x8a, 0xf1, 0xd9, 0xc9, 0x8a, 0xf1, 0xc1, 0xdf, 0x56, 0xa6, 0xde, 0x7c, 0x2a, 0xcd,
+	0x5f, 0x25, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xda, 0x63, 0x4c, 0x51, 0x29, 0x00, 0x00,
 }
 
 func (m *APIGroup) Marshal() (dAtA []byte, err error) {
@@ -1983,6 +1986,16 @@ func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
+		i--
+		if *m.IgnoreStoreReadErrorWithClusterBreakingPotential {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x30
+	}
 	if len(m.DryRun) > 0 {
 		for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- {
 			i -= len(m.DryRun[iNdEx])
@@ -3773,6 +3786,9 @@ func (m *DeleteOptions) Size() (n int) {
 			n += 1 + l + sovGenerated(uint64(l))
 		}
 	}
+	if m.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
+		n += 2
+	}
 	return n
 }
 
@@ -4506,6 +4522,7 @@ func (this *DeleteOptions) String() string {
 		`OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`,
 		`PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`,
 		`DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`,
+		`IgnoreStoreReadErrorWithClusterBreakingPotential:` + valueToStringGenerated(this.IgnoreStoreReadErrorWithClusterBreakingPotential) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -6456,6 +6473,27 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error {
 			}
 			m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IgnoreStoreReadErrorWithClusterBreakingPotential", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.IgnoreStoreReadErrorWithClusterBreakingPotential = &b
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
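
For context on the generated marshal/unmarshal code above: the new DeleteOptions field is protobuf field number 6 with the varint wire type, so on the wire the key byte is (6<<3)|0 = 0x30 followed by a one-byte 0/1 payload, and the unmarshaller's case 6 decodes that varint back into a *bool. A minimal standalone sketch of the framing (illustrative only, not the generated code itself):

    package main

    import "fmt"

    // encodeBoolField6 produces the same wire bytes as the generated marshaller:
    // one key byte (fieldNumber<<3 | wireType), then a one-byte varint payload.
    func encodeBoolField6(v bool) []byte {
        key := byte(6<<3 | 0) // 0x30: field number 6, varint wire type
        payload := byte(0)
        if v {
            payload = 1
        }
        return []byte{key, payload}
    }

    func main() {
        fmt.Printf("% x\n", encodeBoolField6(true)) // prints "30 01"
    }
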
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
index 18dd0b067c..865d3e7caa 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
@@ -315,6 +315,21 @@ message DeleteOptions {
   // +optional
   // +listType=atomic
   repeated string dryRun = 5;
+
+  // if set to true, it will trigger an unsafe deletion of the resource in
+  // case the normal deletion flow fails with a corrupt object error.
+  // A resource is considered corrupt if it cannot be retrieved from
+  // the underlying storage successfully, either because a) its data
+  // cannot be transformed (e.g. a decryption failure), or b) it fails
+  // to decode into an object.
+  // NOTE: unsafe deletion ignores finalizer constraints, skips
+  // precondition checks, and removes the object from the storage.
+  // WARNING: This may potentially break the cluster if the workload
+  // associated with the resource being unsafe-deleted relies on the
+  // normal deletion flow. Use only if you REALLY know what you are doing.
+  // The default value is false, and the user must opt in to enable it.
+  // +optional
+  optional bool ignoreStoreReadErrorWithClusterBreakingPotential = 6;
 }
 
 // Duration is a wrapper around time.Duration which supports correct
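
A usage sketch for the new option (hedged: it assumes a client-go build whose apimachinery includes this field; the pod accessor and names are illustrative):

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/utils/ptr"
    )

    // unsafeDelete opts in to the unsafe deletion path. Per the validation added
    // later in this diff, every other delete option must be left unset.
    func unsafeDelete(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
        return cs.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{
            IgnoreStoreReadErrorWithClusterBreakingPotential: ptr.To(true),
        })
    }

    func main() {}
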
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
index 473adb9ef5..4cf3f47956 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -439,6 +439,20 @@ const (
 	//
 	// The annotation is added to a "Bookmark" event.
 	InitialEventsAnnotationKey = "k8s.io/initial-events-end"
+
+	// InitialEventsListBlueprintAnnotationKey is the name of the key
+	// where an empty, versioned list is encoded in the requested format
+	// (e.g., protobuf, JSON, CBOR), then base64-encoded and stored as a string.
+	//
+	// This encoding matches the request encoding format, which may be
+	// protobuf, JSON, CBOR, or others, depending on what the client requested.
+	// This ensures that the reconstructed list can be processed through the
+	// same decoder chain that would handle a standard LIST call response.
+	//
+	// The annotation is added to a "Bookmark" event and is used by clients
+	// to guarantee format consistency when reconstructing
+	// the list during WatchList processing.
+	InitialEventsListBlueprintAnnotationKey = "kubernetes.io/initial-events-list-blueprint"
 )
 
 // resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch
@@ -546,6 +560,21 @@ type DeleteOptions struct {
 	// +optional
 	// +listType=atomic
 	DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"`
+
+	// if set to true, it will trigger an unsafe deletion of the resource in
+	// case the normal deletion flow fails with a corrupt object error.
+	// A resource is considered corrupt if it cannot be retrieved from
+	// the underlying storage successfully, either because a) its data
+	// cannot be transformed (e.g. a decryption failure), or b) it fails
+	// to decode into an object.
+	// NOTE: unsafe deletion ignores finalizer constraints, skips
+	// precondition checks, and removes the object from the storage.
+	// WARNING: This may potentially break the cluster if the workload
+	// associated with the resource being unsafe-deleted relies on the
+	// normal deletion flow. Use only if you REALLY know what you are doing.
+	// The default value is false, and the user must opt in to enable it.
+	// +optional
+	IgnoreStoreReadErrorWithClusterBreakingPotential *bool `json:"ignoreStoreReadErrorWithClusterBreakingPotential,omitempty" protobuf:"varint,6,opt,name=ignoreStoreReadErrorWithClusterBreakingPotential"`
 }
 
 const (
@@ -902,6 +931,22 @@ const (
 	// Status code 500
 	StatusReasonServerTimeout StatusReason = "ServerTimeout"
 
+	// StatusReasonStoreReadError means that the server encountered an error while
+	// retrieving resources from the backend object store.
+	// This may be due to a backend database error, or because processing of the read
+	// resource failed.
+	// Details:
+	//   "kind" string - the kind attribute of the resource being acted on.
+	//   "name" string - the prefix where the reading error(s) occurred
+	//   "causes" []StatusCause
+	//      - (optional):
+	//        - "type" CauseType - CauseTypeUnexpectedServerResponse
+	//        - "message" string - the error message from the store backend
+	//        - "field" string - the full path with the key of the resource that failed reading
+	//
+	// Status code 500
+	StatusReasonStoreReadError StatusReason = "StorageReadError"
+
 	// StatusReasonTimeout means that the request could not be completed within the given time.
 	// Clients can get this response only when they specified a timeout param in the request,
 	// or if the server cannot complete the operation within a reasonable amount of time.
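
Callers can detect the new reason with the standard apierrors helper, which inspects Status().Reason. A small sketch, assuming the usual k8s.io/apimachinery error helpers:

    package main

    import (
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // isStoreReadError reports whether err carries the new StorageReadError
    // reason, i.e. the server failed to read the object from the backend store.
    func isStoreReadError(err error) bool {
        return apierrors.ReasonForError(err) == metav1.StatusReasonStoreReadError
    }

    func main() {}
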
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
index 1fa37215cd..405496d3df 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
@@ -129,6 +129,7 @@ var map_DeleteOptions = map[string]string{
 	"orphanDependents":   "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
 	"propagationPolicy":  "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
 	"dryRun":             "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+	"ignoreStoreReadErrorWithClusterBreakingPotential": "if set to true, it will trigger an unsafe deletion of the resource in case the normal deletion flow fails with a corrupt object error. A resource is considered corrupt if it cannot be retrieved from the underlying storage successfully, either because a) its data cannot be transformed (e.g. a decryption failure), or b) it fails to decode into an object. NOTE: unsafe deletion ignores finalizer constraints, skips precondition checks, and removes the object from the storage. WARNING: This may potentially break the cluster if the workload associated with the resource being unsafe-deleted relies on the normal deletion flow. Use only if you REALLY know what you are doing. The default value is false, and the user must opt in to enable it.",
 }
 
 func (DeleteOptions) SwaggerDoc() map[string]string {
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
index 0f58d66c09..71f7b163a1 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
@@ -20,6 +20,7 @@ import (
 	gojson "encoding/json"
 	"fmt"
 	"io"
+	"math/big"
 	"strings"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -125,6 +126,29 @@ func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, err
 	return i, true, nil
 }
 
+// NestedNumberAsFloat64 returns the float64 value of a nested field. If the field's value is a
+// float64, it is returned. If the field's value is an int64 that can be losslessly converted to
+// float64, it will be converted and returned. Returns false if the value is not found, and an error
+// if it is not a float64 or an int64 that can be accurately represented as a float64.
+func NestedNumberAsFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) {
+	val, found, err := NestedFieldNoCopy(obj, fields...)
+	if !found || err != nil {
+		return 0, found, err
+	}
+	switch x := val.(type) {
+	case int64:
+		f, accuracy := big.NewInt(x).Float64()
+		if accuracy != big.Exact {
+			return 0, false, fmt.Errorf("%v accessor error: int64 value %v cannot be losslessly converted to float64", jsonPath(fields), x)
+		}
+		return f, true, nil
+	case float64:
+		return x, true, nil
+	default:
+		return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64 or int64", jsonPath(fields), val, val)
+	}
+}
+
 // NestedStringSlice returns a copy of []string value of a nested field.
 // Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice.
 func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) {
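
A short usage sketch for the helper added above (the object literal is illustrative): small int64 values convert losslessly, while an int64 above 2^53 would return an error.

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    )

    func main() {
        obj := map[string]interface{}{
            "spec": map[string]interface{}{"replicas": int64(3)},
        }
        // int64(3) converts losslessly, so this yields 3, true, <nil>.
        f, found, err := unstructured.NestedNumberAsFloat64(obj, "spec", "replicas")
        fmt.Println(f, found, err)
    }
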
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
index 40d289f375..5e36a91ee4 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
@@ -450,10 +450,14 @@ func (u *Unstructured) SetFinalizers(finalizers []string) {
 }
 
 func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry {
-	items, found, err := NestedSlice(u.Object, "metadata", "managedFields")
+	v, found, err := NestedFieldNoCopy(u.Object, "metadata", "managedFields")
 	if !found || err != nil {
 		return nil
 	}
+	items, ok := v.([]interface{})
+	if !ok {
+		return nil
+	}
 	managedFields := []metav1.ManagedFieldsEntry{}
 	for _, item := range items {
 		m, ok := item.(map[string]interface{})
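
The switch from NestedSlice to NestedFieldNoCopy avoids deep-copying the raw managedFields slice before each entry is decoded anyway, and the added type assertion preserves the old behavior of returning nil when the field is malformed. A minimal illustration of the copy-versus-alias difference (illustrative object):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    )

    func main() {
        obj := map[string]interface{}{
            "metadata": map[string]interface{}{
                "managedFields": []interface{}{
                    map[string]interface{}{"manager": "kubectl"},
                },
            },
        }
        copied, _, _ := unstructured.NestedSlice(obj, "metadata", "managedFields") // deep copy
        aliased, _, _ := unstructured.NestedFieldNoCopy(obj, "metadata", "managedFields")
        // Both see the same entry, but the second call did not copy the data.
        fmt.Println(len(copied), len(aliased.([]interface{})))
    }
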
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
index 3eba5ba541..b1eb1bbfc4 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
@@ -26,6 +26,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/validation"
 	"k8s.io/apimachinery/pkg/util/validation/field"
+
+	"k8s.io/utils/ptr"
 )
 
 // LabelSelectorValidationOptions is a struct that can be passed to ValidateLabelSelector to record the validate options
@@ -165,6 +167,7 @@ func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList {
 		allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"}))
 	}
 	allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...)
+	allErrs = append(allErrs, ValidateIgnoreStoreReadError(field.NewPath("ignoreStoreReadErrorWithClusterBreakingPotential"), options)...)
 	return allErrs
 }
 
@@ -186,15 +189,16 @@ func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList {
 
 func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList {
 	allErrs := field.ErrorList{}
-	if patchType != types.ApplyPatchType {
-		if options.Force != nil {
-			allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch"))
-		}
-	} else {
+	switch patchType {
+	case types.ApplyYAMLPatchType, types.ApplyCBORPatchType:
 		if options.FieldManager == "" {
 			// This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers.
 			allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch"))
 		}
+	default:
+		if options.Force != nil {
+			allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch"))
+		}
 	}
 	allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...)
 	allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...)
@@ -212,7 +216,7 @@ func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorL
 	// considered as not set and is defaulted by the rest of the process
 	// (unless apply is used, in which case it is required).
 	if len(fieldManager) > FieldManagerMaxLength {
-		allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength))
+		allErrs = append(allErrs, field.TooLong(fldPath, "" /*unused*/, FieldManagerMaxLength))
 	}
 	// Verify that all characters are printable.
 	for i, r := range fieldManager {
@@ -277,7 +281,7 @@ func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *fiel
 		allErrs = append(allErrs, ValidateFieldManager(fields.Manager, fldPath.Child("manager"))...)
 
 		if len(fields.Subresource) > MaxSubresourceNameLength {
-			allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), fields.Subresource, MaxSubresourceNameLength))
+			allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), "" /*unused*/, MaxSubresourceNameLength))
 		}
 	}
 	return allErrs
@@ -334,12 +338,12 @@ func ValidateCondition(condition metav1.Condition, fldPath *field.Path) field.Er
 			allErrs = append(allErrs, field.Invalid(fldPath.Child("reason"), condition.Reason, currErr))
 		}
 		if len(condition.Reason) > maxReasonLen {
-			allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), condition.Reason, maxReasonLen))
+			allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), "" /*unused*/, maxReasonLen))
 		}
 	}
 
 	if len(condition.Message) > maxMessageLen {
-		allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), condition.Message, maxMessageLen))
+		allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), "" /*unused*/, maxMessageLen))
 	}
 
 	return allErrs
@@ -357,3 +361,31 @@ func isValidConditionReason(value string) []string {
 	}
 	return nil
 }
+
+// ValidateIgnoreStoreReadError validates that delete options are valid when
+// ignoreStoreReadErrorWithClusterBreakingPotential is enabled
+func ValidateIgnoreStoreReadError(fldPath *field.Path, options *metav1.DeleteOptions) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if enabled := ptr.Deref[bool](options.IgnoreStoreReadErrorWithClusterBreakingPotential, false); !enabled {
+		return allErrs
+	}
+
+	if len(options.DryRun) > 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .dryRun"))
+	}
+	if options.PropagationPolicy != nil {
+		allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .propagationPolicy"))
+	}
+	//nolint:staticcheck // Keep validation for deprecated OrphanDependents option until it's being removed
+	if options.OrphanDependents != nil {
+		allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .orphanDependents"))
+	}
+	if options.GracePeriodSeconds != nil {
+		allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .gracePeriodSeconds"))
+	}
+	if options.Preconditions != nil {
+		allErrs = append(allErrs, field.Invalid(fldPath, true, "cannot be set together with .preconditions"))
+	}
+
+	return allErrs
+}
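
A sketch of the new validation in use (assuming this vendored package): setting the flag together with any other delete option is rejected.

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
        "k8s.io/apimachinery/pkg/util/validation/field"
        "k8s.io/utils/ptr"
    )

    func main() {
        opts := &metav1.DeleteOptions{
            IgnoreStoreReadErrorWithClusterBreakingPotential: ptr.To(true),
            DryRun: []string{"All"}, // invalid combination
        }
        errs := validation.ValidateIgnoreStoreReadError(
            field.NewPath("ignoreStoreReadErrorWithClusterBreakingPotential"), opts)
        fmt.Println(errs) // reports the flag cannot be set together with .dryRun
    }
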
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
index afe01ed5a4..82e2722404 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go
@@ -339,6 +339,13 @@ func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptio
 	} else {
 		out.DryRun = nil
 	}
+	if values, ok := map[string][]string(*in)["ignoreStoreReadErrorWithClusterBreakingPotential"]; ok && len(values) > 0 {
+		if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.IgnoreStoreReadErrorWithClusterBreakingPotential, s); err != nil {
+			return err
+		}
+	} else {
+		out.IgnoreStoreReadErrorWithClusterBreakingPotential = nil
+	}
 	return nil
 }
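
The conversion above lets the option arrive as a URL query parameter. A rough standalone approximation of that behavior (the real path goes through the scheme's registered converter, not strconv):

    package main

    import (
        "fmt"
        "net/url"
        "strconv"
    )

    func main() {
        q, _ := url.ParseQuery("ignoreStoreReadErrorWithClusterBreakingPotential=true")
        var flag *bool
        if vs := q["ignoreStoreReadErrorWithClusterBreakingPotential"]; len(vs) > 0 {
            if b, err := strconv.ParseBool(vs[0]); err == nil {
                flag = &b
            }
        }
        fmt.Println(flag != nil && *flag) // true
    }
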
 
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
index 90cc54a7e7..6b0d0dfee9 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
@@ -290,6 +290,11 @@ func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.IgnoreStoreReadErrorWithClusterBreakingPotential != nil {
+		in, out := &in.IgnoreStoreReadErrorWithClusterBreakingPotential, &out.IgnoreStoreReadErrorWithClusterBreakingPotential
+		*out = new(bool)
+		**out = **in
+	}
 	return
 }
 
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go
new file mode 100644
index 0000000000..5cac6fba5a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"unsafe"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/conversion"
+)
+
+// Convert_Slice_string_To_v1beta1_IncludeObjectPolicy allows converting a URL query parameter value into an IncludeObjectPolicy.
+func Convert_Slice_string_To_v1beta1_IncludeObjectPolicy(in *[]string, out *IncludeObjectPolicy, s conversion.Scope) error {
+	if len(*in) > 0 {
+		*out = IncludeObjectPolicy((*in)[0])
+	}
+	return nil
+}
+
+// Convert_v1beta1_PartialObjectMetadataList_To_v1_PartialObjectMetadataList allows converting PartialObjectMetadataList between versions
+func Convert_v1beta1_PartialObjectMetadataList_To_v1_PartialObjectMetadataList(in *PartialObjectMetadataList, out *v1.PartialObjectMetadataList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]v1.PartialObjectMetadata)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+// Convert_v1_PartialObjectMetadataList_To_v1beta1_PartialObjectMetadataList allows converting PartialObjectMetadataList between versions
+func Convert_v1_PartialObjectMetadataList_To_v1beta1_PartialObjectMetadataList(in *v1.PartialObjectMetadataList, out *PartialObjectMetadataList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]v1.PartialObjectMetadata)(unsafe.Pointer(&in.Items))
+	return nil
+}
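
Because v1beta1.PartialObjectMetadata is a type alias of the v1 type (see types.go later in this diff), the unsafe.Pointer casts above amount to zero-copy identity conversions of the Items slice. A tiny sketch of the aliasing relationship:

    package main

    import (
        "fmt"

        v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
    )

    func main() {
        item := v1.PartialObjectMetadata{}
        // Because of the alias, a v1 value is directly assignable to the v1beta1 name.
        var same v1beta1.PartialObjectMetadata = item
        fmt.Printf("%T\n", same) // v1.PartialObjectMetadata
    }
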
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go
new file mode 100644
index 0000000000..2b7e8ca0bf
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go
@@ -0,0 +1,17 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go
new file mode 100644
index 0000000000..20c9d2ec73
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=meta.k8s.io
+
+package v1beta1 // import "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
new file mode 100644
index 0000000000..819d936fe5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
@@ -0,0 +1,411 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
+
+package v1beta1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *PartialObjectMetadataList) Reset()      { *m = PartialObjectMetadataList{} }
+func (*PartialObjectMetadataList) ProtoMessage() {}
+func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_39237a8d8061b52f, []int{0}
+}
+func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PartialObjectMetadataList.Merge(m, src)
+}
+func (m *PartialObjectMetadataList) XXX_Size() int {
+	return m.Size()
+}
+func (m *PartialObjectMetadataList) XXX_DiscardUnknown() {
+	xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList")
+}
+
+func init() {
+	proto.RegisterFile("k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_39237a8d8061b52f)
+}
+
+var fileDescriptor_39237a8d8061b52f = []byte{
+	// 303 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x4b, 0xf3, 0x30,
+	0x1c, 0xc6, 0x9b, 0xf7, 0x65, 0x30, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xdb, 0xc1,
+	0x84, 0x0d, 0x11, 0xc5, 0xdb, 0x6e, 0x82, 0x32, 0xd9, 0x51, 0x3c, 0x98, 0x76, 0x31, 0x8b, 0x35,
+	0x4d, 0x69, 0xfe, 0x15, 0xbc, 0xf9, 0x11, 0xfc, 0x58, 0x3d, 0xee, 0x38, 0x10, 0x86, 0x8d, 0x5f,
+	0x44, 0xd2, 0x56, 0x91, 0xa1, 0xd0, 0x5b, 0x9e, 0x07, 0x7e, 0xbf, 0x3c, 0x81, 0xf8, 0x67, 0xd1,
+	0xa9, 0x21, 0x52, 0x53, 0x96, 0x48, 0xc5, 0xc2, 0x95, 0x8c, 0x79, 0xfa, 0x4c, 0x93, 0x48, 0xb8,
+	0xc2, 0x50, 0xc5, 0x81, 0xd1, 0xa7, 0x49, 0xc0, 0x81, 0x4d, 0xa8, 0xe0, 0x31, 0x4f, 0x19, 0xf0,
+	0x25, 0x49, 0x52, 0x0d, 0xba, 0x3b, 0xae, 0x50, 0xf2, 0x13, 0x25, 0x49, 0x24, 0x5c, 0x61, 0x88,
+	0x43, 0x49, 0x8d, 0xf6, 0x8f, 0x84, 0x84, 0x55, 0x16, 0x90, 0x50, 0x2b, 0x2a, 0xb4, 0xd0, 0xb4,
+	0x34, 0x04, 0xd9, 0x7d, 0x99, 0xca, 0x50, 0x9e, 0x2a, 0x73, 0xff, 0xb8, 0xc9, 0xa8, 0xdd, 0x3d,
+	0xfd, 0x93, 0xbf, 0xa8, 0x34, 0x8b, 0x41, 0x2a, 0x4e, 0x4d, 0xb8, 0xe2, 0x8a, 0xed, 0x72, 0x87,
+	0x6f, 0xc8, 0x3f, 0xb8, 0x66, 0x29, 0x48, 0xf6, 0x38, 0x0f, 0x1e, 0x78, 0x08, 0x57, 0x1c, 0xd8,
+	0x92, 0x01, 0xbb, 0x94, 0x06, 0xba, 0xb7, 0x7e, 0x5b, 0xd5, 0xb9, 0xf7, 0x6f, 0x88, 0x46, 0x9d,
+	0x29, 0x21, 0x4d, 0x1e, 0x4e, 0x1c, 0xed, 0x4c, 0xb3, 0xfd, 0x7c, 0x3b, 0xf0, 0xec, 0x76, 0xd0,
+	0xfe, 0x6a, 0x16, 0xdf, 0xc6, 0xee, 0x9d, 0xdf, 0x92, 0xc0, 0x95, 0xe9, 0xa1, 0xe1, 0xff, 0x51,
+	0x67, 0x7a, 0xde, 0x4c, 0xfd, 0xeb, 0xda, 0xd9, 0x5e, 0x7d, 0x4f, 0xeb, 0xc2, 0x19, 0x17, 0x95,
+	0x78, 0x36, 0xcf, 0x0b, 0xec, 0xad, 0x0b, 0xec, 0x6d, 0x0a, 0xec, 0xbd, 0x58, 0x8c, 0x72, 0x8b,
+	0xd1, 0xda, 0x62, 0xb4, 0xb1, 0x18, 0xbd, 0x5b, 0x8c, 0x5e, 0x3f, 0xb0, 0x77, 0x33, 0x6e, 0xfc,
+	0x0d, 0x3e, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x0f, 0xd7, 0x36, 0x32, 0x02, 0x00, 0x00,
+}
+
+func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *PartialObjectMetadataList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *PartialObjectMetadataList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]PartialObjectMetadata{"
+	for _, f := range this.Items {
+		repeatedStringForItems += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&PartialObjectMetadataList{`,
+		`Items:` + repeatedStringForItems + `,`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, v1.PartialObjectMetadata{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
new file mode 100644
index 0000000000..fcec553542
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
@@ -0,0 +1,41 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package k8s.io.apimachinery.pkg.apis.meta.v1beta1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/apimachinery/pkg/apis/meta/v1beta1";
+
+// PartialObjectMetadataList contains a list of objects containing only their metadata.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message PartialObjectMetadataList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2;
+
+  // items contains each of the included items.
+  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1;
+}
+
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go
new file mode 100644
index 0000000000..a4a412e335
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "meta.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// AddMetaToScheme registers base meta types into schemas.
+func AddMetaToScheme(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Table{},
+		&TableOptions{},
+		&PartialObjectMetadata{},
+		&PartialObjectMetadataList{},
+	)
+
+	return nil
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*PartialObjectMetadataList)(nil), (*v1.PartialObjectMetadataList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_PartialObjectMetadataList_To_v1_PartialObjectMetadataList(a.(*PartialObjectMetadataList), b.(*v1.PartialObjectMetadataList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PartialObjectMetadataList)(nil), (*PartialObjectMetadataList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PartialObjectMetadataList_To_v1beta1_PartialObjectMetadataList(a.(*v1.PartialObjectMetadataList), b.(*PartialObjectMetadataList), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
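
A sketch of wiring the new v1beta1 meta types into a scheme with the helpers above (assuming this vendored package):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
        "k8s.io/apimachinery/pkg/runtime"
    )

    func main() {
        scheme := runtime.NewScheme()
        if err := v1beta1.AddMetaToScheme(scheme); err != nil {
            panic(err)
        }
        if err := v1beta1.RegisterConversions(scheme); err != nil {
            panic(err)
        }
        gvk := v1beta1.SchemeGroupVersion.WithKind("PartialObjectMetadataList")
        fmt.Println(scheme.Recognizes(gvk)) // true
    }
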
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go
new file mode 100644
index 0000000000..f16170a37b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1beta1 contains alpha objects from meta that will be introduced.
+package v1beta1
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// Table is a tabular representation of a set of API resources. The server transforms the
+// object into a set of preferred columns for quickly reviewing the objects.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +protobuf=false
+type Table = v1.Table
+
+// TableColumnDefinition contains information about a column returned in the Table.
+// +protobuf=false
+type TableColumnDefinition = v1.TableColumnDefinition
+
+// TableRow is an individual row in a table.
+// +protobuf=false
+type TableRow = v1.TableRow
+
+// TableRowCondition allows a row to be marked with additional information.
+// +protobuf=false
+type TableRowCondition = v1.TableRowCondition
+
+type RowConditionType = v1.RowConditionType
+
+type ConditionStatus = v1.ConditionStatus
+
+type IncludeObjectPolicy = v1.IncludeObjectPolicy
+
+// TableOptions are used when a Table is requested by the caller.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type TableOptions = v1.TableOptions
+
+// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
+// to get access to a particular ObjectMeta schema without knowing the details of the version.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PartialObjectMetadata = v1.PartialObjectMetadata
+
+// IMPORTANT: PartialObjectMetadataList has different protobuf field ids in v1beta1 than
+// v1 because ListMeta was accidentally omitted prior to 1.15. Therefore this type must
+// remain independent of v1.PartialObjectMetadataList to preserve mappings.
+
+// PartialObjectMetadataList contains a list of objects containing only their metadata.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PartialObjectMetadataList struct {
+	v1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+	// +optional
+	v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"`
+
+	// items contains each of the included items.
+	Items []v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"`
+}
+
+const (
+	RowCompleted = v1.RowCompleted
+
+	ConditionTrue    = v1.ConditionTrue
+	ConditionFalse   = v1.ConditionFalse
+	ConditionUnknown = v1.ConditionUnknown
+
+	IncludeNone     = v1.IncludeNone
+	IncludeMetadata = v1.IncludeMetadata
+	IncludeObject   = v1.IncludeObject
+)
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 0000000000..dff735dcf3
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,40 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-codegen.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_PartialObjectMetadataList = map[string]string{
+	"":         "PartialObjectMetadataList contains a list of objects containing only their metadata.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "items contains each of the included items.",
+}
+
+func (PartialObjectMetadataList) SwaggerDoc() map[string]string {
+	return map_PartialObjectMetadataList
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..972b7f03ea
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,60 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]v1.PartialObjectMetadata, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList.
+func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList {
+	if in == nil {
+		return nil
+	}
+	out := new(PartialObjectMetadataList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go
new file mode 100644
index 0000000000..198b5be4af
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go
@@ -0,0 +1,33 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/labels/selector.go
index 9e22a00564..fafa81a3d2 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/labels/selector.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/labels/selector.go
@@ -18,6 +18,7 @@ package labels
 
 import (
 	"fmt"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -27,7 +28,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/validation"
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/klog/v2"
-	stringslices "k8s.io/utils/strings/slices"
 )
 
 var (
@@ -313,7 +313,7 @@ func (r Requirement) Equal(x Requirement) bool {
 	if r.operator != x.operator {
 		return false
 	}
-	return stringslices.Equal(r.strValues, x.strValues)
+	return slices.Equal(r.strValues, x.strValues)
 }
 
 // Empty returns true if the internalSelector doesn't restrict selection space
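
The selector change above swaps the k8s.io/utils strings/slices shim for the standard library's generic slices package (available since Go 1.21); for string slices the two are drop-in equivalents. A quick illustration:

package main

import (
	"fmt"
	"slices"
)

func main() {
	a := []string{"in", "notin"}
	b := []string{"in", "notin"}
	// slices.Equal is the stdlib generic replacement for
	// k8s.io/utils/strings/slices.Equal used by Requirement.Equal above.
	fmt.Println(slices.Equal(a, b)) // true
}
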
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
index cc0a77bba6..395dfdbd02 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
@@ -284,3 +284,21 @@ func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error {
 func (e *encoderWithAllocator) Identifier() Identifier {
 	return e.encoder.Identifier()
 }
+
+type nondeterministicEncoderToEncoderAdapter struct {
+	NondeterministicEncoder
+}
+
+func (e nondeterministicEncoderToEncoderAdapter) Encode(obj Object, w io.Writer) error {
+	return e.EncodeNondeterministic(obj, w)
+}
+
+// UseNondeterministicEncoding returns an Encoder that encodes objects using the provided Encoder's
+// EncodeNondeterministic method if it implements NondeterministicEncoder; otherwise it returns the
+// provided Encoder as-is.
+func UseNondeterministicEncoding(encoder Encoder) Encoder {
+	if nondeterministic, ok := encoder.(NondeterministicEncoder); ok {
+		return nondeterministicEncoderToEncoderAdapter{nondeterministic}
+	}
+	return encoder
+}
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
index e89ea89391..2703300cd5 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
@@ -69,6 +69,19 @@ type Encoder interface {
 	Identifier() Identifier
 }
 
+// NondeterministicEncoder is implemented by Encoders that can serialize objects more efficiently in
+// cases where the output does not need to be deterministic.
+type NondeterministicEncoder interface {
+	Encoder
+
+	// EncodeNondeterministic writes an object to the stream. Unlike the Encode method of
+	// Encoder, EncodeNondeterministic does not guarantee that any two invocations will write
+	// the same sequence of bytes to the io.Writer. Any differences will not be significant to a
+	// generic decoder. For example, map entries and struct fields might be encoded in any
+	// order.
+	EncodeNondeterministic(Object, io.Writer) error
+}
+
 // MemoryAllocator is responsible for allocating memory.
 // By encapsulating memory allocation into its own interface, we can reuse the memory
 // across many operations in places we know it can significantly improve the performance.
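
Together, the adapter in helper.go and the optional interface above let callers opt in to nondeterministic encoding unconditionally. A sketch with a toy encoder (toyEncoder is hypothetical, not a real Kubernetes codec):

package main

import (
	"bytes"
	"fmt"
	"io"

	"k8s.io/apimachinery/pkg/runtime"
)

// toyEncoder implements the optional NondeterministicEncoder interface
// alongside the base Encoder methods.
type toyEncoder struct{}

func (toyEncoder) Encode(obj runtime.Object, w io.Writer) error {
	_, err := io.WriteString(w, "deterministic")
	return err
}

func (toyEncoder) Identifier() runtime.Identifier { return "toy" }

func (toyEncoder) EncodeNondeterministic(obj runtime.Object, w io.Writer) error {
	_, err := io.WriteString(w, "nondeterministic")
	return err
}

func main() {
	var buf bytes.Buffer
	// UseNondeterministicEncoding reroutes Encode through
	// EncodeNondeterministic when the optional interface is present;
	// plain encoders pass through unchanged.
	e := runtime.UseNondeterministicEncoding(toyEncoder{})
	if err := e.Encode(nil, &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // nondeterministic
}
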
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go
new file mode 100644
index 0000000000..4d069a903a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go
@@ -0,0 +1,389 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cbor
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes"
+	"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
+	util "k8s.io/apimachinery/pkg/util/runtime"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+type metaFactory interface {
+	// Interpret should return the version and kind of the wire-format of the object.
+	Interpret(data []byte) (*schema.GroupVersionKind, error)
+}
+
+type defaultMetaFactory struct{}
+
+func (mf *defaultMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) {
+	var tm metav1.TypeMeta
+	// The input is expected to include additional map keys besides apiVersion and kind, so use
+	// lax mode for decoding into TypeMeta.
+	if err := modes.DecodeLax.Unmarshal(data, &tm); err != nil {
+		return nil, fmt.Errorf("unable to determine group/version/kind: %w", err)
+	}
+	actual := tm.GetObjectKind().GroupVersionKind()
+	return &actual, nil
+}
+
+type Serializer interface {
+	runtime.Serializer
+	runtime.NondeterministicEncoder
+	recognizer.RecognizingDecoder
+
+	// NewSerializer returns a value of this interface type rather than exporting the serializer
+	// type and returning one of those because the zero value of serializer isn't ready to
+	// use. Users aren't intended to implement cbor.Serializer themselves, and this unexported
+	// interface method is here to prevent that (https://go.dev/blog/module-compatibility).
+	private()
+}
+
+var _ Serializer = &serializer{}
+
+type options struct {
+	strict    bool
+	transcode bool
+}
+
+type Option func(*options)
+
+// Strict configures a serializer to return a strict decoding error when it encounters map keys that
+// do not correspond to a field in the target object of a decode operation. This option is disabled
+// by default.
+func Strict(s bool) Option {
+	return func(opts *options) {
+		opts.strict = s
+	}
+}
+
+// Transcode configures a serializer to transcode the "raw" bytes of a decoded runtime.RawExtension
+// or metav1.FieldsV1 object to JSON. This is enabled by default to support existing programs that
+// depend on the assumption that objects of either type contain valid JSON.
+func Transcode(s bool) Option {
+	return func(opts *options) {
+		opts.transcode = s
+	}
+}
+
+type serializer struct {
+	metaFactory metaFactory
+	creater     runtime.ObjectCreater
+	typer       runtime.ObjectTyper
+	options     options
+}
+
+func (serializer) private() {}
+
+// NewSerializer creates and returns a serializer configured with the provided options. The default
+// options are equivalent to explicitly passing Strict(false) and Transcode(true).
+func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, options ...Option) Serializer {
+	return newSerializer(&defaultMetaFactory{}, creater, typer, options...)
+}
+
+func newSerializer(metaFactory metaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options ...Option) *serializer {
+	s := &serializer{
+		metaFactory: metaFactory,
+		creater:     creater,
+		typer:       typer,
+	}
+	s.options.transcode = true
+	for _, o := range options {
+		o(&s.options)
+	}
+	return s
+}
+
+func (s *serializer) Identifier() runtime.Identifier {
+	return "cbor"
+}
+
+// Encode writes a CBOR representation of the given object.
+//
+// Because the CBOR data item written by a call to Encode is always enclosed in the "self-described
+// CBOR" tag, its encoded form always has the prefix 0xd9d9f7. This prefix is suitable for use as a
+// "magic number" for distinguishing encoded CBOR from other protocols.
+//
+// The default serialization behavior for any given object replicates the behavior of the JSON
+// serializer as far as it is necessary to allow the CBOR serializer to be used as a drop-in
+// replacement for the JSON serializer, with limited exceptions. For example, the distinction
+// between integers and floating-point numbers is preserved in CBOR due to its distinct
+// representations for each type.
+//
+// Objects implementing runtime.Unstructured will have their unstructured content encoded rather
+// than following the default behavior for their dynamic type.
+func (s *serializer) Encode(obj runtime.Object, w io.Writer) error {
+	return s.encode(modes.Encode, obj, w)
+}
+
+func (s *serializer) EncodeNondeterministic(obj runtime.Object, w io.Writer) error {
+	return s.encode(modes.EncodeNondeterministic, obj, w)
+}
+
+func (s *serializer) encode(mode modes.EncMode, obj runtime.Object, w io.Writer) error {
+	var v interface{} = obj
+	if u, ok := obj.(runtime.Unstructured); ok {
+		v = u.UnstructuredContent()
+	}
+
+	if err := modes.RejectCustomMarshalers(v); err != nil {
+		return err
+	}
+
+	if _, err := w.Write(selfDescribedCBOR); err != nil {
+		return err
+	}
+
+	return mode.MarshalTo(v, w)
+}
+
+// gvkWithDefaults returns the group, version, and kind of actual, filling any empty fields from the provided default.
+func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind {
+	if len(actual.Kind) == 0 {
+		actual.Kind = defaultGVK.Kind
+	}
+	if len(actual.Version) == 0 && len(actual.Group) == 0 {
+		actual.Group = defaultGVK.Group
+		actual.Version = defaultGVK.Version
+	}
+	if len(actual.Version) == 0 && actual.Group == defaultGVK.Group {
+		actual.Version = defaultGVK.Version
+	}
+	return actual
+}
+
+// diagnose returns the diagnostic encoding of a well-formed CBOR data item.
+func diagnose(data []byte) string {
+	diag, err := modes.Diagnostic.Diagnose(data)
+	if err != nil {
+		// Since the input must already be well-formed CBOR, converting it to diagnostic
+		// notation should not fail.
+		util.HandleError(err)
+
+		return hex.EncodeToString(data)
+	}
+	return diag
+}
+
+// unmarshal unmarshals CBOR data from the provided byte slice into a Go object. If the decoder is
+// configured to report strict errors, the first error return value may be a non-nil strict decoding
+// error. If the last error return value is non-nil, then the unmarshal failed entirely and the
+// state of the destination object should not be relied on.
+func (s *serializer) unmarshal(data []byte, into interface{}) (strict, lax error) {
+	if u, ok := into.(runtime.Unstructured); ok {
+		var content map[string]interface{}
+		defer func() {
+			switch u := u.(type) {
+			case *unstructured.UnstructuredList:
+				// UnstructuredList's implementation of SetUnstructuredContent
+				// produces different objects than those produced by a decode using
+				// UnstructuredJSONScheme:
+				//
+				//   1. SetUnstructuredContent retains the "items" key in the list's
+				//      Object field. It is omitted from Object when decoding with
+				//      UnstructuredJSONScheme.
+				//   2. SetUnstructuredContent does not populate "apiVersion" and
+				//      "kind" on each entry of its Items
+				//      field. UnstructuredJSONScheme does, inferring the singular
+				//      Kind from the list Kind.
+				//   3. SetUnstructuredContent ignores entries of "items" that are
+				//      not JSON objects or are objects without
+				//      "kind". UnstructuredJSONScheme returns an error in either
+				//      case.
+				//
+				// UnstructuredJSONScheme's behavior is replicated here.
+				var items []interface{}
+				if uncast, present := content["items"]; present {
+					var cast bool
+					items, cast = uncast.([]interface{})
+					if !cast {
+						strict, lax = nil, fmt.Errorf("items field of UnstructuredList must be encoded as an array or null if present")
+						return
+					}
+				}
+				apiVersion, _ := content["apiVersion"].(string)
+				kind, _ := content["kind"].(string)
+				kind = strings.TrimSuffix(kind, "List")
+				var unstructureds []unstructured.Unstructured
+				if len(items) > 0 {
+					unstructureds = make([]unstructured.Unstructured, len(items))
+				}
+				for i := range items {
+					object, cast := items[i].(map[string]interface{})
+					if !cast {
+						strict, lax = nil, fmt.Errorf("elements of the items field of UnstructuredList must be encoded as a map")
+						return
+					}
+
+					// As in UnstructuredJSONScheme, only set the heuristic
+					// singular GVK when both "apiVersion" and "kind" are either
+					// missing, non-string, or empty.
+					object["apiVersion"], _ = object["apiVersion"].(string)
+					object["kind"], _ = object["kind"].(string)
+					if object["apiVersion"] == "" && object["kind"] == "" {
+						object["apiVersion"] = apiVersion
+						object["kind"] = kind
+					}
+
+					if object["kind"] == "" {
+						strict, lax = nil, runtime.NewMissingKindErr(diagnose(data))
+						return
+					}
+					if object["apiVersion"] == "" {
+						strict, lax = nil, runtime.NewMissingVersionErr(diagnose(data))
+						return
+					}
+
+					unstructureds[i].Object = object
+				}
+				delete(content, "items")
+				u.Object = content
+				u.Items = unstructureds
+			default:
+				u.SetUnstructuredContent(content)
+			}
+		}()
+		into = &content
+	} else if err := modes.RejectCustomMarshalers(into); err != nil {
+		return nil, err
+	}
+
+	if !s.options.strict {
+		return nil, modes.DecodeLax.Unmarshal(data, into)
+	}
+
+	err := modes.Decode.Unmarshal(data, into)
+	// TODO: UnknownFieldError is ambiguous. It only provides the index of the first problematic
+	// map entry encountered and does not indicate which map the index refers to.
+	var unknownField *cbor.UnknownFieldError
+	if errors.As(err, &unknownField) {
+		// Unlike JSON, there are no strict errors in CBOR for duplicate map keys. CBOR maps
+		// with duplicate keys are considered invalid according to the spec and are rejected
+		// entirely.
+		return runtime.NewStrictDecodingError([]error{unknownField}), modes.DecodeLax.Unmarshal(data, into)
+	}
+	return nil, err
+}
+
+func (s *serializer) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	// A preliminary pass over the input to obtain the actual GVK is redundant on a successful
+	// decode into Unstructured.
+	if _, ok := into.(runtime.Unstructured); ok {
+		if _, unmarshalErr := s.unmarshal(data, into); unmarshalErr != nil {
+			actual, interpretErr := s.metaFactory.Interpret(data)
+			if interpretErr != nil {
+				return nil, nil, interpretErr
+			}
+
+			if gvk != nil {
+				*actual = gvkWithDefaults(*actual, *gvk)
+			}
+
+			return nil, actual, unmarshalErr
+		}
+
+		actual := into.GetObjectKind().GroupVersionKind()
+		if len(actual.Kind) == 0 {
+			return nil, &actual, runtime.NewMissingKindErr(diagnose(data))
+		}
+		if len(actual.Version) == 0 {
+			return nil, &actual, runtime.NewMissingVersionErr(diagnose(data))
+		}
+
+		return into, &actual, nil
+	}
+
+	actual, err := s.metaFactory.Interpret(data)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if gvk != nil {
+		*actual = gvkWithDefaults(*actual, *gvk)
+	}
+
+	if into != nil {
+		types, _, err := s.typer.ObjectKinds(into)
+		if err != nil {
+			return nil, actual, err
+		}
+		*actual = gvkWithDefaults(*actual, types[0])
+	}
+
+	if len(actual.Kind) == 0 {
+		return nil, actual, runtime.NewMissingKindErr(diagnose(data))
+	}
+	if len(actual.Version) == 0 {
+		return nil, actual, runtime.NewMissingVersionErr(diagnose(data))
+	}
+
+	obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into)
+	if err != nil {
+		return nil, actual, err
+	}
+
+	strict, err := s.unmarshal(data, obj)
+	if err != nil {
+		return nil, actual, err
+	}
+
+	if s.options.transcode {
+		if err := transcodeRawTypes(obj); err != nil {
+			return nil, actual, err
+		}
+	}
+
+	return obj, actual, strict
+}
+
+// selfDescribedCBOR is the CBOR encoding of the head of tag number 55799. This tag, specified in
+// RFC 8949 Section 3.4.6 "Self-Described CBOR", encloses all output from the encoder, has no
+// special semantics, and is used as a magic number to recognize CBOR-encoded data items.
+//
+// See https://www.rfc-editor.org/rfc/rfc8949.html#name-self-described-cbor.
+var selfDescribedCBOR = []byte{0xd9, 0xd9, 0xf7}
+
+func (s *serializer) RecognizesData(data []byte) (ok, unknown bool, err error) {
+	return bytes.HasPrefix(data, selfDescribedCBOR), false, nil
+}
+
+// NewSerializerInfo returns a default SerializerInfo for CBOR using the given creater and typer.
+func NewSerializerInfo(creater runtime.ObjectCreater, typer runtime.ObjectTyper) runtime.SerializerInfo {
+	return runtime.SerializerInfo{
+		MediaType:        "application/cbor",
+		MediaTypeType:    "application",
+		MediaTypeSubType: "cbor",
+		Serializer:       NewSerializer(creater, typer),
+		StrictSerializer: NewSerializer(creater, typer, Strict(true)),
+		StreamSerializer: &runtime.StreamSerializerInfo{
+			Framer:     NewFramer(),
+			Serializer: NewSerializer(creater, typer, Transcode(false)),
+		},
+	}
+}
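
A sketch of using the new serializer end to end; runtime.Scheme satisfies both ObjectCreater and ObjectTyper, and the unstructured path avoids registering types. The round trip and the 0xd9d9f7 prefix check follow the Encode and RecognizesData contracts documented above:

package main

import (
	"bytes"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/cbor"
)

func main() {
	scheme := runtime.NewScheme()
	// A Scheme implements both ObjectCreater and ObjectTyper.
	s := cbor.NewSerializer(scheme, scheme, cbor.Strict(true))

	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "demo"},
	}}

	var buf bytes.Buffer
	if err := s.Encode(obj, &buf); err != nil {
		panic(err)
	}
	// Every encoded item carries the self-described CBOR prefix 0xd9d9f7,
	// which is also what RecognizesData keys on.
	fmt.Printf("prefix: %x\n", buf.Bytes()[:3])

	out := &unstructured.Unstructured{}
	if _, _, err := s.Decode(buf.Bytes(), nil, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetKind(), out.GetName()) // ConfigMap demo
}
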
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
index cd78b1df26..a71a487f9e 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go
@@ -23,14 +23,39 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes"
 )
 
+// Marshal serializes a value to CBOR. If there is more than one way to encode the value, it will
+// make the same choice as the CBOR implementation of runtime.Serializer.
+//
+// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its
+// nested values) implements any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler,
+// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless
+// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be
+// removed in favor of automatic transcoding to CBOR.
 func Marshal(src interface{}) ([]byte, error) {
+	if err := modes.RejectCustomMarshalers(src); err != nil {
+		return nil, err
+	}
 	return modes.Encode.Marshal(src)
 }
 
+// Unmarshal deserializes from CBOR into an addressable value. If there is more than one way to
+// unmarshal a value, it will make the same choice as the CBOR implementation of runtime.Serializer.
+//
+// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its
+// nested values) implements any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler,
+// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless
+// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be
+// removed in favor of automatic transcoding to CBOR.
 func Unmarshal(src []byte, dst interface{}) error {
+	if err := modes.RejectCustomMarshalers(dst); err != nil {
+		return err
+	}
 	return modes.Decode.Unmarshal(src, dst)
 }
 
+// Diagnose accepts well-formed CBOR bytes and returns a string representing the same data item in
+// human-readable diagnostic notation (RFC 8949 Section 8). The diagnostic notation is not meant to
+// be parsed.
 func Diagnose(src []byte) (string, error) {
 	return modes.Diagnostic.Diagnose(src)
 }
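
A small usage sketch for the direct package; the point type and its json tags are illustrative (the CBOR modes honor json struct tags), and the diagnostic output shown in the comment is approximate:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct"
)

type point struct {
	X int `json:"x"`
	Y int `json:"y"`
}

func main() {
	b, err := direct.Marshal(point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}
	// Diagnostic notation (RFC 8949 Section 8) is for humans, not parsers.
	diag, err := direct.Diagnose(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(diag) // e.g. {"x": 1, "y": 2}

	var p point
	if err := direct.Unmarshal(b, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.X, p.Y) // 1 2
}
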
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go
new file mode 100644
index 0000000000..28a733c673
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cbor
+
+import (
+	"io"
+
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/fxamacker/cbor/v2"
+)
+
+// NewFramer returns a runtime.Framer based on RFC 8742 CBOR Sequences. Each frame contains exactly
+// one encoded CBOR data item.
+func NewFramer() runtime.Framer {
+	return framer{}
+}
+
+var _ runtime.Framer = framer{}
+
+type framer struct{}
+
+func (framer) NewFrameReader(rc io.ReadCloser) io.ReadCloser {
+	return &frameReader{
+		decoder: cbor.NewDecoder(rc),
+		closer:  rc,
+	}
+}
+
+func (framer) NewFrameWriter(w io.Writer) io.Writer {
+	// Each data item in a CBOR sequence is self-delimiting (like JSON objects).
+	return w
+}
+
+type frameReader struct {
+	decoder *cbor.Decoder
+	closer  io.Closer
+
+	overflow []byte
+}
+
+func (fr *frameReader) Read(dst []byte) (int, error) {
+	if len(fr.overflow) > 0 {
+		// We read a frame that was too large for the destination slice in a previous call
+		// to Read and have bytes left over.
+		n := copy(dst, fr.overflow)
+		if n < len(fr.overflow) {
+			fr.overflow = fr.overflow[n:]
+			return n, io.ErrShortBuffer
+		}
+		fr.overflow = nil
+		return n, nil
+	}
+
+	// The Reader contract allows implementations to use all of dst[0:len(dst)] as scratch
+	// space, even if n < len(dst), but it does not allow implementations to use
+	// dst[len(dst):cap(dst)]. Slicing it up-front allows us to append to it without worrying
+	// about overwriting dst[len(dst):cap(dst)].
+	m := cbor.RawMessage(dst[0:0:len(dst)])
+	if err := fr.decoder.Decode(&m); err != nil {
+		return 0, err
+	}
+
+	if len(m) > len(dst) {
+		// The frame was too big; m has a newly-allocated underlying array to
+		// accommodate it.
+		fr.overflow = m[len(dst):]
+		return copy(dst, m), io.ErrShortBuffer
+	}
+
+	return len(m), nil
+}
+
+func (fr *frameReader) Close() error {
+	return fr.closer.Close()
+}
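
A sketch of consuming an RFC 8742 sequence through the framer; the writer side is a pass-through because each item self-delimits, so building the stream is plain concatenation (cborlib aliases the fxamacker package to avoid clashing with this one):

package main

import (
	"bytes"
	"fmt"
	"io"

	"k8s.io/apimachinery/pkg/runtime/serializer/cbor"

	cborlib "github.com/fxamacker/cbor/v2"
)

func main() {
	// A two-item CBOR sequence: frames are simply concatenated data items.
	var stream bytes.Buffer
	for _, v := range []string{"first", "second"} {
		b, err := cborlib.Marshal(v)
		if err != nil {
			panic(err)
		}
		stream.Write(b)
	}

	fr := cbor.NewFramer().NewFrameReader(io.NopCloser(&stream))
	defer fr.Close()

	buf := make([]byte, 64)
	for {
		// Each Read yields exactly one encoded data item.
		n, err := fr.Read(buf)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		var s string
		if err := cborlib.Unmarshal(buf[:n], &s); err != nil {
			panic(err)
		}
		fmt.Println(s) // first, then second
	}
}
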
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
index c669313844..5fae141518 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
@@ -105,7 +105,7 @@ var Encode = EncMode{
 var EncodeNondeterministic = EncMode{
 	delegate: func() cbor.UserBufferEncMode {
 		opts := Encode.options()
-		opts.Sort = cbor.SortNone // TODO: Use cbor.SortFastShuffle after bump to v2.7.0.
+		opts.Sort = cbor.SortFastShuffle
 		em, err := opts.UserBufferEncMode()
 		if err != nil {
 			panic(err)
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go
new file mode 100644
index 0000000000..09d1340f93
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go
@@ -0,0 +1,236 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cbor
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+var sharedTranscoders transcoders
+
+var rawTypeTranscodeFuncs = map[reflect.Type]func(reflect.Value) error{
+	reflect.TypeFor[runtime.RawExtension](): func(rv reflect.Value) error {
+		if !rv.CanAddr() {
+			return nil
+		}
+		re := rv.Addr().Interface().(*runtime.RawExtension)
+		if re.Raw == nil {
+			// When Raw is nil it encodes to null. Don't change nil Raw values during
+			// transcoding, they would have unmarshalled from JSON as nil too.
+			return nil
+		}
+		j, err := re.MarshalJSON()
+		if err != nil {
+			return fmt.Errorf("failed to transcode RawExtension to JSON: %w", err)
+		}
+		re.Raw = j
+		return nil
+	},
+	reflect.TypeFor[metav1.FieldsV1](): func(rv reflect.Value) error {
+		if !rv.CanAddr() {
+			return nil
+		}
+		fields := rv.Addr().Interface().(*metav1.FieldsV1)
+		if fields.Raw == nil {
+			// When Raw is nil it encodes to null. Don't change nil Raw values during
+			// transcoding, they would have unmarshalled from JSON as nil too.
+			return nil
+		}
+		j, err := fields.MarshalJSON()
+		if err != nil {
+			return fmt.Errorf("failed to transcode FieldsV1 to JSON: %w", err)
+		}
+		fields.Raw = j
+		return nil
+	},
+}
+
+func transcodeRawTypes(v interface{}) error {
+	if v == nil {
+		return nil
+	}
+
+	rv := reflect.ValueOf(v)
+	return sharedTranscoders.getTranscoder(rv.Type()).fn(rv)
+}
+
+type transcoder struct {
+	fn func(rv reflect.Value) error
+}
+
+var noop = transcoder{
+	fn: func(reflect.Value) error {
+		return nil
+	},
+}
+
+type transcoders struct {
+	lock sync.RWMutex
+	m    map[reflect.Type]**transcoder
+}
+
+func (ts *transcoders) getTranscoder(rt reflect.Type) transcoder {
+	ts.lock.RLock()
+	tpp, ok := ts.m[rt]
+	ts.lock.RUnlock()
+	if ok {
+		return **tpp
+	}
+
+	ts.lock.Lock()
+	defer ts.lock.Unlock()
+	tp := ts.getTranscoderLocked(rt)
+	return *tp
+}
+
+func (ts *transcoders) getTranscoderLocked(rt reflect.Type) *transcoder {
+	if tpp, ok := ts.m[rt]; ok {
+		// A transcoder for this type was cached while waiting to acquire the lock.
+		return *tpp
+	}
+
+	// Cache the transcoder now, before populating fn, so that circular references between types
+	// don't overflow the call stack.
+	t := new(transcoder)
+	if ts.m == nil {
+		ts.m = make(map[reflect.Type]**transcoder)
+	}
+	ts.m[rt] = &t
+
+	for rawType, fn := range rawTypeTranscodeFuncs {
+		if rt == rawType {
+			t = &transcoder{fn: fn}
+			return t
+		}
+	}
+
+	switch rt.Kind() {
+	case reflect.Array:
+		te := ts.getTranscoderLocked(rt.Elem())
+		rtlen := rt.Len()
+		if rtlen == 0 || te == &noop {
+			t = &noop
+			break
+		}
+		t.fn = func(rv reflect.Value) error {
+			for i := 0; i < rtlen; i++ {
+				if err := te.fn(rv.Index(i)); err != nil {
+					return err
+				}
+			}
+			return nil
+		}
+	case reflect.Interface:
+		// Any interface value might have a dynamic type involving RawExtension. It needs to
+		// be checked.
+		t.fn = func(rv reflect.Value) error {
+			if rv.IsNil() {
+				return nil
+			}
+			rv = rv.Elem()
+			// The interface element's type is dynamic so its transcoder can't be
+			// determined statically.
+			return ts.getTranscoder(rv.Type()).fn(rv)
+		}
+	case reflect.Map:
+		rtk := rt.Key()
+		tk := ts.getTranscoderLocked(rtk)
+		rte := rt.Elem()
+		te := ts.getTranscoderLocked(rte)
+		if tk == &noop && te == &noop {
+			t = &noop
+			break
+		}
+		t.fn = func(rv reflect.Value) error {
+			iter := rv.MapRange()
+			rvk := reflect.New(rtk).Elem()
+			rve := reflect.New(rte).Elem()
+			for iter.Next() {
+				rvk.SetIterKey(iter)
+				if err := tk.fn(rvk); err != nil {
+					return err
+				}
+				rve.SetIterValue(iter)
+				if err := te.fn(rve); err != nil {
+					return err
+				}
+			}
+			return nil
+		}
+	case reflect.Pointer:
+		te := ts.getTranscoderLocked(rt.Elem())
+		if te == &noop {
+			t = &noop
+			break
+		}
+		t.fn = func(rv reflect.Value) error {
+			if rv.IsNil() {
+				return nil
+			}
+			return te.fn(rv.Elem())
+		}
+	case reflect.Slice:
+		te := ts.getTranscoderLocked(rt.Elem())
+		if te == &noop {
+			t = &noop
+			break
+		}
+		t.fn = func(rv reflect.Value) error {
+			for i := 0; i < rv.Len(); i++ {
+				if err := te.fn(rv.Index(i)); err != nil {
+					return err
+				}
+			}
+			return nil
+		}
+	case reflect.Struct:
+		type fieldTranscoder struct {
+			Index      int
+			Transcoder *transcoder
+		}
+		var fieldTranscoders []fieldTranscoder
+		for i := 0; i < rt.NumField(); i++ {
+			f := rt.Field(i)
+			tf := ts.getTranscoderLocked(f.Type)
+			if tf == &noop {
+				continue
+			}
+			fieldTranscoders = append(fieldTranscoders, fieldTranscoder{Index: i, Transcoder: tf})
+		}
+		if len(fieldTranscoders) == 0 {
+			t = &noop
+			break
+		}
+		t.fn = func(rv reflect.Value) error {
+			for _, ft := range fieldTranscoders {
+				if err := ft.Transcoder.fn(rv.Field(ft.Index)); err != nil {
+					return err
+				}
+			}
+			return nil
+		}
+	default:
+		t = &noop
+	}
+
+	return t
+}
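
The transcoder cache above uses double-checked locking with a map of **transcoder so that a recursive type can find its own (still-unfinished) cache slot and not recurse forever. A generic sketch of the same pattern, with hypothetical names:

package main

import (
	"fmt"
	"reflect"
	"sync"
)

type entry struct{ name string }

// cache mirrors the transcoders pattern: a read-locked fast path, then a
// write-locked slow path that re-checks before computing. Storing **entry lets
// a recursive computation install its map slot before the value is finished,
// which is how raw.go avoids overflowing the stack on self-referential types.
type cache struct {
	lock sync.RWMutex
	m    map[reflect.Type]**entry
}

func (c *cache) get(rt reflect.Type) entry {
	c.lock.RLock()
	epp, ok := c.m[rt]
	c.lock.RUnlock()
	if ok {
		return **epp
	}

	c.lock.Lock()
	defer c.lock.Unlock()
	if epp, ok := c.m[rt]; ok {
		// Another goroutine populated the slot while we waited for the lock.
		return **epp
	}
	e := new(entry)
	if c.m == nil {
		c.m = make(map[reflect.Type]**entry)
	}
	c.m[rt] = &e // visible to recursive lookups before e is complete
	e.name = rt.String()
	return *e
}

func main() {
	var c cache
	fmt.Println(c.get(reflect.TypeOf(42)).name) // int
}
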
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
index ff98208420..77bb307452 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
@@ -17,9 +17,6 @@ limitations under the License.
 package serializer
 
 import (
-	"mime"
-	"strings"
-
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/runtime/serializer/json"
@@ -28,41 +25,26 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
 )
 
-// serializerExtensions are for serializers that are conditionally compiled in
-var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){}
-
-type serializerType struct {
-	AcceptContentTypes []string
-	ContentType        string
-	FileExtensions     []string
-	// EncodesAsText should be true if this content type can be represented safely in UTF-8
-	EncodesAsText bool
-
-	Serializer       runtime.Serializer
-	PrettySerializer runtime.Serializer
-	StrictSerializer runtime.Serializer
-
-	AcceptStreamContentTypes []string
-	StreamContentType        string
-
-	Framer           runtime.Framer
-	StreamSerializer runtime.Serializer
-}
-
-func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType {
+func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []runtime.SerializerInfo {
 	jsonSerializer := json.NewSerializerWithOptions(
 		mf, scheme, scheme,
 		json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict},
 	)
-	jsonSerializerType := serializerType{
-		AcceptContentTypes: []string{runtime.ContentTypeJSON},
-		ContentType:        runtime.ContentTypeJSON,
-		FileExtensions:     []string{"json"},
-		EncodesAsText:      true,
-		Serializer:         jsonSerializer,
-
-		Framer:           json.Framer,
-		StreamSerializer: jsonSerializer,
+	jsonSerializerType := runtime.SerializerInfo{
+		MediaType:        runtime.ContentTypeJSON,
+		MediaTypeType:    "application",
+		MediaTypeSubType: "json",
+		EncodesAsText:    true,
+		Serializer:       jsonSerializer,
+		StrictSerializer: json.NewSerializerWithOptions(
+			mf, scheme, scheme,
+			json.SerializerOptions{Yaml: false, Pretty: false, Strict: true},
+		),
+		StreamSerializer: &runtime.StreamSerializerInfo{
+			EncodesAsText: true,
+			Serializer:    jsonSerializer,
+			Framer:        json.Framer,
+		},
 	}
 	if options.Pretty {
 		jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions(
@@ -71,12 +53,6 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
 		)
 	}
 
-	strictJSONSerializer := json.NewSerializerWithOptions(
-		mf, scheme, scheme,
-		json.SerializerOptions{Yaml: false, Pretty: false, Strict: true},
-	)
-	jsonSerializerType.StrictSerializer = strictJSONSerializer
-
 	yamlSerializer := json.NewSerializerWithOptions(
 		mf, scheme, scheme,
 		json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict},
@@ -88,35 +64,35 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option
 	protoSerializer := protobuf.NewSerializer(scheme, scheme)
 	protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme)
 
-	serializers := []serializerType{
+	serializers := []runtime.SerializerInfo{
 		jsonSerializerType,
 		{
-			AcceptContentTypes: []string{runtime.ContentTypeYAML},
-			ContentType:        runtime.ContentTypeYAML,
-			FileExtensions:     []string{"yaml"},
-			EncodesAsText:      true,
-			Serializer:         yamlSerializer,
-			StrictSerializer:   strictYAMLSerializer,
+			MediaType:        runtime.ContentTypeYAML,
+			MediaTypeType:    "application",
+			MediaTypeSubType: "yaml",
+			EncodesAsText:    true,
+			Serializer:       yamlSerializer,
+			StrictSerializer: strictYAMLSerializer,
 		},
 		{
-			AcceptContentTypes: []string{runtime.ContentTypeProtobuf},
-			ContentType:        runtime.ContentTypeProtobuf,
-			FileExtensions:     []string{"pb"},
-			Serializer:         protoSerializer,
+			MediaType:        runtime.ContentTypeProtobuf,
+			MediaTypeType:    "application",
+			MediaTypeSubType: "vnd.kubernetes.protobuf",
+			Serializer:       protoSerializer,
 			// note, strict decoding is unsupported for protobuf,
 			// fall back to regular serializing
 			StrictSerializer: protoSerializer,
-
-			Framer:           protobuf.LengthDelimitedFramer,
-			StreamSerializer: protoRawSerializer,
+			StreamSerializer: &runtime.StreamSerializerInfo{
+				Serializer: protoRawSerializer,
+				Framer:     protobuf.LengthDelimitedFramer,
+			},
 		},
 	}
 
-	for _, fn := range serializerExtensions {
-		if serializer, ok := fn(scheme); ok {
-			serializers = append(serializers, serializer)
-		}
+	for _, f := range options.serializers {
+		serializers = append(serializers, f(scheme, scheme))
 	}
+
 	return serializers
 }
 
@@ -136,6 +112,8 @@ type CodecFactoryOptions struct {
 	Strict bool
 	// Pretty includes a pretty serializer along with the non-pretty one
 	Pretty bool
+
+	serializers []func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo
 }
 
 // CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it.
@@ -162,6 +140,13 @@ func DisableStrict(options *CodecFactoryOptions) {
 	options.Strict = false
 }
 
+// WithSerializer configures a serializer to be supported in addition to the default serializers.
+func WithSerializer(f func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo) CodecFactoryOptionsMutator {
+	return func(options *CodecFactoryOptions) {
+		options.serializers = append(options.serializers, f)
+	}
+}
+
 // NewCodecFactory provides methods for retrieving serializers for the supported wire formats
 // and conversion wrappers to define preferred internal and external versions. In the future,
 // as the internal version is used less, callers may instead use a defaulting serializer and
@@ -184,7 +169,7 @@ func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMuta
 }
 
 // newCodecFactory is a helper for testing that allows a different metafactory to be specified.
-func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory {
+func newCodecFactory(scheme *runtime.Scheme, serializers []runtime.SerializerInfo) CodecFactory {
 	decoders := make([]runtime.Decoder, 0, len(serializers))
 	var accepts []runtime.SerializerInfo
 	alreadyAccepted := make(map[string]struct{})
@@ -192,38 +177,20 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec
 	var legacySerializer runtime.Serializer
 	for _, d := range serializers {
 		decoders = append(decoders, d.Serializer)
-		for _, mediaType := range d.AcceptContentTypes {
-			if _, ok := alreadyAccepted[mediaType]; ok {
-				continue
-			}
-			alreadyAccepted[mediaType] = struct{}{}
-			info := runtime.SerializerInfo{
-				MediaType:        d.ContentType,
-				EncodesAsText:    d.EncodesAsText,
-				Serializer:       d.Serializer,
-				PrettySerializer: d.PrettySerializer,
-				StrictSerializer: d.StrictSerializer,
-			}
-
-			mediaType, _, err := mime.ParseMediaType(info.MediaType)
-			if err != nil {
-				panic(err)
-			}
-			parts := strings.SplitN(mediaType, "/", 2)
-			info.MediaTypeType = parts[0]
-			info.MediaTypeSubType = parts[1]
-
-			if d.StreamSerializer != nil {
-				info.StreamSerializer = &runtime.StreamSerializerInfo{
-					Serializer:    d.StreamSerializer,
-					EncodesAsText: d.EncodesAsText,
-					Framer:        d.Framer,
-				}
-			}
-			accepts = append(accepts, info)
-			if mediaType == runtime.ContentTypeJSON {
-				legacySerializer = d.Serializer
-			}
+		if _, ok := alreadyAccepted[d.MediaType]; ok {
+			continue
+		}
+		alreadyAccepted[d.MediaType] = struct{}{}
+
+		acceptedSerializerShallowCopy := d
+		if d.StreamSerializer != nil {
+			cloned := *d.StreamSerializer
+			acceptedSerializerShallowCopy.StreamSerializer = &cloned
+		}
+		accepts = append(accepts, acceptedSerializerShallowCopy)
+
+		if d.MediaType == runtime.ContentTypeJSON {
+			legacySerializer = d.Serializer
 		}
 	}
 	if legacySerializer == nil {
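
The new WithSerializer option is the hook that lets CBOR (or any other format) be registered without the removed serializerExtensions mechanism. A sketch, assuming the vendored cbor package:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apimachinery/pkg/runtime/serializer/cbor"
)

func main() {
	scheme := runtime.NewScheme()
	// cbor.NewSerializerInfo matches the func(ObjectCreater, ObjectTyper)
	// runtime.SerializerInfo signature that WithSerializer expects.
	factory := serializer.NewCodecFactory(scheme, serializer.WithSerializer(cbor.NewSerializerInfo))

	for _, info := range factory.SupportedMediaTypes() {
		fmt.Println(info.MediaType)
	}
	// Expected to include application/json, application/yaml,
	// application/vnd.kubernetes.protobuf, and application/cbor.
}
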
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
new file mode 100644
index 0000000000..971c46d496
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package streaming implements encoder and decoder for streams
+// of runtime.Objects over io.Writer/Readers.
+package streaming
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// Encoder is a runtime.Encoder on a stream.
+type Encoder interface {
+	// Encode will write the provided object to the stream or return an error. It obeys the same
+	// contract as runtime.VersionedEncoder.
+	Encode(obj runtime.Object) error
+}
+
+// Decoder is a runtime.Decoder from a stream.
+type Decoder interface {
+	// Decode will return io.EOF when no more objects are available.
+	Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error)
+	// Close closes the underlying stream.
+	Close() error
+}
+
+// Serializer is a factory for creating encoders and decoders that work over streams.
+type Serializer interface {
+	NewEncoder(w io.Writer) Encoder
+	NewDecoder(r io.ReadCloser) Decoder
+}
+
+type decoder struct {
+	reader    io.ReadCloser
+	decoder   runtime.Decoder
+	buf       []byte
+	maxBytes  int
+	resetRead bool
+}
+
+// NewDecoder creates a streaming decoder that reads object chunks from r and decodes them with d.
+// The reader is expected to return io.ErrShortBuffer if the provided buffer is not large enough to
+// read an entire object.
+func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
+	return &decoder{
+		reader:   r,
+		decoder:  d,
+		buf:      make([]byte, 1024),
+		maxBytes: 16 * 1024 * 1024,
+	}
+}
+
+var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size")
+
+// Decode reads the next object from the stream and decodes it.
+func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	base := 0
+	for {
+		n, err := d.reader.Read(d.buf[base:])
+		if err == io.ErrShortBuffer {
+			if n == 0 {
+				return nil, nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf))
+			}
+			if d.resetRead {
+				continue
+			}
+			// double the buffer size up to maxBytes
+			if len(d.buf) < d.maxBytes {
+				base += n
+				d.buf = append(d.buf, make([]byte, len(d.buf))...)
+				continue
+			}
+			// must read the rest of the frame (until we stop getting ErrShortBuffer)
+			d.resetRead = true
+			return nil, nil, ErrObjectTooLarge
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+		if d.resetRead {
+			// now that we have drained the large read, continue
+			d.resetRead = false
+			continue
+		}
+		base += n
+		break
+	}
+	return d.decoder.Decode(d.buf[:base], defaults, into)
+}
+
+func (d *decoder) Close() error {
+	return d.reader.Close()
+}
+
+type encoder struct {
+	writer  io.Writer
+	encoder runtime.Encoder
+	buf     *bytes.Buffer
+}
+
+// NewEncoder returns a new streaming encoder.
+func NewEncoder(w io.Writer, e runtime.Encoder) Encoder {
+	return &encoder{
+		writer:  w,
+		encoder: e,
+		buf:     &bytes.Buffer{},
+	}
+}
+
+// Encode writes the provided object to the nested writer.
+func (e *encoder) Encode(obj runtime.Object) error {
+	if err := e.encoder.Encode(obj, e.buf); err != nil {
+		return err
+	}
+	_, err := e.writer.Write(e.buf.Bytes())
+	e.buf.Reset()
+	return err
+}
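
A sketch pairing the streaming encoder/decoder with the CBOR framer and serializer over an in-memory pipe; Transcode(false) mirrors what NewSerializerInfo uses for its stream serializer:

package main

import (
	"fmt"
	"io"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/cbor"
	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
)

func main() {
	scheme := runtime.NewScheme()
	s := cbor.NewSerializer(scheme, scheme, cbor.Transcode(false))
	framer := cbor.NewFramer()

	pr, pw := io.Pipe()
	enc := streaming.NewEncoder(framer.NewFrameWriter(pw), s)
	dec := streaming.NewDecoder(framer.NewFrameReader(pr), s)
	defer dec.Close()

	go func() {
		obj := &unstructured.Unstructured{Object: map[string]interface{}{
			"apiVersion": "v1", "kind": "Status",
		}}
		if err := enc.Encode(obj); err != nil {
			panic(err)
		}
		pw.Close()
	}()

	out := &unstructured.Unstructured{}
	if _, _, err := dec.Decode(nil, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetKind()) // Status
}
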
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/types.go
index 1680c149f9..ca7b7cc2d4 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/types.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/runtime/types.go
@@ -43,10 +43,11 @@ type TypeMeta struct {
 }
 
 const (
-	ContentTypeJSON     string = "application/json"
-	ContentTypeYAML     string = "application/yaml"
-	ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
-	ContentTypeCBOR     string = "application/cbor"
+	ContentTypeJSON         string = "application/json"
+	ContentTypeYAML         string = "application/yaml"
+	ContentTypeProtobuf     string = "application/vnd.kubernetes.protobuf"
+	ContentTypeCBOR         string = "application/cbor"     // RFC 8949
+	ContentTypeCBORSequence string = "application/cbor-seq" // RFC 8742
 )
 
 // RawExtension is used to hold extensions in external versions.
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/types/patch.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/types/patch.go
index fe8ecaaffa..d338cf213d 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/types/patch.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/types/patch.go
@@ -25,5 +25,7 @@ const (
 	JSONPatchType           PatchType = "application/json-patch+json"
 	MergePatchType          PatchType = "application/merge-patch+json"
 	StrategicMergePatchType PatchType = "application/strategic-merge-patch+json"
-	ApplyPatchType          PatchType = "application/apply-patch+yaml"
+	ApplyPatchType          PatchType = ApplyYAMLPatchType
+	ApplyYAMLPatchType      PatchType = "application/apply-patch+yaml"
+	ApplyCBORPatchType      PatchType = "application/apply-patch+cbor"
 )
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go
new file mode 100644
index 0000000000..cf61ef76ae
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/dump/dump.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dump
+
+import (
+	"github.com/davecgh/go-spew/spew"
+)
+
+var prettyPrintConfig = &spew.ConfigState{
+	Indent:                  "  ",
+	DisableMethods:          true,
+	DisablePointerAddresses: true,
+	DisableCapacities:       true,
+}
+
+// The config MUST NOT be changed because that could change the result of a hash operation
+var prettyPrintConfigForHash = &spew.ConfigState{
+	Indent:                  " ",
+	SortKeys:                true,
+	DisableMethods:          true,
+	SpewKeys:                true,
+	DisablePointerAddresses: true,
+	DisableCapacities:       true,
+}
+
+// Pretty wraps spew.Sdump with indentation and disables methods like error() and String().
+// The output may change over time, so callers that need stable output should take more direct control.
+func Pretty(a interface{}) string {
+	return prettyPrintConfig.Sdump(a)
+}
+
+// ForHash keeps the original Spew.Sprintf format to ensure the same checksum
+func ForHash(a interface{}) string {
+	return prettyPrintConfigForHash.Sprintf("%#v", a)
+}
+
+// OneLine outputs the object in one line
+func OneLine(a interface{}) string {
+	return prettyPrintConfig.Sprintf("%#v", a)
+}
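
A usage sketch for the dump helpers; widget is a hypothetical type:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/dump"
)

type widget struct {
	Name  string
	Spans []int
}

func main() {
	w := widget{Name: "demo", Spans: []int{1, 2}}
	fmt.Println(dump.Pretty(w))  // multi-line, methods disabled
	fmt.Println(dump.OneLine(w)) // single-line %#v-style form
	// ForHash sorts and spews keys so the string is stable enough to checksum.
	fmt.Println(dump.ForHash(w))
}
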
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
index 978ffb3c3e..de540c82ff 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/managedfields/fieldmanager.go
@@ -19,11 +19,12 @@ package managedfields
 import (
 	"fmt"
 
+	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/managedfields/internal"
-	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
 )
 
 // FieldManager updates the managed fields and merges applied
@@ -32,7 +33,7 @@ type FieldManager = internal.FieldManager
 
 // NewDefaultFieldManager creates a new FieldManager that merges apply requests
 // and update managed fields for other types of requests.
-func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (*FieldManager, error) {
+func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (*FieldManager, error) {
 	f, err := internal.NewStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create field manager: %v", err)
@@ -43,7 +44,7 @@ func NewDefaultFieldManager(typeConverter TypeConverter, objectConverter runtime
 // NewDefaultCRDFieldManager creates a new FieldManager specifically for
 // CRDs. This allows for the possibility of fields which are not defined
 // in models, as well as having no models defined at all.
-func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ *FieldManager, err error) {
+func NewDefaultCRDFieldManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, hub schema.GroupVersion, subresource string, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (_ *FieldManager, err error) {
 	f, err := internal.NewCRDStructuredMergeManager(typeConverter, objectConverter, objectDefaulter, kind.GroupVersion(), hub, resetFields)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create field manager: %v", err)
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go
index 786ad991c2..3fe36edc9d 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go
@@ -19,13 +19,14 @@ package internal
 import (
 	"fmt"
 
+	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
+	"sigs.k8s.io/structured-merge-diff/v4/merge"
+	"sigs.k8s.io/structured-merge-diff/v4/typed"
+
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
-	"sigs.k8s.io/structured-merge-diff/v4/merge"
-	"sigs.k8s.io/structured-merge-diff/v4/typed"
 )
 
 type structuredMergeManager struct {
@@ -41,7 +42,7 @@ var _ Manager = &structuredMergeManager{}
 
 // NewStructuredMergeManager creates a new Manager that merges apply requests
 // and update managed fields for other types of requests.
-func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (Manager, error) {
+func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (Manager, error) {
 	if typeConverter == nil {
 		return nil, fmt.Errorf("typeconverter must not be nil")
 	}
@@ -52,8 +53,8 @@ func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runt
 		groupVersion:    gv,
 		hubVersion:      hub,
 		updater: merge.Updater{
-			Converter:     newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s
-			IgnoredFields: resetFields,
+			Converter:    newVersionConverter(typeConverter, objectConverter, hub), // This is the converter provided to SMD from k8s
+			IgnoreFilter: resetFields,
 		},
 	}, nil
 }
@@ -61,7 +62,7 @@ func NewStructuredMergeManager(typeConverter TypeConverter, objectConverter runt
 // NewCRDStructuredMergeManager creates a new Manager specifically for
 // CRDs. This allows for the possibility of fields which are not defined
 // in models, as well as having no models defined at all.
-func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]*fieldpath.Set) (_ Manager, err error) {
+func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectDefaulter runtime.ObjectDefaulter, gv schema.GroupVersion, hub schema.GroupVersion, resetFields map[fieldpath.APIVersion]fieldpath.Filter) (_ Manager, err error) {
 	return &structuredMergeManager{
 		typeConverter:   typeConverter,
 		objectConverter: objectConverter,
@@ -69,8 +70,8 @@ func NewCRDStructuredMergeManager(typeConverter TypeConverter, objectConverter r
 		groupVersion:    gv,
 		hubVersion:      hub,
 		updater: merge.Updater{
-			Converter:     newCRDVersionConverter(typeConverter, objectConverter, hub),
-			IgnoredFields: resetFields,
+			Converter:    newCRDVersionConverter(typeConverter, objectConverter, hub),
+			IgnoreFilter: resetFields,
 		},
 	}, nil
 }
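
For illustration, a minimal sketch of how a caller builds the resetFields argument after the IgnoredFields -> IgnoreFilter rename (hypothetical snippet; assumes fieldpath.NewExcludeSetFilter, the Filter constructor shipped with this structured-merge-diff bump):

package main

import (
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

func main() {
	// Previously callers passed the reset sets directly:
	//   resetFields := map[fieldpath.APIVersion]*fieldpath.Set{...}
	// With the new signature each set is wrapped in a Filter
	// (NewExcludeSetFilter is assumed here as the wrapping constructor).
	statusSet := fieldpath.NewSet(fieldpath.MakePathOrDie("status"))
	resetFields := map[fieldpath.APIVersion]fieldpath.Filter{
		"v1": fieldpath.NewExcludeSetFilter(statusSet),
	}
	_ = resetFields // would be passed to NewStructuredMergeManager
}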
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
index 4fe0c5eb25..df374949dd 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
@@ -45,7 +45,7 @@ var PanicHandlers = []func(context.Context, interface{}){logPanic}
 //
 // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
 //
-// TODO(pohly): logcheck:context // HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging.
+// Contextual logging: HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging.
 func HandleCrash(additionalHandlers ...func(interface{})) {
 	if r := recover(); r != nil {
 		additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers))
@@ -146,7 +146,7 @@ type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues
 // is preferable to logging the error - the default behavior is to log but the
 // errors may be sent to a remote server for analysis.
 //
-// TODO(pohly): logcheck:context // HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging.
+// Contextual logging: HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging.
 func HandleError(err error) {
 	// this is sometimes called with a nil error.  We probably shouldn't fail and should do nothing instead
 	if err == nil {
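
As a usage note, a minimal sketch of the contextual variant the updated comments point to (hypothetical worker; HandleCrashWithContext is the apimachinery function referenced above):

package main

import (
	"context"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func worker(ctx context.Context) {
	// Prefer the WithContext variant in code that supports contextual
	// logging, so the panic is reported through the logger in ctx.
	// By default the process still crashes after the panic is logged.
	defer utilruntime.HandleCrashWithContext(ctx)
	// ... work that might panic ...
}

func main() {
	worker(context.Background())
}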
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
index bc387d0116..f1634bc0df 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
@@ -220,26 +220,24 @@ func Forbidden(field *Path, detail string) *Error {
 	return &Error{ErrorTypeForbidden, field.String(), "", detail}
 }
 
-// TooLong returns a *Error indicating "too long".  This is used to
-// report that the given value is too long.  This is similar to
-// Invalid, but the returned error will not include the too-long
-// value.
+// TooLong returns a *Error indicating "too long".  This is used to report that
+// the given value is too long.  This is similar to Invalid, but the returned
+// error will not include the too-long value. If maxLength is negative, no maximum
+// length will be included in the message.  The value argument is not used.
 func TooLong(field *Path, value interface{}, maxLength int) *Error {
-	return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)}
-}
-
-// TooLongMaxLength returns a *Error indicating "too long".  This is used to
-// report that the given value is too long.  This is similar to
-// Invalid, but the returned error will not include the too-long
-// value. If maxLength is negative, no max length will be included in the message.
-func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error {
 	var msg string
 	if maxLength >= 0 {
-		msg = fmt.Sprintf("may not be longer than %d", maxLength)
+		msg = fmt.Sprintf("may not be more than %d bytes", maxLength)
 	} else {
 		msg = "value is too long"
 	}
-	return &Error{ErrorTypeTooLong, field.String(), value, msg}
+	return &Error{ErrorTypeTooLong, field.String(), "", msg}
+}
+
+// TooLongMaxLength returns a *Error indicating "too long".
+// Deprecated: Use TooLong instead.
+func TooLongMaxLength(field *Path, value interface{}, maxLength int) *Error {
+	return TooLong(field, "", maxLength)
 }
 
 // TooMany returns a *Error indicating "too many". This is used to
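
A minimal sketch of the consolidated TooLong behavior (hypothetical caller; expected output shown as a comment):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// The too-long value itself is never echoed back; only the limit
	// appears in the message, and the value argument is ignored.
	err := field.TooLong(field.NewPath("metadata", "name"), "a-very-long-name", 10)
	fmt.Println(err) // metadata.name: Too long: may not be more than 10 bytes
}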
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
index b32644902b..9bc393cf58 100644
--- a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
@@ -175,6 +175,8 @@ func IsValidLabelValue(value string) []string {
 }
 
 const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
+const dns1123LabelFmtWithUnderscore string = "_?[a-z0-9]([-_a-z0-9]*[a-z0-9])?"
+
 const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character"
 
 // DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123)
@@ -204,10 +206,14 @@ func IsDNS1123Label(value string) []string {
 const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*"
 const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
 
+const dns1123SubdomainFmtWithUnderscore string = dns1123LabelFmtWithUnderscore + "(\\." + dns1123LabelFmtWithUnderscore + ")*"
+const dns1123SubdomainErrorMsgFG string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '_', '-' or '.', and must start and end with an alphanumeric character"
+
 // DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123)
 const DNS1123SubdomainMaxLength int = 253
 
 var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$")
+var dns1123SubdomainRegexpWithUnderscore = regexp.MustCompile("^" + dns1123SubdomainFmtWithUnderscore + "$")
 
 // IsDNS1123Subdomain tests for a string that conforms to the definition of a
 // subdomain in DNS (RFC 1123).
@@ -222,6 +228,19 @@ func IsDNS1123Subdomain(value string) []string {
 	return errs
 }
 
+// IsDNS1123SubdomainWithUnderscore tests for a string that conforms to the definition of a
+// subdomain in DNS (RFC 1123), but allows the use of an underscore in the string
+func IsDNS1123SubdomainWithUnderscore(value string) []string {
+	var errs []string
+	if len(value) > DNS1123SubdomainMaxLength {
+		errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength))
+	}
+	if !dns1123SubdomainRegexpWithUnderscore.MatchString(value) {
+		errs = append(errs, RegexError(dns1123SubdomainErrorMsgFG, dns1123SubdomainFmt, "example.com"))
+	}
+	return errs
+}
+
 const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?"
 const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character"
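
A quick sketch of the new helper next to the strict check (hypothetical values; an empty slice means the input is valid):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// The underscore-tolerant variant accepts names the plain
	// RFC 1123 subdomain check rejects.
	fmt.Println(validation.IsDNS1123SubdomainWithUnderscore("snake_case.example.com")) // []
	fmt.Println(len(validation.IsDNS1123Subdomain("snake_case.example.com")) > 0)      // true
}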
 
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/version/doc.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/version/doc.go
new file mode 100644
index 0000000000..5b2b22b6d0
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/version/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package version provides utilities for version number comparisons
+package version // import "k8s.io/apimachinery/pkg/util/version"
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/version/version.go
new file mode 100644
index 0000000000..b7812ff2d1
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/version/version.go
@@ -0,0 +1,484 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	apimachineryversion "k8s.io/apimachinery/pkg/version"
+)
+
+// Version is an opaque representation of a version number
+type Version struct {
+	components    []uint
+	semver        bool
+	preRelease    string
+	buildMetadata string
+	info          apimachineryversion.Info
+}
+
+var (
+	// versionMatchRE splits a version string into numeric and "extra" parts
+	versionMatchRE = regexp.MustCompile(`^\s*v?([0-9]+(?:\.[0-9]+)*)(.*)*$`)
+	// extraMatchRE splits the "extra" part of versionMatchRE into semver pre-release and build metadata; it does not validate the "no leading zeroes" constraint for pre-release
+	extraMatchRE = regexp.MustCompile(`^(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?\s*$`)
+)
+
+func parse(str string, semver bool) (*Version, error) {
+	parts := versionMatchRE.FindStringSubmatch(str)
+	if parts == nil {
+		return nil, fmt.Errorf("could not parse %q as version", str)
+	}
+	numbers, extra := parts[1], parts[2]
+
+	components := strings.Split(numbers, ".")
+	if (semver && len(components) != 3) || (!semver && len(components) < 2) {
+		return nil, fmt.Errorf("illegal version string %q", str)
+	}
+
+	v := &Version{
+		components: make([]uint, len(components)),
+		semver:     semver,
+	}
+	for i, comp := range components {
+		if (i == 0 || semver) && strings.HasPrefix(comp, "0") && comp != "0" {
+			return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str)
+		}
+		num, err := strconv.ParseUint(comp, 10, 0)
+		if err != nil {
+			return nil, fmt.Errorf("illegal non-numeric version component %q in %q: %v", comp, str, err)
+		}
+		v.components[i] = uint(num)
+	}
+
+	if semver && extra != "" {
+		extraParts := extraMatchRE.FindStringSubmatch(extra)
+		if extraParts == nil {
+			return nil, fmt.Errorf("could not parse pre-release/metadata (%s) in version %q", extra, str)
+		}
+		v.preRelease, v.buildMetadata = extraParts[1], extraParts[2]
+
+		for _, comp := range strings.Split(v.preRelease, ".") {
+			if _, err := strconv.ParseUint(comp, 10, 0); err == nil {
+				if strings.HasPrefix(comp, "0") && comp != "0" {
+					return nil, fmt.Errorf("illegal zero-prefixed version component %q in %q", comp, str)
+				}
+			}
+		}
+	}
+
+	return v, nil
+}
+
+// HighestSupportedVersion returns the highest supported version
+// This function assumes that the highest supported version must be v1.x.
+func HighestSupportedVersion(versions []string) (*Version, error) {
+	if len(versions) == 0 {
+		return nil, errors.New("empty array for supported versions")
+	}
+
+	var (
+		highestSupportedVersion *Version
+		theErr                  error
+	)
+
+	for i := len(versions) - 1; i >= 0; i-- {
+		currentHighestVer, err := ParseGeneric(versions[i])
+		if err != nil {
+			theErr = err
+			continue
+		}
+
+		if currentHighestVer.Major() > 1 {
+			continue
+		}
+
+		if highestSupportedVersion == nil || highestSupportedVersion.LessThan(currentHighestVer) {
+			highestSupportedVersion = currentHighestVer
+		}
+	}
+
+	if highestSupportedVersion == nil {
+		return nil, fmt.Errorf(
+			"could not find a highest supported version from versions (%v) reported: %+v",
+			versions, theErr)
+	}
+
+	if highestSupportedVersion.Major() != 1 {
+		return nil, fmt.Errorf("highest supported version reported is %v, must be v1.x", highestSupportedVersion)
+	}
+
+	return highestSupportedVersion, nil
+}
+
+// ParseGeneric parses a "generic" version string. The version string must consist of two
+// or more dot-separated numeric fields (the first of which can't have leading zeroes),
+// followed by arbitrary uninterpreted data (which need not be separated from the final
+// numeric field by punctuation). For convenience, leading and trailing whitespace is
+// ignored, and the version can be preceded by the letter "v". See also ParseSemantic.
+func ParseGeneric(str string) (*Version, error) {
+	return parse(str, false)
+}
+
+// MustParseGeneric is like ParseGeneric except that it panics on error
+func MustParseGeneric(str string) *Version {
+	v, err := ParseGeneric(str)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Parse tries ParseSemantic first to keep more information.
+// If ParseSemantic fails, it falls back to ParseGeneric.
+func Parse(str string) (*Version, error) {
+	v, err := parse(str, true)
+	if err != nil {
+		return parse(str, false)
+	}
+	return v, err
+}
+
+// MustParse is like Parse except that it panics on error
+func MustParse(str string) *Version {
+	v, err := Parse(str)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// ParseMajorMinor parses a "generic" version string and returns a version with the major and minor version.
+func ParseMajorMinor(str string) (*Version, error) {
+	v, err := ParseGeneric(str)
+	if err != nil {
+		return nil, err
+	}
+	return MajorMinor(v.Major(), v.Minor()), nil
+}
+
+// MustParseMajorMinor is like ParseMajorMinor except that it panics on error
+func MustParseMajorMinor(str string) *Version {
+	v, err := ParseMajorMinor(str)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// ParseSemantic parses a version string that exactly obeys the syntax and semantics of
+// the "Semantic Versioning" specification (http://semver.org/) (although it ignores
+// leading and trailing whitespace, and allows the version to be preceded by "v"). For
+// version strings that are not guaranteed to obey the Semantic Versioning syntax, use
+// ParseGeneric.
+func ParseSemantic(str string) (*Version, error) {
+	return parse(str, true)
+}
+
+// MustParseSemantic is like ParseSemantic except that it panics on error
+func MustParseSemantic(str string) *Version {
+	v, err := ParseSemantic(str)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// MajorMinor returns a version with the provided major and minor version.
+func MajorMinor(major, minor uint) *Version {
+	return &Version{components: []uint{major, minor}}
+}
+
+// Major returns the major release number
+func (v *Version) Major() uint {
+	return v.components[0]
+}
+
+// Minor returns the minor release number
+func (v *Version) Minor() uint {
+	return v.components[1]
+}
+
+// Patch returns the patch release number if v is a Semantic Version, or 0
+func (v *Version) Patch() uint {
+	if len(v.components) < 3 {
+		return 0
+	}
+	return v.components[2]
+}
+
+// BuildMetadata returns the build metadata, if v is a Semantic Version, or ""
+func (v *Version) BuildMetadata() string {
+	return v.buildMetadata
+}
+
+// PreRelease returns the prerelease metadata, if v is a Semantic Version, or ""
+func (v *Version) PreRelease() string {
+	return v.preRelease
+}
+
+// Components returns the version number components
+func (v *Version) Components() []uint {
+	return v.components
+}
+
+// WithMajor returns copy of the version object with requested major number
+func (v *Version) WithMajor(major uint) *Version {
+	result := *v
+	result.components = []uint{major, v.Minor(), v.Patch()}
+	return &result
+}
+
+// WithMinor returns copy of the version object with requested minor number
+func (v *Version) WithMinor(minor uint) *Version {
+	result := *v
+	result.components = []uint{v.Major(), minor, v.Patch()}
+	return &result
+}
+
+// OffsetMinor returns the version with the given offset applied to the original minor, with the same major and no patch.
+// If -offset >= current minor, the minor would be 0.
+func (v *Version) OffsetMinor(offset int) *Version {
+	var minor uint
+	if offset >= 0 {
+		minor = v.Minor() + uint(offset)
+	} else {
+		diff := uint(-offset)
+		if diff < v.Minor() {
+			minor = v.Minor() - diff
+		}
+	}
+	return MajorMinor(v.Major(), minor)
+}
+
+// SubtractMinor returns the version diff minor versions back, with the same major and no patch.
+// If diff >= current minor, the minor would be 0.
+func (v *Version) SubtractMinor(diff uint) *Version {
+	return v.OffsetMinor(-int(diff))
+}
+
+// AddMinor returns the version diff minor versions forward, with the same major and no patch.
+func (v *Version) AddMinor(diff uint) *Version {
+	return v.OffsetMinor(int(diff))
+}
+
+// WithPatch returns copy of the version object with requested patch number
+func (v *Version) WithPatch(patch uint) *Version {
+	result := *v
+	result.components = []uint{v.Major(), v.Minor(), patch}
+	return &result
+}
+
+// WithPreRelease returns copy of the version object with requested prerelease
+func (v *Version) WithPreRelease(preRelease string) *Version {
+	if len(preRelease) == 0 {
+		return v
+	}
+	result := *v
+	result.components = []uint{v.Major(), v.Minor(), v.Patch()}
+	result.preRelease = preRelease
+	return &result
+}
+
+// WithBuildMetadata returns copy of the version object with requested buildMetadata
+func (v *Version) WithBuildMetadata(buildMetadata string) *Version {
+	result := *v
+	result.components = []uint{v.Major(), v.Minor(), v.Patch()}
+	result.buildMetadata = buildMetadata
+	return &result
+}
+
+// String converts a Version back to a string; note that for versions parsed with
+// ParseGeneric, this will not include the trailing uninterpreted portion of the version
+// number.
+func (v *Version) String() string {
+	if v == nil {
+		return ""
+	}
+	var buffer bytes.Buffer
+
+	for i, comp := range v.components {
+		if i > 0 {
+			buffer.WriteString(".")
+		}
+		buffer.WriteString(fmt.Sprintf("%d", comp))
+	}
+	if v.preRelease != "" {
+		buffer.WriteString("-")
+		buffer.WriteString(v.preRelease)
+	}
+	if v.buildMetadata != "" {
+		buffer.WriteString("+")
+		buffer.WriteString(v.buildMetadata)
+	}
+
+	return buffer.String()
+}
+
+// compareInternal returns -1 if v is less than other, 1 if it is greater than other, or 0
+// if they are equal
+func (v *Version) compareInternal(other *Version) int {
+
+	vLen := len(v.components)
+	oLen := len(other.components)
+	for i := 0; i < vLen && i < oLen; i++ {
+		switch {
+		case other.components[i] < v.components[i]:
+			return 1
+		case other.components[i] > v.components[i]:
+			return -1
+		}
+	}
+
+	// If components are common but one has more items and they are not zeros, it is bigger
+	switch {
+	case oLen < vLen && !onlyZeros(v.components[oLen:]):
+		return 1
+	case oLen > vLen && !onlyZeros(other.components[vLen:]):
+		return -1
+	}
+
+	if !v.semver || !other.semver {
+		return 0
+	}
+
+	switch {
+	case v.preRelease == "" && other.preRelease != "":
+		return 1
+	case v.preRelease != "" && other.preRelease == "":
+		return -1
+	case v.preRelease == other.preRelease: // includes case where both are ""
+		return 0
+	}
+
+	vPR := strings.Split(v.preRelease, ".")
+	oPR := strings.Split(other.preRelease, ".")
+	for i := 0; i < len(vPR) && i < len(oPR); i++ {
+		vNum, err := strconv.ParseUint(vPR[i], 10, 0)
+		if err == nil {
+			oNum, err := strconv.ParseUint(oPR[i], 10, 0)
+			if err == nil {
+				switch {
+				case oNum < vNum:
+					return 1
+				case oNum > vNum:
+					return -1
+				default:
+					continue
+				}
+			}
+		}
+		if oPR[i] < vPR[i] {
+			return 1
+		} else if oPR[i] > vPR[i] {
+			return -1
+		}
+	}
+
+	switch {
+	case len(oPR) < len(vPR):
+		return 1
+	case len(oPR) > len(vPR):
+		return -1
+	}
+
+	return 0
+}
+
+// onlyZeros returns false if the array contains any non-zero element
+func onlyZeros(array []uint) bool {
+	for _, num := range array {
+		if num != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// EqualTo tests if a version is equal to a given version.
+func (v *Version) EqualTo(other *Version) bool {
+	if v == nil {
+		return other == nil
+	}
+	if other == nil {
+		return false
+	}
+	return v.compareInternal(other) == 0
+}
+
+// AtLeast tests if a version is at least equal to a given minimum version. If both
+// Versions are Semantic Versions, this will use the Semantic Version comparison
+// algorithm. Otherwise, it will compare only the numeric components, with non-present
+// components being considered "0" (ie, "1.4" is equal to "1.4.0").
+func (v *Version) AtLeast(min *Version) bool {
+	return v.compareInternal(min) != -1
+}
+
+// LessThan tests if a version is less than a given version. (It is exactly the opposite
+// of AtLeast, for situations where asking "is v too old?" makes more sense than asking
+// "is v new enough?".)
+func (v *Version) LessThan(other *Version) bool {
+	return v.compareInternal(other) == -1
+}
+
+// GreaterThan tests if a version is greater than a given version.
+func (v *Version) GreaterThan(other *Version) bool {
+	return v.compareInternal(other) == 1
+}
+
+// Compare compares v against a version string (which will be parsed as either Semantic
+// or non-Semantic depending on v). On success it returns -1 if v is less than other, 1 if
+// it is greater than other, or 0 if they are equal.
+func (v *Version) Compare(other string) (int, error) {
+	ov, err := parse(other, v.semver)
+	if err != nil {
+		return 0, err
+	}
+	return v.compareInternal(ov), nil
+}
+
+// WithInfo returns copy of the version object with requested info
+func (v *Version) WithInfo(info apimachineryversion.Info) *Version {
+	result := *v
+	result.info = info
+	return &result
+}
+
+func (v *Version) Info() *apimachineryversion.Info {
+	if v == nil {
+		return nil
+	}
+	// in case info is empty, or the major and minor in info are different from the actual major and minor
+	v.info.Major = itoa(v.Major())
+	v.info.Minor = itoa(v.Minor())
+	if v.info.GitVersion == "" {
+		v.info.GitVersion = v.String()
+	}
+	return &v.info
+}
+
+func itoa(i uint) string {
+	if i == 0 {
+		return ""
+	}
+	return strconv.Itoa(int(i))
+}
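
A short sketch of the parsing entry points in this new file (hypothetical version strings):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	// Strict semver: exactly three components plus optional
	// pre-release and build metadata.
	v := version.MustParseSemantic("1.29.3-rc.1+build.7")
	fmt.Println(v.Major(), v.Minor(), v.Patch())   // 1 29 3
	fmt.Println(v.PreRelease(), v.BuildMetadata()) // rc.1 build.7

	// Generic parsing tolerates trailing data; missing components
	// compare as zero, so 1.21 equals 1.21.0.
	g := version.MustParseGeneric("v1.21.0-alpha.3.235+9a03670d4e3e2a")
	fmt.Println(g.AtLeast(version.MajorMinor(1, 21))) // true
}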
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go
new file mode 100644
index 0000000000..4187619256
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go
@@ -0,0 +1,502 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package wait
+
+import (
+	"context"
+	"math"
+	"sync"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/utils/clock"
+)
+
+// Backoff holds parameters applied to a Backoff function.
+type Backoff struct {
+	// The initial duration.
+	Duration time.Duration
+	// Duration is multiplied by factor each iteration, if factor is not zero
+	// and the limits imposed by Steps and Cap have not been reached.
+	// Should not be negative.
+	// The jitter does not contribute to the updates to the duration parameter.
+	Factor float64
+	// The sleep at each iteration is the duration plus an additional
+	// amount chosen uniformly at random from the interval between
+	// zero and `jitter*duration`.
+	Jitter float64
+	// The remaining number of iterations in which the duration
+	// parameter may change (but progress can be stopped earlier by
+	// hitting the cap). If not positive, the duration is not
+	// changed. Used for exponential backoff in combination with
+	// Factor and Cap.
+	Steps int
+	// A limit on revised values of the duration parameter. If a
+	// multiplication by the factor parameter would make the duration
+	// exceed the cap then the duration is set to the cap and the
+	// steps parameter is set to zero.
+	Cap time.Duration
+}
+
+// Step returns an amount of time to sleep determined by the original
+// Duration and Jitter. The backoff is mutated to update its Steps and
+// Duration. A nil Backoff always has a zero-duration step.
+func (b *Backoff) Step() time.Duration {
+	if b == nil {
+		return 0
+	}
+	var nextDuration time.Duration
+	nextDuration, b.Duration, b.Steps = delay(b.Steps, b.Duration, b.Cap, b.Factor, b.Jitter)
+	return nextDuration
+}
+
+// DelayFunc returns a function that will compute the next interval to
+// wait given the arguments in b. It does not mutate the original backoff,
+// but the returned function is safe to use only from a single goroutine.
+func (b Backoff) DelayFunc() DelayFunc {
+	steps := b.Steps
+	duration := b.Duration
+	cap := b.Cap
+	factor := b.Factor
+	jitter := b.Jitter
+
+	return func() time.Duration {
+		var nextDuration time.Duration
+		// jitter is applied per step and is not cumulative over multiple steps
+		nextDuration, duration, steps = delay(steps, duration, cap, factor, jitter)
+		return nextDuration
+	}
+}
+
+// Timer returns a timer implementation appropriate to this backoff's parameters
+// for use with wait functions.
+func (b Backoff) Timer() Timer {
+	if b.Steps > 1 || b.Jitter != 0 {
+		return &variableTimer{new: internalClock.NewTimer, fn: b.DelayFunc()}
+	}
+	if b.Duration > 0 {
+		return &fixedTimer{new: internalClock.NewTicker, interval: b.Duration}
+	}
+	return newNoopTimer()
+}
+
+// delay implements the core delay algorithm used in this package.
+func delay(steps int, duration, cap time.Duration, factor, jitter float64) (_ time.Duration, next time.Duration, nextSteps int) {
+	// when steps is non-positive, do not alter the base duration
+	if steps < 1 {
+		if jitter > 0 {
+			return Jitter(duration, jitter), duration, 0
+		}
+		return duration, duration, 0
+	}
+	steps--
+
+	// calculate the next step's interval
+	if factor != 0 {
+		next = time.Duration(float64(duration) * factor)
+		if cap > 0 && next > cap {
+			next = cap
+			steps = 0
+		}
+	} else {
+		next = duration
+	}
+
+	// add jitter for this step
+	if jitter > 0 {
+		duration = Jitter(duration, jitter)
+	}
+
+	return duration, next, steps
+
+}
+
+// DelayWithReset returns a DelayFunc that will return the appropriate next interval to
+// wait. Every resetInterval the backoff parameters are reset to their initial state.
+// This method is safe to invoke from multiple goroutines, but all calls will advance
+// the backoff state when Factor is set. If Factor is zero, this method is the same as
+// invoking b.DelayFunc() since Steps has no impact without Factor. If resetInterval is
+// zero, no backoff will be performed; this is equivalent to calling DelayFunc with a
+// zero Factor and Steps.
+func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) DelayFunc {
+	if b.Factor <= 0 {
+		return b.DelayFunc()
+	}
+	if resetInterval <= 0 {
+		b.Steps = 0
+		b.Factor = 0
+		return b.DelayFunc()
+	}
+	return (&backoffManager{
+		backoff:        b,
+		initialBackoff: b,
+		resetInterval:  resetInterval,
+
+		clock:     c,
+		lastStart: c.Now(),
+		timer:     nil,
+	}).Step
+}
+
+// Until loops until stop channel is closed, running f every period.
+//
+// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
+// with sliding = true (which means the timer for period starts after the f
+// completes).
+func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
+	JitterUntil(f, period, 0.0, true, stopCh)
+}
+
+// UntilWithContext loops until context is done, running f every period.
+//
+// UntilWithContext is syntactic sugar on top of JitterUntilWithContext
+// with zero jitter factor and with sliding = true (which means the timer
+// for period starts after the f completes).
+func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
+	JitterUntilWithContext(ctx, f, period, 0.0, true)
+}
+
+// NonSlidingUntil loops until stop channel is closed, running f every
+// period.
+//
+// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
+// factor, with sliding = false (meaning the timer for period starts at the same
+// time as the function starts).
+func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
+	JitterUntil(f, period, 0.0, false, stopCh)
+}
+
+// NonSlidingUntilWithContext loops until context is done, running f every
+// period.
+//
+// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext
+// with zero jitter factor, with sliding = false (meaning the timer for period
+// starts at the same time as the function starts).
+func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) {
+	JitterUntilWithContext(ctx, f, period, 0.0, false)
+}
+
+// JitterUntil loops until stop channel is closed, running f every period.
+//
+// If jitterFactor is positive, the period is jittered before every run of f.
+// If jitterFactor is not positive, the period is unchanged and not jittered.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+//
+// Close stopCh to stop. f may not be invoked if stop channel is already
+// closed. Pass NeverStop if you don't want it to stop.
+func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
+	BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh)
+}
+
+// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
+	var t clock.Timer
+	for {
+		select {
+		case <-stopCh:
+			return
+		default:
+		}
+
+		if !sliding {
+			t = backoff.Backoff()
+		}
+
+		func() {
+			defer runtime.HandleCrash()
+			f()
+		}()
+
+		if sliding {
+			t = backoff.Backoff()
+		}
+
+		// NOTE: because there is no priority selection in golang
+		// it is possible for this to race, meaning we could
+		// trigger t.C and stopCh, and t.C select falls through.
+		// In order to mitigate we re-check stopCh at the beginning
+		// of every loop to prevent extra executions of f().
+		select {
+		case <-stopCh:
+			if !t.Stop() {
+				<-t.C()
+			}
+			return
+		case <-t.C():
+		}
+	}
+}
+
+// JitterUntilWithContext loops until context is done, running f every period.
+//
+// If jitterFactor is positive, the period is jittered before every run of f.
+// If jitterFactor is not positive, the period is unchanged and not jittered.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+//
+// Cancel context to stop. f may not be invoked if context is already expired.
+func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) {
+	JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done())
+}
+
+// backoffManager provides simple backoff behavior in a threadsafe manner to a caller.
+type backoffManager struct {
+	backoff        Backoff
+	initialBackoff Backoff
+	resetInterval  time.Duration
+
+	clock clock.Clock
+
+	lock      sync.Mutex
+	lastStart time.Time
+	timer     clock.Timer
+}
+
+// Step returns the expected next duration to wait.
+func (b *backoffManager) Step() time.Duration {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	switch {
+	case b.resetInterval == 0:
+		b.backoff = b.initialBackoff
+	case b.clock.Now().Sub(b.lastStart) > b.resetInterval:
+		b.backoff = b.initialBackoff
+		b.lastStart = b.clock.Now()
+	}
+	return b.backoff.Step()
+}
+
+// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer
+// for exponential backoff. The returned timer must be drained before calling Backoff() the second
+// time.
+func (b *backoffManager) Backoff() clock.Timer {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+	if b.timer == nil {
+		b.timer = b.clock.NewTimer(b.Step())
+	} else {
+		b.timer.Reset(b.Step())
+	}
+	return b.timer
+}
+
+// Timer returns a new Timer instance that shares the clock and the reset behavior with all other
+// timers.
+func (b *backoffManager) Timer() Timer {
+	return DelayFunc(b.Step).Timer(b.clock)
+}
+
+// BackoffManager manages backoff with a particular scheme based on its underlying implementation.
+type BackoffManager interface {
+	// Backoff returns a shared clock.Timer that is Reset on every invocation. This method is not
+	// safe for use from multiple threads. It returns a timer for backoff, and caller shall backoff
+	// until Timer.C() drains. If the second Backoff() is called before the timer from the first
+	// Backoff() call finishes, the first timer will NOT be drained and result in undetermined
+	// behavior.
+	Backoff() clock.Timer
+}
+
+// Deprecated: Will be removed when the legacy polling functions are removed.
+type exponentialBackoffManagerImpl struct {
+	backoff              *Backoff
+	backoffTimer         clock.Timer
+	lastBackoffStart     time.Time
+	initialBackoff       time.Duration
+	backoffResetDuration time.Duration
+	clock                clock.Clock
+}
+
+// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and
+// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset.
+// This backoff manager is used to reduce load during upstream unhealthiness.
+//
+// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a
+// Backoff struct, use DelayWithReset() to get a DelayFunc that periodically resets itself, and then
+// invoke Timer() when calling wait.BackoffUntil.
+//
+// Instead of:
+//
+//	bm := wait.NewExponentialBackoffManager(init, max, reset, factor, jitter, clock)
+//	...
+//	wait.BackoffUntil(..., bm.Backoff, ...)
+//
+// Use:
+//
+//	delayFn := wait.Backoff{
+//	  Duration: init,
+//	  Cap:      max,
+//	  Steps:    int(math.Ceil(float64(max) / float64(init))), // now a required argument
+//	  Factor:   factor,
+//	  Jitter:   jitter,
+//	}.DelayWithReset(reset, clock)
+//	wait.BackoffUntil(..., delayFn.Timer(), ...)
+func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager {
+	return &exponentialBackoffManagerImpl{
+		backoff: &Backoff{
+			Duration: initBackoff,
+			Factor:   backoffFactor,
+			Jitter:   jitter,
+
+			// the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not
+			// what we ideally need here, we set it to max int and assume we will never use up the steps
+			Steps: math.MaxInt32,
+			Cap:   maxBackoff,
+		},
+		backoffTimer:         nil,
+		initialBackoff:       initBackoff,
+		lastBackoffStart:     c.Now(),
+		backoffResetDuration: resetDuration,
+		clock:                c,
+	}
+}
+
+func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
+	if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration {
+		b.backoff.Steps = math.MaxInt32
+		b.backoff.Duration = b.initialBackoff
+	}
+	b.lastBackoffStart = b.clock.Now()
+	return b.backoff.Step()
+}
+
+// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff.
+// The returned timer must be drained before calling Backoff() the second time
+func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
+	if b.backoffTimer == nil {
+		b.backoffTimer = b.clock.NewTimer(b.getNextBackoff())
+	} else {
+		b.backoffTimer.Reset(b.getNextBackoff())
+	}
+	return b.backoffTimer
+}
+
+// Deprecated: Will be removed when the legacy polling functions are removed.
+type jitteredBackoffManagerImpl struct {
+	clock        clock.Clock
+	duration     time.Duration
+	jitter       float64
+	backoffTimer clock.Timer
+}
+
+// NewJitteredBackoffManager returns a BackoffManager that backs off with the given duration plus the given jitter. If the jitter
+// is negative, backoff will not be jittered.
+//
+// Deprecated: Will be removed when the legacy Poll methods are removed. Callers should construct a
+// Backoff struct and invoke Timer() when calling wait.BackoffUntil.
+//
+// Instead of:
+//
+//	bm := wait.NewJitteredBackoffManager(duration, jitter, clock)
+//	...
+//	wait.BackoffUntil(..., bm.Backoff, ...)
+//
+// Use:
+//
+//	wait.BackoffUntil(..., wait.Backoff{Duration: duration, Jitter: jitter}.Timer(), ...)
+func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager {
+	return &jitteredBackoffManagerImpl{
+		clock:        c,
+		duration:     duration,
+		jitter:       jitter,
+		backoffTimer: nil,
+	}
+}
+
+func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
+	jitteredPeriod := j.duration
+	if j.jitter > 0.0 {
+		jitteredPeriod = Jitter(j.duration, j.jitter)
+	}
+	return jitteredPeriod
+}
+
+// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff.
+// The returned timer must be drained before calling Backoff() the second time
+func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer {
+	backoff := j.getNextBackoff()
+	if j.backoffTimer == nil {
+		j.backoffTimer = j.clock.NewTimer(backoff)
+	} else {
+		j.backoffTimer.Reset(backoff)
+	}
+	return j.backoffTimer
+}
+
+// ExponentialBackoff repeats a condition check with exponential backoff.
+//
+// It repeatedly checks the condition and then sleeps, using `backoff.Step()`
+// to determine the length of the sleep and adjust Duration and Steps.
+// Stops and returns as soon as:
+// 1. the condition check returns true or an error,
+// 2. `backoff.Steps` checks of the condition have been done, or
+// 3. a sleep truncated by the cap on duration has been completed.
+// In case (1) the returned error is what the condition function returned.
+// In all other cases, ErrWaitTimeout is returned.
+//
+// Since backoffs are often subject to cancellation, we recommend using
+// ExponentialBackoffWithContext and passing a context to the method.
+func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
+	for backoff.Steps > 0 {
+		if ok, err := runConditionWithCrashProtection(condition); err != nil || ok {
+			return err
+		}
+		if backoff.Steps == 1 {
+			break
+		}
+		time.Sleep(backoff.Step())
+	}
+	return ErrWaitTimeout
+}
+
+// ExponentialBackoffWithContext repeats a condition check with exponential backoff.
+// It immediately returns an error if the condition returns an error, the context is cancelled
+// or hits the deadline, or if the maximum attempts defined in backoff is exceeded (ErrWaitTimeout).
+// If an error is returned by the condition the backoff stops immediately. The condition will
+// never be invoked more than backoff.Steps times.
+func ExponentialBackoffWithContext(ctx context.Context, backoff Backoff, condition ConditionWithContextFunc) error {
+	for backoff.Steps > 0 {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		if ok, err := runConditionWithCrashProtectionWithContext(ctx, condition); err != nil || ok {
+			return err
+		}
+
+		if backoff.Steps == 1 {
+			break
+		}
+
+		waitBeforeRetry := backoff.Step()
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(waitBeforeRetry):
+		}
+	}
+
+	return ErrWaitTimeout
+}
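
A small sketch of how Step advances the exported Backoff parameters above (hypothetical values; no jitter, so the output is deterministic):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	b := wait.Backoff{
		Duration: 100 * time.Millisecond,
		Factor:   2.0,
		Steps:    4,
		Cap:      500 * time.Millisecond,
	}
	// Each call returns the current delay and mutates b; once the cap
	// is hit the remaining steps drop to zero and the delay stays flat.
	for i := 0; i < 5; i++ {
		fmt.Println(b.Step())
	}
	// Output: 100ms 200ms 400ms 500ms 500ms
}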
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go
new file mode 100644
index 0000000000..1d3dcaa74e
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/delay.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package wait
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"k8s.io/utils/clock"
+)
+
+// DelayFunc returns the next time interval to wait.
+type DelayFunc func() time.Duration
+
+// Timer takes an arbitrary delay function and returns a timer that can handle arbitrary interval changes.
+// Use Backoff{...}.Timer() for simple delays and more efficient timers.
+func (fn DelayFunc) Timer(c clock.Clock) Timer {
+	return &variableTimer{fn: fn, new: c.NewTimer}
+}
+
+// Until takes an arbitrary delay function and runs until cancelled or the condition indicates exit. This
+// offers all of the functionality of the methods in this package.
+func (fn DelayFunc) Until(ctx context.Context, immediate, sliding bool, condition ConditionWithContextFunc) error {
+	return loopConditionUntilContext(ctx, &variableTimer{fn: fn, new: internalClock.NewTimer}, immediate, sliding, condition)
+}
+
+// Concurrent returns a version of this DelayFunc that is safe for use by multiple goroutines that
+// wish to share a single delay timer.
+func (fn DelayFunc) Concurrent() DelayFunc {
+	var lock sync.Mutex
+	return func() time.Duration {
+		lock.Lock()
+		defer lock.Unlock()
+		return fn()
+	}
+}
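
A brief sketch combining DelayFunc with Concurrent so several goroutines share one advancing schedule (hypothetical worker count):

package main

import (
	"fmt"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// DelayFunc alone is single-goroutine; Concurrent wraps it in a mutex.
	delayFn := wait.Backoff{Duration: 50 * time.Millisecond, Factor: 2, Steps: 3}.DelayFunc().Concurrent()

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(delayFn())
		}()
	}
	wg.Wait() // prints 50ms, 100ms and 200ms, in some order
}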
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
new file mode 100644
index 0000000000..3f0c968ec9
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package wait provides tools for polling or listening for changes
+// to a condition.
+package wait // import "k8s.io/apimachinery/pkg/util/wait"
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/error.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/error.go
new file mode 100644
index 0000000000..dd75801d82
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/error.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package wait
+
+import (
+	"context"
+	"errors"
+)
+
+// ErrWaitTimeout is returned when the condition was not satisfied in time.
+//
+// Deprecated: This type will be made private in favor of Interrupted()
+// for checking errors or ErrorInterrupted(err) for returning a wrapped error.
+var ErrWaitTimeout = ErrorInterrupted(errors.New("timed out waiting for the condition"))
+
+// Interrupted returns true if the error indicates a Poll, ExponentialBackoff, or
+// Until loop exited for any reason besides the condition returning true or an
+// error. A loop is considered interrupted if the calling context is cancelled,
+// the context reaches its deadline, or a backoff reaches its maximum allowed
+// steps.
+//
+// Callers should use this method instead of comparing the error value directly to
+// ErrWaitTimeout, as methods that cancel a context may not return that error.
+//
+// Instead of:
+//
+//	err := wait.Poll(...)
+//	if err == wait.ErrWaitTimeout {
+//	    log.Infof("Wait for operation exceeded")
+//	} else ...
+//
+// Use:
+//
+//	err := wait.Poll(...)
+//	if wait.Interrupted(err) {
+//	    log.Infof("Wait for operation exceeded")
+//	} else ...
+func Interrupted(err error) bool {
+	switch {
+	case errors.Is(err, errWaitTimeout),
+		errors.Is(err, context.Canceled),
+		errors.Is(err, context.DeadlineExceeded):
+		return true
+	default:
+		return false
+	}
+}
+
+// errInterrupted wraps the cause of a wait loop that ended early.
+type errInterrupted struct {
+	cause error
+}
+
+// ErrorInterrupted returns an error that indicates the wait was ended
+// early for a given reason. If no cause is provided a generic error
+// will be used but callers are encouraged to provide a real cause for
+// clarity in debugging.
+func ErrorInterrupted(cause error) error {
+	switch cause.(type) {
+	case errInterrupted:
+		// no need to wrap twice since errInterrupted is only needed
+		// once in a chain
+		return cause
+	default:
+		return errInterrupted{cause}
+	}
+}
+
+// errWaitTimeout is the private version of the previous ErrWaitTimeout
+// and is private to prevent direct comparison. Use ErrorInterrupted(err)
+// to get an error that will return true for Interrupted(err).
+var errWaitTimeout = errInterrupted{}
+
+func (e errInterrupted) Unwrap() error        { return e.cause }
+func (e errInterrupted) Is(target error) bool { return target == errWaitTimeout }
+func (e errInterrupted) Error() string {
+	if e.cause == nil {
+		// returns the same error message as historical behavior
+		return "timed out waiting for the condition"
+	}
+	return e.cause.Error()
+}
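
A minimal sketch of the intended pairing of ErrorInterrupted and Interrupted (hypothetical cause):

package main

import (
	"errors"
	"fmt"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Wrapping preserves the cause's message while making the error
	// recognizable to Interrupted via its Is(errWaitTimeout) hook.
	cause := errors.New("gave up waiting for rollout")
	err := wait.ErrorInterrupted(cause)
	fmt.Println(wait.Interrupted(err), err) // true gave up waiting for rollout
}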
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go
new file mode 100644
index 0000000000..107bfc132f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package wait
+
+import (
+	"context"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// loopConditionUntilContext executes the provided condition at intervals defined by
+// the provided timer until the provided context is cancelled, the condition returns
+// true, or the condition returns an error. If sliding is true, the period is computed
+// after condition runs. If it is false then period includes the runtime for condition.
+// If immediate is false the first delay happens before any call to condition, if
+// immediate is true the condition will be invoked before waiting and guarantees that
+// the condition is invoked at least once, regardless of whether the context has been
+// cancelled. The returned error is the error returned by the last condition or the
+// context error if the context was terminated.
+//
+// This is the common loop construct for all polling in the wait package.
+func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding bool, condition ConditionWithContextFunc) error {
+	defer t.Stop()
+
+	var timeCh <-chan time.Time
+	doneCh := ctx.Done()
+
+	if !sliding {
+		timeCh = t.C()
+	}
+
+	// if immediate is true the condition is
+	// guaranteed to be executed at least once;
+	// otherwise the first delay happens before any call to condition
+	if immediate {
+		if ok, err := func() (bool, error) {
+			defer runtime.HandleCrash()
+			return condition(ctx)
+		}(); err != nil || ok {
+			return err
+		}
+	}
+
+	if sliding {
+		timeCh = t.C()
+	}
+
+	for {
+
+		// Wait for either the context to be cancelled or the next timer tick
+		select {
+		case <-doneCh:
+			return ctx.Err()
+		case <-timeCh:
+		}
+
+		// IMPORTANT: Because there is no channel priority selection in golang
+		// it is possible for very short timers to "win" the race in the previous select
+		// repeatedly even when the context has been canceled.  We therefore must
+		// explicitly check for context cancellation on every loop and exit if true to
+		// guarantee that we don't invoke condition more than once after context has
+		// been cancelled.
+		if err := ctx.Err(); err != nil {
+			return err
+		}
+
+		if !sliding {
+			t.Next()
+		}
+		if ok, err := func() (bool, error) {
+			defer runtime.HandleCrash()
+			return condition(ctx)
+		}(); err != nil || ok {
+			return err
+		}
+		if sliding {
+			t.Next()
+		}
+	}
+}
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go
new file mode 100644
index 0000000000..231d4c3842
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/poll.go
@@ -0,0 +1,315 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package wait
+
+import (
+	"context"
+	"time"
+)
+
+// PollUntilContextCancel tries a condition func until it returns true, an error, or the context
+// is cancelled or hits a deadline. condition will be invoked after the first interval if the
+// context is not cancelled first. The returned error will be from ctx.Err(), the condition's
+// err return value, or nil. If invoking condition takes longer than interval the next condition
+// will be invoked immediately. When using very short intervals, condition may be invoked multiple
+// times before a context cancellation is detected. If immediate is true, condition will be
+// invoked before waiting and guarantees that condition is invoked at least once, regardless of
+// whether the context has been cancelled.
+func PollUntilContextCancel(ctx context.Context, interval time.Duration, immediate bool, condition ConditionWithContextFunc) error {
+	return loopConditionUntilContext(ctx, Backoff{Duration: interval}.Timer(), immediate, false, condition)
+}
+
+// PollUntilContextTimeout will terminate polling after timeout duration by setting a context
+// timeout. This is provided as a convenience function for callers not currently executing under
+// a deadline and is equivalent to:
+//
+//	deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout)
+//	err := PollUntilContextCancel(deadlineCtx, interval, immediate, condition)
+//
+// The deadline context will be cancelled if the Poll succeeds before the timeout, simplifying
+// inline usage. All other behavior is identical to PollUntilContextCancel.
+func PollUntilContextTimeout(ctx context.Context, interval, timeout time.Duration, immediate bool, condition ConditionWithContextFunc) error {
+	deadlineCtx, deadlineCancel := context.WithTimeout(ctx, timeout)
+	defer deadlineCancel()
+	return loopConditionUntilContext(deadlineCtx, Backoff{Duration: interval}.Timer(), immediate, false, condition)
+}
+
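
A short usage sketch for the recommended replacement entry point (hypothetical condition; returns nil once the condition reports true):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()
	// Poll every 200ms for at most 2s, checking the condition immediately.
	err := wait.PollUntilContextTimeout(context.Background(), 200*time.Millisecond, 2*time.Second, true,
		func(ctx context.Context) (bool, error) {
			return time.Since(start) > 600*time.Millisecond, nil
		})
	fmt.Println(err, wait.Interrupted(err)) // <nil> false
}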
+// Poll tries a condition func until it returns true, an error, or the timeout
+// is reached.
+//
+// Poll always waits the interval before the run of 'condition'.
+// 'condition' will always be invoked at least once.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// If you want to Poll something forever, see PollInfinite.
+//
+// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors
+// defined by the context package. Will be removed in a future release.
+func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
+	return PollWithContext(context.Background(), interval, timeout, condition.WithContext())
+}
+
+// PollWithContext tries a condition func until it returns true, an error,
+// or when the context expires or the timeout is reached, whichever
+// happens first.
+//
+// PollWithContext always waits the interval before the run of 'condition'.
+// 'condition' will always be invoked at least once.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// If you want to Poll something forever, see PollInfinite.
+//
+// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors
+// defined by the context package. Will be removed in a future release.
+func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
+	return poll(ctx, false, poller(interval, timeout), condition)
+}
+
+// PollUntil tries a condition func until it returns true, an error or stopCh is
+// closed.
+//
+// PollUntil always waits interval before the first run of 'condition'.
+// 'condition' will always be invoked at least once.
+//
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors
+// defined by the context package. Will be removed in a future release.
+func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
+	return PollUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext())
+}
+
+// PollUntilWithContext tries a condition func until it returns true,
+// an error or the specified context is cancelled or expired.
+//
+// PollUntilWithContext always waits interval before the first run of 'condition'.
+// 'condition' will always be invoked at least once.
+//
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors
+// defined by the context package. Will be removed in a future release.
+func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
+	return poll(ctx, false, poller(interval, 0), condition)
+}
+
+// PollInfinite tries a condition func until it returns true or an error
+//
+// PollInfinite always waits the interval before the run of 'condition'.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors
+// defined by the context package. Will be removed in a future release.
+func PollInfinite(interval time.Duration, condition ConditionFunc) error {
+	return PollInfiniteWithContext(context.Background(), interval, condition.WithContext())
+}
+
+// PollInfiniteWithContext tries a condition func until it returns true or an error
+//
+// PollInfiniteWithContext always waits the interval before the run of 'condition'.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors
+// defined by the context package. Will be removed in a future release.
+func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
+	return poll(ctx, false, poller(interval, 0), condition)
+}
+
+// PollImmediate tries a condition func until it returns true, an error, or the timeout
+// is reached.
+//
+// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
+// will always be invoked at least once.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// If you want to immediately Poll something forever, see PollImmediateInfinite.
+//
+// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors
+// defined by the context package. Will be removed in a future release.
+func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
+	return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext())
+}
+
+// PollImmediateWithContext tries a condition func until it returns true or an
+// error, or until the timeout is reached or the specified context expires,
+// whichever happens first.
+//
+// PollImmediateWithContext always checks 'condition' before waiting for the interval.
+// 'condition' will always be invoked at least once.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// If you want to immediately Poll something forever, see PollImmediateInfinite.
+//
+// Deprecated: This method does not return errors from context, use PollUntilContextTimeout.
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors
+// defined by the context package. Will be removed in a future release.
+func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
+	return poll(ctx, true, poller(interval, timeout), condition)
+}
+
+// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
+//
+// PollImmediateUntil runs the 'condition' before waiting for the interval.
+// 'condition' will always be invoked at least once.
+//
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors
+// defined by the context package. Will be removed in a future release.
+func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
+	return PollImmediateUntilWithContext(ContextForChannel(stopCh), interval, condition.WithContext())
+}
+
+// PollImmediateUntilWithContext tries a condition func until it returns true,
+// an error or the specified context is cancelled or expired.
+//
+// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval.
+// 'condition' will always be invoked at least once.
+//
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors
+// defined by the context package. Will be removed in a future release.
+func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
+	return poll(ctx, true, poller(interval, 0), condition)
+}
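+
+// A minimal migration sketch (illustrative, not part of the vendored file):
+// for the stop-channel based PollImmediateUntil variants, the suggested
+// replacement is PollUntilContextCancel with immediate=true; checkReady is a
+// hypothetical caller-supplied probe:
+//
+//	err := PollUntilContextCancel(ctx, time.Second, true,
+//		func(ctx context.Context) (bool, error) {
+//			return checkReady(ctx)
+//		})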
+
+// PollImmediateInfinite tries a condition func until it returns true or an error
+//
+// PollImmediateInfinite runs the 'condition' before waiting for the interval.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors
+// defined by the context package. Will be removed in a future release.
+func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
+	return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext())
+}
+
+// PollImmediateInfiniteWithContext tries a condition func until it returns true
+// or an error, or until the specified context is cancelled or expires.
+//
+// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// Deprecated: This method does not return errors from context, use PollUntilContextCancel.
+// Note that the new method will no longer return ErrWaitTimeout and instead return errors
+// defined by the context package. Will be removed in a future release.
+func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
+	return poll(ctx, true, poller(interval, 0), condition)
+}
+
+// Internally used, each of the public 'Poll*' functions defined in this
+// package should invoke this internal function with appropriate parameters.
+// ctx: the context specified by the caller, for infinite polling pass
+// a context that never gets cancelled or expired.
+// immediate: if true, the 'condition' will be invoked before waiting for the interval,
+// in this case 'condition' will always be invoked at least once.
+// wait: user specified waitWithContextFunc that controls at what interval the condition
+// function should be invoked periodically and whether it is bound by a timeout.
+// condition: user specified ConditionWithContextFunc function.
+//
+// Deprecated: will be removed in favor of loopConditionUntilContext.
+func poll(ctx context.Context, immediate bool, wait waitWithContextFunc, condition ConditionWithContextFunc) error {
+	if immediate {
+		done, err := runConditionWithCrashProtectionWithContext(ctx, condition)
+		if err != nil {
+			return err
+		}
+		if done {
+			return nil
+		}
+	}
+
+	select {
+	case <-ctx.Done():
+		// returning ctx.Err() will break backward compatibility, use new PollUntilContext*
+		// methods instead
+		return ErrWaitTimeout
+	default:
+		return waitForWithContext(ctx, wait, condition)
+	}
+}
+
+// poller returns a waitWithContextFunc that will send to the channel every interval until
+// timeout has elapsed and then closes the channel.
+//
+// Over very short intervals you may receive no ticks before the channel is
+// closed. A timeout of 0 is interpreted as infinity, and in such a case
+// it is the caller's responsibility to cancel the supplied context.
+// Failure to do so would result in a leaked goroutine.
+//
+// Output ticks are not buffered. If the channel is not ready to receive an
+// item, the tick is skipped.
+//
+// Deprecated: Will be removed in a future release.
+func poller(interval, timeout time.Duration) waitWithContextFunc {
+	return waitWithContextFunc(func(ctx context.Context) <-chan struct{} {
+		ch := make(chan struct{})
+
+		go func() {
+			defer close(ch)
+
+			tick := time.NewTicker(interval)
+			defer tick.Stop()
+
+			var after <-chan time.Time
+			if timeout != 0 {
+				// time.After is more convenient, but it
+				// potentially leaves timers around much longer
+				// than necessary if we exit early.
+				timer := time.NewTimer(timeout)
+				after = timer.C
+				defer timer.Stop()
+			}
+
+			for {
+				select {
+				case <-tick.C:
+					// If the consumer isn't ready for this signal drop it and
+					// check the other channels.
+					select {
+					case ch <- struct{}{}:
+					default:
+					}
+				case <-after:
+					return
+				case <-ctx.Done():
+					return
+				}
+			}
+		}()
+
+		return ch
+	})
+}
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go
new file mode 100644
index 0000000000..3efba32132
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/timer.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package wait
+
+import (
+	"time"
+
+	"k8s.io/utils/clock"
+)
+
+// Timer abstracts how wait functions interact with the time runtime efficiently. Test
+// code may implement this interface directly but package consumers are encouraged
+// to use the Backoff type as the primary mechanism for acquiring a Timer. The
+// interface is a simplification of clock.Timer to prevent misuse. Timers are not
+// expected to be safe for calls from multiple goroutines.
+type Timer interface {
+	// C returns a channel that will receive a struct{} each time the timer fires.
+	// The channel should not be waited on after Stop() is invoked. It is allowed
+	// to cache the returned value of C() for the lifetime of the Timer.
+	C() <-chan time.Time
+	// Next is invoked by wait functions to signal timers that the next interval
+	// should begin. You may only use Next() if you have drained the channel C().
+	// You should not call Next() after Stop() is invoked.
+	Next()
+	// Stop releases the timer. It is safe to invoke if no other methods have been
+	// called.
+	Stop()
+}
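+
+// A usage sketch (illustrative, not part of the vendored file) of the
+// drain-then-rearm protocol described above; b is assumed to be a Backoff
+// value (per the doc above, the primary way to acquire a Timer), and
+// workDone is a hypothetical caller-supplied probe:
+//
+//	t := b.Timer()
+//	defer t.Stop()
+//	for {
+//		select {
+//		case <-t.C():
+//			if workDone() {
+//				return
+//			}
+//			t.Next() // C() has been drained; arm the next interval
+//		case <-ctx.Done():
+//			return
+//		}
+//	}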
+
+type noopTimer struct {
+	closedCh <-chan time.Time
+}
+
+// newNoopTimer creates a timer with a unique channel to avoid contention
+// for the channel's lock across multiple unrelated timers.
+func newNoopTimer() noopTimer {
+	ch := make(chan time.Time)
+	close(ch)
+	return noopTimer{closedCh: ch}
+}
+
+func (t noopTimer) C() <-chan time.Time {
+	return t.closedCh
+}
+func (noopTimer) Next() {}
+func (noopTimer) Stop() {}
+
+type variableTimer struct {
+	fn  DelayFunc
+	t   clock.Timer
+	new func(time.Duration) clock.Timer
+}
+
+func (t *variableTimer) C() <-chan time.Time {
+	if t.t == nil {
+		d := t.fn()
+		t.t = t.new(d)
+	}
+	return t.t.C()
+}
+func (t *variableTimer) Next() {
+	if t.t == nil {
+		return
+	}
+	d := t.fn()
+	t.t.Reset(d)
+}
+func (t *variableTimer) Stop() {
+	if t.t == nil {
+		return
+	}
+	t.t.Stop()
+	t.t = nil
+}
+
+type fixedTimer struct {
+	interval time.Duration
+	t        clock.Ticker
+	new      func(time.Duration) clock.Ticker
+}
+
+func (t *fixedTimer) C() <-chan time.Time {
+	if t.t == nil {
+		t.t = t.new(t.interval)
+	}
+	return t.t.C()
+}
+func (t *fixedTimer) Next() {
+	// no-op for fixed timers
+}
+func (t *fixedTimer) Stop() {
+	if t.t == nil {
+		return
+	}
+	t.t.Stop()
+	t.t = nil
+}
+
+var (
+	// RealTimer can be passed to methods that need a clock.Timer.
+	RealTimer = clock.RealClock{}.NewTimer
+)
+
+var (
+	// internalClock is used for test injection of clocks
+	internalClock = clock.RealClock{}
+)
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
new file mode 100644
index 0000000000..6805e8cf94
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
@@ -0,0 +1,223 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package wait
+
+import (
+	"context"
+	"math/rand"
+	"sync"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// For any test of the style:
+//
+//	...
+//	<- time.After(timeout):
+//	   t.Errorf("Timed out")
+//
+// The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s
+// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine
+// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test.
+var ForeverTestTimeout = time.Second * 30
+
+// NeverStop may be passed to Until to make it never stop.
+var NeverStop <-chan struct{} = make(chan struct{})
+
+// Group allows starting a group of goroutines and waiting for their completion.
+type Group struct {
+	wg sync.WaitGroup
+}
+
+func (g *Group) Wait() {
+	g.wg.Wait()
+}
+
+// StartWithChannel starts f in a new goroutine in the group.
+// stopCh is passed to f as an argument. f should stop when stopCh is closed.
+func (g *Group) StartWithChannel(stopCh <-chan struct{}, f func(stopCh <-chan struct{})) {
+	g.Start(func() {
+		f(stopCh)
+	})
+}
+
+// StartWithContext starts f in a new goroutine in the group.
+// ctx is passed to f as an argument. f should stop when ctx.Done() is closed.
+func (g *Group) StartWithContext(ctx context.Context, f func(context.Context)) {
+	g.Start(func() {
+		f(ctx)
+	})
+}
+
+// Start starts f in a new goroutine in the group.
+func (g *Group) Start(f func()) {
+	g.wg.Add(1)
+	go func() {
+		defer g.wg.Done()
+		f()
+	}()
+}
+
+// Forever calls f every period forever.
+//
+// Forever is syntactic sugar on top of Until.
+func Forever(f func(), period time.Duration) {
+	Until(f, period, NeverStop)
+}
+
+// Jitter returns a time.Duration between duration and duration + maxFactor *
+// duration.
+//
+// This allows clients to avoid converging on periodic behavior. If maxFactor
+// is 0.0, a suggested default value will be chosen.
+func Jitter(duration time.Duration, maxFactor float64) time.Duration {
+	if maxFactor <= 0.0 {
+		maxFactor = 1.0
+	}
+	wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
+	return wait
+}
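+
+// A usage sketch (illustrative, not part of the vendored file): sleep for a
+// base period of one second, stretched by up to 50%:
+//
+//	time.Sleep(Jitter(time.Second, 0.5))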
+
+// ConditionFunc returns true if the condition is satisfied, or an error
+// if the loop should be aborted.
+type ConditionFunc func() (done bool, err error)
+
+// ConditionWithContextFunc returns true if the condition is satisfied, or an error
+// if the loop should be aborted.
+//
+// The caller passes along a context that can be used by the condition function.
+type ConditionWithContextFunc func(context.Context) (done bool, err error)
+
+// WithContext converts a ConditionFunc into a ConditionWithContextFunc
+func (cf ConditionFunc) WithContext() ConditionWithContextFunc {
+	return func(context.Context) (done bool, err error) {
+		return cf()
+	}
+}
+
+// ContextForChannel provides a context that will be treated as cancelled
+// when the provided parentCh is closed. The implementation returns
+// context.Canceled for Err() if and only if the parentCh is closed.
+func ContextForChannel(parentCh <-chan struct{}) context.Context {
+	return channelContext{stopCh: parentCh}
+}
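+
+// A usage sketch (illustrative, not part of the vendored file): adapting a
+// legacy stop channel to a context:
+//
+//	stopCh := make(chan struct{})
+//	ctx := ContextForChannel(stopCh)
+//	close(stopCh) // ctx.Done() is now closed and ctx.Err() == context.Canceled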
+
+var _ context.Context = channelContext{}
+
+// channelContext will behave as if the context were cancelled when stopCh is
+// closed.
+type channelContext struct {
+	stopCh <-chan struct{}
+}
+
+func (c channelContext) Done() <-chan struct{} { return c.stopCh }
+func (c channelContext) Err() error {
+	select {
+	case <-c.stopCh:
+		return context.Canceled
+	default:
+		return nil
+	}
+}
+func (c channelContext) Deadline() (time.Time, bool) { return time.Time{}, false }
+func (c channelContext) Value(key any) any           { return nil }
+
+// runConditionWithCrashProtection runs a ConditionFunc with crash protection.
+//
+// Deprecated: Will be removed when the legacy polling methods are removed.
+func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
+	defer runtime.HandleCrash()
+	return condition()
+}
+
+// runConditionWithCrashProtectionWithContext runs a ConditionWithContextFunc
+// with crash protection.
+//
+// Deprecated: Will be removed when the legacy polling methods are removed.
+func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) {
+	defer runtime.HandleCrash()
+	return condition(ctx)
+}
+
+// waitFunc creates a channel that receives an item every time a test
+// should be executed and is closed when the last test should be invoked.
+//
+// Deprecated: Will be removed in a future release in favor of
+// loopConditionUntilContext.
+type waitFunc func(done <-chan struct{}) <-chan struct{}
+
+// WithContext converts the waitFunc to an equivalent waitWithContextFunc
+func (w waitFunc) WithContext() waitWithContextFunc {
+	return func(ctx context.Context) <-chan struct{} {
+		return w(ctx.Done())
+	}
+}
+
+// waitWithContextFunc creates a channel that receives an item every time a test
+// should be executed and is closed when the last test should be invoked.
+//
+// When the specified context gets cancelled or expires the function
+// stops sending items and returns immediately.
+//
+// Deprecated: Will be removed in a future release in favor of
+// loopConditionUntilContext.
+type waitWithContextFunc func(ctx context.Context) <-chan struct{}
+
+// waitForWithContext continually checks 'fn' as driven by 'wait'.
+//
+// waitForWithContext gets a channel from 'wait()', and then invokes 'fn'
+// once for every value placed on the channel and once more when the
+// channel is closed. If the channel is closed and 'fn'
+// returns false without error, waitForWithContext returns ErrWaitTimeout.
+//
+// If 'fn' returns an error the loop ends and that error is returned. If
+// 'fn' returns true the loop ends and nil is returned.
+//
+// context.Canceled will be returned if the ctx.Done() channel is closed
+// without fn ever returning true.
+//
+// Because the golang `select` statement is "uniform pseudo-random", `fn` might
+// still run one or more times after the ctx.Done() channel is closed, though
+// `waitForWithContext` will eventually return.
+//
+// Deprecated: Will be removed in a future release in favor of
+// loopConditionUntilContext.
+func waitForWithContext(ctx context.Context, wait waitWithContextFunc, fn ConditionWithContextFunc) error {
+	waitCtx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	c := wait(waitCtx)
+	for {
+		select {
+		case _, open := <-c:
+			ok, err := runConditionWithCrashProtectionWithContext(ctx, fn)
+			if err != nil {
+				return err
+			}
+			if ok {
+				return nil
+			}
+			if !open {
+				return ErrWaitTimeout
+			}
+		case <-ctx.Done():
+			// returning ctx.Err() will break backward compatibility, use new PollUntilContext*
+			// methods instead
+			return ErrWaitTimeout
+		}
+	}
+}
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/version/doc.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/version/doc.go
new file mode 100644
index 0000000000..29574fd6d5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/version/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+
+// Package version supplies the type for version information collected at build time.
+package version // import "k8s.io/apimachinery/pkg/version"
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/version/helpers.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/version/helpers.go
new file mode 100644
index 0000000000..5e041d6f3f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/version/helpers.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+type versionType int
+
+const (
+	// The bigger the version type number, the higher its priority.
+	versionTypeAlpha versionType = iota
+	versionTypeBeta
+	versionTypeGA
+)
+
+var kubeVersionRegex = regexp.MustCompile("^v([\\d]+)(?:(alpha|beta)([\\d]+))?$")
+
+func parseKubeVersion(v string) (majorVersion int, vType versionType, minorVersion int, ok bool) {
+	var err error
+	submatches := kubeVersionRegex.FindStringSubmatch(v)
+	if len(submatches) != 4 {
+		return 0, 0, 0, false
+	}
+	switch submatches[2] {
+	case "alpha":
+		vType = versionTypeAlpha
+	case "beta":
+		vType = versionTypeBeta
+	case "":
+		vType = versionTypeGA
+	default:
+		return 0, 0, 0, false
+	}
+	if majorVersion, err = strconv.Atoi(submatches[1]); err != nil {
+		return 0, 0, 0, false
+	}
+	if vType != versionTypeGA {
+		if minorVersion, err = strconv.Atoi(submatches[3]); err != nil {
+			return 0, 0, 0, false
+		}
+	}
+	return majorVersion, vType, minorVersion, true
+}
+
+// CompareKubeAwareVersionStrings compares two kube-like version strings.
+// Kube-like version strings start with a v, followed by a major version, and an optional "alpha" or "beta" string
+// followed by a minor version (e.g. v1, v2beta1). Versions are sorted on GA/beta/alpha status first and then on major
+// and minor versions, e.g. v2, v1, v1beta2, v1beta1, v1alpha1.
+func CompareKubeAwareVersionStrings(v1, v2 string) int {
+	if v1 == v2 {
+		return 0
+	}
+	v1major, v1type, v1minor, ok1 := parseKubeVersion(v1)
+	v2major, v2type, v2minor, ok2 := parseKubeVersion(v2)
+	switch {
+	case !ok1 && !ok2:
+		return strings.Compare(v2, v1)
+	case !ok1 && ok2:
+		return -1
+	case ok1 && !ok2:
+		return 1
+	}
+	if v1type != v2type {
+		return int(v1type) - int(v2type)
+	}
+	if v1major != v2major {
+		return v1major - v2major
+	}
+	return v1minor - v2minor
+}
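+
+// A usage sketch (illustrative, not part of the vendored file): sorting
+// versions from highest to lowest priority with the standard sort package:
+//
+//	versions := []string{"v1beta1", "v2", "v1alpha1", "v1", "v1beta2"}
+//	sort.Slice(versions, func(i, j int) bool {
+//		return CompareKubeAwareVersionStrings(versions[i], versions[j]) > 0
+//	})
+//	// versions is now [v2 v1 v1beta2 v1beta1 v1alpha1]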
diff --git a/hack/tools/vendor/k8s.io/apimachinery/pkg/version/types.go b/hack/tools/vendor/k8s.io/apimachinery/pkg/version/types.go
new file mode 100644
index 0000000000..72727b503b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apimachinery/pkg/version/types.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+// Info contains versioning information.
+// TODO: Add []string of api versions supported? It's still unclear
+// how we'll want to distribute that information.
+type Info struct {
+	Major        string `json:"major"`
+	Minor        string `json:"minor"`
+	GitVersion   string `json:"gitVersion"`
+	GitCommit    string `json:"gitCommit"`
+	GitTreeState string `json:"gitTreeState"`
+	BuildDate    string `json:"buildDate"`
+	GoVersion    string `json:"goVersion"`
+	Compiler     string `json:"compiler"`
+	Platform     string `json:"platform"`
+}
+
+// String returns info as a human-friendly version string.
+func (info Info) String() string {
+	return info.GitVersion
+}
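+
+// A usage sketch (illustrative values, not part of the vendored file):
+//
+//	info := Info{Major: "1", Minor: "32", GitVersion: "v1.32.2"}
+//	fmt.Println(info) // prints "v1.32.2" via the String method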
diff --git a/hack/tools/vendor/k8s.io/apiserver/LICENSE b/hack/tools/vendor/k8s.io/apiserver/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go
new file mode 100644
index 0000000000..88db1ffa67
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +groupName=apiserver.k8s.io
+
+// Package apiserver is the internal version of the API.
+package apiserver // import "k8s.io/apiserver/pkg/apis/apiserver"
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go
new file mode 100644
index 0000000000..8a8202cb5b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package install
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apiserver/pkg/apis/apiserver"
+	v1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
+	"k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
+	"k8s.io/apiserver/pkg/apis/apiserver/v1beta1"
+)
+
+// Install registers the API group and adds types to a scheme
+func Install(scheme *runtime.Scheme) {
+	utilruntime.Must(apiserver.AddToScheme(scheme))
+
+	// v1alpha is in the k8s.io-suffixed API group
+	utilruntime.Must(v1alpha1.AddToScheme(scheme))
+	utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion))
+
+	// v1beta1 is in the k8s.io-suffixed API group
+	utilruntime.Must(v1beta1.AddToScheme(scheme))
+	utilruntime.Must(scheme.SetVersionPriority(v1beta1.SchemeGroupVersion))
+
+	// v1 is in the config.k8s.io-suffixed API group
+	utilruntime.Must(v1.AddToScheme(scheme))
+	utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion))
+}
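+
+// A usage sketch (illustrative, not part of the vendored file): registering
+// the group on a fresh scheme so its objects can be decoded:
+//
+//	scheme := runtime.NewScheme()
+//	Install(scheme)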
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go
new file mode 100644
index 0000000000..fd0b087c8d
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiserver
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const LegacyGroupName = "apiserver.k8s.io"
+const GroupName = "apiserver.config.k8s.io"
+
+// LegacySchemeGroupVersion is group version used to register these objects
+var LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: runtime.APIVersionInternal}
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(LegacySchemeGroupVersion,
+		&AdmissionConfiguration{},
+		&EgressSelectorConfiguration{},
+	)
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&AdmissionConfiguration{},
+		&AuthenticationConfiguration{},
+		&AuthorizationConfiguration{},
+		&EncryptionConfiguration{},
+		&EgressSelectorConfiguration{},
+		&TracingConfiguration{},
+	)
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go
new file mode 100644
index 0000000000..a610ebc1a6
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go
@@ -0,0 +1,413 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiserver
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	tracingapi "k8s.io/component-base/tracing/api/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AdmissionConfiguration provides versioned configuration for admission controllers.
+type AdmissionConfiguration struct {
+	metav1.TypeMeta
+
+	// Plugins allows specifying a configuration per admission control plugin.
+	// +optional
+	Plugins []AdmissionPluginConfiguration
+}
+
+// AdmissionPluginConfiguration provides the configuration for a single plug-in.
+type AdmissionPluginConfiguration struct {
+	// Name is the name of the admission controller.
+	// It must match the registered admission plugin name.
+	Name string
+
+	// Path is the path to a configuration file that contains the plugin's
+	// configuration
+	// +optional
+	Path string
+
+	// Configuration is an embedded configuration object to be used as the plugin's
+	// configuration. If present, it will be used instead of the path to the configuration file.
+	// +optional
+	Configuration *runtime.Unknown
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EgressSelectorConfiguration provides versioned configuration for egress selector clients.
+type EgressSelectorConfiguration struct {
+	metav1.TypeMeta
+
+	// EgressSelections contains a list of egress selection client configurations
+	EgressSelections []EgressSelection
+}
+
+// EgressSelection provides the configuration for a single egress selection client.
+type EgressSelection struct {
+	// Name is the name of the egress selection.
+	// Currently supported values are "controlplane", "etcd" and "cluster"
+	Name string
+
+	// Connection is the exact information used to configure the egress selection
+	Connection Connection
+}
+
+// Connection provides the configuration for a single egress selection client.
+type Connection struct {
+	// ProxyProtocol is the protocol used to connect from the client to the konnectivity server.
+	ProxyProtocol ProtocolType
+
+	// Transport defines the transport configurations we use to dial to the konnectivity server.
+	// This is required if ProxyProtocol is HTTPConnect or GRPC.
+	// +optional
+	Transport *Transport
+}
+
+// ProtocolType is a set of valid values for Connection.ProxyProtocol
+type ProtocolType string
+
+// Valid types for ProtocolType for konnectivity server
+const (
+	// Use HTTPConnect to connect to konnectivity server
+	ProtocolHTTPConnect ProtocolType = "HTTPConnect"
+	// Use grpc to connect to konnectivity server
+	ProtocolGRPC ProtocolType = "GRPC"
+	// Connect directly (skip konnectivity server)
+	ProtocolDirect ProtocolType = "Direct"
+)
+
+// Transport defines the transport configurations we use to dial to the konnectivity server
+type Transport struct {
+	// TCP is the TCP configuration for communicating with the konnectivity server via TCP
+	// ProxyProtocol of GRPC is not supported with TCP transport at the moment
+	// Requires at least one of TCP or UDS to be set
+	// +optional
+	TCP *TCPTransport
+
+	// UDS is the UDS configuration for communicating with the konnectivity server via UDS
+	// Requires at least one of TCP or UDS to be set
+	// +optional
+	UDS *UDSTransport
+}
+
+// TCPTransport provides the information to connect to konnectivity server via TCP
+type TCPTransport struct {
+	// URL is the location of the konnectivity server to connect to.
+	// As an example it might be "https://127.0.0.1:8131"
+	URL string
+
+	// TLSConfig is the config needed to use TLS when connecting to konnectivity server
+	// +optional
+	TLSConfig *TLSConfig
+}
+
+// UDSTransport provides the information to connect to konnectivity server via UDS
+type UDSTransport struct {
+	// UDSName is the name of the unix domain socket to connect to konnectivity server
+	// This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket)
+	UDSName string
+}
+
+// TLSConfig provides the authentication information to connect to konnectivity server
+// Only used with TCPTransport
+type TLSConfig struct {
+	// caBundle is the file location of the CA to be used to determine trust with the konnectivity server.
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// If absent while TCPTransport.URL is prefixed with https://, default to system trust roots.
+	// +optional
+	CABundle string
+
+	// clientKey is the file location of the client key to authenticate with the konnectivity server
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// Must be configured if TCPTransport.URL is prefixed with https://
+	// +optional
+	ClientKey string
+
+	// clientCert is the file location of the client certificate to authenticate with the konnectivity server
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// Must be configured if TCPTransport.URL is prefixed with https://
+	// +optional
+	ClientCert string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TracingConfiguration provides versioned configuration for tracing clients.
+type TracingConfiguration struct {
+	metav1.TypeMeta
+
+	// Embed the component config tracing configuration struct
+	tracingapi.TracingConfiguration
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AuthenticationConfiguration provides versioned configuration for authentication.
+type AuthenticationConfiguration struct {
+	metav1.TypeMeta
+
+	JWT []JWTAuthenticator
+
+	// If present --anonymous-auth must not be set
+	Anonymous *AnonymousAuthConfig
+}
+
+// AnonymousAuthConfig provides the configuration for the anonymous authenticator.
+type AnonymousAuthConfig struct {
+	Enabled bool
+
+	// If set, anonymous auth is only allowed if the request meets one of the
+	// conditions.
+	Conditions []AnonymousAuthCondition
+}
+
+// AnonymousAuthCondition describes the condition under which anonymous auth
+// should be enabled.
+type AnonymousAuthCondition struct {
+	// Path for which anonymous auth is enabled.
+	Path string
+}
+
+// JWTAuthenticator provides the configuration for a single JWT authenticator.
+type JWTAuthenticator struct {
+	Issuer               Issuer
+	ClaimValidationRules []ClaimValidationRule
+	ClaimMappings        ClaimMappings
+	UserValidationRules  []UserValidationRule
+}
+
+// Issuer provides the configuration for an external provider's specific settings.
+type Issuer struct {
+	// url points to the issuer URL in a format https://url or https://url/path.
+	// This must match the "iss" claim in the presented JWT, and the issuer returned from discovery.
+	// Same value as the --oidc-issuer-url flag.
+	// Discovery information is fetched from "{url}/.well-known/openid-configuration" unless overridden by discoveryURL.
+	// Required to be unique across all JWT authenticators.
+	// Note that egress selection configuration is not used for this network connection.
+	// +required
+	URL string
+	// discoveryURL, if specified, overrides the URL used to fetch discovery
+	// information instead of using "{url}/.well-known/openid-configuration".
+	// The exact value specified is used, so "/.well-known/openid-configuration"
+	// must be included in discoveryURL if needed.
+	//
+	// The "issuer" field in the fetched discovery information must match the "issuer.url" field
+	// in the AuthenticationConfiguration and will be used to validate the "iss" claim in the presented JWT.
+	// This is for scenarios where the well-known and jwks endpoints are hosted at a different
+	// location than the issuer (such as locally in the cluster).
+	//
+	// Example:
+	// A discovery url that is exposed using kubernetes service 'oidc' in namespace 'oidc-namespace'
+	// and discovery information is available at '/.well-known/openid-configuration'.
+	// discoveryURL: "https://oidc.oidc-namespace/.well-known/openid-configuration"
+	// certificateAuthority is used to verify the TLS connection and the hostname on the leaf certificate
+	// must be set to 'oidc.oidc-namespace'.
+	//
+	// curl https://oidc.oidc-namespace/.well-known/openid-configuration (.discoveryURL field)
+	// {
+	//     issuer: "https://oidc.example.com" (.url field)
+	// }
+	//
+	// discoveryURL must be different from url.
+	// Required to be unique across all JWT authenticators.
+	// Note that egress selection configuration is not used for this network connection.
+	// +optional
+	DiscoveryURL         string
+	CertificateAuthority string
+	Audiences            []string
+	AudienceMatchPolicy  AudienceMatchPolicyType
+}
+
+// AudienceMatchPolicyType is a set of valid values for Issuer.AudienceMatchPolicy
+type AudienceMatchPolicyType string
+
+// Valid types for AudienceMatchPolicyType
+const (
+	AudienceMatchPolicyMatchAny AudienceMatchPolicyType = "MatchAny"
+)
+
+// ClaimValidationRule provides the configuration for a single claim validation rule.
+type ClaimValidationRule struct {
+	Claim         string
+	RequiredValue string
+
+	Expression string
+	Message    string
+}
+
+// ClaimMappings provides the configuration for claim mapping
+type ClaimMappings struct {
+	Username PrefixedClaimOrExpression
+	Groups   PrefixedClaimOrExpression
+	UID      ClaimOrExpression
+	Extra    []ExtraMapping
+}
+
+// PrefixedClaimOrExpression provides the configuration for a single prefixed claim or expression.
+type PrefixedClaimOrExpression struct {
+	Claim  string
+	Prefix *string
+
+	Expression string
+}
+
+// ClaimOrExpression provides the configuration for a single claim or expression.
+type ClaimOrExpression struct {
+	Claim      string
+	Expression string
+}
+
+// ExtraMapping provides the configuration for a single extra mapping.
+type ExtraMapping struct {
+	Key             string
+	ValueExpression string
+}
+
+// UserValidationRule provides the configuration for a single user validation rule.
+type UserValidationRule struct {
+	Expression string
+	Message    string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type AuthorizationConfiguration struct {
+	metav1.TypeMeta
+
+	// Authorizers is an ordered list of authorizers to
+	// authorize requests against.
+	// This is similar to the --authorization-modes kube-apiserver flag
+	// Must be at least one.
+	Authorizers []AuthorizerConfiguration `json:"authorizers"`
+}
+
+const (
+	TypeWebhook                                          AuthorizerType = "Webhook"
+	FailurePolicyNoOpinion                               string         = "NoOpinion"
+	FailurePolicyDeny                                    string         = "Deny"
+	AuthorizationWebhookConnectionInfoTypeKubeConfigFile string         = "KubeConfigFile"
+	AuthorizationWebhookConnectionInfoTypeInCluster      string         = "InClusterConfig"
+)
+
+type AuthorizerType string
+
+type AuthorizerConfiguration struct {
+	// Type refers to the type of the authorizer
+	// "Webhook" is supported in the generic API server
+	// Other API servers may support additional authorizer
+	// types like Node, RBAC, ABAC, etc.
+	Type AuthorizerType
+
+	// Name used to describe the webhook
+	// This is explicitly used in monitoring machinery for metrics
+	// Note: Names must be DNS1123 labels like `myauthorizername` or
+	//		 subdomains like `myauthorizer.example.domain`
+	// Required, with no default
+	Name string
+
+	// Webhook defines the configuration for a Webhook authorizer
+	// Must be defined when Type=Webhook
+	Webhook *WebhookConfiguration
+}
+
+type WebhookConfiguration struct {
+	// The duration to cache 'authorized' responses from the webhook
+	// authorizer.
+	// Same as setting `--authorization-webhook-cache-authorized-ttl` flag
+	// Default: 5m0s
+	AuthorizedTTL metav1.Duration
+	// The duration to cache 'unauthorized' responses from the webhook
+	// authorizer.
+	// Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag
+	// Default: 30s
+	UnauthorizedTTL metav1.Duration
+	// Timeout for the webhook request
+	// Maximum allowed value is 30s.
+	// Required, no default value.
+	Timeout metav1.Duration
+	// The API version of the authorization.k8s.io SubjectAccessReview to
+	// send to and expect from the webhook.
+	// Same as setting `--authorization-webhook-version` flag
+	// Valid values: v1beta1, v1
+	// Required, no default value
+	SubjectAccessReviewVersion string
+	// MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview
+	// version the CEL expressions are evaluated against
+	// Valid values: v1
+	// Required, no default value
+	MatchConditionSubjectAccessReviewVersion string
+	// Controls the authorization decision when a webhook request fails to
+	// complete or returns a malformed response or errors evaluating
+	// matchConditions.
+	// Valid values:
+	//   - NoOpinion: continue to subsequent authorizers to see if one of
+	//     them allows the request
+	//   - Deny: reject the request without consulting subsequent authorizers
+	// Required, with no default.
+	FailurePolicy string
+
+	// ConnectionInfo defines how we talk to the webhook
+	ConnectionInfo WebhookConnectionInfo
+
+	// matchConditions is a list of conditions that must be met for a request to be sent to this
+	// webhook. An empty list of matchConditions matches all requests.
+	// There are a maximum of 64 match conditions allowed.
+	//
+	// The exact matching logic is (in order):
+	//   1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped.
+	//   2. If ALL matchConditions evaluate to TRUE, then the webhook is called.
+	//   3. If at least one matchCondition evaluates to an error (but none are FALSE):
+	//      - If failurePolicy=Deny, then the webhook rejects the request
+	//      - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped
+	MatchConditions []WebhookMatchCondition
+}
+
+type WebhookConnectionInfo struct {
+	// Controls how the webhook should communicate with the server.
+	// Valid values:
+	// - KubeConfigFile: use the file specified in kubeConfigFile to locate the
+	//   server.
+	// - InClusterConfig: use the in-cluster configuration to call the
+	//   SubjectAccessReview API hosted by kube-apiserver. This mode is not
+	//   allowed for kube-apiserver.
+	Type string
+
+	// Path to KubeConfigFile for connection info
+	// Required, if connectionInfo.Type is KubeConfigFile
+	KubeConfigFile *string
+}
+
+type WebhookMatchCondition struct {
+	// expression represents the expression which will be evaluated by CEL. Must evaluate to bool.
+	// CEL expressions have access to the contents of the SubjectAccessReview in v1 version.
+	// If version specified by subjectAccessReviewVersion in the request variable is v1beta1,
+	// the contents would be converted to the v1 version before evaluating the CEL expression.
+	//
+	// - 'resourceAttributes' describes information for a resource access request and is unset for non-resource requests. e.g. has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default'
+	// - 'nonResourceAttributes' describes information for a non-resource access request and is unset for resource requests. e.g. has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/healthz'.
+	// - 'user' is the user to test for. e.g. request.user == 'alice'
+	// - 'groups' is the groups to test for. e.g. ('group1' in request.groups)
+	// - 'extra' corresponds to the user.Info.GetExtra() method from the authenticator.
+	// - 'uid' is the unique ID of the requesting user. e.g. request.uid == '1'
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	Expression string
+}
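Note: the matchConditions contract documented above (any FALSE skips the webhook; all TRUE calls it; errors with no FALSE fall through to failurePolicy) can be sketched as a small decision helper, written as if it lived alongside the types above. This is illustrative only; the evaluate callback stands in for a CEL evaluator and is not part of the vendored API:

	// decideWebhookCall is a hypothetical helper mirroring the documented
	// matchConditions logic; evaluate is an assumed CEL-evaluation callback.
	func decideWebhookCall(conds []WebhookMatchCondition, failurePolicy string,
		evaluate func(expr string) (bool, error)) (call bool, deny bool) {
		sawError := false
		for _, c := range conds {
			ok, err := evaluate(c.Expression)
			if err != nil {
				sawError = true // a FALSE seen later still wins over the error
				continue
			}
			if !ok {
				return false, false // 1. at least one FALSE: skip the webhook
			}
		}
		if sawError {
			return false, failurePolicy == "Deny" // 3. errors, no FALSE: policy decides
		}
		return true, false // 2. all TRUE (or empty list): call the webhook
	}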
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/types_encryption.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/types_encryption.go
new file mode 100644
index 0000000000..fb66305050
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/types_encryption.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiserver
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+/*
+EncryptionConfiguration stores the complete configuration for encryption providers.
+It also allows the use of wildcards to specify the resources that should be encrypted.
+Use '*.' to encrypt all resources within a group or '*.*' to encrypt all resources.
+'*.' can be used to encrypt all resources in the core group.  '*.*' will encrypt all
+resources, even custom resources that are added after API server start.
+Use of wildcards that overlap within the same resource list or across multiple
+entries is not allowed, since part of the configuration would be ineffective.
+Resource lists are processed in order, with earlier lists taking precedence.
+
+Example:
+
+	kind: EncryptionConfiguration
+	apiVersion: apiserver.config.k8s.io/v1
+	resources:
+	- resources:
+	  - events
+	  providers:
+	  - identity: {}  # do not encrypt events even though *.* is specified below
+	- resources:
+	  - secrets
+	  - configmaps
+	  - pandas.awesome.bears.example
+	  providers:
+	  - aescbc:
+	      keys:
+	      - name: key1
+	        secret: c2VjcmV0IGlzIHNlY3VyZQ==
+	- resources:
+	  - '*.apps'
+	  providers:
+	  - aescbc:
+	      keys:
+	      - name: key2
+	        secret: c2VjcmV0IGlzIHNlY3VyZSwgb3IgaXMgaXQ/Cg==
+	- resources:
+	  - '*.*'
+	  providers:
+	  - aescbc:
+	      keys:
+	      - name: key3
+	        secret: c2VjcmV0IGlzIHNlY3VyZSwgSSB0aGluaw==
+*/
+type EncryptionConfiguration struct {
+	metav1.TypeMeta
+	// resources is a list containing resources, and their corresponding encryption providers.
+	Resources []ResourceConfiguration
+}
+
+// ResourceConfiguration stores per resource configuration.
+type ResourceConfiguration struct {
+	// resources is a list of kubernetes resources which have to be encrypted. The resource names are derived from `resource` or `resource.group` of the group/version/resource.
+	// eg: pandas.awesome.bears.example is a custom resource with 'group': awesome.bears.example, 'resource': pandas.
+	// Use '*.*' to encrypt all resources and '*.' to encrypt all resources in a specific group.
+	// eg: '*.awesome.bears.example' will encrypt all resources in the group 'awesome.bears.example'.
+	// eg: '*.' will encrypt all resources in the core group (such as pods, configmaps, etc).
+	Resources []string
+	// providers is a list of transformers to be used for reading and writing the resources to disk.
+	// eg: aesgcm, aescbc, secretbox, identity, kms.
+	Providers []ProviderConfiguration
+}
+
+// ProviderConfiguration stores the provided configuration for an encryption provider.
+type ProviderConfiguration struct {
+	// aesgcm is the configuration for the AES-GCM transformer.
+	AESGCM *AESConfiguration
+	// aescbc is the configuration for the AES-CBC transformer.
+	AESCBC *AESConfiguration
+	// secretbox is the configuration for the Secretbox based transformer.
+	Secretbox *SecretboxConfiguration
+	// identity is the (empty) configuration for the identity transformer.
+	Identity *IdentityConfiguration
+	// kms contains the name, cache size and path to configuration file for a KMS based envelope transformer.
+	KMS *KMSConfiguration
+}
+
+// AESConfiguration contains the API configuration for an AES transformer.
+type AESConfiguration struct {
+	// keys is a list of keys to be used for creating the AES transformer.
+	// Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM.
+	Keys []Key
+}
+
+// SecretboxConfiguration contains the API configuration for a Secretbox transformer.
+type SecretboxConfiguration struct {
+	// keys is a list of keys to be used for creating the Secretbox transformer.
+	// Each key has to be 32 bytes long.
+	Keys []Key
+}
+
+// Key contains name and secret of the provided key for a transformer.
+type Key struct {
+	// name is the name of the key to be used while storing data to disk.
+	Name string
+	// secret is the actual key, encoded in base64.
+	Secret string
+}
+
+// String implements Stringer interface in a log safe way.
+func (k Key) String() string {
+	return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name)
+}
+
+// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration.
+type IdentityConfiguration struct{}
+
+// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer.
+type KMSConfiguration struct {
+	// apiVersion of KeyManagementService
+	// +optional
+	APIVersion string
+	// name is the name of the KMS plugin to be used.
+	Name string
+	// cachesize is the maximum number of secrets which are cached in memory. The default value is 1000.
+	// Set to a negative value to disable caching. This field is only allowed for KMS v1 providers.
+	// +optional
+	CacheSize *int32
+	// endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock".
+	Endpoint string
+	// timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds.
+	// +optional
+	Timeout *metav1.Duration
+}
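Note: because Key implements Stringer with a value receiver, formatting a Key through the fmt package never leaks the key material. A minimal usage sketch; the logging helper is hypothetical and the secret is the placeholder from the doc-comment example above:

	import (
		"fmt"

		apiserver "k8s.io/apiserver/pkg/apis/apiserver"
	)

	func logKey(k apiserver.Key) {
		// %v resolves through Key.String, so the secret is redacted,
		// e.g. "using encryption key: Name: key1, Secret: [REDACTED]".
		fmt.Printf("using encryption key: %v\n", k)
	}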
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go
new file mode 100644
index 0000000000..46fb841a5f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+var (
+	defaultTimeout          = &metav1.Duration{Duration: 3 * time.Second}
+	defaultCacheSize  int32 = 1000
+	defaultAPIVersion       = "v1"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+	return RegisterDefaults(scheme)
+}
+
+// SetDefaults_KMSConfiguration applies defaults to KMSConfiguration.
+func SetDefaults_KMSConfiguration(obj *KMSConfiguration) {
+	if obj.Timeout == nil {
+		obj.Timeout = defaultTimeout
+	}
+
+	if obj.APIVersion == "" {
+		obj.APIVersion = defaultAPIVersion
+	}
+
+	// cacheSize is relevant only for kms v1
+	if obj.CacheSize == nil && obj.APIVersion == "v1" {
+		obj.CacheSize = &defaultCacheSize
+	}
+}
+
+func SetDefaults_WebhookConfiguration(obj *WebhookConfiguration) {
+	if obj.AuthorizedTTL.Duration == 0 {
+		obj.AuthorizedTTL.Duration = 5 * time.Minute
+	}
+	if obj.UnauthorizedTTL.Duration == 0 {
+		obj.UnauthorizedTTL.Duration = 30 * time.Second
+	}
+}
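Note: applied to zero-valued configurations, these defaulters produce exactly the values stated in the field comments. A small sketch, assuming the usual package alias:

	import apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"

	func defaultedConfigs() {
		kms := &apiserverv1.KMSConfiguration{}
		apiserverv1.SetDefaults_KMSConfiguration(kms)
		// kms.Timeout.Duration == 3*time.Second, kms.APIVersion == "v1",
		// *kms.CacheSize == 1000 (cacheSize is only defaulted for KMS v1).

		wh := &apiserverv1.WebhookConfiguration{}
		apiserverv1.SetDefaults_WebhookConfiguration(wh)
		// wh.AuthorizedTTL.Duration == 5*time.Minute,
		// wh.UnauthorizedTTL.Duration == 30*time.Second.
	}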
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go
new file mode 100644
index 0000000000..91458aee4e
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver
+// +k8s:defaulter-gen=TypeMeta
+// +groupName=apiserver.config.k8s.io
+
+// Package v1 is the v1 version of the API.
+package v1 // import "k8s.io/apiserver/pkg/apis/apiserver/v1"
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go
new file mode 100644
index 0000000000..7b1b51b629
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const GroupName = "apiserver.config.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes)
+	localSchemeBuilder.Register(addDefaultingFuncs)
+}
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&AdmissionConfiguration{},
+		&AuthorizationConfiguration{},
+		&EncryptionConfiguration{},
+	)
+	// also register into the v1 group as EncryptionConfig (due to a docs bug)
+	scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "EncryptionConfig"}, &EncryptionConfiguration{})
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
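Note: with AddToScheme plus the defaulting and conversion registrations above, decoding serialized configuration follows the standard scheme-and-codec pattern. A sketch under the assumption that only this group needs to be recognized:

	import (
		"k8s.io/apimachinery/pkg/runtime"
		"k8s.io/apimachinery/pkg/runtime/serializer"

		apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
	)

	// decodeConfig decodes JSON or YAML bytes into one of the kinds
	// registered above, with defaults applied by the scheme.
	func decodeConfig(data []byte) (runtime.Object, error) {
		scheme := runtime.NewScheme()
		if err := apiserverv1.AddToScheme(scheme); err != nil {
			return nil, err
		}
		codecs := serializer.NewCodecFactory(scheme)
		return runtime.Decode(codecs.UniversalDecoder(apiserverv1.SchemeGroupVersion), data)
	}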
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go
new file mode 100644
index 0000000000..18328c5582
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go
@@ -0,0 +1,176 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AdmissionConfiguration provides versioned configuration for admission controllers.
+type AdmissionConfiguration struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Plugins allows specifying a configuration per admission control plugin.
+	// +optional
+	Plugins []AdmissionPluginConfiguration `json:"plugins"`
+}
+
+// AdmissionPluginConfiguration provides the configuration for a single plug-in.
+type AdmissionPluginConfiguration struct {
+	// Name is the name of the admission controller.
+	// It must match the registered admission plugin name.
+	Name string `json:"name"`
+
+	// Path is the path to a configuration file that contains the plugin's
+	// configuration
+	// +optional
+	Path string `json:"path"`
+
+	// Configuration is an embedded configuration object to be used as the plugin's
+	// configuration. If present, it will be used instead of the path to the configuration file.
+	// +optional
+	Configuration *runtime.Unknown `json:"configuration"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type AuthorizationConfiguration struct {
+	metav1.TypeMeta
+
+	// Authorizers is an ordered list of authorizers to
+	// authorize requests against.
+	// This is similar to the --authorization-mode kube-apiserver flag.
+	// At least one authorizer must be defined.
+	Authorizers []AuthorizerConfiguration `json:"authorizers"`
+}
+
+const (
+	TypeWebhook                                          AuthorizerType = "Webhook"
+	FailurePolicyNoOpinion                               string         = "NoOpinion"
+	FailurePolicyDeny                                    string         = "Deny"
+	AuthorizationWebhookConnectionInfoTypeKubeConfigFile string         = "KubeConfigFile"
+	AuthorizationWebhookConnectionInfoTypeInCluster      string         = "InClusterConfig"
+)
+
+type AuthorizerType string
+
+type AuthorizerConfiguration struct {
+	// Type refers to the type of the authorizer
+	// "Webhook" is supported in the generic API server
+	// Other API servers may support additional authorizer
+	// types like Node, RBAC, ABAC, etc.
+	Type string `json:"type"`
+
+	// Name used to describe the webhook
+	// This is explicitly used in monitoring machinery for metrics
+	// Note: Names must be DNS1123 labels like `myauthorizername` or
+	// subdomains like `myauthorizer.example.domain`
+	// Required, with no default
+	Name string `json:"name"`
+
+	// Webhook defines the configuration for a Webhook authorizer
+	// Must be defined when Type=Webhook
+	// Must not be defined when Type!=Webhook
+	Webhook *WebhookConfiguration `json:"webhook,omitempty"`
+}
+
+type WebhookConfiguration struct {
+	// The duration to cache 'authorized' responses from the webhook
+	// authorizer.
+	// Same as setting `--authorization-webhook-cache-authorized-ttl` flag
+	// Default: 5m0s
+	AuthorizedTTL metav1.Duration `json:"authorizedTTL"`
+	// The duration to cache 'unauthorized' responses from the webhook
+	// authorizer.
+	// Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag
+	// Default: 30s
+	UnauthorizedTTL metav1.Duration `json:"unauthorizedTTL"`
+	// Timeout for the webhook request
+	// Maximum allowed value is 30s.
+	// Required, no default value.
+	Timeout metav1.Duration `json:"timeout"`
+	// The API version of the authorization.k8s.io SubjectAccessReview to
+	// send to and expect from the webhook.
+	// Same as setting `--authorization-webhook-version` flag
+	// Valid values: v1beta1, v1
+	// Required, no default value
+	SubjectAccessReviewVersion string `json:"subjectAccessReviewVersion"`
+	// MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview
+	// version the CEL expressions are evaluated against
+	// Valid values: v1
+	// Required, no default value
+	MatchConditionSubjectAccessReviewVersion string `json:"matchConditionSubjectAccessReviewVersion"`
+	// Controls the authorization decision when a webhook request fails to
+	// complete, returns a malformed response, or errors while evaluating
+	// matchConditions.
+	// Valid values:
+	//   - NoOpinion: continue to subsequent authorizers to see if one of
+	//     them allows the request
+	//   - Deny: reject the request without consulting subsequent authorizers
+	// Required, with no default.
+	FailurePolicy string `json:"failurePolicy"`
+
+	// ConnectionInfo defines how we talk to the webhook
+	ConnectionInfo WebhookConnectionInfo `json:"connectionInfo"`
+
+	// matchConditions is a list of conditions that must be met for a request to be sent to this
+	// webhook. An empty list of matchConditions matches all requests.
+	// A maximum of 64 match conditions is allowed.
+	//
+	// The exact matching logic is (in order):
+	//   1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped.
+	//   2. If ALL matchConditions evaluate to TRUE, then the webhook is called.
+	//   3. If at least one matchCondition evaluates to an error (but none are FALSE):
+	//      - If failurePolicy=Deny, then the webhook rejects the request
+	//      - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped
+	MatchConditions []WebhookMatchCondition `json:"matchConditions"`
+}
+
+type WebhookConnectionInfo struct {
+	// Controls how the webhook should communicate with the server.
+	// Valid values:
+	// - KubeConfigFile: use the file specified in kubeConfigFile to locate the
+	//   server.
+	// - InClusterConfig: use the in-cluster configuration to call the
+	//   SubjectAccessReview API hosted by kube-apiserver. This mode is not
+	//   allowed for kube-apiserver.
+	Type string `json:"type"`
+
+	// Path to KubeConfigFile for connection info
+	// Required if connectionInfo.Type is KubeConfigFile
+	KubeConfigFile *string `json:"kubeConfigFile"`
+}
+
+type WebhookMatchCondition struct {
+	// expression represents the expression which will be evaluated by CEL. Must evaluate to bool.
+	// CEL expressions have access to the contents of the SubjectAccessReview in v1 version.
+	// If the version specified by subjectAccessReviewVersion in the request variable is v1beta1,
+	// the contents are converted to the v1 version before the CEL expression is evaluated.
+	//
+	// - 'resourceAttributes' describes information for a resource access request and is unset for non-resource requests. e.g. has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default'
+	// - 'nonResourceAttributes' describes information for a non-resource access request and is unset for resource requests. e.g. has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/healthz'.
+	// - 'user' is the user to test for. e.g. request.user == 'alice'
+	// - 'groups' is the groups to test for. e.g. ('group1' in request.groups)
+	// - 'extra' corresponds to the user.Info.GetExtra() method from the authenticator.
+	// - 'uid' is the unique ID of the requesting user. e.g. request.uid == '1'
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	Expression string `json:"expression"`
+}
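Note: assembled end to end, a minimal single-webhook AuthorizationConfiguration built from the v1 types above might look like the following sketch; the webhook name and kubeconfig path are placeholders, not values from this change:

	import (
		"time"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
	)

	func exampleAuthorizationConfiguration() apiserverv1.AuthorizationConfiguration {
		kubeconfig := "/etc/kubernetes/authz-webhook.kubeconfig" // placeholder path
		return apiserverv1.AuthorizationConfiguration{
			Authorizers: []apiserverv1.AuthorizerConfiguration{{
				Type: string(apiserverv1.TypeWebhook),
				Name: "example-webhook", // must be a DNS1123 label or subdomain
				Webhook: &apiserverv1.WebhookConfiguration{
					AuthorizedTTL:              metav1.Duration{Duration: 5 * time.Minute},
					UnauthorizedTTL:            metav1.Duration{Duration: 30 * time.Second},
					Timeout:                    metav1.Duration{Duration: 3 * time.Second},
					SubjectAccessReviewVersion: "v1",
					MatchConditionSubjectAccessReviewVersion: "v1",
					FailurePolicy: apiserverv1.FailurePolicyNoOpinion,
					ConnectionInfo: apiserverv1.WebhookConnectionInfo{
						Type:           apiserverv1.AuthorizationWebhookConnectionInfoTypeKubeConfigFile,
						KubeConfigFile: &kubeconfig,
					},
				},
			}},
		}
	}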
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types_encryption.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types_encryption.go
new file mode 100644
index 0000000000..7aced8cf62
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types_encryption.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+/*
+EncryptionConfiguration stores the complete configuration for encryption providers.
+It also allows the use of wildcards to specify the resources that should be encrypted.
+Use '*.' to encrypt all resources within a group or '*.*' to encrypt all resources.
+'*.' can be used to encrypt all resources in the core group.  '*.*' will encrypt all
+resources, even custom resources that are added after API server start.
+Use of wildcards that overlap within the same resource list or across multiple
+entries is not allowed, since part of the configuration would be ineffective.
+Resource lists are processed in order, with earlier lists taking precedence.
+
+Example:
+
+	kind: EncryptionConfiguration
+	apiVersion: apiserver.config.k8s.io/v1
+	resources:
+	- resources:
+	  - events
+	  providers:
+	  - identity: {}  # do not encrypt events even though *.* is specified below
+	- resources:
+	  - secrets
+	  - configmaps
+	  - pandas.awesome.bears.example
+	  providers:
+	  - aescbc:
+	      keys:
+	      - name: key1
+	        secret: c2VjcmV0IGlzIHNlY3VyZQ==
+	- resources:
+	  - '*.apps'
+	  providers:
+	  - aescbc:
+	      keys:
+	      - name: key2
+	        secret: c2VjcmV0IGlzIHNlY3VyZSwgb3IgaXMgaXQ/Cg==
+	- resources:
+	  - '*.*'
+	  providers:
+	  - aescbc:
+	      keys:
+	      - name: key3
+	        secret: c2VjcmV0IGlzIHNlY3VyZSwgSSB0aGluaw==
+*/
+type EncryptionConfiguration struct {
+	metav1.TypeMeta
+	// resources is a list containing resources, and their corresponding encryption providers.
+	Resources []ResourceConfiguration `json:"resources"`
+}
+
+// ResourceConfiguration stores per resource configuration.
+type ResourceConfiguration struct {
+	// resources is a list of kubernetes resources which have to be encrypted. The resource names are derived from `resource` or `resource.group` of the group/version/resource.
+	// eg: pandas.awesome.bears.example is a custom resource with 'group': awesome.bears.example, 'resource': pandas.
+	// Use '*.*' to encrypt all resources and '*.' to encrypt all resources in a specific group.
+	// eg: '*.awesome.bears.example' will encrypt all resources in the group 'awesome.bears.example'.
+	// eg: '*.' will encrypt all resources in the core group (such as pods, configmaps, etc).
+	Resources []string `json:"resources"`
+	// providers is a list of transformers to be used for reading and writing the resources to disk.
+	// eg: aesgcm, aescbc, secretbox, identity, kms.
+	Providers []ProviderConfiguration `json:"providers"`
+}
+
+// ProviderConfiguration stores the provided configuration for an encryption provider.
+type ProviderConfiguration struct {
+	// aesgcm is the configuration for the AES-GCM transformer.
+	AESGCM *AESConfiguration `json:"aesgcm,omitempty"`
+	// aescbc is the configuration for the AES-CBC transformer.
+	AESCBC *AESConfiguration `json:"aescbc,omitempty"`
+	// secretbox is the configuration for the Secretbox based transformer.
+	Secretbox *SecretboxConfiguration `json:"secretbox,omitempty"`
+	// identity is the (empty) configuration for the identity transformer.
+	Identity *IdentityConfiguration `json:"identity,omitempty"`
+	// kms contains the name, cache size and path to configuration file for a KMS based envelope transformer.
+	KMS *KMSConfiguration `json:"kms,omitempty"`
+}
+
+// AESConfiguration contains the API configuration for an AES transformer.
+type AESConfiguration struct {
+	// keys is a list of keys to be used for creating the AES transformer.
+	// Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM.
+	Keys []Key `json:"keys"`
+}
+
+// SecretboxConfiguration contains the API configuration for a Secretbox transformer.
+type SecretboxConfiguration struct {
+	// keys is a list of keys to be used for creating the Secretbox transformer.
+	// Each key has to be 32 bytes long.
+	Keys []Key `json:"keys"`
+}
+
+// Key contains name and secret of the provided key for a transformer.
+type Key struct {
+	// name is the name of the key to be used while storing data to disk.
+	Name string `json:"name"`
+	// secret is the actual key, encoded in base64.
+	Secret string `json:"secret"`
+}
+
+// String implements Stringer interface in a log safe way.
+func (k Key) String() string {
+	return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name)
+}
+
+// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration.
+type IdentityConfiguration struct{}
+
+// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer.
+type KMSConfiguration struct {
+	// apiVersion of KeyManagementService
+	// +optional
+	APIVersion string `json:"apiVersion"`
+	// name is the name of the KMS plugin to be used.
+	Name string `json:"name"`
+	// cachesize is the maximum number of secrets which are cached in memory. The default value is 1000.
+	// Set to a negative value to disable caching. This field is only allowed for KMS v1 providers.
+	// +optional
+	CacheSize *int32 `json:"cachesize,omitempty"`
+	// endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock".
+	Endpoint string `json:"endpoint"`
+	// timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds.
+	// +optional
+	Timeout *metav1.Duration `json:"timeout,omitempty"`
+}
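Note: the second entry of the YAML example above translates directly into the v1 Go types. A sketch; the base64 key material is the same placeholder used in the doc comment:

	import apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"

	func exampleEncryptionConfiguration() apiserverv1.EncryptionConfiguration {
		return apiserverv1.EncryptionConfiguration{
			Resources: []apiserverv1.ResourceConfiguration{{
				Resources: []string{"secrets", "configmaps", "pandas.awesome.bears.example"},
				Providers: []apiserverv1.ProviderConfiguration{{
					AESCBC: &apiserverv1.AESConfiguration{
						Keys: []apiserverv1.Key{{Name: "key1", Secret: "c2VjcmV0IGlzIHNlY3VyZQ=="}},
					},
				}},
			}},
		}
	}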
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go
new file mode 100644
index 0000000000..63083025a5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go
@@ -0,0 +1,537 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	unsafe "unsafe"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	apiserver "k8s.io/apiserver/pkg/apis/apiserver"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*AESConfiguration)(nil), (*apiserver.AESConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_AESConfiguration_To_apiserver_AESConfiguration(a.(*AESConfiguration), b.(*apiserver.AESConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AESConfiguration)(nil), (*AESConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AESConfiguration_To_v1_AESConfiguration(a.(*apiserver.AESConfiguration), b.(*AESConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AdmissionConfiguration)(nil), (*apiserver.AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(a.(*AdmissionConfiguration), b.(*apiserver.AdmissionConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionConfiguration)(nil), (*AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(a.(*apiserver.AdmissionConfiguration), b.(*AdmissionConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AdmissionPluginConfiguration)(nil), (*apiserver.AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(a.(*AdmissionPluginConfiguration), b.(*apiserver.AdmissionPluginConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionPluginConfiguration)(nil), (*AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(a.(*apiserver.AdmissionPluginConfiguration), b.(*AdmissionPluginConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AuthorizationConfiguration)(nil), (*apiserver.AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(a.(*AuthorizationConfiguration), b.(*apiserver.AuthorizationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizationConfiguration)(nil), (*AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration(a.(*apiserver.AuthorizationConfiguration), b.(*AuthorizationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AuthorizerConfiguration)(nil), (*apiserver.AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(a.(*AuthorizerConfiguration), b.(*apiserver.AuthorizerConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizerConfiguration)(nil), (*AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration(a.(*apiserver.AuthorizerConfiguration), b.(*AuthorizerConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*EncryptionConfiguration)(nil), (*apiserver.EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration(a.(*EncryptionConfiguration), b.(*apiserver.EncryptionConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.EncryptionConfiguration)(nil), (*EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_EncryptionConfiguration_To_v1_EncryptionConfiguration(a.(*apiserver.EncryptionConfiguration), b.(*EncryptionConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*IdentityConfiguration)(nil), (*apiserver.IdentityConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_IdentityConfiguration_To_apiserver_IdentityConfiguration(a.(*IdentityConfiguration), b.(*apiserver.IdentityConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.IdentityConfiguration)(nil), (*IdentityConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_IdentityConfiguration_To_v1_IdentityConfiguration(a.(*apiserver.IdentityConfiguration), b.(*IdentityConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*KMSConfiguration)(nil), (*apiserver.KMSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_KMSConfiguration_To_apiserver_KMSConfiguration(a.(*KMSConfiguration), b.(*apiserver.KMSConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.KMSConfiguration)(nil), (*KMSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_KMSConfiguration_To_v1_KMSConfiguration(a.(*apiserver.KMSConfiguration), b.(*KMSConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Key)(nil), (*apiserver.Key)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Key_To_apiserver_Key(a.(*Key), b.(*apiserver.Key), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.Key)(nil), (*Key)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_Key_To_v1_Key(a.(*apiserver.Key), b.(*Key), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ProviderConfiguration)(nil), (*apiserver.ProviderConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ProviderConfiguration_To_apiserver_ProviderConfiguration(a.(*ProviderConfiguration), b.(*apiserver.ProviderConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ProviderConfiguration)(nil), (*ProviderConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ProviderConfiguration_To_v1_ProviderConfiguration(a.(*apiserver.ProviderConfiguration), b.(*ProviderConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ResourceConfiguration)(nil), (*apiserver.ResourceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ResourceConfiguration_To_apiserver_ResourceConfiguration(a.(*ResourceConfiguration), b.(*apiserver.ResourceConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ResourceConfiguration)(nil), (*ResourceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ResourceConfiguration_To_v1_ResourceConfiguration(a.(*apiserver.ResourceConfiguration), b.(*ResourceConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*SecretboxConfiguration)(nil), (*apiserver.SecretboxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_SecretboxConfiguration_To_apiserver_SecretboxConfiguration(a.(*SecretboxConfiguration), b.(*apiserver.SecretboxConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.SecretboxConfiguration)(nil), (*SecretboxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(a.(*apiserver.SecretboxConfiguration), b.(*SecretboxConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookConfiguration)(nil), (*apiserver.WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration(a.(*WebhookConfiguration), b.(*apiserver.WebhookConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConfiguration)(nil), (*WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration(a.(*apiserver.WebhookConfiguration), b.(*WebhookConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookConnectionInfo)(nil), (*apiserver.WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(a.(*WebhookConnectionInfo), b.(*apiserver.WebhookConnectionInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConnectionInfo)(nil), (*WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(a.(*apiserver.WebhookConnectionInfo), b.(*WebhookConnectionInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookMatchCondition)(nil), (*apiserver.WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(a.(*WebhookMatchCondition), b.(*apiserver.WebhookMatchCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.WebhookMatchCondition)(nil), (*WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition(a.(*apiserver.WebhookMatchCondition), b.(*WebhookMatchCondition), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1_AESConfiguration_To_apiserver_AESConfiguration(in *AESConfiguration, out *apiserver.AESConfiguration, s conversion.Scope) error {
+	out.Keys = *(*[]apiserver.Key)(unsafe.Pointer(&in.Keys))
+	return nil
+}
+
+// Convert_v1_AESConfiguration_To_apiserver_AESConfiguration is an autogenerated conversion function.
+func Convert_v1_AESConfiguration_To_apiserver_AESConfiguration(in *AESConfiguration, out *apiserver.AESConfiguration, s conversion.Scope) error {
+	return autoConvert_v1_AESConfiguration_To_apiserver_AESConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AESConfiguration_To_v1_AESConfiguration(in *apiserver.AESConfiguration, out *AESConfiguration, s conversion.Scope) error {
+	out.Keys = *(*[]Key)(unsafe.Pointer(&in.Keys))
+	return nil
+}
+
+// Convert_apiserver_AESConfiguration_To_v1_AESConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AESConfiguration_To_v1_AESConfiguration(in *apiserver.AESConfiguration, out *AESConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AESConfiguration_To_v1_AESConfiguration(in, out, s)
+}
+
+func autoConvert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error {
+	out.Plugins = *(*[]apiserver.AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins))
+	return nil
+}
+
+// Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration is an autogenerated conversion function.
+func Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error {
+	return autoConvert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error {
+	out.Plugins = *(*[]AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins))
+	return nil
+}
+
+// Convert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(in, out, s)
+}
+
+func autoConvert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Path = in.Path
+	out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration))
+	return nil
+}
+
+// Convert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration is an autogenerated conversion function.
+func Convert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error {
+	return autoConvert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Path = in.Path
+	out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration))
+	return nil
+}
+
+// Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in, out, s)
+}
+
+func autoConvert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error {
+	out.Authorizers = *(*[]apiserver.AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers))
+	return nil
+}
+
+// Convert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration is an autogenerated conversion function.
+func Convert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error {
+	return autoConvert_v1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error {
+	out.Authorizers = *(*[]AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers))
+	return nil
+}
+
+// Convert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthorizationConfiguration_To_v1_AuthorizationConfiguration(in, out, s)
+}
+
+func autoConvert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error {
+	out.Type = apiserver.AuthorizerType(in.Type)
+	out.Name = in.Name
+	out.Webhook = (*apiserver.WebhookConfiguration)(unsafe.Pointer(in.Webhook))
+	return nil
+}
+
+// Convert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration is an autogenerated conversion function.
+func Convert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error {
+	return autoConvert_v1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error {
+	out.Type = string(in.Type)
+	out.Name = in.Name
+	out.Webhook = (*WebhookConfiguration)(unsafe.Pointer(in.Webhook))
+	return nil
+}
+
+// Convert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthorizerConfiguration_To_v1_AuthorizerConfiguration(in, out, s)
+}
+
+func autoConvert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration(in *EncryptionConfiguration, out *apiserver.EncryptionConfiguration, s conversion.Scope) error {
+	out.Resources = *(*[]apiserver.ResourceConfiguration)(unsafe.Pointer(&in.Resources))
+	return nil
+}
+
+// Convert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration is an autogenerated conversion function.
+func Convert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration(in *EncryptionConfiguration, out *apiserver.EncryptionConfiguration, s conversion.Scope) error {
+	return autoConvert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_EncryptionConfiguration_To_v1_EncryptionConfiguration(in *apiserver.EncryptionConfiguration, out *EncryptionConfiguration, s conversion.Scope) error {
+	out.Resources = *(*[]ResourceConfiguration)(unsafe.Pointer(&in.Resources))
+	return nil
+}
+
+// Convert_apiserver_EncryptionConfiguration_To_v1_EncryptionConfiguration is an autogenerated conversion function.
+func Convert_apiserver_EncryptionConfiguration_To_v1_EncryptionConfiguration(in *apiserver.EncryptionConfiguration, out *EncryptionConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_EncryptionConfiguration_To_v1_EncryptionConfiguration(in, out, s)
+}
+
+func autoConvert_v1_IdentityConfiguration_To_apiserver_IdentityConfiguration(in *IdentityConfiguration, out *apiserver.IdentityConfiguration, s conversion.Scope) error {
+	return nil
+}
+
+// Convert_v1_IdentityConfiguration_To_apiserver_IdentityConfiguration is an autogenerated conversion function.
+func Convert_v1_IdentityConfiguration_To_apiserver_IdentityConfiguration(in *IdentityConfiguration, out *apiserver.IdentityConfiguration, s conversion.Scope) error {
+	return autoConvert_v1_IdentityConfiguration_To_apiserver_IdentityConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_IdentityConfiguration_To_v1_IdentityConfiguration(in *apiserver.IdentityConfiguration, out *IdentityConfiguration, s conversion.Scope) error {
+	return nil
+}
+
+// Convert_apiserver_IdentityConfiguration_To_v1_IdentityConfiguration is an autogenerated conversion function.
+func Convert_apiserver_IdentityConfiguration_To_v1_IdentityConfiguration(in *apiserver.IdentityConfiguration, out *IdentityConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_IdentityConfiguration_To_v1_IdentityConfiguration(in, out, s)
+}
+
+func autoConvert_v1_KMSConfiguration_To_apiserver_KMSConfiguration(in *KMSConfiguration, out *apiserver.KMSConfiguration, s conversion.Scope) error {
+	out.APIVersion = in.APIVersion
+	out.Name = in.Name
+	out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize))
+	out.Endpoint = in.Endpoint
+	out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout))
+	return nil
+}
+
+// Convert_v1_KMSConfiguration_To_apiserver_KMSConfiguration is an autogenerated conversion function.
+func Convert_v1_KMSConfiguration_To_apiserver_KMSConfiguration(in *KMSConfiguration, out *apiserver.KMSConfiguration, s conversion.Scope) error {
+	return autoConvert_v1_KMSConfiguration_To_apiserver_KMSConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_KMSConfiguration_To_v1_KMSConfiguration(in *apiserver.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error {
+	out.APIVersion = in.APIVersion
+	out.Name = in.Name
+	out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize))
+	out.Endpoint = in.Endpoint
+	out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout))
+	return nil
+}
+
+// Convert_apiserver_KMSConfiguration_To_v1_KMSConfiguration is an autogenerated conversion function.
+func Convert_apiserver_KMSConfiguration_To_v1_KMSConfiguration(in *apiserver.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_KMSConfiguration_To_v1_KMSConfiguration(in, out, s)
+}
+
+func autoConvert_v1_Key_To_apiserver_Key(in *Key, out *apiserver.Key, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Secret = in.Secret
+	return nil
+}
+
+// Convert_v1_Key_To_apiserver_Key is an autogenerated conversion function.
+func Convert_v1_Key_To_apiserver_Key(in *Key, out *apiserver.Key, s conversion.Scope) error {
+	return autoConvert_v1_Key_To_apiserver_Key(in, out, s)
+}
+
+func autoConvert_apiserver_Key_To_v1_Key(in *apiserver.Key, out *Key, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Secret = in.Secret
+	return nil
+}
+
+// Convert_apiserver_Key_To_v1_Key is an autogenerated conversion function.
+func Convert_apiserver_Key_To_v1_Key(in *apiserver.Key, out *Key, s conversion.Scope) error {
+	return autoConvert_apiserver_Key_To_v1_Key(in, out, s)
+}
+
+func autoConvert_v1_ProviderConfiguration_To_apiserver_ProviderConfiguration(in *ProviderConfiguration, out *apiserver.ProviderConfiguration, s conversion.Scope) error {
+	out.AESGCM = (*apiserver.AESConfiguration)(unsafe.Pointer(in.AESGCM))
+	out.AESCBC = (*apiserver.AESConfiguration)(unsafe.Pointer(in.AESCBC))
+	out.Secretbox = (*apiserver.SecretboxConfiguration)(unsafe.Pointer(in.Secretbox))
+	out.Identity = (*apiserver.IdentityConfiguration)(unsafe.Pointer(in.Identity))
+	out.KMS = (*apiserver.KMSConfiguration)(unsafe.Pointer(in.KMS))
+	return nil
+}
+
+// Convert_v1_ProviderConfiguration_To_apiserver_ProviderConfiguration is an autogenerated conversion function.
+func Convert_v1_ProviderConfiguration_To_apiserver_ProviderConfiguration(in *ProviderConfiguration, out *apiserver.ProviderConfiguration, s conversion.Scope) error {
+	return autoConvert_v1_ProviderConfiguration_To_apiserver_ProviderConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_ProviderConfiguration_To_v1_ProviderConfiguration(in *apiserver.ProviderConfiguration, out *ProviderConfiguration, s conversion.Scope) error {
+	out.AESGCM = (*AESConfiguration)(unsafe.Pointer(in.AESGCM))
+	out.AESCBC = (*AESConfiguration)(unsafe.Pointer(in.AESCBC))
+	out.Secretbox = (*SecretboxConfiguration)(unsafe.Pointer(in.Secretbox))
+	out.Identity = (*IdentityConfiguration)(unsafe.Pointer(in.Identity))
+	out.KMS = (*KMSConfiguration)(unsafe.Pointer(in.KMS))
+	return nil
+}
+
+// Convert_apiserver_ProviderConfiguration_To_v1_ProviderConfiguration is an autogenerated conversion function.
+func Convert_apiserver_ProviderConfiguration_To_v1_ProviderConfiguration(in *apiserver.ProviderConfiguration, out *ProviderConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_ProviderConfiguration_To_v1_ProviderConfiguration(in, out, s)
+}
+
+func autoConvert_v1_ResourceConfiguration_To_apiserver_ResourceConfiguration(in *ResourceConfiguration, out *apiserver.ResourceConfiguration, s conversion.Scope) error {
+	out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
+	out.Providers = *(*[]apiserver.ProviderConfiguration)(unsafe.Pointer(&in.Providers))
+	return nil
+}
+
+// Convert_v1_ResourceConfiguration_To_apiserver_ResourceConfiguration is an autogenerated conversion function.
+func Convert_v1_ResourceConfiguration_To_apiserver_ResourceConfiguration(in *ResourceConfiguration, out *apiserver.ResourceConfiguration, s conversion.Scope) error {
+	return autoConvert_v1_ResourceConfiguration_To_apiserver_ResourceConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_ResourceConfiguration_To_v1_ResourceConfiguration(in *apiserver.ResourceConfiguration, out *ResourceConfiguration, s conversion.Scope) error {
+	out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
+	out.Providers = *(*[]ProviderConfiguration)(unsafe.Pointer(&in.Providers))
+	return nil
+}
+
+// Convert_apiserver_ResourceConfiguration_To_v1_ResourceConfiguration is an autogenerated conversion function.
+func Convert_apiserver_ResourceConfiguration_To_v1_ResourceConfiguration(in *apiserver.ResourceConfiguration, out *ResourceConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_ResourceConfiguration_To_v1_ResourceConfiguration(in, out, s)
+}
+
+func autoConvert_v1_SecretboxConfiguration_To_apiserver_SecretboxConfiguration(in *SecretboxConfiguration, out *apiserver.SecretboxConfiguration, s conversion.Scope) error {
+	out.Keys = *(*[]apiserver.Key)(unsafe.Pointer(&in.Keys))
+	return nil
+}
+
+// Convert_v1_SecretboxConfiguration_To_apiserver_SecretboxConfiguration is an autogenerated conversion function.
+func Convert_v1_SecretboxConfiguration_To_apiserver_SecretboxConfiguration(in *SecretboxConfiguration, out *apiserver.SecretboxConfiguration, s conversion.Scope) error {
+	return autoConvert_v1_SecretboxConfiguration_To_apiserver_SecretboxConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *apiserver.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error {
+	out.Keys = *(*[]Key)(unsafe.Pointer(&in.Keys))
+	return nil
+}
+
+// Convert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration is an autogenerated conversion function.
+func Convert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *apiserver.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(in, out, s)
+}
+
+func autoConvert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error {
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion
+	out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion
+	out.FailurePolicy = in.FailurePolicy
+	if err := Convert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil {
+		return err
+	}
+	out.MatchConditions = *(*[]apiserver.WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions))
+	return nil
+}
+
+// Convert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration is an autogenerated conversion function.
+func Convert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error {
+	return autoConvert_v1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error {
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion
+	out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion
+	out.FailurePolicy = in.FailurePolicy
+	if err := Convert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil {
+		return err
+	}
+	out.MatchConditions = *(*[]WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions))
+	return nil
+}
+
+// Convert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration is an autogenerated conversion function.
+func Convert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookConfiguration_To_v1_WebhookConfiguration(in, out, s)
+}
+
+func autoConvert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error {
+	out.Type = in.Type
+	out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile))
+	return nil
+}
+
+// Convert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo is an autogenerated conversion function.
+func Convert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error {
+	return autoConvert_v1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error {
+	out.Type = in.Type
+	out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile))
+	return nil
+}
+
+// Convert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo is an autogenerated conversion function.
+func Convert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookConnectionInfo_To_v1_WebhookConnectionInfo(in, out, s)
+}
+
+func autoConvert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error {
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition is an autogenerated conversion function.
+func Convert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error {
+	return autoConvert_v1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error {
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition is an autogenerated conversion function.
+func Convert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookMatchCondition_To_v1_WebhookMatchCondition(in, out, s)
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..6afdbd3a2c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go
@@ -0,0 +1,396 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) {
+	*out = *in
+	if in.Keys != nil {
+		in, out := &in.Keys, &out.Keys
+		*out = make([]Key, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration.
+func (in *AESConfiguration) DeepCopy() *AESConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AESConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Plugins != nil {
+		in, out := &in.Plugins, &out.Plugins
+		*out = make([]AdmissionPluginConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfiguration.
+func (in *AdmissionConfiguration) DeepCopy() *AdmissionConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) {
+	*out = *in
+	if in.Configuration != nil {
+		in, out := &in.Configuration, &out.Configuration
+		*out = new(runtime.Unknown)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfiguration.
+func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionPluginConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Authorizers != nil {
+		in, out := &in.Authorizers, &out.Authorizers
+		*out = make([]AuthorizerConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration.
+func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthorizationConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) {
+	*out = *in
+	if in.Webhook != nil {
+		in, out := &in.Webhook, &out.Webhook
+		*out = new(WebhookConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration.
+func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthorizerConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ResourceConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration.
+func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(EncryptionConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration.
+func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(IdentityConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) {
+	*out = *in
+	if in.CacheSize != nil {
+		in, out := &in.CacheSize, &out.CacheSize
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration.
+func (in *KMSConfiguration) DeepCopy() *KMSConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(KMSConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Key) DeepCopyInto(out *Key) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key.
+func (in *Key) DeepCopy() *Key {
+	if in == nil {
+		return nil
+	}
+	out := new(Key)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) {
+	*out = *in
+	if in.AESGCM != nil {
+		in, out := &in.AESGCM, &out.AESGCM
+		*out = new(AESConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AESCBC != nil {
+		in, out := &in.AESCBC, &out.AESCBC
+		*out = new(AESConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Secretbox != nil {
+		in, out := &in.Secretbox, &out.Secretbox
+		*out = new(SecretboxConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Identity != nil {
+		in, out := &in.Identity, &out.Identity
+		*out = new(IdentityConfiguration)
+		**out = **in
+	}
+	if in.KMS != nil {
+		in, out := &in.KMS, &out.KMS
+		*out = new(KMSConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration.
+func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ProviderConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Providers != nil {
+		in, out := &in.Providers, &out.Providers
+		*out = make([]ProviderConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration.
+func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) {
+	*out = *in
+	if in.Keys != nil {
+		in, out := &in.Keys, &out.Keys
+		*out = make([]Key, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration.
+func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretboxConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) {
+	*out = *in
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo)
+	if in.MatchConditions != nil {
+		in, out := &in.MatchConditions, &out.MatchConditions
+		*out = make([]WebhookMatchCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration.
+func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) {
+	*out = *in
+	if in.KubeConfigFile != nil {
+		in, out := &in.KubeConfigFile, &out.KubeConfigFile
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo.
+func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConnectionInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition.
+func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookMatchCondition)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go
new file mode 100644
index 0000000000..4c8189b131
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go
@@ -0,0 +1,56 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	scheme.AddTypeDefaultingFunc(&AuthorizationConfiguration{}, func(obj interface{}) { SetObjectDefaults_AuthorizationConfiguration(obj.(*AuthorizationConfiguration)) })
+	scheme.AddTypeDefaultingFunc(&EncryptionConfiguration{}, func(obj interface{}) { SetObjectDefaults_EncryptionConfiguration(obj.(*EncryptionConfiguration)) })
+	return nil
+}
+
+func SetObjectDefaults_AuthorizationConfiguration(in *AuthorizationConfiguration) {
+	for i := range in.Authorizers {
+		a := &in.Authorizers[i]
+		if a.Webhook != nil {
+			SetDefaults_WebhookConfiguration(a.Webhook)
+		}
+	}
+}
+
+func SetObjectDefaults_EncryptionConfiguration(in *EncryptionConfiguration) {
+	for i := range in.Resources {
+		a := &in.Resources[i]
+		for j := range a.Providers {
+			b := &a.Providers[j]
+			if b.KMS != nil {
+				SetDefaults_KMSConfiguration(b.KMS)
+			}
+		}
+	}
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go
new file mode 100644
index 0000000000..31b8b7064b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	apiserver "k8s.io/apiserver/pkg/apis/apiserver"
+)
+
+func Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error {
+	if err := autoConvert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in, out, s); err != nil {
+		return err
+	}
+	if out.Name == "master" {
+		out.Name = "controlplane"
+	}
+	return nil
+}
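+
+// NOTE (illustrative, not upstream code): with the hand-written conversion above,
+// a v1alpha1 config that still names the deprecated "master" egress selector is
+// normalized to "controlplane" on read, e.g.:
+//
+//	in := &EgressSelection{Name: "master"}
+//	out := &apiserver.EgressSelection{}
+//	_ = Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in, out, nil)
+//	// out.Name == "controlplane"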
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go
new file mode 100644
index 0000000000..a9af01fe76
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+	return RegisterDefaults(scheme)
+}
+
+func SetDefaults_WebhookConfiguration(obj *WebhookConfiguration) {
+	if obj.AuthorizedTTL.Duration == 0 {
+		obj.AuthorizedTTL.Duration = 5 * time.Minute
+	}
+	if obj.UnauthorizedTTL.Duration == 0 {
+		obj.UnauthorizedTTL.Duration = 30 * time.Second
+	}
+}
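+
+// NOTE (illustrative, not upstream code): SetDefaults_WebhookConfiguration only
+// fills zero durations, so a zero-valued config picks up the documented cache
+// TTLs while explicitly set values are left alone, e.g.:
+//
+//	wc := &WebhookConfiguration{}
+//	SetDefaults_WebhookConfiguration(wc)
+//	// wc.AuthorizedTTL.Duration == 5*time.Minute
+//	// wc.UnauthorizedTTL.Duration == 30*time.Second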
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go
new file mode 100644
index 0000000000..c9b658b22a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver
+// +k8s:defaulter-gen=TypeMeta
+// +groupName=apiserver.k8s.io
+// +groupName=apiserver.config.k8s.io
+
+// Package v1alpha1 is the v1alpha1 version of the API.
+package v1alpha1 // import "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go
new file mode 100644
index 0000000000..7d68ac0c62
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const GroupName = "apiserver.k8s.io"
+const ConfigGroupName = "apiserver.config.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// ConfigSchemeGroupVersion is group version used to register these objects
+var ConfigSchemeGroupVersion = schema.GroupVersion{Group: ConfigGroupName, Version: "v1alpha1"}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
+}
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&AdmissionConfiguration{},
+		&EgressSelectorConfiguration{},
+	)
+	scheme.AddKnownTypes(ConfigSchemeGroupVersion,
+		&AuthenticationConfiguration{},
+		&AuthorizationConfiguration{},
+		&TracingConfiguration{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	metav1.AddToGroupVersion(scheme, ConfigSchemeGroupVersion)
+	return nil
+}
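+
+// NOTE (illustrative, not upstream code): consumers register both group versions
+// on a scheme before decoding config files; AddToScheme runs addKnownTypes and
+// the defaulting functions registered in init above, e.g.:
+//
+//	scheme := runtime.NewScheme()
+//	if err := AddToScheme(scheme); err != nil {
+//		panic(err)
+//	}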
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go
new file mode 100644
index 0000000000..dee2c115a1
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go
@@ -0,0 +1,627 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	tracingapi "k8s.io/component-base/tracing/api/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AdmissionConfiguration provides versioned configuration for admission controllers.
+type AdmissionConfiguration struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Plugins allows specifying a configuration per admission control plugin.
+	// +optional
+	Plugins []AdmissionPluginConfiguration `json:"plugins"`
+}
+
+// AdmissionPluginConfiguration provides the configuration for a single plug-in.
+type AdmissionPluginConfiguration struct {
+	// Name is the name of the admission controller.
+	// It must match the registered admission plugin name.
+	Name string `json:"name"`
+
+	// Path is the path to a configuration file that contains the plugin's
+	// configuration
+	// +optional
+	Path string `json:"path"`
+
+	// Configuration is an embedded configuration object to be used as the plugin's
+	// configuration. If present, it will be used instead of the path to the configuration file.
+	// +optional
+	Configuration *runtime.Unknown `json:"configuration"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EgressSelectorConfiguration provides versioned configuration for egress selector clients.
+type EgressSelectorConfiguration struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// egressSelections contains a list of egress selection client configurations
+	EgressSelections []EgressSelection `json:"egressSelections"`
+}
+
+// EgressSelection provides the configuration for a single egress selection client.
+type EgressSelection struct {
+	// name is the name of the egress selection.
+	// Currently supported values are "controlplane", "master", "etcd" and "cluster"
+	// The "master" egress selector is deprecated in favor of "controlplane"
+	Name string `json:"name"`
+
+	// connection is the exact information used to configure the egress selection
+	Connection Connection `json:"connection"`
+}
+
+// Connection provides the configuration for a single egress selection client.
+type Connection struct {
+	// ProxyProtocol is the protocol used to connect from the client to the konnectivity server.
+	ProxyProtocol ProtocolType `json:"proxyProtocol,omitempty"`
+
+	// Transport defines the transport configurations we use to dial to the konnectivity server.
+	// This is required if ProxyProtocol is HTTPConnect or GRPC.
+	// +optional
+	Transport *Transport `json:"transport,omitempty"`
+}
+
+// ProtocolType is a set of valid values for Connection.ProxyProtocol
+type ProtocolType string
+
+// Valid types for ProtocolType for konnectivity server
+const (
+	// Use HTTPConnect to connect to konnectivity server
+	ProtocolHTTPConnect ProtocolType = "HTTPConnect"
+	// Use grpc to connect to konnectivity server
+	ProtocolGRPC ProtocolType = "GRPC"
+	// Connect directly (skip konnectivity server)
+	ProtocolDirect ProtocolType = "Direct"
+)
+
+// Transport defines the transport configurations we use to dial to the konnectivity server
+type Transport struct {
+	// TCP is the TCP configuration for communicating with the konnectivity server via TCP
+	// ProxyProtocol of GRPC is not supported with TCP transport at the moment
+	// Requires at least one of TCP or UDS to be set
+	// +optional
+	TCP *TCPTransport `json:"tcp,omitempty"`
+
+	// UDS is the UDS configuration for communicating with the konnectivity server via UDS
+	// Requires at least one of TCP or UDS to be set
+	// +optional
+	UDS *UDSTransport `json:"uds,omitempty"`
+}
+
+// TCPTransport provides the information to connect to konnectivity server via TCP
+type TCPTransport struct {
+	// URL is the location of the konnectivity server to connect to.
+	// As an example it might be "https://127.0.0.1:8131"
+	URL string `json:"url,omitempty"`
+
+	// TLSConfig is the config needed to use TLS when connecting to konnectivity server
+	// +optional
+	TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
+}
+
+// UDSTransport provides the information to connect to konnectivity server via UDS
+type UDSTransport struct {
+	// UDSName is the name of the unix domain socket to connect to konnectivity server
+	// This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket)
+	UDSName string `json:"udsName,omitempty"`
+}
+
+// TLSConfig provides the authentication information to connect to konnectivity server
+// Only used with TCPTransport
+type TLSConfig struct {
+	// caBundle is the file location of the CA to be used to determine trust with the konnectivity server.
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// If absent while TCPTransport.URL is prefixed with https://, default to system trust roots.
+	// +optional
+	CABundle string `json:"caBundle,omitempty"`
+
+	// clientKey is the file location of the client key to be used in mtls handshakes with the konnectivity server.
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// Must be configured if TCPTransport.URL is prefixed with https://
+	// +optional
+	ClientKey string `json:"clientKey,omitempty"`
+
+	// clientCert is the file location of the client certificate to be used in mtls handshakes with the konnectivity server.
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// Must be configured if TCPTransport.URL is prefixed with https://
+	// +optional
+	ClientCert string `json:"clientCert,omitempty"`
+}
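+
+// exampleEgressSelectorConfiguration is an illustrative sketch, not part of
+// upstream k8s.io/apiserver: it wires the types above into a selector that sends
+// "cluster" traffic to a konnectivity server over gRPC via a UDS socket. The
+// socket path is a placeholder.
+func exampleEgressSelectorConfiguration() *EgressSelectorConfiguration {
+	return &EgressSelectorConfiguration{
+		EgressSelections: []EgressSelection{{
+			Name: "cluster",
+			Connection: Connection{
+				ProxyProtocol: ProtocolGRPC,
+				Transport: &Transport{
+					// GRPC is not supported over TCP, so use UDS here.
+					UDS: &UDSTransport{UDSName: "/etc/kubernetes/konnectivity-server.socket"},
+				},
+			},
+		}},
+	}
+}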
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TracingConfiguration provides versioned configuration for tracing clients.
+type TracingConfiguration struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Embed the component config tracing configuration struct
+	tracingapi.TracingConfiguration `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AuthenticationConfiguration provides versioned configuration for authentication.
+type AuthenticationConfiguration struct {
+	metav1.TypeMeta
+
+	// jwt is a list of authenticators to authenticate Kubernetes users using
+	// JWT compliant tokens. The authenticator will attempt to parse a raw ID token,
+	// verify it's been signed by the configured issuer. The public key to verify the
+	// signature is discovered from the issuer's public endpoint using OIDC discovery.
+	// For an incoming token, each JWT authenticator will be attempted in
+	// the order in which it is specified in this list.  Note however that
+	// other authenticators may run before or after the JWT authenticators.
+	// The specific position of JWT authenticators in relation to other
+	// authenticators is neither defined nor stable across releases.  Since
+	// each JWT authenticator must have a unique issuer URL, at most one
+	// JWT authenticator will attempt to cryptographically validate the token.
+	//
+	// The minimum valid JWT payload must contain the following claims:
+	// {
+	//		"iss": "https://issuer.example.com",
+	//		"aud": ["audience"],
+	//		"exp": 1234567890,
+	//		"<username claim>": "username"
+	// }
+	JWT []JWTAuthenticator `json:"jwt"`
+
+	// If present --anonymous-auth must not be set
+	Anonymous *AnonymousAuthConfig `json:"anonymous,omitempty"`
+}
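+
+// exampleAuthenticationConfiguration is an illustrative sketch, not part of
+// upstream k8s.io/apiserver: a single JWT authenticator with the minimal
+// required fields. The issuer URL and audience are placeholders; prefix must be
+// set (possibly to the empty string) whenever username.claim is used.
+func exampleAuthenticationConfiguration() *AuthenticationConfiguration {
+	noPrefix := ""
+	return &AuthenticationConfiguration{
+		JWT: []JWTAuthenticator{{
+			Issuer: Issuer{
+				URL:       "https://issuer.example.com",
+				Audiences: []string{"my-app"},
+			},
+			ClaimMappings: ClaimMappings{
+				Username: PrefixedClaimOrExpression{Claim: "sub", Prefix: &noPrefix},
+			},
+		}},
+	}
+}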
+
+// AnonymousAuthConfig provides the configuration for the anonymous authenticator.
+type AnonymousAuthConfig struct {
+	Enabled bool `json:"enabled"`
+
+	// If set, anonymous auth is only allowed if the request meets one of the
+	// conditions.
+	Conditions []AnonymousAuthCondition `json:"conditions,omitempty"`
+}
+
+// AnonymousAuthCondition describes the condition under which anonymous auth
+// should be enabled.
+type AnonymousAuthCondition struct {
+	// Path for which anonymous auth is enabled.
+	Path string `json:"path"`
+}
+
+// JWTAuthenticator provides the configuration for a single JWT authenticator.
+type JWTAuthenticator struct {
+	// issuer contains the basic OIDC provider connection options.
+	// +required
+	Issuer Issuer `json:"issuer"`
+
+	// claimValidationRules are rules that are applied to validate token claims to authenticate users.
+	// +optional
+	ClaimValidationRules []ClaimValidationRule `json:"claimValidationRules,omitempty"`
+
+	// claimMappings points claims of a token to be treated as user attributes.
+	// +required
+	ClaimMappings ClaimMappings `json:"claimMappings"`
+
+	// userValidationRules are rules that are applied to the final user before completing authentication.
+	// These allow invariants to be applied to incoming identities such as preventing the
+	// use of the system: prefix that is commonly used by Kubernetes components.
+	// The validation rules are logically ANDed together and must all return true for the validation to pass.
+	// +optional
+	UserValidationRules []UserValidationRule `json:"userValidationRules,omitempty"`
+}
+
+// Issuer provides the configuration for an external provider's specific settings.
+type Issuer struct {
+	// url points to the issuer URL in a format https://url or https://url/path.
+	// This must match the "iss" claim in the presented JWT, and the issuer returned from discovery.
+	// Same value as the --oidc-issuer-url flag.
+	// Discovery information is fetched from "{url}/.well-known/openid-configuration" unless overridden by discoveryURL.
+	// Required to be unique across all JWT authenticators.
+	// Note that egress selection configuration is not used for this network connection.
+	// +required
+	URL string `json:"url"`
+
+	// discoveryURL, if specified, overrides the URL used to fetch discovery
+	// information instead of using "{url}/.well-known/openid-configuration".
+	// The exact value specified is used, so "/.well-known/openid-configuration"
+	// must be included in discoveryURL if needed.
+	//
+	// The "issuer" field in the fetched discovery information must match the "issuer.url" field
+	// in the AuthenticationConfiguration and will be used to validate the "iss" claim in the presented JWT.
+	// This is for scenarios where the well-known and jwks endpoints are hosted at a different
+	// location than the issuer (such as locally in the cluster).
+	//
+	// Example:
+	// A discovery url that is exposed using kubernetes service 'oidc' in namespace 'oidc-namespace'
+	// and discovery information is available at '/.well-known/openid-configuration'.
+	// discoveryURL: "https://oidc.oidc-namespace/.well-known/openid-configuration"
+	// certificateAuthority is used to verify the TLS connection and the hostname on the leaf certificate
+	// must be set to 'oidc.oidc-namespace'.
+	//
+	// curl https://oidc.oidc-namespace/.well-known/openid-configuration (.discoveryURL field)
+	// {
+	//     issuer: "https://oidc.example.com" (.url field)
+	// }
+	//
+	// discoveryURL must be different from url.
+	// Required to be unique across all JWT authenticators.
+	// Note that egress selection configuration is not used for this network connection.
+	// +optional
+	DiscoveryURL *string `json:"discoveryURL,omitempty"`
+
+	// certificateAuthority contains PEM-encoded certificate authority certificates
+	// used to validate the connection when fetching discovery information.
+	// If unset, the system verifier is used.
+	// Same value as the content of the file referenced by the --oidc-ca-file flag.
+	// +optional
+	CertificateAuthority string `json:"certificateAuthority,omitempty"`
+
+	// audiences is the set of acceptable audiences the JWT must be issued to.
+	// At least one of the entries must match the "aud" claim in presented JWTs.
+	// Same value as the --oidc-client-id flag (though this field supports an array).
+	// Required to be non-empty.
+	// +required
+	Audiences []string `json:"audiences"`
+
+	// audienceMatchPolicy defines how the "audiences" field is used to match the "aud" claim in the presented JWT.
+	// Allowed values are:
+	// 1. "MatchAny" when multiple audiences are specified and
+	// 2. empty (or unset) or "MatchAny" when a single audience is specified.
+	//
+	// - MatchAny: the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field.
+	// For example, if "audiences" is ["foo", "bar"], the "aud" claim in the presented JWT must contain either "foo" or "bar" (and may contain both).
+	//
+	// - "": The match policy can be empty (or unset) when a single audience is specified in the "audiences" field. The "aud" claim in the presented JWT must contain the single audience (and may contain others).
+	//
+	// For more nuanced audience validation, use claimValidationRules.
+	//   example: claimValidationRule[].expression: 'sets.equivalent(claims.aud, ["bar", "foo", "baz"])' to require an exact match.
+	// +optional
+	AudienceMatchPolicy AudienceMatchPolicyType `json:"audienceMatchPolicy,omitempty"`
+}
+
+// AudienceMatchPolicyType is a set of valid values for issuer.audienceMatchPolicy
+type AudienceMatchPolicyType string
+
+// Valid types for AudienceMatchPolicyType
+const (
+	// MatchAny means the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field.
+	AudienceMatchPolicyMatchAny AudienceMatchPolicyType = "MatchAny"
+)
+
+// ClaimValidationRule provides the configuration for a single claim validation rule.
+type ClaimValidationRule struct {
+	// claim is the name of a required claim.
+	// Same as --oidc-required-claim flag.
+	// Only string claim keys are supported.
+	// Mutually exclusive with expression and message.
+	// +optional
+	Claim string `json:"claim,omitempty"`
+	// requiredValue is the value of a required claim.
+	// Same as --oidc-required-claim flag.
+	// Only string claim values are supported.
+	// If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string.
+	// Mutually exclusive with expression and message.
+	// +optional
+	RequiredValue string `json:"requiredValue,omitempty"`
+
+	// expression represents the expression which will be evaluated by CEL.
+	// Must produce a boolean.
+	//
+	// CEL expressions have access to the contents of the token claims, organized into CEL variable:
+	// - 'claims' is a map of claim names to claim values.
+	//   For example, a variable named 'sub' can be accessed as 'claims.sub'.
+	//   Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+	// Must return true for the validation to pass.
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	//
+	// Mutually exclusive with claim and requiredValue.
+	// +optional
+	Expression string `json:"expression,omitempty"`
+	// message customizes the returned error message when expression returns false.
+	// message is a literal string.
+	// Mutually exclusive with claim and requiredValue.
+	// +optional
+	Message string `json:"message,omitempty"`
+}
+
+// ClaimMappings provides the configuration for claim mapping
+type ClaimMappings struct {
+	// username represents an option for the username attribute.
+	// The claim's value must be a singular string.
+	// Same as the --oidc-username-claim and --oidc-username-prefix flags.
+	// If username.expression is set, the expression must produce a string value.
+	// If username.expression uses 'claims.email', then 'claims.email_verified' must be used in
+	// username.expression or extra[*].valueExpression or claimValidationRules[*].expression.
+	// An example claim validation rule expression that matches the validation automatically
+	// applied when username.claim is set to 'email' is 'claims.?email_verified.orValue(true)'.
+	//
+	// In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set,
+	// the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly.
+	// For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config.
+	// For prefix:
+	//     (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config,
+	//         set username.prefix=""
+	//     (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "<value of --oidc-issuer-url>#". For the same
+	//         behavior using authentication config, set username.prefix="<value of --oidc-issuer-url>#"
+	//     (3) --oidc-username-prefix="<value>". For the same behavior using authentication config, set username.prefix="<value>"
+	// +required
+	Username PrefixedClaimOrExpression `json:"username"`
+	// groups represents an option for the groups attribute.
+	// The claim's value must be a string or string array claim.
+	// If groups.claim is set, the prefix must be specified (and can be the empty string).
+	// If groups.expression is set, the expression must produce a string or string array value.
+	//  "", [], and null values are treated as the group mapping not being present.
+	// +optional
+	Groups PrefixedClaimOrExpression `json:"groups,omitempty"`
+
+	// uid represents an option for the uid attribute.
+	// Claim must be a singular string claim.
+	// If uid.expression is set, the expression must produce a string value.
+	// +optional
+	UID ClaimOrExpression `json:"uid"`
+
+	// extra represents an option for the extra attribute.
+	// expression must produce a string or string array value.
+	// If the value is empty, the extra mapping will not be present.
+	//
+	// hard-coded extra key/value
+	// - key: "foo"
+	//   valueExpression: "'bar'"
+	// This will result in an extra attribute - foo: ["bar"]
+	//
+	// hard-coded key, value copying claim value
+	// - key: "foo"
+	//   valueExpression: "claims.some_claim"
+	// This will result in an extra attribute - foo: [value of some_claim]
+	//
+	// hard-coded key, value derived from claim value
+	// - key: "admin"
+	//   valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""'
+	// This will result in:
+	//  - if is_admin claim is present and true, extra attribute - admin: ["true"]
+	//  - if is_admin claim is present and false or is_admin claim is not present, no extra attribute will be added
+	//
+	// +optional
+	Extra []ExtraMapping `json:"extra,omitempty"`
+}
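+
+// NOTE (illustrative, not upstream code): the expression-based alternative to
+// the claim/prefix mapping above, assuming tokens carry "email" and
+// "email_verified" claims; the validation rule mirrors the email_verified
+// cross-check described in the username field comment:
+//
+//	username:
+//	  expression: 'claims.email'
+//	claimValidationRules:
+//	- expression: 'claims.?email_verified.orValue(true) == true'
+//	  message: 'email claim must be verified'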
+
+// PrefixedClaimOrExpression provides the configuration for a single prefixed claim or expression.
+type PrefixedClaimOrExpression struct {
+	// claim is the JWT claim to use.
+	// Mutually exclusive with expression.
+	// +optional
+	Claim string `json:"claim,omitempty"`
+	// prefix is prepended to claim's value to prevent clashes with existing names.
+	// prefix needs to be set if claim is set and can be the empty string.
+	// Mutually exclusive with expression.
+	// +optional
+	Prefix *string `json:"prefix,omitempty"`
+
+	// expression represents the expression which will be evaluated by CEL.
+	//
+	// CEL expressions have access to the contents of the token claims, organized into CEL variable:
+	// - 'claims' is a map of claim names to claim values.
+	//   For example, a variable named 'sub' can be accessed as 'claims.sub'.
+	//   Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	//
+	// Mutually exclusive with claim and prefix.
+	// +optional
+	Expression string `json:"expression,omitempty"`
+}
+
+// ClaimOrExpression provides the configuration for a single claim or expression.
+type ClaimOrExpression struct {
+	// claim is the JWT claim to use.
+	// Either claim or expression must be set.
+	// Mutually exclusive with expression.
+	// +optional
+	Claim string `json:"claim,omitempty"`
+
+	// expression represents the expression which will be evaluated by CEL.
+	//
+	// CEL expressions have access to the contents of the token claims, organized into CEL variable:
+	// - 'claims' is a map of claim names to claim values.
+	//   For example, a variable named 'sub' can be accessed as 'claims.sub'.
+	//   Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	//
+	// Mutually exclusive with claim.
+	// +optional
+	Expression string `json:"expression,omitempty"`
+}
+
+// ExtraMapping provides the configuration for a single extra mapping.
+type ExtraMapping struct {
+	// key is a string to use as the extra attribute key.
+	// key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid
+	// subdomain as defined by RFC 1123. All characters trailing the first "/" must
+	// be valid HTTP Path characters as defined by RFC 3986.
+	// key must be lowercase.
+	// Required to be unique.
+	// +required
+	Key string `json:"key"`
+
+	// valueExpression is a CEL expression to extract extra attribute value.
+	// valueExpression must produce a string or string array value.
+	// "", [], and null values are treated as the extra mapping not being present.
+	// Empty string values contained within a string array are filtered out.
+	//
+	// CEL expressions have access to the contents of the token claims, organized into CEL variable:
+	// - 'claims' is a map of claim names to claim values.
+	//   For example, a variable named 'sub' can be accessed as 'claims.sub'.
+	//   Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	//
+	// +required
+	ValueExpression string `json:"valueExpression"`
+}
+
+// UserValidationRule provides the configuration for a single user info validation rule.
+type UserValidationRule struct {
+	// expression represents the expression which will be evaluated by CEL.
+	// Must return true for the validation to pass.
+	//
+	// CEL expressions have access to the contents of UserInfo, organized into CEL variable:
+	// - 'user' - authentication.k8s.io/v1, Kind=UserInfo object
+	//    Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition.
+	//    API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	//
+	// +required
+	Expression string `json:"expression"`
+
+	// message customizes the returned error message when rule returns false.
+	// message is a literal string.
+	// +optional
+	Message string `json:"message,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type AuthorizationConfiguration struct {
+	metav1.TypeMeta
+
+	// Authorizers is an ordered list of authorizers to
+	// authorize requests against.
+	// This is similar to the --authorization-mode kube-apiserver flag.
+	// There must be at least one.
+	Authorizers []AuthorizerConfiguration `json:"authorizers"`
+}
+
+const (
+	TypeWebhook                                          AuthorizerType = "Webhook"
+	FailurePolicyNoOpinion                               string         = "NoOpinion"
+	FailurePolicyDeny                                    string         = "Deny"
+	AuthorizationWebhookConnectionInfoTypeKubeConfigFile string         = "KubeConfigFile"
+	AuthorizationWebhookConnectionInfoTypeInCluster      string         = "InClusterConfig"
+)
+
+type AuthorizerType string
+
+type AuthorizerConfiguration struct {
+	// Type refers to the type of the authorizer
+	// "Webhook" is supported in the generic API server
+	// Other API servers may support additional authorizer
+	// types like Node, RBAC, ABAC, etc.
+	Type string `json:"type"`
+
+	// Name used to describe the webhook
+	// This is explicitly used in monitoring machinery for metrics
+	// Note: Names must be DNS1123 labels like `myauthorizername` or
+	//		 subdomains like `myauthorizer.example.domain`
+	// Required, with no default
+	Name string `json:"name"`
+
+	// Webhook defines the configuration for a Webhook authorizer
+	// Must be defined when Type=Webhook
+	// Must not be defined when Type!=Webhook
+	Webhook *WebhookConfiguration `json:"webhook,omitempty"`
+}
+
+type WebhookConfiguration struct {
+	// The duration to cache 'authorized' responses from the webhook
+	// authorizer.
+	// Same as setting `--authorization-webhook-cache-authorized-ttl` flag
+	// Default: 5m0s
+	AuthorizedTTL metav1.Duration `json:"authorizedTTL"`
+	// The duration to cache 'unauthorized' responses from the webhook
+	// authorizer.
+	// Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag
+	// Default: 30s
+	UnauthorizedTTL metav1.Duration `json:"unauthorizedTTL"`
+	// Timeout for the webhook request
+	// Maximum allowed value is 30s.
+	// Required, no default value.
+	Timeout metav1.Duration `json:"timeout"`
+	// The API version of the authorization.k8s.io SubjectAccessReview to
+	// send to and expect from the webhook.
+	// Same as setting `--authorization-webhook-version` flag
+	// Valid values: v1beta1, v1
+	// Required, no default value
+	SubjectAccessReviewVersion string `json:"subjectAccessReviewVersion"`
+	// MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview
+	// version the CEL expressions are evaluated against
+	// Valid values: v1
+	// Required, no default value
+	MatchConditionSubjectAccessReviewVersion string `json:"matchConditionSubjectAccessReviewVersion"`
+	// Controls the authorization decision when a webhook request fails to
+	// complete or returns a malformed response or errors evaluating
+	// matchConditions.
+	// Valid values:
+	//   - NoOpinion: continue to subsequent authorizers to see if one of
+	//     them allows the request
+	//   - Deny: reject the request without consulting subsequent authorizers
+	// Required, with no default.
+	FailurePolicy string `json:"failurePolicy"`
+
+	// ConnectionInfo defines how we talk to the webhook
+	ConnectionInfo WebhookConnectionInfo `json:"connectionInfo"`
+
+	// matchConditions is a list of conditions that must be met for a request to be sent to this
+	// webhook. An empty list of matchConditions matches all requests.
+	// There are a maximum of 64 match conditions allowed.
+	//
+	// The exact matching logic is (in order):
+	//   1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped.
+	//   2. If ALL matchConditions evaluate to TRUE, then the webhook is called.
+	//   3. If at least one matchCondition evaluates to an error (but none are FALSE):
+	//      - If failurePolicy=Deny, then the webhook rejects the request
+	//      - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped
+	MatchConditions []WebhookMatchCondition `json:"matchConditions"`
+}
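+
+// NOTE (illustrative, not upstream code): the same webhook knobs expressed as
+// the YAML form of an AuthorizationConfiguration; names and durations are
+// placeholders:
+//
+//	apiVersion: apiserver.config.k8s.io/v1alpha1
+//	kind: AuthorizationConfiguration
+//	authorizers:
+//	- type: Webhook
+//	  name: example
+//	  webhook:
+//	    authorizedTTL: 5m
+//	    unauthorizedTTL: 30s
+//	    timeout: 3s
+//	    subjectAccessReviewVersion: v1
+//	    matchConditionSubjectAccessReviewVersion: v1
+//	    failurePolicy: NoOpinion
+//	    connectionInfo:
+//	      type: InClusterConfig
+//	    matchConditions:
+//	    - expression: has(request.resourceAttributes)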
+
+type WebhookConnectionInfo struct {
+	// Controls how the webhook should communicate with the server.
+	// Valid values:
+	// - KubeConfigFile: use the file specified in kubeConfigFile to locate the
+	//   server.
+	// - InClusterConfig: use the in-cluster configuration to call the
+	//   SubjectAccessReview API hosted by kube-apiserver. This mode is not
+	//   allowed for kube-apiserver.
+	Type string `json:"type"`
+
+	// Path to KubeConfigFile for connection info
+	// Required, if connectionInfo.Type is KubeConfigFile
+	KubeConfigFile *string `json:"kubeConfigFile"`
+}
+
+type WebhookMatchCondition struct {
+	// expression represents the expression which will be evaluated by CEL. Must evaluate to bool.
+	// CEL expressions have access to the contents of the SubjectAccessReview in v1 version.
+	// If version specified by subjectAccessReviewVersion in the request variable is v1beta1,
+	// the contents would be converted to the v1 version before evaluating the CEL expression.
+	//
+	// - 'resourceAttributes' describes information for a resource access request and is unset for non-resource requests. e.g. has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default'
+	// - 'nonResourceAttributes' describes information for a non-resource access request and is unset for resource requests. e.g. has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/healthz'.
+	// - 'user' is the user to test for. e.g. request.user == 'alice'
+	// - 'groups' is the groups to test for. e.g. ('group1' in request.groups)
+	// - 'extra' corresponds to the user.Info.GetExtra() method from the authenticator.
+	// - 'uid' is the information about the requesting user. e.g. request.uid == '1'
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	Expression string `json:"expression"`
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go
new file mode 100644
index 0000000000..3a6c66c3aa
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go
@@ -0,0 +1,964 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	unsafe "unsafe"
+
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	apiserver "k8s.io/apiserver/pkg/apis/apiserver"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*AdmissionConfiguration)(nil), (*apiserver.AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(a.(*AdmissionConfiguration), b.(*apiserver.AdmissionConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionConfiguration)(nil), (*AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(a.(*apiserver.AdmissionConfiguration), b.(*AdmissionConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AdmissionPluginConfiguration)(nil), (*apiserver.AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(a.(*AdmissionPluginConfiguration), b.(*apiserver.AdmissionPluginConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionPluginConfiguration)(nil), (*AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(a.(*apiserver.AdmissionPluginConfiguration), b.(*AdmissionPluginConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AnonymousAuthCondition)(nil), (*apiserver.AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(a.(*AnonymousAuthCondition), b.(*apiserver.AnonymousAuthCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthCondition)(nil), (*AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(a.(*apiserver.AnonymousAuthCondition), b.(*AnonymousAuthCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AnonymousAuthConfig)(nil), (*apiserver.AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(a.(*AnonymousAuthConfig), b.(*apiserver.AnonymousAuthConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthConfig)(nil), (*AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(a.(*apiserver.AnonymousAuthConfig), b.(*AnonymousAuthConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AuthenticationConfiguration)(nil), (*apiserver.AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(a.(*AuthenticationConfiguration), b.(*apiserver.AuthenticationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AuthenticationConfiguration)(nil), (*AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(a.(*apiserver.AuthenticationConfiguration), b.(*AuthenticationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AuthorizationConfiguration)(nil), (*apiserver.AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(a.(*AuthorizationConfiguration), b.(*apiserver.AuthorizationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizationConfiguration)(nil), (*AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(a.(*apiserver.AuthorizationConfiguration), b.(*AuthorizationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AuthorizerConfiguration)(nil), (*apiserver.AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(a.(*AuthorizerConfiguration), b.(*apiserver.AuthorizerConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizerConfiguration)(nil), (*AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(a.(*apiserver.AuthorizerConfiguration), b.(*AuthorizerConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ClaimMappings)(nil), (*apiserver.ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(a.(*ClaimMappings), b.(*apiserver.ClaimMappings), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ClaimMappings)(nil), (*ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(a.(*apiserver.ClaimMappings), b.(*ClaimMappings), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ClaimOrExpression)(nil), (*apiserver.ClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(a.(*ClaimOrExpression), b.(*apiserver.ClaimOrExpression), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ClaimOrExpression)(nil), (*ClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(a.(*apiserver.ClaimOrExpression), b.(*ClaimOrExpression), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ClaimValidationRule)(nil), (*apiserver.ClaimValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(a.(*ClaimValidationRule), b.(*apiserver.ClaimValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ClaimValidationRule)(nil), (*ClaimValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(a.(*apiserver.ClaimValidationRule), b.(*ClaimValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Connection)(nil), (*apiserver.Connection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Connection_To_apiserver_Connection(a.(*Connection), b.(*apiserver.Connection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.Connection)(nil), (*Connection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_Connection_To_v1alpha1_Connection(a.(*apiserver.Connection), b.(*Connection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelection)(nil), (*EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(a.(*apiserver.EgressSelection), b.(*EgressSelection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*EgressSelectorConfiguration)(nil), (*apiserver.EgressSelectorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(a.(*EgressSelectorConfiguration), b.(*apiserver.EgressSelectorConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelectorConfiguration)(nil), (*EgressSelectorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(a.(*apiserver.EgressSelectorConfiguration), b.(*EgressSelectorConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExtraMapping)(nil), (*apiserver.ExtraMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(a.(*ExtraMapping), b.(*apiserver.ExtraMapping), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ExtraMapping)(nil), (*ExtraMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(a.(*apiserver.ExtraMapping), b.(*ExtraMapping), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Issuer)(nil), (*apiserver.Issuer)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Issuer_To_apiserver_Issuer(a.(*Issuer), b.(*apiserver.Issuer), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.Issuer)(nil), (*Issuer)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_Issuer_To_v1alpha1_Issuer(a.(*apiserver.Issuer), b.(*Issuer), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*JWTAuthenticator)(nil), (*apiserver.JWTAuthenticator)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(a.(*JWTAuthenticator), b.(*apiserver.JWTAuthenticator), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.JWTAuthenticator)(nil), (*JWTAuthenticator)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(a.(*apiserver.JWTAuthenticator), b.(*JWTAuthenticator), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*PrefixedClaimOrExpression)(nil), (*apiserver.PrefixedClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(a.(*PrefixedClaimOrExpression), b.(*apiserver.PrefixedClaimOrExpression), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.PrefixedClaimOrExpression)(nil), (*PrefixedClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(a.(*apiserver.PrefixedClaimOrExpression), b.(*PrefixedClaimOrExpression), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*TCPTransport)(nil), (*apiserver.TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(a.(*TCPTransport), b.(*apiserver.TCPTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.TCPTransport)(nil), (*TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(a.(*apiserver.TCPTransport), b.(*TCPTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*TLSConfig)(nil), (*apiserver.TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(a.(*TLSConfig), b.(*apiserver.TLSConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.TLSConfig)(nil), (*TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(a.(*apiserver.TLSConfig), b.(*TLSConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*TracingConfiguration)(nil), (*apiserver.TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_TracingConfiguration_To_apiserver_TracingConfiguration(a.(*TracingConfiguration), b.(*apiserver.TracingConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.TracingConfiguration)(nil), (*TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_TracingConfiguration_To_v1alpha1_TracingConfiguration(a.(*apiserver.TracingConfiguration), b.(*TracingConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Transport)(nil), (*apiserver.Transport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Transport_To_apiserver_Transport(a.(*Transport), b.(*apiserver.Transport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.Transport)(nil), (*Transport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_Transport_To_v1alpha1_Transport(a.(*apiserver.Transport), b.(*Transport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*UDSTransport)(nil), (*apiserver.UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(a.(*UDSTransport), b.(*apiserver.UDSTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.UDSTransport)(nil), (*UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(a.(*apiserver.UDSTransport), b.(*UDSTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*UserValidationRule)(nil), (*apiserver.UserValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(a.(*UserValidationRule), b.(*apiserver.UserValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.UserValidationRule)(nil), (*UserValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(a.(*apiserver.UserValidationRule), b.(*UserValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookConfiguration)(nil), (*apiserver.WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(a.(*WebhookConfiguration), b.(*apiserver.WebhookConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConfiguration)(nil), (*WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(a.(*apiserver.WebhookConfiguration), b.(*WebhookConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookConnectionInfo)(nil), (*apiserver.WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(a.(*WebhookConnectionInfo), b.(*apiserver.WebhookConnectionInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConnectionInfo)(nil), (*WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(a.(*apiserver.WebhookConnectionInfo), b.(*WebhookConnectionInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookMatchCondition)(nil), (*apiserver.WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(a.(*WebhookMatchCondition), b.(*apiserver.WebhookMatchCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.WebhookMatchCondition)(nil), (*WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(a.(*apiserver.WebhookMatchCondition), b.(*WebhookMatchCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
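// Usage sketch (not part of the vendored file): once RegisterConversions has
// run against a runtime.Scheme, Scheme.Convert dispatches to the generated
// functions by type. Variable names below are illustrative only; the types
// and the Convert signature come from the imports above.
//
//	scheme := runtime.NewScheme()
//	if err := RegisterConversions(scheme); err != nil {
//		panic(err) // a real caller would propagate the error
//	}
//	external := &AdmissionConfiguration{
//		Plugins: []AdmissionPluginConfiguration{{Name: "NamespaceLifecycle"}},
//	}
//	internal := &apiserver.AdmissionConfiguration{}
//	if err := scheme.Convert(external, internal, nil); err != nil {
//		panic(err)
//	}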
+func autoConvert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error {
+	out.Plugins = *(*[]apiserver.AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins))
+	return nil
+}
+
+// Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error {
+	out.Plugins = *(*[]AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins))
+	return nil
+}
+
+// Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in, out, s)
+}
+
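// Note on the unsafe.Pointer casts above: conversion-gen emits them only when
// it can prove the source and destination types share an identical memory
// layout, so the slice header is reinterpreted in place instead of copied
// element by element. The safe, copying equivalent it would otherwise
// generate looks roughly like:
//
//	out.Plugins = make([]apiserver.AdmissionPluginConfiguration, len(in.Plugins))
//	for i := range in.Plugins {
//		if err := Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(&in.Plugins[i], &out.Plugins[i], s); err != nil {
//			return err
//		}
//	}
//
// Compare the element-wise loops generated for AuthenticationConfiguration.JWT
// further down, where the layouts differ.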
+func autoConvert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Path = in.Path
+	out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration))
+	return nil
+}
+
+// Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Path = in.Path
+	out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration))
+	return nil
+}
+
+// Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error {
+	out.Path = in.Path
+	return nil
+}
+
+// Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition is an autogenerated conversion function.
+func Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in, out, s)
+}
+
+func autoConvert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error {
+	out.Path = in.Path
+	return nil
+}
+
+// Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition is an autogenerated conversion function.
+func Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error {
+	return autoConvert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in, out, s)
+}
+
+func autoConvert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error {
+	out.Enabled = in.Enabled
+	out.Conditions = *(*[]apiserver.AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions))
+	return nil
+}
+
+// Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig is an autogenerated conversion function.
+func Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in, out, s)
+}
+
+func autoConvert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error {
+	out.Enabled = in.Enabled
+	out.Conditions = *(*[]AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions))
+	return nil
+}
+
+// Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig is an autogenerated conversion function.
+func Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error {
+	return autoConvert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error {
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]apiserver.JWTAuthenticator, len(*in))
+		for i := range *in {
+			if err := Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.JWT = nil
+	}
+	out.Anonymous = (*apiserver.AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous))
+	return nil
+}
+
+// Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in *apiserver.AuthenticationConfiguration, out *AuthenticationConfiguration, s conversion.Scope) error {
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]JWTAuthenticator, len(*in))
+		for i := range *in {
+			if err := Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.JWT = nil
+	}
+	out.Anonymous = (*AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous))
+	return nil
+}
+
+// Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in *apiserver.AuthenticationConfiguration, out *AuthenticationConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error {
+	out.Authorizers = *(*[]apiserver.AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers))
+	return nil
+}
+
+// Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error {
+	out.Authorizers = *(*[]AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers))
+	return nil
+}
+
+// Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error {
+	out.Type = apiserver.AuthorizerType(in.Type)
+	out.Name = in.Name
+	out.Webhook = (*apiserver.WebhookConfiguration)(unsafe.Pointer(in.Webhook))
+	return nil
+}
+
+// Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error {
+	out.Type = string(in.Type)
+	out.Name = in.Name
+	out.Webhook = (*WebhookConfiguration)(unsafe.Pointer(in.Webhook))
+	return nil
+}
+
+// Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error {
+	if err := Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil {
+		return err
+	}
+	if err := Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Groups, &out.Groups, s); err != nil {
+		return err
+	}
+	if err := Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(&in.UID, &out.UID, s); err != nil {
+		return err
+	}
+	out.Extra = *(*[]apiserver.ExtraMapping)(unsafe.Pointer(&in.Extra))
+	return nil
+}
+
+// Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings is an autogenerated conversion function.
+func Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error {
+	return autoConvert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in, out, s)
+}
+
+func autoConvert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(in *apiserver.ClaimMappings, out *ClaimMappings, s conversion.Scope) error {
+	if err := Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil {
+		return err
+	}
+	if err := Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(&in.Groups, &out.Groups, s); err != nil {
+		return err
+	}
+	if err := Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(&in.UID, &out.UID, s); err != nil {
+		return err
+	}
+	out.Extra = *(*[]ExtraMapping)(unsafe.Pointer(&in.Extra))
+	return nil
+}
+
+// Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings is an autogenerated conversion function.
+func Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(in *apiserver.ClaimMappings, out *ClaimMappings, s conversion.Scope) error {
+	return autoConvert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(in, out, s)
+}
+
+func autoConvert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in *ClaimOrExpression, out *apiserver.ClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression is an autogenerated conversion function.
+func Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in *ClaimOrExpression, out *apiserver.ClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in, out, s)
+}
+
+func autoConvert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(in *apiserver.ClaimOrExpression, out *ClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression is an autogenerated conversion function.
+func Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(in *apiserver.ClaimOrExpression, out *ClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(in, out, s)
+}
+
+func autoConvert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in *ClaimValidationRule, out *apiserver.ClaimValidationRule, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.RequiredValue = in.RequiredValue
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule is an autogenerated conversion function.
+func Convert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in *ClaimValidationRule, out *apiserver.ClaimValidationRule, s conversion.Scope) error {
+	return autoConvert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in, out, s)
+}
+
+func autoConvert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(in *apiserver.ClaimValidationRule, out *ClaimValidationRule, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.RequiredValue = in.RequiredValue
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule is an autogenerated conversion function.
+func Convert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(in *apiserver.ClaimValidationRule, out *ClaimValidationRule, s conversion.Scope) error {
+	return autoConvert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(in, out, s)
+}
+
+func autoConvert_v1alpha1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error {
+	out.ProxyProtocol = apiserver.ProtocolType(in.ProxyProtocol)
+	out.Transport = (*apiserver.Transport)(unsafe.Pointer(in.Transport))
+	return nil
+}
+
+// Convert_v1alpha1_Connection_To_apiserver_Connection is an autogenerated conversion function.
+func Convert_v1alpha1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error {
+	return autoConvert_v1alpha1_Connection_To_apiserver_Connection(in, out, s)
+}
+
+func autoConvert_apiserver_Connection_To_v1alpha1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error {
+	out.ProxyProtocol = ProtocolType(in.ProxyProtocol)
+	out.Transport = (*Transport)(unsafe.Pointer(in.Transport))
+	return nil
+}
+
+// Convert_apiserver_Connection_To_v1alpha1_Connection is an autogenerated conversion function.
+func Convert_apiserver_Connection_To_v1alpha1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error {
+	return autoConvert_apiserver_Connection_To_v1alpha1_Connection(in, out, s)
+}
+
+func autoConvert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error {
+	out.Name = in.Name
+	if err := Convert_v1alpha1_Connection_To_apiserver_Connection(&in.Connection, &out.Connection, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error {
+	out.Name = in.Name
+	if err := Convert_apiserver_Connection_To_v1alpha1_Connection(&in.Connection, &out.Connection, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection is an autogenerated conversion function.
+func Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error {
+	return autoConvert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(in, out, s)
+}
+
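// Note the asymmetry above: only the apiserver-to-v1alpha1 direction gets a
// generated Convert_* wrapper. The v1alpha1-to-apiserver wrapper is expected
// to be hand-written elsewhere in this package (it is registered with
// AddConversionFunc rather than AddGeneratedConversionFunc in
// RegisterConversions), which is how conversion-gen defers to a custom
// conversion while still emitting the autoConvert_* helper it can call.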
+func autoConvert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error {
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]apiserver.EgressSelection, len(*in))
+		for i := range *in {
+			if err := Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.EgressSelections = nil
+	}
+	return nil
+}
+
+// Convert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error {
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]EgressSelection, len(*in))
+		for i := range *in {
+			if err := Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.EgressSelections = nil
+	}
+	return nil
+}
+
+// Convert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration is an autogenerated conversion function.
+func Convert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(in *ExtraMapping, out *apiserver.ExtraMapping, s conversion.Scope) error {
+	out.Key = in.Key
+	out.ValueExpression = in.ValueExpression
+	return nil
+}
+
+// Convert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping is an autogenerated conversion function.
+func Convert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(in *ExtraMapping, out *apiserver.ExtraMapping, s conversion.Scope) error {
+	return autoConvert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(in, out, s)
+}
+
+func autoConvert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(in *apiserver.ExtraMapping, out *ExtraMapping, s conversion.Scope) error {
+	out.Key = in.Key
+	out.ValueExpression = in.ValueExpression
+	return nil
+}
+
+// Convert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping is an autogenerated conversion function.
+func Convert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(in *apiserver.ExtraMapping, out *ExtraMapping, s conversion.Scope) error {
+	return autoConvert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(in, out, s)
+}
+
+func autoConvert_v1alpha1_Issuer_To_apiserver_Issuer(in *Issuer, out *apiserver.Issuer, s conversion.Scope) error {
+	out.URL = in.URL
+	if err := v1.Convert_Pointer_string_To_string(&in.DiscoveryURL, &out.DiscoveryURL, s); err != nil {
+		return err
+	}
+	out.CertificateAuthority = in.CertificateAuthority
+	out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
+	out.AudienceMatchPolicy = apiserver.AudienceMatchPolicyType(in.AudienceMatchPolicy)
+	return nil
+}
+
+// Convert_v1alpha1_Issuer_To_apiserver_Issuer is an autogenerated conversion function.
+func Convert_v1alpha1_Issuer_To_apiserver_Issuer(in *Issuer, out *apiserver.Issuer, s conversion.Scope) error {
+	return autoConvert_v1alpha1_Issuer_To_apiserver_Issuer(in, out, s)
+}
+
+func autoConvert_apiserver_Issuer_To_v1alpha1_Issuer(in *apiserver.Issuer, out *Issuer, s conversion.Scope) error {
+	out.URL = in.URL
+	if err := v1.Convert_string_To_Pointer_string(&in.DiscoveryURL, &out.DiscoveryURL, s); err != nil {
+		return err
+	}
+	out.CertificateAuthority = in.CertificateAuthority
+	out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
+	out.AudienceMatchPolicy = AudienceMatchPolicyType(in.AudienceMatchPolicy)
+	return nil
+}
+
+// Convert_apiserver_Issuer_To_v1alpha1_Issuer is an autogenerated conversion function.
+func Convert_apiserver_Issuer_To_v1alpha1_Issuer(in *apiserver.Issuer, out *Issuer, s conversion.Scope) error {
+	return autoConvert_apiserver_Issuer_To_v1alpha1_Issuer(in, out, s)
+}
+
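// Issuer is one of the few types in this file that cannot use an unsafe cast:
// v1alpha1 declares DiscoveryURL as *string while the internal type uses a
// plain string, so the layouts differ and the generated code falls back to
// the apimachinery pointer helpers. Their behavior is roughly this sketch:
//
//	// v1.Convert_Pointer_string_To_string: nil becomes the zero value.
//	if in.DiscoveryURL != nil {
//		out.DiscoveryURL = *in.DiscoveryURL
//	} else {
//		out.DiscoveryURL = ""
//	}
//
// This pointer/value mismatch is also why JWTAuthenticator, which embeds
// Issuer, is converted with an element-wise loop rather than a cast.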
+func autoConvert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in *JWTAuthenticator, out *apiserver.JWTAuthenticator, s conversion.Scope) error {
+	if err := Convert_v1alpha1_Issuer_To_apiserver_Issuer(&in.Issuer, &out.Issuer, s); err != nil {
+		return err
+	}
+	out.ClaimValidationRules = *(*[]apiserver.ClaimValidationRule)(unsafe.Pointer(&in.ClaimValidationRules))
+	if err := Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(&in.ClaimMappings, &out.ClaimMappings, s); err != nil {
+		return err
+	}
+	out.UserValidationRules = *(*[]apiserver.UserValidationRule)(unsafe.Pointer(&in.UserValidationRules))
+	return nil
+}
+
+// Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator is an autogenerated conversion function.
+func Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in *JWTAuthenticator, out *apiserver.JWTAuthenticator, s conversion.Scope) error {
+	return autoConvert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in, out, s)
+}
+
+func autoConvert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(in *apiserver.JWTAuthenticator, out *JWTAuthenticator, s conversion.Scope) error {
+	if err := Convert_apiserver_Issuer_To_v1alpha1_Issuer(&in.Issuer, &out.Issuer, s); err != nil {
+		return err
+	}
+	out.ClaimValidationRules = *(*[]ClaimValidationRule)(unsafe.Pointer(&in.ClaimValidationRules))
+	if err := Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(&in.ClaimMappings, &out.ClaimMappings, s); err != nil {
+		return err
+	}
+	out.UserValidationRules = *(*[]UserValidationRule)(unsafe.Pointer(&in.UserValidationRules))
+	return nil
+}
+
+// Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator is an autogenerated conversion function.
+func Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(in *apiserver.JWTAuthenticator, out *JWTAuthenticator, s conversion.Scope) error {
+	return autoConvert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(in, out, s)
+}
+
+func autoConvert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in *PrefixedClaimOrExpression, out *apiserver.PrefixedClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Prefix = (*string)(unsafe.Pointer(in.Prefix))
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression is an autogenerated conversion function.
+func Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in *PrefixedClaimOrExpression, out *apiserver.PrefixedClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in, out, s)
+}
+
+func autoConvert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(in *apiserver.PrefixedClaimOrExpression, out *PrefixedClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Prefix = (*string)(unsafe.Pointer(in.Prefix))
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression is an autogenerated conversion function.
+func Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(in *apiserver.PrefixedClaimOrExpression, out *PrefixedClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(in, out, s)
+}
+
+func autoConvert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error {
+	out.URL = in.URL
+	out.TLSConfig = (*apiserver.TLSConfig)(unsafe.Pointer(in.TLSConfig))
+	return nil
+}
+
+// Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport is an autogenerated conversion function.
+func Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error {
+	return autoConvert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in, out, s)
+}
+
+func autoConvert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error {
+	out.URL = in.URL
+	out.TLSConfig = (*TLSConfig)(unsafe.Pointer(in.TLSConfig))
+	return nil
+}
+
+// Convert_apiserver_TCPTransport_To_v1alpha1_TCPTransport is an autogenerated conversion function.
+func Convert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error {
+	return autoConvert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(in, out, s)
+}
+
+func autoConvert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error {
+	out.CABundle = in.CABundle
+	out.ClientKey = in.ClientKey
+	out.ClientCert = in.ClientCert
+	return nil
+}
+
+// Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig is an autogenerated conversion function.
+func Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error {
+	return autoConvert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(in, out, s)
+}
+
+func autoConvert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error {
+	out.CABundle = in.CABundle
+	out.ClientKey = in.ClientKey
+	out.ClientCert = in.ClientCert
+	return nil
+}
+
+// Convert_apiserver_TLSConfig_To_v1alpha1_TLSConfig is an autogenerated conversion function.
+func Convert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error {
+	return autoConvert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_TracingConfiguration_To_apiserver_TracingConfiguration(in *TracingConfiguration, out *apiserver.TracingConfiguration, s conversion.Scope) error {
+	out.TracingConfiguration = in.TracingConfiguration
+	return nil
+}
+
+// Convert_v1alpha1_TracingConfiguration_To_apiserver_TracingConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_TracingConfiguration_To_apiserver_TracingConfiguration(in *TracingConfiguration, out *apiserver.TracingConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_TracingConfiguration_To_apiserver_TracingConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *apiserver.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
+	out.TracingConfiguration = in.TracingConfiguration
+	return nil
+}
+
+// Convert_apiserver_TracingConfiguration_To_v1alpha1_TracingConfiguration is an autogenerated conversion function.
+func Convert_apiserver_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *apiserver.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_TracingConfiguration_To_v1alpha1_TracingConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error {
+	out.TCP = (*apiserver.TCPTransport)(unsafe.Pointer(in.TCP))
+	out.UDS = (*apiserver.UDSTransport)(unsafe.Pointer(in.UDS))
+	return nil
+}
+
+// Convert_v1alpha1_Transport_To_apiserver_Transport is an autogenerated conversion function.
+func Convert_v1alpha1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error {
+	return autoConvert_v1alpha1_Transport_To_apiserver_Transport(in, out, s)
+}
+
+func autoConvert_apiserver_Transport_To_v1alpha1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error {
+	out.TCP = (*TCPTransport)(unsafe.Pointer(in.TCP))
+	out.UDS = (*UDSTransport)(unsafe.Pointer(in.UDS))
+	return nil
+}
+
+// Convert_apiserver_Transport_To_v1alpha1_Transport is an autogenerated conversion function.
+func Convert_apiserver_Transport_To_v1alpha1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error {
+	return autoConvert_apiserver_Transport_To_v1alpha1_Transport(in, out, s)
+}
+
+func autoConvert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error {
+	out.UDSName = in.UDSName
+	return nil
+}
+
+// Convert_v1alpha1_UDSTransport_To_apiserver_UDSTransport is an autogenerated conversion function.
+func Convert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error {
+	return autoConvert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(in, out, s)
+}
+
+func autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error {
+	out.UDSName = in.UDSName
+	return nil
+}
+
+// Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport is an autogenerated conversion function.
+func Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error {
+	return autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in, out, s)
+}
+
+func autoConvert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(in *UserValidationRule, out *apiserver.UserValidationRule, s conversion.Scope) error {
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule is an autogenerated conversion function.
+func Convert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(in *UserValidationRule, out *apiserver.UserValidationRule, s conversion.Scope) error {
+	return autoConvert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(in, out, s)
+}
+
+func autoConvert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(in *apiserver.UserValidationRule, out *UserValidationRule, s conversion.Scope) error {
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule is an autogenerated conversion function.
+func Convert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(in *apiserver.UserValidationRule, out *UserValidationRule, s conversion.Scope) error {
+	return autoConvert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(in, out, s)
+}
+
+func autoConvert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error {
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion
+	out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion
+	out.FailurePolicy = in.FailurePolicy
+	if err := Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil {
+		return err
+	}
+	out.MatchConditions = *(*[]apiserver.WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions))
+	return nil
+}
+
+// Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error {
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion
+	out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion
+	out.FailurePolicy = in.FailurePolicy
+	if err := Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil {
+		return err
+	}
+	out.MatchConditions = *(*[]WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions))
+	return nil
+}
+
+// Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration is an autogenerated conversion function.
+func Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error {
+	out.Type = in.Type
+	out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile))
+	return nil
+}
+
+// Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo is an autogenerated conversion function.
+func Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error {
+	return autoConvert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error {
+	out.Type = in.Type
+	out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile))
+	return nil
+}
+
+// Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo is an autogenerated conversion function.
+func Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in, out, s)
+}
+
+func autoConvert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error {
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition is an autogenerated conversion function.
+func Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error {
+	return autoConvert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error {
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition is an autogenerated conversion function.
+func Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in, out, s)
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..81b652254c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,606 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Plugins != nil {
+		in, out := &in.Plugins, &out.Plugins
+		*out = make([]AdmissionPluginConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfiguration.
+func (in *AdmissionConfiguration) DeepCopy() *AdmissionConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
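// Usage sketch (not part of the vendored file): the generated deep copies
// guarantee that mutating the copy never aliases the original. Variable
// names are illustrative only.
//
//	orig := &AdmissionConfiguration{
//		Plugins: []AdmissionPluginConfiguration{{Name: "NamespaceLifecycle"}},
//	}
//	dup := orig.DeepCopy()
//	dup.Plugins[0].Name = "changed"
//	// orig.Plugins[0].Name is still "NamespaceLifecycle"; the Plugins slice
//	// and its elements were cloned, not shared.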
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) {
+	*out = *in
+	if in.Configuration != nil {
+		in, out := &in.Configuration, &out.Configuration
+		*out = new(runtime.Unknown)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfiguration.
+func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionPluginConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition.
+func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(AnonymousAuthCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]AnonymousAuthCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig.
+func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AnonymousAuthConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]JWTAuthenticator, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Anonymous != nil {
+		in, out := &in.Anonymous, &out.Anonymous
+		*out = new(AnonymousAuthConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfiguration.
+func (in *AuthenticationConfiguration) DeepCopy() *AuthenticationConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthenticationConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Authorizers != nil {
+		in, out := &in.Authorizers, &out.Authorizers
+		*out = make([]AuthorizerConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration.
+func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthorizationConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) {
+	*out = *in
+	if in.Webhook != nil {
+		in, out := &in.Webhook, &out.Webhook
+		*out = new(WebhookConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration.
+func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthorizerConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimMappings) DeepCopyInto(out *ClaimMappings) {
+	*out = *in
+	in.Username.DeepCopyInto(&out.Username)
+	in.Groups.DeepCopyInto(&out.Groups)
+	out.UID = in.UID
+	if in.Extra != nil {
+		in, out := &in.Extra, &out.Extra
+		*out = make([]ExtraMapping, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimMappings.
+func (in *ClaimMappings) DeepCopy() *ClaimMappings {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimMappings)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimOrExpression) DeepCopyInto(out *ClaimOrExpression) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimOrExpression.
+func (in *ClaimOrExpression) DeepCopy() *ClaimOrExpression {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimOrExpression)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimValidationRule) DeepCopyInto(out *ClaimValidationRule) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimValidationRule.
+func (in *ClaimValidationRule) DeepCopy() *ClaimValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Connection) DeepCopyInto(out *Connection) {
+	*out = *in
+	if in.Transport != nil {
+		in, out := &in.Transport, &out.Transport
+		*out = new(Transport)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection.
+func (in *Connection) DeepCopy() *Connection {
+	if in == nil {
+		return nil
+	}
+	out := new(Connection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressSelection) DeepCopyInto(out *EgressSelection) {
+	*out = *in
+	in.Connection.DeepCopyInto(&out.Connection)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelection.
+func (in *EgressSelection) DeepCopy() *EgressSelection {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressSelection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressSelectorConfiguration) DeepCopyInto(out *EgressSelectorConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]EgressSelection, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelectorConfiguration.
+func (in *EgressSelectorConfiguration) DeepCopy() *EgressSelectorConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressSelectorConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping.
+func (in *ExtraMapping) DeepCopy() *ExtraMapping {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtraMapping)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Issuer) DeepCopyInto(out *Issuer) {
+	*out = *in
+	if in.DiscoveryURL != nil {
+		in, out := &in.DiscoveryURL, &out.DiscoveryURL
+		*out = new(string)
+		**out = **in
+	}
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Issuer.
+func (in *Issuer) DeepCopy() *Issuer {
+	if in == nil {
+		return nil
+	}
+	out := new(Issuer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JWTAuthenticator) DeepCopyInto(out *JWTAuthenticator) {
+	*out = *in
+	in.Issuer.DeepCopyInto(&out.Issuer)
+	if in.ClaimValidationRules != nil {
+		in, out := &in.ClaimValidationRules, &out.ClaimValidationRules
+		*out = make([]ClaimValidationRule, len(*in))
+		copy(*out, *in)
+	}
+	in.ClaimMappings.DeepCopyInto(&out.ClaimMappings)
+	if in.UserValidationRules != nil {
+		in, out := &in.UserValidationRules, &out.UserValidationRules
+		*out = make([]UserValidationRule, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JWTAuthenticator.
+func (in *JWTAuthenticator) DeepCopy() *JWTAuthenticator {
+	if in == nil {
+		return nil
+	}
+	out := new(JWTAuthenticator)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrefixedClaimOrExpression) DeepCopyInto(out *PrefixedClaimOrExpression) {
+	*out = *in
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimOrExpression.
+func (in *PrefixedClaimOrExpression) DeepCopy() *PrefixedClaimOrExpression {
+	if in == nil {
+		return nil
+	}
+	out := new(PrefixedClaimOrExpression)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TCPTransport) DeepCopyInto(out *TCPTransport) {
+	*out = *in
+	if in.TLSConfig != nil {
+		in, out := &in.TLSConfig, &out.TLSConfig
+		*out = new(TLSConfig)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport.
+func (in *TCPTransport) DeepCopy() *TCPTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(TCPTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
+func (in *TLSConfig) DeepCopy() *TLSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.TracingConfiguration.DeepCopyInto(&out.TracingConfiguration)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
+func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TracingConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Transport) DeepCopyInto(out *Transport) {
+	*out = *in
+	if in.TCP != nil {
+		in, out := &in.TCP, &out.TCP
+		*out = new(TCPTransport)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UDS != nil {
+		in, out := &in.UDS, &out.UDS
+		*out = new(UDSTransport)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport.
+func (in *Transport) DeepCopy() *Transport {
+	if in == nil {
+		return nil
+	}
+	out := new(Transport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UDSTransport) DeepCopyInto(out *UDSTransport) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport.
+func (in *UDSTransport) DeepCopy() *UDSTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(UDSTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserValidationRule) DeepCopyInto(out *UserValidationRule) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserValidationRule.
+func (in *UserValidationRule) DeepCopy() *UserValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(UserValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) {
+	*out = *in
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo)
+	if in.MatchConditions != nil {
+		in, out := &in.MatchConditions, &out.MatchConditions
+		*out = make([]WebhookMatchCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration.
+func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) {
+	*out = *in
+	if in.KubeConfigFile != nil {
+		in, out := &in.KubeConfigFile, &out.KubeConfigFile
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo.
+func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConnectionInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition.
+func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookMatchCondition)
+	in.DeepCopyInto(out)
+	return out
+}
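+
+// A minimal usage sketch (illustrative, not generated): DeepCopy must return
+// a copy that shares no mutable state with the receiver, so nested slices
+// such as JWT are reallocated rather than aliased:
+//
+//	orig := &AuthenticationConfiguration{JWT: []JWTAuthenticator{{}}}
+//	cp := orig.DeepCopy()
+//	cp.JWT[0].Issuer.URL = "https://other.example.com"
+//	// orig.JWT[0].Issuer.URL is unchanged; the copy does not alias orig.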
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go
new file mode 100644
index 0000000000..fc76be0fb8
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go
@@ -0,0 +1,43 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering: they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	scheme.AddTypeDefaultingFunc(&AuthorizationConfiguration{}, func(obj interface{}) { SetObjectDefaults_AuthorizationConfiguration(obj.(*AuthorizationConfiguration)) })
+	return nil
+}
+
+func SetObjectDefaults_AuthorizationConfiguration(in *AuthorizationConfiguration) {
+	for i := range in.Authorizers {
+		a := &in.Authorizers[i]
+		if a.Webhook != nil {
+			SetDefaults_WebhookConfiguration(a.Webhook)
+		}
+	}
+}
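+
+// A minimal usage sketch (illustrative): RegisterDefaults wires the
+// SetObjectDefaults_* functions into a scheme, so scheme.Default applies the
+// nested webhook defaults to every authorizer entry:
+//
+//	scheme := runtime.NewScheme()
+//	_ = RegisterDefaults(scheme)
+//	cfg := &AuthorizationConfiguration{Authorizers: []AuthorizerConfiguration{
+//		{Type: "Webhook", Webhook: &WebhookConfiguration{}},
+//	}}
+//	scheme.Default(cfg) // fills AuthorizedTTL and UnauthorizedTTL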
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/conversion.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/conversion.go
new file mode 100644
index 0000000000..d7110eff1b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/conversion.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	apiserver "k8s.io/apiserver/pkg/apis/apiserver"
+)
+
+func Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error {
+	if err := autoConvert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in, out, s); err != nil {
+		return err
+	}
+	if out.Name == "master" {
+		out.Name = "controlplane"
+	}
+	return nil
+}
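+
+// A minimal sketch of the effect (scope handling elided): a v1beta1 config
+// that still uses the deprecated "master" selector surfaces internally as
+// "controlplane":
+//
+//	in := EgressSelection{Name: "master"}
+//	var out apiserver.EgressSelection
+//	_ = Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(&in, &out, nil)
+//	// out.Name == "controlplane"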
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/defaults.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/defaults.go
new file mode 100644
index 0000000000..eebcb6c002
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/defaults.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+	return RegisterDefaults(scheme)
+}
+
+func SetDefaults_WebhookConfiguration(obj *WebhookConfiguration) {
+	if obj.AuthorizedTTL.Duration == 0 {
+		obj.AuthorizedTTL.Duration = 5 * time.Minute
+	}
+	if obj.UnauthorizedTTL.Duration == 0 {
+		obj.UnauthorizedTTL.Duration = 30 * time.Second
+	}
+}
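+
+// A minimal sketch (metav1 import assumed): only zero durations are
+// defaulted, so explicitly configured values survive:
+//
+//	w := &WebhookConfiguration{AuthorizedTTL: metav1.Duration{Duration: time.Minute}}
+//	SetDefaults_WebhookConfiguration(w)
+//	// w.AuthorizedTTL stays 1m; w.UnauthorizedTTL becomes 30s.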
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/doc.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/doc.go
new file mode 100644
index 0000000000..07321e775d
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver
+// +k8s:defaulter-gen=TypeMeta
+// +groupName=apiserver.k8s.io
+
+// Package v1beta1 is the v1beta1 version of the API.
+package v1beta1 // import "k8s.io/apiserver/pkg/apis/apiserver/v1beta1"
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/register.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/register.go
new file mode 100644
index 0000000000..3718a85d18
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/register.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const GroupName = "apiserver.k8s.io"
+const ConfigGroupName = "apiserver.config.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// ConfigSchemeGroupVersion is group version used to register these objects
+var ConfigSchemeGroupVersion = schema.GroupVersion{Group: ConfigGroupName, Version: "v1beta1"}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
+}
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&EgressSelectorConfiguration{},
+	)
+	scheme.AddKnownTypes(ConfigSchemeGroupVersion,
+		&AuthenticationConfiguration{},
+		&AuthorizationConfiguration{},
+		&TracingConfiguration{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
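+
+// A minimal decoding sketch (serializer import and raw bytes in data
+// assumed): because both group versions are registered above, a single
+// scheme can decode apiserver.k8s.io/v1beta1 and
+// apiserver.config.k8s.io/v1beta1 objects:
+//
+//	scheme := runtime.NewScheme()
+//	_ = AddToScheme(scheme)
+//	codecs := serializer.NewCodecFactory(scheme)
+//	obj, gvk, err := codecs.UniversalDecoder(SchemeGroupVersion, ConfigSchemeGroupVersion).Decode(data, nil, nil)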
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go
new file mode 100644
index 0000000000..a0e13593b3
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go
@@ -0,0 +1,598 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	tracingapi "k8s.io/component-base/tracing/api/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EgressSelectorConfiguration provides versioned configuration for egress selector clients.
+type EgressSelectorConfiguration struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// connectionServices contains a list of egress selection client configurations
+	EgressSelections []EgressSelection `json:"egressSelections"`
+}
+
+// EgressSelection provides the configuration for a single egress selection client.
+type EgressSelection struct {
+	// name is the name of the egress selection.
+	// Currently supported values are "controlplane", "master", "etcd" and "cluster"
+	// The "master" egress selector is deprecated in favor of "controlplane"
+	Name string `json:"name"`
+
+	// connection is the exact information used to configure the egress selection
+	Connection Connection `json:"connection"`
+}
+
+// Connection provides the configuration for a single egress selection client.
+type Connection struct {
+	// ProxyProtocol is the protocol used to connect from the client to the konnectivity server.
+	ProxyProtocol ProtocolType `json:"proxyProtocol,omitempty"`
+
+	// Transport defines the transport configurations we use to dial to the konnectivity server.
+	// This is required if ProxyProtocol is HTTPConnect or GRPC.
+	// +optional
+	Transport *Transport `json:"transport,omitempty"`
+}
+
+// ProtocolType is the set of valid values for Connection.ProxyProtocol.
+type ProtocolType string
+
+// Valid types for ProtocolType for konnectivity server
+const (
+	// Use HTTPConnect to connect to konnectivity server
+	ProtocolHTTPConnect ProtocolType = "HTTPConnect"
+	// Use grpc to connect to konnectivity server
+	ProtocolGRPC ProtocolType = "GRPC"
+	// Connect directly (skip konnectivity server)
+	ProtocolDirect ProtocolType = "Direct"
+)
+
+// Transport defines the transport configurations we use to dial to the konnectivity server
+type Transport struct {
+	// TCP is the TCP configuration for communicating with the konnectivity server via TCP
+	// ProxyProtocol of GRPC is not supported with TCP transport at the moment
+	// Requires at least one of TCP or UDS to be set
+	// +optional
+	TCP *TCPTransport `json:"tcp,omitempty"`
+
+	// UDS is the UDS configuration for communicating with the konnectivity server via UDS
+	// Requires at least one of TCP or UDS to be set
+	// +optional
+	UDS *UDSTransport `json:"uds,omitempty"`
+}
+
+// TCPTransport provides the information to connect to konnectivity server via TCP
+type TCPTransport struct {
+	// URL is the location of the konnectivity server to connect to.
+	// As an example it might be "https://127.0.0.1:8131"
+	URL string `json:"url,omitempty"`
+
+	// TLSConfig is the config needed to use TLS when connecting to konnectivity server
+	// +optional
+	TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
+}
+
+// UDSTransport provides the information to connect to konnectivity server via UDS
+type UDSTransport struct {
+	// UDSName is the name of the unix domain socket to connect to konnectivity server
+	// This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket)
+	UDSName string `json:"udsName,omitempty"`
+}
+
+// TLSConfig provides the authentication information to connect to konnectivity server
+// Only used with TCPTransport
+type TLSConfig struct {
+	// caBundle is the file location of the CA to be used to determine trust with the konnectivity server.
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// If absent while TCPTransport.URL is prefixed with https://, default to system trust roots.
+	// +optional
+	CABundle string `json:"caBundle,omitempty"`
+
+	// clientKey is the file location of the client key to be used in mtls handshakes with the konnectivity server.
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// Must be configured if TCPTransport.URL is prefixed with https://
+	// +optional
+	ClientKey string `json:"clientKey,omitempty"`
+
+	// clientCert is the file location of the client certificate to be used in mtls handshakes with the konnectivity server.
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// Must be configured if TCPTransport.URL is prefixed with https://
+	// +optional
+	ClientCert string `json:"clientCert,omitempty"`
+}
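+
+// An illustrative egress selection (file paths are placeholders): dial the
+// konnectivity server over mTLS-protected TCP for cluster-bound traffic:
+//
+//	EgressSelection{
+//		Name: "cluster",
+//		Connection: Connection{
+//			ProxyProtocol: ProtocolHTTPConnect,
+//			Transport: &Transport{
+//				TCP: &TCPTransport{
+//					URL: "https://127.0.0.1:8131",
+//					TLSConfig: &TLSConfig{
+//						CABundle:   "/etc/konnectivity/ca.crt",
+//						ClientKey:  "/etc/konnectivity/client.key",
+//						ClientCert: "/etc/konnectivity/client.crt",
+//					},
+//				},
+//			},
+//		},
+//	}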
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TracingConfiguration provides versioned configuration for tracing clients.
+type TracingConfiguration struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Embed the component config tracing configuration struct
+	tracingapi.TracingConfiguration `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AuthenticationConfiguration provides versioned configuration for authentication.
+type AuthenticationConfiguration struct {
+	metav1.TypeMeta
+
+	// jwt is a list of authenticators to authenticate Kubernetes users using
+	// JWT-compliant tokens. Each authenticator will attempt to parse a raw ID token
+	// and verify that it has been signed by the configured issuer. The public key to verify the
+	// signature is discovered from the issuer's public endpoint using OIDC discovery.
+	// For an incoming token, each JWT authenticator will be attempted in
+	// the order in which it is specified in this list.  Note however that
+	// other authenticators may run before or after the JWT authenticators.
+	// The specific position of JWT authenticators in relation to other
+	// authenticators is neither defined nor stable across releases.  Since
+	// each JWT authenticator must have a unique issuer URL, at most one
+	// JWT authenticator will attempt to cryptographically validate the token.
+	//
+	// The minimum valid JWT payload must contain the following claims:
+	// {
+	//		"iss": "https://issuer.example.com",
+	//		"aud": ["audience"],
+	//		"exp": 1234567890,
+	//		"<username-claim>": "username"
+	// }
+	JWT []JWTAuthenticator `json:"jwt"`
+
+	// If present, the --anonymous-auth flag must not be set.
+	Anonymous *AnonymousAuthConfig `json:"anonymous,omitempty"`
+}
+
+// AnonymousAuthConfig provides the configuration for the anonymous authenticator.
+type AnonymousAuthConfig struct {
+	Enabled bool `json:"enabled"`
+
+	// If set, anonymous auth is only allowed if the request meets one of the
+	// conditions.
+	Conditions []AnonymousAuthCondition `json:"conditions,omitempty"`
+}
+
+// AnonymousAuthCondition describes the condition under which anonymous auth
+// should be enabled.
+type AnonymousAuthCondition struct {
+	// Path for which anonymous auth is enabled.
+	Path string `json:"path"`
+}
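+
+// An illustrative anonymous-auth setup (paths are examples): keep anonymous
+// requests allowed only for the health endpoints:
+//
+//	AnonymousAuthConfig{
+//		Enabled: true,
+//		Conditions: []AnonymousAuthCondition{
+//			{Path: "/healthz"},
+//			{Path: "/readyz"},
+//		},
+//	}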
+
+// JWTAuthenticator provides the configuration for a single JWT authenticator.
+type JWTAuthenticator struct {
+	// issuer contains the basic OIDC provider connection options.
+	// +required
+	Issuer Issuer `json:"issuer"`
+
+	// claimValidationRules are rules that are applied to validate token claims to authenticate users.
+	// +optional
+	ClaimValidationRules []ClaimValidationRule `json:"claimValidationRules,omitempty"`
+
+	// claimMappings points claims of a token to be treated as user attributes.
+	// +required
+	ClaimMappings ClaimMappings `json:"claimMappings"`
+
+	// userValidationRules are rules that are applied to final user before completing authentication.
+	// These allow invariants to be applied to incoming identities such as preventing the
+	// use of the system: prefix that is commonly used by Kubernetes components.
+	// The validation rules are logically ANDed together and must all return true for the validation to pass.
+	// +optional
+	UserValidationRules []UserValidationRule `json:"userValidationRules,omitempty"`
+}
+
+// Issuer provides the configuration for an external provider's specific settings.
+type Issuer struct {
+	// url points to the issuer URL in a format https://url or https://url/path.
+	// This must match the "iss" claim in the presented JWT, and the issuer returned from discovery.
+	// Same value as the --oidc-issuer-url flag.
+	// Discovery information is fetched from "{url}/.well-known/openid-configuration" unless overridden by discoveryURL.
+	// Required to be unique across all JWT authenticators.
+	// Note that egress selection configuration is not used for this network connection.
+	// +required
+	URL string `json:"url"`
+
+	// discoveryURL, if specified, overrides the URL used to fetch discovery
+	// information instead of using "{url}/.well-known/openid-configuration".
+	// The exact value specified is used, so "/.well-known/openid-configuration"
+	// must be included in discoveryURL if needed.
+	//
+	// The "issuer" field in the fetched discovery information must match the "issuer.url" field
+	// in the AuthenticationConfiguration and will be used to validate the "iss" claim in the presented JWT.
+	// This is for scenarios where the well-known and jwks endpoints are hosted at a different
+	// location than the issuer (such as locally in the cluster).
+	//
+	// Example:
+	// A discovery url that is exposed using kubernetes service 'oidc' in namespace 'oidc-namespace'
+	// and discovery information is available at '/.well-known/openid-configuration'.
+	// discoveryURL: "https://oidc.oidc-namespace/.well-known/openid-configuration"
+	// certificateAuthority is used to verify the TLS connection and the hostname on the leaf certificate
+	// must be set to 'oidc.oidc-namespace'.
+	//
+	// curl https://oidc.oidc-namespace/.well-known/openid-configuration (.discoveryURL field)
+	// {
+	//     issuer: "https://oidc.example.com" (.url field)
+	// }
+	//
+	// discoveryURL must be different from url.
+	// Required to be unique across all JWT authenticators.
+	// Note that egress selection configuration is not used for this network connection.
+	// +optional
+	DiscoveryURL *string `json:"discoveryURL,omitempty"`
+
+	// certificateAuthority contains PEM-encoded certificate authority certificates
+	// used to validate the connection when fetching discovery information.
+	// If unset, the system verifier is used.
+	// Same value as the content of the file referenced by the --oidc-ca-file flag.
+	// +optional
+	CertificateAuthority string `json:"certificateAuthority,omitempty"`
+
+	// audiences is the set of acceptable audiences the JWT must be issued to.
+	// At least one of the entries must match the "aud" claim in presented JWTs.
+	// Same value as the --oidc-client-id flag (though this field supports an array).
+	// Required to be non-empty.
+	// +required
+	Audiences []string `json:"audiences"`
+
+	// audienceMatchPolicy defines how the "audiences" field is used to match the "aud" claim in the presented JWT.
+	// Allowed values are:
+	// 1. "MatchAny" when multiple audiences are specified and
+	// 2. empty (or unset) or "MatchAny" when a single audience is specified.
+	//
+	// - MatchAny: the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field.
+	// For example, if "audiences" is ["foo", "bar"], the "aud" claim in the presented JWT must contain either "foo" or "bar" (and may contain both).
+	//
+	// - "": The match policy can be empty (or unset) when a single audience is specified in the "audiences" field. The "aud" claim in the presented JWT must contain the single audience (and may contain others).
+	//
+	// For more nuanced audience validation, use claimValidationRules.
+	//   example: claimValidationRule[].expression: 'sets.equivalent(claims.aud, ["bar", "foo", "baz"])' to require an exact match.
+	// +optional
+	AudienceMatchPolicy AudienceMatchPolicyType `json:"audienceMatchPolicy,omitempty"`
+}
+
+// AudienceMatchPolicyType is a set of valid values for issuer.audienceMatchPolicy
+type AudienceMatchPolicyType string
+
+// Valid types for AudienceMatchPolicyType
+const (
+	// MatchAny means the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field.
+	AudienceMatchPolicyMatchAny AudienceMatchPolicyType = "MatchAny"
+)
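+
+// An illustrative issuer (URL and audience are placeholders) matching the
+// flag-based setup --oidc-issuer-url=https://issuer.example.com
+// --oidc-client-id=my-app:
+//
+//	Issuer{
+//		URL:                 "https://issuer.example.com",
+//		Audiences:           []string{"my-app"},
+//		AudienceMatchPolicy: AudienceMatchPolicyMatchAny,
+//	}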
+
+// ClaimValidationRule provides the configuration for a single claim validation rule.
+type ClaimValidationRule struct {
+	// claim is the name of a required claim.
+	// Same as --oidc-required-claim flag.
+	// Only string claim keys are supported.
+	// Mutually exclusive with expression and message.
+	// +optional
+	Claim string `json:"claim,omitempty"`
+	// requiredValue is the value of a required claim.
+	// Same as --oidc-required-claim flag.
+	// Only string claim values are supported.
+	// If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string.
+	// Mutually exclusive with expression and message.
+	// +optional
+	RequiredValue string `json:"requiredValue,omitempty"`
+
+	// expression represents the expression which will be evaluated by CEL.
+	// Must produce a boolean.
+	//
+	// CEL expressions have access to the contents of the token claims, organized into CEL variable:
+	// - 'claims' is a map of claim names to claim values.
+	//   For example, a variable named 'sub' can be accessed as 'claims.sub'.
+	//   Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+	// Must return true for the validation to pass.
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	//
+	// Mutually exclusive with claim and requiredValue.
+	// +optional
+	Expression string `json:"expression,omitempty"`
+	// message customizes the returned error message when expression returns false.
+	// message is a literal string.
+	// Mutually exclusive with claim and requiredValue.
+	// +optional
+	Message string `json:"message,omitempty"`
+}
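+
+// An illustrative rule reusing the sets.equivalent expression quoted in the
+// audienceMatchPolicy documentation above to require an exact audience match:
+//
+//	ClaimValidationRule{
+//		Expression: `sets.equivalent(claims.aud, ["bar", "foo", "baz"])`,
+//		Message:    "token audiences must be exactly bar, foo and baz",
+//	}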
+
+// ClaimMappings provides the configuration for claim mapping
+type ClaimMappings struct {
+	// username represents an option for the username attribute.
+	// The claim's value must be a singular string.
+	// Same as the --oidc-username-claim and --oidc-username-prefix flags.
+	// If username.expression is set, the expression must produce a string value.
+	// If username.expression uses 'claims.email', then 'claims.email_verified' must be used in
+	// username.expression or extra[*].valueExpression or claimValidationRules[*].expression.
+	// An example claim validation rule expression that matches the validation automatically
+	// applied when username.claim is set to 'email' is 'claims.?email_verified.orValue(true)'.
+	//
+	// In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set,
+	// the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly.
+	// For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config.
+	// For prefix:
+	//     (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config,
+	//         set username.prefix=""
+	//     (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "<value of --oidc-issuer-url>#". For the same
+	//         behavior using authentication config, set username.prefix="<value of issuer.url>#"
+	//     (3) --oidc-username-prefix="<value>". For the same behavior using authentication config, set username.prefix="<value>"
+	// +required
+	Username PrefixedClaimOrExpression `json:"username"`
+	// groups represents an option for the groups attribute.
+	// The claim's value must be a string or string array claim.
+	// If groups.claim is set, the prefix must be specified (and can be the empty string).
+	// If groups.expression is set, the expression must produce a string or string array value.
+	//  "", [], and null values are treated as the group mapping not being present.
+	// +optional
+	Groups PrefixedClaimOrExpression `json:"groups,omitempty"`
+
+	// uid represents an option for the uid attribute.
+	// Claim must be a singular string claim.
+	// If uid.expression is set, the expression must produce a string value.
+	// +optional
+	UID ClaimOrExpression `json:"uid"`
+
+	// extra represents an option for the extra attribute.
+	// expression must produce a string or string array value.
+	// If the value is empty, the extra mapping will not be present.
+	//
+	// hard-coded extra key/value
+	// - key: "foo"
+	//   valueExpression: "'bar'"
+	// This will result in an extra attribute - foo: ["bar"]
+	//
+	// hard-coded key, value copying claim value
+	// - key: "foo"
+	//   valueExpression: "claims.some_claim"
+	// This will result in an extra attribute - foo: [value of some_claim]
+	//
+	// hard-coded key, value derived from claim value
+	// - key: "admin"
+	//   valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""'
+	// This will result in:
+	//  - if is_admin claim is present and true, extra attribute - admin: ["true"]
+	//  - if is_admin claim is present and false or is_admin claim is not present, no extra attribute will be added
+	//
+	// +optional
+	Extra []ExtraMapping `json:"extra,omitempty"`
+}
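+
+// An illustrative mapping (ptrTo stands in for a *string helper such as
+// k8s.io/utils/ptr.To):
+//
+//	ClaimMappings{
+//		Username: PrefixedClaimOrExpression{Claim: "sub", Prefix: ptrTo("oidc:")},
+//		Groups:   PrefixedClaimOrExpression{Claim: "groups", Prefix: ptrTo("")},
+//		UID:      ClaimOrExpression{Claim: "sub"},
+//		Extra: []ExtraMapping{
+//			{Key: "example.org/foo", ValueExpression: "'bar'"},
+//		},
+//	}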
+
+// PrefixedClaimOrExpression provides the configuration for a single prefixed claim or expression.
+type PrefixedClaimOrExpression struct {
+	// claim is the JWT claim to use.
+	// Mutually exclusive with expression.
+	// +optional
+	Claim string `json:"claim,omitempty"`
+	// prefix is prepended to claim's value to prevent clashes with existing names.
+	// prefix needs to be set if claim is set and can be the empty string.
+	// Mutually exclusive with expression.
+	// +optional
+	Prefix *string `json:"prefix,omitempty"`
+
+	// expression represents the expression which will be evaluated by CEL.
+	//
+	// CEL expressions have access to the contents of the token claims, organized into CEL variable:
+	// - 'claims' is a map of claim names to claim values.
+	//   For example, a variable named 'sub' can be accessed as 'claims.sub'.
+	//   Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	//
+	// Mutually exclusive with claim and prefix.
+	// +optional
+	Expression string `json:"expression,omitempty"`
+}
+
+// ClaimOrExpression provides the configuration for a single claim or expression.
+type ClaimOrExpression struct {
+	// claim is the JWT claim to use.
+	// Either claim or expression must be set.
+	// Mutually exclusive with expression.
+	// +optional
+	Claim string `json:"claim,omitempty"`
+
+	// expression represents the expression which will be evaluated by CEL.
+	//
+	// CEL expressions have access to the contents of the token claims, organized into CEL variable:
+	// - 'claims' is a map of claim names to claim values.
+	//   For example, a variable named 'sub' can be accessed as 'claims.sub'.
+	//   Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	//
+	// Mutually exclusive with claim.
+	// +optional
+	Expression string `json:"expression,omitempty"`
+}
+
+// ExtraMapping provides the configuration for a single extra mapping.
+type ExtraMapping struct {
+	// key is a string to use as the extra attribute key.
+	// key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid
+	// subdomain as defined by RFC 1123. All characters trailing the first "/" must
+	// be valid HTTP Path characters as defined by RFC 3986.
+	// key must be lowercase.
+	// Required to be unique.
+	// +required
+	Key string `json:"key"`
+
+	// valueExpression is a CEL expression to extract extra attribute value.
+	// valueExpression must produce a string or string array value.
+	// "", [], and null values are treated as the extra mapping not being present.
+	// Empty string values contained within a string array are filtered out.
+	//
+	// CEL expressions have access to the contents of the token claims, organized into CEL variable:
+	// - 'claims' is a map of claim names to claim values.
+	//   For example, a variable named 'sub' can be accessed as 'claims.sub'.
+	//   Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	//
+	// +required
+	ValueExpression string `json:"valueExpression"`
+}
+
+// UserValidationRule provides the configuration for a single user info validation rule.
+type UserValidationRule struct {
+	// expression represents the expression which will be evaluated by CEL.
+	// Must return true for the validation to pass.
+	//
+	// CEL expressions have access to the contents of UserInfo, organized into CEL variable:
+	// - 'user' - authentication.k8s.io/v1, Kind=UserInfo object
+	//    Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition.
+	//    API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	//
+	// +required
+	Expression string `json:"expression"`
+
+	// message customizes the returned error message when rule returns false.
+	// message is a literal string.
+	// +optional
+	Message string `json:"message,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type AuthorizationConfiguration struct {
+	metav1.TypeMeta
+
+	// Authorizers is an ordered list of authorizers to
+	// authorize requests against.
+	// This is similar to the --authorization-modes kube-apiserver flag.
+	// At least one authorizer is required.
+	Authorizers []AuthorizerConfiguration `json:"authorizers"`
+}
+
+const (
+	TypeWebhook                                          AuthorizerType = "Webhook"
+	FailurePolicyNoOpinion                               string         = "NoOpinion"
+	FailurePolicyDeny                                    string         = "Deny"
+	AuthorizationWebhookConnectionInfoTypeKubeConfigFile string         = "KubeConfigFile"
+	AuthorizationWebhookConnectionInfoTypeInCluster      string         = "InClusterConfig"
+)
+
+type AuthorizerType string
+
+type AuthorizerConfiguration struct {
+	// Type refers to the type of the authorizer
+	// "Webhook" is supported in the generic API server
+	// Other API servers may support additional authorizer
+	// types like Node, RBAC, ABAC, etc.
+	Type string `json:"type"`
+
+	// Name used to describe the webhook
+	// This is explicitly used in monitoring machinery for metrics
+	// Note: Names must be DNS1123 labels like `myauthorizername` or
+	//		 subdomains like `myauthorizer.example.domain`
+	// Required, with no default
+	Name string `json:"name"`
+
+	// Webhook defines the configuration for a Webhook authorizer
+	// Must be defined when Type=Webhook
+	// Must not be defined when Type!=Webhook
+	Webhook *WebhookConfiguration `json:"webhook,omitempty"`
+}
+
+type WebhookConfiguration struct {
+	// The duration to cache 'authorized' responses from the webhook
+	// authorizer.
+	// Same as setting `--authorization-webhook-cache-authorized-ttl` flag
+	// Default: 5m0s
+	AuthorizedTTL metav1.Duration `json:"authorizedTTL"`
+	// The duration to cache 'unauthorized' responses from the webhook
+	// authorizer.
+	// Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag
+	// Default: 30s
+	UnauthorizedTTL metav1.Duration `json:"unauthorizedTTL"`
+	// Timeout for the webhook request
+	// Maximum allowed value is 30s.
+	// Required, no default value.
+	Timeout metav1.Duration `json:"timeout"`
+	// The API version of the authorization.k8s.io SubjectAccessReview to
+	// send to and expect from the webhook.
+	// Same as setting `--authorization-webhook-version` flag
+	// Valid values: v1beta1, v1
+	// Required, no default value
+	SubjectAccessReviewVersion string `json:"subjectAccessReviewVersion"`
+	// MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview
+	// version the CEL expressions are evaluated against
+	// Valid values: v1
+	// Required, no default value
+	MatchConditionSubjectAccessReviewVersion string `json:"matchConditionSubjectAccessReviewVersion"`
+	// Controls the authorization decision when a webhook request fails to
+	// complete or returns a malformed response or errors evaluating
+	// matchConditions.
+	// Valid values:
+	//   - NoOpinion: continue to subsequent authorizers to see if one of
+	//     them allows the request
+	//   - Deny: reject the request without consulting subsequent authorizers
+	// Required, with no default.
+	FailurePolicy string `json:"failurePolicy"`
+
+	// ConnectionInfo defines how we talk to the webhook
+	ConnectionInfo WebhookConnectionInfo `json:"connectionInfo"`
+
+	// matchConditions is a list of conditions that must be met for a request to be sent to this
+	// webhook. An empty list of matchConditions matches all requests.
+	// There are a maximum of 64 match conditions allowed.
+	//
+	// The exact matching logic is (in order):
+	//   1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped.
+	//   2. If ALL matchConditions evaluate to TRUE, then the webhook is called.
+	//   3. If at least one matchCondition evaluates to an error (but none are FALSE):
+	//      - If failurePolicy=Deny, then the webhook rejects the request
+	//      - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped
+	MatchConditions []WebhookMatchCondition `json:"matchConditions"`
+}
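+
+// An illustrative authorizer entry (kubeconfig path and name are
+// placeholders; time import assumed) with every required field set:
+//
+//	kubeconfig := "/etc/kubernetes/authz-webhook.kubeconfig"
+//	_ = AuthorizerConfiguration{
+//		Type: string(TypeWebhook),
+//		Name: "example.authorizer.example.domain",
+//		Webhook: &WebhookConfiguration{
+//			AuthorizedTTL:   metav1.Duration{Duration: 5 * time.Minute},
+//			UnauthorizedTTL: metav1.Duration{Duration: 30 * time.Second},
+//			Timeout:         metav1.Duration{Duration: 3 * time.Second},
+//			SubjectAccessReviewVersion:               "v1",
+//			MatchConditionSubjectAccessReviewVersion: "v1",
+//			FailurePolicy:                            FailurePolicyNoOpinion,
+//			ConnectionInfo: WebhookConnectionInfo{
+//				Type:           AuthorizationWebhookConnectionInfoTypeKubeConfigFile,
+//				KubeConfigFile: &kubeconfig,
+//			},
+//		},
+//	}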
+
+type WebhookConnectionInfo struct {
+	// Controls how the webhook should communicate with the server.
+	// Valid values:
+	// - KubeConfigFile: use the file specified in kubeConfigFile to locate the
+	//   server.
+	// - InClusterConfig: use the in-cluster configuration to call the
+	//   SubjectAccessReview API hosted by kube-apiserver. This mode is not
+	//   allowed for kube-apiserver.
+	Type string `json:"type"`
+
+	// Path to the kubeconfig file used for connection info.
+	// Required if connectionInfo.Type is KubeConfigFile.
+	KubeConfigFile *string `json:"kubeConfigFile"`
+}
+
+type WebhookMatchCondition struct {
+	// expression represents the expression which will be evaluated by CEL. Must evaluate to bool.
+	// CEL expressions have access to the contents of the SubjectAccessReview in v1 version.
+	// If version specified by subjectAccessReviewVersion in the request variable is v1beta1,
+	// the contents would be converted to the v1 version before evaluating the CEL expression.
+	//
+	// - 'resourceAttributes' describes information for a resource access request and is unset for non-resource requests. e.g. has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default'
+	// - 'nonResourceAttributes' describes information for a non-resource access request and is unset for resource requests. e.g. has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/healthz'.
+	// - 'user' is the user to test for. e.g. request.user == 'alice'
+	// - 'groups' is the groups to test for. e.g. ('group1' in request.groups)
+	// - 'extra' corresponds to the user.Info.GetExtra() method from the authenticator.
+	// - 'uid' is the information about the requesting user. e.g. request.uid == '1'
+	//
+	// Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/
+	Expression string `json:"expression"`
+}
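+
+// An illustrative condition, taken from the resourceAttributes example above:
+//
+//	WebhookMatchCondition{
+//		Expression: "has(request.resourceAttributes) && request.resourceAttributes.namespace == 'default'",
+//	}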
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go
new file mode 100644
index 0000000000..30ef049d40
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,900 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	unsafe "unsafe"
+
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	apiserver "k8s.io/apiserver/pkg/apis/apiserver"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*AnonymousAuthCondition)(nil), (*apiserver.AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(a.(*AnonymousAuthCondition), b.(*apiserver.AnonymousAuthCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthCondition)(nil), (*AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(a.(*apiserver.AnonymousAuthCondition), b.(*AnonymousAuthCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AnonymousAuthConfig)(nil), (*apiserver.AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(a.(*AnonymousAuthConfig), b.(*apiserver.AnonymousAuthConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthConfig)(nil), (*AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(a.(*apiserver.AnonymousAuthConfig), b.(*AnonymousAuthConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AuthenticationConfiguration)(nil), (*apiserver.AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(a.(*AuthenticationConfiguration), b.(*apiserver.AuthenticationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AuthenticationConfiguration)(nil), (*AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AuthenticationConfiguration_To_v1beta1_AuthenticationConfiguration(a.(*apiserver.AuthenticationConfiguration), b.(*AuthenticationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AuthorizationConfiguration)(nil), (*apiserver.AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(a.(*AuthorizationConfiguration), b.(*apiserver.AuthorizationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizationConfiguration)(nil), (*AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AuthorizationConfiguration_To_v1beta1_AuthorizationConfiguration(a.(*apiserver.AuthorizationConfiguration), b.(*AuthorizationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AuthorizerConfiguration)(nil), (*apiserver.AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(a.(*AuthorizerConfiguration), b.(*apiserver.AuthorizerConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizerConfiguration)(nil), (*AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AuthorizerConfiguration_To_v1beta1_AuthorizerConfiguration(a.(*apiserver.AuthorizerConfiguration), b.(*AuthorizerConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ClaimMappings)(nil), (*apiserver.ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ClaimMappings_To_apiserver_ClaimMappings(a.(*ClaimMappings), b.(*apiserver.ClaimMappings), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ClaimMappings)(nil), (*ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ClaimMappings_To_v1beta1_ClaimMappings(a.(*apiserver.ClaimMappings), b.(*ClaimMappings), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ClaimOrExpression)(nil), (*apiserver.ClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ClaimOrExpression_To_apiserver_ClaimOrExpression(a.(*ClaimOrExpression), b.(*apiserver.ClaimOrExpression), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ClaimOrExpression)(nil), (*ClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ClaimOrExpression_To_v1beta1_ClaimOrExpression(a.(*apiserver.ClaimOrExpression), b.(*ClaimOrExpression), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ClaimValidationRule)(nil), (*apiserver.ClaimValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ClaimValidationRule_To_apiserver_ClaimValidationRule(a.(*ClaimValidationRule), b.(*apiserver.ClaimValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ClaimValidationRule)(nil), (*ClaimValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ClaimValidationRule_To_v1beta1_ClaimValidationRule(a.(*apiserver.ClaimValidationRule), b.(*ClaimValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Connection)(nil), (*apiserver.Connection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_Connection_To_apiserver_Connection(a.(*Connection), b.(*apiserver.Connection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.Connection)(nil), (*Connection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_Connection_To_v1beta1_Connection(a.(*apiserver.Connection), b.(*Connection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelection)(nil), (*EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection(a.(*apiserver.EgressSelection), b.(*EgressSelection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*EgressSelectorConfiguration)(nil), (*apiserver.EgressSelectorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(a.(*EgressSelectorConfiguration), b.(*apiserver.EgressSelectorConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelectorConfiguration)(nil), (*EgressSelectorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(a.(*apiserver.EgressSelectorConfiguration), b.(*EgressSelectorConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExtraMapping)(nil), (*apiserver.ExtraMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ExtraMapping_To_apiserver_ExtraMapping(a.(*ExtraMapping), b.(*apiserver.ExtraMapping), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ExtraMapping)(nil), (*ExtraMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ExtraMapping_To_v1beta1_ExtraMapping(a.(*apiserver.ExtraMapping), b.(*ExtraMapping), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Issuer)(nil), (*apiserver.Issuer)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_Issuer_To_apiserver_Issuer(a.(*Issuer), b.(*apiserver.Issuer), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.Issuer)(nil), (*Issuer)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_Issuer_To_v1beta1_Issuer(a.(*apiserver.Issuer), b.(*Issuer), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*JWTAuthenticator)(nil), (*apiserver.JWTAuthenticator)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_JWTAuthenticator_To_apiserver_JWTAuthenticator(a.(*JWTAuthenticator), b.(*apiserver.JWTAuthenticator), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.JWTAuthenticator)(nil), (*JWTAuthenticator)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_JWTAuthenticator_To_v1beta1_JWTAuthenticator(a.(*apiserver.JWTAuthenticator), b.(*JWTAuthenticator), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*PrefixedClaimOrExpression)(nil), (*apiserver.PrefixedClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(a.(*PrefixedClaimOrExpression), b.(*apiserver.PrefixedClaimOrExpression), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.PrefixedClaimOrExpression)(nil), (*PrefixedClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression(a.(*apiserver.PrefixedClaimOrExpression), b.(*PrefixedClaimOrExpression), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*TCPTransport)(nil), (*apiserver.TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_TCPTransport_To_apiserver_TCPTransport(a.(*TCPTransport), b.(*apiserver.TCPTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.TCPTransport)(nil), (*TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_TCPTransport_To_v1beta1_TCPTransport(a.(*apiserver.TCPTransport), b.(*TCPTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*TLSConfig)(nil), (*apiserver.TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_TLSConfig_To_apiserver_TLSConfig(a.(*TLSConfig), b.(*apiserver.TLSConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.TLSConfig)(nil), (*TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_TLSConfig_To_v1beta1_TLSConfig(a.(*apiserver.TLSConfig), b.(*TLSConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*TracingConfiguration)(nil), (*apiserver.TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_TracingConfiguration_To_apiserver_TracingConfiguration(a.(*TracingConfiguration), b.(*apiserver.TracingConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.TracingConfiguration)(nil), (*TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_TracingConfiguration_To_v1beta1_TracingConfiguration(a.(*apiserver.TracingConfiguration), b.(*TracingConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Transport)(nil), (*apiserver.Transport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_Transport_To_apiserver_Transport(a.(*Transport), b.(*apiserver.Transport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.Transport)(nil), (*Transport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_Transport_To_v1beta1_Transport(a.(*apiserver.Transport), b.(*Transport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*UDSTransport)(nil), (*apiserver.UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_UDSTransport_To_apiserver_UDSTransport(a.(*UDSTransport), b.(*apiserver.UDSTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.UDSTransport)(nil), (*UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_UDSTransport_To_v1beta1_UDSTransport(a.(*apiserver.UDSTransport), b.(*UDSTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*UserValidationRule)(nil), (*apiserver.UserValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_UserValidationRule_To_apiserver_UserValidationRule(a.(*UserValidationRule), b.(*apiserver.UserValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.UserValidationRule)(nil), (*UserValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_UserValidationRule_To_v1beta1_UserValidationRule(a.(*apiserver.UserValidationRule), b.(*UserValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookConfiguration)(nil), (*apiserver.WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_WebhookConfiguration_To_apiserver_WebhookConfiguration(a.(*WebhookConfiguration), b.(*apiserver.WebhookConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConfiguration)(nil), (*WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_WebhookConfiguration_To_v1beta1_WebhookConfiguration(a.(*apiserver.WebhookConfiguration), b.(*WebhookConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookConnectionInfo)(nil), (*apiserver.WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(a.(*WebhookConnectionInfo), b.(*apiserver.WebhookConnectionInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConnectionInfo)(nil), (*WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_WebhookConnectionInfo_To_v1beta1_WebhookConnectionInfo(a.(*apiserver.WebhookConnectionInfo), b.(*WebhookConnectionInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookMatchCondition)(nil), (*apiserver.WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(a.(*WebhookMatchCondition), b.(*apiserver.WebhookMatchCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.WebhookMatchCondition)(nil), (*WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_WebhookMatchCondition_To_v1beta1_WebhookMatchCondition(a.(*apiserver.WebhookMatchCondition), b.(*WebhookMatchCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error {
+	out.Path = in.Path
+	return nil
+}
+
+// Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition is an autogenerated conversion function.
+func Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error {
+	return autoConvert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in, out, s)
+}
+
+func autoConvert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error {
+	out.Path = in.Path
+	return nil
+}
+
+// Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition is an autogenerated conversion function.
+func Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error {
+	return autoConvert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in, out, s)
+}
+
+func autoConvert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error {
+	out.Enabled = in.Enabled
+	out.Conditions = *(*[]apiserver.AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions))
+	return nil
+}
+
+// Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig is an autogenerated conversion function.
+func Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error {
+	return autoConvert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in, out, s)
+}
+
+func autoConvert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error {
+	out.Enabled = in.Enabled
+	out.Conditions = *(*[]AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions))
+	return nil
+}
+
+// Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig is an autogenerated conversion function.
+func Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error {
+	return autoConvert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error {
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]apiserver.JWTAuthenticator, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_JWTAuthenticator_To_apiserver_JWTAuthenticator(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.JWT = nil
+	}
+	out.Anonymous = (*apiserver.AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous))
+	return nil
+}
+
+// Convert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration is an autogenerated conversion function.
+func Convert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error {
+	return autoConvert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthenticationConfiguration_To_v1beta1_AuthenticationConfiguration(in *apiserver.AuthenticationConfiguration, out *AuthenticationConfiguration, s conversion.Scope) error {
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]JWTAuthenticator, len(*in))
+		for i := range *in {
+			if err := Convert_apiserver_JWTAuthenticator_To_v1beta1_JWTAuthenticator(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.JWT = nil
+	}
+	out.Anonymous = (*AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous))
+	return nil
+}
+
+// Convert_apiserver_AuthenticationConfiguration_To_v1beta1_AuthenticationConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthenticationConfiguration_To_v1beta1_AuthenticationConfiguration(in *apiserver.AuthenticationConfiguration, out *AuthenticationConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthenticationConfiguration_To_v1beta1_AuthenticationConfiguration(in, out, s)
+}
+
+func autoConvert_v1beta1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error {
+	out.Authorizers = *(*[]apiserver.AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers))
+	return nil
+}
+
+// Convert_v1beta1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration is an autogenerated conversion function.
+func Convert_v1beta1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error {
+	return autoConvert_v1beta1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthorizationConfiguration_To_v1beta1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error {
+	out.Authorizers = *(*[]AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers))
+	return nil
+}
+
+// Convert_apiserver_AuthorizationConfiguration_To_v1beta1_AuthorizationConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthorizationConfiguration_To_v1beta1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthorizationConfiguration_To_v1beta1_AuthorizationConfiguration(in, out, s)
+}
+
+func autoConvert_v1beta1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error {
+	out.Type = apiserver.AuthorizerType(in.Type)
+	out.Name = in.Name
+	out.Webhook = (*apiserver.WebhookConfiguration)(unsafe.Pointer(in.Webhook))
+	return nil
+}
+
+// Convert_v1beta1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration is an autogenerated conversion function.
+func Convert_v1beta1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error {
+	return autoConvert_v1beta1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthorizerConfiguration_To_v1beta1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error {
+	out.Type = string(in.Type)
+	out.Name = in.Name
+	out.Webhook = (*WebhookConfiguration)(unsafe.Pointer(in.Webhook))
+	return nil
+}
+
+// Convert_apiserver_AuthorizerConfiguration_To_v1beta1_AuthorizerConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthorizerConfiguration_To_v1beta1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthorizerConfiguration_To_v1beta1_AuthorizerConfiguration(in, out, s)
+}
+
+func autoConvert_v1beta1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error {
+	if err := Convert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil {
+		return err
+	}
+	if err := Convert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Groups, &out.Groups, s); err != nil {
+		return err
+	}
+	if err := Convert_v1beta1_ClaimOrExpression_To_apiserver_ClaimOrExpression(&in.UID, &out.UID, s); err != nil {
+		return err
+	}
+	out.Extra = *(*[]apiserver.ExtraMapping)(unsafe.Pointer(&in.Extra))
+	return nil
+}
+
+// Convert_v1beta1_ClaimMappings_To_apiserver_ClaimMappings is an autogenerated conversion function.
+func Convert_v1beta1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error {
+	return autoConvert_v1beta1_ClaimMappings_To_apiserver_ClaimMappings(in, out, s)
+}
+
+func autoConvert_apiserver_ClaimMappings_To_v1beta1_ClaimMappings(in *apiserver.ClaimMappings, out *ClaimMappings, s conversion.Scope) error {
+	if err := Convert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil {
+		return err
+	}
+	if err := Convert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression(&in.Groups, &out.Groups, s); err != nil {
+		return err
+	}
+	if err := Convert_apiserver_ClaimOrExpression_To_v1beta1_ClaimOrExpression(&in.UID, &out.UID, s); err != nil {
+		return err
+	}
+	out.Extra = *(*[]ExtraMapping)(unsafe.Pointer(&in.Extra))
+	return nil
+}
+
+// Convert_apiserver_ClaimMappings_To_v1beta1_ClaimMappings is an autogenerated conversion function.
+func Convert_apiserver_ClaimMappings_To_v1beta1_ClaimMappings(in *apiserver.ClaimMappings, out *ClaimMappings, s conversion.Scope) error {
+	return autoConvert_apiserver_ClaimMappings_To_v1beta1_ClaimMappings(in, out, s)
+}
+
+func autoConvert_v1beta1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in *ClaimOrExpression, out *apiserver.ClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1beta1_ClaimOrExpression_To_apiserver_ClaimOrExpression is an autogenerated conversion function.
+func Convert_v1beta1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in *ClaimOrExpression, out *apiserver.ClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_v1beta1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in, out, s)
+}
+
+func autoConvert_apiserver_ClaimOrExpression_To_v1beta1_ClaimOrExpression(in *apiserver.ClaimOrExpression, out *ClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_ClaimOrExpression_To_v1beta1_ClaimOrExpression is an autogenerated conversion function.
+func Convert_apiserver_ClaimOrExpression_To_v1beta1_ClaimOrExpression(in *apiserver.ClaimOrExpression, out *ClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_apiserver_ClaimOrExpression_To_v1beta1_ClaimOrExpression(in, out, s)
+}
+
+func autoConvert_v1beta1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in *ClaimValidationRule, out *apiserver.ClaimValidationRule, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.RequiredValue = in.RequiredValue
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_v1beta1_ClaimValidationRule_To_apiserver_ClaimValidationRule is an autogenerated conversion function.
+func Convert_v1beta1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in *ClaimValidationRule, out *apiserver.ClaimValidationRule, s conversion.Scope) error {
+	return autoConvert_v1beta1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in, out, s)
+}
+
+func autoConvert_apiserver_ClaimValidationRule_To_v1beta1_ClaimValidationRule(in *apiserver.ClaimValidationRule, out *ClaimValidationRule, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.RequiredValue = in.RequiredValue
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_apiserver_ClaimValidationRule_To_v1beta1_ClaimValidationRule is an autogenerated conversion function.
+func Convert_apiserver_ClaimValidationRule_To_v1beta1_ClaimValidationRule(in *apiserver.ClaimValidationRule, out *ClaimValidationRule, s conversion.Scope) error {
+	return autoConvert_apiserver_ClaimValidationRule_To_v1beta1_ClaimValidationRule(in, out, s)
+}
+
+func autoConvert_v1beta1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error {
+	out.ProxyProtocol = apiserver.ProtocolType(in.ProxyProtocol)
+	out.Transport = (*apiserver.Transport)(unsafe.Pointer(in.Transport))
+	return nil
+}
+
+// Convert_v1beta1_Connection_To_apiserver_Connection is an autogenerated conversion function.
+func Convert_v1beta1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error {
+	return autoConvert_v1beta1_Connection_To_apiserver_Connection(in, out, s)
+}
+
+func autoConvert_apiserver_Connection_To_v1beta1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error {
+	out.ProxyProtocol = ProtocolType(in.ProxyProtocol)
+	out.Transport = (*Transport)(unsafe.Pointer(in.Transport))
+	return nil
+}
+
+// Convert_apiserver_Connection_To_v1beta1_Connection is an autogenerated conversion function.
+func Convert_apiserver_Connection_To_v1beta1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error {
+	return autoConvert_apiserver_Connection_To_v1beta1_Connection(in, out, s)
+}
+
+func autoConvert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error {
+	out.Name = in.Name
+	if err := Convert_v1beta1_Connection_To_apiserver_Connection(&in.Connection, &out.Connection, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_apiserver_EgressSelection_To_v1beta1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error {
+	out.Name = in.Name
+	if err := Convert_apiserver_Connection_To_v1beta1_Connection(&in.Connection, &out.Connection, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection is an autogenerated conversion function.
+func Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error {
+	return autoConvert_apiserver_EgressSelection_To_v1beta1_EgressSelection(in, out, s)
+}
+
+func autoConvert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error {
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]apiserver.EgressSelection, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.EgressSelections = nil
+	}
+	return nil
+}
+
+// Convert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration is an autogenerated conversion function.
+func Convert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error {
+	return autoConvert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error {
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]EgressSelection, len(*in))
+		for i := range *in {
+			if err := Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.EgressSelections = nil
+	}
+	return nil
+}
+
+// Convert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration is an autogenerated conversion function.
+func Convert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(in, out, s)
+}
+
+func autoConvert_v1beta1_ExtraMapping_To_apiserver_ExtraMapping(in *ExtraMapping, out *apiserver.ExtraMapping, s conversion.Scope) error {
+	out.Key = in.Key
+	out.ValueExpression = in.ValueExpression
+	return nil
+}
+
+// Convert_v1beta1_ExtraMapping_To_apiserver_ExtraMapping is an autogenerated conversion function.
+func Convert_v1beta1_ExtraMapping_To_apiserver_ExtraMapping(in *ExtraMapping, out *apiserver.ExtraMapping, s conversion.Scope) error {
+	return autoConvert_v1beta1_ExtraMapping_To_apiserver_ExtraMapping(in, out, s)
+}
+
+func autoConvert_apiserver_ExtraMapping_To_v1beta1_ExtraMapping(in *apiserver.ExtraMapping, out *ExtraMapping, s conversion.Scope) error {
+	out.Key = in.Key
+	out.ValueExpression = in.ValueExpression
+	return nil
+}
+
+// Convert_apiserver_ExtraMapping_To_v1beta1_ExtraMapping is an autogenerated conversion function.
+func Convert_apiserver_ExtraMapping_To_v1beta1_ExtraMapping(in *apiserver.ExtraMapping, out *ExtraMapping, s conversion.Scope) error {
+	return autoConvert_apiserver_ExtraMapping_To_v1beta1_ExtraMapping(in, out, s)
+}
+
+func autoConvert_v1beta1_Issuer_To_apiserver_Issuer(in *Issuer, out *apiserver.Issuer, s conversion.Scope) error {
+	out.URL = in.URL
+	if err := v1.Convert_Pointer_string_To_string(&in.DiscoveryURL, &out.DiscoveryURL, s); err != nil {
+		return err
+	}
+	out.CertificateAuthority = in.CertificateAuthority
+	out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
+	out.AudienceMatchPolicy = apiserver.AudienceMatchPolicyType(in.AudienceMatchPolicy)
+	return nil
+}
+
+// Convert_v1beta1_Issuer_To_apiserver_Issuer is an autogenerated conversion function.
+func Convert_v1beta1_Issuer_To_apiserver_Issuer(in *Issuer, out *apiserver.Issuer, s conversion.Scope) error {
+	return autoConvert_v1beta1_Issuer_To_apiserver_Issuer(in, out, s)
+}
+
+func autoConvert_apiserver_Issuer_To_v1beta1_Issuer(in *apiserver.Issuer, out *Issuer, s conversion.Scope) error {
+	out.URL = in.URL
+	if err := v1.Convert_string_To_Pointer_string(&in.DiscoveryURL, &out.DiscoveryURL, s); err != nil {
+		return err
+	}
+	out.CertificateAuthority = in.CertificateAuthority
+	out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
+	out.AudienceMatchPolicy = AudienceMatchPolicyType(in.AudienceMatchPolicy)
+	return nil
+}
+
+// Convert_apiserver_Issuer_To_v1beta1_Issuer is an autogenerated conversion function.
+func Convert_apiserver_Issuer_To_v1beta1_Issuer(in *apiserver.Issuer, out *Issuer, s conversion.Scope) error {
+	return autoConvert_apiserver_Issuer_To_v1beta1_Issuer(in, out, s)
+}
+
+func autoConvert_v1beta1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in *JWTAuthenticator, out *apiserver.JWTAuthenticator, s conversion.Scope) error {
+	if err := Convert_v1beta1_Issuer_To_apiserver_Issuer(&in.Issuer, &out.Issuer, s); err != nil {
+		return err
+	}
+	out.ClaimValidationRules = *(*[]apiserver.ClaimValidationRule)(unsafe.Pointer(&in.ClaimValidationRules))
+	if err := Convert_v1beta1_ClaimMappings_To_apiserver_ClaimMappings(&in.ClaimMappings, &out.ClaimMappings, s); err != nil {
+		return err
+	}
+	out.UserValidationRules = *(*[]apiserver.UserValidationRule)(unsafe.Pointer(&in.UserValidationRules))
+	return nil
+}
+
+// Convert_v1beta1_JWTAuthenticator_To_apiserver_JWTAuthenticator is an autogenerated conversion function.
+func Convert_v1beta1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in *JWTAuthenticator, out *apiserver.JWTAuthenticator, s conversion.Scope) error {
+	return autoConvert_v1beta1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in, out, s)
+}
+
+func autoConvert_apiserver_JWTAuthenticator_To_v1beta1_JWTAuthenticator(in *apiserver.JWTAuthenticator, out *JWTAuthenticator, s conversion.Scope) error {
+	if err := Convert_apiserver_Issuer_To_v1beta1_Issuer(&in.Issuer, &out.Issuer, s); err != nil {
+		return err
+	}
+	out.ClaimValidationRules = *(*[]ClaimValidationRule)(unsafe.Pointer(&in.ClaimValidationRules))
+	if err := Convert_apiserver_ClaimMappings_To_v1beta1_ClaimMappings(&in.ClaimMappings, &out.ClaimMappings, s); err != nil {
+		return err
+	}
+	out.UserValidationRules = *(*[]UserValidationRule)(unsafe.Pointer(&in.UserValidationRules))
+	return nil
+}
+
+// Convert_apiserver_JWTAuthenticator_To_v1beta1_JWTAuthenticator is an autogenerated conversion function.
+func Convert_apiserver_JWTAuthenticator_To_v1beta1_JWTAuthenticator(in *apiserver.JWTAuthenticator, out *JWTAuthenticator, s conversion.Scope) error {
+	return autoConvert_apiserver_JWTAuthenticator_To_v1beta1_JWTAuthenticator(in, out, s)
+}
+
+func autoConvert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in *PrefixedClaimOrExpression, out *apiserver.PrefixedClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Prefix = (*string)(unsafe.Pointer(in.Prefix))
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression is an autogenerated conversion function.
+func Convert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in *PrefixedClaimOrExpression, out *apiserver.PrefixedClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in, out, s)
+}
+
+func autoConvert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression(in *apiserver.PrefixedClaimOrExpression, out *PrefixedClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Prefix = (*string)(unsafe.Pointer(in.Prefix))
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression is an autogenerated conversion function.
+func Convert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression(in *apiserver.PrefixedClaimOrExpression, out *PrefixedClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression(in, out, s)
+}
+
+func autoConvert_v1beta1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error {
+	out.URL = in.URL
+	out.TLSConfig = (*apiserver.TLSConfig)(unsafe.Pointer(in.TLSConfig))
+	return nil
+}
+
+// Convert_v1beta1_TCPTransport_To_apiserver_TCPTransport is an autogenerated conversion function.
+func Convert_v1beta1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error {
+	return autoConvert_v1beta1_TCPTransport_To_apiserver_TCPTransport(in, out, s)
+}
+
+func autoConvert_apiserver_TCPTransport_To_v1beta1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error {
+	out.URL = in.URL
+	out.TLSConfig = (*TLSConfig)(unsafe.Pointer(in.TLSConfig))
+	return nil
+}
+
+// Convert_apiserver_TCPTransport_To_v1beta1_TCPTransport is an autogenerated conversion function.
+func Convert_apiserver_TCPTransport_To_v1beta1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error {
+	return autoConvert_apiserver_TCPTransport_To_v1beta1_TCPTransport(in, out, s)
+}
+
+func autoConvert_v1beta1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error {
+	out.CABundle = in.CABundle
+	out.ClientKey = in.ClientKey
+	out.ClientCert = in.ClientCert
+	return nil
+}
+
+// Convert_v1beta1_TLSConfig_To_apiserver_TLSConfig is an autogenerated conversion function.
+func Convert_v1beta1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error {
+	return autoConvert_v1beta1_TLSConfig_To_apiserver_TLSConfig(in, out, s)
+}
+
+func autoConvert_apiserver_TLSConfig_To_v1beta1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error {
+	out.CABundle = in.CABundle
+	out.ClientKey = in.ClientKey
+	out.ClientCert = in.ClientCert
+	return nil
+}
+
+// Convert_apiserver_TLSConfig_To_v1beta1_TLSConfig is an autogenerated conversion function.
+func Convert_apiserver_TLSConfig_To_v1beta1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error {
+	return autoConvert_apiserver_TLSConfig_To_v1beta1_TLSConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_TracingConfiguration_To_apiserver_TracingConfiguration(in *TracingConfiguration, out *apiserver.TracingConfiguration, s conversion.Scope) error {
+	out.TracingConfiguration = in.TracingConfiguration
+	return nil
+}
+
+// Convert_v1beta1_TracingConfiguration_To_apiserver_TracingConfiguration is an autogenerated conversion function.
+func Convert_v1beta1_TracingConfiguration_To_apiserver_TracingConfiguration(in *TracingConfiguration, out *apiserver.TracingConfiguration, s conversion.Scope) error {
+	return autoConvert_v1beta1_TracingConfiguration_To_apiserver_TracingConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_TracingConfiguration_To_v1beta1_TracingConfiguration(in *apiserver.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
+	out.TracingConfiguration = in.TracingConfiguration
+	return nil
+}
+
+// Convert_apiserver_TracingConfiguration_To_v1beta1_TracingConfiguration is an autogenerated conversion function.
+func Convert_apiserver_TracingConfiguration_To_v1beta1_TracingConfiguration(in *apiserver.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_TracingConfiguration_To_v1beta1_TracingConfiguration(in, out, s)
+}
+
+func autoConvert_v1beta1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error {
+	out.TCP = (*apiserver.TCPTransport)(unsafe.Pointer(in.TCP))
+	out.UDS = (*apiserver.UDSTransport)(unsafe.Pointer(in.UDS))
+	return nil
+}
+
+// Convert_v1beta1_Transport_To_apiserver_Transport is an autogenerated conversion function.
+func Convert_v1beta1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error {
+	return autoConvert_v1beta1_Transport_To_apiserver_Transport(in, out, s)
+}
+
+func autoConvert_apiserver_Transport_To_v1beta1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error {
+	out.TCP = (*TCPTransport)(unsafe.Pointer(in.TCP))
+	out.UDS = (*UDSTransport)(unsafe.Pointer(in.UDS))
+	return nil
+}
+
+// Convert_apiserver_Transport_To_v1beta1_Transport is an autogenerated conversion function.
+func Convert_apiserver_Transport_To_v1beta1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error {
+	return autoConvert_apiserver_Transport_To_v1beta1_Transport(in, out, s)
+}
+
+func autoConvert_v1beta1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error {
+	out.UDSName = in.UDSName
+	return nil
+}
+
+// Convert_v1beta1_UDSTransport_To_apiserver_UDSTransport is an autogenerated conversion function.
+func Convert_v1beta1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error {
+	return autoConvert_v1beta1_UDSTransport_To_apiserver_UDSTransport(in, out, s)
+}
+
+func autoConvert_apiserver_UDSTransport_To_v1beta1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error {
+	out.UDSName = in.UDSName
+	return nil
+}
+
+// Convert_apiserver_UDSTransport_To_v1beta1_UDSTransport is an autogenerated conversion function.
+func Convert_apiserver_UDSTransport_To_v1beta1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error {
+	return autoConvert_apiserver_UDSTransport_To_v1beta1_UDSTransport(in, out, s)
+}
+
+func autoConvert_v1beta1_UserValidationRule_To_apiserver_UserValidationRule(in *UserValidationRule, out *apiserver.UserValidationRule, s conversion.Scope) error {
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_v1beta1_UserValidationRule_To_apiserver_UserValidationRule is an autogenerated conversion function.
+func Convert_v1beta1_UserValidationRule_To_apiserver_UserValidationRule(in *UserValidationRule, out *apiserver.UserValidationRule, s conversion.Scope) error {
+	return autoConvert_v1beta1_UserValidationRule_To_apiserver_UserValidationRule(in, out, s)
+}
+
+func autoConvert_apiserver_UserValidationRule_To_v1beta1_UserValidationRule(in *apiserver.UserValidationRule, out *UserValidationRule, s conversion.Scope) error {
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_apiserver_UserValidationRule_To_v1beta1_UserValidationRule is an autogenerated conversion function.
+func Convert_apiserver_UserValidationRule_To_v1beta1_UserValidationRule(in *apiserver.UserValidationRule, out *UserValidationRule, s conversion.Scope) error {
+	return autoConvert_apiserver_UserValidationRule_To_v1beta1_UserValidationRule(in, out, s)
+}
+
+func autoConvert_v1beta1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error {
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion
+	out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion
+	out.FailurePolicy = in.FailurePolicy
+	if err := Convert_v1beta1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil {
+		return err
+	}
+	out.MatchConditions = *(*[]apiserver.WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions))
+	return nil
+}
+
+// Convert_v1beta1_WebhookConfiguration_To_apiserver_WebhookConfiguration is an autogenerated conversion function.
+func Convert_v1beta1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error {
+	return autoConvert_v1beta1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookConfiguration_To_v1beta1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error {
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion
+	out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion
+	out.FailurePolicy = in.FailurePolicy
+	if err := Convert_apiserver_WebhookConnectionInfo_To_v1beta1_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil {
+		return err
+	}
+	out.MatchConditions = *(*[]WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions))
+	return nil
+}
+
+// Convert_apiserver_WebhookConfiguration_To_v1beta1_WebhookConfiguration is an autogenerated conversion function.
+func Convert_apiserver_WebhookConfiguration_To_v1beta1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookConfiguration_To_v1beta1_WebhookConfiguration(in, out, s)
+}
+
+func autoConvert_v1beta1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error {
+	out.Type = in.Type
+	out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile))
+	return nil
+}
+
+// Convert_v1beta1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo is an autogenerated conversion function.
+func Convert_v1beta1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error {
+	return autoConvert_v1beta1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookConnectionInfo_To_v1beta1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error {
+	out.Type = in.Type
+	out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile))
+	return nil
+}
+
+// Convert_apiserver_WebhookConnectionInfo_To_v1beta1_WebhookConnectionInfo is an autogenerated conversion function.
+func Convert_apiserver_WebhookConnectionInfo_To_v1beta1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookConnectionInfo_To_v1beta1_WebhookConnectionInfo(in, out, s)
+}
+
+func autoConvert_v1beta1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error {
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1beta1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition is an autogenerated conversion function.
+func Convert_v1beta1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error {
+	return autoConvert_v1beta1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookMatchCondition_To_v1beta1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error {
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_WebhookMatchCondition_To_v1beta1_WebhookMatchCondition is an autogenerated conversion function.
+func Convert_apiserver_WebhookMatchCondition_To_v1beta1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookMatchCondition_To_v1beta1_WebhookMatchCondition(in, out, s)
+}
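
The hunk above is the body of the generated RegisterConversions entry point for the v1beta1 apiserver configuration types. Note that the v1beta1-to-internal EgressSelection direction is registered with AddConversionFunc rather than AddGeneratedConversionFunc, which signals a hand-written wrapper elsewhere in the package (presumably to normalize legacy egress selection names). A minimal sketch of exercising the registered pairs through a runtime.Scheme follows; it assumes the group's standard install package is vendored alongside, and the socket path is an illustrative value:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime"
        apiserver "k8s.io/apiserver/pkg/apis/apiserver"
        "k8s.io/apiserver/pkg/apis/apiserver/install"
        "k8s.io/apiserver/pkg/apis/apiserver/v1beta1"
    )

    func main() {
        // install.Install registers the internal and versioned types and,
        // through the package init hooks, the RegisterConversions entry
        // point whose body appears in the hunk above.
        scheme := runtime.NewScheme()
        install.Install(scheme)

        // Round-trip a v1beta1 UDSTransport into the internal type via
        // the registered generated conversion functions.
        in := &v1beta1.UDSTransport{UDSName: "/tmp/example.sock"} // illustrative path
        out := &apiserver.UDSTransport{}
        if err := scheme.Convert(in, out, nil); err != nil {
            panic(err)
        }
        fmt.Println(out.UDSName) // /tmp/example.sock
    }

Calling the generated wrapper directly, e.g. Convert_v1beta1_UDSTransport_To_apiserver_UDSTransport(in, out, nil), also works for this particular pair, since its body never touches the scope argument; a scheme-based round trip is the safer general pattern.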
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..0d78e51a96
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,553 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition.
+func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(AnonymousAuthCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]AnonymousAuthCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig.
+func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AnonymousAuthConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]JWTAuthenticator, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Anonymous != nil {
+		in, out := &in.Anonymous, &out.Anonymous
+		*out = new(AnonymousAuthConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfiguration.
+func (in *AuthenticationConfiguration) DeepCopy() *AuthenticationConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthenticationConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Authorizers != nil {
+		in, out := &in.Authorizers, &out.Authorizers
+		*out = make([]AuthorizerConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration.
+func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthorizationConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) {
+	*out = *in
+	if in.Webhook != nil {
+		in, out := &in.Webhook, &out.Webhook
+		*out = new(WebhookConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration.
+func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthorizerConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimMappings) DeepCopyInto(out *ClaimMappings) {
+	*out = *in
+	in.Username.DeepCopyInto(&out.Username)
+	in.Groups.DeepCopyInto(&out.Groups)
+	out.UID = in.UID
+	if in.Extra != nil {
+		in, out := &in.Extra, &out.Extra
+		*out = make([]ExtraMapping, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimMappings.
+func (in *ClaimMappings) DeepCopy() *ClaimMappings {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimMappings)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimOrExpression) DeepCopyInto(out *ClaimOrExpression) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimOrExpression.
+func (in *ClaimOrExpression) DeepCopy() *ClaimOrExpression {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimOrExpression)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimValidationRule) DeepCopyInto(out *ClaimValidationRule) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimValidationRule.
+func (in *ClaimValidationRule) DeepCopy() *ClaimValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Connection) DeepCopyInto(out *Connection) {
+	*out = *in
+	if in.Transport != nil {
+		in, out := &in.Transport, &out.Transport
+		*out = new(Transport)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection.
+func (in *Connection) DeepCopy() *Connection {
+	if in == nil {
+		return nil
+	}
+	out := new(Connection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressSelection) DeepCopyInto(out *EgressSelection) {
+	*out = *in
+	in.Connection.DeepCopyInto(&out.Connection)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelection.
+func (in *EgressSelection) DeepCopy() *EgressSelection {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressSelection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressSelectorConfiguration) DeepCopyInto(out *EgressSelectorConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]EgressSelection, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelectorConfiguration.
+func (in *EgressSelectorConfiguration) DeepCopy() *EgressSelectorConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressSelectorConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping.
+func (in *ExtraMapping) DeepCopy() *ExtraMapping {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtraMapping)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Issuer) DeepCopyInto(out *Issuer) {
+	*out = *in
+	if in.DiscoveryURL != nil {
+		in, out := &in.DiscoveryURL, &out.DiscoveryURL
+		*out = new(string)
+		**out = **in
+	}
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Issuer.
+func (in *Issuer) DeepCopy() *Issuer {
+	if in == nil {
+		return nil
+	}
+	out := new(Issuer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JWTAuthenticator) DeepCopyInto(out *JWTAuthenticator) {
+	*out = *in
+	in.Issuer.DeepCopyInto(&out.Issuer)
+	if in.ClaimValidationRules != nil {
+		in, out := &in.ClaimValidationRules, &out.ClaimValidationRules
+		*out = make([]ClaimValidationRule, len(*in))
+		copy(*out, *in)
+	}
+	in.ClaimMappings.DeepCopyInto(&out.ClaimMappings)
+	if in.UserValidationRules != nil {
+		in, out := &in.UserValidationRules, &out.UserValidationRules
+		*out = make([]UserValidationRule, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JWTAuthenticator.
+func (in *JWTAuthenticator) DeepCopy() *JWTAuthenticator {
+	if in == nil {
+		return nil
+	}
+	out := new(JWTAuthenticator)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrefixedClaimOrExpression) DeepCopyInto(out *PrefixedClaimOrExpression) {
+	*out = *in
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimOrExpression.
+func (in *PrefixedClaimOrExpression) DeepCopy() *PrefixedClaimOrExpression {
+	if in == nil {
+		return nil
+	}
+	out := new(PrefixedClaimOrExpression)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TCPTransport) DeepCopyInto(out *TCPTransport) {
+	*out = *in
+	if in.TLSConfig != nil {
+		in, out := &in.TLSConfig, &out.TLSConfig
+		*out = new(TLSConfig)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport.
+func (in *TCPTransport) DeepCopy() *TCPTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(TCPTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
+func (in *TLSConfig) DeepCopy() *TLSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.TracingConfiguration.DeepCopyInto(&out.TracingConfiguration)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
+func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TracingConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Transport) DeepCopyInto(out *Transport) {
+	*out = *in
+	if in.TCP != nil {
+		in, out := &in.TCP, &out.TCP
+		*out = new(TCPTransport)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UDS != nil {
+		in, out := &in.UDS, &out.UDS
+		*out = new(UDSTransport)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport.
+func (in *Transport) DeepCopy() *Transport {
+	if in == nil {
+		return nil
+	}
+	out := new(Transport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UDSTransport) DeepCopyInto(out *UDSTransport) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport.
+func (in *UDSTransport) DeepCopy() *UDSTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(UDSTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserValidationRule) DeepCopyInto(out *UserValidationRule) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserValidationRule.
+func (in *UserValidationRule) DeepCopy() *UserValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(UserValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) {
+	*out = *in
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo)
+	if in.MatchConditions != nil {
+		in, out := &in.MatchConditions, &out.MatchConditions
+		*out = make([]WebhookMatchCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration.
+func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) {
+	*out = *in
+	if in.KubeConfigFile != nil {
+		in, out := &in.KubeConfigFile, &out.KubeConfigFile
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo.
+func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConnectionInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition.
+func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookMatchCondition)
+	in.DeepCopyInto(out)
+	return out
+}
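
Editor's note, for orientation only (not part of the vendored file): the `in, out := &in.Field, &out.Field` shadowing seen throughout this generated file is the generator's idiom for cloning pointer fields. A hand-written restatement, assuming it sits alongside the Connection and Transport types above:

```go
// deepCopyConnectionSketch restates what the generated Connection.DeepCopy does.
func deepCopyConnectionSketch(in *Connection) *Connection {
	out := new(Connection)
	*out = *in // shallow copy first: pointer fields still alias in's memory
	if in.Transport != nil {
		// Shadowing in/out with the field addresses keeps the generated
		// body uniform: below this line, in and out denote **Transport.
		in, out := &in.Transport, &out.Transport
		*out = new(Transport)    // fresh allocation breaks the alias
		(*in).DeepCopyInto(*out) // recurse so nested pointers are cloned too
	}
	return out
}
```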
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go
new file mode 100644
index 0000000000..fdbb606a18
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go
@@ -0,0 +1,43 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulter functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	scheme.AddTypeDefaultingFunc(&AuthorizationConfiguration{}, func(obj interface{}) { SetObjectDefaults_AuthorizationConfiguration(obj.(*AuthorizationConfiguration)) })
+	return nil
+}
+
+func SetObjectDefaults_AuthorizationConfiguration(in *AuthorizationConfiguration) {
+	for i := range in.Authorizers {
+		a := &in.Authorizers[i]
+		if a.Webhook != nil {
+			SetDefaults_WebhookConfiguration(a.Webhook)
+		}
+	}
+}
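
Editor's note (not part of the diff): defaulter-gen output like the file above is consumed by registering it on a runtime.Scheme, after which scheme.Default dispatches to the type-specific function. A minimal sketch, assuming the v1beta1 package exposes AuthorizationConfiguration as shown in this vendor tree:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := apiserverv1beta1.RegisterDefaults(scheme); err != nil {
		panic(err)
	}
	cfg := &apiserverv1beta1.AuthorizationConfiguration{}
	// Default looks up SetObjectDefaults_AuthorizationConfiguration, which
	// walks cfg.Authorizers and defaults each webhook entry.
	scheme.Default(cfg)
	fmt.Printf("%+v\n", cfg)
}
```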
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..6439e82208
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go
@@ -0,0 +1,803 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package apiserver
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) {
+	*out = *in
+	if in.Keys != nil {
+		in, out := &in.Keys, &out.Keys
+		*out = make([]Key, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration.
+func (in *AESConfiguration) DeepCopy() *AESConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AESConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Plugins != nil {
+		in, out := &in.Plugins, &out.Plugins
+		*out = make([]AdmissionPluginConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfiguration.
+func (in *AdmissionConfiguration) DeepCopy() *AdmissionConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) {
+	*out = *in
+	if in.Configuration != nil {
+		in, out := &in.Configuration, &out.Configuration
+		*out = new(runtime.Unknown)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfiguration.
+func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionPluginConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition.
+func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(AnonymousAuthCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]AnonymousAuthCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig.
+func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AnonymousAuthConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]JWTAuthenticator, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Anonymous != nil {
+		in, out := &in.Anonymous, &out.Anonymous
+		*out = new(AnonymousAuthConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfiguration.
+func (in *AuthenticationConfiguration) DeepCopy() *AuthenticationConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthenticationConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Authorizers != nil {
+		in, out := &in.Authorizers, &out.Authorizers
+		*out = make([]AuthorizerConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration.
+func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthorizationConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) {
+	*out = *in
+	if in.Webhook != nil {
+		in, out := &in.Webhook, &out.Webhook
+		*out = new(WebhookConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration.
+func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthorizerConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimMappings) DeepCopyInto(out *ClaimMappings) {
+	*out = *in
+	in.Username.DeepCopyInto(&out.Username)
+	in.Groups.DeepCopyInto(&out.Groups)
+	out.UID = in.UID
+	if in.Extra != nil {
+		in, out := &in.Extra, &out.Extra
+		*out = make([]ExtraMapping, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimMappings.
+func (in *ClaimMappings) DeepCopy() *ClaimMappings {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimMappings)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimOrExpression) DeepCopyInto(out *ClaimOrExpression) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimOrExpression.
+func (in *ClaimOrExpression) DeepCopy() *ClaimOrExpression {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimOrExpression)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimValidationRule) DeepCopyInto(out *ClaimValidationRule) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimValidationRule.
+func (in *ClaimValidationRule) DeepCopy() *ClaimValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Connection) DeepCopyInto(out *Connection) {
+	*out = *in
+	if in.Transport != nil {
+		in, out := &in.Transport, &out.Transport
+		*out = new(Transport)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection.
+func (in *Connection) DeepCopy() *Connection {
+	if in == nil {
+		return nil
+	}
+	out := new(Connection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressSelection) DeepCopyInto(out *EgressSelection) {
+	*out = *in
+	in.Connection.DeepCopyInto(&out.Connection)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelection.
+func (in *EgressSelection) DeepCopy() *EgressSelection {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressSelection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressSelectorConfiguration) DeepCopyInto(out *EgressSelectorConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]EgressSelection, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelectorConfiguration.
+func (in *EgressSelectorConfiguration) DeepCopy() *EgressSelectorConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressSelectorConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ResourceConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration.
+func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(EncryptionConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping.
+func (in *ExtraMapping) DeepCopy() *ExtraMapping {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtraMapping)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration.
+func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(IdentityConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Issuer) DeepCopyInto(out *Issuer) {
+	*out = *in
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Issuer.
+func (in *Issuer) DeepCopy() *Issuer {
+	if in == nil {
+		return nil
+	}
+	out := new(Issuer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JWTAuthenticator) DeepCopyInto(out *JWTAuthenticator) {
+	*out = *in
+	in.Issuer.DeepCopyInto(&out.Issuer)
+	if in.ClaimValidationRules != nil {
+		in, out := &in.ClaimValidationRules, &out.ClaimValidationRules
+		*out = make([]ClaimValidationRule, len(*in))
+		copy(*out, *in)
+	}
+	in.ClaimMappings.DeepCopyInto(&out.ClaimMappings)
+	if in.UserValidationRules != nil {
+		in, out := &in.UserValidationRules, &out.UserValidationRules
+		*out = make([]UserValidationRule, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JWTAuthenticator.
+func (in *JWTAuthenticator) DeepCopy() *JWTAuthenticator {
+	if in == nil {
+		return nil
+	}
+	out := new(JWTAuthenticator)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) {
+	*out = *in
+	if in.CacheSize != nil {
+		in, out := &in.CacheSize, &out.CacheSize
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(v1.Duration)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration.
+func (in *KMSConfiguration) DeepCopy() *KMSConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(KMSConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Key) DeepCopyInto(out *Key) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key.
+func (in *Key) DeepCopy() *Key {
+	if in == nil {
+		return nil
+	}
+	out := new(Key)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrefixedClaimOrExpression) DeepCopyInto(out *PrefixedClaimOrExpression) {
+	*out = *in
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimOrExpression.
+func (in *PrefixedClaimOrExpression) DeepCopy() *PrefixedClaimOrExpression {
+	if in == nil {
+		return nil
+	}
+	out := new(PrefixedClaimOrExpression)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) {
+	*out = *in
+	if in.AESGCM != nil {
+		in, out := &in.AESGCM, &out.AESGCM
+		*out = new(AESConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AESCBC != nil {
+		in, out := &in.AESCBC, &out.AESCBC
+		*out = new(AESConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Secretbox != nil {
+		in, out := &in.Secretbox, &out.Secretbox
+		*out = new(SecretboxConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Identity != nil {
+		in, out := &in.Identity, &out.Identity
+		*out = new(IdentityConfiguration)
+		**out = **in
+	}
+	if in.KMS != nil {
+		in, out := &in.KMS, &out.KMS
+		*out = new(KMSConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration.
+func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ProviderConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Providers != nil {
+		in, out := &in.Providers, &out.Providers
+		*out = make([]ProviderConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration.
+func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) {
+	*out = *in
+	if in.Keys != nil {
+		in, out := &in.Keys, &out.Keys
+		*out = make([]Key, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration.
+func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretboxConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TCPTransport) DeepCopyInto(out *TCPTransport) {
+	*out = *in
+	if in.TLSConfig != nil {
+		in, out := &in.TLSConfig, &out.TLSConfig
+		*out = new(TLSConfig)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport.
+func (in *TCPTransport) DeepCopy() *TCPTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(TCPTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
+func (in *TLSConfig) DeepCopy() *TLSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.TracingConfiguration.DeepCopyInto(&out.TracingConfiguration)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
+func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TracingConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Transport) DeepCopyInto(out *Transport) {
+	*out = *in
+	if in.TCP != nil {
+		in, out := &in.TCP, &out.TCP
+		*out = new(TCPTransport)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UDS != nil {
+		in, out := &in.UDS, &out.UDS
+		*out = new(UDSTransport)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport.
+func (in *Transport) DeepCopy() *Transport {
+	if in == nil {
+		return nil
+	}
+	out := new(Transport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UDSTransport) DeepCopyInto(out *UDSTransport) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport.
+func (in *UDSTransport) DeepCopy() *UDSTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(UDSTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserValidationRule) DeepCopyInto(out *UserValidationRule) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserValidationRule.
+func (in *UserValidationRule) DeepCopy() *UserValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(UserValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) {
+	*out = *in
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo)
+	if in.MatchConditions != nil {
+		in, out := &in.MatchConditions, &out.MatchConditions
+		*out = make([]WebhookMatchCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration.
+func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) {
+	*out = *in
+	if in.KubeConfigFile != nil {
+		in, out := &in.KubeConfigFile, &out.KubeConfigFile
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo.
+func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConnectionInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition.
+func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookMatchCondition)
+	in.DeepCopyInto(out)
+	return out
+}
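
Editor's note (not part of the vendored file): the practical guarantee these generated methods provide is that a copy shares no mutable memory with the original. A small sketch, assuming Issuer carries a URL string field as in the upstream internal type:

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/apis/apiserver"
)

func main() {
	orig := &apiserver.AuthenticationConfiguration{
		JWT: []apiserver.JWTAuthenticator{{}},
	}
	cp := orig.DeepCopy()
	// DeepCopyInto allocated a fresh JWT slice for cp, so this write cannot
	// reach back into orig's backing array.
	cp.JWT[0].Issuer.URL = "https://issuer.example" // URL field assumed from upstream
	fmt.Println(orig.JWT[0].Issuer.URL, "|", cp.JWT[0].Issuer.URL)
}
```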
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS
new file mode 100644
index 0000000000..72f9f9c690
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+# approval on api packages bubbles to api-approvers
+reviewers:
+  - sig-auth-audit-approvers
+  - sig-auth-audit-reviewers
+labels:
+  - sig/auth
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go
new file mode 100644
index 0000000000..deda9cbd63
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +groupName=audit.k8s.io
+
+package audit // import "k8s.io/apiserver/pkg/apis/audit"
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go
new file mode 100644
index 0000000000..05fe72c0ff
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+func ordLevel(l Level) int {
+	switch l {
+	case LevelMetadata:
+		return 1
+	case LevelRequest:
+		return 2
+	case LevelRequestResponse:
+		return 3
+	default:
+		return 0
+	}
+}
+
+func (a Level) Less(b Level) bool {
+	return ordLevel(a) < ordLevel(b)
+}
+
+func (a Level) GreaterOrEqual(b Level) bool {
+	return ordLevel(a) >= ordLevel(b)
+}
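
Editor's note (not from the diff): a brief illustration of the ordering these helpers induce, where ordLevel maps None below Metadata, Metadata below Request, and Request below RequestResponse:

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/apis/audit"
)

func main() {
	configured := audit.LevelRequest
	fmt.Println(configured.Less(audit.LevelRequestResponse))    // true
	fmt.Println(configured.GreaterOrEqual(audit.LevelMetadata)) // true: request bodies are logged
	fmt.Println(audit.LevelNone.Less(audit.LevelMetadata))      // true: None sorts lowest
}
```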
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/register.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/register.go
new file mode 100644
index 0000000000..9abf739ae0
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "audit.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Event{},
+		&EventList{},
+		&Policy{},
+		&PolicyList{},
+	)
+	return nil
+}
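
Editor's note (not part of the vendored file): the SchemeBuilder above is consumed like any other group registration. A minimal sketch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/apis/audit"
)

func main() {
	scheme := runtime.NewScheme()
	if err := audit.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Internal types register under the special __internal version.
	gvk := schema.GroupVersionKind{
		Group:   audit.GroupName,
		Version: runtime.APIVersionInternal,
		Kind:    "Policy",
	}
	fmt.Println(scheme.Recognizes(gvk)) // true once addKnownTypes has run
}
```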
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/types.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/types.go
new file mode 100644
index 0000000000..17a398ed8a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/types.go
@@ -0,0 +1,312 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	authnv1 "k8s.io/api/authentication/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// Header keys used by the audit system.
+const (
+	// Header to hold the audit ID as the request is propagated through the serving hierarchy. The
+	// Audit-ID header should be set by the first server to receive the request (e.g. the federation
+	// server or kube-aggregator).
+	//
+	// The audit ID is also returned to the client in an HTTP response header.
+	// The Audit-ID header is not guaranteed to be sent for every request: if kube-apiserver
+	// did not audit the event according to the audit policy, no Audit-ID is returned. Also, for
+	// requests to pods/exec, pods/attach, and pods/proxy, kube-apiserver acts as a proxy and
+	// redirects the request to the kubelet node, so users only see the HTTP headers sent by the
+	// kubelet and no Audit-ID accompanies commands like "kubectl exec" or "kubectl attach".
+	HeaderAuditID = "Audit-ID"
+)
+
+// Level defines the amount of information logged during auditing
+type Level string
+
+// Valid audit levels
+const (
+	// LevelNone disables auditing
+	LevelNone Level = "None"
+	// LevelMetadata provides the basic level of auditing.
+	LevelMetadata Level = "Metadata"
+	// LevelRequest provides Metadata level of auditing, and additionally
+	// logs the request object (does not apply for non-resource requests).
+	LevelRequest Level = "Request"
+	// LevelRequestResponse provides Request level of auditing, and additionally
+	// logs the response object (does not apply for non-resource requests).
+	LevelRequestResponse Level = "RequestResponse"
+)
+
+// Stage defines the stages in request handling during which audit events may be generated.
+type Stage string
+
+// Valid audit stages.
+const (
+	// The stage for events generated as soon as the audit handler receives the request, and before it
+	// is delegated down the handler chain.
+	StageRequestReceived Stage = "RequestReceived"
+	// The stage for events generated once the response headers are sent, but before the response body
+	// is sent. This stage is only generated for long-running requests (e.g. watch).
+	StageResponseStarted Stage = "ResponseStarted"
+	// The stage for events generated once the response body has been completed, and no more bytes
+	// will be sent.
+	StageResponseComplete Stage = "ResponseComplete"
+	// The stage for events generated when a panic occurred.
+	StagePanic Stage = "Panic"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Event captures all the information that can be included in an API audit log.
+type Event struct {
+	metav1.TypeMeta
+
+	// AuditLevel at which event was generated
+	Level Level
+
+	// Unique audit ID, generated for each request.
+	AuditID types.UID
+	// Stage of the request handling when this event instance was generated.
+	Stage Stage
+
+	// RequestURI is the request URI as sent by the client to a server.
+	RequestURI string
+	// Verb is the kubernetes verb associated with the request.
+	// For non-resource requests, this is the lower-cased HTTP method.
+	Verb string
+	// Authenticated user information.
+	User authnv1.UserInfo
+	// Impersonated user information.
+	// +optional
+	ImpersonatedUser *authnv1.UserInfo
+	// Source IPs, from where the request originated and intermediate proxies.
+	// The source IPs are listed from (in order):
+	// 1. X-Forwarded-For request header IPs
+	// 2. X-Real-Ip header, if not present in the X-Forwarded-For list
+	// 3. The remote address for the connection, if it doesn't match the last
+	//    IP in the list up to here (X-Forwarded-For or X-Real-Ip).
+	// Note: All but the last IP can be arbitrarily set by the client.
+	// +optional
+	SourceIPs []string
+	// UserAgent records the user agent string reported by the client.
+	// Note that the UserAgent is provided by the client, and must not be trusted.
+	// +optional
+	UserAgent string
+	// Object reference this request is targeted at.
+	// Does not apply for List-type requests, or non-resource requests.
+	// +optional
+	ObjectRef *ObjectReference
+	// The response status, populated even when the ResponseObject is not a Status type.
+	// For successful responses, this will only include the Code. For non-status type
+	// error responses, this will be auto-populated with the error Message.
+	// +optional
+	ResponseStatus *metav1.Status
+
+	// API object from the request, in JSON format. The RequestObject is recorded as-is in the request
+	// (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or
+	// merging. It is an external versioned object type, and may not be a valid object on its own.
+	// Omitted for non-resource requests.  Only logged at Request Level and higher.
+	// +optional
+	RequestObject *runtime.Unknown
+	// API object returned in the response, in JSON. The ResponseObject is recorded after conversion
+	// to the external type, and serialized as JSON. Omitted for non-resource requests.  Only logged
+	// at Response Level.
+	// +optional
+	ResponseObject *runtime.Unknown
+
+	// Time the request reached the apiserver.
+	RequestReceivedTimestamp metav1.MicroTime
+	// Time the request reached current audit stage.
+	StageTimestamp metav1.MicroTime
+
+	// Annotations is an unstructured key value map stored with an audit event that may be set by
+	// plugins invoked in the request serving chain, including authentication, authorization and
+	// admission plugins. Note that these annotations are for the audit event, and do not correspond
+	// to the metadata.annotations of the submitted object. Keys should uniquely identify the informing
+	// component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values
+	// should be short. Annotations are included in the Metadata level.
+	// +optional
+	Annotations map[string]string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventList is a list of audit Events.
+type EventList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Event
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Policy defines the configuration of audit logging, and the rules for how different request
+// categories are logged.
+type Policy struct {
+	metav1.TypeMeta
+	// ObjectMeta is included for interoperability with API infrastructure.
+	// +optional
+	metav1.ObjectMeta
+
+	// Rules specify the audit Level a request should be recorded at.
+	// A request may match multiple rules, in which case the FIRST matching rule is used.
+	// The default audit level is None, but can be overridden by a catch-all rule at the end of the list.
+	// PolicyRules are strictly ordered.
+	Rules []PolicyRule
+
+	// OmitStages is a list of stages for which no events are created. Note that this can also
+	// be specified per rule, in which case the union of both is omitted.
+	// +optional
+	OmitStages []Stage
+
+	// OmitManagedFields indicates whether to omit the managed fields of the request
+	// and response bodies from being written to the API audit log.
+	// This is used as a global default - a value of 'true' will omit the managed fields,
+	// otherwise the managed fields will be included in the API audit log.
+	// Note that this can also be specified per rule in which case the value specified
+	// in a rule will override the global default.
+	// +optional
+	OmitManagedFields bool
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PolicyList is a list of audit Policies.
+type PolicyList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Policy
+}
+
+// PolicyRule maps requests based on metadata to an audit Level.
+// Requests must match the rules of every field (an intersection of rules).
+type PolicyRule struct {
+	// The Level that requests matching this rule are recorded at.
+	Level Level
+
+	// The users (by authenticated user name) this rule applies to.
+	// An empty list implies every user.
+	// +optional
+	Users []string
+	// The user groups this rule applies to. A user is considered matching
+	// if it is a member of any of the UserGroups.
+	// An empty list implies every user group.
+	// +optional
+	UserGroups []string
+
+	// The verbs that match this rule.
+	// An empty list implies every verb.
+	// +optional
+	Verbs []string
+
+	// Rules can apply to API resources (such as "pods" or "secrets"),
+	// non-resource URL paths (such as "/api"), or neither, but not both.
+	// If neither is specified, the rule is treated as a default for all URLs.
+
+	// Resources that this rule matches. An empty list implies all kinds in all API groups.
+	// +optional
+	Resources []GroupResources
+	// Namespaces that this rule matches.
+	// The empty string "" matches non-namespaced resources.
+	// An empty list implies every namespace.
+	// +optional
+	Namespaces []string
+
+	// NonResourceURLs is a set of URL paths that should be audited.
+	// `*`s are allowed, but only as the full, final step in the path.
+	// Examples:
+	//  `/metrics` - Log requests for apiserver metrics
+	//  `/healthz*` - Log all health checks
+	// +optional
+	NonResourceURLs []string
+
+	// OmitStages is a list of stages for which no events are created. Note that this can also
+	// be specified policy-wide, in which case the union of both is omitted.
+	// An empty list means no restrictions will apply.
+	// +optional
+	OmitStages []Stage
+
+	// OmitManagedFields indicates whether to omit the managed fields of the request
+	// and response bodies from being written to the API audit log.
+	// - a value of 'true' will drop the managed fields from the API audit log
+	// - a value of 'false' indicates that the managed fields should be included
+	//   in the API audit log
+	// Note that the value, if specified in this rule, will override the global default.
+	// If a value is not specified then the global default specified in
+	// Policy.OmitManagedFields will stand.
+	// +optional
+	OmitManagedFields *bool
+}
+
+// GroupResources represents resource kinds in an API group.
+type GroupResources struct {
+	// Group is the name of the API group that contains the resources.
+	// The empty string represents the core API group.
+	// +optional
+	Group string
+	// Resources is a list of resources this rule applies to.
+	//
+	// For example:
+	// - `pods` matches pods.
+	// - `pods/log` matches the log subresource of pods.
+	// - `*` matches all resources and their subresources.
+	// - `pods/*` matches all subresources of pods.
+	// - `*/scale` matches all scale subresources.
+	//
+	// If wildcard is present, the validation rule will ensure resources do not
+	// overlap with each other.
+	//
+	// An empty list implies that all resources and subresources in this API group apply.
+	// +optional
+	Resources []string
+	// ResourceNames is a list of resource instance names that the policy matches.
+	// Using this field requires Resources to be specified.
+	// An empty list implies that every instance of the resource is matched.
+	// +optional
+	ResourceNames []string
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+type ObjectReference struct {
+	// +optional
+	Resource string
+	// +optional
+	Namespace string
+	// +optional
+	Name string
+	// +optional
+	UID types.UID
+	// APIGroup is the name of the API group that contains the referred object.
+	// The empty string represents the core API group.
+	// +optional
+	APIGroup string
+	// APIVersion is the version of the API group that contains the referred object.
+	// +optional
+	APIVersion string
+	// +optional
+	ResourceVersion string
+	// +optional
+	Subresource string
+}
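
Editor's note: to make the first-match semantics documented on Policy.Rules concrete, here is a hand-written sketch (not from the diff) of a policy that logs secrets at Metadata only and everything else with full bodies:

```go
package main

import (
	"k8s.io/apiserver/pkg/apis/audit"
)

func main() {
	p := audit.Policy{
		Rules: []audit.PolicyRule{
			{
				// First match wins: secrets never reach the catch-all below.
				Level:     audit.LevelMetadata,
				Resources: []audit.GroupResources{{Group: "", Resources: []string{"secrets"}}},
			},
			{
				// Catch-all rule; without it the default level is None.
				Level: audit.LevelRequestResponse,
			},
		},
		// Stages omitted here union with any per-rule OmitStages.
		OmitStages: []audit.Stage{audit.StageRequestReceived},
	}
	_ = p
}
```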
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go
new file mode 100644
index 0000000000..d1f180c942
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/audit
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=audit.k8s.io
+
+package v1 // import "k8s.io/apiserver/pkg/apis/audit/v1"
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go
new file mode 100644
index 0000000000..27dab8c09b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go
@@ -0,0 +1,3230 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/apiserver/pkg/apis/audit/v1/generated.proto
+
+package v1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	v1 "k8s.io/api/authentication/v1"
+	v11 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+
+	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *Event) Reset()      { *m = Event{} }
+func (*Event) ProtoMessage() {}
+func (*Event) Descriptor() ([]byte, []int) {
+	return fileDescriptor_62937bb89ca7b6dd, []int{0}
+}
+func (m *Event) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Event) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Event.Merge(m, src)
+}
+func (m *Event) XXX_Size() int {
+	return m.Size()
+}
+func (m *Event) XXX_DiscardUnknown() {
+	xxx_messageInfo_Event.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Event proto.InternalMessageInfo
+
+func (m *EventList) Reset()      { *m = EventList{} }
+func (*EventList) ProtoMessage() {}
+func (*EventList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_62937bb89ca7b6dd, []int{1}
+}
+func (m *EventList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *EventList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventList.Merge(m, src)
+}
+func (m *EventList) XXX_Size() int {
+	return m.Size()
+}
+func (m *EventList) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventList proto.InternalMessageInfo
+
+func (m *GroupResources) Reset()      { *m = GroupResources{} }
+func (*GroupResources) ProtoMessage() {}
+func (*GroupResources) Descriptor() ([]byte, []int) {
+	return fileDescriptor_62937bb89ca7b6dd, []int{2}
+}
+func (m *GroupResources) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *GroupResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *GroupResources) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GroupResources.Merge(m, src)
+}
+func (m *GroupResources) XXX_Size() int {
+	return m.Size()
+}
+func (m *GroupResources) XXX_DiscardUnknown() {
+	xxx_messageInfo_GroupResources.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupResources proto.InternalMessageInfo
+
+func (m *ObjectReference) Reset()      { *m = ObjectReference{} }
+func (*ObjectReference) ProtoMessage() {}
+func (*ObjectReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_62937bb89ca7b6dd, []int{3}
+}
+func (m *ObjectReference) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ObjectReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ObjectReference.Merge(m, src)
+}
+func (m *ObjectReference) XXX_Size() int {
+	return m.Size()
+}
+func (m *ObjectReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_ObjectReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ObjectReference proto.InternalMessageInfo
+
+func (m *Policy) Reset()      { *m = Policy{} }
+func (*Policy) ProtoMessage() {}
+func (*Policy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_62937bb89ca7b6dd, []int{4}
+}
+func (m *Policy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *Policy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Policy.Merge(m, src)
+}
+func (m *Policy) XXX_Size() int {
+	return m.Size()
+}
+func (m *Policy) XXX_DiscardUnknown() {
+	xxx_messageInfo_Policy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Policy proto.InternalMessageInfo
+
+func (m *PolicyList) Reset()      { *m = PolicyList{} }
+func (*PolicyList) ProtoMessage() {}
+func (*PolicyList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_62937bb89ca7b6dd, []int{5}
+}
+func (m *PolicyList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PolicyList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PolicyList.Merge(m, src)
+}
+func (m *PolicyList) XXX_Size() int {
+	return m.Size()
+}
+func (m *PolicyList) XXX_DiscardUnknown() {
+	xxx_messageInfo_PolicyList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PolicyList proto.InternalMessageInfo
+
+func (m *PolicyRule) Reset()      { *m = PolicyRule{} }
+func (*PolicyRule) ProtoMessage() {}
+func (*PolicyRule) Descriptor() ([]byte, []int) {
+	return fileDescriptor_62937bb89ca7b6dd, []int{6}
+}
+func (m *PolicyRule) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *PolicyRule) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PolicyRule.Merge(m, src)
+}
+func (m *PolicyRule) XXX_Size() int {
+	return m.Size()
+}
+func (m *PolicyRule) XXX_DiscardUnknown() {
+	xxx_messageInfo_PolicyRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PolicyRule proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*Event)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.Event")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.Event.AnnotationsEntry")
+	proto.RegisterType((*EventList)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.EventList")
+	proto.RegisterType((*GroupResources)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.GroupResources")
+	proto.RegisterType((*ObjectReference)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.ObjectReference")
+	proto.RegisterType((*Policy)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.Policy")
+	proto.RegisterType((*PolicyList)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.PolicyList")
+	proto.RegisterType((*PolicyRule)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.PolicyRule")
+}
+
+func init() {
+	proto.RegisterFile("k8s.io/apiserver/pkg/apis/audit/v1/generated.proto", fileDescriptor_62937bb89ca7b6dd)
+}
+
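+// fileDescriptor_62937bb89ca7b6dd is the gzipped FileDescriptorProto for
+// k8s.io/apiserver/pkg/apis/audit/v1/generated.proto, registered by the
+// second init above; the byte literal is emitted verbatim by the generator
+// and is not meant to be read or edited by hand.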
+var fileDescriptor_62937bb89ca7b6dd = []byte{
+	// 1275 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x6f, 0x1b, 0xd5,
+	0x13, 0xcf, 0xc6, 0x71, 0x63, 0x8f, 0x1b, 0xc7, 0x79, 0xed, 0xf7, 0xdb, 0x25, 0x07, 0xdb, 0x18,
+	0x09, 0x05, 0x08, 0xeb, 0xd6, 0x14, 0x5a, 0x55, 0x02, 0xc9, 0xa6, 0xa5, 0xb5, 0xd4, 0xa6, 0xd1,
+	0x33, 0xee, 0x01, 0x71, 0xe8, 0x7a, 0x3d, 0xb5, 0x97, 0xd8, 0xbb, 0xdb, 0x7d, 0x6f, 0x8d, 0x72,
+	0xe3, 0x1f, 0x40, 0xe2, 0xce, 0x7f, 0xc1, 0x0d, 0x71, 0xe2, 0x96, 0x63, 0x8f, 0x3d, 0x59, 0xc4,
+	0xf0, 0x57, 0xe4, 0x80, 0xd0, 0x7b, 0xfb, 0xf6, 0x87, 0x9d, 0x58, 0x71, 0x38, 0x70, 0xf3, 0x9b,
+	0xf9, 0x7c, 0x3e, 0x33, 0x3b, 0x3b, 0x33, 0x6f, 0x0d, 0x8d, 0xa3, 0xfb, 0xcc, 0xb0, 0xdd, 0xba,
+	0xe9, 0xd9, 0x0c, 0xfd, 0x09, 0xfa, 0x75, 0xef, 0x68, 0x20, 0x4f, 0x75, 0x33, 0xe8, 0xdb, 0xbc,
+	0x3e, 0xb9, 0x53, 0x1f, 0xa0, 0x83, 0xbe, 0xc9, 0xb1, 0x6f, 0x78, 0xbe, 0xcb, 0x5d, 0x52, 0x0b,
+	0x39, 0x46, 0xcc, 0x31, 0xbc, 0xa3, 0x81, 0x3c, 0x19, 0x92, 0x63, 0x4c, 0xee, 0xec, 0x7e, 0x3c,
+	0xb0, 0xf9, 0x30, 0xe8, 0x19, 0x96, 0x3b, 0xae, 0x0f, 0xdc, 0x81, 0x5b, 0x97, 0xd4, 0x5e, 0xf0,
+	0x4a, 0x9e, 0xe4, 0x41, 0xfe, 0x0a, 0x25, 0x77, 0xf7, 0x93, 0x34, 0xea, 0x66, 0xc0, 0x87, 0xe8,
+	0x70, 0xdb, 0x32, 0xb9, 0xed, 0x3a, 0x17, 0x24, 0xb0, 0x7b, 0x37, 0x41, 0x8f, 0x4d, 0x6b, 0x68,
+	0x3b, 0xe8, 0x1f, 0x27, 0x79, 0x8f, 0x91, 0x9b, 0x17, 0xb1, 0xea, 0xcb, 0x58, 0x7e, 0xe0, 0x70,
+	0x7b, 0x8c, 0xe7, 0x08, 0x9f, 0x5d, 0x46, 0x60, 0xd6, 0x10, 0xc7, 0xe6, 0x22, 0xaf, 0xf6, 0x17,
+	0x40, 0xf6, 0xd1, 0x04, 0x1d, 0x4e, 0xf6, 0x21, 0x3b, 0xc2, 0x09, 0x8e, 0x74, 0xad, 0xaa, 0xed,
+	0xe5, 0x5b, 0xff, 0x3f, 0x99, 0x56, 0xd6, 0x66, 0xd3, 0x4a, 0xf6, 0xa9, 0x30, 0x9e, 0x45, 0x3f,
+	0x68, 0x08, 0x22, 0x07, 0xb0, 0x29, 0xeb, 0xd7, 0x7e, 0xa8, 0xaf, 0x4b, 0xfc, 0x5d, 0x85, 0xdf,
+	0x6c, 0x86, 0xe6, 0xb3, 0x69, 0xe5, 0xdd, 0x65, 0x39, 0xf1, 0x63, 0x0f, 0x99, 0xd1, 0x6d, 0x3f,
+	0xa4, 0x91, 0x88, 0x88, 0xce, 0xb8, 0x39, 0x40, 0x3d, 0x33, 0x1f, 0xbd, 0x23, 0x8c, 0x67, 0xd1,
+	0x0f, 0x1a, 0x82, 0x48, 0x03, 0xc0, 0xc7, 0xd7, 0x01, 0x32, 0xde, 0xa5, 0x6d, 0x7d, 0x43, 0x52,
+	0x88, 0xa2, 0x00, 0x8d, 0x3d, 0x34, 0x85, 0x22, 0x55, 0xd8, 0x98, 0xa0, 0xdf, 0xd3, 0xb3, 0x12,
+	0x7d, 0x5d, 0xa1, 0x37, 0x5e, 0xa0, 0xdf, 0xa3, 0xd2, 0x43, 0x9e, 0xc0, 0x46, 0xc0, 0xd0, 0xd7,
+	0xaf, 0x55, 0xb5, 0xbd, 0x42, 0xe3, 0x7d, 0x23, 0x69, 0x1d, 0x63, 0xfe, 0x3d, 0x1b, 0x93, 0x3b,
+	0x46, 0x97, 0xa1, 0xdf, 0x76, 0x5e, 0xb9, 0x89, 0x92, 0xb0, 0x50, 0xa9, 0x40, 0x86, 0x50, 0xb2,
+	0xc7, 0x1e, 0xfa, 0xcc, 0x75, 0x44, 0xad, 0x85, 0x47, 0xdf, 0xbc, 0x92, 0xea, 0xcd, 0xd9, 0xb4,
+	0x52, 0x6a, 0x2f, 0x68, 0xd0, 0x73, 0xaa, 0xe4, 0x23, 0xc8, 0x33, 0x37, 0xf0, 0x2d, 0x6c, 0x1f,
+	0x32, 0x3d, 0x57, 0xcd, 0xec, 0xe5, 0x5b, 0x5b, 0xb3, 0x69, 0x25, 0xdf, 0x89, 0x8c, 0x34, 0xf1,
+	0x93, 0x3a, 0xe4, 0x45, 0x7a, 0xcd, 0x01, 0x3a, 0x5c, 0x2f, 0xc9, 0x3a, 0xec, 0xa8, 0xec, 0xf3,
+	0xdd, 0xc8, 0x41, 0x13, 0x0c, 0x79, 0x09, 0x79, 0xb7, 0xf7, 0x1d, 0x5a, 0x9c, 0xe2, 0x2b, 0x3d,
+	0x2f, 0x1f, 0xe0, 0x13, 0xe3, 0xf2, 0x89, 0x32, 0x9e, 0x47, 0x24, 0xf4, 0xd1, 0xb1, 0x30, 0x4c,
+	0x29, 0x36, 0xd2, 0x44, 0x94, 0x0c, 0xa1, 0xe8, 0x23, 0xf3, 0x5c, 0x87, 0x61, 0x87, 0x9b, 0x3c,
+	0x60, 0x3a, 0xc8, 0x30, 0xfb, 0xa9, 0x30, 0x71, 0xf3, 0x24, 0x91, 0xc4, 0xdc, 0x88, 0x40, 0x21,
+	0xa7, 0x45, 0x66, 0xd3, 0x4a, 0x91, 0xce, 0xe9, 0xd0, 0x05, 0x5d, 0x62, 0xc2, 0x96, 0xea, 0x86,
+	0x30, 0x11, 0xbd, 0x20, 0x03, 0xed, 0x2d, 0x0d, 0xa4, 0x26, 0xc7, 0xe8, 0x3a, 0x47, 0x8e, 0xfb,
+	0xbd, 0xd3, 0xda, 0x99, 0x4d, 0x2b, 0x5b, 0x34, 0x2d, 0x41, 0xe7, 0x15, 0x49, 0x3f, 0x79, 0x18,
+	0x15, 0xe3, 0xfa, 0x15, 0x63, 0xcc, 0x3d, 0x88, 0x0a, 0xb2, 0xa0, 0x49, 0x7e, 0xd4, 0x40, 0x57,
+	0x71, 0x29, 0x5a, 0x68, 0x4f, 0xb0, 0xff, 0xb5, 0x3d, 0x46, 0xc6, 0xcd, 0xb1, 0xa7, 0x6f, 0xc9,
+	0x80, 0xf5, 0xd5, 0xaa, 0xf7, 0xcc, 0xb6, 0x7c, 0x57, 0x70, 0x5b, 0x55, 0xd5, 0x06, 0x3a, 0x5d,
+	0x22, 0x4c, 0x97, 0x86, 0x24, 0x2e, 0x14, 0xe5, 0x54, 0x26, 0x49, 0x14, 0xff, 0x5d, 0x12, 0xd1,
+	0xd0, 0x17, 0x3b, 0x73, 0x72, 0x74, 0x41, 0x9e, 0xbc, 0x86, 0x82, 0xe9, 0x38, 0x2e, 0x97, 0x53,
+	0xc3, 0xf4, 0xed, 0x6a, 0x66, 0xaf, 0xd0, 0x78, 0xb0, 0x4a, 0x5f, 0xca, 0x4d, 0x67, 0x34, 0x13,
+	0xf2, 0x23, 0x87, 0xfb, 0xc7, 0xad, 0x1b, 0x2a, 0x70, 0x21, 0xe5, 0xa1, 0xe9, 0x18, 0xbb, 0x5f,
+	0x40, 0x69, 0x91, 0x45, 0x4a, 0x90, 0x39, 0xc2, 0xe3, 0x70, 0x5d, 0x52, 0xf1, 0x93, 0xdc, 0x84,
+	0xec, 0xc4, 0x1c, 0x05, 0x18, 0xae, 0x44, 0x1a, 0x1e, 0x1e, 0xac, 0xdf, 0xd7, 0x6a, 0xbf, 0x6a,
+	0x90, 0x97, 0xc1, 0x9f, 0xda, 0x8c, 0x93, 0x6f, 0x21, 0x27, 0x9e, 0xbe, 0x6f, 0x72, 0x53, 0xd2,
+	0x0b, 0x0d, 0x63, 0xb5, 0x5a, 0x09, 0xf6, 0x33, 0xe4, 0x66, 0xab, 0xa4, 0x32, 0xce, 0x45, 0x16,
+	0x1a, 0x2b, 0x92, 0x03, 0xc8, 0xda, 0x1c, 0xc7, 0x4c, 0x5f, 0x97, 0x85, 0xf9, 0x60, 0xe5, 0xc2,
+	0xb4, 0xb6, 0xa2, 0xad, 0xdb, 0x16, 0x7c, 0x1a, 0xca, 0xd4, 0x7e, 0xd6, 0xa0, 0xf8, 0xd8, 0x77,
+	0x03, 0x8f, 0x62, 0xb8, 0x4a, 0x18, 0x79, 0x0f, 0xb2, 0x03, 0x61, 0x51, 0x77, 0x45, 0xcc, 0x0b,
+	0x61, 0xa1, 0x4f, 0xac, 0x26, 0x3f, 0x62, 0xc8, 0x5c, 0xd4, 0x6a, 0x8a, 0x65, 0x68, 0xe2, 0x27,
+	0xf7, 0xc4, 0x74, 0x86, 0x87, 0x03, 0x73, 0x8c, 0x4c, 0xcf, 0x48, 0x82, 0x9a, 0xb9, 0x94, 0x83,
+	0xce, 0xe3, 0x6a, 0xbf, 0x64, 0x60, 0x7b, 0x61, 0xdd, 0x90, 0x7d, 0xc8, 0x45, 0x20, 0x95, 0x61,
+	0x5c, 0xaf, 0x48, 0x8b, 0xc6, 0x08, 0xb1, 0x15, 0x1d, 0x21, 0xe5, 0x99, 0x96, 0x7a, 0x73, 0xc9,
+	0x56, 0x3c, 0x88, 0x1c, 0x34, 0xc1, 0x88, 0x9b, 0x44, 0x1c, 0xd4, 0x55, 0x15, 0xef, 0x7f, 0x81,
+	0xa5, 0xd2, 0x43, 0x5a, 0x90, 0x09, 0xec, 0xbe, 0xba, 0x98, 0x6e, 0x2b, 0x40, 0xa6, 0xbb, 0xea,
+	0xad, 0x28, 0xc8, 0xe2, 0x21, 0x4c, 0xcf, 0x96, 0x15, 0x55, 0x77, 0x56, 0xfc, 0x10, 0xcd, 0xc3,
+	0x76, 0x58, 0xe9, 0x18, 0x21, 0x6e, 0x44, 0xd3, 0xb3, 0x5f, 0xa0, 0xcf, 0x6c, 0xd7, 0x91, 0x37,
+	0x58, 0xea, 0x46, 0x6c, 0x1e, 0xb6, 0x95, 0x87, 0xa6, 0x50, 0xa4, 0x09, 0xdb, 0x51, 0x11, 0x22,
+	0xe2, 0xa6, 0x24, 0xde, 0x52, 0xc4, 0x6d, 0x3a, 0xef, 0xa6, 0x8b, 0x78, 0xf2, 0x29, 0x14, 0x58,
+	0xd0, 0x8b, 0x8b, 0x9d, 0x93, 0xf4, 0x78, 0x9c, 0x3a, 0x89, 0x8b, 0xa6, 0x71, 0xb5, 0xdf, 0xd7,
+	0xe1, 0xda, 0xa1, 0x3b, 0xb2, 0xad, 0x63, 0xf2, 0xf2, 0xdc, 0x2c, 0xdc, 0x5e, 0x6d, 0x16, 0xc2,
+	0x97, 0x2e, 0xa7, 0x21, 0x7e, 0xd0, 0xc4, 0x96, 0x9a, 0x87, 0x0e, 0x64, 0xfd, 0x60, 0x84, 0xd1,
+	0x3c, 0x18, 0xab, 0xcc, 0x43, 0x98, 0x1c, 0x0d, 0x46, 0x98, 0x34, 0xb7, 0x38, 0x31, 0x1a, 0x6a,
+	0x91, 0x7b, 0x00, 0xee, 0xd8, 0xe6, 0x72, 0x53, 0x45, 0xcd, 0x7a, 0x4b, 0xa6, 0x10, 0x5b, 0x93,
+	0xaf, 0x96, 0x14, 0x94, 0x3c, 0x86, 0x1d, 0x71, 0x7a, 0x66, 0x3a, 0xe6, 0x00, 0xfb, 0x5f, 0xd9,
+	0x38, 0xea, 0x33, 0xd9, 0x28, 0xb9, 0xd6, 0x3b, 0x2a, 0xd2, 0xce, 0xf3, 0x45, 0x00, 0x3d, 0xcf,
+	0xa9, 0xfd, 0xa6, 0x01, 0x84, 0x69, 0xfe, 0x07, 0x3b, 0xe5, 0xf9, 0xfc, 0x4e, 0xf9, 0x70, 0xf5,
+	0x1a, 0x2e, 0x59, 0x2a, 0x7f, 0x67, 0xa2, 0xec, 0x45, 0x59, 0xaf, 0xf8, 0xf1, 0x59, 0x81, 0xac,
+	0xf8, 0x46, 0x89, 0xb6, 0x4a, 0x5e, 0x20, 0xc5, 0xf7, 0x0b, 0xa3, 0xa1, 0x9d, 0x18, 0x00, 0xe2,
+	0x87, 0x1c, 0x8d, 0xe8, 0xed, 0x14, 0xc5, 0xdb, 0xe9, 0xc6, 0x56, 0x9a, 0x42, 0x08, 0x41, 0xf1,
+	0x05, 0x28, 0x5e, 0x44, 0x2c, 0x28, 0x3e, 0x0c, 0x19, 0x0d, 0xed, 0xc4, 0x4a, 0xef, 0xb2, 0xac,
+	0xac, 0x41, 0x63, 0x95, 0x1a, 0xcc, 0xef, 0xcd, 0x64, 0xaf, 0x5c, 0xb8, 0x03, 0x0d, 0x80, 0x78,
+	0xc9, 0x30, 0xfd, 0x5a, 0x92, 0x75, 0xbc, 0x85, 0x18, 0x4d, 0x21, 0xc8, 0xe7, 0xb0, 0xed, 0xb8,
+	0x4e, 0x24, 0xd5, 0xa5, 0x4f, 0x99, 0xbe, 0x29, 0x49, 0x37, 0xc4, 0xec, 0x1e, 0xcc, 0xbb, 0xe8,
+	0x22, 0x76, 0xa1, 0x85, 0x73, 0xab, 0xb7, 0xf0, 0x97, 0x17, 0xb5, 0x70, 0x5e, 0xb6, 0xf0, 0xff,
+	0x56, 0x6d, 0xdf, 0xd6, 0x93, 0x93, 0xd3, 0xf2, 0xda, 0x9b, 0xd3, 0xf2, 0xda, 0xdb, 0xd3, 0xf2,
+	0xda, 0x0f, 0xb3, 0xb2, 0x76, 0x32, 0x2b, 0x6b, 0x6f, 0x66, 0x65, 0xed, 0xed, 0xac, 0xac, 0xfd,
+	0x31, 0x2b, 0x6b, 0x3f, 0xfd, 0x59, 0x5e, 0xfb, 0xa6, 0x76, 0xf9, 0x5f, 0xbe, 0x7f, 0x02, 0x00,
+	0x00, 0xff, 0xff, 0x81, 0x06, 0x4f, 0x58, 0x17, 0x0e, 0x00, 0x00,
+}
+
+func (m *Event) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Event) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.UserAgent)
+	copy(dAtA[i:], m.UserAgent)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserAgent)))
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x82
+	if len(m.Annotations) > 0 {
+		keysForAnnotations := make([]string, 0, len(m.Annotations))
+		for k := range m.Annotations {
+			keysForAnnotations = append(keysForAnnotations, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+		for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Annotations[string(keysForAnnotations[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAnnotations[iNdEx])
+			copy(dAtA[i:], keysForAnnotations[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x7a
+		}
+	}
+	{
+		size, err := m.StageTimestamp.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x72
+	{
+		size, err := m.RequestReceivedTimestamp.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x6a
+	if m.ResponseObject != nil {
+		{
+			size, err := m.ResponseObject.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x62
+	}
+	if m.RequestObject != nil {
+		{
+			size, err := m.RequestObject.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
+	if m.ResponseStatus != nil {
+		{
+			size, err := m.ResponseStatus.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.ObjectRef != nil {
+		{
+			size, err := m.ObjectRef.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if len(m.SourceIPs) > 0 {
+		for iNdEx := len(m.SourceIPs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.SourceIPs[iNdEx])
+			copy(dAtA[i:], m.SourceIPs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.SourceIPs[iNdEx])))
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if m.ImpersonatedUser != nil {
+		{
+			size, err := m.ImpersonatedUser.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	{
+		size, err := m.User.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Verb)
+	copy(dAtA[i:], m.Verb)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.RequestURI)
+	copy(dAtA[i:], m.RequestURI)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestURI)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Stage)
+	copy(dAtA[i:], m.Stage)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Stage)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.AuditID)
+	copy(dAtA[i:], m.AuditID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuditID)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Level)
+	copy(dAtA[i:], m.Level)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
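+// Note: every MarshalToSizedBuffer in this file fills dAtA back to front.
+// Each field's payload is copied first, then its varint length, then its tag,
+// where a tag encodes (field_number << 3) | wire_type. That is why UserAgent
+// (field 16, the only field here needing a two-byte tag) appears in the code
+// as 0x1 then 0x82: written in reverse, it lands in the buffer as 0x82 0x01.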
+
+func (m *EventList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EventList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GroupResources) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GroupResources) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GroupResources) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ResourceNames) > 0 {
+		for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ResourceNames[iNdEx])
+			copy(dAtA[i:], m.ResourceNames[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Resources) > 0 {
+		for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Resources[iNdEx])
+			copy(dAtA[i:], m.Resources[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Group)
+	copy(dAtA[i:], m.Group)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ObjectReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Subresource)
+	copy(dAtA[i:], m.Subresource)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource)))
+	i--
+	dAtA[i] = 0x42
+	i -= len(m.ResourceVersion)
+	copy(dAtA[i:], m.ResourceVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.APIVersion)
+	copy(dAtA[i:], m.APIVersion)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.APIGroup)
+	copy(dAtA[i:], m.APIGroup)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Resource)
+	copy(dAtA[i:], m.Resource)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Policy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Policy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
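+	// A bool is encoded as a one-byte varint (0 or 1) under tag 0x20,
+	// i.e. field 4 with wire type 0.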
+	i--
+	if m.OmitManagedFields {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x20
+	if len(m.OmitStages) > 0 {
+		for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.OmitStages[iNdEx])
+			copy(dAtA[i:], m.OmitStages[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.OmitStages[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Rules) > 0 {
+		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PolicyList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PolicyList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *PolicyRule) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.OmitManagedFields != nil {
+		i--
+		if *m.OmitManagedFields {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x48
+	}
+	if len(m.OmitStages) > 0 {
+		for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.OmitStages[iNdEx])
+			copy(dAtA[i:], m.OmitStages[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.OmitStages[iNdEx])))
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if len(m.NonResourceURLs) > 0 {
+		for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.NonResourceURLs[iNdEx])
+			copy(dAtA[i:], m.NonResourceURLs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx])))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if len(m.Namespaces) > 0 {
+		for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Namespaces[iNdEx])
+			copy(dAtA[i:], m.Namespaces[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx])))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if len(m.Resources) > 0 {
+		for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if len(m.Verbs) > 0 {
+		for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Verbs[iNdEx])
+			copy(dAtA[i:], m.Verbs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.UserGroups) > 0 {
+		for iNdEx := len(m.UserGroups) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.UserGroups[iNdEx])
+			copy(dAtA[i:], m.UserGroups[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserGroups[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Users) > 0 {
+		for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Users[iNdEx])
+			copy(dAtA[i:], m.Users[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Users[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Level)
+	copy(dAtA[i:], m.Level)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
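+// encodeVarintGenerated writes v as a little-endian base-128 varint in the
+// bytes immediately before offset and returns the index of its first byte;
+// for example, 300 encodes as the two bytes 0xAC 0x02.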
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
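+
+// The Size methods compute the exact encoded length Marshal will produce:
+// per field, the tag (one byte for fields 1-15, two from field 16 on, which
+// is why UserAgent contributes "2 +"), the length varint for
+// length-delimited fields, and the payload itself.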
+func (m *Event) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Level)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.AuditID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Stage)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.RequestURI)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Verb)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.User.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ImpersonatedUser != nil {
+		l = m.ImpersonatedUser.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.SourceIPs) > 0 {
+		for _, s := range m.SourceIPs {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.ObjectRef != nil {
+		l = m.ObjectRef.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ResponseStatus != nil {
+		l = m.ResponseStatus.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RequestObject != nil {
+		l = m.RequestObject.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ResponseObject != nil {
+		l = m.ResponseObject.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = m.RequestReceivedTimestamp.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.StageTimestamp.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Annotations) > 0 {
+		for k, v := range m.Annotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	l = len(m.UserAgent)
+	n += 2 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *EventList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *GroupResources) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Group)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Resources) > 0 {
+		for _, s := range m.Resources {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.ResourceNames) > 0 {
+		for _, s := range m.ResourceNames {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ObjectReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Resource)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.APIGroup)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.APIVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ResourceVersion)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Subresource)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Policy) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Rules) > 0 {
+		for _, e := range m.Rules {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.OmitStages) > 0 {
+		for _, s := range m.OmitStages {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	n += 2
+	return n
+}
+
+func (m *PolicyList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PolicyRule) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Level)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Users) > 0 {
+		for _, s := range m.Users {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.UserGroups) > 0 {
+		for _, s := range m.UserGroups {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Verbs) > 0 {
+		for _, s := range m.Verbs {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Resources) > 0 {
+		for _, e := range m.Resources {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Namespaces) > 0 {
+		for _, s := range m.Namespaces {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.NonResourceURLs) > 0 {
+		for _, s := range m.NonResourceURLs {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.OmitStages) > 0 {
+		for _, s := range m.OmitStages {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.OmitManagedFields != nil {
+		n += 2
+	}
+	return n
+}
+
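+// sovGenerated is the size-of-varint helper: (bits(x|1) + 6) / 7 rounds the
+// bit length up to whole 7-bit groups, and the x|1 ensures a value of zero
+// still costs one byte. sozGenerated is the zigzag (signed) variant.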
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
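+
+// The String methods below build a human-readable dump for debugging and
+// test output. The strings.Replace calls re-qualify embedded type names with
+// their import aliases (v1, v11, runtime); some of them, such as replacing
+// "Event" with "Event", are no-ops the generator emits uniformly.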
+func (this *Event) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForAnnotations := make([]string, 0, len(this.Annotations))
+	for k := range this.Annotations {
+		keysForAnnotations = append(keysForAnnotations, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+	mapStringForAnnotations := "map[string]string{"
+	for _, k := range keysForAnnotations {
+		mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+	}
+	mapStringForAnnotations += "}"
+	s := strings.Join([]string{`&Event{`,
+		`Level:` + fmt.Sprintf("%v", this.Level) + `,`,
+		`AuditID:` + fmt.Sprintf("%v", this.AuditID) + `,`,
+		`Stage:` + fmt.Sprintf("%v", this.Stage) + `,`,
+		`RequestURI:` + fmt.Sprintf("%v", this.RequestURI) + `,`,
+		`Verb:` + fmt.Sprintf("%v", this.Verb) + `,`,
+		`User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "UserInfo", "v1.UserInfo", 1), `&`, ``, 1) + `,`,
+		`ImpersonatedUser:` + strings.Replace(fmt.Sprintf("%v", this.ImpersonatedUser), "UserInfo", "v1.UserInfo", 1) + `,`,
+		`SourceIPs:` + fmt.Sprintf("%v", this.SourceIPs) + `,`,
+		`ObjectRef:` + strings.Replace(this.ObjectRef.String(), "ObjectReference", "ObjectReference", 1) + `,`,
+		`ResponseStatus:` + strings.Replace(fmt.Sprintf("%v", this.ResponseStatus), "Status", "v11.Status", 1) + `,`,
+		`RequestObject:` + strings.Replace(fmt.Sprintf("%v", this.RequestObject), "Unknown", "runtime.Unknown", 1) + `,`,
+		`ResponseObject:` + strings.Replace(fmt.Sprintf("%v", this.ResponseObject), "Unknown", "runtime.Unknown", 1) + `,`,
+		`RequestReceivedTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequestReceivedTimestamp), "MicroTime", "v11.MicroTime", 1), `&`, ``, 1) + `,`,
+		`StageTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StageTimestamp), "MicroTime", "v11.MicroTime", 1), `&`, ``, 1) + `,`,
+		`Annotations:` + mapStringForAnnotations + `,`,
+		`UserAgent:` + fmt.Sprintf("%v", this.UserAgent) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *EventList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]Event{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&EventList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GroupResources) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GroupResources{`,
+		`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+		`Resources:` + fmt.Sprintf("%v", this.Resources) + `,`,
+		`ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ObjectReference) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ObjectReference{`,
+		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`,
+		`APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`,
+		`ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
+		`Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Policy) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForRules := "[]PolicyRule{"
+	for _, f := range this.Rules {
+		repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForRules += "}"
+	s := strings.Join([]string{`&Policy{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Rules:` + repeatedStringForRules + `,`,
+		`OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`,
+		`OmitManagedFields:` + fmt.Sprintf("%v", this.OmitManagedFields) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PolicyList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]Policy{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Policy", "Policy", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&PolicyList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PolicyRule) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForResources := "[]GroupResources{"
+	for _, f := range this.Resources {
+		repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "GroupResources", "GroupResources", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForResources += "}"
+	s := strings.Join([]string{`&PolicyRule{`,
+		`Level:` + fmt.Sprintf("%v", this.Level) + `,`,
+		`Users:` + fmt.Sprintf("%v", this.Users) + `,`,
+		`UserGroups:` + fmt.Sprintf("%v", this.UserGroups) + `,`,
+		`Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`,
+		`Resources:` + repeatedStringForResources + `,`,
+		`Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`,
+		`NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`,
+		`OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`,
+		`OmitManagedFields:` + valueToStringGenerated(this.OmitManagedFields) + `,`,
+		`}`,
+	}, "")
+	return s
+}
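+
+// valueToStringGenerated renders an optional (pointer) field for the String
+// methods above, printing "nil" when the pointer is unset.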
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
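+
+// Unmarshal decodes the wire format in a single pass: each iteration reads a
+// varint key, splits it into field number (key >> 3) and wire type
+// (key & 0x7), and either decodes a known field or, in the default case,
+// calls skipGenerated to step over fields this version does not recognize,
+// keeping the decoder forward compatible.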
+func (m *Event) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Event: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Level = Level(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuditID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.AuditID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stage = Stage(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestURI", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RequestURI = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Verb = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImpersonatedUser", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ImpersonatedUser == nil {
+				m.ImpersonatedUser = &v1.UserInfo{}
+			}
+			if err := m.ImpersonatedUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SourceIPs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SourceIPs = append(m.SourceIPs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ObjectRef == nil {
+				m.ObjectRef = &ObjectReference{}
+			}
+			if err := m.ObjectRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponseStatus", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ResponseStatus == nil {
+				m.ResponseStatus = &v11.Status{}
+			}
+			if err := m.ResponseStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestObject", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RequestObject == nil {
+				m.RequestObject = &runtime.Unknown{}
+			}
+			if err := m.RequestObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponseObject", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ResponseObject == nil {
+				m.ResponseObject = &runtime.Unknown{}
+			}
+			if err := m.ResponseObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestReceivedTimestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.RequestReceivedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StageTimestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.StageTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
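+		// Map fields arrive on the wire as repeated Entry messages
+		// (field 1 = key, field 2 = value); the loop below decodes one
+		// entry per outer iteration and inserts it into m.Annotations.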
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Annotations == nil {
+				m.Annotations = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Annotations[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 16:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UserAgent", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UserAgent = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EventList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EventList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, Event{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
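+
+// The remaining Unmarshal methods follow the same skeleton as Event.Unmarshal
+// above, differing only in the per-field switch.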
+func (m *GroupResources) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GroupResources: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GroupResources: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Group = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ObjectReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Resource = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.APIGroup = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.APIVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Subresource = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Policy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Policy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Rules = append(m.Rules, PolicyRule{})
+			if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OmitManagedFields", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.OmitManagedFields = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PolicyList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PolicyList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, Policy{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PolicyRule) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Level = Level(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Users = append(m.Users, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UserGroups", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UserGroups = append(m.UserGroups, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Resources = append(m.Resources, GroupResources{})
+			if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OmitManagedFields", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.OmitManagedFields = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
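
The generated Unmarshal methods above all inline the same primitive: decoding a
protobuf base-128 varint, where each byte contributes its low seven bits and a
set high bit means another byte follows. The field tag read at the top of each
loop is itself a varint, split into fieldNum (tag >> 3) and wireType (tag & 0x7),
which is exactly what skipGenerated uses to step over unknown fields. A minimal
standalone sketch of that decoding step (decodeVarint is a hypothetical helper
for illustration, not part of the generated file):

package main

import (
	"fmt"
	"io"
)

// decodeVarint reads one base-128 varint from dAtA, returning the decoded
// value and the number of bytes consumed, mirroring the inlined loops above.
func decodeVarint(dAtA []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflows uint64")
		}
		if n >= len(dAtA) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: final byte
			return v, n, nil
		}
	}
}

func main() {
	// 0xAC 0x02 encodes 300: 0x2C (44) plus 0x02 shifted left 7 bits (256).
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}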
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto
new file mode 100644
index 0000000000..032c586913
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto
@@ -0,0 +1,287 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package k8s.io.apiserver.pkg.apis.audit.v1;
+
+import "k8s.io/api/authentication/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/apiserver/pkg/apis/audit/v1";
+
+// Event captures all the information that can be included in an API audit log.
+message Event {
+  // AuditLevel at which event was generated
+  optional string level = 1;
+
+  // Unique audit ID, generated for each request.
+  optional string auditID = 2;
+
+  // Stage of the request handling when this event instance was generated.
+  optional string stage = 3;
+
+  // RequestURI is the request URI as sent by the client to a server.
+  optional string requestURI = 4;
+
+  // Verb is the kubernetes verb associated with the request.
+  // For non-resource requests, this is the lower-cased HTTP method.
+  optional string verb = 5;
+
+  // Authenticated user information.
+  optional .k8s.io.api.authentication.v1.UserInfo user = 6;
+
+  // Impersonated user information.
+  // +optional
+  optional .k8s.io.api.authentication.v1.UserInfo impersonatedUser = 7;
+
+  // Source IPs, from where the request originated and intermediate proxies.
+  // The source IPs are listed from (in order):
+  // 1. X-Forwarded-For request header IPs
+  // 2. X-Real-Ip header, if not present in the X-Forwarded-For list
+  // 3. The remote address for the connection, if it doesn't match the last
+  //    IP in the list up to here (X-Forwarded-For or X-Real-Ip).
+  // Note: All but the last IP can be arbitrarily set by the client.
+  // +optional
+  // +listType=atomic
+  repeated string sourceIPs = 8;
+
+  // UserAgent records the user agent string reported by the client.
+  // Note that the UserAgent is provided by the client, and must not be trusted.
+  // +optional
+  optional string userAgent = 16;
+
+  // Object reference this request is targeted at.
+  // Does not apply for List-type requests, or non-resource requests.
+  // +optional
+  optional ObjectReference objectRef = 9;
+
+  // The response status, populated even when the ResponseObject is not a Status type.
+  // For successful responses, this will only include the Code and StatusSuccess.
+  // For non-status type error responses, this will be auto-populated with the error Message.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status responseStatus = 10;
+
+  // API object from the request, in JSON format. The RequestObject is recorded as-is in the request
+  // (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or
+  // merging. It is an external versioned object type, and may not be a valid object on its own.
+  // Omitted for non-resource requests.  Only logged at Request Level and higher.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.runtime.Unknown requestObject = 11;
+
+  // API object returned in the response, in JSON. The ResponseObject is recorded after conversion
+  // to the external type, and serialized as JSON.  Omitted for non-resource requests.  Only logged
+  // at Response Level.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 12;
+
+  // Time the request reached the apiserver.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 13;
+
+  // Time the request reached current audit stage.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 14;
+
+  // Annotations is an unstructured key value map stored with an audit event that may be set by
+  // plugins invoked in the request serving chain, including authentication, authorization and
+  // admission plugins. Note that these annotations are for the audit event, and do not correspond
+  // to the metadata.annotations of the submitted object. Keys should uniquely identify the informing
+  // component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values
+  // should be short. Annotations are included in the Metadata level.
+  // +optional
+  map<string, string> annotations = 15;
+}
+
+// EventList is a list of audit Events.
+message EventList {
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  repeated Event items = 2;
+}
+
+// GroupResources represents resource kinds in an API group.
+message GroupResources {
+  // Group is the name of the API group that contains the resources.
+  // The empty string represents the core API group.
+  // +optional
+  optional string group = 1;
+
+  // Resources is a list of resources this rule applies to.
+  //
+  // For example:
+  // - `pods` matches pods.
+  // - `pods/log` matches the log subresource of pods.
+  // - `*` matches all resources and their subresources.
+  // - `pods/*` matches all subresources of pods.
+  // - `*/scale` matches all scale subresources.
+  //
+  // If a wildcard is present, the validation rule will ensure resources do not
+  // overlap with each other.
+  //
+  // An empty list implies all resources and subresources in this API group apply.
+  // +optional
+  // +listType=atomic
+  repeated string resources = 2;
+
+  // ResourceNames is a list of resource instance names that the policy matches.
+  // Using this field requires Resources to be specified.
+  // An empty list implies that every instance of the resource is matched.
+  // +optional
+  // +listType=atomic
+  repeated string resourceNames = 3;
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+message ObjectReference {
+  // +optional
+  optional string resource = 1;
+
+  // +optional
+  optional string namespace = 2;
+
+  // +optional
+  optional string name = 3;
+
+  // +optional
+  optional string uid = 4;
+
+  // APIGroup is the name of the API group that contains the referred object.
+  // The empty string represents the core API group.
+  // +optional
+  optional string apiGroup = 5;
+
+  // APIVersion is the version of the API group that contains the referred object.
+  // +optional
+  optional string apiVersion = 6;
+
+  // +optional
+  optional string resourceVersion = 7;
+
+  // +optional
+  optional string subresource = 8;
+}
+
+// Policy defines the configuration of audit logging, and the rules for how different request
+// categories are logged.
+message Policy {
+  // ObjectMeta is included for interoperability with API infrastructure.
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Rules specify the audit Level a request should be recorded at.
+  // A request may match multiple rules, in which case the FIRST matching rule is used.
+  // The default audit level is None, but can be overridden by a catch-all rule at the end of the list.
+  // PolicyRules are strictly ordered.
+  // +listType=atomic
+  repeated PolicyRule rules = 2;
+
+  // OmitStages is a list of stages for which no events are created. Note that this can also
+  // be specified per rule, in which case the union of both is omitted.
+  // +optional
+  // +listType=atomic
+  repeated string omitStages = 3;
+
+  // OmitManagedFields indicates whether to omit the managed fields of the request
+  // and response bodies from being written to the API audit log.
+  // This is used as a global default - a value of 'true' will omit the managed fields,
+  // otherwise the managed fields will be included in the API audit log.
+  // Note that this can also be specified per rule in which case the value specified
+  // in a rule will override the global default.
+  // +optional
+  optional bool omitManagedFields = 4;
+}
+
+// PolicyList is a list of audit Policies.
+message PolicyList {
+  // +optional
+  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  repeated Policy items = 2;
+}
+
+// PolicyRule maps requests based on metadata to an audit Level.
+// Requests must match the rules of every field (an intersection of rules).
+message PolicyRule {
+  // The Level that requests matching this rule are recorded at.
+  optional string level = 1;
+
+  // The users (by authenticated user name) this rule applies to.
+  // An empty list implies every user.
+  // +optional
+  // +listType=atomic
+  repeated string users = 2;
+
+  // The user groups this rule applies to. A user is considered matching
+  // if it is a member of any of the UserGroups.
+  // An empty list implies every user group.
+  // +optional
+  // +listType=atomic
+  repeated string userGroups = 3;
+
+  // The verbs that match this rule.
+  // An empty list implies every verb.
+  // +optional
+  // +listType=atomic
+  repeated string verbs = 4;
+
+  // Resources that this rule matches. An empty list implies all kinds in all API groups.
+  // +optional
+  // +listType=atomic
+  repeated GroupResources resources = 5;
+
+  // Namespaces that this rule matches.
+  // The empty string "" matches non-namespaced resources.
+  // An empty list implies every namespace.
+  // +optional
+  // +listType=atomic
+  repeated string namespaces = 6;
+
+  // NonResourceURLs is a set of URL paths that should be audited.
+  // `*`s are allowed, but only as the full, final step in the path.
+  // Examples:
+  // - `/metrics` - Log requests for apiserver metrics
+  // - `/healthz*` - Log all health checks
+  // +optional
+  // +listType=atomic
+  repeated string nonResourceURLs = 7;
+
+  // OmitStages is a list of stages for which no events are created. Note that this can also
+  // be specified policy-wide, in which case the union of both is omitted.
+  // An empty list means no restrictions will apply.
+  // +optional
+  // +listType=atomic
+  repeated string omitStages = 8;
+
+  // OmitManagedFields indicates whether to omit the managed fields of the request
+  // and response bodies from being written to the API audit log.
+  // - a value of 'true' will drop the managed fields from the API audit log
+  // - a value of 'false' indicates that the managed fields should be included
+  //   in the API audit log
+  // Note that the value, if specified in this rule, will override the global default.
+  // If a value is not specified, then the global default specified in
+  // Policy.OmitManagedFields will stand.
+  // +optional
+  optional bool omitManagedFields = 9;
+}
+
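
The Policy semantics documented above are order-sensitive: the first matching
PolicyRule decides the audit Level, and an unmatched request defaults to None.
A hedged sketch of that evaluation, assuming the types from this package are
imported as auditv1; levelFor and contains are simplified hypothetical helpers
that only consult Users and Verbs, whereas the real checker in k8s.io/apiserver
also matches groups, resources, namespaces and non-resource URLs:

package main

import (
	"fmt"

	auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
)

// contains treats an empty list as "matches everything", the convention the
// field comments above describe for Users, Verbs, and the other list fields.
func contains(list []string, s string) bool {
	if len(list) == 0 {
		return true
	}
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

// levelFor returns the Level of the first matching rule; order matters.
func levelFor(p auditv1.Policy, user, verb string) auditv1.Level {
	for _, r := range p.Rules {
		if contains(r.Users, user) && contains(r.Verbs, verb) {
			return r.Level
		}
	}
	return auditv1.LevelNone // no rule matched: default is None
}

func main() {
	p := auditv1.Policy{Rules: []auditv1.PolicyRule{
		{Level: auditv1.LevelNone, Users: []string{"system:kube-proxy"}},
		{Level: auditv1.LevelMetadata}, // catch-all rule at the end of the list
	}}
	fmt.Println(levelFor(p, "system:kube-proxy", "watch")) // None
	fmt.Println(levelFor(p, "alice", "get"))               // Metadata
}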
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/register.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/register.go
new file mode 100644
index 0000000000..46e3e47bc6
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/register.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "audit.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Event{},
+		&EventList{},
+		&Policy{},
+		&PolicyList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
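
A short usage sketch for the registration above: AddToScheme installs the four
audit.k8s.io/v1 types into a runtime.Scheme, after which the scheme can resolve
their GroupVersionKind. The calls below are standard apimachinery API; only the
flow shown is illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := auditv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Once registered, the scheme knows the kind for each Go type.
	gvks, _, err := scheme.ObjectKinds(&auditv1.Policy{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // [audit.k8s.io/v1, Kind=Policy]
}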
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go
new file mode 100644
index 0000000000..ae122d6c4d
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go
@@ -0,0 +1,318 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	authnv1 "k8s.io/api/authentication/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// Header keys used by the audit system.
+const (
+	// Header to hold the audit ID as the request is propagated through the serving hierarchy. The
+	// Audit-ID header should be set by the first server to receive the request (e.g. the federation
+	// server or kube-aggregator).
+	HeaderAuditID = "Audit-ID"
+)
+
+// Level defines the amount of information logged during auditing
+type Level string
+
+// Valid audit levels
+const (
+	// LevelNone disables auditing
+	LevelNone Level = "None"
+	// LevelMetadata provides the basic level of auditing.
+	LevelMetadata Level = "Metadata"
+	// LevelRequest provides Metadata level of auditing, and additionally
+	// logs the request object (does not apply for non-resource requests).
+	LevelRequest Level = "Request"
+	// LevelRequestResponse provides Request level of auditing, and additionally
+	// logs the response object (does not apply for non-resource requests).
+	LevelRequestResponse Level = "RequestResponse"
+)
+
+// Stage defines the stages in request handling at which audit events may be generated.
+type Stage string
+
+// Valid audit stages.
+const (
+	// The stage for events generated as soon as the audit handler receives the request, and before it
+	// is delegated down the handler chain.
+	StageRequestReceived Stage = "RequestReceived"
+	// The stage for events generated once the response headers are sent, but before the response body
+	// is sent. This stage is only generated for long-running requests (e.g. watch).
+	StageResponseStarted Stage = "ResponseStarted"
+	// The stage for events generated once the response body has been completed, and no more bytes
+	// will be sent.
+	StageResponseComplete Stage = "ResponseComplete"
+	// The stage for events generated when a panic occurred.
+	StagePanic Stage = "Panic"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Event captures all the information that can be included in an API audit log.
+type Event struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// AuditLevel at which event was generated
+	Level Level `json:"level" protobuf:"bytes,1,opt,name=level,casttype=Level"`
+
+	// Unique audit ID, generated for each request.
+	AuditID types.UID `json:"auditID" protobuf:"bytes,2,opt,name=auditID,casttype=k8s.io/apimachinery/pkg/types.UID"`
+	// Stage of the request handling when this event instance was generated.
+	Stage Stage `json:"stage" protobuf:"bytes,3,opt,name=stage,casttype=Stage"`
+
+	// RequestURI is the request URI as sent by the client to a server.
+	RequestURI string `json:"requestURI" protobuf:"bytes,4,opt,name=requestURI"`
+	// Verb is the kubernetes verb associated with the request.
+	// For non-resource requests, this is the lower-cased HTTP method.
+	Verb string `json:"verb" protobuf:"bytes,5,opt,name=verb"`
+	// Authenticated user information.
+	User authnv1.UserInfo `json:"user" protobuf:"bytes,6,opt,name=user"`
+	// Impersonated user information.
+	// +optional
+	ImpersonatedUser *authnv1.UserInfo `json:"impersonatedUser,omitempty" protobuf:"bytes,7,opt,name=impersonatedUser"`
+	// Source IPs, from where the request originated and intermediate proxies.
+	// The source IPs are listed from (in order):
+	// 1. X-Forwarded-For request header IPs
+	// 2. X-Real-Ip header, if not present in the X-Forwarded-For list
+	// 3. The remote address for the connection, if it doesn't match the last
+	//    IP in the list up to here (X-Forwarded-For or X-Real-Ip).
+	// Note: All but the last IP can be arbitrarily set by the client.
+	// +optional
+	// +listType=atomic
+	SourceIPs []string `json:"sourceIPs,omitempty" protobuf:"bytes,8,rep,name=sourceIPs"`
+	// UserAgent records the user agent string reported by the client.
+	// Note that the UserAgent is provided by the client, and must not be trusted.
+	// +optional
+	UserAgent string `json:"userAgent,omitempty" protobuf:"bytes,16,opt,name=userAgent"`
+	// Object reference this request is targeted at.
+	// Does not apply for List-type requests, or non-resource requests.
+	// +optional
+	ObjectRef *ObjectReference `json:"objectRef,omitempty" protobuf:"bytes,9,opt,name=objectRef"`
+	// The response status, populated even when the ResponseObject is not a Status type.
+	// For successful responses, this will only include the Code and StatusSuccess.
+	// For non-status type error responses, this will be auto-populated with the error Message.
+	// +optional
+	ResponseStatus *metav1.Status `json:"responseStatus,omitempty" protobuf:"bytes,10,opt,name=responseStatus"`
+
+	// API object from the request, in JSON format. The RequestObject is recorded as-is in the request
+	// (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or
+	// merging. It is an external versioned object type, and may not be a valid object on its own.
+	// Omitted for non-resource requests.  Only logged at Request Level and higher.
+	// +optional
+	RequestObject *runtime.Unknown `json:"requestObject,omitempty" protobuf:"bytes,11,opt,name=requestObject"`
+	// API object returned in the response, in JSON. The ResponseObject is recorded after conversion
+	// to the external type, and serialized as JSON.  Omitted for non-resource requests.  Only logged
+	// at Response Level.
+	// +optional
+	ResponseObject *runtime.Unknown `json:"responseObject,omitempty" protobuf:"bytes,12,opt,name=responseObject"`
+	// Time the request reached the apiserver.
+	// +optional
+	RequestReceivedTimestamp metav1.MicroTime `json:"requestReceivedTimestamp" protobuf:"bytes,13,opt,name=requestReceivedTimestamp"`
+	// Time the request reached current audit stage.
+	// +optional
+	StageTimestamp metav1.MicroTime `json:"stageTimestamp" protobuf:"bytes,14,opt,name=stageTimestamp"`
+
+	// Annotations is an unstructured key value map stored with an audit event that may be set by
+	// plugins invoked in the request serving chain, including authentication, authorization and
+	// admission plugins. Note that these annotations are for the audit event, and do not correspond
+	// to the metadata.annotations of the submitted object. Keys should uniquely identify the informing
+	// component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values
+	// should be short. Annotations are included in the Metadata level.
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,15,rep,name=annotations"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventList is a list of audit Events.
+type EventList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Policy defines the configuration of audit logging, and the rules for how different request
+// categories are logged.
+type Policy struct {
+	metav1.TypeMeta `json:",inline"`
+	// ObjectMeta is included for interoperability with API infrastructure.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Rules specify the audit Level a request should be recorded at.
+	// A request may match multiple rules, in which case the FIRST matching rule is used.
+	// The default audit level is None, but can be overridden by a catch-all rule at the end of the list.
+	// PolicyRules are strictly ordered.
+	// +listType=atomic
+	Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+
+	// OmitStages is a list of stages for which no events are created. Note that this can also
+	// be specified per rule, in which case the union of both is omitted.
+	// +optional
+	// +listType=atomic
+	OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,3,rep,name=omitStages"`
+
+	// OmitManagedFields indicates whether to omit the managed fields of the request
+	// and response bodies from being written to the API audit log.
+	// This is used as a global default - a value of 'true' will omit the managed fields,
+	// otherwise the managed fields will be included in the API audit log.
+	// Note that this can also be specified per rule in which case the value specified
+	// in a rule will override the global default.
+	// +optional
+	OmitManagedFields bool `json:"omitManagedFields,omitempty" protobuf:"varint,4,opt,name=omitManagedFields"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PolicyList is a list of audit Policies.
+type PolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	Items []Policy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// PolicyRule maps requests based on metadata to an audit Level.
+// Requests must match the rules of every field (an intersection of rules).
+type PolicyRule struct {
+	// The Level that requests matching this rule are recorded at.
+	Level Level `json:"level" protobuf:"bytes,1,opt,name=level,casttype=Level"`
+
+	// The users (by authenticated user name) this rule applies to.
+	// An empty list implies every user.
+	// +optional
+	// +listType=atomic
+	Users []string `json:"users,omitempty" protobuf:"bytes,2,rep,name=users"`
+	// The user groups this rule applies to. A user is considered matching
+	// if it is a member of any of the UserGroups.
+	// An empty list implies every user group.
+	// +optional
+	// +listType=atomic
+	UserGroups []string `json:"userGroups,omitempty" protobuf:"bytes,3,rep,name=userGroups"`
+
+	// The verbs that match this rule.
+	// An empty list implies every verb.
+	// +optional
+	// +listType=atomic
+	Verbs []string `json:"verbs,omitempty" protobuf:"bytes,4,rep,name=verbs"`
+
+	// Rules can apply to API resources (such as "pods" or "secrets"),
+	// non-resource URL paths (such as "/api"), or neither, but not both.
+	// If neither is specified, the rule is treated as a default for all URLs.
+
+	// Resources that this rule matches. An empty list implies all kinds in all API groups.
+	// +optional
+	// +listType=atomic
+	Resources []GroupResources `json:"resources,omitempty" protobuf:"bytes,5,rep,name=resources"`
+	// Namespaces that this rule matches.
+	// The empty string "" matches non-namespaced resources.
+	// An empty list implies every namespace.
+	// +optional
+	// +listType=atomic
+	Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,6,rep,name=namespaces"`
+
+	// NonResourceURLs is a set of URL paths that should be audited.
+	// `*`s are allowed, but only as the full, final step in the path.
+	// Examples:
+	// - `/metrics` - Log requests for apiserver metrics
+	// - `/healthz*` - Log all health checks
+	// +optional
+	// +listType=atomic
+	NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,7,rep,name=nonResourceURLs"`
+
+	// OmitStages is a list of stages for which no events are created. Note that this can also
+	// be specified policy-wide, in which case the union of both is omitted.
+	// An empty list means no restrictions will apply.
+	// +optional
+	// +listType=atomic
+	OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,8,rep,name=omitStages"`
+
+	// OmitManagedFields indicates whether to omit the managed fields of the request
+	// and response bodies from being written to the API audit log.
+	// - a value of 'true' will drop the managed fields from the API audit log
+	// - a value of 'false' indicates that the managed fields should be included
+	//   in the API audit log
+	// Note that the value, if specified in this rule, will override the global default.
+	// If a value is not specified, then the global default specified in
+	// Policy.OmitManagedFields will stand.
+	// +optional
+	OmitManagedFields *bool `json:"omitManagedFields,omitempty" protobuf:"varint,9,opt,name=omitManagedFields"`
+}
+
+// GroupResources represents resource kinds in an API group.
+type GroupResources struct {
+	// Group is the name of the API group that contains the resources.
+	// The empty string represents the core API group.
+	// +optional
+	Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
+	// Resources is a list of resources this rule applies to.
+	//
+	// For example:
+	// - `pods` matches pods.
+	// - `pods/log` matches the log subresource of pods.
+	// - `*` matches all resources and their subresources.
+	// - `pods/*` matches all subresources of pods.
+	// - `*/scale` matches all scale subresources.
+	//
+	// If a wildcard is present, the validation rule will ensure resources do not
+	// overlap with each other.
+	//
+	// An empty list implies all resources and subresources in this API group apply.
+	// +optional
+	// +listType=atomic
+	Resources []string `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
+	// ResourceNames is a list of resource instance names that the policy matches.
+	// Using this field requires Resources to be specified.
+	// An empty list implies that every instance of the resource is matched.
+	// +optional
+	// +listType=atomic
+	ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,3,rep,name=resourceNames"`
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+type ObjectReference struct {
+	// +optional
+	Resource string `json:"resource,omitempty" protobuf:"bytes,1,opt,name=resource"`
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+	// APIGroup is the name of the API group that contains the referred object.
+	// The empty string represents the core API group.
+	// +optional
+	APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,5,opt,name=apiGroup"`
+	// APIVersion is the version of the API group that contains the referred object.
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,6,opt,name=apiVersion"`
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,7,opt,name=resourceVersion"`
+	// +optional
+	Subresource string `json:"subresource,omitempty" protobuf:"bytes,8,opt,name=subresource"`
+}
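
The four Level constants above form a strict ordering (None < Metadata <
Request < RequestResponse), with each level recording everything the previous
one does plus more. This v1 file defines no comparison helper, so the ranking
below is a hypothetical sketch of that ordering, not part of the package:

package main

import (
	"fmt"

	auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
)

// levelRank maps each audit level onto its position in the ordering;
// -1 flags an unknown level. This helper is illustrative only.
func levelRank(l auditv1.Level) int {
	switch l {
	case auditv1.LevelNone:
		return 0
	case auditv1.LevelMetadata:
		return 1
	case auditv1.LevelRequest:
		return 2
	case auditv1.LevelRequestResponse:
		return 3
	}
	return -1
}

// atLeast reports whether level a records at least as much detail as b.
func atLeast(a, b auditv1.Level) bool {
	return levelRank(a) >= levelRank(b)
}

func main() {
	fmt.Println(atLeast(auditv1.LevelRequest, auditv1.LevelMetadata)) // true
	fmt.Println(atLeast(auditv1.LevelNone, auditv1.LevelMetadata))    // false
}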
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go
new file mode 100644
index 0000000000..53cbb02084
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go
@@ -0,0 +1,327 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	unsafe "unsafe"
+
+	authenticationv1 "k8s.io/api/authentication/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	types "k8s.io/apimachinery/pkg/types"
+	audit "k8s.io/apiserver/pkg/apis/audit"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*Event)(nil), (*audit.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Event_To_audit_Event(a.(*Event), b.(*audit.Event), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*audit.Event)(nil), (*Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_audit_Event_To_v1_Event(a.(*audit.Event), b.(*Event), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*EventList)(nil), (*audit.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_EventList_To_audit_EventList(a.(*EventList), b.(*audit.EventList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*audit.EventList)(nil), (*EventList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_audit_EventList_To_v1_EventList(a.(*audit.EventList), b.(*EventList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*GroupResources)(nil), (*audit.GroupResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_GroupResources_To_audit_GroupResources(a.(*GroupResources), b.(*audit.GroupResources), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*audit.GroupResources)(nil), (*GroupResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_audit_GroupResources_To_v1_GroupResources(a.(*audit.GroupResources), b.(*GroupResources), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ObjectReference)(nil), (*audit.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ObjectReference_To_audit_ObjectReference(a.(*ObjectReference), b.(*audit.ObjectReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*audit.ObjectReference)(nil), (*ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_audit_ObjectReference_To_v1_ObjectReference(a.(*audit.ObjectReference), b.(*ObjectReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Policy)(nil), (*audit.Policy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Policy_To_audit_Policy(a.(*Policy), b.(*audit.Policy), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*audit.Policy)(nil), (*Policy)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_audit_Policy_To_v1_Policy(a.(*audit.Policy), b.(*Policy), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*PolicyList)(nil), (*audit.PolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PolicyList_To_audit_PolicyList(a.(*PolicyList), b.(*audit.PolicyList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*audit.PolicyList)(nil), (*PolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_audit_PolicyList_To_v1_PolicyList(a.(*audit.PolicyList), b.(*PolicyList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*PolicyRule)(nil), (*audit.PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PolicyRule_To_audit_PolicyRule(a.(*PolicyRule), b.(*audit.PolicyRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*audit.PolicyRule)(nil), (*PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_audit_PolicyRule_To_v1_PolicyRule(a.(*audit.PolicyRule), b.(*PolicyRule), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error {
+	out.Level = audit.Level(in.Level)
+	out.AuditID = types.UID(in.AuditID)
+	out.Stage = audit.Stage(in.Stage)
+	out.RequestURI = in.RequestURI
+	out.Verb = in.Verb
+	out.User = in.User
+	out.ImpersonatedUser = (*authenticationv1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser))
+	out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs))
+	out.UserAgent = in.UserAgent
+	out.ObjectRef = (*audit.ObjectReference)(unsafe.Pointer(in.ObjectRef))
+	out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus))
+	out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject))
+	out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject))
+	out.RequestReceivedTimestamp = in.RequestReceivedTimestamp
+	out.StageTimestamp = in.StageTimestamp
+	out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
+	return nil
+}
+
+// Convert_v1_Event_To_audit_Event is an autogenerated conversion function.
+func Convert_v1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error {
+	return autoConvert_v1_Event_To_audit_Event(in, out, s)
+}
+
+func autoConvert_audit_Event_To_v1_Event(in *audit.Event, out *Event, s conversion.Scope) error {
+	out.Level = Level(in.Level)
+	out.AuditID = types.UID(in.AuditID)
+	out.Stage = Stage(in.Stage)
+	out.RequestURI = in.RequestURI
+	out.Verb = in.Verb
+	out.User = in.User
+	out.ImpersonatedUser = (*authenticationv1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser))
+	out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs))
+	out.UserAgent = in.UserAgent
+	out.ObjectRef = (*ObjectReference)(unsafe.Pointer(in.ObjectRef))
+	out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus))
+	out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject))
+	out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject))
+	out.RequestReceivedTimestamp = in.RequestReceivedTimestamp
+	out.StageTimestamp = in.StageTimestamp
+	out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
+	return nil
+}
+
+// Convert_audit_Event_To_v1_Event is an autogenerated conversion function.
+func Convert_audit_Event_To_v1_Event(in *audit.Event, out *Event, s conversion.Scope) error {
+	return autoConvert_audit_Event_To_v1_Event(in, out, s)
+}
+
+func autoConvert_v1_EventList_To_audit_EventList(in *EventList, out *audit.EventList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]audit.Event)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+// Convert_v1_EventList_To_audit_EventList is an autogenerated conversion function.
+func Convert_v1_EventList_To_audit_EventList(in *EventList, out *audit.EventList, s conversion.Scope) error {
+	return autoConvert_v1_EventList_To_audit_EventList(in, out, s)
+}
+
+func autoConvert_audit_EventList_To_v1_EventList(in *audit.EventList, out *EventList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]Event)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+// Convert_audit_EventList_To_v1_EventList is an autogenerated conversion function.
+func Convert_audit_EventList_To_v1_EventList(in *audit.EventList, out *EventList, s conversion.Scope) error {
+	return autoConvert_audit_EventList_To_v1_EventList(in, out, s)
+}
+
+func autoConvert_v1_GroupResources_To_audit_GroupResources(in *GroupResources, out *audit.GroupResources, s conversion.Scope) error {
+	out.Group = in.Group
+	out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
+	out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
+	return nil
+}
+
+// Convert_v1_GroupResources_To_audit_GroupResources is an autogenerated conversion function.
+func Convert_v1_GroupResources_To_audit_GroupResources(in *GroupResources, out *audit.GroupResources, s conversion.Scope) error {
+	return autoConvert_v1_GroupResources_To_audit_GroupResources(in, out, s)
+}
+
+func autoConvert_audit_GroupResources_To_v1_GroupResources(in *audit.GroupResources, out *GroupResources, s conversion.Scope) error {
+	out.Group = in.Group
+	out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources))
+	out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames))
+	return nil
+}
+
+// Convert_audit_GroupResources_To_v1_GroupResources is an autogenerated conversion function.
+func Convert_audit_GroupResources_To_v1_GroupResources(in *audit.GroupResources, out *GroupResources, s conversion.Scope) error {
+	return autoConvert_audit_GroupResources_To_v1_GroupResources(in, out, s)
+}
+
+func autoConvert_v1_ObjectReference_To_audit_ObjectReference(in *ObjectReference, out *audit.ObjectReference, s conversion.Scope) error {
+	out.Resource = in.Resource
+	out.Namespace = in.Namespace
+	out.Name = in.Name
+	out.UID = types.UID(in.UID)
+	out.APIGroup = in.APIGroup
+	out.APIVersion = in.APIVersion
+	out.ResourceVersion = in.ResourceVersion
+	out.Subresource = in.Subresource
+	return nil
+}
+
+// Convert_v1_ObjectReference_To_audit_ObjectReference is an autogenerated conversion function.
+func Convert_v1_ObjectReference_To_audit_ObjectReference(in *ObjectReference, out *audit.ObjectReference, s conversion.Scope) error {
+	return autoConvert_v1_ObjectReference_To_audit_ObjectReference(in, out, s)
+}
+
+func autoConvert_audit_ObjectReference_To_v1_ObjectReference(in *audit.ObjectReference, out *ObjectReference, s conversion.Scope) error {
+	out.Resource = in.Resource
+	out.Namespace = in.Namespace
+	out.Name = in.Name
+	out.UID = types.UID(in.UID)
+	out.APIGroup = in.APIGroup
+	out.APIVersion = in.APIVersion
+	out.ResourceVersion = in.ResourceVersion
+	out.Subresource = in.Subresource
+	return nil
+}
+
+// Convert_audit_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function.
+func Convert_audit_ObjectReference_To_v1_ObjectReference(in *audit.ObjectReference, out *ObjectReference, s conversion.Scope) error {
+	return autoConvert_audit_ObjectReference_To_v1_ObjectReference(in, out, s)
+}
+
+func autoConvert_v1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	out.Rules = *(*[]audit.PolicyRule)(unsafe.Pointer(&in.Rules))
+	out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages))
+	out.OmitManagedFields = in.OmitManagedFields
+	return nil
+}
+
+// Convert_v1_Policy_To_audit_Policy is an autogenerated conversion function.
+func Convert_v1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error {
+	return autoConvert_v1_Policy_To_audit_Policy(in, out, s)
+}
+
+func autoConvert_audit_Policy_To_v1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules))
+	out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages))
+	out.OmitManagedFields = in.OmitManagedFields
+	return nil
+}
+
+// Convert_audit_Policy_To_v1_Policy is an autogenerated conversion function.
+func Convert_audit_Policy_To_v1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error {
+	return autoConvert_audit_Policy_To_v1_Policy(in, out, s)
+}
+
+func autoConvert_v1_PolicyList_To_audit_PolicyList(in *PolicyList, out *audit.PolicyList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]audit.Policy)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+// Convert_v1_PolicyList_To_audit_PolicyList is an autogenerated conversion function.
+func Convert_v1_PolicyList_To_audit_PolicyList(in *PolicyList, out *audit.PolicyList, s conversion.Scope) error {
+	return autoConvert_v1_PolicyList_To_audit_PolicyList(in, out, s)
+}
+
+func autoConvert_audit_PolicyList_To_v1_PolicyList(in *audit.PolicyList, out *PolicyList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]Policy)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+// Convert_audit_PolicyList_To_v1_PolicyList is an autogenerated conversion function.
+func Convert_audit_PolicyList_To_v1_PolicyList(in *audit.PolicyList, out *PolicyList, s conversion.Scope) error {
+	return autoConvert_audit_PolicyList_To_v1_PolicyList(in, out, s)
+}
+
+func autoConvert_v1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *audit.PolicyRule, s conversion.Scope) error {
+	out.Level = audit.Level(in.Level)
+	out.Users = *(*[]string)(unsafe.Pointer(&in.Users))
+	out.UserGroups = *(*[]string)(unsafe.Pointer(&in.UserGroups))
+	out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
+	out.Resources = *(*[]audit.GroupResources)(unsafe.Pointer(&in.Resources))
+	out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
+	out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
+	out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages))
+	out.OmitManagedFields = (*bool)(unsafe.Pointer(in.OmitManagedFields))
+	return nil
+}
+
+// Convert_v1_PolicyRule_To_audit_PolicyRule is an autogenerated conversion function.
+func Convert_v1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *audit.PolicyRule, s conversion.Scope) error {
+	return autoConvert_v1_PolicyRule_To_audit_PolicyRule(in, out, s)
+}
+
+func autoConvert_audit_PolicyRule_To_v1_PolicyRule(in *audit.PolicyRule, out *PolicyRule, s conversion.Scope) error {
+	out.Level = Level(in.Level)
+	out.Users = *(*[]string)(unsafe.Pointer(&in.Users))
+	out.UserGroups = *(*[]string)(unsafe.Pointer(&in.UserGroups))
+	out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs))
+	out.Resources = *(*[]GroupResources)(unsafe.Pointer(&in.Resources))
+	out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
+	out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs))
+	out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages))
+	out.OmitManagedFields = (*bool)(unsafe.Pointer(in.OmitManagedFields))
+	return nil
+}
+
+// Convert_audit_PolicyRule_To_v1_PolicyRule is an autogenerated conversion function.
+func Convert_audit_PolicyRule_To_v1_PolicyRule(in *audit.PolicyRule, out *PolicyRule, s conversion.Scope) error {
+	return autoConvert_audit_PolicyRule_To_v1_PolicyRule(in, out, s)
+}
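
Editorial aside (not part of the vendored diff): the generated conversion functions above can be invoked directly once both packages are imported. A minimal sketch using only types shown in this file; passing a nil conversion.Scope is safe for this particular conversion because the generated body never dereferences it.

package main

import (
	"fmt"

	audit "k8s.io/apiserver/pkg/apis/audit"
	auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
)

func main() {
	in := &auditv1.Event{Verb: "get", RequestURI: "/api/v1/pods"}
	out := &audit.Event{}
	// The generated body copies field-by-field and never uses the scope,
	// so nil is acceptable here.
	if err := auditv1.Convert_v1_Event_To_audit_Event(in, out, nil); err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Println(out.Verb, out.RequestURI) // get /api/v1/pods
}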
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..0b1b0052d5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go
@@ -0,0 +1,297 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	authenticationv1 "k8s.io/api/authentication/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Event) DeepCopyInto(out *Event) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.User.DeepCopyInto(&out.User)
+	if in.ImpersonatedUser != nil {
+		in, out := &in.ImpersonatedUser, &out.ImpersonatedUser
+		*out = new(authenticationv1.UserInfo)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SourceIPs != nil {
+		in, out := &in.SourceIPs, &out.SourceIPs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ObjectRef != nil {
+		in, out := &in.ObjectRef, &out.ObjectRef
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	if in.ResponseStatus != nil {
+		in, out := &in.ResponseStatus, &out.ResponseStatus
+		*out = new(metav1.Status)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RequestObject != nil {
+		in, out := &in.RequestObject, &out.RequestObject
+		*out = new(runtime.Unknown)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ResponseObject != nil {
+		in, out := &in.ResponseObject, &out.ResponseObject
+		*out = new(runtime.Unknown)
+		(*in).DeepCopyInto(*out)
+	}
+	in.RequestReceivedTimestamp.DeepCopyInto(&out.RequestReceivedTimestamp)
+	in.StageTimestamp.DeepCopyInto(&out.StageTimestamp)
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event.
+func (in *Event) DeepCopy() *Event {
+	if in == nil {
+		return nil
+	}
+	out := new(Event)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Event) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventList) DeepCopyInto(out *EventList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Event, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList.
+func (in *EventList) DeepCopy() *EventList {
+	if in == nil {
+		return nil
+	}
+	out := new(EventList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupResources) DeepCopyInto(out *GroupResources) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ResourceNames != nil {
+		in, out := &in.ResourceNames, &out.ResourceNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResources.
+func (in *GroupResources) DeepCopy() *GroupResources {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupResources)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
+func (in *ObjectReference) DeepCopy() *ObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Policy) DeepCopyInto(out *Policy) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]PolicyRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OmitStages != nil {
+		in, out := &in.OmitStages, &out.OmitStages
+		*out = make([]Stage, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
+func (in *Policy) DeepCopy() *Policy {
+	if in == nil {
+		return nil
+	}
+	out := new(Policy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Policy) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyList) DeepCopyInto(out *PolicyList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Policy, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList.
+func (in *PolicyList) DeepCopy() *PolicyList {
+	if in == nil {
+		return nil
+	}
+	out := new(PolicyList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PolicyList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyRule) DeepCopyInto(out *PolicyRule) {
+	*out = *in
+	if in.Users != nil {
+		in, out := &in.Users, &out.Users
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.UserGroups != nil {
+		in, out := &in.UserGroups, &out.UserGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Verbs != nil {
+		in, out := &in.Verbs, &out.Verbs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]GroupResources, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.NonResourceURLs != nil {
+		in, out := &in.NonResourceURLs, &out.NonResourceURLs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.OmitStages != nil {
+		in, out := &in.OmitStages, &out.OmitStages
+		*out = make([]Stage, len(*in))
+		copy(*out, *in)
+	}
+	if in.OmitManagedFields != nil {
+		in, out := &in.OmitManagedFields, &out.OmitManagedFields
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule.
+func (in *PolicyRule) DeepCopy() *PolicyRule {
+	if in == nil {
+		return nil
+	}
+	out := new(PolicyRule)
+	in.DeepCopyInto(out)
+	return out
+}
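
A quick illustrative sketch (editorial, not vendored code): DeepCopy returns a fully independent copy, so mutating a reference-typed field such as Annotations on the copy leaves the original untouched.

package main

import (
	"fmt"

	auditv1 "k8s.io/apiserver/pkg/apis/audit/v1"
)

func main() {
	orig := &auditv1.Event{Annotations: map[string]string{"key": "original"}}
	cp := orig.DeepCopy()
	cp.Annotations["key"] = "changed" // the map was deep-copied above

	fmt.Println(orig.Annotations["key"]) // original
	fmt.Println(cp.Annotations["key"])   // changed
}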
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go
new file mode 100644
index 0000000000..dac177e93b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go
@@ -0,0 +1,33 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulter functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..81d5add47d
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go
@@ -0,0 +1,297 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package audit
+
+import (
+	v1 "k8s.io/api/authentication/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Event) DeepCopyInto(out *Event) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.User.DeepCopyInto(&out.User)
+	if in.ImpersonatedUser != nil {
+		in, out := &in.ImpersonatedUser, &out.ImpersonatedUser
+		*out = new(v1.UserInfo)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SourceIPs != nil {
+		in, out := &in.SourceIPs, &out.SourceIPs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ObjectRef != nil {
+		in, out := &in.ObjectRef, &out.ObjectRef
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	if in.ResponseStatus != nil {
+		in, out := &in.ResponseStatus, &out.ResponseStatus
+		*out = new(metav1.Status)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.RequestObject != nil {
+		in, out := &in.RequestObject, &out.RequestObject
+		*out = new(runtime.Unknown)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ResponseObject != nil {
+		in, out := &in.ResponseObject, &out.ResponseObject
+		*out = new(runtime.Unknown)
+		(*in).DeepCopyInto(*out)
+	}
+	in.RequestReceivedTimestamp.DeepCopyInto(&out.RequestReceivedTimestamp)
+	in.StageTimestamp.DeepCopyInto(&out.StageTimestamp)
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event.
+func (in *Event) DeepCopy() *Event {
+	if in == nil {
+		return nil
+	}
+	out := new(Event)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Event) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventList) DeepCopyInto(out *EventList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Event, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList.
+func (in *EventList) DeepCopy() *EventList {
+	if in == nil {
+		return nil
+	}
+	out := new(EventList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GroupResources) DeepCopyInto(out *GroupResources) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ResourceNames != nil {
+		in, out := &in.ResourceNames, &out.ResourceNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResources.
+func (in *GroupResources) DeepCopy() *GroupResources {
+	if in == nil {
+		return nil
+	}
+	out := new(GroupResources)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
+func (in *ObjectReference) DeepCopy() *ObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Policy) DeepCopyInto(out *Policy) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Rules != nil {
+		in, out := &in.Rules, &out.Rules
+		*out = make([]PolicyRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.OmitStages != nil {
+		in, out := &in.OmitStages, &out.OmitStages
+		*out = make([]Stage, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
+func (in *Policy) DeepCopy() *Policy {
+	if in == nil {
+		return nil
+	}
+	out := new(Policy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Policy) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyList) DeepCopyInto(out *PolicyList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Policy, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList.
+func (in *PolicyList) DeepCopy() *PolicyList {
+	if in == nil {
+		return nil
+	}
+	out := new(PolicyList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PolicyList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PolicyRule) DeepCopyInto(out *PolicyRule) {
+	*out = *in
+	if in.Users != nil {
+		in, out := &in.Users, &out.Users
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.UserGroups != nil {
+		in, out := &in.UserGroups, &out.UserGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Verbs != nil {
+		in, out := &in.Verbs, &out.Verbs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]GroupResources, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.NonResourceURLs != nil {
+		in, out := &in.NonResourceURLs, &out.NonResourceURLs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.OmitStages != nil {
+		in, out := &in.OmitStages, &out.OmitStages
+		*out = make([]Stage, len(*in))
+		copy(*out, *in)
+	}
+	if in.OmitManagedFields != nil {
+		in, out := &in.OmitManagedFields, &out.OmitManagedFields
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule.
+func (in *PolicyRule) DeepCopy() *PolicyRule {
+	if in == nil {
+		return nil
+	}
+	out := new(PolicyRule)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/apis/cel/config.go b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/cel/config.go
new file mode 100644
index 0000000000..319548cd53
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/apis/cel/config.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+const (
+	// PerCallLimit specifies the actual cost limit per CEL validation call
+	// current PerCallLimit gives roughly 0.1 second for each expression validation call
+	PerCallLimit = 1000000
+
+	// RuntimeCELCostBudget is the overall cost budget for runtime CEL validation cost per ValidatingAdmissionPolicyBinding or CustomResource
+	// current RuntimeCELCostBudget gives roughly 1 second for the validation
+	RuntimeCELCostBudget = 10000000
+
+	// RuntimeCELCostBudgetMatchConditions is the overall cost budget for runtime CEL validation cost on matchConditions per object with matchConditions
+	// this is per webhook for validatingwebhookconfigurations and mutatingwebhookconfigurations or per ValidatingAdmissionPolicyBinding
+	// current RuntimeCELCostBudgetMatchConditions gives roughly 1/4 second for the validation
+	RuntimeCELCostBudgetMatchConditions = 2500000
+
+	// CheckFrequency configures the number of iterations within a comprehension to evaluate
+	// before checking whether the function evaluation has been interrupted
+	CheckFrequency = 100
+
+	// MaxRequestSizeBytes is the maximum size of a request to the API server
+	// TODO(DangerOnTheRanger): wire in MaxRequestBodyBytes from apiserver/pkg/server/options/server_run_options.go to make this configurable
+	// Note that even if server_run_options.go becomes configurable in the future, this cost constant should be fixed and it should be the max allowed request size for the server
+	MaxRequestSizeBytes = int64(3 * 1024 * 1024)
+
+	// MaxEvaluatedMessageExpressionSizeBytes represents the largest-allowable string generated
+	// by a messageExpression field
+	MaxEvaluatedMessageExpressionSizeBytes = 5 * 1024
+)
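
For orientation only (editorial aside): these cost constants are plain integer values, so consumers can budget against them directly. A small sketch with a hypothetical bookkeeping value named remaining:

package main

import (
	"fmt"

	celconfig "k8s.io/apiserver/pkg/apis/cel"
)

func main() {
	// remaining is a hypothetical running budget for one admission request.
	remaining := int64(celconfig.RuntimeCELCostBudget)
	perCall := int64(celconfig.PerCallLimit)

	// At worst, each expression may spend the full per-call limit.
	fmt.Printf("worst-case expressions per request: %d\n", remaining/perCall) // 10
}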
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/audit/OWNERS b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/OWNERS
new file mode 100644
index 0000000000..60f38f133f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - sig-auth-audit-approvers
+reviewers:
+  - sig-auth-audit-reviewers
+labels:
+  - sig/auth
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/audit/context.go b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/context.go
new file mode 100644
index 0000000000..9648587378
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/context.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	"context"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/types"
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
+	"k8s.io/klog/v2"
+)
+
+// The key type is unexported to prevent collisions
+type key int
+
+// auditKey is the context key for storing the audit context that is being
+// captured and the evaluated policy that applies to the given request.
+const auditKey key = iota
+
+// AuditContext holds the information for constructing the audit events for the current request.
+type AuditContext struct {
+	// RequestAuditConfig is the audit configuration that applies to the request
+	RequestAuditConfig RequestAuditConfig
+
+	// Event is the audit Event object that is being captured to be written in
+	// the API audit log.
+	Event auditinternal.Event
+
+	// annotationMutex guards event.Annotations
+	annotationMutex sync.Mutex
+}
+
+// Enabled checks whether auditing is enabled for this audit context.
+func (ac *AuditContext) Enabled() bool {
+	// Note: An unset Level should be considered Enabled, so that request data (e.g. annotations)
+	// can still be captured before the audit policy is evaluated.
+	return ac != nil && ac.RequestAuditConfig.Level != auditinternal.LevelNone
+}
+
+// AddAuditAnnotation sets the audit annotation for the given key, value pair.
+// It is safe to call in most parts of the request flow that come after WithAuditAnnotations.
+// The notable exception is that this function must not be called via a
+// defer statement (i.e. after ServeHTTP) in a handler that runs before WithAudit,
+// as at that point the audit event has already been sent to the audit sink.
+// Handlers that are unaware of their position in the overall request flow should
+// prefer AddAuditAnnotation over LogAnnotation to avoid dropping annotations.
+func AddAuditAnnotation(ctx context.Context, key, value string) {
+	ac := AuditContextFrom(ctx)
+	if !ac.Enabled() {
+		return
+	}
+
+	ac.annotationMutex.Lock()
+	defer ac.annotationMutex.Unlock()
+
+	addAuditAnnotationLocked(ac, key, value)
+}
+
+// AddAuditAnnotations is a bulk version of AddAuditAnnotation. Refer to AddAuditAnnotation for
+// restrictions on when this can be called.
+// keysAndValues are the key-value pairs to add, and must have an even number of items.
+func AddAuditAnnotations(ctx context.Context, keysAndValues ...string) {
+	ac := AuditContextFrom(ctx)
+	if !ac.Enabled() {
+		return
+	}
+
+	ac.annotationMutex.Lock()
+	defer ac.annotationMutex.Unlock()
+
+	if len(keysAndValues)%2 != 0 {
+		klog.Errorf("Dropping mismatched audit annotation %q", keysAndValues[len(keysAndValues)-1])
+	}
+	for i := 0; i < len(keysAndValues)-1; i += 2 {
+		addAuditAnnotationLocked(ac, keysAndValues[i], keysAndValues[i+1])
+	}
+}
+
+// AddAuditAnnotationsMap is a bulk version of AddAuditAnnotation. Refer to AddAuditAnnotation for
+// restrictions on when this can be called.
+func AddAuditAnnotationsMap(ctx context.Context, annotations map[string]string) {
+	ac := AuditContextFrom(ctx)
+	if !ac.Enabled() {
+		return
+	}
+
+	ac.annotationMutex.Lock()
+	defer ac.annotationMutex.Unlock()
+
+	for k, v := range annotations {
+		addAuditAnnotationLocked(ac, k, v)
+	}
+}
+
+// addAuditAnnotationLocked records the audit annotation on the event.
+func addAuditAnnotationLocked(ac *AuditContext, key, value string) {
+	ae := &ac.Event
+
+	if ae.Annotations == nil {
+		ae.Annotations = make(map[string]string)
+	}
+	if v, ok := ae.Annotations[key]; ok && v != value {
+		klog.Warningf("Failed to set annotations[%q] to %q for audit:%q, it has already been set to %q", key, value, ae.AuditID, ae.Annotations[key])
+		return
+	}
+	ae.Annotations[key] = value
+}
+
+// WithAuditContext returns a new context that stores the AuditContext.
+func WithAuditContext(parent context.Context) context.Context {
+	if AuditContextFrom(parent) != nil {
+		return parent // Avoid double registering.
+	}
+
+	return genericapirequest.WithValue(parent, auditKey, &AuditContext{})
+}
+
+// AuditEventFrom returns the audit event struct on the ctx
+func AuditEventFrom(ctx context.Context) *auditinternal.Event {
+	if ac := AuditContextFrom(ctx); ac.Enabled() {
+		return &ac.Event
+	}
+	return nil
+}
+
+// AuditContextFrom returns the AuditContext stored on the given context,
+// carrying both the audit configuration that applies to the request and the
+// audit event that is going to be written to the API audit log.
+func AuditContextFrom(ctx context.Context) *AuditContext {
+	ev, _ := ctx.Value(auditKey).(*AuditContext)
+	return ev
+}
+
+// WithAuditID sets the AuditID on the AuditContext. The AuditContext must already be present in the
+// request context. If the specified auditID is empty, no value is set.
+func WithAuditID(ctx context.Context, auditID types.UID) {
+	if auditID == "" {
+		return
+	}
+	if ac := AuditContextFrom(ctx); ac != nil {
+		ac.Event.AuditID = auditID
+	}
+}
+
+// AuditIDFrom returns the value of the audit ID from the request context, along with whether
+// auditing is enabled.
+func AuditIDFrom(ctx context.Context) (types.UID, bool) {
+	if ac := AuditContextFrom(ctx); ac != nil {
+		return ac.Event.AuditID, true
+	}
+	return "", false
+}
+
+// GetAuditIDTruncated returns the audit ID (truncated) from the request context.
+// If the length of the Audit-ID value exceeds the limit, we truncate it to keep
+// the first N (maxAuditIDLength) characters.
+// This is intended to be used in logging only.
+func GetAuditIDTruncated(ctx context.Context) string {
+	auditID, ok := AuditIDFrom(ctx)
+	if !ok {
+		return ""
+	}
+
+	// if the user has specified a very long audit ID then we will use the first N characters
+	// Note: assuming Audit-ID header is in ASCII
+	const maxAuditIDLength = 64
+	if len(auditID) > maxAuditIDLength {
+		auditID = auditID[:maxAuditIDLength]
+	}
+
+	return string(auditID)
+}
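
A minimal usage sketch of the context helpers above (editorial aside, using only functions defined in this file; the annotation key is illustrative):

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apiserver/pkg/audit"
)

func main() {
	ctx := audit.WithAuditContext(context.Background())
	audit.WithAuditID(ctx, types.UID("abc-123"))
	// An unset level counts as enabled, so annotations can be captured
	// before the audit policy has been evaluated.
	audit.AddAuditAnnotation(ctx, "example.io/reason", "demo")

	if id, ok := audit.AuditIDFrom(ctx); ok {
		fmt.Println("audit ID:", id) // abc-123
	}
	fmt.Println(audit.AuditEventFrom(ctx).Annotations) // map[example.io/reason:demo]
}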
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/audit/evaluator.go b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/evaluator.go
new file mode 100644
index 0000000000..f9664fef69
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/evaluator.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	"k8s.io/apiserver/pkg/apis/audit"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
+)
+
+// RequestAuditConfig is the evaluated audit configuration that is applicable to
+// a given request. PolicyRuleEvaluator evaluates the audit policy against the
+// authorizer attributes and returns a RequestAuditConfig that applies to the request.
+type RequestAuditConfig struct {
+	// Level is the level at which the request is being audited.
+	Level audit.Level
+
+	// OmitStages is the stages that need to be omitted from being audited.
+	OmitStages []audit.Stage
+
+	// OmitManagedFields indicates whether to omit the managed fields of the request
+	// and response bodies from being written to the API audit log.
+	OmitManagedFields bool
+}
+
+// PolicyRuleEvaluator exposes methods for evaluating the policy rules.
+type PolicyRuleEvaluator interface {
+	// EvaluatePolicyRule evaluates the audit policy of the apiserver against
+	// the given authorizer attributes and returns the audit configuration that
+	// is applicable to the given request.
+	EvaluatePolicyRule(authorizer.Attributes) RequestAuditConfig
+}
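
To make the contract concrete, here is a hedged sketch of the smallest possible PolicyRuleEvaluator: a hypothetical fixed-level evaluator that ignores the authorizer attributes entirely.

package main

import (
	"fmt"

	auditinternal "k8s.io/apiserver/pkg/apis/audit"
	"k8s.io/apiserver/pkg/audit"
	"k8s.io/apiserver/pkg/authorization/authorizer"
)

// fixedLevelEvaluator audits every request at one configured level.
type fixedLevelEvaluator struct {
	level auditinternal.Level
}

func (e fixedLevelEvaluator) EvaluatePolicyRule(_ authorizer.Attributes) audit.RequestAuditConfig {
	return audit.RequestAuditConfig{Level: e.level, OmitManagedFields: true}
}

var _ audit.PolicyRuleEvaluator = fixedLevelEvaluator{}

func main() {
	cfg := fixedLevelEvaluator{level: auditinternal.LevelMetadata}.EvaluatePolicyRule(nil)
	fmt.Println(cfg.Level) // Metadata
}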
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/audit/format.go b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/format.go
new file mode 100644
index 0000000000..9c0784a316
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/format.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+)
+
+// EventString creates a 1-line text representation of an audit event, using a subset of the
+// information in the event struct.
+func EventString(ev *auditinternal.Event) string {
+	username := ""
+	groups := ""
+	if len(ev.User.Username) > 0 {
+		username = ev.User.Username
+		if len(ev.User.Groups) > 0 {
+			groups = auditStringSlice(ev.User.Groups)
+		}
+	}
+	asuser := ""
+	asgroups := ""
+	if ev.ImpersonatedUser != nil {
+		asuser = ev.ImpersonatedUser.Username
+		if ev.ImpersonatedUser.Groups != nil {
+			asgroups = auditStringSlice(ev.ImpersonatedUser.Groups)
+		}
+	}
+
+	namespace := ""
+	if ev.ObjectRef != nil && len(ev.ObjectRef.Namespace) != 0 {
+		namespace = ev.ObjectRef.Namespace
+	}
+
+	response := ""
+	if ev.ResponseStatus != nil {
+		response = strconv.Itoa(int(ev.ResponseStatus.Code))
+	}
+
+	ip := ""
+	if len(ev.SourceIPs) > 0 {
+		ip = ev.SourceIPs[0]
+	}
+
+	return fmt.Sprintf("%s AUDIT: id=%q stage=%q ip=%q method=%q user=%q groups=%q as=%q asgroups=%q user-agent=%q namespace=%q uri=%q response=\"%s\"",
+		ev.RequestReceivedTimestamp.Format(time.RFC3339Nano), ev.AuditID, ev.Stage, ip, ev.Verb, username, groups, asuser, asgroups, ev.UserAgent, namespace, ev.RequestURI, response)
+}
+
+func auditStringSlice(inList []string) string {
+	quotedElements := make([]string, len(inList))
+	for i, in := range inList {
+		quotedElements[i] = fmt.Sprintf("%q", in)
+	}
+	return strings.Join(quotedElements, ",")
+}
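
An illustrative call (editorial, not vendored code): EventString flattens the common event fields into a single log-friendly line.

package main

import (
	"fmt"

	auditinternal "k8s.io/apiserver/pkg/apis/audit"
	"k8s.io/apiserver/pkg/audit"
)

func main() {
	ev := &auditinternal.Event{
		AuditID:    "abc-123",
		Stage:      auditinternal.StageResponseComplete,
		Verb:       "get",
		RequestURI: "/api/v1/namespaces/default/pods",
		SourceIPs:  []string{"10.0.0.1"},
	}
	// Prints one line of the form: <ts> AUDIT: id="abc-123" stage=... uri=...
	fmt.Println(audit.EventString(ev))
}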
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/audit/metrics.go b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/metrics.go
new file mode 100644
index 0000000000..729a16eeca
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/metrics.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	"context"
+	"fmt"
+
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+	"k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+	"k8s.io/klog/v2"
+)
+
+const (
+	subsystem = "apiserver_audit"
+)
+
+/*
+ * By default, all the following metrics are defined as falling under
+ * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes)
+ *
+ * Promoting the stability level of the metric is a responsibility of the component owner, since it
+ * involves explicitly acknowledging support for the metric across multiple releases, in accordance with
+ * the metric stability policy.
+ */
+var (
+	eventCounter = metrics.NewCounter(
+		&metrics.CounterOpts{
+			Subsystem:      subsystem,
+			Name:           "event_total",
+			Help:           "Counter of audit events generated and sent to the audit backend.",
+			StabilityLevel: metrics.ALPHA,
+		})
+	errorCounter = metrics.NewCounterVec(
+		&metrics.CounterOpts{
+			Subsystem: subsystem,
+			Name:      "error_total",
+			Help: "Counter of audit events that failed to be audited properly. " +
+				"Plugin identifies the plugin affected by the error.",
+			StabilityLevel: metrics.ALPHA,
+		},
+		[]string{"plugin"},
+	)
+	levelCounter = metrics.NewCounterVec(
+		&metrics.CounterOpts{
+			Subsystem:      subsystem,
+			Name:           "level_total",
+			Help:           "Counter of policy levels for audit events (1 per request).",
+			StabilityLevel: metrics.ALPHA,
+		},
+		[]string{"level"},
+	)
+
+	ApiserverAuditDroppedCounter = metrics.NewCounter(
+		&metrics.CounterOpts{
+			Subsystem: subsystem,
+			Name:      "requests_rejected_total",
+			Help: "Counter of apiserver requests rejected due to an error " +
+				"in audit logging backend.",
+			StabilityLevel: metrics.ALPHA,
+		},
+	)
+)
+
+func init() {
+	legacyregistry.MustRegister(eventCounter)
+	legacyregistry.MustRegister(errorCounter)
+	legacyregistry.MustRegister(levelCounter)
+	legacyregistry.MustRegister(ApiserverAuditDroppedCounter)
+}
+
+// ObserveEvent updates the relevant prometheus metrics for the generated audit event.
+func ObserveEvent(ctx context.Context) {
+	eventCounter.WithContext(ctx).Inc()
+}
+
+// ObservePolicyLevel updates the relevant prometheus metrics with the audit level for a request.
+func ObservePolicyLevel(ctx context.Context, level auditinternal.Level) {
+	levelCounter.WithContext(ctx).WithLabelValues(string(level)).Inc()
+}
+
+// HandlePluginError handles an error that occurred in an audit plugin. This method should only be
+// used if the error may have prevented the audit event from being properly recorded. The impacted
+// events are written to the error log.
+func HandlePluginError(plugin string, err error, impacted ...*auditinternal.Event) {
+	// Count the error.
+	errorCounter.WithLabelValues(plugin).Add(float64(len(impacted)))
+
+	// Log the audit events to the debug log.
+	msg := fmt.Sprintf("Error in audit plugin '%s' affecting %d audit events: %v\nImpacted events:\n",
+		plugin, len(impacted), err)
+	for _, ev := range impacted {
+		msg = msg + EventString(ev) + "\n"
+	}
+	klog.Error(msg)
+}
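
A brief sketch of how an audit backend might report a delivery failure (the plugin name "log" is illustrative):

package main

import (
	"errors"

	auditinternal "k8s.io/apiserver/pkg/apis/audit"
	"k8s.io/apiserver/pkg/audit"
)

func main() {
	ev := &auditinternal.Event{AuditID: "abc-123", Verb: "get"}
	// Increments the per-plugin error counter by one (one impacted event)
	// and writes the impacted event to the log.
	audit.HandlePluginError("log", errors.New("disk full"), ev)
}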
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/audit/request.go b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/request.go
new file mode 100644
index 0000000000..9185278f06
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/request.go
@@ -0,0 +1,312 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net/http"
+	"time"
+
+	authnv1 "k8s.io/api/authentication/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+	"k8s.io/apiserver/pkg/authentication/user"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
+	"k8s.io/klog/v2"
+)
+
+const (
+	maxUserAgentLength      = 1024
+	userAgentTruncateSuffix = "...TRUNCATED"
+)
+
+func LogRequestMetadata(ctx context.Context, req *http.Request, requestReceivedTimestamp time.Time, level auditinternal.Level, attribs authorizer.Attributes) {
+	ac := AuditContextFrom(ctx)
+	if !ac.Enabled() {
+		return
+	}
+	ev := &ac.Event
+
+	ev.RequestReceivedTimestamp = metav1.NewMicroTime(requestReceivedTimestamp)
+	ev.Verb = attribs.GetVerb()
+	ev.RequestURI = req.URL.RequestURI()
+	ev.UserAgent = maybeTruncateUserAgent(req)
+	ev.Level = level
+
+	ips := utilnet.SourceIPs(req)
+	ev.SourceIPs = make([]string, len(ips))
+	for i := range ips {
+		ev.SourceIPs[i] = ips[i].String()
+	}
+
+	if user := attribs.GetUser(); user != nil {
+		ev.User.Username = user.GetName()
+		ev.User.Extra = map[string]authnv1.ExtraValue{}
+		for k, v := range user.GetExtra() {
+			ev.User.Extra[k] = authnv1.ExtraValue(v)
+		}
+		ev.User.Groups = user.GetGroups()
+		ev.User.UID = user.GetUID()
+	}
+
+	if attribs.IsResourceRequest() {
+		ev.ObjectRef = &auditinternal.ObjectReference{
+			Namespace:   attribs.GetNamespace(),
+			Name:        attribs.GetName(),
+			Resource:    attribs.GetResource(),
+			Subresource: attribs.GetSubresource(),
+			APIGroup:    attribs.GetAPIGroup(),
+			APIVersion:  attribs.GetAPIVersion(),
+		}
+	}
+}
+
+// LogImpersonatedUser fills in the impersonated user attributes into an audit event.
+func LogImpersonatedUser(ae *auditinternal.Event, user user.Info) {
+	if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) {
+		return
+	}
+	ae.ImpersonatedUser = &authnv1.UserInfo{
+		Username: user.GetName(),
+	}
+	ae.ImpersonatedUser.Groups = user.GetGroups()
+	ae.ImpersonatedUser.UID = user.GetUID()
+	ae.ImpersonatedUser.Extra = map[string]authnv1.ExtraValue{}
+	for k, v := range user.GetExtra() {
+		ae.ImpersonatedUser.Extra[k] = authnv1.ExtraValue(v)
+	}
+}
+
+// LogRequestObject fills in the request object into an audit event. The passed runtime.Object
+// will be converted to the given gv.
+func LogRequestObject(ctx context.Context, obj runtime.Object, objGV schema.GroupVersion, gvr schema.GroupVersionResource, subresource string, s runtime.NegotiatedSerializer) {
+	ae := AuditEventFrom(ctx)
+	if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) {
+		return
+	}
+
+	// complete ObjectRef
+	if ae.ObjectRef == nil {
+		ae.ObjectRef = &auditinternal.ObjectReference{}
+	}
+
+	// meta.Accessor is more general than ObjectMetaAccessor, but if it fails, we can just skip setting these bits
+	if meta, err := meta.Accessor(obj); err == nil {
+		if len(ae.ObjectRef.Namespace) == 0 {
+			ae.ObjectRef.Namespace = meta.GetNamespace()
+		}
+		if len(ae.ObjectRef.Name) == 0 {
+			ae.ObjectRef.Name = meta.GetName()
+		}
+		if len(ae.ObjectRef.UID) == 0 {
+			ae.ObjectRef.UID = meta.GetUID()
+		}
+		if len(ae.ObjectRef.ResourceVersion) == 0 {
+			ae.ObjectRef.ResourceVersion = meta.GetResourceVersion()
+		}
+	}
+	if len(ae.ObjectRef.APIVersion) == 0 {
+		ae.ObjectRef.APIGroup = gvr.Group
+		ae.ObjectRef.APIVersion = gvr.Version
+	}
+	if len(ae.ObjectRef.Resource) == 0 {
+		ae.ObjectRef.Resource = gvr.Resource
+	}
+	if len(ae.ObjectRef.Subresource) == 0 {
+		ae.ObjectRef.Subresource = subresource
+	}
+
+	if ae.Level.Less(auditinternal.LevelRequest) {
+		return
+	}
+
+	if shouldOmitManagedFields(ctx) {
+		copy, ok, err := copyWithoutManagedFields(obj)
+		if err != nil {
+			klog.ErrorS(err, "Error while dropping managed fields from the request", "auditID", ae.AuditID)
+		}
+		if ok {
+			obj = copy
+		}
+	}
+
+	// TODO(audit): hook into the serializer to avoid double conversion
+	var err error
+	ae.RequestObject, err = encodeObject(obj, objGV, s)
+	if err != nil {
+		// TODO(audit): add error slice to audit event struct
+		klog.ErrorS(err, "Encoding failed of request object", "auditID", ae.AuditID, "gvr", gvr.String(), "obj", obj)
+		return
+	}
+}
+
+// LogRequestPatch fills in the given patch as the request object into an audit event.
+func LogRequestPatch(ctx context.Context, patch []byte) {
+	ae := AuditEventFrom(ctx)
+	if ae == nil || ae.Level.Less(auditinternal.LevelRequest) {
+		return
+	}
+
+	ae.RequestObject = &runtime.Unknown{
+		Raw:         patch,
+		ContentType: runtime.ContentTypeJSON,
+	}
+}
+
+// LogResponseObject fills in the response object into an audit event. The passed runtime.Object
+// will be converted to the given gv.
+func LogResponseObject(ctx context.Context, obj runtime.Object, gv schema.GroupVersion, s runtime.NegotiatedSerializer) {
+	ae := AuditEventFrom(ctx)
+	if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) {
+		return
+	}
+	if status, ok := obj.(*metav1.Status); ok {
+		// selectively copy the bounded fields.
+		ae.ResponseStatus = &metav1.Status{
+			Status:  status.Status,
+			Message: status.Message,
+			Reason:  status.Reason,
+			Details: status.Details,
+			Code:    status.Code,
+		}
+	}
+
+	if ae.Level.Less(auditinternal.LevelRequestResponse) {
+		return
+	}
+
+	if shouldOmitManagedFields(ctx) {
+		copy, ok, err := copyWithoutManagedFields(obj)
+		if err != nil {
+			klog.ErrorS(err, "Error while dropping managed fields from the response", "auditID", ae.AuditID)
+		}
+		if ok {
+			obj = copy
+		}
+	}
+
+	// TODO(audit): hook into the serializer to avoid double conversion
+	var err error
+	ae.ResponseObject, err = encodeObject(obj, gv, s)
+	if err != nil {
+		klog.ErrorS(err, "Encoding failed of response object", "auditID", ae.AuditID, "obj", obj)
+	}
+}
+
+func encodeObject(obj runtime.Object, gv schema.GroupVersion, serializer runtime.NegotiatedSerializer) (*runtime.Unknown, error) {
+	const mediaType = runtime.ContentTypeJSON
+	info, ok := runtime.SerializerInfoForMediaType(serializer.SupportedMediaTypes(), mediaType)
+	if !ok {
+		return nil, fmt.Errorf("unable to locate encoder -- %q is not a supported media type", mediaType)
+	}
+
+	enc := serializer.EncoderForVersion(info.Serializer, gv)
+	var buf bytes.Buffer
+	if err := enc.Encode(obj, &buf); err != nil {
+		return nil, fmt.Errorf("encoding failed: %v", err)
+	}
+
+	return &runtime.Unknown{
+		Raw:         buf.Bytes(),
+		ContentType: mediaType,
+	}, nil
+}
+
+// maybeTruncateUserAgent truncates the User-Agent header if it is too long; otherwise it is returned unchanged.
+func maybeTruncateUserAgent(req *http.Request) string {
+	ua := req.UserAgent()
+	if len(ua) > maxUserAgentLength {
+		ua = ua[:maxUserAgentLength] + userAgentTruncateSuffix
+	}
+
+	return ua
+}
+
+// copyWithoutManagedFields will make a deep copy of the specified object and
+// will discard the managed fields from the copy.
+// The specified object is expected to be a meta.Object or a "list".
+// The specified object obj is treated as readonly and hence not mutated.
+// On return, an error is set if the function runs into any error while
+// removing the managed fields, the boolean value is true if the copy has
+// been made successfully, otherwise false.
+func copyWithoutManagedFields(obj runtime.Object) (runtime.Object, bool, error) {
+	isAccessor := true
+	if _, err := meta.Accessor(obj); err != nil {
+		isAccessor = false
+	}
+	isList := meta.IsListType(obj)
+	_, isTable := obj.(*metav1.Table)
+	if !isAccessor && !isList && !isTable {
+		return nil, false, nil
+	}
+
+	// TODO a deep copy isn't really needed here; figure out how we can reliably
+	//  use a shallow copy here to omit the managedFields.
+	copy := obj.DeepCopyObject()
+
+	if isAccessor {
+		if err := removeManagedFields(copy); err != nil {
+			return nil, false, err
+		}
+	}
+
+	if isList {
+		if err := meta.EachListItem(copy, removeManagedFields); err != nil {
+			return nil, false, err
+		}
+	}
+
+	if isTable {
+		table := copy.(*metav1.Table)
+		for i := range table.Rows {
+			rowObj := table.Rows[i].Object
+			if err := removeManagedFields(rowObj.Object); err != nil {
+				return nil, false, err
+			}
+		}
+	}
+
+	return copy, true, nil
+}
+
+func removeManagedFields(obj runtime.Object) error {
+	if obj == nil {
+		return nil
+	}
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+	accessor.SetManagedFields(nil)
+	return nil
+}
+
+func shouldOmitManagedFields(ctx context.Context) bool {
+	if auditContext := AuditContextFrom(ctx); auditContext != nil {
+		return auditContext.RequestAuditConfig.OmitManagedFields
+	}
+
+	// If we can't decide, return false to maintain the current behavior, which is
+	// to retain the managed fields in the audit event.
+	return false
+}
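
[editor's note] A minimal, hedged sketch of the managedFields stripping performed by copyWithoutManagedFields/removeManagedFields above. The ConfigMap is only an example object (any type with ObjectMeta works), and none of this code is part of the vendored tree:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{
		Name:          "demo",
		ManagedFields: []metav1.ManagedFieldsEntry{{Manager: "kubectl"}},
	}}
	// Deep-copy first, then clear managedFields on the copy only,
	// mirroring removeManagedFields above.
	copyObj := cm.DeepCopyObject()
	if accessor, err := meta.Accessor(copyObj); err == nil {
		accessor.SetManagedFields(nil)
	}
	fmt.Println(len(cm.ManagedFields)) // 1: the original is treated as read-only
}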
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/audit/scheme.go b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/scheme.go
new file mode 100644
index 0000000000..1ebbc64be4
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/scheme.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// TODO: Delete this file if we generate a clientset.
+package audit
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+	"k8s.io/apiserver/pkg/apis/audit/v1"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+
+func init() {
+	metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+	utilruntime.Must(v1.AddToScheme(Scheme))
+	utilruntime.Must(auditinternal.AddToScheme(Scheme))
+	utilruntime.Must(Scheme.SetVersionPriority(v1.SchemeGroupVersion))
+}
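
[editor's note] A hedged round-trip sketch using the scheme registered above (assumed aliases: audit for this package, auditv1 for k8s.io/apiserver/pkg/apis/audit/v1, auditinternal for the internal audit API):

codec := audit.Codecs.LegacyCodec(auditv1.SchemeGroupVersion)

// internal -> versioned v1 JSON
raw, err := runtime.Encode(codec, &auditinternal.Event{AuditID: "demo-id", Verb: "get"})
if err != nil {
	panic(err)
}

// versioned JSON -> internal again
obj, err := runtime.Decode(codec, raw)
fmt.Println(string(raw), obj != nil, err)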
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/audit/types.go b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/types.go
new file mode 100644
index 0000000000..b78bd086b0
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/types.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+)
+
+type Sink interface {
+	// ProcessEvents handles events. ProcessEvents may be called up to three times per audit ID.
+	// Errors might be logged by the sink itself. If an error should be fatal, leading to an internal
+	// error, ProcessEvents is supposed to panic. The event must not be mutated and is reused by the caller
+	// after the call returns, i.e. the sink has to make a deepcopy to keep a copy around if necessary.
+	// Returns true on success, may return false on error.
+	ProcessEvents(events ...*auditinternal.Event) bool
+}
+
+type Backend interface {
+	Sink
+
+	// Run will initialize the backend. It must not block, but may run goroutines in the background. If
+	// stopCh is closed, it is supposed to stop them. Run will be called before the first call to ProcessEvents.
+	Run(stopCh <-chan struct{}) error
+
+	// Shutdown will synchronously shut down the backend while making sure that all pending
+	// events are delivered. It can be assumed that this method is called after
+	// the stopCh channel passed to the Run method has been closed.
+	Shutdown()
+
+	// String returns the backend PluginName.
+	String() string
+}
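
[editor's note] A minimal sketch of a Backend that satisfies the contract above; an in-memory counter, not an implementation from this tree. It never retains or mutates events, Run does not block, and Shutdown has nothing to flush:

type countingBackend struct{ n int }

func (b *countingBackend) ProcessEvents(events ...*auditinternal.Event) bool {
	b.n += len(events) // counting only; a real sink would deep-copy any event it keeps
	return true
}

func (b *countingBackend) Run(stopCh <-chan struct{}) error { return nil } // nothing to start
func (b *countingBackend) Shutdown()                        {}             // nothing pending
func (b *countingBackend) String() string                   { return "counting" }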
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/audit/union.go b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/union.go
new file mode 100644
index 0000000000..0766a9207b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/audit/union.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/errors"
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+)
+
+// Union returns an audit Backend which logs events to a set of backends. The returned
+// Sink implementation blocks in turn for each call to ProcessEvents.
+func Union(backends ...Backend) Backend {
+	if len(backends) == 1 {
+		return backends[0]
+	}
+	return union{backends}
+}
+
+type union struct {
+	backends []Backend
+}
+
+func (u union) ProcessEvents(events ...*auditinternal.Event) bool {
+	success := true
+	for _, backend := range u.backends {
+		success = backend.ProcessEvents(events...) && success
+	}
+	return success
+}
+
+func (u union) Run(stopCh <-chan struct{}) error {
+	var funcs []func() error
+	for _, backend := range u.backends {
+		backend := backend
+		funcs = append(funcs, func() error {
+			return backend.Run(stopCh)
+		})
+	}
+	return errors.AggregateGoroutines(funcs...)
+}
+
+func (u union) Shutdown() {
+	for _, backend := range u.backends {
+		backend.Shutdown()
+	}
+}
+
+func (u union) String() string {
+	var backendStrings []string
+	for _, backend := range u.backends {
+		backendStrings = append(backendStrings, fmt.Sprintf("%s", backend))
+	}
+	return fmt.Sprintf("union[%s]", strings.Join(backendStrings, ","))
+}
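
[editor's note] A hedged usage sketch, reusing the hypothetical countingBackend from the previous note; ProcessEvents reports success only if every backend succeeds:

u := Union(&countingBackend{}, &countingBackend{})
stopCh := make(chan struct{})
if err := u.Run(stopCh); err != nil {
	panic(err)
}
ok := u.ProcessEvents(&auditinternal.Event{Verb: "list"})
close(stopCh)
u.Shutdown()
fmt.Println(u, ok) // union[counting,counting] true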
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go b/hack/tools/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go
new file mode 100644
index 0000000000..dd11efbde5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go
@@ -0,0 +1,184 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serviceaccount
+
+import (
+	"fmt"
+	"strings"
+
+	v1 "k8s.io/api/core/v1"
+	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
+	"k8s.io/apiserver/pkg/authentication/user"
+)
+
+const (
+	ServiceAccountUsernamePrefix    = "system:serviceaccount:"
+	ServiceAccountUsernameSeparator = ":"
+	ServiceAccountGroupPrefix       = "system:serviceaccounts:"
+	AllServiceAccountsGroup         = "system:serviceaccounts"
+	// IssuedCredentialIDAuditAnnotationKey is the annotation key added to the audit event recorded for requests to a
+	// service account's '/token' endpoint.
+	// This annotation indicates the generated credential identifier for the service account token being issued.
+	// This is useful when tracing back the origin of tokens that have gone on to make requests that have persisted
+	// their credential-identifier into the audit log via the user's extra info stored on subsequent audit events.
+	IssuedCredentialIDAuditAnnotationKey = "authentication.kubernetes.io/issued-credential-id"
+	// PodNameKey is the key used in a user's "extra" to specify the pod name of
+	// the authenticating request.
+	PodNameKey = "authentication.kubernetes.io/pod-name"
+	// PodUIDKey is the key used in a user's "extra" to specify the pod UID of
+	// the authenticating request.
+	PodUIDKey = "authentication.kubernetes.io/pod-uid"
+	// NodeNameKey is the key used in a user's "extra" to specify the node name of
+	// the authenticating request.
+	NodeNameKey = "authentication.kubernetes.io/node-name"
+	// NodeUIDKey is the key used in a user's "extra" to specify the node UID of
+	// the authenticating request.
+	NodeUIDKey = "authentication.kubernetes.io/node-uid"
+)
+
+// MakeUsername generates a username from the given namespace and ServiceAccount name.
+// The resulting username can be passed to SplitUsername to extract the original namespace and ServiceAccount name.
+func MakeUsername(namespace, name string) string {
+	return ServiceAccountUsernamePrefix + namespace + ServiceAccountUsernameSeparator + name
+}
+
+// MatchesUsername checks whether the provided username matches the namespace and name without
+// allocating. Use this when checking a service account namespace and name against a known string.
+func MatchesUsername(namespace, name string, username string) bool {
+	if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) {
+		return false
+	}
+	username = username[len(ServiceAccountUsernamePrefix):]
+
+	if !strings.HasPrefix(username, namespace) {
+		return false
+	}
+	username = username[len(namespace):]
+
+	if !strings.HasPrefix(username, ServiceAccountUsernameSeparator) {
+		return false
+	}
+	username = username[len(ServiceAccountUsernameSeparator):]
+
+	return username == name
+}
+
+var invalidUsernameErr = fmt.Errorf("Username must be in the form %s", MakeUsername("namespace", "name"))
+
+// SplitUsername returns the namespace and ServiceAccount name embedded in the given username,
+// or an error if the username is not a valid name produced by MakeUsername
+func SplitUsername(username string) (string, string, error) {
+	if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) {
+		return "", "", invalidUsernameErr
+	}
+	trimmed := strings.TrimPrefix(username, ServiceAccountUsernamePrefix)
+	parts := strings.Split(trimmed, ServiceAccountUsernameSeparator)
+	if len(parts) != 2 {
+		return "", "", invalidUsernameErr
+	}
+	namespace, name := parts[0], parts[1]
+	if len(apimachineryvalidation.ValidateNamespaceName(namespace, false)) != 0 {
+		return "", "", invalidUsernameErr
+	}
+	if len(apimachineryvalidation.ValidateServiceAccountName(name, false)) != 0 {
+		return "", "", invalidUsernameErr
+	}
+	return namespace, name, nil
+}
+
+// MakeGroupNames generates service account group names for the given namespace
+func MakeGroupNames(namespace string) []string {
+	return []string{
+		AllServiceAccountsGroup,
+		MakeNamespaceGroupName(namespace),
+	}
+}
+
+// MakeNamespaceGroupName returns the name of the group all service accounts in the namespace are included in
+func MakeNamespaceGroupName(namespace string) string {
+	return ServiceAccountGroupPrefix + namespace
+}
+
+// UserInfo returns a user.Info interface for the given namespace, service account name and UID
+func UserInfo(namespace, name, uid string) user.Info {
+	return (&ServiceAccountInfo{
+		Name:      name,
+		Namespace: namespace,
+		UID:       uid,
+	}).UserInfo()
+}
+
+type ServiceAccountInfo struct {
+	Name, Namespace, UID string
+	PodName, PodUID      string
+	CredentialID         string
+	NodeName, NodeUID    string
+}
+
+func (sa *ServiceAccountInfo) UserInfo() user.Info {
+	info := &user.DefaultInfo{
+		Name:   MakeUsername(sa.Namespace, sa.Name),
+		UID:    sa.UID,
+		Groups: MakeGroupNames(sa.Namespace),
+	}
+
+	if sa.PodName != "" && sa.PodUID != "" {
+		if info.Extra == nil {
+			info.Extra = make(map[string][]string)
+		}
+		info.Extra[PodNameKey] = []string{sa.PodName}
+		info.Extra[PodUIDKey] = []string{sa.PodUID}
+	}
+	if sa.CredentialID != "" {
+		if info.Extra == nil {
+			info.Extra = make(map[string][]string)
+		}
+		info.Extra[user.CredentialIDKey] = []string{sa.CredentialID}
+	}
+	if sa.NodeName != "" {
+		if info.Extra == nil {
+			info.Extra = make(map[string][]string)
+		}
+		info.Extra[NodeNameKey] = []string{sa.NodeName}
+		// node UID is optional and will only be set if the node name is set
+		if sa.NodeUID != "" {
+			info.Extra[NodeUIDKey] = []string{sa.NodeUID}
+		}
+	}
+
+	return info
+}
+
+// IsServiceAccountToken returns true if the secret is a valid api token for the service account
+func IsServiceAccountToken(secret *v1.Secret, sa *v1.ServiceAccount) bool {
+	if secret.Type != v1.SecretTypeServiceAccountToken {
+		return false
+	}
+
+	name := secret.Annotations[v1.ServiceAccountNameKey]
+	uid := secret.Annotations[v1.ServiceAccountUIDKey]
+	if name != sa.Name {
+		// Name must match
+		return false
+	}
+	if len(uid) > 0 && uid != string(sa.UID) {
+		// If UID is specified, it must match
+		return false
+	}
+
+	return true
+}
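
[editor's note] A quick, hedged sketch of the username helpers above:

username := serviceaccount.MakeUsername("kube-system", "builder")
fmt.Println(username) // system:serviceaccount:kube-system:builder

// MatchesUsername avoids the allocations SplitUsername would make.
fmt.Println(serviceaccount.MatchesUsername("kube-system", "builder", username)) // true

ns, name, err := serviceaccount.SplitUsername(username)
fmt.Println(ns, name, err) // kube-system builder <nil>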
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go b/hack/tools/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go
new file mode 100644
index 0000000000..3d87fd72ca
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package user contains utilities for dealing with simple user exchange in the auth
+// packages. The user.Info interface defines an interface for exchanging that info.
+package user // import "k8s.io/apiserver/pkg/authentication/user"
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/authentication/user/user.go b/hack/tools/vendor/k8s.io/apiserver/pkg/authentication/user/user.go
new file mode 100644
index 0000000000..1af6f2b277
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/authentication/user/user.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package user
+
+// Info describes a user that has been authenticated to the system.
+type Info interface {
+	// GetName returns the name that uniquely identifies this user among all
+	// other active users.
+	GetName() string
+	// GetUID returns a unique value for a particular user that will change
+	// if the user is removed from the system and another user is added with
+	// the same name.
+	GetUID() string
+	// GetGroups returns the names of the groups the user is a member of
+	GetGroups() []string
+
+	// GetExtra can contain any additional information that the authenticator
+	// thought was interesting.  One example would be scopes on a token.
+	// Keys in this map should be namespaced to the authenticator or
+	// authenticator/authorizer pair making use of them.
+	// For instance: "example.org/foo" instead of "foo"
+	// This is a map[string][]string because it needs to be serializable into
+	// a SubjectAccessReviewSpec.authorization.k8s.io for proper authorization
+	// delegation flows
+	// In order to faithfully round-trip through an impersonation flow, these keys
+	// MUST be lowercase.
+	GetExtra() map[string][]string
+}
+
+// DefaultInfo provides a simple user information exchange object
+// for components that implement the UserInfo interface.
+type DefaultInfo struct {
+	Name   string
+	UID    string
+	Groups []string
+	Extra  map[string][]string
+}
+
+func (i *DefaultInfo) GetName() string {
+	return i.Name
+}
+
+func (i *DefaultInfo) GetUID() string {
+	return i.UID
+}
+
+func (i *DefaultInfo) GetGroups() []string {
+	return i.Groups
+}
+
+func (i *DefaultInfo) GetExtra() map[string][]string {
+	return i.Extra
+}
+
+const (
+	// well-known user and group names
+	SystemPrivilegedGroup = "system:masters"
+	NodesGroup            = "system:nodes"
+	MonitoringGroup       = "system:monitoring"
+	AllUnauthenticated    = "system:unauthenticated"
+	AllAuthenticated      = "system:authenticated"
+
+	Anonymous     = "system:anonymous"
+	APIServerUser = "system:apiserver"
+
+	// core kubernetes process identities
+	KubeProxy             = "system:kube-proxy"
+	KubeControllerManager = "system:kube-controller-manager"
+	KubeScheduler         = "system:kube-scheduler"
+
+	// CredentialIDKey is the key used in a user's "extra" to specify the unique
+	// identifier for this identity document.
+	CredentialIDKey = "authentication.kubernetes.io/credential-id"
+)
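
[editor's note] A hedged sketch of a DefaultInfo carrying a namespaced, lowercase extra key as the Info contract above requires:

info := &user.DefaultInfo{
	Name:   "system:serviceaccount:default:builder",
	UID:    "1234",
	Groups: []string{user.AllAuthenticated},
	Extra:  map[string][]string{"example.org/scopes": {"read"}}, // namespaced, lowercase
}
fmt.Println(info.GetName(), info.GetGroups(), info.GetExtra())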
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go b/hack/tools/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go
new file mode 100644
index 0000000000..2f5f65e228
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go
@@ -0,0 +1,184 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorizer
+
+import (
+	"context"
+	"net/http"
+
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apiserver/pkg/authentication/user"
+)
+
+// Attributes is an interface used by an Authorizer to get information about a request
+// that is used to make an authorization decision.
+type Attributes interface {
+	// GetUser returns the user.Info object to authorize
+	GetUser() user.Info
+
+	// GetVerb returns the kube verb associated with API requests (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy),
+	// or the lowercased HTTP verb associated with non-API requests (this includes get, put, post, patch, and delete)
+	GetVerb() string
+
+	// When IsReadOnly() == true, the request has no side effects, other than
+	// caching, logging, and other incidentals.
+	IsReadOnly() bool
+
+	// The namespace of the object, if a request is for a REST object.
+	GetNamespace() string
+
+	// The kind of object, if a request is for a REST object.
+	GetResource() string
+
+	// GetSubresource returns the subresource being requested, if present
+	GetSubresource() string
+
+	// GetName returns the name of the object as parsed off the request.  This will not be present for all request types, but
+	// will be present for: get, update, delete
+	GetName() string
+
+	// The group of the resource, if a request is for a REST object.
+	GetAPIGroup() string
+
+	// GetAPIVersion returns the version of the group requested, if a request is for a REST object.
+	GetAPIVersion() string
+
+	// IsResourceRequest returns true for requests to API resources, like /api/v1/nodes,
+	// and false for non-resource endpoints like /api, /healthz
+	IsResourceRequest() bool
+
+	// GetPath returns the path of the request
+	GetPath() string
+
+	// ParseFieldSelector is lazy, thread-safe, and stores the parsed result and error.
+	// It returns an error if the field selector cannot be parsed.
+	// The returned requirements must be treated as readonly and not modified.
+	GetFieldSelector() (fields.Requirements, error)
+
+	// ParseLabelSelector is lazy, thread-safe, and stores the parsed result and error.
+	// It returns an error if the label selector cannot be parsed.
+	// The returned requirements must be treated as readonly and not modified.
+	GetLabelSelector() (labels.Requirements, error)
+}
+
+// Authorizer makes an authorization decision based on information gained by making
+// zero or more calls to methods of the Attributes interface.  It returns nil when an action is
+// authorized, otherwise it returns an error.
+type Authorizer interface {
+	Authorize(ctx context.Context, a Attributes) (authorized Decision, reason string, err error)
+}
+
+type AuthorizerFunc func(ctx context.Context, a Attributes) (Decision, string, error)
+
+func (f AuthorizerFunc) Authorize(ctx context.Context, a Attributes) (Decision, string, error) {
+	return f(ctx, a)
+}
+
+// RuleResolver provides a mechanism for resolving the list of rules that apply to a given user within a namespace.
+type RuleResolver interface {
+	// RulesFor get the list of cluster wide rules, the list of rules in the specific namespace, incomplete status and errors.
+	RulesFor(ctx context.Context, user user.Info, namespace string) ([]ResourceRuleInfo, []NonResourceRuleInfo, bool, error)
+}
+
+// RequestAttributesGetter provides a function that extracts Attributes from an http.Request
+type RequestAttributesGetter interface {
+	GetRequestAttributes(user.Info, *http.Request) Attributes
+}
+
+// AttributesRecord implements Attributes interface.
+type AttributesRecord struct {
+	User            user.Info
+	Verb            string
+	Namespace       string
+	APIGroup        string
+	APIVersion      string
+	Resource        string
+	Subresource     string
+	Name            string
+	ResourceRequest bool
+	Path            string
+
+	FieldSelectorRequirements fields.Requirements
+	FieldSelectorParsingErr   error
+	LabelSelectorRequirements labels.Requirements
+	LabelSelectorParsingErr   error
+}
+
+func (a AttributesRecord) GetUser() user.Info {
+	return a.User
+}
+
+func (a AttributesRecord) GetVerb() string {
+	return a.Verb
+}
+
+func (a AttributesRecord) IsReadOnly() bool {
+	return a.Verb == "get" || a.Verb == "list" || a.Verb == "watch"
+}
+
+func (a AttributesRecord) GetNamespace() string {
+	return a.Namespace
+}
+
+func (a AttributesRecord) GetResource() string {
+	return a.Resource
+}
+
+func (a AttributesRecord) GetSubresource() string {
+	return a.Subresource
+}
+
+func (a AttributesRecord) GetName() string {
+	return a.Name
+}
+
+func (a AttributesRecord) GetAPIGroup() string {
+	return a.APIGroup
+}
+
+func (a AttributesRecord) GetAPIVersion() string {
+	return a.APIVersion
+}
+
+func (a AttributesRecord) IsResourceRequest() bool {
+	return a.ResourceRequest
+}
+
+func (a AttributesRecord) GetPath() string {
+	return a.Path
+}
+
+func (a AttributesRecord) GetFieldSelector() (fields.Requirements, error) {
+	return a.FieldSelectorRequirements, a.FieldSelectorParsingErr
+}
+
+func (a AttributesRecord) GetLabelSelector() (labels.Requirements, error) {
+	return a.LabelSelectorRequirements, a.LabelSelectorParsingErr
+}
+
+type Decision int
+
+const (
+	// DecisionDeny means that an authorizer decided to deny the action.
+	DecisionDeny Decision = iota
+	// DecisionAllow means that an authorizer decided to allow the action.
+	DecisionAllow
+	// DecisionNoOpinion means that an authorizer has no opinion on whether
+	// to allow or deny an action.
+	DecisionNoOpinion
+)
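
[editor's note] A hedged sketch combining AuthorizerFunc and AttributesRecord: an authorizer that allows read-only verbs and otherwise abstains:

readOnly := authorizer.AuthorizerFunc(func(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
	if a.IsReadOnly() { // get, list, watch
		return authorizer.DecisionAllow, "read-only request", nil
	}
	return authorizer.DecisionNoOpinion, "", nil
})

attrs := authorizer.AttributesRecord{
	User:            &user.DefaultInfo{Name: "jane"},
	Verb:            "list",
	Resource:        "pods",
	ResourceRequest: true,
}
decision, reason, err := readOnly.Authorize(context.Background(), attrs)
fmt.Println(decision == authorizer.DecisionAllow, reason, err) // true read-only request <nil>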
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go b/hack/tools/vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go
new file mode 100644
index 0000000000..8f7d9d9eff
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorizer
+
+type ResourceRuleInfo interface {
+	// GetVerbs returns a list of kubernetes resource API verbs.
+	GetVerbs() []string
+	// GetAPIGroups returns the names of the APIGroups that contain the resources.
+	GetAPIGroups() []string
+	// GetResources returns a list of resources the rule applies to.
+	GetResources() []string
+	// GetResourceNames returns a whitelist of names that the rule applies to.
+	GetResourceNames() []string
+}
+
+// DefaultResourceRuleInfo holds information that describes a rule for a resource.
+type DefaultResourceRuleInfo struct {
+	Verbs         []string
+	APIGroups     []string
+	Resources     []string
+	ResourceNames []string
+}
+
+func (i *DefaultResourceRuleInfo) GetVerbs() []string {
+	return i.Verbs
+}
+
+func (i *DefaultResourceRuleInfo) GetAPIGroups() []string {
+	return i.APIGroups
+}
+
+func (i *DefaultResourceRuleInfo) GetResources() []string {
+	return i.Resources
+}
+
+func (i *DefaultResourceRuleInfo) GetResourceNames() []string {
+	return i.ResourceNames
+}
+
+type NonResourceRuleInfo interface {
+	// GetVerbs returns a list of kubernetes resource API verbs.
+	GetVerbs() []string
+	// GetNonResourceURLs returns a set of partial URLs that a user should have access to.
+	GetNonResourceURLs() []string
+}
+
+// DefaultNonResourceRuleInfo holds information that describes a rule for non-resource URLs.
+type DefaultNonResourceRuleInfo struct {
+	Verbs           []string
+	NonResourceURLs []string
+}
+
+func (i *DefaultNonResourceRuleInfo) GetVerbs() []string {
+	return i.Verbs
+}
+
+func (i *DefaultNonResourceRuleInfo) GetNonResourceURLs() []string {
+	return i.NonResourceURLs
+}
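
[editor's note] A small, hedged sketch of the rule types above, as a RuleResolver implementation might surface them:

var rule authorizer.ResourceRuleInfo = &authorizer.DefaultResourceRuleInfo{
	Verbs:     []string{"get", "list"},
	APIGroups: []string{""},
	Resources: []string{"pods"},
}
fmt.Println(rule.GetVerbs(), rule.GetAPIGroups(), rule.GetResources())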
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/OWNERS b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/OWNERS
new file mode 100644
index 0000000000..f550fc1794
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/OWNERS
@@ -0,0 +1,11 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+# Kubernetes CEL library authors and maintainers
+approvers:
+  - jpbetz
+  - cici37
+  - jiahuif
+reviewers:
+  - jpbetz
+  - cici37
+  - jiahuif
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/cidr.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/cidr.go
new file mode 100644
index 0000000000..8e97f63cd7
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/cidr.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+	"math"
+	"net/netip"
+	"reflect"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// CIDR provides a CEL representation of a network address.
+type CIDR struct {
+	netip.Prefix
+}
+
+var (
+	CIDRType = cel.OpaqueType("net.CIDR")
+)
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (d CIDR) ConvertToNative(typeDesc reflect.Type) (any, error) {
+	if reflect.TypeOf(d.Prefix).AssignableTo(typeDesc) {
+		return d.Prefix, nil
+	}
+	if reflect.TypeOf("").AssignableTo(typeDesc) {
+		return d.Prefix.String(), nil
+	}
+	return nil, fmt.Errorf("type conversion error from 'CIDR' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (d CIDR) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case CIDRType:
+		return d
+	case types.TypeType:
+		return CIDRType
+	case types.StringType:
+		return types.String(d.Prefix.String())
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", CIDRType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (d CIDR) Equal(other ref.Val) ref.Val {
+	otherD, ok := other.(CIDR)
+	if !ok {
+		return types.ValOrErr(other, "no such overload")
+	}
+
+	return types.Bool(d.Prefix == otherD.Prefix)
+}
+
+// Type implements ref.Val.Type.
+func (d CIDR) Type() ref.Type {
+	return CIDRType
+}
+
+// Value implements ref.Val.Value.
+func (d CIDR) Value() any {
+	return d.Prefix
+}
+
+// Size returns the size of the CIDR prefix address in bytes.
+// Used in the size estimation of the runtime cost.
+func (d CIDR) Size() ref.Val {
+	return types.Int(int(math.Ceil(float64(d.Prefix.Bits()) / 8)))
+}
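
[editor's note] A hedged sketch of the CEL CIDR value (apiservercel aliases this package, matching the convention used elsewhere in this diff; netip is from the standard library):

c := apiservercel.CIDR{Prefix: netip.MustParsePrefix("10.0.0.0/8")}
fmt.Println(c.ConvertToType(types.StringType)) // 10.0.0.0/8
fmt.Println(c.Equal(apiservercel.CIDR{Prefix: netip.MustParsePrefix("10.0.0.0/8")})) // true
fmt.Println(c.Size()) // 1: an 8-bit prefix rounds up to one byte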
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/adaptor.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/adaptor.go
new file mode 100644
index 0000000000..dd94e282f4
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/adaptor.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// Schema is the adapted type for an OpenAPI schema that CEL uses.
+// This schema does not cover all OpenAPI fields; only those that CEL requires
+// are exposed as getters.
+type Schema interface {
+	// Type returns the OpenAPI type.
+	// Multiple types are not supported. It should return
+	// empty string if no type is specified.
+	Type() string
+
+	// Format returns the OpenAPI format. May be empty.
+	Format() string
+
+	// Items returns the OpenAPI items schema, or nil if this field does not
+	// exist or contains no schema.
+	Items() Schema
+
+	// Properties returns the OpenAPI properties, or nil if this field does not
+	// exist.
+	// The values of the returned map are of the adapted type.
+	Properties() map[string]Schema
+
+	// AdditionalProperties returns the OpenAPI additional properties field,
+	// or nil if this field does not exist.
+	AdditionalProperties() SchemaOrBool
+
+	// Default returns the OpenAPI default field, or nil if this field does not exist.
+	Default() any
+
+	Validations
+	KubeExtensions
+
+	// WithTypeAndObjectMeta returns a schema that has the type and object meta set.
+	// The type includes the "kind" and "apiVersion" fields;
+	// the "metadata" field requires "name" and "generateName" to be set.
+	// The original schema must not be mutated. Make a copy if necessary.
+	WithTypeAndObjectMeta() Schema
+}
+
+// Validations contains OpenAPI validation that the CEL library uses.
+type Validations interface {
+	Pattern() string
+	Minimum() *float64
+	IsExclusiveMinimum() bool
+	Maximum() *float64
+	IsExclusiveMaximum() bool
+	MultipleOf() *float64
+	MinItems() *int64
+	MaxItems() *int64
+	MinLength() *int64
+	MaxLength() *int64
+	MinProperties() *int64
+	MaxProperties() *int64
+	Required() []string
+	Enum() []any
+	Nullable() bool
+	UniqueItems() bool
+
+	AllOf() []Schema
+	OneOf() []Schema
+	AnyOf() []Schema
+	Not() Schema
+}
+
+// KubeExtensions contains Kubernetes-specific extensions to the OpenAPI schema.
+type KubeExtensions interface {
+	IsXIntOrString() bool
+	IsXEmbeddedResource() bool
+	IsXPreserveUnknownFields() bool
+	XListType() string
+	XListMapKeys() []string
+	XMapType() string
+	XValidations() []ValidationRule
+}
+
+// ValidationRule represents a single x-kubernetes-validations rule.
+type ValidationRule interface {
+	Rule() string
+	Message() string
+	MessageExpression() string
+	FieldPath() string
+}
+
+// SchemaOrBool contains either a schema or a boolean indicating if the object
+// can contain any fields.
+type SchemaOrBool interface {
+	Schema() Schema
+	Allows() bool
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/equality.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/equality.go
new file mode 100644
index 0000000000..9289637a39
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/equality.go
@@ -0,0 +1,334 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"reflect"
+	"time"
+)
+
+// CorrelatedObject represents a node in a tree of objects that are being
+// validated. It is used to keep track of the old value of an object during
+// traversal of the new value. It is also used to cache the results of
+// DeepEqual comparisons between the old and new values of objects.
+//
+// All receiver functions support being called on `nil` to support ergonomic
+// recursive descent. The nil `CorrelatedObject` represents an uncorrelatable
+// node in the tree.
+//
+// CorrelatedObject is not thread-safe. It is the responsibility of the caller
+// to handle concurrency, if any.
+type CorrelatedObject struct {
+	// Currently correlated old value during traversal of the schema/object
+	OldValue interface{}
+
+	// Value being validated
+	Value interface{}
+
+	// Schema used for validation of this value. The schema is also used
+	// to determine how to correlate the old object.
+	Schema Schema
+
+	// Duration spent on ratcheting validation for this object and all of its
+	// children.
+	Duration *time.Duration
+
+	// Scratch space below, may change during validation
+
+	// Cached comparison result of DeepEqual of `value` and `thunk.oldValue`
+	comparisonResult *bool
+
+	// Cached map representation of a map-type list, or nil if not map-type list
+	mapList MapList
+
+	// Children spawned by a call to `Validate` on this object
+	// key is either a string or an index, depending upon whether `value` is
+	// a map or a list, respectively.
+	//
+	// The list of children may be incomplete depending upon if the internal
+	// logic of kube-openapi's SchemaValidator short-circuited before
+	// reaching all of the children.
+	//
+	// It should be expected to have an entry for either all of the children, or
+	// none of them.
+	children map[interface{}]*CorrelatedObject
+}
+
+func NewCorrelatedObject(new, old interface{}, schema Schema) *CorrelatedObject {
+	d := time.Duration(0)
+	return &CorrelatedObject{
+		OldValue: old,
+		Value:    new,
+		Schema:   schema,
+		Duration: &d,
+	}
+}
+
+// If OldValue or Value is not a list, or the index is out of bounds of the
+// Value list, returns nil
+// If oldValue is a list, this considers the x-list-type to decide how to
+// correlate old values:
+//
+// If listType is map, creates a map representation of the list using the designated
+// map-keys, caches it for future calls, and returns the map value, or nil if
+// the correlated key is not in the old map
+//
+// Otherwise, if the list type is not correlatable, this function returns nil.
+func (r *CorrelatedObject) correlateOldValueForChildAtNewIndex(index int) interface{} {
+	oldAsList, ok := r.OldValue.([]interface{})
+	if !ok {
+		return nil
+	}
+
+	asList, ok := r.Value.([]interface{})
+	if !ok {
+		return nil
+	} else if len(asList) <= index {
+		// Cannot correlate out of bounds index
+		return nil
+	}
+
+	listType := r.Schema.XListType()
+	switch listType {
+	case "map":
+		// Look up keys for this index in current object
+		currentElement := asList[index]
+
+		oldList := r.mapList
+		if oldList == nil {
+			oldList = MakeMapList(r.Schema, oldAsList)
+			r.mapList = oldList
+		}
+		return oldList.Get(currentElement)
+
+	case "set":
+		// Are sets correlatable? Only if the old value equals the current value.
+		// We might be able to support this, but do not currently see a lot
+		// of value
+		// (would allow you to add/remove items from sets with ratcheting but not change them)
+		return nil
+	case "":
+		fallthrough
+	case "atomic":
+		// Atomic lists (the default) are not correlatable by item;
+		// ratcheting is not available on a per-index basis.
+		return nil
+	default:
+		// Unrecognized list type. Assume non-correlatable.
+		return nil
+	}
+}
+
+// CachedDeepEqual is equivalent to reflect.DeepEqual, but caches the
+// results in the tree of CorrelatedObject nodes on the way:
+//
+// For objects and arrays, this function will make a best effort to make
+// use of past DeepEqual checks performed by this Node's children, if available.
+//
+// If a lazy computation could not be found for all children possibly due
+// to validation logic short circuiting and skipping the children, then
+// this function simply defers to reflect.DeepEqual.
+func (r *CorrelatedObject) CachedDeepEqual() (res bool) {
+	start := time.Now()
+	defer func() {
+		if r != nil && r.Duration != nil {
+			*r.Duration += time.Since(start)
+		}
+	}()
+
+	if r == nil {
+		// Uncorrelatable node is not considered equal to its old value
+		return false
+	} else if r.comparisonResult != nil {
+		return *r.comparisonResult
+	}
+
+	defer func() {
+		r.comparisonResult = &res
+	}()
+
+	if r.Value == nil && r.OldValue == nil {
+		return true
+	} else if r.Value == nil || r.OldValue == nil {
+		return false
+	}
+
+	oldAsArray, oldIsArray := r.OldValue.([]interface{})
+	newAsArray, newIsArray := r.Value.([]interface{})
+
+	oldAsMap, oldIsMap := r.OldValue.(map[string]interface{})
+	newAsMap, newIsMap := r.Value.(map[string]interface{})
+
+	// If old and new are not the same type, they are not equal
+	if (oldIsArray != newIsArray) || oldIsMap != newIsMap {
+		return false
+	}
+
+	// Objects are known to be of the same type (map, slice, or primitive)
+	switch {
+	case oldIsArray:
+		// Both arrays case. oldIsArray == newIsArray
+		if len(oldAsArray) != len(newAsArray) {
+			return false
+		}
+
+		for i := range newAsArray {
+			child := r.Index(i)
+			if child == nil {
+				if r.mapList == nil {
+					// Treat non-correlatable array as a unit with reflect.DeepEqual
+					return reflect.DeepEqual(oldAsArray, newAsArray)
+				}
+
+				// If the array is correlatable but the old value was not found,
+				// just short-circuit the comparison.
+				return false
+
+			} else if !child.CachedDeepEqual() {
+				// If one child is not equal the entire object is not equal
+				return false
+			}
+		}
+
+		return true
+	case oldIsMap:
+		// Both maps case. oldIsMap == newIsMap
+		if len(oldAsMap) != len(newAsMap) {
+			return false
+		}
+
+		for k := range newAsMap {
+			child := r.Key(k)
+			if child == nil {
+				// Un-correlatable child due to key change.
+				// Objects are not equal.
+				return false
+			} else if !child.CachedDeepEqual() {
+				// If one child is not equal the entire object is not equal
+				return false
+			}
+		}
+
+		return true
+
+	default:
+		// Primitive: use reflect.DeepEqual
+		return reflect.DeepEqual(r.OldValue, r.Value)
+	}
+}
+
+// Key returns the child of the receiver with the given name.
+// Returns nil if the given name does not exist in the new object, or its
+// value is not correlatable to an old value.
+// If receiver is nil or if the new value is not an object/map, returns nil.
+func (r *CorrelatedObject) Key(field string) *CorrelatedObject {
+	start := time.Now()
+	defer func() {
+		if r != nil && r.Duration != nil {
+			*r.Duration += time.Since(start)
+		}
+	}()
+
+	if r == nil || r.Schema == nil {
+		return nil
+	} else if existing, exists := r.children[field]; exists {
+		return existing
+	}
+
+	// Find correlated old value
+	oldAsMap, okOld := r.OldValue.(map[string]interface{})
+	newAsMap, okNew := r.Value.(map[string]interface{})
+	if !okOld || !okNew {
+		return nil
+	}
+
+	oldValueForField, okOld := oldAsMap[field]
+	newValueForField, okNew := newAsMap[field]
+	if !okOld || !okNew {
+		return nil
+	}
+
+	var propertySchema Schema
+	if prop, exists := r.Schema.Properties()[field]; exists {
+		propertySchema = prop
+	} else if addP := r.Schema.AdditionalProperties(); addP != nil && addP.Schema() != nil {
+		propertySchema = addP.Schema()
+	} else {
+		return nil
+	}
+
+	if r.children == nil {
+		r.children = make(map[interface{}]*CorrelatedObject, len(newAsMap))
+	}
+
+	res := &CorrelatedObject{
+		OldValue: oldValueForField,
+		Value:    newValueForField,
+		Schema:   propertySchema,
+		Duration: r.Duration,
+	}
+	r.children[field] = res
+	return res
+}
+
+// Index returns the child of the receiver at the given index.
+// Returns nil if the given index is out of bounds, or its value is not
+// correlatable to an old value.
+// If receiver is nil or if the new value is not an array, returns nil.
+func (r *CorrelatedObject) Index(i int) *CorrelatedObject {
+	start := time.Now()
+	defer func() {
+		if r != nil && r.Duration != nil {
+			*r.Duration += time.Since(start)
+		}
+	}()
+
+	if r == nil || r.Schema == nil {
+		return nil
+	} else if existing, exists := r.children[i]; exists {
+		return existing
+	}
+
+	asList, ok := r.Value.([]interface{})
+	if !ok || len(asList) <= i {
+		return nil
+	}
+
+	oldValueForIndex := r.correlateOldValueForChildAtNewIndex(i)
+	if oldValueForIndex == nil {
+		return nil
+	}
+	var itemSchema Schema
+	if i := r.Schema.Items(); i != nil {
+		itemSchema = i
+	} else {
+		return nil
+	}
+
+	if r.children == nil {
+		r.children = make(map[interface{}]*CorrelatedObject, len(asList))
+	}
+
+	res := &CorrelatedObject{
+		OldValue: oldValueForIndex,
+		Value:    asList[i],
+		Schema:   itemSchema,
+		Duration: r.Duration,
+	}
+	r.children[i] = res
+	return res
+}
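
[editor's note] A hedged sketch of the leaf-value caching, assuming the package is imported as common. A nil Schema is acceptable here because Key/Index are never taken, so the comparison falls through to reflect.DeepEqual and is then cached:

node := common.NewCorrelatedObject(int64(1), int64(1), nil)
fmt.Println(node.CachedDeepEqual()) // true, computed via reflect.DeepEqual
fmt.Println(node.CachedDeepEqual()) // true again, now served from comparisonResult
// node.Duration has accumulated the time spent in both calls.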
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/maplist.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/maplist.go
new file mode 100644
index 0000000000..99fda092e4
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/maplist.go
@@ -0,0 +1,177 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"fmt"
+	"strings"
+)
+
+// MapList provides a "lookup by key" operation for lists (arrays) with x-kubernetes-list-type=map.
+type MapList interface {
+	// Get returns the first element whose key values, for all
+	// x-kubernetes-list-map-keys, match those of the provided object. If the provided object isn't itself a valid
+	// MapList element, Get returns nil.
+	Get(interface{}) interface{}
+}
+
+type keyStrategy interface {
+	// CompositeKeyFor returns a composite key for the provided object, if possible, and a
+	// boolean that indicates whether or not a key could be generated for the provided object.
+	CompositeKeyFor(map[string]interface{}) (interface{}, bool)
+}
+
+// singleKeyStrategy is a cheaper strategy for associative lists that have exactly one key.
+type singleKeyStrategy struct {
+	key string
+}
+
+// CompositeKeyFor directly returns the value of the single key to
+// use as a composite key.
+func (ks *singleKeyStrategy) CompositeKeyFor(obj map[string]interface{}) (interface{}, bool) {
+	v, ok := obj[ks.key]
+	if !ok {
+		return nil, false
+	}
+
+	switch v.(type) {
+	case bool, float64, int64, string:
+		return v, true
+	default:
+		return nil, false // non-scalar
+	}
+}
+
+// multiKeyStrategy computes a composite key of all key values.
+type multiKeyStrategy struct {
+	sts Schema
+}
+
+// CompositeKeyFor returns a composite key computed from the values of all
+// keys.
+func (ks *multiKeyStrategy) CompositeKeyFor(obj map[string]interface{}) (interface{}, bool) {
+	const keyDelimiter = "\x00" // 0 byte should never appear in the composite key except as delimiter
+
+	var delimited strings.Builder
+	for _, key := range ks.sts.XListMapKeys() {
+		v, ok := obj[key]
+		if !ok {
+			return nil, false
+		}
+
+		switch v.(type) {
+		case bool:
+			fmt.Fprintf(&delimited, keyDelimiter+"%t", v)
+		case float64:
+			fmt.Fprintf(&delimited, keyDelimiter+"%f", v)
+		case int64:
+			fmt.Fprintf(&delimited, keyDelimiter+"%d", v)
+		case string:
+			fmt.Fprintf(&delimited, keyDelimiter+"%q", v)
+		default:
+			return nil, false // values must be scalars
+		}
+	}
+	return delimited.String(), true
+}
+
+// emptyMapList is a MapList containing no elements.
+type emptyMapList struct{}
+
+func (emptyMapList) Get(interface{}) interface{} {
+	return nil
+}
+
+type mapListImpl struct {
+	sts Schema
+	ks  keyStrategy
+	// keyedItems contains all lazily keyed map items
+	keyedItems map[interface{}]interface{}
+	// unkeyedItems contains all map items that have not yet been keyed
+	unkeyedItems []interface{}
+}
+
+func (a *mapListImpl) Get(obj interface{}) interface{} {
+	mobj, ok := obj.(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	key, ok := a.ks.CompositeKeyFor(mobj)
+	if !ok {
+		return nil
+	}
+	if match, ok := a.keyedItems[key]; ok {
+		return match
+	}
+	// keep keying items until we either find a match or run out of unkeyed items
+	for len(a.unkeyedItems) > 0 {
+		// dequeue an unkeyed item
+		item := a.unkeyedItems[0]
+		a.unkeyedItems = a.unkeyedItems[1:]
+
+		// key the item
+		mitem, ok := item.(map[string]interface{})
+		if !ok {
+			continue
+		}
+		itemKey, ok := a.ks.CompositeKeyFor(mitem)
+		if !ok {
+			continue
+		}
+		if _, exists := a.keyedItems[itemKey]; !exists {
+			a.keyedItems[itemKey] = mitem
+		}
+
+		// if it matches, short-circuit
+		if itemKey == key {
+			return mitem
+		}
+	}
+
+	return nil
+}
+
+func makeKeyStrategy(sts Schema) keyStrategy {
+	listMapKeys := sts.XListMapKeys()
+	if len(listMapKeys) == 1 {
+		key := listMapKeys[0]
+		return &singleKeyStrategy{
+			key: key,
+		}
+	}
+
+	return &multiKeyStrategy{
+		sts: sts,
+	}
+}
+
+// MakeMapList returns a queryable interface over the provided x-kubernetes-list-type=map
+// items. If the provided schema is _not_ an array with x-kubernetes-list-type=map, returns an
+// empty mapList.
+func MakeMapList(sts Schema, items []interface{}) (rv MapList) {
+	if sts.Type() != "array" || sts.XListType() != "map" || len(sts.XListMapKeys()) == 0 || len(items) == 0 {
+		return emptyMapList{}
+	}
+	ks := makeKeyStrategy(sts)
+	return &mapListImpl{
+		sts:          sts,
+		ks:           ks,
+		keyedItems:   map[interface{}]interface{}{},
+		unkeyedItems: items,
+	}
+}
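
[editor's note] A condensed, self-contained restatement of the composite key that multiKeyStrategy builds (assumes fmt and strings imports). The 0x00 delimiter and per-type formatting, notably %q for strings, prevent collisions such as int64(1) vs "1" or "ab"+"c" vs "a"+"bc":

func compositeKey(obj map[string]interface{}, keys []string) (string, bool) {
	var b strings.Builder
	for _, k := range keys {
		switch v := obj[k].(type) {
		case bool:
			fmt.Fprintf(&b, "\x00%t", v)
		case float64:
			fmt.Fprintf(&b, "\x00%f", v)
		case int64:
			fmt.Fprintf(&b, "\x00%d", v)
		case string:
			fmt.Fprintf(&b, "\x00%q", v) // quoted so a string cannot collide with a number
		default:
			return "", false // missing or non-scalar key values cannot be keyed
		}
	}
	return b.String(), true
}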
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go
new file mode 100644
index 0000000000..19392babeb
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/schemas.go
@@ -0,0 +1,274 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"time"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+
+	apiservercel "k8s.io/apiserver/pkg/cel"
+	"k8s.io/kube-openapi/pkg/validation/spec"
+)
+
+const maxRequestSizeBytes = apiservercel.DefaultMaxRequestSizeBytes
+
+// SchemaDeclType converts the structural schema to a CEL declaration, or returns nil if the
+// structural schema should not be exposed in CEL expressions.
+// Set isResourceRoot to true for the root of a custom resource or embedded resource.
+//
+// Schemas with XPreserveUnknownFields are not exposed unless they are objects. Array and "map" schemas
+// are not exposed if their items or additionalProperties schemas are not exposed. Object Properties are not exposed
+// if their schema is not exposed.
+//
+// The CEL declaration for objects with XPreserveUnknownFields does not expose unknown fields.
+func SchemaDeclType(s Schema, isResourceRoot bool) *apiservercel.DeclType {
+	if s == nil {
+		return nil
+	}
+	if s.IsXIntOrString() {
+		// schemas using XIntOrString are not required to have a type.
+
+		// intOrStringType represents the x-kubernetes-int-or-string union type in CEL expressions.
+		// In CEL, the type is represented as dynamic value, which can be thought of as a union type of all types.
+		// All type checking for XIntOrString is deferred to runtime, so all access to values of this type must
+		// be guarded with a type check, e.g.:
+		//
+		// To require that the string representation be a percentage:
+		//  `type(intOrStringField) == string && intOrStringField.matches(r'(\d+(\.\d+)?%)')`
+		// To validate requirements on both the int and string representation:
+		//  `type(intOrStringField) == int ? intOrStringField < 5 : double(intOrStringField.replace('%', '')) < 0.5`
+		//
+		dyn := apiservercel.NewSimpleTypeWithMinSize("dyn", cel.DynType, nil, 1) // smallest value for a serialized x-kubernetes-int-or-string is 0
+		// handle x-kubernetes-int-or-string by returning the max length/min serialized size of the largest possible string
+		dyn.MaxElements = maxRequestSizeBytes - 2
+		return dyn
+	}
+
+	// We ignore XPreserveUnknownFields since we don't support validation rules on
+	// data that we don't have schema information for.
+
+	if isResourceRoot {
+		// 'apiVersion', 'kind', 'metadata.name' and 'metadata.generateName' are always accessible to validator rules
+		// at the root of resources, even if not specified in the schema.
+		// This includes the root of a custom resource and the root of XEmbeddedResource objects.
+		s = s.WithTypeAndObjectMeta()
+	}
+
+	switch s.Type() {
+	case "array":
+		if s.Items() != nil {
+			itemsType := SchemaDeclType(s.Items(), s.Items().IsXEmbeddedResource())
+			if itemsType == nil {
+				return nil
+			}
+			var maxItems int64
+			if s.MaxItems() != nil {
+				maxItems = zeroIfNegative(*s.MaxItems())
+			} else {
+				maxItems = estimateMaxArrayItemsFromMinSize(itemsType.MinSerializedSize)
+			}
+			return apiservercel.NewListType(itemsType, maxItems)
+		}
+		return nil
+	case "object":
+		if s.AdditionalProperties() != nil && s.AdditionalProperties().Schema() != nil {
+			propsType := SchemaDeclType(s.AdditionalProperties().Schema(), s.AdditionalProperties().Schema().IsXEmbeddedResource())
+			if propsType != nil {
+				var maxProperties int64
+				if s.MaxProperties() != nil {
+					maxProperties = zeroIfNegative(*s.MaxProperties())
+				} else {
+					maxProperties = estimateMaxAdditionalPropertiesFromMinSize(propsType.MinSerializedSize)
+				}
+				return apiservercel.NewMapType(apiservercel.StringType, propsType, maxProperties)
+			}
+			return nil
+		}
+		fields := make(map[string]*apiservercel.DeclField, len(s.Properties()))
+
+		required := map[string]bool{}
+		if s.Required() != nil {
+			for _, f := range s.Required() {
+				required[f] = true
+			}
+		}
+		// an object will always be serialized at least as {}, so account for that
+		minSerializedSize := int64(2)
+		for name, prop := range s.Properties() {
+			var enumValues []interface{}
+			if prop.Enum() != nil {
+				for _, e := range prop.Enum() {
+					enumValues = append(enumValues, e)
+				}
+			}
+			if fieldType := SchemaDeclType(prop, prop.IsXEmbeddedResource()); fieldType != nil {
+				if propName, ok := apiservercel.Escape(name); ok {
+					fields[propName] = apiservercel.NewDeclField(propName, fieldType, required[name], enumValues, prop.Default())
+				}
+				// the min serialized size for an object is 2 (for {}) plus the min size of all its required
+				// properties
+				// only include required properties without a default value; default values are filled in
+				// server-side
+				if required[name] && prop.Default() == nil {
+					minSerializedSize += int64(len(name)) + fieldType.MinSerializedSize + 4
+				}
+			}
+		}
+		objType := apiservercel.NewObjectType("object", fields)
+		objType.MinSerializedSize = minSerializedSize
+		return objType
+	case "string":
+		switch s.Format() {
+		case "byte":
+			byteWithMaxLength := apiservercel.NewSimpleTypeWithMinSize("bytes", cel.BytesType, types.Bytes([]byte{}), apiservercel.MinStringSize)
+			if s.MaxLength() != nil {
+				byteWithMaxLength.MaxElements = zeroIfNegative(*s.MaxLength())
+			} else {
+				byteWithMaxLength.MaxElements = estimateMaxStringLengthPerRequest(s)
+			}
+			return byteWithMaxLength
+		case "duration":
+			durationWithMaxLength := apiservercel.NewSimpleTypeWithMinSize("duration", cel.DurationType, types.Duration{Duration: time.Duration(0)}, int64(apiservercel.MinDurationSizeJSON))
+			durationWithMaxLength.MaxElements = estimateMaxStringLengthPerRequest(s)
+			return durationWithMaxLength
+		case "date":
+			timestampWithMaxLength := apiservercel.NewSimpleTypeWithMinSize("timestamp", cel.TimestampType, types.Timestamp{Time: time.Time{}}, int64(apiservercel.JSONDateSize))
+			timestampWithMaxLength.MaxElements = estimateMaxStringLengthPerRequest(s)
+			return timestampWithMaxLength
+		case "date-time":
+			timestampWithMaxLength := apiservercel.NewSimpleTypeWithMinSize("timestamp", cel.TimestampType, types.Timestamp{Time: time.Time{}}, int64(apiservercel.MinDatetimeSizeJSON))
+			timestampWithMaxLength.MaxElements = estimateMaxStringLengthPerRequest(s)
+			return timestampWithMaxLength
+		}
+
+		strWithMaxLength := apiservercel.NewSimpleTypeWithMinSize("string", cel.StringType, types.String(""), apiservercel.MinStringSize)
+		if s.MaxLength() != nil {
+			// multiply the user-provided max length by 4 in the case of an otherwise-untyped string
+			// we do this because the OpenAPIv3 spec indicates that maxLength is specified in runes/code points,
+			// but we need to reason about length for things like request size, so we use bytes in this code (and an individual
+			// unicode code point can be up to 4 bytes long)
+			strWithMaxLength.MaxElements = zeroIfNegative(*s.MaxLength()) * 4
+		} else {
+			if len(s.Enum()) > 0 {
+				strWithMaxLength.MaxElements = estimateMaxStringEnumLength(s)
+			} else {
+				strWithMaxLength.MaxElements = estimateMaxStringLengthPerRequest(s)
+			}
+		}
+		return strWithMaxLength
+	case "boolean":
+		return apiservercel.BoolType
+	case "number":
+		return apiservercel.DoubleType
+	case "integer":
+		return apiservercel.IntType
+	}
+	return nil
+}
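
The `* 4` multiplier above hinges on the difference between code points and bytes; a minimal standalone sketch, using only the standard library:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	s := "日本語" // three code points, nine UTF-8 bytes
	fmt.Println(utf8.RuneCountInString(s)) // 3: what OpenAPI maxLength constrains
	fmt.Println(len(s))                    // 9: what request-size accounting sees
	// a single code point can occupy up to 4 bytes in UTF-8, so maxLength*4
	// is a safe upper bound on the byte length
	fmt.Println(len("𝔘")) // 4
}
```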
+
+func zeroIfNegative(v int64) int64 {
+	if v < 0 {
+		return 0
+	}
+	return v
+}
+
+// WithTypeAndObjectMeta ensures the kind, apiVersion, metadata.name and
+// metadata.generateName properties are specified, making a shallow copy of the provided schema if needed.
+func WithTypeAndObjectMeta(s *spec.Schema) *spec.Schema {
+	if s.Properties != nil &&
+		s.Properties["kind"].Type.Contains("string") &&
+		s.Properties["apiVersion"].Type.Contains("string") &&
+		s.Properties["metadata"].Type.Contains("object") &&
+		s.Properties["metadata"].Properties != nil &&
+		s.Properties["metadata"].Properties["name"].Type.Contains("string") &&
+		s.Properties["metadata"].Properties["generateName"].Type.Contains("string") {
+		return s
+	}
+	result := *s
+	props := make(map[string]spec.Schema, len(s.Properties))
+	for k, prop := range s.Properties {
+		props[k] = prop
+	}
+	stringType := spec.StringProperty()
+	props["kind"] = *stringType
+	props["apiVersion"] = *stringType
+	props["metadata"] = spec.Schema{
+		SchemaProps: spec.SchemaProps{
+			Type: []string{"object"},
+			Properties: map[string]spec.Schema{
+				"name":         *stringType,
+				"generateName": *stringType,
+			},
+		},
+	}
+	result.Properties = props
+
+	return &result
+}
+
+// estimateMaxStringLengthPerRequest estimates the maximum string length (in characters)
+// of a string compatible with the format requirements in the provided schema.
+// must only be called on schemas of type "string" or x-kubernetes-int-or-string: true
+func estimateMaxStringLengthPerRequest(s Schema) int64 {
+	if s.IsXIntOrString() {
+		return maxRequestSizeBytes - 2
+	}
+	switch s.Format() {
+	case "duration":
+		return apiservercel.MaxDurationSizeJSON
+	case "date":
+		return apiservercel.JSONDateSize
+	case "date-time":
+		return apiservercel.MaxDatetimeSizeJSON
+	default:
+		// subtract 2 to account for ""
+		return maxRequestSizeBytes - 2
+	}
+}
+
+// estimateMaxStringEnumLength estimates the maximum string length (in characters)
+// of a string restricted to a set of enum values.
+// The result of the estimation is the length of the longest enum value.
+func estimateMaxStringEnumLength(s Schema) int64 {
+	var maxLength int64
+	for _, v := range s.Enum() {
+		if s, ok := v.(string); ok && int64(len(s)) > maxLength {
+			maxLength = int64(len(s))
+		}
+	}
+	return maxLength
+}
+
+// estimateMaxArrayItemsFromMinSize estimates the maximum number of array items with
+// the provided minimum serialized size that can fit into a single request.
+func estimateMaxArrayItemsFromMinSize(minSize int64) int64 {
+	// subtract 2 to account for [ and ]
+	return (maxRequestSizeBytes - 2) / (minSize + 1)
+}
+
+// estimateMaxAdditionalPropertiesFromMinSize estimates the maximum number of additional properties
+// with the provided minimum serialized size that can fit into a single request.
+func estimateMaxAdditionalPropertiesFromMinSize(minSize int64) int64 {
+	// 2 bytes for key + "" + colon + comma + smallest possible value, realistically the actual keys
+	// will all vary in length
+	keyValuePairSize := minSize + 6
+	// subtract 2 to account for { and }
+	return (maxRequestSizeBytes - 2) / keyValuePairSize
+}
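
To make the two estimators above concrete, a small worked example; the `maxRequestSizeBytes` value here is an assumption standing in for the package's real constant (the apiserver's 3MB write-request limit), and the two functions are copied from the file so the example is self-contained:

```go
package main

import "fmt"

// Assumed stand-in for the vendored package's maxRequestSizeBytes constant.
const maxRequestSizeBytes = int64(3 * 1024 * 1024)

func estimateMaxArrayItemsFromMinSize(minSize int64) int64 {
	// subtract 2 for [ and ]; +1 per item for the separating comma
	return (maxRequestSizeBytes - 2) / (minSize + 1)
}

func estimateMaxAdditionalPropertiesFromMinSize(minSize int64) int64 {
	// 6 bytes of per-entry overhead: a 2-byte key, two quotes, colon, comma
	keyValuePairSize := minSize + 6
	return (maxRequestSizeBytes - 2) / keyValuePairSize
}

func main() {
	// a boolean serializes as at least 4 bytes ("true")
	fmt.Println(estimateMaxArrayItemsFromMinSize(4))           // 629145
	fmt.Println(estimateMaxAdditionalPropertiesFromMinSize(4)) // 314572
}
```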
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/typeprovider.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/typeprovider.go
new file mode 100644
index 0000000000..685a585c70
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/typeprovider.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// TypeResolver resolves a type by a given name.
+type TypeResolver interface {
+	// Resolve resolves the type by its name.
+	// This function returns false if the name does not refer to a known object type.
+	Resolve(name string) (ResolvedType, bool)
+}
+
+// ResolvedType refers to an object type that can be looked up for its fields.
+type ResolvedType interface {
+	ref.Type
+
+	Type() *types.Type
+
+	// Field finds the field by the field name, or false if the field is not known.
+	// This function directly returns a FieldType that is known to CEL, which makes it more customizable.
+	Field(name string) (*types.FieldType, bool)
+
+	// FieldNames returns the field names associated with the type, if the type
+	// is found.
+	FieldNames() ([]string, bool)
+
+	// Val creates an instance for the ResolvedType, given its fields and their values.
+	Val(fields map[string]ref.Val) ref.Val
+}
+
+// ResolverTypeProvider delegates type resolution first to the TypeResolver and then
+// to the underlying types.Provider for types not resolved by the TypeResolver.
+type ResolverTypeProvider struct {
+	typeResolver           TypeResolver
+	underlyingTypeProvider types.Provider
+}
+
+var _ types.Provider = (*ResolverTypeProvider)(nil)
+
+// FindStructType returns the Type given a qualified type name, by looking it up with
+// the TypeResolver and translating it to a CEL Type.
+// If the type is not known to the TypeResolver, the lookup falls back to the underlying
+// types.Provider instead.
+func (p *ResolverTypeProvider) FindStructType(structType string) (*types.Type, bool) {
+	t, ok := p.typeResolver.Resolve(structType)
+	if ok {
+		return types.NewTypeTypeWithParam(t.Type()), true
+	}
+	return p.underlyingTypeProvider.FindStructType(structType)
+}
+
+// FindStructFieldNames returns the field names associated with the type, if the type
+// is found.
+func (p *ResolverTypeProvider) FindStructFieldNames(structType string) ([]string, bool) {
+	t, ok := p.typeResolver.Resolve(structType)
+	if ok {
+		return t.FieldNames()
+	}
+	return p.underlyingTypeProvider.FindStructFieldNames(structType)
+}
+
+// FindStructFieldType returns the field type for a checked type value.
+// Returns false if the field could not be found.
+func (p *ResolverTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) {
+	t, ok := p.typeResolver.Resolve(structType)
+	if ok {
+		return t.Field(fieldName)
+	}
+	return p.underlyingTypeProvider.FindStructFieldType(structType, fieldName)
+}
+
+// NewValue creates a new type value from a qualified name and map of fields.
+func (p *ResolverTypeProvider) NewValue(structType string, fields map[string]ref.Val) ref.Val {
+	t, ok := p.typeResolver.Resolve(structType)
+	if ok {
+		return t.Val(fields)
+	}
+	return p.underlyingTypeProvider.NewValue(structType, fields)
+}
+
+func (p *ResolverTypeProvider) EnumValue(enumName string) ref.Val {
+	return p.underlyingTypeProvider.EnumValue(enumName)
+}
+
+func (p *ResolverTypeProvider) FindIdent(identName string) (ref.Val, bool) {
+	return p.underlyingTypeProvider.FindIdent(identName)
+}
+
+// ResolverEnvOption creates a ResolverTypeProvider with the given TypeResolver,
+// and returns the CEL EnvOption that applies it to the env.
+func ResolverEnvOption(resolver TypeResolver) cel.EnvOption {
+	_, envOpt := NewResolverTypeProviderAndEnvOption(resolver)
+	return envOpt
+}
+
+// NewResolverTypeProviderAndEnvOption creates a ResolverTypeProvider with the given TypeResolver,
+// and also returns the CEL EnvOption that applies it to the env.
+func NewResolverTypeProviderAndEnvOption(resolver TypeResolver) (*ResolverTypeProvider, cel.EnvOption) {
+	tp := &ResolverTypeProvider{typeResolver: resolver}
+	var envOption cel.EnvOption = func(e *cel.Env) (*cel.Env, error) {
+		// wrap the existing type provider (acquired from the env)
+		// and set new type provider for the env.
+		tp.underlyingTypeProvider = e.CELTypeProvider()
+		typeProviderOption := cel.CustomTypeProvider(tp)
+		return typeProviderOption(e)
+	}
+	return tp, envOption
+}
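
A minimal sketch of how this provider is intended to be wired into an environment, using only the APIs defined in this file; the helper name `newEnvWithResolver` is illustrative:

```go
package example

import (
	"github.com/google/cel-go/cel"
	"k8s.io/apiserver/pkg/cel/common"
)

// newEnvWithResolver wires a TypeResolver into a fresh CEL environment.
// Unknown type names still fall through to the env's default provider,
// because the option wraps the existing provider rather than replacing it.
func newEnvWithResolver(resolver common.TypeResolver) (*cel.Env, error) {
	return cel.NewEnv(common.ResolverEnvOption(resolver))
}
```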
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/values.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/values.go
new file mode 100644
index 0000000000..c8279f0137
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/common/values.go
@@ -0,0 +1,721 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+
+	"k8s.io/kube-openapi/pkg/validation/strfmt"
+
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apiserver/pkg/cel"
+)
+
+// UnstructuredToVal converts a Kubernetes unstructured data element to a CEL Val.
+// The root schema of a custom resource is expected to contain type meta and object meta schemas.
+// If embedded resources do not contain type meta and object meta schemas, they will be added automatically.
+func UnstructuredToVal(unstructured interface{}, schema Schema) ref.Val {
+	if unstructured == nil {
+		if schema.Nullable() {
+			return types.NullValue
+		}
+		return types.NewErr("invalid data, got null for schema with nullable=false")
+	}
+	if schema.IsXIntOrString() {
+		switch v := unstructured.(type) {
+		case string:
+			return types.String(v)
+		case int:
+			return types.Int(v)
+		case int32:
+			return types.Int(v)
+		case int64:
+			return types.Int(v)
+		}
+		return types.NewErr("invalid data, expected XIntOrString value to be either a string or integer")
+	}
+	if schema.Type() == "object" {
+		m, ok := unstructured.(map[string]interface{})
+		if !ok {
+			return types.NewErr("invalid data, expected a map for the provided schema with type=object")
+		}
+		if schema.IsXEmbeddedResource() || schema.Properties() != nil {
+			if schema.IsXEmbeddedResource() {
+				schema = schema.WithTypeAndObjectMeta()
+			}
+			return &unstructuredMap{
+				value:  m,
+				schema: schema,
+				propSchema: func(key string) (Schema, bool) {
+					if schema, ok := schema.Properties()[key]; ok {
+						return schema, true
+					}
+					return nil, false
+				},
+			}
+		}
+		if schema.AdditionalProperties() != nil && schema.AdditionalProperties().Schema() != nil {
+			return &unstructuredMap{
+				value:  m,
+				schema: schema,
+				propSchema: func(key string) (Schema, bool) {
+					return schema.AdditionalProperties().Schema(), true
+				},
+			}
+		}
+
+		// properties and additionalProperties are mutually exclusive, but nothing prevents the situation
+		// where both are missing.
+		// An object that (1) has no properties (2) has no additionalProperties or additionalProperties == false
+		// is treated as an empty object.
+		// An object that has additionalProperties == true is treated as an unstructured map.
+		// An object that has x-kubernetes-preserve-unknown-field extension set is treated as an unstructured map.
+		// Empty object vs unstructured map is differentiated by the unstructuredMap implementation based on the schema it is given.
+		// The resulting behavior is the same either way.
+		return &unstructuredMap{
+			value:  m,
+			schema: schema,
+			propSchema: func(key string) (Schema, bool) {
+				return nil, false
+			},
+		}
+	}
+
+	if schema.Type() == "array" {
+		l, ok := unstructured.([]interface{})
+		if !ok {
+			return types.NewErr("invalid data, expected an array for the provided schema with type=array")
+		}
+		if schema.Items() == nil {
+			return types.NewErr("invalid array type, expected Items with a non-empty Schema")
+		}
+		typedList := unstructuredList{elements: l, itemsSchema: schema.Items()}
+		listType := schema.XListType()
+		if listType != "" {
+			switch listType {
+			case "map":
+				mapKeys := schema.XListMapKeys()
+				return &unstructuredMapList{unstructuredList: typedList, escapedKeyProps: escapeKeyProps(mapKeys)}
+			case "set":
+				return &unstructuredSetList{unstructuredList: typedList}
+			case "atomic":
+				return &typedList
+			default:
+				return types.NewErr("invalid x-kubernetes-list-type, expected 'map', 'set' or 'atomic' but got %s", listType)
+			}
+		}
+		return &typedList
+	}
+
+	if schema.Type() == "string" {
+		str, ok := unstructured.(string)
+		if !ok {
+			return types.NewErr("invalid data, expected string, got %T", unstructured)
+		}
+		switch schema.Format() {
+		case "duration":
+			d, err := strfmt.ParseDuration(str)
+			if err != nil {
+				return types.NewErr("Invalid duration %s: %v", str, err)
+			}
+			return types.Duration{Duration: d}
+		case "date":
+			d, err := time.Parse(strfmt.RFC3339FullDate, str) // strfmt uses this format for OpenAPIv3 value validation
+			if err != nil {
+				return types.NewErr("Invalid date formatted string %s: %v", str, err)
+			}
+			return types.Timestamp{Time: d}
+		case "date-time":
+			d, err := strfmt.ParseDateTime(str)
+			if err != nil {
+				return types.NewErr("Invalid date-time formatted string %s: %v", str, err)
+			}
+			return types.Timestamp{Time: time.Time(d)}
+		case "byte":
+			base64 := strfmt.Base64{}
+			err := base64.UnmarshalText([]byte(str))
+			if err != nil {
+				return types.NewErr("Invalid byte formatted string %s: %v", str, err)
+			}
+			return types.Bytes(base64)
+		}
+
+		return types.String(str)
+	}
+	if schema.Type() == "number" {
+		switch v := unstructured.(type) {
+		// float representations of whole numbers (e.g. 1.0, 0.0) can convert to int representations (e.g. 1, 0) in yaml
+		// to json translation, and then get parsed as int64s
+		case int:
+			return types.Double(v)
+		case int32:
+			return types.Double(v)
+		case int64:
+			return types.Double(v)
+
+		case float32:
+			return types.Double(v)
+		case float64:
+			return types.Double(v)
+		default:
+			return types.NewErr("invalid data, expected float, got %T", unstructured)
+		}
+	}
+	if schema.Type() == "integer" {
+		switch v := unstructured.(type) {
+		case int:
+			return types.Int(v)
+		case int32:
+			return types.Int(v)
+		case int64:
+			return types.Int(v)
+		default:
+			return types.NewErr("invalid data, expected int, got %T", unstructured)
+		}
+	}
+	if schema.Type() == "boolean" {
+		b, ok := unstructured.(bool)
+		if !ok {
+			return types.NewErr("invalid data, expected bool, got %T", unstructured)
+		}
+		return types.Bool(b)
+	}
+
+	if schema.IsXPreserveUnknownFields() {
+		return &unknownPreserved{u: unstructured}
+	}
+
+	return types.NewErr("invalid type, expected object, array, number, integer, boolean or string, or no type with x-kubernetes-int-or-string or x-kubernetes-preserve-unknown-fields is true, got %s", schema.Type())
+}
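
A short usage sketch of `UnstructuredToVal`, assuming the sibling adaptor package `k8s.io/apiserver/pkg/cel/openapi` (which wraps `*spec.Schema` to implement the `Schema` interface) is vendored alongside this one:

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/cel/common"
	"k8s.io/apiserver/pkg/cel/openapi"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	intSchema := &openapi.Schema{Schema: &spec.Schema{
		SchemaProps: spec.SchemaProps{Type: []string{"integer"}},
	}}

	// JSON numbers decoded as int64 convert to a CEL int
	fmt.Println(common.UnstructuredToVal(int64(42), intSchema).Value()) // 42

	// type mismatches surface as CEL error values rather than panics
	fmt.Println(common.UnstructuredToVal("nope", intSchema)) // invalid data, expected int, got string
}
```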
+
+// unknownPreserved represents unknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields.
+// It preserves the data at runtime without assuming it is of any particular type and supports only equality checking.
+// unknownPreserved should be used only for values that are not directly accessible in CEL expressions, i.e. for data
+// where there is no corresponding CEL type declaration.
+type unknownPreserved struct {
+	u interface{}
+}
+
+func (t *unknownPreserved) ConvertToNative(refType reflect.Type) (interface{}, error) {
+	return nil, fmt.Errorf("type conversion to '%s' not supported for values preserved by x-kubernetes-preserve-unknown-fields", refType)
+}
+
+func (t *unknownPreserved) ConvertToType(typeValue ref.Type) ref.Val {
+	return types.NewErr("type conversion to '%s' not supported for values preserved by x-kubernetes-preserve-unknown-fields", typeValue.TypeName())
+}
+
+func (t *unknownPreserved) Equal(other ref.Val) ref.Val {
+	return types.Bool(equality.Semantic.DeepEqual(t.u, other.Value()))
+}
+
+func (t *unknownPreserved) Type() ref.Type {
+	return types.UnknownType
+}
+
+func (t *unknownPreserved) Value() interface{} {
+	return t.u // used by Equal checks
+}
+
+// unstructuredMapList represents an unstructured data instance of an OpenAPI array with x-kubernetes-list-type=map.
+type unstructuredMapList struct {
+	unstructuredList
+	escapedKeyProps []string
+
+	sync.Once // for lazy load of mapOfList, since it is only needed if Equal is called
+	mapOfList map[interface{}]interface{}
+}
+
+func (t *unstructuredMapList) getMap() map[interface{}]interface{} {
+	t.Do(func() {
+		t.mapOfList = make(map[interface{}]interface{}, len(t.elements))
+		for _, e := range t.elements {
+			t.mapOfList[t.toMapKey(e)] = e
+		}
+	})
+	return t.mapOfList
+}
+
+// toMapKey returns a valid golang map key for the given element of the map list.
+// element must be a valid map list entry where all map key props are scalar types (which are comparable in go
+// and valid for use in a golang map key).
+func (t *unstructuredMapList) toMapKey(element interface{}) interface{} {
+	eObj, ok := element.(map[string]interface{})
+	if !ok {
+		return types.NewErr("unexpected data format for element of array with x-kubernetes-list-type=map: %T", element)
+	}
+	// Arrays are comparable in go and may be used as map keys, but maps and slices are not.
+	// So we can special case small numbers of key props as arrays and fall back to serialization
+	// for larger numbers of key props.
+	if len(t.escapedKeyProps) == 1 {
+		return eObj[t.escapedKeyProps[0]]
+	}
+	if len(t.escapedKeyProps) == 2 {
+		return [2]interface{}{eObj[t.escapedKeyProps[0]], eObj[t.escapedKeyProps[1]]}
+	}
+	if len(t.escapedKeyProps) == 3 {
+		return [3]interface{}{eObj[t.escapedKeyProps[0]], eObj[t.escapedKeyProps[1]], eObj[t.escapedKeyProps[2]]}
+	}
+
+	key := make([]interface{}, len(t.escapedKeyProps))
+	for i, kf := range t.escapedKeyProps {
+		key[i] = eObj[kf]
+	}
+	return fmt.Sprintf("%v", key)
+}
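
The array-as-map-key trick used by `toMapKey` relies on Go arrays being comparable; a self-contained demonstration:

```go
package main

import "fmt"

func main() {
	// Go arrays are comparable and therefore valid map keys, unlike
	// slices and maps; this is what lets toMapKey pack 2-3 key props
	// into fixed-size arrays instead of serializing them.
	index := map[interface{}]int{}
	index[[2]interface{}{"default", "web"}] = 0
	fmt.Println(index[[2]interface{}{"default", "web"}]) // 0, found by value equality
	_, ok := index[[2]interface{}{"default", "db"}]
	fmt.Println(ok) // false
}
```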
+
+// Equal on a map list ignores list element order.
+func (t *unstructuredMapList) Equal(other ref.Val) ref.Val {
+	oMapList, ok := other.(traits.Lister)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	sz := types.Int(len(t.elements))
+	if sz != oMapList.Size() {
+		return types.False
+	}
+	tMap := t.getMap()
+	for it := oMapList.Iterator(); it.HasNext() == types.True; {
+		v := it.Next()
+		k := t.toMapKey(v.Value())
+		tVal, ok := tMap[k]
+		if !ok {
+			return types.False
+		}
+		eq := UnstructuredToVal(tVal, t.itemsSchema).Equal(v)
+		if eq != types.True {
+			return eq // either false or error
+		}
+	}
+	return types.True
+}
+
+// Add for a map list `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
+// are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
+// non-intersecting keys are appended, retaining their partial order.
+func (t *unstructuredMapList) Add(other ref.Val) ref.Val {
+	oMapList, ok := other.(traits.Lister)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	elements := make([]interface{}, len(t.elements))
+	keyToIdx := map[interface{}]int{}
+	for i, e := range t.elements {
+		k := t.toMapKey(e)
+		keyToIdx[k] = i
+		elements[i] = e
+	}
+	for it := oMapList.Iterator(); it.HasNext() == types.True; {
+		v := it.Next().Value()
+		k := t.toMapKey(v)
+		if overwritePosition, ok := keyToIdx[k]; ok {
+			elements[overwritePosition] = v
+		} else {
+			elements = append(elements, v)
+		}
+	}
+	return &unstructuredMapList{
+		unstructuredList: unstructuredList{elements: elements, itemsSchema: t.itemsSchema},
+		escapedKeyProps:  t.escapedKeyProps,
+	}
+}
+
+// escapeKeyProps returns identifiers with Escape applied to each.
+// Identifiers that cannot be escaped are left as-is. They are inaccessible to CEL programs but
+// are still needed internally to perform equality checks.
+func escapeKeyProps(idents []string) []string {
+	result := make([]string, len(idents))
+	for i, prop := range idents {
+		if escaped, ok := cel.Escape(prop); ok {
+			result[i] = escaped
+		} else {
+			result[i] = prop
+		}
+	}
+	return result
+}
+
+// unstructuredSetList represents an unstructured data instance of an OpenAPI array with x-kubernetes-list-type=set.
+type unstructuredSetList struct {
+	unstructuredList
+	escapedKeyProps []string
+
+	sync.Once // for lazy load of set, since it is only needed if Equal is called
+	set       map[interface{}]struct{}
+}
+
+func (t *unstructuredSetList) getSet() map[interface{}]struct{} {
+	// sets are only allowed to contain scalar elements, which are comparable in go, and can safely be used as
+	// golang map keys
+	t.Do(func() {
+		t.set = make(map[interface{}]struct{}, len(t.elements))
+		for _, e := range t.elements {
+			t.set[e] = struct{}{}
+		}
+	})
+	return t.set
+}
+
+// Equal on a set list ignores list element order.
+func (t *unstructuredSetList) Equal(other ref.Val) ref.Val {
+	oSetList, ok := other.(traits.Lister)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	sz := types.Int(len(t.elements))
+	if sz != oSetList.Size() {
+		return types.False
+	}
+	tSet := t.getSet()
+	for it := oSetList.Iterator(); it.HasNext() == types.True; {
+		next := it.Next().Value()
+		_, ok := tSet[next]
+		if !ok {
+			return types.False
+		}
+	}
+	return types.True
+}
+
+// Add for a set list `X + Y` performs a union where the array positions of all elements in `X` are preserved and
+// non-intersecting elements in `Y` are appended, retaining their partial order.
+func (t *unstructuredSetList) Add(other ref.Val) ref.Val {
+	oSetList, ok := other.(traits.Lister)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	elements := t.elements
+	set := t.getSet()
+	for it := oSetList.Iterator(); it.HasNext() == types.True; {
+		next := it.Next().Value()
+		if _, ok := set[next]; !ok {
+			set[next] = struct{}{}
+			elements = append(elements, next)
+		}
+	}
+	return &unstructuredSetList{
+		unstructuredList: unstructuredList{elements: elements, itemsSchema: t.itemsSchema},
+		escapedKeyProps:  t.escapedKeyProps,
+	}
+}
+
+// unstructuredList represents an unstructured data instance of an OpenAPI array with x-kubernetes-list-type=atomic (the default).
+type unstructuredList struct {
+	elements    []interface{}
+	itemsSchema Schema
+}
+
+var _ = traits.Lister(&unstructuredList{})
+
+func (t *unstructuredList) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	switch typeDesc.Kind() {
+	case reflect.Slice:
+		switch t.itemsSchema.Type() {
+		// Workaround for https://github.com/kubernetes/kubernetes/issues/117590 until we
+		// resolve the desired behavior in cel-go via https://github.com/google/cel-go/issues/688
+		case "string":
+			var result []string
+			for _, e := range t.elements {
+				s, ok := e.(string)
+				if !ok {
+					return nil, fmt.Errorf("expected all elements to be of type string, but got %T", e)
+				}
+				result = append(result, s)
+			}
+			return result, nil
+		default:
+			return t.elements, nil
+		}
+	}
+	return nil, fmt.Errorf("type conversion error from '%s' to '%s'", t.Type(), typeDesc)
+}
+
+func (t *unstructuredList) ConvertToType(typeValue ref.Type) ref.Val {
+	switch typeValue {
+	case types.ListType:
+		return t
+	case types.TypeType:
+		return types.ListType
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", t.Type(), typeValue.TypeName())
+}
+
+func (t *unstructuredList) Equal(other ref.Val) ref.Val {
+	oList, ok := other.(traits.Lister)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	sz := types.Int(len(t.elements))
+	if sz != oList.Size() {
+		return types.False
+	}
+	for i := types.Int(0); i < sz; i++ {
+		eq := t.Get(i).Equal(oList.Get(i))
+		if eq != types.True {
+			return eq // either false or error
+		}
+	}
+	return types.True
+}
+
+func (t *unstructuredList) Type() ref.Type {
+	return types.ListType
+}
+
+func (t *unstructuredList) Value() interface{} {
+	return t.elements
+}
+
+func (t *unstructuredList) Add(other ref.Val) ref.Val {
+	oList, ok := other.(traits.Lister)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	elements := t.elements
+	for it := oList.Iterator(); it.HasNext() == types.True; {
+		next := it.Next().Value()
+		elements = append(elements, next)
+	}
+
+	return &unstructuredList{elements: elements, itemsSchema: t.itemsSchema}
+}
+
+func (t *unstructuredList) Contains(val ref.Val) ref.Val {
+	if types.IsUnknownOrError(val) {
+		return val
+	}
+	var err ref.Val
+	sz := len(t.elements)
+	for i := 0; i < sz; i++ {
+		elem := UnstructuredToVal(t.elements[i], t.itemsSchema)
+		cmp := elem.Equal(val)
+		b, ok := cmp.(types.Bool)
+		if !ok && err == nil {
+			err = types.MaybeNoSuchOverloadErr(cmp)
+		}
+		if b == types.True {
+			return types.True
+		}
+	}
+	if err != nil {
+		return err
+	}
+	return types.False
+}
+
+func (t *unstructuredList) Get(idx ref.Val) ref.Val {
+	iv, isInt := idx.(types.Int)
+	if !isInt {
+		return types.ValOrErr(idx, "unsupported index: %v", idx)
+	}
+	i := int(iv)
+	if i < 0 || i >= len(t.elements) {
+		return types.NewErr("index out of bounds: %v", idx)
+	}
+	return UnstructuredToVal(t.elements[i], t.itemsSchema)
+}
+
+func (t *unstructuredList) Iterator() traits.Iterator {
+	items := make([]ref.Val, len(t.elements))
+	for i, item := range t.elements {
+		itemCopy := item
+		items[i] = UnstructuredToVal(itemCopy, t.itemsSchema)
+	}
+	return &listIterator{unstructuredList: t, items: items}
+}
+
+type listIterator struct {
+	*unstructuredList
+	items []ref.Val
+	idx   int
+}
+
+func (it *listIterator) HasNext() ref.Val {
+	return types.Bool(it.idx < len(it.items))
+}
+
+func (it *listIterator) Next() ref.Val {
+	item := it.items[it.idx]
+	it.idx++
+	return item
+}
+
+func (t *unstructuredList) Size() ref.Val {
+	return types.Int(len(t.elements))
+}
+
+// unstructuredMap represents an unstructured data instance of an OpenAPI object.
+type unstructuredMap struct {
+	value  map[string]interface{}
+	schema Schema
+	// propSchema finds the schema to use for a particular map key.
+	propSchema func(key string) (Schema, bool)
+}
+
+var _ = traits.Mapper(&unstructuredMap{})
+
+func (t *unstructuredMap) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	switch typeDesc.Kind() {
+	case reflect.Map:
+		return t.value, nil
+	}
+	return nil, fmt.Errorf("type conversion error from '%s' to '%s'", t.Type(), typeDesc)
+}
+
+func (t *unstructuredMap) ConvertToType(typeValue ref.Type) ref.Val {
+	switch typeValue {
+	case types.MapType:
+		return t
+	case types.TypeType:
+		return types.MapType
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", t.Type(), typeValue.TypeName())
+}
+
+func (t *unstructuredMap) Equal(other ref.Val) ref.Val {
+	oMap, isMap := other.(traits.Mapper)
+	if !isMap {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	if t.Size() != oMap.Size() {
+		return types.False
+	}
+	for key, value := range t.value {
+		if propSchema, ok := t.propSchema(key); ok {
+			ov, found := oMap.Find(types.String(key))
+			if !found {
+				return types.False
+			}
+			v := UnstructuredToVal(value, propSchema)
+			vEq := v.Equal(ov)
+			if vEq != types.True {
+				return vEq // either false or error
+			}
+		} else {
+			// Must be an object with properties.
+			// Since we've encountered an unknown field, fallback to unstructured equality checking.
+			ouMap, ok := other.(*unstructuredMap)
+			if !ok {
+				// The compiler ensures equality is against the same type of object, so this should be unreachable
+				return types.MaybeNoSuchOverloadErr(other)
+			}
+			if oValue, ok := ouMap.value[key]; ok {
+				if !equality.Semantic.DeepEqual(value, oValue) {
+					return types.False
+				}
+			}
+		}
+	}
+	return types.True
+}
+
+func (t *unstructuredMap) Type() ref.Type {
+	return types.MapType
+}
+
+func (t *unstructuredMap) Value() interface{} {
+	return t.value
+}
+
+func (t *unstructuredMap) Contains(key ref.Val) ref.Val {
+	v, found := t.Find(key)
+	if v != nil && types.IsUnknownOrError(v) {
+		return v
+	}
+
+	return types.Bool(found)
+}
+
+func (t *unstructuredMap) Get(key ref.Val) ref.Val {
+	v, found := t.Find(key)
+	if found {
+		return v
+	}
+	return types.ValOrErr(key, "no such key: %v", key)
+}
+
+func (t *unstructuredMap) Iterator() traits.Iterator {
+	isObject := t.schema.Properties() != nil
+	keys := make([]ref.Val, len(t.value))
+	i := 0
+	for k := range t.value {
+		if _, ok := t.propSchema(k); ok {
+			mapKey := k
+			if isObject {
+				if escaped, ok := cel.Escape(k); ok {
+					mapKey = escaped
+				}
+			}
+			keys[i] = types.String(mapKey)
+			i++
+		}
+	}
+	return &mapIterator{unstructuredMap: t, keys: keys}
+}
+
+type mapIterator struct {
+	*unstructuredMap
+	keys []ref.Val
+	idx  int
+}
+
+func (it *mapIterator) HasNext() ref.Val {
+	return types.Bool(it.idx < len(it.keys))
+}
+
+func (it *mapIterator) Next() ref.Val {
+	key := it.keys[it.idx]
+	it.idx++
+	return key
+}
+
+func (t *unstructuredMap) Size() ref.Val {
+	return types.Int(len(t.value))
+}
+
+func (t *unstructuredMap) Find(key ref.Val) (ref.Val, bool) {
+	isObject := t.schema.Properties() != nil
+	keyStr, ok := key.(types.String)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(key), true
+	}
+	k := keyStr.Value().(string)
+	if isObject {
+		k, ok = cel.Unescape(k)
+		if !ok {
+			return nil, false
+		}
+	}
+	if v, ok := t.value[k]; ok {
+		// If this is an object with properties, not an object with additionalProperties,
+		// then null valued nullable fields are treated the same as absent optional fields.
+		if isObject && v == nil {
+			return nil, false
+		}
+		if propSchema, ok := t.propSchema(k); ok {
+			return UnstructuredToVal(v, propSchema), true
+		}
+	}
+
+	return nil, false
+}
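
An end-to-end sketch of the order-insensitive equality implemented by `unstructuredSetList`, again assuming the `openapi` adaptor package is vendored; the schema construction is meant to mirror what a CRD with `x-kubernetes-list-type: set` produces:

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/cel/common"
	"k8s.io/apiserver/pkg/cel/openapi"
	"k8s.io/kube-openapi/pkg/validation/spec"
)

func main() {
	// an array of strings marked as a set
	arr := &spec.Schema{SchemaProps: spec.SchemaProps{
		Type:  []string{"array"},
		Items: &spec.SchemaOrArray{Schema: spec.StringProperty()},
	}}
	arr.AddExtension("x-kubernetes-list-type", "set")
	schema := &openapi.Schema{Schema: arr}

	a := common.UnstructuredToVal([]interface{}{"x", "y"}, schema)
	b := common.UnstructuredToVal([]interface{}{"y", "x"}, schema)
	fmt.Println(a.Equal(b)) // true: set semantics ignore element order
}
```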
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/environment/base.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/environment/base.go
new file mode 100644
index 0000000000..b210377aed
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/environment/base.go
@@ -0,0 +1,283 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package environment
+
+import (
+	"fmt"
+	"strconv"
+	"sync"
+	"sync/atomic"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/checker"
+	"github.com/google/cel-go/ext"
+	"github.com/google/cel-go/interpreter"
+	"golang.org/x/sync/singleflight"
+
+	"k8s.io/apimachinery/pkg/util/version"
+	celconfig "k8s.io/apiserver/pkg/apis/cel"
+	"k8s.io/apiserver/pkg/cel/library"
+	genericfeatures "k8s.io/apiserver/pkg/features"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/component-base/featuregate"
+	utilversion "k8s.io/component-base/version"
+)
+
+// DefaultCompatibilityVersion returns a default compatibility version for use with EnvSet
+// that guarantees compatibility with CEL features/libraries/parameters understood by
+// the api server min compatibility version
+//
+// This default will be set to no more than the current Kubernetes major.minor version.
+//
+// Note that a default version number less than n-1 (where n is the current Kubernetes major.minor version)
+// indicates a wider range of version compatibility than strictly required for rollback.
+// A wide range of compatibility is desirable because it means that CEL expressions are portable
+// across a wider range of Kubernetes versions.
+// A default version number equal to the current Kubernetes major.minor version
+// indicates fast-forwarded CEL features that can be used once rollback is no longer needed.
+func DefaultCompatibilityVersion() *version.Version {
+	effectiveVer := featuregate.DefaultComponentGlobalsRegistry.EffectiveVersionFor(featuregate.DefaultKubeComponent)
+	if effectiveVer == nil {
+		effectiveVer = utilversion.DefaultKubeEffectiveVersion()
+	}
+	return effectiveVer.MinCompatibilityVersion()
+}
+
+var baseOpts = append(baseOptsWithoutStrictCost, StrictCostOpt)
+
+var baseOptsWithoutStrictCost = []VersionedOptions{
+	{
+		// CEL epoch was actually 1.23, but we artificially set it to 1.0 because these
+		// options should always be present.
+		IntroducedVersion: version.MajorMinor(1, 0),
+		EnvOptions: []cel.EnvOption{
+			cel.HomogeneousAggregateLiterals(),
+			// Validate function declarations once during base env initialization,
+			// so they don't need to be evaluated each time a CEL rule is compiled.
+			// This is a relatively expensive operation.
+			cel.EagerlyValidateDeclarations(true),
+			cel.DefaultUTCTimeZone(true),
+
+			UnversionedLib(library.URLs),
+			UnversionedLib(library.Regex),
+			UnversionedLib(library.Lists),
+
+			// cel-go v0.17.7 changed the cost of has() from 0 to 1, but also provided the CostEstimatorOptions option to preserve the old behavior, so we enabled it at the same time we bumped our cel version to v0.17.7.
+			// Since it is a regression fix, we apply it uniformly to all code using v0.17.7.
+			cel.CostEstimatorOptions(checker.PresenceTestHasCost(false)),
+		},
+		ProgramOptions: []cel.ProgramOption{
+			cel.EvalOptions(cel.OptOptimize, cel.OptTrackCost),
+			cel.CostLimit(celconfig.PerCallLimit),
+
+			// cel-go v0.17.7 changed the cost of has() from 0 to 1, but also provided the CostEstimatorOptions option to preserve the old behavior, so we enabled it at the same time we bumped our cel version to v0.17.7.
+			// Since it is a regression fix, we apply it uniformly to all code using v0.17.7.
+			cel.CostTrackerOptions(interpreter.PresenceTestHasCost(false)),
+		},
+	},
+	{
+		IntroducedVersion: version.MajorMinor(1, 27),
+		EnvOptions: []cel.EnvOption{
+			UnversionedLib(library.Authz),
+		},
+	},
+	{
+		IntroducedVersion: version.MajorMinor(1, 28),
+		EnvOptions: []cel.EnvOption{
+			cel.CrossTypeNumericComparisons(true),
+			cel.OptionalTypes(),
+			UnversionedLib(library.Quantity),
+		},
+	},
+	// add the new validator in 1.29
+	{
+		IntroducedVersion: version.MajorMinor(1, 29),
+		EnvOptions: []cel.EnvOption{
+			cel.ASTValidators(
+				cel.ValidateDurationLiterals(),
+				cel.ValidateTimestampLiterals(),
+				cel.ValidateRegexLiterals(),
+				cel.ValidateHomogeneousAggregateLiterals(),
+			),
+		},
+	},
+	// String library
+	{
+		IntroducedVersion: version.MajorMinor(1, 0),
+		RemovedVersion:    version.MajorMinor(1, 29),
+		EnvOptions: []cel.EnvOption{
+			ext.Strings(ext.StringsVersion(0)),
+		},
+	},
+	{
+		IntroducedVersion: version.MajorMinor(1, 29),
+		EnvOptions: []cel.EnvOption{
+			ext.Strings(ext.StringsVersion(2)),
+		},
+	},
+	// Set library
+	{
+		IntroducedVersion: version.MajorMinor(1, 29),
+		EnvOptions: []cel.EnvOption{
+			ext.Sets(),
+		},
+	},
+	{
+		IntroducedVersion: version.MajorMinor(1, 30),
+		EnvOptions: []cel.EnvOption{
+			UnversionedLib(library.IP),
+			UnversionedLib(library.CIDR),
+		},
+	},
+	// Format Library
+	{
+		IntroducedVersion: version.MajorMinor(1, 31),
+		EnvOptions: []cel.EnvOption{
+			UnversionedLib(library.Format),
+		},
+	},
+	// Authz selectors
+	{
+		IntroducedVersion: version.MajorMinor(1, 31),
+		FeatureEnabled: func() bool {
+			enabled := utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors)
+			authzSelectorsLibraryInit.Do(func() {
+				// Record the first time feature enablement was checked for this library.
+				// This is checked from integration tests to ensure no cached cel envs
+				// are constructed before feature enablement is effectively set.
+				authzSelectorsLibraryEnabled.Store(enabled)
+				// Uncomment to debug where the first initialization is coming from if needed.
+				// debug.PrintStack()
+			})
+			return enabled
+		},
+		EnvOptions: []cel.EnvOption{
+			UnversionedLib(library.AuthzSelectors),
+		},
+	},
+	// Two variable comprehensions
+	{
+		IntroducedVersion: version.MajorMinor(1, 32),
+		EnvOptions: []cel.EnvOption{
+			UnversionedLib(ext.TwoVarComprehensions),
+		},
+	},
+}
+
+var (
+	authzSelectorsLibraryInit    sync.Once
+	authzSelectorsLibraryEnabled atomic.Value
+)
+
+// AuthzSelectorsLibraryEnabled returns whether the AuthzSelectors library was enabled when it was constructed.
+// If it has not been constructed yet, this returns `false, false`.
+// This is solely for the benefit of the integration tests making sure feature gates get correctly parsed before AuthzSelector ever has to check for enablement.
+func AuthzSelectorsLibraryEnabled() (enabled, constructed bool) {
+	enabled, constructed = authzSelectorsLibraryEnabled.Load().(bool)
+	return
+}
+
+var StrictCostOpt = VersionedOptions{
+	// This is to configure the cost calculation for extended libraries
+	IntroducedVersion: version.MajorMinor(1, 0),
+	ProgramOptions: []cel.ProgramOption{
+		cel.CostTracking(&library.CostEstimator{}),
+	},
+}
+
+// cacheBaseEnvs controls whether calls to MustBaseEnvSet are cached.
+// Defaults to true, may be disabled by calling DisableBaseEnvSetCachingForTests.
+var cacheBaseEnvs = true
+
+// DisableBaseEnvSetCachingForTests clears and disables base env caching.
+// This is only intended for unit tests exercising MustBaseEnvSet directly with different enablement options.
+// It does not clear other initialization paths that may cache results of calling MustBaseEnvSet.
+func DisableBaseEnvSetCachingForTests() {
+	cacheBaseEnvs = false
+	baseEnvs.Clear()
+	baseEnvsWithOption.Clear()
+}
+
+// MustBaseEnvSet returns the common CEL base environments for Kubernetes at the given version, or panics
+// if the version is nil or does not have major and minor components.
+//
+// The returned environment contains function libraries, language settings, optimizations and
+// runtime cost limits appropriate for CEL as it is used in Kubernetes.
+//
+// The returned environment contains no CEL variable definitions or custom type declarations and
+// should be extended to construct environments with the appropriate variable definitions,
+// type declarations and any other needed configuration.
+// strictCost is used to determine whether to enforce strict cost calculation for CEL expressions.
+func MustBaseEnvSet(ver *version.Version, strictCost bool) *EnvSet {
+	if ver == nil {
+		panic("version must be non-nil")
+	}
+	if len(ver.Components()) < 2 {
+		panic(fmt.Sprintf("version must contain an major and minor component, but got: %s", ver.String()))
+	}
+	key := strconv.FormatUint(uint64(ver.Major()), 10) + "." + strconv.FormatUint(uint64(ver.Minor()), 10)
+	var entry interface{}
+	if strictCost {
+		if entry, ok := baseEnvs.Load(key); ok {
+			return entry.(*EnvSet)
+		}
+		entry, _, _ = baseEnvsSingleflight.Do(key, func() (interface{}, error) {
+			entry := mustNewEnvSet(ver, baseOpts)
+			if cacheBaseEnvs {
+				baseEnvs.Store(key, entry)
+			}
+			return entry, nil
+		})
+	} else {
+		if entry, ok := baseEnvsWithOption.Load(key); ok {
+			return entry.(*EnvSet)
+		}
+		entry, _, _ = baseEnvsWithOptionSingleflight.Do(key, func() (interface{}, error) {
+			entry := mustNewEnvSet(ver, baseOptsWithoutStrictCost)
+			if cacheBaseEnvs {
+				baseEnvsWithOption.Store(key, entry)
+			}
+			return entry, nil
+		})
+	}
+
+	return entry.(*EnvSet)
+}
+
+var (
+	baseEnvs                       = sync.Map{}
+	baseEnvsWithOption             = sync.Map{}
+	baseEnvsSingleflight           = &singleflight.Group{}
+	baseEnvsWithOptionSingleflight = &singleflight.Group{}
+)
+
+// UnversionedLib wraps library initialization calls like ext.Sets() or library.IP()
+// to force compilation errors if the call evolves to include a variadic version option.
+//
+// This provides automatic detection of a problem that is hard to catch in review--
+// If a CEL library used in Kubernetes is unversioned and then becomes versioned, and we
+// fail to set a desired version, the library defaults to the latest version, changing the
+// CEL environment without a controlled rollout and bypassing the entire purpose of the base
+// environment.
+//
+// If usages of this function fail to compile: add a version=1 argument to all call sites
+// that fail compilation while removing the UnversionedLib wrapper. Next, review
+// the changes in the library present in higher versions and, if needed, use VersionedOptions in
+// the base environment to roll out a newer version safely.
+func UnversionedLib(initializer func() cel.EnvOption) cel.EnvOption {
+	return initializer()
+}
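
A minimal usage sketch for `MustBaseEnvSet`: compile a trivial expression against the NewExpressions environment at the default compatibility version (the cel-go `Compile` and `OutputType` calls are standard cel-go API):

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/cel/environment"
)

func main() {
	// strictCost=true selects the baseOpts variant with StrictCostOpt applied
	envSet := environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)

	env, err := envSet.Env(environment.NewExpressions)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`'a' + 'b' == 'ab'`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	fmt.Println(ast.OutputType()) // bool
}
```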
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go
new file mode 100644
index 0000000000..07b9e8f546
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go
@@ -0,0 +1,298 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package environment
+
+import (
+	"fmt"
+	"math"
+
+	"github.com/google/cel-go/cel"
+
+	"k8s.io/apimachinery/pkg/util/version"
+	apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// Type defines the different types of CEL environments used in Kubernetes.
+// CEL environments are used to compile and evaluate CEL expressions.
+// Environments include:
+//   - Function libraries
+//   - Variables
+//   - Types (both core CEL types and Kubernetes types)
+//   - Other CEL environment and program options
+type Type string
+
+const (
+	// NewExpressions is used to validate new or modified expressions in
+	// requests that write expressions to API resources.
+	//
+	// This environment type is compatible with a specific Kubernetes
+	// major/minor version. To ensure safe rollback, this environment type
+	// may not include all the function libraries, variables, type declarations, and CEL
+	// language settings available in the StoredExpressions environment type.
+	//
+	// NewExpressions must be used to validate (parse, compile, type check)
+	// all new or modified CEL expressions before they are written to storage.
+	NewExpressions Type = "NewExpressions"
+
+	// StoredExpressions is used to compile and run CEL expressions that have been
+	// persisted to storage.
+	//
+	// This environment type is compatible with CEL expressions that have been
+	// persisted to storage by all known versions of Kubernetes. This is the most
+	// permissive environment available.
+	//
+	// StoredExpressions is appropriate for use with CEL expressions in
+	// configuration files.
+	StoredExpressions Type = "StoredExpressions"
+)
+
+// EnvSet manages the creation and extension of CEL environments. Each EnvSet contains
+// both a NewExpressions and a StoredExpressions environment. EnvSets are created
+// and extended using VersionedOptions so that the EnvSet can prepare environments according
+// to what options were introduced at which versions.
+//
+// Each EnvSet is given a compatibility version when it is created, and prepares the
+// NewExpressions environment to be compatible with that version. The EnvSet also
+// prepares StoredExpressions to be compatible with all known versions of Kubernetes.
+type EnvSet struct {
+	// compatibilityVersion is the version that all configuration in
+	// the NewExpressions environment is compatible with.
+	compatibilityVersion *version.Version
+
+	// newExpressions is an environment containing only configuration
+	// in this EnvSet that is enabled at this compatibilityVersion.
+	newExpressions *cel.Env
+
+	// storedExpressions is an environment containing the latest configuration
+	// in this EnvSet.
+	storedExpressions *cel.Env
+}
+
+func newEnvSet(compatibilityVersion *version.Version, opts []VersionedOptions) (*EnvSet, error) {
+	base, err := cel.NewEnv()
+	if err != nil {
+		return nil, err
+	}
+	baseSet := EnvSet{compatibilityVersion: compatibilityVersion, newExpressions: base, storedExpressions: base}
+	return baseSet.Extend(opts...)
+}
+
+func mustNewEnvSet(ver *version.Version, opts []VersionedOptions) *EnvSet {
+	envSet, err := newEnvSet(ver, opts)
+	if err != nil {
+		panic(fmt.Sprintf("Default environment misconfigured: %v", err))
+	}
+	return envSet
+}
+
+// NewExpressionsEnv returns the NewExpressions environment Type for this EnvSet.
+// See NewExpressions for details.
+func (e *EnvSet) NewExpressionsEnv() *cel.Env {
+	return e.newExpressions
+}
+
+// StoredExpressionsEnv returns the StoredExpressions environment Type for this EnvSet.
+// See StoredExpressions for details.
+func (e *EnvSet) StoredExpressionsEnv() *cel.Env {
+	return e.storedExpressions
+}
+
+// Env returns the CEL environment for the given Type.
+func (e *EnvSet) Env(envType Type) (*cel.Env, error) {
+	switch envType {
+	case NewExpressions:
+		return e.newExpressions, nil
+	case StoredExpressions:
+		return e.storedExpressions, nil
+	default:
+		return nil, fmt.Errorf("unsupported environment type: %v", envType)
+	}
+}
+
+// VersionedOptions provides a set of CEL configuration options as well as the version the
+// options were introduced and, optionally, the version the options were removed.
+type VersionedOptions struct {
+	// IntroducedVersion is the version at which these options were introduced.
+	// The NewExpressions environment will only include options introduced at or before the
+	// compatibility version of the EnvSet.
+	//
+	// For example, to configure a CEL environment with an "object" variable bound to a
+	// resource kind, first create a DeclType from the groupVersionKind of the resource and then
+	// populate a VersionedOptions with the variable and the type:
+	//
+	//    schema := schemaResolver.ResolveSchema(groupVersionKind)
+	//    objectType := apiservercel.SchemaDeclType(schema, true)
+	//    ...
+	//    VersionOptions{
+	//      IntroducedVersion: version.MajorMinor(1, 26),
+	//      DeclTypes: []*apiservercel.DeclType{ objectType },
+	//      EnvOptions: []cel.EnvOption{ cel.Variable("object", objectType.CelType()) },
+	//    },
+	//
+	// To create a DeclType from a CRD, use a structural schema. For example:
+	//
+	//    schema := structuralschema.NewStructural(crdJSONProps)
+	//    objectType := apiservercel.SchemaDeclType(schema, true)
+	//
+	// Required.
+	IntroducedVersion *version.Version
+	// RemovedVersion is the version at which these options were removed.
+	// The NewExpressions environment will not include options removed at or before the
+	// compatibility version of the EnvSet.
+	//
+	// All option removals must be backward compatible; the removal must either be paired
+	// with a compatible replacement introduced at the same version, or the removal must be non-breaking.
+	// The StoredExpressions environment will not include removed options.
+	//
+	// A function library may be upgraded by setting the RemovedVersion of the old library
+	// to the same value as the IntroducedVersion of the new library. The new library must
+	// be backward compatible with the old library.
+	//
+	// For example:
+	//
+	//    VersionOptions{
+	//      IntroducedVersion: version.MajorMinor(1, 26), RemovedVersion: version.MajorMinor(1, 27),
+	//      EnvOptions: []cel.EnvOption{ libraries.Example(libraries.ExampleVersion(1)) },
+	//    },
+	//    VersionOptions{
+	//      IntroducedVersion: version.MajorMinor(1, 27),
+	//      EnvOptions: []EnvOptions{ libraries.Example(libraries.ExampleVersion(2)) },
+	//    },
+	//
+	// Optional.
+	RemovedVersion *version.Version
+	// FeatureEnabled returns true if these options are enabled by feature gates,
+	// and returns false if these options are not enabled due to feature gates.
+	//
+	// This takes priority over IntroducedVersion / RemovedVersion for the NewExpressions environment.
+	//
+	// The StoredExpressions environment ignores this function.
+	//
+	// Optional.
+	FeatureEnabled func() bool
+	// EnvOptions provides CEL EnvOptions. This may be used to add a cel.Variable, a
+	// cel.Library, or to enable other CEL EnvOptions such as language settings.
+	//
+	// If an added cel.Variable has an OpenAPI type, the type must be included in DeclTypes.
+	EnvOptions []cel.EnvOption
+	// ProgramOptions provides CEL ProgramOptions. This may be used to set a cel.CostLimit,
+	// enable optimizations, and set other program level options that should be enabled
+	// for all programs using this environment.
+	ProgramOptions []cel.ProgramOption
+	// DeclTypes provides OpenAPI type declarations to register with the environment.
+	//
+	// If cel.Variables added to EnvOptions refer to a OpenAPI type, the type must be included in
+	// DeclTypes.
+	DeclTypes []*apiservercel.DeclType
+}
+
+// Extend returns an EnvSet based on this EnvSet but extended with given VersionedOptions.
+// This EnvSet is not mutated.
+// The returned EnvSet has the same compatibility version as the EnvSet that was extended.
+//
+// Extend is an expensive operation and each call to Extend that adds DeclTypes increases
+// the depth of a chain of resolvers. For these reasons, calls to Extend should be kept
+// to a minimum.
+//
+// Some best practices:
+//
+//   - Minimize calls to Extend when handling API requests. Where possible, call Extend
+//     when initializing components.
+//   - If an EnvSet returned by Extend can be used to compile multiple CEL programs,
+//     call Extend once and reuse the returned EnvSet.
+//   - Prefer a single call to Extend with a full list of VersionedOptions over
+//     making multiple calls to Extend.
+func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) {
+	if len(options) > 0 {
+		newExprOpts, err := e.filterAndBuildOpts(e.newExpressions, e.compatibilityVersion, true, options)
+		if err != nil {
+			return nil, err
+		}
+		p, err := e.newExpressions.Extend(newExprOpts)
+		if err != nil {
+			return nil, err
+		}
+		storedExprOpt, err := e.filterAndBuildOpts(e.storedExpressions, version.MajorMinor(math.MaxUint, math.MaxUint), false, options)
+		if err != nil {
+			return nil, err
+		}
+		s, err := e.storedExpressions.Extend(storedExprOpt)
+		if err != nil {
+			return nil, err
+		}
+		return &EnvSet{compatibilityVersion: e.compatibilityVersion, newExpressions: p, storedExpressions: s}, nil
+	}
+	return e, nil
+}
+
+func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, honorFeatureGateEnablement bool, opts []VersionedOptions) (cel.EnvOption, error) {
+	var envOpts []cel.EnvOption
+	var progOpts []cel.ProgramOption
+	var declTypes []*apiservercel.DeclType
+
+	for _, opt := range opts {
+		var allowedByFeatureGate, allowedByVersion bool
+		if opt.FeatureEnabled != nil && honorFeatureGateEnablement {
+			// Feature-gate-enabled libraries must follow compatible default feature enablement.
+			// Enabling alpha features in their first release enables libraries the previous API server is unaware of.
+			allowedByFeatureGate = opt.FeatureEnabled()
+			if !allowedByFeatureGate {
+				continue
+			}
+		}
+		if compatVer.AtLeast(opt.IntroducedVersion) && (opt.RemovedVersion == nil || compatVer.LessThan(opt.RemovedVersion)) {
+			allowedByVersion = true
+		}
+
+		if allowedByFeatureGate || allowedByVersion {
+			envOpts = append(envOpts, opt.EnvOptions...)
+			progOpts = append(progOpts, opt.ProgramOptions...)
+			declTypes = append(declTypes, opt.DeclTypes...)
+		}
+	}
+
+	if len(declTypes) > 0 {
+		provider := apiservercel.NewDeclTypeProvider(declTypes...)
+		if compatVer.AtLeast(version.MajorMinor(1, 31)) {
+			provider.SetRecognizeKeywordAsFieldName(true)
+		}
+		providerOpts, err := provider.EnvOptions(base.CELTypeProvider())
+		if err != nil {
+			return nil, err
+		}
+		envOpts = append(envOpts, providerOpts...)
+	}
+
+	combined := cel.Lib(&envLoader{
+		envOpts:  envOpts,
+		progOpts: progOpts,
+	})
+	return combined, nil
+}
+
+type envLoader struct {
+	envOpts  []cel.EnvOption
+	progOpts []cel.ProgramOption
+}
+
+func (e *envLoader) CompileOptions() []cel.EnvOption {
+	return e.envOpts
+}
+
+func (e *envLoader) ProgramOptions() []cel.ProgramOption {
+	return e.progOpts
+}
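
A short sketch of `Extend` with a single `VersionedOptions` entry; the `object` variable and its 1.28 introduction version are hypothetical, and `DeclTypes` would also be needed if the variable had an OpenAPI type:

```go
package example

import (
	"github.com/google/cel-go/cel"
	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apiserver/pkg/cel/environment"
)

// withObjectVariable extends a base EnvSet with a dynamically typed
// "object" variable, following the Extend best practices above: one
// call, made at component initialization, with the full option list.
func withObjectVariable(base *environment.EnvSet) (*environment.EnvSet, error) {
	return base.Extend(environment.VersionedOptions{
		IntroducedVersion: version.MajorMinor(1, 28),
		EnvOptions: []cel.EnvOption{
			cel.Variable("object", cel.DynType),
		},
	})
}
```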
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/errors.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/errors.go
new file mode 100644
index 0000000000..d7b052fc9e
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/errors.go
@@ -0,0 +1,124 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/cel"
+)
+
+// ErrInternal is the basic error that occurs when the expression fails to evaluate
+// due to internal reasons. Any Error that has the Type of
+// ErrorTypeInternal is considered equal to ErrInternal.
+var ErrInternal = fmt.Errorf("internal")
+
+// ErrInvalid is the basic error that occurs when the expression fails to
+// evaluate but not due to internal reasons. Any Error that has the Type of
+// ErrorTypeInvalid is considered equal to ErrInvalid.
+var ErrInvalid = fmt.Errorf("invalid")
+
+// ErrRequired is the basic error that occurs when the expression is required
+// but absent.
+// Any Error that has the Type of ErrorTypeRequired is considered equal
+// to ErrRequired.
+var ErrRequired = fmt.Errorf("required")
+
+// ErrCompilation is the basic error that occurs when the expression fails to
+// compile. Any CompilationError wraps ErrCompilation.
+// ErrCompilation wraps ErrInvalid
+var ErrCompilation = fmt.Errorf("%w: compilation error", ErrInvalid)
+
+// ErrOutOfBudget is the basic error that occurs when the expression fails due to
+// exceeding budget.
+var ErrOutOfBudget = fmt.Errorf("out of budget")
+
+// Error is an implementation of the 'error' interface, which represents an
+// XValidation error.
+type Error struct {
+	Type   ErrorType
+	Detail string
+
+	// Cause is an optional wrapped error that can be useful to
+	// programmatically retrieve detailed errors.
+	Cause error
+}
+
+var _ error = &Error{}
+
+// Error implements the error interface.
+func (v *Error) Error() string {
+	return v.Detail
+}
+
+func (v *Error) Is(err error) bool {
+	switch v.Type {
+	case ErrorTypeRequired:
+		return err == ErrRequired
+	case ErrorTypeInvalid:
+		return err == ErrInvalid
+	case ErrorTypeInternal:
+		return err == ErrInternal
+	}
+	return false
+}
+
+// Unwrap returns the wrapped Cause.
+func (v *Error) Unwrap() error {
+	return v.Cause
+}
+
+// ErrorType is a machine-readable value providing more detail about why
+// an XValidation is invalid.
+type ErrorType string
+
+const (
+	// ErrorTypeRequired is used to report required values that are not
+	// provided (e.g. empty strings, null values, or empty arrays).  See
+	// Required().
+	ErrorTypeRequired ErrorType = "RuleRequired"
+	// ErrorTypeInvalid is used to report malformed values
+	ErrorTypeInvalid ErrorType = "RuleInvalid"
+	// ErrorTypeInternal is used to report other errors that are not related
+	// to user input.  See InternalError().
+	ErrorTypeInternal ErrorType = "InternalError"
+)
+
+// CompilationError indicates an error during expression compilation.
+// It wraps ErrCompilation.
+type CompilationError struct {
+	err    *Error
+	Issues *cel.Issues
+}
+
+// NewCompilationError wraps a cel.Issues to indicate a compilation failure.
+func NewCompilationError(issues *cel.Issues) *CompilationError {
+	return &CompilationError{
+		Issues: issues,
+		err: &Error{
+			Type:   ErrorTypeInvalid,
+			Detail: fmt.Sprintf("compilation error: %s", issues),
+		}}
+}
+
+func (e *CompilationError) Error() string {
+	return e.err.Error()
+}
+
+func (e *CompilationError) Unwrap() []error {
+	return []error{e.err, ErrCompilation}
+}
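
Because Error implements Is and CompilationError implements the multi-error form of Unwrap, callers can classify failures with the standard errors package. A minimal sketch, assuming only the exported names above (the classify helper is invented for illustration):

package main

import (
	"errors"
	"fmt"

	apiservercel "k8s.io/apiserver/pkg/cel"
)

// classify maps an evaluation error onto the sentinel errors defined above.
func classify(err error) string {
	switch {
	case errors.Is(err, apiservercel.ErrCompilation):
		return "compilation" // also implies "invalid": ErrCompilation wraps ErrInvalid
	case errors.Is(err, apiservercel.ErrInvalid):
		return "invalid"
	case errors.Is(err, apiservercel.ErrInternal):
		return "internal"
	}
	return "unknown"
}

func main() {
	err := &apiservercel.Error{Type: apiservercel.ErrorTypeInternal, Detail: "boom"}
	fmt.Println(classify(err)) // internal
}
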
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/escaping.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/escaping.go
new file mode 100644
index 0000000000..705c353a2b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/escaping.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"regexp"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// celReservedSymbols is a list of RESERVED symbols defined in the CEL lexer.
+// No identifiers are allowed to collide with these symbols.
+// https://github.com/google/cel-spec/blob/master/doc/langdef.md#syntax
+var celReservedSymbols = sets.NewString(
+	"true", "false", "null", "in",
+	"as", "break", "const", "continue", "else",
+	"for", "function", "if", "import", "let",
+	"loop", "package", "namespace", "return", // !! 'namespace' is used heavily in Kubernetes
+	"var", "void", "while",
+)
+
+// expandMatcher matches the escape sequence, characters that are escaped, and characters that are unsupported
+var expandMatcher = regexp.MustCompile(`(__|[-./]|[^a-zA-Z0-9-./_])`)
+
+// newCharacterFilter returns a boolean array to indicate the allowed characters
+func newCharacterFilter(characters string) []bool {
+	maxChar := 0
+	for _, c := range characters {
+		if maxChar < int(c) {
+			maxChar = int(c)
+		}
+	}
+	filter := make([]bool, maxChar+1)
+
+	for _, c := range characters {
+		filter[int(c)] = true
+	}
+
+	return filter
+}
+
+type escapeCheck struct {
+	canSkipRegex     bool
+	invalidCharFound bool
+}
+
+// skipRegexCheck checks whether the regex-based escape can be skipped.
+// If invalidCharFound is true, the identifier definitely contains an invalid character; if it is false, the identifier may or may not contain one.
+func skipRegexCheck(ident string) escapeCheck {
+	escapeCheck := escapeCheck{canSkipRegex: true, invalidCharFound: false}
+	// skip escape if possible
+	previous_underscore := false
+	for _, c := range ident {
+		if c == '/' || c == '-' || c == '.' {
+			escapeCheck.canSkipRegex = false
+			return escapeCheck
+		}
+		intc := int(c)
+		if intc < 0 || intc >= len(validCharacterFilter) || !validCharacterFilter[intc] {
+			escapeCheck.invalidCharFound = true
+			return escapeCheck
+		}
+		if c == '_' && previous_underscore {
+			escapeCheck.canSkipRegex = false
+			return escapeCheck
+		}
+
+		previous_underscore = c == '_'
+	}
+	return escapeCheck
+}
+
+// validCharacterFilter indicates the allowed characters.
+var validCharacterFilter = newCharacterFilter("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_")
+
+// Escape escapes ident and returns a CEL identifier (of the form '[a-zA-Z_][a-zA-Z0-9_]*'), or returns
+// false if the ident does not match the supported input format of `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*`.
+// Escaping Rules:
+//   - '__' escapes to '__underscores__'
+//   - '.' escapes to '__dot__'
+//   - '-' escapes to '__dash__'
+//   - '/' escapes to '__slash__'
+//   - Identifiers that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: "true", "false",
+//     "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", "import", "let", "loop", "package",
+//     "namespace", "return".
+func Escape(ident string) (string, bool) {
+	if len(ident) == 0 || ('0' <= ident[0] && ident[0] <= '9') {
+		return "", false
+	}
+	if celReservedSymbols.Has(ident) {
+		return "__" + ident + "__", true
+	}
+
+	escapeCheck := skipRegexCheck(ident)
+	if escapeCheck.invalidCharFound {
+		return "", false
+	}
+	if escapeCheck.canSkipRegex {
+		return ident, true
+	}
+
+	ok := true
+	ident = expandMatcher.ReplaceAllStringFunc(ident, func(s string) string {
+		switch s {
+		case "__":
+			return "__underscores__"
+		case ".":
+			return "__dot__"
+		case "-":
+			return "__dash__"
+		case "/":
+			return "__slash__"
+		default: // matched an unsupported character
+			ok = false
+			return ""
+		}
+	})
+	if !ok {
+		return "", false
+	}
+	return ident, true
+}
+
+var unexpandMatcher = regexp.MustCompile(`(_{2}[^_]+_{2})`)
+
+// Unescape unescapes a CEL identifier containing the escape sequences described in Escape, or returns false if the
+// string contains invalid escape sequences. The escaped input is expected to be a valid CEL identifier, but is
+// not checked.
+func Unescape(escaped string) (string, bool) {
+	ok := true
+	escaped = unexpandMatcher.ReplaceAllStringFunc(escaped, func(s string) string {
+		contents := s[2 : len(s)-2]
+		switch contents {
+		case "underscores":
+			return "__"
+		case "dot":
+			return "."
+		case "dash":
+			return "-"
+		case "slash":
+			return "/"
+		}
+		if celReservedSymbols.Has(contents) {
+			if len(s) != len(escaped) {
+				ok = false
+			}
+			return contents
+		}
+		ok = false
+		return ""
+	})
+	if !ok {
+		return "", false
+	}
+	return escaped, true
+}
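
Taken together, Escape and Unescape round-trip property names that are not valid CEL identifiers. A minimal sketch, assuming only the two exported functions above:

package main

import (
	"fmt"

	apiservercel "k8s.io/apiserver/pkg/cel"
)

func main() {
	// '.', '-', and '/' are replaced by their named escape sequences.
	escaped, ok := apiservercel.Escape("my-label.example.com/name")
	fmt.Println(escaped, ok) // my__dash__label__dot__example__dot__com__slash__name true

	// Unescape reverses the transformation.
	back, ok := apiservercel.Unescape(escaped)
	fmt.Println(back, ok) // my-label.example.com/name true

	// Identifiers may not begin with a digit, so escaping fails.
	_, ok = apiservercel.Escape("1bad")
	fmt.Println(ok) // false
}
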
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/format.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/format.go
new file mode 100644
index 0000000000..31216806f7
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/format.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/checker/decls"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+var (
+	FormatObject = decls.NewObjectType("kubernetes.NamedFormat")
+	FormatType   = cel.ObjectType("kubernetes.NamedFormat")
+)
+
+// Format provides a CEL representation of a Kubernetes format.
+type Format struct {
+	Name         string
+	ValidateFunc func(string) []string
+
+	// Size of the regex string or estimated equivalent regex string used
+	// for cost estimation
+	MaxRegexSize int
+}
+
+func (d Format) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	return nil, fmt.Errorf("type conversion error from 'Format' to '%v'", typeDesc)
+}
+
+func (d Format) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case FormatType:
+		return d
+	case types.TypeType:
+		return FormatType
+	default:
+		return types.NewErr("type conversion error from '%s' to '%s'", FormatType, typeVal)
+	}
+}
+
+func (d Format) Equal(other ref.Val) ref.Val {
+	otherDur, ok := other.(Format)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	return types.Bool(d.Name == otherDur.Name)
+}
+
+func (d Format) Type() ref.Type {
+	return FormatType
+}
+
+func (d Format) Value() interface{} {
+	return d
+}
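
Note that Equal compares formats by Name alone, so two Format values with the same name are equal from CEL's point of view even if their validators differ. A small sketch, assuming only the type above (the format name is illustrative):

package main

import (
	"fmt"

	apiservercel "k8s.io/apiserver/pkg/cel"
)

func main() {
	a := apiservercel.Format{Name: "dns1123Subdomain", ValidateFunc: func(string) []string { return nil }}
	b := apiservercel.Format{Name: "dns1123Subdomain"} // no validator attached
	fmt.Println(a.Equal(b))                            // true: equality is by Name only
}
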
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/ip.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/ip.go
new file mode 100644
index 0000000000..f91c6cb7a8
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/ip.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+	"math"
+	"net/netip"
+	"reflect"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// IP provides a CEL representation of an IP address.
+type IP struct {
+	netip.Addr
+}
+
+var (
+	IPType = cel.OpaqueType("net.IP")
+)
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (d IP) ConvertToNative(typeDesc reflect.Type) (any, error) {
+	if reflect.TypeOf(d.Addr).AssignableTo(typeDesc) {
+		return d.Addr, nil
+	}
+	if reflect.TypeOf("").AssignableTo(typeDesc) {
+		return d.Addr.String(), nil
+	}
+	return nil, fmt.Errorf("type conversion error from 'IP' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (d IP) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case IPType:
+		return d
+	case types.TypeType:
+		return IPType
+	case types.StringType:
+		return types.String(d.Addr.String())
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", IPType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (d IP) Equal(other ref.Val) ref.Val {
+	otherD, ok := other.(IP)
+	if !ok {
+		return types.ValOrErr(other, "no such overload")
+	}
+	return types.Bool(d.Addr == otherD.Addr)
+}
+
+// Type implements ref.Val.Type.
+func (d IP) Type() ref.Type {
+	return IPType
+}
+
+// Value implements ref.Val.Value.
+func (d IP) Value() any {
+	return d.Addr
+}
+
+// Size returns the size of the IP address in bytes.
+// Used in the size estimation of the runtime cost.
+func (d IP) Size() ref.Val {
+	return types.Int(int(math.Ceil(float64(d.Addr.BitLen()) / 8)))
+}
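
Size is what lets the cost estimator treat an IP comparison as proportional to the address width in bytes. A small sketch, assuming only the type above:

package main

import (
	"fmt"
	"net/netip"

	apiservercel "k8s.io/apiserver/pkg/cel"
)

func main() {
	v4 := apiservercel.IP{Addr: netip.MustParseAddr("192.168.0.1")}
	v6 := apiservercel.IP{Addr: netip.MustParseAddr("::1")}
	// BitLen()/8 rounded up: 4 bytes for IPv4, 16 for IPv6.
	fmt.Println(v4.Size(), v6.Size()) // 4 16
}
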
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/authz.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/authz.go
new file mode 100644
index 0000000000..77332cff8b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/authz.go
@@ -0,0 +1,790 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	genericfeatures "k8s.io/apiserver/pkg/features"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apiserver/pkg/authentication/serviceaccount"
+	"k8s.io/apiserver/pkg/authentication/user"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
+)
+
+// Authz provides a CEL function library extension for performing authorization checks.
+// Note that authorization checks are only supported for CEL expression fields in the API
+// where an 'authorizer' variable is provided to the CEL expression. See the
+// documentation of API fields where CEL expressions are used to learn if the 'authorizer'
+// variable is provided.
+//
+// path
+//
+// Returns a PathCheck configured to check authorization for a non-resource request
+// path (e.g. /healthz). If path is an empty string, an error is returned.
+// Note that the leading '/' is not required.
+//
+//	<Authorizer>.path(<string>) <PathCheck>
+//
+// Examples:
+//
+//	authorizer.path('/healthz') // returns a PathCheck for the '/healthz' API path
+//	authorizer.path('') // results in "path must not be empty" error
+//	authorizer.path('  ') // results in "path must not be empty" error
+//
+// group
+//
+// Returns a GroupCheck configured to check authorization for the API resources for
+// a particular API group.
+// Note that authorization checks are only supported for CEL expression fields in the API
+// where an 'authorizer' variable is provided to the CEL expression. Check the
+// documentation of API fields where CEL expressions are used to learn if the 'authorizer'
+// variable is provided.
+//
+//	<Authorizer>.group(<string>) <GroupCheck>
+//
+// Examples:
+//
+//	authorizer.group('apps') // returns a GroupCheck for the 'apps' API group
+//	authorizer.group('') // returns a GroupCheck for the core API group
+//	authorizer.group('example.com') // returns a GroupCheck for the custom resources in the 'example.com' API group
+//
+// serviceAccount
+//
+// Returns an Authorizer configured to check authorization for the provided service account namespace and name.
+// If the name is not a valid DNS subdomain string (as defined by RFC 1123), an error is returned.
+// If the namespace is not a valid DNS label (as defined by RFC 1123), an error is returned.
+//
+//	<Authorizer>.serviceAccount(<string>, <string>) <Authorizer>
+//
+// Examples:
+//
+//	authorizer.serviceAccount('default', 'myserviceaccount') // returns an Authorizer for the service account with namespace 'default' and name 'myserviceaccount'
+//	authorizer.serviceAccount('not@a#valid!namespace', 'validname') // returns an error
+//	authorizer.serviceAccount('valid.example.com', 'invalid@*name') // returns an error
+//
+// resource
+//
+// Returns a ResourceCheck configured to check authorization for a particular API resource.
+// Note that the provided resource string should be a lower case plural name of a Kubernetes API resource.
+//
+//	<GroupCheck>.resource(<string>) <ResourceCheck>
+//
+// Examples:
+//
+//	authorizer.group('apps').resource('deployments') // returns a ResourceCheck for the 'deployments' resources in the 'apps' group.
+//	authorizer.group('').resource('pods') // returns a ResourceCheck for the 'pods' resources in the core group.
+//	authorizer.group('apps').resource('') // results in "resource must not be empty" error
+//	authorizer.group('apps').resource('  ') // results in "resource must not be empty" error
+//
+// subresource
+//
+// Returns a ResourceCheck configured to check authorization for a particular subresource of an API resource.
+// If subresource is set to "", the subresource field of this ResourceCheck is considered unset.
+//
+//	<ResourceCheck>.subresource(<string>) <ResourceCheck>
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').subresource('status') // returns a ResourceCheck for the 'status' subresource of 'pods'
+//	authorizer.group('apps').resource('deployments').subresource('scale') // returns a ResourceCheck for the 'scale' subresource of 'deployments'
+//	authorizer.group('example.com').resource('widgets').subresource('scale') // returns a ResourceCheck for the 'scale' subresource of the 'widgets' custom resource
+//	authorizer.group('example.com').resource('widgets').subresource('') // returns a ResourceCheck for the 'widgets' resource.
+//
+// namespace
+//
+// Returns a ResourceCheck configured to check authorization for a particular namespace.
+// For cluster scoped resources, namespace() does not need to be called; namespace defaults
+// to "", which is the correct namespace value to use to check cluster scoped resources.
+// If namespace is set to "", the ResourceCheck will check authorization for the cluster scope.
+//
+//	<ResourceCheck>.namespace(<string>) <ResourceCheck>
+//
+// Examples:
+//
+//	authorizer.group('apps').resource('deployments').namespace('test') // returns a ResourceCheck for 'deployments' in the 'test' namespace
+//	authorizer.group('').resource('pods').namespace('default') // returns a ResourceCheck for 'pods' in the 'default' namespace
+//	authorizer.group('').resource('widgets').namespace('') // returns a ResourceCheck for 'widgets' in the cluster scope
+//
+// name
+//
+// Returns a ResourceCheck configured to check authorization for a particular resource name.
+// If name is set to "", the name field of this ResourceCheck is considered unset.
+//
+//	<ResourceCheck>.name(<string>) <ResourceCheck>
+//
+// Examples:
+//
+//	authorizer.group('apps').resource('deployments').namespace('test').name('backend') // returns a ResourceCheck for the 'backend' 'deployments' resource in the 'test' namespace
+//	authorizer.group('apps').resource('deployments').namespace('test').name('') // returns a ResourceCheck for the 'deployments' resource in the 'test' namespace
+//
+// check
+//
+// For PathCheck, checks if the principal (user or service account) that sent the request is authorized for the HTTP request verb of the path.
+// For ResourceCheck, checks if the principal (user or service account) that sent the request is authorized for the API verb and the configured authorization checks of the ResourceCheck.
+// The check operation can be expensive, particularly in clusters using the webhook authorization mode.
+//
+//	<PathCheck>.check(<string>) <Decision>
+//	<ResourceCheck>.check(<string>) <Decision>
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').namespace('default').check('create') // Checks if the principal (user or service account) is authorized to create pods in the 'default' namespace.
+//	authorizer.path('/healthz').check('get') // Checks if the principal (user or service account) is authorized to make HTTP GET requests to the /healthz API path.
+//
+// allowed
+//
+// Returns true if the authorizer's decision for the check is "allow".  Note that if the authorizer's decision is
+// "no opinion", that the 'allowed' function will return false.
+//
+//	<Decision>.allowed() <bool>
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').namespace('default').check('create').allowed() // Returns true if the principal (user or service account) is allowed to create pods in the 'default' namespace.
+//	authorizer.path('/healthz').check('get').allowed()  // Returns true if the principal (user or service account) is allowed to make HTTP GET requests to the /healthz API path.
+//
+// reason
+//
+// Returns a string reason for the authorization decision.
+//
+//	<Decision>.reason() <string>
+//
+// Examples:
+//
+//	authorizer.path('/healthz').check('GET').reason()
+//
+// errored
+//
+// Returns true if the authorization check resulted in an error.
+//
+//	<Decision>.errored() <bool>
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').namespace('default').check('create').errored() // Returns true if the authorization check resulted in an error
+//
+// error
+//
+// If the authorization check resulted in an error, returns the error. Otherwise, returns the empty string.
+//
+//	<Decision>.error() <string>
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').namespace('default').check('create').error()
+//
+// fieldSelector
+//
+// Takes a string field selector, parses it to field selector requirements, and includes it in the authorization check.
+// If the field selector does not parse successfully, no field selector requirements are included in the authorization check.
+// Added in Kubernetes 1.31+, Authz library version 1.
+//
+//	<ResourceCheck>.fieldSelector(<string>) <ResourceCheck>
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed()
+//
+// labelSelector (added in v1, Kubernetes 1.31+)
+//
+// Takes a string label selector, parses it to label selector requirements, and includes it in the authorization check.
+// If the label selector does not parse successfully, no label selector requirements are included in the authorization check.
+// Added in Kubernetes 1.31+, Authz library version 1.
+//
+//	<ResourceCheck>.labelSelector(<string>) <ResourceCheck>
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').labelSelector('app=example').check('list').allowed()
+func Authz() cel.EnvOption {
+	return cel.Lib(authzLib)
+}
+
+var authzLib = &authz{}
+
+type authz struct{}
+
+func (*authz) LibraryName() string {
+	return "kubernetes.authz"
+}
+
+func (*authz) Types() []*cel.Type {
+	return []*cel.Type{
+		AuthorizerType,
+		PathCheckType,
+		GroupCheckType,
+		ResourceCheckType,
+		DecisionType}
+}
+
+func (*authz) declarations() map[string][]cel.FunctionOpt {
+	return authzLibraryDecls
+}
+
+var authzLibraryDecls = map[string][]cel.FunctionOpt{
+	"path": {
+		cel.MemberOverload("authorizer_path", []*cel.Type{AuthorizerType, cel.StringType}, PathCheckType,
+			cel.BinaryBinding(authorizerPath))},
+	"group": {
+		cel.MemberOverload("authorizer_group", []*cel.Type{AuthorizerType, cel.StringType}, GroupCheckType,
+			cel.BinaryBinding(authorizerGroup))},
+	"serviceAccount": {
+		cel.MemberOverload("authorizer_serviceaccount", []*cel.Type{AuthorizerType, cel.StringType, cel.StringType}, AuthorizerType,
+			cel.FunctionBinding(authorizerServiceAccount))},
+	"resource": {
+		cel.MemberOverload("groupcheck_resource", []*cel.Type{GroupCheckType, cel.StringType}, ResourceCheckType,
+			cel.BinaryBinding(groupCheckResource))},
+	"subresource": {
+		cel.MemberOverload("resourcecheck_subresource", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType,
+			cel.BinaryBinding(resourceCheckSubresource))},
+	"namespace": {
+		cel.MemberOverload("resourcecheck_namespace", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType,
+			cel.BinaryBinding(resourceCheckNamespace))},
+	"name": {
+		cel.MemberOverload("resourcecheck_name", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType,
+			cel.BinaryBinding(resourceCheckName))},
+	"check": {
+		cel.MemberOverload("pathcheck_check", []*cel.Type{PathCheckType, cel.StringType}, DecisionType,
+			cel.BinaryBinding(pathCheckCheck)),
+		cel.MemberOverload("resourcecheck_check", []*cel.Type{ResourceCheckType, cel.StringType}, DecisionType,
+			cel.BinaryBinding(resourceCheckCheck))},
+	"errored": {
+		cel.MemberOverload("decision_errored", []*cel.Type{DecisionType}, cel.BoolType,
+			cel.UnaryBinding(decisionErrored))},
+	"error": {
+		cel.MemberOverload("decision_error", []*cel.Type{DecisionType}, cel.StringType,
+			cel.UnaryBinding(decisionError))},
+	"allowed": {
+		cel.MemberOverload("decision_allowed", []*cel.Type{DecisionType}, cel.BoolType,
+			cel.UnaryBinding(decisionAllowed))},
+	"reason": {
+		cel.MemberOverload("decision_reason", []*cel.Type{DecisionType}, cel.StringType,
+			cel.UnaryBinding(decisionReason))},
+}
+
+func (*authz) CompileOptions() []cel.EnvOption {
+	options := make([]cel.EnvOption, 0, len(authzLibraryDecls))
+	for name, overloads := range authzLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*authz) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+// AuthzSelectors provides a CEL function library extension for adding fieldSelector and
+// labelSelector filters to authorization checks. This requires the Authz library.
+// See documentation of the Authz library for use and availability of the authorizer variable.
+//
+// fieldSelector
+//
+// Takes a string field selector, parses it to field selector requirements, and includes it in the authorization check.
+// If the field selector does not parse successfully, no field selector requirements are included in the authorization check.
+// Added in Kubernetes 1.31+.
+//
+//	<ResourceCheck>.fieldSelector(<string>) <ResourceCheck>
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed()
+//
+// labelSelector
+//
+// Takes a string label selector, parses it to label selector requirements, and includes it in the authorization check.
+// If the label selector does not parse successfully, no label selector requirements are included in the authorization check.
+// Added in Kubernetes 1.31+.
+//
+//	<ResourceCheck>.labelSelector(<string>) <ResourceCheck>
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').labelSelector('app=example').check('list').allowed()
+func AuthzSelectors() cel.EnvOption {
+	return cel.Lib(authzSelectorsLib)
+}
+
+var authzSelectorsLib = &authzSelectors{}
+
+type authzSelectors struct{}
+
+func (*authzSelectors) LibraryName() string {
+	return "kubernetes.authzSelectors"
+}
+
+func (*authzSelectors) Types() []*cel.Type {
+	return []*cel.Type{ResourceCheckType}
+}
+
+func (*authzSelectors) declarations() map[string][]cel.FunctionOpt {
+	return authzSelectorsLibraryDecls
+}
+
+var authzSelectorsLibraryDecls = map[string][]cel.FunctionOpt{
+	"fieldSelector": {
+		cel.MemberOverload("authorizer_fieldselector", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType,
+			cel.BinaryBinding(resourceCheckFieldSelector))},
+	"labelSelector": {
+		cel.MemberOverload("authorizer_labelselector", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType,
+			cel.BinaryBinding(resourceCheckLabelSelector))},
+}
+
+func (*authzSelectors) CompileOptions() []cel.EnvOption {
+	options := make([]cel.EnvOption, 0, len(authzSelectorsLibraryDecls))
+	for name, overloads := range authzSelectorsLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*authzSelectors) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func authorizerPath(arg1, arg2 ref.Val) ref.Val {
+	authz, ok := arg1.(authorizerVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	path, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	if len(strings.TrimSpace(path)) == 0 {
+		return types.NewErr("path must not be empty")
+	}
+
+	return authz.pathCheck(path)
+}
+
+func authorizerGroup(arg1, arg2 ref.Val) ref.Val {
+	authz, ok := arg1.(authorizerVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	group, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	return authz.groupCheck(group)
+}
+
+func authorizerServiceAccount(args ...ref.Val) ref.Val {
+	argn := len(args)
+	if argn != 3 {
+		return types.NoSuchOverloadErr()
+	}
+
+	authz, ok := args[0].(authorizerVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(args[0])
+	}
+
+	namespace, ok := args[1].Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(args[1])
+	}
+
+	name, ok := args[2].Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(args[2])
+	}
+
+	if errors := apimachineryvalidation.ValidateServiceAccountName(name, false); len(errors) > 0 {
+		return types.NewErr("Invalid service account name")
+	}
+	if errors := apimachineryvalidation.ValidateNamespaceName(namespace, false); len(errors) > 0 {
+		return types.NewErr("Invalid service account namespace")
+	}
+	return authz.serviceAccount(namespace, name)
+}
+
+func groupCheckResource(arg1, arg2 ref.Val) ref.Val {
+	groupCheck, ok := arg1.(groupCheckVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	resource, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	if len(strings.TrimSpace(resource)) == 0 {
+		return types.NewErr("resource must not be empty")
+	}
+	return groupCheck.resourceCheck(resource)
+}
+
+func resourceCheckSubresource(arg1, arg2 ref.Val) ref.Val {
+	resourceCheck, ok := arg1.(resourceCheckVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	subresource, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	result := resourceCheck
+	result.subresource = subresource
+	return result
+}
+
+func resourceCheckFieldSelector(arg1, arg2 ref.Val) ref.Val {
+	resourceCheck, ok := arg1.(resourceCheckVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	fieldSelector, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	result := resourceCheck
+	result.fieldSelector = fieldSelector
+	return result
+}
+
+func resourceCheckLabelSelector(arg1, arg2 ref.Val) ref.Val {
+	resourceCheck, ok := arg1.(resourceCheckVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	labelSelector, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	result := resourceCheck
+	result.labelSelector = labelSelector
+	return result
+}
+
+func resourceCheckNamespace(arg1, arg2 ref.Val) ref.Val {
+	resourceCheck, ok := arg1.(resourceCheckVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	namespace, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	result := resourceCheck
+	result.namespace = namespace
+	return result
+}
+
+func resourceCheckName(arg1, arg2 ref.Val) ref.Val {
+	resourceCheck, ok := arg1.(resourceCheckVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	name, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	result := resourceCheck
+	result.name = name
+	return result
+}
+
+func pathCheckCheck(arg1, arg2 ref.Val) ref.Val {
+	pathCheck, ok := arg1.(pathCheckVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	httpRequestVerb, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	return pathCheck.Authorize(context.TODO(), httpRequestVerb)
+}
+
+func resourceCheckCheck(arg1, arg2 ref.Val) ref.Val {
+	resourceCheck, ok := arg1.(resourceCheckVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	apiVerb, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	return resourceCheck.Authorize(context.TODO(), apiVerb)
+}
+
+func decisionErrored(arg ref.Val) ref.Val {
+	decision, ok := arg.(decisionVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(decision.err != nil)
+}
+
+func decisionError(arg ref.Val) ref.Val {
+	decision, ok := arg.(decisionVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	if decision.err == nil {
+		return types.String("")
+	}
+	return types.String(decision.err.Error())
+}
+
+func decisionAllowed(arg ref.Val) ref.Val {
+	decision, ok := arg.(decisionVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(decision.authDecision == authorizer.DecisionAllow)
+}
+
+func decisionReason(arg ref.Val) ref.Val {
+	decision, ok := arg.(decisionVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.String(decision.reason)
+}
+
+var (
+	AuthorizerType    = cel.ObjectType("kubernetes.authorization.Authorizer")
+	PathCheckType     = cel.ObjectType("kubernetes.authorization.PathCheck")
+	GroupCheckType    = cel.ObjectType("kubernetes.authorization.GroupCheck")
+	ResourceCheckType = cel.ObjectType("kubernetes.authorization.ResourceCheck")
+	DecisionType      = cel.ObjectType("kubernetes.authorization.Decision")
+)
+
+// Resource represents an API resource
+type Resource interface {
+	// GetName returns the name of the object as presented in the request.  On a CREATE operation, the client
+	// may omit name and rely on the server to generate the name.  If that is the case, this method will return
+	// the empty string
+	GetName() string
+	// GetNamespace is the namespace associated with the request (if any)
+	GetNamespace() string
+	// GetResource is the name of the resource being requested.  This is not the kind.  For example: pods
+	GetResource() schema.GroupVersionResource
+	// GetSubresource is the name of the subresource being requested.  This is a different resource, scoped to the parent resource, but it may have a different kind.
+	// For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod"
+	// (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding".
+	GetSubresource() string
+}
+
+func NewAuthorizerVal(userInfo user.Info, authorizer authorizer.Authorizer) ref.Val {
+	return authorizerVal{receiverOnlyObjectVal: receiverOnlyVal(AuthorizerType), userInfo: userInfo, authAuthorizer: authorizer}
+}
+
+func NewResourceAuthorizerVal(userInfo user.Info, authorizer authorizer.Authorizer, requestResource Resource) ref.Val {
+	a := authorizerVal{receiverOnlyObjectVal: receiverOnlyVal(AuthorizerType), userInfo: userInfo, authAuthorizer: authorizer}
+	resource := requestResource.GetResource()
+	g := a.groupCheck(resource.Group)
+	r := g.resourceCheck(resource.Resource)
+	r.subresource = requestResource.GetSubresource()
+	r.namespace = requestResource.GetNamespace()
+	r.name = requestResource.GetName()
+	return r
+}
+
+type authorizerVal struct {
+	receiverOnlyObjectVal
+	userInfo       user.Info
+	authAuthorizer authorizer.Authorizer
+}
+
+func (a authorizerVal) pathCheck(path string) pathCheckVal {
+	return pathCheckVal{receiverOnlyObjectVal: receiverOnlyVal(PathCheckType), authorizer: a, path: path}
+}
+
+func (a authorizerVal) groupCheck(group string) groupCheckVal {
+	return groupCheckVal{receiverOnlyObjectVal: receiverOnlyVal(GroupCheckType), authorizer: a, group: group}
+}
+
+func (a authorizerVal) serviceAccount(namespace, name string) authorizerVal {
+	sa := &serviceaccount.ServiceAccountInfo{Name: name, Namespace: namespace}
+	return authorizerVal{
+		receiverOnlyObjectVal: receiverOnlyVal(AuthorizerType),
+		userInfo:              sa.UserInfo(),
+		authAuthorizer:        a.authAuthorizer,
+	}
+}
+
+type pathCheckVal struct {
+	receiverOnlyObjectVal
+	authorizer authorizerVal
+	path       string
+}
+
+func (a pathCheckVal) Authorize(ctx context.Context, verb string) ref.Val {
+	attr := &authorizer.AttributesRecord{
+		Path: a.path,
+		Verb: verb,
+		User: a.authorizer.userInfo,
+	}
+
+	decision, reason, err := a.authorizer.authAuthorizer.Authorize(ctx, attr)
+	return newDecision(decision, err, reason)
+}
+
+type groupCheckVal struct {
+	receiverOnlyObjectVal
+	authorizer authorizerVal
+	group      string
+}
+
+func (g groupCheckVal) resourceCheck(resource string) resourceCheckVal {
+	return resourceCheckVal{receiverOnlyObjectVal: receiverOnlyVal(ResourceCheckType), groupCheck: g, resource: resource}
+}
+
+type resourceCheckVal struct {
+	receiverOnlyObjectVal
+	groupCheck    groupCheckVal
+	resource      string
+	subresource   string
+	namespace     string
+	name          string
+	fieldSelector string
+	labelSelector string
+}
+
+func (a resourceCheckVal) Authorize(ctx context.Context, verb string) ref.Val {
+	attr := &authorizer.AttributesRecord{
+		ResourceRequest: true,
+		APIGroup:        a.groupCheck.group,
+		APIVersion:      "*",
+		Resource:        a.resource,
+		Subresource:     a.subresource,
+		Namespace:       a.namespace,
+		Name:            a.name,
+		Verb:            verb,
+		User:            a.groupCheck.authorizer.userInfo,
+	}
+
+	if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) {
+		if len(a.fieldSelector) > 0 {
+			selector, err := fields.ParseSelector(a.fieldSelector)
+			if err != nil {
+				attr.FieldSelectorRequirements, attr.FieldSelectorParsingErr = nil, err
+			} else {
+				attr.FieldSelectorRequirements, attr.FieldSelectorParsingErr = selector.Requirements(), nil
+			}
+		}
+		if len(a.labelSelector) > 0 {
+			requirements, err := labels.ParseToRequirements(a.labelSelector)
+			if err != nil {
+				attr.LabelSelectorRequirements, attr.LabelSelectorParsingErr = nil, err
+			} else {
+				attr.LabelSelectorRequirements, attr.LabelSelectorParsingErr = requirements, nil
+			}
+		}
+	}
+
+	decision, reason, err := a.groupCheck.authorizer.authAuthorizer.Authorize(ctx, attr)
+	return newDecision(decision, err, reason)
+}
+
+func newDecision(authDecision authorizer.Decision, err error, reason string) decisionVal {
+	return decisionVal{receiverOnlyObjectVal: receiverOnlyVal(DecisionType), authDecision: authDecision, err: err, reason: reason}
+}
+
+type decisionVal struct {
+	receiverOnlyObjectVal
+	err          error
+	authDecision authorizer.Decision
+	reason       string
+}
+
+// receiverOnlyObjectVal provides an implementation of ref.Val for
+// any object type that has receiver functions but does not expose any fields to
+// CEL.
+type receiverOnlyObjectVal struct {
+	typeValue *types.Type
+}
+
+// receiverOnlyVal returns a receiverOnlyObjectVal for the given type.
+func receiverOnlyVal(objectType *cel.Type) receiverOnlyObjectVal {
+	return receiverOnlyObjectVal{typeValue: types.NewTypeValue(objectType.String())}
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (a receiverOnlyObjectVal) ConvertToNative(typeDesc reflect.Type) (any, error) {
+	return nil, fmt.Errorf("type conversion error from '%s' to '%v'", a.typeValue.String(), typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (a receiverOnlyObjectVal) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case a.typeValue:
+		return a
+	case types.TypeType:
+		return a.typeValue
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", a.typeValue, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (a receiverOnlyObjectVal) Equal(other ref.Val) ref.Val {
+	o, ok := other.(receiverOnlyObjectVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	return types.Bool(a == o)
+}
+
+// Type implements ref.Val.Type.
+func (a receiverOnlyObjectVal) Type() ref.Type {
+	return a.typeValue
+}
+
+// Value implements ref.Val.Value.
+func (a receiverOnlyObjectVal) Value() any {
+	return types.NoSuchOverloadErr()
+}
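
A rough end-to-end sketch of how this library is exercised: declare an 'authorizer' variable of AuthorizerType, add Authz() to the environment, and evaluate with a value built by NewAuthorizerVal. The alwaysAllow stub is invented for illustration, and Kubernetes itself wires this through its versioned environment rather than a bare cel.NewEnv:

package main

import (
	"context"
	"fmt"

	"github.com/google/cel-go/cel"

	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/cel/library"
)

// alwaysAllow is a stub authorizer that approves every request.
type alwaysAllow struct{}

func (alwaysAllow) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
	return authorizer.DecisionAllow, "stubbed", nil
}

func main() {
	env, err := cel.NewEnv(
		cel.Variable("authorizer", library.AuthorizerType),
		library.Authz(),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(
		`authorizer.group('apps').resource('deployments').namespace('test').check('create').allowed()`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	authz := library.NewAuthorizerVal(&user.DefaultInfo{Name: "alice"}, alwaysAllow{})
	out, _, err := prg.Eval(map[string]any{"authorizer": authz})
	fmt.Println(out, err) // true <nil>
}
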
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go
new file mode 100644
index 0000000000..2992e99e65
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go
@@ -0,0 +1,295 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"fmt"
+	"net/netip"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// CIDR provides a CEL function library extension of CIDR notation parsing functions.
+//
+// cidr
+//
+// Converts a string in CIDR notation to a network address representation or results in an error if the string is not a valid CIDR notation.
+// The CIDR must be an IPv4 or IPv6 subnet address with a mask.
+// Leading zeros in IPv4 address octets are not allowed.
+// IPv4-mapped IPv6 addresses (e.g. ::ffff:1.2.3.4/24) are not allowed.
+//
+//	cidr(<string>) <CIDR>
+//
+// Examples:
+//
+//	cidr('192.168.0.0/16') // returns an IPv4 address with a CIDR mask
+//	cidr('::1/128') // returns an IPv6 address with a CIDR mask
+//	cidr('192.168.0.0/33') // error
+//	cidr('::1/129') // error
+//	cidr('192.168.0.1/16') // error, because there are non-0 bits after the prefix
+//
+// isCIDR
+//
+// Returns true if a string is a valid CIDR notation representation of a subnet with mask.
+// The CIDR must be an IPv4 or IPv6 subnet address with a mask.
+// Leading zeros in IPv4 address octets are not allowed.
+// IPv4-mapped IPv6 addresses (e.g. ::ffff:1.2.3.4/24) are not allowed.
+//
+//	isCIDR(<string>) <bool>
+//
+// Examples:
+//
+//	isCIDR('192.168.0.0/16') // returns true
+//	isCIDR('::1/128') // returns true
+//	isCIDR('192.168.0.0/33') // returns false
+//	isCIDR('::1/129') // returns false
+//
+// containsIP / containsCIDR / ip / masked / prefixLength
+//
+// - containsIP: Returns true if the CIDR contains the given IP address.
+// The IP address must be an IPv4 or IPv6 address.
+// May take either a string or IP address as an argument.
+//
+// - containsCIDR: Returns true if the CIDR contains the given CIDR.
+// The CIDR must be an IPv4 or IPv6 subnet address with a mask.
+// May take either a string or CIDR as an argument.
+//
+// - ip: Returns the IP address representation of the CIDR.
+//
+// - masked: Returns the CIDR representation of the network address with a masked prefix.
+// This can be used to return the canonical form of the CIDR network.
+//
+// - prefixLength: Returns the prefix length of the CIDR in bits.
+// This is the number of bits in the mask.
+//
+// Examples:
+//
+// cidr('192.168.0.0/24').containsIP(ip('192.168.0.1')) // returns true
+// cidr('192.168.0.0/24').containsIP(ip('192.168.1.1')) // returns false
+// cidr('192.168.0.0/24').containsIP('192.168.0.1') // returns true
+// cidr('192.168.0.0/24').containsIP('192.168.1.1') // returns false
+// cidr('192.168.0.0/16').containsCIDR(cidr('192.168.10.0/24')) // returns true
+// cidr('192.168.1.0/24').containsCIDR(cidr('192.168.2.0/24')) // returns false
+// cidr('192.168.0.0/16').containsCIDR('192.168.10.0/24') // returns true
+// cidr('192.168.1.0/24').containsCIDR('192.168.2.0/24') // returns false
+// cidr('192.168.0.1/24').ip() // returns ipAddr('192.168.0.1')
+// cidr('192.168.0.1/24').ip().family() // returns '4'
+// cidr('::1/128').ip() // returns ipAddr('::1')
+// cidr('::1/128').ip().family() // returns '6'
+// cidr('192.168.0.0/24').masked() // returns cidr('192.168.0.0/24')
+// cidr('192.168.0.1/24').masked() // returns cidr('192.168.0.0/24')
+// cidr('192.168.0.0/24') == cidr('192.168.0.0/24').masked() // returns true, CIDR was already in canonical format
+// cidr('192.168.0.1/24') == cidr('192.168.0.1/24').masked() // returns false, CIDR was not in canonical format
+// cidr('192.168.0.0/16').prefixLength() // returns 16
+// cidr('::1/128').prefixLength() // returns 128
+func CIDR() cel.EnvOption {
+	return cel.Lib(cidrsLib)
+}
+
+var cidrsLib = &cidrs{}
+
+type cidrs struct{}
+
+func (*cidrs) LibraryName() string {
+	return "kubernetes.net.cidr"
+}
+
+func (*cidrs) declarations() map[string][]cel.FunctionOpt {
+	return cidrLibraryDecls
+}
+
+func (*cidrs) Types() []*cel.Type {
+	return []*cel.Type{apiservercel.CIDRType, apiservercel.IPType}
+}
+
+var cidrLibraryDecls = map[string][]cel.FunctionOpt{
+	"cidr": {
+		cel.Overload("string_to_cidr", []*cel.Type{cel.StringType}, apiservercel.CIDRType,
+			cel.UnaryBinding(stringToCIDR)),
+	},
+	"containsIP": {
+		cel.MemberOverload("cidr_contains_ip_string", []*cel.Type{apiservercel.CIDRType, cel.StringType}, cel.BoolType,
+			cel.BinaryBinding(cidrContainsIPString)),
+		cel.MemberOverload("cidr_contains_ip_ip", []*cel.Type{apiservercel.CIDRType, apiservercel.IPType}, cel.BoolType,
+			cel.BinaryBinding(cidrContainsIP)),
+	},
+	"containsCIDR": {
+		cel.MemberOverload("cidr_contains_cidr_string", []*cel.Type{apiservercel.CIDRType, cel.StringType}, cel.BoolType,
+			cel.BinaryBinding(cidrContainsCIDRString)),
+		cel.MemberOverload("cidr_contains_cidr", []*cel.Type{apiservercel.CIDRType, apiservercel.CIDRType}, cel.BoolType,
+			cel.BinaryBinding(cidrContainsCIDR)),
+	},
+	"ip": {
+		cel.MemberOverload("cidr_ip", []*cel.Type{apiservercel.CIDRType}, apiservercel.IPType,
+			cel.UnaryBinding(cidrToIP)),
+	},
+	"prefixLength": {
+		cel.MemberOverload("cidr_prefix_length", []*cel.Type{apiservercel.CIDRType}, cel.IntType,
+			cel.UnaryBinding(prefixLength)),
+	},
+	"masked": {
+		cel.MemberOverload("cidr_masked", []*cel.Type{apiservercel.CIDRType}, apiservercel.CIDRType,
+			cel.UnaryBinding(masked)),
+	},
+	"isCIDR": {
+		cel.Overload("is_cidr", []*cel.Type{cel.StringType}, cel.BoolType,
+			cel.UnaryBinding(isCIDR)),
+	},
+	"string": {
+		cel.Overload("cidr_to_string", []*cel.Type{apiservercel.CIDRType}, cel.StringType,
+			cel.UnaryBinding(cidrToString)),
+	},
+}
+
+func (*cidrs) CompileOptions() []cel.EnvOption {
+	options := []cel.EnvOption{cel.Types(apiservercel.CIDRType),
+		cel.Variable(apiservercel.CIDRType.TypeName(), types.NewTypeTypeWithParam(apiservercel.CIDRType)),
+	}
+	for name, overloads := range cidrLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*cidrs) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func stringToCIDR(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	net, err := parseCIDR(s)
+	if err != nil {
+		return types.NewErr("network address parse error during conversion from string: %v", err)
+	}
+
+	return apiservercel.CIDR{
+		Prefix: net,
+	}
+}
+
+func cidrToString(arg ref.Val) ref.Val {
+	cidr, ok := arg.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.String(cidr.Prefix.String())
+}
+
+func cidrContainsIPString(arg ref.Val, other ref.Val) ref.Val {
+	return cidrContainsIP(arg, stringToIP(other))
+}
+
+func cidrContainsCIDRString(arg ref.Val, other ref.Val) ref.Val {
+	return cidrContainsCIDR(arg, stringToCIDR(other))
+}
+
+func cidrContainsIP(arg ref.Val, other ref.Val) ref.Val {
+	cidr, ok := arg.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	ip, ok := other.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(cidr.Contains(ip.Addr))
+}
+
+func cidrContainsCIDR(arg ref.Val, other ref.Val) ref.Val {
+	cidr, ok := arg.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	containsCIDR, ok := other.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	equalMasked := cidr.Prefix.Masked() == netip.PrefixFrom(containsCIDR.Prefix.Addr(), cidr.Prefix.Bits()).Masked()
+	return types.Bool(equalMasked && cidr.Prefix.Bits() <= containsCIDR.Prefix.Bits())
+}
+
+func prefixLength(arg ref.Val) ref.Val {
+	cidr, ok := arg.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Int(cidr.Prefix.Bits())
+}
+
+func isCIDR(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	_, err := parseCIDR(s)
+	return types.Bool(err == nil)
+}
+
+func cidrToIP(arg ref.Val) ref.Val {
+	cidr, ok := arg.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return apiservercel.IP{
+		Addr: cidr.Prefix.Addr(),
+	}
+}
+
+func masked(arg ref.Val) ref.Val {
+	cidr, ok := arg.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	maskedCIDR := cidr.Prefix.Masked()
+	return apiservercel.CIDR{
+		Prefix: maskedCIDR,
+	}
+}
+
+// parseCIDR parses a string into a CIDR.
+// We use this function to parse CIDR notation in the CEL library
+// so that we can share the common logic of rejecting strings that
+// are IPv4-mapped IPv6 addresses or contain non-zero bits after the mask.
+func parseCIDR(raw string) (netip.Prefix, error) {
+	net, err := netip.ParsePrefix(raw)
+	if err != nil {
+		return netip.Prefix{}, fmt.Errorf("network address parse error during conversion from string: %v", err)
+	}
+
+	if net.Addr().Is4In6() {
+		return netip.Prefix{}, fmt.Errorf("IPv4-mapped IPv6 address %q is not allowed", raw)
+	}
+
+	return net, nil
+}
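
A short sketch of the library in use. This assumes the companion IP() library from ip.go in the same package, which supplies the ip()/isIP functions referenced by the string overloads:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"

	"k8s.io/apiserver/pkg/cel/library"
)

func main() {
	env, err := cel.NewEnv(library.IP(), library.CIDR())
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`cidr('192.168.0.0/16').containsIP('192.168.10.1')`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{})
	fmt.Println(out, err) // true <nil>
}
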
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/cost.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/cost.go
new file mode 100644
index 0000000000..a9e5db811a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/cost.go
@@ -0,0 +1,643 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"fmt"
+	"github.com/google/cel-go/checker"
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+	"math"
+
+	"k8s.io/apiserver/pkg/cel"
+)
+
+// panicOnUnknown makes cost estimate functions panic on unrecognized functions.
+// This is only set to true for unit tests.
+var panicOnUnknown = false
+
+// knownUnhandledFunctions is a list of functions used in cost tests that are not handled by CostEstimator.
+var knownUnhandledFunctions = map[string]bool{
+	"@not_strictly_false": true,
+	"uint":                true,
+	"duration":            true,
+	"bytes":               true,
+	"cel.@mapInsert":      true,
+	"timestamp":           true,
+	"strings.quote":       true,
+	"value":               true,
+	"_==_":                true,
+	"_&&_":                true,
+	"_||_":                true,
+	"_>_":                 true,
+	"_>=_":                true,
+	"_<_":                 true,
+	"_<=_":                true,
+	"!_":                  true,
+	"_?_:_":               true,
+	"_+_":                 true,
+	"_-_":                 true,
+}
+
+// CostEstimator implements CEL's interpretable.ActualCostEstimator and checker.CostEstimator.
+type CostEstimator struct {
+	// SizeEstimator provides a CostEstimator.EstimateSize that this CostEstimator will delegate size estimation
+	// calculations to if the size is not well known (i.e. a constant).
+	SizeEstimator checker.CostEstimator
+}
+
+const (
+	// shortest repeatable selector requirement that allocates a values slice is 2 characters: k,
+	selectorLengthToRequirementCount = float64(.5)
+	// the expensive parts to represent each requirement are a struct and a values slice
+	costPerRequirement = float64(common.ListCreateBaseCost + common.StructCreateBaseCost)
+)
+
+// a selector consists of a list of requirements held in a slice
+var baseSelectorCost = checker.CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
+
+func selectorCostEstimate(selectorLength checker.SizeEstimate) checker.CostEstimate {
+	parseCost := selectorLength.MultiplyByCostFactor(common.StringTraversalCostFactor)
+
+	requirementCount := selectorLength.MultiplyByCostFactor(selectorLengthToRequirementCount)
+	requirementCost := requirementCount.MultiplyByCostFactor(costPerRequirement)
+
+	return baseSelectorCost.Add(parseCost).Add(requirementCost)
+}
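
To make the formula concrete, a worked estimate assuming cel-go's usual cost constants (ListCreateBaseCost=10, StructCreateBaseCost=40, StringTraversalCostFactor=0.1; the exact values live in cel-go and may differ between versions):

// For a 40-character selector string:
//
//	base cost        = 10                    // the requirements slice itself
//	parse cost       = 40 * 0.1       = 4    // one traversal of the string
//	requirementCount = 40 * 0.5       = 20   // ~1 requirement per 2 characters
//	requirement cost = 20 * (10 + 40) = 1000 // a slice plus a struct per requirement
//	total            = 10 + 4 + 1000  = 1014
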
+
+func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, result ref.Val) *uint64 {
+	switch function {
+	case "check":
+		// An authorization check has a fixed cost
+		// This cost is set to allow for only two authorization checks per expression
+		cost := uint64(350000)
+		return &cost
+	case "serviceAccount", "path", "group", "resource", "subresource", "namespace", "name", "allowed", "reason", "error", "errored":
+		// All authorization builder and accessor functions have a nominal cost
+		cost := uint64(1)
+		return &cost
+	case "fieldSelector", "labelSelector":
+		// field and label selector parsing is a string parse into a structured set of requirements
+		if len(args) >= 2 {
+			selectorLength := actualSize(args[1])
+			cost := selectorCostEstimate(checker.SizeEstimate{Min: selectorLength, Max: selectorLength})
+			return &cost.Max
+		}
+	case "isSorted", "sum", "max", "min", "indexOf", "lastIndexOf":
+		var cost uint64
+		if len(args) > 0 {
+			cost += traversalCost(args[0]) // these O(n) operations all cost roughly the cost of a single traversal
+		}
+		return &cost
+	case "url", "lowerAscii", "upperAscii", "substring", "trim", "jsonpatch.escapeKey":
+		if len(args) >= 1 {
+			cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "replace", "split":
+		if len(args) >= 1 {
+			// cost is the traversal plus the construction of the result
+			cost := uint64(math.Ceil(float64(actualSize(args[0])) * 2 * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "join":
+		if len(args) >= 1 {
+			cost := uint64(math.Ceil(float64(actualSize(result)) * 2 * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "find", "findAll":
+		if len(args) >= 2 {
+			strCost := uint64(math.Ceil((1.0 + float64(actualSize(args[0]))) * common.StringTraversalCostFactor))
+			// We don't know how many expressions are in the regex, just the string length (a huge
+			// improvement here would be to somehow get a count of the number of expressions in the regex or
+			// how many states are in the regex state machine and use that to measure regex cost).
+			// For now, we're making a guess that each expression in a regex is typically at least 4 chars
+			// in length.
+			regexCost := uint64(math.Ceil(float64(actualSize(args[1])) * common.RegexStringLengthCostFactor))
+			cost := strCost * regexCost
+			return &cost
+		}
+	case "cidr", "isIP", "isCIDR":
+		// IP and CIDR parsing is a string traversal.
+		if len(args) >= 1 {
+			cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "ip":
+		// IP and CIDR parsing is a string traversal.
+		if len(args) >= 1 {
+			if overloadId == "cidr_ip" {
+				// The IP member of the CIDR object is just accessing a field.
+				// Nominal cost.
+				cost := uint64(1)
+				return &cost
+			}
+
+			cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "ip.isCanonical":
+		if len(args) >= 1 {
+			// We have to parse the string and then compare the parsed string to the original string.
+			// So we double the cost of parsing the string.
+			cost := uint64(math.Ceil(float64(actualSize(args[0])) * 2 * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast", "isGlobalUnicast":
+		// IP and CIDR accessors are nominal cost.
+		cost := uint64(1)
+		return &cost
+	case "containsIP":
+		if len(args) >= 2 {
+			cidrSize := actualSize(args[0])
+			otherSize := actualSize(args[1])
+
+			// This is the base cost of comparing two byte lists.
+			// We will compare only up to the length of the CIDR prefix in bytes, so use the cidrSize twice.
+			cost := uint64(math.Ceil(float64(cidrSize+cidrSize) * common.StringTraversalCostFactor))
+
+			if overloadId == "cidr_contains_ip_string" {
+				// If we are comparing a string, we must parse the string into the right type, so add the cost of traversing the string again.
+				cost += uint64(math.Ceil(float64(otherSize) * common.StringTraversalCostFactor))
+
+			}
+
+			return &cost
+		}
+	case "containsCIDR":
+		if len(args) >= 2 {
+			cidrSize := actualSize(args[0])
+			otherSize := actualSize(args[1])
+
+			// This is the base cost of comparing two byte lists.
+			// We will compare only up to the length of the CIDR prefix in bytes, so use the cidrSize twice.
+			cost := uint64(math.Ceil(float64(cidrSize+cidrSize) * common.StringTraversalCostFactor))
+
+			// As we are comparing if a CIDR is within another CIDR, we first mask the base CIDR and
+			// also compare the CIDR bits.
+			// This has an additional cost of the length of the IP being traversed again, plus 1.
+			cost += uint64(math.Ceil(float64(cidrSize)*common.StringTraversalCostFactor)) + 1
+
+			if overloadId == "cidr_contains_cidr_string" {
+				// If we are comparing a string, we must parse the string into the right type, so add the cost of traversing the string again.
+				cost += uint64(math.Ceil(float64(otherSize) * common.StringTraversalCostFactor))
+			}
+
+			return &cost
+		}
+	case "quantity", "isQuantity":
+		if len(args) >= 1 {
+			cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "validate":
+		if len(args) >= 2 {
+			format, isFormat := args[0].Value().(cel.Format)
+			if isFormat {
+				strSize := actualSize(args[1])
+
+				// Don't have access to the underlying regex; estimate a long regexp.
+				regexSize := format.MaxRegexSize
+
+				// Copied from CEL implementation for regex cost
+				//
+				// https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL
+				// Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0
+				// in case where string is empty but regex is still expensive.
+				strCost := uint64(math.Ceil((1.0 + float64(strSize)) * common.StringTraversalCostFactor))
+				// We don't know how many expressions are in the regex, just the string length (a huge
+				// improvement here would be to somehow get a count of the number of expressions in the regex or
+				// how many states are in the regex state machine and use that to measure regex cost).
+				// For now, we're making a guess that each expression in a regex is typically at least 4 chars
+				// in length.
+				regexCost := uint64(math.Ceil(float64(regexSize) * common.RegexStringLengthCostFactor))
+				cost := strCost * regexCost
+				return &cost
+			}
+		}
+	case "format.named":
+		// Simply dictionary lookup
+		cost := uint64(1)
+		return &cost
+	case "sign", "asInteger", "isInteger", "asApproximateFloat", "isGreaterThan", "isLessThan", "compareTo", "add", "sub":
+		cost := uint64(1)
+		return &cost
+	case "getScheme", "getHostname", "getHost", "getPort", "getEscapedPath", "getQuery":
+		// url accessors
+		cost := uint64(1)
+		return &cost
+	case "_==_":
+		if len(args) == 2 {
+			unitCost := uint64(1)
+			lhs := args[0]
+			switch lhs.(type) {
+			case *cel.Quantity, cel.Quantity,
+				*cel.IP, cel.IP,
+				*cel.CIDR, cel.CIDR,
+				*cel.Format, cel.Format, // Formats have a small max size. Format takes pointer receiver.
+				*cel.URL, cel.URL, // TODO: Computing the actual cost is expensive, and changing this would be a breaking change
+				*cel.Semver, cel.Semver,
+				*authorizerVal, authorizerVal, *pathCheckVal, pathCheckVal, *groupCheckVal, groupCheckVal,
+				*resourceCheckVal, resourceCheckVal, *decisionVal, decisionVal:
+				return &unitCost
+			default:
+				if panicOnUnknown && lhs.Type() != nil && isRegisteredType(lhs.Type().TypeName()) {
+					panic(fmt.Errorf("CallCost: unhandled equality for Kubernetes type %T", lhs))
+				}
+			}
+		}
+	}
+	if panicOnUnknown && !knownUnhandledFunctions[function] {
+		panic(fmt.Errorf("CallCost: unhandled function %q or args %v", function, args))
+	}
+	return nil
+}
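+
+// Worked example (editor's illustration, assuming cel-go's default cost
+// factors of StringTraversalCostFactor = 0.1 and
+// RegexStringLengthCostFactor = 0.25): for 'abcdefghij'.find('[a-z]+'),
+// actualSize(args[0]) is 10 and actualSize(args[1]) is 6, so
+// strCost = ceil((1+10)*0.1) = 2, regexCost = ceil(6*0.25) = 2, and the
+// runtime call cost returned above is 2*2 = 4.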
+
+func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
+	// WARNING: Any changes to this code impact API compatibility! The estimated cost is used to determine which CEL rules may be written to a
+	// CRD and any change (cost increases and cost decreases) are breaking.
+	switch function {
+	case "check":
+		// An authorization check has a fixed cost
+		// This cost is set to allow for only two authorization checks per expression
+		return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 350000, Max: 350000}}
+	case "serviceAccount", "path", "group", "resource", "subresource", "namespace", "name", "allowed", "reason", "error", "errored":
+		// All authorization builder and accessor functions have a nominal cost
+		return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+	case "fieldSelector", "labelSelector":
+		// field and label selector parse is a string parse into a structured set of requirements
+		if len(args) == 1 {
+			return &checker.CallEstimate{CostEstimate: selectorCostEstimate(l.sizeEstimate(args[0]))}
+		}
+	case "isSorted", "sum", "max", "min", "indexOf", "lastIndexOf":
+		if target != nil {
+			// Charge 1 cost for comparing each element in the list
+			elCost := checker.CostEstimate{Min: 1, Max: 1}
+			// If the list contains strings or bytes, add the cost of traversing all the strings/bytes as a way
+			// of estimating the additional comparison cost.
+			if elNode := l.listElementNode(*target); elNode != nil {
+				k := elNode.Type().Kind()
+				if k == types.StringKind || k == types.BytesKind {
+					sz := l.sizeEstimate(elNode)
+					elCost = elCost.Add(sz.MultiplyByCostFactor(common.StringTraversalCostFactor))
+				}
+				return &checker.CallEstimate{CostEstimate: l.sizeEstimate(*target).MultiplyByCost(elCost)}
+			} else { // the target is a string, which is supported by indexOf and lastIndexOf
+				return &checker.CallEstimate{CostEstimate: l.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor)}
+			}
+		}
+	case "url", "jsonpatch.escapeKey":
+		if len(args) == 1 {
+			sz := l.sizeEstimate(args[0])
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor), ResultSize: &sz}
+		}
+	case "lowerAscii", "upperAscii", "substring", "trim":
+		if target != nil {
+			sz := l.sizeEstimate(*target)
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor), ResultSize: &sz}
+		}
+	case "replace":
+		if target != nil && len(args) >= 2 {
+			sz := l.sizeEstimate(*target)
+			toReplaceSz := l.sizeEstimate(args[0])
+			replaceWithSz := l.sizeEstimate(args[1])
+
+			var replaceCount, retainedSz checker.SizeEstimate
+			// find the longest replacement:
+			if toReplaceSz.Min == 0 {
+				// if the string being replaced is empty, replace surrounds all characters in the input string with the replacement.
+				if sz.Max < math.MaxUint64 {
+					replaceCount.Max = sz.Max + 1
+				} else {
+					replaceCount.Max = sz.Max
+				}
+				// Include the length of the longest possible original string length.
+				retainedSz.Max = sz.Max
+			} else if replaceWithSz.Max <= toReplaceSz.Min {
+				// If the replacement does not make the result longer, use the original string length.
+				replaceCount.Max = 0
+				retainedSz.Max = sz.Max
+			} else {
+				// Replace the smallest possible substrings with the largest possible replacement
+				// as many times as possible.
+				replaceCount.Max = uint64(math.Ceil(float64(sz.Max) / float64(toReplaceSz.Min)))
+			}
+
+			// find the shortest replacement:
+			if toReplaceSz.Max == 0 {
+				// if the string being replaced is empty, replace surrounds all characters in the input string with the replacement.
+				if sz.Min < math.MaxUint64 {
+					replaceCount.Min = sz.Min + 1
+				} else {
+					replaceCount.Min = sz.Min
+				}
+				// Include the length of the shortest possible original string length.
+				retainedSz.Min = sz.Min
+			} else if toReplaceSz.Max <= replaceWithSz.Min {
+				// If the replacement does not make the result shorter, use the original string length.
+				replaceCount.Min = 0
+				retainedSz.Min = sz.Min
+			} else {
+				// Replace the largest possible substrings with the smallest possible replacement
+				// as many times as possible.
+				replaceCount.Min = uint64(math.Ceil(float64(sz.Min) / float64(toReplaceSz.Max)))
+			}
+			size := replaceCount.Multiply(replaceWithSz).Add(retainedSz)
+
+			// cost is the traversal plus the construction of the result
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(2 * common.StringTraversalCostFactor), ResultSize: &size}
+		}
+	case "split":
+		if target != nil {
+			sz := l.sizeEstimate(*target)
+
+			// Worst case size is when a separator of "" is used and each char is returned as a list element.
+			max := sz.Max
+			if len(args) > 1 {
+				if v := args[1].Expr().AsLiteral(); v != nil {
+					if i, ok := v.Value().(int64); ok {
+						max = uint64(i)
+					}
+				}
+			}
+			// Cost is the traversal plus the construction of the result.
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(2 * common.StringTraversalCostFactor), ResultSize: &checker.SizeEstimate{Min: 0, Max: max}}
+		}
+	case "join":
+		if target != nil {
+			var sz checker.SizeEstimate
+			listSize := l.sizeEstimate(*target)
+			if elNode := l.listElementNode(*target); elNode != nil {
+				elemSize := l.sizeEstimate(elNode)
+				sz = listSize.Multiply(elemSize)
+			}
+
+			if len(args) > 0 {
+				sepSize := l.sizeEstimate(args[0])
+				minSeparators := uint64(0)
+				maxSeparators := uint64(0)
+				if listSize.Min > 0 {
+					minSeparators = listSize.Min - 1
+				}
+				if listSize.Max > 0 {
+					maxSeparators = listSize.Max - 1
+				}
+				sz = sz.Add(sepSize.Multiply(checker.SizeEstimate{Min: minSeparators, Max: maxSeparators}))
+			}
+
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor), ResultSize: &sz}
+		}
+	case "find", "findAll":
+		if target != nil && len(args) >= 1 {
+			sz := l.sizeEstimate(*target)
+			// Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0
+			// in case where string is empty but regex is still expensive.
+			strCost := sz.Add(checker.SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor)
+			// We don't know how many expressions are in the regex, just the string length (a huge
+			// improvement here would be to somehow get a count of the number of expressions in the regex or
+			// how many states are in the regex state machine and use that to measure regex cost).
+			// For now, we're making a guess that each expression in a regex is typically at least 4 chars
+			// in length.
+			regexCost := l.sizeEstimate(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor)
+			// worst case size of result is when every char is returned as a separate find result.
+			return &checker.CallEstimate{CostEstimate: strCost.Multiply(regexCost), ResultSize: &checker.SizeEstimate{Min: 0, Max: sz.Max}}
+		}
+	case "cidr", "isIP", "isCIDR":
+		if target != nil {
+			sz := l.sizeEstimate(args[0])
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor)}
+		}
+	case "ip":
+		if target != nil && len(args) >= 1 {
+			if overloadId == "cidr_ip" {
+				// The IP member of the CIDR object is just accessing a field.
+				// Nominal cost.
+				return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+			}
+
+			sz := l.sizeEstimate(args[0])
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor)}
+		} else if target != nil {
+			// The IP member of a CIDR is just a field access; nominal cost.
+			return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+		}
+	case "ip.isCanonical":
+		if target != nil && len(args) >= 1 {
+			sz := l.sizeEstimate(args[0])
+			// We have to parse the string and then compare the parsed string to the original string.
+			// So we double the cost of parsing the string.
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(2 * common.StringTraversalCostFactor)}
+		}
+	case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast", "isGlobalUnicast":
+		// IP and CIDR accessors are nominal cost.
+		return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+	case "containsIP":
+		if target != nil && len(args) >= 1 {
+			// The base cost of the function is the cost of comparing two byte lists.
+			// The byte lists will be either ipv4 or ipv6 so will have a length of 4, or 16 bytes.
+			sz := checker.SizeEstimate{Min: 4, Max: 16}
+
+			// We have to compare the two strings to determine if the CIDR/IP is in the other CIDR.
+			ipCompCost := sz.Add(sz).MultiplyByCostFactor(common.StringTraversalCostFactor)
+
+			if overloadId == "cidr_contains_ip_string" {
+				// If we are comparing a string, we must parse the string into the right type, so add the cost of traversing the string again.
+				ipCompCost = ipCompCost.Add(checker.CostEstimate(l.sizeEstimate(args[0])).MultiplyByCostFactor(common.StringTraversalCostFactor))
+			}
+
+			return &checker.CallEstimate{CostEstimate: ipCompCost}
+		}
+	case "containsCIDR":
+		if target != nil && len(args) >= 1 {
+			// The base cost of the function is the cost of comparing two byte lists.
+			// The byte lists will be either ipv4 or ipv6 so will have a length of 4, or 16 bytes.
+			sz := checker.SizeEstimate{Min: 4, Max: 16}
+
+			// We have to compare the two strings to determine if the CIDR/IP is in the other CIDR.
+			ipCompCost := sz.Add(sz).MultiplyByCostFactor(common.StringTraversalCostFactor)
+
+			// As we are comparing if a CIDR is within another CIDR, we first mask the base CIDR and
+			// also compare the CIDR bits.
+			// This has an additional cost of the length of the IP being traversed again, plus 1.
+			ipCompCost = ipCompCost.Add(sz.MultiplyByCostFactor(common.StringTraversalCostFactor))
+			ipCompCost = ipCompCost.Add(checker.CostEstimate{Min: 1, Max: 1})
+
+			if overloadId == "cidr_contains_cidr_string" {
+				// If we are comparing a string, we must parse the string into the right type, so add the cost of traversing the string again.
+				ipCompCost = ipCompCost.Add(checker.CostEstimate(l.sizeEstimate(args[0])).MultiplyByCostFactor(common.StringTraversalCostFactor))
+			}
+
+			return &checker.CallEstimate{CostEstimate: ipCompCost}
+		}
+	case "quantity", "isQuantity":
+		if target != nil {
+			sz := l.sizeEstimate(args[0])
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor)}
+		}
+	case "validate":
+		if target != nil {
+			sz := l.sizeEstimate(args[0])
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).MultiplyByCostFactor(cel.MaxNameFormatRegexSize * common.RegexStringLengthCostFactor)}
+		}
+	case "format.named":
+		return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+	case "sign", "asInteger", "isInteger", "asApproximateFloat", "isGreaterThan", "isLessThan", "compareTo", "add", "sub":
+		return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+	case "getScheme", "getHostname", "getHost", "getPort", "getEscapedPath", "getQuery":
+		// url accessors
+		return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+	case "_==_":
+		if len(args) == 2 {
+			lhs := args[0]
+			rhs := args[1]
+			if lhs.Type().Equal(rhs.Type()) == types.True {
+				t := lhs.Type()
+				if t.Kind() == types.OpaqueKind {
+					switch t.TypeName() {
+					case cel.IPType.TypeName(), cel.CIDRType.TypeName():
+						return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+					}
+				}
+				if t.Kind() == types.StructKind {
+					switch t {
+					case cel.QuantityType, AuthorizerType, PathCheckType, // O(1) cost equality checks
+						GroupCheckType, ResourceCheckType, DecisionType, cel.SemverType:
+						return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+					case cel.FormatType:
+						return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: cel.MaxFormatSize}.MultiplyByCostFactor(common.StringTraversalCostFactor)}
+					case cel.URLType:
+						size := checker.SizeEstimate{Min: 1, Max: 1}
+						rhSize := rhs.ComputedSize()
+						lhSize := lhs.ComputedSize()
+						if rhSize != nil && lhSize != nil {
+							size = rhSize.Union(*lhSize)
+						}
+						return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: size.Max}.MultiplyByCostFactor(common.StringTraversalCostFactor)}
+					}
+				}
+				if panicOnUnknown && isRegisteredType(t.TypeName()) {
+					panic(fmt.Errorf("EstimateCallCost: unhandled equality for Kubernetes type %v", t))
+				}
+			}
+		}
+	}
+	if panicOnUnknown && !knownUnhandledFunctions[function] {
+		panic(fmt.Errorf("EstimateCallCost: unhandled function %q, target %v, args %v", function, target, args))
+	}
+	return nil
+}
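+
+// Worked example (editor's illustration): for s.split(',') where s has an
+// estimated size of [0, 5], the cost above comes to at most
+// ceil(5 * 2 * 0.1) = 1 (one traversal plus construction of the result), with
+// a result size of [0, 5] elements; the worst case is an empty separator that
+// yields one element per character.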
+
+func actualSize(value ref.Val) uint64 {
+	if sz, ok := value.(traits.Sizer); ok {
+		return uint64(sz.Size().(types.Int))
+	}
+	if panicOnUnknown {
+		// debug.PrintStack()
+		panic(fmt.Errorf("actualSize: non-sizer type %T", value))
+	}
+	return 1
+}
+
+func (l *CostEstimator) sizeEstimate(t checker.AstNode) checker.SizeEstimate {
+	if sz := t.ComputedSize(); sz != nil {
+		return *sz
+	}
+	if sz := l.EstimateSize(t); sz != nil {
+		return *sz
+	}
+	return checker.SizeEstimate{Min: 0, Max: math.MaxUint64}
+}
+
+func (l *CostEstimator) listElementNode(list checker.AstNode) checker.AstNode {
+	if params := list.Type().Parameters(); len(params) > 0 {
+		lt := params[0]
+		nodePath := list.Path()
+		if nodePath != nil {
+			// Provide path if we have it so that a OpenAPIv3 maxLength validation can be looked up, if it exists
+			// for this node.
+			path := make([]string, len(nodePath)+1)
+			copy(path, nodePath)
+			path[len(nodePath)] = "@items"
+			return &itemsNode{path: path, t: lt, expr: nil}
+		} else {
+			// Provide just the type if no path is available so that worst case size can be looked up based on type.
+			return &itemsNode{t: lt, expr: nil}
+		}
+	}
+	return nil
+}
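+
+// For example (editor's note), a list node at path ["spec", "hosts"] yields an
+// element node at ["spec", "hosts", "@items"], letting the size estimator look
+// up any OpenAPIv3 maxLength declared for the list's items.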
+
+func (l *CostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate {
+	if l.SizeEstimator != nil {
+		return l.SizeEstimator.EstimateSize(element)
+	}
+	return nil
+}
+
+type itemsNode struct {
+	path []string
+	t    *types.Type
+	expr ast.Expr
+}
+
+func (i *itemsNode) Path() []string {
+	return i.path
+}
+
+func (i *itemsNode) Type() *types.Type {
+	return i.t
+}
+
+func (i *itemsNode) Expr() ast.Expr {
+	return i.expr
+}
+
+func (i *itemsNode) ComputedSize() *checker.SizeEstimate {
+	return nil
+}
+
+var _ checker.AstNode = (*itemsNode)(nil)
+
+// traversalCost computes the cost of traversing a ref.Val as a data tree.
+func traversalCost(v ref.Val) uint64 {
+	// TODO: This could potentially be optimized by sampling maps and lists instead of traversing.
+	switch vt := v.(type) {
+	case types.String:
+		return uint64(float64(len(string(vt))) * common.StringTraversalCostFactor)
+	case types.Bytes:
+		return uint64(float64(len([]byte(vt))) * common.StringTraversalCostFactor)
+	case traits.Lister:
+		cost := uint64(0)
+		for it := vt.Iterator(); it.HasNext() == types.True; {
+			i := it.Next()
+			cost += traversalCost(i)
+		}
+		return cost
+	case traits.Mapper: // maps and objects
+		cost := uint64(0)
+		for it := vt.Iterator(); it.HasNext() == types.True; {
+			k := it.Next()
+			cost += traversalCost(k) + traversalCost(vt.Get(k))
+		}
+		return cost
+	default:
+		return 1
+	}
+}
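+
+// Illustration (editor's note): a single 50-character string costs
+// uint64(50*0.1) = 5, while the list ["hello", "world"] costs 0 because each
+// element's uint64(5*0.1) is truncated toward zero before summing.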
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/format.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/format.go
new file mode 100644
index 0000000000..82ecffb412
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/format.go
@@ -0,0 +1,279 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/asaskevich/govalidator"
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/decls"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
+	"k8s.io/apimachinery/pkg/util/validation"
+	apiservercel "k8s.io/apiserver/pkg/cel"
+	"k8s.io/kube-openapi/pkg/validation/strfmt"
+)
+
+// Format provides a CEL library exposing common named Kubernetes string
+// validations. Can be used in CRD ValidationRules messageExpression.
+//
+//  Example:
+//
+//    rule:              format.dns1123Label().validate(object.metadata.name).hasValue()
+//    messageExpression: format.dns1123Label().validate(object.metadata.name).value().join("\n")
+//
+// format.named(name: string) -> ?Format
+//
+//  Returns the Format with the given name, if it exists. Otherwise, optional.none
+//  Allowed names are:
+// 	 - `dns1123Label`
+// 	 - `dns1123Subdomain`
+// 	 - `dns1035Label`
+// 	 - `qualifiedName`
+// 	 - `dns1123LabelPrefix`
+// 	 - `dns1123SubdomainPrefix`
+// 	 - `dns1035LabelPrefix`
+// 	 - `labelValue`
+// 	 - `uri`
+// 	 - `uuid`
+// 	 - `byte`
+// 	 - `date`
+// 	 - `datetime`
+//
+// format.<formatName>() -> Format
+//
+//  Convenience functions for all the named formats are also available
+//
+//  Examples:
+//      format.dns1123Label().validate("my-label-name")
+//      format.dns1123Subdomain().validate("apiextensions.k8s.io")
+//      format.dns1035Label().validate("my-label-name")
+//      format.qualifiedName().validate("apiextensions.k8s.io/v1beta1")
+//      format.dns1123LabelPrefix().validate("my-label-prefix-")
+//      format.dns1123SubdomainPrefix().validate("mysubdomain.prefix.-")
+//      format.dns1035LabelPrefix().validate("my-label-prefix-")
+//      format.uri().validate("http://example.com")
+//          Uses same pattern as isURL, but returns an error
+//      format.uuid().validate("123e4567-e89b-12d3-a456-426614174000")
+//      format.byte().validate("aGVsbG8=")
+//      format.date().validate("2021-01-01")
+//      format.datetime().validate("2021-01-01T00:00:00Z")
+//
+
+// <Format>.validate(str: string) -> ?list<string>
+//
+//	Validates the given string against the given format. Returns optional.none
+//	if the string is valid, otherwise a list of validation error strings.
+func Format() cel.EnvOption {
+	return cel.Lib(formatLib)
+}
+
+var formatLib = &format{}
+
+type format struct{}
+
+func (*format) LibraryName() string {
+	return "kubernetes.format"
+}
+
+func (*format) Types() []*cel.Type {
+	return []*cel.Type{apiservercel.FormatType}
+}
+
+func (*format) declarations() map[string][]cel.FunctionOpt {
+	return formatLibraryDecls
+}
+
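+// ZeroArgumentFunctionBinding adapts a zero-argument Go function into a CEL
+// overload binding by wrapping it in a variadic FunctionBinding and rejecting,
+// at declaration time, any overload that declares one or more arguments.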
+func ZeroArgumentFunctionBinding(binding func() ref.Val) decls.OverloadOpt {
+	return func(o *decls.OverloadDecl) (*decls.OverloadDecl, error) {
+		wrapped, err := decls.FunctionBinding(func(values ...ref.Val) ref.Val { return binding() })(o)
+		if err != nil {
+			return nil, err
+		}
+		if len(wrapped.ArgTypes()) != 0 {
+			return nil, fmt.Errorf("function binding must have 0 arguments")
+		}
+		return o, nil
+	}
+}
+
+func (*format) CompileOptions() []cel.EnvOption {
+	options := make([]cel.EnvOption, 0, len(formatLibraryDecls))
+	for name, overloads := range formatLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	for name, constantValue := range ConstantFormats {
+		prefixedName := "format." + name
+		options = append(options, cel.Function(prefixedName, cel.Overload(prefixedName, []*cel.Type{}, apiservercel.FormatType, ZeroArgumentFunctionBinding(func() ref.Val {
+			return constantValue
+		}))))
+	}
+	return options
+}
+
+func (*format) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
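+// ConstantFormats maps each supported format name to its validation function
+// and a MaxRegexSize that the cost estimator uses as a conservative stand-in
+// for the size of the underlying regular expression.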
+var ConstantFormats = map[string]apiservercel.Format{
+	"dns1123Label": {
+		Name:         "DNS1123Label",
+		ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSLabel(s, false) },
+		MaxRegexSize: 30,
+	},
+	"dns1123Subdomain": {
+		Name:         "DNS1123Subdomain",
+		ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSSubdomain(s, false) },
+		MaxRegexSize: 60,
+	},
+	"dns1035Label": {
+		Name:         "DNS1035Label",
+		ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNS1035Label(s, false) },
+		MaxRegexSize: 30,
+	},
+	"qualifiedName": {
+		Name:         "QualifiedName",
+		ValidateFunc: validation.IsQualifiedName,
+		MaxRegexSize: 60, // uses subdomain regex
+	},
+
+	"dns1123LabelPrefix": {
+		Name:         "DNS1123LabelPrefix",
+		ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSLabel(s, true) },
+		MaxRegexSize: 30,
+	},
+	"dns1123SubdomainPrefix": {
+		Name:         "DNS1123SubdomainPrefix",
+		ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSSubdomain(s, true) },
+		MaxRegexSize: 60,
+	},
+	"dns1035LabelPrefix": {
+		Name:         "DNS1035LabelPrefix",
+		ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNS1035Label(s, true) },
+		MaxRegexSize: 30,
+	},
+	"labelValue": {
+		Name:         "LabelValue",
+		ValidateFunc: validation.IsValidLabelValue,
+		MaxRegexSize: 40,
+	},
+
+	// CRD formats
+	// Implementations sourced from strfmt, which kube-openapi uses as its
+	// format library. There are other CRD formats supported, but they are
+	// covered by other portions of the CEL library (like IP/CIDR), or their
+	// use is discouraged (like bsonobjectid, email, etc)
+	"uri": {
+		Name: "URI",
+		ValidateFunc: func(s string) []string {
+			// Directly call ParseRequestURI since we can get a better error message
+			_, err := url.ParseRequestURI(s)
+			if err != nil {
+				return []string{err.Error()}
+			}
+			return nil
+		},
+		// Use govalidator url regex to estimate, since ParseRequestURI
+		// doesn't use regex
+		MaxRegexSize: len(govalidator.URL),
+	},
+	"uuid": {
+		Name: "uuid",
+		ValidateFunc: func(s string) []string {
+			if !strfmt.Default.Validates("uuid", s) {
+				return []string{"does not match the UUID format"}
+			}
+			return nil
+		},
+		MaxRegexSize: len(strfmt.UUIDPattern),
+	},
+	"byte": {
+		Name: "byte",
+		ValidateFunc: func(s string) []string {
+			if !strfmt.Default.Validates("byte", s) {
+				return []string{"invalid base64"}
+			}
+			return nil
+		},
+		MaxRegexSize: len(govalidator.Base64),
+	},
+	"date": {
+		Name: "date",
+		ValidateFunc: func(s string) []string {
+			if !strfmt.Default.Validates("date", s) {
+				return []string{"invalid date"}
+			}
+			return nil
+		},
+		// Estimated regex size for RFC3339FullDate which is
+		// a date format. Assume a date-time pattern is longer
+		// so use that to conservatively estimate this
+		MaxRegexSize: len(strfmt.DateTimePattern),
+	},
+	"datetime": {
+		Name: "datetime",
+		ValidateFunc: func(s string) []string {
+			if !strfmt.Default.Validates("datetime", s) {
+				return []string{"invalid datetime"}
+			}
+			return nil
+		},
+		MaxRegexSize: len(strfmt.DateTimePattern),
+	},
+}
+
+var formatLibraryDecls = map[string][]cel.FunctionOpt{
+	"validate": {
+		cel.MemberOverload("format-validate", []*cel.Type{apiservercel.FormatType, cel.StringType}, cel.OptionalType(cel.ListType(cel.StringType)), cel.BinaryBinding(formatValidate)),
+	},
+	"format.named": {
+		cel.Overload("format-named", []*cel.Type{cel.StringType}, cel.OptionalType(apiservercel.FormatType), cel.UnaryBinding(func(name ref.Val) ref.Val {
+			nameString, ok := name.Value().(string)
+			if !ok {
+				return types.MaybeNoSuchOverloadErr(name)
+			}
+
+			f, ok := ConstantFormats[nameString]
+			if !ok {
+				return types.OptionalNone
+			}
+			return types.OptionalOf(f)
+		})),
+	},
+}
+
+func formatValidate(arg1, arg2 ref.Val) ref.Val {
+	f, ok := arg1.Value().(apiservercel.Format)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	str, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg2)
+	}
+
+	res := f.ValidateFunc(str)
+	if len(res) == 0 {
+		return types.OptionalNone
+	}
+	return types.OptionalOf(types.NewStringList(types.DefaultTypeAdapter, res))
+}
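+
+// Example usage in a CEL expression (editor's illustration):
+//
+//	format.named("dns1123Label").value().validate("My_Label")
+//	// returns optional.of([...]) describing why "My_Label" is not a valid
+//	// DNS-1123 label, while validate("my-label") returns optional.none.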
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/ip.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/ip.go
new file mode 100644
index 0000000000..8edc4463a0
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/ip.go
@@ -0,0 +1,337 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"fmt"
+	"net/netip"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// IP provides a CEL function library extension of IP address parsing functions.
+//
+// ip
+//
+// Converts a string to an IP address or results in an error if the string is not a valid IP address.
+// The IP address must be an IPv4 or IPv6 address.
+// IPv4-mapped IPv6 addresses (e.g. ::ffff:1.2.3.4) are not allowed.
+// IP addresses with zones (e.g. fe80::1%eth0) are not allowed.
+// Leading zeros in IPv4 address octets are not allowed.
+//
+//	ip(<string>) <IPAddr>
+//
+// Examples:
+//
+//	ip('127.0.0.1') // returns an IPv4 address
+//	ip('::1') // returns an IPv6 address
+//	ip('127.0.0.256') // error
+//	ip(':::1') // error
+//
+// isIP
+//
+// Returns true if a string is a valid IP address.
+// The IP address must be an IPv4 or IPv6 address.
+// IPv4-mapped IPv6 addresses (e.g. ::ffff:1.2.3.4) are not allowed.
+// IP addresses with zones (e.g. fe80::1%eth0) are not allowed.
+// Leading zeros in IPv4 address octets are not allowed.
+//
+//	isIP(<string>) <bool>
+//
+// Examples:
+//
+//	isIP('127.0.0.1') // returns true
+//	isIP('::1') // returns true
+//	isIP('127.0.0.256') // returns false
+//	isIP(':::1') // returns false
+//
+// ip.isCanonical
+//
+// Returns true if the IP address is in its canonical form.
+// There is exactly one canonical form for every IP address, so fields containing
+// IPs in canonical form can just be treated as strings when checking for equality or uniqueness.
+//
+//	ip.isCanonical(<string>) <bool>
+//
+// Examples:
+//
+//	ip.isCanonical('127.0.0.1') // returns true; all valid IPv4 addresses are canonical
+//	ip.isCanonical('2001:db8::abcd') // returns true
+//	ip.isCanonical('2001:DB8::ABCD') // returns false
+//	ip.isCanonical('2001:db8::0:0:0:abcd') // returns false
+//
+// family / isUnspecified / isLoopback / isLinkLocalMulticast / isLinkLocalUnicast / isGlobalUnicast
+//
+// - family: returns the IP address's family (IPv4 or IPv6) as an integer, either '4' or '6'.
+//
+// - isUnspecified: returns true if the IP address is the unspecified address.
+// Either the IPv4 address "0.0.0.0" or the IPv6 address "::".
+//
+// - isLoopback: returns true if the IP address is the loopback address.
+// Either an IPv4 address with a value of 127.x.x.x or an IPv6 address with a value of ::1.
+//
+// - isLinkLocalMulticast: returns true if the IP address is a link-local multicast address.
+// Either an IPv4 address with a value of 224.0.0.x or an IPv6 address in the network ff00::/8.
+//
+// - isLinkLocalUnicast: returns true if the IP address is a link-local unicast address.
+// Either an IPv4 address with a value of 169.254.x.x or an IPv6 address in the network fe80::/10.
+//
+// - isGlobalUnicast: returns true if the IP address is a global unicast address.
+// Either an IPv4 address that is not zero or 255.255.255.255 or an IPv6 address that is not a link-local unicast, loopback or multicast address.
+//
+// Examples:
+//
+// ip('127.0.0.1').family() // returns '4'
+// ip('::1').family() // returns '6'
+// ip('127.0.0.1').family() == 4 // returns true
+// ip('::1').family() == 6 // returns true
+// ip('0.0.0.0').isUnspecified() // returns true
+// ip('127.0.0.1').isUnspecified() // returns false
+// ip('::').isUnspecified() // returns true
+// ip('::1').isUnspecified() // returns false
+// ip('127.0.0.1').isLoopback() // returns true
+// ip('192.168.0.1').isLoopback() // returns false
+// ip('::1').isLoopback() // returns true
+// ip('2001:db8::abcd').isLoopback() // returns false
+// ip('224.0.0.1').isLinkLocalMulticast() // returns true
+// ip('224.0.1.1').isLinkLocalMulticast() // returns false
+// ip('ff02::1').isLinkLocalMulticast() // returns true
+// ip('fd00::1').isLinkLocalMulticast() // returns false
+// ip('169.254.169.254').isLinkLocalUnicast() // returns true
+// ip('192.168.0.1').isLinkLocalUnicast() // returns false
+// ip('fe80::1').isLinkLocalUnicast() // returns true
+// ip('fd80::1').isLinkLocalUnicast() // returns false
+// ip('192.168.0.1').isGlobalUnicast() // returns true
+// ip('255.255.255.255').isGlobalUnicast() // returns false
+// ip('2001:db8::abcd').isGlobalUnicast() // returns true
+// ip('ff00::1').isGlobalUnicast() // returns false
+func IP() cel.EnvOption {
+	return cel.Lib(ipLib)
+}
+
+var ipLib = &ip{}
+
+type ip struct{}
+
+func (*ip) LibraryName() string {
+	return "kubernetes.net.ip"
+}
+
+func (*ip) declarations() map[string][]cel.FunctionOpt {
+	return ipLibraryDecls
+}
+
+func (*ip) Types() []*cel.Type {
+	return []*cel.Type{apiservercel.IPType}
+}
+
+var ipLibraryDecls = map[string][]cel.FunctionOpt{
+	"ip": {
+		cel.Overload("string_to_ip", []*cel.Type{cel.StringType}, apiservercel.IPType,
+			cel.UnaryBinding(stringToIP)),
+	},
+	"family": {
+		cel.MemberOverload("ip_family", []*cel.Type{apiservercel.IPType}, cel.IntType,
+			cel.UnaryBinding(family)),
+	},
+	"ip.isCanonical": {
+		cel.Overload("ip_is_canonical", []*cel.Type{cel.StringType}, cel.BoolType,
+			cel.UnaryBinding(ipIsCanonical)),
+	},
+	"isUnspecified": {
+		cel.MemberOverload("ip_is_unspecified", []*cel.Type{apiservercel.IPType}, cel.BoolType,
+			cel.UnaryBinding(isUnspecified)),
+	},
+	"isLoopback": {
+		cel.MemberOverload("ip_is_loopback", []*cel.Type{apiservercel.IPType}, cel.BoolType,
+			cel.UnaryBinding(isLoopback)),
+	},
+	"isLinkLocalMulticast": {
+		cel.MemberOverload("ip_is_link_local_multicast", []*cel.Type{apiservercel.IPType}, cel.BoolType,
+			cel.UnaryBinding(isLinkLocalMulticast)),
+	},
+	"isLinkLocalUnicast": {
+		cel.MemberOverload("ip_is_link_local_unicast", []*cel.Type{apiservercel.IPType}, cel.BoolType,
+			cel.UnaryBinding(isLinkLocalUnicast)),
+	},
+	"isGlobalUnicast": {
+		cel.MemberOverload("ip_is_global_unicast", []*cel.Type{apiservercel.IPType}, cel.BoolType,
+			cel.UnaryBinding(isGlobalUnicast)),
+	},
+	"isIP": {
+		cel.Overload("is_ip", []*cel.Type{cel.StringType}, cel.BoolType,
+			cel.UnaryBinding(isIP)),
+	},
+	"string": {
+		cel.Overload("ip_to_string", []*cel.Type{apiservercel.IPType}, cel.StringType,
+			cel.UnaryBinding(ipToString)),
+	},
+}
+
+func (*ip) CompileOptions() []cel.EnvOption {
+	options := []cel.EnvOption{cel.Types(apiservercel.IPType),
+		cel.Variable(apiservercel.IPType.TypeName(), types.NewTypeTypeWithParam(apiservercel.IPType)),
+	}
+	for name, overloads := range ipLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*ip) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func stringToIP(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	addr, err := parseIPAddr(s)
+	if err != nil {
+		// Don't add context, we control the error message already.
+		return types.NewErr("%v", err)
+	}
+
+	return apiservercel.IP{
+		Addr: addr,
+	}
+}
+
+func ipToString(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.String(ip.Addr.String())
+}
+
+func family(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	switch {
+	case ip.Addr.Is4():
+		return types.Int(4)
+	case ip.Addr.Is6():
+		return types.Int(6)
+	default:
+		return types.NewErr("IP address %q is not an IPv4 or IPv6 address", ip.Addr.String())
+	}
+}
+
+func ipIsCanonical(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	addr, err := parseIPAddr(s)
+	if err != nil {
+		// Don't add context, we control the error message already.
+		return types.NewErr("%v", err)
+	}
+
+	// Addr.String() always returns the canonical form of the IP address.
+	// Therefore comparing this with the original string representation
+	// will tell us if the IP address is in its canonical form.
+	return types.Bool(addr.String() == s)
+}
+
+func isIP(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	_, err := parseIPAddr(s)
+	return types.Bool(err == nil)
+}
+
+func isUnspecified(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(ip.Addr.IsUnspecified())
+}
+
+func isLoopback(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(ip.Addr.IsLoopback())
+}
+
+func isLinkLocalMulticast(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(ip.Addr.IsLinkLocalMulticast())
+}
+
+func isLinkLocalUnicast(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(ip.Addr.IsLinkLocalUnicast())
+}
+
+func isGlobalUnicast(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(ip.Addr.IsGlobalUnicast())
+}
+
+// parseIPAddr parses a string into an IP address.
+// We use this function to parse IP addresses in the CEL library
+// so that we can share the common logic of rejecting IP addresses
+// that contain zones or are IPv4-mapped IPv6 addresses.
+func parseIPAddr(raw string) (netip.Addr, error) {
+	addr, err := netip.ParseAddr(raw)
+	if err != nil {
+		return netip.Addr{}, fmt.Errorf("IP Address %q parse error during conversion from string: %v", raw, err)
+	}
+
+	if addr.Zone() != "" {
+		return netip.Addr{}, fmt.Errorf("IP address %q with zone value is not allowed", raw)
+	}
+
+	if addr.Is4In6() {
+		return netip.Addr{}, fmt.Errorf("IPv4-mapped IPv6 address %q is not allowed", raw)
+	}
+
+	return addr, nil
+}
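+
+// Examples (editor's illustration):
+//
+//	parseIPAddr("192.168.0.1")    // ok
+//	parseIPAddr("fe80::1%eth0")   // error: zone values are not allowed
+//	parseIPAddr("::ffff:1.2.3.4") // error: IPv4-mapped IPv6 is not allowed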
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/jsonpatch.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/jsonpatch.go
new file mode 100644
index 0000000000..bdcb6d852b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/jsonpatch.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"strings"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// JSONPatch provides a CEL function library extension of JSONPatch functions.
+//
+// jsonpatch.escapeKey
+//
+// Escapes a string for use as a JSONPatch path key.
+//
+//	jsonpatch.escapeKey(<string>) <string>
+//
+// Examples:
+//
+//	"/metadata/labels/" + jsonpatch.escapeKey('k8s.io/my~label') // returns "/metadata/labels/k8s.io~1my~0label"
+func JSONPatch() cel.EnvOption {
+	return cel.Lib(jsonPatchLib)
+}
+
+var jsonPatchLib = &jsonPatch{}
+
+type jsonPatch struct{}
+
+func (*jsonPatch) LibraryName() string {
+	return "kubernetes.jsonpatch"
+}
+
+func (*jsonPatch) declarations() map[string][]cel.FunctionOpt {
+	return jsonPatchLibraryDecls
+}
+
+func (*jsonPatch) Types() []*cel.Type {
+	return []*cel.Type{}
+}
+
+var jsonPatchLibraryDecls = map[string][]cel.FunctionOpt{
+	"jsonpatch.escapeKey": {
+		cel.Overload("string_jsonpatch_escapeKey_string", []*cel.Type{cel.StringType}, cel.StringType,
+			cel.UnaryBinding(escape)),
+	},
+}
+
+func (*jsonPatch) CompileOptions() []cel.EnvOption {
+	var options []cel.EnvOption
+	for name, overloads := range jsonPatchLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*jsonPatch) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+var jsonPatchReplacer = strings.NewReplacer("/", "~1", "~", "~0")
+
+func escapeKey(k string) string {
+	return jsonPatchReplacer.Replace(k)
+}
+
+func escape(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	escaped := escapeKey(s)
+	return types.String(escaped)
+}
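+
+// Worked example (editor's illustration): escapeKey("k8s.io/my~label")
+// replaces "/" with "~1" and "~" with "~0", yielding "k8s.io~1my~0label",
+// matching the JSON Pointer escaping rules of RFC 6901.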
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go
new file mode 100644
index 0000000000..dc436973e5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/libraries.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"github.com/google/cel-go/cel"
+)
+
+// Library represents a CEL library used by kubernetes.
+type Library interface {
+	// SingletonLibrary provides the library name and ensures the library can be safely registered into environments.
+	cel.SingletonLibrary
+
+	// Types provides all custom types introduced by the library.
+	Types() []*cel.Type
+
+	// declarations returns all function declarations provided by the library.
+	declarations() map[string][]cel.FunctionOpt
+}
+
+// KnownLibraries returns all libraries used in Kubernetes.
+func KnownLibraries() []Library {
+	return []Library{
+		authzLib,
+		authzSelectorsLib,
+		listsLib,
+		regexLib,
+		urlsLib,
+		quantityLib,
+		ipLib,
+		cidrsLib,
+		formatLib,
+		semverLib,
+		jsonPatchLib,
+	}
+}
+
+func isRegisteredType(typeName string) bool {
+	for _, lib := range KnownLibraries() {
+		for _, rt := range lib.Types() {
+			if rt.TypeName() == typeName {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/lists.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/lists.go
new file mode 100644
index 0000000000..1f61b1181e
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/lists.go
@@ -0,0 +1,324 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+	"github.com/google/cel-go/interpreter/functions"
+)
+
+// Lists provides a CEL function library extension of list utility functions.
+//
+// isSorted
+//
+// Returns true if the provided list of comparable elements is sorted, else returns false.
+//
+//	<list<T>>.isSorted() <bool>, T must be a comparable type
+//
+// Examples:
+//
+//	[1, 2, 3].isSorted()  // return true
+//	['a', 'b', 'b', 'c'].isSorted()  // return true
+//	[2.0, 1.0].isSorted()  // return false
+//	[1].isSorted()  // return true
+//	[].isSorted()  // return true
+//
+// sum
+//
+// Returns the sum of the elements of the provided list. Supports CEL number (int, uint, double) and duration types.
+//
+//	<list<T>>.sum() <T>, T must be a numeric type or a duration
+//
+// Examples:
+//
+//	[1, 3].sum() // returns 4
+//	[1.0, 3.0].sum() // returns 4.0
+//	['1m', '1s'].sum() // returns '1m1s'
+//	emptyIntList.sum() // returns 0
+//	emptyDoubleList.sum() // returns 0.0
+//	[].sum() // returns 0
+//
+// min / max
+//
+// Returns the minimum/maximum valued element of the provided list. Supports all comparable types.
+// If the list is empty, an error is returned.
+//
+//	<list<T>>.min() <T>, T must be a comparable type
+//	<list<T>>.max() <T>, T must be a comparable type
+//
+// Examples:
+//
+//	[1, 3].min() // returns 1
+//	[1, 3].max() // returns 3
+//	[].min() // error
+//	[1].min() // returns 1
+//	([0] + emptyList).min() // returns 0
+//
+// indexOf / lastIndexOf
+//
+// Returns either the first or last positional index of the provided element in the list.
+// If the element is not found, -1 is returned. Supports all equatable types.
+//
+//	<list<T>>.indexOf(<T>) <int>, T must be an equatable type
+//	<list<T>>.lastIndexOf(<T>) <int>, T must be an equatable type
+//
+// Examples:
+//
+//	[1, 2, 2, 3].indexOf(2) // returns 1
+//	['a', 'b', 'b', 'c'].lastIndexOf('b') // returns 2
+//	[1.0].indexOf(1.1) // returns -1
+//	[].indexOf('string') // returns -1
+func Lists() cel.EnvOption {
+	return cel.Lib(listsLib)
+}
+
+var listsLib = &lists{}
+
+type lists struct{}
+
+func (*lists) LibraryName() string {
+	return "kubernetes.lists"
+}
+
+func (*lists) Types() []*cel.Type {
+	return []*cel.Type{}
+}
+
+func (*lists) declarations() map[string][]cel.FunctionOpt {
+	return listsLibraryDecls
+}
+
+var paramA = cel.TypeParamType("A")
+
+// CEL typeParams can be used to constrain to a specific trait (e.g. traits.ComparableType) if the 1st operand is the type to constrain.
+// But the functions we need to constrain are list<paramType>, not just paramType.
+// Make sure the order of the overload set is deterministic.
+type namedCELType struct {
+	typeName string
+	celType  *cel.Type
+}
+
+var summableTypes = []namedCELType{
+	{typeName: "int", celType: cel.IntType},
+	{typeName: "uint", celType: cel.UintType},
+	{typeName: "double", celType: cel.DoubleType},
+	{typeName: "duration", celType: cel.DurationType},
+}
+
+var zeroValuesOfSummableTypes = map[string]ref.Val{
+	"int":      types.Int(0),
+	"uint":     types.Uint(0),
+	"double":   types.Double(0.0),
+	"duration": types.Duration{Duration: 0},
+}
+var comparableTypes = []namedCELType{
+	{typeName: "int", celType: cel.IntType},
+	{typeName: "uint", celType: cel.UintType},
+	{typeName: "double", celType: cel.DoubleType},
+	{typeName: "bool", celType: cel.BoolType},
+	{typeName: "duration", celType: cel.DurationType},
+	{typeName: "timestamp", celType: cel.TimestampType},
+	{typeName: "string", celType: cel.StringType},
+	{typeName: "bytes", celType: cel.BytesType},
+}
+
+// WARNING: All library additions or modifications must follow
+// https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/2876-crd-validation-expression-language#function-library-updates
+var listsLibraryDecls = map[string][]cel.FunctionOpt{
+	"isSorted": templatedOverloads(comparableTypes, func(name string, paramType *cel.Type) cel.FunctionOpt {
+		return cel.MemberOverload(fmt.Sprintf("list_%s_is_sorted_bool", name),
+			[]*cel.Type{cel.ListType(paramType)}, cel.BoolType, cel.UnaryBinding(isSorted))
+	}),
+	"sum": templatedOverloads(summableTypes, func(name string, paramType *cel.Type) cel.FunctionOpt {
+		return cel.MemberOverload(fmt.Sprintf("list_%s_sum_%s", name, name),
+			[]*cel.Type{cel.ListType(paramType)}, paramType, cel.UnaryBinding(func(list ref.Val) ref.Val {
+				return sum(
+					func() ref.Val {
+						return zeroValuesOfSummableTypes[name]
+					})(list)
+			}))
+	}),
+	"max": templatedOverloads(comparableTypes, func(name string, paramType *cel.Type) cel.FunctionOpt {
+		return cel.MemberOverload(fmt.Sprintf("list_%s_max_%s", name, name),
+			[]*cel.Type{cel.ListType(paramType)}, paramType, cel.UnaryBinding(max()))
+	}),
+	"min": templatedOverloads(comparableTypes, func(name string, paramType *cel.Type) cel.FunctionOpt {
+		return cel.MemberOverload(fmt.Sprintf("list_%s_min_%s", name, name),
+			[]*cel.Type{cel.ListType(paramType)}, paramType, cel.UnaryBinding(min()))
+	}),
+	"indexOf": {
+		cel.MemberOverload("list_a_index_of_int", []*cel.Type{cel.ListType(paramA), paramA}, cel.IntType,
+			cel.BinaryBinding(indexOf)),
+	},
+	"lastIndexOf": {
+		cel.MemberOverload("list_a_last_index_of_int", []*cel.Type{cel.ListType(paramA), paramA}, cel.IntType,
+			cel.BinaryBinding(lastIndexOf)),
+	},
+}
+
+func (*lists) CompileOptions() []cel.EnvOption {
+	options := []cel.EnvOption{}
+	for name, overloads := range listsLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*lists) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func isSorted(val ref.Val) ref.Val {
+	var prev traits.Comparer
+	iterable, ok := val.(traits.Iterable)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(val)
+	}
+	for it := iterable.Iterator(); it.HasNext() == types.True; {
+		next := it.Next()
+		nextCmp, ok := next.(traits.Comparer)
+		if !ok {
+			return types.MaybeNoSuchOverloadErr(next)
+		}
+		if prev != nil {
+			cmp := prev.Compare(next)
+			if cmp == types.IntOne {
+				return types.False
+			}
+		}
+		prev = nextCmp
+	}
+	return types.True
+}
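+
+// Note (editor's): isSorted treats equal adjacent elements as sorted; only a
+// strictly greater previous element (Compare returning IntOne) fails, so
+// [1, 1, 2].isSorted() evaluates to true.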
+
+func sum(init func() ref.Val) functions.UnaryOp {
+	return func(val ref.Val) ref.Val {
+		i := init()
+		acc, ok := i.(traits.Adder)
+		if !ok {
+			// Should never happen since all passed in init values are valid
+			return types.MaybeNoSuchOverloadErr(i)
+		}
+		iterable, ok := val.(traits.Iterable)
+		if !ok {
+			return types.MaybeNoSuchOverloadErr(val)
+		}
+		for it := iterable.Iterator(); it.HasNext() == types.True; {
+			next := it.Next()
+			nextAdder, ok := next.(traits.Adder)
+			if !ok {
+				// Should never happen for type checked CEL programs
+				return types.MaybeNoSuchOverloadErr(next)
+			}
+			if acc != nil {
+				s := acc.Add(next)
+				sum, ok := s.(traits.Adder)
+				if !ok {
+					// Should never happen for type checked CEL programs
+					return types.MaybeNoSuchOverloadErr(s)
+				}
+				acc = sum
+			} else {
+				acc = nextAdder
+			}
+		}
+		return acc.(ref.Val)
+	}
+}
+
+func min() functions.UnaryOp {
+	return cmp("min", types.IntOne)
+}
+
+func max() functions.UnaryOp {
+	return cmp("max", types.IntNegOne)
+}
+
+func cmp(opName string, opPreferCmpResult ref.Val) functions.UnaryOp {
+	return func(val ref.Val) ref.Val {
+		var result traits.Comparer
+		iterable, ok := val.(traits.Iterable)
+		if !ok {
+			return types.MaybeNoSuchOverloadErr(val)
+		}
+		for it := iterable.Iterator(); it.HasNext() == types.True; {
+			next := it.Next()
+			nextCmp, ok := next.(traits.Comparer)
+			if !ok {
+				// Should never happen for type checked CEL programs
+				return types.MaybeNoSuchOverloadErr(next)
+			}
+			if result == nil {
+				result = nextCmp
+			} else {
+				cmp := result.Compare(next)
+				if cmp == opPreferCmpResult {
+					result = nextCmp
+				}
+			}
+		}
+		if result == nil {
+			return types.NewErr("%s called on empty list", opName)
+		}
+		return result.(ref.Val)
+	}
+}
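+
+// Note (editor's): cmp keeps the first element that wins each comparison, so
+// min() and max() on an empty list return the "min/max called on empty list"
+// error rather than a zero value.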
+
+func indexOf(list ref.Val, item ref.Val) ref.Val {
+	lister, ok := list.(traits.Lister)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(list)
+	}
+	sz := lister.Size().(types.Int)
+	for i := types.Int(0); i < sz; i++ {
+		if lister.Get(types.Int(i)).Equal(item) == types.True {
+			return types.Int(i)
+		}
+	}
+	return types.Int(-1)
+}
+
+func lastIndexOf(list ref.Val, item ref.Val) ref.Val {
+	lister, ok := list.(traits.Lister)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(list)
+	}
+	sz := lister.Size().(types.Int)
+	for i := sz - 1; i >= 0; i-- {
+		if lister.Get(types.Int(i)).Equal(item) == types.True {
+			return types.Int(i)
+		}
+	}
+	return types.Int(-1)
+}
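+
+// Editor's note: both scans are O(n) in the list size; indexOf walks forward,
+// lastIndexOf walks backward, and each returns -1 when no element compares
+// equal to the item.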
+
+// templatedOverloads returns overloads for each of the provided types. The template function is called with each type
+// name (map key) and type to construct the overloads.
+func templatedOverloads(types []namedCELType, template func(name string, t *cel.Type) cel.FunctionOpt) []cel.FunctionOpt {
+	overloads := make([]cel.FunctionOpt, len(types))
+	i := 0
+	for _, t := range types {
+		overloads[i] = template(t.typeName, t.celType)
+		i++
+	}
+	return overloads
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go
new file mode 100644
index 0000000000..236b366b46
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go
@@ -0,0 +1,388 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"errors"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+	apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// Quantity provides a CEL function library extension of Kubernetes
+// resource.Quantity parsing functions. See `resource.Quantity`
+// documentation for more detailed information about the format itself:
+// https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity
+//
+// quantity
+//
+// Converts a string to a Quantity or results in an error if the string is not a valid Quantity. Refer
+// to resource.Quantity documentation for information on accepted patterns.
+//
+//	quantity(<string>) <Quantity>
+//
+// Examples:
+//
+//	quantity('1.5G') // returns a Quantity
+//	quantity('200k') // returns a Quantity
+//	quantity('200K') // error
+//	quantity('Three') // error
+//	quantity('Mi') // error
+//
+// isQuantity
+//
+// Returns true if a string is a valid Quantity. isQuantity returns true if and
+// only if quantity does not result in error.
+//
+//	isQuantity(<string>) <bool>
+//
+// Examples:
+//
+//	isQuantity('1.3G') // returns true
+//	isQuantity('1.3Gi') // returns true
+//	isQuantity('1,3G') // returns false
+//	isQuantity('10000k') // returns true
+//	isQuantity('200K') // returns false
+//	isQuantity('Three') // returns false
+//	isQuantity('Mi') // returns false
+//
+// Conversion to Scalars:
+//
+//   - isInteger: returns true if and only if asInteger is safe to call without an error
+//
+//   - asInteger: returns a representation of the current value as an int64 if
+//     possible or results in an error if conversion would result in overflow
+//     or loss of precision.
+//
+//   - asApproximateFloat: returns a float64 representation of the quantity which may
+//     lose precision. If the value of the quantity is outside the range of a float64
+//     +Inf/-Inf will be returned.
+//
+//     <Quantity>.isInteger() <bool>
+//     <Quantity>.asInteger() <int>
+//     <Quantity>.asApproximateFloat() <float>
+//
+// Examples:
+//
+// quantity("50000000G").isInteger() // returns true
+// quantity("50k").isInteger() // returns true
+// quantity("9999999999999999999999999999999999999G").asInteger() // error: cannot convert value to integer
+// quantity("9999999999999999999999999999999999999G").isInteger() // returns false
+// quantity("50k").asInteger() == 50000 // returns true
+// quantity("50k").sub(20000).asApproximateFloat() == 30000 // returns true
+//
+// Arithmetic
+//
+//   - sign: Returns `1` if the quantity is positive, `-1` if it is negative, and `0` if it is zero
+//
+//   - add: Returns sum of two quantities or a quantity and an integer
+//
+//   - sub: Returns difference between two quantities or a quantity and an integer
+//
+//     <Quantity>.sign() <int>
+//     <Quantity>.add(<quantity>) <Quantity>
+//     <Quantity>.add(<int>) <Quantity>
+//     <Quantity>.sub(<quantity>) <Quantity>
+//     <Quantity>.sub(<int>) <Quantity>
+//
+// Examples:
+//
+// quantity("50k").add("20k") == quantity("70k") // returns true
+// quantity("50k").add(20) == quantity("50020") // returns true
+// quantity("50k").sub("20k") == quantity("30k") // returns true
+// quantity("50k").sub(20000) == quantity("30k") // returns true
+// quantity("50k").add(20).sub(quantity("100k")).sub(-50000) == quantity("20") // returns true
+//
+// Comparisons
+//
+//   - isGreaterThan: Returns true if and only if the receiver is greater than the operand
+//
+//   - isLessThan: Returns true if and only if the receiver is less than the operand
+//
+//   - compareTo: Compares receiver to operand and returns 0 if they are equal, 1 if the receiver is greater, or -1 if the receiver is less than the operand
+//
+//
+//     <Quantity>.isLessThan(<quantity>) <bool>
+//     <Quantity>.isGreaterThan(<quantity>) <bool>
+//     <Quantity>.compareTo(<quantity>) <int>
+//
+// Examples:
+//
+// quantity("200M").compareTo(quantity("0.2G")) // returns 0
+// quantity("50M").compareTo(quantity("50Mi")) // returns -1
+// quantity("50Mi").compareTo(quantity("50M")) // returns 1
+// quantity("150Mi").isGreaterThan(quantity("100Mi")) // returns true
+// quantity("50Mi").isGreaterThan(quantity("100Mi")) // returns false
+// quantity("50M").isLessThan(quantity("100M")) // returns true
+// quantity("100M").isLessThan(quantity("50M")) // returns false
+
+func Quantity() cel.EnvOption {
+	return cel.Lib(quantityLib)
+}
+
+var quantityLib = &quantity{}
+
+type quantity struct{}
+
+func (*quantity) LibraryName() string {
+	return "kubernetes.quantity"
+}
+
+func (*quantity) Types() []*cel.Type {
+	return []*cel.Type{apiservercel.QuantityType}
+}
+
+func (*quantity) declarations() map[string][]cel.FunctionOpt {
+	return quantityLibraryDecls
+}
+
+var quantityLibraryDecls = map[string][]cel.FunctionOpt{
+	"quantity": {
+		cel.Overload("string_to_quantity", []*cel.Type{cel.StringType}, apiservercel.QuantityType, cel.UnaryBinding(stringToQuantity)),
+	},
+	"isQuantity": {
+		cel.Overload("is_quantity_string", []*cel.Type{cel.StringType}, cel.BoolType, cel.UnaryBinding(isQuantity)),
+	},
+	"sign": {
+		cel.Overload("quantity_sign", []*cel.Type{apiservercel.QuantityType}, cel.IntType, cel.UnaryBinding(quantityGetSign)),
+	},
+	"isGreaterThan": {
+		cel.MemberOverload("quantity_is_greater_than", []*cel.Type{apiservercel.QuantityType, apiservercel.QuantityType}, cel.BoolType, cel.BinaryBinding(quantityIsGreaterThan)),
+	},
+	"isLessThan": {
+		cel.MemberOverload("quantity_is_less_than", []*cel.Type{apiservercel.QuantityType, apiservercel.QuantityType}, cel.BoolType, cel.BinaryBinding(quantityIsLessThan)),
+	},
+	"compareTo": {
+		cel.MemberOverload("quantity_compare_to", []*cel.Type{apiservercel.QuantityType, apiservercel.QuantityType}, cel.IntType, cel.BinaryBinding(quantityCompareTo)),
+	},
+	"asApproximateFloat": {
+		cel.MemberOverload("quantity_get_float", []*cel.Type{apiservercel.QuantityType}, cel.DoubleType, cel.UnaryBinding(quantityGetApproximateFloat)),
+	},
+	"asInteger": {
+		cel.MemberOverload("quantity_get_int", []*cel.Type{apiservercel.QuantityType}, cel.IntType, cel.UnaryBinding(quantityGetValue)),
+	},
+	"isInteger": {
+		cel.MemberOverload("quantity_is_integer", []*cel.Type{apiservercel.QuantityType}, cel.BoolType, cel.UnaryBinding(quantityCanValue)),
+	},
+	"add": {
+		cel.MemberOverload("quantity_add", []*cel.Type{apiservercel.QuantityType, apiservercel.QuantityType}, apiservercel.QuantityType, cel.BinaryBinding(quantityAdd)),
+		cel.MemberOverload("quantity_add_int", []*cel.Type{apiservercel.QuantityType, cel.IntType}, apiservercel.QuantityType, cel.BinaryBinding(quantityAddInt)),
+	},
+	"sub": {
+		cel.MemberOverload("quantity_sub", []*cel.Type{apiservercel.QuantityType, apiservercel.QuantityType}, apiservercel.QuantityType, cel.BinaryBinding(quantitySub)),
+		cel.MemberOverload("quantity_sub_int", []*cel.Type{apiservercel.QuantityType, cel.IntType}, apiservercel.QuantityType, cel.BinaryBinding(quantitySubInt)),
+	},
+}
+
+func (*quantity) CompileOptions() []cel.EnvOption {
+	options := make([]cel.EnvOption, 0, len(quantityLibraryDecls))
+	for name, overloads := range quantityLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*quantity) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func isQuantity(arg ref.Val) ref.Val {
+	str, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	_, err := resource.ParseQuantity(str)
+	if err != nil {
+		return types.Bool(false)
+	}
+
+	return types.Bool(true)
+}
+
+func stringToQuantity(arg ref.Val) ref.Val {
+	str, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q, err := resource.ParseQuantity(str)
+	if err != nil {
+		return types.WrapErr(err)
+	}
+
+	return apiservercel.Quantity{Quantity: &q}
+}
+
+func quantityGetApproximateFloat(arg ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.Double(q.AsApproximateFloat64())
+}
+
+func quantityCanValue(arg ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	_, success := q.AsInt64()
+	return types.Bool(success)
+}
+
+func quantityGetValue(arg ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	v, success := q.AsInt64()
+	if !success {
+		return types.WrapErr(errors.New("cannot convert value to integer"))
+	}
+	return types.Int(v)
+}
+
+func quantityGetSign(arg ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.Int(q.Sign())
+}
+
+func quantityIsGreaterThan(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	return types.Bool(q.Cmp(*q2) == 1)
+}
+
+func quantityIsLessThan(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	return types.Bool(q.Cmp(*q2) == -1)
+}
+
+func quantityCompareTo(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	return types.Int(q.Cmp(*q2))
+}
+
+func quantityAdd(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	copy := *q
+	copy.Add(*q2)
+	return &apiservercel.Quantity{
+		Quantity: &copy,
+	}
+}
+
+func quantityAddInt(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(int64)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	q2Converted := *resource.NewQuantity(q2, resource.DecimalExponent)
+
+	copy := *q
+	copy.Add(q2Converted)
+	return &apiservercel.Quantity{
+		Quantity: &copy,
+	}
+}
+
+func quantitySub(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	copy := *q
+	copy.Sub(*q2)
+	return &apiservercel.Quantity{
+		Quantity: &copy,
+	}
+}
+
+func quantitySubInt(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(int64)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	q2Converted := *resource.NewQuantity(q2, resource.DecimalExponent)
+
+	copy := *q
+	copy.Sub(q2Converted)
+	return &apiservercel.Quantity{
+		Quantity: &copy,
+	}
+}
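+
+// exampleQuantityUsage is an editorial sketch, not part of the upstream file:
+// it shows how the Quantity library might be wired into a CEL environment,
+// assuming the standard cel-go Env/Program API. The function name is
+// hypothetical and error handling is kept minimal.
+func exampleQuantityUsage() (ref.Val, error) {
+	env, err := cel.NewEnv(Quantity())
+	if err != nil {
+		return nil, err
+	}
+	// Compile an expression that parses, adds, and compares quantities.
+	ast, iss := env.Compile(`quantity("50k").add(20) == quantity("50020")`)
+	if iss.Err() != nil {
+		return nil, iss.Err()
+	}
+	prg, err := env.Program(ast)
+	if err != nil {
+		return nil, err
+	}
+	out, _, err := prg.Eval(map[string]interface{}{})
+	return out, err // evaluates to types.True
+}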
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/regex.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/regex.go
new file mode 100644
index 0000000000..2cf8b00375
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/regex.go
@@ -0,0 +1,201 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"regexp"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/interpreter"
+)
+
+// Regex provides a CEL function library extension of regex utility functions.
+//
+// find / findAll
+//
+// Returns substrings that match the provided regular expression. find returns the first match. findAll may optionally
+// be provided a limit. If the limit is set and >= 0, no more than the limit number of matches are returned.
+//
+//	<string>.find(<string>) <string>
+//	<string>.findAll(<string>) <list <string>>
+//	<string>.findAll(<string>, <int>) <list <string>>
+//
+// Examples:
+//
+//	"abc 123".find('[0-9]*') // returns '123'
+//	"abc 123".find('xyz') // returns ''
+//	"123 abc 456".findAll('[0-9]*') // returns ['123', '456']
+//	"123 abc 456".findAll('[0-9]*', 1) // returns ['123']
+//	"123 abc 456".findAll('xyz') // returns []
+func Regex() cel.EnvOption {
+	return cel.Lib(regexLib)
+}
+
+var regexLib = &regex{}
+
+type regex struct{}
+
+func (*regex) LibraryName() string {
+	return "kubernetes.regex"
+}
+
+func (*regex) Types() []*cel.Type {
+	return []*cel.Type{}
+}
+
+func (*regex) declarations() map[string][]cel.FunctionOpt {
+	return regexLibraryDecls
+}
+
+var regexLibraryDecls = map[string][]cel.FunctionOpt{
+	"find": {
+		cel.MemberOverload("string_find_string", []*cel.Type{cel.StringType, cel.StringType}, cel.StringType,
+			cel.BinaryBinding(find))},
+	"findAll": {
+		cel.MemberOverload("string_find_all_string", []*cel.Type{cel.StringType, cel.StringType},
+			cel.ListType(cel.StringType),
+			cel.BinaryBinding(func(str, regex ref.Val) ref.Val {
+				return findAll(str, regex, types.Int(-1))
+			})),
+		cel.MemberOverload("string_find_all_string_int",
+			[]*cel.Type{cel.StringType, cel.StringType, cel.IntType},
+			cel.ListType(cel.StringType),
+			cel.FunctionBinding(findAll)),
+	},
+}
+
+func (*regex) CompileOptions() []cel.EnvOption {
+	options := []cel.EnvOption{}
+	for name, overloads := range regexLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*regex) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{
+		cel.OptimizeRegex(FindRegexOptimization, FindAllRegexOptimization),
+	}
+}
+
+func find(strVal ref.Val, regexVal ref.Val) ref.Val {
+	str, ok := strVal.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(strVal)
+	}
+	regex, ok := regexVal.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(regexVal)
+	}
+	re, err := regexp.Compile(regex)
+	if err != nil {
+		return types.NewErr("Illegal regex: %v", err.Error())
+	}
+	result := re.FindString(str)
+	return types.String(result)
+}
+
+func findAll(args ...ref.Val) ref.Val {
+	argn := len(args)
+	if argn < 2 || argn > 3 {
+		return types.NoSuchOverloadErr()
+	}
+	str, ok := args[0].Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(args[0])
+	}
+	regex, ok := args[1].Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(args[1])
+	}
+	n := int64(-1)
+	if argn == 3 {
+		n, ok = args[2].Value().(int64)
+		if !ok {
+			return types.MaybeNoSuchOverloadErr(args[2])
+		}
+	}
+
+	re, err := regexp.Compile(regex)
+	if err != nil {
+		return types.NewErr("Illegal regex: %v", err.Error())
+	}
+
+	result := re.FindAllString(str, int(n))
+
+	return types.NewStringList(types.DefaultTypeAdapter, result)
+}
+
+// FindRegexOptimization optimizes the 'find' function by compiling the regex pattern and
+// reporting any compilation errors at program creation time, and using the compiled regex pattern for all function
+// call invocations.
+var FindRegexOptimization = &interpreter.RegexOptimization{
+	Function:   "find",
+	RegexIndex: 1,
+	Factory: func(call interpreter.InterpretableCall, regexPattern string) (interpreter.InterpretableCall, error) {
+		compiledRegex, err := regexp.Compile(regexPattern)
+		if err != nil {
+			return nil, err
+		}
+		return interpreter.NewCall(call.ID(), call.Function(), call.OverloadID(), call.Args(), func(args ...ref.Val) ref.Val {
+			if len(args) != 2 {
+				return types.NoSuchOverloadErr()
+			}
+			in, ok := args[0].Value().(string)
+			if !ok {
+				return types.MaybeNoSuchOverloadErr(args[0])
+			}
+			return types.String(compiledRegex.FindString(in))
+		}), nil
+	},
+}
+
+// FindAllRegexOptimization optimizes the 'findAll' function by compiling the regex pattern and
+// reporting any compilation errors at program creation time, and using the compiled regex pattern for all function
+// call invocations.
+var FindAllRegexOptimization = &interpreter.RegexOptimization{
+	Function:   "findAll",
+	RegexIndex: 1,
+	Factory: func(call interpreter.InterpretableCall, regexPattern string) (interpreter.InterpretableCall, error) {
+		compiledRegex, err := regexp.Compile(regexPattern)
+		if err != nil {
+			return nil, err
+		}
+		return interpreter.NewCall(call.ID(), call.Function(), call.OverloadID(), call.Args(), func(args ...ref.Val) ref.Val {
+			argn := len(args)
+			if argn < 2 || argn > 3 {
+				return types.NoSuchOverloadErr()
+			}
+			str, ok := args[0].Value().(string)
+			if !ok {
+				return types.MaybeNoSuchOverloadErr(args[0])
+			}
+			n := int64(-1)
+			if argn == 3 {
+				n, ok = args[2].Value().(int64)
+				if !ok {
+					return types.MaybeNoSuchOverloadErr(args[2])
+				}
+			}
+
+			result := compiledRegex.FindAllString(str, int(n))
+			return types.NewStringList(types.DefaultTypeAdapter, result)
+		}), nil
+	},
+}
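+
+// exampleRegexUsage is an editorial sketch, not part of the upstream file,
+// assuming the standard cel-go API; it shows findAll in use. The regex
+// optimizations registered in ProgramOptions are picked up when the
+// program is planned from this environment.
+func exampleRegexUsage() (ref.Val, error) {
+	env, err := cel.NewEnv(Regex())
+	if err != nil {
+		return nil, err
+	}
+	ast, iss := env.Compile(`"123 abc 456".findAll('[0-9]+', 1)`)
+	if iss.Err() != nil {
+		return nil, iss.Err()
+	}
+	prg, err := env.Program(ast)
+	if err != nil {
+		return nil, err
+	}
+	out, _, err := prg.Eval(map[string]interface{}{})
+	return out, err // evaluates to the list ['123']
+}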
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/semverlib.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/semverlib.go
new file mode 100644
index 0000000000..d8c79ae021
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/semverlib.go
@@ -0,0 +1,247 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"github.com/blang/semver/v4"
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// Semver provides a CEL function library extension for [semver.Version].
+//
+// semver
+//
+// Converts a string to a semantic version or results in an error if the string is not a valid semantic version. Refer
+// to semver.org documentation for information on accepted patterns.
+//
+//	semver(<string>) <Semver>
+//
+// Examples:
+//
+//	semver('1.0.0') // returns a Semver
+//	semver('0.1.0-alpha.1') // returns a Semver
+//	semver('200K') // error
+//	semver('Three') // error
+//	semver('Mi') // error
+//
+// isSemver
+//
+// Returns true if a string is a valid Semver. isSemver returns true if and
+// only if semver does not result in error.
+//
+//	isSemver(<string>) <bool>
+//
+// Examples:
+//
+//	isSemver('1.0.0') // returns true
+//	isSemver('v1.0') // returns false (leading 'v' and missing patch version are rejected)
+//	isSemver('hello') // returns false
+//
+// Conversion to Scalars:
+//
+//   - major/minor/patch: return the major, minor, or patch version number as an int64.
+//
+//     <Semver>.major() <int>
+//     <Semver>.minor() <int>
+//     <Semver>.patch() <int>
+//
+// Examples:
+//
+// semver("1.2.3").major() // returns 1
+//
+// Comparisons
+//
+//   - isGreaterThan: Returns true if and only if the receiver is greater than the operand
+//
+//   - isLessThan: Returns true if and only if the receiver is less than the operand
+//
+//   - compareTo: Compares receiver to operand and returns 0 if they are equal, 1 if the receiver is greater, or -1 if the receiver is less than the operand
+//
+//
+//     <Semver>.isLessThan(<semver>) <bool>
+//     <Semver>.isGreaterThan(<semver>) <bool>
+//     <Semver>.compareTo(<semver>) <int>
+//
+// Examples:
+//
+// semver("1.2.3").compareTo(semver("1.2.3")) // returns 0
+// semver("1.2.3").compareTo(semver("2.0.0")) // returns -1
+// semver("1.2.3").compareTo(semver("0.1.2")) // returns 1
+
+func SemverLib() cel.EnvOption {
+	return cel.Lib(semverLib)
+}
+
+var semverLib = &semverLibType{}
+
+type semverLibType struct{}
+
+func (*semverLibType) LibraryName() string {
+	return "kubernetes.Semver"
+}
+
+func (*semverLibType) Types() []*cel.Type {
+	return []*cel.Type{apiservercel.SemverType}
+}
+
+func (*semverLibType) declarations() map[string][]cel.FunctionOpt {
+	return map[string][]cel.FunctionOpt{
+		"semver": {
+			cel.Overload("string_to_semver", []*cel.Type{cel.StringType}, apiservercel.SemverType, cel.UnaryBinding(stringToSemver)),
+		},
+		"isSemver": {
+			cel.Overload("is_semver_string", []*cel.Type{cel.StringType}, cel.BoolType, cel.UnaryBinding(isSemver)),
+		},
+		"isGreaterThan": {
+			cel.MemberOverload("semver_is_greater_than", []*cel.Type{apiservercel.SemverType, apiservercel.SemverType}, cel.BoolType, cel.BinaryBinding(semverIsGreaterThan)),
+		},
+		"isLessThan": {
+			cel.MemberOverload("semver_is_less_than", []*cel.Type{apiservercel.SemverType, apiservercel.SemverType}, cel.BoolType, cel.BinaryBinding(semverIsLessThan)),
+		},
+		"compareTo": {
+			cel.MemberOverload("semver_compare_to", []*cel.Type{apiservercel.SemverType, apiservercel.SemverType}, cel.IntType, cel.BinaryBinding(semverCompareTo)),
+		},
+		"major": {
+			cel.MemberOverload("semver_major", []*cel.Type{apiservercel.SemverType}, cel.IntType, cel.UnaryBinding(semverMajor)),
+		},
+		"minor": {
+			cel.MemberOverload("semver_minor", []*cel.Type{apiservercel.SemverType}, cel.IntType, cel.UnaryBinding(semverMinor)),
+		},
+		"patch": {
+			cel.MemberOverload("semver_patch", []*cel.Type{apiservercel.SemverType}, cel.IntType, cel.UnaryBinding(semverPatch)),
+		},
+	}
+}
+
+func (s *semverLibType) CompileOptions() []cel.EnvOption {
+	// Defined in this function to avoid an initialization order problem.
+	semverLibraryDecls := s.declarations()
+	options := make([]cel.EnvOption, 0, len(semverLibraryDecls))
+	for name, overloads := range semverLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*semverLibType) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func isSemver(arg ref.Val) ref.Val {
+	str, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	// Using semver/v4 here is okay because this function isn't
+	// used to validate the Kubernetes API. In the CEL base library
+	// we would have to use the regular expression from
+	// pkg/apis/resource/structured/namedresources/validation/validation.go.
+	_, err := semver.Parse(str)
+	if err != nil {
+		return types.Bool(false)
+	}
+
+	return types.Bool(true)
+}
+
+func stringToSemver(arg ref.Val) ref.Val {
+	str, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	// Using semver/v4 here is okay because this function isn't
+	// used to validate the Kubernetes API. In the CEL base library
+	// we would have to use the regular expression from
+	// pkg/apis/resource/structured/namedresources/validation/validation.go
+	// first before parsing.
+	v, err := semver.Parse(str)
+	if err != nil {
+		return types.WrapErr(err)
+	}
+
+	return apiservercel.Semver{Version: v}
+}
+
+func semverMajor(arg ref.Val) ref.Val {
+	v, ok := arg.Value().(semver.Version)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.Int(v.Major)
+}
+
+func semverMinor(arg ref.Val) ref.Val {
+	v, ok := arg.Value().(semver.Version)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.Int(v.Minor)
+}
+
+func semverPatch(arg ref.Val) ref.Val {
+	v, ok := arg.Value().(semver.Version)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.Int(v.Patch)
+}
+
+func semverIsGreaterThan(arg ref.Val, other ref.Val) ref.Val {
+	v, ok := arg.Value().(semver.Version)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	v2, ok := other.Value().(semver.Version)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	return types.Bool(v.Compare(v2) == 1)
+}
+
+func semverIsLessThan(arg ref.Val, other ref.Val) ref.Val {
+	v, ok := arg.Value().(semver.Version)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	v2, ok := other.Value().(semver.Version)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	return types.Bool(v.Compare(v2) == -1)
+}
+
+func semverCompareTo(arg ref.Val, other ref.Val) ref.Val {
+	v, ok := arg.Value().(semver.Version)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	v2, ok := other.Value().(semver.Version)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	return types.Int(v.Compare(v2))
+}
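+
+// exampleSemverUsage is an editorial sketch, not part of the upstream file,
+// assuming the standard cel-go API; it shows the semver comparison helpers.
+// The function name is hypothetical.
+func exampleSemverUsage() (ref.Val, error) {
+	env, err := cel.NewEnv(SemverLib())
+	if err != nil {
+		return nil, err
+	}
+	ast, iss := env.Compile(`semver("1.2.3").isLessThan(semver("1.4.0"))`)
+	if iss.Err() != nil {
+		return nil, iss.Err()
+	}
+	prg, err := env.Program(ast)
+	if err != nil {
+		return nil, err
+	}
+	out, _, err := prg.Eval(map[string]interface{}{})
+	return out, err // evaluates to types.True
+}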
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/test.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/test.go
new file mode 100644
index 0000000000..282d939620
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/test.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"math"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// Test provides a test() function that returns true.
+func Test(options ...TestOption) cel.EnvOption {
+	t := &testLib{version: math.MaxUint32}
+	for _, o := range options {
+		t = o(t)
+	}
+	return cel.Lib(t)
+}
+
+type testLib struct {
+	version uint32
+}
+
+func (*testLib) LibraryName() string {
+	return "kubernetes.test"
+}
+
+type TestOption func(*testLib) *testLib
+
+func TestVersion(version uint32) func(lib *testLib) *testLib {
+	return func(sl *testLib) *testLib {
+		sl.version = version
+		return sl
+	}
+}
+
+func (t *testLib) CompileOptions() []cel.EnvOption {
+	var options []cel.EnvOption
+
+	if t.version == 0 {
+		options = append(options, cel.Function("test",
+			cel.Overload("test", []*cel.Type{}, cel.BoolType,
+				cel.FunctionBinding(func(args ...ref.Val) ref.Val {
+					return types.True
+				}))))
+	}
+
+	if t.version >= 1 {
+		options = append(options, cel.Function("test",
+			cel.Overload("test", []*cel.Type{}, cel.BoolType,
+				cel.FunctionBinding(func(args ...ref.Val) ref.Val {
+					// Return false here so tests can observe which version of the function is registered
+					// Actual function libraries must not break backward compatibility
+					return types.False
+				}))))
+		options = append(options, cel.Function("testV1",
+			cel.Overload("testV1", []*cel.Type{}, cel.BoolType,
+				cel.FunctionBinding(func(args ...ref.Val) ref.Val {
+					return types.True
+				}))))
+	}
+	return options
+}
+
+func (*testLib) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
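+
+// exampleTestLibUsage is an editorial sketch, not part of the upstream file:
+// registering the library at version 1 makes test() return false and adds
+// testV1(), which is how callers can observe which version is in effect.
+func exampleTestLibUsage() (ref.Val, error) {
+	env, err := cel.NewEnv(Test(TestVersion(1)))
+	if err != nil {
+		return nil, err
+	}
+	ast, iss := env.Compile(`test() == false && testV1()`)
+	if iss.Err() != nil {
+		return nil, iss.Err()
+	}
+	prg, err := env.Program(ast)
+	if err != nil {
+		return nil, err
+	}
+	out, _, err := prg.Eval(map[string]interface{}{})
+	return out, err // evaluates to types.True
+}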
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/urls.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/urls.go
new file mode 100644
index 0000000000..4b7ffb95a4
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/library/urls.go
@@ -0,0 +1,248 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"net/url"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// URLs provides a CEL function library extension of URL parsing functions.
+//
+// url
+//
+// Converts a string to a URL or results in an error if the string is not a valid URL. The URL must be an absolute URI
+// or an absolute path.
+//
+//	url(<string>) <URL>
+//
+// Examples:
+//
+//	url('https://user:pass@example.com:80/path?query=val#fragment') // returns a URL
+//	url('/absolute-path') // returns a URL
+//	url('https://a:b:c/') // error
+//	url('../relative-path') // error
+//
+// isURL
+//
+// Returns true if a string is a valid URL. The URL must be an absolute URI or an absolute path.
+//
+//	isURL(<string>) <bool>
+//
+// Examples:
+//
+//	isURL('https://user:pass@example.com:80/path?query=val#fragment') // returns true
+//	isURL('/absolute-path') // returns true
+//	isURL('https://a:b:c/') // returns false
+//	isURL('../relative-path') // returns false
+//
+// getScheme / getHost / getHostname / getPort / getEscapedPath / getQuery
+//
+// Return the parsed components of a URL.
+//
+//   - getScheme: If absent in the URL, returns an empty string.
+//
+//   - getHostname: IPv6 addresses are returned without braces, e.g. "::1". If absent in the URL, returns an empty string.
+//
+//   - getHost: IPv6 addresses are returned with braces, e.g. "[::1]". If absent in the URL, returns an empty string.
+//
+//   - getEscapedPath: The string returned by getEscapedPath is URL escaped, e.g. "with space" becomes "with%20space".
+//     If absent in the URL, returns an empty string.
+//
+//   - getPort: If absent in the URL, returns an empty string.
+//
+//   - getQuery: Returns the query parameters in "matrix" form where a repeated query key is interpreted to
+//     mean that there are multiple values for that key. The keys and values are returned unescaped.
+//     If absent in the URL, returns an empty map.
+//
+//     <URL>.getScheme() <string>
+//     <URL>.getHost() <string>
+//     <URL>.getHostname() <string>
+//     <URL>.getPort() <string>
+//     <URL>.getEscapedPath() <string>
+//     <URL>.getQuery() <map <string>, <list <string>>>
+//
+// Examples:
+//
+//	url('/path').getScheme() // returns ''
+//	url('https://example.com/').getScheme() // returns 'https'
+//	url('https://example.com:80/').getHost() // returns 'example.com:80'
+//	url('https://example.com/').getHost() // returns 'example.com'
+//	url('https://[::1]:80/').getHost() // returns '[::1]:80'
+//	url('https://[::1]/').getHost() // returns '[::1]'
+//	url('/path').getHost() // returns ''
+//	url('https://example.com:80/').getHostname() // returns 'example.com'
+//	url('https://127.0.0.1:80/').getHostname() // returns '127.0.0.1'
+//	url('https://[::1]:80/').getHostname() // returns '::1'
+//	url('/path').getHostname() // returns ''
+//	url('https://example.com:80/').getPort() // returns '80'
+//	url('https://example.com/').getPort() // returns ''
+//	url('/path').getPort() // returns ''
+//	url('https://example.com/path').getEscapedPath() // returns '/path'
+//	url('https://example.com/path with spaces/').getEscapedPath() // returns '/path%20with%20spaces/'
+//	url('https://example.com').getEscapedPath() // returns ''
+//	url('https://example.com/path?k1=a&k2=b&k2=c').getQuery() // returns { 'k1': ['a'], 'k2': ['b', 'c']}
+//	url('https://example.com/path?key with spaces=value with spaces').getQuery() // returns { 'key with spaces': ['value with spaces']}
+//	url('https://example.com/path?').getQuery() // returns {}
+//	url('https://example.com/path').getQuery() // returns {}
+func URLs() cel.EnvOption {
+	return cel.Lib(urlsLib)
+}
+
+var urlsLib = &urls{}
+
+type urls struct{}
+
+func (*urls) LibraryName() string {
+	return "kubernetes.urls"
+}
+
+func (*urls) Types() []*cel.Type {
+	return []*cel.Type{apiservercel.URLType}
+}
+
+func (*urls) declarations() map[string][]cel.FunctionOpt {
+	return urlLibraryDecls
+}
+
+var urlLibraryDecls = map[string][]cel.FunctionOpt{
+	"url": {
+		cel.Overload("string_to_url", []*cel.Type{cel.StringType}, apiservercel.URLType,
+			cel.UnaryBinding(stringToUrl))},
+	"getScheme": {
+		cel.MemberOverload("url_get_scheme", []*cel.Type{apiservercel.URLType}, cel.StringType,
+			cel.UnaryBinding(getScheme))},
+	"getHost": {
+		cel.MemberOverload("url_get_host", []*cel.Type{apiservercel.URLType}, cel.StringType,
+			cel.UnaryBinding(getHost))},
+	"getHostname": {
+		cel.MemberOverload("url_get_hostname", []*cel.Type{apiservercel.URLType}, cel.StringType,
+			cel.UnaryBinding(getHostname))},
+	"getPort": {
+		cel.MemberOverload("url_get_port", []*cel.Type{apiservercel.URLType}, cel.StringType,
+			cel.UnaryBinding(getPort))},
+	"getEscapedPath": {
+		cel.MemberOverload("url_get_escaped_path", []*cel.Type{apiservercel.URLType}, cel.StringType,
+			cel.UnaryBinding(getEscapedPath))},
+	"getQuery": {
+		cel.MemberOverload("url_get_query", []*cel.Type{apiservercel.URLType},
+			cel.MapType(cel.StringType, cel.ListType(cel.StringType)),
+			cel.UnaryBinding(getQuery))},
+	"isURL": {
+		cel.Overload("is_url_string", []*cel.Type{cel.StringType}, cel.BoolType,
+			cel.UnaryBinding(isURL))},
+}
+
+func (*urls) CompileOptions() []cel.EnvOption {
+	options := []cel.EnvOption{}
+	for name, overloads := range urlLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*urls) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func stringToUrl(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	// Use ParseRequestURI to check the URL before conversion.
+	// ParseRequestURI requires absolute URLs and is used by the OpenAPIv3 'uri' format.
+	_, err := url.ParseRequestURI(s)
+	if err != nil {
+		return types.NewErr("URL parse error during conversion from string: %v", err)
+	}
+	// We must parse again with Parse since ParseRequestURI incorrectly parses URLs that contain a fragment
+	// part and will incorrectly append the fragment to either the path or the query, depending on which it was adjacent to.
+	u, err := url.Parse(s)
+	if err != nil {
+		// Errors are not expected here since Parse is a more lenient parser than ParseRequestURI.
+		return types.NewErr("URL parse error during conversion from string: %v", err)
+	}
+	return apiservercel.URL{URL: u}
+}
+
+func getScheme(arg ref.Val) ref.Val {
+	u, ok := arg.Value().(*url.URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.String(u.Scheme)
+}
+
+func getHost(arg ref.Val) ref.Val {
+	u, ok := arg.Value().(*url.URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.String(u.Host)
+}
+
+func getHostname(arg ref.Val) ref.Val {
+	u, ok := arg.Value().(*url.URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.String(u.Hostname())
+}
+
+func getPort(arg ref.Val) ref.Val {
+	u, ok := arg.Value().(*url.URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.String(u.Port())
+}
+
+func getEscapedPath(arg ref.Val) ref.Val {
+	u, ok := arg.Value().(*url.URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.String(u.EscapedPath())
+}
+
+func getQuery(arg ref.Val) ref.Val {
+	u, ok := arg.Value().(*url.URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	result := map[ref.Val]ref.Val{}
+	for k, v := range u.Query() {
+		result[types.String(k)] = types.NewStringList(types.DefaultTypeAdapter, v)
+	}
+	return types.NewRefValMap(types.DefaultTypeAdapter, result)
+}
+
+func isURL(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	_, err := url.ParseRequestURI(s)
+	return types.Bool(err == nil)
+}
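+
+// exampleURLUsage is an editorial sketch, not part of the upstream file,
+// assuming the standard cel-go API; it shows URL parsing and an accessor.
+func exampleURLUsage() (ref.Val, error) {
+	env, err := cel.NewEnv(URLs())
+	if err != nil {
+		return nil, err
+	}
+	ast, iss := env.Compile(`url("https://example.com:80/path").getHostname()`)
+	if iss.Err() != nil {
+		return nil, iss.Err()
+	}
+	prg, err := env.Program(ast)
+	if err != nil {
+		return nil, err
+	}
+	out, _, err := prg.Eval(map[string]interface{}{})
+	return out, err // evaluates to the string 'example.com'
+}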
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/limits.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/limits.go
new file mode 100644
index 0000000000..14b3ec2d20
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/limits.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import celconfig "k8s.io/apiserver/pkg/apis/cel"
+
+const (
+	// DefaultMaxRequestSizeBytes is the size of the largest request that will be accepted
+	DefaultMaxRequestSizeBytes = celconfig.MaxRequestSizeBytes
+
+	// MaxDurationSizeJSON
+	// OpenAPI duration strings follow RFC 3339, section 5.6 - see the comment on maxDatetimeSizeJSON
+	MaxDurationSizeJSON = 32
+	// MaxDatetimeSizeJSON
+	// OpenAPI datetime strings follow RFC 3339, section 5.6, and the longest possible
+	// such string is 9999-12-31T23:59:59.999999999Z, which has length 30 - we add 2
+	// to allow for quotation marks
+	MaxDatetimeSizeJSON = 32
+	// MinDurationSizeJSON
+	// Golang allows a string of 0 to be parsed as a duration, so that plus 2 to account for
+	// quotation marks makes 3
+	MinDurationSizeJSON = 3
+	// JSONDateSize is the size of a date serialized as part of a JSON object
+	// RFC 3339 dates require YYYY-MM-DD, and then we add 2 to allow for quotation marks
+	JSONDateSize = 12
+	// MinDatetimeSizeJSON is the minimal length of a datetime formatted as RFC 3339
+	// RFC 3339 datetimes require a full date (YYYY-MM-DD) and full time (HH:MM:SS), and we add 3 for
+	// quotation marks like always in addition to the capital T that separates the date and time
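+	// i.e. 10 ("YYYY-MM-DD") + 1 ("T") + 8 ("HH:MM:SS") + 2 (quotation marks) = 21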
+	MinDatetimeSizeJSON = 21
+	// MinStringSize is the size of literal ""
+	MinStringSize = 2
+	// MinBoolSize is the length of literal true
+	MinBoolSize = 4
+	// MinNumberSize is the length of literal 0
+	MinNumberSize = 1
+
+	// MaxFormatSize is the maximum size we allow for format strings
+	MaxFormatSize          = 64
+	MaxNameFormatRegexSize = 128
+)
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/metrics/metrics.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/metrics/metrics.go
new file mode 100644
index 0000000000..02f232c17c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/metrics/metrics.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"time"
+
+	"k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+)
+
+// TODO(jiahuif) CEL is to be used in multiple components, revise naming when that happens.
+const (
+	namespace = "apiserver"
+	subsystem = "cel"
+)
+
+// Metrics provides access to CEL metrics.
+var Metrics = newCelMetrics()
+
+type CelMetrics struct {
+	compilationTime *metrics.Histogram
+	evaluationTime  *metrics.Histogram
+}
+
+func newCelMetrics() *CelMetrics {
+	m := &CelMetrics{
+		compilationTime: metrics.NewHistogram(&metrics.HistogramOpts{
+			Namespace:      namespace,
+			Subsystem:      subsystem,
+			Name:           "compilation_duration_seconds",
+			Help:           "CEL compilation time in seconds.",
+			StabilityLevel: metrics.BETA,
+		}),
+		evaluationTime: metrics.NewHistogram(&metrics.HistogramOpts{
+			Namespace:      namespace,
+			Subsystem:      subsystem,
+			Name:           "evaluation_duration_seconds",
+			Help:           "CEL evaluation time in seconds.",
+			StabilityLevel: metrics.BETA,
+		}),
+	}
+
+	legacyregistry.MustRegister(m.compilationTime)
+	legacyregistry.MustRegister(m.evaluationTime)
+
+	return m
+}
+
+// ObserveCompilation records a CEL compilation with the time the compilation took.
+func (m *CelMetrics) ObserveCompilation(elapsed time.Duration) {
+	seconds := elapsed.Seconds()
+	m.compilationTime.Observe(seconds)
+}
+
+// ObserveEvaluation records a CEL evaluation with the time the evaluation took.
+func (m *CelMetrics) ObserveEvaluation(elapsed time.Duration) {
+	seconds := elapsed.Seconds()
+	m.evaluationTime.Observe(seconds)
+}
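+
+// exampleObserveCompilation is an editorial sketch, not part of the upstream
+// file, showing the intended call pattern around a CEL compilation; the
+// function name and compile callback are illustrative only.
+func exampleObserveCompilation(compile func()) {
+	start := time.Now()
+	compile() // stands in for an env.Compile call
+	Metrics.ObserveCompilation(time.Since(start))
+}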
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/openapi/adaptor.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/openapi/adaptor.go
new file mode 100644
index 0000000000..bc7b0d8c95
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/openapi/adaptor.go
@@ -0,0 +1,229 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openapi
+
+import (
+	"github.com/google/cel-go/common/types/ref"
+
+	apiservercel "k8s.io/apiserver/pkg/cel"
+	"k8s.io/apiserver/pkg/cel/common"
+	"k8s.io/kube-openapi/pkg/validation/spec"
+)
+
+var _ common.Schema = (*Schema)(nil)
+var _ common.SchemaOrBool = (*SchemaOrBool)(nil)
+
+type Schema struct {
+	Schema *spec.Schema
+}
+
+type SchemaOrBool struct {
+	SchemaOrBool *spec.SchemaOrBool
+}
+
+func (sb *SchemaOrBool) Schema() common.Schema {
+	return &Schema{Schema: sb.SchemaOrBool.Schema}
+}
+
+func (sb *SchemaOrBool) Allows() bool {
+	return sb.SchemaOrBool.Allows
+}
+
+func (s *Schema) Type() string {
+	if len(s.Schema.Type) == 0 {
+		return ""
+	}
+	return s.Schema.Type[0]
+}
+
+func (s *Schema) Format() string {
+	return s.Schema.Format
+}
+
+func (s *Schema) Pattern() string {
+	return s.Schema.Pattern
+}
+
+func (s *Schema) Items() common.Schema {
+	if s.Schema.Items == nil || s.Schema.Items.Schema == nil {
+		return nil
+	}
+	return &Schema{Schema: s.Schema.Items.Schema}
+}
+
+func (s *Schema) Properties() map[string]common.Schema {
+	if s.Schema.Properties == nil {
+		return nil
+	}
+	res := make(map[string]common.Schema, len(s.Schema.Properties))
+	for n, prop := range s.Schema.Properties {
+		// map value is unaddressable, create a shallow copy
+		// this is a shallow non-recursive copy
+		s := prop
+		res[n] = &Schema{Schema: &s}
+	}
+	return res
+}
+
+func (s *Schema) AdditionalProperties() common.SchemaOrBool {
+	if s.Schema.AdditionalProperties == nil {
+		return nil
+	}
+	return &SchemaOrBool{SchemaOrBool: s.Schema.AdditionalProperties}
+}
+
+func (s *Schema) Default() any {
+	return s.Schema.Default
+}
+
+func (s *Schema) Minimum() *float64 {
+	return s.Schema.Minimum
+}
+
+func (s *Schema) IsExclusiveMinimum() bool {
+	return s.Schema.ExclusiveMinimum
+}
+
+func (s *Schema) Maximum() *float64 {
+	return s.Schema.Maximum
+}
+
+func (s *Schema) IsExclusiveMaximum() bool {
+	return s.Schema.ExclusiveMaximum
+}
+
+func (s *Schema) MultipleOf() *float64 {
+	return s.Schema.MultipleOf
+}
+
+func (s *Schema) UniqueItems() bool {
+	return s.Schema.UniqueItems
+}
+
+func (s *Schema) MinItems() *int64 {
+	return s.Schema.MinItems
+}
+
+func (s *Schema) MaxItems() *int64 {
+	return s.Schema.MaxItems
+}
+
+func (s *Schema) MinLength() *int64 {
+	return s.Schema.MinLength
+}
+
+func (s *Schema) MaxLength() *int64 {
+	return s.Schema.MaxLength
+}
+
+func (s *Schema) MinProperties() *int64 {
+	return s.Schema.MinProperties
+}
+
+func (s *Schema) MaxProperties() *int64 {
+	return s.Schema.MaxProperties
+}
+
+func (s *Schema) Required() []string {
+	return s.Schema.Required
+}
+
+func (s *Schema) Enum() []any {
+	return s.Schema.Enum
+}
+
+func (s *Schema) Nullable() bool {
+	return s.Schema.Nullable
+}
+
+func (s *Schema) AllOf() []common.Schema {
+	var res []common.Schema
+	for _, nestedSchema := range s.Schema.AllOf {
+		nestedSchema := nestedSchema
+		res = append(res, &Schema{&nestedSchema})
+	}
+	return res
+}
+
+func (s *Schema) AnyOf() []common.Schema {
+	var res []common.Schema
+	for _, nestedSchema := range s.Schema.AnyOf {
+		nestedSchema := nestedSchema
+		res = append(res, &Schema{&nestedSchema})
+	}
+	return res
+}
+
+func (s *Schema) OneOf() []common.Schema {
+	var res []common.Schema
+	for _, nestedSchema := range s.Schema.OneOf {
+		nestedSchema := nestedSchema
+		res = append(res, &Schema{&nestedSchema})
+	}
+	return res
+}
+
+func (s *Schema) Not() common.Schema {
+	if s.Schema.Not == nil {
+		return nil
+	}
+	return &Schema{s.Schema.Not}
+}
+
+func (s *Schema) IsXIntOrString() bool {
+	return isXIntOrString(s.Schema)
+}
+
+func (s *Schema) IsXEmbeddedResource() bool {
+	return isXEmbeddedResource(s.Schema)
+}
+
+func (s *Schema) IsXPreserveUnknownFields() bool {
+	return isXPreserveUnknownFields(s.Schema)
+}
+
+func (s *Schema) XListType() string {
+	return getXListType(s.Schema)
+}
+
+func (s *Schema) XMapType() string {
+	return getXMapType(s.Schema)
+}
+
+func (s *Schema) XListMapKeys() []string {
+	return getXListMapKeys(s.Schema)
+}
+
+func (s *Schema) XValidations() []common.ValidationRule {
+	return getXValidations(s.Schema)
+}
+
+func (s *Schema) WithTypeAndObjectMeta() common.Schema {
+	return &Schema{common.WithTypeAndObjectMeta(s.Schema)}
+}
+
+func UnstructuredToVal(unstructured any, schema *spec.Schema) ref.Val {
+	return common.UnstructuredToVal(unstructured, &Schema{schema})
+}
+
+func SchemaDeclType(s *spec.Schema, isResourceRoot bool) *apiservercel.DeclType {
+	return common.SchemaDeclType(&Schema{Schema: s}, isResourceRoot)
+}
+
+func MakeMapList(sts *spec.Schema, items []interface{}) (rv common.MapList) {
+	return common.MakeMapList(&Schema{Schema: sts}, items)
+}
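+
+// exampleSchemaDeclType is an editorial sketch, not part of the upstream
+// file: it adapts a hand-built kube-openapi schema into a CEL DeclType.
+// The schema shape and field name are illustrative only.
+func exampleSchemaDeclType() *apiservercel.DeclType {
+	s := &spec.Schema{SchemaProps: spec.SchemaProps{
+		Type: spec.StringOrArray{"object"},
+		Properties: map[string]spec.Schema{
+			"replicas": {SchemaProps: spec.SchemaProps{Type: spec.StringOrArray{"integer"}}},
+		},
+	}}
+	// isResourceRoot=true treats the schema as a top-level resource.
+	return SchemaDeclType(s, true)
+}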
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/openapi/extensions.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/openapi/extensions.go
new file mode 100644
index 0000000000..3bb3bccf05
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/openapi/extensions.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openapi
+
+import (
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apiserver/pkg/cel/common"
+	"k8s.io/kube-openapi/pkg/validation/spec"
+)
+
+var intOrStringFormat = intstr.IntOrString{}.OpenAPISchemaFormat()
+
+func isExtension(schema *spec.Schema, key string) bool {
+	v, ok := schema.Extensions.GetBool(key)
+	return v && ok
+}
+
+func isXIntOrString(schema *spec.Schema) bool {
+	// built-in types have the Format while CRDs use extension
+	// both are valid, checking both
+	return schema.Format == intOrStringFormat || isExtension(schema, extIntOrString)
+}
+
+func isXEmbeddedResource(schema *spec.Schema) bool {
+	return isExtension(schema, extEmbeddedResource)
+}
+
+func isXPreserveUnknownFields(schema *spec.Schema) bool {
+	return isExtension(schema, extPreserveUnknownFields)
+}
+
+func getXListType(schema *spec.Schema) string {
+	s, _ := schema.Extensions.GetString(extListType)
+	return s
+}
+
+func getXMapType(schema *spec.Schema) string {
+	s, _ := schema.Extensions.GetString(extMapType)
+	return s
+}
+
+func getXListMapKeys(schema *spec.Schema) []string {
+	mapKeys, ok := schema.Extensions.GetStringSlice(extListMapKeys)
+	if !ok {
+		return nil
+	}
+	return mapKeys
+}
+
+type ValidationRule struct {
+	RuleField              string `json:"rule"`
+	MessageField           string `json:"message"`
+	MessageExpressionField string `json:"messageExpression"`
+	PathField              string `json:"fieldPath"`
+}
+
+func (v ValidationRule) Rule() string {
+	return v.RuleField
+}
+
+func (v ValidationRule) Message() string {
+	return v.MessageField
+}
+
+func (v ValidationRule) FieldPath() string {
+	return v.PathField
+}
+
+func (v ValidationRule) MessageExpression() string {
+	return v.MessageExpressionField
+}
+
+// TODO: simplify
+func getXValidations(schema *spec.Schema) []common.ValidationRule {
+	var rules []ValidationRule
+	err := schema.Extensions.GetObject(extValidations, &rules)
+	if err != nil {
+		return nil
+	}
+	results := make([]common.ValidationRule, len(rules))
+	for i, rule := range rules {
+		results[i] = rule
+	}
+	return results
+}
+
+const extIntOrString = "x-kubernetes-int-or-string"
+const extEmbeddedResource = "x-kubernetes-embedded-resource"
+const extPreserveUnknownFields = "x-kubernetes-preserve-unknown-fields"
+const extListType = "x-kubernetes-list-type"
+const extMapType = "x-kubernetes-map-type"
+const extListMapKeys = "x-kubernetes-list-map-keys"
+const extValidations = "x-kubernetes-validations"
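+
+// exampleListTypeLookup is an editorial sketch, not part of the upstream
+// file, showing how the x-kubernetes-* extensions are read from a schema.
+func exampleListTypeLookup() string {
+	s := &spec.Schema{VendorExtensible: spec.VendorExtensible{
+		Extensions: spec.Extensions{extListType: "map"},
+	}}
+	return getXListType(s) // returns "map"
+}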
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/quantity.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/quantity.go
new file mode 100644
index 0000000000..ce82396436
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/quantity.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/checker/decls"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+var (
+	QuantityObject    = decls.NewObjectType("kubernetes.Quantity")
+	quantityTypeValue = types.NewTypeValue("kubernetes.Quantity")
+	QuantityType      = cel.ObjectType("kubernetes.Quantity")
+)
+
+// Quantity provides a CEL representation of a resource.Quantity
+type Quantity struct {
+	*resource.Quantity
+}
+
+func (d Quantity) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	if reflect.TypeOf(d.Quantity).AssignableTo(typeDesc) {
+		return d.Quantity, nil
+	}
+	if reflect.TypeOf("").AssignableTo(typeDesc) {
+		return d.Quantity.String(), nil
+	}
+	return nil, fmt.Errorf("type conversion error from 'Quantity' to '%v'", typeDesc)
+}
+
+func (d Quantity) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case quantityTypeValue:
+		return d
+	case types.TypeType:
+		return quantityTypeValue
+	default:
+		return types.NewErr("type conversion error from '%s' to '%s'", quantityTypeValue, typeVal)
+	}
+}
+
+func (d Quantity) Equal(other ref.Val) ref.Val {
+	otherQuantity, ok := other.(Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	return types.Bool(d.Quantity.Equal(*otherQuantity.Quantity))
+}
+
+func (d Quantity) Type() ref.Type {
+	return quantityTypeValue
+}
+
+func (d Quantity) Value() interface{} {
+	return d.Quantity
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/semver.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/semver.go
new file mode 100644
index 0000000000..c53b9c306a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/semver.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/blang/semver/v4"
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+var (
+	SemverType = cel.ObjectType("kubernetes.Semver")
+)
+
+// Semver provides a CEL representation of a [semver.Version].
+type Semver struct {
+	semver.Version
+}
+
+func (v Semver) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	if reflect.TypeOf(v.Version).AssignableTo(typeDesc) {
+		return v.Version, nil
+	}
+	if reflect.TypeOf("").AssignableTo(typeDesc) {
+		return v.Version.String(), nil
+	}
+	return nil, fmt.Errorf("type conversion error from 'Semver' to '%v'", typeDesc)
+}
+
+func (v Semver) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case SemverType:
+		return v
+	case types.TypeType:
+		return SemverType
+	default:
+		return types.NewErr("type conversion error from '%s' to '%s'", SemverType, typeVal)
+	}
+}
+
+func (v Semver) Equal(other ref.Val) ref.Val {
+	otherVersion, ok := other.(Semver)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	return types.Bool(v.Version.EQ(otherVersion.Version))
+}
+
+func (v Semver) Type() ref.Type {
+	return SemverType
+}
+
+func (v Semver) Value() interface{} {
+	return v.Version
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/types.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/types.go
new file mode 100644
index 0000000000..84bfd7e658
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/types.go
@@ -0,0 +1,598 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+const (
+	noMaxLength = math.MaxInt
+)
+
+// NewListType returns a parameterized list type with a specified element type.
+func NewListType(elem *DeclType, maxItems int64) *DeclType {
+	return &DeclType{
+		name:         "list",
+		ElemType:     elem,
+		MaxElements:  maxItems,
+		celType:      cel.ListType(elem.CelType()),
+		defaultValue: NewListValue(),
+		// a list can always be represented as [] in JSON, so hardcode the min size
+		// to 2
+		MinSerializedSize: 2,
+	}
+}
+
+// NewMapType returns a parameterized map type with the given key and element types.
+func NewMapType(key, elem *DeclType, maxProperties int64) *DeclType {
+	return &DeclType{
+		name:         "map",
+		KeyType:      key,
+		ElemType:     elem,
+		MaxElements:  maxProperties,
+		celType:      cel.MapType(key.CelType(), elem.CelType()),
+		defaultValue: NewMapValue(),
+		// a map can always be represented as {} in JSON, so hardcode the min size
+		// to 2
+		MinSerializedSize: 2,
+	}
+}
+
+// NewObjectType creates an object type with a qualified name and a set of field declarations.
+func NewObjectType(name string, fields map[string]*DeclField) *DeclType {
+	t := &DeclType{
+		name:      name,
+		Fields:    fields,
+		celType:   cel.ObjectType(name),
+		traitMask: traits.FieldTesterType | traits.IndexerType,
+		// an object could potentially be larger than the min size we default to here ({}),
+		// but we rely upon the caller to change MinSerializedSize accordingly if they add
+		// properties to the object
+		MinSerializedSize: 2,
+	}
+	t.defaultValue = NewObjectValue(t)
+	return t
+}
+
+func NewSimpleTypeWithMinSize(name string, celType *cel.Type, zeroVal ref.Val, minSize int64) *DeclType {
+	return &DeclType{
+		name:              name,
+		celType:           celType,
+		defaultValue:      zeroVal,
+		MinSerializedSize: minSize,
+	}
+}
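+
+// exampleObjectDecl is an editorial sketch, not part of the upstream file:
+// it composes the constructors above into a small object declaration. The
+// type and field names are illustrative only.
+func exampleObjectDecl() *DeclType {
+	intType := NewSimpleTypeWithMinSize("int", cel.IntType, types.IntZero, MinNumberSize)
+	return NewObjectType("kubernetes.exampleSpec", map[string]*DeclField{
+		"replicas": {Name: "replicas", Type: intType, Required: true},
+	})
+}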
+
+// DeclType represents the universal type descriptor for OpenAPIv3 types.
+type DeclType struct {
+	fmt.Stringer
+
+	name string
+	// Fields contains a map of escaped CEL identifier field names to field declarations.
+	Fields      map[string]*DeclField
+	KeyType     *DeclType
+	ElemType    *DeclType
+	TypeParam   bool
+	Metadata    map[string]string
+	MaxElements int64
+	// MinSerializedSize represents the smallest possible size in bytes that
+	// the DeclType could be serialized to in JSON.
+	MinSerializedSize int64
+
+	celType      *cel.Type
+	traitMask    int
+	defaultValue ref.Val
+}
+
+// MaybeAssignTypeName attempts to set the DeclType name to a fully qualified name, if the type
+// is of `object` type.
+//
+// Non-object types are returned unchanged, except that map and list element types are renamed recursively.
+func (t *DeclType) MaybeAssignTypeName(name string) *DeclType {
+	if t.IsObject() {
+		objUpdated := false
+		if t.name != "object" {
+			name = t.name
+		} else {
+			objUpdated = true
+		}
+		fieldMap := make(map[string]*DeclField, len(t.Fields))
+		for fieldName, field := range t.Fields {
+			fieldType := field.Type
+			fieldTypeName := fmt.Sprintf("%s.%s", name, fieldName)
+			updated := fieldType.MaybeAssignTypeName(fieldTypeName)
+			if updated == fieldType {
+				fieldMap[fieldName] = field
+				continue
+			}
+			objUpdated = true
+			fieldMap[fieldName] = &DeclField{
+				Name:         fieldName,
+				Type:         updated,
+				Required:     field.Required,
+				enumValues:   field.enumValues,
+				defaultValue: field.defaultValue,
+			}
+		}
+		if !objUpdated {
+			return t
+		}
+		return &DeclType{
+			name:              name,
+			Fields:            fieldMap,
+			KeyType:           t.KeyType,
+			ElemType:          t.ElemType,
+			TypeParam:         t.TypeParam,
+			Metadata:          t.Metadata,
+			celType:           cel.ObjectType(name),
+			traitMask:         t.traitMask,
+			defaultValue:      t.defaultValue,
+			MinSerializedSize: t.MinSerializedSize,
+		}
+	}
+	if t.IsMap() {
+		elemTypeName := fmt.Sprintf("%s.@elem", name)
+		updated := t.ElemType.MaybeAssignTypeName(elemTypeName)
+		if updated == t.ElemType {
+			return t
+		}
+		return NewMapType(t.KeyType, updated, t.MaxElements)
+	}
+	if t.IsList() {
+		elemTypeName := fmt.Sprintf("%s.@idx", name)
+		updated := t.ElemType.MaybeAssignTypeName(elemTypeName)
+		if updated == t.ElemType {
+			return t
+		}
+		return NewListType(updated, t.MaxElements)
+	}
+	return t
+}
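+
+// exampleAssignTypeName is an illustrative sketch, not upstream code: anonymous
+// "object" types pick up caller-chosen qualified names, and nested object
+// elements are renamed using the "@idx"/"@elem" suffixes above. The name
+// "acme.Spec" is hypothetical.
+func exampleAssignTypeName() *DeclType {
+	item := NewObjectType("object", map[string]*DeclField{
+		"port": NewDeclField("port", IntType, true, nil, nil),
+	})
+	anon := NewObjectType("object", map[string]*DeclField{
+		"ports": NewDeclField("ports", NewListType(item, 8), false, nil, nil),
+	})
+	// The result is named "acme.Spec"; the list element object is renamed to
+	// "acme.Spec.ports.@idx", while scalar field types pass through unchanged.
+	return anon.MaybeAssignTypeName("acme.Spec")
+}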
+
+// ExprType returns the CEL expression type of this declaration.
+func (t *DeclType) ExprType() (*exprpb.Type, error) {
+	return cel.TypeToExprType(t.celType)
+}
+
+// CelType returns the CEL type of this declaration.
+func (t *DeclType) CelType() *cel.Type {
+	return t.celType
+}
+
+// FindField returns the DeclField with the given name if present.
+func (t *DeclType) FindField(name string) (*DeclField, bool) {
+	f, found := t.Fields[name]
+	return f, found
+}
+
+// HasTrait implements the CEL ref.Type interface making this type declaration suitable for use
+// within the CEL evaluator.
+func (t *DeclType) HasTrait(trait int) bool {
+	if t.traitMask&trait == trait {
+		return true
+	}
+	if t.defaultValue == nil {
+		return false
+	}
+	_, isDecl := t.defaultValue.Type().(*DeclType)
+	if isDecl {
+		return false
+	}
+	return t.defaultValue.Type().HasTrait(trait)
+}
+
+// IsList returns whether the declaration is a `list` type which defines a parameterized element
+// type, but not a parameterized key type or fields.
+func (t *DeclType) IsList() bool {
+	return t.KeyType == nil && t.ElemType != nil && t.Fields == nil
+}
+
+// IsMap returns whether the declaration is a 'map' type which defines parameterized key and
+// element types, but not fields.
+func (t *DeclType) IsMap() bool {
+	return t.KeyType != nil && t.ElemType != nil && t.Fields == nil
+}
+
+// IsObject returns whether the declaration is an 'object' type which defines a set of typed fields.
+func (t *DeclType) IsObject() bool {
+	return t.KeyType == nil && t.ElemType == nil && t.Fields != nil
+}
+
+// String implements the fmt.Stringer interface method.
+func (t *DeclType) String() string {
+	return t.name
+}
+
+// TypeName returns the fully qualified type name for the DeclType.
+func (t *DeclType) TypeName() string {
+	return t.name
+}
+
+// DefaultValue returns the CEL ref.Val representing the default value for this object type,
+// if one exists.
+func (t *DeclType) DefaultValue() ref.Val {
+	return t.defaultValue
+}
+
+// FieldTypeMap constructs a map of the field and object types nested within a given type.
+func FieldTypeMap(path string, t *DeclType) map[string]*DeclType {
+	if t.IsObject() && t.TypeName() != "object" {
+		path = t.TypeName()
+	}
+	types := make(map[string]*DeclType)
+	buildDeclTypes(path, t, types)
+	return types
+}
+
+func buildDeclTypes(path string, t *DeclType, types map[string]*DeclType) {
+	// Ensure object types are properly named according to where they appear in the schema.
+	if t.IsObject() {
+		// Hack to ensure that names are uniquely qualified and work well with the type
+		// resolution steps which require fully qualified type names for field resolution
+		// to function properly.
+		types[t.TypeName()] = t
+		for name, field := range t.Fields {
+			fieldPath := fmt.Sprintf("%s.%s", path, name)
+			buildDeclTypes(fieldPath, field.Type, types)
+		}
+	}
+	// Map element properties to type names if needed.
+	if t.IsMap() {
+		mapElemPath := fmt.Sprintf("%s.@elem", path)
+		buildDeclTypes(mapElemPath, t.ElemType, types)
+		types[path] = t
+	}
+	// List element properties.
+	if t.IsList() {
+		listIdxPath := fmt.Sprintf("%s.@idx", path)
+		buildDeclTypes(listIdxPath, t.ElemType, types)
+		types[path] = t
+	}
+}
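+
+// exampleFieldTypeMap is an illustrative sketch, not upstream code: for the
+// hypothetical "acme.Spec" object above, the returned map is keyed by the
+// qualified path of every nested object, map, and list type.
+func exampleFieldTypeMap(spec *DeclType) []string {
+	var names []string
+	for name := range FieldTypeMap(spec.TypeName(), spec) {
+		names = append(names, name) // e.g. "acme.Spec", "acme.Spec.ports", "acme.Spec.ports.@idx"
+	}
+	return names
+}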
+
+// DeclField describes the name, ordinal, and optionality of a field declaration within a type.
+type DeclField struct {
+	Name         string
+	Type         *DeclType
+	Required     bool
+	enumValues   []interface{}
+	defaultValue interface{}
+}
+
+func NewDeclField(name string, declType *DeclType, required bool, enumValues []interface{}, defaultValue interface{}) *DeclField {
+	return &DeclField{
+		Name:         name,
+		Type:         declType,
+		Required:     required,
+		enumValues:   enumValues,
+		defaultValue: defaultValue,
+	}
+}
+
+// TypeName returns the string type name of the field.
+func (f *DeclField) TypeName() string {
+	return f.Type.TypeName()
+}
+
+// DefaultValue returns the zero value associated with the field.
+func (f *DeclField) DefaultValue() ref.Val {
+	if f.defaultValue != nil {
+		return types.DefaultTypeAdapter.NativeToValue(f.defaultValue)
+	}
+	return f.Type.DefaultValue()
+}
+
+// EnumValues returns the set of values that this field may take.
+func (f *DeclField) EnumValues() []ref.Val {
+	if len(f.enumValues) == 0 {
+		return []ref.Val{}
+	}
+	ev := make([]ref.Val, len(f.enumValues))
+	for i, e := range f.enumValues {
+		ev[i] = types.DefaultTypeAdapter.NativeToValue(e)
+	}
+	return ev
+}
+
+func allTypesForDecl(declTypes []*DeclType) map[string]*DeclType {
+	if declTypes == nil {
+		return nil
+	}
+	allTypes := map[string]*DeclType{}
+	for _, declType := range declTypes {
+		for k, t := range FieldTypeMap(declType.TypeName(), declType) {
+			allTypes[k] = t
+		}
+	}
+
+	return allTypes
+}
+
+// NewDeclTypeProvider returns an Open API Schema-based type-system which is CEL compatible.
+func NewDeclTypeProvider(rootTypes ...*DeclType) *DeclTypeProvider {
+	// Note, if the schema indicates that it's actually based on another proto
+	// then prefer the proto definition. For expressions in the proto, a new field
+	// annotation will be needed to indicate the expected environment and type of
+	// the expression.
+	allTypes := allTypesForDecl(rootTypes)
+	return &DeclTypeProvider{
+		registeredTypes: allTypes,
+	}
+}
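+
+// exampleEnvOptions is an illustrative sketch, not upstream code, and assumes a
+// cel-go version that exposes (*cel.Env).CELTypeProvider(): it wires the
+// provider into a CEL environment so the declared types resolve in expressions.
+func exampleEnvOptions(rootTypes ...*DeclType) (*cel.Env, error) {
+	base, err := cel.NewEnv()
+	if err != nil {
+		return nil, err
+	}
+	opts, err := NewDeclTypeProvider(rootTypes...).EnvOptions(base.CELTypeProvider())
+	if err != nil {
+		return nil, err
+	}
+	return base.Extend(opts...)
+}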
+
+// DeclTypeProvider extends the CEL ref.TypeProvider interface and provides an Open API Schema-based
+// type-system.
+type DeclTypeProvider struct {
+	registeredTypes             map[string]*DeclType
+	typeProvider                types.Provider
+	typeAdapter                 types.Adapter
+	recognizeKeywordAsFieldName bool
+}
+
+func (rt *DeclTypeProvider) SetRecognizeKeywordAsFieldName(recognize bool) {
+	rt.recognizeKeywordAsFieldName = recognize
+}
+
+func (rt *DeclTypeProvider) EnumValue(enumName string) ref.Val {
+	return rt.typeProvider.EnumValue(enumName)
+}
+
+func (rt *DeclTypeProvider) FindIdent(identName string) (ref.Val, bool) {
+	return rt.typeProvider.FindIdent(identName)
+}
+
+// EnvOptions returns a set of cel.EnvOption values which includes the declaration set
+// as well as a custom ref.TypeProvider.
+//
+// If the DeclTypeProvider value is nil, an empty []cel.EnvOption set is returned.
+func (rt *DeclTypeProvider) EnvOptions(tp types.Provider) ([]cel.EnvOption, error) {
+	if rt == nil {
+		return []cel.EnvOption{}, nil
+	}
+	rtWithTypes, err := rt.WithTypeProvider(tp)
+	if err != nil {
+		return nil, err
+	}
+	return []cel.EnvOption{
+		cel.CustomTypeProvider(rtWithTypes),
+		cel.CustomTypeAdapter(rtWithTypes),
+	}, nil
+}
+
+// WithTypeProvider returns a new DeclTypeProvider that sets the given TypeProvider
+// If the original DeclTypeProvider is nil, the returned DeclTypeProvider is still nil.
+func (rt *DeclTypeProvider) WithTypeProvider(tp types.Provider) (*DeclTypeProvider, error) {
+	if rt == nil {
+		return nil, nil
+	}
+	var ta types.Adapter = types.DefaultTypeAdapter
+	tpa, ok := tp.(types.Adapter)
+	if ok {
+		ta = tpa
+	}
+	rtWithTypes := &DeclTypeProvider{
+		typeProvider:                tp,
+		typeAdapter:                 ta,
+		registeredTypes:             rt.registeredTypes,
+		recognizeKeywordAsFieldName: rt.recognizeKeywordAsFieldName,
+	}
+	for name, declType := range rt.registeredTypes {
+		tpType, found := tp.FindStructType(name)
+		// compare the declared CEL type with the type registered in the provider
+		expT := declType.CelType()
+		if found && !expT.IsExactType(tpType) {
+			return nil, fmt.Errorf(
+				"type %s definition differs between CEL environment and type provider", name)
+		}
+
+	}
+	return rtWithTypes, nil
+}
+
+// FindStructType attempts to resolve the typeName from the rule's rule-schema, or, if not
+// found there, from the embedded ref.TypeProvider.
+//
+// FindStructType overrides the default type-finding behavior of the embedded TypeProvider.
+//
+// Note, when the type name is based on the Open API Schema, the name will reflect the object path
+// where the type definition appears.
+func (rt *DeclTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
+	if rt == nil {
+		return nil, false
+	}
+	declType, found := rt.findDeclType(typeName)
+	if found {
+		expT := declType.CelType()
+		return types.NewTypeTypeWithParam(expT), found
+	}
+	return rt.typeProvider.FindStructType(typeName)
+}
+
+// FindDeclType returns the registered DeclType description which can be mapped to a CEL type.
+func (rt *DeclTypeProvider) FindDeclType(typeName string) (*DeclType, bool) {
+	if rt == nil {
+		return nil, false
+	}
+	return rt.findDeclType(typeName)
+}
+
+// FindStructFieldNames returns the field names associated with the type, if the type
+// is found.
+func (rt *DeclTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) {
+	return []string{}, false
+}
+
+// FindStructFieldType returns a field type given a type name and field name, if found.
+//
+// Note, the type name for an Open API Schema type is likely to be its qualified object path.
+// If, in the future an object instance rather than a type name were provided, the field
+// resolution might more accurately reflect the expected type model. However, in this case
+// concessions were made to align with the existing CEL interfaces.
+func (rt *DeclTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) {
+	st, found := rt.findDeclType(typeName)
+	if !found {
+		return rt.typeProvider.FindStructFieldType(typeName, fieldName)
+	}
+
+	f, found := st.Fields[fieldName]
+	if rt.recognizeKeywordAsFieldName && !found && celReservedSymbols.Has(fieldName) {
+		f, found = st.Fields["__"+fieldName+"__"]
+	}
+
+	if found {
+		ft := f.Type
+		expT := ft.CelType()
+		return &types.FieldType{
+			Type: expT,
+		}, true
+	}
+	// This could be a dynamic map.
+	if st.IsMap() {
+		et := st.ElemType
+		expT := et.CelType()
+		return &types.FieldType{
+			Type: expT,
+		}, true
+	}
+	return nil, false
+}
+
+// NativeToValue is an implementation of the ref.TypeAdapter interface which supports conversion
+// of rule values to CEL ref.Val instances.
+func (rt *DeclTypeProvider) NativeToValue(val interface{}) ref.Val {
+	return rt.typeAdapter.NativeToValue(val)
+}
+
+func (rt *DeclTypeProvider) NewValue(typeName string, fields map[string]ref.Val) ref.Val {
+	// TODO: implement for OpenAPI types to enable CEL object instantiation, which is needed
+	// for mutating admission.
+	return rt.typeProvider.NewValue(typeName, fields)
+}
+
+// TypeNames returns the list of type names declared within the DeclTypeProvider object.
+func (rt *DeclTypeProvider) TypeNames() []string {
+	typeNames := make([]string, len(rt.registeredTypes))
+	i := 0
+	for name := range rt.registeredTypes {
+		typeNames[i] = name
+		i++
+	}
+	return typeNames
+}
+
+func (rt *DeclTypeProvider) findDeclType(typeName string) (*DeclType, bool) {
+	declType, found := rt.registeredTypes[typeName]
+	if found {
+		return declType, true
+	}
+	declType = findScalar(typeName)
+	return declType, declType != nil
+}
+
+func findScalar(typename string) *DeclType {
+	switch typename {
+	case BoolType.TypeName():
+		return BoolType
+	case BytesType.TypeName():
+		return BytesType
+	case DoubleType.TypeName():
+		return DoubleType
+	case DurationType.TypeName():
+		return DurationType
+	case IntType.TypeName():
+		return IntType
+	case NullType.TypeName():
+		return NullType
+	case StringType.TypeName():
+		return StringType
+	case TimestampType.TypeName():
+		return TimestampType
+	case UintType.TypeName():
+		return UintType
+	case ListType.TypeName():
+		return ListType
+	case MapType.TypeName():
+		return MapType
+	default:
+		return nil
+	}
+}
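+
+// exampleScalarLookup is an illustrative sketch, not upstream code: scalar type
+// names resolve through findScalar above even on a provider with no registered
+// types.
+func exampleScalarLookup() bool {
+	t, ok := NewDeclTypeProvider().FindDeclType("string")
+	return ok && t == StringType // true
+}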
+
+var (
+	// AnyType is equivalent to the CEL 'protobuf.Any' type in that the value may have any of the
+	// types supported.
+	AnyType = NewSimpleTypeWithMinSize("any", cel.AnyType, nil, 1)
+
+	// BoolType is equivalent to the CEL 'bool' type.
+	BoolType = NewSimpleTypeWithMinSize("bool", cel.BoolType, types.False, MinBoolSize)
+
+	// BytesType is equivalent to the CEL 'bytes' type.
+	BytesType = NewSimpleTypeWithMinSize("bytes", cel.BytesType, types.Bytes([]byte{}), MinStringSize)
+
+	// DoubleType is equivalent to the CEL 'double' type which is a 64-bit floating point value.
+	DoubleType = NewSimpleTypeWithMinSize("double", cel.DoubleType, types.Double(0), MinNumberSize)
+
+	// DurationType is equivalent to the CEL 'duration' type.
+	DurationType = NewSimpleTypeWithMinSize("duration", cel.DurationType, types.Duration{Duration: time.Duration(0)}, MinDurationSizeJSON)
+
+	// DateType is equivalent to the CEL 'date' type.
+	DateType = NewSimpleTypeWithMinSize("date", cel.TimestampType, types.Timestamp{Time: time.Time{}}, JSONDateSize)
+
+	// DynType is the equivalent of the CEL 'dyn' concept which indicates that the type will be
+	// determined at runtime rather than compile time.
+	DynType = NewSimpleTypeWithMinSize("dyn", cel.DynType, nil, 1)
+
+	// IntType is equivalent to the CEL 'int' type which is a 64-bit signed int.
+	IntType = NewSimpleTypeWithMinSize("int", cel.IntType, types.IntZero, MinNumberSize)
+
+	// NullType is equivalent to the CEL 'null_type'.
+	NullType = NewSimpleTypeWithMinSize("null_type", cel.NullType, types.NullValue, 4)
+
+	// StringType is equivalent to the CEL 'string' type which is expected to be a UTF-8 string.
+	// StringType values may either be string literals or expression strings.
+	StringType = NewSimpleTypeWithMinSize("string", cel.StringType, types.String(""), MinStringSize)
+
+	// TimestampType corresponds to the well-known protobuf.Timestamp type supported within CEL.
+	// Note that both the OpenAPI date and date-time types map onto TimestampType, so not all types
+	// labeled as Timestamp will necessarily have the same MinSerializedSize.
+	TimestampType = NewSimpleTypeWithMinSize("timestamp", cel.TimestampType, types.Timestamp{Time: time.Time{}}, JSONDateSize)
+
+	// QuantityDeclType wraps a [QuantityType] and makes it usable with functions that expect
+	// a [DeclType].
+	QuantityDeclType = NewSimpleTypeWithMinSize("quantity", QuantityType, Quantity{Quantity: resource.NewQuantity(0, resource.DecimalSI)}, 8)
+
+	// UintType is equivalent to the CEL 'uint' type.
+	UintType = NewSimpleTypeWithMinSize("uint", cel.UintType, types.Uint(0), 1)
+
+	// ListType is equivalent to the CEL 'list' type.
+	ListType = NewListType(AnyType, noMaxLength)
+
+	// MapType is equivalent to the CEL 'map' type.
+	MapType = NewMapType(AnyType, AnyType, noMaxLength)
+)
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/url.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/url.go
new file mode 100644
index 0000000000..6800205c9a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/url.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/checker/decls"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// URL provides a CEL representation of a URL.
+type URL struct {
+	*url.URL
+}
+
+var (
+	URLObject = decls.NewObjectType("kubernetes.URL")
+	typeValue = types.NewTypeValue("kubernetes.URL")
+	URLType   = cel.ObjectType("kubernetes.URL")
+)
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (d URL) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	if reflect.TypeOf(d.URL).AssignableTo(typeDesc) {
+		return d.URL, nil
+	}
+	if reflect.TypeOf("").AssignableTo(typeDesc) {
+		return d.URL.String(), nil
+	}
+	return nil, fmt.Errorf("type conversion error from 'URL' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (d URL) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case typeValue:
+		return d
+	case types.TypeType:
+		return typeValue
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", typeValue, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (d URL) Equal(other ref.Val) ref.Val {
+	otherDur, ok := other.(URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	return types.Bool(d.URL.String() == otherDur.URL.String())
+}
+
+// Type implements ref.Val.Type.
+func (d URL) Type() ref.Type {
+	return typeValue
+}
+
+// Value implements ref.Val.Value.
+func (d URL) Value() interface{} {
+	return d.URL
+}
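+
+// exampleURLValue is an illustrative sketch, not upstream code: it wraps a
+// parsed net/url value and converts it back to a native string through the CEL
+// ref.Val interface.
+func exampleURLValue() (string, error) {
+	u, err := url.Parse("https://example.com/base")
+	if err != nil {
+		return "", err
+	}
+	s, err := URL{URL: u}.ConvertToNative(reflect.TypeOf(""))
+	if err != nil {
+		return "", err
+	}
+	return s.(string), nil // "https://example.com/base"
+}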
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/cel/value.go b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/value.go
new file mode 100644
index 0000000000..01c7f20acc
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/cel/value.go
@@ -0,0 +1,769 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+// EncodeStyle is a hint for string encoding of parsed values.
+type EncodeStyle int
+
+const (
+	// BlockValueStyle is the default string encoding which preserves whitespace and newlines.
+	BlockValueStyle EncodeStyle = iota
+
+	// FlowValueStyle indicates that the string is an inline representation of complex types.
+	FlowValueStyle
+
+	// FoldedValueStyle is a multiline string with whitespace and newlines trimmed to a
+	// single whitespace. Repeated newlines are replaced with a single newline rather
+	// than a single whitespace.
+	FoldedValueStyle
+
+	// LiteralStyle is a multiline string that preserves newlines, but trims all other whitespace
+	// to a single character.
+	LiteralStyle
+)
+
+// NewEmptyDynValue returns the zero-valued DynValue.
+func NewEmptyDynValue() *DynValue {
+	// note: 0 is not a valid parse node identifier.
+	dv, _ := NewDynValue(0, nil)
+	return dv
+}
+
+// NewDynValue returns a DynValue that corresponds to a parse node id and value.
+func NewDynValue(id int64, val interface{}) (*DynValue, error) {
+	dv := &DynValue{ID: id}
+	err := dv.SetValue(val)
+	return dv, err
+}
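+
+// exampleDynValue is an illustrative sketch, not upstream code: wrapping a
+// native Go string yields a CEL string value and the matching policy model
+// type.
+func exampleDynValue() (*DynValue, error) {
+	dv, err := NewDynValue(1, "hello")
+	// On success: dv.ExprValue() == types.String("hello") and
+	// dv.DeclType() == StringType.
+	return dv, err
+}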
+
+// DynValue is a dynamically typed value used to describe unstructured content.
+// Whether the value has the desired type is determined by where it is used within the Instance or
+// Template, and whether there are schemas which might enforce a more rigid type definition.
+type DynValue struct {
+	ID          int64
+	EncodeStyle EncodeStyle
+	value       interface{}
+	exprValue   ref.Val
+	declType    *DeclType
+}
+
+// DeclType returns the policy model type of the dyn value.
+func (dv *DynValue) DeclType() *DeclType {
+	return dv.declType
+}
+
+// ConvertToNative is an implementation of the CEL ref.Val method used to adapt between CEL types
+// and Go-native types.
+//
+// The default behavior of this method is to first convert to a CEL type which has a well-defined
+// set of conversion behaviors and proxy to the CEL ConvertToNative method for the type.
+func (dv *DynValue) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	ev := dv.ExprValue()
+	if types.IsError(ev) {
+		return nil, ev.(*types.Err)
+	}
+	return ev.ConvertToNative(typeDesc)
+}
+
+// Equal returns whether the dyn value is equal to a given CEL value.
+func (dv *DynValue) Equal(other ref.Val) ref.Val {
+	dvType := dv.Type()
+	otherType := other.Type()
+	// Preserve CEL's homogeneous equality constraint.
+	if dvType.TypeName() != otherType.TypeName() {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	switch v := dv.value.(type) {
+	case ref.Val:
+		return v.Equal(other)
+	case PlainTextValue:
+		return celBool(string(v) == other.Value().(string))
+	case *MultilineStringValue:
+		return celBool(v.Value == other.Value().(string))
+	case time.Duration:
+		otherDuration := other.Value().(time.Duration)
+		return celBool(v == otherDuration)
+	case time.Time:
+		otherTimestamp := other.Value().(time.Time)
+		return celBool(v.Equal(otherTimestamp))
+	default:
+		return celBool(reflect.DeepEqual(v, other.Value()))
+	}
+}
+
+// ExprValue converts the DynValue into a CEL value.
+func (dv *DynValue) ExprValue() ref.Val {
+	return dv.exprValue
+}
+
+// Value returns the underlying value held by this reference.
+func (dv *DynValue) Value() interface{} {
+	return dv.value
+}
+
+// SetValue updates the underlying value held by this reference.
+func (dv *DynValue) SetValue(value interface{}) error {
+	dv.value = value
+	var err error
+	dv.exprValue, dv.declType, err = exprValue(value)
+	return err
+}
+
+// Type returns the CEL type for the given value.
+func (dv *DynValue) Type() ref.Type {
+	return dv.ExprValue().Type()
+}
+
+func exprValue(value interface{}) (ref.Val, *DeclType, error) {
+	switch v := value.(type) {
+	case bool:
+		return types.Bool(v), BoolType, nil
+	case []byte:
+		return types.Bytes(v), BytesType, nil
+	case float64:
+		return types.Double(v), DoubleType, nil
+	case int64:
+		return types.Int(v), IntType, nil
+	case string:
+		return types.String(v), StringType, nil
+	case uint64:
+		return types.Uint(v), UintType, nil
+	case time.Duration:
+		return types.Duration{Duration: v}, DurationType, nil
+	case time.Time:
+		return types.Timestamp{Time: v}, TimestampType, nil
+	case types.Null:
+		return v, NullType, nil
+	case *ListValue:
+		return v, ListType, nil
+	case *MapValue:
+		return v, MapType, nil
+	case *ObjectValue:
+		return v, v.objectType, nil
+	default:
+		return nil, unknownType, fmt.Errorf("unsupported type: (%T)%v", v, v)
+	}
+}
+
+// PlainTextValue is a text string literal which must not be treated as an expression.
+type PlainTextValue string
+
+// MultilineStringValue is a multiline string value which has been parsed in a way which omits
+// whitespace as well as a raw form which preserves whitespace.
+type MultilineStringValue struct {
+	Value string
+	Raw   string
+}
+
+func newStructValue() *structValue {
+	return &structValue{
+		Fields:   []*Field{},
+		fieldMap: map[string]*Field{},
+	}
+}
+
+type structValue struct {
+	Fields   []*Field
+	fieldMap map[string]*Field
+}
+
+// AddField appends a Field to the structValue and indexes the field by name.
+func (sv *structValue) AddField(field *Field) {
+	sv.Fields = append(sv.Fields, field)
+	sv.fieldMap[field.Name] = field
+}
+
+// ConvertToNative converts the structValue to a native Go type.
+func (sv *structValue) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	if typeDesc.Kind() != reflect.Map &&
+		typeDesc.Kind() != reflect.Struct &&
+		typeDesc.Kind() != reflect.Pointer &&
+		typeDesc.Kind() != reflect.Interface {
+		return nil, fmt.Errorf("type conversion error from object to '%v'", typeDesc)
+	}
+
+	// Unwrap pointers, but track their use.
+	isPtr := false
+	if typeDesc.Kind() == reflect.Pointer {
+		tk := typeDesc
+		typeDesc = typeDesc.Elem()
+		if typeDesc.Kind() == reflect.Pointer {
+			return nil, fmt.Errorf("unsupported type conversion to '%v'", tk)
+		}
+		isPtr = true
+	}
+
+	if typeDesc.Kind() == reflect.Map {
+		keyType := typeDesc.Key()
+		if keyType.Kind() != reflect.String && keyType.Kind() != reflect.Interface {
+			return nil, fmt.Errorf("object fields cannot be converted to type '%v'", keyType)
+		}
+		elemType := typeDesc.Elem()
+		sz := len(sv.fieldMap)
+		ntvMap := reflect.MakeMapWithSize(typeDesc, sz)
+		for name, val := range sv.fieldMap {
+			refVal, err := val.Ref.ConvertToNative(elemType)
+			if err != nil {
+				return nil, err
+			}
+			ntvMap.SetMapIndex(reflect.ValueOf(name), reflect.ValueOf(refVal))
+		}
+		return ntvMap.Interface(), nil
+	}
+
+	if typeDesc.Kind() == reflect.Struct {
+		ntvObjPtr := reflect.New(typeDesc)
+		ntvObj := ntvObjPtr.Elem()
+		for name, val := range sv.fieldMap {
+			f := ntvObj.FieldByName(name)
+			if !f.IsValid() {
+				return nil, fmt.Errorf("type conversion error, no such field %s in type %v",
+					name, typeDesc)
+			}
+			fv, err := val.Ref.ConvertToNative(f.Type())
+			if err != nil {
+				return nil, err
+			}
+			f.Set(reflect.ValueOf(fv))
+		}
+		if isPtr {
+			return ntvObjPtr.Interface(), nil
+		}
+		return ntvObj.Interface(), nil
+	}
+	return nil, fmt.Errorf("type conversion error from object to '%v'", typeDesc)
+}
+
+// GetField returns a Field by name if one exists.
+func (sv *structValue) GetField(name string) (*Field, bool) {
+	field, found := sv.fieldMap[name]
+	return field, found
+}
+
+// IsSet returns whether the given field, which is defined, has also been set.
+func (sv *structValue) IsSet(key ref.Val) ref.Val {
+	k, ok := key.(types.String)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(key)
+	}
+	name := string(k)
+	_, found := sv.fieldMap[name]
+	return celBool(found)
+}
+
+// NewObjectValue creates a struct value with a schema type and returns the empty ObjectValue.
+func NewObjectValue(sType *DeclType) *ObjectValue {
+	return &ObjectValue{
+		structValue: newStructValue(),
+		objectType:  sType,
+	}
+}
+
+// ObjectValue is a struct with a custom schema type which indicates the fields and types
+// associated with the structure.
+type ObjectValue struct {
+	*structValue
+	objectType *DeclType
+}
+
+// ConvertToType is an implementation of the CEL ref.Val interface method.
+func (o *ObjectValue) ConvertToType(t ref.Type) ref.Val {
+	if t == types.TypeType {
+		return types.NewObjectTypeValue(o.objectType.TypeName())
+	}
+	if t.TypeName() == o.objectType.TypeName() {
+		return o
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", o.Type(), t)
+}
+
+// Equal returns true if the two object types are equal and their field values are equal.
+func (o *ObjectValue) Equal(other ref.Val) ref.Val {
+	// Preserve CEL's homogeneous equality semantics.
+	if o.objectType.TypeName() != other.Type().TypeName() {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	o2 := other.(traits.Indexer)
+	for name := range o.objectType.Fields {
+		k := types.String(name)
+		v := o.Get(k)
+		ov := o2.Get(k)
+		vEq := v.Equal(ov)
+		if vEq != types.True {
+			return vEq
+		}
+	}
+	return types.True
+}
+
+// Get returns the value of the specified field.
+//
+// If the field is set, its value is returned. If the field is not set, the default value for the
+// field is returned thus allowing for safe-traversal and preserving proto-like field traversal
+// semantics for Open API Schema backed types.
+func (o *ObjectValue) Get(name ref.Val) ref.Val {
+	n, ok := name.(types.String)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(n)
+	}
+	nameStr := string(n)
+	field, found := o.fieldMap[nameStr]
+	if found {
+		return field.Ref.ExprValue()
+	}
+	fieldDef, found := o.objectType.Fields[nameStr]
+	if !found {
+		return types.NewErr("no such field: %s", nameStr)
+	}
+	defValue := fieldDef.DefaultValue()
+	if defValue != nil {
+		return defValue
+	}
+	return types.NewErr("no default for type: %s", fieldDef.TypeName())
+}
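+
+// exampleObjectDefaults is an illustrative sketch with hypothetical field
+// names, not upstream code: Get on a declared-but-unset field yields the
+// schema default rather than an error, preserving safe traversal.
+func exampleObjectDefaults() ref.Val {
+	sType := NewObjectType("acme.Spec", map[string]*DeclField{
+		"replicas": NewDeclField("replicas", IntType, false, nil, nil),
+	})
+	obj := NewObjectValue(sType)
+	return obj.Get(types.String("replicas")) // types.IntZero, the int default
+}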
+
+// Type returns the CEL type value of the object.
+func (o *ObjectValue) Type() ref.Type {
+	return o.objectType
+}
+
+// Value returns the Go-native representation of the object.
+func (o *ObjectValue) Value() interface{} {
+	return o
+}
+
+// NewMapValue returns an empty MapValue.
+func NewMapValue() *MapValue {
+	return &MapValue{
+		structValue: newStructValue(),
+	}
+}
+
+// MapValue declares an object with a set of named fields whose values are dynamically typed.
+type MapValue struct {
+	*structValue
+}
+
+// ConvertToObject produces an ObjectValue from the MapValue with the associated schema type.
+//
+// The conversion is shallow; the memory is shared between the Object and the Map, as all
+// references to the map are expected to be replaced with the Object reference.
+func (m *MapValue) ConvertToObject(declType *DeclType) *ObjectValue {
+	return &ObjectValue{
+		structValue: m.structValue,
+		objectType:  declType,
+	}
+}
+
+// Contains returns whether the given key is contained in the MapValue.
+func (m *MapValue) Contains(key ref.Val) ref.Val {
+	v, found := m.Find(key)
+	if v != nil && types.IsUnknownOrError(v) {
+		return v
+	}
+	return celBool(found)
+}
+
+// ConvertToType converts the MapValue to another CEL type, if possible.
+func (m *MapValue) ConvertToType(t ref.Type) ref.Val {
+	switch t {
+	case types.MapType:
+		return m
+	case types.TypeType:
+		return types.MapType
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", m.Type(), t)
+}
+
+// Equal returns true if the maps are of the same size, have the same keys, and the key-values
+// from each map are equal.
+func (m *MapValue) Equal(other ref.Val) ref.Val {
+	oMap, isMap := other.(traits.Mapper)
+	if !isMap {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	if m.Size() != oMap.Size() {
+		return types.False
+	}
+	for name, field := range m.fieldMap {
+		k := types.String(name)
+		ov, found := oMap.Find(k)
+		if !found {
+			return types.False
+		}
+		v := field.Ref.ExprValue()
+		vEq := v.Equal(ov)
+		if vEq != types.True {
+			return vEq
+		}
+	}
+	return types.True
+}
+
+// Find returns the value for the key in the map, if found.
+func (m *MapValue) Find(name ref.Val) (ref.Val, bool) {
+	// Currently only maps with string keys are supported as this is best aligned with JSON,
+	// and also much simpler to support.
+	n, ok := name.(types.String)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(n), true
+	}
+	nameStr := string(n)
+	field, found := m.fieldMap[nameStr]
+	if found {
+		return field.Ref.ExprValue(), true
+	}
+	return nil, false
+}
+
+// Get returns the value for the key in the map, or error if not found.
+func (m *MapValue) Get(key ref.Val) ref.Val {
+	v, found := m.Find(key)
+	if found {
+		return v
+	}
+	return types.ValOrErr(key, "no such key: %v", key)
+}
+
+// Iterator produces a traits.Iterator which walks over the map keys.
+//
+// The Iterator is frequently used within comprehensions.
+func (m *MapValue) Iterator() traits.Iterator {
+	keys := make([]ref.Val, len(m.fieldMap))
+	i := 0
+	for k := range m.fieldMap {
+		keys[i] = types.String(k)
+		i++
+	}
+	return &baseMapIterator{
+		baseVal: &baseVal{},
+		keys:    keys,
+	}
+}
+
+// Size returns the number of keys in the map.
+func (m *MapValue) Size() ref.Val {
+	return types.Int(len(m.Fields))
+}
+
+// Type returns the CEL ref.Type for the map.
+func (m *MapValue) Type() ref.Type {
+	return types.MapType
+}
+
+// Value returns the Go-native representation of the MapValue.
+func (m *MapValue) Value() interface{} {
+	return m
+}
+
+type baseMapIterator struct {
+	*baseVal
+	keys []ref.Val
+	idx  int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *baseMapIterator) HasNext() ref.Val {
+	if it.idx < len(it.keys) {
+		return types.True
+	}
+	return types.False
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *baseMapIterator) Next() ref.Val {
+	key := it.keys[it.idx]
+	it.idx++
+	return key
+}
+
+// Type implements the CEL ref.Val interface method.
+func (it *baseMapIterator) Type() ref.Type {
+	return types.IteratorType
+}
+
+// NewField returns a Field instance with an empty DynValue that refers to the
+// specified parse node id and field name.
+func NewField(id int64, name string) *Field {
+	return &Field{
+		ID:   id,
+		Name: name,
+		Ref:  NewEmptyDynValue(),
+	}
+}
+
+// Field specifies a field name and a reference to a dynamic value.
+type Field struct {
+	ID   int64
+	Name string
+	Ref  *DynValue
+}
+
+// NewListValue returns an empty ListValue instance.
+func NewListValue() *ListValue {
+	return &ListValue{
+		Entries: []*DynValue{},
+	}
+}
+
+// ListValue contains a list of dynamically typed entries.
+type ListValue struct {
+	Entries      []*DynValue
+	initValueSet sync.Once
+	valueSet     map[ref.Val]struct{}
+}
+
+// Add concatenates two lists together to produce a new CEL list value.
+func (lv *ListValue) Add(other ref.Val) ref.Val {
+	oArr, isArr := other.(traits.Lister)
+	if !isArr {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	szLeft := len(lv.Entries)
+	szRight := int(oArr.Size().(types.Int))
+	sz := szLeft + szRight
+	combo := make([]ref.Val, sz)
+	for i := 0; i < szLeft; i++ {
+		combo[i] = lv.Entries[i].ExprValue()
+	}
+	for i := 0; i < szRight; i++ {
+		combo[i+szLeft] = oArr.Get(types.Int(i))
+	}
+	return types.DefaultTypeAdapter.NativeToValue(combo)
+}
+
+// Append adds another entry into the ListValue.
+func (lv *ListValue) Append(entry *DynValue) {
+	lv.Entries = append(lv.Entries, entry)
+	// The append resets all previously built indices.
+	lv.initValueSet = sync.Once{}
+}
+
+// Contains returns whether the input `val` is equal to an element in the list.
+//
+// If any pair-wise comparison between the input value and the list element is an error, the
+// operation will return an error.
+func (lv *ListValue) Contains(val ref.Val) ref.Val {
+	if types.IsUnknownOrError(val) {
+		return val
+	}
+	lv.initValueSet.Do(lv.finalizeValueSet)
+	if lv.valueSet != nil {
+		_, found := lv.valueSet[val]
+		if found {
+			return types.True
+		}
+		// Instead of returning false, ensure that CEL's heterogeneous equality constraint
+		// is satisfied by allowing pair-wise equality behavior to determine the outcome.
+	}
+	var err ref.Val
+	sz := len(lv.Entries)
+	for i := 0; i < sz; i++ {
+		elem := lv.Entries[i]
+		cmp := elem.Equal(val)
+		b, ok := cmp.(types.Bool)
+		if !ok && err == nil {
+			err = types.MaybeNoSuchOverloadErr(cmp)
+		}
+		if b == types.True {
+			return types.True
+		}
+	}
+	if err != nil {
+		return err
+	}
+	return types.False
+}
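+
+// exampleListContains is an illustrative sketch, not upstream code: for lists
+// holding only scalars, the lazily built value set turns membership checks
+// into a single map lookup instead of pair-wise Equal calls.
+func exampleListContains() ref.Val {
+	lv := NewListValue()
+	entry, _ := NewDynValue(1, "a")
+	lv.Append(entry)
+	return lv.Contains(types.String("a")) // types.True via the value-set path
+}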
+
+// ConvertToNative is an implementation of the CEL ref.Val method used to adapt between CEL types
+// and Go-native array-like types.
+func (lv *ListValue) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	// Non-list conversion.
+	if typeDesc.Kind() != reflect.Slice &&
+		typeDesc.Kind() != reflect.Array &&
+		typeDesc.Kind() != reflect.Interface {
+		return nil, fmt.Errorf("type conversion error from list to '%v'", typeDesc)
+	}
+
+	// If the list is already assignable to the desired type return it.
+	if reflect.TypeOf(lv).AssignableTo(typeDesc) {
+		return lv, nil
+	}
+
+	// List conversion.
+	otherElem := typeDesc.Elem()
+
+	// Allow the element ConvertToNative() function to determine whether conversion is possible.
+	sz := len(lv.Entries)
+	nativeList := reflect.MakeSlice(typeDesc, int(sz), int(sz))
+	for i := 0; i < sz; i++ {
+		elem := lv.Entries[i]
+		nativeElemVal, err := elem.ConvertToNative(otherElem)
+		if err != nil {
+			return nil, err
+		}
+		nativeList.Index(int(i)).Set(reflect.ValueOf(nativeElemVal))
+	}
+	return nativeList.Interface(), nil
+}
+
+// ConvertToType converts the ListValue to another CEL type.
+func (lv *ListValue) ConvertToType(t ref.Type) ref.Val {
+	switch t {
+	case types.ListType:
+		return lv
+	case types.TypeType:
+		return types.ListType
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", ListType, t)
+}
+
+// Equal returns true if two lists are of the same size, and the values at each index are also
+// equal.
+func (lv *ListValue) Equal(other ref.Val) ref.Val {
+	oArr, isArr := other.(traits.Lister)
+	if !isArr {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	sz := types.Int(len(lv.Entries))
+	if sz != oArr.Size() {
+		return types.False
+	}
+	for i := types.Int(0); i < sz; i++ {
+		cmp := lv.Get(i).Equal(oArr.Get(i))
+		if cmp != types.True {
+			return cmp
+		}
+	}
+	return types.True
+}
+
+// Get returns the value at the given index.
+//
+// If the index is negative or greater than the size of the list, an error is returned.
+func (lv *ListValue) Get(idx ref.Val) ref.Val {
+	iv, isInt := idx.(types.Int)
+	if !isInt {
+		return types.ValOrErr(idx, "unsupported index: %v", idx)
+	}
+	i := int(iv)
+	if i < 0 || i >= len(lv.Entries) {
+		return types.NewErr("index out of bounds: %v", idx)
+	}
+	return lv.Entries[i].ExprValue()
+}
+
+// Iterator produces a traits.Iterator suitable for use in CEL comprehension macros.
+func (lv *ListValue) Iterator() traits.Iterator {
+	return &baseListIterator{
+		getter: lv.Get,
+		sz:     len(lv.Entries),
+	}
+}
+
+// Size returns the number of elements in the list.
+func (lv *ListValue) Size() ref.Val {
+	return types.Int(len(lv.Entries))
+}
+
+// Type returns the CEL ref.Type for the list.
+func (lv *ListValue) Type() ref.Type {
+	return types.ListType
+}
+
+// Value returns the Go-native value.
+func (lv *ListValue) Value() interface{} {
+	return lv
+}
+
+// finalizeValueSet inspects the ListValue entries in order to make internal optimizations once all list
+// entries are known.
+func (lv *ListValue) finalizeValueSet() {
+	valueSet := make(map[ref.Val]struct{})
+	for _, e := range lv.Entries {
+		switch e.value.(type) {
+		case bool, float64, int64, string, uint64, types.Null, PlainTextValue:
+			valueSet[e.ExprValue()] = struct{}{}
+		default:
+			lv.valueSet = nil
+			return
+		}
+	}
+	lv.valueSet = valueSet
+}
+
+type baseVal struct{}
+
+func (*baseVal) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	return nil, fmt.Errorf("unsupported native conversion to: %v", typeDesc)
+}
+
+func (*baseVal) ConvertToType(t ref.Type) ref.Val {
+	return types.NewErr("unsupported type conversion to: %v", t)
+}
+
+func (*baseVal) Equal(other ref.Val) ref.Val {
+	return types.NewErr("unsupported equality test between instances")
+}
+
+func (v *baseVal) Value() interface{} {
+	return nil
+}
+
+type baseListIterator struct {
+	*baseVal
+	getter func(idx ref.Val) ref.Val
+	sz     int
+	idx    int
+}
+
+func (it *baseListIterator) HasNext() ref.Val {
+	if it.idx < it.sz {
+		return types.True
+	}
+	return types.False
+}
+
+func (it *baseListIterator) Next() ref.Val {
+	v := it.getter(types.Int(it.idx))
+	it.idx++
+	return v
+}
+
+func (it *baseListIterator) Type() ref.Type {
+	return types.IteratorType
+}
+
+func celBool(pred bool) ref.Val {
+	if pred {
+		return types.True
+	}
+	return types.False
+}
+
+var unknownType = &DeclType{name: "unknown", MinSerializedSize: 1}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS
new file mode 100644
index 0000000000..87c80edbbb
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+  - sttts
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go
new file mode 100644
index 0000000000..8f4e60f549
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package request
+
+import (
+	"context"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apiserver/pkg/authentication/user"
+)
+
+// The key type is unexported to prevent collisions
+type key int
+
+const (
+	// namespaceKey is the context key for the request namespace.
+	namespaceKey key = iota
+
+	// userKey is the context key for the request user.
+	userKey
+)
+
+// NewContext instantiates a base context object for request flows.
+func NewContext() context.Context {
+	return context.TODO()
+}
+
+// NewDefaultContext instantiates a base context object for request flows in the default namespace
+func NewDefaultContext() context.Context {
+	return WithNamespace(NewContext(), metav1.NamespaceDefault)
+}
+
+// WithValue returns a copy of parent in which the value associated with key is val.
+func WithValue(parent context.Context, key interface{}, val interface{}) context.Context {
+	return context.WithValue(parent, key, val)
+}
+
+// WithNamespace returns a copy of parent in which the namespace value is set
+func WithNamespace(parent context.Context, namespace string) context.Context {
+	return WithValue(parent, namespaceKey, namespace)
+}
+
+// NamespaceFrom returns the value of the namespace key on the ctx
+func NamespaceFrom(ctx context.Context) (string, bool) {
+	namespace, ok := ctx.Value(namespaceKey).(string)
+	return namespace, ok
+}
+
+// NamespaceValue returns the value of the namespace key on the ctx, or the empty string if none
+func NamespaceValue(ctx context.Context) string {
+	namespace, _ := NamespaceFrom(ctx)
+	return namespace
+}
+
+// WithUser returns a copy of parent in which the user value is set
+func WithUser(parent context.Context, user user.Info) context.Context {
+	return WithValue(parent, userKey, user)
+}
+
+// UserFrom returns the value of the user key on the ctx
+func UserFrom(ctx context.Context) (user.Info, bool) {
+	user, ok := ctx.Value(userKey).(user.Info)
+	return user, ok
+}
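+
+// exampleNamespaceContext is an illustrative sketch, not upstream code:
+// namespace values round-trip through the request context helpers above.
+func exampleNamespaceContext() (string, bool) {
+	ctx := WithNamespace(NewContext(), "team-a")
+	return NamespaceFrom(ctx) // "team-a", true
+}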
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go
new file mode 100644
index 0000000000..96da6f2b9c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package request contains everything around extracting info from
+// a http request object.
+// TODO: this package is temporary. Handlers must move into pkg/apiserver/handlers to avoid dependency cycle
+package request // import "k8s.io/apiserver/pkg/endpoints/request"
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go
new file mode 100644
index 0000000000..7d58cf3ad9
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package request
+
+import (
+	"context"
+	"time"
+)
+
+type requestReceivedTimestampKeyType int
+
+// requestReceivedTimestampKey is the ReceivedTimestamp (the time the request reached the apiserver)
+// key for the context.
+const requestReceivedTimestampKey requestReceivedTimestampKeyType = iota
+
+// WithReceivedTimestamp returns a copy of parent context in which the ReceivedTimestamp
+// (the time the request reached the apiserver) is set.
+//
+// If the specified ReceivedTimestamp is zero, no value is set and the parent context is returned as is.
+func WithReceivedTimestamp(parent context.Context, receivedTimestamp time.Time) context.Context {
+	if receivedTimestamp.IsZero() {
+		return parent
+	}
+	return WithValue(parent, requestReceivedTimestampKey, receivedTimestamp)
+}
+
+// ReceivedTimestampFrom returns the value of the ReceivedTimestamp key from the specified context.
+func ReceivedTimestampFrom(ctx context.Context) (time.Time, bool) {
+	info, ok := ctx.Value(requestReceivedTimestampKey).(time.Time)
+	return info, ok
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go
new file mode 100644
index 0000000000..808943d161
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go
@@ -0,0 +1,305 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package request
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/validation/path"
+	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
+	metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	genericfeatures "k8s.io/apiserver/pkg/features"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+
+	"k8s.io/klog/v2"
+)
+
+// LongRunningRequestCheck is a predicate which is true for long-running http requests.
+type LongRunningRequestCheck func(r *http.Request, requestInfo *RequestInfo) bool
+
+type RequestInfoResolver interface {
+	NewRequestInfo(req *http.Request) (*RequestInfo, error)
+}
+
+// RequestInfo holds information parsed from the http.Request
+type RequestInfo struct {
+	// IsResourceRequest indicates whether or not the request is for an API resource or subresource
+	IsResourceRequest bool
+	// Path is the URL path of the request
+	Path string
+	// Verb is the kube verb associated with API requests (this includes things like list and
+	// watch), not the http verb. For non-resource requests, this is the lowercase http verb.
+	Verb string
+
+	APIPrefix  string
+	APIGroup   string
+	APIVersion string
+	Namespace  string
+	// Resource is the name of the resource being requested.  This is not the kind.  For example: pods
+	Resource string
+	// Subresource is the name of the subresource being requested.  This is a different resource, scoped to the parent resource, but it may have a different kind.
+	// For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod"
+	// (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding".
+	Subresource string
+	// Name is empty for some verbs, but if the request directly indicates a name (not in body content) then this field is filled in.
+	Name string
+	// Parts are the path parts for the request, always starting with /{resource}/{name}
+	Parts []string
+
+	// FieldSelector contains the unparsed field selector from a request.  It is only present if the apiserver
+	// honors field selectors for the verb this request is associated with.
+	FieldSelector string
+	// LabelSelector contains the unparsed label selector from a request.  It is only present if the apiserver
+	// honors label selectors for the verb this request is associated with.
+	LabelSelector string
+}
+
+// specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal
+// CRUDdy GET/POST/PUT/DELETE actions on REST objects.
+// TODO: find a way to keep this up to date automatically.  Maybe dynamically populate list as handlers added to
+// master's Mux.
+var specialVerbs = sets.NewString("proxy", "watch")
+
+// specialVerbsNoSubresources contains root verbs which do not allow subresources
+var specialVerbsNoSubresources = sets.NewString("proxy")
+
+// namespaceSubresources contains subresources of namespace
+// this list allows the parser to distinguish between a namespace subresource, and a namespaced resource
+var namespaceSubresources = sets.NewString("status", "finalize")
+
+// verbsWithSelectors is the list of verbs which support fieldSelector and labelSelector parameters
+var verbsWithSelectors = sets.NewString("list", "watch", "deletecollection")
+
+// NamespaceSubResourcesForTest exports namespaceSubresources for testing in pkg/controlplane/master_test.go, so we never drift
+var NamespaceSubResourcesForTest = sets.NewString(namespaceSubresources.List()...)
+
+type RequestInfoFactory struct {
+	APIPrefixes          sets.String // without leading and trailing slashes
+	GrouplessAPIPrefixes sets.String // without leading and trailing slashes
+}
+
+// TODO write an integration test against the swagger doc to test the RequestInfo and match up behavior to responses
+// NewRequestInfo returns the information from the http request.  If error is not nil, RequestInfo holds the information as best it is known before the failure
+// It handles both resource and non-resource requests and fills in all the pertinent information for each.
+// Valid Inputs:
+// Resource paths
+// /apis/{api-group}/{version}/namespaces
+// /api/{version}/namespaces
+// /api/{version}/namespaces/{namespace}
+// /api/{version}/namespaces/{namespace}/{resource}
+// /api/{version}/namespaces/{namespace}/{resource}/{resourceName}
+// /api/{version}/{resource}
+// /api/{version}/{resource}/{resourceName}
+//
+// Special verbs without subresources:
+// /api/{version}/proxy/{resource}/{resourceName}
+// /api/{version}/proxy/namespaces/{namespace}/{resource}/{resourceName}
+//
+// Special verbs with subresources:
+// /api/{version}/watch/{resource}
+// /api/{version}/watch/namespaces/{namespace}/{resource}
+//
+// NonResource paths
+// /apis/{api-group}/{version}
+// /apis/{api-group}
+// /apis
+// /api/{version}
+// /api
+// /healthz
+// /
+func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, error) {
+	// start with a non-resource request until proven otherwise
+	requestInfo := RequestInfo{
+		IsResourceRequest: false,
+		Path:              req.URL.Path,
+		Verb:              strings.ToLower(req.Method),
+	}
+
+	currentParts := splitPath(req.URL.Path)
+	if len(currentParts) < 3 {
+		// return a non-resource request
+		return &requestInfo, nil
+	}
+
+	if !r.APIPrefixes.Has(currentParts[0]) {
+		// return a non-resource request
+		return &requestInfo, nil
+	}
+	requestInfo.APIPrefix = currentParts[0]
+	currentParts = currentParts[1:]
+
+	if !r.GrouplessAPIPrefixes.Has(requestInfo.APIPrefix) {
+		// one part (APIPrefix) has already been consumed, so this is actually "do we have four parts?"
+		if len(currentParts) < 3 {
+			// return a non-resource request
+			return &requestInfo, nil
+		}
+
+		requestInfo.APIGroup = currentParts[0]
+		currentParts = currentParts[1:]
+	}
+
+	requestInfo.IsResourceRequest = true
+	requestInfo.APIVersion = currentParts[0]
+	currentParts = currentParts[1:]
+
+	// handle input of form /{specialVerb}/*
+	verbViaPathPrefix := false
+	if specialVerbs.Has(currentParts[0]) {
+		if len(currentParts) < 2 {
+			return &requestInfo, fmt.Errorf("unable to determine kind and namespace from url, %v", req.URL)
+		}
+
+		requestInfo.Verb = currentParts[0]
+		currentParts = currentParts[1:]
+		verbViaPathPrefix = true
+
+	} else {
+		switch req.Method {
+		case "POST":
+			requestInfo.Verb = "create"
+		case "GET", "HEAD":
+			requestInfo.Verb = "get"
+		case "PUT":
+			requestInfo.Verb = "update"
+		case "PATCH":
+			requestInfo.Verb = "patch"
+		case "DELETE":
+			requestInfo.Verb = "delete"
+		default:
+			requestInfo.Verb = ""
+		}
+	}
+
+	// URL forms: /namespaces/{namespace}/{kind}/*, where parts are adjusted to be relative to kind
+	if currentParts[0] == "namespaces" {
+		if len(currentParts) > 1 {
+			requestInfo.Namespace = currentParts[1]
+
+			// if there is another step after the namespace name and it is not a known namespace subresource
+			// move currentParts to include it as a resource in its own right
+			if len(currentParts) > 2 && !namespaceSubresources.Has(currentParts[2]) {
+				currentParts = currentParts[2:]
+			}
+		}
+	} else {
+		requestInfo.Namespace = metav1.NamespaceNone
+	}
+
+	// parsing successful, so we now know the proper value for .Parts
+	requestInfo.Parts = currentParts
+
+	// parts look like: resource/resourceName/subresource/other/stuff/we/don't/interpret
+	switch {
+	case len(requestInfo.Parts) >= 3 && !specialVerbsNoSubresources.Has(requestInfo.Verb):
+		requestInfo.Subresource = requestInfo.Parts[2]
+		fallthrough
+	case len(requestInfo.Parts) >= 2:
+		requestInfo.Name = requestInfo.Parts[1]
+		fallthrough
+	case len(requestInfo.Parts) >= 1:
+		requestInfo.Resource = requestInfo.Parts[0]
+	}
+
+	// if there's no name on the request and we thought it was a get before, then the actual verb is a list or a watch
+	if len(requestInfo.Name) == 0 && requestInfo.Verb == "get" {
+		opts := metainternalversion.ListOptions{}
+		if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil {
+			// An error in parsing request will result in default to "list" and not setting "name" field.
+			klog.ErrorS(err, "Couldn't parse request", "request", req.URL.Query())
+			// Reset opts to not rely on partial results from parsing.
+			// However, if watch is set, let's report it.
+			opts = metainternalversion.ListOptions{}
+			if values := req.URL.Query()["watch"]; len(values) > 0 {
+				switch strings.ToLower(values[0]) {
+				case "false", "0":
+				default:
+					opts.Watch = true
+				}
+			}
+		}
+
+		if opts.Watch {
+			requestInfo.Verb = "watch"
+		} else {
+			requestInfo.Verb = "list"
+		}
+
+		if opts.FieldSelector != nil {
+			if name, ok := opts.FieldSelector.RequiresExactMatch("metadata.name"); ok {
+				if len(path.IsValidPathSegmentName(name)) == 0 {
+					requestInfo.Name = name
+				}
+			}
+		}
+	}
+
+	// if there's no name on the request and we thought it was a delete before, then the actual verb is deletecollection
+	if len(requestInfo.Name) == 0 && requestInfo.Verb == "delete" {
+		requestInfo.Verb = "deletecollection"
+	}
+
+	if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) {
+		// Don't support selector authorization on requests that used the deprecated verb-via-path mechanism, since they don't support selectors consistently.
+		// There are multi-object and single-object watch endpoints, and only the multi-object one supports selectors.
+		if !verbViaPathPrefix && verbsWithSelectors.Has(requestInfo.Verb) {
+			// Interestingly, these are parsed above, but given the current structure, if anything in the
+			// listOptions fails to decode, the field and label selectors are lost.
+			// Therefore, do a straight query param read here.
+			if vals := req.URL.Query()["fieldSelector"]; len(vals) > 0 {
+				requestInfo.FieldSelector = vals[0]
+			}
+			if vals := req.URL.Query()["labelSelector"]; len(vals) > 0 {
+				requestInfo.LabelSelector = vals[0]
+			}
+		}
+	}
+
+	return &requestInfo, nil
+}
+
+type requestInfoKeyType int
+
+// requestInfoKey is the RequestInfo key for the context. It's of private type here. Because
+// keys are interfaces and interfaces are equal when the type and the value are equal, this
+// does not conflict with the keys defined in pkg/api.
+const requestInfoKey requestInfoKeyType = iota
+
+// WithRequestInfo returns a copy of parent in which the request info value is set
+func WithRequestInfo(parent context.Context, info *RequestInfo) context.Context {
+	return WithValue(parent, requestInfoKey, info)
+}
+
+// RequestInfoFrom returns the value of the RequestInfo key on the ctx
+func RequestInfoFrom(ctx context.Context) (*RequestInfo, bool) {
+	info, ok := ctx.Value(requestInfoKey).(*RequestInfo)
+	return info, ok
+}
+
+// splitPath returns the segments for a URL path.
+func splitPath(path string) []string {
+	path = strings.Trim(path, "/")
+	if path == "" {
+		return []string{}
+	}
+	return strings.Split(path, "/")
+}
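
For illustration, here is a minimal sketch (not part of the vendored diff) of how these context helpers are typically wired into an HTTP handler chain. It assumes the RequestInfoFactory defined earlier in this package; the API prefixes passed to it are illustrative.

package main

import (
	"fmt"
	"log"
	"net/http"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apiserver/pkg/endpoints/request"
)

func main() {
	// The prefixes are assumptions for this sketch; real servers configure
	// them from their serving options.
	factory := &request.RequestInfoFactory{
		APIPrefixes:          sets.NewString("api", "apis"),
		GrouplessAPIPrefixes: sets.NewString("api"),
	}

	inner := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		// Downstream handlers read the parsed RequestInfo from the context.
		if info, ok := request.RequestInfoFrom(req.Context()); ok {
			fmt.Fprintf(w, "verb=%s resource=%s namespace=%s name=%s\n",
				info.Verb, info.Resource, info.Namespace, info.Name)
		}
	})

	// Middleware parses the request and attaches RequestInfo to the context.
	withInfo := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		info, err := factory.NewRequestInfo(req)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		ctx := request.WithRequestInfo(req.Context(), info)
		inner.ServeHTTP(w, req.WithContext(ctx))
	})

	log.Fatal(http.ListenAndServe(":8080", withInfo))
}
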
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/server_shutdown_signal.go b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/server_shutdown_signal.go
new file mode 100644
index 0000000000..d06275b833
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/server_shutdown_signal.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package request
+
+import (
+	"context"
+)
+
+// The serverShutdownSignalKeyType type is unexported to prevent collisions
+type serverShutdownSignalKeyType int
+
+// serverShutdownSignalKey is the context key for storing the
+// watch termination interface instance for a WATCH request.
+const serverShutdownSignalKey serverShutdownSignalKeyType = iota
+
+// ServerShutdownSignal is associated with the request context so
+// the request handler logic has access to signals related to
+// server shutdown events
+type ServerShutdownSignal interface {
+	// ShuttingDown is signaled when the apiserver is no longer receiving any new requests
+	ShuttingDown() <-chan struct{}
+}
+
+// ServerShutdownSignalFrom returns the ServerShutdownSignal instance
+// associated with the request context.
+// If there is no ServerShutdownSignal associated with the context,
+// nil is returned.
+func ServerShutdownSignalFrom(ctx context.Context) ServerShutdownSignal {
+	ev, _ := ctx.Value(serverShutdownSignalKey).(ServerShutdownSignal)
+	return ev
+}
+
+// WithServerShutdownSignal returns a new context that stores
+// the ServerShutdownSignal interface instance.
+func WithServerShutdownSignal(parent context.Context, window ServerShutdownSignal) context.Context {
+	if ServerShutdownSignalFrom(parent) != nil {
+		return parent // Avoid double registering.
+	}
+
+	return context.WithValue(parent, serverShutdownSignalKey, window)
+}
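
A hypothetical usage sketch (not part of the diff) of the shutdown signal: a long-running watch handler selects on ShuttingDown() to terminate gracefully. The fakeSignal type here is a stand-in for the real signal the server wires into the context.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apiserver/pkg/endpoints/request"
)

// fakeSignal is a toy ServerShutdownSignal implementation for illustration.
type fakeSignal struct{ ch chan struct{} }

func (f fakeSignal) ShuttingDown() <-chan struct{} { return f.ch }

func main() {
	sig := fakeSignal{ch: make(chan struct{})}
	ctx := request.WithServerShutdownSignal(context.Background(), sig)

	go func() {
		time.Sleep(100 * time.Millisecond)
		close(sig.ch) // simulate server shutdown
	}()

	// A watch-style loop: keep serving events until the server shuts down.
	if s := request.ServerShutdownSignalFrom(ctx); s != nil {
		select {
		case <-s.ShuttingDown():
			fmt.Println("apiserver is shutting down, terminating watch")
		case <-time.After(5 * time.Second):
			fmt.Println("timed out")
		}
	}
}
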
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go
new file mode 100644
index 0000000000..435af8e7cf
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go
@@ -0,0 +1,311 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package request
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"k8s.io/utils/clock"
+)
+
+func sumDuration(d1 time.Duration, d2 time.Duration) time.Duration {
+	return d1 + d2
+}
+
+func maxDuration(d1 time.Duration, d2 time.Duration) time.Duration {
+	if d1 > d2 {
+		return d1
+	}
+	return d2
+}
+
+// DurationTracker is a simple interface for tracking function durations;
+// it is safe for concurrent use by multiple goroutines.
+type DurationTracker interface {
+	// Track measures time spent in the given function f and
+	// aggregates measured duration using aggregateFunction.
+	// If Track is invoked with f from multiple goroutines concurrently,
+	// then f must be safe to invoke concurrently from multiple goroutines.
+	Track(f func())
+
+	// TrackDuration tracks latency from the specified duration
+	// and aggregates it using aggregateFunction.
+	TrackDuration(time.Duration)
+
+	// GetLatency returns the total latency incurred so far
+	GetLatency() time.Duration
+}
+
+// durationTracker implements DurationTracker by measuring function time
+// using the given clock and aggregating durations with the given aggregate function.
+type durationTracker struct {
+	clock             clock.Clock
+	latency           time.Duration
+	mu                sync.Mutex
+	aggregateFunction func(time.Duration, time.Duration) time.Duration
+}
+
+// Track measures the time spent in the given function and aggregates the
+// measured duration using aggregateFunction.
+func (t *durationTracker) Track(f func()) {
+	startedAt := t.clock.Now()
+	defer func() {
+		duration := t.clock.Since(startedAt)
+		t.mu.Lock()
+		defer t.mu.Unlock()
+		t.latency = t.aggregateFunction(t.latency, duration)
+	}()
+
+	f()
+}
+
+// TrackDuration tracks latency from the given duration
+// using aggregateFunction
+func (t *durationTracker) TrackDuration(d time.Duration) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.latency = t.aggregateFunction(t.latency, d)
+}
+
+// GetLatency returns aggregated latency tracked by a tracker
+func (t *durationTracker) GetLatency() time.Duration {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.latency
+}
+
+func newSumLatencyTracker(c clock.Clock) DurationTracker {
+	return &durationTracker{
+		clock:             c,
+		aggregateFunction: sumDuration,
+	}
+}
+
+func newMaxLatencyTracker(c clock.Clock) DurationTracker {
+	return &durationTracker{
+		clock:             c,
+		aggregateFunction: maxDuration,
+	}
+}
+
+// LatencyTrackers stores trackers used to measure latency incurred in
+// components within the apiserver.
+type LatencyTrackers struct {
+	// MutatingWebhookTracker tracks the latency incurred in mutating webhook(s).
+	// Since mutating webhooks are done sequentially, latency
+	// is aggregated using sum function.
+	MutatingWebhookTracker DurationTracker
+
+	// ValidatingWebhookTracker tracks the latency incurred in validating webhook(s).
+	// Validate webhooks are done in parallel, so max function is used.
+	ValidatingWebhookTracker DurationTracker
+
+	// APFQueueWaitTracker tracks the latency incurred waiting in
+	// priority & fairness queues.
+	APFQueueWaitTracker DurationTracker
+
+	// StorageTracker tracks the latency incurred inside the storage layer,
+	// it accounts for the time it takes to send data to the underlying
+	// storage layer (etcd) and get the complete response back.
+	// If a request involves N (N>=1) round trips to the underlying
+	// storage layer, the latency will account for the total duration
+	// from these N round trips.
+	// It does not include the time incurred in admission, or validation.
+	StorageTracker DurationTracker
+
+	// TransformTracker tracks the latency incurred in transforming the
+	// response object(s) returned from the underlying storage layer.
+	// This includes transforming the object to user's desired form
+	// (i.e. as Table), and also setting appropriate API-level fields.
+	// This does not include the latency incurred in serialization
+	// (json or protobuf) of the response object or writing
+	// of it to the http ResponseWriter object.
+	TransformTracker DurationTracker
+
+	// SerializationTracker tracks the latency incurred in serialization
+	// (json or protobuf) of the response object.
+	// NOTE: serialization and writing of the serialized raw bytes to the
+	// associated http ResponseWriter object are interleaved, and hence
+	// the latency measured here will include the time spent writing the
+	// serialized raw bytes to the http ResponseWriter object.
+	SerializationTracker DurationTracker
+
+	// ResponseWriteTracker tracks the latency incurred in writing the
+	// serialized raw bytes to the http ResponseWriter object (via the
+	// Write method) associated with the request.
+	// The Write method can be invoked multiple times, so we use a
+	// latency tracker that sums up the duration from each call.
+	ResponseWriteTracker DurationTracker
+
+	// DecodeTracker is used to track latency incurred inside the function
+	// that takes an object returned from the underlying storage layer
+	// (etcd) and performs decoding of the response object.
+	// When called multiple times, the latency incurred inside the
+	// decode func each time will be summed up.
+	DecodeTracker DurationTracker
+}
+
+type latencyTrackersKeyType int
+
+// latencyTrackersKey is the key that associates a LatencyTrackers
+// instance with the request context.
+const latencyTrackersKey latencyTrackersKeyType = iota
+
+// WithLatencyTrackers returns a copy of parent context to which an
+// instance of LatencyTrackers is added.
+func WithLatencyTrackers(parent context.Context) context.Context {
+	return WithLatencyTrackersAndCustomClock(parent, clock.RealClock{})
+}
+
+// WithLatencyTrackersAndCustomClock returns a copy of parent context to which
+// an instance of LatencyTrackers is added. Trackers use the given clock.
+func WithLatencyTrackersAndCustomClock(parent context.Context, c clock.Clock) context.Context {
+	return WithValue(parent, latencyTrackersKey, &LatencyTrackers{
+		MutatingWebhookTracker:   newSumLatencyTracker(c),
+		ValidatingWebhookTracker: newMaxLatencyTracker(c),
+		APFQueueWaitTracker:      newMaxLatencyTracker(c),
+		StorageTracker:           newSumLatencyTracker(c),
+		TransformTracker:         newSumLatencyTracker(c),
+		SerializationTracker:     newSumLatencyTracker(c),
+		ResponseWriteTracker:     newSumLatencyTracker(c),
+		DecodeTracker:            newSumLatencyTracker(c),
+	})
+}
+
+// LatencyTrackersFrom returns the associated LatencyTrackers instance
+// from the specified context.
+func LatencyTrackersFrom(ctx context.Context) (*LatencyTrackers, bool) {
+	wd, ok := ctx.Value(latencyTrackersKey).(*LatencyTrackers)
+	return wd, ok && wd != nil
+}
+
+// TrackTransformResponseObjectLatency is used to track latency incurred
+// inside the function that takes an object returned from the underlying
+// storage layer (etcd) and performs any necessary transformations
+// of the response object. This does not include the latency incurred in
+// serialization (json or protobuf) of the response object or writing of
+// it to the http ResponseWriter object.
+// When called multiple times, the latency incurred inside the
+// transform func each time will be summed up.
+func TrackTransformResponseObjectLatency(ctx context.Context, transform func()) {
+	if tracker, ok := LatencyTrackersFrom(ctx); ok {
+		tracker.TransformTracker.Track(transform)
+		return
+	}
+
+	transform()
+}
+
+// TrackStorageLatency is used to track latency incurred
+// inside the underlying storage layer.
+// When called multiple times, the latency provided will be summed up.
+func TrackStorageLatency(ctx context.Context, d time.Duration) {
+	if tracker, ok := LatencyTrackersFrom(ctx); ok {
+		tracker.StorageTracker.TrackDuration(d)
+	}
+}
+
+// TrackSerializeResponseObjectLatency is used to track latency incurred in
+// serialization (json or protobuf) of the response object.
+// When called multiple times, the latency provided will be summed up.
+func TrackSerializeResponseObjectLatency(ctx context.Context, f func()) {
+	if tracker, ok := LatencyTrackersFrom(ctx); ok {
+		tracker.SerializationTracker.Track(f)
+		return
+	}
+
+	f()
+}
+
+// TrackResponseWriteLatency is used to track latency incurred in writing
+// the serialized raw bytes to the http ResponseWriter object (via the
+// Write method) associated with the request.
+// When called multiple times, the latency provided will be summed up.
+func TrackResponseWriteLatency(ctx context.Context, d time.Duration) {
+	if tracker, ok := LatencyTrackersFrom(ctx); ok {
+		tracker.ResponseWriteTracker.TrackDuration(d)
+	}
+}
+
+// TrackAPFQueueWaitLatency is used to track latency incurred
+// by priority and fairness queues.
+func TrackAPFQueueWaitLatency(ctx context.Context, d time.Duration) {
+	if tracker, ok := LatencyTrackersFrom(ctx); ok {
+		tracker.APFQueueWaitTracker.TrackDuration(d)
+	}
+}
+
+// TrackDecodeLatency is used to track latency incurred inside the function
+// that takes an object returned from the underlying storage layer
+// (etcd) and performs decoding of the response object.
+// When called multiple times, the latency incurred inside the
+// decode func each time will be summed up.
+func TrackDecodeLatency(ctx context.Context, d time.Duration) {
+	if tracker, ok := LatencyTrackersFrom(ctx); ok {
+		tracker.DecodeTracker.TrackDuration(d)
+	}
+}
+
+// AuditAnnotationsFromLatencyTrackers will inspect each latency tracker
+// associated with the request context and return a set of audit
+// annotations that can be added to the API audit entry.
+func AuditAnnotationsFromLatencyTrackers(ctx context.Context) map[string]string {
+	const (
+		transformLatencyKey         = "apiserver.latency.k8s.io/transform-response-object"
+		storageLatencyKey           = "apiserver.latency.k8s.io/etcd"
+		serializationLatencyKey     = "apiserver.latency.k8s.io/serialize-response-object"
+		responseWriteLatencyKey     = "apiserver.latency.k8s.io/response-write"
+		mutatingWebhookLatencyKey   = "apiserver.latency.k8s.io/mutating-webhook"
+		validatingWebhookLatencyKey = "apiserver.latency.k8s.io/validating-webhook"
+		decodeLatencyKey            = "apiserver.latency.k8s.io/decode-response-object"
+		apfQueueWaitLatencyKey      = "apiserver.latency.k8s.io/apf-queue-wait"
+	)
+
+	tracker, ok := LatencyTrackersFrom(ctx)
+	if !ok {
+		return nil
+	}
+
+	annotations := map[string]string{}
+	if latency := tracker.TransformTracker.GetLatency(); latency != 0 {
+		annotations[transformLatencyKey] = latency.String()
+	}
+	if latency := tracker.StorageTracker.GetLatency(); latency != 0 {
+		annotations[storageLatencyKey] = latency.String()
+	}
+	if latency := tracker.SerializationTracker.GetLatency(); latency != 0 {
+		annotations[serializationLatencyKey] = latency.String()
+	}
+	if latency := tracker.ResponseWriteTracker.GetLatency(); latency != 0 {
+		annotations[responseWriteLatencyKey] = latency.String()
+	}
+	if latency := tracker.MutatingWebhookTracker.GetLatency(); latency != 0 {
+		annotations[mutatingWebhookLatencyKey] = latency.String()
+	}
+	if latency := tracker.ValidatingWebhookTracker.GetLatency(); latency != 0 {
+		annotations[validatingWebhookLatencyKey] = latency.String()
+	}
+	if latency := tracker.DecodeTracker.GetLatency(); latency != 0 {
+		annotations[decodeLatencyKey] = latency.String()
+	}
+	if latency := tracker.APFQueueWaitTracker.GetLatency(); latency != 0 {
+		annotations[apfQueueWaitLatencyKey] = latency.String()
+	}
+	return annotations
+}
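
A minimal sketch (not part of the diff) exercising the trackers above with a fake clock from k8s.io/utils/clock/testing, then dumping the resulting audit annotations; the durations are illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apiserver/pkg/endpoints/request"
	testingclock "k8s.io/utils/clock/testing"
)

func main() {
	fakeClock := testingclock.NewFakeClock(time.Now())
	ctx := request.WithLatencyTrackersAndCustomClock(context.Background(), fakeClock)

	// Storage round trips are tracked with a sum aggregator: 20ms + 30ms = 50ms.
	request.TrackStorageLatency(ctx, 20*time.Millisecond)
	request.TrackStorageLatency(ctx, 30*time.Millisecond)

	// Transform latency is measured around the supplied function.
	request.TrackTransformResponseObjectLatency(ctx, func() {
		fakeClock.Step(5 * time.Millisecond) // simulate work
	})

	for k, v := range request.AuditAnnotationsFromLatencyTrackers(ctx) {
		fmt.Printf("%s=%s\n", k, v)
	}
	// Expected annotations (order varies):
	//   apiserver.latency.k8s.io/etcd=50ms
	//   apiserver.latency.k8s.io/transform-response-object=5ms
}
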
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/features/OWNERS b/hack/tools/vendor/k8s.io/apiserver/pkg/features/OWNERS
new file mode 100644
index 0000000000..3e1dd9f081
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/features/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - feature-approvers
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/hack/tools/vendor/k8s.io/apiserver/pkg/features/kube_features.go
new file mode 100644
index 0000000000..c23343346e
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/features/kube_features.go
@@ -0,0 +1,444 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package features
+
+import (
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/version"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/component-base/featuregate"
+)
+
+const (
+	// Every feature gate should add an entry here following this template:
+	//
+	// // owner: @username
+	// MyFeature featuregate.Feature = "MyFeature"
+	//
+	// Feature gates should be listed in alphabetical, case-sensitive
+	// (upper before any lower case character) order. This reduces the risk
+	// of code conflicts because changes are more likely to be scattered
+	// across the file.
+
+	// owner: @ivelichkovich, @tallclair
+	// stable: v1.30
+	// kep: https://kep.k8s.io/3716
+	//
+	// Enables usage of MatchConditions fields to use CEL expressions for matching on admission webhooks
+	AdmissionWebhookMatchConditions featuregate.Feature = "AdmissionWebhookMatchConditions"
+
+	// owner: @jefftree @alexzielenski
+	// stable: v1.30
+	//
+	// Enables a single HTTP endpoint /discovery/ which supports native HTTP
+	// caching with ETags containing all APIResources known to the apiserver.
+	AggregatedDiscoveryEndpoint featuregate.Feature = "AggregatedDiscoveryEndpoint"
+
+	// owner: @vinayakankugoyal
+	// kep: https://kep.k8s.io/4633
+	//
+	// Allows us to enable anonymous auth for only certain apiserver endpoints.
+	AnonymousAuthConfigurableEndpoints featuregate.Feature = "AnonymousAuthConfigurableEndpoints"
+
+	// owner: @stlaz @tkashem @dgrisonnet
+	// kep: https://kep.k8s.io/3926
+	//
+	// Enables the cluster admin to identify resources that fail to
+	// decrypt or fail to be decoded into an object, and introduces
+	// a new delete option to allow deletion of such corrupt
+	// resources using the Kubernetes API only.
+	AllowUnsafeMalformedObjectDeletion featuregate.Feature = "AllowUnsafeMalformedObjectDeletion"
+
+	// owner: @smarterclayton
+	// stable: 1.29
+	//
+	// Allow API clients to retrieve resource lists in chunks rather than
+	// all at once.
+	APIListChunking featuregate.Feature = "APIListChunking"
+
+	// owner: @ilackams
+	//
+	// Enables compression of REST responses (GET and LIST only)
+	APIResponseCompression featuregate.Feature = "APIResponseCompression"
+
+	// owner: @roycaihw
+	//
+	// Assigns each kube-apiserver an ID in a cluster.
+	APIServerIdentity featuregate.Feature = "APIServerIdentity"
+
+	// owner: @dashpole
+	//
+	// Add support for distributed tracing in the API Server
+	APIServerTracing featuregate.Feature = "APIServerTracing"
+
+	// owner: @linxiulei
+	//
+	// Enables serving watch requests in separate goroutines.
+	APIServingWithRoutine featuregate.Feature = "APIServingWithRoutine"
+
+	// owner: @deads2k
+	// kep: https://kep.k8s.io/4601
+	//
+	// Allows authorization to use field and label selectors.
+	AuthorizeWithSelectors featuregate.Feature = "AuthorizeWithSelectors"
+
+	// owner: @benluddy
+	// kep: https://kep.k8s.io/4222
+	//
+	// Enables CBOR as a supported encoding for requests and responses, and as the
+	// preferred storage encoding for custom resources.
+	CBORServingAndStorage featuregate.Feature = "CBORServingAndStorage"
+
+	// owner: @serathius
+	//
+	// Replaces watch cache hashmap implementation with a btree based one, bringing performance improvements.
+	BtreeWatchCache featuregate.Feature = "BtreeWatchCache"
+
+	// owner: @serathius
+	// Enables concurrent watch object decoding to avoid starving watch cache when conversion webhook is installed.
+	ConcurrentWatchObjectDecode featuregate.Feature = "ConcurrentWatchObjectDecode"
+
+	// owner: @jefftree
+	// kep: https://kep.k8s.io/4355
+	//
+	// Enables coordinated leader election in the API server
+	CoordinatedLeaderElection featuregate.Feature = "CoordinatedLeaderElection"
+
+	//
+	// Allows for updating watchcache resource version with progress notify events.
+	EfficientWatchResumption featuregate.Feature = "EfficientWatchResumption"
+
+	// owner: @aramase
+	// kep: https://kep.k8s.io/3299
+	// deprecated: v1.28
+	//
+	// Enables KMS v1 API for encryption at rest.
+	KMSv1 featuregate.Feature = "KMSv1"
+
+	// owner: @alexzielenski, @cici37, @jiahuif, @jpbetz
+	// kep: https://kep.k8s.io/3962
+	//
+	// Enables the MutatingAdmissionPolicy in Admission Chain
+	MutatingAdmissionPolicy featuregate.Feature = "MutatingAdmissionPolicy"
+
+	// owner: @jiahuif
+	// kep: https://kep.k8s.io/2887
+	//
+	// Enables populating "enum" field of OpenAPI schemas
+	// in the spec returned from kube-apiserver.
+	OpenAPIEnums featuregate.Feature = "OpenAPIEnums"
+
+	// owner: @caesarxuchao
+	// stable: 1.29
+	//
+	// Allow apiservers to show a count of remaining items in the response
+	// to a chunking list request.
+	RemainingItemCount featuregate.Feature = "RemainingItemCount"
+
+	// owner: @stlaz
+	//
+	// Enable kube-apiserver to accept UIDs via request header authentication.
+	// This will also make the kube-apiserver's API aggregator add UIDs via standard
+	// headers when forwarding requests to the servers serving the aggregated API.
+	RemoteRequestHeaderUID featuregate.Feature = "RemoteRequestHeaderUID"
+
+	// owner: @wojtek-t
+	//
+	// Enables resilient watchcache initialization to avoid controlplane
+	// overload.
+	ResilientWatchCacheInitialization featuregate.Feature = "ResilientWatchCacheInitialization"
+
+	// owner: @serathius
+	//
+	// Allow watch cache to create a watch on a dedicated RPC.
+	// This prevents watch cache from being starved by other watches.
+	SeparateCacheWatchRPC featuregate.Feature = "SeparateCacheWatchRPC"
+
+	// owner: @enj
+	//
+	// Enables http2 DOS mitigations for unauthenticated clients.
+	//
+	// Some known reasons to disable these mitigations:
+	//
+	// An API server that is fronted by an L7 load balancer that is set up
+	// to mitigate http2 attacks may opt to disable this protection to prevent
+	// unauthenticated clients from disabling connection reuse between the load
+	// balancer and the API server (many incoming connections could share the
+	// same backend connection).
+	//
+	// An API server that is on a private network may opt to disable this
+	// protection to prevent performance regressions for unauthenticated
+	// clients.
+	UnauthenticatedHTTP2DOSMitigation featuregate.Feature = "UnauthenticatedHTTP2DOSMitigation"
+
+	// owner: @jpbetz
+	// Resource create requests using generateName are retried automatically by the apiserver
+	// if the generated name conflicts with an existing resource name, up to a maximum of 7 retries.
+	RetryGenerateName featuregate.Feature = "RetryGenerateName"
+
+	// owner: @cici37
+	//
+	// StrictCostEnforcementForVAP is used to apply strict CEL cost validation for ValidatingAdmissionPolicy.
+	// It will be off by default for a certain period of time to avoid impacting existing users.
+	// It is strongly recommended to enable this feature gate as early as possible.
+	// The strict cost applies to the extended libraries whose cost is defined under k8s/apiserver/pkg/cel/library.
+	StrictCostEnforcementForVAP featuregate.Feature = "StrictCostEnforcementForVAP"
+
+	// owner: @cici37
+	//
+	// StrictCostEnforcementForWebhooks is used to apply strict CEL cost validation for matchConditions in Webhooks.
+	// It will be off by default for a certain period of time to avoid impacting existing users.
+	// It is strongly recommended to enable this feature gate as early as possible.
+	// The strict cost applies to the extended libraries whose cost is defined under k8s/apiserver/pkg/cel/library.
+	StrictCostEnforcementForWebhooks featuregate.Feature = "StrictCostEnforcementForWebhooks"
+
+	// owner: @caesarxuchao @roycaihw
+	//
+	// Enable the storage version API.
+	StorageVersionAPI featuregate.Feature = "StorageVersionAPI"
+
+	// owner: @caesarxuchao
+	//
+	// Allow apiservers to expose the storage version hash in the discovery
+	// document.
+	StorageVersionHash featuregate.Feature = "StorageVersionHash"
+
+	// owner: @aramase, @enj, @nabokihms
+	// kep: https://kep.k8s.io/3331
+	//
+	// Enables Structured Authentication Configuration
+	StructuredAuthenticationConfiguration featuregate.Feature = "StructuredAuthenticationConfiguration"
+
+	// owner: @palnabarun
+	// kep: https://kep.k8s.io/3221
+	//
+	// Enables Structured Authorization Configuration
+	StructuredAuthorizationConfiguration featuregate.Feature = "StructuredAuthorizationConfiguration"
+
+	// owner: @wojtek-t
+	//
+	// Enables support for watch bookmark events.
+	WatchBookmark featuregate.Feature = "WatchBookmark"
+
+	// owner: @wojtek-t
+	//
+	// Enables post-start-hook for storage readiness
+	WatchCacheInitializationPostStartHook featuregate.Feature = "WatchCacheInitializationPostStartHook"
+
+	// owner: @serathius
+	// Enables watches without resourceVersion to be served from storage.
+	// Used to prevent https://github.com/kubernetes/kubernetes/issues/123072 until etcd fixes the issue.
+	WatchFromStorageWithoutResourceVersion featuregate.Feature = "WatchFromStorageWithoutResourceVersion"
+
+	// owner: @p0lyn0mial
+	//
+	// Allow the API server to stream individual items instead of chunking
+	WatchList featuregate.Feature = "WatchList"
+
+	// owner: @serathius
+	// kep: http://kep.k8s.io/2340
+	//
+	// Allow the API server to serve consistent lists from cache
+	ConsistentListFromCache featuregate.Feature = "ConsistentListFromCache"
+)
+
+func init() {
+	runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates))
+	runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates))
+}
+
+// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs.
+// To add a new feature, define a key for it above and add it here. The features will be
+// available throughout Kubernetes binaries.
+//
+// Entries are alphabetized and separated from each other with blank lines to avoid sweeping gofmt changes
+// when adding or removing one entry.
+var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{
+	AdmissionWebhookMatchConditions: {
+		{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
+	},
+
+	AggregatedDiscoveryEndpoint: {
+		{Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
+	},
+
+	AllowUnsafeMalformedObjectDeletion: {
+		{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
+	},
+
+	AnonymousAuthConfigurableEndpoints: {
+		{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	APIListChunking: {
+		{Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.9"), Default: true, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
+	},
+
+	APIResponseCompression: {
+		{Version: version.MustParse("1.8"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	APIServerIdentity: {
+		{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.26"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	APIServerTracing: {
+		{Version: version.MustParse("1.22"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.27"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	APIServingWithRoutine: {
+		{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
+	},
+
+	BtreeWatchCache: {
+		{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	AuthorizeWithSelectors: {
+		{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	CBORServingAndStorage: {
+		{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
+	},
+
+	ConcurrentWatchObjectDecode: {
+		{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
+	},
+
+	ConsistentListFromCache: {
+		{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	CoordinatedLeaderElection: {
+		{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
+	},
+
+	EfficientWatchResumption: {
+		{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.21"), Default: true, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
+	},
+
+	KMSv1: {
+		{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Deprecated},
+		{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Deprecated},
+	},
+
+	MutatingAdmissionPolicy: {
+		{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
+	},
+
+	OpenAPIEnums: {
+		{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.24"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	RemainingItemCount: {
+		{Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
+	},
+
+	RemoteRequestHeaderUID: {
+		{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
+	},
+
+	ResilientWatchCacheInitialization: {
+		{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	RetryGenerateName: {
+		{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.32"), Default: true, LockToDefault: true, PreRelease: featuregate.GA},
+	},
+
+	SeparateCacheWatchRPC: {
+		{Version: version.MustParse("1.28"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	StorageVersionAPI: {
+		{Version: version.MustParse("1.20"), Default: false, PreRelease: featuregate.Alpha},
+	},
+
+	StorageVersionHash: {
+		{Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.15"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	StrictCostEnforcementForVAP: {
+		{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
+	},
+
+	StrictCostEnforcementForWebhooks: {
+		{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
+	},
+
+	StructuredAuthenticationConfiguration: {
+		{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	StructuredAuthorizationConfiguration: {
+		{Version: version.MustParse("1.29"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
+	},
+
+	UnauthenticatedHTTP2DOSMitigation: {
+		{Version: version.MustParse("1.25"), Default: false, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
+	},
+
+	WatchBookmark: {
+		{Version: version.MustParse("1.15"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.16"), Default: true, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.17"), Default: true, PreRelease: featuregate.GA, LockToDefault: true},
+	},
+
+	WatchCacheInitializationPostStartHook: {
+		{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
+	},
+
+	WatchFromStorageWithoutResourceVersion: {
+		{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Beta},
+	},
+
+	WatchList: {
+		{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.32"), Default: true, PreRelease: featuregate.Beta},
+	},
+}
+
+// defaultKubernetesFeatureGates consists of legacy unversioned Kubernetes-specific feature keys.
+// Please do not add to this struct and use defaultVersionedKubernetesFeatureGates instead.
+var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{}
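
A short sketch (not part of the diff) of how callers query these gates at runtime, mirroring the AuthorizeWithSelectors check in the request-info code earlier in this diff; the Set call shows the mutable-gate override path used by tests and the --feature-gates flag.

package main

import (
	"fmt"

	genericfeatures "k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func main() {
	// Gates default according to the versioned specs registered in init().
	enabled := utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors)
	fmt.Printf("AuthorizeWithSelectors enabled: %v\n", enabled)

	// Overriding a gate (e.g., in tests, or via --feature-gates on the
	// command line) goes through the mutable gate.
	if err := utilfeature.DefaultMutableFeatureGate.Set("WatchList=true"); err != nil {
		fmt.Println("failed to set gate:", err)
	}
	fmt.Printf("WatchList enabled: %v\n",
		utilfeature.DefaultFeatureGate.Enabled(genericfeatures.WatchList))
}
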
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go b/hack/tools/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go
new file mode 100644
index 0000000000..0513b28223
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go
@@ -0,0 +1,247 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package egressselector
+
+import (
+	"fmt"
+	"os"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/apiserver/pkg/apis/apiserver"
+	"k8s.io/apiserver/pkg/apis/apiserver/install"
+	"k8s.io/utils/path"
+)
+
+var cfgScheme = runtime.NewScheme()
+
+// validEgressSelectorNames contains the set of valid egress selector names.
+var validEgressSelectorNames = sets.NewString("controlplane", "cluster", "etcd")
+
+func init() {
+	install.Install(cfgScheme)
+}
+
+// ReadEgressSelectorConfiguration reads the egress selector configuration at the specified path.
+// It returns the loaded egress selector configuration if the input file parses correctly.
+// If the config file path is empty, it returns a nil configuration, which preserves backward
+// compatibility and functions as a no-op, since prior to this feature there was no egress
+// selector configuration at all.
+// It returns an error if the file does not exist or cannot be decoded.
+func ReadEgressSelectorConfiguration(configFilePath string) (*apiserver.EgressSelectorConfiguration, error) {
+	if configFilePath == "" {
+		return nil, nil
+	}
+	// a file was provided, so we just read it.
+	data, err := os.ReadFile(configFilePath)
+	if err != nil {
+		return nil, fmt.Errorf("unable to read egress selector configuration from %q [%v]", configFilePath, err)
+	}
+	config, gvk, err := serializer.NewCodecFactory(cfgScheme, serializer.EnableStrict).UniversalDecoder().Decode(data, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	internalConfig, ok := config.(*apiserver.EgressSelectorConfiguration)
+	if !ok {
+		return nil, fmt.Errorf("unexpected config type: %v", gvk)
+	}
+	return internalConfig, nil
+}
+
+// ValidateEgressSelectorConfiguration checks the apiserver.EgressSelectorConfiguration for
+// common configuration errors. It will return an error for problems such as configuring mTLS/cert
+// settings for protocols which do not support security. It will also try to catch errors such as
+// incorrect file paths. It will return nil if it does not find anything wrong.
+func ValidateEgressSelectorConfiguration(config *apiserver.EgressSelectorConfiguration) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if config == nil {
+		return allErrs // Treating a nil configuration as valid
+	}
+	for _, service := range config.EgressSelections {
+		fldPath := field.NewPath("service", "connection")
+		switch service.Connection.ProxyProtocol {
+		case apiserver.ProtocolDirect:
+			allErrs = append(allErrs, validateDirectConnection(service.Connection, fldPath)...)
+		case apiserver.ProtocolHTTPConnect:
+			allErrs = append(allErrs, validateHTTPConnectTransport(service.Connection.Transport, fldPath)...)
+		case apiserver.ProtocolGRPC:
+			allErrs = append(allErrs, validateGRPCTransport(service.Connection.Transport, fldPath)...)
+		default:
+			allErrs = append(allErrs, field.NotSupported(
+				fldPath.Child("protocol"),
+				service.Connection.ProxyProtocol,
+				[]string{
+					string(apiserver.ProtocolDirect),
+					string(apiserver.ProtocolHTTPConnect),
+					string(apiserver.ProtocolGRPC),
+				}))
+		}
+	}
+
+	seen := sets.String{}
+	for i, service := range config.EgressSelections {
+		canonicalName := strings.ToLower(service.Name)
+		fldPath := field.NewPath("service", "connection")
+		// reject duplicate service names
+		if seen.Has(canonicalName) {
+			allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), canonicalName))
+			continue
+		}
+		seen.Insert(canonicalName)
+
+		if !validEgressSelectorNames.Has(canonicalName) {
+			allErrs = append(allErrs, field.NotSupported(fldPath, canonicalName, validEgressSelectorNames.List()))
+			continue
+		}
+	}
+
+	return allErrs
+}
+
+func validateHTTPConnectTransport(transport *apiserver.Transport, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if transport == nil {
+		allErrs = append(allErrs, field.Required(
+			fldPath.Child("transport"),
+			"transport must be set for HTTPConnect"))
+		return allErrs
+	}
+
+	if transport.TCP != nil && transport.UDS != nil {
+		allErrs = append(allErrs, field.Invalid(
+			fldPath.Child("tcp"),
+			transport.TCP,
+			"TCP and UDS cannot both be set"))
+	} else if transport.TCP == nil && transport.UDS == nil {
+		allErrs = append(allErrs, field.Required(
+			fldPath.Child("tcp"),
+			"One of TCP or UDS must be set"))
+	} else if transport.TCP != nil {
+		allErrs = append(allErrs, validateTCPConnection(transport.TCP, fldPath)...)
+	} else if transport.UDS != nil {
+		allErrs = append(allErrs, validateUDSConnection(transport.UDS, fldPath)...)
+	}
+	return allErrs
+}
+
+func validateGRPCTransport(transport *apiserver.Transport, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if transport == nil {
+		allErrs = append(allErrs, field.Required(
+			fldPath.Child("transport"),
+			"transport must be set for GRPC"))
+		return allErrs
+	}
+
+	if transport.UDS != nil {
+		allErrs = append(allErrs, validateUDSConnection(transport.UDS, fldPath)...)
+	} else {
+		allErrs = append(allErrs, field.Required(
+			fldPath.Child("uds"),
+			"UDS must be set with GRPC"))
+	}
+	return allErrs
+}
+
+func validateDirectConnection(connection apiserver.Connection, fldPath *field.Path) field.ErrorList {
+	if connection.Transport != nil {
+		return field.ErrorList{field.Invalid(
+			fldPath.Child("transport"),
+			"direct",
+			"Transport config should be absent for direct connect"),
+		}
+	}
+
+	return nil
+}
+
+func validateUDSConnection(udsConfig *apiserver.UDSTransport, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if udsConfig.UDSName == "" {
+		allErrs = append(allErrs, field.Invalid(
+			fldPath.Child("udsName"),
+			"nil",
+			"UDSName should be present for UDS connections"))
+	}
+	return allErrs
+}
+
+func validateTCPConnection(tcpConfig *apiserver.TCPTransport, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if strings.HasPrefix(tcpConfig.URL, "http://") {
+		if tcpConfig.TLSConfig != nil {
+			allErrs = append(allErrs, field.Invalid(
+				fldPath.Child("tlsConfig"),
+				"nil",
+				"TLSConfig config should not be present when using HTTP"))
+		}
+	} else if strings.HasPrefix(tcpConfig.URL, "https://") {
+		return validateTLSConfig(tcpConfig.TLSConfig, fldPath)
+	} else {
+		allErrs = append(allErrs, field.Invalid(
+			fldPath.Child("url"),
+			tcpConfig.URL,
+			"supported connection protocols are http:// and https://"))
+	}
+	return allErrs
+}
+
+func validateTLSConfig(tlsConfig *apiserver.TLSConfig, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if tlsConfig == nil {
+		allErrs = append(allErrs, field.Required(
+			fldPath.Child("tlsConfig"),
+			"TLSConfig must be present when using HTTPS"))
+		return allErrs
+	}
+	if tlsConfig.CABundle != "" {
+		if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.CABundle); !exists || err != nil {
+			allErrs = append(allErrs, field.Invalid(
+				fldPath.Child("tlsConfig", "caBundle"),
+				tlsConfig.CABundle,
+				"TLS config ca bundle does not exist"))
+		}
+	}
+	if tlsConfig.ClientCert == "" {
+		allErrs = append(allErrs, field.Invalid(
+			fldPath.Child("tlsConfig", "clientCert"),
+			"nil",
+			"Using TLS requires clientCert"))
+	} else if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.ClientCert); !exists || err != nil {
+		allErrs = append(allErrs, field.Invalid(
+			fldPath.Child("tlsConfig", "clientCert"),
+			tlsConfig.ClientCert,
+			"TLS client cert does not exist"))
+	}
+	if tlsConfig.ClientKey == "" {
+		allErrs = append(allErrs, field.Invalid(
+			fldPath.Child("tlsConfig", "clientKey"),
+			"nil",
+			"Using TLS requires requires clientKey"))
+	} else if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.ClientKey); !exists || err != nil {
+		allErrs = append(allErrs, field.Invalid(
+			fldPath.Child("tlsConfig", "clientKey"),
+			tlsConfig.ClientKey,
+			"TLS client key does not exist"))
+	}
+	return allErrs
+}
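
A hypothetical end-to-end sketch (not part of the diff): write an EgressSelectorConfiguration to a temp file, load it with ReadEgressSelectorConfiguration, and validate it. The YAML follows the apiserver.k8s.io/v1beta1 schema; the socket path is illustrative.

package main

import (
	"fmt"
	"os"

	"k8s.io/apiserver/pkg/server/egressselector"
)

const cfg = `
apiVersion: apiserver.k8s.io/v1beta1
kind: EgressSelectorConfiguration
egressSelections:
- name: cluster
  connection:
    proxyProtocol: GRPC
    transport:
      uds:
        udsName: /etc/kubernetes/konnectivity-server/konnectivity-server.socket
- name: controlplane
  connection:
    proxyProtocol: Direct
`

func main() {
	f, err := os.CreateTemp("", "egress-*.yaml")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString(cfg); err != nil {
		panic(err)
	}

	config, err := egressselector.ReadEgressSelectorConfiguration(f.Name())
	if err != nil {
		panic(err)
	}
	if errs := egressselector.ValidateEgressSelectorConfiguration(config); len(errs) > 0 {
		fmt.Println("invalid config:", errs.ToAggregate())
	} else {
		fmt.Println("egress selector configuration is valid")
	}
}
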
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go b/hack/tools/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go
new file mode 100644
index 0000000000..a38ef64649
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go
@@ -0,0 +1,406 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package egressselector
+
+import (
+	"bufio"
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apiserver/pkg/apis/apiserver"
+	egressmetrics "k8s.io/apiserver/pkg/server/egressselector/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+	"k8s.io/component-base/tracing"
+	"k8s.io/klog/v2"
+	client "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client"
+)
+
+var directDialer utilnet.DialFunc = http.DefaultTransport.(*http.Transport).DialContext
+
+func init() {
+	client.Metrics.RegisterMetrics(legacyregistry.Registerer())
+}
+
+// EgressSelector is the map of network context type to context dialer, for network egress.
+type EgressSelector struct {
+	egressToDialer map[EgressType]utilnet.DialFunc
+}
+
+// EgressType is an indicator of which egress selection should be used for sending traffic.
+// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/1281-network-proxy/README.md#network-context
+type EgressType int
+
+const (
+	// ControlPlane is the EgressType for traffic intended to go to the control plane.
+	ControlPlane EgressType = iota
+	// Etcd is the EgressType for traffic intended to go to Kubernetes persistence store.
+	Etcd
+	// Cluster is the EgressType for traffic intended to go to the system being managed by Kubernetes.
+	Cluster
+)
+
+// NetworkContext is the struct used by Kubernetes API Server to indicate where it intends traffic to be sent.
+type NetworkContext struct {
+	// EgressSelectionName is the unique name of the
+	// EgressSelectorConfiguration which determines
+	// the network we route the traffic to.
+	EgressSelectionName EgressType
+}
+
+// Lookup is the interface to get the dialer function for the network context.
+type Lookup func(networkContext NetworkContext) (utilnet.DialFunc, error)
+
+// String returns the canonical string representation of the egress type
+func (s EgressType) String() string {
+	switch s {
+	case ControlPlane:
+		return "controlplane"
+	case Etcd:
+		return "etcd"
+	case Cluster:
+		return "cluster"
+	default:
+		return "invalid"
+	}
+}
+
+// AsNetworkContext is a helper function to make it easy to get the basic NetworkContext objects.
+func (s EgressType) AsNetworkContext() NetworkContext {
+	return NetworkContext{EgressSelectionName: s}
+}
+
+func lookupServiceName(name string) (EgressType, error) {
+	switch strings.ToLower(name) {
+	case "controlplane":
+		return ControlPlane, nil
+	case "etcd":
+		return Etcd, nil
+	case "cluster":
+		return Cluster, nil
+	}
+	return -1, fmt.Errorf("unrecognized service name %s", name)
+}
+
+func tunnelHTTPConnect(proxyConn net.Conn, proxyAddress, addr string) (net.Conn, error) {
+	fmt.Fprintf(proxyConn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, "127.0.0.1")
+	br := bufio.NewReader(proxyConn)
+	res, err := http.ReadResponse(br, nil)
+	if err != nil {
+		proxyConn.Close()
+		return nil, fmt.Errorf("reading HTTP response from CONNECT to %s via proxy %s failed: %v",
+			addr, proxyAddress, err)
+	}
+	if res.StatusCode != 200 {
+		proxyConn.Close()
+		return nil, fmt.Errorf("proxy error from %s while dialing %s, code %d: %v",
+			proxyAddress, addr, res.StatusCode, res.Status)
+	}
+
+	// It's safe to discard the bufio.Reader here and return the
+	// original TCP conn directly because we only use this for
+	// TLS, and in TLS the client speaks first, so we know there's
+	// no buffered data. But we can double-check.
+	if br.Buffered() > 0 {
+		proxyConn.Close()
+		return nil, fmt.Errorf("unexpected %d bytes of buffered data from CONNECT proxy %q",
+			br.Buffered(), proxyAddress)
+	}
+	return proxyConn, nil
+}
+
+type proxier interface {
+	// proxy returns a connection to addr.
+	proxy(ctx context.Context, addr string) (net.Conn, error)
+}
+
+var _ proxier = &httpConnectProxier{}
+
+type httpConnectProxier struct {
+	conn         net.Conn
+	proxyAddress string
+}
+
+func (t *httpConnectProxier) proxy(ctx context.Context, addr string) (net.Conn, error) {
+	return tunnelHTTPConnect(t.conn, t.proxyAddress, addr)
+}
+
+var _ proxier = &grpcProxier{}
+
+type grpcProxier struct {
+	tunnel client.Tunnel
+}
+
+func (g *grpcProxier) proxy(ctx context.Context, addr string) (net.Conn, error) {
+	return g.tunnel.DialContext(ctx, "tcp", addr)
+}
+
+type proxyServerConnector interface {
+	// connect establishes connection to the proxy server, and returns a
+	// proxier based on the connection.
+	//
+	// The provided Context must be non-nil. The context is used for connecting to the proxy only.
+	// If the context expires before the connection is complete, an error is returned.
+	// Once successfully connected to the proxy, any expiration of the context will not affect the connection.
+	connect(context.Context) (proxier, error)
+}
+
+type tcpHTTPConnectConnector struct {
+	proxyAddress string
+	tlsConfig    *tls.Config
+}
+
+func (t *tcpHTTPConnectConnector) connect(ctx context.Context) (proxier, error) {
+	d := tls.Dialer{
+		Config: t.tlsConfig,
+	}
+	conn, err := d.DialContext(ctx, "tcp", t.proxyAddress)
+	if err != nil {
+		return nil, err
+	}
+	return &httpConnectProxier{conn: conn, proxyAddress: t.proxyAddress}, nil
+}
+
+type udsHTTPConnectConnector struct {
+	udsName string
+}
+
+func (u *udsHTTPConnectConnector) connect(ctx context.Context) (proxier, error) {
+	var d net.Dialer
+	conn, err := d.DialContext(ctx, "unix", u.udsName)
+	if err != nil {
+		return nil, err
+	}
+	return &httpConnectProxier{conn: conn, proxyAddress: u.udsName}, nil
+}
+
+type udsGRPCConnector struct {
+	udsName string
+}
+
+// connect establishes a connection to a proxy over gRPC.
+// TODO At the moment, it does not use the provided context.
+func (u *udsGRPCConnector) connect(_ context.Context) (proxier, error) {
+	udsName := u.udsName
+	dialOption := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
+		var d net.Dialer
+		c, err := d.DialContext(ctx, "unix", udsName)
+		if err != nil {
+			klog.Errorf("failed to create connection to uds name %s, error: %v", udsName, err)
+		}
+		return c, err
+	})
+
+	// CreateSingleUseGrpcTunnel() unfortunately couples dial and connection contexts. Because of that,
+	// we cannot use ctx just for dialing and control the connection lifetime separately.
+	// See https://github.com/kubernetes-sigs/apiserver-network-proxy/issues/357.
+	tunnelCtx := context.TODO()
+	tunnel, err := client.CreateSingleUseGrpcTunnel(tunnelCtx, udsName, dialOption,
+		grpc.WithBlock(),
+		grpc.WithReturnConnectionError(),
+		grpc.WithTimeout(30*time.Second), // matches http.DefaultTransport dial timeout
+		grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		return nil, err
+	}
+	return &grpcProxier{tunnel: tunnel}, nil
+}
+
+type dialerCreator struct {
+	connector proxyServerConnector
+	direct    bool
+	options   metricsOptions
+}
+
+type metricsOptions struct {
+	transport string
+	protocol  string
+}
+
+func (d *dialerCreator) createDialer() utilnet.DialFunc {
+	if d.direct {
+		return directDialer
+	}
+	return func(ctx context.Context, network, addr string) (net.Conn, error) {
+		ctx, span := tracing.Start(ctx, fmt.Sprintf("Proxy via %s protocol over %s", d.options.protocol, d.options.transport), attribute.String("address", addr))
+		defer span.End(500 * time.Millisecond)
+		start := egressmetrics.Metrics.Clock().Now()
+		egressmetrics.Metrics.ObserveDialStart(d.options.protocol, d.options.transport)
+		proxier, err := d.connector.connect(ctx)
+		if err != nil {
+			egressmetrics.Metrics.ObserveDialFailure(d.options.protocol, d.options.transport, egressmetrics.StageConnect)
+			return nil, err
+		}
+		conn, err := proxier.proxy(ctx, addr)
+		if err != nil {
+			egressmetrics.Metrics.ObserveDialFailure(d.options.protocol, d.options.transport, egressmetrics.StageProxy)
+			return nil, err
+		}
+		egressmetrics.Metrics.ObserveDialLatency(egressmetrics.Metrics.Clock().Now().Sub(start), d.options.protocol, d.options.transport)
+		return conn, nil
+	}
+}
+
+func getTLSConfig(t *apiserver.TLSConfig) (*tls.Config, error) {
+	clientCert := t.ClientCert
+	clientKey := t.ClientKey
+	caCert := t.CABundle
+	clientCerts, err := tls.LoadX509KeyPair(clientCert, clientKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read key pair %s & %s, got %v", clientCert, clientKey, err)
+	}
+	certPool := x509.NewCertPool()
+	if caCert != "" {
+		certBytes, err := os.ReadFile(caCert)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read cert file %s, got %v", caCert, err)
+		}
+		ok := certPool.AppendCertsFromPEM(certBytes)
+		if !ok {
+			return nil, fmt.Errorf("failed to append CA cert to the cert pool")
+		}
+	} else {
+		// Use host's root CA set instead of providing our own
+		certPool = nil
+	}
+	return &tls.Config{
+		Certificates: []tls.Certificate{clientCerts},
+		RootCAs:      certPool,
+	}, nil
+}
+
+func getProxyAddress(urlString string) (string, error) {
+	proxyURL, err := url.Parse(urlString)
+	if err != nil {
+		return "", fmt.Errorf("invalid proxy server url %q: %v", urlString, err)
+	}
+	return proxyURL.Host, nil
+}
+
+func connectionToDialerCreator(c apiserver.Connection) (*dialerCreator, error) {
+	switch c.ProxyProtocol {
+
+	case apiserver.ProtocolHTTPConnect:
+		if c.Transport.UDS != nil {
+			return &dialerCreator{
+				connector: &udsHTTPConnectConnector{
+					udsName: c.Transport.UDS.UDSName,
+				},
+				options: metricsOptions{
+					transport: egressmetrics.TransportUDS,
+					protocol:  egressmetrics.ProtocolHTTPConnect,
+				},
+			}, nil
+		} else if c.Transport.TCP != nil {
+			tlsConfig, err := getTLSConfig(c.Transport.TCP.TLSConfig)
+			if err != nil {
+				return nil, err
+			}
+			proxyAddress, err := getProxyAddress(c.Transport.TCP.URL)
+			if err != nil {
+				return nil, err
+			}
+			return &dialerCreator{
+				connector: &tcpHTTPConnectConnector{
+					tlsConfig:    tlsConfig,
+					proxyAddress: proxyAddress,
+				},
+				options: metricsOptions{
+					transport: egressmetrics.TransportTCP,
+					protocol:  egressmetrics.ProtocolHTTPConnect,
+				},
+			}, nil
+		} else {
+			return nil, fmt.Errorf("Either a TCP or UDS transport must be specified")
+		}
+	case apiserver.ProtocolGRPC:
+		if c.Transport.UDS != nil {
+			return &dialerCreator{
+				connector: &udsGRPCConnector{
+					udsName: c.Transport.UDS.UDSName,
+				},
+				options: metricsOptions{
+					transport: egressmetrics.TransportUDS,
+					protocol:  egressmetrics.ProtocolGRPC,
+				},
+			}, nil
+		}
+		return nil, fmt.Errorf("UDS transport must be specified for GRPC")
+	case apiserver.ProtocolDirect:
+		return &dialerCreator{direct: true}, nil
+	default:
+		return nil, fmt.Errorf("unrecognized service connection protocol %q", c.ProxyProtocol)
+	}
+
+}
+
+// NewEgressSelector configures lookup mechanism for Lookup.
+// It does so based on a EgressSelectorConfiguration which was read at startup.
+func NewEgressSelector(config *apiserver.EgressSelectorConfiguration) (*EgressSelector, error) {
+	if config == nil || config.EgressSelections == nil {
+		// No connection services are configured; return a nil selector so callers fall back to the default dialer.
+		return nil, nil
+	}
+	cs := &EgressSelector{
+		egressToDialer: make(map[EgressType]utilnet.DialFunc),
+	}
+	for _, service := range config.EgressSelections {
+		name, err := lookupServiceName(service.Name)
+		if err != nil {
+			return nil, err
+		}
+		dialerCreator, err := connectionToDialerCreator(service.Connection)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create dialer for egressSelection %q: %v", name, err)
+		}
+		cs.egressToDialer[name] = dialerCreator.createDialer()
+	}
+	return cs, nil
+}
+
+// NewEgressSelectorWithMap returns a EgressSelector with the supplied EgressType to DialFunc map.
+func NewEgressSelectorWithMap(m map[EgressType]utilnet.DialFunc) *EgressSelector {
+	if m == nil {
+		m = make(map[EgressType]utilnet.DialFunc)
+	}
+	return &EgressSelector{
+		egressToDialer: m,
+	}
+}
+
+// Lookup gets the dialer function for the network context.
+// This is configured for the Kubernetes API Server at startup.
+func (cs *EgressSelector) Lookup(networkContext NetworkContext) (utilnet.DialFunc, error) {
+	if cs.egressToDialer == nil {
+		// The round trip wrapper will override the dialContext method appropriately
+		return nil, nil
+	}
+
+	return cs.egressToDialer[networkContext.EgressSelectionName], nil
+}
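
For reference, a minimal sketch (not part of the vendored file) of how a caller consumes the selector: look up the dialer for an egress type and, when it is non-nil, install it on a transport. Loading the EgressSelectorConfiguration is assumed to happen elsewhere.

    package main

    import (
    	"net/http"

    	egressselector "k8s.io/apiserver/pkg/server/egressselector"
    )

    // transportFor wires an EgressSelector dialer into an HTTP transport.
    func transportFor(es *egressselector.EgressSelector) (*http.Transport, error) {
    	// Cluster traffic (e.g. webhook calls) resolves via the "cluster" egress selection.
    	dial, err := es.Lookup(egressselector.Cluster.AsNetworkContext())
    	if err != nil {
    		return nil, err
    	}
    	t := &http.Transport{}
    	if dial != nil { // a nil dialer means "use the default dialer"
    		t.DialContext = dial
    	}
    	return t, nil
    }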
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go b/hack/tools/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go
new file mode 100644
index 0000000000..2e39947cd5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"time"
+
+	"k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+	"k8s.io/utils/clock"
+)
+
+const (
+	namespace = "apiserver"
+	subsystem = "egress_dialer"
+
+	// ProtocolHTTPConnect means that the proxy protocol is http-connect.
+	ProtocolHTTPConnect = "http_connect"
+	// ProtocolGRPC means that the proxy protocol is the GRPC protocol.
+	ProtocolGRPC = "grpc"
+	// TransportTCP means that the transport is TCP.
+	TransportTCP = "tcp"
+	// TransportUDS means that the transport is UDS.
+	TransportUDS = "uds"
+	// StageConnect indicates that the dial failed at establishing a connection to the proxy server.
+	StageConnect = "connect"
+	// StageProxy indicates that the dial failed at requesting the proxy server to proxy.
+	StageProxy = "proxy"
+)
+
+var (
+	// Use buckets ranging from 5 ms to 12.5 seconds.
+	latencyBuckets = []float64{0.005, 0.025, 0.1, 0.5, 2.5, 12.5}
+
+	// Metrics provides access to all dial metrics.
+	Metrics = newDialMetrics()
+)
+
+// DialMetrics instruments dials to the proxy server with Prometheus metrics.
+type DialMetrics struct {
+	clock     clock.Clock
+	starts    *metrics.CounterVec
+	latencies *metrics.HistogramVec
+	failures  *metrics.CounterVec
+}
+
+// newDialMetrics creates a new DialMetrics, configured with default metric names.
+func newDialMetrics() *DialMetrics {
+	starts := metrics.NewCounterVec(
+		&metrics.CounterOpts{
+			Namespace:      namespace,
+			Subsystem:      subsystem,
+			Name:           "dial_start_total",
+			Help:           "Dial starts, labeled by the protocol (http-connect or grpc) and transport (tcp or uds).",
+			StabilityLevel: metrics.ALPHA,
+		},
+		[]string{"protocol", "transport"},
+	)
+
+	latencies := metrics.NewHistogramVec(
+		&metrics.HistogramOpts{
+			Namespace:      namespace,
+			Subsystem:      subsystem,
+			Name:           "dial_duration_seconds",
+			Help:           "Dial latency histogram in seconds, labeled by the protocol (http-connect or grpc), transport (tcp or uds)",
+			Buckets:        latencyBuckets,
+			StabilityLevel: metrics.ALPHA,
+		},
+		[]string{"protocol", "transport"},
+	)
+
+	failures := metrics.NewCounterVec(
+		&metrics.CounterOpts{
+			Namespace:      namespace,
+			Subsystem:      subsystem,
+			Name:           "dial_failure_count",
+			Help:           "Dial failure count, labeled by the protocol (http-connect or grpc), transport (tcp or uds), and stage (connect or proxy). The stage indicates at which stage the dial failed",
+			StabilityLevel: metrics.ALPHA,
+		},
+		[]string{"protocol", "transport", "stage"},
+	)
+
+	legacyregistry.MustRegister(starts)
+	legacyregistry.MustRegister(latencies)
+	legacyregistry.MustRegister(failures)
+	return &DialMetrics{starts: starts, latencies: latencies, failures: failures, clock: clock.RealClock{}}
+}
+
+// Clock returns the clock.
+func (m *DialMetrics) Clock() clock.Clock {
+	return m.clock
+}
+
+// SetClock sets the clock.
+func (m *DialMetrics) SetClock(c clock.Clock) {
+	m.clock = c
+}
+
+// Reset resets the metrics.
+func (m *DialMetrics) Reset() {
+	m.starts.Reset()
+	m.latencies.Reset()
+	m.failures.Reset()
+}
+
+// ObserveDialStart records the start of a dial attempt, labeled by protocol and transport.
+func (m *DialMetrics) ObserveDialStart(protocol, transport string) {
+	m.starts.WithLabelValues(protocol, transport).Inc()
+}
+
+// ObserveDialLatency records the latency of a dial, labeled by protocol and transport.
+func (m *DialMetrics) ObserveDialLatency(elapsed time.Duration, protocol, transport string) {
+	m.latencies.WithLabelValues(protocol, transport).Observe(elapsed.Seconds())
+}
+
+// ObserveDialFailure records a failed dial, labeled by protocol, transport, and the stage the dial failed at.
+func (m *DialMetrics) ObserveDialFailure(protocol, transport, stage string) {
+	m.failures.WithLabelValues(protocol, transport, stage).Inc()
+}
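
A hedged sketch of the intended call sequence for these helpers; the dial itself is a plain net.Dialer here, and the protocol/transport labels are illustrative:

    package main

    import (
    	"context"
    	"net"

    	egressmetrics "k8s.io/apiserver/pkg/server/egressselector/metrics"
    )

    // instrumentedDial records a start, then either a staged failure or a latency.
    func instrumentedDial(ctx context.Context, network, addr string) (net.Conn, error) {
    	egressmetrics.Metrics.ObserveDialStart(egressmetrics.ProtocolHTTPConnect, egressmetrics.TransportTCP)
    	start := egressmetrics.Metrics.Clock().Now()
    	var d net.Dialer
    	conn, err := d.DialContext(ctx, network, addr)
    	if err != nil {
    		egressmetrics.Metrics.ObserveDialFailure(egressmetrics.ProtocolHTTPConnect, egressmetrics.TransportTCP, egressmetrics.StageConnect)
    		return nil, err
    	}
    	egressmetrics.Metrics.ObserveDialLatency(egressmetrics.Metrics.Clock().Since(start), egressmetrics.ProtocolHTTPConnect, egressmetrics.TransportTCP)
    	return conn, nil
    }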
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/hack/tools/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go
new file mode 100644
index 0000000000..00a9e099ba
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package feature
+
+import (
+	"k8s.io/component-base/featuregate"
+)
+
+var (
+	// DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate.
+	// Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this.
+	// Tests that need to modify feature gates for the duration of their test should use:
+	//   featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, <value>)
+	DefaultMutableFeatureGate featuregate.MutableVersionedFeatureGate = featuregate.NewFeatureGate()
+
+	// DefaultFeatureGate is a shared global FeatureGate.
+	// Top-level commands/options setup that needs to modify this feature gate should use DefaultMutableFeatureGate.
+	DefaultFeatureGate featuregate.FeatureGate = DefaultMutableFeatureGate
+)
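
A sketch of the intended usage pattern, assuming the mutable gate exposes the usual Add registration; MyFeature is a hypothetical gate used only for illustration:

    package main

    import (
    	utilfeature "k8s.io/apiserver/pkg/util/feature"
    	"k8s.io/component-base/featuregate"
    )

    // MyFeature is a hypothetical gate; real features register the same way.
    const MyFeature featuregate.Feature = "MyFeature"

    func init() {
    	// Registration must happen during top-level setup, via the mutable gate.
    	if err := utilfeature.DefaultMutableFeatureGate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
    		MyFeature: {Default: false, PreRelease: featuregate.Alpha},
    	}); err != nil {
    		panic(err)
    	}
    }

    func myFeatureEnabled() bool {
    	// Read-only checks go through the immutable view.
    	return utilfeature.DefaultFeatureGate.Enabled(MyFeature)
    }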
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go
new file mode 100644
index 0000000000..0816b45a10
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go
@@ -0,0 +1,276 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"go.opentelemetry.io/otel/trace"
+
+	corev1 "k8s.io/api/core/v1"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apiserver/pkg/features"
+	egressselector "k8s.io/apiserver/pkg/server/egressselector"
+	"k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	tracing "k8s.io/component-base/tracing"
+)
+
+// AuthenticationInfoResolverWrapper can be used to inject Dial function to the
+// rest.Config generated by the resolver.
+type AuthenticationInfoResolverWrapper func(AuthenticationInfoResolver) AuthenticationInfoResolver
+
+// NewDefaultAuthenticationInfoResolverWrapper builds a default authn resolver wrapper
+func NewDefaultAuthenticationInfoResolverWrapper(
+	proxyTransport *http.Transport,
+	egressSelector *egressselector.EgressSelector,
+	kubeapiserverClientConfig *rest.Config,
+	tp trace.TracerProvider) AuthenticationInfoResolverWrapper {
+
+	webhookAuthResolverWrapper := func(delegate AuthenticationInfoResolver) AuthenticationInfoResolver {
+		return &AuthenticationInfoResolverDelegator{
+			ClientConfigForFunc: func(hostPort string) (*rest.Config, error) {
+				if hostPort == "kubernetes.default.svc:443" {
+					return kubeapiserverClientConfig, nil
+				}
+				ret, err := delegate.ClientConfigFor(hostPort)
+				if err != nil {
+					return nil, err
+				}
+				if feature.DefaultFeatureGate.Enabled(features.APIServerTracing) {
+					ret.Wrap(tracing.WrapperFor(tp))
+				}
+
+				if egressSelector != nil {
+					networkContext := egressselector.ControlPlane.AsNetworkContext()
+					var egressDialer utilnet.DialFunc
+					egressDialer, err = egressSelector.Lookup(networkContext)
+
+					if err != nil {
+						return nil, err
+					}
+
+					ret.Dial = egressDialer
+				}
+				return ret, nil
+			},
+			ClientConfigForServiceFunc: func(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error) {
+				if serviceName == "kubernetes" && serviceNamespace == corev1.NamespaceDefault && servicePort == 443 {
+					return kubeapiserverClientConfig, nil
+				}
+				ret, err := delegate.ClientConfigForService(serviceName, serviceNamespace, servicePort)
+				if err != nil {
+					return nil, err
+				}
+				if feature.DefaultFeatureGate.Enabled(features.APIServerTracing) {
+					ret.Wrap(tracing.WrapperFor(tp))
+				}
+
+				if egressSelector != nil {
+					networkContext := egressselector.Cluster.AsNetworkContext()
+					var egressDialer utilnet.DialFunc
+					egressDialer, err = egressSelector.Lookup(networkContext)
+					if err != nil {
+						return nil, err
+					}
+
+					ret.Dial = egressDialer
+				} else if proxyTransport != nil && proxyTransport.DialContext != nil {
+					ret.Dial = proxyTransport.DialContext
+				}
+				return ret, nil
+			},
+		}
+	}
+	return webhookAuthResolverWrapper
+}
+
+// AuthenticationInfoResolver builds rest.Config based on the server or service
+// name and service namespace.
+type AuthenticationInfoResolver interface {
+	// ClientConfigFor builds rest.Config based on the hostPort.
+	ClientConfigFor(hostPort string) (*rest.Config, error)
+	// ClientConfigForService builds rest.Config based on the serviceName and
+	// serviceNamespace.
+	ClientConfigForService(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error)
+}
+
+// AuthenticationInfoResolverDelegator implements AuthenticationInfoResolver.
+type AuthenticationInfoResolverDelegator struct {
+	ClientConfigForFunc        func(hostPort string) (*rest.Config, error)
+	ClientConfigForServiceFunc func(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error)
+}
+
+// ClientConfigFor returns client config for given hostPort.
+func (a *AuthenticationInfoResolverDelegator) ClientConfigFor(hostPort string) (*rest.Config, error) {
+	return a.ClientConfigForFunc(hostPort)
+}
+
+// ClientConfigForService returns client config for given service.
+func (a *AuthenticationInfoResolverDelegator) ClientConfigForService(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error) {
+	return a.ClientConfigForServiceFunc(serviceName, serviceNamespace, servicePort)
+}
+
+type defaultAuthenticationInfoResolver struct {
+	kubeconfig clientcmdapi.Config
+}
+
+// NewDefaultAuthenticationInfoResolver generates an AuthenticationInfoResolver
+// that builds rest.Config based on the kubeconfig file. kubeconfigFile is the
+// path to the kubeconfig.
+func NewDefaultAuthenticationInfoResolver(kubeconfigFile string) (AuthenticationInfoResolver, error) {
+	if len(kubeconfigFile) == 0 {
+		return &defaultAuthenticationInfoResolver{}, nil
+	}
+
+	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+	loadingRules.ExplicitPath = kubeconfigFile
+	loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
+	clientConfig, err := loader.RawConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	return &defaultAuthenticationInfoResolver{kubeconfig: clientConfig}, nil
+}
+
+func (c *defaultAuthenticationInfoResolver) ClientConfigFor(hostPort string) (*rest.Config, error) {
+	return c.clientConfig(hostPort)
+}
+
+func (c *defaultAuthenticationInfoResolver) ClientConfigForService(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error) {
+	return c.clientConfig(net.JoinHostPort(serviceName+"."+serviceNamespace+".svc", strconv.Itoa(servicePort)))
+}
+
+func (c *defaultAuthenticationInfoResolver) clientConfig(target string) (*rest.Config, error) {
+	// exact match
+	if authConfig, ok := c.kubeconfig.AuthInfos[target]; ok {
+		return restConfigFromKubeconfig(authConfig)
+	}
+
+	// star prefixed match
+	serverSteps := strings.Split(target, ".")
+	for i := 1; i < len(serverSteps); i++ {
+		nickName := "*." + strings.Join(serverSteps[i:], ".")
+		if authConfig, ok := c.kubeconfig.AuthInfos[nickName]; ok {
+			return restConfigFromKubeconfig(authConfig)
+		}
+	}
+
+	// If target included the default https port (443), search again without the port
+	if target, port, err := net.SplitHostPort(target); err == nil && port == "443" {
+		// exact match without port
+		if authConfig, ok := c.kubeconfig.AuthInfos[target]; ok {
+			return restConfigFromKubeconfig(authConfig)
+		}
+
+		// star prefixed match without port
+		serverSteps := strings.Split(target, ".")
+		for i := 1; i < len(serverSteps); i++ {
+			nickName := "*." + strings.Join(serverSteps[i:], ".")
+			if authConfig, ok := c.kubeconfig.AuthInfos[nickName]; ok {
+				return restConfigFromKubeconfig(authConfig)
+			}
+		}
+	}
+
+	// if we're trying to hit the kube-apiserver and there wasn't an explicit config, use the in-cluster config
+	if target == "kubernetes.default.svc:443" {
+		// if we can find an in-cluster-config use that.  If we can't, fall through.
+		inClusterConfig, err := rest.InClusterConfig()
+		if err == nil {
+			return setGlobalDefaults(inClusterConfig), nil
+		}
+	}
+
+	// star (default) match
+	if authConfig, ok := c.kubeconfig.AuthInfos["*"]; ok {
+		return restConfigFromKubeconfig(authConfig)
+	}
+
+	// use the current context from the kubeconfig if possible
+	if len(c.kubeconfig.CurrentContext) > 0 {
+		if currContext, ok := c.kubeconfig.Contexts[c.kubeconfig.CurrentContext]; ok {
+			if len(currContext.AuthInfo) > 0 {
+				if currAuth, ok := c.kubeconfig.AuthInfos[currContext.AuthInfo]; ok {
+					return restConfigFromKubeconfig(currAuth)
+				}
+			}
+		}
+	}
+
+	// anonymous
+	return setGlobalDefaults(&rest.Config{}), nil
+}
+
+func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Config, error) {
+	config := &rest.Config{}
+
+	// blindly overwrite existing values based on precedence
+	if len(configAuthInfo.Token) > 0 {
+		config.BearerToken = configAuthInfo.Token
+		config.BearerTokenFile = configAuthInfo.TokenFile
+	} else if len(configAuthInfo.TokenFile) > 0 {
+		tokenBytes, err := os.ReadFile(configAuthInfo.TokenFile)
+		if err != nil {
+			return nil, err
+		}
+		config.BearerToken = string(tokenBytes)
+		config.BearerTokenFile = configAuthInfo.TokenFile
+	}
+	if len(configAuthInfo.Impersonate) > 0 {
+		config.Impersonate = rest.ImpersonationConfig{
+			UserName: configAuthInfo.Impersonate,
+			UID:      configAuthInfo.ImpersonateUID,
+			Groups:   configAuthInfo.ImpersonateGroups,
+			Extra:    configAuthInfo.ImpersonateUserExtra,
+		}
+	}
+	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+		config.CertFile = configAuthInfo.ClientCertificate
+		config.CertData = configAuthInfo.ClientCertificateData
+		config.KeyFile = configAuthInfo.ClientKey
+		config.KeyData = configAuthInfo.ClientKeyData
+	}
+	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+		config.Username = configAuthInfo.Username
+		config.Password = configAuthInfo.Password
+	}
+	if configAuthInfo.Exec != nil {
+		config.ExecProvider = configAuthInfo.Exec.DeepCopy()
+	}
+	if configAuthInfo.AuthProvider != nil {
+		return nil, fmt.Errorf("auth provider not supported")
+	}
+
+	return setGlobalDefaults(config), nil
+}
+
+func setGlobalDefaults(config *rest.Config) *rest.Config {
+	config.UserAgent = "kube-apiserver-admission"
+	config.Timeout = 30 * time.Second
+
+	return config
+}
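
For reference, a sketch of the lookup order the resolver applies; the kubeconfig path and host below are hypothetical:

    package main

    import (
    	webhookutil "k8s.io/apiserver/pkg/util/webhook"
    )

    func exampleResolve() error {
    	resolver, err := webhookutil.NewDefaultAuthenticationInfoResolver("/etc/webhook/kubeconfig")
    	if err != nil {
    		return err
    	}
    	// Tries AuthInfos["hooks.example.com:8443"], then star-prefixed names such as
    	// AuthInfos["*.example.com:8443"], then (for :443 targets only) the same names
    	// without the port, then (for kubernetes.default.svc:443 only) the in-cluster
    	// config, then AuthInfos["*"], then the current-context user, then anonymous.
    	cfg, err := resolver.ClientConfigFor("hooks.example.com:8443")
    	if err != nil {
    		return err
    	}
    	_ = cfg // use cfg to build a client
    	return nil
    }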
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/client.go b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/client.go
new file mode 100644
index 0000000000..63ea4e2666
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/client.go
@@ -0,0 +1,257 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apiserver/pkg/util/x509metrics"
+	"k8s.io/client-go/rest"
+	"k8s.io/utils/lru"
+	netutils "k8s.io/utils/net"
+)
+
+const (
+	defaultCacheSize = 200
+)
+
+// ClientConfig defines parameters required for creating a hook client.
+type ClientConfig struct {
+	Name     string
+	URL      string
+	CABundle []byte
+	Service  *ClientConfigService
+}
+
+// ClientConfigService defines service discovery parameters of the webhook.
+type ClientConfigService struct {
+	Name      string
+	Namespace string
+	Path      string
+	Port      int32
+}
+
+// ClientManager builds REST clients to talk to webhooks. It caches the clients
+// to avoid duplicate creation.
+type ClientManager struct {
+	authInfoResolver     AuthenticationInfoResolver
+	serviceResolver      ServiceResolver
+	negotiatedSerializer runtime.NegotiatedSerializer
+	cache                *lru.Cache
+}
+
+// NewClientManager creates a clientManager.
+func NewClientManager(gvs []schema.GroupVersion, addToSchemaFuncs ...func(s *runtime.Scheme) error) (ClientManager, error) {
+	cache := lru.New(defaultCacheSize)
+	hookScheme := runtime.NewScheme()
+	for _, addToSchemaFunc := range addToSchemaFuncs {
+		if err := addToSchemaFunc(hookScheme); err != nil {
+			return ClientManager{}, err
+		}
+	}
+	return ClientManager{
+		cache: cache,
+		negotiatedSerializer: serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{
+			Serializer: serializer.NewCodecFactory(hookScheme).LegacyCodec(gvs...),
+		}),
+	}, nil
+}
+
+// SetAuthenticationInfoResolverWrapper sets the
+// AuthenticationInfoResolverWrapper.
+func (cm *ClientManager) SetAuthenticationInfoResolverWrapper(wrapper AuthenticationInfoResolverWrapper) {
+	if wrapper != nil {
+		cm.authInfoResolver = wrapper(cm.authInfoResolver)
+	}
+}
+
+// SetAuthenticationInfoResolver sets the AuthenticationInfoResolver.
+func (cm *ClientManager) SetAuthenticationInfoResolver(resolver AuthenticationInfoResolver) {
+	cm.authInfoResolver = resolver
+}
+
+// SetServiceResolver sets the ServiceResolver.
+func (cm *ClientManager) SetServiceResolver(sr ServiceResolver) {
+	if sr != nil {
+		cm.serviceResolver = sr
+	}
+}
+
+// Validate checks if ClientManager is properly set up.
+func (cm *ClientManager) Validate() error {
+	var errs []error
+	if cm.negotiatedSerializer == nil {
+		errs = append(errs, fmt.Errorf("the clientManager requires a negotiatedSerializer"))
+	}
+	if cm.serviceResolver == nil {
+		errs = append(errs, fmt.Errorf("the clientManager requires a serviceResolver"))
+	}
+	if cm.authInfoResolver == nil {
+		errs = append(errs, fmt.Errorf("the clientManager requires an authInfoResolver"))
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+// HookClient gets a RESTClient from the cache, or constructs one based on the
+// webhook configuration.
+func (cm *ClientManager) HookClient(cc ClientConfig) (*rest.RESTClient, error) {
+	ccWithNoName := cc
+	ccWithNoName.Name = ""
+	cacheKey, err := json.Marshal(ccWithNoName)
+	if err != nil {
+		return nil, err
+	}
+	if client, ok := cm.cache.Get(string(cacheKey)); ok {
+		return client.(*rest.RESTClient), nil
+	}
+
+	cfg, err := cm.hookClientConfig(cc)
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := rest.UnversionedRESTClientFor(cfg)
+	if err == nil {
+		cm.cache.Add(string(cacheKey), client)
+	}
+	return client, err
+}
+
+func (cm *ClientManager) hookClientConfig(cc ClientConfig) (*rest.Config, error) {
+	complete := func(cfg *rest.Config) (*rest.Config, error) {
+		// Avoid client-side rate limiting when talking to the webhook backend.
+		// Rate limiting should happen when deciding how many requests to serve.
+		cfg.QPS = -1
+
+		// Combine CAData from the config with any existing CA bundle provided
+		if len(cfg.TLSClientConfig.CAData) > 0 {
+			cfg.TLSClientConfig.CAData = append(cfg.TLSClientConfig.CAData, '\n')
+		}
+		cfg.TLSClientConfig.CAData = append(cfg.TLSClientConfig.CAData, cc.CABundle...)
+
+		cfg.ContentConfig.NegotiatedSerializer = cm.negotiatedSerializer
+		cfg.ContentConfig.ContentType = runtime.ContentTypeJSON
+
+		// Add a transport wrapper that allows detection of TLS connections to
+		// servers with serving certificates with deprecated characteristics
+		cfg.Wrap(x509metrics.NewDeprecatedCertificateRoundTripperWrapperConstructor(
+			x509MissingSANCounter,
+			x509InsecureSHA1Counter,
+		))
+		return cfg, nil
+	}
+
+	if cc.Service != nil {
+		port := cc.Service.Port
+		if port == 0 {
+			// Default to port 443 if no service port is specified
+			port = 443
+		}
+
+		restConfig, err := cm.authInfoResolver.ClientConfigForService(cc.Service.Name, cc.Service.Namespace, int(port))
+		if err != nil {
+			return nil, err
+		}
+		cfg := rest.CopyConfig(restConfig)
+
+		// Use http/1.1 instead of http/2.
+		// This is a workaround for http/2-enabled clients not load-balancing concurrent requests to multiple backends.
+		// See https://issue.k8s.io/75791 for details.
+		cfg.NextProtos = []string{"http/1.1"}
+
+		serverName := cc.Service.Name + "." + cc.Service.Namespace + ".svc"
+
+		host := net.JoinHostPort(serverName, strconv.Itoa(int(port)))
+		cfg.Host = "https://" + host
+		cfg.APIPath = cc.Service.Path
+		// Set the server name if not already set
+		if len(cfg.TLSClientConfig.ServerName) == 0 {
+			cfg.TLSClientConfig.ServerName = serverName
+		}
+
+		delegateDialer := cfg.Dial
+		if delegateDialer == nil {
+			var d net.Dialer
+			delegateDialer = d.DialContext
+		}
+		cfg.Dial = func(ctx context.Context, network, addr string) (net.Conn, error) {
+			if addr == host {
+				u, err := cm.serviceResolver.ResolveEndpoint(cc.Service.Namespace, cc.Service.Name, port)
+				if err != nil {
+					return nil, err
+				}
+				addr = u.Host
+			}
+			return delegateDialer(ctx, network, addr)
+		}
+
+		return complete(cfg)
+	}
+
+	if cc.URL == "" {
+		return nil, &ErrCallingWebhook{WebhookName: cc.Name, Reason: errors.New("webhook configuration must have either service or URL")}
+	}
+
+	u, err := url.Parse(cc.URL)
+	if err != nil {
+		return nil, &ErrCallingWebhook{WebhookName: cc.Name, Reason: fmt.Errorf("Unparsable URL: %v", err)}
+	}
+
+	hostPort := u.Host
+	if len(u.Port()) == 0 {
+		// Default to port 443 if no port is specified
+		hostPort = net.JoinHostPort(hostPort, "443")
+	}
+
+	restConfig, err := cm.authInfoResolver.ClientConfigFor(hostPort)
+	if err != nil {
+		return nil, err
+	}
+
+	cfg := rest.CopyConfig(restConfig)
+	cfg.Host = u.Scheme + "://" + u.Host
+	cfg.APIPath = u.Path
+	if !isLocalHost(u) {
+		cfg.NextProtos = []string{"http/1.1"}
+	}
+
+	return complete(cfg)
+}
+
+func isLocalHost(u *url.URL) bool {
+	host := u.Hostname()
+	if strings.EqualFold(host, "localhost") {
+		return true
+	}
+
+	netIP := netutils.ParseIPSloppy(host)
+	if netIP != nil {
+		return netIP.IsLoopback()
+	}
+	return false
+}
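
A sketch of typical wiring, assuming the admission v1 types as the negotiated group/version (callers pick their own) and a caBundle supplied by the caller:

    package main

    import (
    	admissionv1 "k8s.io/api/admission/v1"
    	"k8s.io/apimachinery/pkg/runtime/schema"
    	webhookutil "k8s.io/apiserver/pkg/util/webhook"
    	"k8s.io/client-go/rest"
    )

    func newHookClient(caBundle []byte) (*rest.RESTClient, error) {
    	cm, err := webhookutil.NewClientManager(
    		[]schema.GroupVersion{admissionv1.SchemeGroupVersion},
    		admissionv1.AddToScheme,
    	)
    	if err != nil {
    		return nil, err
    	}
    	authResolver, err := webhookutil.NewDefaultAuthenticationInfoResolver("") // empty path: default/anonymous auth
    	if err != nil {
    		return nil, err
    	}
    	cm.SetAuthenticationInfoResolver(authResolver)
    	cm.SetServiceResolver(webhookutil.NewDefaultServiceResolver())
    	if err := cm.Validate(); err != nil {
    		return nil, err
    	}
    	// Service-based config; a Port of 0 would default to 443.
    	return cm.HookClient(webhookutil.ClientConfig{
    		Name:     "example-hook",
    		CABundle: caBundle,
    		Service:  &webhookutil.ClientConfigService{Name: "hook", Namespace: "default", Path: "/validate", Port: 443},
    	})
    }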
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/error.go b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/error.go
new file mode 100644
index 0000000000..3c5d39ae3c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/error.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"fmt"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+)
+
+// ErrCallingWebhook is returned for transport-layer errors calling webhooks. It
+// represents a failure to talk to the webhook, not the webhook rejecting a
+// request.
+type ErrCallingWebhook struct {
+	WebhookName string
+	Reason      error
+	Status      *apierrors.StatusError
+}
+
+func (e *ErrCallingWebhook) Error() string {
+	if e.Reason != nil {
+		return fmt.Sprintf("failed calling webhook %q: %v", e.WebhookName, e.Reason)
+	}
+	return fmt.Sprintf("failed calling webhook %q; no further details available", e.WebhookName)
+}
+
+// ErrWebhookRejection represents a webhook properly rejecting a request.
+type ErrWebhookRejection struct {
+	Status *apierrors.StatusError
+}
+
+func (e *ErrWebhookRejection) Error() string {
+	return e.Status.Error()
+}
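
A short sketch of how callers are expected to tell the two apart with errors.As:

    package main

    import (
    	"errors"
    	"fmt"

    	webhookutil "k8s.io/apiserver/pkg/util/webhook"
    )

    func classify(err error) {
    	var callErr *webhookutil.ErrCallingWebhook
    	var rejection *webhookutil.ErrWebhookRejection
    	switch {
    	case errors.As(err, &callErr):
    		// Transport-level failure: the webhook was never reached or never answered.
    		fmt.Println("webhook unreachable:", callErr.Reason)
    	case errors.As(err, &rejection):
    		// The webhook answered and rejected the request on purpose.
    		fmt.Println("webhook rejected request:", rejection.Status)
    	}
    }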
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/gencerts.sh b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/gencerts.sh
new file mode 100644
index 0000000000..a6426a9591
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/gencerts.sh
@@ -0,0 +1,148 @@
+#!/usr/bin/env bash
+
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# gencerts.sh generates the certificates for the webhook tests.
+#
+# It is not expected to be run often (there is no go generate rule), and mainly
+# exists for documentation purposes.
+
+CN_BASE="webhook_tests"
+
+cat > intermediate_ca.conf << EOF
+[ v3_ca ]
+subjectKeyIdentifier=hash
+authorityKeyIdentifier=keyid:always,issuer
+basicConstraints = critical,CA:true
+keyUsage = cRLSign, keyCertSign
+EOF
+
+cat > server.conf << EOF
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth, serverAuth
+subjectAltName = @alt_names
+[alt_names]
+IP.1 = 127.0.0.1
+DNS.1 = localhost
+EOF
+
+cat > server_no_san.conf << EOF
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth, serverAuth
+EOF
+
+cat > client.conf << EOF
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth, serverAuth
+subjectAltName = @alt_names
+[alt_names]
+IP.1 = 127.0.0.1
+EOF
+
+# Create a certificate authority
+openssl genrsa -out caKey.pem 2048
+openssl req -x509 -new -nodes -key caKey.pem -days 100000 -out caCert.pem -subj "/CN=${CN_BASE}_ca"
+
+# Create a second certificate authority
+openssl genrsa -out badCAKey.pem 2048
+openssl req -x509 -new -nodes -key badCAKey.pem -days 100000 -out badCACert.pem -subj "/CN=${CN_BASE}_ca"
+
+# Create an intermediate certificate authority
+openssl genrsa -out caKeyInter.pem 2048
+openssl req -new -nodes -key caKeyInter.pem -days 100000 -out caCertInter.csr -subj "/CN=${CN_BASE}_intermediate_ca"
+openssl x509 -req -in caCertInter.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out caCertInter.pem -days 100000 -extensions v3_ca -extfile intermediate_ca.conf
+
+# Create an intermediate certificate authority with sha1 signature
+openssl req -new -nodes -key caKeyInter.pem -days 100000 -out caCertInterSHA1.csr -subj "/CN=${CN_BASE}_intermediate_ca"
+openssl x509 -sha1 -req -in caCertInterSHA1.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out caCertInterSHA1.pem -days 100000 -extensions v3_ca -extfile intermediate_ca.conf
+
+# Create a server certificate
+openssl genrsa -out serverKey.pem 2048
+openssl req -new -key serverKey.pem -out server.csr -subj "/CN=${CN_BASE}_server" -config server.conf
+openssl x509 -req -in server.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out serverCert.pem -days 100000 -extensions v3_req -extfile server.conf
+
+# Create a server certificate w/o SAN
+openssl req -new -key serverKey.pem -out serverNoSAN.csr -subj "/CN=localhost" -config server_no_san.conf
+openssl x509 -req -in serverNoSAN.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out serverCertNoSAN.pem -days 100000 -extensions v3_req -extfile server_no_san.conf
+
+# Create a server certificate with SHA1 signature signed by OK intermediate CA
+openssl req -new -key serverKey.pem -out serverSHA1.csr -subj "/CN=localhost" -config server.conf
+openssl x509 -sha1 -req -in serverSHA1.csr -CA caCertInter.pem -CAkey caKeyInter.pem -CAcreateserial -out sha1ServerCertInter.pem -days 100000 -extensions v3_req -extfile server.conf
+
+# Create a server certificate signed by SHA1-signed intermediate CA
+openssl req -new -key serverKey.pem -out serverInterSHA1.csr -subj "/CN=localhost" -config server.conf
+openssl x509 -req -in serverInterSHA1.csr -CA caCertInterSHA1.pem -CAkey caKeyInter.pem -CAcreateserial -out serverCertInterSHA1.pem -days 100000 -extensions v3_req -extfile server.conf
+
+# Create a client certificate
+openssl genrsa -out clientKey.pem 2048
+openssl req -new -key clientKey.pem -out client.csr -subj "/CN=${CN_BASE}_client" -config client.conf
+openssl x509 -req -in client.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out clientCert.pem -days 100000 -extensions v3_req -extfile client.conf
+
+outfile=certs_test.go
+
+cat > $outfile << EOF
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was generated using openssl by the gencerts.sh script
+// and holds raw certificates for the webhook tests.
+
+package webhook
+EOF
+
+for file in caKey caCert badCAKey badCACert caCertInter caCertInterSHA1 serverKey serverCert serverCertNoSAN clientKey clientCert sha1ServerCertInter serverCertInterSHA1; do
+	data=$(cat ${file}.pem)
+	echo "" >> $outfile
+	echo "var $file = []byte(\`$data\`)" >> $outfile
+done
+
+# Clean up after we're done.
+rm ./*.pem
+rm ./*.csr
+rm ./*.srl
+rm ./*.conf
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/metrics.go b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/metrics.go
new file mode 100644
index 0000000000..67fc8fabe0
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/metrics.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+)
+
+var x509MissingSANCounter = metrics.NewCounter(
+	&metrics.CounterOpts{
+		Subsystem: "webhooks",
+		Namespace: "apiserver",
+		Name:      "x509_missing_san_total",
+		Help: "Counts the number of requests to servers missing SAN extension " +
+			"in their serving certificate OR the number of connection failures " +
+			"due to the lack of x509 certificate SAN extension missing " +
+			"(either/or, based on the runtime environment)",
+		StabilityLevel: metrics.ALPHA,
+	},
+)
+
+var x509InsecureSHA1Counter = metrics.NewCounter(
+	&metrics.CounterOpts{
+		Subsystem: "webhooks",
+		Namespace: "apiserver",
+		Name:      "x509_insecure_sha1_total",
+		Help: "Counts the number of requests to servers with insecure SHA1 signatures " +
+			"in their serving certificate OR the number of connection failures " +
+			"due to the insecure SHA1 signatures (either/or, based on the runtime environment)",
+		StabilityLevel: metrics.ALPHA,
+	},
+)
+
+func init() {
+	legacyregistry.MustRegister(x509MissingSANCounter)
+	legacyregistry.MustRegister(x509InsecureSHA1Counter)
+}
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go
new file mode 100644
index 0000000000..fcd953da3c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+)
+
+// ServiceResolver knows how to convert a service reference into an actual location.
+type ServiceResolver interface {
+	ResolveEndpoint(namespace, name string, port int32) (*url.URL, error)
+}
+
+type defaultServiceResolver struct{}
+
+// NewDefaultServiceResolver creates a new default service resolver.
+func NewDefaultServiceResolver() ServiceResolver {
+	return &defaultServiceResolver{}
+}
+
+// ResolveEndpoint constructs a service URL from a given namespace and name.
+// Note that the name, namespace, and port are required, and that all
+// created addresses use the HTTPS scheme.
+// for example:
+//
+//	name=ross namespace=andromeda resolves to https://ross.andromeda.svc:443
+func (sr defaultServiceResolver) ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) {
+	if len(name) == 0 || len(namespace) == 0 || port == 0 {
+		return nil, errors.New("cannot resolve an empty service name or namespace or port")
+	}
+	return &url.URL{Scheme: "https", Host: fmt.Sprintf("%s.%s.svc:%d", name, namespace, port)}, nil
+}
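
Because ServiceResolver is a single-method interface, swapping in a custom resolver is straightforward; a hypothetical test resolver that pins every service to one address:

    package main

    import "net/url"

    // staticResolver ignores the service reference and always returns one address.
    type staticResolver struct{ host string }

    func (r staticResolver) ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) {
    	return &url.URL{Scheme: "https", Host: r.host}, nil
    }

Passed to ClientManager.SetServiceResolver, this would redirect all service-based webhook traffic to the fixed host.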
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go
new file mode 100644
index 0000000000..9cf258b723
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/client-go/transport"
+)
+
+func ValidateCABundle(fldPath *field.Path, caBundle []byte) field.ErrorList {
+	var allErrors field.ErrorList
+	_, err := transport.TLSConfigFor(&transport.Config{TLS: transport.TLSConfig{CAData: caBundle}})
+	if err != nil {
+		allErrors = append(allErrors, field.Invalid(fldPath, caBundle, err.Error()))
+	}
+	return allErrors
+}
+
+// ValidateWebhookURL validates webhook's URL.
+func ValidateWebhookURL(fldPath *field.Path, URL string, forceHttps bool) field.ErrorList {
+	var allErrors field.ErrorList
+	const form = "; desired format: https://host[/path]"
+	if u, err := url.Parse(URL); err != nil {
+		allErrors = append(allErrors, field.Required(fldPath, "url must be a valid URL: "+err.Error()+form))
+	} else {
+		if forceHttps && u.Scheme != "https" {
+			allErrors = append(allErrors, field.Invalid(fldPath, u.Scheme, "'https' is the only allowed URL scheme"+form))
+		}
+		if len(u.Host) == 0 {
+			allErrors = append(allErrors, field.Invalid(fldPath, u.Host, "host must be specified"+form))
+		}
+		if u.User != nil {
+			allErrors = append(allErrors, field.Invalid(fldPath, u.User.String(), "user information is not permitted in the URL"))
+		}
+		if len(u.Fragment) != 0 {
+			allErrors = append(allErrors, field.Invalid(fldPath, u.Fragment, "fragments are not permitted in the URL"))
+		}
+		if len(u.RawQuery) != 0 {
+			allErrors = append(allErrors, field.Invalid(fldPath, u.RawQuery, "query parameters are not permitted in the URL"))
+		}
+	}
+	return allErrors
+}
+
+func ValidateWebhookService(fldPath *field.Path, namespace, name string, path *string, port int32) field.ErrorList {
+	var allErrors field.ErrorList
+
+	if len(name) == 0 {
+		allErrors = append(allErrors, field.Required(fldPath.Child("name"), "service name is required"))
+	}
+
+	if len(namespace) == 0 {
+		allErrors = append(allErrors, field.Required(fldPath.Child("namespace"), "service namespace is required"))
+	}
+
+	if errs := validation.IsValidPortNum(int(port)); errs != nil {
+		allErrors = append(allErrors, field.Invalid(fldPath.Child("port"), port, "port is not valid: "+strings.Join(errs, ", ")))
+	}
+
+	if path == nil {
+		return allErrors
+	}
+
+	// TODO: replace below with url.Parse + verifying that host is empty?
+
+	urlPath := *path
+	if urlPath == "/" || len(urlPath) == 0 {
+		return allErrors
+	}
+	if urlPath == "//" {
+		allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, "segment[0] may not be empty"))
+		return allErrors
+	}
+
+	if !strings.HasPrefix(urlPath, "/") {
+		allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, "must start with a '/'"))
+	}
+
+	urlPathToCheck := urlPath[1:]
+	if strings.HasSuffix(urlPathToCheck, "/") {
+		urlPathToCheck = urlPathToCheck[:len(urlPathToCheck)-1]
+	}
+	steps := strings.Split(urlPathToCheck, "/")
+	for i, step := range steps {
+		if len(step) == 0 {
+			allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, fmt.Sprintf("segment[%d] may not be empty", i)))
+			continue
+		}
+		failures := validation.IsDNS1123Subdomain(step)
+		for _, failure := range failures {
+			allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, fmt.Sprintf("segment[%d]: %v", i, failure)))
+		}
+	}
+
+	return allErrors
+}
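
A sketch of calling the validators from a configuration code path; the field path and parameters are illustrative:

    package main

    import (
    	"k8s.io/apimachinery/pkg/util/validation/field"
    	webhookutil "k8s.io/apiserver/pkg/util/webhook"
    )

    func validateClientConfig(url *string, svcNamespace, svcName string, svcPath *string, svcPort int32) field.ErrorList {
    	var allErrs field.ErrorList
    	base := field.NewPath("webhooks").Index(0).Child("clientConfig")
    	if url != nil {
    		// forceHttps=true: only https:// URLs are accepted.
    		allErrs = append(allErrs, webhookutil.ValidateWebhookURL(base.Child("url"), *url, true)...)
    	} else {
    		allErrs = append(allErrs, webhookutil.ValidateWebhookService(base.Child("service"), svcNamespace, svcName, svcPath, svcPort)...)
    	}
    	return allErrs
    }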
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go
new file mode 100644
index 0000000000..b03640ae8d
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package webhook implements a generic HTTP webhook plugin.
+package webhook
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apiserver/pkg/util/x509metrics"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+// defaultRequestTimeout is set for all webhook requests. This is the absolute
+// timeout of the HTTP request, including reading the response body.
+const defaultRequestTimeout = 30 * time.Second
+
+// DefaultRetryBackoffWithInitialDelay returns the default backoff parameters for webhook retry from a given initial delay.
+// Handy for clients that provide only a custom initial delay.
+func DefaultRetryBackoffWithInitialDelay(initialBackoffDelay time.Duration) wait.Backoff {
+	return wait.Backoff{
+		Duration: initialBackoffDelay,
+		Factor:   1.5,
+		Jitter:   0.2,
+		Steps:    5,
+	}
+}
+
+// GenericWebhook defines a generic client for webhooks with commonly used capabilities,
+// such as retrying requests.
+type GenericWebhook struct {
+	RestClient   *rest.RESTClient
+	RetryBackoff wait.Backoff
+	ShouldRetry  func(error) bool
+}
+
+// DefaultShouldRetry is a default implementation for the GenericWebhook ShouldRetry function property.
+// If the error reason is one of: networking (connection reset) or http (InternalServerError (500), GatewayTimeout (504), TooManyRequests (429)),
+// or apierrors.SuggestsClientDelay() returns true, then the function advises a retry.
+// Otherwise it returns false for an immediate fail.
+func DefaultShouldRetry(err error) bool {
+	// these errors indicate a transient error that should be retried.
+	if utilnet.IsConnectionReset(err) || utilnet.IsHTTP2ConnectionLost(err) || apierrors.IsInternalError(err) || apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) {
+		return true
+	}
+	// if the error sends the Retry-After header, we respect it as an explicit confirmation we should retry.
+	if _, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry {
+		return true
+	}
+	return false
+}
+
+// NewGenericWebhook creates a new GenericWebhook from the provided rest.Config.
+func NewGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFactory, config *rest.Config, groupVersions []schema.GroupVersion, retryBackoff wait.Backoff) (*GenericWebhook, error) {
+	for _, groupVersion := range groupVersions {
+		if !scheme.IsVersionRegistered(groupVersion) {
+			return nil, fmt.Errorf("webhook plugin requires enabling extension resource: %s", groupVersion)
+		}
+	}
+
+	clientConfig := rest.CopyConfig(config)
+
+	codec := codecFactory.LegacyCodec(groupVersions...)
+	clientConfig.ContentConfig.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
+
+	clientConfig.Wrap(x509metrics.NewDeprecatedCertificateRoundTripperWrapperConstructor(
+		x509MissingSANCounter,
+		x509InsecureSHA1Counter,
+	))
+
+	restClient, err := rest.UnversionedRESTClientFor(clientConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return &GenericWebhook{restClient, retryBackoff, DefaultShouldRetry}, nil
+}
+
+// WithExponentialBackoff will retry webhookFn() as specified by the given backoff parameters with exponentially
+// increasing backoff when it returns an error for which this GenericWebhook's ShouldRetry function returns true,
+// confirming it to be retriable. If no ShouldRetry has been defined for the webhook,
+// then the default one is used (DefaultShouldRetry).
+func (g *GenericWebhook) WithExponentialBackoff(ctx context.Context, webhookFn func() rest.Result) rest.Result {
+	var result rest.Result
+	shouldRetry := g.ShouldRetry
+	if shouldRetry == nil {
+		shouldRetry = DefaultShouldRetry
+	}
+	WithExponentialBackoff(ctx, g.RetryBackoff, func() error {
+		result = webhookFn()
+		return result.Error()
+	}, shouldRetry)
+	return result
+}
+
+// WithExponentialBackoff will retry webhookFn, as specified by the given retryBackoff, with exponentially
+// increasing delays when it returns an error for which shouldRetry returns true, confirming it to be retriable.
+func WithExponentialBackoff(ctx context.Context, retryBackoff wait.Backoff, webhookFn func() error, shouldRetry func(error) bool) error {
+	// having a webhook error allows us to track the last actual webhook error for requests that
+	// are later cancelled or time out.
+	var webhookErr error
+	err := wait.ExponentialBackoffWithContext(ctx, retryBackoff, func(_ context.Context) (bool, error) {
+		webhookErr = webhookFn()
+		if shouldRetry(webhookErr) {
+			return false, nil
+		}
+		if webhookErr != nil {
+			return false, webhookErr
+		}
+		return true, nil
+	})
+
+	switch {
+	// we check for webhookErr first, if webhookErr is set it's the most important error to return.
+	case webhookErr != nil:
+		return webhookErr
+	case err != nil:
+		return fmt.Errorf("webhook call failed: %s", err.Error())
+	default:
+		return nil
+	}
+}
+
+func LoadKubeconfig(kubeConfigFile string, customDial utilnet.DialFunc) (*rest.Config, error) {
+	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+	loadingRules.ExplicitPath = kubeConfigFile
+	loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
+
+	clientConfig, err := loader.ClientConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	clientConfig.Dial = customDial
+
+	// Kubeconfigs can't set a timeout; this can only be set through a command-line flag.
+	//
+	// https://github.com/kubernetes/client-go/blob/master/tools/clientcmd/overrides.go
+	//
+	// Set this to something reasonable so requests to webhooks don't hang forever.
+	clientConfig.Timeout = defaultRequestTimeout
+
+	// Avoid client-side rate limiting when talking to the webhook backend.
+	// Rate limiting should happen when deciding how many requests to serve.
+	clientConfig.QPS = -1
+
+	return clientConfig, nil
+}
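
A sketch of driving a call through the retry helper; scheme, codecs, client config, and payload are assumed to come from the caller:

    package main

    import (
    	"context"
    	"time"

    	"k8s.io/apimachinery/pkg/runtime"
    	"k8s.io/apimachinery/pkg/runtime/schema"
    	"k8s.io/apimachinery/pkg/runtime/serializer"
    	webhookutil "k8s.io/apiserver/pkg/util/webhook"
    	"k8s.io/client-go/rest"
    )

    func callHook(ctx context.Context, scheme *runtime.Scheme, codecs serializer.CodecFactory, cfg *rest.Config, gv schema.GroupVersion, payload runtime.Object) error {
    	gw, err := webhookutil.NewGenericWebhook(scheme, codecs, cfg, []schema.GroupVersion{gv}, webhookutil.DefaultRetryBackoffWithInitialDelay(500*time.Millisecond))
    	if err != nil {
    		return err
    	}
    	// Each attempt re-runs the closure; retries happen only while
    	// gw.ShouldRetry (DefaultShouldRetry here) deems the error transient.
    	result := gw.WithExponentialBackoff(ctx, func() rest.Result {
    		return gw.RestClient.Post().Body(payload).Do(ctx)
    	})
    	return result.Error()
    }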
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/util/x509metrics/server_cert_deprecations.go b/hack/tools/vendor/k8s.io/apiserver/pkg/util/x509metrics/server_cert_deprecations.go
new file mode 100644
index 0000000000..464510ea8f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/util/x509metrics/server_cert_deprecations.go
@@ -0,0 +1,225 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package x509metrics
+
+import (
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net/http"
+	"reflect"
+	"strings"
+
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apiserver/pkg/audit"
+	"k8s.io/component-base/metrics"
+	"k8s.io/klog/v2"
+)
+
+var _ utilnet.RoundTripperWrapper = &x509DeprecatedCertificateMetricsRTWrapper{}
+
+type x509DeprecatedCertificateMetricsRTWrapper struct {
+	rt http.RoundTripper
+
+	checkers []deprecatedCertificateAttributeChecker
+}
+
+type deprecatedCertificateAttributeChecker interface {
+	// CheckRoundTripError returns true if the err is an error specific
+	// to this deprecated certificate attribute
+	CheckRoundTripError(err error) bool
+	// CheckPeerCertificates returns true if the deprecated attribute/value pair
+	// was found in a given certificate in the http.Response.TLS.PeerCertificates bundle
+	CheckPeerCertificates(certs []*x509.Certificate) bool
+	// IncreaseMetricsCounter increases the counter internal to this interface.
+	// Use the req to derive and log information useful for troubleshooting the certificate issue.
+	IncreaseMetricsCounter(req *http.Request)
+}
+
+// counterRaiser is a helper structure to include in certificate deprecation checkers.
+// It implements the IncreaseMetricsCounter() method so that, when included in the checker,
+// it does not have to be reimplemented.
+type counterRaiser struct {
+	counter *metrics.Counter
+	// programmatic id used in log and audit annotation prefixes
+	id string
+	// human readable explanation
+	reason string
+}
+
+func (c *counterRaiser) IncreaseMetricsCounter(req *http.Request) {
+	if req != nil && req.URL != nil {
+		if hostname := req.URL.Hostname(); len(hostname) > 0 {
+			prefix := fmt.Sprintf("%s.invalid-cert.kubernetes.io", c.id)
+			klog.Infof("%s: invalid certificate detected connecting to %q: %s", prefix, hostname, c.reason)
+			audit.AddAuditAnnotation(req.Context(), prefix+"/"+hostname, c.reason)
+		}
+	}
+	c.counter.Inc()
+}
+
+// NewDeprecatedCertificateRoundTripperWrapperConstructor returns a RoundTripper wrapper that's usable within ClientConfig.Wrap.
+//
+// It increases the `missingSAN` counter whenever:
+//  1. we get an x509.HostnameError with string `x509: certificate relies on legacy Common Name field`,
+//     which indicates an error caused by the deprecation of the Common Name field when verifying a remote
+//     hostname
+//  2. the server certificate in the response contains no SAN. This indicates that this binary runs
+//     with GODEBUG=x509ignoreCN=0 in its environment
+//
+// It increases the `sha1` counter whenever:
+//  1. we get an x509.InsecureAlgorithmError with string `SHA1`,
+//     which indicates an error caused by an insecure SHA1 signature
+//  2. the server certificate in the response contains a SHA1WithRSA or ECDSAWithSHA1 signature.
+//     This indicates that this binary runs with GODEBUG=x509sha1=1 in its environment
+func NewDeprecatedCertificateRoundTripperWrapperConstructor(missingSAN, sha1 *metrics.Counter) func(rt http.RoundTripper) http.RoundTripper {
+	return func(rt http.RoundTripper) http.RoundTripper {
+		return &x509DeprecatedCertificateMetricsRTWrapper{
+			rt: rt,
+			checkers: []deprecatedCertificateAttributeChecker{
+				NewSANDeprecatedChecker(missingSAN),
+				NewSHA1SignatureDeprecatedChecker(sha1),
+			},
+		}
+	}
+}
+
+func (w *x509DeprecatedCertificateMetricsRTWrapper) RoundTrip(req *http.Request) (*http.Response, error) {
+	resp, err := w.rt.RoundTrip(req)
+
+	if err != nil {
+		for _, checker := range w.checkers {
+			if checker.CheckRoundTripError(err) {
+				checker.IncreaseMetricsCounter(req)
+			}
+		}
+	} else if resp != nil {
+		if resp.TLS != nil && len(resp.TLS.PeerCertificates) > 0 {
+			for _, checker := range w.checkers {
+				if checker.CheckPeerCertificates(resp.TLS.PeerCertificates) {
+					checker.IncreaseMetricsCounter(req)
+				}
+			}
+		}
+	}
+
+	return resp, err
+}
+
+func (w *x509DeprecatedCertificateMetricsRTWrapper) WrappedRoundTripper() http.RoundTripper {
+	return w.rt
+}
+
+var _ deprecatedCertificateAttributeChecker = &missingSANChecker{}
+
+type missingSANChecker struct {
+	counterRaiser
+}
+
+func NewSANDeprecatedChecker(counter *metrics.Counter) *missingSANChecker {
+	return &missingSANChecker{
+		counterRaiser: counterRaiser{
+			counter: counter,
+			id:      "missing-san",
+			reason:  "relies on a legacy Common Name field instead of the SAN extension for subject validation",
+		},
+	}
+}
+
+// CheckRoundTripError returns true when we're running w/o GODEBUG=x509ignoreCN=0
+// and the client reports a HostnameError about the legacy CN fields
+func (c *missingSANChecker) CheckRoundTripError(err error) bool {
+	if err != nil && errors.As(err, &x509.HostnameError{}) && strings.Contains(err.Error(), "x509: certificate relies on legacy Common Name field") {
+		// increase the count of registered failures due to Go 1.15 x509 cert Common Name deprecation
+		return true
+	}
+
+	return false
+}
+
+// CheckPeerCertificates returns true when the server response contains
+// a leaf certificate w/o the SAN extension
+func (c *missingSANChecker) CheckPeerCertificates(peerCertificates []*x509.Certificate) bool {
+	if len(peerCertificates) > 0 {
+		if serverCert := peerCertificates[0]; !hasSAN(serverCert) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func hasSAN(c *x509.Certificate) bool {
+	sanOID := []int{2, 5, 29, 17}
+
+	for _, e := range c.Extensions {
+		if e.Id.Equal(sanOID) {
+			return true
+		}
+	}
+	return false
+}
+
+type sha1SignatureChecker struct {
+	*counterRaiser
+}
+
+func NewSHA1SignatureDeprecatedChecker(counter *metrics.Counter) *sha1SignatureChecker {
+	return &sha1SignatureChecker{
+		counterRaiser: &counterRaiser{
+			counter: counter,
+			id:      "insecure-sha1",
+			reason:  "uses an insecure SHA-1 signature",
+		},
+	}
+}
+
+// CheckRoundTripError returns true when we're running w/o GODEBUG=x509sha1=1
+// and the client reports an InsecureAlgorithmError about a SHA1 signature
+func (c *sha1SignatureChecker) CheckRoundTripError(err error) bool {
+	var unknownAuthorityError x509.UnknownAuthorityError
+	if err == nil {
+		return false
+	}
+	if !errors.As(err, &unknownAuthorityError) {
+		return false
+	}
+
+	errMsg := err.Error()
+	if strIdx := strings.Index(errMsg, "x509: cannot verify signature: insecure algorithm"); strIdx != -1 && strings.Contains(errMsg[strIdx:], "SHA1") {
+		// increase the count of registered failures due to Go 1.18 x509 sha1 signature deprecation
+		return true
+	}
+
+	return false
+}
+
+// CheckPeerCertificates returns true when the server response contains
+// a non-root non-self-signed certificate with a deprecated SHA1 signature
+func (c *sha1SignatureChecker) CheckPeerCertificates(peerCertificates []*x509.Certificate) bool {
+	// check all received non-self-signed certificates for deprecated signing algorithms
+	for _, cert := range peerCertificates {
+		if cert.SignatureAlgorithm == x509.SHA1WithRSA || cert.SignatureAlgorithm == x509.ECDSAWithSHA1 {
+			// the SHA-1 deprecation does not involve self-signed root certificates
+			if !reflect.DeepEqual(cert.Issuer, cert.Subject) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
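+
+// Minimal usage sketch (illustrative only; counter stands for a
+// *metrics.Counter registered elsewhere and cert for a parsed
+// *x509.Certificate). The checkers can also be exercised directly,
+// outside the round-tripper wrapper:
+//
+//	san := NewSANDeprecatedChecker(counter)
+//	if san.CheckPeerCertificates([]*x509.Certificate{cert}) {
+//		// the leaf certificate lacks the SAN extension
+//	}
+//	sha1 := NewSHA1SignatureDeprecatedChecker(counter)
+//	_ = sha1.CheckPeerCertificates([]*x509.Certificate{cert})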
diff --git a/hack/tools/vendor/k8s.io/apiserver/pkg/warning/context.go b/hack/tools/vendor/k8s.io/apiserver/pkg/warning/context.go
new file mode 100644
index 0000000000..2009225458
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/apiserver/pkg/warning/context.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package warning
+
+import (
+	"context"
+)
+
+// The key type is unexported to prevent collisions
+type key int
+
+const (
+	// warningRecorderKey is the context key for the warning recorder.
+	warningRecorderKey key = iota
+)
+
+// Recorder provides a method for recording warnings
+type Recorder interface {
+	// AddWarning adds the specified warning to the response.
+	// agent must be valid UTF-8, and must not contain spaces, quotes, backslashes, or control characters.
+	// text must be valid UTF-8, and must not contain control characters.
+	AddWarning(agent, text string)
+}
+
+// WithWarningRecorder returns a new context that wraps the provided context and contains the provided Recorder implementation.
+// The returned context can be passed to AddWarning().
+func WithWarningRecorder(ctx context.Context, recorder Recorder) context.Context {
+	return context.WithValue(ctx, warningRecorderKey, recorder)
+}
+
+func warningRecorderFrom(ctx context.Context) (Recorder, bool) {
+	recorder, ok := ctx.Value(warningRecorderKey).(Recorder)
+	return recorder, ok
+}
+
+// AddWarning records a warning with the specified agent and text against the Recorder that WithWarningRecorder() stored in the provided context.
+// If no Recorder exists in the provided context, this is a no-op.
+// agent must be valid UTF-8, and must not contain spaces, quotes, backslashes, or control characters.
+// text must be valid UTF-8, and must not contain control characters.
+func AddWarning(ctx context.Context, agent string, text string) {
+	recorder, ok := warningRecorderFrom(ctx)
+	if !ok {
+		return
+	}
+	recorder.AddWarning(agent, text)
+}
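+
+// Minimal usage sketch (illustrative only; recorderImpl is a hypothetical
+// Recorder implementation). A server stores a recorder in the request
+// context once; downstream code can then call AddWarning unconditionally,
+// since it is a no-op when no recorder is present:
+//
+//	ctx := WithWarningRecorder(req.Context(), recorderImpl{})
+//	AddWarning(ctx, "", "metadata.finalizers: deprecated value")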
diff --git a/hack/tools/vendor/k8s.io/client-go/LICENSE b/hack/tools/vendor/k8s.io/client-go/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/hack/tools/vendor/k8s.io/client-go/features/envvar.go b/hack/tools/vendor/k8s.io/client-go/features/envvar.go
new file mode 100644
index 0000000000..8c3f887dc4
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/features/envvar.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package features
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"sync"
+	"sync/atomic"
+
+	"k8s.io/apimachinery/pkg/util/naming"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/klog/v2"
+)
+
+// internalPackages are packages that are ignored when creating a name for featureGates. These packages are in the common
+// call chains, so they'd be unhelpful as names.
+var internalPackages = []string{"k8s.io/client-go/features/envvar.go"}
+
+var _ Gates = &envVarFeatureGates{}
+
+// newEnvVarFeatureGates creates a feature gate that allows for registration
+// of features and checking if the features are enabled.
+//
+// On the first call to Enabled, the effective state of all known features is loaded from
+// environment variables. The environment variable read for a given feature is formed by
+// concatenating the prefix "KUBE_FEATURE_" with the feature's name.
+//
+// For example, if you have a feature named "MyFeature",
+// setting the environment variable "KUBE_FEATURE_MyFeature"
+// allows you to configure the state of that feature.
+//
+// Please note that environment variables can only be set to boolean values.
+// Incorrect values are ignored and logged.
+//
+// Features can also be set directly via the Set method.
+// In that case, these features take precedence over
+// features set via environment variables.
+func newEnvVarFeatureGates(features map[Feature]FeatureSpec) *envVarFeatureGates {
+	known := map[Feature]FeatureSpec{}
+	for name, spec := range features {
+		known[name] = spec
+	}
+
+	fg := &envVarFeatureGates{
+		callSiteName: naming.GetNameFromCallsite(internalPackages...),
+		known:        known,
+	}
+	fg.enabledViaEnvVar.Store(map[Feature]bool{})
+	fg.enabledViaSetMethod = map[Feature]bool{}
+
+	return fg
+}
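+
+// Minimal sketch of the precedence described above (illustrative only;
+// "MyFeature" is a hypothetical gate). A value supplied via Set wins over
+// the KUBE_FEATURE_MyFeature environment variable, which in turn wins
+// over the spec default:
+//
+//	fg := newEnvVarFeatureGates(map[Feature]FeatureSpec{
+//		"MyFeature": {Default: false, PreRelease: Beta},
+//	})
+//	os.Setenv("KUBE_FEATURE_MyFeature", "true")
+//	_ = fg.Set("MyFeature", false)
+//	enabled := fg.Enabled("MyFeature") // false: Set overrides the env var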
+
+// envVarFeatureGates implements Gates and allows for feature registration.
+type envVarFeatureGates struct {
+	// callSiteName holds the name of the file
+	// that created this instance
+	callSiteName string
+
+	// readEnvVarsOnce guards reading environmental variables
+	readEnvVarsOnce sync.Once
+
+	// known holds known feature gates
+	known map[Feature]FeatureSpec
+
+	// enabledViaEnvVar holds a map[Feature]bool
+	// with values explicitly set via env var
+	enabledViaEnvVar atomic.Value
+
+	// lockEnabledViaSetMethod protects enabledViaSetMethod
+	lockEnabledViaSetMethod sync.RWMutex
+
+	// enabledViaSetMethod holds values explicitly set
+	// via Set method, features stored in this map take
+	// precedence over features stored in enabledViaEnvVar
+	enabledViaSetMethod map[Feature]bool
+
+	// readEnvVars holds the boolean value which
+	// indicates whether readEnvVarsOnce has been called.
+	readEnvVars atomic.Bool
+}
+
+// Enabled returns true if the key is enabled. If the key is not known, this call will panic.
+func (f *envVarFeatureGates) Enabled(key Feature) bool {
+	if v, ok := f.wasFeatureEnabledViaSetMethod(key); ok {
+		// ensure that the state of all known features
+		// is loaded from environment variables
+		// on the first call to the Enabled method.
+		if !f.hasAlreadyReadEnvVar() {
+			_ = f.getEnabledMapFromEnvVar()
+		}
+		return v
+	}
+	if v, ok := f.getEnabledMapFromEnvVar()[key]; ok {
+		return v
+	}
+	if v, ok := f.known[key]; ok {
+		return v.Default
+	}
+	panic(fmt.Errorf("feature %q is not registered in FeatureGates %q", key, f.callSiteName))
+}
+
+// Set sets the given feature to the given value.
+//
+// Features set via this method take precedence over
+// the features set via environment variables.
+func (f *envVarFeatureGates) Set(featureName Feature, featureValue bool) error {
+	feature, ok := f.known[featureName]
+	if !ok {
+		return fmt.Errorf("feature %q is not registered in FeatureGates %q", featureName, f.callSiteName)
+	}
+	if feature.LockToDefault && feature.Default != featureValue {
+		return fmt.Errorf("cannot set feature gate %q to %v, feature is locked to %v", featureName, featureValue, feature.Default)
+	}
+
+	f.lockEnabledViaSetMethod.Lock()
+	defer f.lockEnabledViaSetMethod.Unlock()
+	f.enabledViaSetMethod[featureName] = featureValue
+
+	return nil
+}
+
+// getEnabledMapFromEnvVar will fill the enabled map on the first call.
+// This is the only time a known feature can be set to a value
+// read from the corresponding environment variable.
+func (f *envVarFeatureGates) getEnabledMapFromEnvVar() map[Feature]bool {
+	f.readEnvVarsOnce.Do(func() {
+		featureGatesState := map[Feature]bool{}
+		for feature, featureSpec := range f.known {
+			featureState, featureStateSet := os.LookupEnv(fmt.Sprintf("KUBE_FEATURE_%s", feature))
+			if !featureStateSet {
+				continue
+			}
+			boolVal, boolErr := strconv.ParseBool(featureState)
+			switch {
+			case boolErr != nil:
+				utilruntime.HandleError(fmt.Errorf("cannot set feature gate %q to %q, due to %v", feature, featureState, boolErr))
+			case featureSpec.LockToDefault:
+				if boolVal != featureSpec.Default {
+					utilruntime.HandleError(fmt.Errorf("cannot set feature gate %q to %q, feature is locked to %v", feature, featureState, featureSpec.Default))
+					break
+				}
+				featureGatesState[feature] = featureSpec.Default
+			default:
+				featureGatesState[feature] = boolVal
+			}
+		}
+		f.enabledViaEnvVar.Store(featureGatesState)
+		f.readEnvVars.Store(true)
+
+		for feature, featureSpec := range f.known {
+			if featureState, ok := featureGatesState[feature]; ok {
+				klog.V(1).InfoS("Feature gate updated state", "feature", feature, "enabled", featureState)
+				continue
+			}
+			klog.V(1).InfoS("Feature gate default state", "feature", feature, "enabled", featureSpec.Default)
+		}
+	})
+	return f.enabledViaEnvVar.Load().(map[Feature]bool)
+}
+
+func (f *envVarFeatureGates) wasFeatureEnabledViaSetMethod(key Feature) (bool, bool) {
+	f.lockEnabledViaSetMethod.RLock()
+	defer f.lockEnabledViaSetMethod.RUnlock()
+
+	value, found := f.enabledViaSetMethod[key]
+	return value, found
+}
+
+func (f *envVarFeatureGates) hasAlreadyReadEnvVar() bool {
+	return f.readEnvVars.Load()
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/features/features.go b/hack/tools/vendor/k8s.io/client-go/features/features.go
new file mode 100644
index 0000000000..5ccdcc55f6
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/features/features.go
@@ -0,0 +1,143 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package features
+
+import (
+	"errors"
+	"sync/atomic"
+
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// NOTE: types Feature, FeatureSpec, prerelease (and its values)
+// were duplicated from the component-base repository
+//
+// for more information please refer to https://docs.google.com/document/d/1g9BGCRw-7ucUxO6OtCWbb3lfzUGA_uU9178wLdXAIfs
+
+const (
+	// Values for PreRelease.
+	Alpha = prerelease("ALPHA")
+	Beta  = prerelease("BETA")
+	GA    = prerelease("")
+
+	// Deprecated
+	Deprecated = prerelease("DEPRECATED")
+)
+
+type prerelease string
+
+type Feature string
+
+type FeatureSpec struct {
+	// Default is the default enablement state for the feature
+	Default bool
+	// LockToDefault indicates that the feature is locked to its default and cannot be changed
+	LockToDefault bool
+	// PreRelease indicates the maturity level of the feature
+	PreRelease prerelease
+}
+
+// Gates indicates whether a given feature is enabled or not.
+type Gates interface {
+	// Enabled returns true if the key is enabled.
+	Enabled(key Feature) bool
+}
+
+// Registry represents an external feature gates registry.
+type Registry interface {
+	// Add adds existing feature gates to the provided registry.
+	//
+	// As of today, this method is used by AddFeaturesToExistingFeatureGates and
+	// ReplaceFeatureGates to take control of the features exposed by this library.
+	Add(map[Feature]FeatureSpec) error
+}
+
+// FeatureGates returns the feature gates exposed by this library.
+//
+// By default, only the default feature gates will be returned.
+// The default implementation allows controlling the features
+// via environment variables.
+// For example, if you have a feature named "MyFeature",
+// setting the environment variable "KUBE_FEATURE_MyFeature"
+// allows you to configure the state of that feature.
+//
+// Please note that the actual set of feature gates
+// might be overwritten by calling the ReplaceFeatureGates method.
+func FeatureGates() Gates {
+	return featureGates.Load().(*featureGatesWrapper).Gates
+}
+
+// AddFeaturesToExistingFeatureGates adds the default feature gates to the provided registry.
+// Usually this function is combined with ReplaceFeatureGates to take control of the
+// features exposed by this library.
+func AddFeaturesToExistingFeatureGates(registry Registry) error {
+	return registry.Add(defaultKubernetesFeatureGates)
+}
+
+// ReplaceFeatureGates overwrites the default implementation of the feature gates
+// used by this library.
+//
+// Useful for binaries that would like to have full control of the features
+// exposed by this library, such as allowing consumers of a binary
+// to interact with the features via a command line flag.
+//
+// For example:
+//
+//	// first, register client-go's features to your registry.
+//	clientgofeaturegate.AddFeaturesToExistingFeatureGates(utilfeature.DefaultMutableFeatureGate)
+//	// then replace client-go's feature gates implementation with your implementation
+//	clientgofeaturegate.ReplaceFeatureGates(utilfeature.DefaultMutableFeatureGate)
+func ReplaceFeatureGates(newFeatureGates Gates) {
+	if replaceFeatureGatesWithWarningIndicator(newFeatureGates) {
+		utilruntime.HandleError(errors.New("the default feature gates implementation has already been used and now it's being overwritten. This might lead to unexpected behaviour. Check your initialization order"))
+	}
+}
+
+func replaceFeatureGatesWithWarningIndicator(newFeatureGates Gates) bool {
+	shouldProduceWarning := false
+
+	if defaultFeatureGates, ok := FeatureGates().(*envVarFeatureGates); ok {
+		if defaultFeatureGates.hasAlreadyReadEnvVar() {
+			shouldProduceWarning = true
+		}
+	}
+	wrappedFeatureGates := &featureGatesWrapper{newFeatureGates}
+	featureGates.Store(wrappedFeatureGates)
+
+	return shouldProduceWarning
+}
+
+func init() {
+	envVarGates := newEnvVarFeatureGates(defaultKubernetesFeatureGates)
+
+	wrappedFeatureGates := &featureGatesWrapper{envVarGates}
+	featureGates.Store(wrappedFeatureGates)
+}
+
+// featureGatesWrapper is a thin wrapper to satisfy the featureGates variable (atomic.Value).
+// That is, all calls to Store for a given Value must use values of the same concrete type.
+type featureGatesWrapper struct {
+	Gates
+}
+
+var (
+	// featureGates is a shared global FeatureGates.
+	//
+	// Top-level commands/options setup that needs to modify these feature gates
+	// should use AddFeaturesToExistingFeatureGates followed by ReplaceFeatureGates.
+	featureGates = &atomic.Value{}
+)
diff --git a/hack/tools/vendor/k8s.io/client-go/features/known_features.go b/hack/tools/vendor/k8s.io/client-go/features/known_features.go
new file mode 100644
index 0000000000..a74f6a8333
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/features/known_features.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package features
+
+const (
+	// Every feature gate should be added here following this template:
+	//
+	// // owner: @username
+	// // alpha: v1.4
+	// MyFeature Feature = "MyFeature"
+	//
+	// Feature gates should be listed in alphabetical, case-sensitive
+	// (upper before any lower case character) order. This reduces the risk
+	// of code conflicts because changes are more likely to be scattered
+	// across the file.
+
+	// owner: @benluddy
+	// kep: https://kep.k8s.io/4222
+	// alpha: 1.32
+	//
+	// If disabled, clients configured to accept "application/cbor" will instead accept
+	// "application/json" with the same relative preference, and clients configured to write
+	// "application/cbor" or "application/apply-patch+cbor" will instead write
+	// "application/json" or "application/apply-patch+yaml", respectively.
+	ClientsAllowCBOR Feature = "ClientsAllowCBOR"
+
+	// owner: @benluddy
+	// kep: https://kep.k8s.io/4222
+	// alpha: 1.32
+	//
+	// If enabled, and only if ClientsAllowCBOR is also enabled, the default request content
+	// type (if not explicitly configured) and the dynamic client's request content type both
+	// become "application/cbor" instead of "application/json". The default content type for
+	// apply patch requests becomes "application/apply-patch+cbor" instead of
+	// "application/apply-patch+yaml".
+	ClientsPreferCBOR Feature = "ClientsPreferCBOR"
+
+	// owner: @nilekhc
+	// alpha: v1.30
+	InformerResourceVersion Feature = "InformerResourceVersion"
+
+	// owner: @p0lyn0mial
+	// beta: v1.30
+	//
+	// Allow the client to get a stream of individual items instead of chunking from the server.
+	//
+	// NOTE:
+	//  The feature is disabled in Beta by default because
+	//  it will only be turned on for selected control plane component(s).
+	WatchListClient Feature = "WatchListClient"
+)
+
+// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
+//
+// To add a new feature, define a key for it above and add it here.
+// Once registered with the binary, the features can, by default, be controlled using environment variables.
+// For more details, please see the envVarFeatureGates implementation.
+var defaultKubernetesFeatureGates = map[Feature]FeatureSpec{
+	ClientsAllowCBOR:        {Default: false, PreRelease: Alpha},
+	ClientsPreferCBOR:       {Default: false, PreRelease: Alpha},
+	InformerResourceVersion: {Default: false, PreRelease: Alpha},
+	WatchListClient:         {Default: false, PreRelease: Beta},
+}
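+
+// Checking a gate from this table is a one-liner (illustrative only,
+// using the package-level accessor defined in features.go):
+//
+//	if FeatureGates().Enabled(WatchListClient) {
+//		// stream individual list items instead of chunked pages
+//	}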
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS
new file mode 100644
index 0000000000..4dfbb98aec
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+# approval on api packages bubbles to api-approvers
+reviewers:
+  - sig-auth-authenticators-approvers
+  - sig-auth-authenticators-reviewers
+labels:
+  - sig/auth
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
new file mode 100644
index 0000000000..b99459757e
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +groupName=client.authentication.k8s.io
+
+package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication"
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go
new file mode 100644
index 0000000000..ee5c338e38
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the experimental API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/pkg/apis/clientauthentication"
+	"k8s.io/client-go/pkg/apis/clientauthentication/v1"
+	"k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
+)
+
+// Install registers the API group and adds types to a scheme
+func Install(scheme *runtime.Scheme) {
+	utilruntime.Must(clientauthentication.AddToScheme(scheme))
+	utilruntime.Must(v1.AddToScheme(scheme))
+	utilruntime.Must(v1beta1.AddToScheme(scheme))
+}
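+
+// Minimal usage sketch (illustrative only): building a scheme that can
+// decode every supported version of ExecCredential:
+//
+//	scheme := runtime.NewScheme()
+//	Install(scheme)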
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go
new file mode 100644
index 0000000000..e4fbc3ea9d
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientauthentication
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "client.authentication.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&ExecCredential{},
+	)
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
new file mode 100644
index 0000000000..dae1bf1b07
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientauthentication
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExecCredential is used by exec-based plugins to communicate credentials to
+// HTTP transports.
+type ExecCredential struct {
+	metav1.TypeMeta
+
+	// Spec holds information passed to the plugin by the transport. This contains
+	// request and runtime specific information, such as if the session is interactive.
+	Spec ExecCredentialSpec
+
+	// Status is filled in by the plugin and holds the credentials that the transport
+	// should use to contact the API.
+	// +optional
+	Status *ExecCredentialStatus
+}
+
+// ExecCredentialSpec holds request and runtime specific information provided by
+// the transport.
+type ExecCredentialSpec struct {
+	// Interactive is true when the transport detects the command is being called from an
+	// interactive prompt, i.e., when stdin has been passed to this exec plugin.
+	// +optional
+	Interactive bool
+
+	// Cluster contains information to allow an exec plugin to communicate with the
+	// kubernetes cluster being authenticated to. Note that Cluster is non-nil only
+	// when provideClusterInfo is set to true in the exec provider config (i.e.,
+	// ExecConfig.ProvideClusterInfo).
+	// +optional
+	Cluster *Cluster
+}
+
+// ExecCredentialStatus holds credentials for the transport to use.
+type ExecCredentialStatus struct {
+	// ExpirationTimestamp indicates a time when the provided credentials expire.
+	// +optional
+	ExpirationTimestamp *metav1.Time
+	// Token is a bearer token used by the client for request authentication.
+	// +optional
+	Token string `datapolicy:"token"`
+	// PEM-encoded client TLS certificate.
+	// +optional
+	ClientCertificateData string
+	// PEM-encoded client TLS private key.
+	// +optional
+	ClientKeyData string `datapolicy:"secret-key"`
+}
+
+// Cluster contains information to allow an exec plugin to communicate
+// with the kubernetes cluster being authenticated to.
+//
+// To ensure that this struct contains everything someone would need to communicate
+// with a kubernetes cluster (just like they would via a kubeconfig), the fields
+// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception
+// of CertificateAuthority, since CA data will always be passed to the plugin as bytes.
+type Cluster struct {
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string
+	// TLSServerName is passed to the server for SNI and is used in the client to
+	// check server certificates against. If ServerName is empty, the hostname
+	// used to contact the server is used.
+	// +optional
+	TLSServerName string
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate.
+	// This will make your HTTPS connections insecure.
+	// +optional
+	InsecureSkipTLSVerify bool
+	// CAData contains PEM-encoded certificate authority certificates.
+	// If empty, system roots should be used.
+	// +listType=atomic
+	// +optional
+	CertificateAuthorityData []byte
+	// ProxyURL is the URL to the proxy to be used for all requests to this
+	// cluster.
+	// +optional
+	ProxyURL string
+	// DisableCompression allows the client to opt out of response compression for all requests to the server. This is useful
+	// to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on
+	// compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296.
+	// +optional
+	DisableCompression bool
+	// Config holds additional config data that is specific to the exec
+	// plugin with regards to the cluster being authenticated to.
+	//
+	// This data is sourced from the clientcmd Cluster object's
+	// extensions[client.authentication.k8s.io/exec] field:
+	//
+	// clusters:
+	// - name: my-cluster
+	//   cluster:
+	//     ...
+	//     extensions:
+	//     - name: client.authentication.k8s.io/exec  # reserved extension name for per cluster exec config
+	//       extension:
+	//         audience: 06e3fbd18de8  # arbitrary config
+	//
+	// In some environments, the user config may be exactly the same across many clusters
+	// (i.e. call this exec plugin) minus some details that are specific to each cluster
+	// such as the audience.  This field allows the per cluster config to be directly
+	// specified with the cluster info.  Using this field to store secret data is not
+	// recommended as one of the prime benefits of exec plugins is that no secrets need
+	// to be stored directly in the kubeconfig.
+	// +optional
+	Config runtime.Object
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go
new file mode 100644
index 0000000000..94ca35c2c9
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=client.authentication.k8s.io
+
+package v1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1"
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go
new file mode 100644
index 0000000000..bfc719a42a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "client.authentication.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&ExecCredential{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
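+
+// Minimal usage sketch (illustrative only): registering just this version
+// with a scheme, without pulling in the internal types:
+//
+//	scheme := runtime.NewScheme()
+//	if err := AddToScheme(scheme); err != nil {
+//		// handle the registration failure
+//	}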
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go
new file mode 100644
index 0000000000..b9082e8145
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExecCredential is used by exec-based plugins to communicate credentials to
+// HTTP transports.
+type ExecCredential struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Spec holds information passed to the plugin by the transport.
+	Spec ExecCredentialSpec `json:"spec,omitempty"`
+
+	// Status is filled in by the plugin and holds the credentials that the transport
+	// should use to contact the API.
+	// +optional
+	Status *ExecCredentialStatus `json:"status,omitempty"`
+}
+
+// ExecCredentialSpec holds request and runtime specific information provided by
+// the transport.
+type ExecCredentialSpec struct {
+	// Cluster contains information to allow an exec plugin to communicate with the
+	// kubernetes cluster being authenticated to. Note that Cluster is non-nil only
+	// when provideClusterInfo is set to true in the exec provider config (i.e.,
+	// ExecConfig.ProvideClusterInfo).
+	// +optional
+	Cluster *Cluster `json:"cluster,omitempty"`
+
+	// Interactive declares whether stdin has been passed to this exec plugin.
+	Interactive bool `json:"interactive"`
+}
+
+// ExecCredentialStatus holds credentials for the transport to use.
+//
+// Token and ClientKeyData are sensitive fields. This data should only be
+// transmitted in-memory between client and exec plugin process. Exec plugin
+// itself should at least be protected via file permissions.
+type ExecCredentialStatus struct {
+	// ExpirationTimestamp indicates a time when the provided credentials expire.
+	// +optional
+	ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"`
+	// Token is a bearer token used by the client for request authentication.
+	Token string `json:"token,omitempty" datapolicy:"token"`
+	// PEM-encoded client TLS certificates (including intermediates, if any).
+	ClientCertificateData string `json:"clientCertificateData,omitempty"`
+	// PEM-encoded private key for the above certificate.
+	ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"`
+}
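+
+// On the wire, a plugin returning a bearer token writes an ExecCredential
+// like the following to stdout (illustrative values only):
+//
+//	{
+//	  "apiVersion": "client.authentication.k8s.io/v1",
+//	  "kind": "ExecCredential",
+//	  "status": {
+//	    "token": "<token>",
+//	    "expirationTimestamp": "2006-01-02T15:04:05Z"
+//	  }
+//	}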
+
+// Cluster contains information to allow an exec plugin to communicate
+// with the kubernetes cluster being authenticated to.
+//
+// To ensure that this struct contains everything someone would need to communicate
+// with a kubernetes cluster (just like they would via a kubeconfig), the fields
+// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception
+// of CertificateAuthority, since CA data will always be passed to the plugin as bytes.
+type Cluster struct {
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// TLSServerName is passed to the server for SNI and is used in the client to
+	// check server certificates against. If ServerName is empty, the hostname
+	// used to contact the server is used.
+	// +optional
+	TLSServerName string `json:"tls-server-name,omitempty"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate.
+	// This will make your HTTPS connections insecure.
+	// +optional
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CAData contains PEM-encoded certificate authority certificates.
+	// If empty, system roots should be used.
+	// +listType=atomic
+	// +optional
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+	// ProxyURL is the URL to the proxy to be used for all requests to this
+	// cluster.
+	// +optional
+	ProxyURL string `json:"proxy-url,omitempty"`
+	// DisableCompression allows the client to opt out of response compression for all requests to the server. This is useful
+	// to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on
+	// compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296.
+	// +optional
+	DisableCompression bool `json:"disable-compression,omitempty"`
+	// Config holds additional config data that is specific to the exec
+	// plugin with regards to the cluster being authenticated to.
+	//
+	// This data is sourced from the clientcmd Cluster object's
+	// extensions[client.authentication.k8s.io/exec] field:
+	//
+	// clusters:
+	// - name: my-cluster
+	//   cluster:
+	//     ...
+	//     extensions:
+	//     - name: client.authentication.k8s.io/exec  # reserved extension name for per cluster exec config
+	//       extension:
+	//         audience: 06e3fbd18de8  # arbitrary config
+	//
+	// In some environments, the user config may be exactly the same across many clusters
+	// (i.e. call this exec plugin) minus some details that are specific to each cluster
+	// such as the audience.  This field allows the per cluster config to be directly
+	// specified with the cluster info.  Using this field to store secret data is not
+	// recommended as one of the prime benefits of exec plugins is that no secrets need
+	// to be stored directly in the kubeconfig.
+	// +optional
+	Config runtime.RawExtension `json:"config,omitempty"`
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go
new file mode 100644
index 0000000000..9c3f63fca8
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go
@@ -0,0 +1,207 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	unsafe "unsafe"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*clientauthentication.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Cluster_To_clientauthentication_Cluster(a.(*Cluster), b.(*clientauthentication.Cluster), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_Cluster_To_v1_Cluster(a.(*clientauthentication.Cluster), b.(*Cluster), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredential_To_v1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
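+
+// Usage sketch (illustrative only; scheme is a *runtime.Scheme populated
+// via AddToScheme, and versioned a *ExecCredential). Once RegisterConversions
+// has run, wired into localSchemeBuilder by the init above, the scheme can
+// convert between the versioned and internal shapes:
+//
+//	internal := &clientauthentication.ExecCredential{}
+//	if err := scheme.Convert(versioned, internal, nil); err != nil {
+//		// handle the conversion failure
+//	}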
+
+func autoConvert_v1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error {
+	out.Server = in.Server
+	out.TLSServerName = in.TLSServerName
+	out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
+	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
+	out.ProxyURL = in.ProxyURL
+	out.DisableCompression = in.DisableCompression
+	if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1_Cluster_To_clientauthentication_Cluster is an autogenerated conversion function.
+func Convert_v1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error {
+	return autoConvert_v1_Cluster_To_clientauthentication_Cluster(in, out, s)
+}
+
+func autoConvert_clientauthentication_Cluster_To_v1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error {
+	out.Server = in.Server
+	out.TLSServerName = in.TLSServerName
+	out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
+	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
+	out.ProxyURL = in.ProxyURL
+	out.DisableCompression = in.DisableCompression
+	if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_clientauthentication_Cluster_To_v1_Cluster is an autogenerated conversion function.
+func Convert_clientauthentication_Cluster_To_v1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error {
+	return autoConvert_clientauthentication_Cluster_To_v1_Cluster(in, out, s)
+}
+
+func autoConvert_v1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
+	if err := Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status))
+	return nil
+}
+
+// Convert_v1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function.
+func Convert_v1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
+	return autoConvert_v1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredential_To_v1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
+	if err := Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status))
+	return nil
+}
+
+// Convert_clientauthentication_ExecCredential_To_v1_ExecCredential is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredential_To_v1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
+	return autoConvert_clientauthentication_ExecCredential_To_v1_ExecCredential(in, out, s)
+}
+
+func autoConvert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
+	if in.Cluster != nil {
+		in, out := &in.Cluster, &out.Cluster
+		*out = new(clientauthentication.Cluster)
+		if err := Convert_v1_Cluster_To_clientauthentication_Cluster(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Cluster = nil
+	}
+	out.Interactive = in.Interactive
+	return nil
+}
+
+// Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function.
+func Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
+	return autoConvert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
+	out.Interactive = in.Interactive
+	if in.Cluster != nil {
+		in, out := &in.Cluster, &out.Cluster
+		*out = new(Cluster)
+		if err := Convert_clientauthentication_Cluster_To_v1_Cluster(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Cluster = nil
+	}
+	return nil
+}
+
+// Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
+	return autoConvert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in, out, s)
+}
+
+func autoConvert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
+	out.ExpirationTimestamp = (*metav1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
+	out.Token = in.Token
+	out.ClientCertificateData = in.ClientCertificateData
+	out.ClientKeyData = in.ClientKeyData
+	return nil
+}
+
+// Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function.
+func Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
+	return autoConvert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
+	out.ExpirationTimestamp = (*metav1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
+	out.Token = in.Token
+	out.ClientCertificateData = in.ClientCertificateData
+	out.ClientKeyData = in.ClientKeyData
+	return nil
+}
+
+// Convert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
+	return autoConvert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(in, out, s)
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..893933c45f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go
@@ -0,0 +1,120 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Cluster) DeepCopyInto(out *Cluster) {
+	*out = *in
+	if in.CertificateAuthorityData != nil {
+		in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	in.Config.DeepCopyInto(&out.Config)
+	return
+}
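+
+// Note: the generated DeepCopyInto starts from a shallow copy (*out = *in) and
+// then re-copies only the reference-typed fields, here the CA byte slice and
+// the embedded RawExtension config.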
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
+func (in *Cluster) DeepCopy() *Cluster {
+	if in == nil {
+		return nil
+	}
+	out := new(Cluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.Spec.DeepCopyInto(&out.Spec)
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(ExecCredentialStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
+func (in *ExecCredential) DeepCopy() *ExecCredential {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredential)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExecCredential) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
+	*out = *in
+	if in.Cluster != nil {
+		in, out := &in.Cluster, &out.Cluster
+		*out = new(Cluster)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
+func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredentialSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
+	*out = *in
+	if in.ExpirationTimestamp != nil {
+		in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
+func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredentialStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go
new file mode 100644
index 0000000000..dac177e93b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go
@@ -0,0 +1,33 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulter functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
new file mode 100644
index 0000000000..22d1c588bc
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=client.authentication.k8s.io
+
+package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go
new file mode 100644
index 0000000000..0bb92f16a4
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "client.authentication.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&ExecCredential{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
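+
+// Note: ExecCredential is the only top-level type in this group version; the
+// exec package serializes it into the KUBERNETES_EXEC_INFO environment
+// variable for the plugin and decodes the plugin's response from stdout.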
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
new file mode 100644
index 0000000000..918eb11e14
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExecCredential is used by exec-based plugins to communicate credentials to
+// HTTP transports.
+type ExecCredential struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Spec holds information passed to the plugin by the transport.
+	Spec ExecCredentialSpec `json:"spec,omitempty"`
+
+	// Status is filled in by the plugin and holds the credentials that the transport
+	// should use to contact the API.
+	// +optional
+	Status *ExecCredentialStatus `json:"status,omitempty"`
+}
+
+// ExecCredentialSpec holds request and runtime specific information provided by
+// the transport.
+type ExecCredentialSpec struct {
+	// Cluster contains information to allow an exec plugin to communicate with the
+	// kubernetes cluster being authenticated to. Note that Cluster is non-nil only
+	// when provideClusterInfo is set to true in the exec provider config (i.e.,
+	// ExecConfig.ProvideClusterInfo).
+	// +optional
+	Cluster *Cluster `json:"cluster,omitempty"`
+
+	// Interactive declares whether stdin has been passed to this exec plugin.
+	Interactive bool `json:"interactive"`
+}
+
+// ExecCredentialStatus holds credentials for the transport to use.
+//
+// Token and ClientKeyData are sensitive fields. This data should only be
+// transmitted in-memory between the client and the exec plugin process. The
+// exec plugin itself should at least be protected via file permissions.
+type ExecCredentialStatus struct {
+	// ExpirationTimestamp indicates a time when the provided credentials expire.
+	// +optional
+	ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"`
+	// Token is a bearer token used by the client for request authentication.
+	Token string `json:"token,omitempty" datapolicy:"token"`
+	// PEM-encoded client TLS certificates (including intermediates, if any).
+	ClientCertificateData string `json:"clientCertificateData,omitempty"`
+	// PEM-encoded private key for the above certificate.
+	ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"`
+}
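+
+// For illustration only (not part of the upstream type definitions): a plugin
+// that authenticates with a bearer token would write an ExecCredential like
+// the following to stdout, with a real token in place of the placeholder:
+//
+//	{
+//	  "apiVersion": "client.authentication.k8s.io/v1beta1",
+//	  "kind": "ExecCredential",
+//	  "status": {
+//	    "token": "<bearer-token>"
+//	  }
+//	}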
+
+// Cluster contains information to allow an exec plugin to communicate
+// with the kubernetes cluster being authenticated to.
+//
+// To ensure that this struct contains everything someone would need to communicate
+// with a kubernetes cluster (just like they would via a kubeconfig), the fields
+// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception
+// of CertificateAuthority, since CA data will always be passed to the plugin as bytes.
+type Cluster struct {
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// TLSServerName is passed to the server for SNI and is used by the client to
+	// check server certificates against. If TLSServerName is empty, the hostname
+	// used to contact the server is used.
+	// +optional
+	TLSServerName string `json:"tls-server-name,omitempty"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate.
+	// This will make your HTTPS connections insecure.
+	// +optional
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates.
+	// If empty, system roots should be used.
+	// +listType=atomic
+	// +optional
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+	// ProxyURL is the URL to the proxy to be used for all requests to this
+	// cluster.
+	// +optional
+	ProxyURL string `json:"proxy-url,omitempty"`
+	// DisableCompression allows client to opt-out of response compression for all requests to the server. This is useful
+	// to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on
+	// compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296.
+	// +optional
+	DisableCompression bool `json:"disable-compression,omitempty"`
+	// Config holds additional config data that is specific to the exec
+	// plugin with regards to the cluster being authenticated to.
+	//
+	// This data is sourced from the clientcmd Cluster object's
+	// extensions[client.authentication.k8s.io/exec] field:
+	//
+	// clusters:
+	// - name: my-cluster
+	//   cluster:
+	//     ...
+	//     extensions:
+	//     - name: client.authentication.k8s.io/exec  # reserved extension name for per cluster exec config
+	//       extension:
+	//         audience: 06e3fbd18de8  # arbitrary config
+	//
+	// In some environments, the user config may be exactly the same across many clusters
+	// (i.e., they all call this exec plugin) minus some details that are specific to
+	// each cluster, such as the audience.  This field allows the per-cluster config to be directly
+	// specified with the cluster info.  Using this field to store secret data is not
+	// recommended as one of the prime benefits of exec plugins is that no secrets need
+	// to be stored directly in the kubeconfig.
+	// +optional
+	Config runtime.RawExtension `json:"config,omitempty"`
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
new file mode 100644
index 0000000000..5372da1513
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,207 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	unsafe "unsafe"
+
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*clientauthentication.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_Cluster_To_clientauthentication_Cluster(a.(*Cluster), b.(*clientauthentication.Cluster), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_Cluster_To_v1beta1_Cluster(a.(*clientauthentication.Cluster), b.(*Cluster), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1beta1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error {
+	out.Server = in.Server
+	out.TLSServerName = in.TLSServerName
+	out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
+	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
+	out.ProxyURL = in.ProxyURL
+	out.DisableCompression = in.DisableCompression
+	if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1beta1_Cluster_To_clientauthentication_Cluster is an autogenerated conversion function.
+func Convert_v1beta1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error {
+	return autoConvert_v1beta1_Cluster_To_clientauthentication_Cluster(in, out, s)
+}
+
+func autoConvert_clientauthentication_Cluster_To_v1beta1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error {
+	out.Server = in.Server
+	out.TLSServerName = in.TLSServerName
+	out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
+	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
+	out.ProxyURL = in.ProxyURL
+	out.DisableCompression = in.DisableCompression
+	if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_clientauthentication_Cluster_To_v1beta1_Cluster is an autogenerated conversion function.
+func Convert_clientauthentication_Cluster_To_v1beta1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error {
+	return autoConvert_clientauthentication_Cluster_To_v1beta1_Cluster(in, out, s)
+}
+
+func autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
+	if err := Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status))
+	return nil
+}
+
+// Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function.
+func Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
+	return autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
+	if err := Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status))
+	return nil
+}
+
+// Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
+	return autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in, out, s)
+}
+
+func autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
+	if in.Cluster != nil {
+		in, out := &in.Cluster, &out.Cluster
+		*out = new(clientauthentication.Cluster)
+		if err := Convert_v1beta1_Cluster_To_clientauthentication_Cluster(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Cluster = nil
+	}
+	out.Interactive = in.Interactive
+	return nil
+}
+
+// Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function.
+func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
+	return autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
+	out.Interactive = in.Interactive
+	if in.Cluster != nil {
+		in, out := &in.Cluster, &out.Cluster
+		*out = new(Cluster)
+		if err := Convert_clientauthentication_Cluster_To_v1beta1_Cluster(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Cluster = nil
+	}
+	return nil
+}
+
+// Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
+	return autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
+	out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
+	out.Token = in.Token
+	out.ClientCertificateData = in.ClientCertificateData
+	out.ClientKeyData = in.ClientKeyData
+	return nil
+}
+
+// Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function.
+func Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
+	return autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
+	out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
+	out.Token = in.Token
+	out.ClientCertificateData = in.ClientCertificateData
+	out.ClientKeyData = in.ClientKeyData
+	return nil
+}
+
+// Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
+	return autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in, out, s)
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..d7a801c27d
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,120 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Cluster) DeepCopyInto(out *Cluster) {
+	*out = *in
+	if in.CertificateAuthorityData != nil {
+		in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	in.Config.DeepCopyInto(&out.Config)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
+func (in *Cluster) DeepCopy() *Cluster {
+	if in == nil {
+		return nil
+	}
+	out := new(Cluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.Spec.DeepCopyInto(&out.Spec)
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(ExecCredentialStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
+func (in *ExecCredential) DeepCopy() *ExecCredential {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredential)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExecCredential) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
+	*out = *in
+	if in.Cluster != nil {
+		in, out := &in.Cluster, &out.Cluster
+		*out = new(Cluster)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
+func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredentialSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
+	*out = *in
+	if in.ExpirationTimestamp != nil {
+		in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
+func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredentialStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go
new file mode 100644
index 0000000000..198b5be4af
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go
@@ -0,0 +1,33 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulter functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..244d54ce3f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go
@@ -0,0 +1,122 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package clientauthentication
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Cluster) DeepCopyInto(out *Cluster) {
+	*out = *in
+	if in.CertificateAuthorityData != nil {
+		in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.Config != nil {
+		out.Config = in.Config.DeepCopyObject()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
+func (in *Cluster) DeepCopy() *Cluster {
+	if in == nil {
+		return nil
+	}
+	out := new(Cluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.Spec.DeepCopyInto(&out.Spec)
+	if in.Status != nil {
+		in, out := &in.Status, &out.Status
+		*out = new(ExecCredentialStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
+func (in *ExecCredential) DeepCopy() *ExecCredential {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredential)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExecCredential) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
+	*out = *in
+	if in.Cluster != nil {
+		in, out := &in.Cluster, &out.Cluster
+		*out = new(Cluster)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
+func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredentialSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
+	*out = *in
+	if in.ExpirationTimestamp != nil {
+		in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
+		*out = (*in).DeepCopy()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
+func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecCredentialStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/version/base.go b/hack/tools/vendor/k8s.io/client-go/pkg/version/base.go
new file mode 100644
index 0000000000..676d51d321
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/version/base.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+// Base version information.
+//
+// This is the fallback data used when version information from git is not
+// provided via go ldflags. It provides an approximation of the Kubernetes
+// version for ad-hoc builds (e.g. `go build`) that cannot get the version
+// information from git.
+//
+// If you are looking at these fields in the git tree, they look
+// strange. They are modified on the fly by the build process. The
+// in-tree values are dummy values used for "git archive", which also
+// works for GitHub tar downloads.
+//
+// When releasing a new Kubernetes version, this file is updated by
+// build/mark_new_version.sh to reflect the new version, and then a
+// git annotated tag (using format vX.Y where X == Major version and Y
+// == Minor version) is created to point to the commit that updates
+// pkg/version/base.go
+var (
+	// TODO: Deprecate gitMajor and gitMinor, use only gitVersion
+	// instead. First step in deprecation, keep the fields but make
+	// them irrelevant. (Next we'll take it out, which may muck with
+	// scripts consuming the kubectl version output - but most of
+	// these should be looking at gitVersion already anyways.)
+	gitMajor string = "" // major version, always numeric
+	gitMinor string = "" // minor version, numeric possibly followed by "+"
+
+	// semantic version, derived by build scripts (see
+	// https://github.com/kubernetes/sig-release/blob/master/release-engineering/versioning.md#kubernetes-release-versioning
+	// https://kubernetes.io/releases/version-skew-policy/
+	// for a detailed discussion of this field)
+	//
+	// TODO: This field is still called "gitVersion" for legacy
+	// reasons. For prerelease versions, the build metadata on the
+	// semantic version is a git hash, but the version itself is no
+	// longer the direct output of "git describe", but a slight
+	// translation to be semver compliant.
+
+	// NOTE: The $Format strings are replaced during 'git archive' thanks to the
+	// companion .gitattributes file containing 'export-subst' in this same
+	// directory.  See also https://git-scm.com/docs/gitattributes
+	gitVersion   string = "v0.0.0-master+$Format:%H$"
+	gitCommit    string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
+	gitTreeState string = ""            // state of git tree, either "clean" or "dirty"
+
+	buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+)
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/version/doc.go b/hack/tools/vendor/k8s.io/client-go/pkg/version/doc.go
new file mode 100644
index 0000000000..05e997e133
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/version/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:openapi-gen=true
+
+// Package version supplies version information collected at build time to
+// kubernetes components.
+package version // import "k8s.io/client-go/pkg/version"
diff --git a/hack/tools/vendor/k8s.io/client-go/pkg/version/version.go b/hack/tools/vendor/k8s.io/client-go/pkg/version/version.go
new file mode 100644
index 0000000000..8c8350d13b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/pkg/version/version.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+	"fmt"
+	"runtime"
+
+	apimachineryversion "k8s.io/apimachinery/pkg/version"
+)
+
+// Get returns the overall codebase version. It's for detecting
+// what code a binary was built from.
+func Get() apimachineryversion.Info {
+	// These variables typically come from -ldflags settings and in
+	// their absence fallback to the settings in pkg/version/base.go
+	return apimachineryversion.Info{
+		Major:        gitMajor,
+		Minor:        gitMinor,
+		GitVersion:   gitVersion,
+		GitCommit:    gitCommit,
+		GitTreeState: gitTreeState,
+		BuildDate:    buildDate,
+		GoVersion:    runtime.Version(),
+		Compiler:     runtime.Compiler,
+		Platform:     fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
+	}
+}
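+
+// Illustrative note (a conventional pattern, not mandated by this file): the
+// base values in base.go are typically overridden at build time via linker
+// flags, for example:
+//
+//	go build -ldflags "-X k8s.io/client-go/pkg/version.gitVersion=v0.32.2"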
diff --git a/hack/tools/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/hack/tools/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
new file mode 100644
index 0000000000..b471f5cc64
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
@@ -0,0 +1,547 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+	"os/exec"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/term"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/util/dump"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/client-go/pkg/apis/clientauthentication"
+	"k8s.io/client-go/pkg/apis/clientauthentication/install"
+	clientauthenticationv1 "k8s.io/client-go/pkg/apis/clientauthentication/v1"
+	clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
+	"k8s.io/client-go/tools/clientcmd/api"
+	"k8s.io/client-go/tools/metrics"
+	"k8s.io/client-go/transport"
+	"k8s.io/client-go/util/connrotation"
+	"k8s.io/klog/v2"
+	"k8s.io/utils/clock"
+)
+
+const execInfoEnv = "KUBERNETES_EXEC_INFO"
+const installHintVerboseHelp = `
+
+It looks like you are trying to use a client-go credential plugin that is not installed.
+
+To learn more about this feature, consult the documentation available at:
+      https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins`
+
+var scheme = runtime.NewScheme()
+var codecs = serializer.NewCodecFactory(scheme)
+
+func init() {
+	install.Install(scheme)
+}
+
+var (
+	// Since transports can be constantly re-initialized by programs like kubectl,
+	// keep a cache of initialized authenticators keyed by a stable dump of their config.
+	globalCache = newCache()
+	// The list of API versions we accept.
+	apiVersions = map[string]schema.GroupVersion{
+		clientauthenticationv1beta1.SchemeGroupVersion.String(): clientauthenticationv1beta1.SchemeGroupVersion,
+		clientauthenticationv1.SchemeGroupVersion.String():      clientauthenticationv1.SchemeGroupVersion,
+	}
+)
+
+func newCache() *cache {
+	return &cache{m: make(map[string]*Authenticator)}
+}
+
+func cacheKey(conf *api.ExecConfig, cluster *clientauthentication.Cluster) string {
+	key := struct {
+		conf    *api.ExecConfig
+		cluster *clientauthentication.Cluster
+	}{
+		conf:    conf,
+		cluster: cluster,
+	}
+	return dump.Pretty(key)
+}
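+
+// Note: dump.Pretty yields a deterministic, value-based rendering of the
+// config and cluster, so two identical exec configurations map to the same
+// cache entry even across separately constructed rest.Configs.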
+
+type cache struct {
+	mu sync.Mutex
+	m  map[string]*Authenticator
+}
+
+func (c *cache) get(s string) (*Authenticator, bool) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	a, ok := c.m[s]
+	return a, ok
+}
+
+// put inserts an authenticator into the cache. If an authenticator is already
+// associated with the key, the first one is returned instead.
+func (c *cache) put(s string, a *Authenticator) *Authenticator {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	existing, ok := c.m[s]
+	if ok {
+		return existing
+	}
+	c.m[s] = a
+	return a
+}
+
+// sometimes rate limits how often a function f() is called. Specifically, Do()
+// will run the provided function f() up to threshold times every interval
+// duration.
+type sometimes struct {
+	threshold int
+	interval  time.Duration
+
+	clock clock.Clock
+	mu    sync.Mutex
+
+	count  int       // times we have called f() in this window
+	window time.Time // beginning of current window of length interval
+}
+
+func (s *sometimes) Do(f func()) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	now := s.clock.Now()
+	if s.window.IsZero() {
+		s.window = now
+	}
+
+	// If we are no longer in our saved time window, then we get to reset our run
+	// count back to 0 and start increasing towards the threshold again.
+	if inWindow := now.Sub(s.window) < s.interval; !inWindow {
+		s.window = now
+		s.count = 0
+	}
+
+	// If we have not run the function more than threshold times in this current
+	// time window, we get to run it now!
+	if underThreshold := s.count < s.threshold; underThreshold {
+		s.count++
+		f()
+	}
+}
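+
+// Illustrative usage (the values mirror newAuthenticator below): a
+// &sometimes{threshold: 10, interval: time.Hour, clock: clock.RealClock{}}
+// runs the function passed to Do at most 10 times per hour, dropping any
+// further calls until the next window starts.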
+
+// GetAuthenticator returns an exec-based plugin for providing client credentials.
+func GetAuthenticator(config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) {
+	return newAuthenticator(globalCache, term.IsTerminal, config, cluster)
+}
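+
+// A minimal sketch of the intended call sequence (simplified, error handling
+// elided): obtain an Authenticator for a kubeconfig's exec section, then let
+// it hook the transport so credentials are fetched lazily on first use:
+//
+//	a, _ := GetAuthenticator(execConfig, cluster)
+//	_ = a.UpdateTransportConfig(transportConfig)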
+
+func newAuthenticator(c *cache, isTerminalFunc func(int) bool, config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) {
+	key := cacheKey(config, cluster)
+	if a, ok := c.get(key); ok {
+		return a, nil
+	}
+
+	gv, ok := apiVersions[config.APIVersion]
+	if !ok {
+		return nil, fmt.Errorf("exec plugin: invalid apiVersion %q", config.APIVersion)
+	}
+
+	connTracker := connrotation.NewConnectionTracker()
+	defaultDialer := connrotation.NewDialerWithTracker(
+		(&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext,
+		connTracker,
+	)
+
+	a := &Authenticator{
+		cmd:                config.Command,
+		args:               config.Args,
+		group:              gv,
+		cluster:            cluster,
+		provideClusterInfo: config.ProvideClusterInfo,
+
+		installHint: config.InstallHint,
+		sometimes: &sometimes{
+			threshold: 10,
+			interval:  time.Hour,
+			clock:     clock.RealClock{},
+		},
+
+		stdin:           os.Stdin,
+		stderr:          os.Stderr,
+		interactiveFunc: func() (bool, error) { return isInteractive(isTerminalFunc, config) },
+		now:             time.Now,
+		environ:         os.Environ,
+
+		connTracker: connTracker,
+	}
+
+	for _, env := range config.Env {
+		a.env = append(a.env, env.Name+"="+env.Value)
+	}
+
+	// these functions are made comparable and stored in the cache so that repeated clientset
+	// construction with the same rest.Config results in a single TLS cache and Authenticator
+	a.getCert = &transport.GetCertHolder{GetCert: a.cert}
+	a.dial = &transport.DialHolder{Dial: defaultDialer.DialContext}
+
+	return c.put(key, a), nil
+}
+
+func isInteractive(isTerminalFunc func(int) bool, config *api.ExecConfig) (bool, error) {
+	var shouldBeInteractive bool
+	switch config.InteractiveMode {
+	case api.NeverExecInteractiveMode:
+		shouldBeInteractive = false
+	case api.IfAvailableExecInteractiveMode:
+		shouldBeInteractive = !config.StdinUnavailable && isTerminalFunc(int(os.Stdin.Fd()))
+	case api.AlwaysExecInteractiveMode:
+		if !isTerminalFunc(int(os.Stdin.Fd())) {
+			return false, errors.New("standard input is not a terminal")
+		}
+		if config.StdinUnavailable {
+			suffix := ""
+			if len(config.StdinUnavailableMessage) > 0 {
+				// only print extra ": " if the user actually specified a message
+				suffix = fmt.Sprintf(": %s", config.StdinUnavailableMessage)
+			}
+			return false, fmt.Errorf("standard input is unavailable%s", suffix)
+		}
+		shouldBeInteractive = true
+	default:
+		return false, fmt.Errorf("unknown interactiveMode: %q", config.InteractiveMode)
+	}
+
+	return shouldBeInteractive, nil
+}
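+
+// In short: NeverExecInteractiveMode always disables interactivity,
+// IfAvailableExecInteractiveMode enables it only when stdin is a terminal and
+// not marked unavailable, and AlwaysExecInteractiveMode errors out unless a
+// usable terminal is attached.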
+
+// Authenticator is a client credential provider that rotates credentials by executing a plugin.
+// The plugin input and output are defined by the API group client.authentication.k8s.io.
+type Authenticator struct {
+	// Set by the config
+	cmd                string
+	args               []string
+	group              schema.GroupVersion
+	env                []string
+	cluster            *clientauthentication.Cluster
+	provideClusterInfo bool
+
+	// Used to avoid log spew by rate limiting install hint printing. We didn't do
+	// this with interval-based rate limiting alone, since that could have prevented
+	// the install hint from showing up for kubectl users.
+	sometimes   *sometimes
+	installHint string
+
+	// Stubbable for testing
+	stdin           io.Reader
+	stderr          io.Writer
+	interactiveFunc func() (bool, error)
+	now             func() time.Time
+	environ         func() []string
+
+	// connTracker tracks all connections opened that we need to close when rotating a client certificate
+	connTracker *connrotation.ConnectionTracker
+
+	// Cached results.
+	//
+	// The mutex also guards calling the plugin. Since the plugin could be
+	// interactive we want to make sure it's only called once.
+	mu          sync.Mutex
+	cachedCreds *credentials
+	exp         time.Time
+
+	// getCert makes Authenticator.cert comparable to support TLS config caching
+	getCert *transport.GetCertHolder
+	// dial is used for clients which do not specify a custom dialer
+	// it is comparable to support TLS config caching
+	dial *transport.DialHolder
+}
+
+type credentials struct {
+	token string           `datapolicy:"token"`
+	cert  *tls.Certificate `datapolicy:"secret-key"`
+}
+
+// UpdateTransportConfig updates the transport.Config to use credentials
+// returned by the plugin.
+func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error {
+	// If a bearer token is present in the request - avoid the GetCert callback when
+	// setting up the transport, as that triggers the exec action if the server is
+	// also configured to allow client certificates for authentication. For requests
+	// like "kubectl get --token (token) pods" we should assume the intention is to
+	// use the provided token for authentication. The same can be said for when the
+	// user specifies basic auth or cert auth.
+	if c.HasTokenAuth() || c.HasBasicAuth() || c.HasCertAuth() {
+		return nil
+	}
+
+	c.Wrap(func(rt http.RoundTripper) http.RoundTripper {
+		return &roundTripper{a, rt}
+	})
+
+	if c.HasCertCallback() {
+		return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set")
+	}
+	c.TLS.GetCertHolder = a.getCert // comparable for TLS config caching
+
+	if c.DialHolder != nil {
+		if c.DialHolder.Dial == nil {
+			return errors.New("invalid transport.Config.DialHolder: wrapped Dial function is nil")
+		}
+
+		// if c has a custom dialer, we have to wrap it
+		// TLS config caching is not supported for this config
+		d := connrotation.NewDialerWithTracker(c.DialHolder.Dial, a.connTracker)
+		c.DialHolder = &transport.DialHolder{Dial: d.DialContext}
+	} else {
+		c.DialHolder = a.dial // comparable for TLS config caching
+	}
+
+	return nil
+}
+
+var _ utilnet.RoundTripperWrapper = &roundTripper{}
+
+type roundTripper struct {
+	a    *Authenticator
+	base http.RoundTripper
+}
+
+func (r *roundTripper) WrappedRoundTripper() http.RoundTripper {
+	return r.base
+}
+
+func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	// If a user has already set credentials, use that. This makes commands like
+	// "kubectl get --token (token) pods" work.
+	if req.Header.Get("Authorization") != "" {
+		return r.base.RoundTrip(req)
+	}
+
+	creds, err := r.a.getCreds()
+	if err != nil {
+		return nil, fmt.Errorf("getting credentials: %v", err)
+	}
+	if creds.token != "" {
+		req.Header.Set("Authorization", "Bearer "+creds.token)
+	}
+
+	res, err := r.base.RoundTrip(req)
+	if err != nil {
+		return nil, err
+	}
+	if res.StatusCode == http.StatusUnauthorized {
+		if err := r.a.maybeRefreshCreds(creds); err != nil {
+			klog.Errorf("refreshing credentials: %v", err)
+		}
+	}
+	return res, nil
+}
+
+func (a *Authenticator) credsExpired() bool {
+	if a.exp.IsZero() {
+		return false
+	}
+	return a.now().After(a.exp)
+}
+
+func (a *Authenticator) cert() (*tls.Certificate, error) {
+	creds, err := a.getCreds()
+	if err != nil {
+		return nil, err
+	}
+	return creds.cert, nil
+}
+
+func (a *Authenticator) getCreds() (*credentials, error) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	if a.cachedCreds != nil && !a.credsExpired() {
+		return a.cachedCreds, nil
+	}
+
+	if err := a.refreshCredsLocked(); err != nil {
+		return nil, err
+	}
+
+	return a.cachedCreds, nil
+}
+
+// maybeRefreshCreds executes the plugin to force a rotation of the
+// credentials, unless they were rotated already.
+func (a *Authenticator) maybeRefreshCreds(creds *credentials) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	// Since we're not making a new pointer to a.cachedCreds in getCreds, no
+	// need to do deep comparison.
+	if creds != a.cachedCreds {
+		// Credentials already rotated.
+		return nil
+	}
+
+	return a.refreshCredsLocked()
+}
+
+// refreshCredsLocked executes the plugin and reads the credentials from
+// stdout. It must be called while holding the Authenticator's mutex.
+func (a *Authenticator) refreshCredsLocked() error {
+	interactive, err := a.interactiveFunc()
+	if err != nil {
+		return fmt.Errorf("exec plugin cannot support interactive mode: %w", err)
+	}
+
+	cred := &clientauthentication.ExecCredential{
+		Spec: clientauthentication.ExecCredentialSpec{
+			Interactive: interactive,
+		},
+	}
+	if a.provideClusterInfo {
+		cred.Spec.Cluster = a.cluster
+	}
+
+	env := append(a.environ(), a.env...)
+	data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred)
+	if err != nil {
+		return fmt.Errorf("encode ExecCredentials: %v", err)
+	}
+	env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data))
+
+	stdout := &bytes.Buffer{}
+	cmd := exec.Command(a.cmd, a.args...)
+	cmd.Env = env
+	cmd.Stderr = a.stderr
+	cmd.Stdout = stdout
+	if interactive {
+		cmd.Stdin = a.stdin
+	}
+
+	err = cmd.Run()
+	incrementCallsMetric(err)
+	if err != nil {
+		return a.wrapCmdRunErrorLocked(err)
+	}
+
+	_, gvk, err := codecs.UniversalDecoder(a.group).Decode(stdout.Bytes(), nil, cred)
+	if err != nil {
+		return fmt.Errorf("decoding stdout: %v", err)
+	}
+	if gvk.Group != a.group.Group || gvk.Version != a.group.Version {
+		return fmt.Errorf("exec plugin is configured to use API version %s, plugin returned version %s",
+			a.group, schema.GroupVersion{Group: gvk.Group, Version: gvk.Version})
+	}
+
+	if cred.Status == nil {
+		return fmt.Errorf("exec plugin didn't return a status field")
+	}
+	if cred.Status.Token == "" && cred.Status.ClientCertificateData == "" && cred.Status.ClientKeyData == "" {
+		return fmt.Errorf("exec plugin didn't return a token or cert/key pair")
+	}
+	if (cred.Status.ClientCertificateData == "") != (cred.Status.ClientKeyData == "") {
+		return fmt.Errorf("exec plugin returned only certificate or key, not both")
+	}
+
+	if cred.Status.ExpirationTimestamp != nil {
+		a.exp = cred.Status.ExpirationTimestamp.Time
+	} else {
+		a.exp = time.Time{}
+	}
+
+	newCreds := &credentials{
+		token: cred.Status.Token,
+	}
+	if cred.Status.ClientKeyData != "" && cred.Status.ClientCertificateData != "" {
+		cert, err := tls.X509KeyPair([]byte(cred.Status.ClientCertificateData), []byte(cred.Status.ClientKeyData))
+		if err != nil {
+			return fmt.Errorf("failed parsing client key/certificate: %v", err)
+		}
+
+		// Leaf is initialized to be nil:
+		//  https://golang.org/pkg/crypto/tls/#X509KeyPair
+		// Leaf certificate is the first certificate:
+		//  https://golang.org/pkg/crypto/tls/#Certificate
+		// Populating leaf is useful for quickly accessing the underlying x509
+		// certificate values.
+		cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
+		if err != nil {
+			return fmt.Errorf("failed parsing client leaf certificate: %v", err)
+		}
+		newCreds.cert = &cert
+	}
+
+	oldCreds := a.cachedCreds
+	a.cachedCreds = newCreds
+	// Only close all connections when TLS cert rotates. Token rotation doesn't
+	// need the extra noise.
+	if oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) {
+		// Can be nil if the exec auth plugin only returned token auth.
+		if oldCreds.cert != nil && oldCreds.cert.Leaf != nil {
+			metrics.ClientCertRotationAge.Observe(time.Since(oldCreds.cert.Leaf.NotBefore))
+		}
+		a.connTracker.CloseAll()
+	}
+
+	expiry := time.Time{}
+	if a.cachedCreds.cert != nil && a.cachedCreds.cert.Leaf != nil {
+		expiry = a.cachedCreds.cert.Leaf.NotAfter
+	}
+	expirationMetrics.set(a, expiry)
+	return nil
+}
+
+// wrapCmdRunErrorLocked pulls out the code to construct a helpful error message
+// for when the exec plugin's binary fails to Run().
+//
+// It must be called while holding the Authenticator's mutex.
+func (a *Authenticator) wrapCmdRunErrorLocked(err error) error {
+	switch e := err.(type) {
+	case *exec.Error: // Binary does not exist (see exec.Error).
+		builder := strings.Builder{}
+		fmt.Fprintf(&builder, "exec: executable %s not found", a.cmd)
+
+		a.sometimes.Do(func() {
+			fmt.Fprint(&builder, installHintVerboseHelp)
+			if a.installHint != "" {
+				fmt.Fprintf(&builder, "\n\n%s", a.installHint)
+			}
+		})
+
+		return errors.New(builder.String())
+
+	case *exec.ExitError: // Binary execution failed (see exec.Cmd.Run()).
+		return fmt.Errorf(
+			"exec: executable %s failed with exit code %d",
+			a.cmd,
+			e.ProcessState.ExitCode(),
+		)
+
+	default:
+		return fmt.Errorf("exec: %v", err)
+	}
+}
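For context on the plugin protocol implemented above: refreshCredsLocked runs the configured command and decodes an ExecCredential from its stdout. Below is a minimal sketch of such a plugin, assuming the client.authentication.k8s.io/v1 wire format; the token value and one-hour expiry are illustrative placeholders, not part of the vendored code.

```go
package main

import (
	"encoding/json"
	"os"
	"time"
)

// Minimal exec credential plugin sketch: emits an ExecCredential with a
// static placeholder token and a one-hour expiry on stdout, which is the
// shape refreshCredsLocked decodes.
func main() {
	cred := map[string]any{
		"apiVersion": "client.authentication.k8s.io/v1",
		"kind":       "ExecCredential",
		"status": map[string]any{
			"token":               "example-token", // placeholder
			"expirationTimestamp": time.Now().Add(time.Hour).UTC().Format(time.RFC3339),
		},
	}
	if err := json.NewEncoder(os.Stdout).Encode(cred); err != nil {
		os.Exit(1)
	}
}
```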
diff --git a/hack/tools/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go b/hack/tools/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
new file mode 100644
index 0000000000..51210975a6
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"errors"
+	"io/fs"
+	"os/exec"
+	"reflect"
+	"sync"
+	"time"
+
+	"k8s.io/klog/v2"
+
+	"k8s.io/client-go/tools/metrics"
+)
+
+// The following constants shadow the special values used in the prometheus metrics implementation.
+const (
+	// noError indicates that the plugin process was successfully started and exited with an exit
+	// code of 0.
+	noError = "no_error"
+	// pluginExecutionError indicates that the plugin process was successfully started and then
+	// it returned a non-zero exit code.
+	pluginExecutionError = "plugin_execution_error"
+	// pluginNotFoundError indicates that we could not find the exec plugin.
+	pluginNotFoundError = "plugin_not_found_error"
+	// clientInternalError indicates that we attempted to start the plugin process, but failed
+	// for some reason.
+	clientInternalError = "client_internal_error"
+
+	// successExitCode represents an exec plugin invocation that was successful.
+	successExitCode = 0
+	// failureExitCode represents an exec plugin invocation that was not successful. This code is
+	// used in some failure modes (e.g., plugin not found, client internal error) so that someone
+	// can more easily monitor all unsuccessful invocations.
+	failureExitCode = 1
+)
+
+type certificateExpirationTracker struct {
+	mu        sync.RWMutex
+	m         map[*Authenticator]time.Time
+	metricSet func(*time.Time)
+}
+
+var expirationMetrics = &certificateExpirationTracker{
+	m: map[*Authenticator]time.Time{},
+	metricSet: func(e *time.Time) {
+		metrics.ClientCertExpiry.Set(e)
+	},
+}
+
+// set stores the given expiration time and updates the certificate expiry
+// metric to the earliest expiration time.
+func (c *certificateExpirationTracker) set(a *Authenticator, t time.Time) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.m[a] = t
+
+	earliest := time.Time{}
+	for _, t := range c.m {
+		if t.IsZero() {
+			continue
+		}
+		if earliest.IsZero() || earliest.After(t) {
+			earliest = t
+		}
+	}
+	if earliest.IsZero() {
+		c.metricSet(nil)
+	} else {
+		c.metricSet(&earliest)
+	}
+}
+
+// incrementCallsMetric increments a global metrics counter for the number of calls to an exec
+// plugin, partitioned by exit code. The provided err should be the return value from
+// exec.Cmd.Run().
+func incrementCallsMetric(err error) {
+	execExitError := &exec.ExitError{}
+	execError := &exec.Error{}
+	pathError := &fs.PathError{}
+	switch {
+	case err == nil: // Binary execution succeeded.
+		metrics.ExecPluginCalls.Increment(successExitCode, noError)
+
+	case errors.As(err, &execExitError): // Binary execution failed (see "os/exec".Cmd.Run()).
+		metrics.ExecPluginCalls.Increment(execExitError.ExitCode(), pluginExecutionError)
+
+	case errors.As(err, &execError), errors.As(err, &pathError): // Binary does not exist (see exec.Error, fs.PathError).
+		metrics.ExecPluginCalls.Increment(failureExitCode, pluginNotFoundError)
+
+	default: // We don't know about this error type.
+		klog.V(2).InfoS("unexpected exec plugin return error type", "type", reflect.TypeOf(err).String(), "err", err)
+		metrics.ExecPluginCalls.Increment(failureExitCode, clientInternalError)
+	}
+}
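The switch above maps Run() errors to metric labels. A small standalone sketch of the same errors.As dispatch, assuming a Unix-like environment where the `false` binary exists:

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

// Demonstrates the dispatch used by incrementCallsMetric: a missing binary
// surfaces as *exec.Error (plugin_not_found_error), while a non-zero exit
// surfaces as *exec.ExitError carrying the real exit code
// (plugin_execution_error).
func main() {
	if err := exec.Command("definitely-not-installed").Run(); err != nil {
		var execErr *exec.Error
		fmt.Println("not found:", errors.As(err, &execErr)) // true
	}
	if err := exec.Command("false").Run(); err != nil {
		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) {
			fmt.Println("exit code:", exitErr.ExitCode()) // 1
		}
	}
}
```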
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/OWNERS b/hack/tools/vendor/k8s.io/client-go/rest/OWNERS
new file mode 100644
index 0000000000..7b23294c45
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/OWNERS
@@ -0,0 +1,14 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+  - thockin
+  - smarterclayton
+  - caesarxuchao
+  - wojtek-t
+  - deads2k
+  - liggitt
+  - sttts
+  - luxas
+  - dims
+  - cjcullen
+  - lojies
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/client.go b/hack/tools/vendor/k8s.io/client-go/rest/client.go
new file mode 100644
index 0000000000..159caa13fa
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/client.go
@@ -0,0 +1,343 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"fmt"
+	"mime"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/munnerz/goautoneg"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	clientfeatures "k8s.io/client-go/features"
+	"k8s.io/client-go/util/flowcontrol"
+)
+
+const (
+	// Environment variables: Note that the duration should be long enough that the backoff
+	// persists for some reasonable time (e.g. 120 seconds). The typical base might be "1".
+	envBackoffBase     = "KUBE_CLIENT_BACKOFF_BASE"
+	envBackoffDuration = "KUBE_CLIENT_BACKOFF_DURATION"
+)
+
+// Interface captures the set of operations for generically interacting with Kubernetes REST apis.
+type Interface interface {
+	GetRateLimiter() flowcontrol.RateLimiter
+	Verb(verb string) *Request
+	Post() *Request
+	Put() *Request
+	Patch(pt types.PatchType) *Request
+	Get() *Request
+	Delete() *Request
+	APIVersion() schema.GroupVersion
+}
+
+// ClientContentConfig controls how RESTClient communicates with the server.
+//
+// TODO: ContentConfig will be updated to accept a Negotiator instead of a
+// NegotiatedSerializer and NegotiatedSerializer will be removed.
+type ClientContentConfig struct {
+	// AcceptContentTypes specifies the types the client will accept and is optional.
+	// If not set, ContentType will be used to define the Accept header
+	AcceptContentTypes string
+	// ContentType specifies the wire format used to communicate with the server.
+	// This value will be set as the Accept header on requests made to the server if
+	// AcceptContentTypes is not set, and as the default content type on any object
+	// sent to the server. If not set, "application/json" is used.
+	ContentType string
+	// GroupVersion is the API version to talk to. Must be provided when initializing
+	// a RESTClient directly. When initializing a Client, will be set with the default
+	// code version. This is used as the default group version for VersionedParams.
+	GroupVersion schema.GroupVersion
+	// Negotiator is used for obtaining encoders and decoders for multiple
+	// supported media types.
+	Negotiator runtime.ClientNegotiator
+}
+
+// RESTClient imposes common Kubernetes API conventions on a set of resource paths.
+// The baseURL is expected to point to an HTTP or HTTPS path that is the parent
+// of one or more resources.  The server should return a decodable API resource
+// object, or an api.Status object which contains information about the reason for
+// any failure.
+//
+// Most consumers should use client.New() to get a Kubernetes API client.
+type RESTClient struct {
+	// base is the root URL for all invocations of the client
+	base *url.URL
+	// versionedAPIPath is a path segment connecting the base URL to the resource root
+	versionedAPIPath string
+
+	// content describes how a RESTClient encodes and decodes responses.
+	content requestClientContentConfigProvider
+
+	// creates BackoffManager that is passed to requests.
+	createBackoffMgr func() BackoffManager
+
+	// rateLimiter is shared among all requests created by this client unless specifically
+	// overridden.
+	rateLimiter flowcontrol.RateLimiter
+
+	// warningHandler is shared among all requests created by this client.
+	// If not set, defaultWarningHandler is used.
+	warningHandler WarningHandler
+
+	// Set specific behavior of the client.  If not set http.DefaultClient will be used.
+	Client *http.Client
+}
+
+// NewRESTClient creates a new RESTClient. This client performs generic REST functions
+// such as Get, Put, Post, and Delete on specified paths.
+func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ClientContentConfig, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) {
+	base := *baseURL
+	if !strings.HasSuffix(base.Path, "/") {
+		base.Path += "/"
+	}
+	base.RawQuery = ""
+	base.Fragment = ""
+
+	return &RESTClient{
+		base:             &base,
+		versionedAPIPath: versionedAPIPath,
+		content:          requestClientContentConfigProvider{base: scrubCBORContentConfigIfDisabled(config)},
+		createBackoffMgr: readExpBackoffConfig,
+		rateLimiter:      rateLimiter,
+		Client:           client,
+	}, nil
+}
+
+func scrubCBORContentConfigIfDisabled(content ClientContentConfig) ClientContentConfig {
+	if clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) {
+		content.Negotiator = clientNegotiatorWithCBORSequenceStreamDecoder{content.Negotiator}
+		return content
+	}
+
+	if mediatype, _, err := mime.ParseMediaType(content.ContentType); err == nil && mediatype == "application/cbor" {
+		content.ContentType = "application/json"
+	}
+
+	clauses := goautoneg.ParseAccept(content.AcceptContentTypes)
+	scrubbed := false
+	for i, clause := range clauses {
+		if clause.Type == "application" && clause.SubType == "cbor" {
+			scrubbed = true
+			clauses[i].SubType = "json"
+		}
+	}
+	if !scrubbed {
+		// No application/cbor in AcceptContentTypes, nothing more to do.
+		return content
+	}
+
+	parts := make([]string, 0, len(clauses))
+	for _, clause := range clauses {
+		// ParseAccept does not store the parameter "q" in Params.
+		params := clause.Params
+		if clause.Q < 1 { // omit q=1, it's the default
+			if params == nil {
+				params = make(map[string]string, 1)
+			}
+			params["q"] = strconv.FormatFloat(clause.Q, 'g', 3, 32)
+		}
+		parts = append(parts, mime.FormatMediaType(fmt.Sprintf("%s/%s", clause.Type, clause.SubType), params))
+	}
+	content.AcceptContentTypes = strings.Join(parts, ",")
+
+	return content
+}
+
+// GetRateLimiter returns rate limiter for a given client, or nil if it's called on a nil client
+func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter {
+	if c == nil {
+		return nil
+	}
+	return c.rateLimiter
+}
+
+// readExpBackoffConfig handles the internal logic of determining what the
+// backoff policy is. By default, if no information is available, NoBackoff is used.
+// TODO: generalize this; see #17727.
+func readExpBackoffConfig() BackoffManager {
+	backoffBase := os.Getenv(envBackoffBase)
+	backoffDuration := os.Getenv(envBackoffDuration)
+
+	backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64)
+	backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64)
+	if errBase != nil || errDuration != nil {
+		return &NoBackoff{}
+	}
+	return &URLBackoff{
+		Backoff: flowcontrol.NewBackOff(
+			time.Duration(backoffBaseInt)*time.Second,
+			time.Duration(backoffDurationInt)*time.Second)}
+}
+
+// Verb begins a request with a verb (GET, POST, PUT, DELETE).
+//
+// Example usage of RESTClient's request building interface:
+//
+//	c, err := NewRESTClient(...)
+//	if err != nil { ... }
+//	list := &api.PodList{}
+//	err = c.Verb("GET").
+//		Resource("pods").
+//		Param("labelSelector", "area=staging").
+//		Timeout(10*time.Second).
+//		Do(ctx).
+//		Into(list)
+//	if err != nil { ... }
+func (c *RESTClient) Verb(verb string) *Request {
+	return NewRequest(c).Verb(verb)
+}
+
+// Post begins a POST request. Short for c.Verb("POST").
+func (c *RESTClient) Post() *Request {
+	return c.Verb("POST")
+}
+
+// Put begins a PUT request. Short for c.Verb("PUT").
+func (c *RESTClient) Put() *Request {
+	return c.Verb("PUT")
+}
+
+// Patch begins a PATCH request. Short for c.Verb("PATCH").
+func (c *RESTClient) Patch(pt types.PatchType) *Request {
+	return c.Verb("PATCH").SetHeader("Content-Type", string(pt))
+}
+
+// Get begins a GET request. Short for c.Verb("GET").
+func (c *RESTClient) Get() *Request {
+	return c.Verb("GET")
+}
+
+// Delete begins a DELETE request. Short for c.Verb("DELETE").
+func (c *RESTClient) Delete() *Request {
+	return c.Verb("DELETE")
+}
+
+// APIVersion returns the APIVersion this RESTClient is expected to use.
+func (c *RESTClient) APIVersion() schema.GroupVersion {
+	config, _ := c.content.GetClientContentConfig()
+	return config.GroupVersion
+}
+
+// requestClientContentConfigProvider observes HTTP 415 (Unsupported Media Type) responses to detect
+// that the server does not understand CBOR. Once this has happened, future requests are forced to
+// use JSON so they can succeed. This is convenient for client users that want to prefer CBOR, but
+// also need to interoperate with older servers so requests do not permanently fail. The clients
+// will not default to using CBOR until at least all supported kube-apiservers have enable-CBOR
+// locked to true, so this path will rarely be taken. Additionally, all generated clients accessing
+// built-in kube resources are forced to protobuf, so those will not degrade to JSON.
+type requestClientContentConfigProvider struct {
+	base ClientContentConfig
+
+	// Becomes permanently true if a server responds with HTTP 415 (Unsupported Media Type) to a
+	// request with "Content-Type" header containing the CBOR media type.
+	sawUnsupportedMediaTypeForCBOR atomic.Bool
+}
+
+// GetClientContentConfig returns the ClientContentConfig that should be used for new requests by
+// this client and true if the request ContentType was selected by default.
+func (p *requestClientContentConfigProvider) GetClientContentConfig() (ClientContentConfig, bool) {
+	config := p.base
+
+	defaulted := config.ContentType == ""
+	if defaulted {
+		config.ContentType = "application/json"
+	}
+
+	if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) {
+		return config, defaulted
+	}
+
+	if defaulted && clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsPreferCBOR) {
+		config.ContentType = "application/cbor"
+	}
+
+	if sawUnsupportedMediaTypeForCBOR := p.sawUnsupportedMediaTypeForCBOR.Load(); !sawUnsupportedMediaTypeForCBOR {
+		return config, defaulted
+	}
+
+	if mediaType, _, _ := mime.ParseMediaType(config.ContentType); mediaType != runtime.ContentTypeCBOR {
+		return config, defaulted
+	}
+
+	// The effective ContentType is CBOR and the client has previously received an HTTP 415 in
+	// response to a CBOR request. Override ContentType to JSON.
+	config.ContentType = runtime.ContentTypeJSON
+	return config, defaulted
+}
+
+// UnsupportedMediaType reports that the server has responded to a request with HTTP 415 Unsupported
+// Media Type.
+func (p *requestClientContentConfigProvider) UnsupportedMediaType(requestContentType string) {
+	if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) {
+		return
+	}
+
+	// This could be extended to consider the Content-Encoding request header, the Accept and
+	// Accept-Encoding response headers, the request method, and URI (as mentioned in
+	// https://www.rfc-editor.org/rfc/rfc9110.html#section-15.5.16). The request Content-Type
+	// header is sufficient to implement a blanket CBOR fallback mechanism.
+	requestContentType, _, _ = mime.ParseMediaType(requestContentType)
+	switch requestContentType {
+	case runtime.ContentTypeCBOR, string(types.ApplyCBORPatchType):
+		p.sawUnsupportedMediaTypeForCBOR.Store(true)
+	}
+}
+
+// clientNegotiatorWithCBORSequenceStreamDecoder is a ClientNegotiator that delegates to another
+// ClientNegotiator to select the appropriate Encoder or Decoder for a given media type. As a
+// special case, it will resolve "application/cbor-seq" (a CBOR Sequence, the concatenation of zero
+// or more CBOR data items) as an alias for "application/cbor" (exactly one CBOR data item) when
+// selecting a stream decoder.
+type clientNegotiatorWithCBORSequenceStreamDecoder struct {
+	negotiator runtime.ClientNegotiator
+}
+
+func (n clientNegotiatorWithCBORSequenceStreamDecoder) Encoder(contentType string, params map[string]string) (runtime.Encoder, error) {
+	return n.negotiator.Encoder(contentType, params)
+}
+
+func (n clientNegotiatorWithCBORSequenceStreamDecoder) Decoder(contentType string, params map[string]string) (runtime.Decoder, error) {
+	return n.negotiator.Decoder(contentType, params)
+}
+
+func (n clientNegotiatorWithCBORSequenceStreamDecoder) StreamDecoder(contentType string, params map[string]string) (runtime.Decoder, runtime.Serializer, runtime.Framer, error) {
+	if !clientfeatures.FeatureGates().Enabled(clientfeatures.ClientsAllowCBOR) {
+		return n.negotiator.StreamDecoder(contentType, params)
+	}
+
+	switch contentType {
+	case runtime.ContentTypeCBORSequence:
+		return n.negotiator.StreamDecoder(runtime.ContentTypeCBOR, params)
+	case runtime.ContentTypeCBOR:
+		// This media type is only appropriate for exactly one data item, not the zero or
+		// more events of a watch stream.
+		return nil, nil, nil, runtime.NegotiateError{ContentType: contentType, Stream: true}
+	default:
+		return n.negotiator.StreamDecoder(contentType, params)
+	}
+}
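To make the verb helpers above concrete, here is a hedged sketch of chained request building through a generated clientset's RESTClient. It assumes an in-cluster config and uses the current Do(ctx)/Into API; the "default" namespace and timeout are illustrative choices.

```go
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// Chained request building: Get() is shorthand for Verb("GET"), and the
// request accumulates path and query parameters until Do(ctx) executes it.
func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err) // returns rest.ErrNotInCluster outside a pod
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pods := &corev1.PodList{}
	err = cs.CoreV1().RESTClient().
		Get().
		Resource("pods").
		Namespace("default").
		Timeout(10 * time.Second).
		Do(context.TODO()).
		Into(pods)
	if err != nil {
		panic(err)
	}
	fmt.Println("pods in default:", len(pods.Items))
}
```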
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/config.go b/hack/tools/vendor/k8s.io/client-go/rest/config.go
new file mode 100644
index 0000000000..f2e813d075
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/config.go
@@ -0,0 +1,693 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	gruntime "runtime"
+	"strings"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/runtime/serializer/cbor"
+	"k8s.io/client-go/features"
+	"k8s.io/client-go/pkg/version"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	"k8s.io/client-go/transport"
+	certutil "k8s.io/client-go/util/cert"
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+)
+
+const (
+	DefaultQPS   float32 = 5.0
+	DefaultBurst int     = 10
+)
+
+var ErrNotInCluster = errors.New("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
+
+// Config holds the common attributes that can be passed to a Kubernetes client on
+// initialization.
+type Config struct {
+	// Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
+	// If a URL is given then the (optional) Path of that URL represents a prefix that must
+	// be appended to all request URIs used to access the apiserver. This allows a frontend
+	// proxy to easily relocate all of the apiserver endpoints.
+	Host string
+	// APIPath is a sub-path that points to an API root.
+	APIPath string
+
+	// ContentConfig contains settings that affect how objects are transformed when
+	// sent to the server.
+	ContentConfig
+
+	// Server requires Basic authentication
+	Username string
+	Password string `datapolicy:"password"`
+
+	// Server requires Bearer authentication. This client will not attempt to use
+	// refresh tokens for an OAuth2 flow.
+	// TODO: demonstrate an OAuth2 compatible client.
+	BearerToken string `datapolicy:"token"`
+
+	// Path to a file containing a BearerToken.
+	// If set, the contents are periodically read.
+	// The last successfully read value takes precedence over BearerToken.
+	BearerTokenFile string
+
+	// Impersonate is the configuration that RESTClient will use for impersonation.
+	Impersonate ImpersonationConfig
+
+	// Server requires plugin-specified authentication.
+	AuthProvider *clientcmdapi.AuthProviderConfig
+
+	// Callback to persist config for AuthProvider.
+	AuthConfigPersister AuthProviderConfigPersister
+
+	// Exec-based authentication provider.
+	ExecProvider *clientcmdapi.ExecConfig
+
+	// TLSClientConfig contains settings to enable transport layer security
+	TLSClientConfig
+
+	// UserAgent is an optional field that specifies the caller of this request.
+	UserAgent string
+
+	// DisableCompression bypasses automatic GZip compression requests to the
+	// server.
+	DisableCompression bool
+
+	// Transport may be used for custom HTTP behavior. This attribute may not
+	// be specified with the TLS client certificate options. Use WrapTransport
+	// to provide additional per-server middleware behavior.
+	Transport http.RoundTripper
+	// WrapTransport will be invoked for custom HTTP behavior after the underlying
+	// transport is initialized (either the transport created from TLSClientConfig,
+	// Transport, or http.DefaultTransport). The config may layer other RoundTrippers
+	// on top of the returned RoundTripper.
+	//
+	// A future release will change this field to an array. Use config.Wrap()
+	// instead of setting this value directly.
+	WrapTransport transport.WrapperFunc
+
+	// QPS indicates the maximum QPS to the master from this client.
+	// If it's zero, the created RESTClient will use DefaultQPS: 5
+	//
+	// Setting this to a negative value will disable client-side rate limiting
+	// unless `RateLimiter` is also set.
+	QPS float32
+
+	// Maximum burst for throttle.
+	// If it's zero, the created RESTClient will use DefaultBurst: 10.
+	Burst int
+
+	// Rate limiter for limiting connections to the master from this client. If present, it overrides QPS/Burst.
+	RateLimiter flowcontrol.RateLimiter
+
+	// WarningHandler handles warnings in server responses.
+	// If not set, the default warning handler is used.
+	// See documentation for SetDefaultWarningHandler() for details.
+	WarningHandler WarningHandler
+
+	// The maximum length of time to wait before giving up on a server request. A value of zero means no timeout.
+	Timeout time.Duration
+
+	// Dial specifies the dial function for creating unencrypted TCP connections.
+	Dial func(ctx context.Context, network, address string) (net.Conn, error)
+
+	// Proxy is the proxy func to be used for all requests made by this
+	// transport. If Proxy is nil, http.ProxyFromEnvironment is used. If Proxy
+	// returns a nil *URL, no proxy is used.
+	//
+	// socks5 proxying does not currently support spdy streaming endpoints.
+	Proxy func(*http.Request) (*url.URL, error)
+
+	// Version forces a specific version to be used (if registered)
+	// Do we need this?
+	// Version string
+}
+
+var _ fmt.Stringer = new(Config)
+var _ fmt.GoStringer = new(Config)
+
+type sanitizedConfig *Config
+
+type sanitizedAuthConfigPersister struct{ AuthProviderConfigPersister }
+
+func (sanitizedAuthConfigPersister) GoString() string {
+	return "rest.AuthProviderConfigPersister(--- REDACTED ---)"
+}
+func (sanitizedAuthConfigPersister) String() string {
+	return "rest.AuthProviderConfigPersister(--- REDACTED ---)"
+}
+
+type sanitizedObject struct{ runtime.Object }
+
+func (sanitizedObject) GoString() string {
+	return "runtime.Object(--- REDACTED ---)"
+}
+func (sanitizedObject) String() string {
+	return "runtime.Object(--- REDACTED ---)"
+}
+
+// GoString implements fmt.GoStringer and sanitizes sensitive fields of Config
+// to prevent accidental leaking via logs.
+func (c *Config) GoString() string {
+	return c.String()
+}
+
+// String implements fmt.Stringer and sanitizes sensitive fields of Config to
+// prevent accidental leaking via logs.
+func (c *Config) String() string {
+	if c == nil {
+		return ""
+	}
+	cc := sanitizedConfig(CopyConfig(c))
+	// Explicitly mark non-empty credential fields as redacted.
+	if cc.Password != "" {
+		cc.Password = "--- REDACTED ---"
+	}
+	if cc.BearerToken != "" {
+		cc.BearerToken = "--- REDACTED ---"
+	}
+	if cc.AuthConfigPersister != nil {
+		cc.AuthConfigPersister = sanitizedAuthConfigPersister{cc.AuthConfigPersister}
+	}
+	if cc.ExecProvider != nil && cc.ExecProvider.Config != nil {
+		cc.ExecProvider.Config = sanitizedObject{Object: cc.ExecProvider.Config}
+	}
+	return fmt.Sprintf("%#v", cc)
+}
+
+// ImpersonationConfig has all the available impersonation options
+type ImpersonationConfig struct {
+	// UserName is the username to impersonate on each request.
+	UserName string
+	// UID is a unique value that identifies the user.
+	UID string
+	// Groups are the groups to impersonate on each request.
+	Groups []string
+	// Extra is a free-form field which can be used to link some authentication information
+	// to authorization information. These key/value pairs are impersonated on each request.
+	Extra map[string][]string
+}
+
+// +k8s:deepcopy-gen=true
+// TLSClientConfig contains settings to enable transport layer security
+type TLSClientConfig struct {
+	// Server should be accessed without verifying the TLS certificate. For testing only.
+	Insecure bool
+	// ServerName is passed to the server for SNI and is used in the client to check server
+	// certificates against. If ServerName is empty, the hostname used to contact the
+	// server is used.
+	ServerName string
+
+	// Server requires TLS client certificate authentication
+	CertFile string
+	// Server requires TLS client certificate authentication
+	KeyFile string
+	// Trusted root certificates for server
+	CAFile string
+
+	// CertData holds PEM-encoded bytes (typically read from a client certificate file).
+	// CertData takes precedence over CertFile
+	CertData []byte
+	// KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
+	// KeyData takes precedence over KeyFile
+	KeyData []byte `datapolicy:"security-key"`
+	// CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
+	// CAData takes precedence over CAFile
+	CAData []byte
+
+	// NextProtos is a list of supported application level protocols, in order of preference.
+	// Used to populate tls.Config.NextProtos.
+	// To indicate to the server http/1.1 is preferred over http/2, set to ["http/1.1", "h2"] (though the server is free to ignore that preference).
+	// To use only http/1.1, set to ["http/1.1"].
+	NextProtos []string
+}
+
+var _ fmt.Stringer = TLSClientConfig{}
+var _ fmt.GoStringer = TLSClientConfig{}
+
+type sanitizedTLSClientConfig TLSClientConfig
+
+// GoString implements fmt.GoStringer and sanitizes sensitive fields of
+// TLSClientConfig to prevent accidental leaking via logs.
+func (c TLSClientConfig) GoString() string {
+	return c.String()
+}
+
+// String implements fmt.Stringer and sanitizes sensitive fields of
+// TLSClientConfig to prevent accidental leaking via logs.
+func (c TLSClientConfig) String() string {
+	cc := sanitizedTLSClientConfig{
+		Insecure:   c.Insecure,
+		ServerName: c.ServerName,
+		CertFile:   c.CertFile,
+		KeyFile:    c.KeyFile,
+		CAFile:     c.CAFile,
+		CertData:   c.CertData,
+		KeyData:    c.KeyData,
+		CAData:     c.CAData,
+		NextProtos: c.NextProtos,
+	}
+	// Explicitly mark non-empty credential fields as redacted.
+	if len(cc.CertData) != 0 {
+		cc.CertData = []byte("--- TRUNCATED ---")
+	}
+	if len(cc.KeyData) != 0 {
+		cc.KeyData = []byte("--- REDACTED ---")
+	}
+	return fmt.Sprintf("%#v", cc)
+}
+
+type ContentConfig struct {
+	// AcceptContentTypes specifies the types the client will accept and is optional.
+	// If not set, ContentType will be used to define the Accept header
+	AcceptContentTypes string
+	// ContentType specifies the wire format used to communicate with the server.
+	// This value will be set as the Accept header on requests made to the server, and
+	// as the default content type on any object sent to the server. If not set,
+	// "application/json" is used.
+	ContentType string
+	// GroupVersion is the API version to talk to. Must be provided when initializing
+	// a RESTClient directly. When initializing a Client, will be set with the default
+	// code version.
+	GroupVersion *schema.GroupVersion
+	// NegotiatedSerializer is used for obtaining encoders and decoders for multiple
+	// supported media types.
+	//
+	// TODO: NegotiatedSerializer will be phased out as internal clients are removed
+	//   from Kubernetes.
+	NegotiatedSerializer runtime.NegotiatedSerializer
+}
+
+// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
+// object. Note that a RESTClient may require fields that are optional when initializing a Client.
+// A RESTClient created by this method is generic - it expects to operate on an API that follows
+// the Kubernetes conventions, but may not be the Kubernetes API.
+// RESTClientFor is equivalent to calling RESTClientForConfigAndClient(config, httpClient),
+// where httpClient was generated with HTTPClientFor(config).
+func RESTClientFor(config *Config) (*RESTClient, error) {
+	if config.GroupVersion == nil {
+		return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient")
+	}
+	if config.NegotiatedSerializer == nil {
+		return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
+	}
+
+	// Validate config.Host before constructing the transport/client so we can fail fast.
+	// ServerURL will be obtained later in RESTClientForConfigAndClient()
+	_, _, err := DefaultServerUrlFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	httpClient, err := HTTPClientFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	return RESTClientForConfigAndClient(config, httpClient)
+}
+
+// RESTClientForConfigAndClient returns a RESTClient that satisfies the requested attributes on a
+// client Config object.
+// Unlike RESTClientFor, RESTClientForConfigAndClient allows passing an http.Client that is shared
+// between all the API Groups and Versions.
+// Note that the http client takes precedence over the transport values configured.
+// The http client defaults to the `http.DefaultClient` if nil.
+func RESTClientForConfigAndClient(config *Config, httpClient *http.Client) (*RESTClient, error) {
+	if config.GroupVersion == nil {
+		return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient")
+	}
+	if config.NegotiatedSerializer == nil {
+		return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
+	}
+
+	baseURL, versionedAPIPath, err := DefaultServerUrlFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	rateLimiter := config.RateLimiter
+	if rateLimiter == nil {
+		qps := config.QPS
+		if config.QPS == 0.0 {
+			qps = DefaultQPS
+		}
+		burst := config.Burst
+		if config.Burst == 0 {
+			burst = DefaultBurst
+		}
+		if qps > 0 {
+			rateLimiter = flowcontrol.NewTokenBucketRateLimiter(qps, burst)
+		}
+	}
+
+	var gv schema.GroupVersion
+	if config.GroupVersion != nil {
+		gv = *config.GroupVersion
+	}
+	clientContent := ClientContentConfig{
+		AcceptContentTypes: config.AcceptContentTypes,
+		ContentType:        config.ContentType,
+		GroupVersion:       gv,
+		Negotiator:         runtime.NewClientNegotiator(config.NegotiatedSerializer, gv),
+	}
+
+	restClient, err := NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient)
+	if err == nil && config.WarningHandler != nil {
+		restClient.warningHandler = config.WarningHandler
+	}
+	return restClient, err
+}
+
+// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows
+// the config.Version to be empty.
+func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
+	if config.NegotiatedSerializer == nil {
+		return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
+	}
+
+	// Validate config.Host before constructing the transport/client so we can fail fast.
+	// ServerURL will be obtained later in UnversionedRESTClientForConfigAndClient()
+	_, _, err := DefaultServerUrlFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	httpClient, err := HTTPClientFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	return UnversionedRESTClientForConfigAndClient(config, httpClient)
+}
+
+// UnversionedRESTClientForConfigAndClient is the same as RESTClientForConfigAndClient,
+// except that it allows the config.Version to be empty.
+func UnversionedRESTClientForConfigAndClient(config *Config, httpClient *http.Client) (*RESTClient, error) {
+	if config.NegotiatedSerializer == nil {
+		return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient")
+	}
+
+	baseURL, versionedAPIPath, err := DefaultServerUrlFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	rateLimiter := config.RateLimiter
+	if rateLimiter == nil {
+		qps := config.QPS
+		if config.QPS == 0.0 {
+			qps = DefaultQPS
+		}
+		burst := config.Burst
+		if config.Burst == 0 {
+			burst = DefaultBurst
+		}
+		if qps > 0 {
+			rateLimiter = flowcontrol.NewTokenBucketRateLimiter(qps, burst)
+		}
+	}
+
+	gv := metav1.SchemeGroupVersion
+	if config.GroupVersion != nil {
+		gv = *config.GroupVersion
+	}
+	clientContent := ClientContentConfig{
+		AcceptContentTypes: config.AcceptContentTypes,
+		ContentType:        config.ContentType,
+		GroupVersion:       gv,
+		Negotiator:         runtime.NewClientNegotiator(config.NegotiatedSerializer, gv),
+	}
+
+	restClient, err := NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient)
+	if err == nil && config.WarningHandler != nil {
+		restClient.warningHandler = config.WarningHandler
+	}
+	return restClient, err
+}
+
+// SetKubernetesDefaults sets default values on the provided client config for accessing the
+// Kubernetes API or returns an error if any of the defaults are impossible or invalid.
+func SetKubernetesDefaults(config *Config) error {
+	if len(config.UserAgent) == 0 {
+		config.UserAgent = DefaultKubernetesUserAgent()
+	}
+	return nil
+}
+
+// adjustCommit returns sufficient significant figures of the commit's git hash.
+func adjustCommit(c string) string {
+	if len(c) == 0 {
+		return "unknown"
+	}
+	if len(c) > 7 {
+		return c[:7]
+	}
+	return c
+}
+
+// adjustVersion strips "alpha", "beta", etc. from version in form
+// major.minor.patch-[alpha|beta|etc].
+func adjustVersion(v string) string {
+	if len(v) == 0 {
+		return "unknown"
+	}
+	seg := strings.SplitN(v, "-", 2)
+	return seg[0]
+}
+
+// adjustCommand returns the last component of the
+// OS-specific command path for use in User-Agent.
+func adjustCommand(p string) string {
+	// Unlikely, but better than returning "".
+	if len(p) == 0 {
+		return "unknown"
+	}
+	return filepath.Base(p)
+}
+
+// buildUserAgent builds a User-Agent string from given args.
+func buildUserAgent(command, version, os, arch, commit string) string {
+	return fmt.Sprintf(
+		"%s/%s (%s/%s) kubernetes/%s", command, version, os, arch, commit)
+}
+
+// DefaultKubernetesUserAgent returns a User-Agent string built from static global vars.
+func DefaultKubernetesUserAgent() string {
+	return buildUserAgent(
+		adjustCommand(os.Args[0]),
+		adjustVersion(version.Get().GitVersion),
+		gruntime.GOOS,
+		gruntime.GOARCH,
+		adjustCommit(version.Get().GitCommit))
+}
+
+// InClusterConfig returns a config object which uses the service account
+// kubernetes gives to pods. It's intended for clients that expect to be
+// running inside a pod running on kubernetes. It will return ErrNotInCluster
+// if called from a process not running in a kubernetes environment.
+func InClusterConfig() (*Config, error) {
+	const (
+		tokenFile  = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+		rootCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+	)
+	host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
+	if len(host) == 0 || len(port) == 0 {
+		return nil, ErrNotInCluster
+	}
+
+	token, err := os.ReadFile(tokenFile)
+	if err != nil {
+		return nil, err
+	}
+
+	tlsClientConfig := TLSClientConfig{}
+
+	if _, err := certutil.NewPool(rootCAFile); err != nil {
+		klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
+	} else {
+		tlsClientConfig.CAFile = rootCAFile
+	}
+
+	return &Config{
+		// TODO: switch to using cluster DNS.
+		Host:            "https://" + net.JoinHostPort(host, port),
+		TLSClientConfig: tlsClientConfig,
+		BearerToken:     string(token),
+		BearerTokenFile: tokenFile,
+	}, nil
+}
+
+// IsConfigTransportTLS returns true if and only if the provided
+// config will result in a protected connection to the server when it
+// is passed to restclient.RESTClientFor().  Use to determine when to
+// send credentials over the wire.
+//
+// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
+// still possible.
+func IsConfigTransportTLS(config Config) bool {
+	baseURL, _, err := DefaultServerUrlFor(&config)
+	if err != nil {
+		return false
+	}
+	return baseURL.Scheme == "https"
+}
+
+// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
+// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
+// either populated or were empty to start.
+func LoadTLSFiles(c *Config) error {
+	var err error
+	c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile)
+	if err != nil {
+		return err
+	}
+
+	c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile)
+	if err != nil {
+		return err
+	}
+
+	c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile)
+	return err
+}
+
+// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
+// or an error if an error occurred reading the file
+func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
+	if len(data) > 0 {
+		return data, nil
+	}
+	if len(file) > 0 {
+		fileData, err := os.ReadFile(file)
+		if err != nil {
+			return []byte{}, err
+		}
+		return fileData, nil
+	}
+	return nil, nil
+}
+
+func AddUserAgent(config *Config, userAgent string) *Config {
+	fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent
+	config.UserAgent = fullUserAgent
+	return config
+}
+
+// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) and custom transports (WrapTransport, Transport) removed
+func AnonymousClientConfig(config *Config) *Config {
+	// copy only known safe fields
+	return &Config{
+		Host:          config.Host,
+		APIPath:       config.APIPath,
+		ContentConfig: config.ContentConfig,
+		TLSClientConfig: TLSClientConfig{
+			Insecure:   config.Insecure,
+			ServerName: config.ServerName,
+			CAFile:     config.TLSClientConfig.CAFile,
+			CAData:     config.TLSClientConfig.CAData,
+			NextProtos: config.TLSClientConfig.NextProtos,
+		},
+		RateLimiter:        config.RateLimiter,
+		WarningHandler:     config.WarningHandler,
+		UserAgent:          config.UserAgent,
+		DisableCompression: config.DisableCompression,
+		QPS:                config.QPS,
+		Burst:              config.Burst,
+		Timeout:            config.Timeout,
+		Dial:               config.Dial,
+		Proxy:              config.Proxy,
+	}
+}
+
+// CopyConfig returns a copy of the given config
+func CopyConfig(config *Config) *Config {
+	c := &Config{
+		Host:            config.Host,
+		APIPath:         config.APIPath,
+		ContentConfig:   config.ContentConfig,
+		Username:        config.Username,
+		Password:        config.Password,
+		BearerToken:     config.BearerToken,
+		BearerTokenFile: config.BearerTokenFile,
+		Impersonate: ImpersonationConfig{
+			UserName: config.Impersonate.UserName,
+			UID:      config.Impersonate.UID,
+			Groups:   config.Impersonate.Groups,
+			Extra:    config.Impersonate.Extra,
+		},
+		AuthProvider:        config.AuthProvider,
+		AuthConfigPersister: config.AuthConfigPersister,
+		ExecProvider:        config.ExecProvider,
+		TLSClientConfig: TLSClientConfig{
+			Insecure:   config.TLSClientConfig.Insecure,
+			ServerName: config.TLSClientConfig.ServerName,
+			CertFile:   config.TLSClientConfig.CertFile,
+			KeyFile:    config.TLSClientConfig.KeyFile,
+			CAFile:     config.TLSClientConfig.CAFile,
+			CertData:   config.TLSClientConfig.CertData,
+			KeyData:    config.TLSClientConfig.KeyData,
+			CAData:     config.TLSClientConfig.CAData,
+			NextProtos: config.TLSClientConfig.NextProtos,
+		},
+		UserAgent:          config.UserAgent,
+		DisableCompression: config.DisableCompression,
+		Transport:          config.Transport,
+		WrapTransport:      config.WrapTransport,
+		QPS:                config.QPS,
+		Burst:              config.Burst,
+		RateLimiter:        config.RateLimiter,
+		WarningHandler:     config.WarningHandler,
+		Timeout:            config.Timeout,
+		Dial:               config.Dial,
+		Proxy:              config.Proxy,
+	}
+	if config.ExecProvider != nil && config.ExecProvider.Config != nil {
+		c.ExecProvider.Config = config.ExecProvider.Config.DeepCopyObject()
+	}
+	return c
+}
+
+// CodecFactoryForGeneratedClient returns the provided CodecFactory if there are no enabled client
+// feature gates affecting serialization. Otherwise, it constructs and returns a new CodecFactory
+// from the provided Scheme.
+//
+// This is supported ONLY for use by clients generated with client-gen. The caller is responsible
+// for ensuring that the CodecFactory argument was constructed using the Scheme argument.
+func CodecFactoryForGeneratedClient(scheme *runtime.Scheme, codecs serializer.CodecFactory) serializer.CodecFactory {
+	if !features.FeatureGates().Enabled(features.ClientsAllowCBOR) {
+		// NOTE: This assumes client-gen will not generate CBOR-enabled Codecs as long as
+		// the feature gate exists.
+		return codecs
+	}
+
+	return serializer.NewCodecFactory(scheme, serializer.WithSerializer(cbor.NewSerializerInfo))
+}
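A brief sketch of the sanitization and copying helpers in this file; the host and token values below are placeholders.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/rest"
)

// Config implements fmt.Stringer with credential redaction, so a config can
// be logged without leaking secrets; AnonymousClientConfig strips
// credentials and custom transports entirely.
func main() {
	cfg := &rest.Config{
		Host:        "https://example.cluster:6443", // placeholder
		BearerToken: "s3cr3t",                       // placeholder
	}
	fmt.Println(cfg) // BearerToken prints as "--- REDACTED ---"

	anon := rest.AnonymousClientConfig(cfg)
	fmt.Println(anon.BearerToken == "") // true: credentials removed
}
```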
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/exec.go b/hack/tools/vendor/k8s.io/client-go/rest/exec.go
new file mode 100644
index 0000000000..96f5dba357
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/exec.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+
+	clientauthenticationapi "k8s.io/client-go/pkg/apis/clientauthentication"
+)
+
+// This file contains Config logic related to exec credential plugins.
+
+// ConfigToExecCluster creates a clientauthenticationapi.Cluster with the corresponding fields from
+// the provided Config.
+func ConfigToExecCluster(config *Config) (*clientauthenticationapi.Cluster, error) {
+	caData, err := dataFromSliceOrFile(config.CAData, config.CAFile)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load CA bundle for execProvider: %v", err)
+	}
+
+	var proxyURL string
+	if config.Proxy != nil {
+		req, err := http.NewRequest("", config.Host, nil)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create proxy URL request for execProvider: %w", err)
+		}
+		url, err := config.Proxy(req)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get proxy URL for execProvider: %w", err)
+		}
+		if url != nil {
+			proxyURL = url.String()
+		}
+	}
+
+	return &clientauthenticationapi.Cluster{
+		Server:                   config.Host,
+		TLSServerName:            config.ServerName,
+		InsecureSkipTLSVerify:    config.Insecure,
+		CertificateAuthorityData: caData,
+		ProxyURL:                 proxyURL,
+		DisableCompression:       config.DisableCompression,
+		Config:                   config.ExecProvider.Config,
+	}, nil
+}
+
+// ExecClusterToConfig creates a Config with the corresponding fields from the provided
+// clientauthenticationapi.Cluster. The returned Config will be anonymous (i.e., it will not have
+// any authentication-related fields set).
+func ExecClusterToConfig(cluster *clientauthenticationapi.Cluster) (*Config, error) {
+	var proxy func(*http.Request) (*url.URL, error)
+	if cluster.ProxyURL != "" {
+		proxyURL, err := url.Parse(cluster.ProxyURL)
+		if err != nil {
+			return nil, fmt.Errorf("cannot parse proxy URL: %w", err)
+		}
+		proxy = http.ProxyURL(proxyURL)
+	}
+
+	return &Config{
+		Host: cluster.Server,
+		TLSClientConfig: TLSClientConfig{
+			Insecure:   cluster.InsecureSkipTLSVerify,
+			ServerName: cluster.TLSServerName,
+			CAData:     cluster.CertificateAuthorityData,
+		},
+		Proxy:              proxy,
+		DisableCompression: cluster.DisableCompression,
+	}, nil
+}
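The two conversions above round-trip cluster details between a rest.Config and the Cluster object handed to exec plugins. A hedged usage sketch follows; the server and proxy URLs are placeholders.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/pkg/apis/clientauthentication"
	"k8s.io/client-go/rest"
)

// Builds an anonymous rest.Config from the cluster info an exec plugin
// receives; no credential fields are populated, by design.
func main() {
	cluster := &clientauthentication.Cluster{
		Server:        "https://example.cluster:6443", // placeholder
		TLSServerName: "kubernetes",
		ProxyURL:      "http://proxy.internal:3128", // placeholder
	}
	cfg, err := rest.ExecClusterToConfig(cluster)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Host, cfg.ServerName) // credentials remain empty
}
```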
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/plugin.go b/hack/tools/vendor/k8s.io/client-go/rest/plugin.go
new file mode 100644
index 0000000000..ae5cbdc2c4
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/plugin.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"fmt"
+	"net/http"
+	"sync"
+
+	"k8s.io/klog/v2"
+
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+type AuthProvider interface {
+	// WrapTransport allows the plugin to create a modified RoundTripper that
+	// attaches authorization headers (or other info) to requests.
+	WrapTransport(http.RoundTripper) http.RoundTripper
+	// Login allows the plugin to initialize its configuration. It must not
+	// require direct user interaction.
+	Login() error
+}
+
+// Factory generates an AuthProvider plugin.
+//
+//	clusterAddress is the address of the current cluster.
+//	config is the initial configuration for this plugin.
+//	persister allows the plugin to save updated configuration.
+type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error)
+
+// AuthProviderConfigPersister allows a plugin to persist configuration info
+// for just itself.
+type AuthProviderConfigPersister interface {
+	Persist(map[string]string) error
+}
+
+type noopPersister struct{}
+
+func (n *noopPersister) Persist(_ map[string]string) error {
+	// no operation persister
+	return nil
+}
+
+// All registered auth provider plugins.
+var pluginsLock sync.Mutex
+var plugins = make(map[string]Factory)
+
+func RegisterAuthProviderPlugin(name string, plugin Factory) error {
+	pluginsLock.Lock()
+	defer pluginsLock.Unlock()
+	if _, found := plugins[name]; found {
+		return fmt.Errorf("auth Provider Plugin %q was registered twice", name)
+	}
+	klog.V(4).Infof("Registered Auth Provider Plugin %q", name)
+	plugins[name] = plugin
+	return nil
+}
+
+func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig, persister AuthProviderConfigPersister) (AuthProvider, error) {
+	pluginsLock.Lock()
+	defer pluginsLock.Unlock()
+	p, ok := plugins[apc.Name]
+	if !ok {
+		return nil, fmt.Errorf("no Auth Provider found for name %q", apc.Name)
+	}
+	if persister == nil {
+		persister = &noopPersister{}
+	}
+	return p(clusterAddress, apc.Config, persister)
+}
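To illustrate the registry above: a provider registers a Factory under a name, and GetAuthProvider later instantiates it for a cluster. Below is a minimal, hypothetical provider that injects a static bearer header; the plugin name "example-header" and the config key "token" are invented for the sketch, and a real provider should clone the request before mutating headers.

```go
package main

import (
	"net/http"

	"k8s.io/client-go/rest"
)

// headerProvider is a hypothetical AuthProvider that attaches a static
// Authorization header to every request.
type headerProvider struct{ token string }

type roundTripperFunc func(*http.Request) (*http.Response, error)

func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) }

func (p *headerProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
	return roundTripperFunc(func(req *http.Request) (*http.Response, error) {
		// For brevity the request is mutated in place; real providers clone it.
		req.Header.Set("Authorization", "Bearer "+p.token)
		return rt.RoundTrip(req)
	})
}

func (p *headerProvider) Login() error { return nil }

func init() {
	// Registering the same name twice returns an error.
	_ = rest.RegisterAuthProviderPlugin("example-header", func(addr string, cfg map[string]string, _ rest.AuthProviderConfigPersister) (rest.AuthProvider, error) {
		return &headerProvider{token: cfg["token"]}, nil
	})
}

func main() {}
```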
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/request.go b/hack/tools/vendor/k8s.io/client-go/rest/request.go
new file mode 100644
index 0000000000..0ec90ad188
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/request.go
@@ -0,0 +1,1700 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"mime"
+	"net/http"
+	"net/http/httptrace"
+	"net/url"
+	"os"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/http2"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+	"k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/pkg/watch"
+	clientfeatures "k8s.io/client-go/features"
+	restclientwatch "k8s.io/client-go/rest/watch"
+	"k8s.io/client-go/tools/metrics"
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+	"k8s.io/utils/clock"
+)
+
+var (
+	// longThrottleLatency defines the threshold for logging requests. All requests being
+	// throttled (via the provided rateLimiter) for more than longThrottleLatency will
+	// be logged.
+	longThrottleLatency = 50 * time.Millisecond
+
+	// extraLongThrottleLatency defines the threshold for logging requests at log level 2.
+	extraLongThrottleLatency = 1 * time.Second
+)
+
+// HTTPClient is an interface for testing a request object.
+type HTTPClient interface {
+	Do(req *http.Request) (*http.Response, error)
+}
+
+// ResponseWrapper is an interface for getting a response.
+// The response may be accessed either as raw data (the whole output is read into memory) or as a stream.
+type ResponseWrapper interface {
+	DoRaw(context.Context) ([]byte, error)
+	Stream(context.Context) (io.ReadCloser, error)
+}
+
+// RequestConstructionError is returned when there's an error assembling a request.
+type RequestConstructionError struct {
+	Err error
+}
+
+// Error returns a textual description of 'r'.
+func (r *RequestConstructionError) Error() string {
+	return fmt.Sprintf("request construction error: '%v'", r.Err)
+}
+
+var noBackoff = &NoBackoff{}
+
+type requestRetryFunc func(maxRetries int) WithRetry
+
+func defaultRequestRetryFn(maxRetries int) WithRetry {
+	return &withRetry{maxRetries: maxRetries}
+}
+
+// Request allows for building up a request to a server in a chained fashion.
+// Any errors are stored until the end of your call, so you only have to
+// check once.
+type Request struct {
+	c *RESTClient
+
+	contentConfig     ClientContentConfig
+	contentTypeNotSet bool
+
+	warningHandler WarningHandler
+
+	rateLimiter flowcontrol.RateLimiter
+	backoff     BackoffManager
+	timeout     time.Duration
+	maxRetries  int
+
+	// generic components accessible via method setters
+	verb       string
+	pathPrefix string
+	subpath    string
+	params     url.Values
+	headers    http.Header
+
+	// structural elements of the request that are part of the Kubernetes API conventions
+	namespace    string
+	namespaceSet bool
+	resource     string
+	resourceName string
+	subresource  string
+
+	// output
+	err error
+
+	// only one of body / bodyBytes may be set. requests using body are not retryable.
+	body      io.Reader
+	bodyBytes []byte
+
+	retryFn requestRetryFunc
+}
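+
+// A sketch of the chained style this enables; the client value and the
+// resource names are placeholders:
+//
+//	req := NewRequest(client).
+//		Verb("GET").
+//		Namespace("default").
+//		Resource("pods").
+//		Name("example")
+//	if err := req.Error(); err != nil {
+//		// A bad namespace, resource, or name surfaces here in one check.
+//	}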
+
+// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
+func NewRequest(c *RESTClient) *Request {
+	var backoff BackoffManager
+	if c.createBackoffMgr != nil {
+		backoff = c.createBackoffMgr()
+	}
+	if backoff == nil {
+		backoff = noBackoff
+	}
+
+	var pathPrefix string
+	if c.base != nil {
+		pathPrefix = path.Join("/", c.base.Path, c.versionedAPIPath)
+	} else {
+		pathPrefix = path.Join("/", c.versionedAPIPath)
+	}
+
+	var timeout time.Duration
+	if c.Client != nil {
+		timeout = c.Client.Timeout
+	}
+
+	// A request needs to know whether the content type was explicitly configured or selected by
+	// default in order to support the per-request Protobuf override used by clients generated
+	// with --prefers-protobuf.
+	contentConfig, contentTypeDefaulted := c.content.GetClientContentConfig()
+
+	r := &Request{
+		c:              c,
+		rateLimiter:    c.rateLimiter,
+		backoff:        backoff,
+		timeout:        timeout,
+		pathPrefix:     pathPrefix,
+		maxRetries:     10,
+		retryFn:        defaultRequestRetryFn,
+		warningHandler: c.warningHandler,
+
+		contentConfig:     contentConfig,
+		contentTypeNotSet: contentTypeDefaulted,
+	}
+
+	r.setAcceptHeader()
+	return r
+}
+
+// NewRequestWithClient creates a Request with an embedded RESTClient for use in test scenarios.
+func NewRequestWithClient(base *url.URL, versionedAPIPath string, content ClientContentConfig, client *http.Client) *Request {
+	return NewRequest(&RESTClient{
+		base:             base,
+		versionedAPIPath: versionedAPIPath,
+		content:          requestClientContentConfigProvider{base: content},
+		Client:           client,
+	})
+}
+
+func (r *Request) UseProtobufAsDefaultIfPreferred(prefersProtobuf bool) *Request {
+	if prefersProtobuf {
+		return r.UseProtobufAsDefault()
+	}
+	return r
+}
+
+func (r *Request) UseProtobufAsDefault() *Request {
+	if r.contentTypeNotSet && len(r.contentConfig.AcceptContentTypes) == 0 {
+		r.contentConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
+		r.contentConfig.ContentType = "application/vnd.kubernetes.protobuf"
+		r.setAcceptHeader()
+	}
+	return r
+}
+
+func (r *Request) setAcceptHeader() {
+	switch {
+	case len(r.contentConfig.AcceptContentTypes) > 0:
+		r.SetHeader("Accept", r.contentConfig.AcceptContentTypes)
+	case len(r.contentConfig.ContentType) > 0:
+		r.SetHeader("Accept", r.contentConfig.ContentType+", */*")
+	}
+}
+
+// Verb sets the verb this request will use.
+func (r *Request) Verb(verb string) *Request {
+	r.verb = verb
+	return r
+}
+
+// Prefix adds segments to the beginning of the request path. These
+// items will be placed before the optional Namespace, Resource, or Name sections.
+// Setting AbsPath will clear any previously set Prefix segments.
+func (r *Request) Prefix(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.pathPrefix = path.Join(r.pathPrefix, path.Join(segments...))
+	return r
+}
+
+// Suffix appends segments to the end of the path. These items will be placed after the prefix and optional
+// Namespace, Resource, or Name sections.
+func (r *Request) Suffix(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.subpath = path.Join(r.subpath, path.Join(segments...))
+	return r
+}
+
+// Resource sets the resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Resource(resource string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if len(r.resource) != 0 {
+		r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(resource); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs)
+		return r
+	}
+	r.resource = resource
+	return r
+}
+
+// BackOff sets the request's backoff manager to the one specified,
+// or defaults to the stub implementation if nil is provided.
+func (r *Request) BackOff(manager BackoffManager) *Request {
+	if manager == nil {
+		r.backoff = &NoBackoff{}
+		return r
+	}
+
+	r.backoff = manager
+	return r
+}
+
+// WarningHandler sets the handler this client uses when warning headers are encountered.
+// If set to nil, this client will use the default warning handler (see SetDefaultWarningHandler).
+func (r *Request) WarningHandler(handler WarningHandler) *Request {
+	r.warningHandler = handler
+	return r
+}
+
+// Throttle receives a rate-limiter and sets or replaces an existing request limiter
+func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request {
+	r.rateLimiter = limiter
+	return r
+}
+
+// SubResource sets a sub-resource path which can be multiple segments after the resource
+// name but before the suffix.
+func (r *Request) SubResource(subresources ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	subresource := path.Join(subresources...)
+	if len(r.subresource) != 0 {
+		r.err = fmt.Errorf("subresource already set to %q, cannot change to %q", r.subresource, subresource)
+		return r
+	}
+	for _, s := range subresources {
+		if msgs := IsValidPathSegmentName(s); len(msgs) != 0 {
+			r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs)
+			return r
+		}
+	}
+	r.subresource = subresource
+	return r
+}
+
+// Name sets the name of a resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Name(resourceName string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if len(resourceName) == 0 {
+		r.err = fmt.Errorf("resource name may not be empty")
+		return r
+	}
+	if len(r.resourceName) != 0 {
+		r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(resourceName); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs)
+		return r
+	}
+	r.resourceName = resourceName
+	return r
+}
+
+// Namespace applies the namespace scope to a request (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Namespace(namespace string) *Request {
+	if r.err != nil {
+		return r
+	}
+	if r.namespaceSet {
+		r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace)
+		return r
+	}
+	if msgs := IsValidPathSegmentName(namespace); len(msgs) != 0 {
+		r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs)
+		return r
+	}
+	r.namespaceSet = true
+	r.namespace = namespace
+	return r
+}
+
+// NamespaceIfScoped is a convenience function to set a namespace if scoped is true
+func (r *Request) NamespaceIfScoped(namespace string, scoped bool) *Request {
+	if scoped {
+		return r.Namespace(namespace)
+	}
+	return r
+}
+
+// AbsPath overwrites an existing path with the segments provided. Trailing slashes are preserved
+// when a single segment is passed.
+func (r *Request) AbsPath(segments ...string) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.pathPrefix = path.Join(r.c.base.Path, path.Join(segments...))
+	if len(segments) == 1 && (len(r.c.base.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") {
+		// preserve any trailing slashes for legacy behavior
+		r.pathPrefix += "/"
+	}
+	return r
+}
+
+// RequestURI overwrites the existing path and parameters with the value of the
+// provided server-relative URI.
+func (r *Request) RequestURI(uri string) *Request {
+	if r.err != nil {
+		return r
+	}
+	locator, err := url.Parse(uri)
+	if err != nil {
+		r.err = err
+		return r
+	}
+	r.pathPrefix = locator.Path
+	if len(locator.Query()) > 0 {
+		if r.params == nil {
+			r.params = make(url.Values)
+		}
+		for k, v := range locator.Query() {
+			r.params[k] = v
+		}
+	}
+	return r
+}
+
+// Param creates a query parameter with the given string value.
+func (r *Request) Param(paramName, s string) *Request {
+	if r.err != nil {
+		return r
+	}
+	return r.setParam(paramName, s)
+}
+
+// VersionedParams will take the provided object, serialize it to a map[string][]string using the
+// implicit RESTClient API version and the default parameter codec, and then add those as parameters
+// to the request. Use this to provide versioned query parameters from client libraries.
+// VersionedParams will not write query parameters that have omitempty set and are empty. If a
+// parameter has already been set it is appended to (Params and VersionedParams are additive).
+func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request {
+	return r.SpecificallyVersionedParams(obj, codec, r.contentConfig.GroupVersion)
+}
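+
+// For instance, a typed options struct can be flattened into query parameters;
+// scheme.ParameterCodec below is assumed to come from
+// k8s.io/client-go/kubernetes/scheme:
+//
+//	opts := metav1.ListOptions{LabelSelector: "app=web", Limit: 10}
+//	r = r.VersionedParams(&opts, scheme.ParameterCodec)
+//	// the request URL now carries ?labelSelector=app%3Dweb&limit=10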
+
+func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request {
+	if r.err != nil {
+		return r
+	}
+	params, err := codec.EncodeParameters(obj, version)
+	if err != nil {
+		r.err = err
+		return r
+	}
+	for k, v := range params {
+		if r.params == nil {
+			r.params = make(url.Values)
+		}
+		r.params[k] = append(r.params[k], v...)
+	}
+	return r
+}
+
+func (r *Request) setParam(paramName, value string) *Request {
+	if r.params == nil {
+		r.params = make(url.Values)
+	}
+	r.params[paramName] = append(r.params[paramName], value)
+	return r
+}
+
+func (r *Request) SetHeader(key string, values ...string) *Request {
+	if r.headers == nil {
+		r.headers = http.Header{}
+	}
+	r.headers.Del(key)
+	for _, value := range values {
+		r.headers.Add(key, value)
+	}
+	return r
+}
+
+// Timeout makes the request use the given duration as an overall timeout for the
+// request. Additionally, if set, the value is passed as a "timeout" parameter in the URL.
+func (r *Request) Timeout(d time.Duration) *Request {
+	if r.err != nil {
+		return r
+	}
+	r.timeout = d
+	return r
+}
+
+// MaxRetries makes the request use the given integer as a ceiling for the number of
+// retries upon receiving a "Retry-After" header and a 429 status code in the response.
+// The default is 10 unless this function is specifically called with a different value.
+// A maxRetries of zero disables retries and returns the first error immediately.
+func (r *Request) MaxRetries(maxRetries int) *Request {
+	if maxRetries < 0 {
+		maxRetries = 0
+	}
+	r.maxRetries = maxRetries
+	return r
+}
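+
+// For example, to fail fast rather than honor Retry-After responses
+// (a sketch; r is any request under construction):
+//
+//	result := r.MaxRetries(0).Do(ctx) // the first 429 becomes the returned error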
+
+// Body makes the request use obj as the body. Optional.
+// If obj is a string, try to read a file of that name.
+// If obj is a []byte, send it directly.
+// If obj is an io.Reader, use it directly.
+// If obj is a runtime.Object, marshal it correctly, and set Content-Type header.
+// If obj is a runtime.Object and nil, do nothing.
+// Otherwise, set an error.
+func (r *Request) Body(obj interface{}) *Request {
+	if r.err != nil {
+		return r
+	}
+	switch t := obj.(type) {
+	case string:
+		data, err := os.ReadFile(t)
+		if err != nil {
+			r.err = err
+			return r
+		}
+		r.body = nil
+		r.bodyBytes = data
+	case []byte:
+		r.body = nil
+		r.bodyBytes = t
+	case io.Reader:
+		r.body = t
+		r.bodyBytes = nil
+	case runtime.Object:
+		// callers may pass typed interface pointers, therefore we must check nil with reflection
+		if reflect.ValueOf(t).IsNil() {
+			return r
+		}
+		encoder, err := r.contentConfig.Negotiator.Encoder(r.contentConfig.ContentType, nil)
+		if err != nil {
+			r.err = err
+			return r
+		}
+		data, err := runtime.Encode(encoder, t)
+		if err != nil {
+			r.err = err
+			return r
+		}
+		r.body = nil
+		r.bodyBytes = data
+		r.SetHeader("Content-Type", r.contentConfig.ContentType)
+	default:
+		r.err = fmt.Errorf("unknown type used for body: %+v", obj)
+	}
+	return r
+}
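+
+// Two of the accepted body forms, sketched (pod stands in for any non-nil
+// runtime.Object):
+//
+//	r = r.Body(pod)                      // runtime.Object: encoded, Content-Type header set
+//	r = r.Body([]byte(`{"kind":"Pod"}`)) // []byte: sent as-is and retryable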
+
+// Error returns any error encountered constructing the request, if any.
+func (r *Request) Error() error {
+	return r.err
+}
+
+// URL returns the current working URL. Check the result of Error() to ensure
+// that the returned URL is valid.
+func (r *Request) URL() *url.URL {
+	p := r.pathPrefix
+	if r.namespaceSet && len(r.namespace) > 0 {
+		p = path.Join(p, "namespaces", r.namespace)
+	}
+	if len(r.resource) != 0 {
+		p = path.Join(p, strings.ToLower(r.resource))
+	}
+	// Join trims trailing slashes, so preserve r.pathPrefix's trailing slash for backwards compatibility if nothing was changed
+	if len(r.resourceName) != 0 || len(r.subpath) != 0 || len(r.subresource) != 0 {
+		p = path.Join(p, r.resourceName, r.subresource, r.subpath)
+	}
+
+	finalURL := &url.URL{}
+	if r.c.base != nil {
+		*finalURL = *r.c.base
+	}
+	finalURL.Path = p
+
+	query := url.Values{}
+	for key, values := range r.params {
+		for _, value := range values {
+			query.Add(key, value)
+		}
+	}
+
+	// timeout is handled specially here.
+	if r.timeout != 0 {
+		query.Set("timeout", r.timeout.String())
+	}
+	finalURL.RawQuery = query.Encode()
+	return finalURL
+}
+
+// finalURLTemplate is similar to URL(), but will make all specific parameter values equal
+// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query
+// parameters will be reset. This creates a copy of the url so as not to change the
+// underlying object.
+func (r Request) finalURLTemplate() url.URL {
+	newParams := url.Values{}
+	v := []string{"{value}"}
+	for k := range r.params {
+		newParams[k] = v
+	}
+	r.params = newParams
+	u := r.URL()
+	if u == nil {
+		return url.URL{}
+	}
+
+	segments := strings.Split(u.Path, "/")
+	groupIndex := 0
+	index := 0
+	trimmedBasePath := ""
+	if r.c.base != nil && strings.Contains(u.Path, r.c.base.Path) {
+		p := strings.TrimPrefix(u.Path, r.c.base.Path)
+		if !strings.HasPrefix(p, "/") {
+			p = "/" + p
+		}
+		// store the base path that we have trimmed so we can append it
+		// before returning the URL
+		trimmedBasePath = r.c.base.Path
+		segments = strings.Split(p, "/")
+		groupIndex = 1
+	}
+	if len(segments) <= 2 {
+		return *u
+	}
+
+	const CoreGroupPrefix = "api"
+	const NamedGroupPrefix = "apis"
+	isCoreGroup := segments[groupIndex] == CoreGroupPrefix
+	isNamedGroup := segments[groupIndex] == NamedGroupPrefix
+	if isCoreGroup {
+		// checking the case of core group with /api/v1/... format
+		index = groupIndex + 2
+	} else if isNamedGroup {
+		// checking the case of named group with /apis/apps/v1/... format
+		index = groupIndex + 3
+	} else {
+		// this should not happen, since the only two possibilities are /api... and /apis...;
+		// this is just an outlet in case more API group prefixes are ever added:
+		// https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups
+		// if an unrecognized API group prefix is encountered, return {prefix} for url.Path
+		u.Path = "/{prefix}"
+		u.RawQuery = ""
+		return *u
+	}
+	// switch segLength := len(segments) - index; segLength {
+	switch {
+	// case len(segments) - index == 1:
+	// resource (with no name) do nothing
+	case len(segments)-index == 2:
+		// /$RESOURCE/$NAME: replace $NAME with {name}
+		segments[index+1] = "{name}"
+	case len(segments)-index == 3:
+		if segments[index+2] == "finalize" || segments[index+2] == "status" {
+			// /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
+			segments[index+1] = "{name}"
+		} else {
+			// /namespaces/$NAMESPACE/$RESOURCE: replace $NAMESPACE with {namespace}
+			segments[index+1] = "{namespace}"
+		}
+	case len(segments)-index >= 4:
+		segments[index+1] = "{namespace}"
+		// /namespaces/$NAMESPACE/$RESOURCE/$NAME: replace $NAMESPACE with {namespace}, $NAME with {name}
+		if segments[index+3] != "finalize" && segments[index+3] != "status" {
+			// /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name}
+			segments[index+3] = "{name}"
+		}
+	}
+	u.Path = path.Join(trimmedBasePath, path.Join(segments...))
+	return *u
+}
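+
+// The effect of the substitution, sketched for a namespaced GET (the concrete
+// values are illustrative):
+//
+//	concrete URL: /api/v1/namespaces/default/pods/example?labelSelector=app%3Dweb
+//	template:     /api/v1/namespaces/{namespace}/pods/{name}?labelSelector=%7Bvalue%7D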
+
+func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) error {
+	if r.rateLimiter == nil {
+		return nil
+	}
+
+	now := time.Now()
+
+	err := r.rateLimiter.Wait(ctx)
+	if err != nil {
+		err = fmt.Errorf("client rate limiter Wait returned an error: %w", err)
+	}
+	latency := time.Since(now)
+
+	var message string
+	switch {
+	case len(retryInfo) > 0:
+		message = fmt.Sprintf("Waited for %v, %s - request: %s:%s", latency, retryInfo, r.verb, r.URL().String())
+	default:
+		message = fmt.Sprintf("Waited for %v due to client-side throttling, not priority and fairness, request: %s:%s", latency, r.verb, r.URL().String())
+	}
+
+	if latency > longThrottleLatency {
+		klog.V(3).Info(message)
+	}
+	if latency > extraLongThrottleLatency {
+		// If the rate limiter latency is very high, the log message should be printed at a higher log level,
+		// but we use a throttled logger to prevent spamming.
+		globalThrottledLogger.Infof("%s", message)
+	}
+	metrics.RateLimiterLatency.Observe(ctx, r.verb, r.finalURLTemplate(), latency)
+
+	return err
+}
+
+func (r *Request) tryThrottle(ctx context.Context) error {
+	return r.tryThrottleWithInfo(ctx, "")
+}
+
+type throttleSettings struct {
+	logLevel       klog.Level
+	minLogInterval time.Duration
+
+	lastLogTime time.Time
+	lock        sync.RWMutex
+}
+
+type throttledLogger struct {
+	clock    clock.PassiveClock
+	settings []*throttleSettings
+}
+
+var globalThrottledLogger = &throttledLogger{
+	clock: clock.RealClock{},
+	settings: []*throttleSettings{
+		{
+			logLevel:       2,
+			minLogInterval: 1 * time.Second,
+		}, {
+			logLevel:       0,
+			minLogInterval: 10 * time.Second,
+		},
+	},
+}
+
+func (b *throttledLogger) attemptToLog() (klog.Level, bool) {
+	for _, setting := range b.settings {
+		if bool(klog.V(setting.logLevel).Enabled()) {
+			// Return early without write locking if possible.
+			if func() bool {
+				setting.lock.RLock()
+				defer setting.lock.RUnlock()
+				return b.clock.Since(setting.lastLogTime) >= setting.minLogInterval
+			}() {
+				setting.lock.Lock()
+				defer setting.lock.Unlock()
+				if b.clock.Since(setting.lastLogTime) >= setting.minLogInterval {
+					setting.lastLogTime = b.clock.Now()
+					return setting.logLevel, true
+				}
+			}
+			return -1, false
+		}
+	}
+	return -1, false
+}
+
+// Infof will write a log message at each logLevel specified by the receiver's throttleSettings
+// as long as it hasn't written a log message more recently than minLogInterval.
+func (b *throttledLogger) Infof(message string, args ...interface{}) {
+	if logLevel, ok := b.attemptToLog(); ok {
+		klog.V(logLevel).Infof(message, args...)
+	}
+}
+
+// Watch attempts to begin watching the requested location.
+// Returns a watch.Interface, or an error.
+func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
+	w, _, e := r.watchInternal(ctx)
+	return w, e
+}
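+
+// Consuming the watch, sketched (handle is a hypothetical callback):
+//
+//	w, err := r.Watch(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	defer w.Stop()
+//	for event := range w.ResultChan() {
+//		handle(event) // event.Type is Added/Modified/Deleted/Bookmark/Error; event.Object is the decoded object
+//	}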
+
+func (r *Request) watchInternal(ctx context.Context) (watch.Interface, runtime.Decoder, error) {
+	if r.body == nil {
+		logBody(ctx, 2, "Request Body", r.bodyBytes)
+	}
+
+	// We specifically don't want to rate limit watches, so we
+	// don't use r.rateLimiter here.
+	if r.err != nil {
+		return nil, nil, r.err
+	}
+
+	client := r.c.Client
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	isErrRetryableFunc := func(request *http.Request, err error) bool {
+		// The watch stream mechanism handles many common partial data errors, so closed
+		// connections can be retried in many cases.
+		if net.IsProbableEOF(err) || net.IsTimeout(err) {
+			return true
+		}
+		return false
+	}
+	retry := r.retryFn(r.maxRetries)
+	url := r.URL().String()
+	for {
+		if err := retry.Before(ctx, r); err != nil {
+			return nil, nil, retry.WrapPreviousError(err)
+		}
+
+		req, err := r.newHTTPRequest(ctx)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		resp, err := client.Do(req)
+		retry.After(ctx, r, resp, err)
+		if err == nil && resp.StatusCode == http.StatusOK {
+			return r.newStreamWatcher(resp)
+		}
+
+		done, transformErr := func() (bool, error) {
+			defer readAndCloseResponseBody(resp)
+
+			if retry.IsNextRetry(ctx, r, req, resp, err, isErrRetryableFunc) {
+				return false, nil
+			}
+
+			if resp == nil {
+				// the server must have sent us an error in 'err'
+				return true, nil
+			}
+			result := r.transformResponse(ctx, resp, req)
+			if err := result.Error(); err != nil {
+				return true, err
+			}
+			return true, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode)
+		}()
+		if done {
+			if isErrRetryableFunc(req, err) {
+				return watch.NewEmptyWatch(), nil, nil
+			}
+			if err == nil {
+				// if the server sent us an HTTP Response object,
+				// we need to return the error object from that.
+				err = transformErr
+			}
+			return nil, nil, retry.WrapPreviousError(err)
+		}
+	}
+}
+
+type WatchListResult struct {
+	// err holds any errors we might have received
+	// during streaming.
+	err error
+
+	// items hold the collected data
+	items []runtime.Object
+
+	// initialEventsEndBookmarkRV holds the resource version
+	// extracted from the bookmark event that marks
+	// the end of the stream.
+	initialEventsEndBookmarkRV string
+
+	// negotiatedObjectDecoder knows how to decode
+	// the initialEventsListBlueprint
+	negotiatedObjectDecoder runtime.Decoder
+
+	// base64EncodedInitialEventsListBlueprint contains an empty,
+	// versioned list encoded in the requested format
+	// (e.g., protobuf, JSON, CBOR) and stored as a base64-encoded string
+	base64EncodedInitialEventsListBlueprint string
+}
+
+// Into stores the result into obj. The passed obj parameter must be a pointer to a list type.
+//
+// Note:
+//
+// Special attention should be given to the type *unstructured.Unstructured,
+// which represents a list type but does not have an "Items" field.
+// Users who directly use RESTClient may store the response in such an object.
+// This particular case is not handled by the current implementation of this function,
+// but may be considered for future updates.
+func (r WatchListResult) Into(obj runtime.Object) error {
+	if r.err != nil {
+		return r.err
+	}
+
+	listItemsPtr, err := meta.GetItemsPtr(obj)
+	if err != nil {
+		return err
+	}
+	listVal, err := conversion.EnforcePtr(listItemsPtr)
+	if err != nil {
+		return err
+	}
+	if listVal.Kind() != reflect.Slice {
+		return fmt.Errorf("need a pointer to slice, got %v", listVal.Kind())
+	}
+
+	encodedInitialEventsListBlueprint, err := base64.StdEncoding.DecodeString(r.base64EncodedInitialEventsListBlueprint)
+	if err != nil {
+		return fmt.Errorf("failed to decode the received blueprint list, err %w", err)
+	}
+
+	err = runtime.DecodeInto(r.negotiatedObjectDecoder, encodedInitialEventsListBlueprint, obj)
+	if err != nil {
+		return err
+	}
+
+	if len(r.items) == 0 {
+		listVal.Set(reflect.MakeSlice(listVal.Type(), 0, 0))
+	} else {
+		listVal.Set(reflect.MakeSlice(listVal.Type(), len(r.items), len(r.items)))
+		for i, o := range r.items {
+			if listVal.Type().Elem() != reflect.TypeOf(o).Elem() {
+				return fmt.Errorf("received object type = %v at index = %d, doesn't match the list item type = %v", reflect.TypeOf(o).Elem(), i, listVal.Type().Elem())
+			}
+			listVal.Index(i).Set(reflect.ValueOf(o).Elem())
+		}
+	}
+
+	listMeta, err := meta.ListAccessor(obj)
+	if err != nil {
+		return err
+	}
+	listMeta.SetResourceVersion(r.initialEventsEndBookmarkRV)
+	return nil
+}
+
+// WatchList establishes a stream to get a consistent snapshot of data
+// from the server as described in https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#proposal
+//
+// Note that the watchlist requires properly setting the ListOptions;
+// otherwise it just establishes a regular watch with the server.
+// Check the documentation https://kubernetes.io/docs/reference/using-api/api-concepts/#streaming-lists
+// to see what parameters are currently required.
+func (r *Request) WatchList(ctx context.Context) WatchListResult {
+	if r.body == nil {
+		logBody(ctx, 2, "Request Body", r.bodyBytes)
+	}
+
+	if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) {
+		return WatchListResult{err: fmt.Errorf("%q feature gate is not enabled", clientfeatures.WatchListClient)}
+	}
+	// TODO(#115478): consider validating request parameters (i.e sendInitialEvents).
+	//  Most users use the generated client, which handles the proper setting of parameters.
+	//  We don't have validation for other methods (e.g., the Watch)
+	//  thus, for symmetry, we haven't added additional checks for the WatchList method.
+	w, d, err := r.watchInternal(ctx)
+	if err != nil {
+		return WatchListResult{err: err}
+	}
+	return r.handleWatchList(ctx, w, d)
+}
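+
+// A sketch of the list options the streaming-list protocol currently expects
+// (per the KEP and docs linked above; ptr.To is from k8s.io/utils/ptr,
+// scheme.ParameterCodec from k8s.io/client-go/kubernetes/scheme, and corev1
+// aliases k8s.io/api/core/v1):
+//
+//	opts := metav1.ListOptions{
+//		Watch:                true,
+//		AllowWatchBookmarks:  true,
+//		SendInitialEvents:    ptr.To(true),
+//		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
+//	}
+//	res := r.VersionedParams(&opts, scheme.ParameterCodec).WatchList(ctx)
+//	var pods corev1.PodList // any list type matching the request
+//	err := res.Into(&pods)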
+
+// handleWatchList holds the actual logic for easier unit testing.
+// Note that this function will close the passed watch.
+func (r *Request) handleWatchList(ctx context.Context, w watch.Interface, negotiatedObjectDecoder runtime.Decoder) WatchListResult {
+	defer w.Stop()
+	var lastKey string
+	var items []runtime.Object
+
+	for {
+		select {
+		case <-ctx.Done():
+			return WatchListResult{err: ctx.Err()}
+		case event, ok := <-w.ResultChan():
+			if !ok {
+				return WatchListResult{err: fmt.Errorf("unexpected watch close")}
+			}
+			if event.Type == watch.Error {
+				return WatchListResult{err: errors.FromObject(event.Object)}
+			}
+			meta, err := meta.Accessor(event.Object)
+			if err != nil {
+				return WatchListResult{err: fmt.Errorf("failed to parse watch event: %#v", event)}
+			}
+
+			switch event.Type {
+			case watch.Added:
+				// the following check ensures that the response is ordered.
+				// earlier servers had a bug that caused them to not sort the output.
+				// in such cases, return an error which can trigger fallback logic.
+				key := objectKeyFromMeta(meta)
+				if len(lastKey) > 0 && lastKey > key {
+					return WatchListResult{err: fmt.Errorf("cannot add the obj (%#v) with the key = %s, as it violates the ordering guarantees provided by the watchlist feature in beta phase, lastInsertedKey was = %s", event.Object, key, lastKey)}
+				}
+				items = append(items, event.Object)
+				lastKey = key
+			case watch.Bookmark:
+				if meta.GetAnnotations()[metav1.InitialEventsAnnotationKey] == "true" {
+					base64EncodedInitialEventsListBlueprint := meta.GetAnnotations()[metav1.InitialEventsListBlueprintAnnotationKey]
+					if len(base64EncodedInitialEventsListBlueprint) == 0 {
+						return WatchListResult{err: fmt.Errorf("%q annotation is missing content", metav1.InitialEventsListBlueprintAnnotationKey)}
+					}
+					return WatchListResult{
+						items:                                   items,
+						initialEventsEndBookmarkRV:              meta.GetResourceVersion(),
+						negotiatedObjectDecoder:                 negotiatedObjectDecoder,
+						base64EncodedInitialEventsListBlueprint: base64EncodedInitialEventsListBlueprint,
+					}
+				}
+			default:
+				return WatchListResult{err: fmt.Errorf("unexpected watch event %#v, expected to only receive watch.Added and watch.Bookmark events", event)}
+			}
+		}
+	}
+}
+
+func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, runtime.Decoder, error) {
+	contentType := resp.Header.Get("Content-Type")
+	mediaType, params, err := mime.ParseMediaType(contentType)
+	if err != nil {
+		klog.V(4).Infof("Unexpected content type from the server: %q: %v", contentType, err)
+	}
+	objectDecoder, streamingSerializer, framer, err := r.contentConfig.Negotiator.StreamDecoder(mediaType, params)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	handleWarnings(resp.Header, r.warningHandler)
+
+	frameReader := framer.NewFrameReader(resp.Body)
+	watchEventDecoder := streaming.NewDecoder(frameReader, streamingSerializer)
+
+	return watch.NewStreamWatcher(
+		restclientwatch.NewDecoder(watchEventDecoder, objectDecoder),
+		// use 500 to indicate that the cause of the error is unknown - other error codes
+		// are more specific to HTTP interactions, and set a reason
+		errors.NewClientErrorReporter(http.StatusInternalServerError, r.verb, "ClientWatchDecoding"),
+	), objectDecoder, nil
+}
+
+// updateRequestResultMetric increments the RequestResult metric counter,
+// it should be called with the (response, err) tuple from the final
+// reply from the server.
+func updateRequestResultMetric(ctx context.Context, req *Request, resp *http.Response, err error) {
+	code, host := sanitize(req, resp, err)
+	metrics.RequestResult.Increment(ctx, code, req.verb, host)
+}
+
+// updateRequestRetryMetric increments the RequestRetry metric counter,
+// it should be called with the (response, err) tuple for each retry
+// except for the final attempt.
+func updateRequestRetryMetric(ctx context.Context, req *Request, resp *http.Response, err error) {
+	code, host := sanitize(req, resp, err)
+	metrics.RequestRetry.IncrementRetry(ctx, code, req.verb, host)
+}
+
+func sanitize(req *Request, resp *http.Response, err error) (string, string) {
+	host := "none"
+	if req.c.base != nil {
+		host = req.c.base.Host
+	}
+
+	// Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric
+	// system so we just report them as `<error>`.
+	code := "<error>"
+	if resp != nil {
+		code = strconv.Itoa(resp.StatusCode)
+	}
+
+	return code, host
+}
+
+// Stream formats and executes the request, and offers streaming of the response.
+// Returns an io.ReadCloser which can be used to stream the response, or an error.
+// Any non-2xx http status code causes an error. If we get a non-2xx code, we try to convert the body into an APIStatus object.
+// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response.
+func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
+	if r.body == nil {
+		logBody(ctx, 2, "Request Body", r.bodyBytes)
+	}
+
+	if r.err != nil {
+		return nil, r.err
+	}
+
+	if err := r.tryThrottle(ctx); err != nil {
+		return nil, err
+	}
+
+	client := r.c.Client
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	retry := r.retryFn(r.maxRetries)
+	url := r.URL().String()
+	for {
+		if err := retry.Before(ctx, r); err != nil {
+			return nil, err
+		}
+
+		req, err := r.newHTTPRequest(ctx)
+		if err != nil {
+			return nil, err
+		}
+		resp, err := client.Do(req)
+		retry.After(ctx, r, resp, err)
+		if err != nil {
+			// we only retry on an HTTP response with 'Retry-After' header
+			return nil, err
+		}
+
+		switch {
+		case (resp.StatusCode >= 200) && (resp.StatusCode < 300):
+			handleWarnings(resp.Header, r.warningHandler)
+			return resp.Body, nil
+
+		default:
+			done, transformErr := func() (bool, error) {
+				defer resp.Body.Close()
+
+				if retry.IsNextRetry(ctx, r, req, resp, err, neverRetryError) {
+					return false, nil
+				}
+				result := r.transformResponse(ctx, resp, req)
+				if err := result.Error(); err != nil {
+					return true, err
+				}
+				return true, fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body))
+			}()
+			if done {
+				return nil, transformErr
+			}
+		}
+	}
+}
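+
+// Sketch: streaming a subresource such as pod logs ("log" and the object
+// names are illustrative; client is a *RESTClient):
+//
+//	rc, err := client.Get().
+//		Namespace("default").
+//		Resource("pods").
+//		Name("example").
+//		SubResource("log").
+//		Stream(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	defer rc.Close()
+//	_, err = io.Copy(os.Stdout, rc)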
+
+// requestPreflightCheck looks for common programmer errors on Request.
+//
+// We tackle two programmer mistakes here. The first is trying to create
+// something (POST) using an empty string as the namespace while namespaceSet is
+// true; if namespaceSet is true then namespace should also be defined. The
+// second is, under the same circumstances, trying to GET, PUT or DELETE a
+// named resource (resourceName != ""); again, if namespaceSet is true then
+// namespace must not be empty.
+func (r *Request) requestPreflightCheck() error {
+	if !r.namespaceSet {
+		return nil
+	}
+	if len(r.namespace) > 0 {
+		return nil
+	}
+
+	switch r.verb {
+	case "POST":
+		return fmt.Errorf("an empty namespace may not be set during creation")
+	case "GET", "PUT", "DELETE":
+		if len(r.resourceName) > 0 {
+			return fmt.Errorf("an empty namespace may not be set when a resource name is provided")
+		}
+	}
+	return nil
+}
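+
+// For example, the first check rejects a sketch like this, where the
+// namespace is explicitly set to the empty string:
+//
+//	err := client.Post().Namespace("").Resource("pods").Body(pod).Do(ctx).Error()
+//	// err: "an empty namespace may not be set during creation"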
+
+func (r *Request) newHTTPRequest(ctx context.Context) (*http.Request, error) {
+	var body io.Reader
+	switch {
+	case r.body != nil && r.bodyBytes != nil:
+		return nil, fmt.Errorf("cannot set both body and bodyBytes")
+	case r.body != nil:
+		body = r.body
+	case r.bodyBytes != nil:
+		// Create a new reader specifically for this request.
+		// Giving each request a dedicated reader allows retries to avoid races resetting the request body.
+		body = bytes.NewReader(r.bodyBytes)
+	}
+
+	url := r.URL().String()
+	req, err := http.NewRequestWithContext(httptrace.WithClientTrace(ctx, newDNSMetricsTrace(ctx)), r.verb, url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = r.headers
+	return req, nil
+}
+
+// newDNSMetricsTrace returns an HTTP trace that tracks time spent on DNS lookups per host.
+// This metric is available in client as "rest_client_dns_resolution_duration_seconds".
+func newDNSMetricsTrace(ctx context.Context) *httptrace.ClientTrace {
+	type dnsMetric struct {
+		start time.Time
+		host  string
+		sync.Mutex
+	}
+	dns := &dnsMetric{}
+	return &httptrace.ClientTrace{
+		DNSStart: func(info httptrace.DNSStartInfo) {
+			dns.Lock()
+			defer dns.Unlock()
+			dns.start = time.Now()
+			dns.host = info.Host
+		},
+		DNSDone: func(info httptrace.DNSDoneInfo) {
+			dns.Lock()
+			defer dns.Unlock()
+			metrics.ResolverLatency.Observe(ctx, dns.host, time.Since(dns.start))
+		},
+	}
+}
+
+// request connects to the server and invokes the provided function when a server response is
+// received. It handles retry behavior and up front validation of requests. It will invoke
+// fn at most once. It will return an error if a problem occurred prior to connecting to the
+// server - the provided function is responsible for handling server errors.
+func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Response)) error {
+	// Metrics for total request latency
+	start := time.Now()
+	defer func() {
+		metrics.RequestLatency.Observe(ctx, r.verb, r.finalURLTemplate(), time.Since(start))
+	}()
+
+	if r.err != nil {
+		klog.V(4).Infof("Error in request: %v", r.err)
+		return r.err
+	}
+
+	if err := r.requestPreflightCheck(); err != nil {
+		return err
+	}
+
+	client := r.c.Client
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	// Throttle the first try before setting up the timeout configured on the
+	// client. We don't want a throttled client to return timeouts to callers
+	// before it makes a single request.
+	if err := r.tryThrottle(ctx); err != nil {
+		return err
+	}
+
+	if r.timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, r.timeout)
+		defer cancel()
+	}
+
+	isErrRetryableFunc := func(req *http.Request, err error) bool {
+		// "Connection reset by peer" or "apiserver is shutting down" are usually a transient errors.
+		// Thus in case of "GET" operations, we simply retry it.
+		// We are not automatically retrying "write" operations, as they are not idempotent.
+		if req.Method != "GET" {
+			return false
+		}
+		// For connection errors and apiserver shutdown errors retry.
+		if net.IsConnectionReset(err) || net.IsProbableEOF(err) || net.IsHTTP2ConnectionLost(err) {
+			return true
+		}
+		return false
+	}
+
+	// Right now we make about ten retry attempts if we get a Retry-After response.
+	retry := r.retryFn(r.maxRetries)
+	for {
+		if err := retry.Before(ctx, r); err != nil {
+			return retry.WrapPreviousError(err)
+		}
+		req, err := r.newHTTPRequest(ctx)
+		if err != nil {
+			return err
+		}
+		resp, err := client.Do(req)
+		// The value -1 or a value of 0 with a non-nil Body indicates that the length is unknown.
+		// https://pkg.go.dev/net/http#Request
+		if req.ContentLength >= 0 && !(req.Body != nil && req.ContentLength == 0) {
+			metrics.RequestSize.Observe(ctx, r.verb, r.URL().Host, float64(req.ContentLength))
+		}
+		if resp != nil && resp.StatusCode == http.StatusUnsupportedMediaType {
+			r.c.content.UnsupportedMediaType(resp.Request.Header.Get("Content-Type"))
+		}
+		retry.After(ctx, r, resp, err)
+
+		done := func() bool {
+			defer readAndCloseResponseBody(resp)
+
+			// if the server returns an error in err, the response will be nil.
+			f := func(req *http.Request, resp *http.Response) {
+				if resp == nil {
+					return
+				}
+				fn(req, resp)
+			}
+
+			if retry.IsNextRetry(ctx, r, req, resp, err, isErrRetryableFunc) {
+				return false
+			}
+
+			f(req, resp)
+			return true
+		}()
+		if done {
+			return retry.WrapPreviousError(err)
+		}
+	}
+}
+
+// Do formats and executes the request. Returns a Result object for easy response
+// processing.
+//
+// Error type:
+//   - If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
+//   - http.Client.Do errors are returned directly.
+func (r *Request) Do(ctx context.Context) Result {
+	if r.body == nil {
+		logBody(ctx, 2, "Request Body", r.bodyBytes)
+	}
+
+	var result Result
+	err := r.request(ctx, func(req *http.Request, resp *http.Response) {
+		result = r.transformResponse(ctx, resp, req)
+	})
+	if err != nil {
+		return Result{err: err}
+	}
+	if result.err == nil || len(result.body) > 0 {
+		metrics.ResponseSize.Observe(ctx, r.verb, r.URL().Host, float64(len(result.body)))
+	}
+	return result
+}
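+
+// Typical usage, sketched (list is a hypothetical typed list object):
+//
+//	var statusCode int
+//	err := client.Get().
+//		Resource("pods").
+//		Do(ctx).
+//		StatusCode(&statusCode).
+//		Into(&list)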
+
+// DoRaw executes the request but does not process the response body.
+func (r *Request) DoRaw(ctx context.Context) ([]byte, error) {
+	if r.body == nil {
+		logBody(ctx, 2, "Request Body", r.bodyBytes)
+	}
+
+	var result Result
+	err := r.request(ctx, func(req *http.Request, resp *http.Response) {
+		result.body, result.err = io.ReadAll(resp.Body)
+		logBody(ctx, 2, "Response Body", result.body)
+		if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent {
+			result.err = r.transformUnstructuredResponseError(resp, req, result.body)
+		}
+	})
+	if err != nil {
+		return nil, err
+	}
+	if result.err == nil || len(result.body) > 0 {
+		metrics.ResponseSize.Observe(ctx, r.verb, r.URL().Host, float64(len(result.body)))
+	}
+	return result.body, result.err
+}
+
+// transformResponse converts an API response into a structured API object
+func (r *Request) transformResponse(ctx context.Context, resp *http.Response, req *http.Request) Result {
+	var body []byte
+	if resp.Body != nil {
+		data, err := io.ReadAll(resp.Body)
+		switch err.(type) {
+		case nil:
+			body = data
+		case http2.StreamError:
+			// This is trying to catch the scenario that the server may close the connection when sending the
+			// response body. This can be caused by server timeout due to a slow network connection.
+			// TODO: Add test for this. Steps may be:
+			// 1. client-go (or kubectl) sends a GET request.
+			// 2. Apiserver sends back the headers and then part of the body
+			// 3. Apiserver closes connection.
+			// 4. client-go should catch this and return an error.
+			klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
+			streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %w", err)
+			return Result{
+				err: streamErr,
+			}
+		default:
+			klog.Errorf("Unexpected error when reading response body: %v", err)
+			unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %w", err)
+			return Result{
+				err: unexpectedErr,
+			}
+		}
+	}
+
+	// Call depth is tricky. This one is okay for Do and DoRaw.
+	logBody(ctx, 7, "Response Body", body)
+
+	// verify the content type is accurate
+	var decoder runtime.Decoder
+	contentType := resp.Header.Get("Content-Type")
+	if len(contentType) == 0 {
+		contentType = r.contentConfig.ContentType
+	}
+	if len(contentType) > 0 {
+		var err error
+		mediaType, params, err := mime.ParseMediaType(contentType)
+		if err != nil {
+			return Result{err: errors.NewInternalError(err)}
+		}
+		decoder, err = r.contentConfig.Negotiator.Decoder(mediaType, params)
+		if err != nil {
+			// if we fail to negotiate a decoder, treat this as an unstructured error
+			switch {
+			case resp.StatusCode == http.StatusSwitchingProtocols:
+				// no-op, we've been upgraded
+			case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
+				return Result{err: r.transformUnstructuredResponseError(resp, req, body)}
+			}
+			return Result{
+				body:        body,
+				contentType: contentType,
+				statusCode:  resp.StatusCode,
+				warnings:    handleWarnings(resp.Header, r.warningHandler),
+			}
+		}
+	}
+
+	switch {
+	case resp.StatusCode == http.StatusSwitchingProtocols:
+		// no-op, we've been upgraded
+	case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
+		// calculate an unstructured error from the response which the Result object may use if the caller
+		// did not return a structured error.
+		retryAfter, _ := retryAfterSeconds(resp)
+		err := r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter)
+		return Result{
+			body:        body,
+			contentType: contentType,
+			statusCode:  resp.StatusCode,
+			decoder:     decoder,
+			err:         err,
+			warnings:    handleWarnings(resp.Header, r.warningHandler),
+		}
+	}
+
+	return Result{
+		body:        body,
+		contentType: contentType,
+		statusCode:  resp.StatusCode,
+		decoder:     decoder,
+		warnings:    handleWarnings(resp.Header, r.warningHandler),
+	}
+}
+
+// truncateBody decides if the body should be truncated, based on the log verbosity.
+func truncateBody(logger klog.Logger, body string) string {
+	max := 0
+	switch {
+	case bool(logger.V(10).Enabled()):
+		return body
+	case bool(logger.V(9).Enabled()):
+		max = 10240
+	case bool(logger.V(8).Enabled()):
+		max = 1024
+	}
+
+	if len(body) <= max {
+		return body
+	}
+
+	return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max)
+}
+
+// logBody logs a body output that could be either JSON or protobuf. It explicitly guards against
+// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
+// whether the body is printable.
+//
+// It needs to be called by all functions which send or receive the data.
+func logBody(ctx context.Context, callDepth int, prefix string, body []byte) {
+	logger := klog.FromContext(ctx)
+	if loggerV := logger.V(8); loggerV.Enabled() {
+		loggerV := loggerV.WithCallDepth(callDepth)
+		if bytes.IndexFunc(body, func(r rune) bool {
+			return r < 0x0a
+		}) != -1 {
+			loggerV.Info(prefix, "body", truncateBody(logger, hex.Dump(body)))
+		} else {
+			loggerV.Info(prefix, "body", truncateBody(logger, string(body)))
+		}
+	}
+}
+
+// maxUnstructuredResponseTextBytes is an upper bound on how much output to include in the unstructured error.
+const maxUnstructuredResponseTextBytes = 2048
+
+// transformUnstructuredResponseError handles an error from the server that is not in a structured form.
+// It is expected to transform any response that is not recognizable as a clear server-sent error from the
+// K8S API using the information provided with the request. In practice, HTTP proxies and client libraries
+// introduce a level of uncertainty into the responses returned by servers, which in common use results in
+// unexpected responses. The rough structure is:
+//
+//  1. Assume the server sends you something sane - JSON + well defined error objects + proper codes
+//     - this is the happy path
+//     - when you get this output, trust what the server sends
+//  2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to
+//     generate a reasonable facsimile of the original failure.
+//     - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above
+//  3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error
+//  4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected
+//     initial contact, the presence of mismatched body contents from posted content types
+//     - Give these a separate distinct error type and capture as much as possible of the original message
+//
+// TODO: introduce transformation of generic http.Client.Do() errors that separates 4.
+func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error {
+	if body == nil && resp.Body != nil {
+		if data, err := io.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil {
+			body = data
+		}
+	}
+	retryAfter, _ := retryAfterSeconds(resp)
+	return r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter)
+}
+
+// newUnstructuredResponseError instantiates the appropriate generic error for the provided input. It also logs the body.
+func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, statusCode int, method string, retryAfter int) error {
+	// cap the amount of output we create
+	if len(body) > maxUnstructuredResponseTextBytes {
+		body = body[:maxUnstructuredResponseTextBytes]
+	}
+
+	message := "unknown"
+	if isTextResponse {
+		message = strings.TrimSpace(string(body))
+	}
+	var groupResource schema.GroupResource
+	if len(r.resource) > 0 {
+		groupResource.Group = r.contentConfig.GroupVersion.Group
+		groupResource.Resource = r.resource
+	}
+	return errors.NewGenericServerResponse(
+		statusCode,
+		method,
+		groupResource,
+		r.resourceName,
+		message,
+		retryAfter,
+		true,
+	)
+}
+
+// isTextResponse returns true if the response appears to be a textual media type.
+func isTextResponse(resp *http.Response) bool {
+	contentType := resp.Header.Get("Content-Type")
+	if len(contentType) == 0 {
+		return true
+	}
+	media, _, err := mime.ParseMediaType(contentType)
+	if err != nil {
+		return false
+	}
+	return strings.HasPrefix(media, "text/")
+}
+
+// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if
+// the header was missing or not a valid number.
+func retryAfterSeconds(resp *http.Response) (int, bool) {
+	if h := resp.Header.Get("Retry-After"); len(h) > 0 {
+		if i, err := strconv.Atoi(h); err == nil {
+			return i, true
+		}
+	}
+	return 0, false
+}
+
+// Result contains the result of calling Request.Do().
+type Result struct {
+	body        []byte
+	warnings    []net.WarningHeader
+	contentType string
+	err         error
+	statusCode  int
+
+	decoder runtime.Decoder
+}
+
+// Raw returns the raw result.
+func (r Result) Raw() ([]byte, error) {
+	return r.body, r.err
+}
+
+// Get returns the result as an object, which means it passes through the decoder.
+// If the returned object is of type Status and has .Status != StatusSuccess, the
+// additional information in Status will be used to enrich the error.
+func (r Result) Get() (runtime.Object, error) {
+	if r.err != nil {
+		// Check whether the result has a Status object in the body and prefer that.
+		return nil, r.Error()
+	}
+	if r.decoder == nil {
+		return nil, fmt.Errorf("serializer for %s doesn't exist", r.contentType)
+	}
+
+	// decode, but if the result is Status return that as an error instead.
+	out, _, err := r.decoder.Decode(r.body, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	switch t := out.(type) {
+	case *metav1.Status:
+		// any status besides StatusSuccess is considered an error.
+		if t.Status != metav1.StatusSuccess {
+			return nil, errors.FromObject(t)
+		}
+	}
+	return out, nil
+}
+
+// StatusCode copies the HTTP status code of the response into the passed
+// int, returning the Result for possible chaining. (Only valid if no
+// error was returned.)
+func (r Result) StatusCode(statusCode *int) Result {
+	*statusCode = r.statusCode
+	return r
+}
+
+// ContentType copies the "Content-Type" response header into the passed
+// string, returning the Result for possible chaining. (Only valid if no
+// error was returned.)
+func (r Result) ContentType(contentType *string) Result {
+	*contentType = r.contentType
+	return r
+}
+
+// Into stores the result into obj, if possible. If obj is nil it is ignored.
+// If the returned object is of type Status and has .Status != StatusSuccess, the
+// additional information in Status will be used to enrich the error.
+func (r Result) Into(obj runtime.Object) error {
+	if r.err != nil {
+		// Check whether the result has a Status object in the body and prefer that.
+		return r.Error()
+	}
+	if r.decoder == nil {
+		return fmt.Errorf("serializer for %s doesn't exist", r.contentType)
+	}
+	if len(r.body) == 0 {
+		return fmt.Errorf("0-length response with status code: %d and content type: %s",
+			r.statusCode, r.contentType)
+	}
+
+	out, _, err := r.decoder.Decode(r.body, nil, obj)
+	if err != nil || out == obj {
+		return err
+	}
+	// if a different object is returned, see if it is Status and avoid double decoding
+	// the object.
+	switch t := out.(type) {
+	case *metav1.Status:
+		// any status besides StatusSuccess is considered an error.
+		if t.Status != metav1.StatusSuccess {
+			return errors.FromObject(t)
+		}
+	}
+	return nil
+}
+
+// WasCreated updates the provided bool pointer to whether the server returned
+// 201 created or a different response.
+func (r Result) WasCreated(wasCreated *bool) Result {
+	*wasCreated = r.statusCode == http.StatusCreated
+	return r
+}
+
+// Error returns the error executing the request, nil if no error occurred.
+// If the returned object is of type Status and has Status != StatusSuccess, the
+// additional information in Status will be used to enrich the error.
+// See the Request.Do() comment for what errors you might get.
+func (r Result) Error() error {
+	// if we have received an unexpected server error, and we have a body and decoder, we can try to extract
+	// a Status object.
+	if r.err == nil || !errors.IsUnexpectedServerError(r.err) || len(r.body) == 0 || r.decoder == nil {
+		return r.err
+	}
+
+	// attempt to convert the body into a Status object
+	// to be backwards compatible with old servers that do not return a version, default to "v1"
+	out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil)
+	if err != nil {
+		klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err)
+		return r.err
+	}
+	switch t := out.(type) {
+	case *metav1.Status:
+		// because we default the kind, we *must* check for StatusFailure
+		if t.Status == metav1.StatusFailure {
+			return errors.FromObject(t)
+		}
+	}
+	return r.err
+}
+
+// Warnings returns any warning headers received in the response
+func (r Result) Warnings() []net.WarningHeader {
+	return r.warnings
+}
+
+// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store)
+var NameMayNotBe = []string{".", ".."}
+
+// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store)
+var NameMayNotContain = []string{"/", "%"}
+
+// IsValidPathSegmentName validates the name can be safely encoded as a path segment
+func IsValidPathSegmentName(name string) []string {
+	for _, illegalName := range NameMayNotBe {
+		if name == illegalName {
+			return []string{fmt.Sprintf(`may not be '%s'`, illegalName)}
+		}
+	}
+
+	var errors []string
+	for _, illegalContent := range NameMayNotContain {
+		if strings.Contains(name, illegalContent) {
+			errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
+		}
+	}
+
+	return errors
+}
+
+// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment
+// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid
+func IsValidPathSegmentPrefix(name string) []string {
+	var errors []string
+	for _, illegalContent := range NameMayNotContain {
+		if strings.Contains(name, illegalContent) {
+			errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent))
+		}
+	}
+
+	return errors
+}
+
+// ValidatePathSegmentName validates the name can be safely encoded as a path segment
+func ValidatePathSegmentName(name string, prefix bool) []string {
+	if prefix {
+		return IsValidPathSegmentPrefix(name)
+	}
+	return IsValidPathSegmentName(name)
+}
+
+func objectKeyFromMeta(objMeta metav1.Object) string {
+	if len(objMeta.GetNamespace()) > 0 {
+		return fmt.Sprintf("%s/%s", objMeta.GetNamespace(), objMeta.GetName())
+	}
+	return objMeta.GetName()
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/transport.go b/hack/tools/vendor/k8s.io/client-go/rest/transport.go
new file mode 100644
index 0000000000..53f986cbf3
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/transport.go
@@ -0,0 +1,155 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"crypto/tls"
+	"errors"
+	"net/http"
+
+	"k8s.io/client-go/pkg/apis/clientauthentication"
+	"k8s.io/client-go/plugin/pkg/client/auth/exec"
+	"k8s.io/client-go/transport"
+)
+
+// HTTPClientFor returns an http.Client that will provide the authentication
+// or transport level security defined by the provided Config. Will return
+// http.DefaultClient if no special case behavior is needed.
+func HTTPClientFor(config *Config) (*http.Client, error) {
+	transport, err := TransportFor(config)
+	if err != nil {
+		return nil, err
+	}
+	var httpClient *http.Client
+	if transport != http.DefaultTransport || config.Timeout > 0 {
+		httpClient = &http.Client{
+			Transport: transport,
+			Timeout:   config.Timeout,
+		}
+	} else {
+		httpClient = http.DefaultClient
+	}
+
+	return httpClient, nil
+}
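+
+// Sketch: deriving a plain *http.Client from a Config (the host and token
+// are placeholders):
+//
+//	cfg := &Config{
+//		Host:        "https://example-cluster:6443",
+//		BearerToken: "redacted",
+//		Timeout:     30 * time.Second,
+//	}
+//	httpClient, err := HTTPClientFor(cfg)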
+
+// TLSConfigFor returns a tls.Config that will provide the transport level security defined
+// by the provided Config. Will return nil if no transport level security is requested.
+func TLSConfigFor(config *Config) (*tls.Config, error) {
+	cfg, err := config.TransportConfig()
+	if err != nil {
+		return nil, err
+	}
+	return transport.TLSConfigFor(cfg)
+}
+
+// TransportFor returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config. Will return
+// http.DefaultTransport if no special case behavior is needed.
+func TransportFor(config *Config) (http.RoundTripper, error) {
+	cfg, err := config.TransportConfig()
+	if err != nil {
+		return nil, err
+	}
+	return transport.New(cfg)
+}
+
+// HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the
+// config. Exposed to allow more clients that need HTTP-like behavior but then must hijack
+// the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use
+// the higher level TransportFor or RESTClientFor methods.
+func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) {
+	cfg, err := config.TransportConfig()
+	if err != nil {
+		return nil, err
+	}
+	return transport.HTTPWrappersForConfig(cfg, rt)
+}
+
+// TransportConfig converts a client config to an appropriate transport config.
+func (c *Config) TransportConfig() (*transport.Config, error) {
+	conf := &transport.Config{
+		UserAgent:          c.UserAgent,
+		Transport:          c.Transport,
+		WrapTransport:      c.WrapTransport,
+		DisableCompression: c.DisableCompression,
+		TLS: transport.TLSConfig{
+			Insecure:   c.Insecure,
+			ServerName: c.ServerName,
+			CAFile:     c.CAFile,
+			CAData:     c.CAData,
+			CertFile:   c.CertFile,
+			CertData:   c.CertData,
+			KeyFile:    c.KeyFile,
+			KeyData:    c.KeyData,
+			NextProtos: c.NextProtos,
+		},
+		Username:        c.Username,
+		Password:        c.Password,
+		BearerToken:     c.BearerToken,
+		BearerTokenFile: c.BearerTokenFile,
+		Impersonate: transport.ImpersonationConfig{
+			UserName: c.Impersonate.UserName,
+			UID:      c.Impersonate.UID,
+			Groups:   c.Impersonate.Groups,
+			Extra:    c.Impersonate.Extra,
+		},
+		Proxy: c.Proxy,
+	}
+
+	if c.Dial != nil {
+		conf.DialHolder = &transport.DialHolder{Dial: c.Dial}
+	}
+
+	if c.ExecProvider != nil && c.AuthProvider != nil {
+		return nil, errors.New("execProvider and authProvider cannot be used in combination")
+	}
+
+	if c.ExecProvider != nil {
+		var cluster *clientauthentication.Cluster
+		if c.ExecProvider.ProvideClusterInfo {
+			var err error
+			cluster, err = ConfigToExecCluster(c)
+			if err != nil {
+				return nil, err
+			}
+		}
+		provider, err := exec.GetAuthenticator(c.ExecProvider, cluster)
+		if err != nil {
+			return nil, err
+		}
+		if err := provider.UpdateTransportConfig(conf); err != nil {
+			return nil, err
+		}
+	}
+	if c.AuthProvider != nil {
+		provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister)
+		if err != nil {
+			return nil, err
+		}
+		conf.Wrap(provider.WrapTransport)
+	}
+	return conf, nil
+}
+
+// Wrap adds a transport middleware function that will give the caller
+// an opportunity to wrap the underlying http.RoundTripper prior to the
+// first API call being made. The provided function is invoked after any
+// existing transport wrappers are invoked.
+func (c *Config) Wrap(fn transport.WrapperFunc) {
+	c.WrapTransport = transport.Wrappers(c.WrapTransport, fn)
+}
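As a usage note for the file above: HTTPClientFor is the usual entry point, layering the TLS/auth transport built by TransportFor under a plain *http.Client. A minimal sketch (the host is a placeholder):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/rest"
)

func main() {
	cfg := &rest.Config{
		Host:    "https://api.example.invalid:6443", // placeholder API server
		Timeout: 10 * time.Second,
	}

	// HTTPClientFor builds the transport via TransportFor, so TLS settings,
	// bearer tokens, impersonation, and the timeout are all applied.
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println("client timeout:", httpClient.Timeout)
}
```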
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/url_utils.go b/hack/tools/vendor/k8s.io/client-go/rest/url_utils.go
new file mode 100644
index 0000000000..0a0ab79173
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/url_utils.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"fmt"
+	"net/url"
+	"path"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
+// to use with a Client at a given API version following the standard conventions for a
+// Kubernetes API.
+func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, defaultTLS bool) (*url.URL, string, error) {
+	if host == "" {
+		return nil, "", fmt.Errorf("host must be a URL or a host:port pair")
+	}
+	base := host
+	hostURL, err := url.Parse(base)
+	if err != nil || hostURL.Scheme == "" || hostURL.Host == "" {
+		scheme := "http://"
+		if defaultTLS {
+			scheme = "https://"
+		}
+		hostURL, err = url.Parse(scheme + base)
+		if err != nil {
+			return nil, "", err
+		}
+		if hostURL.Path != "" && hostURL.Path != "/" {
+			return nil, "", fmt.Errorf("host must be a URL or a host:port pair: %q", base)
+		}
+	}
+
+	// hostURL.Path is optional; a non-empty Path is treated as a prefix that is to be applied to
+	// all URIs used to access the host. this is useful when there's a proxy in front of the
+	// apiserver that has relocated the apiserver endpoints, forwarding all requests from, for
+	// example, /a/b/c to the apiserver. in this case the Path should be /a/b/c.
+	//
+	// if running without a frontend proxy (that changes the location of the apiserver), then
+	// hostURL.Path should be blank.
+	//
+	// versionedAPIPath, a path relative to baseURL.Path, points to a versioned API base
+	versionedAPIPath := DefaultVersionedAPIPath(apiPath, groupVersion)
+
+	return hostURL, versionedAPIPath, nil
+}
+
+// DefaultVersionedAPIPath constructs the default path for the given group version, assuming the given
+// API path, following the standard conventions of the Kubernetes API.
+func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) string {
+	versionedAPIPath := path.Join("/", apiPath)
+
+	// Add the version to the end of the path
+	if len(groupVersion.Group) > 0 {
+		versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Group, groupVersion.Version)
+
+	} else {
+		versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Version)
+	}
+
+	return versionedAPIPath
+}
+
+// DefaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It
+// requires Host and Version to be set prior to being called.
+func DefaultServerUrlFor(config *Config) (*url.URL, string, error) {
+	// TODO: move the default to secure when the apiserver supports TLS by default
+	// config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
+	hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0
+	hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0
+	defaultTLS := hasCA || hasCert || config.Insecure
+	host := config.Host
+	if host == "" {
+		host = "localhost"
+	}
+
+	if config.GroupVersion != nil {
+		return DefaultServerURL(host, config.APIPath, *config.GroupVersion, defaultTLS)
+	}
+	return DefaultServerURL(host, config.APIPath, schema.GroupVersion{}, defaultTLS)
+}
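To illustrate the defaulting logic above: a bare host:port picks up its scheme from defaultTLS, and the versioned API path is derived from the group/version. A small sketch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/rest"
)

func main() {
	hostURL, versionedPath, err := rest.DefaultServerURL(
		"localhost:6443", // bare host:port, no scheme
		"/apis",
		schema.GroupVersion{Group: "apps", Version: "v1"},
		true, // defaultTLS: prepend https://
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(hostURL.String()) // https://localhost:6443
	fmt.Println(versionedPath)    // /apis/apps/v1
}
```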
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/urlbackoff.go b/hack/tools/vendor/k8s.io/client-go/rest/urlbackoff.go
new file mode 100644
index 0000000000..2f9962d7e5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/urlbackoff.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"net/url"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/util/flowcontrol"
+	"k8s.io/klog/v2"
+)
+
+// Set of response codes that we back off for.
+// In general these should be errors that indicate a server is overloaded.
+// These shouldn't be configured by any user; we set them based on HTTP conventions.
+var serverIsOverloadedSet = sets.NewInt(429)
+var maxResponseCode = 499
+
+type BackoffManager interface {
+	UpdateBackoff(actualUrl *url.URL, err error, responseCode int)
+	CalculateBackoff(actualUrl *url.URL) time.Duration
+	Sleep(d time.Duration)
+}
+
+// URLBackoff struct implements the semantics on top of Backoff which
+// we need for URL specific exponential backoff.
+type URLBackoff struct {
+	// Uses backoff as underlying implementation.
+	Backoff *flowcontrol.Backoff
+}
+
+// NoBackoff is a stub implementation, can be used for mocking or else as a default.
+type NoBackoff struct {
+}
+
+func (n *NoBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
+	// do nothing.
+}
+
+func (n *NoBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
+	return 0 * time.Second
+}
+
+func (n *NoBackoff) Sleep(d time.Duration) {
+	time.Sleep(d)
+}
+
+// Disable makes the backoff trivial, i.e., sets it to zero.  This might be used
+// by tests which want to run 1000s of mock requests without slowing down.
+func (b *URLBackoff) Disable() {
+	klog.V(4).Infof("Disabling backoff strategy")
+	b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second)
+}
+
+// baseUrlKey returns the key which urls will be mapped to.
+// For example, 127.0.0.1:8080/api/v2/abcde -> 127.0.0.1:8080.
+func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string {
+	// Simple implementation for now, just the host.
+	// We may backoff specific paths (i.e. "pods") differentially
+	// in the future.
+	host, err := url.Parse(rawurl.String())
+	if err != nil {
+		klog.V(4).Infof("Error extracting url: %v", rawurl)
+		panic("bad url!")
+	}
+	return host.Host
+}
+
+// UpdateBackoff updates backoff metadata
+func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
+	// range for retry counts that we store is [0,13]
+	if responseCode > maxResponseCode || serverIsOverloadedSet.Has(responseCode) {
+		b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now())
+		return
+	} else if responseCode >= 300 || err != nil {
+		klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err)
+	}
+
+	//If we got this far, there is no backoff required for this URL anymore.
+	b.Backoff.Reset(b.baseUrlKey(actualUrl))
+}
+
+// CalculateBackoff takes a url and backs off exponentially,
+// based on its knowledge of existing failures.
+func (b *URLBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
+	return b.Backoff.Get(b.baseUrlKey(actualUrl))
+}
+
+func (b *URLBackoff) Sleep(d time.Duration) {
+	b.Backoff.Clock.Sleep(d)
+}
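A short sketch of the per-host backoff semantics above: a 429 grows the delay for that host, while a subsequent success resets it.

```go
package main

import (
	"fmt"
	"net/url"
	"time"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	backoff := &rest.URLBackoff{
		// 1s initial delay, doubling per overload event up to 32s.
		Backoff: flowcontrol.NewBackOff(1*time.Second, 32*time.Second),
	}
	u, _ := url.Parse("https://localhost:6443/api/v1/pods")

	backoff.UpdateBackoff(u, nil, 429)       // server overloaded: start backing off
	fmt.Println(backoff.CalculateBackoff(u)) // 1s

	backoff.UpdateBackoff(u, nil, 200)       // success: reset for this host
	fmt.Println(backoff.CalculateBackoff(u)) // 0s
}
```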
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/warnings.go b/hack/tools/vendor/k8s.io/client-go/rest/warnings.go
new file mode 100644
index 0000000000..ad493659f2
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/warnings.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"sync"
+
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/util/net"
+)
+
+// WarningHandler is an interface for handling warning headers
+type WarningHandler interface {
+	// HandleWarningHeader is called with the warn code, agent, and text when a warning header is encountered.
+	HandleWarningHeader(code int, agent string, text string)
+}
+
+var (
+	defaultWarningHandler     WarningHandler = WarningLogger{}
+	defaultWarningHandlerLock sync.RWMutex
+)
+
+// SetDefaultWarningHandler sets the default handler clients use when warning headers are encountered.
+// By default, warnings are logged. Several built-in implementations are provided:
+//   - NoWarnings suppresses warnings.
+//   - WarningLogger logs warnings.
+//   - NewWarningWriter() outputs warnings to the provided writer.
+func SetDefaultWarningHandler(l WarningHandler) {
+	defaultWarningHandlerLock.Lock()
+	defer defaultWarningHandlerLock.Unlock()
+	defaultWarningHandler = l
+}
+func getDefaultWarningHandler() WarningHandler {
+	defaultWarningHandlerLock.RLock()
+	defer defaultWarningHandlerLock.RUnlock()
+	l := defaultWarningHandler
+	return l
+}
+
+// NoWarnings is an implementation of WarningHandler that suppresses warnings.
+type NoWarnings struct{}
+
+func (NoWarnings) HandleWarningHeader(code int, agent string, message string) {}
+
+// WarningLogger is an implementation of WarningHandler that logs code 299 warnings
+type WarningLogger struct{}
+
+func (WarningLogger) HandleWarningHeader(code int, agent string, message string) {
+	if code != 299 || len(message) == 0 {
+		return
+	}
+	klog.Warning(message)
+}
+
+type warningWriter struct {
+	// out is the writer to output warnings to
+	out io.Writer
+	// opts contains options controlling warning output
+	opts WarningWriterOptions
+	// writtenLock guards written and writtenCount
+	writtenLock  sync.Mutex
+	writtenCount int
+	written      map[string]struct{}
+}
+
+// WarningWriterOptions controls the behavior of a WarningHandler constructed using NewWarningWriter()
+type WarningWriterOptions struct {
+	// Deduplicate indicates a given warning message should only be written once.
+	// Setting this to true in a long-running process handling many warnings can result in increased memory use.
+	Deduplicate bool
+	// Color indicates that warning output can include ANSI color codes
+	Color bool
+}
+
+// NewWarningWriter returns an implementation of WarningHandler that outputs code 299 warnings to the specified writer.
+func NewWarningWriter(out io.Writer, opts WarningWriterOptions) *warningWriter {
+	h := &warningWriter{out: out, opts: opts}
+	if opts.Deduplicate {
+		h.written = map[string]struct{}{}
+	}
+	return h
+}
+
+const (
+	yellowColor = "\u001b[33;1m"
+	resetColor  = "\u001b[0m"
+)
+
+// HandleWarningHeader prints warnings with code=299 to the configured writer.
+func (w *warningWriter) HandleWarningHeader(code int, agent string, message string) {
+	if code != 299 || len(message) == 0 {
+		return
+	}
+
+	w.writtenLock.Lock()
+	defer w.writtenLock.Unlock()
+
+	if w.opts.Deduplicate {
+		if _, alreadyWritten := w.written[message]; alreadyWritten {
+			return
+		}
+		w.written[message] = struct{}{}
+	}
+	w.writtenCount++
+
+	if w.opts.Color {
+		fmt.Fprintf(w.out, "%sWarning:%s %s\n", yellowColor, resetColor, message)
+	} else {
+		fmt.Fprintf(w.out, "Warning: %s\n", message)
+	}
+}
+
+func (w *warningWriter) WarningCount() int {
+	w.writtenLock.Lock()
+	defer w.writtenLock.Unlock()
+	return w.writtenCount
+}
+
+func handleWarnings(headers http.Header, handler WarningHandler) []net.WarningHeader {
+	if handler == nil {
+		handler = getDefaultWarningHandler()
+	}
+
+	warnings, _ := net.ParseWarningHeaders(headers["Warning"])
+	for _, warning := range warnings {
+		handler.HandleWarningHeader(warning.Code, warning.Agent, warning.Text)
+	}
+	return warnings
+}
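A usage sketch for the handlers above: swapping the default klog-based logger for a deduplicating stderr writer, which is roughly what kubectl does.

```go
package main

import (
	"os"

	"k8s.io/client-go/rest"
)

func main() {
	// Print each distinct code-299 warning to stderr exactly once.
	rest.SetDefaultWarningHandler(
		rest.NewWarningWriter(os.Stderr, rest.WarningWriterOptions{
			Deduplicate: true,
		}),
	)

	// Alternatively, suppress warnings entirely:
	// rest.SetDefaultWarningHandler(rest.NoWarnings{})
}
```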
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/watch/decoder.go b/hack/tools/vendor/k8s.io/client-go/rest/watch/decoder.go
new file mode 100644
index 0000000000..c2b68cbcb7
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/watch/decoder.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+// Decoder implements the watch.Decoder interface for io.ReadClosers that
+// have contents which consist of a series of watchEvent objects encoded
+// with the given streaming decoder. The internal objects will then be
+// decoded by the embedded decoder.
+type Decoder struct {
+	decoder         streaming.Decoder
+	embeddedDecoder runtime.Decoder
+}
+
+// NewDecoder creates a Decoder for the given streaming decoder and embedded object decoder.
+func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder {
+	return &Decoder{
+		decoder:         decoder,
+		embeddedDecoder: embeddedDecoder,
+	}
+}
+
+// Decode blocks until it can return the next object in the reader. Returns an error
+// if the reader is closed or an object can't be decoded.
+func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) {
+	var got metav1.WatchEvent
+	res, _, err := d.decoder.Decode(nil, &got)
+	if err != nil {
+		return "", nil, err
+	}
+	if res != &got {
+		return "", nil, fmt.Errorf("unable to decode to metav1.WatchEvent")
+	}
+	switch got.Type {
+	case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error), string(watch.Bookmark):
+	default:
+		return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type)
+	}
+
+	obj, err := runtime.Decode(d.embeddedDecoder, got.Object.Raw)
+	if err != nil {
+		return "", nil, fmt.Errorf("unable to decode watch event: %v", err)
+	}
+	return watch.EventType(got.Type), obj, nil
+}
+
+// Close closes the underlying r.
+func (d *Decoder) Close() {
+	d.decoder.Close()
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/watch/encoder.go b/hack/tools/vendor/k8s.io/client-go/rest/watch/encoder.go
new file mode 100644
index 0000000000..a95b4985c5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/watch/encoder.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+	"encoding/json"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+// Encoder serializes watch.Events into io.Writer. The internal objects
+// are encoded using embedded encoder, and the outer Event is serialized
+// using encoder.
+// TODO: this type is only used by tests
+type Encoder struct {
+	encoder         streaming.Encoder
+	embeddedEncoder runtime.Encoder
+}
+
+func NewEncoder(encoder streaming.Encoder, embeddedEncoder runtime.Encoder) *Encoder {
+	return &Encoder{
+		encoder:         encoder,
+		embeddedEncoder: embeddedEncoder,
+	}
+}
+
+// Encode writes an event to the writer. Returns an error
+// if the writer is closed or an object can't be encoded.
+func (e *Encoder) Encode(event *watch.Event) error {
+	data, err := runtime.Encode(e.embeddedEncoder, event.Object)
+	if err != nil {
+		return err
+	}
+	// FIXME: get rid of json.RawMessage.
+	return e.encoder.Encode(&metav1.WatchEvent{
+		Type:   string(event.Type),
+		Object: runtime.RawExtension{Raw: json.RawMessage(data)},
+	})
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/with_retry.go b/hack/tools/vendor/k8s.io/client-go/rest/with_retry.go
new file mode 100644
index 0000000000..eaaadc6a4c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/with_retry.go
@@ -0,0 +1,369 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"time"
+
+	"k8s.io/klog/v2"
+)
+
+// IsRetryableErrorFunc allows the client to provide its own function
+// that determines whether the specified err from the server is retryable.
+//
+// request: the original request sent to the server
+// err: the server sent this error to us
+//
+// The function returns true if the error is retryable and the request
+// can be retried, otherwise it returns false.
+// We have four modes of communication - 'Stream', 'Watch', 'Do' and 'DoRaw'; this
+// function allows us to customize the retryability aspect of each.
+type IsRetryableErrorFunc func(request *http.Request, err error) bool
+
+func (r IsRetryableErrorFunc) IsErrorRetryable(request *http.Request, err error) bool {
+	return r(request, err)
+}
+
+var neverRetryError = IsRetryableErrorFunc(func(_ *http.Request, _ error) bool {
+	return false
+})
+
+// WithRetry allows the client to retry a request up to a certain number of times
+// Note that WithRetry is not safe for concurrent use by multiple
+// goroutines without additional locking or coordination.
+type WithRetry interface {
+	// IsNextRetry advances the retry counter appropriately
+	// and returns true if the request should be retried,
+	// otherwise it returns false, if:
+	//  - we have already reached the maximum retry threshold.
+	//  - the error does not fall into the retryable category.
+	//  - the server has not sent us a 429 or 5xx status code, and the
+	//    'Retry-After' response header is not set with a value.
+	//  - we need to seek to the beginning of the request body before we
+	//    initiate the next retry, the function should log an error and
+	//    return false if it fails to do so.
+	//
+	// restReq: the associated rest.Request
+	// httpReq: the HTTP Request sent to the server
+	// resp: the response sent from the server, it is set if err is nil
+	// err: the server sent this error to us, if err is set then resp is nil.
+	// f: a IsRetryableErrorFunc function provided by the client that determines
+	//    if the err sent by the server is retryable.
+	IsNextRetry(ctx context.Context, restReq *Request, httpReq *http.Request, resp *http.Response, err error, f IsRetryableErrorFunc) bool
+
+	// Before should be invoked prior to each attempt, including
+	// the first one. If an error is returned, the request should
+	// be aborted immediately.
+	//
+	// Before may also be additionally responsible for preparing
+	// the request for the next retry, namely in terms of resetting
+	// the request body in case it has been read.
+	Before(ctx context.Context, r *Request) error
+
+	// After should be invoked immediately after an attempt is made.
+	After(ctx context.Context, r *Request, resp *http.Response, err error)
+
+	// WrapPreviousError wraps the error from any previous attempt into
+	// the final error specified in 'finalErr', so the user has more
+	// context why the request failed.
+	// For example, if a request times out after multiple retries then
+	// we see a generic context.Canceled or context.DeadlineExceeded
+	// error which is not very useful in debugging. This function can
+	// wrap any error from previous attempt(s) to provide more context to
+	// the user. The error returned in 'err' must satisfy the
+	// following conditions:
+	//  a: errors.Unwrap(err) = errors.Unwrap(finalErr) if finalErr
+	//     implements Unwrap
+	//  b: errors.Unwrap(err) = finalErr if finalErr does not
+	//     implement Unwrap
+	//  c: errors.Is(err, otherErr) = errors.Is(finalErr, otherErr)
+	WrapPreviousError(finalErr error) (err error)
+}
+
+// RetryAfter holds information associated with the next retry.
+type RetryAfter struct {
+	// Wait is the duration the server has asked us to wait before
+	// the next retry is initiated.
+	// This is the value of the 'Retry-After' response header in seconds.
+	Wait time.Duration
+
+	// Attempt is the Nth attempt after which we have received a retryable
+	// error or a 'Retry-After' response header from the server.
+	Attempt int
+
+	// Reason describes why we are retrying the request
+	Reason string
+}
+
+type withRetry struct {
+	maxRetries int
+	attempts   int
+
+	// retry after parameters that pertain to the attempt that is to
+	// be made soon, so as to enable 'Before' and 'After' to refer
+	// to the retry parameters.
+	//  - for the first attempt, it will always be nil
+	//  - for consecutive attempts, it is non nil and holds the
+	//    retry after parameters for the next attempt to be made.
+	retryAfter *RetryAfter
+
+	// we keep track of two most recent errors, if the most
+	// recent attempt is labeled as 'N' then:
+	//  - currentErr represents the error returned by attempt N, it
+	//    can be nil if attempt N did not return an error.
+	//  - previousErr represents an error from an attempt 'M' which
+	//    precedes attempt 'N' (N - M >= 1), it is non nil only when:
+	//      - for a sequence of attempt(s) 1..n (n>1), there
+	//        is an attempt k (k < n) that returned an error.
+	currentErr, previousErr error
+}
+
+func (r *withRetry) trackPreviousError(err error) {
+	// keep track of the two most recent errors
+	if r.currentErr != nil {
+		r.previousErr = r.currentErr
+	}
+	r.currentErr = err
+}
+
+func (r *withRetry) IsNextRetry(ctx context.Context, restReq *Request, httpReq *http.Request, resp *http.Response, err error, f IsRetryableErrorFunc) bool {
+	defer r.trackPreviousError(err)
+
+	if httpReq == nil || (resp == nil && err == nil) {
+		// bad input, we do nothing.
+		return false
+	}
+
+	r.attempts++
+	r.retryAfter = &RetryAfter{Attempt: r.attempts}
+	if r.attempts > r.maxRetries {
+		return false
+	}
+
+	// if the server returned an error, it takes precedence over the http response.
+	var errIsRetryable bool
+	if f != nil && err != nil && f.IsErrorRetryable(httpReq, err) {
+		errIsRetryable = true
+		// we have a retryable error, for which we will create an
+		// artificial "Retry-After" response.
+		resp = retryAfterResponse()
+	}
+	if err != nil && !errIsRetryable {
+		return false
+	}
+
+	// if we are here, we have either a or b:
+	//  a: we have a retryable error, for which we already
+	//     have an artificial "Retry-After" response.
+	//  b: we have a response from the server for which we
+	//     need to check if it is retryable
+	seconds, wait := checkWait(resp)
+	if !wait {
+		return false
+	}
+
+	r.retryAfter.Wait = time.Duration(seconds) * time.Second
+	r.retryAfter.Reason = getRetryReason(r.attempts, seconds, resp, err)
+
+	return true
+}
+
+func (r *withRetry) Before(ctx context.Context, request *Request) error {
+	// If the request context is already canceled there
+	// is no need to retry.
+	if ctx.Err() != nil {
+		r.trackPreviousError(ctx.Err())
+		return ctx.Err()
+	}
+
+	url := request.URL()
+	// r.retryAfter represents the retry after parameters calculated
+	// from the (response, err) tuple from the last attempt, so 'Before'
+	// can apply these retry after parameters prior to the next attempt.
+	// 'r.retryAfter == nil' indicates that this is the very first attempt.
+	if r.retryAfter == nil {
+		// we do a backoff sleep before the first attempt is made,
+		// (preserving current behavior).
+		if request.backoff != nil {
+			request.backoff.Sleep(request.backoff.CalculateBackoff(url))
+		}
+		return nil
+	}
+
+	// if we are here, we have made attempt(s) at least once before.
+	if request.backoff != nil {
+		delay := request.backoff.CalculateBackoff(url)
+		if r.retryAfter.Wait > delay {
+			delay = r.retryAfter.Wait
+		}
+		request.backoff.Sleep(delay)
+	}
+
+	// We are retrying the request that we already send to
+	// apiserver at least once before. This request should
+	// also be throttled with the client-internal rate limiter.
+	if err := request.tryThrottleWithInfo(ctx, r.retryAfter.Reason); err != nil {
+		r.trackPreviousError(ctx.Err())
+		return err
+	}
+
+	klog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", r.retryAfter.Wait, r.retryAfter.Attempt, request.URL().String())
+	return nil
+}
+
+func (r *withRetry) After(ctx context.Context, request *Request, resp *http.Response, err error) {
+	// 'After' is invoked immediately after an attempt is made, let's label
+	// the attempt we have just made as attempt 'N'.
+	// the current value of r.retryAfter represents the retry after
+	// parameters calculated from the (response, err) tuple from
+	// attempt N-1, so r.retryAfter is outdated and should not be
+	// referred to here.
+	isRetry := r.retryAfter != nil
+	r.retryAfter = nil
+
+	// the client finishes a single request after N attempts (1..N)
+	//  - all attempts (1..N) are counted to the rest_client_requests_total
+	//    metric (current behavior).
+	//  - every attempt after the first (2..N) are counted to the
+	//    rest_client_request_retries_total metric.
+	updateRequestResultMetric(ctx, request, resp, err)
+	if isRetry {
+		// this is attempt 2 or later
+		updateRequestRetryMetric(ctx, request, resp, err)
+	}
+
+	if request.c.base != nil {
+		if err != nil {
+			request.backoff.UpdateBackoff(request.URL(), err, 0)
+		} else {
+			request.backoff.UpdateBackoff(request.URL(), err, resp.StatusCode)
+		}
+	}
+}
+
+func (r *withRetry) WrapPreviousError(currentErr error) error {
+	if currentErr == nil || r.previousErr == nil {
+		return currentErr
+	}
+
+	// if both previous and current error objects represent the error,
+	// then there is no need to wrap the previous error.
+	if currentErr.Error() == r.previousErr.Error() {
+		return currentErr
+	}
+
+	previousErr := r.previousErr
+	// net/http wraps the underlying error with an url.Error, if the
+	// previous err object is an instance of url.Error, then we can
+	// unwrap it to get to the inner error object, this is so we can
+	// avoid error message like:
+	//  Error: Get "http://foo.bar/api/v1": context deadline exceeded - error \
+	//  from a previous attempt: Error: Get "http://foo.bar/api/v1": EOF
+	if urlErr, ok := r.previousErr.(*url.Error); ok && urlErr != nil {
+		if urlErr.Unwrap() != nil {
+			previousErr = urlErr.Unwrap()
+		}
+	}
+
+	return &wrapPreviousError{
+		currentErr:    currentErr,
+		previousError: previousErr,
+	}
+}
+
+type wrapPreviousError struct {
+	currentErr, previousError error
+}
+
+func (w *wrapPreviousError) Unwrap() error { return w.currentErr }
+func (w *wrapPreviousError) Error() string {
+	return fmt.Sprintf("%s - error from a previous attempt: %s", w.currentErr.Error(), w.previousError.Error())
+}
+
+// checkWait returns true along with a number of seconds if
+// the server instructed us to wait before retrying.
+func checkWait(resp *http.Response) (int, bool) {
+	switch r := resp.StatusCode; {
+	// any 5xx error code and 429 can trigger a wait
+	case r == http.StatusTooManyRequests, r >= 500:
+	default:
+		return 0, false
+	}
+	i, ok := retryAfterSeconds(resp)
+	return i, ok
+}
+
+func getRetryReason(retries, seconds int, resp *http.Response, err error) string {
+	// priority and fairness sets the UID of the FlowSchema
+	// associated with a request in the following response Header.
+	const responseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID"
+
+	message := fmt.Sprintf("retries: %d, retry-after: %ds", retries, seconds)
+
+	switch {
+	case resp.StatusCode == http.StatusTooManyRequests:
+		// it is server-side throttling from priority and fairness
+		flowSchemaUID := resp.Header.Get(responseHeaderMatchedFlowSchemaUID)
+		return fmt.Sprintf("%s - retry-reason: due to server-side throttling, FlowSchema UID: %q", message, flowSchemaUID)
+	case err != nil:
+		// it's a retryable error
+		return fmt.Sprintf("%s - retry-reason: due to retryable error, error: %v", message, err)
+	default:
+		return fmt.Sprintf("%s - retry-reason: %d", message, resp.StatusCode)
+	}
+}
+
+func readAndCloseResponseBody(resp *http.Response) {
+	if resp == nil {
+		return
+	}
+
+	// Ensure the response body is fully read and closed
+	// before we reconnect, so that we reuse the same TCP
+	// connection.
+	const maxBodySlurpSize = 2 << 10
+	defer resp.Body.Close()
+
+	if resp.ContentLength <= maxBodySlurpSize {
+		io.Copy(io.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
+	}
+}
+
+func retryAfterResponse() *http.Response {
+	return retryAfterResponseWithDelay("1")
+}
+
+func retryAfterResponseWithDelay(delay string) *http.Response {
+	return retryAfterResponseWithCodeAndDelay(http.StatusInternalServerError, delay)
+}
+
+func retryAfterResponseWithCodeAndDelay(code int, delay string) *http.Response {
+	return &http.Response{
+		StatusCode: code,
+		Header:     http.Header{"Retry-After": []string{delay}},
+	}
+}
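The retry machinery above is internal; client code opts in per request. A hedged sketch of that entry point (the /healthz path is just an example, and RESTClient construction is elided):

```go
package healthcheck

import (
	"context"
	"fmt"

	"k8s.io/client-go/rest"
)

// checkHealthz issues a request that may be retried up to five times.
// Retries beyond the first attempt honor Retry-After headers and the
// client backoff via the withRetry logic above.
func checkHealthz(ctx context.Context, c rest.Interface) error {
	body, err := c.Get().
		AbsPath("/healthz").
		MaxRetries(5).
		DoRaw(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("healthz: %s\n", body)
	return nil
}
```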
diff --git a/hack/tools/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..86991be3b5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go
@@ -0,0 +1,58 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package rest
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) {
+	*out = *in
+	if in.CertData != nil {
+		in, out := &in.CertData, &out.CertData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.KeyData != nil {
+		in, out := &in.KeyData, &out.KeyData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.CAData != nil {
+		in, out := &in.CAData, &out.CAData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.NextProtos != nil {
+		in, out := &in.NextProtos, &out.NextProtos
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSClientConfig.
+func (in *TLSClientConfig) DeepCopy() *TLSClientConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSClientConfig)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/auth/OWNERS b/hack/tools/vendor/k8s.io/client-go/tools/auth/OWNERS
new file mode 100644
index 0000000000..c4ea6463df
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/auth/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - sig-auth-authenticators-approvers
+reviewers:
+  - sig-auth-authenticators-reviewers
+labels:
+  - sig/auth
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/auth/clientauth.go b/hack/tools/vendor/k8s.io/client-go/tools/auth/clientauth.go
new file mode 100644
index 0000000000..5a256c1c3e
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/auth/clientauth.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package auth defines a file format for holding authentication
+information needed by clients of Kubernetes.  Typically,
+a Kubernetes cluster will put auth info for the admin in a known
+location when it is created, and will (soon) put it in a known
+location within a Container's file tree for Containers that
+need access to the Kubernetes API.
+
+Having a defined format allows:
+  - clients to be implemented in multiple languages
+  - applications which link clients to be portable across
+    clusters with different authentication styles (e.g.
+    some may use SSL Client certs, others may not, etc)
+  - when the format changes, applications only
+    need to update this code.
+
+The file format is json, marshalled from a struct authcfg.Info.
+
+Client libraries in other languages should use the same format.
+
+It is not intended to store general preferences, such as default
+namespace, output options, etc.  CLIs (such as kubectl) and UIs should
+develop their own format and may wish to inline the authcfg.Info type.
+
+The authcfg.Info is just a file format.  It is distinct from
+client.Config which holds options for creating a client.Client.
+Helper functions are provided in this package to fill in a
+client.Client from an authcfg.Info.
+
+Example:
+
+	import (
+	    "pkg/client"
+	    "pkg/client/auth"
+	)
+
+	info, err := auth.LoadFromFile(filename)
+	if err != nil {
+	  // handle error
+	}
+	clientConfig = client.Config{}
+	clientConfig.Host = "example.com:4901"
+	clientConfig, err = info.MergeWithConfig(clientConfig)
+	client := client.New(clientConfig)
+	client.Pods(ns).List()
+*/
+package auth
+
+// TODO: need a way to rotate Tokens.  Therefore, need a way for client object to be reset when the authcfg is updated.
+import (
+	"encoding/json"
+	"os"
+
+	restclient "k8s.io/client-go/rest"
+)
+
+// Info holds Kubernetes API authorization config.  It is intended
+// to be read/written from a file as a JSON object.
+type Info struct {
+	User        string
+	Password    string `datapolicy:"password"`
+	CAFile      string
+	CertFile    string
+	KeyFile     string
+	BearerToken string `datapolicy:"token"`
+	Insecure    *bool
+}
+
+// LoadFromFile parses an Info object from a file path.
+// If the file does not exist, then os.IsNotExist(err) == true
+func LoadFromFile(path string) (*Info, error) {
+	var info Info
+	if _, err := os.Stat(path); os.IsNotExist(err) {
+		return nil, err
+	}
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(data, &info)
+	if err != nil {
+		return nil, err
+	}
+	return &info, err
+}
+
+// MergeWithConfig returns a copy of a client.Config with values from the Info.
+// The fields of client.Config with a corresponding field in the Info are set
+// with the value from the Info.
+func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) {
+	var config = c
+	config.Username = info.User
+	config.Password = info.Password
+	config.CAFile = info.CAFile
+	config.CertFile = info.CertFile
+	config.KeyFile = info.KeyFile
+	config.BearerToken = info.BearerToken
+	if info.Insecure != nil {
+		config.Insecure = *info.Insecure
+	}
+	return config, nil
+}
+
+// Complete returns true if the Kubernetes API authorization info is complete.
+func (info Info) Complete() bool {
+	return len(info.User) > 0 ||
+		len(info.CertFile) > 0 ||
+		len(info.BearerToken) > 0
+}
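A modernized version of the package-doc example above, written against the real MergeWithConfig signature (the file path and host are placeholders):

```go
package main

import (
	"fmt"

	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/auth"
)

func main() {
	info, err := auth.LoadFromFile("/var/run/kubernetes/authcfg.json") // placeholder path
	if err != nil {
		panic(err)
	}
	// Copy the Info's credentials onto a base rest.Config.
	cfg, err := info.MergeWithConfig(restclient.Config{Host: "example.com:4901"})
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Host, cfg.Username)
}
```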
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
new file mode 100644
index 0000000000..fd913a3083
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+package api // import "k8s.io/client-go/tools/clientcmd/api"
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
new file mode 100644
index 0000000000..261dcacb5b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go
@@ -0,0 +1,265 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+)
+
+func init() {
+	sDec, _ := base64.StdEncoding.DecodeString("REDACTED+")
+	redactedBytes = []byte(string(sDec))
+	sDec, _ = base64.StdEncoding.DecodeString("DATA+OMITTED")
+	dataOmittedBytes = []byte(string(sDec))
+}
+
+// IsConfigEmpty returns true if the config is empty.
+func IsConfigEmpty(config *Config) bool {
+	return len(config.AuthInfos) == 0 && len(config.Clusters) == 0 && len(config.Contexts) == 0 &&
+		len(config.CurrentContext) == 0 &&
+		len(config.Preferences.Extensions) == 0 && !config.Preferences.Colors &&
+		len(config.Extensions) == 0
+}
+
+// MinifyConfig reads the current context and uses that to keep only the relevant pieces of config
+// This is useful for making secrets based on kubeconfig files
+func MinifyConfig(config *Config) error {
+	if len(config.CurrentContext) == 0 {
+		return errors.New("current-context must exist in order to minify")
+	}
+
+	currContext, exists := config.Contexts[config.CurrentContext]
+	if !exists {
+		return fmt.Errorf("cannot locate context %v", config.CurrentContext)
+	}
+
+	newContexts := map[string]*Context{}
+	newContexts[config.CurrentContext] = currContext
+
+	newClusters := map[string]*Cluster{}
+	if len(currContext.Cluster) > 0 {
+		if _, exists := config.Clusters[currContext.Cluster]; !exists {
+			return fmt.Errorf("cannot locate cluster %v", currContext.Cluster)
+		}
+
+		newClusters[currContext.Cluster] = config.Clusters[currContext.Cluster]
+	}
+
+	newAuthInfos := map[string]*AuthInfo{}
+	if len(currContext.AuthInfo) > 0 {
+		if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists {
+			return fmt.Errorf("cannot locate user %v", currContext.AuthInfo)
+		}
+
+		newAuthInfos[currContext.AuthInfo] = config.AuthInfos[currContext.AuthInfo]
+	}
+
+	config.AuthInfos = newAuthInfos
+	config.Clusters = newClusters
+	config.Contexts = newContexts
+
+	return nil
+}
+
+var (
+	dataOmittedBytes []byte
+	redactedBytes    []byte
+)
+
+// ShortenConfig redacts raw data entries from the config object for a human-readable view.
+func ShortenConfig(config *Config) {
+	// trick json encoder into printing a human-readable string in the raw data
+	// by base64 decoding what we want to print. Relies on implementation of
+	// http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte
+	for key, authInfo := range config.AuthInfos {
+		if len(authInfo.ClientKeyData) > 0 {
+			authInfo.ClientKeyData = dataOmittedBytes
+		}
+		if len(authInfo.ClientCertificateData) > 0 {
+			authInfo.ClientCertificateData = dataOmittedBytes
+		}
+		if len(authInfo.Token) > 0 {
+			authInfo.Token = "REDACTED"
+		}
+		config.AuthInfos[key] = authInfo
+	}
+	for key, cluster := range config.Clusters {
+		if len(cluster.CertificateAuthorityData) > 0 {
+			cluster.CertificateAuthorityData = dataOmittedBytes
+		}
+		config.Clusters[key] = cluster
+	}
+}
+
+// FlattenConfig changes the config object into a self-contained config (useful for making secrets)
+func FlattenConfig(config *Config) error {
+	for key, authInfo := range config.AuthInfos {
+		baseDir, err := MakeAbs(filepath.Dir(authInfo.LocationOfOrigin), "")
+		if err != nil {
+			return err
+		}
+
+		if err := FlattenContent(&authInfo.ClientCertificate, &authInfo.ClientCertificateData, baseDir); err != nil {
+			return err
+		}
+		if err := FlattenContent(&authInfo.ClientKey, &authInfo.ClientKeyData, baseDir); err != nil {
+			return err
+		}
+
+		config.AuthInfos[key] = authInfo
+	}
+	for key, cluster := range config.Clusters {
+		baseDir, err := MakeAbs(filepath.Dir(cluster.LocationOfOrigin), "")
+		if err != nil {
+			return err
+		}
+
+		if err := FlattenContent(&cluster.CertificateAuthority, &cluster.CertificateAuthorityData, baseDir); err != nil {
+			return err
+		}
+
+		config.Clusters[key] = cluster
+	}
+
+	return nil
+}
+
+func FlattenContent(path *string, contents *[]byte, baseDir string) error {
+	if len(*path) != 0 {
+		if len(*contents) > 0 {
+			return errors.New("cannot have values for both path and contents")
+		}
+
+		var err error
+		absPath := ResolvePath(*path, baseDir)
+		*contents, err = os.ReadFile(absPath)
+		if err != nil {
+			return err
+		}
+
+		*path = ""
+	}
+
+	return nil
+}
+
+// ResolvePath returns the path as an absolute path, relative to the given base directory
+func ResolvePath(path string, base string) string {
+	// Don't resolve empty paths
+	if len(path) > 0 {
+		// Don't resolve absolute paths
+		if !filepath.IsAbs(path) {
+			return filepath.Join(base, path)
+		}
+	}
+
+	return path
+}
+
+func MakeAbs(path, base string) (string, error) {
+	if filepath.IsAbs(path) {
+		return path, nil
+	}
+	if len(base) == 0 {
+		cwd, err := os.Getwd()
+		if err != nil {
+			return "", err
+		}
+		base = cwd
+	}
+	return filepath.Join(base, path), nil
+}
+
+// RedactSecrets replaces any sensitive values with REDACTED
+func RedactSecrets(config *Config) error {
+	return redactSecrets(reflect.ValueOf(config), false)
+}
+
+func redactSecrets(curr reflect.Value, redact bool) error {
+	redactedBytes = []byte("REDACTED")
+	if !curr.IsValid() {
+		return nil
+	}
+
+	actualCurrValue := curr
+	if curr.Kind() == reflect.Ptr {
+		actualCurrValue = curr.Elem()
+	}
+
+	switch actualCurrValue.Kind() {
+	case reflect.Map:
+		for _, v := range actualCurrValue.MapKeys() {
+			err := redactSecrets(actualCurrValue.MapIndex(v), false)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+
+	case reflect.String:
+		if redact {
+			if !actualCurrValue.IsZero() {
+				actualCurrValue.SetString("REDACTED")
+			}
+		}
+		return nil
+
+	case reflect.Slice:
+		if actualCurrValue.Type() == reflect.TypeOf([]byte{}) && redact {
+			if !actualCurrValue.IsNil() {
+				actualCurrValue.SetBytes(redactedBytes)
+			}
+			return nil
+		}
+		for i := 0; i < actualCurrValue.Len(); i++ {
+			err := redactSecrets(actualCurrValue.Index(i), false)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+
+	case reflect.Struct:
+		for fieldIndex := 0; fieldIndex < actualCurrValue.NumField(); fieldIndex++ {
+			currFieldValue := actualCurrValue.Field(fieldIndex)
+			currFieldType := actualCurrValue.Type().Field(fieldIndex)
+			currYamlTag := currFieldType.Tag.Get("datapolicy")
+			currFieldTypeYamlName := strings.Split(currYamlTag, ",")[0]
+			if currFieldTypeYamlName != "" {
+				err := redactSecrets(currFieldValue, true)
+				if err != nil {
+					return err
+				}
+			} else {
+				err := redactSecrets(currFieldValue, false)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+
+	default:
+		return nil
+	}
+}
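Putting the helpers above together: minify a kubeconfig down to its current context, then redact secrets before displaying it. A sketch with inline placeholder data:

```go
package main

import (
	"fmt"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	cfg := clientcmdapi.Config{
		CurrentContext: "dev",
		Contexts: map[string]*clientcmdapi.Context{
			"dev": {Cluster: "dev", AuthInfo: "alice"},
		},
		Clusters: map[string]*clientcmdapi.Cluster{
			"dev":   {Server: "https://dev.example.invalid"},
			"stale": {Server: "https://stale.example.invalid"},
		},
		AuthInfos: map[string]*clientcmdapi.AuthInfo{
			"alice": {Token: "s3cr3t"},
		},
	}

	// Keep only what the current context references ("stale" is dropped).
	if err := clientcmdapi.MinifyConfig(&cfg); err != nil {
		panic(err)
	}
	// Blank out datapolicy-tagged fields such as Token.
	if err := clientcmdapi.RedactSecrets(&cfg); err != nil {
		panic(err)
	}
	fmt.Println(len(cfg.Clusters), cfg.AuthInfos["alice"].Token) // 1 REDACTED
}
```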
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
new file mode 100644
index 0000000000..c575652b1a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package latest
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/json"
+	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/tools/clientcmd/api"
+	"k8s.io/client-go/tools/clientcmd/api/v1"
+)
+
+// Version is the string that represents the current external default version.
+const Version = "v1"
+
+var ExternalVersion = schema.GroupVersion{Group: "", Version: "v1"}
+
+// OldestVersion is the string that represents the oldest server version supported,
+// for client code that wants to hardcode the lowest common denominator.
+const OldestVersion = "v1"
+
+// Versions is the list of versions that are recognized in code. The order provided
+// may be assumed to be least feature rich to most feature rich, and clients may
+// choose to prefer the latter items in the list over the former items when presented
+// with a set of versions to choose.
+var Versions = []string{"v1"}
+
+var (
+	Codec  runtime.Codec
+	Scheme *runtime.Scheme
+)
+
+func init() {
+	Scheme = runtime.NewScheme()
+	utilruntime.Must(api.AddToScheme(Scheme))
+	utilruntime.Must(v1.AddToScheme(Scheme))
+	yamlSerializer := json.NewSerializerWithOptions(json.DefaultMetaFactory, Scheme, Scheme, json.SerializerOptions{Yaml: true})
+	Codec = versioning.NewDefaultingCodecForScheme(
+		Scheme,
+		yamlSerializer,
+		yamlSerializer,
+		schema.GroupVersion{Version: Version},
+		runtime.InternalGroupVersioner,
+	)
+}
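The Codec wired up above is what turns the internal Config into the v1 YAML found in kubeconfig files; a minimal encoding sketch:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
)

func main() {
	cfg := clientcmdapi.NewConfig()
	cfg.CurrentContext = "dev"

	// Encode converts the internal Config to the external v1 type and
	// serializes it as YAML, i.e. the on-disk kubeconfig format.
	data, err := runtime.Encode(clientcmdlatest.Codec, cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", data)
}
```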
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/register.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/register.go
new file mode 100644
index 0000000000..2eec3881cd
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/register.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+// TODO this should be in the "kubeconfig" group
+var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Config{},
+	)
+	return nil
+}
+
+func (obj *Config) GetObjectKind() schema.ObjectKind { return obj }
+func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+func (obj *Config) GroupVersionKind() schema.GroupVersionKind {
+	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
new file mode 100644
index 0000000000..ae8b8c7038
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
@@ -0,0 +1,375 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// Where possible, json tags match the cli argument names.
+// Top level config objects and all values required for proper functioning are not "omitempty".  Any truly optional piece of config is allowed to be omitted.
+
+// Config holds the information needed to connect to remote kubernetes clusters as a given user
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type Config struct {
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	// +k8s:conversion-gen=false
+	// +optional
+	Kind string `json:"kind,omitempty"`
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	// +k8s:conversion-gen=false
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty"`
+	// Preferences holds general information to be used for cli interactions
+	Preferences Preferences `json:"preferences"`
+	// Clusters is a map of referencable names to cluster configs
+	Clusters map[string]*Cluster `json:"clusters"`
+	// AuthInfos is a map of referencable names to user configs
+	AuthInfos map[string]*AuthInfo `json:"users"`
+	// Contexts is a map of referencable names to context configs
+	Contexts map[string]*Context `json:"contexts"`
+	// CurrentContext is the name of the context that you would like to use by default
+	CurrentContext string `json:"current-context"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
+type Preferences struct {
+	// +optional
+	Colors bool `json:"colors,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// Cluster contains information about how to communicate with a kubernetes cluster
+type Cluster struct {
+	// LocationOfOrigin indicates where this object came from.  It is used for round tripping config post-merge, but never serialized.
+	// +k8s:conversion-gen=false
+	LocationOfOrigin string `json:"-"`
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// TLSServerName is used to check server certificate. If TLSServerName is empty, the hostname used to contact the server is used.
+	// +optional
+	TLSServerName string `json:"tls-server-name,omitempty"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+	// +optional
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CertificateAuthority is the path to a cert file for the certificate authority.
+	// +optional
+	CertificateAuthority string `json:"certificate-authority,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+	// +optional
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+	// ProxyURL is the URL to the proxy to be used for all requests made by this
+	// client. URLs with "http", "https", and "socks5" schemes are supported.  If
+	// this configuration is not provided or the empty string, the client
+	// attempts to construct a proxy configuration from http_proxy and
+	// https_proxy environment variables. If these environment variables are not
+	// set, the client does not attempt to proxy requests.
+	//
+	// socks5 proxying does not currently support spdy streaming endpoints (exec,
+	// attach, port forward).
+	// +optional
+	ProxyURL string `json:"proxy-url,omitempty"`
+	// DisableCompression allows the client to opt out of response compression for all requests to the server. This is useful
+	// to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on
+	// compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296.
+	// +optional
+	DisableCompression bool `json:"disable-compression,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// AuthInfo contains information that describes identity information.  This is used to tell the kubernetes cluster who you are.
+type AuthInfo struct {
+	// LocationOfOrigin indicates where this object came from.  It is used for round tripping config post-merge, but never serialized.
+	// +k8s:conversion-gen=false
+	LocationOfOrigin string `json:"-"`
+	// ClientCertificate is the path to a client cert file for TLS.
+	// +optional
+	ClientCertificate string `json:"client-certificate,omitempty"`
+	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+	// +optional
+	ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+	// ClientKey is the path to a client key file for TLS.
+	// +optional
+	ClientKey string `json:"client-key,omitempty"`
+	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+	// +optional
+	ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"`
+	// Token is the bearer token for authentication to the kubernetes cluster.
+	// +optional
+	Token string `json:"token,omitempty" datapolicy:"token"`
+	// TokenFile is a pointer to a file that contains a bearer token (as described above).  If both Token and TokenFile are present, Token takes precedence.
+	// +optional
+	TokenFile string `json:"tokenFile,omitempty"`
+	// Impersonate is the username to act as.
+	// +optional
+	Impersonate string `json:"act-as,omitempty"`
+	// ImpersonateUID is the uid to impersonate.
+	// +optional
+	ImpersonateUID string `json:"act-as-uid,omitempty"`
+	// ImpersonateGroups is the groups to impersonate.
+	// +optional
+	ImpersonateGroups []string `json:"act-as-groups,omitempty"`
+	// ImpersonateUserExtra contains additional information for impersonated user.
+	// +optional
+	ImpersonateUserExtra map[string][]string `json:"act-as-user-extra,omitempty"`
+	// Username is the username for basic authentication to the kubernetes cluster.
+	// +optional
+	Username string `json:"username,omitempty"`
+	// Password is the password for basic authentication to the kubernetes cluster.
+	// +optional
+	Password string `json:"password,omitempty" datapolicy:"password"`
+	// AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
+	// +optional
+	AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
+	// Exec specifies a custom exec-based authentication plugin for the kubernetes cluster.
+	// +optional
+	Exec *ExecConfig `json:"exec,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type Context struct {
+	// LocationOfOrigin indicates where this object came from.  It is used for round tripping config post-merge, but never serialized.
+	// +k8s:conversion-gen=false
+	LocationOfOrigin string `json:"-"`
+	// Cluster is the name of the cluster for this context
+	Cluster string `json:"cluster"`
+	// AuthInfo is the name of the authInfo for this context
+	AuthInfo string `json:"user"`
+	// Namespace is the default namespace to use on unspecified requests
+	// +optional
+	Namespace string `json:"namespace,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions map[string]runtime.Object `json:"extensions,omitempty"`
+}
+
+// AuthProviderConfig holds the configuration for a specified auth provider.
+type AuthProviderConfig struct {
+	Name string `json:"name"`
+	// +optional
+	Config map[string]string `json:"config,omitempty"`
+}
+
+var _ fmt.Stringer = new(AuthProviderConfig)
+var _ fmt.GoStringer = new(AuthProviderConfig)
+
+// GoString implements fmt.GoStringer and sanitizes sensitive fields of
+// AuthProviderConfig to prevent accidental leaking via logs.
+func (c AuthProviderConfig) GoString() string {
+	return c.String()
+}
+
+// String implements fmt.Stringer and sanitizes sensitive fields of
+// AuthProviderConfig to prevent accidental leaking via logs.
+func (c AuthProviderConfig) String() string {
+	cfg := ""
+	if c.Config != nil {
+		cfg = "--- REDACTED ---"
+	}
+	return fmt.Sprintf("api.AuthProviderConfig{Name: %q, Config: map[string]string{%s}}", c.Name, cfg)
+}
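
For context, a minimal sketch of the sanitization above in action, assuming the standard k8s.io/client-go import path rather than this vendored copy; the provider name and token value are placeholders:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	// "oidc" and the token value are placeholders; any non-nil Config map
	// is collapsed to the REDACTED marker by String/GoString.
	c := api.AuthProviderConfig{
		Name:   "oidc",
		Config: map[string]string{"id-token": "not-a-real-token"},
	}
	// %v routes through String and %#v through GoString, so neither verb
	// can leak the provider config into logs.
	fmt.Printf("%v\n", c)
	fmt.Printf("%#v\n", c)
}

Both verbs print api.AuthProviderConfig{Name: "oidc", Config: map[string]string{--- REDACTED ---}}.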
+
+// ExecConfig specifies a command to provide client credentials. The command is exec'd
+// and outputs structured stdout holding credentials.
+//
+// See the client.authentication.k8s.io API group for specifications of the exact input
+// and output format.
+type ExecConfig struct {
+	// Command to execute.
+	Command string `json:"command"`
+	// Arguments to pass to the command when executing it.
+	// +optional
+	Args []string `json:"args"`
+	// Env defines additional environment variables to expose to the process. These
+	// are unioned with the host's environment, as well as variables client-go uses
+	// to pass arguments to the plugin.
+	// +optional
+	Env []ExecEnvVar `json:"env"`
+
+	// Preferred input version of the ExecInfo. The returned ExecCredentials MUST use
+	// the same encoding version as the input.
+	APIVersion string `json:"apiVersion,omitempty"`
+
+	// This text is shown to the user when the executable doesn't seem to be
+	// present. For example, `brew install foo-cli` might be a good InstallHint for
+	// foo-cli on Mac OS systems.
+	InstallHint string `json:"installHint,omitempty"`
+
+	// ProvideClusterInfo determines whether or not to provide cluster information,
+	// which could potentially contain very large CA data, to this exec plugin as a
+	// part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set
+	// to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for
+	// reading this environment variable.
+	ProvideClusterInfo bool `json:"provideClusterInfo"`
+
+	// Config holds additional config data that is specific to the exec
+	// plugin with regards to the cluster being authenticated to.
+	//
+	// This data is sourced from the clientcmd Cluster object's extensions[exec] field:
+	//
+	// clusters:
+	// - name: my-cluster
+	//   cluster:
+	//     ...
+	//     extensions:
+	//     - name: client.authentication.k8s.io/exec  # reserved extension name for per cluster exec config
+	//       extension:
+	//         audience: 06e3fbd18de8  # arbitrary config
+	//
+	// In some environments, the user config may be exactly the same across many clusters
+	// (i.e. call this exec plugin) minus some details that are specific to each cluster
+	// such as the audience.  This field allows the per cluster config to be directly
+	// specified with the cluster info.  Using this field to store secret data is not
+	// recommended as one of the prime benefits of exec plugins is that no secrets need
+	// to be stored directly in the kubeconfig.
+	// +k8s:conversion-gen=false
+	Config runtime.Object `json:"-"`
+
+	// InteractiveMode determines this plugin's relationship with standard input. Valid
+	// values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this
+	// exec plugin wants to use standard input if it is available), or "Always" (this exec
+	// plugin requires standard input to function). See ExecInteractiveMode values for more
+	// details.
+	//
+	// If APIVersion is client.authentication.k8s.io/v1alpha1 or
+	// client.authentication.k8s.io/v1beta1, then this field is optional and defaults
+	// to "IfAvailable" when unset. Otherwise, this field is required.
+	// +optional
+	InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"`
+
+	// StdinUnavailable indicates whether the exec authenticator can pass standard
+	// input through to this exec plugin. For example, a higher level entity might be using
+	// standard input for something else and therefore it would not be safe for the exec
+	// plugin to use standard input. This is kept here in order to keep all of the exec configuration
+	// together, but it is never serialized.
+	// +k8s:conversion-gen=false
+	StdinUnavailable bool `json:"-"`
+
+	// StdinUnavailableMessage is an optional message to be displayed when the exec authenticator
+	// cannot successfully run this exec plugin because it needs to use standard input and
+	// StdinUnavailable is true. For example, a process that is already using standard input to
+	// read user instructions might set this to "used by my-program to read user instructions".
+	// +k8s:conversion-gen=false
+	StdinUnavailableMessage string `json:"-"`
+}
+
+var _ fmt.Stringer = new(ExecConfig)
+var _ fmt.GoStringer = new(ExecConfig)
+
+// GoString implements fmt.GoStringer and sanitizes sensitive fields of
+// ExecConfig to prevent accidental leaking via logs.
+func (c ExecConfig) GoString() string {
+	return c.String()
+}
+
+// String implements fmt.Stringer and sanitizes sensitive fields of ExecConfig
+// to prevent accidental leaking via logs.
+func (c ExecConfig) String() string {
+	var args []string
+	if len(c.Args) > 0 {
+		args = []string{"--- REDACTED ---"}
+	}
+	env := "[]ExecEnvVar(nil)"
+	if len(c.Env) > 0 {
+		env = "[]ExecEnvVar{--- REDACTED ---}"
+	}
+	config := "runtime.Object(nil)"
+	if c.Config != nil {
+		config = "runtime.Object(--- REDACTED ---)"
+	}
+	return fmt.Sprintf("api.ExecConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q, ProvideClusterInfo: %t, Config: %s, StdinUnavailable: %t}", c.Command, args, env, c.APIVersion, c.ProvideClusterInfo, config, c.StdinUnavailable)
+}
+
+// ExecEnvVar is used for setting environment variables when executing an exec-based
+// credential plugin.
+type ExecEnvVar struct {
+	Name  string `json:"name"`
+	Value string `json:"value"`
+}
+
+// ExecInteractiveMode is a string that describes an exec plugin's relationship with standard input.
+type ExecInteractiveMode string
+
+const (
+	// NeverExecInteractiveMode declares that this exec plugin never needs to use standard
+	// input, and therefore the exec plugin will be run regardless of whether standard input is
+	// available for user input.
+	NeverExecInteractiveMode ExecInteractiveMode = "Never"
+	// IfAvailableExecInteractiveMode declares that this exec plugin would like to use standard input
+	// if it is available, but can still operate if standard input is not available. Therefore, the
+	// exec plugin will be run regardless of whether stdin is available for user input. If standard
+	// input is available for user input, then it will be provided to this exec plugin.
+	IfAvailableExecInteractiveMode ExecInteractiveMode = "IfAvailable"
+	// AlwaysExecInteractiveMode declares that this exec plugin requires standard input in order to
+	// run, and therefore the exec plugin will only be run if standard input is available for user
+	// input. If standard input is not available for user input, then the exec plugin will not be run
+	// and an error will be returned by the exec plugin runner.
+	AlwaysExecInteractiveMode ExecInteractiveMode = "Always"
+)
+
+// NewConfig is a convenience function that returns a new Config object with non-nil maps
+func NewConfig() *Config {
+	return &Config{
+		Preferences: *NewPreferences(),
+		Clusters:    make(map[string]*Cluster),
+		AuthInfos:   make(map[string]*AuthInfo),
+		Contexts:    make(map[string]*Context),
+		Extensions:  make(map[string]runtime.Object),
+	}
+}
+
+// NewContext is a convenience function that returns a new Context
+// object with non-nil maps
+func NewContext() *Context {
+	return &Context{Extensions: make(map[string]runtime.Object)}
+}
+
+// NewCluster is a convenience function that returns a new Cluster
+// object with non-nil maps
+func NewCluster() *Cluster {
+	return &Cluster{Extensions: make(map[string]runtime.Object)}
+}
+
+// NewAuthInfo is a convenience function that returns a new AuthInfo
+// object with non-nil maps
+func NewAuthInfo() *AuthInfo {
+	return &AuthInfo{
+		Extensions:           make(map[string]runtime.Object),
+		ImpersonateUserExtra: make(map[string][]string),
+	}
+}
+
+// NewPreferences is a convenience function that returns a new
+// Preferences object with non-nil maps
+func NewPreferences() *Preferences {
+	return &Preferences{Extensions: make(map[string]runtime.Object)}
+}
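
The constructors above compose naturally when building a kubeconfig in memory. A hedged sketch, assuming the standard k8s.io/client-go import path; every name, the endpoint, and the token below are placeholders:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	// NewConfig pre-populates every map, so entries can be assigned directly.
	cfg := api.NewConfig()

	cluster := api.NewCluster()
	cluster.Server = "https://example.invalid:6443" // placeholder endpoint
	cfg.Clusters["demo"] = cluster

	user := api.NewAuthInfo()
	user.Token = "placeholder-token" // placeholder credential
	cfg.AuthInfos["demo-user"] = user

	ctx := api.NewContext()
	ctx.Cluster = "demo"
	ctx.AuthInfo = "demo-user"
	cfg.Contexts["demo"] = ctx

	cfg.CurrentContext = "demo"
	fmt.Println(cfg.CurrentContext)
}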
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go
new file mode 100644
index 0000000000..6eee281bc6
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+	"sort"
+
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/tools/clientcmd/api"
+)
+
+func Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error {
+	for _, curr := range *in {
+		newCluster := api.NewCluster()
+		if err := Convert_v1_Cluster_To_api_Cluster(&curr.Cluster, newCluster, s); err != nil {
+			return err
+		}
+		if *out == nil {
+			*out = make(map[string]*api.Cluster)
+		}
+		if (*out)[curr.Name] == nil {
+			(*out)[curr.Name] = newCluster
+		} else {
+			return fmt.Errorf("error converting *[]NamedCluster into *map[string]*api.Cluster: duplicate name \"%v\" in list: %v", curr.Name, *in)
+		}
+	}
+	return nil
+}
+
+func Convert_Map_string_To_Pointer_api_Cluster_To_Slice_v1_NamedCluster(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error {
+	allKeys := make([]string, 0, len(*in))
+	for key := range *in {
+		allKeys = append(allKeys, key)
+	}
+	sort.Strings(allKeys)
+
+	for _, key := range allKeys {
+		newCluster := (*in)[key]
+		oldCluster := Cluster{}
+		if err := Convert_api_Cluster_To_v1_Cluster(newCluster, &oldCluster, s); err != nil {
+			return err
+		}
+		namedCluster := NamedCluster{key, oldCluster}
+		*out = append(*out, namedCluster)
+	}
+	return nil
+}
+
+func Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error {
+	for _, curr := range *in {
+		newAuthInfo := api.NewAuthInfo()
+		if err := Convert_v1_AuthInfo_To_api_AuthInfo(&curr.AuthInfo, newAuthInfo, s); err != nil {
+			return err
+		}
+		if *out == nil {
+			*out = make(map[string]*api.AuthInfo)
+		}
+		if (*out)[curr.Name] == nil {
+			(*out)[curr.Name] = newAuthInfo
+		} else {
+			return fmt.Errorf("error converting *[]NamedAuthInfo into *map[string]*api.AuthInfo: duplicate name \"%v\" in list: %v", curr.Name, *in)
+		}
+	}
+	return nil
+}
+
+func Convert_Map_string_To_Pointer_api_AuthInfo_To_Slice_v1_NamedAuthInfo(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error {
+	allKeys := make([]string, 0, len(*in))
+	for key := range *in {
+		allKeys = append(allKeys, key)
+	}
+	sort.Strings(allKeys)
+
+	for _, key := range allKeys {
+		newAuthInfo := (*in)[key]
+		oldAuthInfo := AuthInfo{}
+		if err := Convert_api_AuthInfo_To_v1_AuthInfo(newAuthInfo, &oldAuthInfo, s); err != nil {
+			return err
+		}
+		namedAuthInfo := NamedAuthInfo{key, oldAuthInfo}
+		*out = append(*out, namedAuthInfo)
+	}
+	return nil
+}
+
+func Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error {
+	for _, curr := range *in {
+		newContext := api.NewContext()
+		if err := Convert_v1_Context_To_api_Context(&curr.Context, newContext, s); err != nil {
+			return err
+		}
+		if *out == nil {
+			*out = make(map[string]*api.Context)
+		}
+		if (*out)[curr.Name] == nil {
+			(*out)[curr.Name] = newContext
+		} else {
+			return fmt.Errorf("error converting *[]NamedContext into *map[string]*api.Context: duplicate name \"%v\" in list: %v", curr.Name, *in)
+		}
+	}
+	return nil
+}
+
+func Convert_Map_string_To_Pointer_api_Context_To_Slice_v1_NamedContext(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error {
+	allKeys := make([]string, 0, len(*in))
+	for key := range *in {
+		allKeys = append(allKeys, key)
+	}
+	sort.Strings(allKeys)
+
+	for _, key := range allKeys {
+		newContext := (*in)[key]
+		oldContext := Context{}
+		if err := Convert_api_Context_To_v1_Context(newContext, &oldContext, s); err != nil {
+			return err
+		}
+		namedContext := NamedContext{key, oldContext}
+		*out = append(*out, namedContext)
+	}
+	return nil
+}
+
+func Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error {
+	for _, curr := range *in {
+		var newExtension runtime.Object
+		if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&curr.Extension, &newExtension, s); err != nil {
+			return err
+		}
+		if *out == nil {
+			*out = make(map[string]runtime.Object)
+		}
+		if (*out)[curr.Name] == nil {
+			(*out)[curr.Name] = newExtension
+		} else {
+			return fmt.Errorf("error converting *[]NamedExtension into *map[string]runtime.Object: duplicate name \"%v\" in list: %v", curr.Name, *in)
+		}
+	}
+	return nil
+}
+
+func Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error {
+	allKeys := make([]string, 0, len(*in))
+	for key := range *in {
+		allKeys = append(allKeys, key)
+	}
+	sort.Strings(allKeys)
+
+	for _, key := range allKeys {
+		newExtension := (*in)[key]
+		oldExtension := runtime.RawExtension{}
+		if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&newExtension, &oldExtension, s); err != nil {
+			return err
+		}
+		namedExtension := NamedExtension{key, oldExtension}
+		*out = append(*out, namedExtension)
+	}
+	return nil
+}
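
Each slice/map helper pair above follows the same contract: duplicate names are rejected on the way into the map, and keys are sorted on the way back out so serialization is deterministic. A standalone sketch of that pattern, detached from conversion.Scope and using plain string pairs:

package main

import (
	"fmt"
	"sort"
)

// sliceToMap mirrors the Convert_Slice_* helpers: a repeated name is an error.
func sliceToMap(in [][2]string) (map[string]string, error) {
	out := make(map[string]string, len(in))
	for _, kv := range in {
		if _, dup := out[kv[0]]; dup {
			return nil, fmt.Errorf("duplicate name %q in list", kv[0])
		}
		out[kv[0]] = kv[1]
	}
	return out, nil
}

// mapToSlice mirrors the Convert_Map_* helpers: sorting the keys makes the
// output order stable across runs despite Go's randomized map iteration.
func mapToSlice(in map[string]string) [][2]string {
	keys := make([]string, 0, len(in))
	for k := range in {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	out := make([][2]string, 0, len(keys))
	for _, k := range keys {
		out = append(out, [2]string{k, in[k]})
	}
	return out
}

func main() {
	m, err := sliceToMap([][2]string{{"b", "2"}, {"a", "1"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(mapToSlice(m)) // [[a 1] [b 2]]
}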
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go
new file mode 100644
index 0000000000..bf513dc7c7
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+	return RegisterDefaults(scheme)
+}
+
+func SetDefaults_ExecConfig(exec *ExecConfig) {
+	if len(exec.InteractiveMode) == 0 {
+		switch exec.APIVersion {
+		case "client.authentication.k8s.io/v1beta1", "client.authentication.k8s.io/v1alpha1":
+			// default to IfAvailableExecInteractiveMode for backwards compatibility
+			exec.InteractiveMode = IfAvailableExecInteractiveMode
+		default:
+			// require other versions to explicitly declare whether they want stdin or not
+		}
+	}
+}
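
A hedged sketch of the observable defaulting behavior, assuming the standard (non-vendored) import path for this package:

package main

import (
	"fmt"

	clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
)

func main() {
	// Legacy API versions get the backwards-compatible default.
	legacy := &clientcmdv1.ExecConfig{APIVersion: "client.authentication.k8s.io/v1beta1"}
	clientcmdv1.SetDefaults_ExecConfig(legacy)
	fmt.Println(legacy.InteractiveMode) // IfAvailable

	// Newer versions are left empty so a later validation step can require
	// an explicit choice from the plugin author.
	current := &clientcmdv1.ExecConfig{APIVersion: "client.authentication.k8s.io/v1"}
	clientcmdv1.SetDefaults_ExecConfig(current)
	fmt.Println(current.InteractiveMode == "") // true
}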
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
new file mode 100644
index 0000000000..9e483e9d75
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:conversion-gen=k8s.io/client-go/tools/clientcmd/api
+// +k8s:deepcopy-gen=package
+// +k8s:defaulter-gen=Kind
+
+package v1 // import "k8s.io/client-go/tools/clientcmd/api/v1"
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go
new file mode 100644
index 0000000000..4a4d4a55fb
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is the group version used to register these objects
+// TODO this should be in the "kubeconfig" group
+var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: "v1"}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Config{},
+	)
+	return nil
+}
+
+func (obj *Config) GetObjectKind() schema.ObjectKind { return obj }
+func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) {
+	obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
+}
+func (obj *Config) GroupVersionKind() schema.GroupVersionKind {
+	return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
+}
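
Because Config implements schema.ObjectKind on itself, the legacy Kind/APIVersion strings round-trip through GroupVersionKind. A small sketch, assuming the standard import paths:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
)

func main() {
	cfg := &clientcmdv1.Config{}
	// SetGroupVersionKind writes straight into the legacy string fields;
	// the empty group collapses to a bare "v1" APIVersion.
	cfg.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "Config"})
	fmt.Println(cfg.APIVersion, cfg.Kind)    // v1 Config
	fmt.Println(cfg.GroupVersionKind().Kind) // Config
}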
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
new file mode 100644
index 0000000000..5018a72b1c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
@@ -0,0 +1,271 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// Where possible, json tags match the cli argument names.
+// Top level config objects and all values required for proper functioning are not "omitempty".  Any truly optional piece of config is allowed to be omitted.
+
+// Config holds the information needed to build connections to remote kubernetes clusters as a given user
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type Config struct {
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	// +k8s:conversion-gen=false
+	// +optional
+	Kind string `json:"kind,omitempty"`
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	// +k8s:conversion-gen=false
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty"`
+	// Preferences holds general information to be used for cli interactions
+	Preferences Preferences `json:"preferences"`
+	// Clusters is a list of named cluster configs
+	Clusters []NamedCluster `json:"clusters"`
+	// AuthInfos is a list of named user configs
+	AuthInfos []NamedAuthInfo `json:"users"`
+	// Contexts is a list of named context configs
+	Contexts []NamedContext `json:"contexts"`
+	// CurrentContext is the name of the context that you would like to use by default
+	CurrentContext string `json:"current-context"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+type Preferences struct {
+	// +optional
+	Colors bool `json:"colors,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// Cluster contains information about how to communicate with a kubernetes cluster
+type Cluster struct {
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// TLSServerName is used to check the server certificate. If TLSServerName is empty, the hostname used to contact the server is used.
+	// +optional
+	TLSServerName string `json:"tls-server-name,omitempty"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+	// +optional
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CertificateAuthority is the path to a cert file for the certificate authority.
+	// +optional
+	CertificateAuthority string `json:"certificate-authority,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+	// +optional
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+	// ProxyURL is the URL to the proxy to be used for all requests made by this
+	// client. URLs with "http", "https", and "socks5" schemes are supported.  If
+	// this configuration is not provided or the empty string, the client
+	// attempts to construct a proxy configuration from http_proxy and
+	// https_proxy environment variables. If these environment variables are not
+	// set, the client does not attempt to proxy requests.
+	//
+	// socks5 proxying does not currently support spdy streaming endpoints (exec,
+	// attach, port forward).
+	// +optional
+	ProxyURL string `json:"proxy-url,omitempty"`
+	// DisableCompression allows the client to opt out of response compression for all requests to the server. This is useful
+	// to speed up requests (specifically lists) when client-server network bandwidth is ample, by saving time on
+	// compression (server-side) and decompression (client-side): https://github.com/kubernetes/kubernetes/issues/112296.
+	// +optional
+	DisableCompression bool `json:"disable-compression,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// AuthInfo contains information that describes identity information.  This is used to tell the kubernetes cluster who you are.
+type AuthInfo struct {
+	// ClientCertificate is the path to a client cert file for TLS.
+	// +optional
+	ClientCertificate string `json:"client-certificate,omitempty"`
+	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+	// +optional
+	ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+	// ClientKey is the path to a client key file for TLS.
+	// +optional
+	ClientKey string `json:"client-key,omitempty"`
+	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+	// +optional
+	ClientKeyData []byte `json:"client-key-data,omitempty" datapolicy:"security-key"`
+	// Token is the bearer token for authentication to the kubernetes cluster.
+	// +optional
+	Token string `json:"token,omitempty" datapolicy:"token"`
+	// TokenFile is a pointer to a file that contains a bearer token (as described above).  If both Token and TokenFile are present, Token takes precedence.
+	// +optional
+	TokenFile string `json:"tokenFile,omitempty"`
+	// Impersonate is the username to impersonate.  The name matches the flag.
+	// +optional
+	Impersonate string `json:"as,omitempty"`
+	// ImpersonateUID is the uid to impersonate.
+	// +optional
+	ImpersonateUID string `json:"as-uid,omitempty"`
+	// ImpersonateGroups is the groups to impersonate.
+	// +optional
+	ImpersonateGroups []string `json:"as-groups,omitempty"`
+	// ImpersonateUserExtra contains additional information for impersonated user.
+	// +optional
+	ImpersonateUserExtra map[string][]string `json:"as-user-extra,omitempty"`
+	// Username is the username for basic authentication to the kubernetes cluster.
+	// +optional
+	Username string `json:"username,omitempty"`
+	// Password is the password for basic authentication to the kubernetes cluster.
+	// +optional
+	Password string `json:"password,omitempty" datapolicy:"password"`
+	// AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
+	// +optional
+	AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
+	// Exec specifies a custom exec-based authentication plugin for the kubernetes cluster.
+	// +optional
+	Exec *ExecConfig `json:"exec,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type Context struct {
+	// Cluster is the name of the cluster for this context
+	Cluster string `json:"cluster"`
+	// AuthInfo is the name of the authInfo for this context
+	AuthInfo string `json:"user"`
+	// Namespace is the default namespace to use on unspecified requests
+	// +optional
+	Namespace string `json:"namespace,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// NamedCluster relates nicknames to cluster information
+type NamedCluster struct {
+	// Name is the nickname for this Cluster
+	Name string `json:"name"`
+	// Cluster holds the cluster information
+	Cluster Cluster `json:"cluster"`
+}
+
+// NamedContext relates nicknames to context information
+type NamedContext struct {
+	// Name is the nickname for this Context
+	Name string `json:"name"`
+	// Context holds the context information
+	Context Context `json:"context"`
+}
+
+// NamedAuthInfo relates nicknames to auth information
+type NamedAuthInfo struct {
+	// Name is the nickname for this AuthInfo
+	Name string `json:"name"`
+	// AuthInfo holds the auth information
+	AuthInfo AuthInfo `json:"user"`
+}
+
+// NamedExtension relates nicknames to extension information
+type NamedExtension struct {
+	// Name is the nickname for this Extension
+	Name string `json:"name"`
+	// Extension holds the extension information
+	Extension runtime.RawExtension `json:"extension"`
+}
+
+// AuthProviderConfig holds the configuration for a specified auth provider.
+type AuthProviderConfig struct {
+	Name   string            `json:"name"`
+	Config map[string]string `json:"config"`
+}
+
+// ExecConfig specifies a command to provide client credentials. The command is exec'd
+// and outputs structured stdout holding credentials.
+//
+// See the client.authentication.k8s.io API group for specifications of the exact input
+// and output format.
+type ExecConfig struct {
+	// Command to execute.
+	Command string `json:"command"`
+	// Arguments to pass to the command when executing it.
+	// +optional
+	Args []string `json:"args"`
+	// Env defines additional environment variables to expose to the process. These
+	// are unioned with the host's environment, as well as variables client-go uses
+	// to pass arguments to the plugin.
+	// +optional
+	Env []ExecEnvVar `json:"env"`
+
+	// Preferred input version of the ExecInfo. The returned ExecCredentials MUST use
+	// the same encoding version as the input.
+	APIVersion string `json:"apiVersion,omitempty"`
+
+	// This text is shown to the user when the executable doesn't seem to be
+	// present. For example, `brew install foo-cli` might be a good InstallHint for
+	// foo-cli on Mac OS systems.
+	InstallHint string `json:"installHint,omitempty"`
+
+	// ProvideClusterInfo determines whether or not to provide cluster information,
+	// which could potentially contain very large CA data, to this exec plugin as a
+	// part of the KUBERNETES_EXEC_INFO environment variable. By default, it is set
+	// to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for
+	// reading this environment variable.
+	ProvideClusterInfo bool `json:"provideClusterInfo"`
+
+	// InteractiveMode determines this plugin's relationship with standard input. Valid
+	// values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this
+	// exec plugin wants to use standard input if it is available), or "Always" (this exec
+	// plugin requires standard input to function). See ExecInteractiveMode values for more
+	// details.
+	//
+	// If APIVersion is client.authentication.k8s.io/v1alpha1 or
+	// client.authentication.k8s.io/v1beta1, then this field is optional and defaults
+	// to "IfAvailable" when unset. Otherwise, this field is required.
+	// +optional
+	InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"`
+}
+
+// ExecEnvVar is used for setting environment variables when executing an exec-based
+// credential plugin.
+type ExecEnvVar struct {
+	Name  string `json:"name"`
+	Value string `json:"value"`
+}
+
+// ExecInteractiveMode is a string that describes an exec plugin's relationship with standard input.
+type ExecInteractiveMode string
+
+const (
+	// NeverExecInteractiveMode declares that this exec plugin never needs to use standard
+	// input, and therefore the exec plugin will be run regardless of whether standard input is
+	// available for user input.
+	NeverExecInteractiveMode ExecInteractiveMode = "Never"
+	// IfAvailableExecInteractiveMode declares that this exec plugin would like to use standard input
+	// if it is available, but can still operate if standard input is not available. Therefore, the
+	// exec plugin will be run regardless of whether stdin is available for user input. If standard
+	// input is available for user input, then it will be provided to this exec plugin.
+	IfAvailableExecInteractiveMode ExecInteractiveMode = "IfAvailable"
+	// AlwaysExecInteractiveMode declares that this exec plugin requires standard input in order to
+	// run, and therefore the exec plugin will only be run if standard input is available for user
+	// input. If standard input is not available for user input, then the exec plugin will not be run
+	// and an error will be returned by the exec plugin runner.
+	AlwaysExecInteractiveMode ExecInteractiveMode = "Always"
+)
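
On disk, these v1 types serialize the internal maps as lists of named entries. A hedged sketch of loading that wire format back into the internal api.Config via the existing clientcmd.Load helper; the cluster, user, and server values are placeholders:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

// kubeconfig shows the v1 shape: clusters/users/contexts are named lists.
const kubeconfig = `
apiVersion: v1
kind: Config
clusters:
- name: demo
  cluster:
    server: https://example.invalid:6443
users:
- name: demo-user
  user:
    token: placeholder
contexts:
- name: demo
  context:
    cluster: demo
    user: demo-user
current-context: demo
`

func main() {
	// Load decodes the v1 wire format into the internal api.Config,
	// turning each named list back into a map keyed by name.
	cfg, err := clientcmd.Load([]byte(kubeconfig))
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Clusters["demo"].Server) // https://example.invalid:6443
}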
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
new file mode 100644
index 0000000000..bdedc16669
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
@@ -0,0 +1,458 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	unsafe "unsafe"
+
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	api "k8s.io/client-go/tools/clientcmd/api"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*AuthInfo)(nil), (*api.AuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_AuthInfo_To_api_AuthInfo(a.(*AuthInfo), b.(*api.AuthInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*api.AuthInfo)(nil), (*AuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_api_AuthInfo_To_v1_AuthInfo(a.(*api.AuthInfo), b.(*AuthInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AuthProviderConfig)(nil), (*api.AuthProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_AuthProviderConfig_To_api_AuthProviderConfig(a.(*AuthProviderConfig), b.(*api.AuthProviderConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*api.AuthProviderConfig)(nil), (*AuthProviderConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig(a.(*api.AuthProviderConfig), b.(*AuthProviderConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*api.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Cluster_To_api_Cluster(a.(*Cluster), b.(*api.Cluster), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*api.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_api_Cluster_To_v1_Cluster(a.(*api.Cluster), b.(*Cluster), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Config)(nil), (*api.Config)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Config_To_api_Config(a.(*Config), b.(*api.Config), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*api.Config)(nil), (*Config)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_api_Config_To_v1_Config(a.(*api.Config), b.(*Config), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Context)(nil), (*api.Context)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Context_To_api_Context(a.(*Context), b.(*api.Context), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*api.Context)(nil), (*Context)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_api_Context_To_v1_Context(a.(*api.Context), b.(*Context), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExecConfig)(nil), (*api.ExecConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ExecConfig_To_api_ExecConfig(a.(*ExecConfig), b.(*api.ExecConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*api.ExecConfig)(nil), (*ExecConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_api_ExecConfig_To_v1_ExecConfig(a.(*api.ExecConfig), b.(*ExecConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExecEnvVar)(nil), (*api.ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ExecEnvVar_To_api_ExecEnvVar(a.(*ExecEnvVar), b.(*api.ExecEnvVar), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*api.ExecEnvVar)(nil), (*ExecEnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_api_ExecEnvVar_To_v1_ExecEnvVar(a.(*api.ExecEnvVar), b.(*ExecEnvVar), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Preferences)(nil), (*api.Preferences)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Preferences_To_api_Preferences(a.(*Preferences), b.(*api.Preferences), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*api.Preferences)(nil), (*Preferences)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_api_Preferences_To_v1_Preferences(a.(*api.Preferences), b.(*Preferences), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*map[string]*api.AuthInfo)(nil), (*[]NamedAuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Map_string_To_Pointer_api_AuthInfo_To_Slice_v1_NamedAuthInfo(a.(*map[string]*api.AuthInfo), b.(*[]NamedAuthInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*map[string]*api.Cluster)(nil), (*[]NamedCluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Map_string_To_Pointer_api_Cluster_To_Slice_v1_NamedCluster(a.(*map[string]*api.Cluster), b.(*[]NamedCluster), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*map[string]*api.Context)(nil), (*[]NamedContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Map_string_To_Pointer_api_Context_To_Slice_v1_NamedContext(a.(*map[string]*api.Context), b.(*[]NamedContext), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*map[string]runtime.Object)(nil), (*[]NamedExtension)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(a.(*map[string]runtime.Object), b.(*[]NamedExtension), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]NamedAuthInfo)(nil), (*map[string]*api.AuthInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(a.(*[]NamedAuthInfo), b.(*map[string]*api.AuthInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]NamedCluster)(nil), (*map[string]*api.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(a.(*[]NamedCluster), b.(*map[string]*api.Cluster), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]NamedContext)(nil), (*map[string]*api.Context)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(a.(*[]NamedContext), b.(*map[string]*api.Context), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]NamedExtension)(nil), (*map[string]runtime.Object)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(a.(*[]NamedExtension), b.(*map[string]runtime.Object), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error {
+	out.ClientCertificate = in.ClientCertificate
+	out.ClientCertificateData = *(*[]byte)(unsafe.Pointer(&in.ClientCertificateData))
+	out.ClientKey = in.ClientKey
+	out.ClientKeyData = *(*[]byte)(unsafe.Pointer(&in.ClientKeyData))
+	out.Token = in.Token
+	out.TokenFile = in.TokenFile
+	out.Impersonate = in.Impersonate
+	out.ImpersonateUID = in.ImpersonateUID
+	out.ImpersonateGroups = *(*[]string)(unsafe.Pointer(&in.ImpersonateGroups))
+	out.ImpersonateUserExtra = *(*map[string][]string)(unsafe.Pointer(&in.ImpersonateUserExtra))
+	out.Username = in.Username
+	out.Password = in.Password
+	out.AuthProvider = (*api.AuthProviderConfig)(unsafe.Pointer(in.AuthProvider))
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		*out = new(api.ExecConfig)
+		if err := Convert_v1_ExecConfig_To_api_ExecConfig(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Exec = nil
+	}
+	if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1_AuthInfo_To_api_AuthInfo is an autogenerated conversion function.
+func Convert_v1_AuthInfo_To_api_AuthInfo(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error {
+	return autoConvert_v1_AuthInfo_To_api_AuthInfo(in, out, s)
+}
+
+func autoConvert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error {
+	// INFO: in.LocationOfOrigin opted out of conversion generation
+	out.ClientCertificate = in.ClientCertificate
+	out.ClientCertificateData = *(*[]byte)(unsafe.Pointer(&in.ClientCertificateData))
+	out.ClientKey = in.ClientKey
+	out.ClientKeyData = *(*[]byte)(unsafe.Pointer(&in.ClientKeyData))
+	out.Token = in.Token
+	out.TokenFile = in.TokenFile
+	out.Impersonate = in.Impersonate
+	out.ImpersonateUID = in.ImpersonateUID
+	out.ImpersonateGroups = *(*[]string)(unsafe.Pointer(&in.ImpersonateGroups))
+	out.ImpersonateUserExtra = *(*map[string][]string)(unsafe.Pointer(&in.ImpersonateUserExtra))
+	out.Username = in.Username
+	out.Password = in.Password
+	out.AuthProvider = (*AuthProviderConfig)(unsafe.Pointer(in.AuthProvider))
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		*out = new(ExecConfig)
+		if err := Convert_api_ExecConfig_To_v1_ExecConfig(*in, *out, s); err != nil {
+			return err
+		}
+	} else {
+		out.Exec = nil
+	}
+	if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_api_AuthInfo_To_v1_AuthInfo is an autogenerated conversion function.
+func Convert_api_AuthInfo_To_v1_AuthInfo(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error {
+	return autoConvert_api_AuthInfo_To_v1_AuthInfo(in, out, s)
+}
+
+func autoConvert_v1_AuthProviderConfig_To_api_AuthProviderConfig(in *AuthProviderConfig, out *api.AuthProviderConfig, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config))
+	return nil
+}
+
+// Convert_v1_AuthProviderConfig_To_api_AuthProviderConfig is an autogenerated conversion function.
+func Convert_v1_AuthProviderConfig_To_api_AuthProviderConfig(in *AuthProviderConfig, out *api.AuthProviderConfig, s conversion.Scope) error {
+	return autoConvert_v1_AuthProviderConfig_To_api_AuthProviderConfig(in, out, s)
+}
+
+func autoConvert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in *api.AuthProviderConfig, out *AuthProviderConfig, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Config = *(*map[string]string)(unsafe.Pointer(&in.Config))
+	return nil
+}
+
+// Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig is an autogenerated conversion function.
+func Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in *api.AuthProviderConfig, out *AuthProviderConfig, s conversion.Scope) error {
+	return autoConvert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in, out, s)
+}
+
+func autoConvert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conversion.Scope) error {
+	out.Server = in.Server
+	out.TLSServerName = in.TLSServerName
+	out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
+	out.CertificateAuthority = in.CertificateAuthority
+	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
+	out.ProxyURL = in.ProxyURL
+	out.DisableCompression = in.DisableCompression
+	if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1_Cluster_To_api_Cluster is an autogenerated conversion function.
+func Convert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conversion.Scope) error {
+	return autoConvert_v1_Cluster_To_api_Cluster(in, out, s)
+}
+
+func autoConvert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conversion.Scope) error {
+	// INFO: in.LocationOfOrigin opted out of conversion generation
+	out.Server = in.Server
+	out.TLSServerName = in.TLSServerName
+	out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
+	out.CertificateAuthority = in.CertificateAuthority
+	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
+	out.ProxyURL = in.ProxyURL
+	out.DisableCompression = in.DisableCompression
+	if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_api_Cluster_To_v1_Cluster is an autogenerated conversion function.
+func Convert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conversion.Scope) error {
+	return autoConvert_api_Cluster_To_v1_Cluster(in, out, s)
+}
+
+func autoConvert_v1_Config_To_api_Config(in *Config, out *api.Config, s conversion.Scope) error {
+	// INFO: in.Kind opted out of conversion generation
+	// INFO: in.APIVersion opted out of conversion generation
+	if err := Convert_v1_Preferences_To_api_Preferences(&in.Preferences, &out.Preferences, s); err != nil {
+		return err
+	}
+	if err := Convert_Slice_v1_NamedCluster_To_Map_string_To_Pointer_api_Cluster(&in.Clusters, &out.Clusters, s); err != nil {
+		return err
+	}
+	if err := Convert_Slice_v1_NamedAuthInfo_To_Map_string_To_Pointer_api_AuthInfo(&in.AuthInfos, &out.AuthInfos, s); err != nil {
+		return err
+	}
+	if err := Convert_Slice_v1_NamedContext_To_Map_string_To_Pointer_api_Context(&in.Contexts, &out.Contexts, s); err != nil {
+		return err
+	}
+	out.CurrentContext = in.CurrentContext
+	if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1_Config_To_api_Config is an autogenerated conversion function.
+func Convert_v1_Config_To_api_Config(in *Config, out *api.Config, s conversion.Scope) error {
+	return autoConvert_v1_Config_To_api_Config(in, out, s)
+}
+
+func autoConvert_api_Config_To_v1_Config(in *api.Config, out *Config, s conversion.Scope) error {
+	// INFO: in.Kind opted out of conversion generation
+	// INFO: in.APIVersion opted out of conversion generation
+	if err := Convert_api_Preferences_To_v1_Preferences(&in.Preferences, &out.Preferences, s); err != nil {
+		return err
+	}
+	if err := Convert_Map_string_To_Pointer_api_Cluster_To_Slice_v1_NamedCluster(&in.Clusters, &out.Clusters, s); err != nil {
+		return err
+	}
+	if err := Convert_Map_string_To_Pointer_api_AuthInfo_To_Slice_v1_NamedAuthInfo(&in.AuthInfos, &out.AuthInfos, s); err != nil {
+		return err
+	}
+	if err := Convert_Map_string_To_Pointer_api_Context_To_Slice_v1_NamedContext(&in.Contexts, &out.Contexts, s); err != nil {
+		return err
+	}
+	out.CurrentContext = in.CurrentContext
+	if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_api_Config_To_v1_Config is an autogenerated conversion function.
+func Convert_api_Config_To_v1_Config(in *api.Config, out *Config, s conversion.Scope) error {
+	return autoConvert_api_Config_To_v1_Config(in, out, s)
+}
+
+func autoConvert_v1_Context_To_api_Context(in *Context, out *api.Context, s conversion.Scope) error {
+	out.Cluster = in.Cluster
+	out.AuthInfo = in.AuthInfo
+	out.Namespace = in.Namespace
+	if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1_Context_To_api_Context is an autogenerated conversion function.
+func Convert_v1_Context_To_api_Context(in *Context, out *api.Context, s conversion.Scope) error {
+	return autoConvert_v1_Context_To_api_Context(in, out, s)
+}
+
+func autoConvert_api_Context_To_v1_Context(in *api.Context, out *Context, s conversion.Scope) error {
+	// INFO: in.LocationOfOrigin opted out of conversion generation
+	out.Cluster = in.Cluster
+	out.AuthInfo = in.AuthInfo
+	out.Namespace = in.Namespace
+	if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_api_Context_To_v1_Context is an autogenerated conversion function.
+func Convert_api_Context_To_v1_Context(in *api.Context, out *Context, s conversion.Scope) error {
+	return autoConvert_api_Context_To_v1_Context(in, out, s)
+}
+
+func autoConvert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecConfig, s conversion.Scope) error {
+	out.Command = in.Command
+	out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
+	out.Env = *(*[]api.ExecEnvVar)(unsafe.Pointer(&in.Env))
+	out.APIVersion = in.APIVersion
+	out.InstallHint = in.InstallHint
+	out.ProvideClusterInfo = in.ProvideClusterInfo
+	out.InteractiveMode = api.ExecInteractiveMode(in.InteractiveMode)
+	return nil
+}
+
+// Convert_v1_ExecConfig_To_api_ExecConfig is an autogenerated conversion function.
+func Convert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecConfig, s conversion.Scope) error {
+	return autoConvert_v1_ExecConfig_To_api_ExecConfig(in, out, s)
+}
+
+func autoConvert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecConfig, s conversion.Scope) error {
+	out.Command = in.Command
+	out.Args = *(*[]string)(unsafe.Pointer(&in.Args))
+	out.Env = *(*[]ExecEnvVar)(unsafe.Pointer(&in.Env))
+	out.APIVersion = in.APIVersion
+	out.InstallHint = in.InstallHint
+	out.ProvideClusterInfo = in.ProvideClusterInfo
+	// INFO: in.Config opted out of conversion generation
+	out.InteractiveMode = ExecInteractiveMode(in.InteractiveMode)
+	// INFO: in.StdinUnavailable opted out of conversion generation
+	// INFO: in.StdinUnavailableMessage opted out of conversion generation
+	return nil
+}
+
+// Convert_api_ExecConfig_To_v1_ExecConfig is an autogenerated conversion function.
+func Convert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecConfig, s conversion.Scope) error {
+	return autoConvert_api_ExecConfig_To_v1_ExecConfig(in, out, s)
+}
+
+func autoConvert_v1_ExecEnvVar_To_api_ExecEnvVar(in *ExecEnvVar, out *api.ExecEnvVar, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Value = in.Value
+	return nil
+}
+
+// Convert_v1_ExecEnvVar_To_api_ExecEnvVar is an autogenerated conversion function.
+func Convert_v1_ExecEnvVar_To_api_ExecEnvVar(in *ExecEnvVar, out *api.ExecEnvVar, s conversion.Scope) error {
+	return autoConvert_v1_ExecEnvVar_To_api_ExecEnvVar(in, out, s)
+}
+
+func autoConvert_api_ExecEnvVar_To_v1_ExecEnvVar(in *api.ExecEnvVar, out *ExecEnvVar, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Value = in.Value
+	return nil
+}
+
+// Convert_api_ExecEnvVar_To_v1_ExecEnvVar is an autogenerated conversion function.
+func Convert_api_ExecEnvVar_To_v1_ExecEnvVar(in *api.ExecEnvVar, out *ExecEnvVar, s conversion.Scope) error {
+	return autoConvert_api_ExecEnvVar_To_v1_ExecEnvVar(in, out, s)
+}
+
+func autoConvert_v1_Preferences_To_api_Preferences(in *Preferences, out *api.Preferences, s conversion.Scope) error {
+	out.Colors = in.Colors
+	if err := Convert_Slice_v1_NamedExtension_To_Map_string_To_runtime_Object(&in.Extensions, &out.Extensions, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1_Preferences_To_api_Preferences is an autogenerated conversion function.
+func Convert_v1_Preferences_To_api_Preferences(in *Preferences, out *api.Preferences, s conversion.Scope) error {
+	return autoConvert_v1_Preferences_To_api_Preferences(in, out, s)
+}
+
+func autoConvert_api_Preferences_To_v1_Preferences(in *api.Preferences, out *Preferences, s conversion.Scope) error {
+	out.Colors = in.Colors
+	if err := Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(&in.Extensions, &out.Extensions, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_api_Preferences_To_v1_Preferences is an autogenerated conversion function.
+func Convert_api_Preferences_To_v1_Preferences(in *api.Preferences, out *Preferences, s conversion.Scope) error {
+	return autoConvert_api_Preferences_To_v1_Preferences(in, out, s)
+}
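
The converters above bridge two shapes of the same kubeconfig data: the internal api.Config keys clusters, users, and contexts by name in maps, while the v1 wire form stores them as ordered slices of Named* wrappers. A minimal sketch of how that round trip is typically exercised through this package's Load and Write helpers (not part of the diff; the server URL and names are illustrative):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	cfg := clientcmdapi.NewConfig()
	cfg.Clusters["example"] = &clientcmdapi.Cluster{Server: "https://127.0.0.1:6443"}
	cfg.AuthInfos["admin"] = &clientcmdapi.AuthInfo{Token: "redacted"}
	cfg.Contexts["example"] = &clientcmdapi.Context{Cluster: "example", AuthInfo: "admin"}
	cfg.CurrentContext = "example"

	data, err := clientcmd.Write(*cfg) // map-keyed api form -> v1 named slices -> YAML
	if err != nil {
		panic(err)
	}
	roundTripped, err := clientcmd.Load(data) // YAML -> v1 named slices -> api form
	if err != nil {
		panic(err)
	}
	fmt.Println(roundTripped.CurrentContext) // "example"
}

The slice form keeps on-disk kubeconfigs stable and diff-friendly; the map form makes in-memory lookups by name cheap.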
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..78492598bc
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go
@@ -0,0 +1,349 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
+	*out = *in
+	if in.ClientCertificateData != nil {
+		in, out := &in.ClientCertificateData, &out.ClientCertificateData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClientKeyData != nil {
+		in, out := &in.ClientKeyData, &out.ClientKeyData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.ImpersonateGroups != nil {
+		in, out := &in.ImpersonateGroups, &out.ImpersonateGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ImpersonateUserExtra != nil {
+		in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra
+		*out = make(map[string][]string, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make([]string, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.AuthProvider != nil {
+		in, out := &in.AuthProvider, &out.AuthProvider
+		*out = new(AuthProviderConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		*out = new(ExecConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make([]NamedExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo.
+func (in *AuthInfo) DeepCopy() *AuthInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) {
+	*out = *in
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig.
+func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthProviderConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Cluster) DeepCopyInto(out *Cluster) {
+	*out = *in
+	if in.CertificateAuthorityData != nil {
+		in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make([]NamedExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
+func (in *Cluster) DeepCopy() *Cluster {
+	if in == nil {
+		return nil
+	}
+	out := new(Cluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+	*out = *in
+	in.Preferences.DeepCopyInto(&out.Preferences)
+	if in.Clusters != nil {
+		in, out := &in.Clusters, &out.Clusters
+		*out = make([]NamedCluster, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.AuthInfos != nil {
+		in, out := &in.AuthInfos, &out.AuthInfos
+		*out = make([]NamedAuthInfo, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Contexts != nil {
+		in, out := &in.Contexts, &out.Contexts
+		*out = make([]NamedContext, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make([]NamedExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+	if in == nil {
+		return nil
+	}
+	out := new(Config)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Config) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Context) DeepCopyInto(out *Context) {
+	*out = *in
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make([]NamedExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context.
+func (in *Context) DeepCopy() *Context {
+	if in == nil {
+		return nil
+	}
+	out := new(Context)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecConfig) DeepCopyInto(out *ExecConfig) {
+	*out = *in
+	if in.Args != nil {
+		in, out := &in.Args, &out.Args
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]ExecEnvVar, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig.
+func (in *ExecConfig) DeepCopy() *ExecConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar.
+func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecEnvVar)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedAuthInfo) DeepCopyInto(out *NamedAuthInfo) {
+	*out = *in
+	in.AuthInfo.DeepCopyInto(&out.AuthInfo)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedAuthInfo.
+func (in *NamedAuthInfo) DeepCopy() *NamedAuthInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(NamedAuthInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedCluster) DeepCopyInto(out *NamedCluster) {
+	*out = *in
+	in.Cluster.DeepCopyInto(&out.Cluster)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCluster.
+func (in *NamedCluster) DeepCopy() *NamedCluster {
+	if in == nil {
+		return nil
+	}
+	out := new(NamedCluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedContext) DeepCopyInto(out *NamedContext) {
+	*out = *in
+	in.Context.DeepCopyInto(&out.Context)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedContext.
+func (in *NamedContext) DeepCopy() *NamedContext {
+	if in == nil {
+		return nil
+	}
+	out := new(NamedContext)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NamedExtension) DeepCopyInto(out *NamedExtension) {
+	*out = *in
+	in.Extension.DeepCopyInto(&out.Extension)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedExtension.
+func (in *NamedExtension) DeepCopy() *NamedExtension {
+	if in == nil {
+		return nil
+	}
+	out := new(NamedExtension)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Preferences) DeepCopyInto(out *Preferences) {
+	*out = *in
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make([]NamedExtension, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences.
+func (in *Preferences) DeepCopy() *Preferences {
+	if in == nil {
+		return nil
+	}
+	out := new(Preferences)
+	in.DeepCopyInto(out)
+	return out
+}
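
The value of these generated helpers is mutation isolation: a DeepCopy of a v1 Config shares no nested slices with the original. A small hypothetical illustration (names and URLs invented):

package main

import (
	"fmt"

	clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
)

func main() {
	orig := clientcmdv1.Config{
		Clusters: []clientcmdv1.NamedCluster{
			{Name: "a", Cluster: clientcmdv1.Cluster{Server: "https://a.example"}},
		},
	}

	cp := orig.DeepCopy()
	cp.Clusters[0].Cluster.Server = "https://b.example" // mutate only the copy

	fmt.Println(orig.Clusters[0].Cluster.Server) // still "https://a.example"
}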
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go
new file mode 100644
index 0000000000..6a57decf62
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go
@@ -0,0 +1,43 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulting functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	scheme.AddTypeDefaultingFunc(&Config{}, func(obj interface{}) { SetObjectDefaults_Config(obj.(*Config)) })
+	return nil
+}
+
+func SetObjectDefaults_Config(in *Config) {
+	for i := range in.AuthInfos {
+		a := &in.AuthInfos[i]
+		if a.AuthInfo.Exec != nil {
+			SetDefaults_ExecConfig(a.AuthInfo.Exec)
+		}
+	}
+}
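
A sketch of how generated defaulters like this are typically wired up, assuming the v1 Config type satisfies runtime.Object as it does in client-go: register them on a scheme, and calling Default on a decoded object applies the exec defaults.

package main

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
	clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := clientcmdv1.RegisterDefaults(scheme); err != nil {
		panic(err)
	}
	cfg := &clientcmdv1.Config{
		AuthInfos: []clientcmdv1.NamedAuthInfo{{
			Name:     "u",
			AuthInfo: clientcmdv1.AuthInfo{Exec: &clientcmdv1.ExecConfig{Command: "kubelogin"}},
		}},
	}
	scheme.Default(cfg) // invokes SetObjectDefaults_Config, defaulting each AuthInfo's Exec
}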
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..86e4ddef1b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go
@@ -0,0 +1,328 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package api
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthInfo) DeepCopyInto(out *AuthInfo) {
+	*out = *in
+	if in.ClientCertificateData != nil {
+		in, out := &in.ClientCertificateData, &out.ClientCertificateData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClientKeyData != nil {
+		in, out := &in.ClientKeyData, &out.ClientKeyData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.ImpersonateGroups != nil {
+		in, out := &in.ImpersonateGroups, &out.ImpersonateGroups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.ImpersonateUserExtra != nil {
+		in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra
+		*out = make(map[string][]string, len(*in))
+		for key, val := range *in {
+			var outVal []string
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = make([]string, len(*in))
+				copy(*out, *in)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.AuthProvider != nil {
+		in, out := &in.AuthProvider, &out.AuthProvider
+		*out = new(AuthProviderConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Exec != nil {
+		in, out := &in.Exec, &out.Exec
+		*out = new(ExecConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo.
+func (in *AuthInfo) DeepCopy() *AuthInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) {
+	*out = *in
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig.
+func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthProviderConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Cluster) DeepCopyInto(out *Cluster) {
+	*out = *in
+	if in.CertificateAuthorityData != nil {
+		in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
+func (in *Cluster) DeepCopy() *Cluster {
+	if in == nil {
+		return nil
+	}
+	out := new(Cluster)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+	*out = *in
+	in.Preferences.DeepCopyInto(&out.Preferences)
+	if in.Clusters != nil {
+		in, out := &in.Clusters, &out.Clusters
+		*out = make(map[string]*Cluster, len(*in))
+		for key, val := range *in {
+			var outVal *Cluster
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = new(Cluster)
+				(*in).DeepCopyInto(*out)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.AuthInfos != nil {
+		in, out := &in.AuthInfos, &out.AuthInfos
+		*out = make(map[string]*AuthInfo, len(*in))
+		for key, val := range *in {
+			var outVal *AuthInfo
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = new(AuthInfo)
+				(*in).DeepCopyInto(*out)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Contexts != nil {
+		in, out := &in.Contexts, &out.Contexts
+		*out = make(map[string]*Context, len(*in))
+		for key, val := range *in {
+			var outVal *Context
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				in, out := &val, &outVal
+				*out = new(Context)
+				(*in).DeepCopyInto(*out)
+			}
+			(*out)[key] = outVal
+		}
+	}
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+	if in == nil {
+		return nil
+	}
+	out := new(Config)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Config) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Context) DeepCopyInto(out *Context) {
+	*out = *in
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context.
+func (in *Context) DeepCopy() *Context {
+	if in == nil {
+		return nil
+	}
+	out := new(Context)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecConfig) DeepCopyInto(out *ExecConfig) {
+	*out = *in
+	if in.Args != nil {
+		in, out := &in.Args, &out.Args
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Env != nil {
+		in, out := &in.Env, &out.Env
+		*out = make([]ExecEnvVar, len(*in))
+		copy(*out, *in)
+	}
+	if in.Config != nil {
+		out.Config = in.Config.DeepCopyObject()
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig.
+func (in *ExecConfig) DeepCopy() *ExecConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar.
+func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
+	if in == nil {
+		return nil
+	}
+	out := new(ExecEnvVar)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Preferences) DeepCopyInto(out *Preferences) {
+	*out = *in
+	if in.Extensions != nil {
+		in, out := &in.Extensions, &out.Extensions
+		*out = make(map[string]runtime.Object, len(*in))
+		for key, val := range *in {
+			if val == nil {
+				(*out)[key] = nil
+			} else {
+				(*out)[key] = val.DeepCopyObject()
+			}
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences.
+func (in *Preferences) DeepCopy() *Preferences {
+	if in == nil {
+		return nil
+	}
+	out := new(Preferences)
+	in.DeepCopyInto(out)
+	return out
+}
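
Unlike the v1 variant, the internal api.Config holds maps of pointers, so the generated copier allocates fresh Cluster, AuthInfo, and Context values for each key rather than copying pointer values. A brief hypothetical check of that behavior (names invented):

package main

import (
	"fmt"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	orig := clientcmdapi.NewConfig()
	orig.Clusters["prod"] = &clientcmdapi.Cluster{Server: "https://prod.example"}

	cp := orig.DeepCopy()
	cp.Clusters["prod"].Server = "https://staging.example" // edits a freshly allocated Cluster

	fmt.Println(orig.Clusters["prod"].Server) // still "https://prod.example"
}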
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go
new file mode 100644
index 0000000000..ce951e88d5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+
+	"golang.org/x/term"
+
+	clientauth "k8s.io/client-go/tools/auth"
+)
+
+// AuthLoader is used to build clientauth.Info objects.
+// AuthLoader is used to build clientauth.Info objects.
+type AuthLoader interface {
+	// LoadAuth takes a path to a config file and can then do anything it needs in order to return a valid clientauth.Info
+	LoadAuth(path string) (*clientauth.Info, error)
+}
+
+// default implementation of an AuthLoader
+type defaultAuthLoader struct{}
+
+// LoadAuth for defaultAuthLoader simply delegates to clientauth.LoadFromFile
+func (*defaultAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
+	return clientauth.LoadFromFile(path)
+}
+
+type PromptingAuthLoader struct {
+	reader io.Reader
+}
+
+// LoadAuth parses an AuthInfo object from a file path. It prompts the user and creates the file if it doesn't exist.
+func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
+	// Prompt for user/pass and write a file if none exists.
+	if _, err := os.Stat(path); os.IsNotExist(err) {
+		authPtr, err := a.Prompt()
+		if err != nil {
+			return nil, err
+		}
+		auth := *authPtr
+		data, err := json.Marshal(auth)
+		if err != nil {
+			return &auth, err
+		}
+		err = os.WriteFile(path, data, 0600)
+		return &auth, err
+	}
+	authPtr, err := clientauth.LoadFromFile(path)
+	if err != nil {
+		return nil, err
+	}
+	return authPtr, nil
+}
+
+// Prompt reads the username from the reader and the password from the terminal
+func (a *PromptingAuthLoader) Prompt() (*clientauth.Info, error) {
+	var err error
+	auth := &clientauth.Info{}
+	auth.User, err = promptForString("Username", a.reader, true)
+	if err != nil {
+		return nil, err
+	}
+	auth.Password, err = promptForString("Password", nil, false)
+	if err != nil {
+		return nil, err
+	}
+	return auth, nil
+}
+
+func promptForString(field string, r io.Reader, show bool) (result string, err error) {
+	fmt.Printf("Please enter %s: ", field)
+	if show {
+		_, err = fmt.Fscan(r, &result)
+	} else {
+		var data []byte
+		if term.IsTerminal(int(os.Stdin.Fd())) {
+			data, err = term.ReadPassword(int(os.Stdin.Fd()))
+			result = string(data)
+		} else {
+			return "", fmt.Errorf("error reading input for %s", field)
+		}
+	}
+	return result, err
+}
+
+// NewPromptingAuthLoader returns an AuthLoader that parses an AuthInfo object from a file path, prompting the user and creating the file if it doesn't exist.
+func NewPromptingAuthLoader(reader io.Reader) *PromptingAuthLoader {
+	return &PromptingAuthLoader{reader}
+}
+
+// NewDefaultAuthLoader returns a default implementation of an AuthLoader that only reads from a config file
+func NewDefaultAuthLoader() AuthLoader {
+	return &defaultAuthLoader{}
+}
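
A hypothetical usage sketch for the loaders above: fall back to interactive prompting when the auth file does not yet exist. The path below is illustrative, and the file is created on first use.

package main

import (
	"fmt"
	"log"
	"os"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Prompts for username/password on stdin if the file is missing,
	// then persists what was entered for subsequent runs.
	loader := clientcmd.NewPromptingAuthLoader(os.Stdin)
	info, err := loader.LoadAuth("/tmp/kube-auth.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("loaded auth for user:", info.User)
}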
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
new file mode 100644
index 0000000000..cd0a8649b1
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
@@ -0,0 +1,687 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+	"unicode"
+
+	restclient "k8s.io/client-go/rest"
+	clientauth "k8s.io/client-go/tools/auth"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	"k8s.io/klog/v2"
+)
+
+const (
+	// clusterExtensionKey is reserved in the cluster extensions list for exec plugin config.
+	clusterExtensionKey = "client.authentication.k8s.io/exec"
+)
+
+var (
+	// ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields
+	// DEPRECATED will be replaced
+	ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()}
+	// DefaultClientConfig represents the legacy behavior of this package for defaulting
+	// DEPRECATED will be replaced
+	DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{
+		ClusterDefaults: ClusterDefaults,
+	}, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
+)
+
+// getDefaultServer returns a default setting for DefaultClientConfig
+// DEPRECATED
+func getDefaultServer() string {
+	if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 {
+		return server
+	}
+	return "http://localhost:8080"
+}
+
+// ClientConfig is used to make it easy to get an api server client
+type ClientConfig interface {
+	// RawConfig returns the merged result of all overrides
+	RawConfig() (clientcmdapi.Config, error)
+	// ClientConfig returns a complete client config
+	ClientConfig() (*restclient.Config, error)
+	// Namespace returns the namespace resulting from the merged
+	// result of all overrides and a boolean indicating if it was
+	// overridden
+	Namespace() (string, bool, error)
+	// ConfigAccess returns the rules for loading/persisting the config.
+	ConfigAccess() ConfigAccess
+}
+
+// OverridingClientConfig is used to enable overriding the raw KubeConfig
+type OverridingClientConfig interface {
+	ClientConfig
+	// MergedRawConfig returns the RawConfig merged with all overrides.
+	MergedRawConfig() (clientcmdapi.Config, error)
+}
+
+type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister
+
+type promptedCredentials struct {
+	username string
+	password string `datapolicy:"password"`
+}
+
+// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
+type DirectClientConfig struct {
+	config         clientcmdapi.Config
+	contextName    string
+	overrides      *ConfigOverrides
+	fallbackReader io.Reader
+	configAccess   ConfigAccess
+	// promptedCredentials store the credentials input by the user
+	promptedCredentials promptedCredentials
+}
+
+// NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name
+func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) OverridingClientConfig {
+	return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
+}
+
+// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
+func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) OverridingClientConfig {
+	return &DirectClientConfig{config, contextName, overrides, nil, configAccess, promptedCredentials{}}
+}
+
+// NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags
+func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) OverridingClientConfig {
+	return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess, promptedCredentials{}}
+}
+
+// NewClientConfigFromBytes takes your kubeconfig and gives you back a ClientConfig
+func NewClientConfigFromBytes(configBytes []byte) (OverridingClientConfig, error) {
+	config, err := Load(configBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return &DirectClientConfig{*config, "", &ConfigOverrides{}, nil, nil, promptedCredentials{}}, nil
+}
+
+// RESTConfigFromKubeConfig is a convenience method to give back a restconfig from your kubeconfig bytes.
+// For programmatic access, this is what you want 80% of the time
+func RESTConfigFromKubeConfig(configBytes []byte) (*restclient.Config, error) {
+	clientConfig, err := NewClientConfigFromBytes(configBytes)
+	if err != nil {
+		return nil, err
+	}
+	return clientConfig.ClientConfig()
+}
+
+func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	return config.config, nil
+}
+
+// MergedRawConfig returns the raw kube config merged with the overrides
+func (config *DirectClientConfig) MergedRawConfig() (clientcmdapi.Config, error) {
+	if err := config.ConfirmUsable(); err != nil {
+		return clientcmdapi.Config{}, err
+	}
+	merged := config.config.DeepCopy()
+
+	// set the AuthInfo merged with overrides in the merged config
+	mergedAuthInfo, err := config.getAuthInfo()
+	if err != nil {
+		return clientcmdapi.Config{}, err
+	}
+	mergedAuthInfoName, _ := config.getAuthInfoName()
+	merged.AuthInfos[mergedAuthInfoName] = &mergedAuthInfo
+
+	// set the Context merged with overrides in the merged config
+	mergedContext, err := config.getContext()
+	if err != nil {
+		return clientcmdapi.Config{}, err
+	}
+	mergedContextName, _ := config.getContextName()
+	merged.Contexts[mergedContextName] = &mergedContext
+	merged.CurrentContext = mergedContextName
+
+	// set the Cluster merged with overrides in the merged config
+	configClusterInfo, err := config.getCluster()
+	if err != nil {
+		return clientcmdapi.Config{}, err
+	}
+	configClusterName, _ := config.getClusterName()
+	merged.Clusters[configClusterName] = &configClusterInfo
+	return *merged, nil
+}
+
+// ClientConfig implements ClientConfig
+func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
+	// check that getAuthInfo, getContext, and getCluster do not return an error.
+	// Do this before checking if the current config is usable in the event that an
+	// AuthInfo, Context, or Cluster config with user-defined names are not found.
+	// This provides a user with the immediate cause for error if one is found
+	configAuthInfo, err := config.getAuthInfo()
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = config.getContext()
+	if err != nil {
+		return nil, err
+	}
+
+	configClusterInfo, err := config.getCluster()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := config.ConfirmUsable(); err != nil {
+		return nil, err
+	}
+
+	clientConfig := &restclient.Config{}
+	clientConfig.Host = configClusterInfo.Server
+	if configClusterInfo.ProxyURL != "" {
+		u, err := parseProxyURL(configClusterInfo.ProxyURL)
+		if err != nil {
+			return nil, err
+		}
+		clientConfig.Proxy = http.ProxyURL(u)
+	}
+
+	clientConfig.DisableCompression = configClusterInfo.DisableCompression
+
+	if config.overrides != nil && len(config.overrides.Timeout) > 0 {
+		timeout, err := ParseTimeout(config.overrides.Timeout)
+		if err != nil {
+			return nil, err
+		}
+		clientConfig.Timeout = timeout
+	}
+
+	if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
+		u.RawQuery = ""
+		u.Fragment = ""
+		clientConfig.Host = u.String()
+	}
+	if len(configAuthInfo.Impersonate) > 0 {
+		clientConfig.Impersonate = restclient.ImpersonationConfig{
+			UserName: configAuthInfo.Impersonate,
+			UID:      configAuthInfo.ImpersonateUID,
+			Groups:   configAuthInfo.ImpersonateGroups,
+			Extra:    configAuthInfo.ImpersonateUserExtra,
+		}
+	}
+
+	// only try to read the auth information if we are secure
+	if restclient.IsConfigTransportTLS(*clientConfig) {
+		var err error
+		var persister restclient.AuthProviderConfigPersister
+		if config.configAccess != nil {
+			authInfoName, _ := config.getAuthInfoName()
+			persister = PersisterForUser(config.configAccess, authInfoName)
+		}
+		userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister, configClusterInfo)
+		if err != nil {
+			return nil, err
+		}
+		if err := merge(clientConfig, userAuthPartialConfig); err != nil {
+			return nil, err
+		}
+
+		serverAuthPartialConfig := getServerIdentificationPartialConfig(configClusterInfo)
+		if err := merge(clientConfig, serverAuthPartialConfig); err != nil {
+			return nil, err
+		}
+	}
+
+	return clientConfig, nil
+}
+
+// A clientauth.Info object contains both user identification and server identification.  We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+
+// getServerIdentificationPartialConfig extracts server identification information from configClusterInfo
+// (the final result of command line flags and merged .kubeconfig files).
+func getServerIdentificationPartialConfig(configClusterInfo clientcmdapi.Cluster) *restclient.Config {
+	configClientConfig := &restclient.Config{}
+	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
+	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+	configClientConfig.ServerName = configClusterInfo.TLSServerName
+
+	return configClientConfig
+}
+
+// getUserIdentificationPartialConfig extracts user identification information from configAuthInfo
+// (the final result of command line flags and merged .kubeconfig files);
+// if the information available there is insufficient, it prompts (if possible) for additional information.
+func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
+	mergedConfig := &restclient.Config{}
+
+	// blindly overwrite existing values based on precedence
+	if len(configAuthInfo.Token) > 0 {
+		mergedConfig.BearerToken = configAuthInfo.Token
+		mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
+	} else if len(configAuthInfo.TokenFile) > 0 {
+		tokenBytes, err := os.ReadFile(configAuthInfo.TokenFile)
+		if err != nil {
+			return nil, err
+		}
+		mergedConfig.BearerToken = string(tokenBytes)
+		mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
+	}
+	if len(configAuthInfo.Impersonate) > 0 {
+		mergedConfig.Impersonate = restclient.ImpersonationConfig{
+			UserName: configAuthInfo.Impersonate,
+			UID:      configAuthInfo.ImpersonateUID,
+			Groups:   configAuthInfo.ImpersonateGroups,
+			Extra:    configAuthInfo.ImpersonateUserExtra,
+		}
+	}
+	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+		mergedConfig.CertFile = configAuthInfo.ClientCertificate
+		mergedConfig.CertData = configAuthInfo.ClientCertificateData
+		mergedConfig.KeyFile = configAuthInfo.ClientKey
+		mergedConfig.KeyData = configAuthInfo.ClientKeyData
+	}
+	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+		mergedConfig.Username = configAuthInfo.Username
+		mergedConfig.Password = configAuthInfo.Password
+	}
+	if configAuthInfo.AuthProvider != nil {
+		mergedConfig.AuthProvider = configAuthInfo.AuthProvider
+		mergedConfig.AuthConfigPersister = persistAuthConfig
+	}
+	if configAuthInfo.Exec != nil {
+		mergedConfig.ExecProvider = configAuthInfo.Exec
+		mergedConfig.ExecProvider.InstallHint = cleanANSIEscapeCodes(mergedConfig.ExecProvider.InstallHint)
+		mergedConfig.ExecProvider.Config = configClusterInfo.Extensions[clusterExtensionKey]
+	}
+
+	// if there still isn't enough information to authenticate the user, try prompting
+	if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) {
+		if len(config.promptedCredentials.username) > 0 && len(config.promptedCredentials.password) > 0 {
+			mergedConfig.Username = config.promptedCredentials.username
+			mergedConfig.Password = config.promptedCredentials.password
+			return mergedConfig, nil
+		}
+		prompter := NewPromptingAuthLoader(fallbackReader)
+		promptedAuthInfo, err := prompter.Prompt()
+		if err != nil {
+			return nil, err
+		}
+		promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo)
+		previouslyMergedConfig := mergedConfig
+		mergedConfig = &restclient.Config{}
+		if err := merge(mergedConfig, promptedConfig); err != nil {
+			return nil, err
+		}
+		if err := merge(mergedConfig, previouslyMergedConfig); err != nil {
+			return nil, err
+		}
+		config.promptedCredentials.username = mergedConfig.Username
+		config.promptedCredentials.password = mergedConfig.Password
+	}
+
+	return mergedConfig, nil
+}
+
+// makeUserIdentificationFieldsConfig returns a client.Config capable of being merged for only user identification information
+func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config {
+	config := &restclient.Config{}
+	config.Username = info.User
+	config.Password = info.Password
+	config.CertFile = info.CertFile
+	config.KeyFile = info.KeyFile
+	config.BearerToken = info.BearerToken
+	return config
+}
+
+func canIdentifyUser(config restclient.Config) bool {
+	return len(config.Username) > 0 ||
+		(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
+		len(config.BearerToken) > 0 ||
+		config.AuthProvider != nil ||
+		config.ExecProvider != nil
+}
+
+// cleanANSIEscapeCodes takes an arbitrary string and ensures that there are no
+// ANSI escape sequences that could put the terminal in a weird state (e.g.,
+// "\e[1m" bolds text)
+func cleanANSIEscapeCodes(s string) string {
+	// spaceControlCharacters includes tab, new line, vertical tab, new page, and
+	// carriage return. These are in the unicode.Cc category, but that category also
+	// contains ESC (U+001B) which we don't want.
+	spaceControlCharacters := unicode.RangeTable{
+		R16: []unicode.Range16{
+			{Lo: 0x0009, Hi: 0x000D, Stride: 1},
+		},
+	}
+
+	// Why not make this deny-only (instead of allow-only)? Because unicode.C
+	// contains newline and tab characters that we want.
+	allowedRanges := []*unicode.RangeTable{
+		unicode.L,
+		unicode.M,
+		unicode.N,
+		unicode.P,
+		unicode.S,
+		unicode.Z,
+		&spaceControlCharacters,
+	}
+	builder := strings.Builder{}
+	for _, roon := range s {
+		if unicode.IsOneOf(allowedRanges, roon) {
+			builder.WriteRune(roon) // returns nil error, per go doc
+		} else {
+			fmt.Fprintf(&builder, "%U", roon)
+		}
+	}
+	return builder.String()
+}
+
+// Namespace implements ClientConfig
+func (config *DirectClientConfig) Namespace() (string, bool, error) {
+	if config.overrides != nil && config.overrides.Context.Namespace != "" {
+		// In the event we have an empty config but we do have a namespace override, we should return
+		// the namespace override instead of having config.ConfirmUsable() return an error. This allows
+		// things like in-cluster clients to execute `kubectl get pods --namespace=foo` and have the
+		// --namespace flag honored instead of being ignored.
+		return config.overrides.Context.Namespace, true, nil
+	}
+
+	if err := config.ConfirmUsable(); err != nil {
+		return "", false, err
+	}
+
+	configContext, err := config.getContext()
+	if err != nil {
+		return "", false, err
+	}
+
+	if len(configContext.Namespace) == 0 {
+		return "default", false, nil
+	}
+
+	return configContext.Namespace, false, nil
+}
+
+// ConfigAccess implements ClientConfig
+func (config *DirectClientConfig) ConfigAccess() ConfigAccess {
+	return config.configAccess
+}
+
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable.  There might still be errors in the config,
+// but no errors in the sections requested or referenced.  It does not return early so that it can find as many errors as possible.
+func (config *DirectClientConfig) ConfirmUsable() error {
+	validationErrors := make([]error, 0)
+
+	var contextName string
+	if len(config.contextName) != 0 {
+		contextName = config.contextName
+	} else {
+		contextName = config.config.CurrentContext
+	}
+
+	if len(contextName) > 0 {
+		_, exists := config.config.Contexts[contextName]
+		if !exists {
+			validationErrors = append(validationErrors, &errContextNotFound{contextName})
+		}
+	}
+
+	authInfoName, _ := config.getAuthInfoName()
+	authInfo, _ := config.getAuthInfo()
+	validationErrors = append(validationErrors, validateAuthInfo(authInfoName, authInfo)...)
+	clusterName, _ := config.getClusterName()
+	cluster, _ := config.getCluster()
+	validationErrors = append(validationErrors, validateClusterInfo(clusterName, cluster)...)
+	// when direct client config is specified, and our only error is that no server is defined, we should
+	// return a standard "no config" error
+	if len(validationErrors) == 1 && validationErrors[0] == ErrEmptyCluster {
+		return newErrConfigurationInvalid([]error{ErrEmptyConfig})
+	}
+	return newErrConfigurationInvalid(validationErrors)
+}
+
+// getContextName returns the default, or user-set context name, and a boolean that indicates
+// whether the default context name has been overwritten by a user-set flag, or left as its default value
+func (config *DirectClientConfig) getContextName() (string, bool) {
+	if config.overrides != nil && len(config.overrides.CurrentContext) != 0 {
+		return config.overrides.CurrentContext, true
+	}
+	if len(config.contextName) != 0 {
+		return config.contextName, false
+	}
+
+	return config.config.CurrentContext, false
+}
+
+// getAuthInfoName returns a string containing the current authinfo name for the current context,
+// and a boolean indicating whether the default authInfo name is overwritten by a user-set flag, or
+// left as its default value
+func (config *DirectClientConfig) getAuthInfoName() (string, bool) {
+	if config.overrides != nil && len(config.overrides.Context.AuthInfo) != 0 {
+		return config.overrides.Context.AuthInfo, true
+	}
+	context, _ := config.getContext()
+	return context.AuthInfo, false
+}
+
+// getClusterName returns a string containing the default, or user-set cluster name, and a boolean
+// indicating whether the default clusterName has been overwritten by a user-set flag, or left as
+// its default value
+func (config *DirectClientConfig) getClusterName() (string, bool) {
+	if config.overrides != nil && len(config.overrides.Context.Cluster) != 0 {
+		return config.overrides.Context.Cluster, true
+	}
+	context, _ := config.getContext()
+	return context.Cluster, false
+}
+
+// getContext returns the clientcmdapi.Context, or an error if a required context is not found.
+func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) {
+	contexts := config.config.Contexts
+	contextName, required := config.getContextName()
+
+	mergedContext := clientcmdapi.NewContext()
+	if configContext, exists := contexts[contextName]; exists {
+		if err := merge(mergedContext, configContext); err != nil {
+			return clientcmdapi.Context{}, err
+		}
+	} else if required {
+		return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName)
+	}
+	if config.overrides != nil {
+		if err := merge(mergedContext, &config.overrides.Context); err != nil {
+			return clientcmdapi.Context{}, err
+		}
+	}
+
+	return *mergedContext, nil
+}
+
+// getAuthInfo returns the clientcmdapi.AuthInfo, or an error if a required auth info is not found.
+func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) {
+	authInfos := config.config.AuthInfos
+	authInfoName, required := config.getAuthInfoName()
+
+	mergedAuthInfo := clientcmdapi.NewAuthInfo()
+	if configAuthInfo, exists := authInfos[authInfoName]; exists {
+		if err := merge(mergedAuthInfo, configAuthInfo); err != nil {
+			return clientcmdapi.AuthInfo{}, err
+		}
+	} else if required {
+		return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName)
+	}
+	if config.overrides != nil {
+		if err := merge(mergedAuthInfo, &config.overrides.AuthInfo); err != nil {
+			return clientcmdapi.AuthInfo{}, err
+		}
+	}
+
+	return *mergedAuthInfo, nil
+}
+
+// getCluster returns the clientcmdapi.Cluster, or an error if a required cluster is not found.
+func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) {
+	clusterInfos := config.config.Clusters
+	clusterInfoName, required := config.getClusterName()
+
+	mergedClusterInfo := clientcmdapi.NewCluster()
+	if config.overrides != nil {
+		if err := merge(mergedClusterInfo, &config.overrides.ClusterDefaults); err != nil {
+			return clientcmdapi.Cluster{}, err
+		}
+	}
+	if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
+		if err := merge(mergedClusterInfo, configClusterInfo); err != nil {
+			return clientcmdapi.Cluster{}, err
+		}
+	} else if required {
+		return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName)
+	}
+	if config.overrides != nil {
+		if err := merge(mergedClusterInfo, &config.overrides.ClusterInfo); err != nil {
+			return clientcmdapi.Cluster{}, err
+		}
+	}
+
+	// * An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data
+	// otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set".
+	// * An override of --certificate-authority should also override TLS skip settings and CA data, otherwise existing CA data will take precedence.
+	if config.overrides != nil {
+		caLen := len(config.overrides.ClusterInfo.CertificateAuthority)
+		caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData)
+		if config.overrides.ClusterInfo.InsecureSkipTLSVerify || caLen > 0 || caDataLen > 0 {
+			mergedClusterInfo.InsecureSkipTLSVerify = config.overrides.ClusterInfo.InsecureSkipTLSVerify
+			mergedClusterInfo.CertificateAuthority = config.overrides.ClusterInfo.CertificateAuthority
+			mergedClusterInfo.CertificateAuthorityData = config.overrides.ClusterInfo.CertificateAuthorityData
+		}
+
+		// if the --tls-server-name has been set in overrides, use that value.
+		// if the --server has been set in overrides, then use the value of --tls-server-name specified on the CLI too.  This gives the property
+		// that setting a --server will effectively clear the KUBECONFIG value of tls-server-name if it is specified on the command line which is
+		// usually correct.
+		if config.overrides.ClusterInfo.TLSServerName != "" || config.overrides.ClusterInfo.Server != "" {
+			mergedClusterInfo.TLSServerName = config.overrides.ClusterInfo.TLSServerName
+		}
+	}
+
+	return *mergedClusterInfo, nil
+}
+
+// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment.
+// Can take options overrides for flags explicitly provided to the command inside the cluster container.
+type inClusterClientConfig struct {
+	overrides               *ConfigOverrides
+	inClusterConfigProvider func() (*restclient.Config, error)
+}
+
+var _ ClientConfig = &inClusterClientConfig{}
+
+func (config *inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters")
+}
+
+func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) {
+	inClusterConfigProvider := config.inClusterConfigProvider
+	if inClusterConfigProvider == nil {
+		inClusterConfigProvider = restclient.InClusterConfig
+	}
+
+	icc, err := inClusterConfigProvider()
+	if err != nil {
+		return nil, err
+	}
+
+	// in-cluster configs only take a host, token, or CA file
+	// if any of them were individually provided, overwrite anything else
+	if config.overrides != nil {
+		if server := config.overrides.ClusterInfo.Server; len(server) > 0 {
+			icc.Host = server
+		}
+		if len(config.overrides.AuthInfo.Token) > 0 || len(config.overrides.AuthInfo.TokenFile) > 0 {
+			icc.BearerToken = config.overrides.AuthInfo.Token
+			icc.BearerTokenFile = config.overrides.AuthInfo.TokenFile
+		}
+		if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 {
+			icc.TLSClientConfig.CAFile = certificateAuthorityFile
+		}
+	}
+
+	return icc, nil
+}
+
+func (config *inClusterClientConfig) Namespace() (string, bool, error) {
+	// This way assumes you've set the POD_NAMESPACE environment variable using the downward API.
+	// This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up
+	if ns := os.Getenv("POD_NAMESPACE"); ns != "" {
+		return ns, false, nil
+	}
+
+	// Fall back to the namespace associated with the service account token, if available
+	if data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
+		if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
+			return ns, false, nil
+		}
+	}
+
+	return "default", false, nil
+}
+
+func (config *inClusterClientConfig) ConfigAccess() ConfigAccess {
+	return NewDefaultClientConfigLoadingRules()
+}
+
+// Possible returns true if loading an in-cluster config is possible.
+func (config *inClusterClientConfig) Possible() bool {
+	fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token")
+	return os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
+		os.Getenv("KUBERNETES_SERVICE_PORT") != "" &&
+		err == nil && !fi.IsDir()
+}
+
+// BuildConfigFromFlags is a helper function that builds configs from a master
+// url or a kubeconfig filepath. These are passed in as command line flags for cluster
+// components. Warnings should reflect this usage. If neither masterUrl nor kubeconfigPath
+// is passed in, we fall back to inClusterConfig. If inClusterConfig fails, we fall back
+// to the default config.
+func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) {
+	if kubeconfigPath == "" && masterUrl == "" {
+		klog.Warning("Neither --kubeconfig nor --master was specified.  Using the inClusterConfig.  This might not work.")
+		kubeconfig, err := restclient.InClusterConfig()
+		if err == nil {
+			return kubeconfig, nil
+		}
+		klog.Warning("error creating inClusterConfig, falling back to default config: ", err)
+	}
+	return NewNonInteractiveDeferredLoadingClientConfig(
+		&ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
+		&ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig()
+}
+
+// BuildConfigFromKubeconfigGetter is a helper function that builds configs from a master
+// url and a kubeconfigGetter.
+func BuildConfigFromKubeconfigGetter(masterUrl string, kubeconfigGetter KubeconfigGetter) (*restclient.Config, error) {
+	// TODO: We do not need a DeferredLoader here. Refactor code and see if we can use DirectClientConfig here.
+	cc := NewNonInteractiveDeferredLoadingClientConfig(
+		&ClientConfigGetter{kubeconfigGetter: kubeconfigGetter},
+		&ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}})
+	return cc.ClientConfig()
+}
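
A short sketch of the two entry points this file defines for turning kubeconfig data into a rest.Config; deriving the path from KUBECONFIG is illustrative:

package main

import (
	"log"
	"os"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfigPath := os.Getenv("KUBECONFIG")

	// The "80% of the time" path: kubeconfig bytes already in hand.
	data, err := os.ReadFile(kubeconfigPath)
	if err != nil {
		log.Fatal(err)
	}
	restCfg, err := clientcmd.RESTConfigFromKubeConfig(data)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("API server:", restCfg.Host)

	// Flag-style alternative: empty master URL plus an explicit path,
	// which falls back to in-cluster config when both are empty.
	if _, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath); err != nil {
		log.Fatal(err)
	}
}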
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/config.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/config.go
new file mode 100644
index 0000000000..2cd213ccb3
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/config.go
@@ -0,0 +1,499 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+
+	"k8s.io/klog/v2"
+
+	restclient "k8s.io/client-go/rest"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files
+type ConfigAccess interface {
+	// GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config
+	GetLoadingPrecedence() []string
+	// GetStartingConfig returns the config that subcommands should be operating against.  It may or may not be merged depending on loading rules
+	GetStartingConfig() (*clientcmdapi.Config, error)
+	// GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one.
+	GetDefaultFilename() string
+	// IsExplicitFile indicates whether or not this command is interested in exactly one file.  This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more
+	IsExplicitFile() bool
+	// GetExplicitFile returns the particular file this command is operating against.  This implementation only ever has one, but implementations that handle local, global, and flags may have more
+	GetExplicitFile() string
+}
+
+type PathOptions struct {
+	// GlobalFile is the full path to the file to load as the global (final) option
+	GlobalFile string
+	// EnvVar is the env var name that points to the list of kubeconfig files to load
+	EnvVar string
+	// ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file
+	ExplicitFileFlag string
+
+	// GlobalFileSubpath is an optional value used for displaying help
+	GlobalFileSubpath string
+
+	LoadingRules *ClientConfigLoadingRules
+}
+
+var (
+	// UseModifyConfigLock ensures that access to the kubeconfig file using the ModifyConfig method
+	// is guarded by a lock file.
+	// This variable is intentionally made public so other consumers of this library
+	// can modify its default behavior, but use caution when disabling it, since
+	// this will make your code not thread-safe.
+	UseModifyConfigLock = true
+)
+
+func (o *PathOptions) GetEnvVarFiles() []string {
+	if len(o.EnvVar) == 0 {
+		return []string{}
+	}
+
+	envVarValue := os.Getenv(o.EnvVar)
+	if len(envVarValue) == 0 {
+		return []string{}
+	}
+
+	fileList := filepath.SplitList(envVarValue)
+	// prevent the same path from being loaded multiple times
+	return deduplicate(fileList)
+}
+
+func (o *PathOptions) GetLoadingPrecedence() []string {
+	if o.IsExplicitFile() {
+		return []string{o.GetExplicitFile()}
+	}
+
+	if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
+		return envVarFiles
+	}
+	return []string{o.GlobalFile}
+}
+
+func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) {
+	// don't mutate the original
+	loadingRules := *o.LoadingRules
+	loadingRules.Precedence = o.GetLoadingPrecedence()
+
+	clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{})
+	rawConfig, err := clientConfig.RawConfig()
+	if os.IsNotExist(err) {
+		return clientcmdapi.NewConfig(), nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &rawConfig, nil
+}
+
+func (o *PathOptions) GetDefaultFilename() string {
+	if o.IsExplicitFile() {
+		return o.GetExplicitFile()
+	}
+
+	if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
+		if len(envVarFiles) == 1 {
+			return envVarFiles[0]
+		}
+
+		// if any of the envvar files already exists, return it
+		for _, envVarFile := range envVarFiles {
+			if _, err := os.Stat(envVarFile); err == nil {
+				return envVarFile
+			}
+		}
+
+		// otherwise, return the last one in the list
+		return envVarFiles[len(envVarFiles)-1]
+	}
+
+	return o.GlobalFile
+}
+
+func (o *PathOptions) IsExplicitFile() bool {
+	return len(o.LoadingRules.ExplicitPath) > 0
+}
+
+func (o *PathOptions) GetExplicitFile() string {
+	return o.LoadingRules.ExplicitPath
+}
+
+func NewDefaultPathOptions() *PathOptions {
+	ret := &PathOptions{
+		GlobalFile:       RecommendedHomeFile,
+		EnvVar:           RecommendedConfigPathEnvVar,
+		ExplicitFileFlag: RecommendedConfigPathFlag,
+
+		GlobalFileSubpath: filepath.Join(RecommendedHomeDir, RecommendedFileName),
+
+		LoadingRules: NewDefaultClientConfigLoadingRules(),
+	}
+	ret.LoadingRules.DoNotResolvePaths = true
+
+	return ret
+}
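+
+// A minimal sketch of typical PathOptions consumption (caller-side, assumed
+// rather than mandated here): read the starting config and report which file
+// a modification would be written to.
+//
+//	pathOptions := clientcmd.NewDefaultPathOptions()
+//	startingConfig, err := pathOptions.GetStartingConfig()
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println("changes would be written to:", pathOptions.GetDefaultFilename())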
+
+// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, and uses the LocationOfOrigin if specified or
+// the default destination file to write the results into.  This results in multiple file reads, but it's very easy to follow.
+// Preferences and CurrentContext should always be set in the default destination file.  Since we can't distinguish between empty and missing values
+// (no nil strings), we're forced to handle them separately.  In the kubeconfig cases, newConfig should have at most one difference,
+// which means that this code will only write into a single file.  If you want to relativizePaths, you must provide a fully qualified path in any
+// modified element.
+func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error {
+	if UseModifyConfigLock {
+		possibleSources := configAccess.GetLoadingPrecedence()
+		// sort the possible kubeconfig files so we always "lock" in the same order
+		// to avoid deadlock (note: this can fail w/ symlinks, but... come on).
+		sort.Strings(possibleSources)
+		for _, filename := range possibleSources {
+			if err := lockFile(filename); err != nil {
+				return err
+			}
+			defer unlockFile(filename)
+		}
+	}
+
+	startingConfig, err := configAccess.GetStartingConfig()
+	if err != nil {
+		return err
+	}
+
+	// We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file.
+	// Special case the test for current context and preferences since those always write to the default file.
+	if reflect.DeepEqual(*startingConfig, newConfig) {
+		// nothing to do
+		return nil
+	}
+
+	if startingConfig.CurrentContext != newConfig.CurrentContext {
+		if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil {
+			return err
+		}
+	}
+
+	if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) {
+		if err := writePreferences(configAccess, newConfig.Preferences); err != nil {
+			return err
+		}
+	}
+
+	// Search every cluster, authInfo, and context.  First from new to old for differences, then from old to new for deletions
+	for key, cluster := range newConfig.Clusters {
+		startingCluster, exists := startingConfig.Clusters[key]
+		if !reflect.DeepEqual(cluster, startingCluster) || !exists {
+			destinationFile := cluster.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			t := *cluster
+
+			configToWrite.Clusters[key] = &t
+			configToWrite.Clusters[key].LocationOfOrigin = destinationFile
+			if relativizePaths {
+				if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil {
+					return err
+				}
+			}
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	// seenConfigs stores a map of config source filenames to computed config objects
+	seenConfigs := map[string]*clientcmdapi.Config{}
+
+	for key, context := range newConfig.Contexts {
+		startingContext, exists := startingConfig.Contexts[key]
+		if !reflect.DeepEqual(context, startingContext) || !exists {
+			destinationFile := context.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			// we only obtain a fresh config object from its source file
+			// if we have not seen it already - this prevents us from
+			// reading and writing the same files repeatedly
+			// when multiple / all contexts share the same destination file.
+			configToWrite, seen := seenConfigs[destinationFile]
+			if !seen {
+				var err error
+				configToWrite, err = getConfigFromFile(destinationFile)
+				if err != nil {
+					return err
+				}
+				seenConfigs[destinationFile] = configToWrite
+			}
+
+			configToWrite.Contexts[key] = context
+		}
+	}
+
+	// actually persist config object changes
+	for destinationFile, configToWrite := range seenConfigs {
+		if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+			return err
+		}
+	}
+
+	for key, authInfo := range newConfig.AuthInfos {
+		startingAuthInfo, exists := startingConfig.AuthInfos[key]
+		if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists {
+			destinationFile := authInfo.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			t := *authInfo
+			configToWrite.AuthInfos[key] = &t
+			configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile
+			if relativizePaths {
+				if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil {
+					return err
+				}
+			}
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	for key, cluster := range startingConfig.Clusters {
+		if _, exists := newConfig.Clusters[key]; !exists {
+			destinationFile := cluster.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			delete(configToWrite.Clusters, key)
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	for key, context := range startingConfig.Contexts {
+		if _, exists := newConfig.Contexts[key]; !exists {
+			destinationFile := context.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			delete(configToWrite.Contexts, key)
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	for key, authInfo := range startingConfig.AuthInfos {
+		if _, exists := newConfig.AuthInfos[key]; !exists {
+			destinationFile := authInfo.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			delete(configToWrite.AuthInfos, key)
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
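+
+// A usage sketch for ModifyConfig (surrounding variable names are
+// assumptions): switch the current context and persist only that difference,
+// letting ModifyConfig locate the file that owns CurrentContext.
+//
+//	pathOptions := clientcmd.NewDefaultPathOptions()
+//	config, err := pathOptions.GetStartingConfig()
+//	if err != nil {
+//		return err
+//	}
+//	config.CurrentContext = "my-other-context"
+//	if err := clientcmd.ModifyConfig(pathOptions, *config, false); err != nil {
+//		return err
+//	}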
+
+func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister {
+	return &persister{configAccess, user}
+}
+
+type persister struct {
+	configAccess ConfigAccess
+	user         string
+}
+
+func (p *persister) Persist(config map[string]string) error {
+	newConfig, err := p.configAccess.GetStartingConfig()
+	if err != nil {
+		return err
+	}
+	authInfo, ok := newConfig.AuthInfos[p.user]
+	if ok && authInfo.AuthProvider != nil {
+		authInfo.AuthProvider.Config = config
+		return ModifyConfig(p.configAccess, *newConfig, false)
+	}
+	return nil
+}
+
+// writeCurrentContext takes three possible paths.
+// If newCurrentContext is the same as the startingConfig's current context, then we exit.
+// If newCurrentContext has a value, then that value is written into the default destination file.
+// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file
+func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error {
+	if startingConfig, err := configAccess.GetStartingConfig(); err != nil {
+		return err
+	} else if startingConfig.CurrentContext == newCurrentContext {
+		return nil
+	}
+
+	if configAccess.IsExplicitFile() {
+		file := configAccess.GetExplicitFile()
+		currConfig, err := getConfigFromFile(file)
+		if err != nil {
+			return err
+		}
+		currConfig.CurrentContext = newCurrentContext
+		if err := WriteToFile(*currConfig, file); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	if len(newCurrentContext) > 0 {
+		destinationFile := configAccess.GetDefaultFilename()
+		config, err := getConfigFromFile(destinationFile)
+		if err != nil {
+			return err
+		}
+		config.CurrentContext = newCurrentContext
+
+		if err := WriteToFile(*config, destinationFile); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	// we're supposed to be clearing the current context.  We need to find the first spot in the chain that is setting it and clear it
+	for _, file := range configAccess.GetLoadingPrecedence() {
+		if _, err := os.Stat(file); err == nil {
+			currConfig, err := getConfigFromFile(file)
+			if err != nil {
+				return err
+			}
+
+			if len(currConfig.CurrentContext) > 0 {
+				currConfig.CurrentContext = newCurrentContext
+				if err := WriteToFile(*currConfig, file); err != nil {
+					return err
+				}
+
+				return nil
+			}
+		}
+	}
+
+	return errors.New("no config found to write context")
+}
+
+func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error {
+	if startingConfig, err := configAccess.GetStartingConfig(); err != nil {
+		return err
+	} else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) {
+		return nil
+	}
+
+	if configAccess.IsExplicitFile() {
+		file := configAccess.GetExplicitFile()
+		currConfig, err := getConfigFromFile(file)
+		if err != nil {
+			return err
+		}
+		currConfig.Preferences = newPrefs
+		if err := WriteToFile(*currConfig, file); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	for _, file := range configAccess.GetLoadingPrecedence() {
+		currConfig, err := getConfigFromFile(file)
+		if err != nil {
+			return err
+		}
+
+		if !reflect.DeepEqual(currConfig.Preferences, newPrefs) {
+			currConfig.Preferences = newPrefs
+			if err := WriteToFile(*currConfig, file); err != nil {
+				return err
+			}
+
+			return nil
+		}
+	}
+
+	return errors.New("no config found to write preferences")
+}
+
+// getConfigFromFile tries to read a kubeconfig file and if it can't, returns an error.  One exception, missing files result in empty configs, not an error.
+func getConfigFromFile(filename string) (*clientcmdapi.Config, error) {
+	config, err := LoadFromFile(filename)
+	if err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	if config == nil {
+		config = clientcmdapi.NewConfig()
+	}
+	return config, nil
+}
+
+// GetConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit.  One exception, missing files result in empty configs, not an exit
+func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config {
+	config, err := getConfigFromFile(filename)
+	if err != nil {
+		klog.FatalDepth(1, err)
+	}
+
+	return config
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/doc.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/doc.go
new file mode 100644
index 0000000000..424311ee12
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/doc.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package clientcmd provides one stop shopping for building a working client from a fixed config,
+from a .kubeconfig file, from command line flags, or from any merged combination.
+
+Sample usage from merged .kubeconfig files (local directory, home directory)
+
+	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+	// if you want to change the loading rules (which files in which order), you can do so here
+
+	configOverrides := &clientcmd.ConfigOverrides{}
+	// if you want to change override values or bind them to flags, there are methods to help you
+
+	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
+	config, err := kubeConfig.ClientConfig()
+	if err != nil {
+		// Do something
+	}
+	clientset, err := kubernetes.NewForConfig(config)
+	// ...
+*/
+package clientcmd // import "k8s.io/client-go/tools/clientcmd"
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/flag.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/flag.go
new file mode 100644
index 0000000000..8d60d201cd
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/flag.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+// transformingStringValue implements pflag.Value to store string values,
+// allowing transforming them while being set
+type transformingStringValue struct {
+	target      *string
+	transformer func(string) (string, error)
+}
+
+func newTransformingStringValue(val string, target *string, transformer func(string) (string, error)) *transformingStringValue {
+	*target = val
+	return &transformingStringValue{
+		target:      target,
+		transformer: transformer,
+	}
+}
+
+func (t *transformingStringValue) Set(val string) error {
+	val, err := t.transformer(val)
+	if err != nil {
+		return err
+	}
+	*t.target = val
+	return nil
+}
+
+func (t *transformingStringValue) Type() string {
+	return "string"
+}
+
+func (t *transformingStringValue) String() string {
+	return string(*t.target)
+}
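+
+// A small sketch of how a transforming flag value behaves; the lowercasing
+// transformer is illustrative, not something this file defines.
+//
+//	var target string
+//	v := newTransformingStringValue("", &target, func(s string) (string, error) {
+//		return strings.ToLower(s), nil
+//	})
+//	_ = v.Set("MiXeD") // target is now "mixed"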
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/helpers.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/helpers.go
new file mode 100644
index 0000000000..d7572232aa
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/helpers.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"fmt"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+// ParseTimeout returns a parsed duration from a string
+// A duration string value must be a non-negative integer, optionally followed by a corresponding time unit (s|m|h).
+func ParseTimeout(duration string) (time.Duration, error) {
+	if i, err := strconv.ParseInt(duration, 10, 64); err == nil && i >= 0 {
+		return (time.Duration(i) * time.Second), nil
+	}
+	if requestTimeout, err := time.ParseDuration(duration); err == nil {
+		return requestTimeout, nil
+	}
+	return 0, fmt.Errorf("Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)")
+}
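+
+// A sketch of the two accepted input forms (plain seconds and Go duration
+// syntax); the literals below are illustrative inputs, not special cases.
+//
+//	d, _ := ParseTimeout("30") // 30 * time.Second
+//	d, _ = ParseTimeout("2m")  // 2 * time.Minute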
+
+func parseProxyURL(proxyURL string) (*url.URL, error) {
+	u, err := url.Parse(proxyURL)
+	if err != nil {
+		return nil, fmt.Errorf("could not parse: %v", proxyURL)
+	}
+
+	switch u.Scheme {
+	case "http", "https", "socks5":
+	default:
+		return nil, fmt.Errorf("unsupported scheme %q, must be http, https, or socks5", u.Scheme)
+	}
+	return u, nil
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/loader.go
new file mode 100644
index 0000000000..c900e5fd19
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/loader.go
@@ -0,0 +1,676 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	goruntime "runtime"
+	"strings"
+
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	restclient "k8s.io/client-go/rest"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
+	"k8s.io/client-go/util/homedir"
+)
+
+const (
+	RecommendedConfigPathFlag   = "kubeconfig"
+	RecommendedConfigPathEnvVar = "KUBECONFIG"
+	RecommendedHomeDir          = ".kube"
+	RecommendedFileName         = "config"
+	RecommendedSchemaName       = "schema"
+)
+
+var (
+	RecommendedConfigDir  = filepath.Join(homedir.HomeDir(), RecommendedHomeDir)
+	RecommendedHomeFile   = filepath.Join(RecommendedConfigDir, RecommendedFileName)
+	RecommendedSchemaFile = filepath.Join(RecommendedConfigDir, RecommendedSchemaName)
+)
+
+// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions.
+// Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make
+// sure existing config files are migrated to their new locations properly.
+func currentMigrationRules() map[string]string {
+	var oldRecommendedHomeFileName string
+	if goruntime.GOOS == "windows" {
+		oldRecommendedHomeFileName = RecommendedFileName
+	} else {
+		oldRecommendedHomeFileName = ".kubeconfig"
+	}
+	return map[string]string{
+		RecommendedHomeFile: filepath.Join(os.Getenv("HOME"), RecommendedHomeDir, oldRecommendedHomeFileName),
+	}
+}
+
+type ClientConfigLoader interface {
+	ConfigAccess
+	// IsDefaultConfig returns true if the returned config matches the defaults.
+	IsDefaultConfig(*restclient.Config) bool
+	// Load returns the latest config
+	Load() (*clientcmdapi.Config, error)
+}
+
+type KubeconfigGetter func() (*clientcmdapi.Config, error)
+
+type ClientConfigGetter struct {
+	kubeconfigGetter KubeconfigGetter
+}
+
+// ClientConfigGetter implements the ClientConfigLoader interface.
+var _ ClientConfigLoader = &ClientConfigGetter{}
+
+func (g *ClientConfigGetter) Load() (*clientcmdapi.Config, error) {
+	return g.kubeconfigGetter()
+}
+
+func (g *ClientConfigGetter) GetLoadingPrecedence() []string {
+	return nil
+}
+func (g *ClientConfigGetter) GetStartingConfig() (*clientcmdapi.Config, error) {
+	return g.kubeconfigGetter()
+}
+func (g *ClientConfigGetter) GetDefaultFilename() string {
+	return ""
+}
+func (g *ClientConfigGetter) IsExplicitFile() bool {
+	return false
+}
+func (g *ClientConfigGetter) GetExplicitFile() string {
+	return ""
+}
+func (g *ClientConfigGetter) IsDefaultConfig(config *restclient.Config) bool {
+	return false
+}
+
+// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config.
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath.
+// ExplicitPath is special: if a user specifically requests a certain file be used, an error is reported if that file is not present
+type ClientConfigLoadingRules struct {
+	ExplicitPath string
+	Precedence   []string
+
+	// MigrationRules is a map of destination files to source files.  If a destination file is not present, then the source file is checked.
+	// If the source file is present, then it is copied to the destination file BEFORE any further loading happens.
+	MigrationRules map[string]string
+
+	// DoNotResolvePaths indicates whether or not to resolve paths with respect to the originating files.  This is phrased as a negative so
+	// that a default object that doesn't set this will usually get the behavior it wants.
+	DoNotResolvePaths bool
+
+	// DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration.
+	// This should match the overrides passed in to ClientConfig loader.
+	DefaultClientConfig ClientConfig
+
+	// WarnIfAllMissing indicates whether to warn when all of the configuration files pointed to by the KUBECONFIG
+	// environment variable are missing. In that case, the user is warned about the missing files.
+	WarnIfAllMissing bool
+
+	// Warner is the warning log callback to use in case of missing files.
+	Warner WarningHandler
+}
+
+// WarningHandler allows setting the logging function to use
+type WarningHandler func(error)
+
+func (handler WarningHandler) Warn(err error) {
+	if handler == nil {
+		klog.V(1).Info(err)
+	} else {
+		handler(err)
+	}
+}
+
+type MissingConfigError struct {
+	Missing []string
+}
+
+func (c MissingConfigError) Error() string {
+	return fmt.Sprintf("Config not found: %s", strings.Join(c.Missing, ", "))
+}
+
+// ClientConfigLoadingRules implements the ClientConfigLoader interface.
+var _ ClientConfigLoader = &ClientConfigLoadingRules{}
+
+// NewDefaultClientConfigLoadingRules returns a ClientConfigLoadingRules object with default fields filled in.  You are not required to
+// use this constructor
+func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules {
+	chain := []string{}
+	warnIfAllMissing := false
+
+	envVarFiles := os.Getenv(RecommendedConfigPathEnvVar)
+	if len(envVarFiles) != 0 {
+		fileList := filepath.SplitList(envVarFiles)
+		// prevent the same path from being loaded multiple times
+		chain = append(chain, deduplicate(fileList)...)
+		warnIfAllMissing = true
+
+	} else {
+		chain = append(chain, RecommendedHomeFile)
+	}
+
+	return &ClientConfigLoadingRules{
+		Precedence:       chain,
+		MigrationRules:   currentMigrationRules(),
+		WarnIfAllMissing: warnIfAllMissing,
+	}
+}
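+
+// A caller-side sketch (flag plumbing assumed): prefer an explicit
+// --kubeconfig path when one was given, otherwise keep the default
+// KUBECONFIG/home-file precedence chain built above.
+//
+//	rules := clientcmd.NewDefaultClientConfigLoadingRules()
+//	rules.ExplicitPath = kubeconfigFlagValue // may be empty
+//	config, err := rules.Load()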
+
+// Load starts by running the MigrationRules and then
+// takes the loading rules and returns a Config object based on the following rules.
+//
+//	If ExplicitPath is set, return the unmerged explicit file
+//	Otherwise, return a merged config based on the Precedence slice
+//
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins, and that map key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside of a map, the value WILL be changed.
+// This results in some odd-looking logic to merge in one direction, merge in the other, and then merge the two.
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used.  Even
+// non-conflicting entries from the second file's "red-user" are discarded.
+// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
+// and only absolute file paths are returned.
+func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
+	if err := rules.Migrate(); err != nil {
+		return nil, err
+	}
+
+	errlist := []error{}
+	missingList := []string{}
+
+	kubeConfigFiles := []string{}
+
+	// Make sure a file we were explicitly told to use exists
+	if len(rules.ExplicitPath) > 0 {
+		if _, err := os.Stat(rules.ExplicitPath); os.IsNotExist(err) {
+			return nil, err
+		}
+		kubeConfigFiles = append(kubeConfigFiles, rules.ExplicitPath)
+
+	} else {
+		kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
+	}
+
+	kubeconfigs := []*clientcmdapi.Config{}
+	// read and cache the config files so that we only look at them once
+	for _, filename := range kubeConfigFiles {
+		if len(filename) == 0 {
+			// no work to do
+			continue
+		}
+
+		config, err := LoadFromFile(filename)
+
+		if os.IsNotExist(err) {
+			// skip missing files
+			// Add to the missing list to produce a warning
+			missingList = append(missingList, filename)
+			continue
+		}
+
+		if err != nil {
+			errlist = append(errlist, fmt.Errorf("error loading config file \"%s\": %v", filename, err))
+			continue
+		}
+
+		kubeconfigs = append(kubeconfigs, config)
+	}
+
+	if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 {
+		rules.Warner.Warn(MissingConfigError{Missing: missingList})
+	}
+
+	// first merge all of our maps
+	mapConfig := clientcmdapi.NewConfig()
+
+	for _, kubeconfig := range kubeconfigs {
+		if err := merge(mapConfig, kubeconfig); err != nil {
+			return nil, err
+		}
+	}
+
+	// merge all of the struct values in the reverse order so that priority is given correctly
+	// errors are not added to the list the second time
+	nonMapConfig := clientcmdapi.NewConfig()
+	for i := len(kubeconfigs) - 1; i >= 0; i-- {
+		kubeconfig := kubeconfigs[i]
+		if err := merge(nonMapConfig, kubeconfig); err != nil {
+			return nil, err
+		}
+	}
+
+	// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
+	// get the values we expect.
+	config := clientcmdapi.NewConfig()
+	if err := merge(config, mapConfig); err != nil {
+		return nil, err
+	}
+	if err := merge(config, nonMapConfig); err != nil {
+		return nil, err
+	}
+
+	if rules.ResolvePaths() {
+		if err := ResolveLocalPaths(config); err != nil {
+			errlist = append(errlist, err)
+		}
+	}
+	return config, utilerrors.NewAggregate(errlist)
+}
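+
+// A sketch of the first-file-wins map semantics described above, assuming two
+// hypothetical files on the precedence chain that both define cluster "prod":
+// only the first file's "prod" stanza is used.
+//
+//	rules := &clientcmd.ClientConfigLoadingRules{
+//		Precedence: []string{"/tmp/kubeconfig-a", "/tmp/kubeconfig-b"},
+//	}
+//	merged, err := rules.Load()
+//	// merged.Clusters["prod"] comes entirely from kubeconfig-a if present there.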
+
+// Migrate uses the MigrationRules map.  If a destination file is not present, then the source file is checked.
+// If the source file is present, then it is copied to the destination file BEFORE any further loading happens.
+func (rules *ClientConfigLoadingRules) Migrate() error {
+	if rules.MigrationRules == nil {
+		return nil
+	}
+
+	for destination, source := range rules.MigrationRules {
+		if _, err := os.Stat(destination); err == nil {
+			// if the destination already exists, do nothing
+			continue
+		} else if os.IsPermission(err) {
+			// if we can't access the file, skip it
+			continue
+		} else if !os.IsNotExist(err) {
+			// if we had an error other than non-existence, fail
+			return err
+		}
+
+		if sourceInfo, err := os.Stat(source); err != nil {
+			if os.IsNotExist(err) || os.IsPermission(err) {
+				// if the source file doesn't exist or we can't access it, there's no work to do.
+				continue
+			}
+
+			// if we had an error other than non-existence, fail
+			return err
+		} else if sourceInfo.IsDir() {
+			return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination)
+		}
+
+		data, err := os.ReadFile(source)
+		if err != nil {
+			return err
+		}
+		// destination is created with mode 0666 before umask
+		err = os.WriteFile(destination, data, 0666)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// GetLoadingPrecedence implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string {
+	if len(rules.ExplicitPath) > 0 {
+		return []string{rules.ExplicitPath}
+	}
+
+	return rules.Precedence
+}
+
+// GetStartingConfig implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetStartingConfig() (*clientcmdapi.Config, error) {
+	clientConfig := NewNonInteractiveDeferredLoadingClientConfig(rules, &ConfigOverrides{})
+	rawConfig, err := clientConfig.RawConfig()
+	if os.IsNotExist(err) {
+		return clientcmdapi.NewConfig(), nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &rawConfig, nil
+}
+
+// GetDefaultFilename implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetDefaultFilename() string {
+	// Explicit file if we have one.
+	if rules.IsExplicitFile() {
+		return rules.GetExplicitFile()
+	}
+	// Otherwise, first existing file from precedence.
+	for _, filename := range rules.GetLoadingPrecedence() {
+		if _, err := os.Stat(filename); err == nil {
+			return filename
+		}
+	}
+	// If none exists, use the first from precedence.
+	if len(rules.Precedence) > 0 {
+		return rules.Precedence[0]
+	}
+	return ""
+}
+
+// IsExplicitFile implements ConfigAccess
+func (rules *ClientConfigLoadingRules) IsExplicitFile() bool {
+	return len(rules.ExplicitPath) > 0
+}
+
+// GetExplicitFile implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetExplicitFile() string {
+	return rules.ExplicitPath
+}
+
+// IsDefaultConfig returns true if the provided configuration matches the default
+func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *restclient.Config) bool {
+	if rules.DefaultClientConfig == nil {
+		return false
+	}
+	defaultConfig, err := rules.DefaultClientConfig.ClientConfig()
+	if err != nil {
+		return false
+	}
+	return reflect.DeepEqual(config, defaultConfig)
+}
+
+// LoadFromFile takes a filename and deserializes the contents into a Config object
+func LoadFromFile(filename string) (*clientcmdapi.Config, error) {
+	kubeconfigBytes, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	config, err := Load(kubeconfigBytes)
+	if err != nil {
+		return nil, err
+	}
+	klog.V(6).Infoln("Config loaded from file: ", filename)
+
+	// set LocationOfOrigin on every Cluster, User, and Context
+	for key, obj := range config.AuthInfos {
+		obj.LocationOfOrigin = filename
+		config.AuthInfos[key] = obj
+	}
+	for key, obj := range config.Clusters {
+		obj.LocationOfOrigin = filename
+		config.Clusters[key] = obj
+	}
+	for key, obj := range config.Contexts {
+		obj.LocationOfOrigin = filename
+		config.Contexts[key] = obj
+	}
+
+	if config.AuthInfos == nil {
+		config.AuthInfos = map[string]*clientcmdapi.AuthInfo{}
+	}
+	if config.Clusters == nil {
+		config.Clusters = map[string]*clientcmdapi.Cluster{}
+	}
+	if config.Contexts == nil {
+		config.Contexts = map[string]*clientcmdapi.Context{}
+	}
+
+	return config, nil
+}
+
+// Load takes a byte slice and deserializes the contents into a Config object.
+// Encapsulates deserialization without assuming the source is a file.
+func Load(data []byte) (*clientcmdapi.Config, error) {
+	config := clientcmdapi.NewConfig()
+	// if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
+	if len(data) == 0 {
+		return config, nil
+	}
+	decoded, _, err := clientcmdlatest.Codec.Decode(data, &schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config)
+	if err != nil {
+		return nil, err
+	}
+	return decoded.(*clientcmdapi.Config), nil
+}
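+
+// A sketch of decoding raw bytes without going through a file (the path is an
+// assumed example); Load round-trips with Write further below.
+//
+//	data, err := os.ReadFile("/tmp/kubeconfig")
+//	if err != nil {
+//		return err
+//	}
+//	config, err := clientcmd.Load(data)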
+
+// WriteToFile serializes the config to yaml and writes it out to a file.  If the file is not present, it is created with mode 0600.  If it is
+// present, its contents are stomped.
+func WriteToFile(config clientcmdapi.Config, filename string) error {
+	content, err := Write(config)
+	if err != nil {
+		return err
+	}
+	dir := filepath.Dir(filename)
+	if _, err := os.Stat(dir); os.IsNotExist(err) {
+		if err = os.MkdirAll(dir, 0755); err != nil {
+			return err
+		}
+	}
+
+	if err := os.WriteFile(filename, content, 0600); err != nil {
+		return err
+	}
+	return nil
+}
+
+func lockFile(filename string) error {
+	// TODO: find a way to do this with actual file locks. Will
+	// probably need a separate solution for Windows and Linux.
+
+	// Make sure the dir exists before we try to create a lock file.
+	dir := filepath.Dir(filename)
+	if _, err := os.Stat(dir); os.IsNotExist(err) {
+		if err = os.MkdirAll(dir, 0755); err != nil {
+			return err
+		}
+	}
+	f, err := os.OpenFile(lockName(filename), os.O_CREATE|os.O_EXCL, 0)
+	if err != nil {
+		return err
+	}
+	f.Close()
+	return nil
+}
+
+func unlockFile(filename string) error {
+	return os.Remove(lockName(filename))
+}
+
+func lockName(filename string) string {
+	return filename + ".lock"
+}
+
+// Write serializes the config to yaml.
+// Encapsulates serialization without assuming the destination is a file.
+func Write(config clientcmdapi.Config) ([]byte, error) {
+	return runtime.Encode(clientcmdlatest.Codec, &config)
+}
+
+func (rules ClientConfigLoadingRules) ResolvePaths() bool {
+	return !rules.DoNotResolvePaths
+}
+
+// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin.
+// This cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without
+// modification of its contents.
+func ResolveLocalPaths(config *clientcmdapi.Config) error {
+	for _, cluster := range config.Clusters {
+		if len(cluster.LocationOfOrigin) == 0 {
+			continue
+		}
+		base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
+		if err != nil {
+			return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err)
+		}
+
+		if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil {
+			return err
+		}
+	}
+	for _, authInfo := range config.AuthInfos {
+		if len(authInfo.LocationOfOrigin) == 0 {
+			continue
+		}
+		base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
+		if err != nil {
+			return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
+		}
+
+		if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// RelativizeClusterLocalPaths first absolutizes the paths by calling ResolveLocalPaths.  This assumes that any NEW path is already
+// absolute, but any existing path will be resolved relative to LocationOfOrigin
+func RelativizeClusterLocalPaths(cluster *clientcmdapi.Cluster) error {
+	if len(cluster.LocationOfOrigin) == 0 {
+		return fmt.Errorf("no location of origin for %s", cluster.Server)
+	}
+	base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
+	if err != nil {
+		return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err)
+	}
+
+	if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil {
+		return err
+	}
+	if err := RelativizePathWithNoBacksteps(GetClusterFileReferences(cluster), base); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// RelativizeAuthInfoLocalPaths first absolutizes the paths by calling ResolveLocalPaths.  This assumes that any NEW path is already
+// absolute, but any existing path will be resolved relative to LocationOfOrigin
+func RelativizeAuthInfoLocalPaths(authInfo *clientcmdapi.AuthInfo) error {
+	if len(authInfo.LocationOfOrigin) == 0 {
+		return fmt.Errorf("no location of origin for %v", authInfo)
+	}
+	base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
+	if err != nil {
+		return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
+	}
+
+	if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil {
+		return err
+	}
+	if err := RelativizePathWithNoBacksteps(GetAuthInfoFileReferences(authInfo), base); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func RelativizeConfigPaths(config *clientcmdapi.Config, base string) error {
+	return RelativizePathWithNoBacksteps(GetConfigFileReferences(config), base)
+}
+
+func ResolveConfigPaths(config *clientcmdapi.Config, base string) error {
+	return ResolvePaths(GetConfigFileReferences(config), base)
+}
+
+func GetConfigFileReferences(config *clientcmdapi.Config) []*string {
+	refs := []*string{}
+
+	for _, cluster := range config.Clusters {
+		refs = append(refs, GetClusterFileReferences(cluster)...)
+	}
+	for _, authInfo := range config.AuthInfos {
+		refs = append(refs, GetAuthInfoFileReferences(authInfo)...)
+	}
+
+	return refs
+}
+
+func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string {
+	return []*string{&cluster.CertificateAuthority}
+}
+
+func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string {
+	s := []*string{&authInfo.ClientCertificate, &authInfo.ClientKey, &authInfo.TokenFile}
+	// Only resolve exec command if it isn't PATH based.
+	if authInfo.Exec != nil && strings.ContainsRune(authInfo.Exec.Command, filepath.Separator) {
+		s = append(s, &authInfo.Exec.Command)
+	}
+	return s
+}
+
+// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
+func ResolvePaths(refs []*string, base string) error {
+	for _, ref := range refs {
+		// Don't resolve empty paths
+		if len(*ref) > 0 {
+			// Don't resolve absolute paths
+			if !filepath.IsAbs(*ref) {
+				*ref = filepath.Join(base, *ref)
+			}
+		}
+	}
+	return nil
+}
+
+// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory, as long as they do not require backsteps.
+// Any path requiring a backstep is left as-is as long as it is absolute.  Any non-absolute path that can't be relativized produces an error
+func RelativizePathWithNoBacksteps(refs []*string, base string) error {
+	for _, ref := range refs {
+		// Don't relativize empty paths
+		if len(*ref) > 0 {
+			rel, err := MakeRelative(*ref, base)
+			if err != nil {
+				return err
+			}
+
+			// if we have a backstep, don't mess with the path
+			if strings.HasPrefix(rel, "../") {
+				if filepath.IsAbs(*ref) {
+					continue
+				}
+
+				return fmt.Errorf("%v requires backsteps and is not absolute", *ref)
+			}
+
+			*ref = rel
+		}
+	}
+	return nil
+}
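+
+// A sketch of the path helpers on an assumed reference: resolve a relative
+// path against the kubeconfig's directory, then relativize it back.
+//
+//	ca := "certs/ca.crt"
+//	_ = ResolvePaths([]*string{&ca}, "/home/user/.kube")
+//	// ca == "/home/user/.kube/certs/ca.crt"
+//	_ = RelativizePathWithNoBacksteps([]*string{&ca}, "/home/user/.kube")
+//	// ca == "certs/ca.crt" again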
+
+func MakeRelative(path, base string) (string, error) {
+	if len(path) > 0 {
+		rel, err := filepath.Rel(base, path)
+		if err != nil {
+			return path, err
+		}
+		return rel, nil
+	}
+	return path, nil
+}
+
+// deduplicate removes any duplicated values and returns a new slice, keeping the order unchanged
+func deduplicate(s []string) []string {
+	encountered := map[string]bool{}
+	ret := make([]string, 0)
+	for i := range s {
+		if encountered[s[i]] {
+			continue
+		}
+		encountered[s[i]] = true
+		ret = append(ret, s[i])
+	}
+	return ret
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/merge.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/merge.go
new file mode 100644
index 0000000000..3d74e60292
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/merge.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// recursively merges src into dst:
+// - non-pointer struct fields with any exported fields are recursively merged
+// - non-pointer struct fields with only unexported fields prefer src if the field is non-zero
+// - maps are shallow merged with src keys taking priority over dst
+// - non-zero src fields encountered during recursion that are not maps or structs overwrite and recursion stops
+func merge[T any](dst, src *T) error {
+	if dst == nil {
+		return fmt.Errorf("cannot merge into nil pointer")
+	}
+	if src == nil {
+		return nil
+	}
+	return mergeValues(nil, reflect.ValueOf(dst).Elem(), reflect.ValueOf(src).Elem())
+}
+
+func mergeValues(fieldNames []string, dst, src reflect.Value) error {
+	dstType := dst.Type()
+	// no-op if we can't read the src
+	if !src.IsValid() {
+		return nil
+	}
+	// sanity check types match
+	if srcType := src.Type(); dstType != srcType {
+		return fmt.Errorf("cannot merge mismatched types (%s, %s) at %s", dstType, srcType, strings.Join(fieldNames, "."))
+	}
+
+	switch dstType.Kind() {
+	case reflect.Struct:
+		if hasExportedField(dstType) {
+			// recursively merge
+			for i, n := 0, dstType.NumField(); i < n; i++ {
+				if err := mergeValues(append(fieldNames, dstType.Field(i).Name), dst.Field(i), src.Field(i)); err != nil {
+					return err
+				}
+			}
+		} else if dst.CanSet() {
+			// If all fields are unexported, overwrite with src.
+			// Using src.IsZero() would make more sense but that's not what mergo did.
+			dst.Set(src)
+		}
+
+	case reflect.Map:
+		if dst.CanSet() && !src.IsZero() {
+			// initialize dst if needed
+			if dst.IsZero() {
+				dst.Set(reflect.MakeMap(dstType))
+			}
+			// shallow-merge overwriting dst keys with src keys
+			for _, mapKey := range src.MapKeys() {
+				dst.SetMapIndex(mapKey, src.MapIndex(mapKey))
+			}
+		}
+
+	case reflect.Slice:
+		if dst.CanSet() && src.Len() > 0 {
+			// overwrite dst with non-empty src slice
+			dst.Set(src)
+		}
+
+	case reflect.Pointer:
+		if dst.CanSet() && !src.IsZero() {
+			// overwrite dst with non-zero values for other types
+			if dstType.Elem().Kind() == reflect.Struct {
+				// use struct pointer as-is
+				dst.Set(src)
+			} else {
+				// shallow-copy non-struct pointer (interfaces, primitives, etc)
+				dst.Set(reflect.New(dstType.Elem()))
+				dst.Elem().Set(src.Elem())
+			}
+		}
+
+	default:
+		if dst.CanSet() && !src.IsZero() {
+			// overwrite dst with non-zero values for other types
+			dst.Set(src)
+		}
+	}
+
+	return nil
+}
+
+// hasExportedField returns true if the given type has any exported fields,
+// or if it has any anonymous/embedded struct fields with exported fields
+func hasExportedField(dstType reflect.Type) bool {
+	for i, n := 0, dstType.NumField(); i < n; i++ {
+		field := dstType.Field(i)
+		if field.Anonymous && field.Type.Kind() == reflect.Struct {
+			if hasExportedField(dstType.Field(i).Type) {
+				return true
+			}
+		} else if len(field.PkgPath) == 0 {
+			return true
+		}
+	}
+	return false
+}
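+
+// A sketch of the merge semantics on a toy type (the type is illustrative,
+// not from this package): map keys from src win, while zero-valued src fields
+// leave dst untouched.
+//
+//	type conf struct {
+//		Name   string
+//		Labels map[string]string
+//	}
+//	dst := &conf{Name: "a", Labels: map[string]string{"k": "old"}}
+//	src := &conf{Labels: map[string]string{"k": "new"}}
+//	_ = merge(dst, src)
+//	// dst.Name == "a" (src.Name was zero); dst.Labels["k"] == "new"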
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
new file mode 100644
index 0000000000..0fc2fd0a0c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go
@@ -0,0 +1,172 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"io"
+	"sync"
+
+	"k8s.io/klog/v2"
+
+	restclient "k8s.io/client-go/rest"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a client config loader.
+// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that
+// the most recent rules are used.  This is useful in cases where you bind flags to loading rule parameters before
+// the parse happens and you want your calling code to be ignorant of how the values are being mutated, to avoid
+// passing extraneous information down a call stack.
+type DeferredLoadingClientConfig struct {
+	loader         ClientConfigLoader
+	overrides      *ConfigOverrides
+	fallbackReader io.Reader
+
+	clientConfig ClientConfig
+	loadingLock  sync.Mutex
+
+	// provided for testing
+	icc InClusterConfig
+}
+
+// InClusterConfig abstracts details of whether the client is running in a cluster for testing.
+type InClusterConfig interface {
+	ClientConfig
+	Possible() bool
+}
+
+// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed loader and overrides
+func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig {
+	return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}}
+}
+
+// NewInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed loader and overrides, plus a fallback auth reader
+func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig {
+	return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader}
+}
+
+func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) {
+	config.loadingLock.Lock()
+	defer config.loadingLock.Unlock()
+
+	if config.clientConfig != nil {
+		return config.clientConfig, nil
+	}
+	mergedConfig, err := config.loader.Load()
+	if err != nil {
+		return nil, err
+	}
+
+	var currentContext string
+	if config.overrides != nil {
+		currentContext = config.overrides.CurrentContext
+	}
+	if config.fallbackReader != nil {
+		config.clientConfig = NewInteractiveClientConfig(*mergedConfig, currentContext, config.overrides, config.fallbackReader, config.loader)
+	} else {
+		config.clientConfig = NewNonInteractiveClientConfig(*mergedConfig, currentContext, config.overrides, config.loader)
+	}
+	return config.clientConfig, nil
+}
+
+func (config *DeferredLoadingClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	mergedConfig, err := config.createClientConfig()
+	if err != nil {
+		return clientcmdapi.Config{}, err
+	}
+
+	return mergedConfig.RawConfig()
+}
+
+// ClientConfig implements ClientConfig
+func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, error) {
+	mergedClientConfig, err := config.createClientConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	// load the configuration and return on non-empty errors and if the
+	// content differs from the default config
+	mergedConfig, err := mergedClientConfig.ClientConfig()
+	switch {
+	case err != nil:
+		if !IsEmptyConfig(err) {
+			// return on any error except empty config
+			return nil, err
+		}
+	case mergedConfig != nil:
+		// the configuration is valid, but if this is equal to the defaults we should try
+		// in-cluster configuration
+		if !config.loader.IsDefaultConfig(mergedConfig) {
+			return mergedConfig, nil
+		}
+	}
+
+	// check for in-cluster configuration and use it
+	if config.icc.Possible() {
+		klog.V(4).Infof("Using in-cluster configuration")
+		return config.icc.ClientConfig()
+	}
+
+	// return the result of the merged client config
+	return mergedConfig, err
+}
+
+// Namespace implements ClientConfig
+func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) {
+	mergedKubeConfig, err := config.createClientConfig()
+	if err != nil {
+		return "", false, err
+	}
+
+	ns, overridden, err := mergedKubeConfig.Namespace()
+	// if we get an error and it is not empty config, or if the merged config defined an explicit namespace, or
+	// if in-cluster config is not possible, return immediately
+	if (err != nil && !IsEmptyConfig(err)) || overridden || !config.icc.Possible() {
+		// return on any error except empty config
+		return ns, overridden, err
+	}
+
+	if len(ns) > 0 {
+		// if we got a non-default namespace from the kubeconfig, use it
+		if ns != "default" {
+			return ns, false, nil
+		}
+
+		// if we got a default namespace, determine whether it was explicit or implicit
+		if raw, err := mergedKubeConfig.RawConfig(); err == nil {
+			// determine the current context
+			currentContext := raw.CurrentContext
+			if config.overrides != nil && len(config.overrides.CurrentContext) > 0 {
+				currentContext = config.overrides.CurrentContext
+			}
+			if context := raw.Contexts[currentContext]; context != nil && len(context.Namespace) > 0 {
+				return ns, false, nil
+			}
+		}
+	}
+
+	klog.V(4).Infof("Using in-cluster namespace")
+
+	// allow the namespace from the service account token directory to be used.
+	return config.icc.Namespace()
+}
+
+// ConfigAccess implements ClientConfig
+func (config *DeferredLoadingClientConfig) ConfigAccess() ConfigAccess {
+	return config.loader
+}
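+
+// A caller-side sketch of the deferred loading flow (flag wiring assumed):
+// build the deferred config first, let flag parsing mutate the rules and
+// overrides it holds, and only then resolve it.
+//
+//	rules := clientcmd.NewDefaultClientConfigLoadingRules()
+//	overrides := &clientcmd.ConfigOverrides{}
+//	deferred := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides)
+//	// ... parse flags that mutate rules/overrides ...
+//	restConfig, err := deferred.ClientConfig() // may fall back to in-cluster config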
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/overrides.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/overrides.go
new file mode 100644
index 0000000000..483e515320
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/overrides.go
@@ -0,0 +1,263 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/spf13/pflag"
+
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+// ConfigOverrides holds values that should override whatever information is pulled from the actual Config object.  You can't
+// simply use an actual Config object, because Configs hold maps, but overrides are restricted to "at most one"
+type ConfigOverrides struct {
+	AuthInfo clientcmdapi.AuthInfo
+	// ClusterDefaults are applied before the configured cluster info is loaded.
+	ClusterDefaults clientcmdapi.Cluster
+	ClusterInfo     clientcmdapi.Cluster
+	Context         clientcmdapi.Context
+	CurrentContext  string
+	Timeout         string
+}
+
+// ConfigOverrideFlags holds the flag names to be used for binding command line flags. Notice that this structure tightly
+// corresponds to ConfigOverrides
+type ConfigOverrideFlags struct {
+	AuthOverrideFlags    AuthOverrideFlags
+	ClusterOverrideFlags ClusterOverrideFlags
+	ContextOverrideFlags ContextOverrideFlags
+	CurrentContext       FlagInfo
+	Timeout              FlagInfo
+}
+
+// AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects
+type AuthOverrideFlags struct {
+	ClientCertificate FlagInfo
+	ClientKey         FlagInfo
+	Token             FlagInfo
+	Impersonate       FlagInfo
+	ImpersonateUID    FlagInfo
+	ImpersonateGroups FlagInfo
+	Username          FlagInfo
+	Password          FlagInfo
+}
+
+// ContextOverrideFlags holds the flag names to be used for binding command line flags for Context objects
+type ContextOverrideFlags struct {
+	ClusterName  FlagInfo
+	AuthInfoName FlagInfo
+	Namespace    FlagInfo
+}
+
+// ClusterOverrideFlags holds the flag names to be used for binding command line flags for Cluster objects
+type ClusterOverrideFlags struct {
+	APIServer             FlagInfo
+	APIVersion            FlagInfo
+	CertificateAuthority  FlagInfo
+	InsecureSkipTLSVerify FlagInfo
+	TLSServerName         FlagInfo
+	ProxyURL              FlagInfo
+	DisableCompression    FlagInfo
+}
+
+// FlagInfo contains information about how to register a flag.  This struct is useful if you want to provide a way for an extender to
+// get back a set of recommended flag names, descriptions, and defaults, but allow for customization by an extender.  This makes for
+// coherent extension, without full prescription
+type FlagInfo struct {
+	// LongName is the long string for a flag.  If this is empty, then the flag will not be bound
+	LongName string
+	// ShortName is the single character for a flag.  If this is empty, then there will be no short flag
+	ShortName string
+	// Default is the default value for the flag
+	Default string
+	// Description is the description for the flag
+	Description string
+}
+
+// AddSecretAnnotation adds a "classified" annotation to the flag, marking its value as secret.
+func (f FlagInfo) AddSecretAnnotation(flags *pflag.FlagSet) FlagInfo {
+	flags.SetAnnotation(f.LongName, "classified", []string{"true"})
+	return f
+}
+
+// BindStringFlag binds the flag based on the provided info.  If LongName == "", nothing is registered
+func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) FlagInfo {
+	// you can't register a flag without a long name
+	if len(f.LongName) > 0 {
+		flags.StringVarP(target, f.LongName, f.ShortName, f.Default, f.Description)
+	}
+	return f
+}
+
+// BindTransformingStringFlag binds the flag based on the provided info.  If LongName == "", nothing is registered
+func (f FlagInfo) BindTransformingStringFlag(flags *pflag.FlagSet, target *string, transformer func(string) (string, error)) FlagInfo {
+	// you can't register a flag without a long name
+	if len(f.LongName) > 0 {
+		flags.VarP(newTransformingStringValue(f.Default, target, transformer), f.LongName, f.ShortName, f.Description)
+	}
+	return f
+}
+
+// BindStringArrayFlag binds the flag based on the provided info.  If LongName == "", nothing is registered
+func (f FlagInfo) BindStringArrayFlag(flags *pflag.FlagSet, target *[]string) FlagInfo {
+	// you can't register a flag without a long name
+	if len(f.LongName) > 0 {
+		sliceVal := []string{}
+		if len(f.Default) > 0 {
+			sliceVal = []string{f.Default}
+		}
+		flags.StringArrayVarP(target, f.LongName, f.ShortName, sliceVal, f.Description)
+	}
+	return f
+}
+
+// BindBoolFlag binds the flag based on the provided info.  If LongName == "", nothing is registered
+func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) FlagInfo {
+	// you can't register a flag without a long name
+	if len(f.LongName) > 0 {
+		// try to parse Default as a bool.  If it fails, assume false
+		boolVal, err := strconv.ParseBool(f.Default)
+		if err != nil {
+			boolVal = false
+		}
+
+		flags.BoolVarP(target, f.LongName, f.ShortName, boolVal, f.Description)
+	}
+	return f
+}
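+
+// Illustrative sketch (not part of the upstream file): binding a single
+// hypothetical "region" flag through FlagInfo. The flag set would normally
+// come from a cobra command or similar.
+//
+//	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
+//	var region string
+//	fi := FlagInfo{LongName: "region", ShortName: "r", Default: "us-east-1", Description: "cloud region"}
+//	fi.BindStringFlag(fs, &region)
+//	_ = fs.Parse([]string{"--region=eu-west-1"}) // region == "eu-west-1"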
+
+const (
+	FlagClusterName        = "cluster"
+	FlagAuthInfoName       = "user"
+	FlagContext            = "context"
+	FlagNamespace          = "namespace"
+	FlagAPIServer          = "server"
+	FlagTLSServerName      = "tls-server-name"
+	FlagInsecure           = "insecure-skip-tls-verify"
+	FlagCertFile           = "client-certificate"
+	FlagKeyFile            = "client-key"
+	FlagCAFile             = "certificate-authority"
+	FlagEmbedCerts         = "embed-certs"
+	FlagBearerToken        = "token"
+	FlagImpersonate        = "as"
+	FlagImpersonateUID     = "as-uid"
+	FlagImpersonateGroup   = "as-group"
+	FlagUsername           = "username"
+	FlagPassword           = "password"
+	FlagTimeout            = "request-timeout"
+	FlagProxyURL           = "proxy-url"
+	FlagDisableCompression = "disable-compression"
+)
+
+// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags {
+	return ConfigOverrideFlags{
+		AuthOverrideFlags:    RecommendedAuthOverrideFlags(prefix),
+		ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix),
+		ContextOverrideFlags: RecommendedContextOverrideFlags(prefix),
+
+		CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"},
+		Timeout:        FlagInfo{prefix + FlagTimeout, "", "0", "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests."},
+	}
+}
+
+// RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags {
+	return AuthOverrideFlags{
+		ClientCertificate: FlagInfo{prefix + FlagCertFile, "", "", "Path to a client certificate file for TLS"},
+		ClientKey:         FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS"},
+		Token:             FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server"},
+		Impersonate:       FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation"},
+		ImpersonateUID:    FlagInfo{prefix + FlagImpersonateUID, "", "", "UID to impersonate for the operation"},
+		ImpersonateGroups: FlagInfo{prefix + FlagImpersonateGroup, "", "", "Group to impersonate for the operation, this flag can be repeated to specify multiple groups."},
+		Username:          FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server"},
+		Password:          FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server"},
+	}
+}
+
+// RecommendedClusterOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags {
+	return ClusterOverrideFlags{
+		APIServer:             FlagInfo{prefix + FlagAPIServer, "", "", "The address and port of the Kubernetes API server"},
+		CertificateAuthority:  FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert file for the certificate authority"},
+		InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure"},
+		TLSServerName:         FlagInfo{prefix + FlagTLSServerName, "", "", "If provided, this name will be used to validate server certificate. If this is not provided, hostname used to contact the server is used."},
+		ProxyURL:              FlagInfo{prefix + FlagProxyURL, "", "", "If provided, this URL will be used to connect via proxy"},
+		DisableCompression:    FlagInfo{prefix + FlagDisableCompression, "", "", "If true, opt-out of response compression for all requests to the server"},
+	}
+}
+
+// RecommendedContextOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags {
+	return ContextOverrideFlags{
+		ClusterName:  FlagInfo{prefix + FlagClusterName, "", "", "The name of the kubeconfig cluster to use"},
+		AuthInfoName: FlagInfo{prefix + FlagAuthInfoName, "", "", "The name of the kubeconfig user to use"},
+		Namespace:    FlagInfo{prefix + FlagNamespace, "n", "", "If present, the namespace scope for this CLI request"},
+	}
+}
+
+// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables
+func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) {
+	BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags)
+	BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags)
+	BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags)
+	flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext)
+	flagNames.Timeout.BindStringFlag(flags, &overrides.Timeout)
+}
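+
+// Illustrative sketch (not part of the upstream file): wiring the recommended
+// override flags into a flag set and feeding the result to a deferred-loading
+// client config. NewDefaultClientConfigLoadingRules and
+// NewNonInteractiveDeferredLoadingClientConfig are defined elsewhere in this package.
+//
+//	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
+//	overrides := &ConfigOverrides{}
+//	BindOverrideFlags(overrides, fs, RecommendedConfigOverrideFlags(""))
+//	_ = fs.Parse([]string{"--namespace=ns/demo"}) // prefix is stripped; overrides.Context.Namespace == "demo"
+//	clientConfig := NewNonInteractiveDeferredLoadingClientConfig(NewDefaultClientConfigLoadingRules(), overrides)
+//	_ = clientConfig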
+
+// BindAuthInfoFlags is a convenience method to bind the specified flags to their associated variables
+func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, flagNames AuthOverrideFlags) {
+	flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate).AddSecretAnnotation(flags)
+	flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey).AddSecretAnnotation(flags)
+	flagNames.Token.BindStringFlag(flags, &authInfo.Token).AddSecretAnnotation(flags)
+	flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate).AddSecretAnnotation(flags)
+	flagNames.ImpersonateUID.BindStringFlag(flags, &authInfo.ImpersonateUID).AddSecretAnnotation(flags)
+	flagNames.ImpersonateGroups.BindStringArrayFlag(flags, &authInfo.ImpersonateGroups).AddSecretAnnotation(flags)
+	flagNames.Username.BindStringFlag(flags, &authInfo.Username).AddSecretAnnotation(flags)
+	flagNames.Password.BindStringFlag(flags, &authInfo.Password).AddSecretAnnotation(flags)
+}
+
+// BindClusterFlags is a convenience method to bind the specified flags to their associated variables
+func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, flagNames ClusterOverrideFlags) {
+	flagNames.APIServer.BindStringFlag(flags, &clusterInfo.Server)
+	flagNames.CertificateAuthority.BindStringFlag(flags, &clusterInfo.CertificateAuthority)
+	flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify)
+	flagNames.TLSServerName.BindStringFlag(flags, &clusterInfo.TLSServerName)
+	flagNames.ProxyURL.BindStringFlag(flags, &clusterInfo.ProxyURL)
+	flagNames.DisableCompression.BindBoolFlag(flags, &clusterInfo.DisableCompression)
+}
+
+// BindContextFlags is a convenience method to bind the specified flags to their associated variables
+func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) {
+	flagNames.ClusterName.BindStringFlag(flags, &contextInfo.Cluster)
+	flagNames.AuthInfoName.BindStringFlag(flags, &contextInfo.AuthInfo)
+	flagNames.Namespace.BindTransformingStringFlag(flags, &contextInfo.Namespace, RemoveNamespacesPrefix)
+}
+
+// RemoveNamespacesPrefix is a transformer that strips "ns/", "namespace/" and "namespaces/" prefixes case-insensitively
+func RemoveNamespacesPrefix(value string) (string, error) {
+	for _, prefix := range []string{"namespaces/", "namespace/", "ns/"} {
+		if len(value) > len(prefix) && strings.EqualFold(value[0:len(prefix)], prefix) {
+			value = value[len(prefix):]
+			break
+		}
+	}
+	return value, nil
+}
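+
+// For illustration (not part of the upstream file): RemoveNamespacesPrefix
+// maps "ns/kube-system" and "Namespaces/kube-system" to "kube-system", but a
+// bare prefix such as "ns/" is returned unchanged, because the value must be
+// strictly longer than the prefix for stripping to occur.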
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/validation.go
new file mode 100644
index 0000000000..088972ef65
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/clientcmd/validation.go
@@ -0,0 +1,371 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"reflect"
+	"strings"
+
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/validation"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+var (
+	ErrNoContext   = errors.New("no context chosen")
+	ErrEmptyConfig = NewEmptyConfigError("no configuration has been provided, try setting KUBERNETES_MASTER environment variable")
+	// message is for consistency with old behavior
+	ErrEmptyCluster = errors.New("cluster has no server defined")
+)
+
+// NewEmptyConfigError returns an error wrapping the given message which IsEmptyConfig() will recognize as an empty config error
+func NewEmptyConfigError(message string) error {
+	return &errEmptyConfig{message}
+}
+
+type errEmptyConfig struct {
+	message string
+}
+
+func (e *errEmptyConfig) Error() string {
+	return e.message
+}
+
+type errContextNotFound struct {
+	ContextName string
+}
+
+func (e *errContextNotFound) Error() string {
+	return fmt.Sprintf("context was not found for specified context: %v", e.ContextName)
+}
+
+// IsContextNotFound returns a boolean indicating whether the error is known to
+// report that a context was not found
+func IsContextNotFound(err error) bool {
+	if err == nil {
+		return false
+	}
+	if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext {
+		return true
+	}
+	return strings.Contains(err.Error(), "context was not found for specified context")
+}
+
+// IsEmptyConfig returns true if the provided error indicates the provided configuration
+// is empty.
+func IsEmptyConfig(err error) bool {
+	switch t := err.(type) {
+	case errConfigurationInvalid:
+		if len(t) != 1 {
+			return false
+		}
+		_, ok := t[0].(*errEmptyConfig)
+		return ok
+	}
+	_, ok := err.(*errEmptyConfig)
+	return ok
+}
+
+// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
+type errConfigurationInvalid []error
+
+// errConfigurationInvalid implements error and Aggregate
+var _ error = errConfigurationInvalid{}
+var _ utilerrors.Aggregate = errConfigurationInvalid{}
+
+func newErrConfigurationInvalid(errs []error) error {
+	switch len(errs) {
+	case 0:
+		return nil
+	default:
+		return errConfigurationInvalid(errs)
+	}
+}
+
+// Error implements the error interface
+func (e errConfigurationInvalid) Error() string {
+	return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error())
+}
+
+// Errors implements the utilerrors.Aggregate interface
+func (e errConfigurationInvalid) Errors() []error {
+	return e
+}
+
+// Is implements the utilerrors.Aggregate interface
+func (e errConfigurationInvalid) Is(target error) bool {
+	return e.visit(func(err error) bool {
+		return errors.Is(err, target)
+	})
+}
+
+func (e errConfigurationInvalid) visit(f func(err error) bool) bool {
+	for _, err := range e {
+		switch err := err.(type) {
+		case errConfigurationInvalid:
+			if match := err.visit(f); match {
+				return match
+			}
+		case utilerrors.Aggregate:
+			for _, nestedErr := range err.Errors() {
+				if match := f(nestedErr); match {
+					return match
+				}
+			}
+		default:
+			if match := f(err); match {
+				return match
+			}
+		}
+	}
+
+	return false
+}
+
+// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid.
+func IsConfigurationInvalid(err error) bool {
+	switch err.(type) {
+	case *errContextNotFound, errConfigurationInvalid:
+		return true
+	}
+	return IsContextNotFound(err)
+}
+
+// Validate checks for errors in the Config.  It does not return early so that it can find as many errors as possible.
+func Validate(config clientcmdapi.Config) error {
+	validationErrors := make([]error, 0)
+
+	if clientcmdapi.IsConfigEmpty(&config) {
+		return newErrConfigurationInvalid([]error{ErrEmptyConfig})
+	}
+
+	if len(config.CurrentContext) != 0 {
+		if _, exists := config.Contexts[config.CurrentContext]; !exists {
+			validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext})
+		}
+	}
+
+	for contextName, context := range config.Contexts {
+		validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
+	}
+
+	for authInfoName, authInfo := range config.AuthInfos {
+		validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...)
+	}
+
+	for clusterName, clusterInfo := range config.Clusters {
+		validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...)
+	}
+
+	return newErrConfigurationInvalid(validationErrors)
+}
+
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable.  There might still be errors in the config,
+// but no errors in the sections requested or referenced.  It does not return early so that it can find as many errors as possible.
+func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error {
+	validationErrors := make([]error, 0)
+
+	if clientcmdapi.IsConfigEmpty(&config) {
+		return newErrConfigurationInvalid([]error{ErrEmptyConfig})
+	}
+
+	var contextName string
+	if len(passedContextName) != 0 {
+		contextName = passedContextName
+	} else {
+		contextName = config.CurrentContext
+	}
+
+	if len(contextName) == 0 {
+		return ErrNoContext
+	}
+
+	context, exists := config.Contexts[contextName]
+	if !exists {
+		validationErrors = append(validationErrors, &errContextNotFound{contextName})
+	}
+
+	if exists {
+		validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
+
+		// Default to empty users and clusters and let the validation function report an error.
+		authInfo := config.AuthInfos[context.AuthInfo]
+		if authInfo == nil {
+			authInfo = &clientcmdapi.AuthInfo{}
+		}
+		validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *authInfo)...)
+
+		cluster := config.Clusters[context.Cluster]
+		if cluster == nil {
+			cluster = &clientcmdapi.Cluster{}
+		}
+		validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *cluster)...)
+	}
+
+	return newErrConfigurationInvalid(validationErrors)
+}
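+
+// Illustrative sketch (not part of the upstream file): validating a kubeconfig
+// loaded from disk. LoadFromFile is defined elsewhere in this package; the
+// path is hypothetical.
+//
+//	cfg, err := LoadFromFile("/tmp/kubeconfig")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := Validate(*cfg); err != nil && IsConfigurationInvalid(err) {
+//		log.Printf("invalid kubeconfig: %v", err)
+//	}
+//	if err := ConfirmUsable(*cfg, "" /* empty means use CurrentContext */); err != nil {
+//		log.Printf("current context not usable: %v", err)
+//	}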
+
+// validateClusterInfo looks for conflicts and errors in the cluster info
+func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error {
+	validationErrors := make([]error, 0)
+
+	emptyCluster := clientcmdapi.NewCluster()
+	if reflect.DeepEqual(*emptyCluster, clusterInfo) {
+		return []error{ErrEmptyCluster}
+	}
+
+	if len(clusterInfo.Server) == 0 {
+		if len(clusterName) == 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined"))
+		} else {
+			validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName))
+		}
+	}
+	if proxyURL := clusterInfo.ProxyURL; proxyURL != "" {
+		if _, err := parseProxyURL(proxyURL); err != nil {
+			validationErrors = append(validationErrors, fmt.Errorf("invalid 'proxy-url' %q for cluster %q: %w", proxyURL, clusterName, err))
+		}
+	}
+	// Make sure CA data and CA file aren't both specified
+	if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
+		validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override.", clusterName))
+	}
+	if len(clusterInfo.CertificateAuthority) != 0 {
+		clientCertCA, err := os.Open(clusterInfo.CertificateAuthority)
+		if err != nil {
+			validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %w", clusterInfo.CertificateAuthority, clusterName, err))
+		} else {
+			defer clientCertCA.Close()
+		}
+	}
+
+	return validationErrors
+}
+
+// validateAuthInfo looks for conflicts and errors in the auth info
+func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error {
+	validationErrors := make([]error, 0)
+
+	usingAuthPath := false
+	methods := make([]string, 0, 3)
+	if len(authInfo.Token) != 0 {
+		methods = append(methods, "token")
+	}
+	if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 {
+		methods = append(methods, "basicAuth")
+	}
+
+	if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
+		// Make sure cert data and file aren't both specified
+		if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override.", authInfoName))
+		}
+		// Make sure key data and file aren't both specified
+		if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
+		}
+		// Make sure a key is specified
+		if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName))
+		}
+
+		if len(authInfo.ClientCertificate) != 0 {
+			clientCertFile, err := os.Open(authInfo.ClientCertificate)
+			if err != nil {
+				validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %w", authInfo.ClientCertificate, authInfoName, err))
+			} else {
+				defer clientCertFile.Close()
+			}
+		}
+		if len(authInfo.ClientKey) != 0 {
+			clientKeyFile, err := os.Open(authInfo.ClientKey)
+			if err != nil {
+				validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %w", authInfo.ClientKey, authInfoName, err))
+			} else {
+				defer clientKeyFile.Close()
+			}
+		}
+	}
+
+	if authInfo.Exec != nil {
+		if authInfo.AuthProvider != nil {
+			validationErrors = append(validationErrors, fmt.Errorf("authProvider cannot be provided in combination with an exec plugin for %s", authInfoName))
+		}
+		if len(authInfo.Exec.Command) == 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("command must be specified for %v to use exec authentication plugin", authInfoName))
+		}
+		if len(authInfo.Exec.APIVersion) == 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("apiVersion must be specified for %v to use exec authentication plugin", authInfoName))
+		}
+		for _, v := range authInfo.Exec.Env {
+			if len(v.Name) == 0 {
+				validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName))
+			}
+		}
+		switch authInfo.Exec.InteractiveMode {
+		case "":
+			validationErrors = append(validationErrors, fmt.Errorf("interactiveMode must be specified for %v to use exec authentication plugin", authInfoName))
+		case clientcmdapi.NeverExecInteractiveMode, clientcmdapi.IfAvailableExecInteractiveMode, clientcmdapi.AlwaysExecInteractiveMode:
+			// These are valid
+		default:
+			validationErrors = append(validationErrors, fmt.Errorf("invalid interactiveMode for %v: %q", authInfoName, authInfo.Exec.InteractiveMode))
+		}
+	}
+
+	// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
+	if (len(methods) > 1) && (!usingAuthPath) {
+		validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
+	}
+
+	// ImpersonateUID, ImpersonateGroups or ImpersonateUserExtra should be requested with a user
+	if (len(authInfo.ImpersonateUID) > 0 || len(authInfo.ImpersonateGroups) > 0 || len(authInfo.ImpersonateUserExtra) > 0) && (len(authInfo.Impersonate) == 0) {
+		validationErrors = append(validationErrors, fmt.Errorf("requesting uid, groups or user-extra for %v without impersonating a user", authInfoName))
+	}
+	return validationErrors
+}
+
+// validateContext looks for errors in the context.  It is not transitive, so errors in the referenced authInfo or cluster configs are not included in this return
+func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error {
+	validationErrors := make([]error, 0)
+
+	if len(contextName) == 0 {
+		validationErrors = append(validationErrors, fmt.Errorf("empty context name for %#v is not allowed", context))
+	}
+
+	if len(context.AuthInfo) == 0 {
+		validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName))
+	} else if _, exists := config.AuthInfos[context.AuthInfo]; !exists {
+		validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName))
+	}
+
+	if len(context.Cluster) == 0 {
+		validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName))
+	} else if _, exists := config.Clusters[context.Cluster]; !exists {
+		validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName))
+	}
+
+	if len(context.Namespace) != 0 {
+		if len(validation.IsDNS1123Label(context.Namespace)) != 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName))
+		}
+	}
+
+	return validationErrors
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/metrics/OWNERS b/hack/tools/vendor/k8s.io/client-go/tools/metrics/OWNERS
new file mode 100644
index 0000000000..2c9488a5fb
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/metrics/OWNERS
@@ -0,0 +1,5 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+  - wojtek-t
+  - jayunit100
diff --git a/hack/tools/vendor/k8s.io/client-go/tools/metrics/metrics.go b/hack/tools/vendor/k8s.io/client-go/tools/metrics/metrics.go
new file mode 100644
index 0000000000..99d3d8e239
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/tools/metrics/metrics.go
@@ -0,0 +1,211 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package metrics provides abstractions for registering which metrics
+// to record.
+package metrics
+
+import (
+	"context"
+	"net/url"
+	"sync"
+	"time"
+)
+
+var registerMetrics sync.Once
+
+// DurationMetric is a measurement of some amount of time.
+type DurationMetric interface {
+	Observe(duration time.Duration)
+}
+
+// ExpiryMetric sets some time of expiry. If nil, assume not relevant.
+type ExpiryMetric interface {
+	Set(expiry *time.Time)
+}
+
+// LatencyMetric observes client latency partitioned by verb and url.
+type LatencyMetric interface {
+	Observe(ctx context.Context, verb string, u url.URL, latency time.Duration)
+}
+
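+// ResolverLatencyMetric observes client DNS resolver latency partitioned by host.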
+type ResolverLatencyMetric interface {
+	Observe(ctx context.Context, host string, latency time.Duration)
+}
+
+// SizeMetric observes client response size partitioned by verb and host.
+type SizeMetric interface {
+	Observe(ctx context.Context, verb string, host string, size float64)
+}
+
+// ResultMetric counts response codes partitioned by method and host.
+type ResultMetric interface {
+	Increment(ctx context.Context, code string, method string, host string)
+}
+
+// CallsMetric counts calls that take place for a specific exec plugin.
+type CallsMetric interface {
+	// Increment increments a counter per exitCode and callStatus.
+	Increment(exitCode int, callStatus string)
+}
+
+// RetryMetric counts the number of retries sent to the server
+// partitioned by code, method, and host.
+type RetryMetric interface {
+	IncrementRetry(ctx context.Context, code string, method string, host string)
+}
+
+// TransportCacheMetric shows the number of entries in the internal transport cache
+type TransportCacheMetric interface {
+	Observe(value int)
+}
+
+// TransportCreateCallsMetric counts the number of times a transport is created
+// partitioned by the result of the cache: hit, miss, uncacheable
+type TransportCreateCallsMetric interface {
+	Increment(result string)
+}
+
+var (
+	// ClientCertExpiry is the expiry time of a client certificate
+	ClientCertExpiry ExpiryMetric = noopExpiry{}
+	// ClientCertRotationAge is the age of a certificate that has just been rotated.
+	ClientCertRotationAge DurationMetric = noopDuration{}
+	// RequestLatency is the latency metric that rest clients will update.
+	RequestLatency LatencyMetric = noopLatency{}
+	// ResolverLatency is the latency metric that DNS resolver will update
+	ResolverLatency ResolverLatencyMetric = noopResolverLatency{}
+	// RequestSize is the request size metric that rest clients will update.
+	RequestSize SizeMetric = noopSize{}
+	// ResponseSize is the response size metric that rest clients will update.
+	ResponseSize SizeMetric = noopSize{}
+	// RateLimiterLatency is the client side rate limiter latency metric.
+	RateLimiterLatency LatencyMetric = noopLatency{}
+	// RequestResult is the result metric that rest clients will update.
+	RequestResult ResultMetric = noopResult{}
+	// ExecPluginCalls is the number of calls made to an exec plugin, partitioned by
+	// exit code and call status.
+	ExecPluginCalls CallsMetric = noopCalls{}
+	// RequestRetry is the retry metric that tracks the number of
+	// retries sent to the server.
+	RequestRetry RetryMetric = noopRetry{}
+	// TransportCacheEntries is the metric that tracks the number of entries in the
+	// internal transport cache.
+	TransportCacheEntries TransportCacheMetric = noopTransportCache{}
+	// TransportCreateCalls is the metric that counts the number of times a new transport
+	// is created
+	TransportCreateCalls TransportCreateCallsMetric = noopTransportCreateCalls{}
+)
+
+// RegisterOpts contains all the metrics to register. Metrics may be nil.
+type RegisterOpts struct {
+	ClientCertExpiry      ExpiryMetric
+	ClientCertRotationAge DurationMetric
+	RequestLatency        LatencyMetric
+	ResolverLatency       ResolverLatencyMetric
+	RequestSize           SizeMetric
+	ResponseSize          SizeMetric
+	RateLimiterLatency    LatencyMetric
+	RequestResult         ResultMetric
+	ExecPluginCalls       CallsMetric
+	RequestRetry          RetryMetric
+	TransportCacheEntries TransportCacheMetric
+	TransportCreateCalls  TransportCreateCallsMetric
+}
+
+// Register registers metrics for the rest client to use. This can
+// only be called once.
+func Register(opts RegisterOpts) {
+	registerMetrics.Do(func() {
+		if opts.ClientCertExpiry != nil {
+			ClientCertExpiry = opts.ClientCertExpiry
+		}
+		if opts.ClientCertRotationAge != nil {
+			ClientCertRotationAge = opts.ClientCertRotationAge
+		}
+		if opts.RequestLatency != nil {
+			RequestLatency = opts.RequestLatency
+		}
+		if opts.ResolverLatency != nil {
+			ResolverLatency = opts.ResolverLatency
+		}
+		if opts.RequestSize != nil {
+			RequestSize = opts.RequestSize
+		}
+		if opts.ResponseSize != nil {
+			ResponseSize = opts.ResponseSize
+		}
+		if opts.RateLimiterLatency != nil {
+			RateLimiterLatency = opts.RateLimiterLatency
+		}
+		if opts.RequestResult != nil {
+			RequestResult = opts.RequestResult
+		}
+		if opts.ExecPluginCalls != nil {
+			ExecPluginCalls = opts.ExecPluginCalls
+		}
+		if opts.RequestRetry != nil {
+			RequestRetry = opts.RequestRetry
+		}
+		if opts.TransportCacheEntries != nil {
+			TransportCacheEntries = opts.TransportCacheEntries
+		}
+		if opts.TransportCreateCalls != nil {
+			TransportCreateCalls = opts.TransportCreateCalls
+		}
+	})
+}
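+
+// Illustrative sketch (not part of the upstream file): registering a custom
+// ResultMetric that counts responses in memory. The counter type is
+// hypothetical, and Register only honors the first call.
+//
+//	type countingResult struct {
+//		mu     sync.Mutex
+//		counts map[string]int
+//	}
+//
+//	func (c *countingResult) Increment(_ context.Context, code, method, host string) {
+//		c.mu.Lock()
+//		defer c.mu.Unlock()
+//		c.counts[method+" "+host+" -> "+code]++
+//	}
+//
+//	func init() {
+//		Register(RegisterOpts{RequestResult: &countingResult{counts: map[string]int{}}})
+//	}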
+
+type noopDuration struct{}
+
+func (noopDuration) Observe(time.Duration) {}
+
+type noopExpiry struct{}
+
+func (noopExpiry) Set(*time.Time) {}
+
+type noopLatency struct{}
+
+func (noopLatency) Observe(context.Context, string, url.URL, time.Duration) {}
+
+type noopResolverLatency struct{}
+
+func (n noopResolverLatency) Observe(ctx context.Context, host string, latency time.Duration) {
+}
+
+type noopSize struct{}
+
+func (noopSize) Observe(context.Context, string, string, float64) {}
+
+type noopResult struct{}
+
+func (noopResult) Increment(context.Context, string, string, string) {}
+
+type noopCalls struct{}
+
+func (noopCalls) Increment(int, string) {}
+
+type noopRetry struct{}
+
+func (noopRetry) IncrementRetry(context.Context, string, string, string) {}
+
+type noopTransportCache struct{}
+
+func (noopTransportCache) Observe(int) {}
+
+type noopTransportCreateCalls struct{}
+
+func (noopTransportCreateCalls) Increment(string) {}
diff --git a/hack/tools/vendor/k8s.io/client-go/transport/OWNERS b/hack/tools/vendor/k8s.io/client-go/transport/OWNERS
new file mode 100644
index 0000000000..34adee5ec5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/transport/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers:
+  - smarterclayton
+  - wojtek-t
+  - deads2k
+  - liggitt
+  - caesarxuchao
diff --git a/hack/tools/vendor/k8s.io/client-go/transport/cache.go b/hack/tools/vendor/k8s.io/client-go/transport/cache.go
new file mode 100644
index 0000000000..7c7f1b330f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/transport/cache.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/tools/metrics"
+)
+
+// tlsTransportCache caches TLS http.RoundTrippers for different configurations. The
+// same RoundTripper will be returned for configs with identical TLS options. If
+// the config has no custom TLS options, http.DefaultTransport is returned.
+type tlsTransportCache struct {
+	mu         sync.Mutex
+	transports map[tlsCacheKey]*http.Transport
+}
+
+// DialerStopCh is a stop channel that is passed down to the dynamic cert dialer.
+// It's exposed as variable for testing purposes to avoid testing for goroutine
+// leakages.
+var DialerStopCh = wait.NeverStop
+
+const idleConnsPerHost = 25
+
+var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)}
+
+type tlsCacheKey struct {
+	insecure           bool
+	caData             string
+	certData           string
+	keyData            string `datapolicy:"security-key"`
+	certFile           string
+	keyFile            string
+	serverName         string
+	nextProtos         string
+	disableCompression bool
+	// these functions are wrapped to allow them to be used as map keys
+	getCert *GetCertHolder
+	dial    *DialHolder
+}
+
+func (t tlsCacheKey) String() string {
+	keyText := ""
+	if len(t.keyData) > 0 {
+		keyText = ""
+	}
+	return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, serverName:%s, disableCompression:%t, getCert:%p, dial:%p",
+		t.insecure, t.caData, t.certData, keyText, t.serverName, t.disableCompression, t.getCert, t.dial)
+}
+
+func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
+	key, canCache, err := tlsConfigKey(config)
+	if err != nil {
+		return nil, err
+	}
+
+	if canCache {
+		// Ensure we only create a single transport for the given TLS options
+		c.mu.Lock()
+		defer c.mu.Unlock()
+		defer metrics.TransportCacheEntries.Observe(len(c.transports))
+
+		// See if we already have a custom transport for this config
+		if t, ok := c.transports[key]; ok {
+			metrics.TransportCreateCalls.Increment("hit")
+			return t, nil
+		}
+		metrics.TransportCreateCalls.Increment("miss")
+	} else {
+		metrics.TransportCreateCalls.Increment("uncacheable")
+	}
+
+	// Get the TLS options for this client config
+	tlsConfig, err := TLSConfigFor(config)
+	if err != nil {
+		return nil, err
+	}
+	// The options didn't require a custom TLS config
+	if tlsConfig == nil && config.DialHolder == nil && config.Proxy == nil {
+		return http.DefaultTransport, nil
+	}
+
+	var dial func(ctx context.Context, network, address string) (net.Conn, error)
+	if config.DialHolder != nil {
+		dial = config.DialHolder.Dial
+	} else {
+		dial = (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).DialContext
+	}
+
+	// If we are reloading files, we need to handle certificate rotation properly
+	// TODO(jackkleeman): We can also add rotation here when config.HasCertCallback() is true
+	if config.TLS.ReloadTLSFiles && tlsConfig != nil && tlsConfig.GetClientCertificate != nil {
+		dynamicCertDialer := certRotatingDialer(tlsConfig.GetClientCertificate, dial)
+		tlsConfig.GetClientCertificate = dynamicCertDialer.GetClientCertificate
+		dial = dynamicCertDialer.connDialer.DialContext
+		go dynamicCertDialer.Run(DialerStopCh)
+	}
+
+	proxy := http.ProxyFromEnvironment
+	if config.Proxy != nil {
+		proxy = config.Proxy
+	}
+
+	transport := utilnet.SetTransportDefaults(&http.Transport{
+		Proxy:               proxy,
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     tlsConfig,
+		MaxIdleConnsPerHost: idleConnsPerHost,
+		DialContext:         dial,
+		DisableCompression:  config.DisableCompression,
+	})
+
+	if canCache {
+		// Cache a single transport for these options
+		c.transports[key] = transport
+	}
+
+	return transport, nil
+}
+
+// tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor
+func tlsConfigKey(c *Config) (tlsCacheKey, bool, error) {
+	// Make sure ca/key/cert content is loaded
+	if err := loadTLSFiles(c); err != nil {
+		return tlsCacheKey{}, false, err
+	}
+
+	if c.Proxy != nil {
+		// cannot determine equality for functions
+		return tlsCacheKey{}, false, nil
+	}
+
+	k := tlsCacheKey{
+		insecure:           c.TLS.Insecure,
+		caData:             string(c.TLS.CAData),
+		serverName:         c.TLS.ServerName,
+		nextProtos:         strings.Join(c.TLS.NextProtos, ","),
+		disableCompression: c.DisableCompression,
+		getCert:            c.TLS.GetCertHolder,
+		dial:               c.DialHolder,
+	}
+
+	if c.TLS.ReloadTLSFiles {
+		k.certFile = c.TLS.CertFile
+		k.keyFile = c.TLS.KeyFile
+	} else {
+		k.certData = string(c.TLS.CertData)
+		k.keyData = string(c.TLS.KeyData)
+	}
+
+	return k, true, nil
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/transport/cache_go118.go b/hack/tools/vendor/k8s.io/client-go/transport/cache_go118.go
new file mode 100644
index 0000000000..babdaf8b5a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/transport/cache_go118.go
@@ -0,0 +1,46 @@
+//go:build go1.18
+
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+// this is just to make the "unused" linter rule happy
+var _ = isCacheKeyComparable[tlsCacheKey]
+
+// assert at compile time that tlsCacheKey is comparable in a way that will never panic at runtime.
+//
+// Golang 1.20 introduced an exception to type constraints that allows comparable, but not
+// necessarily strictly comparable type arguments to satisfy the `comparable` type constraint,
+// thus allowing interfaces to fulfil the `comparable` constraint.
+// However, by definition, "A comparison of two interface values with identical
+// dynamic types causes a run-time panic if that type is not comparable".
+//
+// We want to make sure that comparing two `tlsCacheKey` elements won't cause a
+// runtime panic. In order to do that, we'll force the `tlsCacheKey` to be strictly
+// comparable, thus making it impossible for it to contain interfaces.
+// To assert strict comparability, we'll use another definition: "Type
+// parameters are comparable if they are strictly comparable".
+// Below, we first construct a type parameter from the `tlsCacheKey` type so that
+// we can then push this type parameter to a comparable check, thus checking these
+// are strictly comparable.
+//
+// Original suggestion from https://github.com/golang/go/issues/56548#issuecomment-1317673963
+func isCacheKeyComparable[K tlsCacheKey]() {
+	_ = isComparable[K]
+}
+
+func isComparable[T comparable]() {}
diff --git a/hack/tools/vendor/k8s.io/client-go/transport/cert_rotation.go b/hack/tools/vendor/k8s.io/client-go/transport/cert_rotation.go
new file mode 100644
index 0000000000..e76f65812d
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/transport/cert_rotation.go
@@ -0,0 +1,179 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+	"bytes"
+	"crypto/tls"
+	"fmt"
+	"reflect"
+	"sync"
+	"time"
+
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/util/connrotation"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
+)
+
+const workItemKey = "key"
+
+// CertCallbackRefreshDuration is exposed so that integration tests can crank up the reload speed.
+var CertCallbackRefreshDuration = 5 * time.Minute
+
+type reloadFunc func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
+
+type dynamicClientCert struct {
+	clientCert *tls.Certificate
+	certMtx    sync.RWMutex
+
+	reload     reloadFunc
+	connDialer *connrotation.Dialer
+
+	// queue only ever has one item, but it has nice error handling backoff/retry semantics
+	queue workqueue.TypedRateLimitingInterface[string]
+}
+
+func certRotatingDialer(reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert {
+	d := &dynamicClientCert{
+		reload:     reload,
+		connDialer: connrotation.NewDialer(connrotation.DialFunc(dial)),
+		queue: workqueue.NewTypedRateLimitingQueueWithConfig(
+			workqueue.DefaultTypedControllerRateLimiter[string](),
+			workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicClientCertificate"},
+		),
+	}
+
+	return d
+}
+
+// loadClientCert calls the callback and rotates connections if needed
+func (c *dynamicClientCert) loadClientCert() (*tls.Certificate, error) {
+	cert, err := c.reload(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// check to see if we have a change. If the values are the same, do nothing.
+	c.certMtx.RLock()
+	haveCert := c.clientCert != nil
+	if certsEqual(c.clientCert, cert) {
+		c.certMtx.RUnlock()
+		return c.clientCert, nil
+	}
+	c.certMtx.RUnlock()
+
+	c.certMtx.Lock()
+	c.clientCert = cert
+	c.certMtx.Unlock()
+
+	// The first certificate requested is not a rotation that is worth closing connections for
+	if !haveCert {
+		return cert, nil
+	}
+
+	klog.V(1).Infof("certificate rotation detected, shutting down client connections to start using new credentials")
+	c.connDialer.CloseAll()
+
+	return cert, nil
+}
+
+// certsEqual compares tls Certificates, ignoring the Leaf which may get filled in dynamically
+func certsEqual(left, right *tls.Certificate) bool {
+	if left == nil || right == nil {
+		return left == right
+	}
+
+	if !byteMatrixEqual(left.Certificate, right.Certificate) {
+		return false
+	}
+
+	if !reflect.DeepEqual(left.PrivateKey, right.PrivateKey) {
+		return false
+	}
+
+	if !byteMatrixEqual(left.SignedCertificateTimestamps, right.SignedCertificateTimestamps) {
+		return false
+	}
+
+	if !bytes.Equal(left.OCSPStaple, right.OCSPStaple) {
+		return false
+	}
+
+	return true
+}
+
+func byteMatrixEqual(left, right [][]byte) bool {
+	if len(left) != len(right) {
+		return false
+	}
+
+	for i := range left {
+		if !bytes.Equal(left[i], right[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Run starts the controller and blocks until stopCh is closed.
+func (c *dynamicClientCert) Run(stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.V(3).Infof("Starting client certificate rotation controller")
+	defer klog.V(3).Infof("Shutting down client certificate rotation controller")
+
+	go wait.Until(c.runWorker, time.Second, stopCh)
+
+	go wait.PollImmediateUntil(CertCallbackRefreshDuration, func() (bool, error) {
+		c.queue.Add(workItemKey)
+		return false, nil
+	}, stopCh)
+
+	<-stopCh
+}
+
+func (c *dynamicClientCert) runWorker() {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *dynamicClientCert) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	_, err := c.loadClientCert()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
+
+func (c *dynamicClientCert) GetClientCertificate(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
+	return c.loadClientCert()
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/transport/config.go b/hack/tools/vendor/k8s.io/client-go/transport/config.go
new file mode 100644
index 0000000000..d8a3d64b30
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/transport/config.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"net/http"
+	"net/url"
+)
+
+// Config holds various options for establishing a transport.
+type Config struct {
+	// UserAgent is an optional field that specifies the caller of this
+	// request.
+	UserAgent string
+
+	// The base TLS configuration for this transport.
+	TLS TLSConfig
+
+	// Username and password for basic authentication
+	Username string
+	Password string `datapolicy:"password"`
+
+	// Bearer token for authentication
+	BearerToken string `datapolicy:"token"`
+
+	// Path to a file containing a BearerToken.
+	// If set, the contents are periodically read.
+	// The last successfully read value takes precedence over BearerToken.
+	BearerTokenFile string
+
+	// Impersonate is the config that this Config will impersonate using
+	Impersonate ImpersonationConfig
+
+	// DisableCompression bypasses automatic GZip compression requests to the
+	// server.
+	DisableCompression bool
+
+	// Transport may be used for custom HTTP behavior. This attribute may
+	// not be specified with the TLS client certificate options. Use
+	// WrapTransport for most client level operations.
+	Transport http.RoundTripper
+
+	// WrapTransport will be invoked for custom HTTP behavior after the
+	// underlying transport is initialized (either the transport created
+	// from TLSClientConfig, Transport, or http.DefaultTransport). The
+	// config may layer other RoundTrippers on top of the returned
+	// RoundTripper.
+	//
+	// A future release will change this field to an array. Use config.Wrap()
+	// instead of setting this value directly.
+	WrapTransport WrapperFunc
+
+	// DialHolder specifies the dial function for creating unencrypted TCP connections.
+	// This struct indirection is used to make transport configs cacheable.
+	DialHolder *DialHolder
+
+	// Proxy is the proxy func to be used for all requests made by this
+	// transport. If Proxy is nil, http.ProxyFromEnvironment is used. If Proxy
+	// returns a nil *URL, no proxy is used.
+	//
+	// socks5 proxying does not currently support spdy streaming endpoints.
+	Proxy func(*http.Request) (*url.URL, error)
+}
+
+// DialHolder is used to make the wrapped function comparable so that it can be used as a map key.
+type DialHolder struct {
+	Dial func(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// ImpersonationConfig has all the available impersonation options
+type ImpersonationConfig struct {
+	// UserName matches user.Info.GetName()
+	UserName string
+	// UID matches user.Info.GetUID()
+	UID string
+	// Groups matches user.Info.GetGroups()
+	Groups []string
+	// Extra matches user.Info.GetExtra()
+	Extra map[string][]string
+}
+
+// HasCA returns whether the configuration has a certificate authority or not.
+func (c *Config) HasCA() bool {
+	return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0
+}
+
+// HasBasicAuth returns whether the configuration has basic authentication or not.
+func (c *Config) HasBasicAuth() bool {
+	return len(c.Username) != 0
+}
+
+// HasTokenAuth returns whether the configuration has token authentication or not.
+func (c *Config) HasTokenAuth() bool {
+	return len(c.BearerToken) != 0 || len(c.BearerTokenFile) != 0
+}
+
+// HasCertAuth returns whether the configuration has certificate authentication or not.
+func (c *Config) HasCertAuth() bool {
+	return (len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0) && (len(c.TLS.KeyData) != 0 || len(c.TLS.KeyFile) != 0)
+}
+
+// HasCertCallback returns whether the configuration has certificate callback or not.
+func (c *Config) HasCertCallback() bool {
+	return c.TLS.GetCertHolder != nil
+}
+
+// Wrap adds a transport middleware function that will give the caller
+// an opportunity to wrap the underlying http.RoundTripper prior to the
+// first API call being made. The provided function is invoked after any
+// existing transport wrappers are invoked.
+func (c *Config) Wrap(fn WrapperFunc) {
+	c.WrapTransport = Wrappers(c.WrapTransport, fn)
+}
+
+// TLSConfig holds the information needed to set up a TLS transport.
+type TLSConfig struct {
+	CAFile         string // Path of the PEM-encoded server trusted root certificates.
+	CertFile       string // Path of the PEM-encoded client certificate.
+	KeyFile        string // Path of the PEM-encoded client key.
+	ReloadTLSFiles bool   // Set to indicate that the original config provided files, and that they should be reloaded
+
+	Insecure   bool   // Server should be accessed without verifying the certificate. For testing only.
+	ServerName string // Override for the server name passed to the server for SNI and used to verify certificates.
+
+	CAData   []byte // Bytes of the PEM-encoded server trusted root certificates. Supersedes CAFile.
+	CertData []byte // Bytes of the PEM-encoded client certificate. Supersedes CertFile.
+	KeyData  []byte // Bytes of the PEM-encoded client key. Supersedes KeyFile.
+
+	// NextProtos is a list of supported application level protocols, in order of preference.
+	// Used to populate tls.Config.NextProtos.
+	// To indicate to the server http/1.1 is preferred over http/2, set to ["http/1.1", "h2"] (though the server is free to ignore that preference).
+	// To use only http/1.1, set to ["http/1.1"].
+	NextProtos []string
+
+	// Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supersede this field.
+	// This struct indirection is used to make transport configs cacheable.
+	GetCertHolder *GetCertHolder
+}
+
+// GetCertHolder is used to make the wrapped function comparable so that it can be used as a map key.
+type GetCertHolder struct {
+	GetCert func() (*tls.Certificate, error)
+}
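+
+// Illustrative sketch (not part of the upstream file): building a cacheable
+// Config. Wrapping the dial function in a DialHolder, rather than assigning a
+// bare func, keeps the config usable as a cache key. New is defined elsewhere
+// in this package; the paths and user agent below are hypothetical.
+//
+//	dialer := &net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}
+//	cfg := &Config{
+//		UserAgent:  "example-client/v0.0.1",
+//		TLS:        TLSConfig{CAFile: "/etc/example/ca.crt"},
+//		DialHolder: &DialHolder{Dial: dialer.DialContext},
+//	}
+//	rt, err := New(cfg) // returns an http.RoundTripper backed by the shared transport cache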
diff --git a/hack/tools/vendor/k8s.io/client-go/transport/round_trippers.go b/hack/tools/vendor/k8s.io/client-go/transport/round_trippers.go
new file mode 100644
index 0000000000..52fefb5316
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/transport/round_trippers.go
@@ -0,0 +1,704 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"net/http/httptrace"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/oauth2"
+
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/klog/v2"
+)
+
+// HTTPWrappersForConfig wraps a round tripper with any relevant layered
+// behavior from the config. Exposed for clients that need HTTP-like behavior
+// but must then hijack the underlying connection (like WebSocket or HTTP2
+// clients). Pure HTTP clients should use the RoundTripper returned from New.
+func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) {
+	if config.WrapTransport != nil {
+		rt = config.WrapTransport(rt)
+	}
+
+	rt = DebugWrappers(rt)
+
+	// Set authentication wrappers
+	switch {
+	case config.HasBasicAuth() && config.HasTokenAuth():
+		return nil, fmt.Errorf("username/password or bearer token may be set, but not both")
+	case config.HasTokenAuth():
+		var err error
+		rt, err = NewBearerAuthWithRefreshRoundTripper(config.BearerToken, config.BearerTokenFile, rt)
+		if err != nil {
+			return nil, err
+		}
+	case config.HasBasicAuth():
+		rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt)
+	}
+	if len(config.UserAgent) > 0 {
+		rt = NewUserAgentRoundTripper(config.UserAgent, rt)
+	}
+	if len(config.Impersonate.UserName) > 0 ||
+		len(config.Impersonate.UID) > 0 ||
+		len(config.Impersonate.Groups) > 0 ||
+		len(config.Impersonate.Extra) > 0 {
+		rt = NewImpersonatingRoundTripper(config.Impersonate, rt)
+	}
+	return rt, nil
+}
+
+// DebugWrappers wraps a round tripper and logs based on the current log level.
+func DebugWrappers(rt http.RoundTripper) http.RoundTripper {
+	switch {
+	case bool(klog.V(9).Enabled()):
+		rt = NewDebuggingRoundTripper(rt, DebugCurlCommand, DebugURLTiming, DebugDetailedTiming, DebugResponseHeaders)
+	case bool(klog.V(8).Enabled()):
+		rt = NewDebuggingRoundTripper(rt, DebugJustURL, DebugRequestHeaders, DebugResponseStatus, DebugResponseHeaders)
+	case bool(klog.V(7).Enabled()):
+		rt = NewDebuggingRoundTripper(rt, DebugJustURL, DebugRequestHeaders, DebugResponseStatus)
+	case bool(klog.V(6).Enabled()):
+		rt = NewDebuggingRoundTripper(rt, DebugURLTiming)
+	}
+
+	return rt
+}
+
+type authProxyRoundTripper struct {
+	username string
+	uid      string
+	groups   []string
+	extra    map[string][]string
+
+	rt http.RoundTripper
+}
+
+var _ utilnet.RoundTripperWrapper = &authProxyRoundTripper{}
+
+// NewAuthProxyRoundTripper provides a roundtripper which will add auth proxy fields to requests for
+// authentication terminating proxy cases
+// assuming you pull the user from the context:
+// username is the user.Info.GetName() of the user
+// uid is the user.Info.GetUID() of the user
+// groups is the user.Info.GetGroups() of the user
+// extra is the user.Info.GetExtra() of the user
+// extra can contain any additional information that the authenticator
+// thought was interesting, for example authorization scopes.
+// In order to faithfully round-trip through an impersonation flow, these keys
+// MUST be lowercase.
+func NewAuthProxyRoundTripper(username, uid string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper {
+	return &authProxyRoundTripper{
+		username: username,
+		uid:      uid,
+		groups:   groups,
+		extra:    extra,
+		rt:       rt,
+	}
+}
+
+func (rt *authProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	req = utilnet.CloneRequest(req)
+	SetAuthProxyHeaders(req, rt.username, rt.uid, rt.groups, rt.extra)
+
+	return rt.rt.RoundTrip(req)
+}
+
+// SetAuthProxyHeaders stomps the auth proxy header fields.  It mutates its argument.
+func SetAuthProxyHeaders(req *http.Request, username, uid string, groups []string, extra map[string][]string) {
+	req.Header.Del("X-Remote-User")
+	req.Header.Del("X-Remote-Uid")
+	req.Header.Del("X-Remote-Group")
+	for key := range req.Header {
+		if strings.HasPrefix(strings.ToLower(key), strings.ToLower("X-Remote-Extra-")) {
+			req.Header.Del(key)
+		}
+	}
+
+	req.Header.Set("X-Remote-User", username)
+	if len(uid) > 0 {
+		req.Header.Set("X-Remote-Uid", uid)
+	}
+	for _, group := range groups {
+		req.Header.Add("X-Remote-Group", group)
+	}
+	for key, values := range extra {
+		for _, value := range values {
+			req.Header.Add("X-Remote-Extra-"+headerKeyEscape(key), value)
+		}
+	}
+}
+
+func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) {
+	tryCancelRequest(rt.WrappedRoundTripper(), req)
+}
+
+func (rt *authProxyRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
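+
+// Illustrative sketch (not part of the upstream file): the user, uid, groups,
+// and extra values below are hypothetical and would normally come from an
+// authenticated user.Info.
+//
+//	rt := NewAuthProxyRoundTripper(
+//		"jane", "b9e5f6d7", []string{"system:authenticated"},
+//		map[string][]string{"scopes": {"view"}},
+//		http.DefaultTransport,
+//	)
+//	client := &http.Client{Transport: rt}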
+
+type userAgentRoundTripper struct {
+	agent string
+	rt    http.RoundTripper
+}
+
+var _ utilnet.RoundTripperWrapper = &userAgentRoundTripper{}
+
+// NewUserAgentRoundTripper will add a User-Agent header to a request unless it has already been set.
+func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper {
+	return &userAgentRoundTripper{agent, rt}
+}
+
+func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	if len(req.Header.Get("User-Agent")) != 0 {
+		return rt.rt.RoundTrip(req)
+	}
+	req = utilnet.CloneRequest(req)
+	req.Header.Set("User-Agent", rt.agent)
+	return rt.rt.RoundTrip(req)
+}
+
+func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) {
+	tryCancelRequest(rt.WrappedRoundTripper(), req)
+}
+
+func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
+
+type basicAuthRoundTripper struct {
+	username string
+	password string `datapolicy:"password"`
+	rt       http.RoundTripper
+}
+
+var _ utilnet.RoundTripperWrapper = &basicAuthRoundTripper{}
+
+// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a
+// request unless it has already been set.
+func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper {
+	return &basicAuthRoundTripper{username, password, rt}
+}
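+
+// Example usage (an illustrative sketch; the credentials are hypothetical):
+//
+//	rt := NewBasicAuthRoundTripper("admin", "s3cr3t", http.DefaultTransport)
+//	client := &http.Client{Transport: rt} // adds "Authorization: Basic ..." when the header is unset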
+
+func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	if len(req.Header.Get("Authorization")) != 0 {
+		return rt.rt.RoundTrip(req)
+	}
+	req = utilnet.CloneRequest(req)
+	req.SetBasicAuth(rt.username, rt.password)
+	return rt.rt.RoundTrip(req)
+}
+
+func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) {
+	tryCancelRequest(rt.WrappedRoundTripper(), req)
+}
+
+func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
+
+// These correspond to the headers used in pkg/apis/authentication.  We don't want the package dependency,
+// but you must not change the values.
+const (
+	// ImpersonateUserHeader is used to impersonate a particular user during an API server request
+	ImpersonateUserHeader = "Impersonate-User"
+
+	// ImpersonateUIDHeader is used to impersonate a particular UID during an API server request
+	ImpersonateUIDHeader = "Impersonate-Uid"
+
+	// ImpersonateGroupHeader is used to impersonate a particular group during an API server request.
+	// It can be repeated multiple times for multiple groups.
+	ImpersonateGroupHeader = "Impersonate-Group"
+
+	// ImpersonateUserExtraHeaderPrefix is a prefix for a header used to impersonate an entry in the
+	// extra map[string][]string for user.Info.  The key for the `extra` map is the suffix.
+	// The same key can be repeated multiple times to have multiple elements in the slice under a single key.
+	// For instance:
+	// Impersonate-Extra-Foo: one
+	// Impersonate-Extra-Foo: two
+	// results in extra["Foo"] = []string{"one", "two"}
+	ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-"
+)
+
+type impersonatingRoundTripper struct {
+	impersonate ImpersonationConfig
+	delegate    http.RoundTripper
+}
+
+var _ utilnet.RoundTripperWrapper = &impersonatingRoundTripper{}
+
+// NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set.
+func NewImpersonatingRoundTripper(impersonate ImpersonationConfig, delegate http.RoundTripper) http.RoundTripper {
+	return &impersonatingRoundTripper{impersonate, delegate}
+}
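+
+// Example usage (an illustrative sketch; the identity values are hypothetical):
+//
+//	rt := NewImpersonatingRoundTripper(ImpersonationConfig{
+//		UserName: "jane.doe",
+//		Groups:   []string{"developers"},
+//		Extra:    map[string][]string{"scopes": {"view"}},
+//	}, http.DefaultTransport)
+//	client := &http.Client{Transport: rt} // requests now carry the Impersonate-* headers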
+
+func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	// use the user header as a marker for the rest.
+	if len(req.Header.Get(ImpersonateUserHeader)) != 0 {
+		return rt.delegate.RoundTrip(req)
+	}
+	req = utilnet.CloneRequest(req)
+	req.Header.Set(ImpersonateUserHeader, rt.impersonate.UserName)
+	if rt.impersonate.UID != "" {
+		req.Header.Set(ImpersonateUIDHeader, rt.impersonate.UID)
+	}
+	for _, group := range rt.impersonate.Groups {
+		req.Header.Add(ImpersonateGroupHeader, group)
+	}
+	for k, vv := range rt.impersonate.Extra {
+		for _, v := range vv {
+			req.Header.Add(ImpersonateUserExtraHeaderPrefix+headerKeyEscape(k), v)
+		}
+	}
+
+	return rt.delegate.RoundTrip(req)
+}
+
+func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) {
+	tryCancelRequest(rt.WrappedRoundTripper(), req)
+}
+
+func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate }
+
+type bearerAuthRoundTripper struct {
+	bearer string
+	source oauth2.TokenSource
+	rt     http.RoundTripper
+}
+
+var _ utilnet.RoundTripperWrapper = &bearerAuthRoundTripper{}
+
+// NewBearerAuthRoundTripper adds the provided bearer token to a request
+// unless the authorization header has already been set.
+func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper {
+	return &bearerAuthRoundTripper{bearer, nil, rt}
+}
+
+// NewBearerAuthWithRefreshRoundTripper adds the provided bearer token to a request
+// unless the authorization header has already been set.
+// If tokenFile is non-empty, it is periodically read,
+// and the last successfully read content is used as the bearer token.
+// If tokenFile is non-empty and bearer is empty, the tokenFile is read
+// immediately to populate the initial bearer token.
+func NewBearerAuthWithRefreshRoundTripper(bearer string, tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) {
+	if len(tokenFile) == 0 {
+		return &bearerAuthRoundTripper{bearer, nil, rt}, nil
+	}
+	source := NewCachedFileTokenSource(tokenFile)
+	if len(bearer) == 0 {
+		token, err := source.Token()
+		if err != nil {
+			return nil, err
+		}
+		bearer = token.AccessToken
+	}
+	return &bearerAuthRoundTripper{bearer, source, rt}, nil
+}
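+
+// Example usage (an illustrative sketch; the path shown is the standard
+// projected service account token location, any readable file works):
+//
+//	rt, err := NewBearerAuthWithRefreshRoundTripper(
+//		"", // empty: read the initial token from the file
+//		"/var/run/secrets/kubernetes.io/serviceaccount/token",
+//		http.DefaultTransport,
+//	)
+//	if err != nil {
+//		// the token file could not be read
+//	}
+//	client := &http.Client{Transport: rt}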
+
+func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	if len(req.Header.Get("Authorization")) != 0 {
+		return rt.rt.RoundTrip(req)
+	}
+
+	req = utilnet.CloneRequest(req)
+	token := rt.bearer
+	if rt.source != nil {
+		if refreshedToken, err := rt.source.Token(); err == nil {
+			token = refreshedToken.AccessToken
+		}
+	}
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
+	return rt.rt.RoundTrip(req)
+}
+
+func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) {
+	tryCancelRequest(rt.WrappedRoundTripper(), req)
+}
+
+func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt }
+
+// requestInfo keeps track of information about a request/response combination
+type requestInfo struct {
+	RequestHeaders http.Header `datapolicy:"token"`
+	RequestVerb    string
+	RequestURL     string
+
+	ResponseStatus  string
+	ResponseHeaders http.Header
+	ResponseErr     error
+
+	muTrace          sync.Mutex // Protect trace fields
+	DNSLookup        time.Duration
+	Dialing          time.Duration
+	GetConnection    time.Duration
+	TLSHandshake     time.Duration
+	ServerProcessing time.Duration
+	ConnectionReused bool
+
+	Duration time.Duration
+}
+
+// newRequestInfo creates a new RequestInfo based on an http request
+func newRequestInfo(req *http.Request) *requestInfo {
+	return &requestInfo{
+		RequestURL:     req.URL.String(),
+		RequestVerb:    req.Method,
+		RequestHeaders: req.Header,
+	}
+}
+
+// complete adds information about the response to the requestInfo
+func (r *requestInfo) complete(response *http.Response, err error) {
+	if err != nil {
+		r.ResponseErr = err
+		return
+	}
+	r.ResponseStatus = response.Status
+	r.ResponseHeaders = response.Header
+}
+
+// toCurl returns a string that can be run as a command in a terminal (minus the body)
+func (r *requestInfo) toCurl() string {
+	headers := ""
+	for key, values := range r.RequestHeaders {
+		for _, value := range values {
+			value = maskValue(key, value)
+			headers += fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value))
+		}
+	}
+
+	return fmt.Sprintf("curl -v -X%s %s '%s'", r.RequestVerb, headers, r.RequestURL)
+}
+
+// debuggingRoundTripper will display information about the requests passing
+// through it based on what is configured
+type debuggingRoundTripper struct {
+	delegatedRoundTripper http.RoundTripper
+	levels                map[DebugLevel]bool
+}
+
+var _ utilnet.RoundTripperWrapper = &debuggingRoundTripper{}
+
+// DebugLevel is used to enable debugging of certain
+// HTTP request and response fields via the debuggingRoundTripper.
+type DebugLevel int
+
+const (
+	// DebugJustURL will add to the debug output the HTTP request's method and URL.
+	DebugJustURL DebugLevel = iota
+	// DebugURLTiming will add to the debug output the duration of HTTP requests.
+	DebugURLTiming
+	// DebugCurlCommand will add to the debug output the curl command equivalent to the
+	// HTTP request.
+	DebugCurlCommand
+	// DebugRequestHeaders will add to the debug output the HTTP request headers.
+	DebugRequestHeaders
+	// DebugResponseStatus will add to the debug output the HTTP response status.
+	DebugResponseStatus
+	// DebugResponseHeaders will add to the debug output the HTTP response headers.
+	DebugResponseHeaders
+	// DebugDetailedTiming will add to the debug output the durations of the HTTP request's events.
+	DebugDetailedTiming
+)
+
+// NewDebuggingRoundTripper allows displaying debug information in the log
+// output for the API requests performed by the client.
+func NewDebuggingRoundTripper(rt http.RoundTripper, levels ...DebugLevel) http.RoundTripper {
+	drt := &debuggingRoundTripper{
+		delegatedRoundTripper: rt,
+		levels:                make(map[DebugLevel]bool, len(levels)),
+	}
+	for _, v := range levels {
+		drt.levels[v] = true
+	}
+	return drt
+}
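+
+// Example usage (an illustrative sketch):
+//
+//	rt := NewDebuggingRoundTripper(http.DefaultTransport, DebugURLTiming, DebugResponseStatus)
+//	client := &http.Client{Transport: rt} // each request now logs its timing and response status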
+
+func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) {
+	tryCancelRequest(rt.WrappedRoundTripper(), req)
+}
+
+var knownAuthTypes = map[string]bool{
+	"bearer":    true,
+	"basic":     true,
+	"negotiate": true,
+}
+
+// maskValue masks credential content from authorization headers
+// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Authorization
+func maskValue(key string, value string) string {
+	if !strings.EqualFold(key, "Authorization") {
+		return value
+	}
+	if len(value) == 0 {
+		return ""
+	}
+	var authType string
+	if i := strings.Index(value, " "); i > 0 {
+		authType = value[0:i]
+	} else {
+		authType = value
+	}
+	if !knownAuthTypes[strings.ToLower(authType)] {
+		return ""
+	}
+	if len(value) > len(authType)+1 {
+		value = authType + " "
+	} else {
+		value = authType
+	}
+	return value
+}
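+
+// For example (illustrative values):
+//
+//	maskValue("Authorization", "Bearer eyJhbGciOi...") // "Bearer " (auth type kept, credential dropped)
+//	maskValue("Authorization", "Custom opaque-token")  // "" (unknown auth type, fully masked)
+//	maskValue("Accept", "application/json")            // unchanged, not an Authorization header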
+
+func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	reqInfo := newRequestInfo(req)
+
+	if rt.levels[DebugJustURL] {
+		klog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL)
+	}
+	if rt.levels[DebugCurlCommand] {
+		klog.Infof("%s", reqInfo.toCurl())
+	}
+	if rt.levels[DebugRequestHeaders] {
+		klog.Info("Request Headers:")
+		for key, values := range reqInfo.RequestHeaders {
+			for _, value := range values {
+				value = maskValue(key, value)
+				klog.Infof("    %s: %s", key, value)
+			}
+		}
+	}
+
+	startTime := time.Now()
+
+	if rt.levels[DebugDetailedTiming] {
+		var getConn, dnsStart, dialStart, tlsStart, serverStart time.Time
+		var host string
+		trace := &httptrace.ClientTrace{
+			// DNS
+			DNSStart: func(info httptrace.DNSStartInfo) {
+				reqInfo.muTrace.Lock()
+				defer reqInfo.muTrace.Unlock()
+				dnsStart = time.Now()
+				host = info.Host
+			},
+			DNSDone: func(info httptrace.DNSDoneInfo) {
+				reqInfo.muTrace.Lock()
+				defer reqInfo.muTrace.Unlock()
+				reqInfo.DNSLookup = time.Since(dnsStart)
+				klog.Infof("HTTP Trace: DNS Lookup for %s resolved to %v", host, info.Addrs)
+			},
+			// Dial
+			ConnectStart: func(network, addr string) {
+				reqInfo.muTrace.Lock()
+				defer reqInfo.muTrace.Unlock()
+				dialStart = time.Now()
+			},
+			ConnectDone: func(network, addr string, err error) {
+				reqInfo.muTrace.Lock()
+				defer reqInfo.muTrace.Unlock()
+				reqInfo.Dialing = time.Since(dialStart)
+				if err != nil {
+					klog.Infof("HTTP Trace: Dial to %s:%s failed: %v", network, addr, err)
+				} else {
+					klog.Infof("HTTP Trace: Dial to %s:%s succeeded", network, addr)
+				}
+			},
+			// TLS
+			TLSHandshakeStart: func() {
+				tlsStart = time.Now()
+			},
+			TLSHandshakeDone: func(_ tls.ConnectionState, _ error) {
+				reqInfo.muTrace.Lock()
+				defer reqInfo.muTrace.Unlock()
+				reqInfo.TLSHandshake = time.Since(tlsStart)
+			},
+			// Connection (it can be DNS + Dial or just the time to get one from the connection pool)
+			GetConn: func(hostPort string) {
+				getConn = time.Now()
+			},
+			GotConn: func(info httptrace.GotConnInfo) {
+				reqInfo.muTrace.Lock()
+				defer reqInfo.muTrace.Unlock()
+				reqInfo.GetConnection = time.Since(getConn)
+				reqInfo.ConnectionReused = info.Reused
+			},
+			// Server Processing (time since we wrote the request until first byte is received)
+			WroteRequest: func(info httptrace.WroteRequestInfo) {
+				reqInfo.muTrace.Lock()
+				defer reqInfo.muTrace.Unlock()
+				serverStart = time.Now()
+			},
+			GotFirstResponseByte: func() {
+				reqInfo.muTrace.Lock()
+				defer reqInfo.muTrace.Unlock()
+				reqInfo.ServerProcessing = time.Since(serverStart)
+			},
+		}
+		req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
+	}
+
+	response, err := rt.delegatedRoundTripper.RoundTrip(req)
+	reqInfo.Duration = time.Since(startTime)
+
+	reqInfo.complete(response, err)
+
+	if rt.levels[DebugURLTiming] {
+		klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
+	}
+	if rt.levels[DebugDetailedTiming] {
+		stats := ""
+		if !reqInfo.ConnectionReused {
+			stats += fmt.Sprintf(`DNSLookup %d ms Dial %d ms TLSHandshake %d ms`,
+				reqInfo.DNSLookup.Nanoseconds()/int64(time.Millisecond),
+				reqInfo.Dialing.Nanoseconds()/int64(time.Millisecond),
+				reqInfo.TLSHandshake.Nanoseconds()/int64(time.Millisecond),
+			)
+		} else {
+			stats += fmt.Sprintf(`GetConnection %d ms`, reqInfo.GetConnection.Nanoseconds()/int64(time.Millisecond))
+		}
+		if reqInfo.ServerProcessing != 0 {
+			stats += fmt.Sprintf(` ServerProcessing %d ms`, reqInfo.ServerProcessing.Nanoseconds()/int64(time.Millisecond))
+		}
+		stats += fmt.Sprintf(` Duration %d ms`, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
+		klog.Infof("HTTP Statistics: %s", stats)
+	}
+
+	if rt.levels[DebugResponseStatus] {
+		klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
+	}
+	if rt.levels[DebugResponseHeaders] {
+		klog.Info("Response Headers:")
+		for key, values := range reqInfo.ResponseHeaders {
+			for _, value := range values {
+				klog.Infof("    %s: %s", key, value)
+			}
+		}
+	}
+
+	return response, err
+}
+
+func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper {
+	return rt.delegatedRoundTripper
+}
+
+func legalHeaderByte(b byte) bool {
+	return int(b) < len(legalHeaderKeyBytes) && legalHeaderKeyBytes[b]
+}
+
+func shouldEscape(b byte) bool {
+	// url.PathUnescape() returns an error if any '%' is not followed by two
+	// hexadecimal digits, so we'll intentionally encode it.
+	return !legalHeaderByte(b) || b == '%'
+}
+
+func headerKeyEscape(key string) string {
+	buf := strings.Builder{}
+	for i := 0; i < len(key); i++ {
+		b := key[i]
+		if shouldEscape(b) {
+			// %-encode bytes that should be escaped:
+			// https://tools.ietf.org/html/rfc3986#section-2.1
+			fmt.Fprintf(&buf, "%%%02X", b)
+			continue
+		}
+		buf.WriteByte(b)
+	}
+	return buf.String()
+}
+
+// legalHeaderKeyBytes was copied from net/http/lex.go's isTokenTable.
+// See https://httpwg.github.io/specs/rfc7230.html#rule.token.separators
+var legalHeaderKeyBytes = [127]bool{
+	'%':  true,
+	'!':  true,
+	'#':  true,
+	'$':  true,
+	'&':  true,
+	'\'': true,
+	'*':  true,
+	'+':  true,
+	'-':  true,
+	'.':  true,
+	'0':  true,
+	'1':  true,
+	'2':  true,
+	'3':  true,
+	'4':  true,
+	'5':  true,
+	'6':  true,
+	'7':  true,
+	'8':  true,
+	'9':  true,
+	'A':  true,
+	'B':  true,
+	'C':  true,
+	'D':  true,
+	'E':  true,
+	'F':  true,
+	'G':  true,
+	'H':  true,
+	'I':  true,
+	'J':  true,
+	'K':  true,
+	'L':  true,
+	'M':  true,
+	'N':  true,
+	'O':  true,
+	'P':  true,
+	'Q':  true,
+	'R':  true,
+	'S':  true,
+	'T':  true,
+	'U':  true,
+	'W':  true,
+	'V':  true,
+	'X':  true,
+	'Y':  true,
+	'Z':  true,
+	'^':  true,
+	'_':  true,
+	'`':  true,
+	'a':  true,
+	'b':  true,
+	'c':  true,
+	'd':  true,
+	'e':  true,
+	'f':  true,
+	'g':  true,
+	'h':  true,
+	'i':  true,
+	'j':  true,
+	'k':  true,
+	'l':  true,
+	'm':  true,
+	'n':  true,
+	'o':  true,
+	'p':  true,
+	'q':  true,
+	'r':  true,
+	's':  true,
+	't':  true,
+	'u':  true,
+	'v':  true,
+	'w':  true,
+	'x':  true,
+	'y':  true,
+	'z':  true,
+	'|':  true,
+	'~':  true,
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/transport/token_source.go b/hack/tools/vendor/k8s.io/client-go/transport/token_source.go
new file mode 100644
index 0000000000..8e312800f7
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/transport/token_source.go
@@ -0,0 +1,201 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+	"fmt"
+	"net/http"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/oauth2"
+
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/klog/v2"
+)
+
+// TokenSourceWrapTransport returns a WrapTransport that injects bearer token
+// authentication from an oauth2.TokenSource.
+func TokenSourceWrapTransport(ts oauth2.TokenSource) func(http.RoundTripper) http.RoundTripper {
+	return func(rt http.RoundTripper) http.RoundTripper {
+		return &tokenSourceTransport{
+			base: rt,
+			ort: &oauth2.Transport{
+				Source: ts,
+				Base:   rt,
+			},
+		}
+	}
+}
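+
+// Example usage (an illustrative sketch; the token is hypothetical):
+//
+//	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "my-token"})
+//	rt := TokenSourceWrapTransport(ts)(http.DefaultTransport)
+//	client := &http.Client{Transport: rt}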
+
+type ResettableTokenSource interface {
+	oauth2.TokenSource
+	ResetTokenOlderThan(time.Time)
+}
+
+// ResettableTokenSourceWrapTransport returns a WrapTransport that injects bearer token
+// authentication from a ResettableTokenSource.
+func ResettableTokenSourceWrapTransport(ts ResettableTokenSource) func(http.RoundTripper) http.RoundTripper {
+	return func(rt http.RoundTripper) http.RoundTripper {
+		return &tokenSourceTransport{
+			base: rt,
+			ort: &oauth2.Transport{
+				Source: ts,
+				Base:   rt,
+			},
+			src: ts,
+		}
+	}
+}
+
+// NewCachedFileTokenSource returns a resettable token source which reads a
+// token from a file at a specified path and periodically reloads it.
+func NewCachedFileTokenSource(path string) *cachingTokenSource {
+	return &cachingTokenSource{
+		now:    time.Now,
+		leeway: 10 * time.Second,
+		base: &fileTokenSource{
+			path: path,
+			// This period was picked because it is half of the duration between when the kubelet
+			// refreshes a projected service account token and when the original token expires.
+			// Default token lifetime is 10 minutes, and the kubelet starts refreshing at 80% of lifetime.
+			// This should induce re-reading at a frequency that works with the token volume source.
+			period: time.Minute,
+		},
+	}
+}
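+
+// Example usage (an illustrative sketch; the path shown is the standard
+// projected service account token location):
+//
+//	src := NewCachedFileTokenSource("/var/run/secrets/kubernetes.io/serviceaccount/token")
+//	tok, err := src.Token() // re-reads the file at most roughly once per minute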
+
+// NewCachedTokenSource returns a resettable token source with caching. It reads
+// a token from the designated TokenSource if none is cached or the cached token has expired.
+func NewCachedTokenSource(ts oauth2.TokenSource) *cachingTokenSource {
+	return &cachingTokenSource{
+		now:  time.Now,
+		base: ts,
+	}
+}
+
+type tokenSourceTransport struct {
+	base http.RoundTripper
+	ort  http.RoundTripper
+	src  ResettableTokenSource
+}
+
+var _ utilnet.RoundTripperWrapper = &tokenSourceTransport{}
+
+func (tst *tokenSourceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	// This is to allow --token to override other bearer token providers.
+	if req.Header.Get("Authorization") != "" {
+		return tst.base.RoundTrip(req)
+	}
+	// Record the time before RoundTrip so that a token newly acquired during
+	// this request is not reset when the response is Unauthorized. Another
+	// request from the user is required to reset it and proceed.
+	start := time.Now()
+	resp, err := tst.ort.RoundTrip(req)
+	if err == nil && resp != nil && resp.StatusCode == 401 && tst.src != nil {
+		tst.src.ResetTokenOlderThan(start)
+	}
+	return resp, err
+}
+
+func (tst *tokenSourceTransport) CancelRequest(req *http.Request) {
+	if req.Header.Get("Authorization") != "" {
+		tryCancelRequest(tst.base, req)
+		return
+	}
+	tryCancelRequest(tst.ort, req)
+}
+
+func (tst *tokenSourceTransport) WrappedRoundTripper() http.RoundTripper { return tst.base }
+
+type fileTokenSource struct {
+	path   string
+	period time.Duration
+}
+
+var _ = oauth2.TokenSource(&fileTokenSource{})
+
+func (ts *fileTokenSource) Token() (*oauth2.Token, error) {
+	tokb, err := os.ReadFile(ts.path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read token file %q: %v", ts.path, err)
+	}
+	tok := strings.TrimSpace(string(tokb))
+	if len(tok) == 0 {
+		return nil, fmt.Errorf("read empty token from file %q", ts.path)
+	}
+
+	return &oauth2.Token{
+		AccessToken: tok,
+		Expiry:      time.Now().Add(ts.period),
+	}, nil
+}
+
+type cachingTokenSource struct {
+	base   oauth2.TokenSource
+	leeway time.Duration
+
+	sync.RWMutex
+	tok *oauth2.Token
+	t   time.Time
+
+	// for testing
+	now func() time.Time
+}
+
+func (ts *cachingTokenSource) Token() (*oauth2.Token, error) {
+	now := ts.now()
+	// fast path
+	ts.RLock()
+	tok := ts.tok
+	ts.RUnlock()
+
+	if tok != nil && tok.Expiry.Add(-1*ts.leeway).After(now) {
+		return tok, nil
+	}
+
+	// slow path
+	ts.Lock()
+	defer ts.Unlock()
+	if tok := ts.tok; tok != nil && tok.Expiry.Add(-1*ts.leeway).After(now) {
+		return tok, nil
+	}
+
+	tok, err := ts.base.Token()
+	if err != nil {
+		if ts.tok == nil {
+			return nil, err
+		}
+		klog.Errorf("Unable to rotate token: %v", err)
+		return ts.tok, nil
+	}
+
+	ts.t = ts.now()
+	ts.tok = tok
+	return tok, nil
+}
+
+func (ts *cachingTokenSource) ResetTokenOlderThan(t time.Time) {
+	ts.Lock()
+	defer ts.Unlock()
+	if ts.t.Before(t) {
+		ts.tok = nil
+		ts.t = time.Time{}
+	}
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/transport/transport.go b/hack/tools/vendor/k8s.io/client-go/transport/transport.go
new file mode 100644
index 0000000000..4770331a0e
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/transport/transport.go
@@ -0,0 +1,399 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"net/http"
+	"os"
+	"sync"
+	"time"
+
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/klog/v2"
+)
+
+// New returns an http.RoundTripper that will provide the authentication
+// or transport level security defined by the provided Config.
+func New(config *Config) (http.RoundTripper, error) {
+	// Set transport level security
+	if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.HasCertCallback() || config.TLS.Insecure) {
+		return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed")
+	}
+
+	if !isValidHolders(config) {
+		return nil, fmt.Errorf("misconfigured holder for dialer or cert callback")
+	}
+
+	var (
+		rt  http.RoundTripper
+		err error
+	)
+
+	if config.Transport != nil {
+		rt = config.Transport
+	} else {
+		rt, err = tlsCache.get(config)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return HTTPWrappersForConfig(config, rt)
+}
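+
+// Example usage (an illustrative sketch; the field values are hypothetical):
+//
+//	rt, err := New(&Config{
+//		UserAgent:   "example-client/v0.1",
+//		BearerToken: "my-token",
+//		TLS: TLSConfig{
+//			CAFile: "/etc/example/ca.crt",
+//		},
+//	})
+//	if err != nil {
+//		// e.g. a custom Transport was combined with TLS certificate options
+//	}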
+
+func isValidHolders(config *Config) bool {
+	if config.TLS.GetCertHolder != nil && config.TLS.GetCertHolder.GetCert == nil {
+		return false
+	}
+
+	if config.DialHolder != nil && config.DialHolder.Dial == nil {
+		return false
+	}
+
+	return true
+}
+
+// TLSConfigFor returns a tls.Config that will provide the transport level security defined
+// by the provided Config. Will return nil if no transport level security is requested.
+func TLSConfigFor(c *Config) (*tls.Config, error) {
+	if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0 || len(c.TLS.NextProtos) > 0) {
+		return nil, nil
+	}
+	if c.HasCA() && c.TLS.Insecure {
+		return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed")
+	}
+	if err := loadTLSFiles(c); err != nil {
+		return nil, err
+	}
+
+	tlsConfig := &tls.Config{
+		// Can't use SSLv3 because of POODLE and BEAST
+		// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
+		// Can't use TLSv1.1 because of RC4 cipher usage
+		MinVersion:         tls.VersionTLS12,
+		InsecureSkipVerify: c.TLS.Insecure,
+		ServerName:         c.TLS.ServerName,
+		NextProtos:         c.TLS.NextProtos,
+	}
+
+	if c.HasCA() {
+		/*
+			kubernetes mutual (2-way) x509 between client and apiserver:
+
+				1. apiserver sending its apiserver certificate along with its public key to client
+				>2. client verifies the apiserver certificate sent against its cluster certificate authority data
+				3. client sending its client certificate along with its public key to the apiserver
+				4. apiserver verifies the client certificate sent against its cluster certificate authority data
+
+				description:
+					here, with this block,
+					cluster certificate authority data gets loaded into TLS before the handshake process
+					for client to later during the handshake verify the apiserver certificate
+
+				normal args related to this stage:
+					--certificate-authority='':
+						Path to a cert file for the certificate authority
+
+					(retrievable from "kubectl options" command)
+					(suggested by @deads2k)
+
+				see also:
+					- for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go
+					- for the step 3, see: a few lines below in this file
+					- for the step 4, see: staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go
+		*/
+
+		rootCAs, err := rootCertPool(c.TLS.CAData)
+		if err != nil {
+			return nil, fmt.Errorf("unable to load root certificates: %w", err)
+		}
+		tlsConfig.RootCAs = rootCAs
+	}
+
+	var staticCert *tls.Certificate
+	// Treat cert as static if either key or cert was data, not a file
+	if c.HasCertAuth() && !c.TLS.ReloadTLSFiles {
+		// If key/cert were provided, verify them before setting up
+		// tlsConfig.GetClientCertificate.
+		cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData)
+		if err != nil {
+			return nil, err
+		}
+		staticCert = &cert
+	}
+
+	var dynamicCertLoader func() (*tls.Certificate, error)
+	if c.TLS.ReloadTLSFiles {
+		dynamicCertLoader = cachingCertificateLoader(c.TLS.CertFile, c.TLS.KeyFile)
+	}
+
+	if c.HasCertAuth() || c.HasCertCallback() {
+
+		/*
+			    kubernetes mutual (2-way) x509 between client and apiserver:
+
+					1. apiserver sending its apiserver certificate along with its public key to client
+					2. client verifies the apiserver certificate sent against its cluster certificate authority data
+					>3. client sending its client certificate along with its public key to the apiserver
+					4. apiserver verifies the client certificate sent against its cluster certificate authority data
+
+					description:
+						here, with this callback function,
+						client certificate and pub key get loaded into TLS during the handshake process
+						for apiserver to later in the step 4 verify the client certificate
+
+					normal args related to this stage:
+						--client-certificate='':
+							Path to a client certificate file for TLS
+						--client-key='':
+							Path to a client key file for TLS
+
+						(retrievable from "kubectl options" command)
+						(suggested by @deads2k)
+
+					see also:
+						- for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go
+						- for the step 2, see: a few lines above in this file
+						- for the step 4, see: staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go
+		*/
+
+		tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
+			// Note: static key/cert data always take precedence over cert
+			// callback.
+			if staticCert != nil {
+				return staticCert, nil
+			}
+			// key/cert files lead to ReloadTLSFiles being set - takes precedence over cert callback
+			if dynamicCertLoader != nil {
+				return dynamicCertLoader()
+			}
+			if c.HasCertCallback() {
+				cert, err := c.TLS.GetCertHolder.GetCert()
+				if err != nil {
+					return nil, err
+				}
+				// GetCert may return empty value, meaning no cert.
+				if cert != nil {
+					return cert, nil
+				}
+			}
+
+			// Both c.TLS.CertData/KeyData were unset and GetCert didn't return
+			// anything. Return an empty tls.Certificate, no client cert will
+			// be sent to the server.
+			return &tls.Certificate{}, nil
+		}
+	}
+
+	return tlsConfig, nil
+}
+
+// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
+// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
+// either populated or were empty to start.
+func loadTLSFiles(c *Config) error {
+	var err error
+	c.TLS.CAData, err = dataFromSliceOrFile(c.TLS.CAData, c.TLS.CAFile)
+	if err != nil {
+		return err
+	}
+
+	// Check that we are purely loading from files
+	if len(c.TLS.CertFile) > 0 && len(c.TLS.CertData) == 0 && len(c.TLS.KeyFile) > 0 && len(c.TLS.KeyData) == 0 {
+		c.TLS.ReloadTLSFiles = true
+	}
+
+	c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile)
+	if err != nil {
+		return err
+	}
+
+	c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile)
+	return err
+}
+
+// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
+// or an error if an error occurred reading the file
+func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
+	if len(data) > 0 {
+		return data, nil
+	}
+	if len(file) > 0 {
+		fileData, err := os.ReadFile(file)
+		if err != nil {
+			return []byte{}, err
+		}
+		return fileData, nil
+	}
+	return nil, nil
+}
+
+// rootCertPool returns nil if caData is empty.  When passed along, this will mean "use system CAs".
+// When caData is not empty, it will be the ONLY information used in the CertPool.
+func rootCertPool(caData []byte) (*x509.CertPool, error) {
+	// What we really want is a copy of x509.systemRootsPool, but that isn't exposed.  It's difficult to build (see the go
+	// code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values
+	// It doesn't allow trusting either/or, but hopefully that won't be an issue
+	if len(caData) == 0 {
+		return nil, nil
+	}
+
+	// if we have caData, use it
+	certPool := x509.NewCertPool()
+	if ok := certPool.AppendCertsFromPEM(caData); !ok {
+		return nil, createErrorParsingCAData(caData)
+	}
+	return certPool, nil
+}
+
+// createErrorParsingCAData ALWAYS returns an error.  We call it because we know we failed to AppendCertsFromPEM
+// but we don't know the specific error because that API is just true/false
+func createErrorParsingCAData(pemCerts []byte) error {
+	for len(pemCerts) > 0 {
+		var block *pem.Block
+		block, pemCerts = pem.Decode(pemCerts)
+		if block == nil {
+			return fmt.Errorf("unable to parse bytes as PEM block")
+		}
+
+		if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+			continue
+		}
+
+		if _, err := x509.ParseCertificate(block.Bytes); err != nil {
+			return fmt.Errorf("failed to parse certificate: %w", err)
+		}
+	}
+	return fmt.Errorf("no valid certificate authority data seen")
+}
+
+// WrapperFunc wraps an http.RoundTripper when a new transport
+// is created for a client, allowing per connection behavior
+// to be injected.
+type WrapperFunc func(rt http.RoundTripper) http.RoundTripper
+
+// Wrappers accepts any number of wrappers and returns a wrapper
+// function that is the equivalent of calling each of them in order. Nil
+// values are ignored, which makes this function convenient for incrementally
+// wrapping a function.
+func Wrappers(fns ...WrapperFunc) WrapperFunc {
+	if len(fns) == 0 {
+		return nil
+	}
+	// optimize the common case of wrapping a possibly nil transport wrapper
+	// with an additional wrapper
+	if len(fns) == 2 && fns[0] == nil {
+		return fns[1]
+	}
+	return func(rt http.RoundTripper) http.RoundTripper {
+		base := rt
+		for _, fn := range fns {
+			if fn != nil {
+				base = fn(base)
+			}
+		}
+		return base
+	}
+}
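+
+// Example usage (an illustrative sketch):
+//
+//	wrap := Wrappers(
+//		nil, // nil wrappers are skipped
+//		func(rt http.RoundTripper) http.RoundTripper {
+//			return NewUserAgentRoundTripper("example/v0.1", rt)
+//		},
+//	)
+//	rt := wrap(http.DefaultTransport)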
+
+// ContextCanceller prevents new requests after the provided context is finished.
+// err is returned when the context is closed, allowing the caller to provide a context
+// appropriate error.
+func ContextCanceller(ctx context.Context, err error) WrapperFunc {
+	return func(rt http.RoundTripper) http.RoundTripper {
+		return &contextCanceller{
+			ctx: ctx,
+			rt:  rt,
+			err: err,
+		}
+	}
+}
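+
+// Example usage (an illustrative sketch):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	rt := ContextCanceller(ctx, fmt.Errorf("client closed"))(http.DefaultTransport)
+//	cancel() // subsequent RoundTrip calls now fail immediately with "client closed"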
+
+type contextCanceller struct {
+	ctx context.Context
+	rt  http.RoundTripper
+	err error
+}
+
+func (b *contextCanceller) RoundTrip(req *http.Request) (*http.Response, error) {
+	select {
+	case <-b.ctx.Done():
+		return nil, b.err
+	default:
+		return b.rt.RoundTrip(req)
+	}
+}
+
+func tryCancelRequest(rt http.RoundTripper, req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	switch rt := rt.(type) {
+	case canceler:
+		rt.CancelRequest(req)
+	case utilnet.RoundTripperWrapper:
+		tryCancelRequest(rt.WrappedRoundTripper(), req)
+	default:
+		klog.Warningf("Unable to cancel request for %T", rt)
+	}
+}
+
+type certificateCacheEntry struct {
+	cert  *tls.Certificate
+	err   error
+	birth time.Time
+}
+
+// isStale returns true when this cache entry is too old to be usable
+func (c *certificateCacheEntry) isStale() bool {
+	return time.Since(c.birth) > time.Second
+}
+
+func newCertificateCacheEntry(certFile, keyFile string) certificateCacheEntry {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	return certificateCacheEntry{cert: &cert, err: err, birth: time.Now()}
+}
+
+// cachingCertificateLoader ensures that we don't hammer the filesystem when opening many connections
+// the underlying cert files are read at most once every second
+func cachingCertificateLoader(certFile, keyFile string) func() (*tls.Certificate, error) {
+	current := newCertificateCacheEntry(certFile, keyFile)
+	var currentMtx sync.RWMutex
+
+	return func() (*tls.Certificate, error) {
+		currentMtx.RLock()
+		if current.isStale() {
+			currentMtx.RUnlock()
+
+			currentMtx.Lock()
+			defer currentMtx.Unlock()
+
+			if current.isStale() {
+				current = newCertificateCacheEntry(certFile, keyFile)
+			}
+		} else {
+			defer currentMtx.RUnlock()
+		}
+
+		return current.cert, current.err
+	}
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/util/cert/OWNERS b/hack/tools/vendor/k8s.io/client-go/util/cert/OWNERS
new file mode 100644
index 0000000000..3c3b94c58c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/cert/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - sig-auth-certificates-approvers
+reviewers:
+  - sig-auth-certificates-reviewers
+labels:
+  - sig/auth
diff --git a/hack/tools/vendor/k8s.io/client-go/util/cert/cert.go b/hack/tools/vendor/k8s.io/client-go/util/cert/cert.go
new file mode 100644
index 0000000000..91e171271a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/cert/cert.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+	"bytes"
+	"crypto"
+	cryptorand "crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"math"
+	"math/big"
+	"net"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"k8s.io/client-go/util/keyutil"
+	netutils "k8s.io/utils/net"
+)
+
+const duration365d = time.Hour * 24 * 365
+
+// Config contains the basic fields required for creating a certificate
+type Config struct {
+	CommonName   string
+	Organization []string
+	AltNames     AltNames
+	Usages       []x509.ExtKeyUsage
+	NotBefore    time.Time
+}
+
+// AltNames contains the domain names and IP addresses that will be added
+// to the API Server's x509 certificate SubAltNames field. The values will
+// be passed directly to the x509.Certificate object.
+type AltNames struct {
+	DNSNames []string
+	IPs      []net.IP
+}
+
+// NewSelfSignedCACert creates a CA certificate
+func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) {
+	now := time.Now()
+	// cryptorand.Int returns a uniform random value in [0, max-1); adding 1 makes the serial a uniform random value in [1, max).
+	serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1))
+	if err != nil {
+		return nil, err
+	}
+	serial = new(big.Int).Add(serial, big.NewInt(1))
+	notBefore := now.UTC()
+	if !cfg.NotBefore.IsZero() {
+		notBefore = cfg.NotBefore.UTC()
+	}
+	tmpl := x509.Certificate{
+		SerialNumber: serial,
+		Subject: pkix.Name{
+			CommonName:   cfg.CommonName,
+			Organization: cfg.Organization,
+		},
+		DNSNames:              []string{cfg.CommonName},
+		NotBefore:             notBefore,
+		NotAfter:              now.Add(duration365d * 10).UTC(),
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+		BasicConstraintsValid: true,
+		IsCA:                  true,
+	}
+
+	certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
+	if err != nil {
+		return nil, err
+	}
+	return x509.ParseCertificate(certDERBytes)
+}
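+
+// Example usage (an illustrative sketch; any crypto.Signer works, RSA shown):
+//
+//	key, err := rsa.GenerateKey(cryptorand.Reader, 2048)
+//	if err != nil {
+//		// handle error
+//	}
+//	caCert, err := NewSelfSignedCACert(Config{CommonName: "example-ca"}, key)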
+
+// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host.
+// Host may be an IP or a DNS name.
+// You may also specify additional subject alt names (either IP or DNS names) for the certificate.
+func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) {
+	return GenerateSelfSignedCertKeyWithFixtures(host, alternateIPs, alternateDNS, "")
+}
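+
+// Example usage (an illustrative sketch; the SANs are hypothetical):
+//
+//	certPEM, keyPEM, err := GenerateSelfSignedCertKey(
+//		"localhost",
+//		[]net.IP{net.ParseIP("127.0.0.1")},
+//		[]string{"example.local"},
+//	)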
+
+// GenerateSelfSignedCertKeyWithFixtures creates a self-signed certificate and key for the given host.
+// Host may be an IP or a DNS name. You may also specify additional subject alt names (either IP or DNS names)
+// for the certificate.
+//
+// If fixtureDirectory is non-empty, it is a directory path which can contain pre-generated certs. The format is:
+// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.crt
+// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.key
+// Certs/keys not existing in that directory are created.
+func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, alternateDNS []string, fixtureDirectory string) ([]byte, []byte, error) {
+	validFrom := time.Now().Add(-time.Hour) // valid an hour earlier to avoid flakes due to clock skew
+	maxAge := time.Hour * 24 * 365          // one year self-signed certs
+
+	baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-"))
+	certFixturePath := filepath.Join(fixtureDirectory, baseName+".crt")
+	keyFixturePath := filepath.Join(fixtureDirectory, baseName+".key")
+	if len(fixtureDirectory) > 0 {
+		cert, err := os.ReadFile(certFixturePath)
+		if err == nil {
+			key, err := os.ReadFile(keyFixturePath)
+			if err == nil {
+				return cert, key, nil
+			}
+			return nil, nil, fmt.Errorf("cert %s can be read, but key %s cannot: %v", certFixturePath, keyFixturePath, err)
+		}
+		maxAge = 100 * time.Hour * 24 * 365 // 100 years fixtures
+	}
+
+	caKey, err := rsa.GenerateKey(cryptorand.Reader, 2048)
+	if err != nil {
+		return nil, nil, err
+	}
+	// cryptorand.Int returns a uniform random value in [0, max-1); adding 1 makes the serial a uniform random value in [1, max).
+	serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1))
+	if err != nil {
+		return nil, nil, err
+	}
+	serial = new(big.Int).Add(serial, big.NewInt(1))
+	caTemplate := x509.Certificate{
+		SerialNumber: serial,
+		Subject: pkix.Name{
+			CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()),
+		},
+		NotBefore: validFrom,
+		NotAfter:  validFrom.Add(maxAge),
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+		BasicConstraintsValid: true,
+		IsCA:                  true,
+	}
+
+	caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	caCertificate, err := x509.ParseCertificate(caDERBytes)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	priv, err := rsa.GenerateKey(cryptorand.Reader, 2048)
+	if err != nil {
+		return nil, nil, err
+	}
+	// cryptorand.Int returns a uniform random value in [0, max-1); adding 1 makes the serial a uniform random value in [1, max).
+	serial, err = cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1))
+	if err != nil {
+		return nil, nil, err
+	}
+	serial = new(big.Int).Add(serial, big.NewInt(1))
+	template := x509.Certificate{
+		SerialNumber: serial,
+		Subject: pkix.Name{
+			CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()),
+		},
+		NotBefore: validFrom,
+		NotAfter:  validFrom.Add(maxAge),
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+	}
+
+	if ip := netutils.ParseIPSloppy(host); ip != nil {
+		template.IPAddresses = append(template.IPAddresses, ip)
+	} else {
+		template.DNSNames = append(template.DNSNames, host)
+	}
+
+	template.IPAddresses = append(template.IPAddresses, alternateIPs...)
+	template.DNSNames = append(template.DNSNames, alternateDNS...)
+
+	derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, caCertificate, &priv.PublicKey, caKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Generate cert, followed by ca
+	certBuffer := bytes.Buffer{}
+	if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil {
+		return nil, nil, err
+	}
+	if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: caDERBytes}); err != nil {
+		return nil, nil, err
+	}
+
+	// Generate key
+	keyBuffer := bytes.Buffer{}
+	if err := pem.Encode(&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
+		return nil, nil, err
+	}
+
+	if len(fixtureDirectory) > 0 {
+		if err := os.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil {
+			return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err)
+		}
+		if err := os.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0600); err != nil {
+			return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", keyFixturePath, err)
+		}
+	}
+
+	return certBuffer.Bytes(), keyBuffer.Bytes(), nil
+}
+
+func ipsToStrings(ips []net.IP) []string {
+	ss := make([]string, 0, len(ips))
+	for _, ip := range ips {
+		ss = append(ss, ip.String())
+	}
+	return ss
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/util/cert/csr.go b/hack/tools/vendor/k8s.io/client-go/util/cert/csr.go
new file mode 100644
index 0000000000..39a6751f70
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/cert/csr.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+	cryptorand "crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"net"
+)
+
+// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs.
+// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.)
+func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) {
+	template := &x509.CertificateRequest{
+		Subject:     *subject,
+		DNSNames:    dnsSANs,
+		IPAddresses: ipSANs,
+	}
+
+	return MakeCSRFromTemplate(privateKey, template)
+}
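+
+// Example usage (an illustrative sketch; the subject and SAN are hypothetical):
+//
+//	key, err := rsa.GenerateKey(cryptorand.Reader, 2048)
+//	if err != nil {
+//		// handle error
+//	}
+//	csrPEM, err := MakeCSR(key, &pkix.Name{CommonName: "jane.doe"}, []string{"example.local"}, nil)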
+
+// MakeCSRFromTemplate generates a PEM-encoded CSR using the supplied private
+// key and certificate request as a template. All key types that are
+// implemented via crypto.Signer are supported (This includes *rsa.PrivateKey
+// and *ecdsa.PrivateKey.)
+func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateRequest) ([]byte, error) {
+	t := *template
+	t.SignatureAlgorithm = sigType(privateKey)
+
+	csrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, &t, privateKey)
+	if err != nil {
+		return nil, err
+	}
+
+	csrPemBlock := &pem.Block{
+		Type:  CertificateRequestBlockType,
+		Bytes: csrDER,
+	}
+
+	return pem.EncodeToMemory(csrPemBlock), nil
+}
+
+func sigType(privateKey interface{}) x509.SignatureAlgorithm {
+	// Customize the signature for RSA keys, depending on the key size
+	if privateKey, ok := privateKey.(*rsa.PrivateKey); ok {
+		keySize := privateKey.N.BitLen()
+		switch {
+		case keySize >= 4096:
+			return x509.SHA512WithRSA
+		case keySize >= 3072:
+			return x509.SHA384WithRSA
+		default:
+			return x509.SHA256WithRSA
+		}
+	}
+	return x509.UnknownSignatureAlgorithm
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/util/cert/io.go b/hack/tools/vendor/k8s.io/client-go/util/cert/io.go
new file mode 100644
index 0000000000..a70e513271
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/cert/io.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+	"crypto/x509"
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+// CanReadCertAndKey returns true if the certificate and key files already exist,
+// otherwise returns false. If only one of the cert and key is readable, an error is returned.
+func CanReadCertAndKey(certPath, keyPath string) (bool, error) {
+	certReadable := canReadFile(certPath)
+	keyReadable := canReadFile(keyPath)
+
+	if !certReadable && !keyReadable {
+		return false, nil
+	}
+
+	if !certReadable {
+		return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath)
+	}
+
+	if !keyReadable {
+		return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath)
+	}
+
+	return true, nil
+}
+
+// canReadFile returns true if the file represented by path exists and is
+// readable, otherwise it returns false.
+func canReadFile(path string) bool {
+	f, err := os.Open(path)
+	if err != nil {
+		return false
+	}
+
+	defer f.Close()
+
+	return true
+}
+
+// WriteCert writes the pem-encoded certificate data to certPath.
+// The certificate file will be created with file mode 0644.
+// If the certificate file already exists, it will be overwritten.
+// The parent directory of the certPath will be created as needed with file mode 0755.
+func WriteCert(certPath string, data []byte) error {
+	if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil {
+		return err
+	}
+	return os.WriteFile(certPath, data, os.FileMode(0644))
+}
+
+// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file.
+// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
+func NewPool(filename string) (*x509.CertPool, error) {
+	pemBlock, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	pool, err := NewPoolFromBytes(pemBlock)
+	if err != nil {
+		return nil, fmt.Errorf("error creating pool from %s: %s", filename, err)
+	}
+	return pool, nil
+}
+
+// NewPoolFromBytes returns an x509.CertPool containing the certificates in the given PEM-encoded bytes.
+// Returns an error if a certificate could not be parsed, or if the bytes do not contain any certificates
+func NewPoolFromBytes(pemBlock []byte) (*x509.CertPool, error) {
+	certs, err := ParseCertsPEM(pemBlock)
+	if err != nil {
+		return nil, err
+	}
+	pool := x509.NewCertPool()
+	for _, cert := range certs {
+		pool.AddCert(cert)
+	}
+	return pool, nil
+}
+
+// CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file.
+// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
+func CertsFromFile(file string) ([]*x509.Certificate, error) {
+	pemBlock, err := os.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+	certs, err := ParseCertsPEM(pemBlock)
+	if err != nil {
+		return nil, fmt.Errorf("error reading %s: %s", file, err)
+	}
+	return certs, nil
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/util/cert/pem.go b/hack/tools/vendor/k8s.io/client-go/util/cert/pem.go
new file mode 100644
index 0000000000..c77512315a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/cert/pem.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+	"bytes"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+const (
+	// CertificateBlockType is a possible value for pem.Block.Type.
+	CertificateBlockType = "CERTIFICATE"
+	// CertificateRequestBlockType is a possible value for pem.Block.Type.
+	CertificateRequestBlockType = "CERTIFICATE REQUEST"
+)
+
+// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array
+// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates
+func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
+	ok := false
+	certs := []*x509.Certificate{}
+	for len(pemCerts) > 0 {
+		var block *pem.Block
+		block, pemCerts = pem.Decode(pemCerts)
+		if block == nil {
+			break
+		}
+		// Only use PEM "CERTIFICATE" blocks without extra headers
+		if block.Type != CertificateBlockType || len(block.Headers) != 0 {
+			continue
+		}
+
+		cert, err := x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return certs, err
+		}
+
+		certs = append(certs, cert)
+		ok = true
+	}
+
+	if !ok {
+		return certs, errors.New("data does not contain any valid RSA or ECDSA certificates")
+	}
+	return certs, nil
+}
+
+// EncodeCertificates returns the PEM-encoded byte array that represents the specified certs.
+func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) {
+	b := bytes.Buffer{}
+	for _, cert := range certs {
+		if err := pem.Encode(&b, &pem.Block{Type: CertificateBlockType, Bytes: cert.Raw}); err != nil {
+			return []byte{}, err
+		}
+	}
+	return b.Bytes(), nil
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/util/cert/server_inspection.go b/hack/tools/vendor/k8s.io/client-go/util/cert/server_inspection.go
new file mode 100644
index 0000000000..f1ef292dee
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/cert/server_inspection.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cert
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"net/url"
+	"strings"
+)
+
+// GetClientCANames gets the CA names for client certs that a server accepts.  This is useful when inspecting the
+// state of particular servers.  apiHost is "host:port"
+func GetClientCANames(apiHost string) ([]string, error) {
+	// when we run this the second time, we know which one we are expecting
+	acceptableCAs := []string{}
+	tlsConfig := &tls.Config{
+		InsecureSkipVerify: true, // this is insecure to always get to the GetClientCertificate
+		GetClientCertificate: func(hello *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+			acceptableCAs = []string{}
+			for _, curr := range hello.AcceptableCAs {
+				acceptableCAs = append(acceptableCAs, string(curr))
+			}
+			return &tls.Certificate{}, nil
+		},
+	}
+
+	conn, err := tls.Dial("tcp", apiHost, tlsConfig)
+	if err != nil {
+		return nil, err
+	}
+	if err := conn.Close(); err != nil {
+		return nil, err
+	}
+
+	return acceptableCAs, nil
+}
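+
+// Example usage (an illustrative sketch; the host:port is hypothetical):
+//
+//	names, err := GetClientCANames("api.example.cluster:6443")
+//	// names holds the DER-encoded distinguished names of the CAs the server accepts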
+
+// GetClientCANamesForURL is GetClientCANames against a URL string like we use in kubeconfigs
+func GetClientCANamesForURL(kubeConfigURL string) ([]string, error) {
+	apiserverURL, err := url.Parse(kubeConfigURL)
+	if err != nil {
+		return nil, err
+	}
+	return GetClientCANames(apiserverURL.Host)
+}
+
+// GetServingCertificates returns the x509 certs used by a server as certificates and pem encoded bytes.
+// The serverName is optional for specifying a different name to get SNI certificates.  apiHost is "host:port"
+func GetServingCertificates(apiHost, serverName string) ([]*x509.Certificate, [][]byte, error) {
+	tlsConfig := &tls.Config{
+		InsecureSkipVerify: true, // this is insecure so that we always get connected
+	}
+	// if a name is specified for SNI, set it.
+	if len(serverName) > 0 {
+		tlsConfig.ServerName = serverName
+	}
+
+	conn, err := tls.Dial("tcp", apiHost, tlsConfig)
+	if err != nil {
+		return nil, nil, err
+	}
+	if err = conn.Close(); err != nil {
+		return nil, nil, fmt.Errorf("failed to close connection: %v", err)
+	}
+
+	peerCerts := conn.ConnectionState().PeerCertificates
+	peerCertBytes := [][]byte{}
+	for _, a := range peerCerts {
+		actualCert, err := EncodeCertificates(a)
+		if err != nil {
+			return nil, nil, err
+		}
+		peerCertBytes = append(peerCertBytes, []byte(strings.TrimSpace(string(actualCert))))
+	}
+
+	return peerCerts, peerCertBytes, err
+}
+
+// GetServingCertificatesForURL is GetServingCertificates against a URL string like we use in kubeconfigs
+func GetServingCertificatesForURL(kubeConfigURL, serverName string) ([]*x509.Certificate, [][]byte, error) {
+	apiserverURL, err := url.Parse(kubeConfigURL)
+	if err != nil {
+		return nil, nil, err
+	}
+	return GetServingCertificates(apiserverURL.Host, serverName)
+}
diff --git a/hack/tools/vendor/k8s.io/client-go/util/connrotation/connrotation.go b/hack/tools/vendor/k8s.io/client-go/util/connrotation/connrotation.go
new file mode 100644
index 0000000000..2b9bf72bde
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/connrotation/connrotation.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package connrotation implements a connection dialer that tracks and can close
+// all created connections.
+//
+// This is used for credential rotation of long-lived connections, when there's
+// no way to re-authenticate on a live connection.
+package connrotation
+
+import (
+	"context"
+	"net"
+	"sync"
+)
+
+// DialFunc is a shorthand for signature of net.DialContext.
+type DialFunc func(ctx context.Context, network, address string) (net.Conn, error)
+
+// Dialer opens connections through Dial and tracks them.
+type Dialer struct {
+	dial DialFunc
+	*ConnectionTracker
+}
+
+// NewDialer creates a new Dialer instance.
+// Equivalent to NewDialerWithTracker(dial, nil).
+func NewDialer(dial DialFunc) *Dialer {
+	return NewDialerWithTracker(dial, nil)
+}
+
+// NewDialerWithTracker creates a new Dialer instance.
+//
+// If dial is not nil, it will be used to create new underlying connections.
+// Otherwise net.DialContext is used.
+// If tracker is not nil, it will be used to track new underlying connections.
+// Otherwise NewConnectionTracker() is used.
+func NewDialerWithTracker(dial DialFunc, tracker *ConnectionTracker) *Dialer {
+	if tracker == nil {
+		tracker = NewConnectionTracker()
+	}
+	return &Dialer{
+		dial:              dial,
+		ConnectionTracker: tracker,
+	}
+}
+
+// ConnectionTracker keeps track of opened connections
+type ConnectionTracker struct {
+	mu    sync.Mutex
+	conns map[*closableConn]struct{}
+}
+
+// NewConnectionTracker returns a connection tracker for use with NewDialerWithTracker
+func NewConnectionTracker() *ConnectionTracker {
+	return &ConnectionTracker{
+		conns: make(map[*closableConn]struct{}),
+	}
+}
+
+// CloseAll forcibly closes all tracked connections.
+//
+// Note: new connections may get created before CloseAll returns.
+func (c *ConnectionTracker) CloseAll() {
+	c.mu.Lock()
+	conns := c.conns
+	c.conns = make(map[*closableConn]struct{})
+	c.mu.Unlock()
+
+	for conn := range conns {
+		conn.Close()
+	}
+}
+
+// Track adds the connection to the list of tracked connections,
+// and returns a wrapped copy of the connection that stops tracking the connection
+// when it is closed.
+func (c *ConnectionTracker) Track(conn net.Conn) net.Conn {
+	closable := &closableConn{Conn: conn}
+
+	// When the connection is closed, remove it from the map. This will
+	// be no-op if the connection isn't in the map, e.g. if CloseAll()
+	// is called.
+	closable.onClose = func() {
+		c.mu.Lock()
+		delete(c.conns, closable)
+		c.mu.Unlock()
+	}
+
+	// Start tracking the connection
+	c.mu.Lock()
+	c.conns[closable] = struct{}{}
+	c.mu.Unlock()
+
+	return closable
+}
+
+// Dial creates a new tracked connection.
+func (d *Dialer) Dial(network, address string) (net.Conn, error) {
+	return d.DialContext(context.Background(), network, address)
+}
+
+// DialContext creates a new tracked connection.
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	conn, err := d.dial(ctx, network, address)
+	if err != nil {
+		return nil, err
+	}
+	return d.ConnectionTracker.Track(conn), nil
+}
+
+type closableConn struct {
+	onClose func()
+	net.Conn
+}
+
+func (c *closableConn) Close() error {
+	go c.onClose()
+	return c.Conn.Close()
+}
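
A sketch of the intended use: route an HTTP client's connections through the tracking dialer, then call CloseAll after a credential rotation so every long-lived connection is re-established (and re-authenticated) on next use. The endpoint is a placeholder.

package main

import (
	"log"
	"net"
	"net/http"
	"time"

	"k8s.io/client-go/util/connrotation"
)

func main() {
	// Wrap the standard dialer so every new connection is tracked.
	dialer := connrotation.NewDialer((&net.Dialer{Timeout: 30 * time.Second}).DialContext)

	client := &http.Client{
		Transport: &http.Transport{DialContext: dialer.DialContext},
	}

	resp, err := client.Get("https://example.com") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// After rotating client certificates or tokens, drop every live
	// connection; subsequent requests dial fresh with the new credentials.
	dialer.CloseAll()
}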
diff --git a/hack/tools/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/hack/tools/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
new file mode 100644
index 0000000000..899b8e34ef
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/flowcontrol/backoff.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flowcontrol
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+
+	"k8s.io/utils/clock"
+	testingclock "k8s.io/utils/clock/testing"
+)
+
+type backoffEntry struct {
+	backoff    time.Duration
+	lastUpdate time.Time
+}
+
+type Backoff struct {
+	sync.RWMutex
+	Clock clock.Clock
+	// HasExpiredFunc controls the logic that determines whether the backoff
+	// counter should be reset, and when to GC old backoff entries. If nil, the
+	// default hasExpired function resets the backoff counter once at least
+	// 2*maxDuration has elapsed since the last update.
+	HasExpiredFunc  func(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool
+	defaultDuration time.Duration
+	maxDuration     time.Duration
+	perItemBackoff  map[string]*backoffEntry
+	rand            *rand.Rand
+
+	// maxJitterFactor adds jitter to the exponentially backed off delay.
+	// if maxJitterFactor is zero, no jitter is added to the delay in
+	// order to maintain current behavior.
+	maxJitterFactor float64
+}
+
+func NewFakeBackOff(initial, max time.Duration, tc *testingclock.FakeClock) *Backoff {
+	return newBackoff(tc, initial, max, 0.0)
+}
+
+func NewBackOff(initial, max time.Duration) *Backoff {
+	return NewBackOffWithJitter(initial, max, 0.0)
+}
+
+func NewFakeBackOffWithJitter(initial, max time.Duration, tc *testingclock.FakeClock, maxJitterFactor float64) *Backoff {
+	return newBackoff(tc, initial, max, maxJitterFactor)
+}
+
+func NewBackOffWithJitter(initial, max time.Duration, maxJitterFactor float64) *Backoff {
+	clock := clock.RealClock{}
+	return newBackoff(clock, initial, max, maxJitterFactor)
+}
+
+func newBackoff(clock clock.Clock, initial, max time.Duration, maxJitterFactor float64) *Backoff {
+	var random *rand.Rand
+	if maxJitterFactor > 0 {
+		random = rand.New(rand.NewSource(clock.Now().UnixNano()))
+	}
+	return &Backoff{
+		perItemBackoff:  map[string]*backoffEntry{},
+		Clock:           clock,
+		defaultDuration: initial,
+		maxDuration:     max,
+		maxJitterFactor: maxJitterFactor,
+		rand:            random,
+	}
+}
+
+// Get the current backoff Duration
+func (p *Backoff) Get(id string) time.Duration {
+	p.RLock()
+	defer p.RUnlock()
+	var delay time.Duration
+	entry, ok := p.perItemBackoff[id]
+	if ok {
+		delay = entry.backoff
+	}
+	return delay
+}
+
+// move backoff to the next mark, capping at maxDuration
+func (p *Backoff) Next(id string, eventTime time.Time) {
+	p.Lock()
+	defer p.Unlock()
+	entry, ok := p.perItemBackoff[id]
+	if !ok || p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+		entry = p.initEntryUnsafe(id)
+		entry.backoff += p.jitter(entry.backoff)
+	} else {
+		delay := entry.backoff * 2       // exponential
+		delay += p.jitter(entry.backoff) // add some jitter to the delay
+		entry.backoff = min(delay, p.maxDuration)
+	}
+	entry.lastUpdate = p.Clock.Now()
+}
+
+// Reset forces clearing of all backoff data for a given key.
+func (p *Backoff) Reset(id string) {
+	p.Lock()
+	defer p.Unlock()
+	delete(p.perItemBackoff, id)
+}
+
+// IsInBackOffSince returns true if the elapsed time since eventTime is smaller than the current backoff window.
+func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
+	p.RLock()
+	defer p.RUnlock()
+	entry, ok := p.perItemBackoff[id]
+	if !ok {
+		return false
+	}
+	if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+		return false
+	}
+	return p.Clock.Since(eventTime) < entry.backoff
+}
+
+// IsInBackOffSinceUpdate returns true if the time since lastUpdate is less than the current backoff window.
+func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
+	p.RLock()
+	defer p.RUnlock()
+	entry, ok := p.perItemBackoff[id]
+	if !ok {
+		return false
+	}
+	if p.hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
+		return false
+	}
+	return eventTime.Sub(entry.lastUpdate) < entry.backoff
+}
+
+// GC garbage collects records that have aged past their expiration, which defaults
+// to 2*maxDuration (see hasExpired godoc). Backoff users are expected to invoke
+// this periodically.
+func (p *Backoff) GC() {
+	p.Lock()
+	defer p.Unlock()
+	now := p.Clock.Now()
+	for id, entry := range p.perItemBackoff {
+		if p.hasExpired(now, entry.lastUpdate, p.maxDuration) {
+			delete(p.perItemBackoff, id)
+		}
+	}
+}
+
+func (p *Backoff) DeleteEntry(id string) {
+	p.Lock()
+	defer p.Unlock()
+	delete(p.perItemBackoff, id)
+}
+
+// Take a lock on *Backoff, before calling initEntryUnsafe
+func (p *Backoff) initEntryUnsafe(id string) *backoffEntry {
+	entry := &backoffEntry{backoff: p.defaultDuration}
+	p.perItemBackoff[id] = entry
+	return entry
+}
+
+func (p *Backoff) jitter(delay time.Duration) time.Duration {
+	if p.rand == nil {
+		return 0
+	}
+
+	return time.Duration(p.rand.Float64() * p.maxJitterFactor * float64(delay))
+}
+
+// Unless an alternate function is provided, after 2*maxDuration we restart the backoff factor to the beginning
+func (p *Backoff) hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool {
+	if p.HasExpiredFunc != nil {
+		return p.HasExpiredFunc(eventTime, lastUpdate, maxDuration)
+	}
+	return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration
+}
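
Next doubles an item's delay on each call (plus optional jitter), capping at maxDuration; once 2*maxDuration elapses without an update, hasExpired resets the item to the initial delay. A small sketch of the resulting schedule, with a hypothetical item key and illustrative durations:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Per-item exponential backoff: 1s initial delay, capped at 8s.
	b := flowcontrol.NewBackOff(1*time.Second, 8*time.Second)

	for i := 1; i <= 5; i++ {
		b.Next("my-item", b.Clock.Now())
		fmt.Printf("failure %d: wait %v\n", i, b.Get("my-item"))
	}
	// Prints 1s, 2s, 4s, 8s, 8s — no jitter was configured.

	b.Reset("my-item") // clear the entry once the operation succeeds
}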
diff --git a/hack/tools/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/hack/tools/vendor/k8s.io/client-go/util/flowcontrol/throttle.go
new file mode 100644
index 0000000000..af7abd898f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/flowcontrol/throttle.go
@@ -0,0 +1,192 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flowcontrol
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	"golang.org/x/time/rate"
+	"k8s.io/utils/clock"
+)
+
+type PassiveRateLimiter interface {
+	// TryAccept returns true if a token is taken immediately. Otherwise,
+	// it returns false.
+	TryAccept() bool
+	// Stop stops the rate limiter; subsequent calls to TryAccept will return false
+	Stop()
+	// QPS returns QPS of this rate limiter
+	QPS() float32
+}
+
+type RateLimiter interface {
+	PassiveRateLimiter
+	// Accept returns once a token becomes available.
+	Accept()
+	// Wait returns nil if a token is taken before the Context is done.
+	Wait(ctx context.Context) error
+}
+
+type tokenBucketPassiveRateLimiter struct {
+	limiter *rate.Limiter
+	qps     float32
+	clock   clock.PassiveClock
+}
+
+type tokenBucketRateLimiter struct {
+	tokenBucketPassiveRateLimiter
+	clock Clock
+}
+
+// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach.
+// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a
+// smoothed qps rate of 'qps'.
+// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'.
+// The maximum number of tokens in the bucket is capped at 'burst'.
+func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter {
+	limiter := rate.NewLimiter(rate.Limit(qps), burst)
+	return newTokenBucketRateLimiterWithClock(limiter, clock.RealClock{}, qps)
+}
+
+// NewTokenBucketPassiveRateLimiter is similar to NewTokenBucketRateLimiter except that it returns
+// a PassiveRateLimiter which does not have Accept() and Wait() methods.
+func NewTokenBucketPassiveRateLimiter(qps float32, burst int) PassiveRateLimiter {
+	limiter := rate.NewLimiter(rate.Limit(qps), burst)
+	return newTokenBucketRateLimiterWithPassiveClock(limiter, clock.RealClock{}, qps)
+}
+
+// An injectable, mockable clock interface.
+type Clock interface {
+	clock.PassiveClock
+	Sleep(time.Duration)
+}
+
+var _ Clock = (*clock.RealClock)(nil)
+
+// NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter
+// but allows an injectable clock, for testing.
+func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter {
+	limiter := rate.NewLimiter(rate.Limit(qps), burst)
+	return newTokenBucketRateLimiterWithClock(limiter, c, qps)
+}
+
+// NewTokenBucketPassiveRateLimiterWithClock is similar to NewTokenBucketRateLimiterWithClock
+// except that it returns a PassiveRateLimiter which does not have Accept() and Wait() methods
+// and uses a PassiveClock.
+func NewTokenBucketPassiveRateLimiterWithClock(qps float32, burst int, c clock.PassiveClock) PassiveRateLimiter {
+	limiter := rate.NewLimiter(rate.Limit(qps), burst)
+	return newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps)
+}
+
+func newTokenBucketRateLimiterWithClock(limiter *rate.Limiter, c Clock, qps float32) *tokenBucketRateLimiter {
+	return &tokenBucketRateLimiter{
+		tokenBucketPassiveRateLimiter: *newTokenBucketRateLimiterWithPassiveClock(limiter, c, qps),
+		clock:                         c,
+	}
+}
+
+func newTokenBucketRateLimiterWithPassiveClock(limiter *rate.Limiter, c clock.PassiveClock, qps float32) *tokenBucketPassiveRateLimiter {
+	return &tokenBucketPassiveRateLimiter{
+		limiter: limiter,
+		qps:     qps,
+		clock:   c,
+	}
+}
+
+func (tbprl *tokenBucketPassiveRateLimiter) Stop() {
+}
+
+func (tbprl *tokenBucketPassiveRateLimiter) QPS() float32 {
+	return tbprl.qps
+}
+
+func (tbprl *tokenBucketPassiveRateLimiter) TryAccept() bool {
+	return tbprl.limiter.AllowN(tbprl.clock.Now(), 1)
+}
+
+// Accept will block until a token becomes available
+func (tbrl *tokenBucketRateLimiter) Accept() {
+	now := tbrl.clock.Now()
+	tbrl.clock.Sleep(tbrl.limiter.ReserveN(now, 1).DelayFrom(now))
+}
+
+func (tbrl *tokenBucketRateLimiter) Wait(ctx context.Context) error {
+	return tbrl.limiter.Wait(ctx)
+}
+
+type fakeAlwaysRateLimiter struct{}
+
+func NewFakeAlwaysRateLimiter() RateLimiter {
+	return &fakeAlwaysRateLimiter{}
+}
+
+func (t *fakeAlwaysRateLimiter) TryAccept() bool {
+	return true
+}
+
+func (t *fakeAlwaysRateLimiter) Stop() {}
+
+func (t *fakeAlwaysRateLimiter) Accept() {}
+
+func (t *fakeAlwaysRateLimiter) QPS() float32 {
+	return 1
+}
+
+func (t *fakeAlwaysRateLimiter) Wait(ctx context.Context) error {
+	return nil
+}
+
+type fakeNeverRateLimiter struct {
+	wg sync.WaitGroup
+}
+
+func NewFakeNeverRateLimiter() RateLimiter {
+	rl := fakeNeverRateLimiter{}
+	rl.wg.Add(1)
+	return &rl
+}
+
+func (t *fakeNeverRateLimiter) TryAccept() bool {
+	return false
+}
+
+func (t *fakeNeverRateLimiter) Stop() {
+	t.wg.Done()
+}
+
+func (t *fakeNeverRateLimiter) Accept() {
+	t.wg.Wait()
+}
+
+func (t *fakeNeverRateLimiter) QPS() float32 {
+	return 1
+}
+
+func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error {
+	return errors.New("cannot accept")
+}
+
+var (
+	_ RateLimiter = (*tokenBucketRateLimiter)(nil)
+	_ RateLimiter = (*fakeAlwaysRateLimiter)(nil)
+	_ RateLimiter = (*fakeNeverRateLimiter)(nil)
+)
+
+var _ PassiveRateLimiter = (*tokenBucketPassiveRateLimiter)(nil)
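
A brief sketch of the token-bucket behavior: the bucket starts full, so the first burst of calls is admitted immediately, after which Accept blocks just long enough to hold the sustained QPS. Wait is the context-aware variant. The numbers are illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Sustained 2 requests/second, bursts of up to 5.
	rl := flowcontrol.NewTokenBucketRateLimiter(2, 5)

	for i := 0; i < 10; i++ {
		rl.Accept() // first 5 return immediately, the rest pace at ~500ms
		fmt.Println("request", i, "at", time.Now().Format("15:04:05.000"))
	}

	// Context-aware variant: give up if no token arrives before the deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	if err := rl.Wait(ctx); err != nil {
		fmt.Println("rate limited past deadline:", err)
	}
}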
diff --git a/hack/tools/vendor/k8s.io/client-go/util/homedir/homedir.go b/hack/tools/vendor/k8s.io/client-go/util/homedir/homedir.go
new file mode 100644
index 0000000000..3fdbeb8cf1
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/homedir/homedir.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package homedir
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+// HomeDir returns the home directory for the current user.
+// On Windows:
+// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned.
+// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned.
+// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned.
+// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned.
+func HomeDir() string {
+	if runtime.GOOS == "windows" {
+		home := os.Getenv("HOME")
+		homeDriveHomePath := ""
+		if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
+			homeDriveHomePath = homeDrive + homePath
+		}
+		userProfile := os.Getenv("USERPROFILE")
+
+		// Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file.
+		// %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility.
+		for _, p := range []string{home, homeDriveHomePath, userProfile} {
+			if len(p) == 0 {
+				continue
+			}
+			if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil {
+				continue
+			}
+			return p
+		}
+
+		firstSetPath := ""
+		firstExistingPath := ""
+
+		// Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools
+		for _, p := range []string{home, userProfile, homeDriveHomePath} {
+			if len(p) == 0 {
+				continue
+			}
+			if len(firstSetPath) == 0 {
+				// remember the first path that is set
+				firstSetPath = p
+			}
+			info, err := os.Stat(p)
+			if err != nil {
+				continue
+			}
+			if len(firstExistingPath) == 0 {
+				// remember the first path that exists
+				firstExistingPath = p
+			}
+			if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 {
+				// return first path that is writeable
+				return p
+			}
+		}
+
+		// If none are writeable, return first location that exists
+		if len(firstExistingPath) > 0 {
+			return firstExistingPath
+		}
+
+		// If none exist, return first location that is set
+		if len(firstSetPath) > 0 {
+			return firstSetPath
+		}
+
+		// We've got nothing
+		return ""
+	}
+	return os.Getenv("HOME")
+}
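
The usual consumer of this helper is kubeconfig resolution; a one-line sketch:

package main

import (
	"fmt"
	"path/filepath"

	"k8s.io/client-go/util/homedir"
)

func main() {
	// Derive the conventional default kubeconfig location.
	fmt.Println(filepath.Join(homedir.HomeDir(), ".kube", "config"))
}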
diff --git a/hack/tools/vendor/k8s.io/client-go/util/keyutil/OWNERS b/hack/tools/vendor/k8s.io/client-go/util/keyutil/OWNERS
new file mode 100644
index 0000000000..e6d229d5db
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/keyutil/OWNERS
@@ -0,0 +1,6 @@
+approvers:
+  - sig-auth-certificates-approvers
+reviewers:
+  - sig-auth-certificates-reviewers
+labels:
+  - sig/auth
diff --git a/hack/tools/vendor/k8s.io/client-go/util/keyutil/key.go b/hack/tools/vendor/k8s.io/client-go/util/keyutil/key.go
new file mode 100644
index 0000000000..ecd3e4710f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/keyutil/key.go
@@ -0,0 +1,322 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package keyutil contains utilities for managing public/private key pairs.
+package keyutil
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	cryptorand "crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+const (
+	// ECPrivateKeyBlockType is a possible value for pem.Block.Type.
+	ECPrivateKeyBlockType = "EC PRIVATE KEY"
+	// RSAPrivateKeyBlockType is a possible value for pem.Block.Type.
+	RSAPrivateKeyBlockType = "RSA PRIVATE KEY"
+	// PrivateKeyBlockType is a possible value for pem.Block.Type.
+	PrivateKeyBlockType = "PRIVATE KEY"
+	// PublicKeyBlockType is a possible value for pem.Block.Type.
+	PublicKeyBlockType = "PUBLIC KEY"
+)
+
+// MakeEllipticPrivateKeyPEM creates an ECDSA private key
+func MakeEllipticPrivateKeyPEM() ([]byte, error) {
+	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
+	if err != nil {
+		return nil, err
+	}
+
+	derBytes, err := x509.MarshalECPrivateKey(privateKey)
+	if err != nil {
+		return nil, err
+	}
+
+	privateKeyPemBlock := &pem.Block{
+		Type:  ECPrivateKeyBlockType,
+		Bytes: derBytes,
+	}
+	return pem.EncodeToMemory(privateKeyPemBlock), nil
+}
+
+// WriteKey writes the pem-encoded key data to keyPath.
+// The key file will be created with file mode 0600.
+// If the key file already exists, it will be overwritten.
+// The parent directory of the keyPath will be created as needed with file mode 0755.
+func WriteKey(keyPath string, data []byte) error {
+	if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil {
+		return err
+	}
+	return os.WriteFile(keyPath, data, os.FileMode(0600))
+}
+
+// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it
+// can't find one, it will generate a new key and store it there.
+func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) {
+	loadedData, err := os.ReadFile(keyPath)
+	// Call verifyKeyData to ensure the file wasn't empty/corrupt.
+	if err == nil && verifyKeyData(loadedData) {
+		return loadedData, false, err
+	}
+	if !os.IsNotExist(err) {
+		return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err)
+	}
+
+	generatedData, err := MakeEllipticPrivateKeyPEM()
+	if err != nil {
+		return nil, false, fmt.Errorf("error generating key: %v", err)
+	}
+	if err := WriteKey(keyPath, generatedData); err != nil {
+		return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err)
+	}
+	return generatedData, true, nil
+}
+
+// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to
+// a PEM encoded block or returns an error.
+func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) {
+	switch t := privateKey.(type) {
+	case *ecdsa.PrivateKey:
+		derBytes, err := x509.MarshalECPrivateKey(t)
+		if err != nil {
+			return nil, err
+		}
+		block := &pem.Block{
+			Type:  ECPrivateKeyBlockType,
+			Bytes: derBytes,
+		}
+		return pem.EncodeToMemory(block), nil
+	case *rsa.PrivateKey:
+		block := &pem.Block{
+			Type:  RSAPrivateKeyBlockType,
+			Bytes: x509.MarshalPKCS1PrivateKey(t),
+		}
+		return pem.EncodeToMemory(block), nil
+	default:
+		return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey)
+	}
+}
+
+// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file.
+// Returns an error if the file could not be read or if the private key could not be parsed.
+func PrivateKeyFromFile(file string) (interface{}, error) {
+	data, err := os.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+	key, err := ParsePrivateKeyPEM(data)
+	if err != nil {
+		return nil, fmt.Errorf("error reading private key file %s: %v", file, err)
+	}
+	return key, nil
+}
+
+// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file.
+// Reads public keys from both public and private key files.
+func PublicKeysFromFile(file string) ([]interface{}, error) {
+	data, err := os.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+	keys, err := ParsePublicKeysPEM(data)
+	if err != nil {
+		return nil, fmt.Errorf("error reading public key file %s: %v", file, err)
+	}
+	return keys, nil
+}
+
+// verifyKeyData returns true if the provided data appears to be a valid private key.
+func verifyKeyData(data []byte) bool {
+	if len(data) == 0 {
+		return false
+	}
+	_, err := ParsePrivateKeyPEM(data)
+	return err == nil
+}
+
+// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data.
+// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY"
+func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) {
+	var privateKeyPemBlock *pem.Block
+	for {
+		privateKeyPemBlock, keyData = pem.Decode(keyData)
+		if privateKeyPemBlock == nil {
+			break
+		}
+
+		switch privateKeyPemBlock.Type {
+		case ECPrivateKeyBlockType:
+			// ECDSA Private Key in ASN.1 format
+			if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil {
+				return key, nil
+			}
+		case RSAPrivateKeyBlockType:
+			// RSA Private Key in PKCS#1 format
+			if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil {
+				return key, nil
+			}
+		case PrivateKeyBlockType:
+			// RSA or ECDSA Private Key in unencrypted PKCS#8 format
+			if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil {
+				return key, nil
+			}
+		}
+
+		// tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks
+		// originally, only the first PEM block was parsed and expected to be a key block
+	}
+
+	// we read all the PEM blocks and didn't recognize one
+	return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key")
+}
+
+// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array.
+// Reads public keys from both public and private key files.
+func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) {
+	var block *pem.Block
+	keys := []interface{}{}
+	for {
+		// read the next block
+		block, keyData = pem.Decode(keyData)
+		if block == nil {
+			break
+		}
+
+		// test block against parsing functions
+		if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil {
+			keys = append(keys, &privateKey.PublicKey)
+			continue
+		}
+		if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil {
+			keys = append(keys, publicKey)
+			continue
+		}
+		if privateKey, err := parseECPrivateKey(block.Bytes); err == nil {
+			keys = append(keys, &privateKey.PublicKey)
+			continue
+		}
+		if publicKey, err := parseECPublicKey(block.Bytes); err == nil {
+			keys = append(keys, publicKey)
+			continue
+		}
+
+		// tolerate non-key PEM blocks for backwards compatibility
+		// originally, only the first PEM block was parsed and expected to be a key block
+	}
+
+	if len(keys) == 0 {
+		return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys")
+	}
+	return keys, nil
+}
+
+// parseRSAPublicKey parses a single RSA public key from the provided data
+func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) {
+	var err error
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
+		if cert, err := x509.ParseCertificate(data); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	// Test if parsed key is an RSA Public Key
+	var pubKey *rsa.PublicKey
+	var ok bool
+	if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok {
+		return nil, fmt.Errorf("data doesn't contain a valid RSA public key")
+	}
+
+	return pubKey, nil
+}
+
+// parseRSAPrivateKey parses a single RSA private key from the provided data
+func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) {
+	var err error
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil {
+			return nil, err
+		}
+	}
+
+	// Test if parsed key is an RSA Private Key
+	var privKey *rsa.PrivateKey
+	var ok bool
+	if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+		return nil, fmt.Errorf("data doesn't contain a valid RSA private key")
+	}
+
+	return privKey, nil
+}
+
+// parseECPublicKey parses a single ECDSA public key from the provided data
+func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) {
+	var err error
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil {
+		if cert, err := x509.ParseCertificate(data); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	// Test if parsed key is an ECDSA Public Key
+	var pubKey *ecdsa.PublicKey
+	var ok bool
+	if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+		return nil, fmt.Errorf("data doesn't contain a valid ECDSA public key")
+	}
+
+	return pubKey, nil
+}
+
+// parseECPrivateKey parses a single ECDSA private key from the provided data
+func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) {
+	var err error
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParseECPrivateKey(data); err != nil {
+		return nil, err
+	}
+
+	// Test if parsed key is an ECDSA Private Key
+	var privKey *ecdsa.PrivateKey
+	var ok bool
+	if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+		return nil, fmt.Errorf("data doesn't contain a valid ECDSA private key")
+	}
+
+	return privKey, nil
+}
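
A sketch of the round trip these helpers support, using a hypothetical path under /tmp: generate-or-load an ECDSA key, re-parse the private key from the PEM bytes, and extract the public half the way a token verifier would.

package main

import (
	"fmt"
	"log"

	"k8s.io/client-go/util/keyutil"
)

func main() {
	// Hypothetical path; the parent directory is created as needed (0755)
	// and the key file is written with mode 0600.
	pemBytes, generated, err := keyutil.LoadOrGenerateKeyFile("/tmp/demo/sa.key")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("generated a new key:", generated)

	// Round-trip the private key out of the PEM data.
	key, err := keyutil.ParsePrivateKeyPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed key type: %T\n", key) // *ecdsa.PrivateKey

	// ParsePublicKeysPEM also derives public keys from private-key blocks.
	pubs, err := keyutil.ParsePublicKeysPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("public keys found:", len(pubs))
}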
diff --git a/hack/tools/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/hack/tools/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
new file mode 100644
index 0000000000..1f9567881c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
@@ -0,0 +1,295 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+import (
+	"math"
+	"sync"
+	"time"
+
+	"golang.org/x/time/rate"
+)
+
+// Deprecated: RateLimiter is deprecated, use TypedRateLimiter instead.
+type RateLimiter TypedRateLimiter[any]
+
+type TypedRateLimiter[T comparable] interface {
+	// When gets an item and gets to decide how long that item should wait
+	When(item T) time.Duration
+	// Forget indicates that an item is finished being retried.  Doesn't matter whether it's for failing
+	// or for success, we'll stop tracking it
+	Forget(item T)
+	// NumRequeues returns back how many failures the item has had
+	NumRequeues(item T) int
+}
+
+// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue.  It has
+// both overall and per-item rate limiting.  The overall is a token bucket and the per-item is exponential
+//
+// Deprecated: Use DefaultTypedControllerRateLimiter instead.
+func DefaultControllerRateLimiter() RateLimiter {
+	return DefaultTypedControllerRateLimiter[any]()
+}
+
+// DefaultTypedControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue.  It has
+// both overall and per-item rate limiting.  The overall is a token bucket and the per-item is exponential
+func DefaultTypedControllerRateLimiter[T comparable]() TypedRateLimiter[T] {
+	return NewTypedMaxOfRateLimiter(
+		NewTypedItemExponentialFailureRateLimiter[T](5*time.Millisecond, 1000*time.Second),
+		// 10 qps, 100 bucket size.  This is only for retry speed and it's only the overall factor (not per item)
+		&TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
+	)
+}
+
+// Deprecated: BucketRateLimiter is deprecated, use TypedBucketRateLimiter instead.
+type BucketRateLimiter = TypedBucketRateLimiter[any]
+
+// TypedBucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API
+type TypedBucketRateLimiter[T comparable] struct {
+	*rate.Limiter
+}
+
+var _ RateLimiter = &BucketRateLimiter{}
+
+func (r *TypedBucketRateLimiter[T]) When(item T) time.Duration {
+	return r.Limiter.Reserve().Delay()
+}
+
+func (r *TypedBucketRateLimiter[T]) NumRequeues(item T) int {
+	return 0
+}
+
+func (r *TypedBucketRateLimiter[T]) Forget(item T) {
+}
+
+// Deprecated: ItemExponentialFailureRateLimiter is deprecated, use TypedItemExponentialFailureRateLimiter instead.
+type ItemExponentialFailureRateLimiter = TypedItemExponentialFailureRateLimiter[any]
+
+// TypedItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit;
+// dealing with max failures and expiration is up to the caller
+type TypedItemExponentialFailureRateLimiter[T comparable] struct {
+	failuresLock sync.Mutex
+	failures     map[T]int
+
+	baseDelay time.Duration
+	maxDelay  time.Duration
+}
+
+var _ RateLimiter = &ItemExponentialFailureRateLimiter{}
+
+// Deprecated: NewItemExponentialFailureRateLimiter is deprecated, use NewTypedItemExponentialFailureRateLimiter instead.
+func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter {
+	return NewTypedItemExponentialFailureRateLimiter[any](baseDelay, maxDelay)
+}
+
+func NewTypedItemExponentialFailureRateLimiter[T comparable](baseDelay time.Duration, maxDelay time.Duration) TypedRateLimiter[T] {
+	return &TypedItemExponentialFailureRateLimiter[T]{
+		failures:  map[T]int{},
+		baseDelay: baseDelay,
+		maxDelay:  maxDelay,
+	}
+}
+
+// Deprecated: DefaultItemBasedRateLimiter is deprecated, use DefaultTypedItemBasedRateLimiter instead.
+func DefaultItemBasedRateLimiter() RateLimiter {
+	return DefaultTypedItemBasedRateLimiter[any]()
+}
+
+func DefaultTypedItemBasedRateLimiter[T comparable]() TypedRateLimiter[T] {
+	return NewTypedItemExponentialFailureRateLimiter[T](time.Millisecond, 1000*time.Second)
+}
+
+func (r *TypedItemExponentialFailureRateLimiter[T]) When(item T) time.Duration {
+	r.failuresLock.Lock()
+	defer r.failuresLock.Unlock()
+
+	exp := r.failures[item]
+	r.failures[item] = r.failures[item] + 1
+
+	// The backoff is capped such that 'calculated' value never overflows.
+	backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp))
+	if backoff > math.MaxInt64 {
+		return r.maxDelay
+	}
+
+	calculated := time.Duration(backoff)
+	if calculated > r.maxDelay {
+		return r.maxDelay
+	}
+
+	return calculated
+}
+
+func (r *TypedItemExponentialFailureRateLimiter[T]) NumRequeues(item T) int {
+	r.failuresLock.Lock()
+	defer r.failuresLock.Unlock()
+
+	return r.failures[item]
+}
+
+func (r *TypedItemExponentialFailureRateLimiter[T]) Forget(item T) {
+	r.failuresLock.Lock()
+	defer r.failuresLock.Unlock()
+
+	delete(r.failures, item)
+}
+
+// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
+// Deprecated: Use TypedItemFastSlowRateLimiter instead.
+type ItemFastSlowRateLimiter = TypedItemFastSlowRateLimiter[any]
+
+// TypedItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
+type TypedItemFastSlowRateLimiter[T comparable] struct {
+	failuresLock sync.Mutex
+	failures     map[T]int
+
+	maxFastAttempts int
+	fastDelay       time.Duration
+	slowDelay       time.Duration
+}
+
+var _ RateLimiter = &ItemFastSlowRateLimiter{}
+
+// Deprecated: NewItemFastSlowRateLimiter is deprecated, use NewTypedItemFastSlowRateLimiter instead.
+func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter {
+	return NewTypedItemFastSlowRateLimiter[any](fastDelay, slowDelay, maxFastAttempts)
+}
+
+func NewTypedItemFastSlowRateLimiter[T comparable](fastDelay, slowDelay time.Duration, maxFastAttempts int) TypedRateLimiter[T] {
+	return &TypedItemFastSlowRateLimiter[T]{
+		failures:        map[T]int{},
+		fastDelay:       fastDelay,
+		slowDelay:       slowDelay,
+		maxFastAttempts: maxFastAttempts,
+	}
+}
+
+func (r *TypedItemFastSlowRateLimiter[T]) When(item T) time.Duration {
+	r.failuresLock.Lock()
+	defer r.failuresLock.Unlock()
+
+	r.failures[item] = r.failures[item] + 1
+
+	if r.failures[item] <= r.maxFastAttempts {
+		return r.fastDelay
+	}
+
+	return r.slowDelay
+}
+
+func (r *TypedItemFastSlowRateLimiter[T]) NumRequeues(item T) int {
+	r.failuresLock.Lock()
+	defer r.failuresLock.Unlock()
+
+	return r.failures[item]
+}
+
+func (r *TypedItemFastSlowRateLimiter[T]) Forget(item T) {
+	r.failuresLock.Lock()
+	defer r.failuresLock.Unlock()
+
+	delete(r.failures, item)
+}
+
+// MaxOfRateLimiter calls every RateLimiter and returns the worst case response
+// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
+// were separately delayed a longer time.
+//
+// Deprecated: Use TypedMaxOfRateLimiter instead.
+type MaxOfRateLimiter = TypedMaxOfRateLimiter[any]
+
+// TypedMaxOfRateLimiter calls every RateLimiter and returns the worst case response
+// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
+// were separately delayed a longer time.
+type TypedMaxOfRateLimiter[T comparable] struct {
+	limiters []TypedRateLimiter[T]
+}
+
+func (r *TypedMaxOfRateLimiter[T]) When(item T) time.Duration {
+	ret := time.Duration(0)
+	for _, limiter := range r.limiters {
+		curr := limiter.When(item)
+		if curr > ret {
+			ret = curr
+		}
+	}
+
+	return ret
+}
+
+// Deprecated: NewMaxOfRateLimiter is deprecated, use NewTypedMaxOfRateLimiter instead.
+func NewMaxOfRateLimiter(limiters ...TypedRateLimiter[any]) RateLimiter {
+	return NewTypedMaxOfRateLimiter(limiters...)
+}
+
+func NewTypedMaxOfRateLimiter[T comparable](limiters ...TypedRateLimiter[T]) TypedRateLimiter[T] {
+	return &TypedMaxOfRateLimiter[T]{limiters: limiters}
+}
+
+func (r *TypedMaxOfRateLimiter[T]) NumRequeues(item T) int {
+	ret := 0
+	for _, limiter := range r.limiters {
+		curr := limiter.NumRequeues(item)
+		if curr > ret {
+			ret = curr
+		}
+	}
+
+	return ret
+}
+
+func (r *TypedMaxOfRateLimiter[T]) Forget(item T) {
+	for _, limiter := range r.limiters {
+		limiter.Forget(item)
+	}
+}
+
+// WithMaxWaitRateLimiter has a maxDelay which avoids waiting too long
+// Deprecated: Use TypedWithMaxWaitRateLimiter instead.
+type WithMaxWaitRateLimiter = TypedWithMaxWaitRateLimiter[any]
+
+// TypedWithMaxWaitRateLimiter has a maxDelay which avoids waiting too long
+type TypedWithMaxWaitRateLimiter[T comparable] struct {
+	limiter  TypedRateLimiter[T]
+	maxDelay time.Duration
+}
+
+// Deprecated: NewWithMaxWaitRateLimiter is deprecated, use NewTypedWithMaxWaitRateLimiter instead.
+func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter {
+	return NewTypedWithMaxWaitRateLimiter[any](limiter, maxDelay)
+}
+
+func NewTypedWithMaxWaitRateLimiter[T comparable](limiter TypedRateLimiter[T], maxDelay time.Duration) TypedRateLimiter[T] {
+	return &TypedWithMaxWaitRateLimiter[T]{limiter: limiter, maxDelay: maxDelay}
+}
+
+func (w TypedWithMaxWaitRateLimiter[T]) When(item T) time.Duration {
+	delay := w.limiter.When(item)
+	if delay > w.maxDelay {
+		return w.maxDelay
+	}
+
+	return delay
+}
+
+func (w TypedWithMaxWaitRateLimiter[T]) Forget(item T) {
+	w.limiter.Forget(item)
+}
+
+func (w TypedWithMaxWaitRateLimiter[T]) NumRequeues(item T) int {
+	return w.limiter.NumRequeues(item)
+}
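
A sketch of how the delays compose. The default controller limiter returns the worst case of a per-item exponential limiter (5ms base, 1000s cap) and a shared 10 QPS / 100-burst token bucket, so for early failures the per-item term dominates; the max-wait wrapper then demonstrates the hard cap. Item keys are hypothetical.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	rl := workqueue.DefaultTypedControllerRateLimiter[string]()

	for i := 1; i <= 5; i++ {
		fmt.Printf("failure %d: requeue after %v\n", i, rl.When("object-key"))
	}
	// Prints 5ms, 10ms, 20ms, 40ms, 80ms while the shared bucket still has tokens.

	fmt.Println("requeues so far:", rl.NumRequeues("object-key"))
	rl.Forget("object-key") // reset the per-item counter after a successful sync

	// Cap any limiter's delay with the max-wait wrapper.
	capped := workqueue.NewTypedWithMaxWaitRateLimiter[string](
		workqueue.NewTypedItemExponentialFailureRateLimiter[string](time.Second, time.Hour),
		10*time.Second,
	)
	for i := 0; i < 15; i++ {
		capped.When("slow-object")
	}
	fmt.Println("capped delay:", capped.When("slow-object")) // 10s, not hours
}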
diff --git a/hack/tools/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/hack/tools/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
new file mode 100644
index 0000000000..e33a6c6929
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go
@@ -0,0 +1,360 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+import (
+	"container/heap"
+	"sync"
+	"time"
+
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/utils/clock"
+)
+
+// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
+// requeue items after failures without ending up in a hot-loop.
+//
+// Deprecated: use TypedDelayingInterface instead.
+type DelayingInterface TypedDelayingInterface[any]
+
+// TypedDelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
+// requeue items after failures without ending up in a hot-loop.
+type TypedDelayingInterface[T comparable] interface {
+	TypedInterface[T]
+	// AddAfter adds an item to the workqueue after the indicated duration has passed
+	AddAfter(item T, duration time.Duration)
+}
+
+// DelayingQueueConfig specifies optional configurations to customize a DelayingInterface.
+//
+// Deprecated: use TypedDelayingQueueConfig instead.
+type DelayingQueueConfig = TypedDelayingQueueConfig[any]
+
+// TypedDelayingQueueConfig specifies optional configurations to customize a DelayingInterface.
+type TypedDelayingQueueConfig[T comparable] struct {
+	// Name for the queue. If unnamed, the metrics will not be registered.
+	Name string
+
+	// MetricsProvider optionally allows specifying a metrics provider to use for the queue
+	// instead of the global provider.
+	MetricsProvider MetricsProvider
+
+	// Clock optionally allows injecting a real or fake clock for testing purposes.
+	Clock clock.WithTicker
+
+	// Queue optionally allows injecting custom queue Interface instead of the default one.
+	Queue TypedInterface[T]
+}
+
+// NewDelayingQueue constructs a new workqueue with delayed queuing ability.
+// NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
+// NewDelayingQueueWithConfig instead and specify a name.
+//
+// Deprecated: use NewTypedDelayingQueue instead.
+func NewDelayingQueue() DelayingInterface {
+	return NewDelayingQueueWithConfig(DelayingQueueConfig{})
+}
+
+// NewTypedDelayingQueue constructs a new workqueue with delayed queuing ability.
+// NewTypedDelayingQueue does not emit metrics. For use with a MetricsProvider, please use
+// NewTypedDelayingQueueWithConfig instead and specify a name.
+func NewTypedDelayingQueue[T comparable]() TypedDelayingInterface[T] {
+	return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{})
+}
+
+// NewDelayingQueueWithConfig constructs a new workqueue with options to
+// customize different properties.
+//
+// Deprecated: use NewTypedDelayingQueueWithConfig instead.
+func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface {
+	return NewTypedDelayingQueueWithConfig[any](config)
+}
+
+// TypedNewDelayingQueue exists for backwards compatibility only.
+//
+// Deprecated: use NewTypedDelayingQueueWithConfig instead.
+func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] {
+	return NewTypedDelayingQueue[T]()
+}
+
+// NewTypedDelayingQueueWithConfig constructs a new workqueue with options to
+// customize different properties.
+func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] {
+	if config.Clock == nil {
+		config.Clock = clock.RealClock{}
+	}
+
+	if config.Queue == nil {
+		config.Queue = NewTypedWithConfig[T](TypedQueueConfig[T]{
+			Name:            config.Name,
+			MetricsProvider: config.MetricsProvider,
+			Clock:           config.Clock,
+		})
+	}
+
+	return newDelayingQueue(config.Clock, config.Queue, config.Name, config.MetricsProvider)
+}
+
+// NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to
+// inject custom queue Interface instead of the default one
+// Deprecated: Use NewDelayingQueueWithConfig instead.
+func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface {
+	return NewDelayingQueueWithConfig(DelayingQueueConfig{
+		Name:  name,
+		Queue: q,
+	})
+}
+
+// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability.
+// Deprecated: Use NewDelayingQueueWithConfig instead.
+func NewNamedDelayingQueue(name string) DelayingInterface {
+	return NewDelayingQueueWithConfig(DelayingQueueConfig{Name: name})
+}
+
+// NewDelayingQueueWithCustomClock constructs a new named workqueue
+// with ability to inject real or fake clock for testing purposes.
+// Deprecated: Use NewDelayingQueueWithConfig instead.
+func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) DelayingInterface {
+	return NewDelayingQueueWithConfig(DelayingQueueConfig{
+		Name:  name,
+		Clock: clock,
+	})
+}
+
+func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T], name string, provider MetricsProvider) *delayingType[T] {
+	ret := &delayingType[T]{
+		TypedInterface:  q,
+		clock:           clock,
+		heartbeat:       clock.NewTicker(maxWait),
+		stopCh:          make(chan struct{}),
+		waitingForAddCh: make(chan *waitFor[T], 1000),
+		metrics:         newRetryMetrics(name, provider),
+	}
+
+	go ret.waitingLoop()
+	return ret
+}
+
+// delayingType wraps an Interface and provides delayed re-enqueuing
+type delayingType[T comparable] struct {
+	TypedInterface[T]
+
+	// clock tracks time for delayed firing
+	clock clock.Clock
+
+	// stopCh lets us signal a shutdown to the waiting loop
+	stopCh chan struct{}
+	// stopOnce guarantees we only signal shutdown a single time
+	stopOnce sync.Once
+
+	// heartbeat ensures we wait no more than maxWait before firing
+	heartbeat clock.Ticker
+
+	// waitingForAddCh is a buffered channel that feeds waitingForAdd
+	waitingForAddCh chan *waitFor[T]
+
+	// metrics counts the number of retries
+	metrics retryMetrics
+}
+
+// waitFor holds the data to add and the time it should be added
+type waitFor[T any] struct {
+	data    T
+	readyAt time.Time
+	// index in the priority queue (heap)
+	index int
+}
+
+// waitForPriorityQueue implements a priority queue for waitFor items.
+//
+// waitForPriorityQueue implements heap.Interface. The item occurring next in
+// time (i.e., the item with the smallest readyAt) is at the root (index 0).
+// Peek returns this minimum item at index 0. Pop returns the minimum item after
+// it has been removed from the queue and placed at index Len()-1 by
+// container/heap. Push adds an item at index Len(), and container/heap
+// percolates it into the correct location.
+type waitForPriorityQueue[T any] []*waitFor[T]
+
+func (pq waitForPriorityQueue[T]) Len() int {
+	return len(pq)
+}
+func (pq waitForPriorityQueue[T]) Less(i, j int) bool {
+	return pq[i].readyAt.Before(pq[j].readyAt)
+}
+func (pq waitForPriorityQueue[T]) Swap(i, j int) {
+	pq[i], pq[j] = pq[j], pq[i]
+	pq[i].index = i
+	pq[j].index = j
+}
+
+// Push adds an item to the queue. Push should not be called directly; instead,
+// use `heap.Push`.
+func (pq *waitForPriorityQueue[T]) Push(x interface{}) {
+	n := len(*pq)
+	item := x.(*waitFor[T])
+	item.index = n
+	*pq = append(*pq, item)
+}
+
+// Pop removes an item from the queue. Pop should not be called directly;
+// instead, use `heap.Pop`.
+func (pq *waitForPriorityQueue[T]) Pop() interface{} {
+	n := len(*pq)
+	item := (*pq)[n-1]
+	item.index = -1
+	*pq = (*pq)[0:(n - 1)]
+	return item
+}
+
+// Peek returns the item at the beginning of the queue, without removing the
+// item or otherwise mutating the queue. It is safe to call directly.
+func (pq waitForPriorityQueue[T]) Peek() interface{} {
+	return pq[0]
+}
+
+// ShutDown stops the queue. After the queue drains, the returned shutdown bool
+// on Get() will be true. This method may be invoked more than once.
+func (q *delayingType[T]) ShutDown() {
+	q.stopOnce.Do(func() {
+		q.TypedInterface.ShutDown()
+		close(q.stopCh)
+		q.heartbeat.Stop()
+	})
+}
+
+// AddAfter adds the given item to the work queue after the given delay
+func (q *delayingType[T]) AddAfter(item T, duration time.Duration) {
+	// don't add if we're already shutting down
+	if q.ShuttingDown() {
+		return
+	}
+
+	q.metrics.retry()
+
+	// immediately add things with no delay
+	if duration <= 0 {
+		q.Add(item)
+		return
+	}
+
+	select {
+	case <-q.stopCh:
+		// unblock if ShutDown() is called
+	case q.waitingForAddCh <- &waitFor[T]{data: item, readyAt: q.clock.Now().Add(duration)}:
+	}
+}
+
+// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
+// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an
+// expired item sitting for more than 10 seconds.
+const maxWait = 10 * time.Second
+
+// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added.
+func (q *delayingType[T]) waitingLoop() {
+	defer utilruntime.HandleCrash()
+
+	// Make a placeholder channel to use when there are no items in our list
+	never := make(<-chan time.Time)
+
+	// Make a timer that expires when the item at the head of the waiting queue is ready
+	var nextReadyAtTimer clock.Timer
+
+	waitingForQueue := &waitForPriorityQueue[T]{}
+	heap.Init(waitingForQueue)
+
+	waitingEntryByData := map[T]*waitFor[T]{}
+
+	for {
+		if q.TypedInterface.ShuttingDown() {
+			return
+		}
+
+		now := q.clock.Now()
+
+		// Add ready entries
+		for waitingForQueue.Len() > 0 {
+			entry := waitingForQueue.Peek().(*waitFor[T])
+			if entry.readyAt.After(now) {
+				break
+			}
+
+			entry = heap.Pop(waitingForQueue).(*waitFor[T])
+			q.Add(entry.data)
+			delete(waitingEntryByData, entry.data)
+		}
+
+		// Set up a wait for the first item's readyAt (if one exists)
+		nextReadyAt := never
+		if waitingForQueue.Len() > 0 {
+			if nextReadyAtTimer != nil {
+				nextReadyAtTimer.Stop()
+			}
+			entry := waitingForQueue.Peek().(*waitFor[T])
+			nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now))
+			nextReadyAt = nextReadyAtTimer.C()
+		}
+
+		select {
+		case <-q.stopCh:
+			return
+
+		case <-q.heartbeat.C():
+			// continue the loop, which will add ready items
+
+		case <-nextReadyAt:
+			// continue the loop, which will add ready items
+
+		case waitEntry := <-q.waitingForAddCh:
+			if waitEntry.readyAt.After(q.clock.Now()) {
+				insert(waitingForQueue, waitingEntryByData, waitEntry)
+			} else {
+				q.Add(waitEntry.data)
+			}
+
+			drained := false
+			for !drained {
+				select {
+				case waitEntry := <-q.waitingForAddCh:
+					if waitEntry.readyAt.After(q.clock.Now()) {
+						insert(waitingForQueue, waitingEntryByData, waitEntry)
+					} else {
+						q.Add(waitEntry.data)
+					}
+				default:
+					drained = true
+				}
+			}
+		}
+	}
+}
+
+// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue
+func insert[T comparable](q *waitForPriorityQueue[T], knownEntries map[T]*waitFor[T], entry *waitFor[T]) {
+	// if the entry already exists, update the time only if it would cause the item to be queued sooner
+	existing, exists := knownEntries[entry.data]
+	if exists {
+		if existing.readyAt.After(entry.readyAt) {
+			existing.readyAt = entry.readyAt
+			heap.Fix(q, existing.index)
+		}
+
+		return
+	}
+
+	heap.Push(q, entry)
+	knownEntries[entry.data] = entry
+}
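
A sketch of the delaying queue in use. Get, Done, and ShutDown come from the embedded TypedInterface, which is defined in this package's queue.go and is not part of this hunk.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewTypedDelayingQueue[string]()
	defer q.ShutDown()

	// Requeue the item after a delay instead of hot-looping on failure.
	q.AddAfter("retry-me", 50*time.Millisecond)

	start := time.Now()
	item, shutdown := q.Get() // blocks until the delay elapses
	if !shutdown {
		fmt.Printf("got %q after %v\n", item, time.Since(start))
		q.Done(item)
	}
}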
diff --git a/hack/tools/vendor/k8s.io/client-go/util/workqueue/doc.go b/hack/tools/vendor/k8s.io/client-go/util/workqueue/doc.go
new file mode 100644
index 0000000000..8555aa95fe
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/workqueue/doc.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package workqueue provides a simple queue that supports the following
+// features:
+//   - Fair: items processed in the order in which they are added.
+//   - Stingy: a single item will not be processed multiple times concurrently,
+//     and if an item is added multiple times before it can be processed, it
+//     will only be processed once.
+//   - Multiple consumers and producers. In particular, it is allowed for an
+//     item to be reenqueued while it is being processed.
+//   - Shutdown notifications.
+package workqueue // import "k8s.io/client-go/util/workqueue"
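
A sketch of the fairness and stinginess described above; the constructor and the Len/Get/Done methods come from this package's queue.go, which is outside this hunk.

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewTyped[string]()

	// "Stingy": duplicate adds of an unprocessed item collapse into one.
	q.Add("pod/a")
	q.Add("pod/a")
	q.Add("pod/b")
	fmt.Println("queue length:", q.Len()) // 2, not 3

	// "Fair": items come back in insertion order.
	for q.Len() > 0 {
		item, _ := q.Get()
		fmt.Println("processing", item) // pod/a, then pod/b
		q.Done(item)
	}
	q.ShutDown()
}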
diff --git a/hack/tools/vendor/k8s.io/client-go/util/workqueue/metrics.go b/hack/tools/vendor/k8s.io/client-go/util/workqueue/metrics.go
new file mode 100644
index 0000000000..4400cb65e1
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/workqueue/metrics.go
@@ -0,0 +1,255 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+import (
+	"sync"
+	"time"
+
+	"k8s.io/utils/clock"
+)
+
+// This file provides abstractions for setting the provider (e.g., prometheus)
+// of metrics.
+
+type queueMetrics[T comparable] interface {
+	add(item T)
+	get(item T)
+	done(item T)
+	updateUnfinishedWork()
+}
+
+// GaugeMetric represents a single numerical value that can arbitrarily go up
+// and down.
+type GaugeMetric interface {
+	Inc()
+	Dec()
+}
+
+// SettableGaugeMetric represents a single numerical value that can arbitrarily go up
+// and down. (Separate from GaugeMetric to preserve backwards compatibility.)
+type SettableGaugeMetric interface {
+	Set(float64)
+}
+
+// CounterMetric represents a single numerical value that only ever
+// goes up.
+type CounterMetric interface {
+	Inc()
+}
+
+// SummaryMetric captures individual observations.
+type SummaryMetric interface {
+	Observe(float64)
+}
+
+// HistogramMetric counts individual observations.
+type HistogramMetric interface {
+	Observe(float64)
+}
+
+type noopMetric struct{}
+
+func (noopMetric) Inc()            {}
+func (noopMetric) Dec()            {}
+func (noopMetric) Set(float64)     {}
+func (noopMetric) Observe(float64) {}
+
+// defaultQueueMetrics expects the caller to lock before setting any metrics.
+type defaultQueueMetrics[T comparable] struct {
+	clock clock.Clock
+
+	// current depth of a workqueue
+	depth GaugeMetric
+	// total number of adds handled by a workqueue
+	adds CounterMetric
+	// how long an item stays in a workqueue
+	latency HistogramMetric
+	// how long processing an item from a workqueue takes
+	workDuration         HistogramMetric
+	addTimes             map[T]time.Time
+	processingStartTimes map[T]time.Time
+
+	// how long have current threads been working?
+	unfinishedWorkSeconds   SettableGaugeMetric
+	longestRunningProcessor SettableGaugeMetric
+}
+
+func (m *defaultQueueMetrics[T]) add(item T) {
+	if m == nil {
+		return
+	}
+
+	m.adds.Inc()
+	m.depth.Inc()
+	if _, exists := m.addTimes[item]; !exists {
+		m.addTimes[item] = m.clock.Now()
+	}
+}
+
+func (m *defaultQueueMetrics[T]) get(item T) {
+	if m == nil {
+		return
+	}
+
+	m.depth.Dec()
+	m.processingStartTimes[item] = m.clock.Now()
+	if startTime, exists := m.addTimes[item]; exists {
+		m.latency.Observe(m.sinceInSeconds(startTime))
+		delete(m.addTimes, item)
+	}
+}
+
+func (m *defaultQueueMetrics[T]) done(item T) {
+	if m == nil {
+		return
+	}
+
+	if startTime, exists := m.processingStartTimes[item]; exists {
+		m.workDuration.Observe(m.sinceInSeconds(startTime))
+		delete(m.processingStartTimes, item)
+	}
+}
+
+func (m *defaultQueueMetrics[T]) updateUnfinishedWork() {
+	// Note that a summary metric would be better for this, but prometheus
+	// doesn't seem to have non-hacky ways to reset the summary metrics.
+	var total float64
+	var oldest float64
+	for _, t := range m.processingStartTimes {
+		age := m.sinceInSeconds(t)
+		total += age
+		if age > oldest {
+			oldest = age
+		}
+	}
+	m.unfinishedWorkSeconds.Set(total)
+	m.longestRunningProcessor.Set(oldest)
+}
+
+type noMetrics[T any] struct{}
+
+func (noMetrics[T]) add(item T)            {}
+func (noMetrics[T]) get(item T)            {}
+func (noMetrics[T]) done(item T)           {}
+func (noMetrics[T]) updateUnfinishedWork() {}
+
+// Gets the time since the specified start in seconds.
+func (m *defaultQueueMetrics[T]) sinceInSeconds(start time.Time) float64 {
+	return m.clock.Since(start).Seconds()
+}
+
+type retryMetrics interface {
+	retry()
+}
+
+type defaultRetryMetrics struct {
+	retries CounterMetric
+}
+
+func (m *defaultRetryMetrics) retry() {
+	if m == nil {
+		return
+	}
+
+	m.retries.Inc()
+}
+
+// MetricsProvider generates various metrics used by the queue.
+type MetricsProvider interface {
+	NewDepthMetric(name string) GaugeMetric
+	NewAddsMetric(name string) CounterMetric
+	NewLatencyMetric(name string) HistogramMetric
+	NewWorkDurationMetric(name string) HistogramMetric
+	NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric
+	NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric
+	NewRetriesMetric(name string) CounterMetric
+}
+
+type noopMetricsProvider struct{}
+
+func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric {
+	return noopMetric{}
+}
+
+func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric {
+	return noopMetric{}
+}
+
+func (_ noopMetricsProvider) NewLatencyMetric(name string) HistogramMetric {
+	return noopMetric{}
+}
+
+func (_ noopMetricsProvider) NewWorkDurationMetric(name string) HistogramMetric {
+	return noopMetric{}
+}
+
+func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric {
+	return noopMetric{}
+}
+
+func (_ noopMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric {
+	return noopMetric{}
+}
+
+func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
+	return noopMetric{}
+}
+
+var globalMetricsProvider MetricsProvider = noopMetricsProvider{}
+
+var setGlobalMetricsProviderOnce sync.Once
+
+func newQueueMetrics[T comparable](mp MetricsProvider, name string, clock clock.Clock) queueMetrics[T] {
+	if len(name) == 0 || mp == (noopMetricsProvider{}) {
+		return noMetrics[T]{}
+	}
+	return &defaultQueueMetrics[T]{
+		clock:                   clock,
+		depth:                   mp.NewDepthMetric(name),
+		adds:                    mp.NewAddsMetric(name),
+		latency:                 mp.NewLatencyMetric(name),
+		workDuration:            mp.NewWorkDurationMetric(name),
+		unfinishedWorkSeconds:   mp.NewUnfinishedWorkSecondsMetric(name),
+		longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name),
+		addTimes:                map[T]time.Time{},
+		processingStartTimes:    map[T]time.Time{},
+	}
+}
+
+func newRetryMetrics(name string, provider MetricsProvider) retryMetrics {
+	var ret *defaultRetryMetrics
+	if len(name) == 0 {
+		return ret
+	}
+
+	if provider == nil {
+		provider = globalMetricsProvider
+	}
+
+	return &defaultRetryMetrics{
+		retries: provider.NewRetriesMetric(name),
+	}
+}
+
+// SetProvider sets the metrics provider for all subsequently created work
+// queues. Only the first call has an effect.
+func SetProvider(metricsProvider MetricsProvider) {
+	setGlobalMetricsProviderOnce.Do(func() {
+		globalMetricsProvider = metricsProvider
+	})
+}
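
A minimal sketch of how a custom provider plugs into the hooks above: SetProvider must run before any queue is constructed, and only the first call wins. The expvar-backed gauge, the local no-op type, and the queue name here are illustrative assumptions, not part of this package.

    package main

    import (
    	"expvar"

    	"k8s.io/client-go/util/workqueue"
    )

    // expvarGauge adapts an expvar.Int to the workqueue GaugeMetric interface.
    type expvarGauge struct{ v *expvar.Int }

    func (g expvarGauge) Inc() { g.v.Add(1) }
    func (g expvarGauge) Dec() { g.v.Add(-1) }

    // nop satisfies the remaining metric interfaces without recording anything
    // (the package's own noopMetric is unexported).
    type nop struct{}

    func (nop) Inc()            {}
    func (nop) Dec()            {}
    func (nop) Set(float64)     {}
    func (nop) Observe(float64) {}

    type expvarProvider struct{}

    func (expvarProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
    	return expvarGauge{expvar.NewInt(name + "_depth")}
    }
    func (expvarProvider) NewAddsMetric(name string) workqueue.CounterMetric          { return nop{} }
    func (expvarProvider) NewLatencyMetric(name string) workqueue.HistogramMetric     { return nop{} }
    func (expvarProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { return nop{} }
    func (expvarProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
    	return nop{}
    }
    func (expvarProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
    	return nop{}
    }
    func (expvarProvider) NewRetriesMetric(name string) workqueue.CounterMetric { return nop{} }

    func main() {
    	// Register before constructing queues; later calls are ignored.
    	workqueue.SetProvider(expvarProvider{})
    	q := workqueue.NewTypedWithConfig(workqueue.TypedQueueConfig[string]{Name: "demo"})
    	q.Add("item") // bumps demo_depth via the expvar gauge
    }

Note that an unnamed queue (or the default no-op provider) short-circuits to noMetrics entirely, so naming the queue is what opts it into metrics.
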
diff --git a/hack/tools/vendor/k8s.io/client-go/util/workqueue/parallelizer.go b/hack/tools/vendor/k8s.io/client-go/util/workqueue/parallelizer.go
new file mode 100644
index 0000000000..366bf20a31
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/workqueue/parallelizer.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+import (
+	"context"
+	"sync"
+
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+type DoWorkPieceFunc func(piece int)
+
+type options struct {
+	chunkSize int
+}
+
+type Options func(*options)
+
+// WithChunkSize allows setting the chunk size of work items handed to the
+// workers, rather than processing them one by one.
+// It is recommended to use this option if the number of pieces is significantly
+// higher than the number of workers and the work done for each item is small.
+func WithChunkSize(c int) func(*options) {
+	return func(o *options) {
+		o.chunkSize = c
+	}
+}
+
+// ParallelizeUntil is a framework that allows for parallelizing N
+// independent pieces of work until done or the context is canceled.
+func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc, opts ...Options) {
+	if pieces == 0 {
+		return
+	}
+	o := options{}
+	for _, opt := range opts {
+		opt(&o)
+	}
+	chunkSize := o.chunkSize
+	if chunkSize < 1 {
+		chunkSize = 1
+	}
+
+	chunks := ceilDiv(pieces, chunkSize)
+	toProcess := make(chan int, chunks)
+	for i := 0; i < chunks; i++ {
+		toProcess <- i
+	}
+	close(toProcess)
+
+	var stop <-chan struct{}
+	if ctx != nil {
+		stop = ctx.Done()
+	}
+	if chunks < workers {
+		workers = chunks
+	}
+	wg := sync.WaitGroup{}
+	wg.Add(workers)
+	for i := 0; i < workers; i++ {
+		go func() {
+			defer utilruntime.HandleCrash()
+			defer wg.Done()
+			for chunk := range toProcess {
+				start := chunk * chunkSize
+				end := start + chunkSize
+				if end > pieces {
+					end = pieces
+				}
+				for p := start; p < end; p++ {
+					select {
+					case <-stop:
+						return
+					default:
+						doWorkPiece(p)
+					}
+				}
+			}
+		}()
+	}
+	wg.Wait()
+}
+
+func ceilDiv(a, b int) int {
+	return (a + b - 1) / b
+}
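
A hypothetical caller, to illustrate the chunking knob: when per-piece work is cheap relative to scheduling cost, WithChunkSize amortizes one channel receive across a whole chunk. This is a sketch, not code from the repo.

    package main

    import (
    	"context"
    	"fmt"
    	"sync/atomic"

    	"k8s.io/client-go/util/workqueue"
    )

    func main() {
    	items := make([]int, 10000)
    	for i := range items {
    		items[i] = i
    	}

    	var sum int64
    	// 8 workers over 10000 cheap pieces; chunks of 100 mean each worker
    	// pulls 100 indices per channel receive instead of one.
    	workqueue.ParallelizeUntil(context.TODO(), 8, len(items), func(piece int) {
    		atomic.AddInt64(&sum, int64(items[piece]))
    	}, workqueue.WithChunkSize(100))

    	fmt.Println(sum)
    }
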
diff --git a/hack/tools/vendor/k8s.io/client-go/util/workqueue/queue.go b/hack/tools/vendor/k8s.io/client-go/util/workqueue/queue.go
new file mode 100644
index 0000000000..3cec1768a0
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/workqueue/queue.go
@@ -0,0 +1,369 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+import (
+	"sync"
+	"time"
+
+	"k8s.io/utils/clock"
+)
+
+// Deprecated: Interface is deprecated, use TypedInterface instead.
+type Interface TypedInterface[any]
+
+type TypedInterface[T comparable] interface {
+	Add(item T)
+	Len() int
+	Get() (item T, shutdown bool)
+	Done(item T)
+	ShutDown()
+	ShutDownWithDrain()
+	ShuttingDown() bool
+}
+
+// Queue is the underlying storage for items. The functions below are always
+// called from the same goroutine.
+type Queue[T comparable] interface {
+	// Touch can be hooked when an existing item is added again. This may be
+	// useful if the implementation allows priority change for the given item.
+	Touch(item T)
+	// Push adds a new item.
+	Push(item T)
+	// Len tells the total number of items.
+	Len() int
+	// Pop retrieves an item.
+	Pop() (item T)
+}
+
+// DefaultQueue returns a slice-based FIFO queue.
+func DefaultQueue[T comparable]() Queue[T] {
+	return new(queue[T])
+}
+
+// queue is a slice which implements Queue.
+type queue[T comparable] []T
+
+func (q *queue[T]) Touch(item T) {}
+
+func (q *queue[T]) Push(item T) {
+	*q = append(*q, item)
+}
+
+func (q *queue[T]) Len() int {
+	return len(*q)
+}
+
+func (q *queue[T]) Pop() (item T) {
+	item = (*q)[0]
+
+	// The underlying array still exists and references this object, so the object will not be garbage collected.
+	(*q)[0] = *new(T)
+	*q = (*q)[1:]
+
+	return item
+}
+
+// QueueConfig specifies optional configurations to customize an Interface.
+// Deprecated: use TypedQueueConfig instead.
+type QueueConfig = TypedQueueConfig[any]
+
+type TypedQueueConfig[T comparable] struct {
+	// Name for the queue. If unnamed, the metrics will not be registered.
+	Name string
+
+	// MetricsProvider optionally allows specifying a metrics provider to use for the queue
+	// instead of the global provider.
+	MetricsProvider MetricsProvider
+
+	// Clock optionally allows injecting a real or fake clock for testing purposes.
+	Clock clock.WithTicker
+
+	// Queue provides the underlying queue to use. It is optional and defaults to slice based FIFO queue.
+	Queue Queue[T]
+}
+
+// New constructs a new work queue (see the package comment).
+//
+// Deprecated: use NewTyped instead.
+func New() *Type {
+	return NewWithConfig(QueueConfig{
+		Name: "",
+	})
+}
+
+// NewTyped constructs a new work queue (see the package comment).
+func NewTyped[T comparable]() *Typed[T] {
+	return NewTypedWithConfig(TypedQueueConfig[T]{
+		Name: "",
+	})
+}
+
+// NewWithConfig constructs a new workqueue with ability to
+// customize different properties.
+//
+// Deprecated: use NewTypedWithConfig instead.
+func NewWithConfig(config QueueConfig) *Type {
+	return NewTypedWithConfig(config)
+}
+
+// NewTypedWithConfig constructs a new workqueue with ability to
+// customize different properties.
+func NewTypedWithConfig[T comparable](config TypedQueueConfig[T]) *Typed[T] {
+	return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod)
+}
+
+// NewNamed creates a new named queue.
+// Deprecated: Use NewWithConfig instead.
+func NewNamed(name string) *Type {
+	return NewWithConfig(QueueConfig{
+		Name: name,
+	})
+}
+
+// newQueueWithConfig constructs a new named workqueue
+// with the ability to customize different properties for testing purposes
+func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] {
+	metricsProvider := globalMetricsProvider
+	if config.MetricsProvider != nil {
+		metricsProvider = config.MetricsProvider
+	}
+
+	if config.Clock == nil {
+		config.Clock = clock.RealClock{}
+	}
+
+	if config.Queue == nil {
+		config.Queue = DefaultQueue[T]()
+	}
+
+	return newQueue(
+		config.Clock,
+		config.Queue,
+		newQueueMetrics[T](metricsProvider, config.Name, config.Clock),
+		updatePeriod,
+	)
+}
+
+func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics[T], updatePeriod time.Duration) *Typed[T] {
+	t := &Typed[T]{
+		clock:                      c,
+		queue:                      queue,
+		dirty:                      set[T]{},
+		processing:                 set[T]{},
+		cond:                       sync.NewCond(&sync.Mutex{}),
+		metrics:                    metrics,
+		unfinishedWorkUpdatePeriod: updatePeriod,
+	}
+
+	// Don't start the goroutine when metrics are disabled (noMetrics) so we
+	// don't consume resources unnecessarily.
+	if _, ok := metrics.(noMetrics[T]); !ok {
+		go t.updateUnfinishedWorkLoop()
+	}
+
+	return t
+}
+
+const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond
+
+// Type is a work queue (see the package comment).
+// Deprecated: Use Typed instead.
+type Type = Typed[any]
+
+type Typed[t comparable] struct {
+	// queue defines the order in which we will work on items. Every
+	// element of queue should be in the dirty set and not in the
+	// processing set.
+	queue Queue[t]
+
+	// dirty defines all of the items that need to be processed.
+	dirty set[t]
+
+	// Things that are currently being processed are in the processing set.
+	// These things may be simultaneously in the dirty set. When we finish
+	// processing something and remove it from this set, we'll check if
+	// it's in the dirty set, and if so, add it to the queue.
+	processing set[t]
+
+	cond *sync.Cond
+
+	shuttingDown bool
+	drain        bool
+
+	metrics queueMetrics[t]
+
+	unfinishedWorkUpdatePeriod time.Duration
+	clock                      clock.WithTicker
+}
+
+type empty struct{}
+type set[t comparable] map[t]empty
+
+func (s set[t]) has(item t) bool {
+	_, exists := s[item]
+	return exists
+}
+
+func (s set[t]) insert(item t) {
+	s[item] = empty{}
+}
+
+func (s set[t]) delete(item t) {
+	delete(s, item)
+}
+
+func (s set[t]) len() int {
+	return len(s)
+}
+
+// Add marks item as needing processing.
+func (q *Typed[T]) Add(item T) {
+	q.cond.L.Lock()
+	defer q.cond.L.Unlock()
+	if q.shuttingDown {
+		return
+	}
+	if q.dirty.has(item) {
+		// The same item was added again before it was processed; call the Touch
+		// function if the queue cares about it (e.g., to reset its priority).
+		if !q.processing.has(item) {
+			q.queue.Touch(item)
+		}
+		return
+	}
+
+	q.metrics.add(item)
+
+	q.dirty.insert(item)
+	if q.processing.has(item) {
+		return
+	}
+
+	q.queue.Push(item)
+	q.cond.Signal()
+}
+
+// Len returns the current queue length, for informational purposes only. You
+// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
+// value; that can't be synchronized properly.
+func (q *Typed[T]) Len() int {
+	q.cond.L.Lock()
+	defer q.cond.L.Unlock()
+	return q.queue.Len()
+}
+
+// Get blocks until it can return an item to be processed. If shutdown = true,
+// the caller should end their goroutine. You must call Done with item when you
+// have finished processing it.
+func (q *Typed[T]) Get() (item T, shutdown bool) {
+	q.cond.L.Lock()
+	defer q.cond.L.Unlock()
+	for q.queue.Len() == 0 && !q.shuttingDown {
+		q.cond.Wait()
+	}
+	if q.queue.Len() == 0 {
+		// We must be shutting down.
+		return *new(T), true
+	}
+
+	item = q.queue.Pop()
+
+	q.metrics.get(item)
+
+	q.processing.insert(item)
+	q.dirty.delete(item)
+
+	return item, false
+}
+
+// Done marks item as done processing, and if it has been marked as dirty again
+// while it was being processed, it will be re-added to the queue for
+// re-processing.
+func (q *Typed[T]) Done(item T) {
+	q.cond.L.Lock()
+	defer q.cond.L.Unlock()
+
+	q.metrics.done(item)
+
+	q.processing.delete(item)
+	if q.dirty.has(item) {
+		q.queue.Push(item)
+		q.cond.Signal()
+	} else if q.processing.len() == 0 {
+		q.cond.Signal()
+	}
+}
+
+// ShutDown will cause q to ignore all new items added to it and
+// immediately instruct the worker goroutines to exit.
+func (q *Typed[T]) ShutDown() {
+	q.cond.L.Lock()
+	defer q.cond.L.Unlock()
+
+	q.drain = false
+	q.shuttingDown = true
+	q.cond.Broadcast()
+}
+
+// ShutDownWithDrain will cause q to ignore all new items added to it. As soon
+// as the worker goroutines have "drained", i.e. finished processing and called
+// Done on all existing items in the queue, they will be instructed to exit and
+// ShutDownWithDrain will return. Hence, a strict requirement for using this is
+// that your workers ensure Done is called on all items in the queue once the
+// shutdown has been initiated; if that is not the case, this will block
+// indefinitely. It is, however, safe to call ShutDown after having called
+// ShutDownWithDrain, to force the queue shutdown to terminate immediately
+// without waiting for the drainage.
+func (q *Typed[T]) ShutDownWithDrain() {
+	q.cond.L.Lock()
+	defer q.cond.L.Unlock()
+
+	q.drain = true
+	q.shuttingDown = true
+	q.cond.Broadcast()
+
+	for q.processing.len() != 0 && q.drain {
+		q.cond.Wait()
+	}
+}
+
+func (q *Typed[T]) ShuttingDown() bool {
+	q.cond.L.Lock()
+	defer q.cond.L.Unlock()
+
+	return q.shuttingDown
+}
+
+func (q *Typed[T]) updateUnfinishedWorkLoop() {
+	t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
+	defer t.Stop()
+	for range t.C() {
+		if !func() bool {
+			q.cond.L.Lock()
+			defer q.cond.L.Unlock()
+			if !q.shuttingDown {
+				q.metrics.updateUnfinishedWork()
+				return true
+			}
+			return false
+
+		}() {
+			return
+		}
+	}
+}
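
A minimal sketch of the intended produce/consume cycle for the Typed queue above; the worker loop and item values are invented for illustration.

    package main

    import (
    	"fmt"
    	"sync"

    	"k8s.io/client-go/util/workqueue"
    )

    func main() {
    	q := workqueue.NewTyped[string]()

    	var wg sync.WaitGroup
    	wg.Add(1)
    	go func() {
    		defer wg.Done()
    		for {
    			item, shutdown := q.Get()
    			if shutdown {
    				return
    			}
    			fmt.Println("processing", item)
    			// Done is mandatory: it re-queues the item if it was re-added
    			// (marked dirty) while we were processing it.
    			q.Done(item)
    		}
    	}()

    	q.Add("a")
    	q.Add("a") // coalesced if "a" is still in the dirty set
    	q.Add("b")

    	q.ShutDownWithDrain() // waits for in-flight items before workers exit
    	wg.Wait()
    }
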
diff --git a/hack/tools/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/hack/tools/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
new file mode 100644
index 0000000000..fe45afa5a4
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+import "k8s.io/utils/clock"
+
+// RateLimitingInterface is an interface that rate limits items being added to the queue.
+//
+// Deprecated: Use TypedRateLimitingInterface instead.
+type RateLimitingInterface TypedRateLimitingInterface[any]
+
+// TypedRateLimitingInterface is an interface that rate limits items being added to the queue.
+type TypedRateLimitingInterface[T comparable] interface {
+	TypedDelayingInterface[T]
+
+	// AddRateLimited adds an item to the workqueue after the rate limiter says it's ok
+	AddRateLimited(item T)
+
+	// Forget indicates that an item is finished being retried.  Doesn't matter whether it's for perm failing
+	// or for success, we'll stop the rate limiter from tracking it.  This only clears the `rateLimiter`, you
+	// still have to call `Done` on the queue.
+	Forget(item T)
+
+	// NumRequeues returns back how many times the item was requeued
+	NumRequeues(item T) int
+}
+
+// RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface.
+//
+// Deprecated: Use TypedRateLimitingQueueConfig instead.
+type RateLimitingQueueConfig = TypedRateLimitingQueueConfig[any]
+
+// TypedRateLimitingQueueConfig specifies optional configurations to customize a TypedRateLimitingInterface.
+type TypedRateLimitingQueueConfig[T comparable] struct {
+	// Name for the queue. If unnamed, the metrics will not be registered.
+	Name string
+
+	// MetricsProvider optionally allows specifying a metrics provider to use for the queue
+	// instead of the global provider.
+	MetricsProvider MetricsProvider
+
+	// Clock optionally allows injecting a real or fake clock for testing purposes.
+	Clock clock.WithTicker
+
+	// DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one.
+	DelayingQueue TypedDelayingInterface[T]
+}
+
+// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability
+// Remember to call Forget!  If you don't, you may end up tracking failures forever.
+// NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use
+// NewRateLimitingQueueWithConfig instead and specify a name.
+//
+// Deprecated: Use NewTypedRateLimitingQueue instead.
+func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface {
+	return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{})
+}
+
+// NewTypedRateLimitingQueue constructs a new workqueue with rateLimited queuing ability
+// Remember to call Forget!  If you don't, you may end up tracking failures forever.
+// NewTypedRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use
+// NewTypedRateLimitingQueueWithConfig instead and specify a name.
+func NewTypedRateLimitingQueue[T comparable](rateLimiter TypedRateLimiter[T]) TypedRateLimitingInterface[T] {
+	return NewTypedRateLimitingQueueWithConfig(rateLimiter, TypedRateLimitingQueueConfig[T]{})
+}
+
+// NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability
+// with options to customize different properties.
+// Remember to call Forget!  If you don't, you may end up tracking failures forever.
+//
+// Deprecated: Use NewTypedRateLimitingQueueWithConfig instead.
+func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface {
+	return NewTypedRateLimitingQueueWithConfig(rateLimiter, config)
+}
+
+// NewTypedRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability
+// with options to customize different properties.
+// Remember to call Forget!  If you don't, you may end up tracking failures forever.
+func NewTypedRateLimitingQueueWithConfig[T comparable](rateLimiter TypedRateLimiter[T], config TypedRateLimitingQueueConfig[T]) TypedRateLimitingInterface[T] {
+	if config.Clock == nil {
+		config.Clock = clock.RealClock{}
+	}
+
+	if config.DelayingQueue == nil {
+		config.DelayingQueue = NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{
+			Name:            config.Name,
+			MetricsProvider: config.MetricsProvider,
+			Clock:           config.Clock,
+		})
+	}
+
+	return &rateLimitingType[T]{
+		TypedDelayingInterface: config.DelayingQueue,
+		rateLimiter:            rateLimiter,
+	}
+}
+
+// NewNamedRateLimitingQueue constructs a new named workqueue with rateLimited queuing ability.
+// Deprecated: Use NewRateLimitingQueueWithConfig instead.
+func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface {
+	return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{
+		Name: name,
+	})
+}
+
+// NewRateLimitingQueueWithDelayingInterface constructs a new named workqueue with rateLimited queuing ability
+// with the option to inject a custom delaying queue instead of the default one.
+// Deprecated: Use NewRateLimitingQueueWithConfig instead.
+func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter RateLimiter) RateLimitingInterface {
+	return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{
+		DelayingQueue: di,
+	})
+}
+
+// rateLimitingType wraps an Interface and provides rate-limited re-enqueuing
+type rateLimitingType[T comparable] struct {
+	TypedDelayingInterface[T]
+
+	rateLimiter TypedRateLimiter[T]
+}
+
+// AddRateLimited calls AddAfter with the delay the rate limiter returns for the item.
+func (q *rateLimitingType[T]) AddRateLimited(item T) {
+	q.TypedDelayingInterface.AddAfter(item, q.rateLimiter.When(item))
+}
+
+func (q *rateLimitingType[T]) NumRequeues(item T) int {
+	return q.rateLimiter.NumRequeues(item)
+}
+
+func (q *rateLimitingType[T]) Forget(item T) {
+	q.rateLimiter.Forget(item)
+}
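
For orientation, the canonical consume pattern for this interface: Get, then exactly one of AddRateLimited (failure) or Forget (success), then Done. Here reconcile is a hypothetical work function, and the typed default rate limiter is assumed from this package's default_rate_limiters.go.

    package main

    import (
    	"fmt"

    	"k8s.io/client-go/util/workqueue"
    )

    func reconcile(key string) error { return nil } // stand-in for real work

    // processNextItem drains one item, retrying with backoff on failure.
    func processNextItem(q workqueue.TypedRateLimitingInterface[string]) bool {
    	key, shutdown := q.Get()
    	if shutdown {
    		return false
    	}
    	defer q.Done(key)

    	if err := reconcile(key); err != nil {
    		q.AddRateLimited(key) // re-queue after the per-item backoff
    		return true
    	}
    	q.Forget(key) // success: stop tracking backoff state for this key
    	return true
    }

    func main() {
    	q := workqueue.NewTypedRateLimitingQueue(
    		workqueue.DefaultTypedControllerRateLimiter[string]())
    	q.Add("example")
    	for processNextItem(q) {
    		if q.Len() == 0 {
    			q.ShutDown()
    		}
    	}
    	fmt.Println("drained")
    }
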
diff --git a/hack/tools/vendor/k8s.io/component-base/LICENSE b/hack/tools/vendor/k8s.io/component-base/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/hack/tools/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go b/hack/tools/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go
new file mode 100644
index 0000000000..11adc26831
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	"crypto/tls"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+var (
+	// ciphers maps strings into tls package cipher constants in
+	// https://golang.org/pkg/crypto/tls/#pkg-constants
+	ciphers         = map[string]uint16{}
+	insecureCiphers = map[string]uint16{}
+)
+
+func init() {
+	for _, suite := range tls.CipherSuites() {
+		ciphers[suite.Name] = suite.ID
+	}
+	// keep legacy names for backward compatibility
+	ciphers["TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"] = tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
+	ciphers["TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305"] = tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
+
+	for _, suite := range tls.InsecureCipherSuites() {
+		insecureCiphers[suite.Name] = suite.ID
+	}
+}
+
+// InsecureTLSCiphers returns the cipher suites implemented by crypto/tls which have
+// security issues.
+func InsecureTLSCiphers() map[string]uint16 {
+	cipherKeys := make(map[string]uint16, len(insecureCiphers))
+	for k, v := range insecureCiphers {
+		cipherKeys[k] = v
+	}
+	return cipherKeys
+}
+
+// InsecureTLSCipherNames returns a list of cipher suite names implemented by crypto/tls
+// which have security issues.
+func InsecureTLSCipherNames() []string {
+	cipherKeys := sets.NewString()
+	for key := range insecureCiphers {
+		cipherKeys.Insert(key)
+	}
+	return cipherKeys.List()
+}
+
+// PreferredTLSCipherNames returns a list of cipher suite names implemented by crypto/tls.
+func PreferredTLSCipherNames() []string {
+	cipherKeys := sets.NewString()
+	for key := range ciphers {
+		cipherKeys.Insert(key)
+	}
+	return cipherKeys.List()
+}
+
+func allCiphers() map[string]uint16 {
+	acceptedCiphers := make(map[string]uint16, len(ciphers)+len(insecureCiphers))
+	for k, v := range ciphers {
+		acceptedCiphers[k] = v
+	}
+	for k, v := range insecureCiphers {
+		acceptedCiphers[k] = v
+	}
+	return acceptedCiphers
+}
+
+// TLSCipherPossibleValues returns all acceptable cipher suite names.
+// This is a combination of both InsecureTLSCipherNames() and PreferredTLSCipherNames().
+func TLSCipherPossibleValues() []string {
+	cipherKeys := sets.NewString()
+	acceptedCiphers := allCiphers()
+	for key := range acceptedCiphers {
+		cipherKeys.Insert(key)
+	}
+	return cipherKeys.List()
+}
+
+// TLSCipherSuites returns a list of cipher suite IDs from the cipher suite names passed.
+func TLSCipherSuites(cipherNames []string) ([]uint16, error) {
+	if len(cipherNames) == 0 {
+		return nil, nil
+	}
+	ciphersIntSlice := make([]uint16, 0)
+	possibleCiphers := allCiphers()
+	for _, cipher := range cipherNames {
+		intValue, ok := possibleCiphers[cipher]
+		if !ok {
+			return nil, fmt.Errorf("Cipher suite %s not supported or doesn't exist", cipher)
+		}
+		ciphersIntSlice = append(ciphersIntSlice, intValue)
+	}
+	return ciphersIntSlice, nil
+}
+
+var versions = map[string]uint16{
+	"VersionTLS10": tls.VersionTLS10,
+	"VersionTLS11": tls.VersionTLS11,
+	"VersionTLS12": tls.VersionTLS12,
+	"VersionTLS13": tls.VersionTLS13,
+}
+
+// TLSPossibleVersions returns all acceptable values for TLS Version.
+func TLSPossibleVersions() []string {
+	versionsKeys := sets.NewString()
+	for key := range versions {
+		versionsKeys.Insert(key)
+	}
+	return versionsKeys.List()
+}
+
+// TLSVersion returns the TLS Version ID for the version name passed.
+func TLSVersion(versionName string) (uint16, error) {
+	if len(versionName) == 0 {
+		return DefaultTLSVersion(), nil
+	}
+	if version, ok := versions[versionName]; ok {
+		return version, nil
+	}
+	return 0, fmt.Errorf("unknown tls version %q", versionName)
+}
+
+// DefaultTLSVersion defines the default TLS Version.
+func DefaultTLSVersion() uint16 {
+	// Can't use SSLv3 because of POODLE and BEAST
+	// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
+	// Can't use TLSv1.1 because of RC4 cipher usage
+	return tls.VersionTLS12
+}
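
A short sketch of how these helpers are typically combined into a tls.Config; the suite name is one of crypto/tls's standard identifiers, chosen here only for illustration.

    package main

    import (
    	"crypto/tls"
    	"log"

    	cliflag "k8s.io/component-base/cli/flag"
    )

    func main() {
    	suites, err := cliflag.TLSCipherSuites([]string{
    		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	minVersion, err := cliflag.TLSVersion("VersionTLS12")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// An empty version name falls back to DefaultTLSVersion (TLS 1.2).
    	cfg := &tls.Config{MinVersion: minVersion, CipherSuites: suites}
    	_ = cfg
    }
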
diff --git a/hack/tools/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go b/hack/tools/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go
new file mode 100644
index 0000000000..728fa520b6
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// ColonSeparatedMultimapStringString supports setting a map[string][]string from an encoding
+// that separates keys from values with ':' and separates key-value pairs with ','.
+// A key can be repeated multiple times, in which case the values are appended to a
+// slice of strings associated with that key. Items in the list associated with a given
+// key will appear in the order provided.
+// For example: `a:hello,b:again,c:world,b:beautiful` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}`
+// The first call to Set will clear the map before adding entries; subsequent calls will simply append to the map.
+// This makes it possible to override default values with a command-line option rather than appending to defaults,
+// while still allowing the distribution of key-value pairs across multiple flag invocations.
+// For example: `--flag "a:hello" --flag "b:again" --flag "b:beautiful" --flag "c:world"` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}`
+type ColonSeparatedMultimapStringString struct {
+	Multimap             *map[string][]string
+	initialized          bool // set to true after the first Set call
+	allowDefaultEmptyKey bool
+}
+
+// NewColonSeparatedMultimapStringString takes a pointer to a map[string][]string and returns the
+// ColonSeparatedMultimapStringString flag parsing shim for that map.
+func NewColonSeparatedMultimapStringString(m *map[string][]string) *ColonSeparatedMultimapStringString {
+	return &ColonSeparatedMultimapStringString{Multimap: m}
+}
+
+// NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey takes a pointer to a map[string][]string and returns the
+// ColonSeparatedMultimapStringString flag parsing shim for that map. It allows default empty key with no colon in the flag.
+func NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey(m *map[string][]string) *ColonSeparatedMultimapStringString {
+	return &ColonSeparatedMultimapStringString{Multimap: m, allowDefaultEmptyKey: true}
+}
+
+// Set implements github.com/spf13/pflag.Value
+func (m *ColonSeparatedMultimapStringString) Set(value string) error {
+	if m.Multimap == nil {
+		return fmt.Errorf("no target (nil pointer to map[string][]string)")
+	}
+	if !m.initialized || *m.Multimap == nil {
+		// clear default values, or allocate if no existing map
+		*m.Multimap = make(map[string][]string)
+		m.initialized = true
+	}
+	for _, pair := range strings.Split(value, ",") {
+		if len(pair) == 0 {
+			continue
+		}
+		kv := strings.SplitN(pair, ":", 2)
+		var k, v string
+		if m.allowDefaultEmptyKey && len(kv) == 1 {
+			v = strings.TrimSpace(kv[0])
+		} else {
+			if len(kv) != 2 {
+				return fmt.Errorf("malformed pair, expect string:string")
+			}
+			k = strings.TrimSpace(kv[0])
+			v = strings.TrimSpace(kv[1])
+		}
+		(*m.Multimap)[k] = append((*m.Multimap)[k], v)
+	}
+	return nil
+}
+
+// String implements github.com/spf13/pflag.Value
+func (m *ColonSeparatedMultimapStringString) String() string {
+	type kv struct {
+		k string
+		v string
+	}
+	kvs := make([]kv, 0, len(*m.Multimap))
+	for k, vs := range *m.Multimap {
+		for i := range vs {
+			kvs = append(kvs, kv{k: k, v: vs[i]})
+		}
+	}
+	// stable sort by keys, order of values should be preserved
+	sort.SliceStable(kvs, func(i, j int) bool {
+		return kvs[i].k < kvs[j].k
+	})
+	pairs := make([]string, 0, len(kvs))
+	for i := range kvs {
+		pairs = append(pairs, fmt.Sprintf("%s:%s", kvs[i].k, kvs[i].v))
+	}
+	return strings.Join(pairs, ",")
+}
+
+// Type implements github.com/spf13/pflag.Value
+func (m *ColonSeparatedMultimapStringString) Type() string {
+	return "colonSeparatedMultimapStringString"
+}
+
+// Empty implements OmitEmpty
+func (m *ColonSeparatedMultimapStringString) Empty() bool {
+	return len(*m.Multimap) == 0
+}
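
A usage sketch with pflag, showing the documented clear-then-append behavior across repeated flag invocations; the flag name and default entry are invented.

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    	cliflag "k8s.io/component-base/cli/flag"
    )

    func main() {
    	multimap := map[string][]string{"default": {"preset"}} // cleared on first Set
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	fs.Var(cliflag.NewColonSeparatedMultimapStringString(&multimap), "label", "key:value pairs")

    	_ = fs.Parse([]string{"--label", "a:hello,b:again", "--label", "b:beautiful"})
    	fmt.Println(multimap) // map[a:[hello] b:[again beautiful]]
    }
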
diff --git a/hack/tools/vendor/k8s.io/component-base/cli/flag/configuration_map.go b/hack/tools/vendor/k8s.io/component-base/cli/flag/configuration_map.go
new file mode 100644
index 0000000000..911b05ec6c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/cli/flag/configuration_map.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+type ConfigurationMap map[string]string
+
+func (m *ConfigurationMap) String() string {
+	pairs := []string{}
+	for k, v := range *m {
+		pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
+	}
+	sort.Strings(pairs)
+	return strings.Join(pairs, ",")
+}
+
+func (m *ConfigurationMap) Set(value string) error {
+	for _, s := range strings.Split(value, ",") {
+		if len(s) == 0 {
+			continue
+		}
+		arr := strings.SplitN(s, "=", 2)
+		if len(arr) == 2 {
+			(*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1])
+		} else {
+			(*m)[strings.TrimSpace(arr[0])] = ""
+		}
+	}
+	return nil
+}
+
+func (*ConfigurationMap) Type() string {
+	return "mapStringString"
+}
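
By comparison with the multimap above, ConfigurationMap keeps a single value per key and treats an entry without '=' as a key with an empty value. A small sketch, with invented names:

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    	cliflag "k8s.io/component-base/cli/flag"
    )

    func main() {
    	m := cliflag.ConfigurationMap{}
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	fs.Var(&m, "config", "comma-separated key=value pairs")

    	_ = fs.Parse([]string{"--config", "a=1,b=2,verbose"})
    	fmt.Println(m.String()) // a=1,b=2,verbose=
    }
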
diff --git a/hack/tools/vendor/k8s.io/component-base/cli/flag/flags.go b/hack/tools/vendor/k8s.io/component-base/cli/flag/flags.go
new file mode 100644
index 0000000000..8d4a59ce96
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/cli/flag/flags.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	goflag "flag"
+	"strings"
+
+	"github.com/spf13/pflag"
+	"k8s.io/klog/v2"
+)
+
+var underscoreWarnings = make(map[string]struct{})
+
+// WordSepNormalizeFunc changes all flags that contain "_" separators
+func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+	if strings.Contains(name, "_") {
+		return pflag.NormalizedName(strings.Replace(name, "_", "-", -1))
+	}
+	return pflag.NormalizedName(name)
+}
+
+// WarnWordSepNormalizeFunc changes and warns for flags that contain "_" separators
+func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+	if strings.Contains(name, "_") {
+		nname := strings.Replace(name, "_", "-", -1)
+		if _, alreadyWarned := underscoreWarnings[name]; !alreadyWarned {
+			klog.Warningf("using an underscore in a flag name is not supported. %s has been converted to %s.", name, nname)
+			underscoreWarnings[name] = struct{}{}
+		}
+
+		return pflag.NormalizedName(nname)
+	}
+	return pflag.NormalizedName(name)
+}
+
+// InitFlags normalizes, parses, then logs the command line flags
+func InitFlags() {
+	pflag.CommandLine.SetNormalizeFunc(WordSepNormalizeFunc)
+	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+	pflag.Parse()
+	pflag.VisitAll(func(flag *pflag.Flag) {
+		klog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
+	})
+}
+
+// PrintFlags logs the flags in the flagset
+func PrintFlags(flags *pflag.FlagSet) {
+	flags.VisitAll(func(flag *pflag.Flag) {
+		klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
+	})
+}
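
A sketch of wiring the normalization above into a standalone flag set (the flag name is invented); this makes --my_flag an accepted spelling of --my-flag, with a one-time warning per flag name.

    package main

    import (
    	"github.com/spf13/pflag"
    	cliflag "k8s.io/component-base/cli/flag"
    )

    func main() {
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	// Accept --my_flag as an alias for --my-flag, warning once.
    	fs.SetNormalizeFunc(cliflag.WarnWordSepNormalizeFunc)
    	fs.String("my-flag", "", "example flag")
    	_ = fs.Parse([]string{"--my_flag=value"})
    	cliflag.PrintFlags(fs) // logs each flag at klog verbosity 1
    }
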
diff --git a/hack/tools/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go b/hack/tools/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go
new file mode 100644
index 0000000000..bf8dbfb9bf
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// LangleSeparatedMapStringString can be set from the command line with the format `--flag "string<string"`.

[extraction gap: the remainder of langle_separated_map_string_string.go and the header of the next file, namedcertkey_flag.go, are missing; the diff resumes inside (*NamedCertKey).String()]

+func (nkc *NamedCertKey) String() string {
+	s := nkc.CertFile + "," + nkc.KeyFile
+	if len(nkc.Names) > 0 {
+		s = s + ":" + strings.Join(nkc.Names, ",")
+	}
+	return s
+}
+
+func (nkc *NamedCertKey) Set(value string) error {
+	cs := strings.SplitN(value, ":", 2)
+	var keycert string
+	if len(cs) == 2 {
+		var names string
+		keycert, names = strings.TrimSpace(cs[0]), strings.TrimSpace(cs[1])
+		if names == "" {
+			return errors.New("empty names list is not allowed")
+		}
+		nkc.Names = nil
+		for _, name := range strings.Split(names, ",") {
+			nkc.Names = append(nkc.Names, strings.TrimSpace(name))
+		}
+	} else {
+		nkc.Names = nil
+		keycert = strings.TrimSpace(cs[0])
+	}
+	cs = strings.Split(keycert, ",")
+	if len(cs) != 2 {
+		return errors.New("expected comma separated certificate and key file paths")
+	}
+	nkc.CertFile = strings.TrimSpace(cs[0])
+	nkc.KeyFile = strings.TrimSpace(cs[1])
+	return nil
+}
+
+func (*NamedCertKey) Type() string {
+	return "namedCertKey"
+}
+
+// NamedCertKeyArray is a flag value parsing NamedCertKeys, each passed with its own
+// flag instance (in contrast to comma separated slices).
+type NamedCertKeyArray struct {
+	value   *[]NamedCertKey
+	changed bool
+}
+
+var _ flag.Value = &NamedCertKeyArray{}
+
+// NewNamedCertKeyArray creates a new NamedCertKeyArray with the internal value
+// pointing to p.
+func NewNamedCertKeyArray(p *[]NamedCertKey) *NamedCertKeyArray {
+	return &NamedCertKeyArray{
+		value: p,
+	}
+}
+
+func (a *NamedCertKeyArray) Set(val string) error {
+	nkc := NamedCertKey{}
+	err := nkc.Set(val)
+	if err != nil {
+		return err
+	}
+	if !a.changed {
+		*a.value = []NamedCertKey{nkc}
+		a.changed = true
+	} else {
+		*a.value = append(*a.value, nkc)
+	}
+	return nil
+}
+
+func (a *NamedCertKeyArray) Type() string {
+	return "namedCertKey"
+}
+
+func (a *NamedCertKeyArray) String() string {
+	nkcs := make([]string, 0, len(*a.value))
+	for i := range *a.value {
+		nkcs = append(nkcs, (*a.value)[i].String())
+	}
+	return "[" + strings.Join(nkcs, ";") + "]"
+}
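
A usage sketch for the repeated-flag form; the flag name mirrors kube-apiserver's --tls-sni-cert-key convention, and the file paths are placeholders.

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    	cliflag "k8s.io/component-base/cli/flag"
    )

    func main() {
    	var pairs []cliflag.NamedCertKey
    	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
    	fs.Var(cliflag.NewNamedCertKeyArray(&pairs), "tls-sni-cert-key",
    		"certfile,keyfile[:name,name,...]; may be repeated")

    	_ = fs.Parse([]string{
    		"--tls-sni-cert-key=server.crt,server.key",
    		"--tls-sni-cert-key=other.crt,other.key:example.com,*.example.com",
    	})
    	fmt.Println(pairs)
    }
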
diff --git a/hack/tools/vendor/k8s.io/component-base/cli/flag/noop.go b/hack/tools/vendor/k8s.io/component-base/cli/flag/noop.go
new file mode 100644
index 0000000000..03f7f14c0b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/cli/flag/noop.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	goflag "flag"
+	"github.com/spf13/pflag"
+)
+
+// NoOp implements goflag.Value and pflag.Value,
+// but has a noop Set implementation
+type NoOp struct{}
+
+var _ goflag.Value = NoOp{}
+var _ pflag.Value = NoOp{}
+
+func (NoOp) String() string {
+	return ""
+}
+
+func (NoOp) Set(val string) error {
+	return nil
+}
+
+func (NoOp) Type() string {
+	return "NoOp"
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/cli/flag/omitempty.go b/hack/tools/vendor/k8s.io/component-base/cli/flag/omitempty.go
new file mode 100644
index 0000000000..c354754ea7
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/cli/flag/omitempty.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+// OmitEmpty is an interface for flags to report whether their underlying value
+// is "empty." If a flag implements OmitEmpty and returns true for a call to Empty(),
+// it is assumed that flag may be omitted from the command line.
+type OmitEmpty interface {
+	Empty() bool
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/cli/flag/sectioned.go b/hack/tools/vendor/k8s.io/component-base/cli/flag/sectioned.go
new file mode 100644
index 0000000000..2357428761
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/cli/flag/sectioned.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+)
+
+const (
+	usageFmt = "Usage:\n  %s\n"
+)
+
+// NamedFlagSets stores named flag sets in the order of calling FlagSet.
+type NamedFlagSets struct {
+	// Order is an ordered list of flag set names.
+	Order []string
+	// FlagSets stores the flag sets by name.
+	FlagSets map[string]*pflag.FlagSet
+	// NormalizeNameFunc is the normalize function used to initialize FlagSets created by NamedFlagSets.
+	NormalizeNameFunc func(f *pflag.FlagSet, name string) pflag.NormalizedName
+}
+
+// FlagSet returns the flag set with the given name and adds it to the
+// ordered name list if it is not in there yet.
+func (nfs *NamedFlagSets) FlagSet(name string) *pflag.FlagSet {
+	if nfs.FlagSets == nil {
+		nfs.FlagSets = map[string]*pflag.FlagSet{}
+	}
+	if _, ok := nfs.FlagSets[name]; !ok {
+		flagSet := pflag.NewFlagSet(name, pflag.ExitOnError)
+		flagSet.SetNormalizeFunc(pflag.CommandLine.GetNormalizeFunc())
+		if nfs.NormalizeNameFunc != nil {
+			flagSet.SetNormalizeFunc(nfs.NormalizeNameFunc)
+		}
+		nfs.FlagSets[name] = flagSet
+		nfs.Order = append(nfs.Order, name)
+	}
+	return nfs.FlagSets[name]
+}
+
+// PrintSections prints the given named flag sets in sections, wrapping at the given maximal column number.
+// If cols is zero, lines are not wrapped.
+func PrintSections(w io.Writer, fss NamedFlagSets, cols int) {
+	for _, name := range fss.Order {
+		fs := fss.FlagSets[name]
+		if !fs.HasFlags() {
+			continue
+		}
+
+		wideFS := pflag.NewFlagSet("", pflag.ExitOnError)
+		wideFS.AddFlagSet(fs)
+
+		var zzz string
+		if cols > 24 {
+			zzz = strings.Repeat("z", cols-24)
+			wideFS.Int(zzz, 0, strings.Repeat("z", cols-24))
+		}
+
+		var buf bytes.Buffer
+		fmt.Fprintf(&buf, "\n%s flags:\n\n%s", strings.ToUpper(name[:1])+name[1:], wideFS.FlagUsagesWrapped(cols))
+
+		if cols > 24 {
+			i := strings.Index(buf.String(), zzz)
+			lines := strings.Split(buf.String()[:i], "\n")
+			fmt.Fprint(w, strings.Join(lines[:len(lines)-1], "\n"))
+			fmt.Fprintln(w)
+		} else {
+			fmt.Fprint(w, buf.String())
+		}
+	}
+}
+
+// SetUsageAndHelpFunc sets both the usage and help functions.
+// It prints only the given flag sets instead of all of them.
+func SetUsageAndHelpFunc(cmd *cobra.Command, fss NamedFlagSets, cols int) {
+	cmd.SetUsageFunc(func(cmd *cobra.Command) error {
+		fmt.Fprintf(cmd.OutOrStderr(), usageFmt, cmd.UseLine())
+		PrintSections(cmd.OutOrStderr(), fss, cols)
+		return nil
+	})
+	cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
+		fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n"+usageFmt, cmd.Long, cmd.UseLine())
+		PrintSections(cmd.OutOrStdout(), fss, cols)
+	})
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/cli/flag/string_flag.go b/hack/tools/vendor/k8s.io/component-base/cli/flag/string_flag.go
new file mode 100644
index 0000000000..331bdb66e2
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/cli/flag/string_flag.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+// StringFlag is a string flag compatible with flags and pflags that keeps track of whether it had a value supplied or not.
+type StringFlag struct {
+	// If Set has been invoked, this value is true.
+	provided bool
+	// The exact value provided on the flag
+	value string
+}
+
+func NewStringFlag(defaultVal string) StringFlag {
+	return StringFlag{value: defaultVal}
+}
+
+func (f *StringFlag) Default(value string) {
+	f.value = value
+}
+
+func (f StringFlag) String() string {
+	return f.value
+}
+
+func (f StringFlag) Value() string {
+	return f.value
+}
+
+func (f *StringFlag) Set(value string) error {
+	f.value = value
+	f.provided = true
+
+	return nil
+}
+
+func (f StringFlag) Provided() bool {
+	return f.provided
+}
+
+func (f *StringFlag) Type() string {
+	return "string"
+}
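+
+// Example (illustrative; "fs" is an assumed pflag.FlagSet): distinguish
+// "left at default" from "explicitly set to the default value".
+//
+//	mode := NewStringFlag("auto")
+//	fs.Var(&mode, "mode", "operating mode")
+//	// after fs.Parse: mode.Provided() reports whether --mode was passed,
+//	// even if its value equals the default "auto".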
diff --git a/hack/tools/vendor/k8s.io/component-base/cli/flag/string_slice_flag.go b/hack/tools/vendor/k8s.io/component-base/cli/flag/string_slice_flag.go
new file mode 100644
index 0000000000..ad0d07d758
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/cli/flag/string_slice_flag.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	goflag "flag"
+	"fmt"
+	"strings"
+
+	"github.com/spf13/pflag"
+)
+
+// StringSlice implements goflag.Value and pflag.Value,
+// and allows Set to be invoked repeatedly to accumulate values.
+type StringSlice struct {
+	value   *[]string
+	changed bool
+}
+
+func NewStringSlice(s *[]string) *StringSlice {
+	return &StringSlice{value: s}
+}
+
+var _ goflag.Value = &StringSlice{}
+var _ pflag.Value = &StringSlice{}
+
+func (s *StringSlice) String() string {
+	if s == nil || s.value == nil {
+		return ""
+	}
+	return strings.Join(*s.value, " ")
+}
+
+func (s *StringSlice) Set(val string) error {
+	if s.value == nil {
+		return fmt.Errorf("no target (nil pointer to []string)")
+	}
+	if !s.changed {
+		*s.value = make([]string, 0)
+	}
+	*s.value = append(*s.value, val)
+	s.changed = true
+	return nil
+}
+
+func (StringSlice) Type() string {
+	return "sliceString"
+}
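+
+// Example (illustrative; "fs" is an assumed pflag.FlagSet):
+//
+//	var labels []string
+//	fs.Var(NewStringSlice(&labels), "label", "may be repeated")
+//	// --label=a --label=b yields labels == []string{"a", "b"}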
diff --git a/hack/tools/vendor/k8s.io/component-base/cli/flag/tristate.go b/hack/tools/vendor/k8s.io/component-base/cli/flag/tristate.go
new file mode 100644
index 0000000000..cf16376bf9
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/cli/flag/tristate.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// Tristate is a flag compatible with flags and pflags that
+// keeps track of whether it had a value supplied or not.
+type Tristate int
+
+const (
+	Unset Tristate = iota // 0
+	True
+	False
+)
+
+func (f *Tristate) Default(value bool) {
+	*f = triFromBool(value)
+}
+
+func (f Tristate) String() string {
+	b := boolFromTri(f)
+	return fmt.Sprintf("%t", b)
+}
+
+func (f Tristate) Value() bool {
+	b := boolFromTri(f)
+	return b
+}
+
+func (f *Tristate) Set(value string) error {
+	boolVal, err := strconv.ParseBool(value)
+	if err != nil {
+		return err
+	}
+
+	*f = triFromBool(boolVal)
+	return nil
+}
+
+func (f Tristate) Provided() bool {
+	return f != Unset
+}
+
+func (f *Tristate) Type() string {
+	return "tristate"
+}
+
+func boolFromTri(t Tristate) bool {
+	return t == True
+}
+
+func triFromBool(b bool) Tristate {
+	if b {
+		return True
+	}
+	return False
+}
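+
+// Example (illustrative; "fs" is an assumed pflag.FlagSet): a Tristate
+// distinguishes "never set" from "explicitly set to false".
+//
+//	var dryRun Tristate
+//	fs.Var(&dryRun, "dry-run", "enable dry-run mode")
+//	// after parsing: dryRun.Provided() && !dryRun.Value() means the user
+//	// explicitly passed --dry-run=false.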
diff --git a/hack/tools/vendor/k8s.io/component-base/featuregate/OWNERS b/hack/tools/vendor/k8s.io/component-base/featuregate/OWNERS
new file mode 100644
index 0000000000..b2f165b6d5
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/featuregate/OWNERS
@@ -0,0 +1,16 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+# Currently assigned to api-approvers since feature gates are the API
+# for enabling/disabling other APIs.
+
+# Disable inheritance as this is an api owners file
+options:
+  no_parent_owners: true
+approvers:
+  - api-approvers
+reviewers:
+  - api-reviewers
+labels:
+  - kind/api-change
+  - sig/api-machinery
+  - sig/cluster-lifecycle
diff --git a/hack/tools/vendor/k8s.io/component-base/featuregate/feature_gate.go b/hack/tools/vendor/k8s.io/component-base/featuregate/feature_gate.go
new file mode 100644
index 0000000000..b6f08a6cd6
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/featuregate/feature_gate.go
@@ -0,0 +1,738 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package featuregate
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"github.com/spf13/pflag"
+
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/naming"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/version"
+	featuremetrics "k8s.io/component-base/metrics/prometheus/feature"
+	baseversion "k8s.io/component-base/version"
+	"k8s.io/klog/v2"
+)
+
+type Feature string
+
+const (
+	flagName = "feature-gates"
+
+	// allAlphaGate is a global toggle for alpha features. Per-feature key
+	// values override the default set by allAlphaGate. Examples:
+	//   AllAlpha=false,NewFeature=true  will result in NewFeature=true
+	//   AllAlpha=true,NewFeature=false  will result in NewFeature=false
+	allAlphaGate Feature = "AllAlpha"
+
+	// allBetaGate is a global toggle for beta features. Per-feature key
+	// values override the default set by allBetaGate. Examples:
+	//   AllBeta=false,NewFeature=true  will result in NewFeature=true
+	//   AllBeta=true,NewFeature=false  will result in NewFeature=false
+	allBetaGate Feature = "AllBeta"
+)
+
+var (
+	// The generic features.
+	defaultFeatures = map[Feature]VersionedSpecs{
+		allAlphaGate: {{Default: false, PreRelease: Alpha, Version: version.MajorMinor(0, 0)}},
+		allBetaGate:  {{Default: false, PreRelease: Beta, Version: version.MajorMinor(0, 0)}},
+	}
+
+	// Special handling for a few gates.
+	specialFeatures = map[Feature]func(known map[Feature]VersionedSpecs, enabled map[Feature]bool, val bool, cVer *version.Version){
+		allAlphaGate: setUnsetAlphaGates,
+		allBetaGate:  setUnsetBetaGates,
+	}
+)
+
+type FeatureSpec struct {
+	// Default is the default enablement state for the feature
+	Default bool
+	// LockToDefault indicates that the feature is locked to its default and cannot be changed
+	LockToDefault bool
+	// PreRelease indicates the current maturity level of the feature
+	PreRelease prerelease
+	// Version indicates the earliest version from which this FeatureSpec is valid.
+	// If multiple FeatureSpecs exist for a Feature, the one with the highest version that is less
+	// than or equal to the effective version of the component is used.
+	Version *version.Version
+}
+
+type VersionedSpecs []FeatureSpec
+
+func (g VersionedSpecs) Len() int { return len(g) }
+func (g VersionedSpecs) Less(i, j int) bool {
+	return g[i].Version.LessThan(g[j].Version)
+}
+func (g VersionedSpecs) Swap(i, j int) { g[i], g[j] = g[j], g[i] }
+
+type PromotionVersionMapping map[prerelease]string
+
+type prerelease string
+
+const (
+	PreAlpha = prerelease("PRE-ALPHA")
+	// Values for PreRelease.
+	Alpha = prerelease("ALPHA")
+	Beta  = prerelease("BETA")
+	GA    = prerelease("")
+
+	// Deprecated
+	Deprecated = prerelease("DEPRECATED")
+)
+
+// FeatureGate indicates whether a given feature is enabled or not
+type FeatureGate interface {
+	// Enabled returns true if the key is enabled.
+	Enabled(key Feature) bool
+	// KnownFeatures returns a slice of strings describing the FeatureGate's known features.
+	KnownFeatures() []string
+	// DeepCopy returns a deep copy of the FeatureGate object, such that gates can be
+	// set on the copy without mutating the original. This is useful for validating
+	// config against potential feature gate changes before committing those changes.
+	DeepCopy() MutableVersionedFeatureGate
+	// Validate checks if the flag gates are valid at the emulated version.
+	Validate() []error
+}
+
+// MutableFeatureGate parses and stores flag gates for known features from
+// a string like feature1=true,feature2=false,...
+type MutableFeatureGate interface {
+	FeatureGate
+
+	// AddFlag adds a flag for setting global feature gates to the specified FlagSet.
+	AddFlag(fs *pflag.FlagSet)
+	// Close sets closed to true, and prevents subsequent calls to Add
+	Close()
+	// Set parses and stores flag gates for known features
+	// from a string like feature1=true,feature2=false,...
+	Set(value string) error
+	// SetFromMap stores flag gates for known features from a map[string]bool or returns an error
+	SetFromMap(m map[string]bool) error
+	// Add adds features to the featureGate.
+	Add(features map[Feature]FeatureSpec) error
+	// GetAll returns a copy of the map of known feature names to feature specs.
+	GetAll() map[Feature]FeatureSpec
+	// AddMetrics adds feature enablement metrics
+	AddMetrics()
+	// OverrideDefault sets a local override for the registered default value of a named
+	// feature. If the feature has not been previously registered (e.g. by a call to Add), has a
+	// locked default, or if the gate has already registered itself with a FlagSet, a non-nil
+	// error is returned.
+	//
+	// When two or more components consume a common feature, one component can override its
+	// default at runtime in order to adopt new defaults before or after the other
+	// components. For example, a new feature can be evaluated with a limited blast radius by
+	// overriding its default to true for a limited number of components without simultaneously
+	// changing its default for all consuming components.
+	OverrideDefault(name Feature, override bool) error
+}
+
+// MutableVersionedFeatureGate parses and stores flag gates for known features from
+// a string like feature1=true,feature2=false,...
+// MutableVersionedFeatureGate sets options based on the emulated version of the feature gate.
+type MutableVersionedFeatureGate interface {
+	MutableFeatureGate
+	// EmulationVersion returns the version the feature gate is set to emulate.
+	// If set, the feature gate would enable/disable features based on
+	// feature availability and pre-release at the emulated version instead of the binary version.
+	EmulationVersion() *version.Version
+	// SetEmulationVersion overrides the emulationVersion of the feature gate.
+	// Otherwise, the emulationVersion will be the same as the binary version.
+	// If set, the feature defaults and availability will be as if the binary is at the emulated version.
+	SetEmulationVersion(emulationVersion *version.Version) error
+	// GetAll returns a copy of the map of known feature names to versioned feature specs.
+	GetAllVersioned() map[Feature]VersionedSpecs
+	// AddVersioned adds versioned feature specs to the featureGate.
+	AddVersioned(features map[Feature]VersionedSpecs) error
+	// OverrideDefaultAtVersion sets a local override for the registered default value of a named
+	// feature for the prerelease lifecycle the given version is at.
+	// If the feature has not been previously registered (e.g. by a call to Add),
+	// has a locked default, or if the gate has already registered itself with a FlagSet, a non-nil
+	// error is returned.
+	//
+	// When two or more components consume a common feature, one component can override its
+	// default at runtime in order to adopt new defaults before or after the other
+	// components. For example, a new feature can be evaluated with a limited blast radius by
+	// overriding its default to true for a limited number of components without simultaneously
+	// changing its default for all consuming components.
+	OverrideDefaultAtVersion(name Feature, override bool, ver *version.Version) error
+	// ExplicitlySet returns true if the feature value is explicitly set instead of
+	// being derived from the default values or special features.
+	ExplicitlySet(name Feature) bool
+	// ResetFeatureValueToDefault resets the value of the feature back to the default value.
+	ResetFeatureValueToDefault(name Feature) error
+	// DeepCopyAndReset copies all the registered features of the FeatureGate object, with all the known features and overrides,
+	// and resets all the enabled status of the new feature gate.
+	// This is useful for creating a new instance of feature gate without inheriting all the enabled configurations of the base feature gate.
+	DeepCopyAndReset() MutableVersionedFeatureGate
+}
+
+// featureGate implements FeatureGate as well as pflag.Value for flag parsing.
+type featureGate struct {
+	featureGateName string
+
+	special map[Feature]func(map[Feature]VersionedSpecs, map[Feature]bool, bool, *version.Version)
+
+	// lock guards writes to all below fields.
+	lock sync.Mutex
+	// known holds a map[Feature]FeatureSpec
+	known atomic.Value
+	// enabled holds a map[Feature]bool
+	enabled atomic.Value
+	// enabledRaw holds a raw map[string]bool of the parsed flag.
+	// It keeps the original values of "special" features like "all alpha gates",
+	// while enabled keeps the values of all resolved features.
+	enabledRaw atomic.Value
+	// closed is set to true when AddFlag is called, and prevents subsequent calls to Add
+	closed bool
+	// queriedFeatures stores all the features that have been queried through the Enabled interface.
+	// It is reset when SetEmulationVersion is called.
+	queriedFeatures  atomic.Value
+	emulationVersion atomic.Pointer[version.Version]
+}
+
+func setUnsetAlphaGates(known map[Feature]VersionedSpecs, enabled map[Feature]bool, val bool, cVer *version.Version) {
+	for k, v := range known {
+		if k == "AllAlpha" || k == "AllBeta" {
+			continue
+		}
+		featureSpec := featureSpecAtEmulationVersion(v, cVer)
+		if featureSpec.PreRelease == Alpha {
+			if _, found := enabled[k]; !found {
+				enabled[k] = val
+			}
+		}
+	}
+}
+
+func setUnsetBetaGates(known map[Feature]VersionedSpecs, enabled map[Feature]bool, val bool, cVer *version.Version) {
+	for k, v := range known {
+		if k == "AllAlpha" || k == "AllBeta" {
+			continue
+		}
+		featureSpec := featureSpecAtEmulationVersion(v, cVer)
+		if featureSpec.PreRelease == Beta {
+			if _, found := enabled[k]; !found {
+				enabled[k] = val
+			}
+		}
+	}
+}
+
+// Set, String, and Type implement pflag.Value
+var _ pflag.Value = &featureGate{}
+
+// internalPackages are packages that are ignored when creating a name for featureGates. These packages are in the common
+// call chains, so they'd be unhelpful as names.
+var internalPackages = []string{"k8s.io/component-base/featuregate/feature_gate.go"}
+
+// NewVersionedFeatureGate creates a feature gate with the emulation version set to the provided version.
+// SetEmulationVersion can be called after to change emulation version to a desired value.
+func NewVersionedFeatureGate(emulationVersion *version.Version) *featureGate {
+	known := map[Feature]VersionedSpecs{}
+	for k, v := range defaultFeatures {
+		known[k] = v
+	}
+
+	f := &featureGate{
+		featureGateName: naming.GetNameFromCallsite(internalPackages...),
+		special:         specialFeatures,
+	}
+	f.known.Store(known)
+	f.enabled.Store(map[Feature]bool{})
+	f.enabledRaw.Store(map[string]bool{})
+	f.emulationVersion.Store(emulationVersion)
+	f.queriedFeatures.Store(sets.Set[Feature]{})
+	klog.V(1).Infof("new feature gate with emulationVersion=%s", f.emulationVersion.Load().String())
+	return f
+}
+
+// NewFeatureGate creates a feature gate with the current binary version.
+func NewFeatureGate() *featureGate {
+	binaryVersion := version.MustParse(baseversion.DefaultKubeBinaryVersion)
+	return NewVersionedFeatureGate(binaryVersion)
+}
+
+// Set parses a string of the form "key1=value1,key2=value2,..." into a
+// map[string]bool of known keys or returns an error.
+func (f *featureGate) Set(value string) error {
+	m := make(map[string]bool)
+	for _, s := range strings.Split(value, ",") {
+		if len(s) == 0 {
+			continue
+		}
+		arr := strings.SplitN(s, "=", 2)
+		k := strings.TrimSpace(arr[0])
+		if len(arr) != 2 {
+			return fmt.Errorf("missing bool value for %s", k)
+		}
+		v := strings.TrimSpace(arr[1])
+		boolValue, err := strconv.ParseBool(v)
+		if err != nil {
+			return fmt.Errorf("invalid value of %s=%s, err: %v", k, v, err)
+		}
+		m[k] = boolValue
+	}
+	return f.SetFromMap(m)
+}
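+
+// For example (illustrative; "MyFeature" is a hypothetical gate that must be
+// registered via Add before it can be set):
+//
+//	fg := NewFeatureGate()
+//	_ = fg.Add(map[Feature]FeatureSpec{"MyFeature": {Default: false, PreRelease: Alpha}})
+//	_ = fg.Set("AllAlpha=true,MyFeature=false") // per-feature keys override AllAlpha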
+
+// Validate checks if the flag gates are valid at the emulated version.
+func (f *featureGate) Validate() []error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	m, ok := f.enabledRaw.Load().(map[string]bool)
+	if !ok {
+		return []error{fmt.Errorf("cannot cast enabledRaw to map[string]bool")}
+	}
+	enabled := map[Feature]bool{}
+	return f.unsafeSetFromMap(enabled, m, f.EmulationVersion())
+}
+
+// unsafeSetFromMap stores flag gates for known features from a map[string]bool into an enabled map.
+func (f *featureGate) unsafeSetFromMap(enabled map[Feature]bool, m map[string]bool, emulationVersion *version.Version) []error {
+	var errs []error
+	// Copy existing state
+	known := map[Feature]VersionedSpecs{}
+	for k, v := range f.known.Load().(map[Feature]VersionedSpecs) {
+		sort.Sort(v)
+		known[k] = v
+	}
+
+	for k, v := range m {
+		key := Feature(k)
+		versionedSpecs, ok := known[key]
+		if !ok {
+			// return early upon encountering an unknown feature.
+			errs = append(errs, fmt.Errorf("unrecognized feature gate: %s", k))
+			return errs
+		}
+		featureSpec := featureSpecAtEmulationVersion(versionedSpecs, emulationVersion)
+		if featureSpec.LockToDefault && featureSpec.Default != v {
+			errs = append(errs, fmt.Errorf("cannot set feature gate %v to %v, feature is locked to %v", k, v, featureSpec.Default))
+			continue
+		}
+		// Handle "special" features like "all alpha gates"
+		if fn, found := f.special[key]; found {
+			fn(known, enabled, v, emulationVersion)
+			enabled[key] = v
+			continue
+		}
+		if featureSpec.PreRelease == PreAlpha {
+			errs = append(errs, fmt.Errorf("cannot set feature gate %v to %v, feature is PreAlpha at emulated version %s", k, v, emulationVersion.String()))
+			continue
+		}
+		enabled[key] = v
+
+		if featureSpec.PreRelease == Deprecated {
+			klog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v)
+		} else if featureSpec.PreRelease == GA {
+			klog.Warningf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v)
+		}
+	}
+	return errs
+}
+
+// SetFromMap stores flag gates for known features from a map[string]bool or returns an error
+func (f *featureGate) SetFromMap(m map[string]bool) error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	// Copy existing state
+	enabled := map[Feature]bool{}
+	for k, v := range f.enabled.Load().(map[Feature]bool) {
+		enabled[k] = v
+	}
+	enabledRaw := map[string]bool{}
+	for k, v := range f.enabledRaw.Load().(map[string]bool) {
+		enabledRaw[k] = v
+	}
+
+	// Update enabledRaw first.
+	// SetFromMap might be called when emulationVersion is not finalized yet, and we do not know the final state of enabled.
+	// But the flags still need to be saved.
+	for k, v := range m {
+		enabledRaw[k] = v
+	}
+	f.enabledRaw.Store(enabledRaw)
+
+	errs := f.unsafeSetFromMap(enabled, enabledRaw, f.EmulationVersion())
+	if len(errs) == 0 {
+		// Persist changes
+		f.enabled.Store(enabled)
+		klog.V(1).Infof("feature gates: %v", f.enabled)
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+// String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...".
+func (f *featureGate) String() string {
+	pairs := []string{}
+	for k, v := range f.enabled.Load().(map[Feature]bool) {
+		pairs = append(pairs, fmt.Sprintf("%s=%t", k, v))
+	}
+	sort.Strings(pairs)
+	return strings.Join(pairs, ",")
+}
+
+func (f *featureGate) Type() string {
+	return "mapStringBool"
+}
+
+// Add adds features to the featureGate.
+func (f *featureGate) Add(features map[Feature]FeatureSpec) error {
+	vs := map[Feature]VersionedSpecs{}
+	for name, spec := range features {
+		// if no version is provided for the FeatureSpec, it is defaulted to version 0.0 so that it can be enabled/disabled regardless of emulation version.
+		spec.Version = version.MajorMinor(0, 0)
+		vs[name] = VersionedSpecs{spec}
+	}
+	return f.AddVersioned(vs)
+}
+
+// AddVersioned adds versioned feature specs to the featureGate.
+func (f *featureGate) AddVersioned(features map[Feature]VersionedSpecs) error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.closed {
+		return fmt.Errorf("cannot add a feature gate after adding it to the flag set")
+	}
+
+	// Copy existing state
+	known := f.GetAllVersioned()
+
+	for name, specs := range features {
+		sort.Sort(specs)
+		if existingSpec, found := known[name]; found {
+			sort.Sort(existingSpec)
+			if reflect.DeepEqual(existingSpec, specs) {
+				continue
+			}
+			return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec)
+		}
+		known[name] = specs
+	}
+
+	// Persist updated state
+	f.known.Store(known)
+
+	return nil
+}
+
+func (f *featureGate) OverrideDefault(name Feature, override bool) error {
+	return f.OverrideDefaultAtVersion(name, override, f.EmulationVersion())
+}
+
+func (f *featureGate) OverrideDefaultAtVersion(name Feature, override bool, ver *version.Version) error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.closed {
+		return fmt.Errorf("cannot override default for feature %q: gates already added to a flag set", name)
+	}
+
+	// Copy existing state
+	known := f.GetAllVersioned()
+
+	specs, ok := known[name]
+	if !ok {
+		return fmt.Errorf("cannot override default: feature %q is not registered", name)
+	}
+	spec := featureSpecAtEmulationVersion(specs, ver)
+	switch {
+	case spec.LockToDefault:
+		return fmt.Errorf("cannot override default: feature %q default is locked to %t", name, spec.Default)
+	case spec.PreRelease == PreAlpha:
+		return fmt.Errorf("cannot override default: feature %q is not available before version %s", name, ver.String())
+	case spec.PreRelease == Deprecated:
+		klog.Warningf("Overriding default of deprecated feature gate %s=%t. It will be removed in a future release.", name, override)
+	case spec.PreRelease == GA:
+		klog.Warningf("Overriding default of GA feature gate %s=%t. It will be removed in a future release.", name, override)
+	}
+
+	spec.Default = override
+	known[name] = specs
+	f.known.Store(known)
+
+	return nil
+}
+
+// GetAll returns a copy of the map of known feature names to feature specs for the current emulationVersion.
+func (f *featureGate) GetAll() map[Feature]FeatureSpec {
+	retval := map[Feature]FeatureSpec{}
+	f.lock.Lock()
+	versionedSpecs := f.GetAllVersioned()
+	emuVer := f.EmulationVersion()
+	f.lock.Unlock()
+	for k, v := range versionedSpecs {
+		spec := featureSpecAtEmulationVersion(v, emuVer)
+		if spec.PreRelease == PreAlpha {
+			// The feature is not available at the emulation version.
+			continue
+		}
+		retval[k] = *spec
+	}
+	return retval
+}
+
+// GetAllVersioned returns a copy of the map of known feature names to versioned feature specs.
+func (f *featureGate) GetAllVersioned() map[Feature]VersionedSpecs {
+	retval := map[Feature]VersionedSpecs{}
+	for k, v := range f.known.Load().(map[Feature]VersionedSpecs) {
+		vCopy := make([]FeatureSpec, len(v))
+		_ = copy(vCopy, v)
+		retval[k] = vCopy
+	}
+	return retval
+}
+
+func (f *featureGate) SetEmulationVersion(emulationVersion *version.Version) error {
+	if emulationVersion.EqualTo(f.EmulationVersion()) {
+		return nil
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	klog.V(1).Infof("set feature gate emulationVersion to %s", emulationVersion.String())
+
+	// Copy existing state
+	enabledRaw := map[string]bool{}
+	for k, v := range f.enabledRaw.Load().(map[string]bool) {
+		enabledRaw[k] = v
+	}
+	// enabled map should be reset whenever emulationVersion is changed.
+	enabled := map[Feature]bool{}
+	errs := f.unsafeSetFromMap(enabled, enabledRaw, emulationVersion)
+
+	queriedFeatures := f.queriedFeatures.Load().(sets.Set[Feature])
+	known := f.known.Load().(map[Feature]VersionedSpecs)
+	for feature := range queriedFeatures {
+		newVal := featureEnabled(feature, enabled, known, emulationVersion)
+		oldVal := featureEnabled(feature, f.enabled.Load().(map[Feature]bool), known, f.EmulationVersion())
+		if newVal != oldVal {
+			klog.Warningf("SetEmulationVersion will change already queried feature:%s from %v to %v", feature, oldVal, newVal)
+		}
+	}
+
+	if len(errs) == 0 {
+		// Persist changes
+		f.enabled.Store(enabled)
+		f.emulationVersion.Store(emulationVersion)
+		f.queriedFeatures.Store(sets.Set[Feature]{})
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+func (f *featureGate) EmulationVersion() *version.Version {
+	return f.emulationVersion.Load()
+}
+
+// featureSpec returns the featureSpec at the EmulationVersion if the key exists, or an error otherwise.
+// This is useful to keep multiple implementations of a feature based on the PreRelease or Version info.
+func (f *featureGate) featureSpec(key Feature) (FeatureSpec, error) {
+	if v, ok := f.known.Load().(map[Feature]VersionedSpecs)[key]; ok {
+		featureSpec := f.featureSpecAtEmulationVersion(v)
+		return *featureSpec, nil
+	}
+	return FeatureSpec{}, fmt.Errorf("feature %q is not registered in FeatureGate %q", key, f.featureGateName)
+}
+
+func (f *featureGate) unsafeRecordQueried(key Feature) {
+	queriedFeatures := f.queriedFeatures.Load().(sets.Set[Feature])
+	if _, ok := queriedFeatures[key]; ok {
+		return
+	}
+	// Clone items from queriedFeatures before mutating it
+	newQueriedFeatures := queriedFeatures.Clone()
+	newQueriedFeatures.Insert(key)
+	f.queriedFeatures.Store(newQueriedFeatures)
+}
+
+func featureEnabled(key Feature, enabled map[Feature]bool, known map[Feature]VersionedSpecs, emulationVersion *version.Version) bool {
+	// check explicitly set enabled list
+	if v, ok := enabled[key]; ok {
+		return v
+	}
+	if v, ok := known[key]; ok {
+		return featureSpecAtEmulationVersion(v, emulationVersion).Default
+	}
+
+	panic(fmt.Errorf("feature %q is not registered in FeatureGate", key))
+}
+
+// Enabled returns true if the key is enabled.  If the key is not known, this call will panic.
+func (f *featureGate) Enabled(key Feature) bool {
+	// TODO: ideally we should lock the feature gate in this call to be safe, need to evaluate how much performance impact locking would have.
+	v := featureEnabled(key, f.enabled.Load().(map[Feature]bool), f.known.Load().(map[Feature]VersionedSpecs), f.EmulationVersion())
+	f.unsafeRecordQueried(key)
+	return v
+}
+
+func (f *featureGate) featureSpecAtEmulationVersion(v VersionedSpecs) *FeatureSpec {
+	return featureSpecAtEmulationVersion(v, f.EmulationVersion())
+}
+
+func featureSpecAtEmulationVersion(v VersionedSpecs, emulationVersion *version.Version) *FeatureSpec {
+	i := len(v) - 1
+	for ; i >= 0; i-- {
+		if v[i].Version.GreaterThan(emulationVersion) {
+			continue
+		}
+		return &v[i]
+	}
+	return &FeatureSpec{
+		Default:    false,
+		PreRelease: PreAlpha,
+		Version:    version.MajorMinor(0, 0),
+	}
+}
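+
+// For instance, given specs sorted at versions 1.27 (Alpha) and 1.29 (Beta),
+// emulation version 1.28 selects the 1.27 Alpha spec, 1.30 selects the 1.29
+// Beta spec, and 1.26 falls back to the synthetic PreAlpha spec above.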
+
+// Close sets closed to true, and prevents subsequent calls to Add
+func (f *featureGate) Close() {
+	f.lock.Lock()
+	f.closed = true
+	f.lock.Unlock()
+}
+
+// AddFlag adds a flag for setting global feature gates to the specified FlagSet.
+func (f *featureGate) AddFlag(fs *pflag.FlagSet) {
+	// TODO(mtaufen): Shouldn't we just close it on the first Set/SetFromMap instead?
+	// Not all components expose a feature gates flag using this AddFlag method, and
+	// in the future, all components will completely stop exposing a feature gates flag,
+	// in favor of componentconfig.
+	f.Close()
+
+	known := f.KnownFeatures()
+	fs.Var(f, flagName, ""+
+		"A set of key=value pairs that describe feature gates for alpha/experimental features. "+
+		"Options are:\n"+strings.Join(known, "\n"))
+}
+
+func (f *featureGate) AddMetrics() {
+	for feature, featureSpec := range f.GetAll() {
+		featuremetrics.RecordFeatureInfo(context.Background(), string(feature), string(featureSpec.PreRelease), f.Enabled(feature))
+	}
+}
+
+// KnownFeatures returns a slice of strings describing the FeatureGate's known features.
+// PreAlpha, Deprecated, and GA features are hidden from the list.
+func (f *featureGate) KnownFeatures() []string {
+	var known []string
+	for k, v := range f.known.Load().(map[Feature]VersionedSpecs) {
+		if k == "AllAlpha" || k == "AllBeta" {
+			known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, v[0].PreRelease, v[0].Default))
+			continue
+		}
+		featureSpec := f.featureSpecAtEmulationVersion(v)
+		if featureSpec.PreRelease == GA || featureSpec.PreRelease == Deprecated || featureSpec.PreRelease == PreAlpha {
+			continue
+		}
+		known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, featureSpec.PreRelease, featureSpec.Default))
+	}
+	sort.Strings(known)
+	return known
+}
+
+// DeepCopyAndReset copies all the registered features of the FeatureGate object, with all the known features and overrides,
+// and resets all the enabled status of the new feature gate.
+// This is useful for creating a new instance of feature gate without inheriting all the enabled configurations of the base feature gate.
+func (f *featureGate) DeepCopyAndReset() MutableVersionedFeatureGate {
+	fg := NewVersionedFeatureGate(f.EmulationVersion())
+	known := f.GetAllVersioned()
+	fg.known.Store(known)
+	return fg
+}
+
+// DeepCopy returns a deep copy of the FeatureGate object, such that gates can be
+// set on the copy without mutating the original. This is useful for validating
+// config against potential feature gate changes before committing those changes.
+func (f *featureGate) DeepCopy() MutableVersionedFeatureGate {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	// Copy existing state.
+	known := f.GetAllVersioned()
+	enabled := map[Feature]bool{}
+	for k, v := range f.enabled.Load().(map[Feature]bool) {
+		enabled[k] = v
+	}
+	enabledRaw := map[string]bool{}
+	for k, v := range f.enabledRaw.Load().(map[string]bool) {
+		enabledRaw[k] = v
+	}
+
+	// Construct a new featureGate around the copied state.
+	// Note that specialFeatures is treated as immutable by convention,
+	// and we maintain the value of f.closed across the copy.
+	fg := &featureGate{
+		special: specialFeatures,
+		closed:  f.closed,
+	}
+	fg.emulationVersion.Store(f.EmulationVersion())
+	fg.known.Store(known)
+	fg.enabled.Store(enabled)
+	fg.enabledRaw.Store(enabledRaw)
+	fg.queriedFeatures.Store(sets.Set[Feature]{})
+	return fg
+}
+
+// ExplicitlySet returns true if the feature value is explicitly set instead of
+// being derived from the default values or special features.
+func (f *featureGate) ExplicitlySet(name Feature) bool {
+	enabledRaw := f.enabledRaw.Load().(map[string]bool)
+	_, ok := enabledRaw[string(name)]
+	return ok
+}
+
+// ResetFeatureValueToDefault resets the value of the feature back to the default value.
+func (f *featureGate) ResetFeatureValueToDefault(name Feature) error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	enabled := map[Feature]bool{}
+	for k, v := range f.enabled.Load().(map[Feature]bool) {
+		enabled[k] = v
+	}
+	enabledRaw := map[string]bool{}
+	for k, v := range f.enabledRaw.Load().(map[string]bool) {
+		enabledRaw[k] = v
+	}
+	_, inEnabled := enabled[name]
+	if inEnabled {
+		delete(enabled, name)
+	}
+	_, inEnabledRaw := enabledRaw[string(name)]
+	if inEnabledRaw {
+		delete(enabledRaw, string(name))
+	}
+	// some features could be in enabled map but not enabledRaw map,
+	// for example, an Alpha feature when AllAlpha is set.
+	if inEnabledRaw && !inEnabled {
+		return fmt.Errorf("feature:%s was explicitly set, but not in enabled map", name)
+	}
+	f.enabled.Store(enabled)
+	f.enabledRaw.Store(enabledRaw)
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/featuregate/registry.go b/hack/tools/vendor/k8s.io/component-base/featuregate/registry.go
new file mode 100644
index 0000000000..cf35403da4
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/featuregate/registry.go
@@ -0,0 +1,454 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package featuregate
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/spf13/pflag"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/version"
+	cliflag "k8s.io/component-base/cli/flag"
+	baseversion "k8s.io/component-base/version"
+	"k8s.io/klog/v2"
+)
+
+// DefaultComponentGlobalsRegistry is the global var to store the effective versions and feature gates for all components for easy access.
+// Example usage:
+// // register the component effective version and feature gate first
+// _, _ = utilversion.DefaultComponentGlobalsRegistry.ComponentGlobalsOrRegister(utilversion.DefaultKubeComponent, utilversion.DefaultKubeEffectiveVersion(), utilfeature.DefaultMutableFeatureGate)
+// wardleEffectiveVersion := utilversion.NewEffectiveVersion("1.2")
+// wardleFeatureGate := featuregate.NewFeatureGate()
+// utilruntime.Must(utilversion.DefaultComponentGlobalsRegistry.Register(apiserver.WardleComponentName, wardleEffectiveVersion, wardleFeatureGate, false))
+//
+//	cmd := &cobra.Command{
+//	 ...
+//		// call DefaultComponentGlobalsRegistry.Set() in PersistentPreRunE
+//		PersistentPreRunE: func(*cobra.Command, []string) error {
+//			if err := utilversion.DefaultComponentGlobalsRegistry.Set(); err != nil {
+//				return err
+//			}
+//	 ...
+//		},
+//		RunE: func(c *cobra.Command, args []string) error {
+//			// call utilversion.DefaultComponentGlobalsRegistry.Validate() somewhere
+//		},
+//	}
+//
+// flags := cmd.Flags()
+// // add flags
+// utilversion.DefaultComponentGlobalsRegistry.AddFlags(flags)
+var DefaultComponentGlobalsRegistry ComponentGlobalsRegistry = NewComponentGlobalsRegistry()
+
+const (
+	DefaultKubeComponent = "kube"
+
+	klogLevel = 2
+)
+
+type VersionMapping func(from *version.Version) *version.Version
+
+// ComponentGlobals stores the global variables for a component for easy access.
+type ComponentGlobals struct {
+	effectiveVersion baseversion.MutableEffectiveVersion
+	featureGate      MutableVersionedFeatureGate
+
+	// emulationVersionMapping contains the mapping from the emulation version of this component
+	// to the emulation version of another component.
+	emulationVersionMapping map[string]VersionMapping
+	// dependentEmulationVersion stores whether this component's EmulationVersion depends, through a mapping, on another component.
+	// If true, the emulation version cannot be set from the flag; it is derived from the version mapping of another component.
+	dependentEmulationVersion bool
+	// minCompatibilityVersionMapping contains the mapping from the min compatibility version of this component
+	// to the min compatibility version of another component.
+	minCompatibilityVersionMapping map[string]VersionMapping
+	// dependentMinCompatibilityVersion stores whether this component's MinCompatibilityVersion depends, through a mapping, on another component.
+	// If true, the min compatibility version cannot be set from the flag; it is derived from the version mapping of another component.
+	dependentMinCompatibilityVersion bool
+}
+
+type ComponentGlobalsRegistry interface {
+	// EffectiveVersionFor returns the EffectiveVersion registered under the component.
+	// Returns nil if the component is not registered.
+	EffectiveVersionFor(component string) baseversion.EffectiveVersion
+	// FeatureGateFor returns the FeatureGate registered under the component.
+	// Returns nil if the component is not registered.
+	FeatureGateFor(component string) FeatureGate
+	// Register registers the EffectiveVersion and FeatureGate for a component.
+	// returns error if the component is already registered.
+	Register(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) error
+	// ComponentGlobalsOrRegister returns the registered global variables for the component if it already exists in the registry.
+	// Otherwise, the provided variables are registered under the component and returned.
+	ComponentGlobalsOrRegister(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) (baseversion.MutableEffectiveVersion, MutableVersionedFeatureGate)
+	// AddFlags adds the "--emulated-version" and "--feature-gates" flags.
+	AddFlags(fs *pflag.FlagSet)
+	// Set sets the flags for all global variables for all components registered.
+	Set() error
+	// SetFallback calls Set() if it has never been called.
+	SetFallback() error
+	// Validate calls the Validate() function for all the global variables for all components registered.
+	Validate() []error
+	// Reset removes all stored ComponentGlobals, configurations, and version mappings.
+	Reset()
+	// SetEmulationVersionMapping sets the mapping from the emulation version of one component
+	// to the emulation version of another component.
+	// Once set, the emulation version of the toComponent will be determined by the emulation version of the fromComponent,
+	// and cannot be set from cmd flags anymore.
+	// For a given component, its emulation version can only depend on one other component; multiple dependencies are not allowed.
+	SetEmulationVersionMapping(fromComponent, toComponent string, f VersionMapping) error
+}
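+
+// For example (illustrative; "registry" is an assumed ComponentGlobalsRegistry
+// and the component names are hypothetical), deriving "wardle"'s emulation
+// version from "kube"'s:
+//
+//	_ = registry.SetEmulationVersionMapping("kube", "wardle",
+//		func(kubeVer *version.Version) *version.Version { return kubeVer.SubtractMinor(29) })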
+
+type componentGlobalsRegistry struct {
+	componentGlobals map[string]*ComponentGlobals
+	mutex            sync.RWMutex
+	// list of component=emulationVersion entries set from the flag.
+	emulationVersionConfig []string
+	// map of component name to the list of feature gates set from the flag.
+	featureGatesConfig map[string][]string
+	// set records whether the registry's Set() function has already been called.
+	set bool
+}
+
+func NewComponentGlobalsRegistry() *componentGlobalsRegistry {
+	return &componentGlobalsRegistry{
+		componentGlobals:       make(map[string]*ComponentGlobals),
+		emulationVersionConfig: nil,
+		featureGatesConfig:     nil,
+	}
+}
+
+func (r *componentGlobalsRegistry) Reset() {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	r.componentGlobals = make(map[string]*ComponentGlobals)
+	r.emulationVersionConfig = nil
+	r.featureGatesConfig = nil
+	r.set = false
+}
+
+func (r *componentGlobalsRegistry) EffectiveVersionFor(component string) baseversion.EffectiveVersion {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	globals, ok := r.componentGlobals[component]
+	if !ok {
+		return nil
+	}
+	return globals.effectiveVersion
+}
+
+func (r *componentGlobalsRegistry) FeatureGateFor(component string) FeatureGate {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	globals, ok := r.componentGlobals[component]
+	if !ok {
+		return nil
+	}
+	return globals.featureGate
+}
+
+func (r *componentGlobalsRegistry) unsafeRegister(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) error {
+	if _, ok := r.componentGlobals[component]; ok {
+		return fmt.Errorf("component globals of %s already registered", component)
+	}
+	if featureGate != nil {
+		if err := featureGate.SetEmulationVersion(effectiveVersion.EmulationVersion()); err != nil {
+			return err
+		}
+	}
+	c := ComponentGlobals{
+		effectiveVersion:               effectiveVersion,
+		featureGate:                    featureGate,
+		emulationVersionMapping:        make(map[string]VersionMapping),
+		minCompatibilityVersionMapping: make(map[string]VersionMapping),
+	}
+	r.componentGlobals[component] = &c
+	return nil
+}
+
+func (r *componentGlobalsRegistry) Register(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) error {
+	if effectiveVersion == nil {
+		return fmt.Errorf("cannot register nil effectiveVersion")
+	}
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	return r.unsafeRegister(component, effectiveVersion, featureGate)
+}
+
+func (r *componentGlobalsRegistry) ComponentGlobalsOrRegister(component string, effectiveVersion baseversion.MutableEffectiveVersion, featureGate MutableVersionedFeatureGate) (baseversion.MutableEffectiveVersion, MutableVersionedFeatureGate) {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	globals, ok := r.componentGlobals[component]
+	if ok {
+		return globals.effectiveVersion, globals.featureGate
+	}
+	utilruntime.Must(r.unsafeRegister(component, effectiveVersion, featureGate))
+	return effectiveVersion, featureGate
+}
+
+func (r *componentGlobalsRegistry) unsafeKnownFeatures() []string {
+	var known []string
+	for component, globals := range r.componentGlobals {
+		if globals.featureGate == nil {
+			continue
+		}
+		for _, f := range globals.featureGate.KnownFeatures() {
+			known = append(known, component+":"+f)
+		}
+	}
+	sort.Strings(known)
+	return known
+}
+
+func (r *componentGlobalsRegistry) unsafeVersionFlagOptions(isEmulation bool) []string {
+	var vs []string
+	for component, globals := range r.componentGlobals {
+		binaryVer := globals.effectiveVersion.BinaryVersion()
+		if isEmulation {
+			if globals.dependentEmulationVersion {
+				continue
+			}
+			// emulated version could be between binaryMajor.{binaryMinor} and binaryMajor.{binaryMinor}
+			// TODO: change to binaryMajor.{binaryMinor-1} and binaryMajor.{binaryMinor} in 1.32
+			vs = append(vs, fmt.Sprintf("%s=%s..%s (default=%s)", component,
+				binaryVer.SubtractMinor(0).String(), binaryVer.String(), globals.effectiveVersion.EmulationVersion().String()))
+		} else {
+			if globals.dependentMinCompatibilityVersion {
+				continue
+			}
+			// min compatibility version could be between binaryMajor.{binaryMinor-1} and binaryMajor.{binaryMinor}
+			vs = append(vs, fmt.Sprintf("%s=%s..%s (default=%s)", component,
+				binaryVer.SubtractMinor(1).String(), binaryVer.String(), globals.effectiveVersion.MinCompatibilityVersion().String()))
+		}
+	}
+	sort.Strings(vs)
+	return vs
+}
+
+func (r *componentGlobalsRegistry) AddFlags(fs *pflag.FlagSet) {
+	if r == nil {
+		return
+	}
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	for _, globals := range r.componentGlobals {
+		if globals.featureGate != nil {
+			globals.featureGate.Close()
+		}
+	}
+	if r.emulationVersionConfig != nil || r.featureGatesConfig != nil {
+		klog.Warning("calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags")
+	}
+	r.emulationVersionConfig = []string{}
+	r.featureGatesConfig = make(map[string][]string)
+
+	fs.StringSliceVar(&r.emulationVersionConfig, "emulated-version", r.emulationVersionConfig, ""+
+		"The versions different components emulate their capabilities (APIs, features, ...) of.\n"+
+		"If set, the component will emulate the behavior of this version instead of the underlying binary version.\n"+
+		"Version format could only be major.minor, for example: '--emulated-version=wardle=1.2,kube=1.31'. Options are:\n"+strings.Join(r.unsafeVersionFlagOptions(true), "\n")+
+		"If the component is not specified, defaults to \"kube\"")
+
+	fs.Var(cliflag.NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey(&r.featureGatesConfig), "feature-gates", "Comma-separated list of component:key=value pairs that describe feature gates for alpha/experimental features of different components.\n"+
+		"If the component is not specified, defaults to \"kube\". This flag can be repeatedly invoked. For example: --feature-gates 'wardle:featureA=true,wardle:featureB=false' --feature-gates 'kube:featureC=true'"+
+		"Options are:\n"+strings.Join(r.unsafeKnownFeatures(), "\n"))
+}
+
+type componentVersion struct {
+	component string
+	ver       *version.Version
+}
+
+// getFullEmulationVersionConfig expands the given version config with the registered version mappings,
+// and returns the resulting map of component to Version.
+func (r *componentGlobalsRegistry) getFullEmulationVersionConfig(
+	versionConfigMap map[string]*version.Version) (map[string]*version.Version, error) {
+	result := map[string]*version.Version{}
+	setQueue := []componentVersion{}
+	for comp, ver := range versionConfigMap {
+		if _, ok := r.componentGlobals[comp]; !ok {
+			return result, fmt.Errorf("component not registered: %s", comp)
+		}
+		klog.V(klogLevel).Infof("setting version %s=%s", comp, ver.String())
+		setQueue = append(setQueue, componentVersion{comp, ver})
+	}
+	for len(setQueue) > 0 {
+		cv := setQueue[0]
+		if _, visited := result[cv.component]; visited {
+			return result, fmt.Errorf("setting version of %s more than once, probably version mapping loop", cv.component)
+		}
+		setQueue = setQueue[1:]
+		result[cv.component] = cv.ver
+		for toComp, f := range r.componentGlobals[cv.component].emulationVersionMapping {
+			toVer := f(cv.ver)
+			if toVer == nil {
+				return result, fmt.Errorf("got nil version from mapping of %s=%s to component:%s", cv.component, cv.ver.String(), toComp)
+			}
+			klog.V(klogLevel).Infof("setting version %s=%s from version mapping of %s=%s", toComp, toVer.String(), cv.component, cv.ver.String())
+			setQueue = append(setQueue, componentVersion{toComp, toVer})
+		}
+	}
+	return result, nil
+}
+
+func toVersionMap(versionConfig []string) (map[string]*version.Version, error) {
+	m := map[string]*version.Version{}
+	for _, compVer := range versionConfig {
+		// default to "kube" if the component is not specified
+		k := "kube"
+		v := compVer
+		if strings.Contains(compVer, "=") {
+			arr := strings.SplitN(compVer, "=", 2)
+			if len(arr) != 2 {
+				return m, fmt.Errorf("malformed pair, expect string=string")
+			}
+			k = strings.TrimSpace(arr[0])
+			v = strings.TrimSpace(arr[1])
+		}
+		ver, err := version.Parse(v)
+		if err != nil {
+			return m, err
+		}
+		if ver.Patch() != 0 {
+			return m, fmt.Errorf("patch version not allowed, got: %s=%s", k, ver.String())
+		}
+		if existingVer, ok := m[k]; ok {
+			return m, fmt.Errorf("duplicate version flag, %s=%s and %s=%s", k, existingVer.String(), k, ver.String())
+		}
+		m[k] = ver
+	}
+	return m, nil
+}
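+
+// For example (illustrative): the entries "wardle=1.2" and "1.31" parse to
+// {"wardle": 1.2, "kube": 1.31}, since a bare version defaults to "kube".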
+
+func (r *componentGlobalsRegistry) SetFallback() error {
+	r.mutex.Lock()
+	set := r.set
+	r.mutex.Unlock()
+	if set {
+		return nil
+	}
+	klog.Warning("setting componentGlobalsRegistry in SetFallback. We recommend calling componentGlobalsRegistry.Set()" +
+		" right after parsing flags to avoid using feature gates before their final values are set by the flags.")
+	return r.Set()
+}
+
+func (r *componentGlobalsRegistry) Set() error {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	r.set = true
+	emulationVersionConfigMap, err := toVersionMap(r.emulationVersionConfig)
+	if err != nil {
+		return err
+	}
+	for comp := range emulationVersionConfigMap {
+		if _, ok := r.componentGlobals[comp]; !ok {
+			return fmt.Errorf("component not registered: %s", comp)
+		}
+		// only components without any dependencies can be set from the flag.
+		if r.componentGlobals[comp].dependentEmulationVersion {
+			return fmt.Errorf("EmulationVersion of %s is set by mapping, cannot set it by flag", comp)
+		}
+	}
+	if emulationVersions, err := r.getFullEmulationVersionConfig(emulationVersionConfigMap); err != nil {
+		return err
+	} else {
+		for comp, ver := range emulationVersions {
+			r.componentGlobals[comp].effectiveVersion.SetEmulationVersion(ver)
+		}
+	}
+	// Set feature gate emulation version before setting feature gate flag values.
+	for comp, globals := range r.componentGlobals {
+		if globals.featureGate == nil {
+			continue
+		}
+		klog.V(klogLevel).Infof("setting %s:feature gate emulation version to %s", comp, globals.effectiveVersion.EmulationVersion().String())
+		if err := globals.featureGate.SetEmulationVersion(globals.effectiveVersion.EmulationVersion()); err != nil {
+			return err
+		}
+	}
+	for comp, fg := range r.featureGatesConfig {
+		if comp == "" {
+			if _, ok := r.featureGatesConfig[DefaultKubeComponent]; ok {
+				return fmt.Errorf("set kube feature gates with default empty prefix or kube: prefix consistently, do not mix use")
+			}
+			comp = DefaultKubeComponent
+		}
+		if _, ok := r.componentGlobals[comp]; !ok {
+			return fmt.Errorf("component not registered: %s", comp)
+		}
+		featureGate := r.componentGlobals[comp].featureGate
+		if featureGate == nil {
+			return fmt.Errorf("component featureGate not registered: %s", comp)
+		}
+		flagVal := strings.Join(fg, ",")
+		klog.V(klogLevel).Infof("setting %s:feature-gates=%s", comp, flagVal)
+		if err := featureGate.Set(flagVal); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *componentGlobalsRegistry) Validate() []error {
+	var errs []error
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	for _, globals := range r.componentGlobals {
+		errs = append(errs, globals.effectiveVersion.Validate()...)
+		if globals.featureGate != nil {
+			errs = append(errs, globals.featureGate.Validate()...)
+		}
+	}
+	return errs
+}
+
+func (r *componentGlobalsRegistry) SetEmulationVersionMapping(fromComponent, toComponent string, f VersionMapping) error {
+	if f == nil {
+		return nil
+	}
+	klog.V(klogLevel).Infof("setting EmulationVersion mapping from %s to %s", fromComponent, toComponent)
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	if _, ok := r.componentGlobals[fromComponent]; !ok {
+		return fmt.Errorf("component not registered: %s", fromComponent)
+	}
+	if _, ok := r.componentGlobals[toComponent]; !ok {
+		return fmt.Errorf("component not registered: %s", toComponent)
+	}
+	// check multiple dependency
+	if r.componentGlobals[toComponent].dependentEmulationVersion {
+		return fmt.Errorf("mapping of %s already exists from another component", toComponent)
+	}
+	r.componentGlobals[toComponent].dependentEmulationVersion = true
+
+	versionMapping := r.componentGlobals[fromComponent].emulationVersionMapping
+	if _, ok := versionMapping[toComponent]; ok {
+		return fmt.Errorf("EmulationVersion from %s to %s already exists", fromComponent, toComponent)
+	}
+	versionMapping[toComponent] = f
+	klog.V(klogLevel).Infof("setting the default EmulationVersion of %s based on mapping from the default EmulationVersion of %s", fromComponent, toComponent)
+	defaultFromVersion := r.componentGlobals[fromComponent].effectiveVersion.EmulationVersion()
+	emulationVersions, err := r.getFullEmulationVersionConfig(map[string]*version.Version{fromComponent: defaultFromVersion})
+	if err != nil {
+		return err
+	}
+	for comp, ver := range emulationVersions {
+		r.componentGlobals[comp].effectiveVersion.SetEmulationVersion(ver)
+	}
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/OWNERS b/hack/tools/vendor/k8s.io/component-base/metrics/OWNERS
new file mode 100644
index 0000000000..be371a4a09
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/OWNERS
@@ -0,0 +1,11 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - sig-instrumentation-approvers
+  - logicalhan
+  - RainbowMango
+reviewers:
+  - sig-instrumentation-reviewers
+  - YoyinZyc
+labels:
+  - sig/instrumentation
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/buckets.go b/hack/tools/vendor/k8s.io/component-base/metrics/buckets.go
new file mode 100644
index 0000000000..27a57eb7f8
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/buckets.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// DefBuckets is a wrapper for prometheus.DefBuckets
+var DefBuckets = prometheus.DefBuckets
+
+// LinearBuckets is a wrapper for prometheus.LinearBuckets.
+func LinearBuckets(start, width float64, count int) []float64 {
+	return prometheus.LinearBuckets(start, width, count)
+}
+
+// ExponentialBuckets is a wrapper for prometheus.ExponentialBuckets.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+	return prometheus.ExponentialBuckets(start, factor, count)
+}
+
+// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
+// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
+func ExponentialBucketsRange(min, max float64, count int) []float64 {
+	return prometheus.ExponentialBucketsRange(min, max, count)
+}
+
+// MergeBuckets merges multiple bucket slices into a single slice.
+func MergeBuckets(buckets ...[]float64) []float64 {
+	result := make([]float64, 0)
+	for _, s := range buckets {
+		result = append(result, s...)
+	}
+	return result
+}
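+
+// Example (illustrative): combine linear and exponential ranges into one
+// bucket layout for a HistogramOpts.Buckets field.
+//
+//	buckets := MergeBuckets(
+//		LinearBuckets(0.1, 0.1, 5),  // 0.1, 0.2, 0.3, 0.4, 0.5
+//		ExponentialBuckets(1, 2, 4), // 1, 2, 4, 8
+//	)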
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/collector.go b/hack/tools/vendor/k8s.io/component-base/metrics/collector.go
new file mode 100644
index 0000000000..0718b6e135
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/collector.go
@@ -0,0 +1,190 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"fmt"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// StableCollector extends the prometheus.Collector interface to allow customization of the
+// metric registration process; it is especially intended for use in custom-collector scenarios.
+type StableCollector interface {
+	prometheus.Collector
+
+	// DescribeWithStability sends the super-set of all possible metrics.Desc collected
+	// by this StableCollector to the provided channel.
+	DescribeWithStability(chan<- *Desc)
+
+	// CollectWithStability sends each collected metrics.Metric via the provided channel.
+	CollectWithStability(chan<- Metric)
+
+	// Create initializes all Desc instances; it is intended to be called by the registry.
+	Create(version *semver.Version, self StableCollector) bool
+
+	// ClearState clears all the state set by Create.
+	ClearState()
+
+	// HiddenMetrics returns the fqNames of the hidden metrics.
+	HiddenMetrics() []string
+}
+
+// BaseStableCollector, which implements almost all of the methods defined by StableCollector,
+// is a convenient base for custom collectors.
+// It is recommended to embed BaseStableCollector when implementing custom collectors.
+type BaseStableCollector struct {
+	descriptors  map[string]*Desc // all descriptors, keyed by fqName; collected from DescribeWithStability().
+	registerable map[string]*Desc // registerable descriptors, keyed by fqName; a subset of descriptors.
+	hidden       map[string]*Desc // hidden descriptors, keyed by fqName; a subset of descriptors.
+	self         StableCollector
+}
+
+// DescribeWithStability sends all descriptors to the provided channel.
+// Every custom collector must override this method.
+func (bsc *BaseStableCollector) DescribeWithStability(ch chan<- *Desc) {
+	panic(fmt.Errorf("custom collector should over-write DescribeWithStability method"))
+}
+
+// Describe sends all descriptors to the provided channel.
+// It is intended to be called by the Prometheus registry.
+func (bsc *BaseStableCollector) Describe(ch chan<- *prometheus.Desc) {
+	for _, d := range bsc.registerable {
+		ch <- d.toPrometheusDesc()
+	}
+}
+
+// CollectWithStability sends all metrics to the provided channel.
+// Every custom collector must override this method.
+func (bsc *BaseStableCollector) CollectWithStability(ch chan<- Metric) {
+	panic(fmt.Errorf("custom collector should over-write CollectWithStability method"))
+}
+
+// Collect is called by the Prometheus registry when collecting metrics.
+func (bsc *BaseStableCollector) Collect(ch chan<- prometheus.Metric) {
+	mch := make(chan Metric)
+
+	go func() {
+		bsc.self.CollectWithStability(mch)
+		close(mch)
+	}()
+
+	for m := range mch {
+		// nil Metric usually means hidden metrics
+		if m == nil {
+			continue
+		}
+
+		ch <- prometheus.Metric(m)
+	}
+}
+
+func (bsc *BaseStableCollector) add(d *Desc) {
+	if len(d.fqName) == 0 {
+		panic("nameless metrics will be not allowed")
+	}
+
+	if bsc.descriptors == nil {
+		bsc.descriptors = make(map[string]*Desc)
+	}
+
+	if _, exist := bsc.descriptors[d.fqName]; exist {
+		panic(fmt.Sprintf("duplicate metrics (%s) will be not allowed", d.fqName))
+	}
+
+	bsc.descriptors[d.fqName] = d
+}
+
+// init is intended to be called by the registry.
+func (bsc *BaseStableCollector) init(self StableCollector) {
+	bsc.self = self
+
+	dch := make(chan *Desc)
+
+	// collect all possible descriptors from the custom collector
+	go func() {
+		bsc.self.DescribeWithStability(dch)
+		close(dch)
+	}()
+
+	for d := range dch {
+		bsc.add(d)
+	}
+}
+
+func (bsc *BaseStableCollector) trackRegistrableDescriptor(d *Desc) {
+	if bsc.registerable == nil {
+		bsc.registerable = make(map[string]*Desc)
+	}
+
+	bsc.registerable[d.fqName] = d
+}
+
+func (bsc *BaseStableCollector) trackHiddenDescriptor(d *Desc) {
+	if bsc.hidden == nil {
+		bsc.hidden = make(map[string]*Desc)
+	}
+
+	bsc.hidden[d.fqName] = d
+}
+
+// Create is intended to be called by the registry.
+// It returns true as long as at least one metric is not hidden;
+// otherwise it returns false, and the whole collector will be ignored by the registry.
+func (bsc *BaseStableCollector) Create(version *semver.Version, self StableCollector) bool {
+	bsc.init(self)
+
+	for _, d := range bsc.descriptors {
+		d.create(version)
+		if d.IsHidden() {
+			bsc.trackHiddenDescriptor(d)
+		} else {
+			bsc.trackRegistrableDescriptor(d)
+		}
+	}
+
+	if len(bsc.registerable) > 0 {
+		return true
+	}
+
+	return false
+}
+
+// ClearState clears all the state set by Create.
+// It is intended to be used when re-registering a hidden metric.
+func (bsc *BaseStableCollector) ClearState() {
+	for _, d := range bsc.descriptors {
+		d.ClearState()
+	}
+
+	bsc.descriptors = nil
+	bsc.registerable = nil
+	bsc.hidden = nil
+	bsc.self = nil
+}
+
+// HiddenMetrics returns the fqNames of the hidden metrics.
+func (bsc *BaseStableCollector) HiddenMetrics() (fqNames []string) {
+	for i := range bsc.hidden {
+		fqNames = append(fqNames, bsc.hidden[i].fqName)
+	}
+	return
+}
+
+// Ensure BaseStableCollector implements the StableCollector interface.
+var _ StableCollector = &BaseStableCollector{}
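To make the embedding pattern concrete, here is a hedged sketch of a custom collector built on BaseStableCollector; the metric name, label, and value are hypothetical, and registration goes through the stability-aware CustomMustRegister:

package main

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

var queueDepthDesc = metrics.NewDesc(
	"example_queue_depth", // hypothetical metric name
	"Current depth of the example work queue.",
	[]string{"queue"}, nil,
	metrics.ALPHA, "",
)

// queueDepthCollector embeds BaseStableCollector and overrides only the two
// methods every custom collector must provide.
type queueDepthCollector struct {
	metrics.BaseStableCollector
}

func (c *queueDepthCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
	ch <- queueDepthDesc
}

func (c *queueDepthCollector) CollectWithStability(ch chan<- metrics.Metric) {
	// 42 stands in for a real measurement.
	ch <- metrics.NewLazyConstMetric(queueDepthDesc, metrics.GaugeValue, 42, "work")
}

func main() {
	legacyregistry.CustomMustRegister(&queueDepthCollector{})
}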
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/counter.go b/hack/tools/vendor/k8s.io/component-base/metrics/counter.go
new file mode 100644
index 0000000000..8a7dd71541
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/counter.go
@@ -0,0 +1,306 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+	"sync"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+	"go.opentelemetry.io/otel/trace"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Counter is our internal representation for our wrapping struct around prometheus
+// counters. Counter implements both kubeCollector and CounterMetric.
+type Counter struct {
+	ctx context.Context
+	CounterMetric
+	*CounterOpts
+	lazyMetric
+	selfCollector
+}
+
+// The implementation of the Metric interface is expected by testutil.GetCounterMetricValue.
+var _ Metric = &Counter{}
+
+// All supported exemplar metric types implement the metricWithExemplar interface.
+var _ metricWithExemplar = &Counter{}
+
+// exemplarCounterMetric holds a context to extract exemplar labels from, and a counter metric to attach them to. It implements the metricWithExemplar interface.
+type exemplarCounterMetric struct {
+	*Counter
+}
+
+// NewCounter returns an object which satisfies the kubeCollector and CounterMetric interfaces.
+// However, the object returned will not measure anything unless the collector is first
+// registered, since the metric is lazily instantiated.
+func NewCounter(opts *CounterOpts) *Counter {
+	opts.StabilityLevel.setDefaults()
+
+	kc := &Counter{
+		CounterOpts: opts,
+		lazyMetric:  lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	kc.setPrometheusCounter(noop)
+	kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+	return kc
+}
+
+func (c *Counter) Desc() *prometheus.Desc {
+	return c.metric.Desc()
+}
+
+func (c *Counter) Write(to *dto.Metric) error {
+	return c.metric.Write(to)
+}
+
+// Reset resets the underlying prometheus Counter to start counting from 0 again
+func (c *Counter) Reset() {
+	if !c.IsCreated() {
+		return
+	}
+	c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts()))
+}
+
+// setPrometheusCounter sets the underlying CounterMetric object, i.e. the thing that does the measurement.
+func (c *Counter) setPrometheusCounter(counter prometheus.Counter) {
+	c.CounterMetric = counter
+	c.initSelfCollection(counter)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (c *Counter) DeprecatedVersion() *semver.Version {
+	return parseSemver(c.CounterOpts.DeprecatedVersion)
+}
+
+// initializeMetric invocation creates the actual underlying Counter. Until this method is called
+// the underlying counter is a no-op.
+func (c *Counter) initializeMetric() {
+	c.CounterOpts.annotateStabilityLevel()
+	// this actually creates the underlying prometheus counter.
+	c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts()))
+}
+
+// initializeDeprecatedMetric invocation creates the actual (but deprecated) Counter. Until this method
+// is called the underlying counter is a no-op.
+func (c *Counter) initializeDeprecatedMetric() {
+	c.CounterOpts.markDeprecated()
+	c.initializeMetric()
+}
+
+// WithContext allows the normal Counter metric to pass in context.
+func (c *Counter) WithContext(ctx context.Context) CounterMetric {
+	c.ctx = ctx
+	return c.CounterMetric
+}
+
+// withExemplar initializes the exemplarMetric object and sets the exemplar value.
+func (c *Counter) withExemplar(v float64) {
+	(&exemplarCounterMetric{c}).withExemplar(v)
+}
+
+func (c *Counter) Add(v float64) {
+	c.withExemplar(v)
+}
+
+func (c *Counter) Inc() {
+	c.withExemplar(1)
+}
+
+// withExemplar attaches an exemplar to the metric.
+func (e *exemplarCounterMetric) withExemplar(v float64) {
+	if m, ok := e.CounterMetric.(prometheus.ExemplarAdder); ok {
+		maybeSpanCtx := trace.SpanContextFromContext(e.ctx)
+		if maybeSpanCtx.IsValid() && maybeSpanCtx.IsSampled() {
+			exemplarLabels := prometheus.Labels{
+				"trace_id": maybeSpanCtx.TraceID().String(),
+				"span_id":  maybeSpanCtx.SpanID().String(),
+			}
+			m.AddWithExemplar(v, exemplarLabels)
+			return
+		}
+	}
+
+	e.CounterMetric.Add(v)
+}
+
+// CounterVec is the internal representation of our wrapping struct around prometheus
+// counterVecs. CounterVec implements both kubeCollector and CounterVecMetric.
+type CounterVec struct {
+	*prometheus.CounterVec
+	*CounterOpts
+	lazyMetric
+	originalLabels []string
+}
+
+var _ kubeCollector = &CounterVec{}
+
+// TODO: make this true: var _ CounterVecMetric = &CounterVec{}
+
+// NewCounterVec returns an object which satisfies the kubeCollector and (almost) CounterVecMetric interfaces.
+// However, the object returned will not measure anything unless the collector is first
+// registered, since the metric is lazily instantiated, and only members extracted after
+// registration will actually measure anything.
+func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec {
+	opts.StabilityLevel.setDefaults()
+
+	fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+
+	cv := &CounterVec{
+		CounterVec:     noopCounterVec,
+		CounterOpts:    opts,
+		originalLabels: labels,
+		lazyMetric:     lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	cv.lazyInit(cv, fqName)
+	return cv
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *CounterVec) DeprecatedVersion() *semver.Version {
+	return parseSemver(v.CounterOpts.DeprecatedVersion)
+
+}
+
+// initializeMetric invocation creates the actual underlying CounterVec. Until this method is called
+// the underlying counterVec is a no-op.
+func (v *CounterVec) initializeMetric() {
+	v.CounterOpts.annotateStabilityLevel()
+	v.CounterVec = prometheus.NewCounterVec(v.CounterOpts.toPromCounterOpts(), v.originalLabels)
+}
+
+// initializeDeprecatedMetric invocation creates the actual (but deprecated) CounterVec. Until this method is called
+// the underlying counterVec is a no-op.
+func (v *CounterVec) initializeDeprecatedMetric() {
+	v.CounterOpts.markDeprecated()
+	v.initializeMetric()
+}
+
+// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+// if one with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/counter.go#L179-L197
+//
+// In contrast, the Vec behavior in this package is that member extraction before registration
+// returns a permanent noop object.
+
+// WithLabelValues returns the Counter for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created IFF the counterVec
+// has been registered to a metrics registry.
+func (v *CounterVec) WithLabelValues(lvs ...string) CounterMetric {
+	if !v.IsCreated() {
+		return noop // return no-op counter
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs)
+	} else {
+		v.initializeLabelAllowListsOnce.Do(func() {
+			allowListLock.RLock()
+			if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+				v.LabelValueAllowLists = allowList
+				allowList.ConstrainToAllowedList(v.originalLabels, lvs)
+			}
+			allowListLock.RUnlock()
+		})
+	}
+
+	return v.CounterVec.WithLabelValues(lvs...)
+}
+
+// With returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created IFF the counterVec has
+// been registered to a metrics registry.
+func (v *CounterVec) With(labels map[string]string) CounterMetric {
+	if !v.IsCreated() {
+		return noop // return no-op counter
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainLabelMap(labels)
+	} else {
+		v.initializeLabelAllowListsOnce.Do(func() {
+			allowListLock.RLock()
+			if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+				v.LabelValueAllowLists = allowList
+				allowList.ConstrainLabelMap(labels)
+			}
+			allowListLock.RUnlock()
+		})
+	}
+	return v.CounterVec.With(labels)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+func (v *CounterVec) Delete(labels map[string]string) bool {
+	if !v.IsCreated() {
+		return false // since we haven't created the metric, we haven't deleted a metric with the passed in values
+	}
+	return v.CounterVec.Delete(labels)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *CounterVec) Reset() {
+	if !v.IsCreated() {
+		return
+	}
+
+	v.CounterVec.Reset()
+}
+
+// ResetLabelAllowLists resets the label allow list for the CounterVec.
+// NOTE: This should only be used in tests.
+func (v *CounterVec) ResetLabelAllowLists() {
+	v.initializeLabelAllowListsOnce = sync.Once{}
+	v.LabelValueAllowLists = nil
+}
+
+// WithContext returns a wrapped CounterVec with context.
+func (v *CounterVec) WithContext(ctx context.Context) *CounterVecWithContext {
+	return &CounterVecWithContext{
+		ctx:        ctx,
+		CounterVec: v,
+	}
+}
+
+// CounterVecWithContext is the wrapper of CounterVec with context.
+type CounterVecWithContext struct {
+	*CounterVec
+	ctx context.Context
+}
+
+// WithLabelValues is the wrapper of CounterVec.WithLabelValues.
+func (vc *CounterVecWithContext) WithLabelValues(lvs ...string) CounterMetric {
+	return vc.CounterVec.WithLabelValues(lvs...)
+}
+
+// With is the wrapper of CounterVec.With.
+func (vc *CounterVecWithContext) With(labels map[string]string) CounterMetric {
+	return vc.CounterVec.With(labels)
+}
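A short usage sketch for CounterVec (names illustrative): the vector is inert until registered, after which member extraction yields live counters.

package main

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

var requestsTotal = metrics.NewCounterVec(
	&metrics.CounterOpts{
		Namespace:      "example", // hypothetical names
		Name:           "requests_total",
		Help:           "Number of processed requests, partitioned by verb.",
		StabilityLevel: metrics.ALPHA,
	},
	[]string{"verb"},
)

func init() {
	// Before this registration, WithLabelValues returns a no-op counter.
	legacyregistry.MustRegister(requestsTotal)
}

func main() {
	requestsTotal.WithLabelValues("GET").Inc()
}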
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/desc.go b/hack/tools/vendor/k8s.io/component-base/metrics/desc.go
new file mode 100644
index 0000000000..2ca9cfa7c2
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/desc.go
@@ -0,0 +1,225 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+
+	"k8s.io/klog/v2"
+)
+
+// Desc is a prometheus.Desc extension.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+	// fqName has been built from Namespace, Subsystem, and Name.
+	fqName string
+	// help provides some helpful information about this metric.
+	help string
+	// constLabels contains the constant label name/value pairs for this metric.
+	constLabels Labels
+	// variableLabels contains names of labels for which the metric
+	// maintains variable values.
+	variableLabels []string
+
+	// promDesc is the descriptor used by every Prometheus Metric.
+	promDesc      *prometheus.Desc
+	annotatedHelp string
+
+	// stabilityLevel represents the API guarantees for a given defined metric.
+	stabilityLevel StabilityLevel
+	// deprecatedVersion is the version in which this metric was deprecated.
+	deprecatedVersion string
+
+	isDeprecated        bool
+	isHidden            bool
+	isCreated           bool
+	createLock          sync.RWMutex
+	markDeprecationOnce sync.Once
+	createOnce          sync.Once
+	deprecateOnce       sync.Once
+	hideOnce            sync.Once
+	annotateOnce        sync.Once
+}
+
+// NewDesc extends prometheus.NewDesc with stability support.
+//
+// The stabilityLevel should be a valid stability label, such as "metrics.ALPHA"
+// or "metrics.STABLE" (and possibly "metrics.BETA" in the future). The default value
+// "metrics.ALPHA" is used when the stability label is empty or invalid.
+//
+// The deprecatedVersion is the version in which this Metric is deprecated, per
+// the deprecation policy outlined by the control plane metrics stability KEP.
+func NewDesc(fqName string, help string, variableLabels []string, constLabels Labels,
+	stabilityLevel StabilityLevel, deprecatedVersion string) *Desc {
+	d := &Desc{
+		fqName:            fqName,
+		help:              help,
+		annotatedHelp:     help,
+		variableLabels:    variableLabels,
+		constLabels:       constLabels,
+		stabilityLevel:    stabilityLevel,
+		deprecatedVersion: deprecatedVersion,
+	}
+	d.stabilityLevel.setDefaults()
+
+	return d
+}
+
+// String formats the Desc as a string.
+// The stability metadata is annotated in the 'HELP' section if this is called
+// after registration; otherwise it is not.
+// e.g. "Desc{fqName: "normal_stable_descriptor", help: "[STABLE] this is a stable descriptor", constLabels: {}, variableLabels: []}"
+func (d *Desc) String() string {
+	if d.isCreated {
+		return d.promDesc.String()
+	}
+
+	return prometheus.NewDesc(d.fqName, d.help, d.variableLabels, prometheus.Labels(d.constLabels)).String()
+}
+
+// toPrometheusDesc transforms the Desc into a prometheus.Desc.
+func (d *Desc) toPrometheusDesc() *prometheus.Desc {
+	return d.promDesc
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (d *Desc) DeprecatedVersion() *semver.Version {
+	return parseSemver(d.deprecatedVersion)
+
+}
+
+func (d *Desc) determineDeprecationStatus(version semver.Version) {
+	selfVersion := d.DeprecatedVersion()
+	if selfVersion == nil {
+		return
+	}
+	d.markDeprecationOnce.Do(func() {
+		if selfVersion.LTE(version) {
+			d.isDeprecated = true
+		}
+		if ShouldShowHidden() {
+			klog.Warningf("Hidden metrics(%s) have been manually overridden, showing this very deprecated metric.", d.fqName)
+			return
+		}
+		if shouldHide(&version, selfVersion) {
+			// TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369
+			// klog.Warningf("This metric(%s) has been deprecated for more than one release, hiding.", d.fqName)
+			d.isHidden = true
+		}
+	})
+}
+
+// IsHidden reports whether the metric will be hidden.
+func (d *Desc) IsHidden() bool {
+	return d.isHidden
+}
+
+// IsDeprecated reports whether the metric has been deprecated.
+func (d *Desc) IsDeprecated() bool {
+	return d.isDeprecated
+}
+
+// IsCreated reports whether the metric has been created.
+func (d *Desc) IsCreated() bool {
+	d.createLock.RLock()
+	defer d.createLock.RUnlock()
+
+	return d.isCreated
+}
+
+// create forces the initialization of Desc which has been deferred until
+// the point at which this method is invoked. This method will determine whether
+// the Desc is deprecated or hidden, no-opting if the Desc should be considered
+// hidden. Furthermore, this function no-opts and returns true if Desc is already
+// created.
+func (d *Desc) create(version *semver.Version) bool {
+	if version != nil {
+		d.determineDeprecationStatus(*version)
+	}
+
+	// let's not create if this metric is slated to be hidden
+	if d.IsHidden() {
+		return false
+	}
+	d.createOnce.Do(func() {
+		d.createLock.Lock()
+		defer d.createLock.Unlock()
+
+		d.isCreated = true
+		if d.IsDeprecated() {
+			d.initializeDeprecatedDesc()
+		} else {
+			d.initialize()
+		}
+	})
+	return d.IsCreated()
+}
+
+// ClearState clears all the state set by Create.
+// It is intended to be used when re-registering a hidden metric.
+func (d *Desc) ClearState() {
+	d.isDeprecated = false
+	d.isHidden = false
+	d.isCreated = false
+
+	d.markDeprecationOnce = sync.Once{}
+	d.createOnce = sync.Once{}
+	d.deprecateOnce = sync.Once{}
+	d.hideOnce = sync.Once{}
+	d.annotateOnce = sync.Once{}
+
+	d.annotatedHelp = d.help
+	d.promDesc = nil
+}
+
+func (d *Desc) markDeprecated() {
+	d.deprecateOnce.Do(func() {
+		d.annotatedHelp = fmt.Sprintf("(Deprecated since %s) %s", d.deprecatedVersion, d.annotatedHelp)
+	})
+}
+
+func (d *Desc) annotateStabilityLevel() {
+	d.annotateOnce.Do(func() {
+		d.annotatedHelp = fmt.Sprintf("[%v] %v", d.stabilityLevel, d.annotatedHelp)
+	})
+}
+
+func (d *Desc) initialize() {
+	d.annotateStabilityLevel()
+
+	// this actually creates the underlying prometheus desc.
+	d.promDesc = prometheus.NewDesc(d.fqName, d.annotatedHelp, d.variableLabels, prometheus.Labels(d.constLabels))
+}
+
+func (d *Desc) initializeDeprecatedDesc() {
+	d.markDeprecated()
+	d.initialize()
+}
+
+// GetRawDesc returns a new *Desc with the original parameters provided to NewDesc().
+//
+// It is useful in testing scenarios where the same Desc is registered to different registries.
+//  1. Desc `D` is registered to registry 'A' in TestA (Note: `D` may be created here)
+//  2. Desc `D` is registered to registry 'B' in TestB (Note: since 'D' has already been created once, it will be ignored by registry 'B')
+func (d *Desc) GetRawDesc() *Desc {
+	return NewDesc(d.fqName, d.help, d.variableLabels, d.constLabels, d.stabilityLevel, d.deprecatedVersion)
+}
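A hedged sketch of the GetRawDesc pattern described above (fqName and labels hypothetical):

package main

import "k8s.io/component-base/metrics"

func main() {
	desc := metrics.NewDesc(
		"example_watch_count", // hypothetical fqName
		"Number of active watches, by resource.",
		[]string{"resource"}, nil,
		metrics.ALPHA, "", // ALPHA stability, no deprecation version
	)

	// Once desc has been registered (and thus created) by one registry,
	// a second registry would ignore it; GetRawDesc yields a fresh,
	// uncreated copy carrying the same parameters.
	fresh := desc.GetRawDesc()
	_ = fresh
}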
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/gauge.go b/hack/tools/vendor/k8s.io/component-base/metrics/gauge.go
new file mode 100644
index 0000000000..0621560d0c
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/gauge.go
@@ -0,0 +1,298 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+	"sync"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+
+	"k8s.io/component-base/version"
+)
+
+// Gauge is our internal representation for our wrapping struct around prometheus
+// gauges. Gauge implements both kubeCollector and GaugeMetric.
+type Gauge struct {
+	GaugeMetric
+	*GaugeOpts
+	lazyMetric
+	selfCollector
+}
+
+var _ GaugeMetric = &Gauge{}
+var _ Registerable = &Gauge{}
+var _ kubeCollector = &Gauge{}
+
+// NewGauge returns an object which satisfies the kubeCollector, Registerable, and Gauge interfaces.
+// However, the object returned will not measure anything unless the collector is first
+// registered, since the metric is lazily instantiated.
+func NewGauge(opts *GaugeOpts) *Gauge {
+	opts.StabilityLevel.setDefaults()
+
+	kc := &Gauge{
+		GaugeOpts:  opts,
+		lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	kc.setPrometheusGauge(noop)
+	kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+	return kc
+}
+
+// setPrometheusGauge sets the underlying GaugeMetric object, i.e. the thing that does the measurement.
+func (g *Gauge) setPrometheusGauge(gauge prometheus.Gauge) {
+	g.GaugeMetric = gauge
+	g.initSelfCollection(gauge)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (g *Gauge) DeprecatedVersion() *semver.Version {
+	return parseSemver(g.GaugeOpts.DeprecatedVersion)
+}
+
+// initializeMetric invocation creates the actual underlying Gauge. Until this method is called
+// the underlying gauge is a no-op.
+func (g *Gauge) initializeMetric() {
+	g.GaugeOpts.annotateStabilityLevel()
+	// this actually creates the underlying prometheus gauge.
+	g.setPrometheusGauge(prometheus.NewGauge(g.GaugeOpts.toPromGaugeOpts()))
+}
+
+// initializeDeprecatedMetric invocation creates the actual (but deprecated) Gauge. Until this method
+// is called the underlying gauge is a no-op.
+func (g *Gauge) initializeDeprecatedMetric() {
+	g.GaugeOpts.markDeprecated()
+	g.initializeMetric()
+}
+
+// WithContext allows the normal Gauge metric to pass in context. The context is currently a no-op.
+func (g *Gauge) WithContext(ctx context.Context) GaugeMetric {
+	return g.GaugeMetric
+}
+
+// GaugeVec is the internal representation of our wrapping struct around prometheus
+// gaugeVecs. GaugeVec implements both kubeCollector and GaugeVecMetric.
+type GaugeVec struct {
+	*prometheus.GaugeVec
+	*GaugeOpts
+	lazyMetric
+	originalLabels []string
+}
+
+var _ GaugeVecMetric = &GaugeVec{}
+var _ Registerable = &GaugeVec{}
+var _ kubeCollector = &GaugeVec{}
+
+// NewGaugeVec returns an object which satisfies the kubeCollector, Registerable, and GaugeVecMetric interfaces.
+// However, the object returned will not measure anything unless the collector is first
+// registered, since the metric is lazily instantiated, and only members extracted after
+// registration will actually measure anything.
+func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec {
+	opts.StabilityLevel.setDefaults()
+
+	fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+
+	cv := &GaugeVec{
+		GaugeVec:       noopGaugeVec,
+		GaugeOpts:      opts,
+		originalLabels: labels,
+		lazyMetric:     lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	cv.lazyInit(cv, fqName)
+	return cv
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *GaugeVec) DeprecatedVersion() *semver.Version {
+	return parseSemver(v.GaugeOpts.DeprecatedVersion)
+}
+
+// initializeMetric invocation creates the actual underlying GaugeVec. Until this method is called
+// the underlying gaugeVec is a no-op.
+func (v *GaugeVec) initializeMetric() {
+	v.GaugeOpts.annotateStabilityLevel()
+	v.GaugeVec = prometheus.NewGaugeVec(v.GaugeOpts.toPromGaugeOpts(), v.originalLabels)
+}
+
+// initializeDeprecatedMetric invocation creates the actual (but deprecated) GaugeVec. Until this method is called
+// the underlying gaugeVec is a no-op.
+func (v *GaugeVec) initializeDeprecatedMetric() {
+	v.GaugeOpts.markDeprecated()
+	v.initializeMetric()
+}
+
+func (v *GaugeVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, error) {
+	if !v.IsCreated() {
+		if v.IsHidden() {
+			return noop, nil
+		}
+		return noop, errNotRegistered // return no-op gauge
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs)
+	} else {
+		v.initializeLabelAllowListsOnce.Do(func() {
+			allowListLock.RLock()
+			if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+				v.LabelValueAllowLists = allowList
+				allowList.ConstrainToAllowedList(v.originalLabels, lvs)
+			}
+			allowListLock.RUnlock()
+		})
+	}
+	elt, err := v.GaugeVec.GetMetricWithLabelValues(lvs...)
+	return elt, err
+}
+
+// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+// if one with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208
+//
+// In contrast, the Vec behavior in this package is that member extraction before registration
+// returns a permanent noop object.
+
+// WithLabelValues returns the GaugeMetric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new GaugeMetric is created IFF the gaugeVec
+// has been registered to a metrics registry.
+func (v *GaugeVec) WithLabelValues(lvs ...string) GaugeMetric {
+	ans, err := v.WithLabelValuesChecked(lvs...)
+	if err == nil || ErrIsNotRegistered(err) {
+		return ans
+	}
+	panic(err)
+}
+
+func (v *GaugeVec) WithChecked(labels map[string]string) (GaugeMetric, error) {
+	if !v.IsCreated() {
+		if v.IsHidden() {
+			return noop, nil
+		}
+		return noop, errNotRegistered // return no-op gauge
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainLabelMap(labels)
+	} else {
+		v.initializeLabelAllowListsOnce.Do(func() {
+			allowListLock.RLock()
+			if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+				v.LabelValueAllowLists = allowList
+				allowList.ConstrainLabelMap(labels)
+			}
+			allowListLock.RUnlock()
+		})
+	}
+	elt, err := v.GaugeVec.GetMetricWith(labels)
+	return elt, err
+}
+
+// With returns the GaugeMetric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new GaugeMetric is created IFF the gaugeVec has
+// been registered to a metrics registry.
+func (v *GaugeVec) With(labels map[string]string) GaugeMetric {
+	ans, err := v.WithChecked(labels)
+	if err == nil || ErrIsNotRegistered(err) {
+		return ans
+	}
+	panic(err)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+func (v *GaugeVec) Delete(labels map[string]string) bool {
+	if !v.IsCreated() {
+		return false // since we haven't created the metric, we haven't deleted a metric with the passed in values
+	}
+	return v.GaugeVec.Delete(labels)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *GaugeVec) Reset() {
+	if !v.IsCreated() {
+		return
+	}
+
+	v.GaugeVec.Reset()
+}
+
+// ResetLabelAllowLists resets the label allow list for the GaugeVec.
+// NOTE: This should only be used in tests.
+func (v *GaugeVec) ResetLabelAllowLists() {
+	v.initializeLabelAllowListsOnce = sync.Once{}
+	v.LabelValueAllowLists = nil
+}
+
+func newGaugeFunc(opts *GaugeOpts, function func() float64, v semver.Version) GaugeFunc {
+	g := NewGauge(opts)
+
+	if !g.Create(&v) {
+		return nil
+	}
+
+	return prometheus.NewGaugeFunc(g.GaugeOpts.toPromGaugeOpts(), function)
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts *GaugeOpts, function func() float64) GaugeFunc {
+	v := parseVersion(version.Get())
+
+	return newGaugeFunc(opts, function, v)
+}
+
+// WithContext returns a wrapped GaugeVec with context.
+func (v *GaugeVec) WithContext(ctx context.Context) *GaugeVecWithContext {
+	return &GaugeVecWithContext{
+		ctx:      ctx,
+		GaugeVec: v,
+	}
+}
+
+func (v *GaugeVec) InterfaceWithContext(ctx context.Context) GaugeVecMetric {
+	return v.WithContext(ctx)
+}
+
+// GaugeVecWithContext is the wrapper of GaugeVec with context.
+type GaugeVecWithContext struct {
+	*GaugeVec
+	ctx context.Context
+}
+
+// WithLabelValues is the wrapper of GaugeVec.WithLabelValues.
+func (vc *GaugeVecWithContext) WithLabelValues(lvs ...string) GaugeMetric {
+	return vc.GaugeVec.WithLabelValues(lvs...)
+}
+
+// With is the wrapper of GaugeVec.With.
+func (vc *GaugeVecWithContext) With(labels map[string]string) GaugeMetric {
+	return vc.GaugeVec.With(labels)
+}
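A hedged sketch of NewGaugeFunc (metric name hypothetical). The callback runs at collection time, possibly concurrently, so it must be concurrency-safe; and because the returned GaugeFunc is a plain prometheus collector, it is registered via RawMustRegister here:

package main

import (
	"time"

	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

var start = time.Now()

func main() {
	uptime := metrics.NewGaugeFunc(
		&metrics.GaugeOpts{
			Name:           "example_uptime_seconds", // hypothetical
			Help:           "Seconds since process start.",
			StabilityLevel: metrics.ALPHA,
		},
		func() float64 {
			// Invoked on every scrape; it reads only immutable state,
			// so it is trivially concurrency-safe.
			return time.Since(start).Seconds()
		},
	)
	legacyregistry.RawMustRegister(uptime)
}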
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/histogram.go b/hack/tools/vendor/k8s.io/component-base/metrics/histogram.go
new file mode 100644
index 0000000000..3065486ab4
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/histogram.go
@@ -0,0 +1,299 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+	"sync"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Histogram is our internal representation for our wrapping struct around prometheus
+// histograms. Histogram implements both kubeCollector and ObserverMetric.
+type Histogram struct {
+	ctx context.Context
+	ObserverMetric
+	*HistogramOpts
+	lazyMetric
+	selfCollector
+}
+
+// exemplarHistogramMetric holds a context to extract exemplar labels from, and a histogram metric to attach them to. It implements the metricWithExemplar interface.
+type exemplarHistogramMetric struct {
+	*Histogram
+}
+
+type exemplarHistogramVec struct {
+	*HistogramVecWithContext
+	observer prometheus.Observer
+}
+
+func (h *Histogram) Observe(v float64) {
+	h.withExemplar(v)
+}
+
+// withExemplar initializes the exemplarMetric object and sets the exemplar value.
+func (h *Histogram) withExemplar(v float64) {
+	(&exemplarHistogramMetric{h}).withExemplar(v)
+}
+
+// withExemplar attaches an exemplar to the metric.
+func (e *exemplarHistogramMetric) withExemplar(v float64) {
+	if m, ok := e.Histogram.ObserverMetric.(prometheus.ExemplarObserver); ok {
+		maybeSpanCtx := trace.SpanContextFromContext(e.ctx)
+		if maybeSpanCtx.IsValid() && maybeSpanCtx.IsSampled() {
+			exemplarLabels := prometheus.Labels{
+				"trace_id": maybeSpanCtx.TraceID().String(),
+				"span_id":  maybeSpanCtx.SpanID().String(),
+			}
+			m.ObserveWithExemplar(v, exemplarLabels)
+			return
+		}
+	}
+
+	e.ObserverMetric.Observe(v)
+}
+
+// NewHistogram returns an object which is Histogram-like. However, nothing
+// will be measured until the histogram is registered somewhere.
+func NewHistogram(opts *HistogramOpts) *Histogram {
+	opts.StabilityLevel.setDefaults()
+
+	h := &Histogram{
+		HistogramOpts: opts,
+		lazyMetric:    lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	h.setPrometheusHistogram(noopMetric{})
+	h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+	return h
+}
+
+// setPrometheusHistogram sets the underlying Histogram object, i.e. the thing that does the measurement.
+func (h *Histogram) setPrometheusHistogram(histogram prometheus.Histogram) {
+	h.ObserverMetric = histogram
+	h.initSelfCollection(histogram)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (h *Histogram) DeprecatedVersion() *semver.Version {
+	return parseSemver(h.HistogramOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual prometheus.Histogram object instantiation
+// and stores a reference to it
+func (h *Histogram) initializeMetric() {
+	h.HistogramOpts.annotateStabilityLevel()
+	// this actually creates the underlying prometheus histogram.
+	h.setPrometheusHistogram(prometheus.NewHistogram(h.HistogramOpts.toPromHistogramOpts()))
+}
+
+// initializeDeprecatedMetric invokes the actual prometheus.Histogram object instantiation
+// but modifies the Help description prior to object instantiation.
+func (h *Histogram) initializeDeprecatedMetric() {
+	h.HistogramOpts.markDeprecated()
+	h.initializeMetric()
+}
+
+// WithContext allows the normal Histogram metric to pass in context; the context is used when recording exemplars.
+func (h *Histogram) WithContext(ctx context.Context) ObserverMetric {
+	h.ctx = ctx
+	return h.ObserverMetric
+}
+
+// HistogramVec is the internal representation of our wrapping struct around prometheus
+// histogramVecs.
+type HistogramVec struct {
+	*prometheus.HistogramVec
+	*HistogramOpts
+	lazyMetric
+	originalLabels []string
+}
+
+// NewHistogramVec returns an object which satisfies the kubeCollector
+// interface and wraps the prometheus.HistogramVec object. However, the
+// object returned will not measure anything unless the collector is
+// first registered, since the metric is lazily instantiated, and
+// only members extracted after registration will actually measure
+// anything.
+func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec {
+	opts.StabilityLevel.setDefaults()
+
+	fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+
+	v := &HistogramVec{
+		HistogramVec:   noopHistogramVec,
+		HistogramOpts:  opts,
+		originalLabels: labels,
+		lazyMetric:     lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	v.lazyInit(v, fqName)
+	return v
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *HistogramVec) DeprecatedVersion() *semver.Version {
+	return parseSemver(v.HistogramOpts.DeprecatedVersion)
+}
+
+func (v *HistogramVec) initializeMetric() {
+	v.HistogramOpts.annotateStabilityLevel()
+	v.HistogramVec = prometheus.NewHistogramVec(v.HistogramOpts.toPromHistogramOpts(), v.originalLabels)
+}
+
+func (v *HistogramVec) initializeDeprecatedMetric() {
+	v.HistogramOpts.markDeprecated()
+	v.initializeMetric()
+}
+
+// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+// if one with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470
+//
+// In contrast, the Vec behavior in this package is that member extraction before registration
+// returns a permanent noop object.
+
+// WithLabelValues returns the ObserverMetric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new ObserverMetric is created IFF the HistogramVec
+// has been registered to a metrics registry.
+func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric {
+	if !v.IsCreated() {
+		return noop
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs)
+	} else {
+		v.initializeLabelAllowListsOnce.Do(func() {
+			allowListLock.RLock()
+			if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+				v.LabelValueAllowLists = allowList
+				allowList.ConstrainToAllowedList(v.originalLabels, lvs)
+			}
+			allowListLock.RUnlock()
+		})
+	}
+	return v.HistogramVec.WithLabelValues(lvs...)
+}
+
+// With returns the ObserverMetric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new ObserverMetric is created IFF the HistogramVec has
+// been registered to a metrics registry.
+func (v *HistogramVec) With(labels map[string]string) ObserverMetric {
+	if !v.IsCreated() {
+		return noop
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainLabelMap(labels)
+	} else {
+		v.initializeLabelAllowListsOnce.Do(func() {
+			allowListLock.RLock()
+			if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+				v.LabelValueAllowLists = allowList
+				allowList.ConstrainLabelMap(labels)
+			}
+			allowListLock.RUnlock()
+		})
+	}
+	return v.HistogramVec.With(labels)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+func (v *HistogramVec) Delete(labels map[string]string) bool {
+	if !v.IsCreated() {
+		return false // since we haven't created the metric, we haven't deleted a metric with the passed in values
+	}
+	return v.HistogramVec.Delete(labels)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *HistogramVec) Reset() {
+	if !v.IsCreated() {
+		return
+	}
+
+	v.HistogramVec.Reset()
+}
+
+// ResetLabelAllowLists resets the label allow list for the HistogramVec.
+// NOTE: This should only be used in tests.
+func (v *HistogramVec) ResetLabelAllowLists() {
+	v.initializeLabelAllowListsOnce = sync.Once{}
+	v.LabelValueAllowLists = nil
+}
+
+// WithContext returns a wrapped HistogramVec with context.
+func (v *HistogramVec) WithContext(ctx context.Context) *HistogramVecWithContext {
+	return &HistogramVecWithContext{
+		ctx:          ctx,
+		HistogramVec: v,
+	}
+}
+
+// HistogramVecWithContext is the wrapper of HistogramVec with context.
+type HistogramVecWithContext struct {
+	*HistogramVec
+	ctx context.Context
+}
+
+func (h *exemplarHistogramVec) Observe(v float64) {
+	h.withExemplar(v)
+}
+
+func (h *exemplarHistogramVec) withExemplar(v float64) {
+	if m, ok := h.observer.(prometheus.ExemplarObserver); ok {
+		maybeSpanCtx := trace.SpanContextFromContext(h.HistogramVecWithContext.ctx)
+		if maybeSpanCtx.IsValid() && maybeSpanCtx.IsSampled() {
+			m.ObserveWithExemplar(v, prometheus.Labels{
+				"trace_id": maybeSpanCtx.TraceID().String(),
+				"span_id":  maybeSpanCtx.SpanID().String(),
+			})
+			return
+		}
+	}
+
+	h.observer.Observe(v)
+}
+
+// WithLabelValues is the wrapper of HistogramVec.WithLabelValues.
+func (vc *HistogramVecWithContext) WithLabelValues(lvs ...string) *exemplarHistogramVec {
+	return &exemplarHistogramVec{
+		HistogramVecWithContext: vc,
+		observer:                vc.HistogramVec.WithLabelValues(lvs...),
+	}
+}
+
+// With is the wrapper of HistogramVec.With.
+func (vc *HistogramVecWithContext) With(labels map[string]string) *exemplarHistogramVec {
+	return &exemplarHistogramVec{
+		HistogramVecWithContext: vc,
+		observer:                vc.HistogramVec.With(labels),
+	}
+}
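A hedged sketch tying Histogram to the bucket helpers above (names illustrative); Observe is a no-op until the metric is registered:

package main

import (
	"time"

	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

var requestDuration = metrics.NewHistogram(&metrics.HistogramOpts{
	Name:           "example_request_duration_seconds", // hypothetical
	Help:           "Request latency distribution.",
	Buckets:        metrics.ExponentialBuckets(0.001, 2, 12), // 1ms .. ~2s
	StabilityLevel: metrics.ALPHA,
})

func init() {
	legacyregistry.MustRegister(requestDuration)
}

func main() {
	started := time.Now()
	// ... handle a request ...
	requestDuration.Observe(time.Since(started).Seconds())
}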
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/http.go b/hack/tools/vendor/k8s.io/component-base/metrics/http.go
new file mode 100644
index 0000000000..2a0d249c20
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/http.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+var (
+	processStartedAt time.Time
+)
+
+func init() {
+	processStartedAt = time.Now()
+}
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+	// HTTPErrorOnError serves an HTTP status code 500 upon the first error
+	// encountered. Report the error message in the body.
+	HTTPErrorOnError promhttp.HandlerErrorHandling = iota
+
+	// ContinueOnError ignores errors and tries to serve as many metrics as possible.
+	// However, if no metrics can be served, serve an HTTP status code 500 and the
+	// last error message in the body. Only use this in deliberate "best
+	// effort" metrics collection scenarios. In this case, it is highly
+	// recommended to provide other means of detecting errors: By setting an
+	// ErrorLog in HandlerOpts, the errors are logged. By providing a
+	// Registry in HandlerOpts, the exposed metrics include an error counter
+	// "promhttp_metric_handler_errors_total", which can be used for
+	// alerts.
+	ContinueOnError
+
+	// PanicOnError panics upon the first error encountered (useful for "crash only" apps).
+	PanicOnError
+)
+
+// HandlerOpts specifies options for how to serve metrics via an http.Handler. The
+// zero value of HandlerOpts is a reasonable default.
+type HandlerOpts promhttp.HandlerOpts
+
+func (ho *HandlerOpts) toPromhttpHandlerOpts() promhttp.HandlerOpts {
+	ho.ProcessStartTime = processStartedAt
+	return promhttp.HandlerOpts(*ho)
+}
+
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as it is used by the Handler function.
+func HandlerFor(reg Gatherer, opts HandlerOpts) http.Handler {
+	return promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts())
+}
+
+// HandlerWithReset returns an http.Handler that additionally resets the registry on HTTP DELETE requests.
+func HandlerWithReset(reg KubeRegistry, opts HandlerOpts) http.Handler {
+	defaultHandler := promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts())
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.Method == http.MethodDelete {
+			reg.Reset()
+			io.WriteString(w, "metrics reset\n")
+			return
+		}
+		defaultHandler.ServeHTTP(w, r)
+	})
+}
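A minimal serving sketch, assuming a dedicated KubeRegistry (port and path arbitrary):

package main

import (
	"log"
	"net/http"

	"k8s.io/component-base/metrics"
)

func main() {
	reg := metrics.NewKubeRegistry()

	mux := http.NewServeMux()
	// ContinueOnError keeps serving whatever metrics can still be gathered.
	mux.Handle("/metrics", metrics.HandlerFor(reg, metrics.HandlerOpts{
		ErrorHandling: metrics.ContinueOnError,
	}))
	log.Fatal(http.ListenAndServe(":8080", mux))
}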
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/labels.go b/hack/tools/vendor/k8s.io/component-base/metrics/labels.go
new file mode 100644
index 0000000000..11af3ae424
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/labels.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Labels represents a collection of label name -> value mappings.
+type Labels prometheus.Labels
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go b/hack/tools/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go
new file mode 100644
index 0000000000..64a430b796
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package legacyregistry
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/collectors"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+
+	"k8s.io/component-base/metrics"
+)
+
+var (
+	defaultRegistry = metrics.NewKubeRegistry()
+	// DefaultGatherer exposes the global registry gatherer
+	DefaultGatherer metrics.Gatherer = defaultRegistry
+	// Reset calls reset on the global registry
+	Reset = defaultRegistry.Reset
+	// MustRegister registers registerable metrics but uses the global registry.
+	MustRegister = defaultRegistry.MustRegister
+	// RawMustRegister registers prometheus collectors but uses the global registry;
+	// this bypasses the metric stability framework.
+	//
+	// Deprecated
+	RawMustRegister = defaultRegistry.RawMustRegister
+
+	// Register registers a collectable metric but uses the global registry
+	Register = defaultRegistry.Register
+
+	// Registerer exposes the global registerer
+	Registerer = defaultRegistry.Registerer
+
+	processStart time.Time
+)
+
+func init() {
+	RawMustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
+	RawMustRegister(collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll)))
+	defaultRegistry.RegisterMetaMetrics()
+	processStart = time.Now()
+}
+
+// Handler returns an HTTP handler for the DefaultGatherer. It is
+// already instrumented with InstrumentMetricHandler (using "prometheus" as handler
+// name).
+func Handler() http.Handler {
+	return promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, promhttp.HandlerFor(defaultRegistry, promhttp.HandlerOpts{ProcessStartTime: processStart}))
+}
+
+// HandlerWithReset returns an HTTP handler for the DefaultGatherer but invokes
+// registry reset if the http method is DELETE.
+func HandlerWithReset() http.Handler {
+	return promhttp.InstrumentMetricHandler(
+		prometheus.DefaultRegisterer,
+		metrics.HandlerWithReset(defaultRegistry, metrics.HandlerOpts{ProcessStartTime: processStart}))
+}
+
+// CustomRegister registers a custom collector but uses the global registry.
+func CustomRegister(c metrics.StableCollector) error {
+	err := defaultRegistry.CustomRegister(c)
+
+	// TODO(RainbowMango): Maybe we can wrap this error with error wrapping (Go 1.13).
+	_ = prometheus.Register(c)
+
+	return err
+}
+
+// CustomMustRegister registers custom collectors but uses the global registry.
+func CustomMustRegister(cs ...metrics.StableCollector) {
+	defaultRegistry.CustomMustRegister(cs...)
+
+	for _, c := range cs {
+		prometheus.MustRegister(c)
+	}
+}
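And a sketch of exposing the global registry (paths hypothetical); HandlerWithReset is mainly useful in tests that want a clean slate between cases:

package main

import (
	"log"
	"net/http"

	"k8s.io/component-base/metrics/legacyregistry"
)

func main() {
	mux := http.NewServeMux()
	// Read-only endpoint backed by the global registry.
	mux.Handle("/metrics", legacyregistry.Handler())
	// Same data, but an HTTP DELETE additionally resets all registered metrics.
	mux.Handle("/metrics-reset", legacyregistry.HandlerWithReset())
	log.Fatal(http.ListenAndServe(":8080", mux))
}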
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/metric.go b/hack/tools/vendor/k8s.io/component-base/metrics/metric.go
new file mode 100644
index 0000000000..c8b083995a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/metric.go
@@ -0,0 +1,240 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"sync"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+
+	promext "k8s.io/component-base/metrics/prometheusextension"
+	"k8s.io/klog/v2"
+)
+
+/*
+kubeCollector extends the prometheus.Collector interface to allow customization of the metric
+registration process. It defers metric initialization until Create() is called, which then
+delegates to the underlying metric's initializeMetric or initializeDeprecatedMetric
+method, depending on whether the metric is deprecated or not.
+*/
+type kubeCollector interface {
+	Collector
+	lazyKubeMetric
+	DeprecatedVersion() *semver.Version
+	// Each collector metric should provide an initialization function
+	// for both deprecated and non-deprecated variants of a metric. This
+	// is necessary since metric instantiation will be deferred
+	// until the metric is actually registered somewhere.
+	initializeMetric()
+	initializeDeprecatedMetric()
+}
+
+/*
+lazyKubeMetric defines our metric registration interface. lazyKubeMetric objects are expected
+to lazily instantiate metrics (i.e. defer metric instantiation until
+the Create() function is explicitly called).
+*/
+type lazyKubeMetric interface {
+	Create(*semver.Version) bool
+	IsCreated() bool
+	IsHidden() bool
+	IsDeprecated() bool
+}
+
+/*
+lazyMetric implements lazyKubeMetric. A lazy metric is lazy because it waits until metric
+registration time before instantiation. Add it as an anonymous field to a struct that
+implements kubeCollector to get deferred registration behavior. You must call lazyInit
+with the kubeCollector itself as an argument.
+*/
+type lazyMetric struct {
+	fqName              string
+	isDeprecated        bool
+	isHidden            bool
+	isCreated           bool
+	createLock          sync.RWMutex
+	markDeprecationOnce sync.Once
+	createOnce          sync.Once
+	self                kubeCollector
+	stabilityLevel      StabilityLevel
+}
+
+func (r *lazyMetric) IsCreated() bool {
+	r.createLock.RLock()
+	defer r.createLock.RUnlock()
+	return r.isCreated
+}
+
+// lazyInit provides the lazyMetric with a reference to the kubeCollector it is supposed
+// to allow lazy initialization for. It should be invoked in the factory function which creates new
+// kubeCollector type objects.
+func (r *lazyMetric) lazyInit(self kubeCollector, fqName string) {
+	r.fqName = fqName
+	r.self = self
+}
+
+// preprocessMetric figures out whether the lazy metric should be hidden or not.
+// This method takes a Version argument which should be the version of the binary in which
+// this code is currently being executed. A metric can be hidden under two conditions:
+//  1. if the metric is deprecated and is outside the grace period (i.e. has been
+//     deprecated for more than one release);
+//  2. if the metric is manually disabled via a CLI flag.
+//
+// Disclaimer:  disabling a metric via a CLI flag has higher precedence than
+// deprecation and will override show-hidden-metrics for the explicitly
+// disabled metric.
+func (r *lazyMetric) preprocessMetric(version semver.Version) {
+	disabledMetricsLock.RLock()
+	defer disabledMetricsLock.RUnlock()
+	// disabling metrics is higher in precedence than showing hidden metrics
+	if _, ok := disabledMetrics[r.fqName]; ok {
+		r.isHidden = true
+		return
+	}
+	selfVersion := r.self.DeprecatedVersion()
+	if selfVersion == nil {
+		return
+	}
+	r.markDeprecationOnce.Do(func() {
+		if selfVersion.LTE(version) {
+			r.isDeprecated = true
+		}
+
+		if ShouldShowHidden() {
+			klog.Warningf("Hidden metrics (%s) have been manually overridden, showing this very deprecated metric.", r.fqName)
+			return
+		}
+		if shouldHide(&version, selfVersion) {
+			// TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369
+			// klog.Warningf("This metric has been deprecated for more than one release, hiding.")
+			r.isHidden = true
+		}
+	})
+}
+
+func (r *lazyMetric) IsHidden() bool {
+	return r.isHidden
+}
+
+func (r *lazyMetric) IsDeprecated() bool {
+	return r.isDeprecated
+}
+
+// Create forces the initialization of the metric, which has been deferred until
+// the point at which this method is invoked. This method will determine whether
+// the metric is deprecated or hidden, becoming a no-op if the metric should be
+// considered hidden. Furthermore, this function is a no-op and returns true if
+// the metric is already created.
+func (r *lazyMetric) Create(version *semver.Version) bool {
+	if version != nil {
+		r.preprocessMetric(*version)
+	}
+	// let's not create if this metric is slated to be hidden
+	if r.IsHidden() {
+		return false
+	}
+
+	r.createOnce.Do(func() {
+		r.createLock.Lock()
+		defer r.createLock.Unlock()
+		r.isCreated = true
+		if r.IsDeprecated() {
+			r.self.initializeDeprecatedMetric()
+		} else {
+			r.self.initializeMetric()
+		}
+	})
+	sl := r.stabilityLevel
+	deprecatedV := r.self.DeprecatedVersion()
+	dv := ""
+	if deprecatedV != nil {
+		dv = deprecatedV.String()
+	}
+	registeredMetricsTotal.WithLabelValues(string(sl), dv).Inc()
+	return r.IsCreated()
+}
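+
+// Illustrative sketch (not part of the upstream file): a registry-style
+// caller is expected to drive this lifecycle roughly as follows; "c" is an
+// assumed kubeCollector and "v" the version of the running binary.
+//
+//	v := semver.MustParse("1.30.0")
+//	if c.Create(&v) {
+//		// metric was instantiated (deprecated or not) and can be registered
+//	} else {
+//		// metric is hidden for this version; skip registration
+//	}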
+
+// ClearState will clear all the states marked by Create.
+// It is intended to be used for re-registering a hidden metric.
+func (r *lazyMetric) ClearState() {
+	r.createLock.Lock()
+	defer r.createLock.Unlock()
+
+	r.isDeprecated = false
+	r.isHidden = false
+	r.isCreated = false
+	r.markDeprecationOnce = sync.Once{}
+	r.createOnce = sync.Once{}
+}
+
+// FQName returns the fully-qualified metric name of the collector.
+func (r *lazyMetric) FQName() string {
+	return r.fqName
+}
+
+/*
+This code is directly lifted from the prometheus codebase. It's a convenience struct which
+allows you to satisfy the Collector interface automatically if you already satisfy the Metric interface.
+
+For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/collector.go#L98-L120
+*/
+type selfCollector struct {
+	metric prometheus.Metric
+}
+
+func (c *selfCollector) initSelfCollection(m prometheus.Metric) {
+	c.metric = m
+}
+
+func (c *selfCollector) Describe(ch chan<- *prometheus.Desc) {
+	ch <- c.metric.Desc()
+}
+
+func (c *selfCollector) Collect(ch chan<- prometheus.Metric) {
+	ch <- c.metric
+}
+
+// metricWithExemplar is an interface that knows how to attach an exemplar to certain supported metric types.
+type metricWithExemplar interface {
+	withExemplar(v float64)
+}
+
+// no-op vecs for convenience
+var noopCounterVec = &prometheus.CounterVec{}
+var noopHistogramVec = &prometheus.HistogramVec{}
+var noopTimingHistogramVec = &promext.TimingHistogramVec{}
+var noopGaugeVec = &prometheus.GaugeVec{}
+
+// just use a convenience struct for all the no-ops
+var noop = &noopMetric{}
+
+type noopMetric struct{}
+
+func (noopMetric) Inc()                              {}
+func (noopMetric) Add(float64)                       {}
+func (noopMetric) Dec()                              {}
+func (noopMetric) Set(float64)                       {}
+func (noopMetric) Sub(float64)                       {}
+func (noopMetric) Observe(float64)                   {}
+func (noopMetric) ObserveWithWeight(float64, uint64) {}
+func (noopMetric) SetToCurrentTime()                 {}
+func (noopMetric) Desc() *prometheus.Desc            { return nil }
+func (noopMetric) Write(*dto.Metric) error           { return nil }
+func (noopMetric) Describe(chan<- *prometheus.Desc)  {}
+func (noopMetric) Collect(chan<- prometheus.Metric)  {}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/options.go b/hack/tools/vendor/k8s.io/component-base/metrics/options.go
new file mode 100644
index 0000000000..17f44ef2a3
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/options.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/blang/semver/v4"
+	"github.com/spf13/pflag"
+
+	"k8s.io/component-base/version"
+)
+
+// Options has all parameters needed for exposing metrics from components
+type Options struct {
+	ShowHiddenMetricsForVersion string
+	DisabledMetrics             []string
+	AllowListMapping            map[string]string
+	AllowListMappingManifest    string
+}
+
+// NewOptions returns default metrics options
+func NewOptions() *Options {
+	return &Options{}
+}
+
+// Validate validates metrics flags options.
+func (o *Options) Validate() []error {
+	if o == nil {
+		return nil
+	}
+
+	var errs []error
+	err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), o.ShowHiddenMetricsForVersion)
+	if err != nil {
+		errs = append(errs, err)
+	}
+
+	if err := validateAllowMetricLabel(o.AllowListMapping); err != nil {
+		errs = append(errs, err)
+	}
+
+	if len(errs) == 0 {
+		return nil
+	}
+	return errs
+}
+
+// AddFlags adds flags for exposing component metrics.
+func (o *Options) AddFlags(fs *pflag.FlagSet) {
+	if o == nil {
+		return
+	}
+	fs.StringVar(&o.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.ShowHiddenMetricsForVersion,
+		"The previous version for which you want to show hidden metrics. "+
+			"Only the previous minor version is meaningful; other values will not be allowed. "+
+			"The format is <major>.<minor>, e.g.: '1.16'. "+
+			"The purpose of this format is to make sure you have the opportunity to notice if the next release hides additional metrics, "+
+			"rather than being surprised when they are permanently removed in the release after that.")
+	fs.StringSliceVar(&o.DisabledMetrics,
+		"disabled-metrics",
+		o.DisabledMetrics,
+		"This flag provides an escape hatch for misbehaving metrics. "+
+			"You must provide the fully qualified metric name in order to disable it. "+
+			"Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.")
+	fs.StringToStringVar(&o.AllowListMapping, "allow-metric-labels", o.AllowListMapping,
+		"The map from metric-label to value allow-list of this label. The key's format is <MetricName>,<LabelName>. "+
+		"The value's format is <allowed_value>,<allowed_value>..."+
+			"e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.")
+	fs.StringVar(&o.AllowListMappingManifest, "allow-metric-labels-manifest", o.AllowListMappingManifest,
+		"The path to the manifest file that contains the allow-list mapping. "+
+			"The format of the file is the same as the flag --allow-metric-labels. "+
+			"Note that the flag --allow-metric-labels will override the manifest file.")
+}
+
+// Apply applies parameters into global configuration of metrics.
+func (o *Options) Apply() {
+	if o == nil {
+		return
+	}
+	if len(o.ShowHiddenMetricsForVersion) > 0 {
+		SetShowHidden()
+	}
+	// set disabled metrics
+	for _, metricName := range o.DisabledMetrics {
+		SetDisabledMetric(metricName)
+	}
+	if o.AllowListMapping != nil {
+		SetLabelAllowListFromCLI(o.AllowListMapping)
+	} else if len(o.AllowListMappingManifest) > 0 {
+		SetLabelAllowListFromManifest(o.AllowListMappingManifest)
+	}
+}
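+
+// Illustrative sketch (not part of the upstream file): the intended wiring of
+// Options into a component's flag handling; "fs" is an assumed
+// *pflag.FlagSet owned by the caller and parsed elsewhere.
+//
+//	o := NewOptions()
+//	o.AddFlags(fs)
+//	// ... after fs.Parse(args) ...
+//	if errs := o.Validate(); len(errs) > 0 {
+//		// surface configuration errors before serving metrics
+//	}
+//	o.Apply()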
+
+func validateShowHiddenMetricsVersion(currentVersion semver.Version, targetVersionStr string) error {
+	if targetVersionStr == "" {
+		return nil
+	}
+
+	validVersionStr := fmt.Sprintf("%d.%d", currentVersion.Major, currentVersion.Minor-1)
+	if targetVersionStr != validVersionStr {
+		return fmt.Errorf("--show-hidden-metrics-for-version must be omitted or have the value '%v'. Only the previous minor version is allowed", validVersionStr)
+	}
+
+	return nil
+}
+
+func validateAllowMetricLabel(allowListMapping map[string]string) error {
+	if allowListMapping == nil {
+		return nil
+	}
+	metricNameRegex := `[a-zA-Z_:][a-zA-Z0-9_:]*`
+	labelRegex := `[a-zA-Z_][a-zA-Z0-9_]*`
+	for k := range allowListMapping {
+		reg := regexp.MustCompile(metricNameRegex + `,` + labelRegex)
+		if reg.FindString(k) != k {
+			return fmt.Errorf("--allow-metric-labels must have a list of kv pairs with format `metricName,labelName=labelValue, labelValue,...`")
+		}
+	}
+	return nil
+}
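+
+// Illustrative example (not from upstream): keys accepted by the validation
+// above take the form "<metricName>,<labelName>", so the following returns
+// nil; the metric and label names are assumptions for the example.
+//
+//	validateAllowMetricLabel(map[string]string{
+//		"apiserver_request_total,verb": "GET,LIST,WATCH",
+//	})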
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/opts.go b/hack/tools/vendor/k8s.io/component-base/metrics/opts.go
new file mode 100644
index 0000000000..43015169e7
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/opts.go
@@ -0,0 +1,390 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+	promext "k8s.io/component-base/metrics/prometheusextension"
+	"k8s.io/klog/v2"
+	yaml "sigs.k8s.io/yaml/goyaml.v2"
+)
+
+var (
+	labelValueAllowLists = map[string]*MetricLabelAllowList{}
+	allowListLock        sync.RWMutex
+)
+
+// ResetLabelValueAllowLists resets the allow lists for label values.
+// NOTE: This should only be used in test.
+func ResetLabelValueAllowLists() {
+	allowListLock.Lock()
+	defer allowListLock.Unlock()
+	labelValueAllowLists = map[string]*MetricLabelAllowList{}
+}
+
+// KubeOpts is a superset struct for prometheus.Opts. The prometheus Opts structure
+// is purposefully not embedded here because that would change struct initialization
+// in the manner to which people are currently accustomed.
+//
+// Name must be set to a non-empty string. DeprecatedVersion is defined only
+// if the metric to which these options apply is, in fact, deprecated.
+type KubeOpts struct {
+	Namespace                     string
+	Subsystem                     string
+	Name                          string
+	Help                          string
+	ConstLabels                   map[string]string
+	DeprecatedVersion             string
+	deprecateOnce                 sync.Once
+	annotateOnce                  sync.Once
+	StabilityLevel                StabilityLevel
+	initializeLabelAllowListsOnce sync.Once
+	LabelValueAllowLists          *MetricLabelAllowList
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+	return prometheus.BuildFQName(namespace, subsystem, name)
+}
+
+// StabilityLevel represents the API guarantees for a given defined metric.
+type StabilityLevel string
+
+const (
+	// INTERNAL metrics have no stability guarantees, as such, labels may
+	// be arbitrarily added/removed and the metric may be deleted at any time.
+	INTERNAL StabilityLevel = "INTERNAL"
+	// ALPHA metrics have no stability guarantees, as such, labels may
+	// be arbitrarily added/removed and the metric may be deleted at any time.
+	ALPHA StabilityLevel = "ALPHA"
+	// BETA metrics are governed by the deprecation policy outlined in
+	// the control plane metrics stability KEP.
+	BETA StabilityLevel = "BETA"
+	// STABLE metrics are guaranteed not to be mutated and removal is governed by
+	// the deprecation policy outlined in the control plane metrics stability KEP.
+	STABLE StabilityLevel = "STABLE"
+)
+
+// setDefaults defaults the stability level to 'ALPHA' when it is empty.
+func (sl *StabilityLevel) setDefaults() {
+	switch *sl {
+	case "":
+		*sl = ALPHA
+	default:
+		// no-op, since we have a StabilityLevel already
+	}
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts KubeOpts
+
+// markDeprecated prepends a deprecation notice to the metric's help text.
+func (o *CounterOpts) markDeprecated() {
+	o.deprecateOnce.Do(func() {
+		o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help)
+	})
+}
+
+// annotateStabilityLevel annotates help description on the metric description with the stability level
+// of the metric
+func (o *CounterOpts) annotateStabilityLevel() {
+	o.annotateOnce.Do(func() {
+		o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help)
+	})
+}
+
+// convenience function to allow easy transformation to the prometheus
+// counterpart. This will do more once we have a proper label abstraction
+func (o *CounterOpts) toPromCounterOpts() prometheus.CounterOpts {
+	return prometheus.CounterOpts{
+		Namespace:   o.Namespace,
+		Subsystem:   o.Subsystem,
+		Name:        o.Name,
+		Help:        o.Help,
+		ConstLabels: o.ConstLabels,
+	}
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts KubeOpts
+
+// markDeprecated prepends a deprecation notice to the metric's help text.
+func (o *GaugeOpts) markDeprecated() {
+	o.deprecateOnce.Do(func() {
+		o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help)
+	})
+}
+
+// annotateStabilityLevel annotates help description on the metric description with the stability level
+// of the metric
+func (o *GaugeOpts) annotateStabilityLevel() {
+	o.annotateOnce.Do(func() {
+		o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help)
+	})
+}
+
+// convenience function to allow easy transformation to the prometheus
+// counterpart. This will do more once we have a proper label abstraction
+func (o *GaugeOpts) toPromGaugeOpts() prometheus.GaugeOpts {
+	return prometheus.GaugeOpts{
+		Namespace:   o.Namespace,
+		Subsystem:   o.Subsystem,
+		Name:        o.Name,
+		Help:        o.Help,
+		ConstLabels: o.ConstLabels,
+	}
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type HistogramOpts struct {
+	Namespace                     string
+	Subsystem                     string
+	Name                          string
+	Help                          string
+	ConstLabels                   map[string]string
+	Buckets                       []float64
+	DeprecatedVersion             string
+	deprecateOnce                 sync.Once
+	annotateOnce                  sync.Once
+	StabilityLevel                StabilityLevel
+	initializeLabelAllowListsOnce sync.Once
+	LabelValueAllowLists          *MetricLabelAllowList
+}
+
+// markDeprecated prepends a deprecation notice to the metric's help text.
+func (o *HistogramOpts) markDeprecated() {
+	o.deprecateOnce.Do(func() {
+		o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help)
+	})
+}
+
+// annotateStabilityLevel annotates help description on the metric description with the stability level
+// of the metric
+func (o *HistogramOpts) annotateStabilityLevel() {
+	o.annotateOnce.Do(func() {
+		o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help)
+	})
+}
+
+// convenience function to allow easy transformation to the prometheus
+// counterpart. This will do more once we have a proper label abstraction
+func (o *HistogramOpts) toPromHistogramOpts() prometheus.HistogramOpts {
+	return prometheus.HistogramOpts{
+		Namespace:   o.Namespace,
+		Subsystem:   o.Subsystem,
+		Name:        o.Name,
+		Help:        o.Help,
+		ConstLabels: o.ConstLabels,
+		Buckets:     o.Buckets,
+	}
+}
+
+// TimingHistogramOpts bundles the options for creating a TimingHistogram metric. It is
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type TimingHistogramOpts struct {
+	Namespace                     string
+	Subsystem                     string
+	Name                          string
+	Help                          string
+	ConstLabels                   map[string]string
+	Buckets                       []float64
+	InitialValue                  float64
+	DeprecatedVersion             string
+	deprecateOnce                 sync.Once
+	annotateOnce                  sync.Once
+	StabilityLevel                StabilityLevel
+	initializeLabelAllowListsOnce sync.Once
+	LabelValueAllowLists          *MetricLabelAllowList
+}
+
+// markDeprecated prepends a deprecation notice to the metric's help text.
+func (o *TimingHistogramOpts) markDeprecated() {
+	o.deprecateOnce.Do(func() {
+		o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help)
+	})
+}
+
+// annotateStabilityLevel annotates help description on the metric description with the stability level
+// of the metric
+func (o *TimingHistogramOpts) annotateStabilityLevel() {
+	o.annotateOnce.Do(func() {
+		o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help)
+	})
+}
+
+// convenience function to allow easy transformation to the prometheus
+// counterpart. This will do more once we have a proper label abstraction
+func (o *TimingHistogramOpts) toPromHistogramOpts() promext.TimingHistogramOpts {
+	return promext.TimingHistogramOpts{
+		Namespace:    o.Namespace,
+		Subsystem:    o.Subsystem,
+		Name:         o.Name,
+		Help:         o.Help,
+		ConstLabels:  o.ConstLabels,
+		Buckets:      o.Buckets,
+		InitialValue: o.InitialValue,
+	}
+}
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name to a non-empty string. While all other fields are
+// optional and can safely be left at their zero value, it is recommended to set
+// a help string and to explicitly set the Objectives field to the desired value
+// as the default value will change in the upcoming v0.10 of the library.
+type SummaryOpts struct {
+	Namespace                     string
+	Subsystem                     string
+	Name                          string
+	Help                          string
+	ConstLabels                   map[string]string
+	Objectives                    map[float64]float64
+	MaxAge                        time.Duration
+	AgeBuckets                    uint32
+	BufCap                        uint32
+	DeprecatedVersion             string
+	deprecateOnce                 sync.Once
+	annotateOnce                  sync.Once
+	StabilityLevel                StabilityLevel
+	initializeLabelAllowListsOnce sync.Once
+	LabelValueAllowLists          *MetricLabelAllowList
+}
+
+// markDeprecated prepends a deprecation notice to the metric's help text.
+func (o *SummaryOpts) markDeprecated() {
+	o.deprecateOnce.Do(func() {
+		o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help)
+	})
+}
+
+// annotateStabilityLevel annotates help description on the metric description with the stability level
+// of the metric
+func (o *SummaryOpts) annotateStabilityLevel() {
+	o.annotateOnce.Do(func() {
+		o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help)
+	})
+}
+
+// Deprecated: defObjectives will not be used as the default objectives in
+// v1.0.0 of the library. The default Summary will have no quantiles then.
+var (
+	defObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+)
+
+// convenience function to allow easy transformation to the prometheus
+// counterpart. This will do more once we have a proper label abstraction
+func (o *SummaryOpts) toPromSummaryOpts() prometheus.SummaryOpts {
+	// we need to retain existing quantile behavior for backwards compatibility,
+	// so let's do what prometheus used to do prior to v1.
+	objectives := o.Objectives
+	if objectives == nil {
+		objectives = defObjectives
+	}
+	return prometheus.SummaryOpts{
+		Namespace:   o.Namespace,
+		Subsystem:   o.Subsystem,
+		Name:        o.Name,
+		Help:        o.Help,
+		ConstLabels: o.ConstLabels,
+		Objectives:  objectives,
+		MaxAge:      o.MaxAge,
+		AgeBuckets:  o.AgeBuckets,
+		BufCap:      o.BufCap,
+	}
+}
+
+type MetricLabelAllowList struct {
+	labelToAllowList map[string]sets.Set[string]
+}
+
+func (allowList *MetricLabelAllowList) ConstrainToAllowedList(labelNameList, labelValueList []string) {
+	for index, value := range labelValueList {
+		name := labelNameList[index]
+		if allowValues, ok := allowList.labelToAllowList[name]; ok {
+			if !allowValues.Has(value) {
+				labelValueList[index] = "unexpected"
+				cardinalityEnforcementUnexpectedCategorizationsTotal.Inc()
+			}
+		}
+	}
+}
+
+func (allowList *MetricLabelAllowList) ConstrainLabelMap(labels map[string]string) {
+	for name, value := range labels {
+		if allowValues, ok := allowList.labelToAllowList[name]; ok {
+			if !allowValues.Has(value) {
+				labels[name] = "unexpected"
+				cardinalityEnforcementUnexpectedCategorizationsTotal.Inc()
+			}
+		}
+	}
+}
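+
+// Illustrative example (not from upstream): an allow-list that only permits
+// verb="GET" rewrites any other value to "unexpected" and bumps the
+// cardinality-enforcement counter.
+//
+//	allow := &MetricLabelAllowList{
+//		labelToAllowList: map[string]sets.Set[string]{
+//			"verb": sets.New[string]("GET"),
+//		},
+//	}
+//	labels := map[string]string{"verb": "PATCH", "code": "200"}
+//	allow.ConstrainLabelMap(labels)
+//	// labels is now {"verb": "unexpected", "code": "200"}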
+
+func SetLabelAllowListFromCLI(allowListMapping map[string]string) {
+	allowListLock.Lock()
+	defer allowListLock.Unlock()
+	for metricLabelName, labelValues := range allowListMapping {
+		metricName := strings.Split(metricLabelName, ",")[0]
+		labelName := strings.Split(metricLabelName, ",")[1]
+		valueSet := sets.New[string](strings.Split(labelValues, ",")...)
+
+		allowList, ok := labelValueAllowLists[metricName]
+		if ok {
+			allowList.labelToAllowList[labelName] = valueSet
+		} else {
+			labelToAllowList := make(map[string]sets.Set[string])
+			labelToAllowList[labelName] = valueSet
+			labelValueAllowLists[metricName] = &MetricLabelAllowList{
+				labelToAllowList,
+			}
+		}
+	}
+}
+
+func SetLabelAllowListFromManifest(manifest string) {
+	allowListMapping := make(map[string]string)
+	data, err := os.ReadFile(filepath.Clean(manifest))
+	if err != nil {
+		klog.Errorf("Failed to read allow list manifest: %v", err)
+		return
+	}
+	err = yaml.Unmarshal(data, &allowListMapping)
+	if err != nil {
+		klog.Errorf("Failed to parse allow list manifest: %v", err)
+		return
+	}
+	SetLabelAllowListFromCLI(allowListMapping)
+}
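+
+// Illustrative example (not from upstream): the manifest read above is a flat
+// YAML map using the same "metricName,labelName" keys as the
+// --allow-metric-labels flag, e.g.:
+//
+//	"metric1,label1": "v1,v2,v3"
+//	"metric2,label1": "v1,v2,v3"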
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/processstarttime.go b/hack/tools/vendor/k8s.io/component-base/metrics/processstarttime.go
new file mode 100644
index 0000000000..f4b98f8eb0
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/processstarttime.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"time"
+
+	"k8s.io/klog/v2"
+)
+
+var processStartTime = NewGaugeVec(
+	&GaugeOpts{
+		Name:           "process_start_time_seconds",
+		Help:           "Start time of the process since unix epoch in seconds.",
+		StabilityLevel: ALPHA,
+	},
+	[]string{},
+)
+
+// RegisterProcessStartTime registers the process_start_time_seconds to
+// a prometheus registry. This metric needs to be included to ensure counter
+// data fidelity.
+func RegisterProcessStartTime(registrationFunc func(Registerable) error) error {
+	start, err := GetProcessStart()
+	if err != nil {
+		klog.Errorf("Could not get process start time, %v", err)
+		start = float64(time.Now().Unix())
+	}
+	// processStartTime is a lazy metric which only gets initialized after being
+	// registered, so we need to register the metric first and then set its value.
+	if err = registrationFunc(processStartTime); err != nil {
+		return err
+	}
+
+	processStartTime.WithLabelValues().Set(start)
+	return nil
+}
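+
+// Illustrative usage (not part of the upstream file), assuming the legacy
+// registry's Register function is used as the registrationFunc:
+//
+//	if err := RegisterProcessStartTime(legacyregistry.Register); err != nil {
+//		// the metric will be missing from /metrics
+//	}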
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/processstarttime_others.go b/hack/tools/vendor/k8s.io/component-base/metrics/processstarttime_others.go
new file mode 100644
index 0000000000..611a12906b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/processstarttime_others.go
@@ -0,0 +1,39 @@
+//go:build !windows
+// +build !windows
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"os"
+
+	"github.com/prometheus/procfs"
+)
+
+func GetProcessStart() (float64, error) {
+	pid := os.Getpid()
+	p, err := procfs.NewProc(pid)
+	if err != nil {
+		return 0, err
+	}
+
+	if stat, err := p.Stat(); err == nil {
+		return stat.StartTime()
+	}
+	return 0, err
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/processstarttime_windows.go b/hack/tools/vendor/k8s.io/component-base/metrics/processstarttime_windows.go
new file mode 100644
index 0000000000..afee6f9b13
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/processstarttime_windows.go
@@ -0,0 +1,34 @@
+//go:build windows
+// +build windows
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"golang.org/x/sys/windows"
+)
+
+func GetProcessStart() (float64, error) {
+	processHandle := windows.CurrentProcess()
+
+	var creationTime, exitTime, kernelTime, userTime windows.Filetime
+	if err := windows.GetProcessTimes(processHandle, &creationTime, &exitTime, &kernelTime, &userTime); err != nil {
+		return 0, err
+	}
+	return float64(creationTime.Nanoseconds() / 1e9), nil
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/prometheus/feature/metrics.go b/hack/tools/vendor/k8s.io/component-base/metrics/prometheus/feature/metrics.go
new file mode 100644
index 0000000000..416e5eda26
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/prometheus/feature/metrics.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package feature
+
+import (
+	"context"
+
+	k8smetrics "k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+)
+
+var (
+	// featureInfo is a Prometheus Gauge metric used for recording the enablement of a k8s feature.
+	featureInfo = k8smetrics.NewGaugeVec(
+		&k8smetrics.GaugeOpts{
+			Namespace:      "kubernetes",
+			Name:           "feature_enabled",
+			Help:           "This metric records the data about the stage and enablement of a k8s feature.",
+			StabilityLevel: k8smetrics.BETA,
+		},
+		[]string{"name", "stage"},
+	)
+)
+
+func init() {
+	legacyregistry.MustRegister(featureInfo)
+}
+
+func ResetFeatureInfoMetric() {
+	featureInfo.Reset()
+}
+
+func RecordFeatureInfo(ctx context.Context, name string, stage string, enabled bool) {
+	value := 0.0
+	if enabled {
+		value = 1.0
+	}
+	featureInfo.WithContext(ctx).WithLabelValues(name, stage).Set(value)
+}
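+
+// Illustrative usage (not from upstream): recording that a hypothetical
+// feature gate "MyFeature" at stage BETA is enabled.
+//
+//	RecordFeatureInfo(context.Background(), "MyFeature", "BETA", true)
+//	// exposes: kubernetes_feature_enabled{name="MyFeature",stage="BETA"} 1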
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go b/hack/tools/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go
new file mode 100644
index 0000000000..be07977e28
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go
@@ -0,0 +1,189 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package prometheusextension
+
+import (
+	"errors"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+)
+
+// GaugeOps is the part of `prometheus.Gauge` that is relevant to
+// instrumented code.
+// This factoring should be in prometheus, analogous to the way
+// it already factors out the Observer interface for histograms and summaries.
+type GaugeOps interface {
+	// Set is the same as Gauge.Set
+	Set(float64)
+	// Inc is the same as Gauge.Inc
+	Inc()
+	// Dec is the same as Gauge.Dec
+	Dec()
+	// Add is the same as Gauge.Add
+	Add(float64)
+	// Sub is the same as Gauge.Sub
+	Sub(float64)
+
+	// SetToCurrentTime is the same as Gauge.SetToCurrentTime
+	SetToCurrentTime()
+}
+
+// A TimingHistogram tracks how long a `float64` variable spends in
+// ranges defined by buckets.  Time is counted in nanoseconds.  The
+// histogram's sum is the integral over time (in nanoseconds, from
+// creation of the histogram) of the variable's value.
+type TimingHistogram interface {
+	prometheus.Metric
+	prometheus.Collector
+	GaugeOps
+}
+
+// TimingHistogramOpts is the parameters of the TimingHistogram constructor
+type TimingHistogramOpts struct {
+	Namespace   string
+	Subsystem   string
+	Name        string
+	Help        string
+	ConstLabels prometheus.Labels
+
+	// Buckets defines the buckets into which observations are
+	// accumulated. Each element in the slice is the upper
+	// inclusive bound of a bucket. The values must be sorted in
+	// strictly increasing order. There is no need to add a
+	// highest bucket with +Inf bound. The default value is
+	// prometheus.DefBuckets.
+	Buckets []float64
+
+	// The initial value of the variable.
+	InitialValue float64
+}
+
+// NewTimingHistogram creates a new TimingHistogram
+func NewTimingHistogram(opts TimingHistogramOpts) (TimingHistogram, error) {
+	return NewTestableTimingHistogram(time.Now, opts)
+}
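+
+// Illustrative sketch (not from upstream): a TimingHistogram integrates the
+// tracked value over time, so a value accumulates weight (nanoseconds) into
+// its bucket for as long as it remains set. The metric name and buckets below
+// are assumptions for the example.
+//
+//	th, err := NewTimingHistogram(TimingHistogramOpts{
+//		Name:    "queue_depth",
+//		Buckets: []float64{1, 10, 100},
+//	})
+//	if err == nil {
+//		th.Set(5) // weight accrues in the <=10 bucket until the next update
+//	}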
+
+// NewTestableTimingHistogram creates a TimingHistogram that uses a mockable clock
+func NewTestableTimingHistogram(nowFunc func() time.Time, opts TimingHistogramOpts) (TimingHistogram, error) {
+	desc := prometheus.NewDesc(
+		prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		wrapTimingHelp(opts.Help),
+		nil,
+		opts.ConstLabels,
+	)
+	return newTimingHistogram(nowFunc, desc, opts)
+}
+
+func wrapTimingHelp(given string) string {
+	return "EXPERIMENTAL: " + given
+}
+
+func newTimingHistogram(nowFunc func() time.Time, desc *prometheus.Desc, opts TimingHistogramOpts, variableLabelValues ...string) (TimingHistogram, error) {
+	allLabelsM := prometheus.Labels{}
+	allLabelsS := prometheus.MakeLabelPairs(desc, variableLabelValues)
+	for _, pair := range allLabelsS {
+		if pair == nil || pair.Name == nil || pair.Value == nil {
+			return nil, errors.New("prometheus.MakeLabelPairs returned a nil")
+		}
+		allLabelsM[*pair.Name] = *pair.Value
+	}
+	weighted, err := newWeightedHistogram(desc, WeightedHistogramOpts{
+		Namespace:   opts.Namespace,
+		Subsystem:   opts.Subsystem,
+		Name:        opts.Name,
+		Help:        opts.Help,
+		ConstLabels: allLabelsM,
+		Buckets:     opts.Buckets,
+	}, variableLabelValues...)
+	if err != nil {
+		return nil, err
+	}
+	return &timingHistogram{
+		nowFunc:     nowFunc,
+		weighted:    weighted,
+		lastSetTime: nowFunc(),
+		value:       opts.InitialValue,
+	}, nil
+}
+
+type timingHistogram struct {
+	nowFunc  func() time.Time
+	weighted *weightedHistogram
+
+	// The following fields must only be accessed with weighted's lock held
+
+	lastSetTime time.Time // identifies when value was last set
+	value       float64
+}
+
+var _ TimingHistogram = &timingHistogram{}
+
+func (th *timingHistogram) Set(newValue float64) {
+	th.update(func(float64) float64 { return newValue })
+}
+
+func (th *timingHistogram) Inc() {
+	th.update(func(oldValue float64) float64 { return oldValue + 1 })
+}
+
+func (th *timingHistogram) Dec() {
+	th.update(func(oldValue float64) float64 { return oldValue - 1 })
+}
+
+func (th *timingHistogram) Add(delta float64) {
+	th.update(func(oldValue float64) float64 { return oldValue + delta })
+}
+
+func (th *timingHistogram) Sub(delta float64) {
+	th.update(func(oldValue float64) float64 { return oldValue - delta })
+}
+
+func (th *timingHistogram) SetToCurrentTime() {
+	th.update(func(oldValue float64) float64 { return th.nowFunc().Sub(time.Unix(0, 0)).Seconds() })
+}
+
+func (th *timingHistogram) update(updateFn func(float64) float64) {
+	th.weighted.lock.Lock()
+	defer th.weighted.lock.Unlock()
+	now := th.nowFunc()
+	delta := now.Sub(th.lastSetTime)
+	value := th.value
+	if delta > 0 {
+		th.weighted.observeWithWeightLocked(value, uint64(delta))
+		th.lastSetTime = now
+	}
+	th.value = updateFn(value)
+}
+
+func (th *timingHistogram) Desc() *prometheus.Desc {
+	return th.weighted.Desc()
+}
+
+func (th *timingHistogram) Write(dest *dto.Metric) error {
+	th.Add(0) // account for time since last update
+	return th.weighted.Write(dest)
+}
+
+func (th *timingHistogram) Describe(ch chan<- *prometheus.Desc) {
+	ch <- th.weighted.Desc()
+}
+
+func (th *timingHistogram) Collect(ch chan<- prometheus.Metric) {
+	ch <- th
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go b/hack/tools/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go
new file mode 100644
index 0000000000..7af1a45860
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package prometheusextension
+
+import (
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// GaugeVecOps is a bunch of Gauges that have the same
+// Desc and are distinguished by the values for their variable labels.
+type GaugeVecOps interface {
+	GetMetricWith(prometheus.Labels) (GaugeOps, error)
+	GetMetricWithLabelValues(lvs ...string) (GaugeOps, error)
+	With(prometheus.Labels) GaugeOps
+	WithLabelValues(...string) GaugeOps
+	CurryWith(prometheus.Labels) (GaugeVecOps, error)
+	MustCurryWith(prometheus.Labels) GaugeVecOps
+}
+
+type TimingHistogramVec struct {
+	*prometheus.MetricVec
+}
+
+var _ GaugeVecOps = &TimingHistogramVec{}
+var _ prometheus.Collector = &TimingHistogramVec{}
+
+func NewTimingHistogramVec(opts TimingHistogramOpts, labelNames ...string) *TimingHistogramVec {
+	return NewTestableTimingHistogramVec(time.Now, opts, labelNames...)
+}
+
+func NewTestableTimingHistogramVec(nowFunc func() time.Time, opts TimingHistogramOpts, labelNames ...string) *TimingHistogramVec {
+	desc := prometheus.NewDesc(
+		prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		wrapTimingHelp(opts.Help),
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &TimingHistogramVec{
+		MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric {
+			metric, err := newTimingHistogram(nowFunc, desc, opts, lvs...)
+			if err != nil {
+				panic(err) // like in prometheus.newHistogram
+			}
+			return metric
+		}),
+	}
+}
+
+func (hv *TimingHistogramVec) GetMetricWith(labels prometheus.Labels) (GaugeOps, error) {
+	metric, err := hv.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(GaugeOps), err
+	}
+	return nil, err
+}
+
+func (hv *TimingHistogramVec) GetMetricWithLabelValues(lvs ...string) (GaugeOps, error) {
+	metric, err := hv.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(GaugeOps), err
+	}
+	return nil, err
+}
+
+func (hv *TimingHistogramVec) With(labels prometheus.Labels) GaugeOps {
+	h, err := hv.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+func (hv *TimingHistogramVec) WithLabelValues(lvs ...string) GaugeOps {
+	h, err := hv.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+func (hv *TimingHistogramVec) CurryWith(labels prometheus.Labels) (GaugeVecOps, error) {
+	vec, err := hv.MetricVec.CurryWith(labels)
+	if vec != nil {
+		return &TimingHistogramVec{MetricVec: vec}, err
+	}
+	return nil, err
+}
+
+func (hv *TimingHistogramVec) MustCurryWith(labels prometheus.Labels) GaugeVecOps {
+	vec, err := hv.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go b/hack/tools/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go
new file mode 100644
index 0000000000..a060019b25
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package prometheusextension
+
+import (
+	"fmt"
+	"math"
+	"sort"
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+)
+
+// WeightedHistogram generalizes Histogram: each observation has
+// an associated _weight_. For a given `x` and `N`,
+// `1` call on `ObserveWithWeight(x, N)` has the same meaning as
+// `N` calls on `ObserveWithWeight(x, 1)`.
+// The weighted sum might differ slightly due to the use of
+// floating point, although the implementation takes some steps
+// to mitigate that.
+// If every weight were 1,
+// this would be the same as the existing Histogram abstraction.
+type WeightedHistogram interface {
+	prometheus.Metric
+	prometheus.Collector
+	WeightedObserver
+}
+
+// WeightedObserver generalizes the Observer interface.
+type WeightedObserver interface {
+	// Set the variable to the given value with the given weight.
+	ObserveWithWeight(value float64, weight uint64)
+}
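+
+// Illustrative example (not from upstream): per the contract above, these two
+// fragments leave a WeightedHistogram "wh" in the same state (up to floating
+// point rounding of the sum).
+//
+//	wh.ObserveWithWeight(0.25, 4)
+//
+//	for i := 0; i < 4; i++ {
+//		wh.ObserveWithWeight(0.25, 1)
+//	}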
+
+// WeightedHistogramOpts is the same as for an ordinary Histogram
+type WeightedHistogramOpts = prometheus.HistogramOpts
+
+// NewWeightedHistogram creates a new WeightedHistogram
+func NewWeightedHistogram(opts WeightedHistogramOpts) (WeightedHistogram, error) {
+	desc := prometheus.NewDesc(
+		prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		wrapWeightedHelp(opts.Help),
+		nil,
+		opts.ConstLabels,
+	)
+	return newWeightedHistogram(desc, opts)
+}
+
+func wrapWeightedHelp(given string) string {
+	return "EXPERIMENTAL: " + given
+}
+
+func newWeightedHistogram(desc *prometheus.Desc, opts WeightedHistogramOpts, variableLabelValues ...string) (*weightedHistogram, error) {
+	if len(opts.Buckets) == 0 {
+		opts.Buckets = prometheus.DefBuckets
+	}
+
+	for i, upperBound := range opts.Buckets {
+		if i < len(opts.Buckets)-1 {
+			if upperBound >= opts.Buckets[i+1] {
+				return nil, fmt.Errorf(
+					"histogram buckets must be in increasing order: %f >= %f",
+					upperBound, opts.Buckets[i+1],
+				)
+			}
+		} else {
+			if math.IsInf(upperBound, +1) {
+				// The +Inf bucket is implicit. Remove it here.
+				opts.Buckets = opts.Buckets[:i]
+			}
+		}
+	}
+	upperBounds := make([]float64, len(opts.Buckets))
+	copy(upperBounds, opts.Buckets)
+
+	return &weightedHistogram{
+		desc:                desc,
+		variableLabelValues: variableLabelValues,
+		upperBounds:         upperBounds,
+		buckets:             make([]uint64, len(upperBounds)+1),
+		hotCount:            initialHotCount,
+	}, nil
+}
+
+type weightedHistogram struct {
+	desc                *prometheus.Desc
+	variableLabelValues []string
+	upperBounds         []float64 // exclusive of +Inf
+
+	lock sync.Mutex // applies to all the following
+
+	// buckets is longer by one than upperBounds.
+	// For 0 <= idx < len(upperBounds), buckets[idx] holds the
+	// accumulated weight of observations with value <=
+	// upperBounds[idx] but not <= upperBounds[idx-1].
+	// buckets[len(upperBounds)] holds the accumulated
+	// weight of observations that fit in no other bucket.
+	buckets []uint64
+
+	// sumHot + sumCold is the weighted sum of value.
+	// Rather than risk loss of precision in one
+	// float64, we do this sum hierarchically.  Many successive
+	// increments are added into sumHot; once in a while
+	// the magnitude of sumHot is compared to the magnitude
+	// of sumCold and, if the ratio is high enough,
+	// sumHot is transferred into sumCold.
+	sumHot  float64
+	sumCold float64
+
+	transferThreshold float64 // = math.Abs(sumCold) / 2^26 (that's about half of the bits of precision in a float64)
+
+	// hotCount is used to decide when to consider dumping sumHot into sumCold.
+	// hotCount counts upward from initialHotCount to zero.
+	hotCount int
+}
+
+// initialHotCount is the negative of the number of terms
+// that are summed into sumHot before considering whether
+// to transfer to sumCold.  This only has to be big enough
+// to make the extra floating point operations occur in a
+// distinct minority of cases.
+const initialHotCount = -15
+
+var _ WeightedHistogram = &weightedHistogram{}
+var _ prometheus.Metric = &weightedHistogram{}
+var _ prometheus.Collector = &weightedHistogram{}
+
+func (sh *weightedHistogram) ObserveWithWeight(value float64, weight uint64) {
+	idx := sort.SearchFloat64s(sh.upperBounds, value)
+	sh.lock.Lock()
+	defer sh.lock.Unlock()
+	sh.updateLocked(idx, value, weight)
+}
+
+func (sh *weightedHistogram) observeWithWeightLocked(value float64, weight uint64) {
+	idx := sort.SearchFloat64s(sh.upperBounds, value)
+	sh.updateLocked(idx, value, weight)
+}
+
+func (sh *weightedHistogram) updateLocked(idx int, value float64, weight uint64) {
+	sh.buckets[idx] += weight
+	newSumHot := sh.sumHot + float64(weight)*value
+	sh.hotCount++
+	if sh.hotCount >= 0 {
+		sh.hotCount = initialHotCount
+		if math.Abs(newSumHot) > sh.transferThreshold {
+			newSumCold := sh.sumCold + newSumHot
+			sh.sumCold = newSumCold
+			sh.transferThreshold = math.Abs(newSumCold / 67108864)
+			sh.sumHot = 0
+			return
+		}
+	}
+	sh.sumHot = newSumHot
+}
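+
+// Illustrative arithmetic (not from upstream): after a transfer leaves
+// sumCold = 1e6, the next threshold is
+//
+//	transferThreshold = math.Abs(1e6) / 67108864 // 1e6 / 2^26 ≈ 0.0149
+//
+// so small hot increments keep accumulating in sumHot and are only folded
+// into the large cold total once they can no longer be rounded away.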
+
+func (sh *weightedHistogram) Desc() *prometheus.Desc {
+	return sh.desc
+}
+
+func (sh *weightedHistogram) Write(dest *dto.Metric) error {
+	count, sum, buckets := func() (uint64, float64, map[float64]uint64) {
+		sh.lock.Lock()
+		defer sh.lock.Unlock()
+		nBounds := len(sh.upperBounds)
+		buckets := make(map[float64]uint64, nBounds)
+		var count uint64
+		for idx, upperBound := range sh.upperBounds {
+			count += sh.buckets[idx]
+			buckets[upperBound] = count
+		}
+		count += sh.buckets[nBounds]
+		return count, sh.sumHot + sh.sumCold, buckets
+	}()
+	metric, err := prometheus.NewConstHistogram(sh.desc, count, sum, buckets, sh.variableLabelValues...)
+	if err != nil {
+		return err
+	}
+	return metric.Write(dest)
+}
+
+func (sh *weightedHistogram) Describe(ch chan<- *prometheus.Desc) {
+	ch <- sh.desc
+}
+
+func (sh *weightedHistogram) Collect(ch chan<- prometheus.Metric) {
+	ch <- sh
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go b/hack/tools/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go
new file mode 100644
index 0000000000..2ca95f0a7f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package prometheusextension
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// WeightedObserverVec is a bunch of WeightedObservers that have the same
+// Desc and are distinguished by the values for their variable labels.
+type WeightedObserverVec interface {
+	GetMetricWith(prometheus.Labels) (WeightedObserver, error)
+	GetMetricWithLabelValues(lvs ...string) (WeightedObserver, error)
+	With(prometheus.Labels) WeightedObserver
+	WithLabelValues(...string) WeightedObserver
+	CurryWith(prometheus.Labels) (WeightedObserverVec, error)
+	MustCurryWith(prometheus.Labels) WeightedObserverVec
+}
+
+// WeightedHistogramVec implements WeightedObserverVec
+type WeightedHistogramVec struct {
+	*prometheus.MetricVec
+}
+
+var _ WeightedObserverVec = &WeightedHistogramVec{}
+var _ prometheus.Collector = &WeightedHistogramVec{}
+
+func NewWeightedHistogramVec(opts WeightedHistogramOpts, labelNames ...string) *WeightedHistogramVec {
+	desc := prometheus.NewDesc(
+		prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		wrapWeightedHelp(opts.Help),
+		labelNames,
+		opts.ConstLabels,
+	)
+	return &WeightedHistogramVec{
+		MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric {
+			metric, err := newWeightedHistogram(desc, opts, lvs...)
+			if err != nil {
+				panic(err) // like in prometheus.newHistogram
+			}
+			return metric
+		}),
+	}
+}
+
+func (hv *WeightedHistogramVec) GetMetricWith(labels prometheus.Labels) (WeightedObserver, error) {
+	metric, err := hv.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(WeightedObserver), err
+	}
+	return nil, err
+}
+
+func (hv *WeightedHistogramVec) GetMetricWithLabelValues(lvs ...string) (WeightedObserver, error) {
+	metric, err := hv.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(WeightedObserver), err
+	}
+	return nil, err
+}
+
+func (hv *WeightedHistogramVec) With(labels prometheus.Labels) WeightedObserver {
+	h, err := hv.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+func (hv *WeightedHistogramVec) WithLabelValues(lvs ...string) WeightedObserver {
+	h, err := hv.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+func (hv *WeightedHistogramVec) CurryWith(labels prometheus.Labels) (WeightedObserverVec, error) {
+	vec, err := hv.MetricVec.CurryWith(labels)
+	if vec != nil {
+		return &WeightedHistogramVec{MetricVec: vec}, err
+	}
+	return nil, err
+}
+
+func (hv *WeightedHistogramVec) MustCurryWith(labels prometheus.Labels) WeightedObserverVec {
+	vec, err := hv.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/registry.go b/hack/tools/vendor/k8s.io/component-base/metrics/registry.go
new file mode 100644
index 0000000000..203813e814
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/registry.go
@@ -0,0 +1,394 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+
+	apimachineryversion "k8s.io/apimachinery/pkg/version"
+	"k8s.io/component-base/version"
+)
+
+var (
+	showHiddenOnce      sync.Once
+	disabledMetricsLock sync.RWMutex
+	showHidden          atomic.Bool
+	registries          []*kubeRegistry // stores all registries created by NewKubeRegistry()
+	registriesLock      sync.RWMutex
+	disabledMetrics     = map[string]struct{}{}
+
+	registeredMetricsTotal = NewCounterVec(
+		&CounterOpts{
+			Name:           "registered_metrics_total",
+			Help:           "The count of registered metrics broken by stability level and deprecation version.",
+			StabilityLevel: BETA,
+		},
+		[]string{"stability_level", "deprecated_version"},
+	)
+
+	disabledMetricsTotal = NewCounter(
+		&CounterOpts{
+			Name:           "disabled_metrics_total",
+			Help:           "The count of disabled metrics.",
+			StabilityLevel: BETA,
+		},
+	)
+
+	hiddenMetricsTotal = NewCounter(
+		&CounterOpts{
+			Name:           "hidden_metrics_total",
+			Help:           "The count of hidden metrics.",
+			StabilityLevel: BETA,
+		},
+	)
+
+	cardinalityEnforcementUnexpectedCategorizationsTotal = NewCounter(
+		&CounterOpts{
+			Name:           "cardinality_enforcement_unexpected_categorizations_total",
+			Help:           "The count of unexpected categorizations during cardinality enforcement.",
+			StabilityLevel: ALPHA,
+		},
+	)
+)
+
+// shouldHide is used to check whether a specific metric with a deprecated version should be hidden
+// according to metrics deprecation lifecycle.
+func shouldHide(currentVersion *semver.Version, deprecatedVersion *semver.Version) bool {
+	guardVersion, err := semver.Make(fmt.Sprintf("%d.%d.0", currentVersion.Major, currentVersion.Minor))
+	if err != nil {
+		panic("failed to make version from current version")
+	}
+
+	if deprecatedVersion.LT(guardVersion) {
+		return true
+	}
+
+	return false
+}
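+
+// Illustrative example (not from upstream): with a current binary version of
+// 1.30.x, a metric deprecated in 1.29 is past the grace period and hidden,
+// while one deprecated in 1.30 is still shown.
+//
+//	shouldHide(&semver.Version{Major: 1, Minor: 30, Patch: 2},
+//		&semver.Version{Major: 1, Minor: 29}) // true: 1.29.0 < 1.30.0
+//	shouldHide(&semver.Version{Major: 1, Minor: 30, Patch: 2},
+//		&semver.Version{Major: 1, Minor: 30}) // false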
+
+// ValidateShowHiddenMetricsVersion checks that the version for which hidden metrics are shown is valid.
+func ValidateShowHiddenMetricsVersion(v string) []error {
+	err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), v)
+	if err != nil {
+		return []error{err}
+	}
+
+	return nil
+}
+
+func SetDisabledMetric(name string) {
+	disabledMetricsLock.Lock()
+	defer disabledMetricsLock.Unlock()
+	disabledMetrics[name] = struct{}{}
+	disabledMetricsTotal.Inc()
+}
+
+// SetShowHidden will enable showing hidden metrics. This is a no-op
+// after the initial call.
+func SetShowHidden() {
+	showHiddenOnce.Do(func() {
+		showHidden.Store(true)
+
+		// re-register collectors that have been hidden during earlier registration.
+		for _, r := range registries {
+			r.enableHiddenCollectors()
+			r.enableHiddenStableCollectors()
+		}
+	})
+}
+
+// ShouldShowHidden returns whether showing hidden deprecated metrics
+// is enabled. While the primary use case for this is internal (to determine
+// registration behavior), this can also be used to introspect the current state.
+func ShouldShowHidden() bool {
+	return showHidden.Load()
+}
+
+// Registerable is an interface for a collector metric which we
+// will register with KubeRegistry.
+type Registerable interface {
+	prometheus.Collector
+
+	// Create will mark deprecated state for the collector
+	Create(version *semver.Version) bool
+
+	// ClearState will clear all the states marked by Create.
+	ClearState()
+
+	// FQName returns the fully-qualified metric name of the collector.
+	FQName() string
+}
+
+type resettable interface {
+	Reset()
+}
+
+// KubeRegistry is an interface which implements a subset of prometheus.Registerer and
+// prometheus.Gatherer interfaces
+type KubeRegistry interface {
+	// Deprecated
+	RawMustRegister(...prometheus.Collector)
+	// CustomRegister is our internal variant of Prometheus registry.Register
+	CustomRegister(c StableCollector) error
+	// CustomMustRegister is our internal variant of Prometheus registry.MustRegister
+	CustomMustRegister(cs ...StableCollector)
+	// Register conforms to Prometheus registry.Register
+	Register(Registerable) error
+	// MustRegister conforms to Prometheus registry.MustRegister
+	MustRegister(...Registerable)
+	// Unregister conforms to Prometheus registry.Unregister
+	Unregister(collector Collector) bool
+	// Gather conforms to Prometheus gatherer.Gather
+	Gather() ([]*dto.MetricFamily, error)
+	// Reset invokes the Reset() function on all items in the registry
+	// which are added as resettables.
+	Reset()
+	// RegisterMetaMetrics registers metrics about the number of registered metrics.
+	RegisterMetaMetrics()
+	// Registerer exposes the underlying prometheus registerer
+	Registerer() prometheus.Registerer
+	// Gatherer exposes the underlying prometheus gatherer
+	Gatherer() prometheus.Gatherer
+}
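+
+// Illustrative sketch (not part of the upstream file): the typical flow
+// against a KubeRegistry; NewKubeRegistry and "someCounter" are assumed to be
+// defined elsewhere in this package.
+//
+//	r := NewKubeRegistry()
+//	r.MustRegister(someCounter) // hidden metrics are tracked, not registered
+//	families, err := r.Gather()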
+
+// kubeRegistry is a wrapper around a prometheus registry-type object. Upon initialization
+// the kubernetes binary version information is loaded into the registry object, so that
+// automatic behavior can be configured for metric versioning.
+type kubeRegistry struct {
+	PromRegistry
+	version              semver.Version
+	hiddenCollectors     map[string]Registerable // stores all collectors that have been hidden
+	stableCollectors     []StableCollector       // stores all stable collectors
+	hiddenCollectorsLock sync.RWMutex
+	stableCollectorsLock sync.RWMutex
+	resetLock            sync.RWMutex
+	resettables          []resettable
+}
+
+// Register registers a new Collector to be included in metrics
+// collection. It returns an error if the descriptors provided by the
+// Collector are invalid or if they — in combination with descriptors of
+// already registered Collectors — do not fulfill the consistency and
+// uniqueness criteria described in the documentation of metric.Desc.
+func (kr *kubeRegistry) Register(c Registerable) error {
+	if c.Create(&kr.version) {
+		defer kr.addResettable(c)
+		return kr.PromRegistry.Register(c)
+	}
+
+	kr.trackHiddenCollector(c)
+	return nil
+}
+
+// Registerer exposes the underlying prometheus.Registerer
+func (kr *kubeRegistry) Registerer() prometheus.Registerer {
+	return kr.PromRegistry
+}
+
+// Gatherer exposes the underlying prometheus.Gatherer
+func (kr *kubeRegistry) Gatherer() prometheus.Gatherer {
+	return kr.PromRegistry
+}
+
+// MustRegister works like Register but registers any number of
+// Collectors and panics upon the first registration that causes an
+// error.
+func (kr *kubeRegistry) MustRegister(cs ...Registerable) {
+	metrics := make([]prometheus.Collector, 0, len(cs))
+	for _, c := range cs {
+		if c.Create(&kr.version) {
+			metrics = append(metrics, c)
+			kr.addResettable(c)
+		} else {
+			kr.trackHiddenCollector(c)
+		}
+	}
+	kr.PromRegistry.MustRegister(metrics...)
+}
+
+// CustomRegister registers a new custom collector.
+func (kr *kubeRegistry) CustomRegister(c StableCollector) error {
+	kr.trackStableCollectors(c)
+	defer kr.addResettable(c)
+	if c.Create(&kr.version, c) {
+		return kr.PromRegistry.Register(c)
+	}
+	return nil
+}
+
+// CustomMustRegister works like CustomRegister but registers any number of
+// StableCollectors and panics upon the first registration that causes an
+// error.
+func (kr *kubeRegistry) CustomMustRegister(cs ...StableCollector) {
+	kr.trackStableCollectors(cs...)
+	collectors := make([]prometheus.Collector, 0, len(cs))
+	for _, c := range cs {
+		if c.Create(&kr.version, c) {
+			kr.addResettable(c)
+			collectors = append(collectors, c)
+		}
+	}
+	kr.PromRegistry.MustRegister(collectors...)
+}
+
+// RawMustRegister takes a native prometheus.Collector and registers the collector
+// to the registry. This bypasses metrics safety checks, so should only be used
+// to register custom prometheus collectors.
+//
+// Deprecated
+func (kr *kubeRegistry) RawMustRegister(cs ...prometheus.Collector) {
+	kr.PromRegistry.MustRegister(cs...)
+	for _, c := range cs {
+		kr.addResettable(c)
+	}
+}
+
+// addResettable will automatically add our metric to our reset
+// list if it satisfies the interface
+func (kr *kubeRegistry) addResettable(i interface{}) {
+	kr.resetLock.Lock()
+	defer kr.resetLock.Unlock()
+	if resettable, ok := i.(resettable); ok {
+		kr.resettables = append(kr.resettables, resettable)
+	}
+}
+
+// Unregister unregisters the Collector that equals the Collector passed
+// in as an argument.  (Two Collectors are considered equal if their
+// Describe method yields the same set of descriptors.) The function
+// returns whether a Collector was unregistered. Note that an unchecked
+// Collector cannot be unregistered (as its Describe method does not
+// yield any descriptor).
+func (kr *kubeRegistry) Unregister(collector Collector) bool {
+	return kr.PromRegistry.Unregister(collector)
+}
+
+// Gather calls the Collect method of the registered Collectors and then
+// gathers the collected metrics into a lexicographically sorted slice
+// of uniquely named MetricFamily protobufs. Gather ensures that the
+// returned slice is valid and self-consistent so that it can be used
+// for valid exposition. As an exception to the strict consistency
+// requirements described for metric.Desc, Gather will tolerate
+// different sets of label names for metrics of the same metric family.
+func (kr *kubeRegistry) Gather() ([]*dto.MetricFamily, error) {
+	return kr.PromRegistry.Gather()
+}
+
+// trackHiddenCollector records a collector that has been hidden.
+func (kr *kubeRegistry) trackHiddenCollector(c Registerable) {
+	kr.hiddenCollectorsLock.Lock()
+	defer kr.hiddenCollectorsLock.Unlock()
+
+	kr.hiddenCollectors[c.FQName()] = c
+	hiddenMetricsTotal.Inc()
+}
+
+// trackStableCollectors records the given stable collectors.
+func (kr *kubeRegistry) trackStableCollectors(cs ...StableCollector) {
+	kr.stableCollectorsLock.Lock()
+	defer kr.stableCollectorsLock.Unlock()
+
+	kr.stableCollectors = append(kr.stableCollectors, cs...)
+}
+
+// enableHiddenCollectors will re-register all of the hidden collectors.
+func (kr *kubeRegistry) enableHiddenCollectors() {
+	if len(kr.hiddenCollectors) == 0 {
+		return
+	}
+
+	kr.hiddenCollectorsLock.Lock()
+	cs := make([]Registerable, 0, len(kr.hiddenCollectors))
+
+	for _, c := range kr.hiddenCollectors {
+		c.ClearState()
+		cs = append(cs, c)
+	}
+
+	kr.hiddenCollectors = make(map[string]Registerable)
+	kr.hiddenCollectorsLock.Unlock()
+	kr.MustRegister(cs...)
+}
+
+// enableHiddenStableCollectors will re-register the stable collectors that contain one or more hidden metrics.
+// Since we cannot register a metric twice, we have to unregister first and then register again.
+func (kr *kubeRegistry) enableHiddenStableCollectors() {
+	if len(kr.stableCollectors) == 0 {
+		return
+	}
+
+	kr.stableCollectorsLock.Lock()
+
+	cs := make([]StableCollector, 0, len(kr.stableCollectors))
+	for _, c := range kr.stableCollectors {
+		if len(c.HiddenMetrics()) > 0 {
+			kr.Unregister(c) // unregistering must happen before clearing state, otherwise no metrics would be unregistered
+			c.ClearState()
+			cs = append(cs, c)
+		}
+	}
+
+	kr.stableCollectors = nil
+	kr.stableCollectorsLock.Unlock()
+	kr.CustomMustRegister(cs...)
+}
+
+// Reset invokes Reset on all metrics that are resettable.
+func (kr *kubeRegistry) Reset() {
+	kr.resetLock.RLock()
+	defer kr.resetLock.RUnlock()
+	for _, r := range kr.resettables {
+		r.Reset()
+	}
+}
+
+// BuildVersion is a helper function that can be easily mocked.
+var BuildVersion = version.Get
+
+func newKubeRegistry(v apimachineryversion.Info) *kubeRegistry {
+	r := &kubeRegistry{
+		PromRegistry:     prometheus.NewRegistry(),
+		version:          parseVersion(v),
+		hiddenCollectors: make(map[string]Registerable),
+		resettables:      make([]resettable, 0),
+	}
+
+	registriesLock.Lock()
+	defer registriesLock.Unlock()
+	registries = append(registries, r)
+
+	return r
+}
+
+// NewKubeRegistry creates a new vanilla Registry
+func NewKubeRegistry() KubeRegistry {
+	r := newKubeRegistry(BuildVersion())
+	return r
+}
+
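+// Example (illustrative sketch; requestsTotal stands in for any metric built
+// with this package, e.g. via NewCounter):
+//
+//	registry := NewKubeRegistry()
+//	registry.MustRegister(requestsTotal) // hidden metrics are tracked instead of registered
+//	families, err := registry.Gather()
+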
+func (r *kubeRegistry) RegisterMetaMetrics() {
+	r.MustRegister(registeredMetricsTotal)
+	r.MustRegister(disabledMetricsTotal)
+	r.MustRegister(hiddenMetricsTotal)
+	r.MustRegister(cardinalityEnforcementUnexpectedCategorizationsTotal)
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/summary.go b/hack/tools/vendor/k8s.io/component-base/metrics/summary.go
new file mode 100644
index 0000000000..f1af121758
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/summary.go
@@ -0,0 +1,247 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+	"sync"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	DefAgeBuckets = prometheus.DefAgeBuckets
+	DefBufCap     = prometheus.DefBufCap
+	DefMaxAge     = prometheus.DefMaxAge
+)
+
+// Summary is our internal representation for our wrapping struct around prometheus
+// summaries. Summary implements both kubeCollector and ObserverMetric
+//
+// DEPRECATED: as per the metrics overhaul KEP
+type Summary struct {
+	ObserverMetric
+	*SummaryOpts
+	lazyMetric
+	selfCollector
+}
+
+// NewSummary returns an object which is Summary-like. However, nothing
+// will be measured until the summary is registered somewhere.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+func NewSummary(opts *SummaryOpts) *Summary {
+	opts.StabilityLevel.setDefaults()
+
+	s := &Summary{
+		SummaryOpts: opts,
+		lazyMetric:  lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	s.setPrometheusSummary(noopMetric{})
+	s.lazyInit(s, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+	return s
+}
+
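+// Example (illustrative sketch; the opts values are hypothetical):
+//
+//	requestDuration := NewSummary(&SummaryOpts{
+//		Name:           "request_duration_seconds",
+//		Help:           "Duration of requests.",
+//		StabilityLevel: ALPHA,
+//	})
+//	// Observations are dropped until the summary is registered:
+//	requestDuration.Observe(0.42)
+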
+// setPrometheusSummary sets the underlying Summary object, i.e. the thing that does the measurement.
+func (s *Summary) setPrometheusSummary(summary prometheus.Summary) {
+	s.ObserverMetric = summary
+	s.initSelfCollection(summary)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (s *Summary) DeprecatedVersion() *semver.Version {
+	return parseSemver(s.SummaryOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual prometheus.Summary object instantiation
+// and stores a reference to it
+func (s *Summary) initializeMetric() {
+	s.SummaryOpts.annotateStabilityLevel()
+	// this actually creates the underlying prometheus summary.
+	s.setPrometheusSummary(prometheus.NewSummary(s.SummaryOpts.toPromSummaryOpts()))
+}
+
+// initializeDeprecatedMetric invokes the actual prometheus.Summary object instantiation
+// but modifies the Help description prior to object instantiation.
+func (s *Summary) initializeDeprecatedMetric() {
+	s.SummaryOpts.markDeprecated()
+	s.initializeMetric()
+}
+
+// WithContext allows the normal Summary metric to pass in context. The context is currently a no-op.
+func (s *Summary) WithContext(ctx context.Context) ObserverMetric {
+	return s.ObserverMetric
+}
+
+// SummaryVec is the internal representation of our wrapping struct around prometheus
+// summaryVecs.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+type SummaryVec struct {
+	*prometheus.SummaryVec
+	*SummaryOpts
+	lazyMetric
+	originalLabels []string
+}
+
+// NewSummaryVec returns an object which satisfies kubeCollector and wraps the
+// prometheus.SummaryVec object. However, the object returned will not measure
+// anything unless the collector is first registered, since the metric is lazily
+// instantiated, and only members extracted after registration will actually
+// measure anything.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec {
+	opts.StabilityLevel.setDefaults()
+
+	fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+
+	v := &SummaryVec{
+		SummaryOpts:    opts,
+		originalLabels: labels,
+		lazyMetric:     lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	v.lazyInit(v, fqName)
+	return v
+}
+
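+// Example (illustrative sketch; the opts values are hypothetical):
+//
+//	latency := NewSummaryVec(&SummaryOpts{
+//		Name: "handler_latency_seconds",
+//		Help: "Latency by handler.",
+//	}, []string{"handler"})
+//	// Before registration, WithLabelValues returns a permanent noop ObserverMetric:
+//	latency.WithLabelValues("list").Observe(0.03)
+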
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *SummaryVec) DeprecatedVersion() *semver.Version {
+	return parseSemver(v.SummaryOpts.DeprecatedVersion)
+}
+
+func (v *SummaryVec) initializeMetric() {
+	v.SummaryOpts.annotateStabilityLevel()
+	v.SummaryVec = prometheus.NewSummaryVec(v.SummaryOpts.toPromSummaryOpts(), v.originalLabels)
+}
+
+func (v *SummaryVec) initializeDeprecatedMetric() {
+	v.SummaryOpts.markDeprecated()
+	v.initializeMetric()
+}
+
+// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+// if one with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470
+//
+// In contrast, the Vec behavior in this package is that member extraction before registration
+// returns a permanent noop object.
+
+// WithLabelValues returns the ObserverMetric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new ObserverMetric is created IFF the summaryVec
+// has been registered to a metrics registry.
+func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric {
+	if !v.IsCreated() {
+		return noop
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs)
+	} else {
+		v.initializeLabelAllowListsOnce.Do(func() {
+			allowListLock.RLock()
+			if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+				v.LabelValueAllowLists = allowList
+				allowList.ConstrainToAllowedList(v.originalLabels, lvs)
+			}
+			allowListLock.RUnlock()
+		})
+	}
+	return v.SummaryVec.WithLabelValues(lvs...)
+}
+
+// With returns the ObserverMetric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new ObserverMetric is created IFF the summaryVec has
+// been registered to a metrics registry.
+func (v *SummaryVec) With(labels map[string]string) ObserverMetric {
+	if !v.IsCreated() {
+		return noop
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainLabelMap(labels)
+	} else {
+		v.initializeLabelAllowListsOnce.Do(func() {
+			allowListLock.RLock()
+			if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+				v.LabelValueAllowLists = allowList
+				allowList.ConstrainLabelMap(labels)
+			}
+			allowListLock.RUnlock()
+		})
+	}
+	return v.SummaryVec.With(labels)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+func (v *SummaryVec) Delete(labels map[string]string) bool {
+	if !v.IsCreated() {
+		return false // since we haven't created the metric, we haven't deleted a metric with the passed in values
+	}
+	return v.SummaryVec.Delete(labels)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *SummaryVec) Reset() {
+	if !v.IsCreated() {
+		return
+	}
+
+	v.SummaryVec.Reset()
+}
+
+// ResetLabelAllowLists resets the label allow list for the SummaryVec.
+// NOTE: This should only be used in tests.
+func (v *SummaryVec) ResetLabelAllowLists() {
+	v.initializeLabelAllowListsOnce = sync.Once{}
+	v.LabelValueAllowLists = nil
+}
+
+// WithContext returns wrapped SummaryVec with context
+func (v *SummaryVec) WithContext(ctx context.Context) *SummaryVecWithContext {
+	return &SummaryVecWithContext{
+		ctx:        ctx,
+		SummaryVec: v,
+	}
+}
+
+// SummaryVecWithContext is the wrapper of SummaryVec with context.
+type SummaryVecWithContext struct {
+	*SummaryVec
+	ctx context.Context
+}
+
+// WithLabelValues is the wrapper of SummaryVec.WithLabelValues.
+func (vc *SummaryVecWithContext) WithLabelValues(lvs ...string) ObserverMetric {
+	return vc.SummaryVec.WithLabelValues(lvs...)
+}
+
+// With is the wrapper of SummaryVec.With.
+func (vc *SummaryVecWithContext) With(labels map[string]string) ObserverMetric {
+	return vc.SummaryVec.With(labels)
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/timing_histogram.go b/hack/tools/vendor/k8s.io/component-base/metrics/timing_histogram.go
new file mode 100644
index 0000000000..4fc7574739
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/timing_histogram.go
@@ -0,0 +1,291 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/blang/semver/v4"
+	promext "k8s.io/component-base/metrics/prometheusextension"
+)
+
+// PrometheusTimingHistogram is the abstraction of the underlying histogram
+// that we want to promote from the wrapper.
+type PrometheusTimingHistogram interface {
+	GaugeMetric
+}
+
+// TimingHistogram is our internal representation for our wrapping struct around
+// timing histograms. It implements both kubeCollector and GaugeMetric
+type TimingHistogram struct {
+	PrometheusTimingHistogram
+	*TimingHistogramOpts
+	nowFunc func() time.Time
+	lazyMetric
+	selfCollector
+}
+
+var _ GaugeMetric = &TimingHistogram{}
+var _ Registerable = &TimingHistogram{}
+var _ kubeCollector = &TimingHistogram{}
+
+// NewTimingHistogram returns an object which is TimingHistogram-like. However, nothing
+// will be measured until the histogram is registered somewhere.
+func NewTimingHistogram(opts *TimingHistogramOpts) *TimingHistogram {
+	return NewTestableTimingHistogram(time.Now, opts)
+}
+
+// NewTestableTimingHistogram adds injection of the clock
+func NewTestableTimingHistogram(nowFunc func() time.Time, opts *TimingHistogramOpts) *TimingHistogram {
+	opts.StabilityLevel.setDefaults()
+
+	h := &TimingHistogram{
+		TimingHistogramOpts: opts,
+		nowFunc:             nowFunc,
+		lazyMetric:          lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	h.setPrometheusHistogram(noopMetric{})
+	h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+	return h
+}
+
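+// Example (illustrative sketch; the opts values are hypothetical):
+//
+//	inFlight := NewTimingHistogram(&TimingHistogramOpts{
+//		Name:    "inflight_requests",
+//		Help:    "Time-weighted number of in-flight requests.",
+//		Buckets: []float64{0, 1, 2, 4, 8},
+//	})
+//	// Once registered, the gauge-style interface applies:
+//	inFlight.Inc()
+//	defer inFlight.Dec()
+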
+// setPrometheusHistogram sets the underlying timing histogram object, i.e. the thing that does the measurement.
+func (h *TimingHistogram) setPrometheusHistogram(histogram promext.TimingHistogram) {
+	h.PrometheusTimingHistogram = histogram
+	h.initSelfCollection(histogram)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (h *TimingHistogram) DeprecatedVersion() *semver.Version {
+	return parseSemver(h.TimingHistogramOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual timing histogram object instantiation
+// and stores a reference to it
+func (h *TimingHistogram) initializeMetric() {
+	h.TimingHistogramOpts.annotateStabilityLevel()
+	// this actually creates the underlying timing histogram.
+	histogram, err := promext.NewTestableTimingHistogram(h.nowFunc, h.TimingHistogramOpts.toPromHistogramOpts())
+	if err != nil {
+		panic(err) // handle as for regular histograms
+	}
+	h.setPrometheusHistogram(histogram)
+}
+
+// initializeDeprecatedMetric invokes the actual timing histogram object instantiation
+// but modifies the Help description prior to object instantiation.
+func (h *TimingHistogram) initializeDeprecatedMetric() {
+	h.TimingHistogramOpts.markDeprecated()
+	h.initializeMetric()
+}
+
+// WithContext allows the normal TimingHistogram metric to pass in context. The context is currently a no-op.
+func (h *TimingHistogram) WithContext(ctx context.Context) GaugeMetric {
+	return h.PrometheusTimingHistogram
+}
+
+// TimingHistogramVec is the internal representation of our wrapping struct around prometheus
+// TimingHistogramVecs.
+type TimingHistogramVec struct {
+	*promext.TimingHistogramVec
+	*TimingHistogramOpts
+	nowFunc func() time.Time
+	lazyMetric
+	originalLabels []string
+}
+
+var _ GaugeVecMetric = &TimingHistogramVec{}
+var _ Registerable = &TimingHistogramVec{}
+var _ kubeCollector = &TimingHistogramVec{}
+
+// NewTimingHistogramVec returns an object which satisfies the kubeCollector, Registerable, and GaugeVecMetric interfaces
+// and wraps an underlying promext.TimingHistogramVec object. Note that the returned object's
+// behavior depends on registration and on whether it is hidden.
+func NewTimingHistogramVec(opts *TimingHistogramOpts, labels []string) *TimingHistogramVec {
+	return NewTestableTimingHistogramVec(time.Now, opts, labels)
+}
+
+// NewTestableTimingHistogramVec adds injection of the clock.
+func NewTestableTimingHistogramVec(nowFunc func() time.Time, opts *TimingHistogramOpts, labels []string) *TimingHistogramVec {
+	opts.StabilityLevel.setDefaults()
+
+	fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+
+	v := &TimingHistogramVec{
+		TimingHistogramVec:  noopTimingHistogramVec,
+		TimingHistogramOpts: opts,
+		nowFunc:             nowFunc,
+		originalLabels:      labels,
+		lazyMetric:          lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	v.lazyInit(v, fqName)
+	return v
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *TimingHistogramVec) DeprecatedVersion() *semver.Version {
+	return parseSemver(v.TimingHistogramOpts.DeprecatedVersion)
+}
+
+func (v *TimingHistogramVec) initializeMetric() {
+	v.TimingHistogramOpts.annotateStabilityLevel()
+	v.TimingHistogramVec = promext.NewTestableTimingHistogramVec(v.nowFunc, v.TimingHistogramOpts.toPromHistogramOpts(), v.originalLabels...)
+}
+
+func (v *TimingHistogramVec) initializeDeprecatedMetric() {
+	v.TimingHistogramOpts.markDeprecated()
+	v.initializeMetric()
+}
+
+// WithLabelValuesChecked, if called before this vector has been registered in
+// at least one registry, will return a noop gauge and
+// an error that passes ErrIsNotRegistered.
+// If called on a hidden vector,
+// will return a noop gauge and a nil error.
+// If called with a syntactic problem in the labels, will
+// return a noop gauge and an error about the labels.
+// If none of the above apply, this method will return
+// the appropriate vector member and a nil error.
+func (v *TimingHistogramVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, error) {
+	if !v.IsCreated() {
+		if v.IsHidden() {
+			return noop, nil
+		}
+		return noop, errNotRegistered
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs)
+	} else {
+		v.initializeLabelAllowListsOnce.Do(func() {
+			allowListLock.RLock()
+			if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+				v.LabelValueAllowLists = allowList
+				allowList.ConstrainToAllowedList(v.originalLabels, lvs)
+			}
+			allowListLock.RUnlock()
+		})
+	}
+	ops, err := v.TimingHistogramVec.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		return noop, err
+	}
+	return ops.(GaugeMetric), err
+}
+
+// WithLabelValues calls WithLabelValuesChecked
+// and handles errors as follows.
+// An error that passes ErrIsNotRegistered is ignored
+// and the noop gauge is returned;
+// all other errors cause a panic.
+func (v *TimingHistogramVec) WithLabelValues(lvs ...string) GaugeMetric {
+	ans, err := v.WithLabelValuesChecked(lvs...)
+	if err == nil || ErrIsNotRegistered(err) {
+		return ans
+	}
+	panic(err)
+}
+
+// WithChecked, if called before this vector has been registered in
+// at least one registry, will return a noop gauge and
+// an error that passes ErrIsNotRegistered.
+// If called on a hidden vector,
+// will return a noop gauge and a nil error.
+// If called with a syntactic problem in the labels, will
+// return a noop gauge and an error about the labels.
+// If none of the above apply, this method will return
+// the appropriate vector member and a nil error.
+func (v *TimingHistogramVec) WithChecked(labels map[string]string) (GaugeMetric, error) {
+	if !v.IsCreated() {
+		if v.IsHidden() {
+			return noop, nil
+		}
+		return noop, errNotRegistered
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainLabelMap(labels)
+	} else {
+		v.initializeLabelAllowListsOnce.Do(func() {
+			allowListLock.RLock()
+			if allowList, ok := labelValueAllowLists[v.FQName()]; ok {
+				v.LabelValueAllowLists = allowList
+				allowList.ConstrainLabelMap(labels)
+			}
+			allowListLock.RUnlock()
+		})
+	}
+	ops, err := v.TimingHistogramVec.GetMetricWith(labels)
+	if err != nil {
+		// mirror WithLabelValuesChecked: avoid a nil type assertion on error
+		return noop, err
+	}
+	return ops.(GaugeMetric), err
+}
+
+// With calls WithChecked and handles errors as follows.
+// An error that passes ErrIsNotRegistered is ignored
+// and the noop gauge is returned;
+// all other errors cause a panic.
+func (v *TimingHistogramVec) With(labels map[string]string) GaugeMetric {
+	ans, err := v.WithChecked(labels)
+	if err == nil || ErrIsNotRegistered(err) {
+		return ans
+	}
+	panic(err)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+func (v *TimingHistogramVec) Delete(labels map[string]string) bool {
+	if !v.IsCreated() {
+		return false // since we haven't created the metric, we haven't deleted a metric with the passed in values
+	}
+	return v.TimingHistogramVec.Delete(labels)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *TimingHistogramVec) Reset() {
+	if !v.IsCreated() {
+		return
+	}
+
+	v.TimingHistogramVec.Reset()
+}
+
+// ResetLabelAllowLists resets the label allow list for the TimingHistogramVec.
+// NOTE: This should only be used in tests.
+func (v *TimingHistogramVec) ResetLabelAllowLists() {
+	v.initializeLabelAllowListsOnce = sync.Once{}
+	v.LabelValueAllowLists = nil
+}
+
+// WithContext returns wrapped TimingHistogramVec with context
+func (v *TimingHistogramVec) InterfaceWithContext(ctx context.Context) GaugeVecMetric {
+	return &TimingHistogramVecWithContext{
+		ctx:                ctx,
+		TimingHistogramVec: v,
+	}
+}
+
+// TimingHistogramVecWithContext is the wrapper of TimingHistogramVec with context.
+// Currently the context is ignored.
+type TimingHistogramVecWithContext struct {
+	*TimingHistogramVec
+	ctx context.Context
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/value.go b/hack/tools/vendor/k8s.io/component-base/metrics/value.go
new file mode 100644
index 0000000000..4a405048cf
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/value.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+	_ ValueType = iota
+	CounterValue
+	GaugeValue
+	UntypedValue
+)
+
+func (vt *ValueType) toPromValueType() prometheus.ValueType {
+	return prometheus.ValueType(*vt)
+}
+
+// NewLazyConstMetric is a helper for prometheus.MustNewConstMetric.
+//
+// Note: If the metric described by the desc is hidden, the metric will not be created.
+func NewLazyConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+	if desc.IsHidden() {
+		return nil
+	}
+	return prometheus.MustNewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...)
+}
+
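+// Example (illustrative sketch from within a custom collector's
+// CollectWithStability method; desc is assumed to be a *Desc built with NewDesc):
+//
+//	ch <- NewLazyConstMetric(desc, GaugeValue, 1, "some-label-value")
+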
+// NewConstMetric is a helper for prometheus.NewConstMetric.
+//
+// Note: If the metric described by the desc is hidden, the metric will not be created.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+	if desc.IsHidden() {
+		return nil, nil
+	}
+	return prometheus.NewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...)
+}
+
+// NewLazyMetricWithTimestamp is a helper of NewMetricWithTimestamp.
+//
+// Warning: the Metric 'm' must be the one created by NewLazyConstMetric();
+// otherwise, no stability guarantees are offered.
+func NewLazyMetricWithTimestamp(t time.Time, m Metric) Metric {
+	if m == nil {
+		return nil
+	}
+
+	return prometheus.NewMetricWithTimestamp(t, m)
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/version.go b/hack/tools/vendor/k8s.io/component-base/metrics/version.go
new file mode 100644
index 0000000000..f963e205eb
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/version.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import "k8s.io/component-base/version"
+
+var (
+	buildInfo = NewGaugeVec(
+		&GaugeOpts{
+			Name:           "kubernetes_build_info",
+			Help:           "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.",
+			StabilityLevel: ALPHA,
+		},
+		[]string{"major", "minor", "git_version", "git_commit", "git_tree_state", "build_date", "go_version", "compiler", "platform"},
+	)
+)
+
+// RegisterBuildInfo registers the build and version info in a metadata metric in prometheus
+func RegisterBuildInfo(r KubeRegistry) {
+	info := version.Get()
+	r.MustRegister(buildInfo)
+	buildInfo.WithLabelValues(info.Major, info.Minor, info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate, info.GoVersion, info.Compiler, info.Platform).Set(1)
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/version_parser.go b/hack/tools/vendor/k8s.io/component-base/metrics/version_parser.go
new file mode 100644
index 0000000000..102e108e2b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/version_parser.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/blang/semver/v4"
+
+	apimachineryversion "k8s.io/apimachinery/pkg/version"
+)
+
+const (
+	versionRegexpString = `^v(\d+\.\d+\.\d+)`
+)
+
+var (
+	versionRe = regexp.MustCompile(versionRegexpString)
+)
+
+func parseSemver(s string) *semver.Version {
+	if s != "" {
+		sv := semver.MustParse(s)
+		return &sv
+	}
+	return nil
+}
+
+func parseVersion(ver apimachineryversion.Info) semver.Version {
+	matches := versionRe.FindAllStringSubmatch(ver.String(), -1)
+
+	if len(matches) != 1 {
+		panic(fmt.Sprintf("version string \"%v\" doesn't match expected regular expression: \"%v\"", ver.String(), versionRe.String()))
+	}
+	return semver.MustParse(matches[0][1])
+}
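+
+// For illustration: a build whose Info.String() reports "v1.32.2-dirty" parses
+// to the semver 1.32.2, while a version string that does not match the pattern
+// (e.g. "unknown") panics.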
diff --git a/hack/tools/vendor/k8s.io/component-base/metrics/wrappers.go b/hack/tools/vendor/k8s.io/component-base/metrics/wrappers.go
new file mode 100644
index 0000000000..679590aad9
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/metrics/wrappers.go
@@ -0,0 +1,167 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"errors"
+
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+)
+
+// This file contains a series of interfaces which we explicitly define for
+// integrating with prometheus. We redefine the interfaces explicitly here
+// so that we can prevent breakage if methods are ever added to prometheus
+// variants of them.
+
+// Collector defines a subset of prometheus.Collector interface methods
+type Collector interface {
+	Describe(chan<- *prometheus.Desc)
+	Collect(chan<- prometheus.Metric)
+}
+
+// Metric defines a subset of prometheus.Metric interface methods
+type Metric interface {
+	Desc() *prometheus.Desc
+	Write(*dto.Metric) error
+}
+
+// CounterMetric is an interface which defines a subset of the interface
+// provided by prometheus.Counter. It represents a single numerical value that
+// only ever goes up. That implies that it cannot be used to count items whose
+// number can also go down, e.g. the number of currently running goroutines.
+// Those "counters" are represented by Gauges.
+type CounterMetric interface {
+	Inc()
+	Add(float64)
+}
+
+// CounterVecMetric is an interface which prometheus.CounterVec satisfies.
+type CounterVecMetric interface {
+	WithLabelValues(...string) CounterMetric
+	With(prometheus.Labels) CounterMetric
+}
+
+// GaugeMetric is an interface which defines a subset of the interface provided by prometheus.Gauge
+type GaugeMetric interface {
+	Set(float64)
+	Inc()
+	Dec()
+	Add(float64)
+	Write(out *dto.Metric) error
+	SetToCurrentTime()
+}
+
+// GaugeVecMetric is a collection of Gauges that differ only in label values.
+type GaugeVecMetric interface {
+	// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+	// if one with the unique label values is not found in the underlying stored metricMap.
+	// This means that if this function is called but the underlying metric is not registered
+	// (which means it will never be exposed externally nor consumed), the metric would exist in memory
+	// for perpetuity (i.e. throughout application lifecycle).
+	//
+	// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208
+	//
+	// In contrast, the Vec behavior in this package is that member extraction before registration
+	// returns a permanent noop object.
+
+	// WithLabelValuesChecked, if called before this vector has been registered in
+	// at least one registry, will return a noop gauge and
+	// an error that passes ErrIsNotRegistered.
+	// If called on a hidden vector,
+	// will return a noop gauge and a nil error.
+	// If called with a syntactic problem in the labels, will
+	// return a noop gauge and an error about the labels.
+	// If none of the above apply, this method will return
+	// the appropriate vector member and a nil error.
+	WithLabelValuesChecked(labelValues ...string) (GaugeMetric, error)
+
+	// WithLabelValues calls WithLabelValuesChecked
+	// and handles errors as follows.
+	// An error that passes ErrIsNotRegistered is ignored
+	// and the noop gauge is returned;
+	// all other errors cause a panic.
+	WithLabelValues(labelValues ...string) GaugeMetric
+
+	// WithChecked, if called before this vector has been registered in
+	// at least one registry, will return a noop gauge and
+	// an error that passes ErrIsNotRegistered.
+	// If called on a hidden vector,
+	// will return a noop gauge and a nil error.
+	// If called with a syntactic problem in the labels, will
+	// return a noop gauge and an error about the labels.
+	// If none of the above apply, this method will return
+	// the appropriate vector member and a nil error.
+	WithChecked(labels map[string]string) (GaugeMetric, error)
+
+	// With calls WithChecked and handles errors as follows.
+	// An error that passes ErrIsNotRegistered is ignored
+	// and the noop gauge is returned;
+	// all other errors cause a panic.
+	With(labels map[string]string) GaugeMetric
+
+	// Delete asserts that the vec should have no member for the given label set.
+	// The returned bool indicates whether there was a change.
+	// The return will certainly be `false` if the given label set has the wrong
+	// set of label names.
+	Delete(map[string]string) bool
+
+	// Reset removes all the members
+	Reset()
+}
+
+// ObserverMetric captures individual observations.
+type ObserverMetric interface {
+	Observe(float64)
+}
+
+// PromRegistry is an interface which implements a subset of prometheus.Registerer and
+// prometheus.Gatherer interfaces
+type PromRegistry interface {
+	Register(prometheus.Collector) error
+	MustRegister(...prometheus.Collector)
+	Unregister(prometheus.Collector) bool
+	Gather() ([]*dto.MetricFamily, error)
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies.
+type Gatherer interface {
+	prometheus.Gatherer
+}
+
+// Registerer is the interface for the part of a registry in charge of registering
+// the collected metrics.
+type Registerer interface {
+	prometheus.Registerer
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+	Metric
+	Collector
+}
+
+// ErrIsNotRegistered reports whether err indicates that a metric vec has not
+// been registered in any registry yet.
+func ErrIsNotRegistered(err error) bool {
+	return err == errNotRegistered
+}
+
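+// Example (illustrative sketch; vec stands in for any GaugeVecMetric):
+//
+//	g, err := vec.WithLabelValuesChecked("some-value")
+//	if err != nil && !ErrIsNotRegistered(err) {
+//		panic(err) // a real labels problem
+//	}
+//	g.Set(1) // a noop gauge if the vector is not registered yet
+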
+var errNotRegistered = errors.New("metric vec is not registered yet")
diff --git a/hack/tools/vendor/k8s.io/component-base/tracing/OWNERS b/hack/tools/vendor/k8s.io/component-base/tracing/OWNERS
new file mode 100644
index 0000000000..b26e7a4dc7
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/tracing/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - sig-instrumentation-approvers
+reviewers:
+  - sig-instrumentation-reviewers
+labels:
+  - sig/instrumentation
diff --git a/hack/tools/vendor/k8s.io/component-base/tracing/api/v1/config.go b/hack/tools/vendor/k8s.io/component-base/tracing/api/v1/config.go
new file mode 100644
index 0000000000..ae9bbbfc09
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/tracing/api/v1/config.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/component-base/featuregate"
+)
+
+var (
+	maxSamplingRatePerMillion = int32(1000000)
+)
+
+// ValidateTracingConfiguration validates the tracing configuration
+func ValidateTracingConfiguration(traceConfig *TracingConfiguration, featureGate featuregate.FeatureGate, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if traceConfig == nil {
+		return allErrs
+	}
+	if traceConfig.SamplingRatePerMillion != nil {
+		allErrs = append(allErrs, validateSamplingRate(*traceConfig.SamplingRatePerMillion, fldPath.Child("samplingRatePerMillion"))...)
+	}
+	if traceConfig.Endpoint != nil {
+		allErrs = append(allErrs, validateEndpoint(*traceConfig.Endpoint, fldPath.Child("endpoint"))...)
+	}
+	return allErrs
+}
+
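+// Example (illustrative sketch; the endpoint value is hypothetical):
+//
+//	endpoint := "collector.example:4317"
+//	cfg := &TracingConfiguration{Endpoint: &endpoint}
+//	errs := ValidateTracingConfiguration(cfg, nil, field.NewPath("tracing"))
+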
+func validateSamplingRate(rate int32, fldPath *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	if rate < 0 {
+		errs = append(errs, field.Invalid(
+			fldPath, rate,
+			"sampling rate must be positive",
+		))
+	}
+	if rate > maxSamplingRatePerMillion {
+		errs = append(errs, field.Invalid(
+			fldPath, rate,
+			"sampling rate per million must be less than or equal to one million",
+		))
+	}
+	return errs
+}
+
+func validateEndpoint(endpoint string, fldPath *field.Path) field.ErrorList {
+	errs := field.ErrorList{}
+	if !strings.Contains(endpoint, "//") {
+		endpoint = "dns://" + endpoint
+	}
+	url, err := url.Parse(endpoint)
+	if err != nil {
+		errs = append(errs, field.Invalid(
+			fldPath, endpoint,
+			err.Error(),
+		))
+		return errs
+	}
+	switch url.Scheme {
+	case "dns":
+	case "unix":
+	case "unix-abstract":
+	default:
+		errs = append(errs, field.Invalid(
+			fldPath, endpoint,
+			fmt.Sprintf("unsupported scheme: %v.  Options are none, dns, unix, or unix-abstract.  See https://github.com/grpc/grpc/blob/master/doc/naming.md", url.Scheme),
+		))
+	}
+	return errs
+}
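+
+// For illustration: "localhost:4317" contains no "//", so it is validated as
+// "dns://localhost:4317" and accepted, while "https://collector:4317" is
+// rejected with an unsupported-scheme error.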
diff --git a/hack/tools/vendor/k8s.io/component-base/tracing/api/v1/doc.go b/hack/tools/vendor/k8s.io/component-base/tracing/api/v1/doc.go
new file mode 100644
index 0000000000..ffe36aef6f
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/tracing/api/v1/doc.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+// Package v1 contains the configuration API for tracing.
+//
+// The intention is to only have a single version of this API, potentially with
+// new fields added over time in a backwards-compatible manner. Fields for
+// alpha or beta features are allowed as long as they are defined so that not
+// changing the defaults leaves those features disabled.
+//
+// The "v1" package name is just a reminder that API compatibility rules apply,
+// not an indication of the stability of all features covered by it.
+
+package v1 // import "k8s.io/component-base/tracing/api/v1"
diff --git a/hack/tools/vendor/k8s.io/component-base/tracing/api/v1/types.go b/hack/tools/vendor/k8s.io/component-base/tracing/api/v1/types.go
new file mode 100644
index 0000000000..a59d564050
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/tracing/api/v1/types.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// TracingConfiguration provides versioned configuration for OpenTelemetry tracing clients.
+type TracingConfiguration struct {
+	// Endpoint of the collector this component will report traces to.
+	// The connection is insecure, and does not currently support TLS.
+	// The recommended value is unset, in which case the OTLP gRPC default, localhost:4317, is used.
+	// +optional
+	Endpoint *string `json:"endpoint,omitempty"`
+
+	// SamplingRatePerMillion is the number of samples to collect per million spans.
+	// The recommended value is unset. If unset, the sampler respects its parent
+	// span's sampling decision, but otherwise never samples.
+	// +optional
+	SamplingRatePerMillion *int32 `json:"samplingRatePerMillion,omitempty"`
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/tracing/api/v1/zz_generated.deepcopy.go b/hack/tools/vendor/k8s.io/component-base/tracing/api/v1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..2afc68117b
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/tracing/api/v1/zz_generated.deepcopy.go
@@ -0,0 +1,48 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
+	*out = *in
+	if in.Endpoint != nil {
+		in, out := &in.Endpoint, &out.Endpoint
+		*out = new(string)
+		**out = **in
+	}
+	if in.SamplingRatePerMillion != nil {
+		in, out := &in.SamplingRatePerMillion, &out.SamplingRatePerMillion
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
+func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/tracing/tracing.go b/hack/tools/vendor/k8s.io/component-base/tracing/tracing.go
new file mode 100644
index 0000000000..bdf6f377dd
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/tracing/tracing.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tracing
+
+import (
+	"context"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+
+	utiltrace "k8s.io/utils/trace"
+)
+
+const instrumentationScope = "k8s.io/component-base/tracing"
+
+// Start creates spans using both OpenTelemetry and the k8s.io/utils/trace package.
+// It only creates an OpenTelemetry span if the incoming context already includes a span.
+func Start(ctx context.Context, name string, attributes ...attribute.KeyValue) (context.Context, *Span) {
+	// If the incoming context already includes an OpenTelemetry span, create a child span with the provided name and attributes.
+	// If the caller is not using OpenTelemetry, or has tracing disabled (e.g. with a component-specific feature flag), this is a noop.
+	ctx, otelSpan := trace.SpanFromContext(ctx).TracerProvider().Tracer(instrumentationScope).Start(ctx, name, trace.WithAttributes(attributes...))
+	// If there is already a utiltrace span in the context, use that as our parent span.
+	utilSpan := utiltrace.FromContext(ctx).Nest(name, attributesToFields(attributes)...)
+	// Set the trace as active in the context so that subsequent Start calls create nested spans.
+	return utiltrace.ContextWithTrace(ctx, utilSpan), &Span{
+		otelSpan: otelSpan,
+		utilSpan: utilSpan,
+	}
+}
+
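+// Example (illustrative sketch; the span name and attribute are hypothetical):
+//
+//	ctx, span := Start(ctx, "syncNode", attribute.String("node", nodeName))
+//	defer span.End(100 * time.Millisecond) // logged via utiltrace only if slower than 100ms
+//	span.AddEvent("fetched pods")
+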
+// Span is a component of a trace. It represents a single named
+// and timed operation of a workflow being observed.
+// This Span is a combination of an OpenTelemetry and k8s.io/utils/trace span
+// to facilitate the migration to OpenTelemetry.
+type Span struct {
+	otelSpan trace.Span
+	utilSpan *utiltrace.Trace
+}
+
+// AddEvent adds a point-in-time event with a name and attributes.
+func (s *Span) AddEvent(name string, attributes ...attribute.KeyValue) {
+	s.otelSpan.AddEvent(name, trace.WithAttributes(attributes...))
+	if s.utilSpan != nil {
+		s.utilSpan.Step(name, attributesToFields(attributes)...)
+	}
+}
+
+// End ends the span, and logs if the span duration is greater than the logThreshold.
+func (s *Span) End(logThreshold time.Duration) {
+	s.otelSpan.End()
+	if s.utilSpan != nil {
+		s.utilSpan.LogIfLong(logThreshold)
+	}
+}
+
+// RecordError will record err as an exception span event for this span.
+// If this span is not being recorded or err is nil then this method does nothing.
+func (s *Span) RecordError(err error, attributes ...attribute.KeyValue) {
+	s.otelSpan.RecordError(err, trace.WithAttributes(attributes...))
+}
+
+func attributesToFields(attributes []attribute.KeyValue) []utiltrace.Field {
+	fields := make([]utiltrace.Field, len(attributes))
+	for i := range attributes {
+		attr := attributes[i]
+		fields[i] = utiltrace.Field{Key: string(attr.Key), Value: attr.Value.AsInterface()}
+	}
+	return fields
+}
+
+// SpanFromContext returns the *Span from the current context. It is composed of the active
+// OpenTelemetry and k8s.io/utils/trace spans.
+func SpanFromContext(ctx context.Context) *Span {
+	return &Span{
+		otelSpan: trace.SpanFromContext(ctx),
+		utilSpan: utiltrace.FromContext(ctx),
+	}
+}
+
+// ContextWithSpan returns a context with the Span included in the context.
+func ContextWithSpan(ctx context.Context, s *Span) context.Context {
+	return trace.ContextWithSpan(utiltrace.ContextWithTrace(ctx, s.utilSpan), s.otelSpan)
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/tracing/utils.go b/hack/tools/vendor/k8s.io/component-base/tracing/utils.go
new file mode 100644
index 0000000000..dde7a5b28d
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/tracing/utils.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tracing
+
+import (
+	"context"
+	"net/http"
+
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+	oteltrace "go.opentelemetry.io/otel/trace"
+	noopoteltrace "go.opentelemetry.io/otel/trace/noop"
+
+	"k8s.io/client-go/transport"
+	"k8s.io/component-base/tracing/api/v1"
+)
+
+// TracerProvider is an OpenTelemetry TracerProvider which can be shut down
+type TracerProvider interface {
+	oteltrace.TracerProvider
+	Shutdown(context.Context) error
+}
+
+type noopTracerProvider struct {
+	oteltrace.TracerProvider
+}
+
+func (n *noopTracerProvider) Shutdown(context.Context) error {
+	return nil
+}
+
+func NewNoopTracerProvider() TracerProvider {
+	return &noopTracerProvider{TracerProvider: noopoteltrace.NewTracerProvider()}
+}
+
+// NewProvider creates a TracerProvider in a component and enforces recommended tracing behavior.
+func NewProvider(ctx context.Context,
+	tracingConfig *v1.TracingConfiguration,
+	addedOpts []otlptracegrpc.Option,
+	resourceOpts []resource.Option,
+) (TracerProvider, error) {
+	if tracingConfig == nil {
+		return NewNoopTracerProvider(), nil
+	}
+	opts := append([]otlptracegrpc.Option{}, addedOpts...)
+	if tracingConfig.Endpoint != nil {
+		opts = append(opts, otlptracegrpc.WithEndpoint(*tracingConfig.Endpoint))
+	}
+	opts = append(opts, otlptracegrpc.WithInsecure())
+	exporter, err := otlptracegrpc.New(ctx, opts...)
+	if err != nil {
+		return nil, err
+	}
+	res, err := resource.New(ctx, resourceOpts...)
+	if err != nil {
+		return nil, err
+	}
+
+	// sampler respects parent span's sampling rate or
+	// otherwise never samples.
+	sampler := sdktrace.NeverSample()
+	// Or, emit spans for a fraction of transactions
+	if tracingConfig.SamplingRatePerMillion != nil && *tracingConfig.SamplingRatePerMillion > 0 {
+		sampler = sdktrace.TraceIDRatioBased(float64(*tracingConfig.SamplingRatePerMillion) / float64(1000000))
+	}
+	// batch span processor to aggregate spans before export.
+	bsp := sdktrace.NewBatchSpanProcessor(exporter)
+	tp := sdktrace.NewTracerProvider(
+		sdktrace.WithSampler(sdktrace.ParentBased(sampler)),
+		sdktrace.WithSpanProcessor(bsp),
+		sdktrace.WithResource(res),
+	)
+	return tp, nil
+}
+
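+// Example (illustrative sketch; the sampling rate is hypothetical):
+//
+//	rate := int32(10000) // sample roughly 1% of a million spans
+//	tp, err := NewProvider(ctx, &v1.TracingConfiguration{SamplingRatePerMillion: &rate}, nil, nil)
+//	if err == nil {
+//		defer tp.Shutdown(ctx)
+//	}
+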
+// WithTracing adds tracing to requests if the incoming request is sampled
+func WithTracing(handler http.Handler, tp oteltrace.TracerProvider, spanName string) http.Handler {
+	opts := []otelhttp.Option{
+		otelhttp.WithPropagators(Propagators()),
+		otelhttp.WithTracerProvider(tp),
+	}
+	wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Add the http.target attribute to the otelhttp span
+		// Workaround for https://github.com/open-telemetry/opentelemetry-go-contrib/issues/3743
+		if r.URL != nil {
+			oteltrace.SpanFromContext(r.Context()).SetAttributes(semconv.HTTPTarget(r.URL.RequestURI()))
+		}
+		handler.ServeHTTP(w, r)
+	})
+	// With a noop TracerProvider, otelhttp still handles context propagation.
+	// See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough
+	return otelhttp.NewHandler(wrappedHandler, spanName, opts...)
+}
+
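+// Example (illustrative sketch; mux and tp are assumed to exist):
+//
+//	srv := &http.Server{
+//		Addr:    ":8443",
+//		Handler: WithTracing(mux, tp, "HTTPServer"),
+//	}
+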
+// WrapperFor can be used to add tracing to a *rest.Config.
+// Example usage:
+// tp := NewProvider(...)
+// config, _ := rest.InClusterConfig()
+// config.Wrap(WrapperFor(tp))
+// kubeclient, _ := clientset.NewForConfig(config)
+func WrapperFor(tp oteltrace.TracerProvider) transport.WrapperFunc {
+	return func(rt http.RoundTripper) http.RoundTripper {
+		opts := []otelhttp.Option{
+			otelhttp.WithPropagators(Propagators()),
+			otelhttp.WithTracerProvider(tp),
+		}
+		// With a noop TracerProvider, otelhttp still handles context propagation.
+		// See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough
+		return otelhttp.NewTransport(rt, opts...)
+	}
+}
+
+// Propagators returns the recommended set of propagators.
+func Propagators() propagation.TextMapPropagator {
+	return propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})
+}
diff --git a/hack/tools/vendor/k8s.io/component-base/version/OWNERS b/hack/tools/vendor/k8s.io/component-base/version/OWNERS
new file mode 100644
index 0000000000..08c7aea9c7
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/version/OWNERS
@@ -0,0 +1,16 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+# Currently assigned this directory to sig-api-machinery since this is
+# an interface to the version definition in "k8s.io/apimachinery/pkg/version",
+# and also to sig-release since this version information is set up for
+# each release.
+
+approvers:
+  - sig-api-machinery-api-approvers
+  - release-engineering-approvers
+reviewers:
+  - sig-api-machinery-api-reviewers
+  - release-managers
+labels:
+  - sig/api-machinery
+  - sig/release
diff --git a/hack/tools/vendor/k8s.io/component-base/version/base.go b/hack/tools/vendor/k8s.io/component-base/version/base.go
new file mode 100644
index 0000000000..46500118ab
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/version/base.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+// Base version information.
+//
+// This is the fallback data used when version information from git is not
+// provided via go ldflags. It provides an approximation of the Kubernetes
+// version for ad-hoc builds (e.g. `go build`) that cannot get the version
+// information from git.
+//
+// If you are looking at these fields in the git tree, they look
+// strange. They are modified on the fly by the build process. The
+// in-tree values are dummy values used for "git archive", which also
+// works for GitHub tar downloads.
+//
+// When releasing a new Kubernetes version, this file is updated by
+// build/mark_new_version.sh to reflect the new version, and then a
+// git annotated tag (using format vX.Y where X == Major version and Y
+// == Minor version) is created to point to the commit that updates
+// component-base/version/base.go
+var (
+	// TODO: Deprecate gitMajor and gitMinor, use only gitVersion
+	// instead. First step in deprecation, keep the fields but make
+	// them irrelevant. (Next we'll take it out, which may muck with
+	// scripts consuming the kubectl version output - but most of
+	// these should be looking at gitVersion already anyways.)
+	gitMajor string // major version, always numeric
+	gitMinor string // minor version, numeric possibly followed by "+"
+
+	// semantic version, derived by build scripts (see
+	// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md
+	// for a detailed discussion of this field)
+	//
+	// TODO: This field is still called "gitVersion" for legacy
+	// reasons. For prerelease versions, the build metadata on the
+	// semantic version is a git hash, but the version itself is no
+	// longer the direct output of "git describe", but a slight
+	// translation to be semver compliant.
+
+	// NOTE: The $Format strings are replaced during 'git archive' thanks to the
+	// companion .gitattributes file containing 'export-subst' in this same
+	// directory.  See also https://git-scm.com/docs/gitattributes
+	gitVersion   = "v0.0.0-master+$Format:%H$"
+	gitCommit    = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
+	gitTreeState = ""            // state of git tree, either "clean" or "dirty"
+
+	buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+)
+
+const (
+	// DefaultKubeBinaryVersion is the hard-coded k8s binary version based on the latest K8s release.
+	// It is supposed to be consistent with gitMajor and gitMinor, except for local tests, where gitMajor and gitMinor are "".
+	// It should be updated for each minor release!
+	DefaultKubeBinaryVersion = "1.32"
+)
diff --git a/hack/tools/vendor/k8s.io/component-base/version/dynamic.go b/hack/tools/vendor/k8s.io/component-base/version/dynamic.go
new file mode 100644
index 0000000000..46ade9f5ec
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/version/dynamic.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	utilversion "k8s.io/apimachinery/pkg/util/version"
+)
+
+var dynamicGitVersion atomic.Value
+
+func init() {
+	// initialize to static gitVersion
+	dynamicGitVersion.Store(gitVersion)
+}
+
+// SetDynamicVersion overrides the version returned as the GitVersion from Get().
+// The specified version must be non-empty, a valid semantic version, and must
+// match the major/minor/patch version of the default gitVersion.
+func SetDynamicVersion(dynamicVersion string) error {
+	if err := ValidateDynamicVersion(dynamicVersion); err != nil {
+		return err
+	}
+	dynamicGitVersion.Store(dynamicVersion)
+	return nil
+}
+
+// ValidateDynamicVersion ensures the given version is non-empty, a valid semantic version,
+// and matches the major/minor/patch version of the default gitVersion.
+func ValidateDynamicVersion(dynamicVersion string) error {
+	return validateDynamicVersion(dynamicVersion, gitVersion)
+}
+
+func validateDynamicVersion(dynamicVersion, defaultVersion string) error {
+	if len(dynamicVersion) == 0 {
+		return fmt.Errorf("version must not be empty")
+	}
+	if dynamicVersion == defaultVersion {
+		// allow no-op
+		return nil
+	}
+	vRuntime, err := utilversion.ParseSemantic(dynamicVersion)
+	if err != nil {
+		return err
+	}
+	// must match major/minor/patch of default version
+	var vDefault *utilversion.Version
+	if defaultVersion == "v0.0.0-master+$Format:%H$" {
+		// special-case the placeholder value which doesn't parse as a semantic version
+		vDefault, err = utilversion.ParseSemantic("v0.0.0-master")
+	} else {
+		vDefault, err = utilversion.ParseSemantic(defaultVersion)
+	}
+	if err != nil {
+		return err
+	}
+	if vRuntime.Major() != vDefault.Major() || vRuntime.Minor() != vDefault.Minor() || vRuntime.Patch() != vDefault.Patch() {
+		return fmt.Errorf("version %q must match major/minor/patch of default version %q", dynamicVersion, defaultVersion)
+	}
+	return nil
+}
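
A hypothetical in-package test (not part of the diff) pinning down the contract above: a dynamic version may change only the pre-release/build-metadata portion of the compiled-in default.

func TestValidateDynamicVersion(t *testing.T) {
	// Assuming the binary was built with gitVersion "v1.32.1".
	if err := validateDynamicVersion("v1.32.1-custom+abc123", "v1.32.1"); err != nil {
		t.Errorf("only build metadata differs, want nil, got %v", err)
	}
	if err := validateDynamicVersion("v1.31.0", "v1.32.1"); err == nil {
		t.Error("major/minor/patch mismatch, want error")
	}
	if err := validateDynamicVersion("", "v1.32.1"); err == nil {
		t.Error("empty version, want error")
	}
}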
diff --git a/hack/tools/vendor/k8s.io/component-base/version/version.go b/hack/tools/vendor/k8s.io/component-base/version/version.go
new file mode 100644
index 0000000000..99d3685348
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/component-base/version/version.go
@@ -0,0 +1,199 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+	"fmt"
+	"runtime"
+	"sync/atomic"
+
+	"k8s.io/apimachinery/pkg/util/version"
+	apimachineryversion "k8s.io/apimachinery/pkg/version"
+)
+
+type EffectiveVersion interface {
+	BinaryVersion() *version.Version
+	EmulationVersion() *version.Version
+	MinCompatibilityVersion() *version.Version
+	EqualTo(other EffectiveVersion) bool
+	String() string
+	Validate() []error
+}
+
+type MutableEffectiveVersion interface {
+	EffectiveVersion
+	Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version)
+	SetEmulationVersion(emulationVersion *version.Version)
+	SetMinCompatibilityVersion(minCompatibilityVersion *version.Version)
+}
+
+type effectiveVersion struct {
+	// When true, BinaryVersion() returns the current binary version
+	useDefaultBuildBinaryVersion atomic.Bool
+	// Holds the last binary version stored in Set()
+	binaryVersion atomic.Pointer[version.Version]
+	// If the emulationVersion is set by the users, it could only contain major and minor versions.
+	// In tests, emulationVersion could be the same as the binary version, or set directly,
+	// which can have "alpha" as pre-release to continue serving expired apis while we clean up the test.
+	emulationVersion atomic.Pointer[version.Version]
+	// minCompatibilityVersion could only contain major and minor versions.
+	minCompatibilityVersion atomic.Pointer[version.Version]
+}
+
+// Get returns the overall codebase version. It's for detecting
+// what code a binary was built from.
+func Get() apimachineryversion.Info {
+	// These variables typically come from -ldflags settings and in
+	// their absence fallback to the settings in ./base.go
+	return apimachineryversion.Info{
+		Major:        gitMajor,
+		Minor:        gitMinor,
+		GitVersion:   dynamicGitVersion.Load().(string),
+		GitCommit:    gitCommit,
+		GitTreeState: gitTreeState,
+		BuildDate:    buildDate,
+		GoVersion:    runtime.Version(),
+		Compiler:     runtime.Compiler,
+		Platform:     fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
+	}
+}
+
+func (m *effectiveVersion) BinaryVersion() *version.Version {
+	if m.useDefaultBuildBinaryVersion.Load() {
+		return defaultBuildBinaryVersion()
+	}
+	return m.binaryVersion.Load()
+}
+
+func (m *effectiveVersion) EmulationVersion() *version.Version {
+	ver := m.emulationVersion.Load()
+	if ver != nil {
+		// Emulation version can have "alpha" as pre-release to continue serving expired apis while we clean up the test.
+		// The pre-release should not be accessible to the users.
+		return ver.WithPreRelease(m.BinaryVersion().PreRelease())
+	}
+	return ver
+}
+
+func (m *effectiveVersion) MinCompatibilityVersion() *version.Version {
+	return m.minCompatibilityVersion.Load()
+}
+
+func (m *effectiveVersion) EqualTo(other EffectiveVersion) bool {
+	return m.BinaryVersion().EqualTo(other.BinaryVersion()) && m.EmulationVersion().EqualTo(other.EmulationVersion()) && m.MinCompatibilityVersion().EqualTo(other.MinCompatibilityVersion())
+}
+
+func (m *effectiveVersion) String() string {
+	if m == nil {
+		return ""
+	}
+	return fmt.Sprintf("{BinaryVersion: %s, EmulationVersion: %s, MinCompatibilityVersion: %s}",
+		m.BinaryVersion().String(), m.EmulationVersion().String(), m.MinCompatibilityVersion().String())
+}
+
+func majorMinor(ver *version.Version) *version.Version {
+	if ver == nil {
+		return ver
+	}
+	return version.MajorMinor(ver.Major(), ver.Minor())
+}
+
+func (m *effectiveVersion) Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version) {
+	m.binaryVersion.Store(binaryVersion)
+	m.useDefaultBuildBinaryVersion.Store(false)
+	m.emulationVersion.Store(majorMinor(emulationVersion))
+	m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion))
+}
+
+func (m *effectiveVersion) SetEmulationVersion(emulationVersion *version.Version) {
+	m.emulationVersion.Store(majorMinor(emulationVersion))
+}
+
+func (m *effectiveVersion) SetMinCompatibilityVersion(minCompatibilityVersion *version.Version) {
+	m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion))
+}
+
+func (m *effectiveVersion) Validate() []error {
+	var errs []error
+	// Validate only checks the major and minor versions.
+	binaryVersion := m.BinaryVersion().WithPatch(0)
+	emulationVersion := m.emulationVersion.Load()
+	minCompatibilityVersion := m.minCompatibilityVersion.Load()
+
+	// emulationVersion can only be 1.{binaryMinor-1}...1.{binaryMinor}.
+	maxEmuVer := binaryVersion
+	minEmuVer := binaryVersion.SubtractMinor(1)
+	if emulationVersion.GreaterThan(maxEmuVer) || emulationVersion.LessThan(minEmuVer) {
+		errs = append(errs, fmt.Errorf("emulation version %s is not between [%s, %s]", emulationVersion.String(), minEmuVer.String(), maxEmuVer.String()))
+	}
+	// minCompatibilityVersion can only be 1.{binaryMinor-1} for alpha.
+	maxCompVer := binaryVersion.SubtractMinor(1)
+	minCompVer := binaryVersion.SubtractMinor(1)
+	if minCompatibilityVersion.GreaterThan(maxCompVer) || minCompatibilityVersion.LessThan(minCompVer) {
+		errs = append(errs, fmt.Errorf("minCompatibilityVersion version %s is not between [%s, %s]", minCompatibilityVersion.String(), minCompVer.String(), maxCompVer.String()))
+	}
+	return errs
+}
+
+func newEffectiveVersion(binaryVersion *version.Version, useDefaultBuildBinaryVersion bool) MutableEffectiveVersion {
+	effective := &effectiveVersion{}
+	compatVersion := binaryVersion.SubtractMinor(1)
+	effective.Set(binaryVersion, binaryVersion, compatVersion)
+	effective.useDefaultBuildBinaryVersion.Store(useDefaultBuildBinaryVersion)
+	return effective
+}
+
+func NewEffectiveVersion(binaryVer string) MutableEffectiveVersion {
+	if binaryVer == "" {
+		return &effectiveVersion{}
+	}
+	binaryVersion := version.MustParse(binaryVer)
+	return newEffectiveVersion(binaryVersion, false)
+}
+
+func defaultBuildBinaryVersion() *version.Version {
+	verInfo := Get()
+	return version.MustParse(verInfo.String()).WithInfo(verInfo)
+}
+
+// DefaultBuildEffectiveVersion returns the MutableEffectiveVersion based on the
+// current build information.
+func DefaultBuildEffectiveVersion() MutableEffectiveVersion {
+	binaryVersion := defaultBuildBinaryVersion()
+	if binaryVersion.Major() == 0 && binaryVersion.Minor() == 0 {
+		return DefaultKubeEffectiveVersion()
+	}
+	return newEffectiveVersion(binaryVersion, true)
+}
+
+// DefaultKubeEffectiveVersion returns the MutableEffectiveVersion based on the
+// latest K8s release.
+func DefaultKubeEffectiveVersion() MutableEffectiveVersion {
+	binaryVersion := version.MustParse(DefaultKubeBinaryVersion).WithInfo(Get())
+	return newEffectiveVersion(binaryVersion, false)
+}
+
+// ValidateKubeEffectiveVersion validates the EmulationVersion is equal to the binary version at 1.31 for kube components.
+// emulationVersion is introduced in 1.31, so it is only allowed to be equal to the binary version at 1.31.
+func ValidateKubeEffectiveVersion(effectiveVersion EffectiveVersion) error {
+	binaryVersion := version.MajorMinor(effectiveVersion.BinaryVersion().Major(), effectiveVersion.BinaryVersion().Minor())
+	if binaryVersion.EqualTo(version.MajorMinor(1, 31)) && !effectiveVersion.EmulationVersion().EqualTo(binaryVersion) {
+		return fmt.Errorf("emulation version needs to be equal to binary version(%s) in compatibility-version alpha, got %s",
+			binaryVersion.String(), effectiveVersion.EmulationVersion().String())
+	}
+	return nil
+}
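
A compact sketch (not part of the diff) of how the pieces above fit together, assuming the exported API of k8s.io/component-base/version and k8s.io/apimachinery/pkg/util/version as used in this file:

import (
	utilversion "k8s.io/apimachinery/pkg/util/version"
	baseversion "k8s.io/component-base/version"
)

func demoEffectiveVersion() {
	// Binary version 1.32 (DefaultKubeBinaryVersion in base.go).
	ev := baseversion.DefaultKubeEffectiveVersion()

	// Emulating the previous minor is allowed: the range is [1.31, 1.32].
	ev.SetEmulationVersion(utilversion.MajorMinor(1, 31))
	_ = ev.Validate() // no errors: 1.31 is within range

	// Two minors back is rejected by Validate().
	ev.SetEmulationVersion(utilversion.MajorMinor(1, 30))
	_ = ev.Validate() // one error: 1.30 is below [1.31, 1.32]
}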
diff --git a/hack/tools/vendor/k8s.io/gengo/v2/Makefile b/hack/tools/vendor/k8s.io/gengo/v2/Makefile
new file mode 100644
index 0000000000..8d0fbdaa8a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/gengo/v2/Makefile
@@ -0,0 +1,14 @@
+all:
+	go build ./...
+
+test:
+	GODEBUG=gotypesalias=0 go test -race ./... -count=1
+	GODEBUG=gotypesalias=1 go test -race ./... -count=1
+
+# We verify that the maximum allowed go directive is 1.20 here because the
+# oldest go directive that exists on our supported release branches in k/k
+# is 1.20.
+verify:
+	GODEBUG=gotypesalias=0 ./hack/verify-examples.sh
+	GODEBUG=gotypesalias=1 ./hack/verify-examples.sh
+	./hack/verify-go-directive.sh 1.20
diff --git a/hack/tools/vendor/k8s.io/gengo/v2/generator/execute.go b/hack/tools/vendor/k8s.io/gengo/v2/generator/execute.go
index 02b4c3318a..a1e052f5cc 100644
--- a/hack/tools/vendor/k8s.io/gengo/v2/generator/execute.go
+++ b/hack/tools/vendor/k8s.io/gengo/v2/generator/execute.go
@@ -26,6 +26,7 @@ import (
 	"strings"
 
 	"golang.org/x/tools/imports"
+
 	"k8s.io/gengo/v2/namer"
 	"k8s.io/gengo/v2/types"
 	"k8s.io/klog/v2"
@@ -114,7 +115,13 @@ func assembleGoFile(w io.Writer, f *File) {
 }
 
 func importsWrapper(src []byte) ([]byte, error) {
-	return imports.Process("", src, nil)
+	opt := imports.Options{
+		Comments:   true,
+		TabIndent:  true,
+		TabWidth:   8,
+		FormatOnly: true, // Disable the insertion and deletion of imports
+	}
+	return imports.Process("", src, &opt)
 }
 
 func NewGoFile() *DefaultFileType {
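
The FormatOnly option makes x/tools/imports behave like plain gofmt: it formats the assembled file but no longer inserts or deletes import statements, so the emitted import block stays exactly what gengo's import tracker produced. A sketch (not part of the diff) of the same call outside the generator:

src := []byte("package demo\n\nimport \"fmt\"\n\nfunc f() { fmt.Println(42) }\n")
formatted, err := imports.Process("", src, &imports.Options{
	Comments:   true,
	TabIndent:  true,
	TabWidth:   8,
	FormatOnly: true, // format only; never touch the import list
})
if err != nil {
	// handle the formatting error
}
_ = formatted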
diff --git a/hack/tools/vendor/k8s.io/gengo/v2/generator/import_tracker.go b/hack/tools/vendor/k8s.io/gengo/v2/generator/import_tracker.go
index 70b86cf56b..22393e4d49 100644
--- a/hack/tools/vendor/k8s.io/gengo/v2/generator/import_tracker.go
+++ b/hack/tools/vendor/k8s.io/gengo/v2/generator/import_tracker.go
@@ -18,6 +18,7 @@ package generator
 
 import (
 	"go/token"
+	"path/filepath"
 	"strings"
 
 	"k8s.io/klog/v2"
@@ -45,7 +46,7 @@ import (
 func NewImportTrackerForPackage(local string, typesToAdd ...*types.Type) *namer.DefaultImportTracker {
 	tracker := namer.NewDefaultImportTracker(types.Name{Package: local})
 	tracker.IsInvalidType = func(*types.Type) bool { return false }
-	tracker.LocalName = func(name types.Name) string { return goTrackerLocalName(&tracker, name) }
+	tracker.LocalName = func(name types.Name) string { return goTrackerLocalName(&tracker, local, name) }
 	tracker.PrintImport = func(path, name string) string { return name + " \"" + path + "\"" }
 
 	tracker.AddTypes(typesToAdd...)
@@ -56,7 +57,7 @@ func NewImportTracker(typesToAdd ...*types.Type) *namer.DefaultImportTracker {
 	return NewImportTrackerForPackage("", typesToAdd...)
 }
 
-func goTrackerLocalName(tracker namer.ImportTracker, t types.Name) string {
+func goTrackerLocalName(tracker namer.ImportTracker, localPkg string, t types.Name) string {
 	path := t.Package
 
 	// Using backslashes in package names causes gengo to produce Go code which
@@ -64,6 +65,7 @@ func goTrackerLocalName(tracker namer.ImportTracker, t types.Name) string {
 	if strings.ContainsRune(path, '\\') {
 		klog.Warningf("Warning: backslash used in import path '%v', this is unsupported.\n", path)
 	}
+	localLeaf := filepath.Base(localPkg)
 
 	dirs := strings.Split(path, namer.GoSeperator)
 	for n := len(dirs) - 1; n >= 0; n-- {
@@ -74,8 +76,13 @@ func goTrackerLocalName(tracker namer.ImportTracker, t types.Name) string {
 		// packages, but aren't legal go names. So we'll sanitize.
 		name = strings.ReplaceAll(name, ".", "")
 		name = strings.ReplaceAll(name, "-", "")
-		if _, found := tracker.PathOf(name); found {
-			// This name collides with some other package
+		if _, found := tracker.PathOf(name); found || name == localLeaf {
+			// This name collides with some other package.
+			// Or, this name is the same name as the local package,
+			// which we avoid because it can be confusing. For example,
+			// if the local package is v1, we want to avoid importing
+			// another package using the v1 name, and instead import
+			// it with a more qualified name, such as metav1.
 			continue
 		}
 
diff --git a/hack/tools/vendor/k8s.io/gengo/v2/generator/snippet_writer.go b/hack/tools/vendor/k8s.io/gengo/v2/generator/snippet_writer.go
index 7f4610c00f..550214e163 100644
--- a/hack/tools/vendor/k8s.io/gengo/v2/generator/snippet_writer.go
+++ b/hack/tools/vendor/k8s.io/gengo/v2/generator/snippet_writer.go
@@ -123,25 +123,28 @@ func (s *SnippetWriter) Do(format string, args interface{}) *SnippetWriter {
 // SnippetWriter.Do.
 type Args map[interface{}]interface{}
 
-// With makes a copy of a and adds the given key, value pair.
+// With makes a copy of a and adds the given key, value pair. If key overlaps,
+// the new value wins.
 func (a Args) With(key, value interface{}) Args {
-	a2 := Args{key: value}
+	result := Args{}
 	for k, v := range a {
-		a2[k] = v
+		result[k] = v
 	}
-	return a2
+	result[key] = value
+	return result
 }
 
-// WithArgs makes a copy of a and adds the given arguments.
+// WithArgs makes a copy of a and adds the given arguments. If any keys
+// overlap, the values from rhs win.
 func (a Args) WithArgs(rhs Args) Args {
-	a2 := Args{}
-	for k, v := range rhs {
-		a2[k] = v
-	}
+	result := Args{}
 	for k, v := range a {
-		a2[k] = v
+		result[k] = v
+	}
+	for k, v := range rhs {
+		result[k] = v
 	}
-	return a2
+	return result
 }
 
 func (s *SnippetWriter) Out() io.Writer {
@@ -152,3 +155,16 @@ func (s *SnippetWriter) Out() io.Writer {
 func (s *SnippetWriter) Error() error {
 	return s.err
 }
+
+// Dup creates an exact duplicate SnippetWriter with a different io.Writer.
+func (s *SnippetWriter) Dup(w io.Writer) *SnippetWriter {
+	ret := *s
+	ret.w = w
+	return &ret
+}
+
+// Append adds the contents of the io.Reader to this SnippetWriter's buffer.
+func (s *SnippetWriter) Append(r io.Reader) error {
+	_, err := io.Copy(s.w, r)
+	return err
+}
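
The rewritten With/WithArgs above flip the precedence: the receiver's entries used to shadow the arguments, whereas now the arguments win, matching the documented "new value wins" behavior. A small sketch (not part of the diff), assuming an import of k8s.io/gengo/v2/generator:

base := generator.Args{"type": "Foo", "package": "v1"}

merged := base.WithArgs(generator.Args{"package": "v1beta1"})
// merged["package"] == "v1beta1"; before this change the receiver's "v1"
// would have survived the merge.

one := base.With("package", "v2")
// one["package"] == "v2"; base itself is left unchanged in both cases.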
diff --git a/hack/tools/vendor/k8s.io/gengo/v2/namer/namer.go b/hack/tools/vendor/k8s.io/gengo/v2/namer/namer.go
index e82fe66adc..37877eb492 100644
--- a/hack/tools/vendor/k8s.io/gengo/v2/namer/namer.go
+++ b/hack/tools/vendor/k8s.io/gengo/v2/namer/namer.go
@@ -281,12 +281,12 @@ func (ns *NameStrategy) Name(t *types.Type) string {
 	case types.Func:
 		// TODO: add to name test
 		parts := []string{"Func"}
-		for _, pt := range t.Signature.Parameters {
-			parts = append(parts, ns.removePrefixAndSuffix(ns.Name(pt)))
+		for _, param := range t.Signature.Parameters {
+			parts = append(parts, ns.removePrefixAndSuffix(ns.Name(param.Type)))
 		}
 		parts = append(parts, "Returns")
-		for _, rt := range t.Signature.Results {
-			parts = append(parts, ns.removePrefixAndSuffix(ns.Name(rt)))
+		for _, result := range t.Signature.Results {
+			parts = append(parts, ns.removePrefixAndSuffix(ns.Name(result.Type)))
 		}
 		name = ns.Join(ns.Prefix, parts, ns.Suffix)
 	default:
@@ -374,12 +374,12 @@ func (r *rawNamer) Name(t *types.Type) string {
 	case types.Func:
 		// TODO: add to name test
 		params := []string{}
-		for _, pt := range t.Signature.Parameters {
-			params = append(params, r.Name(pt))
+		for _, param := range t.Signature.Parameters {
+			params = append(params, r.Name(param.Type))
 		}
 		results := []string{}
-		for _, rt := range t.Signature.Results {
-			results = append(results, r.Name(rt))
+		for _, result := range t.Signature.Results {
+			results = append(results, r.Name(result.Type))
 		}
 		name = "func(" + strings.Join(params, ",") + ")"
 		if len(results) == 1 {
diff --git a/hack/tools/vendor/k8s.io/gengo/v2/parser/parse.go b/hack/tools/vendor/k8s.io/gengo/v2/parser/parse.go
index a5993d1639..d4de19e769 100644
--- a/hack/tools/vendor/k8s.io/gengo/v2/parser/parse.go
+++ b/hack/tools/vendor/k8s.io/gengo/v2/parser/parse.go
@@ -29,6 +29,7 @@ import (
 	"time"
 
 	"golang.org/x/tools/go/packages"
+
 	"k8s.io/gengo/v2/types"
 	"k8s.io/klog/v2"
 )
@@ -572,6 +573,9 @@ func goVarNameToName(in string) types.Name {
 	return goNameToName(nameParts[1])
 }
 
+// goNameToName converts a go name string to a gengo types.Name.
+// It operates solely on the string, on a best-effort basis. The name may be
+// updated in walkType for generics.
 func goNameToName(in string) types.Name {
 	// Detect anonymous type names. (These may have '.' characters because
 	// embedded types may have packages, so we detect them specially.)
@@ -587,14 +591,25 @@ func goNameToName(in string) types.Name {
 		return types.Name{Name: in}
 	}
 
+	// There may be '.' characters within a generic. Temporarily remove
+	// the generic.
+	genericIndex := strings.IndexRune(in, '[')
+	if genericIndex == -1 {
+		genericIndex = len(in)
+	}
+
 	// Otherwise, if there are '.' characters present, the name has a
 	// package path in front.
-	nameParts := strings.Split(in, ".")
+	nameParts := strings.Split(in[:genericIndex], ".")
 	name := types.Name{Name: in}
 	if n := len(nameParts); n >= 2 {
 		// The final "." is the name of the type--previous ones must
 		// have been in the package path.
 		name.Package, name.Name = strings.Join(nameParts[:n-1], "."), nameParts[n-1]
+		// Add back the generic component now that the package and type name have been separated.
+		if genericIndex != len(in) {
+			name.Name = name.Name + in[genericIndex:]
+		}
 	}
 	return name
 }
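
A hypothetical in-package test (not part of the diff) showing the effect of the bracket handling above: the type arguments are skipped while locating the package/name split, then re-attached to the name.

func TestGoNameToNameGenerics(t *testing.T) {
	got := goNameToName("k8s.io/foo/v1.List[k8s.io/apimachinery/pkg/runtime.Object]")
	want := types.Name{
		Package: "k8s.io/foo/v1",
		Name:    "List[k8s.io/apimachinery/pkg/runtime.Object]",
	}
	if got != want {
		t.Errorf("goNameToName() = %#v, want %#v", got, want)
	}
}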
@@ -602,12 +617,16 @@ func goNameToName(in string) types.Name {
 func (p *Parser) convertSignature(u types.Universe, t *gotypes.Signature) *types.Signature {
 	signature := &types.Signature{}
 	for i := 0; i < t.Params().Len(); i++ {
-		signature.Parameters = append(signature.Parameters, p.walkType(u, nil, t.Params().At(i).Type()))
-		signature.ParameterNames = append(signature.ParameterNames, t.Params().At(i).Name())
+		signature.Parameters = append(signature.Parameters, &types.ParamResult{
+			Name: t.Params().At(i).Name(),
+			Type: p.walkType(u, nil, t.Params().At(i).Type()),
+		})
 	}
 	for i := 0; i < t.Results().Len(); i++ {
-		signature.Results = append(signature.Results, p.walkType(u, nil, t.Results().At(i).Type()))
-		signature.ResultNames = append(signature.ResultNames, t.Results().At(i).Name())
+		signature.Results = append(signature.Results, &types.ParamResult{
+			Name: t.Results().At(i).Name(),
+			Type: p.walkType(u, nil, t.Results().At(i).Type()),
+		})
 	}
 	if r := t.Recv(); r != nil {
 		signature.Receiver = p.walkType(u, nil, r.Type())
@@ -624,6 +643,12 @@ func (p *Parser) walkType(u types.Universe, useName *types.Name, in gotypes.Type
 		name = *useName
 	}
 
+	// Handle alias types conditionally on go1.22+.
+	// Inline this once the minimum supported version is go1.22
+	if out := p.walkAliasType(u, in); out != nil {
+		return out
+	}
+
 	switch t := in.(type) {
 	case *gotypes.Struct:
 		out := u.Type(name)
@@ -734,6 +759,27 @@ func (p *Parser) walkType(u types.Universe, useName *types.Name, in gotypes.Type
 			}
 			out.Kind = types.Alias
 			out.Underlying = p.walkType(u, nil, t.Underlying())
+		case *gotypes.Struct, *gotypes.Interface:
+			name := goNameToName(t.String())
+			tpMap := map[string]*types.Type{}
+			if t.TypeParams().Len() != 0 {
+				// Remove generics, then re-add them without the encoded
+				// type, e.g. Foo[T any] => Foo[T]
+				var tpNames []string
+				for i := 0; i < t.TypeParams().Len(); i++ {
+					tp := t.TypeParams().At(i)
+					tpName := tp.Obj().Name()
+					tpNames = append(tpNames, tpName)
+					tpMap[tpName] = p.walkType(u, nil, tp.Constraint())
+				}
+				name.Name = fmt.Sprintf("%s[%s]", strings.SplitN(name.Name, "[", 2)[0], strings.Join(tpNames, ","))
+			}
+
+			if out := u.Type(name); out.Kind != types.Unknown {
+				return out // short circuit if we've already made this.
+			}
+			out = p.walkType(u, &name, t.Underlying())
+			out.TypeParams = tpMap
 		default:
 			// gotypes package makes everything "named" with an
 			// underlying anonymous type--we remove that annoying
@@ -760,6 +806,15 @@ func (p *Parser) walkType(u types.Universe, useName *types.Name, in gotypes.Type
 			}
 		}
 		return out
+	case *gotypes.TypeParam:
+		// DO NOT retrieve the type from the universe. The default type-param name is only the
+		// generic variable name. Ideally, it would be namespaced by package and struct but it is
+		// not. Thus, if we try to use the universe, we would start polluting it.
+		// e.g. if Foo[T] and Bar[T] exists, we'd mistakenly use the same type T for both.
+		return &types.Type{
+			Name: name,
+			Kind: types.TypeParam,
+		}
 	default:
 		out := u.Type(name)
 		if out.Kind != types.Unknown {
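
For concreteness, a hedged example (not part of the diff) of the naming scheme used above for generic structs and interfaces:

// Given a declaration such as:
//
//   type Pair[K comparable, V any] struct{ Key K; Value V }
//
// walkType registers it in the universe under the name "Pair[K,V]" (type
// parameters kept, constraints stripped), and records the constraints in
// out.TypeParams, e.g. {"K": <comparable>, "V": <any>}.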
diff --git a/hack/tools/vendor/k8s.io/gengo/v2/parser/parse_122.go b/hack/tools/vendor/k8s.io/gengo/v2/parser/parse_122.go
new file mode 100644
index 0000000000..ec2064958a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/gengo/v2/parser/parse_122.go
@@ -0,0 +1,33 @@
+//go:build go1.22
+// +build go1.22
+
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package parser
+
+import (
+	gotypes "go/types"
+
+	"k8s.io/gengo/v2/types"
+)
+
+func (p *Parser) walkAliasType(u types.Universe, in gotypes.Type) *types.Type {
+	if t, isAlias := in.(*gotypes.Alias); isAlias {
+		return p.walkType(u, nil, gotypes.Unalias(t))
+	}
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/gengo/v2/parser/parse_pre_122.go b/hack/tools/vendor/k8s.io/gengo/v2/parser/parse_pre_122.go
new file mode 100644
index 0000000000..6f62100c0a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/gengo/v2/parser/parse_pre_122.go
@@ -0,0 +1,30 @@
+//go:build !go1.22
+// +build !go1.22
+
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+	http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package parser
+
+import (
+	gotypes "go/types"
+
+	"k8s.io/gengo/v2/types"
+)
+
+func (p *Parser) walkAliasType(u types.Universe, in gotypes.Type) *types.Type {
+	return nil
+}
diff --git a/hack/tools/vendor/k8s.io/gengo/v2/types/types.go b/hack/tools/vendor/k8s.io/gengo/v2/types/types.go
index e9c8319c65..7bbca01733 100644
--- a/hack/tools/vendor/k8s.io/gengo/v2/types/types.go
+++ b/hack/tools/vendor/k8s.io/gengo/v2/types/types.go
@@ -98,6 +98,7 @@ const (
 	DeclarationOf Kind = "DeclarationOf"
 	Unknown       Kind = ""
 	Unsupported   Kind = "Unsupported"
+	TypeParam     Kind = "TypeParam"
 
 	// Protobuf is protobuf type.
 	Protobuf Kind = "Protobuf"
@@ -324,6 +325,9 @@ type Type struct {
 	// If Kind == Struct
 	Members []Member
 
+	// If Kind == Struct
+	TypeParams map[string]*Type
+
 	// If Kind == Map, Slice, Pointer, or Chan
 	Elem *Type
 
@@ -423,14 +427,20 @@ func (m Member) String() string {
 	return m.Name + " " + m.Type.String()
 }
 
+// ParamResult represents a parameter or a result of a method's signature.
+type ParamResult struct {
+	// The name of the parameter or result.
+	Name string
+	// The type of this parameter or result.
+	Type *Type
+}
+
 // Signature is a function's signature.
 type Signature struct {
 	// If a method of some type, this is the type it's a member of.
-	Receiver       *Type
-	Parameters     []*Type
-	ParameterNames []string
-	Results        []*Type
-	ResultNames    []string
+	Receiver   *Type
+	Parameters []*ParamResult
+	Results    []*ParamResult
 
 	// True if the last in parameter is of the form ...T.
 	Variadic bool
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/markers.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/markers.go
index 7f0fe985a6..c4dd67d3be 100644
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/markers.go
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/markers.go
@@ -75,7 +75,29 @@ func (c *CELTag) Validate() error {
 // - +listMapKeys
 // - +mapType
 type commentTags struct {
-	spec.SchemaProps
+	Nullable         *bool         `json:"nullable,omitempty"`
+	Format           *string       `json:"format,omitempty"`
+	Maximum          *float64      `json:"maximum,omitempty"`
+	ExclusiveMaximum *bool         `json:"exclusiveMaximum,omitempty"`
+	Minimum          *float64      `json:"minimum,omitempty"`
+	ExclusiveMinimum *bool         `json:"exclusiveMinimum,omitempty"`
+	MaxLength        *int64        `json:"maxLength,omitempty"`
+	MinLength        *int64        `json:"minLength,omitempty"`
+	Pattern          *string       `json:"pattern,omitempty"`
+	MaxItems         *int64        `json:"maxItems,omitempty"`
+	MinItems         *int64        `json:"minItems,omitempty"`
+	UniqueItems      *bool         `json:"uniqueItems,omitempty"`
+	MultipleOf       *float64      `json:"multipleOf,omitempty"`
+	Enum             []interface{} `json:"enum,omitempty"`
+	MaxProperties    *int64        `json:"maxProperties,omitempty"`
+	MinProperties    *int64        `json:"minProperties,omitempty"`
+
+	// Nested commentTags for extending the schemas of subfields at point-of-use
+	// when you cant annotate them directly. Cannot be used to add properties
+	// or remove validations on the overridden schema.
+	Items                *commentTags            `json:"items,omitempty"`
+	Properties           map[string]*commentTags `json:"properties,omitempty"`
+	AdditionalProperties *commentTags            `json:"additionalProperties,omitempty"`
 
 	CEL []CELTag `json:"cel,omitempty"`
 
@@ -86,9 +108,75 @@ type commentTags struct {
 
 // Returns the schema for the given CommentTags instance.
 // This is the final authoritative schema for the comment tags
-func (c commentTags) ValidationSchema() (*spec.Schema, error) {
+func (c *commentTags) ValidationSchema() (*spec.Schema, error) {
+	if c == nil {
+		return nil, nil
+	}
+
+	isNullable := c.Nullable != nil && *c.Nullable
+	format := ""
+	if c.Format != nil {
+		format = *c.Format
+	}
+	isExclusiveMaximum := c.ExclusiveMaximum != nil && *c.ExclusiveMaximum
+	isExclusiveMinimum := c.ExclusiveMinimum != nil && *c.ExclusiveMinimum
+	isUniqueItems := c.UniqueItems != nil && *c.UniqueItems
+	pattern := ""
+	if c.Pattern != nil {
+		pattern = *c.Pattern
+	}
+
+	var transformedItems *spec.SchemaOrArray
+	var transformedProperties map[string]spec.Schema
+	var transformedAdditionalProperties *spec.SchemaOrBool
+
+	if c.Items != nil {
+		items, err := c.Items.ValidationSchema()
+		if err != nil {
+			return nil, fmt.Errorf("failed to transform items: %w", err)
+		}
+		transformedItems = &spec.SchemaOrArray{Schema: items}
+	}
+
+	if c.Properties != nil {
+		properties := make(map[string]spec.Schema)
+		for key, value := range c.Properties {
+			property, err := value.ValidationSchema()
+			if err != nil {
+				return nil, fmt.Errorf("failed to transform property %q: %w", key, err)
+			}
+			properties[key] = *property
+		}
+		transformedProperties = properties
+	}
+
+	if c.AdditionalProperties != nil {
+		additionalProperties, err := c.AdditionalProperties.ValidationSchema()
+		if err != nil {
+			return nil, fmt.Errorf("failed to transform additionalProperties: %w", err)
+		}
+		transformedAdditionalProperties = &spec.SchemaOrBool{Schema: additionalProperties, Allows: true}
+	}
+
 	res := spec.Schema{
-		SchemaProps: c.SchemaProps,
+		SchemaProps: spec.SchemaProps{
+			Nullable:         isNullable,
+			Format:           format,
+			Maximum:          c.Maximum,
+			ExclusiveMaximum: isExclusiveMaximum,
+			Minimum:          c.Minimum,
+			ExclusiveMinimum: isExclusiveMinimum,
+			MaxLength:        c.MaxLength,
+			MinLength:        c.MinLength,
+			Pattern:          pattern,
+			MaxItems:         c.MaxItems,
+			MinItems:         c.MinItems,
+			UniqueItems:      isUniqueItems,
+			MultipleOf:       c.MultipleOf,
+			Enum:             c.Enum,
+			MaxProperties:    c.MaxProperties,
+			MinProperties:    c.MinProperties,
+		},
 	}
 
 	if len(c.CEL) > 0 {
@@ -105,6 +193,18 @@ func (c commentTags) ValidationSchema() (*spec.Schema, error) {
 		res.VendorExtensible.AddExtension("x-kubernetes-validations", celTagMap)
 	}
 
+	// Don't add structural properties directly to this schema. This schema
+	// is used only for validation.
+	if transformedItems != nil || len(transformedProperties) > 0 || transformedAdditionalProperties != nil {
+		res.AllOf = append(res.AllOf, spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Items:                transformedItems,
+				Properties:           transformedProperties,
+				AdditionalProperties: transformedAdditionalProperties,
+			},
+		})
+	}
+
 	return &res, nil
 }
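
Because nested items/properties/additionalProperties land under AllOf rather than on the root SchemaProps, the root schema remains a pure validation overlay and cannot smuggle in new structural fields. A hypothetical in-package sketch (not part of the diff) using the unexported commentTags type:

minLen := int64(1)
tags := commentTags{
	Items: &commentTags{MinLength: &minLen},
}
schema, _ := tags.ValidationSchema()
// The root SchemaProps carries no Items of its own; the nested constraint
// surfaces under schema.AllOf[0].Items.Schema.MinLength (value 1).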
 
@@ -134,7 +234,7 @@ func (c commentTags) Validate() error {
 	if c.Minimum != nil && c.Maximum != nil && *c.Minimum > *c.Maximum {
 		err = errors.Join(err, fmt.Errorf("minimum %f is greater than maximum %f", *c.Minimum, *c.Maximum))
 	}
-	if (c.ExclusiveMinimum || c.ExclusiveMaximum) && c.Minimum != nil && c.Maximum != nil && *c.Minimum == *c.Maximum {
+	if (c.ExclusiveMinimum != nil || c.ExclusiveMaximum != nil) && c.Minimum != nil && c.Maximum != nil && *c.Minimum == *c.Maximum {
 		err = errors.Join(err, fmt.Errorf("exclusiveMinimum/Maximum cannot be set when minimum == maximum"))
 	}
 	if c.MinLength != nil && c.MaxLength != nil && *c.MinLength > *c.MaxLength {
@@ -146,10 +246,10 @@ func (c commentTags) Validate() error {
 	if c.MinProperties != nil && c.MaxProperties != nil && *c.MinProperties > *c.MaxProperties {
 		err = errors.Join(err, fmt.Errorf("minProperties %d is greater than maxProperties %d", *c.MinProperties, *c.MaxProperties))
 	}
-	if c.Pattern != "" {
-		_, e := regexp.Compile(c.Pattern)
+	if c.Pattern != nil {
+		_, e := regexp.Compile(*c.Pattern)
 		if e != nil {
-			err = errors.Join(err, fmt.Errorf("invalid pattern %q: %v", c.Pattern, e))
+			err = errors.Join(err, fmt.Errorf("invalid pattern %q: %v", *c.Pattern, e))
 		}
 	}
 	if c.MultipleOf != nil && *c.MultipleOf == 0 {
@@ -175,10 +275,23 @@ func (c commentTags) ValidateType(t *types.Type) error {
 	typeString, _ := openapi.OpenAPITypeFormat(resolvedType.String()) // will be empty for complicated types
 
 	// Structs and interfaces may dynamically be any type, so we cant validate them
-	// easily. We may be able to if we check that they don't implement all the
-	// override functions, but for now we just skip them.
+	// easily.
 	if resolvedType.Kind == types.Interface || resolvedType.Kind == types.Struct {
-		return nil
+		// Skip validation for structs and interfaces which implement custom
+		// overrides.
+		//
+		// Only check the top-level type t without resolving aliases, mirroring
+		// the generator, which likewise checks only the top-level type. The
+		// `has*Method` functions can be changed to resolve aliases in the
+		// future if needed.
+		elemT := resolvePtrType(t)
+		if hasOpenAPIDefinitionMethod(elemT) ||
+			hasOpenAPIDefinitionMethods(elemT) ||
+			hasOpenAPIV3DefinitionMethod(elemT) ||
+			hasOpenAPIV3OneOfMethod(elemT) {
+
+			return nil
+		}
 	}
 
 	isArray := resolvedType.Kind == types.Slice || resolvedType.Kind == types.Array
@@ -186,6 +299,7 @@ func (c commentTags) ValidateType(t *types.Type) error {
 	isString := typeString == "string"
 	isInt := typeString == "integer"
 	isFloat := typeString == "number"
+	isStruct := resolvedType.Kind == types.Struct
 
 	if c.MaxItems != nil && !isArray {
 		err = errors.Join(err, fmt.Errorf("maxItems can only be used on array types"))
@@ -193,13 +307,13 @@ func (c commentTags) ValidateType(t *types.Type) error {
 	if c.MinItems != nil && !isArray {
 		err = errors.Join(err, fmt.Errorf("minItems can only be used on array types"))
 	}
-	if c.UniqueItems && !isArray {
+	if c.UniqueItems != nil && !isArray {
 		err = errors.Join(err, fmt.Errorf("uniqueItems can only be used on array types"))
 	}
-	if c.MaxProperties != nil && !isMap {
+	if c.MaxProperties != nil && !(isMap || isStruct) {
 		err = errors.Join(err, fmt.Errorf("maxProperties can only be used on map types"))
 	}
-	if c.MinProperties != nil && !isMap {
+	if c.MinProperties != nil && !(isMap || isStruct) {
 		err = errors.Join(err, fmt.Errorf("minProperties can only be used on map types"))
 	}
 	if c.MinLength != nil && !isString {
@@ -208,7 +322,7 @@ func (c commentTags) ValidateType(t *types.Type) error {
 	if c.MaxLength != nil && !isString {
 		err = errors.Join(err, fmt.Errorf("maxLength can only be used on string types"))
 	}
-	if c.Pattern != "" && !isString {
+	if c.Pattern != nil && !isString {
 		err = errors.Join(err, fmt.Errorf("pattern can only be used on string types"))
 	}
 	if c.Minimum != nil && !isInt && !isFloat {
@@ -220,16 +334,57 @@ func (c commentTags) ValidateType(t *types.Type) error {
 	if c.MultipleOf != nil && !isInt && !isFloat {
 		err = errors.Join(err, fmt.Errorf("multipleOf can only be used on numeric types"))
 	}
-	if c.ExclusiveMinimum && !isInt && !isFloat {
+	if c.ExclusiveMinimum != nil && !isInt && !isFloat {
 		err = errors.Join(err, fmt.Errorf("exclusiveMinimum can only be used on numeric types"))
 	}
-	if c.ExclusiveMaximum && !isInt && !isFloat {
+	if c.ExclusiveMaximum != nil && !isInt && !isFloat {
 		err = errors.Join(err, fmt.Errorf("exclusiveMaximum can only be used on numeric types"))
 	}
+	if c.AdditionalProperties != nil && !isMap {
+		err = errors.Join(err, fmt.Errorf("additionalProperties can only be used on map types"))
+
+		if err == nil {
+			err = errors.Join(err, c.AdditionalProperties.ValidateType(t))
+		}
+	}
+	if c.Items != nil && !isArray {
+		err = errors.Join(err, fmt.Errorf("items can only be used on array types"))
+
+		if err == nil {
+			err = errors.Join(err, c.Items.ValidateType(t))
+		}
+	}
+	if c.Properties != nil {
+		if !isStruct && !isMap {
+			err = errors.Join(err, fmt.Errorf("properties can only be used on struct types"))
+		} else if isStruct && err == nil {
+			for key, tags := range c.Properties {
+				if member := memberWithJSONName(resolvedType, key); member == nil {
+					err = errors.Join(err, fmt.Errorf("property used in comment tag %q not found in struct %s", key, resolvedType.String()))
+				} else if nestedErr := tags.ValidateType(member.Type); nestedErr != nil {
+					err = errors.Join(err, fmt.Errorf("failed to validate property %q: %w", key, nestedErr))
+				}
+			}
+		}
+	}
 
 	return err
 }
 
+func memberWithJSONName(t *types.Type, key string) *types.Member {
+	for _, member := range t.Members {
+		tags := getJsonTags(&member)
+		if len(tags) > 0 && tags[0] == key {
+			return &member
+		} else if member.Embedded {
+			if embeddedMember := memberWithJSONName(member.Type, key); embeddedMember != nil {
+				return embeddedMember
+			}
+		}
+	}
+	return nil
+}
+
 // Parses the given comments into a CommentTags type. Validates the parsed comment tags, and returns the result.
 // Accepts an optional type to validate against, and a prefix to filter out markers not related to validation.
 // Accepts a prefix to filter out markers not related to validation.
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go
index 743f5b8b2e..c5c0093818 100644
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go
@@ -249,7 +249,7 @@ func methodReturnsValue(mt *types.Type, pkg, name string) bool {
 		return false
 	}
 	r := mt.Signature.Results[0]
-	return r.Name.Name == name && r.Name.Package == pkg
+	return r.Type.Name.Name == name && r.Type.Name.Package == pkg
 }
 
 func hasOpenAPIV3DefinitionMethod(t *types.Type) bool {
@@ -362,6 +362,88 @@ func (g openAPITypeWriter) generateCall(t *types.Type) error {
 	return g.Error()
 }
 
+// Generates Go code to represent an OpenAPI schema. May be refactored in
+// the future to take more responsibility as we transition from an on-line
+// approach to parsing the comments to spec.Schema
+func (g openAPITypeWriter) generateSchema(s *spec.Schema) error {
+	if !reflect.DeepEqual(s.SchemaProps, spec.SchemaProps{}) {
+		g.Do("SchemaProps: spec.SchemaProps{\n", nil)
+		err := g.generateValueValidations(&s.SchemaProps)
+		if err != nil {
+			return err
+		}
+
+		if len(s.Properties) > 0 {
+			g.Do("Properties: map[string]spec.Schema{\n", nil)
+
+			// Sort property names to generate deterministic output
+			keys := []string{}
+			for k := range s.Properties {
+				keys = append(keys, k)
+			}
+			sort.Strings(keys)
+
+			for _, k := range keys {
+				v := s.Properties[k]
+				g.Do("$.$: {\n", fmt.Sprintf("%#v", k))
+				err := g.generateSchema(&v)
+				if err != nil {
+					return err
+				}
+				g.Do("},\n", nil)
+			}
+			g.Do("},\n", nil)
+		}
+
+		if s.AdditionalProperties != nil && s.AdditionalProperties.Schema != nil {
+			g.Do("AdditionalProperties: &spec.SchemaOrBool{\n", nil)
+			g.Do("Allows: true,\n", nil)
+			g.Do("Schema: &spec.Schema{\n", nil)
+			err := g.generateSchema(s.AdditionalProperties.Schema)
+			if err != nil {
+				return err
+			}
+			g.Do("},\n", nil)
+			g.Do("},\n", nil)
+		}
+
+		if s.Items != nil && s.Items.Schema != nil {
+			g.Do("Items: &spec.SchemaOrArray{\n", nil)
+			g.Do("Schema: &spec.Schema{\n", nil)
+			err := g.generateSchema(s.Items.Schema)
+			if err != nil {
+				return err
+			}
+			g.Do("},\n", nil)
+			g.Do("},\n", nil)
+		}
+
+		g.Do("},\n", nil)
+	}
+
+	if len(s.Extensions) > 0 {
+		g.Do("VendorExtensible: spec.VendorExtensible{\nExtensions: spec.Extensions{\n", nil)
+
+		// Sort extension keys to generate deterministic output
+		keys := []string{}
+		for k := range s.Extensions {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+
+		for _, k := range keys {
+			v := s.Extensions[k]
+			g.Do("$.key$: $.value$,\n", map[string]interface{}{
+				"key":   fmt.Sprintf("%#v", k),
+				"value": fmt.Sprintf("%#v", v),
+			})
+		}
+		g.Do("},\n},\n", nil)
+	}
+
+	return nil
+}
+
 func (g openAPITypeWriter) generateValueValidations(vs *spec.SchemaProps) error {
 
 	if vs == nil {
@@ -420,6 +502,18 @@ func (g openAPITypeWriter) generateValueValidations(vs *spec.SchemaProps) error
 		g.Do("UniqueItems: true,\n", nil)
 	}
 
+	if len(vs.AllOf) > 0 {
+		g.Do("AllOf: []spec.Schema{\n", nil)
+		for _, s := range vs.AllOf {
+			g.Do("{\n", nil)
+			if err := g.generateSchema(&s); err != nil {
+				return err
+			}
+			g.Do("},\n", nil)
+		}
+		g.Do("},\n", nil)
+	}
+
 	return nil
 }
 
@@ -429,7 +523,7 @@ func (g openAPITypeWriter) generate(t *types.Type) error {
 	case types.Struct:
 		validationSchema, err := ParseCommentTags(t, t.CommentLines, markerPrefix)
 		if err != nil {
-			return err
+			return fmt.Errorf("failed parsing comment tags for %v: %w", t.String(), err)
 		}
 
 		hasV2Definition := hasOpenAPIDefinitionMethod(t)
@@ -644,7 +738,15 @@ func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union
 	}
 
 	if len(otherExtensions) > 0 {
-		for k, v := range otherExtensions {
+		// Sort extension keys to generate deterministic output
+		keys := []string{}
+		for k := range otherExtensions {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+
+		for _, k := range keys {
+			v := otherExtensions[k]
 			g.Do("$.key$: $.value$,\n", map[string]interface{}{
 				"key":   fmt.Sprintf("%#v", k),
 				"value": fmt.Sprintf("%#v", v),
@@ -704,7 +806,7 @@ func defaultFromComments(comments []string, commentPath string, t *types.Type) (
 
 	var i interface{}
 	if id, ok := parseSymbolReference(tag, commentPath); ok {
-		klog.Errorf("%v, %v", id, commentPath)
+		klog.V(5).Infof("%v, %v", id, commentPath)
 		return nil, &id, nil
 	} else if err := json.Unmarshal([]byte(tag), &i); err != nil {
 		return nil, nil, fmt.Errorf("failed to unmarshal default: %v", err)
@@ -844,15 +946,9 @@ func (g openAPITypeWriter) generateDescription(CommentLines []string) {
 		}
 	}
 
-	postDoc := strings.TrimLeft(buffer.String(), "\n")
-	postDoc = strings.TrimRight(postDoc, "\n")
-	postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to "
-	postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape "
-	postDoc = strings.Replace(postDoc, "\n", "\\n", -1)
-	postDoc = strings.Replace(postDoc, "\t", "\\t", -1)
-	postDoc = strings.Trim(postDoc, " ")
-	if postDoc != "" {
-		g.Do("Description: \"$.$\",\n", postDoc)
+	postDoc := strings.TrimSpace(buffer.String())
+	if len(postDoc) > 0 {
+		g.Do("Description: $.$,\n", fmt.Sprintf("%#v", postDoc))
 	}
 }
 
@@ -934,6 +1030,17 @@ func (g openAPITypeWriter) generateReferenceProperty(t *types.Type) {
 	g.Do("Ref: ref(\"$.$\"),\n", t.Name.String())
 }
 
+func resolvePtrType(t *types.Type) *types.Type {
+	var prev *types.Type
+	for prev != t {
+		prev = t
+		if t.Kind == types.Pointer {
+			t = t.Elem
+		}
+	}
+	return t
+}
+
 func resolveAliasAndPtrType(t *types.Type) *types.Type {
 	var prev *types.Type
 	for prev != t {
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go
index af30edc5ed..d7655f0d92 100644
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go
@@ -32,14 +32,6 @@ var (
 		"-",
 	)
 
-	// Blacklist of JSON names that should skip match evaluation
-	jsonNameBlacklist = sets.NewString(
-		// Empty name is used for inline struct field (e.g. metav1.TypeMeta)
-		"",
-		// Special case for object and list meta
-		"metadata",
-	)
-
 	// List of substrings that aren't allowed in Go name and JSON name
 	disallowedNameSubstrings = sets.NewString(
 		// Underscore is not allowed in either name
@@ -73,12 +65,11 @@ is also considered matched.
 
 	HTTPJSONSpec   httpjsonSpec   true
 
-NOTE: JSON names in jsonNameBlacklist should skip evaluation
+NOTE: an empty JSON name is valid only for inlined structs or pointers to structs.
+It cannot be empty for anything else because capitalization must be set explicitly.
 
-	                              true
-	podSpec                       true
-	podSpec        -              true
-	podSpec        metadata       true
+NOTE: metav1.ListMeta and metav1.ObjectMeta by convention must have "metadata" as name.
+Other fields may have that JSON name if the field name matches.
 */
 type NamesMatch struct{}
 
@@ -109,7 +100,7 @@ func (n *NamesMatch) Validate(t *types.Type) ([]string, error) {
 				continue
 			}
 			jsonName := strings.Split(jsonTag, ",")[0]
-			if !namesMatch(goName, jsonName) {
+			if !nameIsOkay(m, jsonName) {
 				fields = append(fields, goName)
 			}
 		}
@@ -117,6 +108,22 @@ func (n *NamesMatch) Validate(t *types.Type) ([]string, error) {
 	return fields, nil
 }
 
+func nameIsOkay(member types.Member, jsonName string) bool {
+	if jsonName == "" {
+		return member.Type.Kind == types.Struct ||
+			member.Type.Kind == types.Pointer && member.Type.Elem.Kind == types.Struct
+	}
+
+	typeName := member.Type.String()
+	switch typeName {
+	case "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta",
+		"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta":
+		return jsonName == "metadata"
+	}
+
+	return namesMatch(member.Name, jsonName)
+}
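
With the blacklist removed, the rule is stricter about when "metadata" and empty JSON names are tolerated. A hedged illustration (not part of the diff) with assumed field and type names:

type Foo struct {
	metav1.TypeMeta   `json:",inline"`  // ok: empty JSON name on an embedded struct
	metav1.ObjectMeta `json:"metadata"` // ok: ObjectMeta must be named "metadata"
	Spec              FooSpec `json:"spec"`     // ok: Go name and JSON name match
	Bar               string  `json:"metadata"` // flagged: "metadata" is no longer blanket-allowed
}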
+
 // namesMatch evaluates if goName and jsonName match the API rule
 // TODO: Use an off-the-shelf CamelCase solution instead of implementing this logic. The following existing
 //
@@ -129,9 +136,6 @@ func (n *NamesMatch) Validate(t *types.Type) ([]string, error) {
 //		 about why they don't satisfy our need. What we need can be a function that detects an acronym at the
 //		 beginning of a string.
 func namesMatch(goName, jsonName string) bool {
-	if jsonNameBlacklist.Has(jsonName) {
-		return true
-	}
 	if !isAllowedName(goName) || !isAllowedName(jsonName) {
 		return false
 	}
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
index 5789e67ab7..1b758ab25a 100644
--- a/hack/tools/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go
@@ -22,7 +22,7 @@ import (
 	"strings"
 
 	openapi_v2 "github.com/google/gnostic-models/openapiv2"
-	"gopkg.in/yaml.v2"
+	yaml "sigs.k8s.io/yaml/goyaml.v2"
 )
 
 func newSchemaError(path *Path, format string, a ...interface{}) error {
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/.gitignore b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/.gitignore
new file mode 100644
index 0000000000..dd91ed6a04
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/.gitignore
@@ -0,0 +1,2 @@
+secrets.yml
+coverage.out
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/LICENSE b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/api.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/api.go
new file mode 100644
index 0000000000..e0b310044a
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/api.go
@@ -0,0 +1,46 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"fmt"
+)
+
+// Error represents an error interface that all swagger framework errors implement
+type Error interface {
+	error
+	Code() int32
+}
+
+type apiError struct {
+	code    int32
+	message string
+}
+
+func (a *apiError) Error() string {
+	return a.message
+}
+
+func (a *apiError) Code() int32 {
+	return a.code
+}
+
+// New creates a new API error with a code and a message
+func New(code int32, message string, args ...interface{}) Error {
+	if len(args) > 0 {
+		return &apiError{code, fmt.Sprintf(message, args...)}
+	}
+	return &apiError{code, message}
+}
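As a quick illustration of the Error interface above, here is a hedged sketch of constructing an API error with errors.New and reading back its code; the 404 code and message are illustrative values only.

package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/errors"
)

func main() {
	// New formats the message only when variadic args are supplied.
	err := errors.New(404, "resource %q not found", "widgets")
	fmt.Println(err.Error()) // resource "widgets" not found
	fmt.Println(err.Code())  // 404
}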
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/doc.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/doc.go
new file mode 100644
index 0000000000..af01190ce6
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/doc.go
@@ -0,0 +1,26 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package errors provides an Error interface and several concrete types
+implementing this interface to manage API errors and JSON-schema validation
+errors.
+
+A middleware handler ServeError() is provided to serve the error types
+it defines.
+
+It is used throughout the various go-openapi toolkit libraries
+(https://github.com/go-openapi).
+*/
+package errors
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/headers.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/headers.go
new file mode 100644
index 0000000000..3da85c3672
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/headers.go
@@ -0,0 +1,44 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+// Validation represents a failure of a precondition
+type Validation struct {
+	code    int32
+	Name    string
+	In      string
+	Value   interface{}
+	Valid   interface{}
+	message string
+	Values  []interface{}
+}
+
+func (e *Validation) Error() string {
+	return e.message
+}
+
+// Code returns the error code
+func (e *Validation) Code() int32 {
+	return e.code
+}
+
+// ValidateName produces an error message name for an aliased property
+func (e *Validation) ValidateName(name string) *Validation {
+	if e.Name == "" && name != "" {
+		e.Name = name
+		e.message = name + e.message
+	}
+	return e
+}
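A small sketch of how ValidateName back-fills a late-bound property name. It leans on the Required constructor from schema.go further down in this diff, and the "age" field name is hypothetical.

package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/errors"
)

func main() {
	// With an empty name, the message starts as " is required".
	e := errors.Required("", "")
	// ValidateName sets Name and prepends it to the message.
	e = e.ValidateName("age")
	fmt.Println(e.Error()) // age is required
}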
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/schema.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/schema.go
new file mode 100644
index 0000000000..65f133e9ee
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/errors/schema.go
@@ -0,0 +1,573 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+	"fmt"
+	"strings"
+)
+
+const (
+	invalidType               = "%s is an invalid type name"
+	typeFail                  = "%s in %s must be of type %s"
+	typeFailWithData          = "%s in %s must be of type %s: %q"
+	typeFailWithError         = "%s in %s must be of type %s, because: %s"
+	requiredFail              = "%s in %s is required"
+	tooLongMessage            = "%s in %s should be at most %d chars long"
+	tooShortMessage           = "%s in %s should be at least %d chars long"
+	patternFail               = "%s in %s should match '%s'"
+	enumFail                  = "%s in %s should be one of %v"
+	multipleOfFail            = "%s in %s should be a multiple of %v"
+	maxIncFail                = "%s in %s should be less than or equal to %v"
+	maxExcFail                = "%s in %s should be less than %v"
+	minIncFail                = "%s in %s should be greater than or equal to %v"
+	minExcFail                = "%s in %s should be greater than %v"
+	uniqueFail                = "%s in %s shouldn't contain duplicates"
+	maxItemsFail              = "%s in %s should have at most %d items"
+	minItemsFail              = "%s in %s should have at least %d items"
+	typeFailNoIn              = "%s must be of type %s"
+	typeFailWithDataNoIn      = "%s must be of type %s: %q"
+	typeFailWithErrorNoIn     = "%s must be of type %s, because: %s"
+	requiredFailNoIn          = "%s is required"
+	tooLongMessageNoIn        = "%s should be at most %d chars long"
+	tooShortMessageNoIn       = "%s should be at least %d chars long"
+	patternFailNoIn           = "%s should match '%s'"
+	enumFailNoIn              = "%s should be one of %v"
+	multipleOfFailNoIn        = "%s should be a multiple of %v"
+	maxIncFailNoIn            = "%s should be less than or equal to %v"
+	maxExcFailNoIn            = "%s should be less than %v"
+	minIncFailNoIn            = "%s should be greater than or equal to %v"
+	minExcFailNoIn            = "%s should be greater than %v"
+	uniqueFailNoIn            = "%s shouldn't contain duplicates"
+	maxItemsFailNoIn          = "%s should have at most %d items"
+	minItemsFailNoIn          = "%s should have at least %d items"
+	noAdditionalItems         = "%s in %s can't have additional items"
+	noAdditionalItemsNoIn     = "%s can't have additional items"
+	tooFewProperties          = "%s in %s should have at least %d properties"
+	tooFewPropertiesNoIn      = "%s should have at least %d properties"
+	tooManyProperties         = "%s in %s should have at most %d properties"
+	tooManyPropertiesNoIn     = "%s should have at most %d properties"
+	unallowedProperty         = "%s.%s in %s is a forbidden property"
+	unallowedPropertyNoIn     = "%s.%s is a forbidden property"
+	failedAllPatternProps     = "%s.%s in %s failed all pattern properties"
+	failedAllPatternPropsNoIn = "%s.%s failed all pattern properties"
+	multipleOfMustBePositive  = "factor MultipleOf declared for %s must be positive: %v"
+)
+
+// All code responses can be used to differentiate errors for different handling
+// by the consuming program
+const (
+	// CompositeErrorCode remains 422 for backwards-compatibility
+	// and to separate it from validation errors with cause
+	CompositeErrorCode = 422
+	// InvalidTypeCode is used for any subclass of invalid types
+	InvalidTypeCode = 600 + iota
+	RequiredFailCode
+	TooLongFailCode
+	TooShortFailCode
+	PatternFailCode
+	EnumFailCode
+	MultipleOfFailCode
+	MaxFailCode
+	MinFailCode
+	UniqueFailCode
+	MaxItemsFailCode
+	MinItemsFailCode
+	NoAdditionalItemsCode
+	TooFewPropertiesCode
+	TooManyPropertiesCode
+	UnallowedPropertyCode
+	FailedAllPatternPropsCode
+	MultipleOfMustBePositiveCode
+)
+
+// CompositeError is an error that groups several errors together
+type CompositeError struct {
+	Errors  []error
+	code    int32
+	message string
+}
+
+// Code for this error
+func (c *CompositeError) Code() int32 {
+	return c.code
+}
+
+func (c *CompositeError) Error() string {
+	if len(c.Errors) > 0 {
+		msgs := []string{c.message + ":"}
+		for _, e := range c.Errors {
+			msgs = append(msgs, e.Error())
+		}
+		return strings.Join(msgs, "\n")
+	}
+	return c.message
+}
+
+// CompositeValidationError wraps a list of validation errors in a single CompositeError
+func CompositeValidationError(errors ...error) *CompositeError {
+	return &CompositeError{
+		code:    CompositeErrorCode,
+		Errors:  append([]error{}, errors...),
+		message: "validation failure list",
+	}
+}
+
+// FailedAllPatternProperties an error for when a property fails to match every declared pattern property
+func FailedAllPatternProperties(name, in, key string) *Validation {
+	msg := fmt.Sprintf(failedAllPatternProps, name, key, in)
+	if in == "" {
+		msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key)
+	}
+	return &Validation{
+		code:    FailedAllPatternPropsCode,
+		Name:    name,
+		In:      in,
+		Value:   key,
+		message: msg,
+	}
+}
+
+// PropertyNotAllowed an error for when a property is present but not allowed
+func PropertyNotAllowed(name, in, key string) *Validation {
+	msg := fmt.Sprintf(unallowedProperty, name, key, in)
+	if in == "" {
+		msg = fmt.Sprintf(unallowedPropertyNoIn, name, key)
+	}
+	return &Validation{
+		code:    UnallowedPropertyCode,
+		Name:    name,
+		In:      in,
+		Value:   key,
+		message: msg,
+	}
+}
+
+// TooFewProperties an error for an object with too few properties
+func TooFewProperties(name, in string, minProperties, size int64) *Validation {
+	msg := fmt.Sprintf(tooFewProperties, name, in, minProperties)
+	if in == "" {
+		msg = fmt.Sprintf(tooFewPropertiesNoIn, name, minProperties)
+	}
+	return &Validation{
+		code:    TooFewPropertiesCode,
+		Name:    name,
+		In:      in,
+		Value:   size,
+		Valid:   minProperties,
+		message: msg,
+	}
+}
+
+// TooManyProperties an error for an object with too many properties
+func TooManyProperties(name, in string, maxProperties, size int64) *Validation {
+	msg := fmt.Sprintf(tooManyProperties, name, in, maxProperties)
+	if in == "" {
+		msg = fmt.Sprintf(tooManyPropertiesNoIn, name, maxProperties)
+	}
+	return &Validation{
+		code:    TooManyPropertiesCode,
+		Name:    name,
+		In:      in,
+		Value:   size,
+		Valid:   maxProperties,
+		message: msg,
+	}
+}
+
+// AdditionalItemsNotAllowed an error for invalid additional items
+func AdditionalItemsNotAllowed(name, in string) *Validation {
+	msg := fmt.Sprintf(noAdditionalItems, name, in)
+	if in == "" {
+		msg = fmt.Sprintf(noAdditionalItemsNoIn, name)
+	}
+	return &Validation{
+		code:    NoAdditionalItemsCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// InvalidCollectionFormat another flavor of invalid type error
+func InvalidCollectionFormat(name, in, format string) *Validation {
+	return &Validation{
+		code:    InvalidTypeCode,
+		Name:    name,
+		In:      in,
+		Value:   format,
+		message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name),
+	}
+}
+
+// InvalidTypeName an error for when the type is invalid
+func InvalidTypeName(typeName string) *Validation {
+	return &Validation{
+		code:    InvalidTypeCode,
+		Value:   typeName,
+		message: fmt.Sprintf(invalidType, typeName),
+	}
+}
+
+// InvalidType creates an error for when the type is invalid
+func InvalidType(name, in, typeName string, value interface{}) *Validation {
+	var message string
+
+	if in != "" {
+		switch value.(type) {
+		case string:
+			message = fmt.Sprintf(typeFailWithData, name, in, typeName, value)
+		case error:
+			message = fmt.Sprintf(typeFailWithError, name, in, typeName, value)
+		default:
+			message = fmt.Sprintf(typeFail, name, in, typeName)
+		}
+	} else {
+		switch value.(type) {
+		case string:
+			message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value)
+		case error:
+			message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value)
+		default:
+			message = fmt.Sprintf(typeFailNoIn, name, typeName)
+		}
+	}
+
+	return &Validation{
+		code:    InvalidTypeCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+
+}
+
+// DuplicateItems error for when an array contains duplicates
+func DuplicateItems(name, in string) *Validation {
+	msg := fmt.Sprintf(uniqueFail, name, in)
+	if in == "" {
+		msg = fmt.Sprintf(uniqueFailNoIn, name)
+	}
+	return &Validation{
+		code:    UniqueFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// TooManyItems error for when an array contains too many items
+func TooManyItems(name, in string, max int64, value interface{}) *Validation {
+	msg := fmt.Sprintf(maxItemsFail, name, in, max)
+	if in == "" {
+		msg = fmt.Sprintf(maxItemsFailNoIn, name, max)
+	}
+
+	return &Validation{
+		code:    MaxItemsFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		Valid:   max,
+		message: msg,
+	}
+}
+
+// TooFewItems error for when an array contains too few items
+func TooFewItems(name, in string, min int64, value interface{}) *Validation {
+	msg := fmt.Sprintf(minItemsFail, name, in, min)
+	if in == "" {
+		msg = fmt.Sprintf(minItemsFailNoIn, name, min)
+	}
+	return &Validation{
+		code:    MinItemsFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		Valid:   min,
+		message: msg,
+	}
+}
+
+// ExceedsMaximumInt error for when maximum validation fails
+func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interface{}) *Validation {
+	var message string
+	if in == "" {
+		m := maxIncFailNoIn
+		if exclusive {
+			m = maxExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, max)
+	} else {
+		m := maxIncFail
+		if exclusive {
+			m = maxExcFail
+		}
+		message = fmt.Sprintf(m, name, in, max)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+}
+
+// ExceedsMaximumUint error for when maximum validation fails
+func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value interface{}) *Validation {
+	var message string
+	if in == "" {
+		m := maxIncFailNoIn
+		if exclusive {
+			m = maxExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, max)
+	} else {
+		m := maxIncFail
+		if exclusive {
+			m = maxExcFail
+		}
+		message = fmt.Sprintf(m, name, in, max)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+}
+
+// ExceedsMaximum error for when maximum validation fails
+func ExceedsMaximum(name, in string, max float64, exclusive bool, value interface{}) *Validation {
+	var message string
+	if in == "" {
+		m := maxIncFailNoIn
+		if exclusive {
+			m = maxExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, max)
+	} else {
+		m := maxIncFail
+		if exclusive {
+			m = maxExcFail
+		}
+		message = fmt.Sprintf(m, name, in, max)
+	}
+	return &Validation{
+		code:    MaxFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+}
+
+// ExceedsMinimumInt error for when minimum validation fails
+func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interface{}) *Validation {
+	var message string
+	if in == "" {
+		m := minIncFailNoIn
+		if exclusive {
+			m = minExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, min)
+	} else {
+		m := minIncFail
+		if exclusive {
+			m = minExcFail
+		}
+		message = fmt.Sprintf(m, name, in, min)
+	}
+	return &Validation{
+		code:    MinFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+}
+
+// ExceedsMinimumUint error for when minimum validation fails
+func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value interface{}) *Validation {
+	var message string
+	if in == "" {
+		m := minIncFailNoIn
+		if exclusive {
+			m = minExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, min)
+	} else {
+		m := minIncFail
+		if exclusive {
+			m = minExcFail
+		}
+		message = fmt.Sprintf(m, name, in, min)
+	}
+	return &Validation{
+		code:    MinFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+}
+
+// ExceedsMinimum error for when minimum validation fails
+func ExceedsMinimum(name, in string, min float64, exclusive bool, value interface{}) *Validation {
+	var message string
+	if in == "" {
+		m := minIncFailNoIn
+		if exclusive {
+			m = minExcFailNoIn
+		}
+		message = fmt.Sprintf(m, name, min)
+	} else {
+		m := minIncFail
+		if exclusive {
+			m = minExcFail
+		}
+		message = fmt.Sprintf(m, name, in, min)
+	}
+	return &Validation{
+		code:    MinFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: message,
+	}
+}
+
+// NotMultipleOf error for when multiple of validation fails
+func NotMultipleOf(name, in string, multiple, value interface{}) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple)
+	} else {
+		msg = fmt.Sprintf(multipleOfFail, name, in, multiple)
+	}
+	return &Validation{
+		code:    MultipleOfFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: msg,
+	}
+}
+
+// EnumFail error for when an enum validation fails
+func EnumFail(name, in string, value interface{}, values []interface{}) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(enumFailNoIn, name, values)
+	} else {
+		msg = fmt.Sprintf(enumFail, name, in, values)
+	}
+
+	return &Validation{
+		code:    EnumFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		Values:  values,
+		message: msg,
+	}
+}
+
+// Required error for when a value is missing
+func Required(name, in string) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(requiredFailNoIn, name)
+	} else {
+		msg = fmt.Sprintf(requiredFail, name, in)
+	}
+	return &Validation{
+		code:    RequiredFailCode,
+		Name:    name,
+		In:      in,
+		message: msg,
+	}
+}
+
+// TooLong error for when a string is too long
+func TooLong(name, in string, max int64, value interface{}) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(tooLongMessageNoIn, name, max)
+	} else {
+		msg = fmt.Sprintf(tooLongMessage, name, in, max)
+	}
+	return &Validation{
+		code:    TooLongFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		Valid:   max,
+		message: msg,
+	}
+}
+
+// TooShort error for when a string is too short
+func TooShort(name, in string, min int64, value interface{}) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(tooShortMessageNoIn, name, min)
+	} else {
+		msg = fmt.Sprintf(tooShortMessage, name, in, min)
+	}
+
+	return &Validation{
+		code:    TooShortFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		Valid:   min,
+		message: msg,
+	}
+}
+
+// FailedPattern error for when a string fails a regex pattern match;
+// the pattern that is returned is the ECMA syntax version of the pattern, not the Go version.
+func FailedPattern(name, in, pattern string, value interface{}) *Validation {
+	var msg string
+	if in == "" {
+		msg = fmt.Sprintf(patternFailNoIn, name, pattern)
+	} else {
+		msg = fmt.Sprintf(patternFail, name, in, pattern)
+	}
+
+	return &Validation{
+		code:    PatternFailCode,
+		Name:    name,
+		In:      in,
+		Value:   value,
+		message: msg,
+	}
+}
+
+// MultipleOfMustBePositive error for when a
+// multipleOf factor is negative
+func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation {
+	return &Validation{
+		code:    MultipleOfMustBePositiveCode,
+		Name:    name,
+		In:      in,
+		Value:   factor,
+		message: fmt.Sprintf(multipleOfMustBePositive, name, factor),
+	}
+}
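Putting the constructors above together, here is a hedged sketch of aggregating several failures with CompositeValidationError and branching on their codes; the field names and limits are hypothetical.

package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/errors"
)

func main() {
	composite := errors.CompositeValidationError(
		errors.Required("name", "body"),
		errors.TooLong("name", "body", 64, "some-long-value"),
		errors.ExceedsMaximumInt("replicas", "body", 10, false, 42),
	)
	fmt.Println(composite.Code()) // 422

	for _, err := range composite.Errors {
		if v, ok := err.(*errors.Validation); ok && v.Code() == errors.RequiredFailCode {
			fmt.Println("missing field:", v.Name)
			continue
		}
		fmt.Println(err)
	}
}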
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/.gitignore b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/.gitignore
new file mode 100644
index 0000000000..dd91ed6a04
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/.gitignore
@@ -0,0 +1,2 @@
+secrets.yml
+coverage.out
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/LICENSE b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson.go
new file mode 100644
index 0000000000..0b6380c8af
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson.go
@@ -0,0 +1,103 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	bsonprim "k8s.io/kube-openapi/pkg/validation/strfmt/bson"
+)
+
+func init() {
+	var id ObjectId
+	// register this format in the default registry
+	Default.Add("bsonobjectid", &id, IsBSONObjectID)
+}
+
+// IsBSONObjectID returns true when the string is a valid BSON.ObjectId
+func IsBSONObjectID(str string) bool {
+	_, err := bsonprim.ObjectIDFromHex(str)
+	return err == nil
+}
+
+// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID)
+//
+// swagger:strfmt bsonobjectid
+type ObjectId bsonprim.ObjectID
+
+// NewObjectId creates an ObjectId from a hex string; it panics if the string is not a valid ObjectID
+func NewObjectId(hex string) ObjectId {
+	oid, err := bsonprim.ObjectIDFromHex(hex)
+	if err != nil {
+		panic(err)
+	}
+	return ObjectId(oid)
+}
+
+// MarshalText turns this instance into text
+func (id ObjectId) MarshalText() ([]byte, error) {
+	oid := bsonprim.ObjectID(id)
+	if oid == bsonprim.NilObjectID {
+		return nil, nil
+	}
+	return []byte(oid.Hex()), nil
+}
+
+// UnmarshalText hydrates this instance from text
+func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on
+	if len(data) == 0 {
+		*id = ObjectId(bsonprim.NilObjectID)
+		return nil
+	}
+	oidstr := string(data)
+	oid, err := bsonprim.ObjectIDFromHex(oidstr)
+	if err != nil {
+		return err
+	}
+	*id = ObjectId(oid)
+	return nil
+}
+
+func (id ObjectId) String() string {
+	return bsonprim.ObjectID(id).String()
+}
+
+// MarshalJSON returns the ObjectId as JSON
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+	return bsonprim.ObjectID(id).MarshalJSON()
+}
+
+// UnmarshalJSON sets the ObjectId from JSON
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+	var obj bsonprim.ObjectID
+	if err := obj.UnmarshalJSON(data); err != nil {
+		return err
+	}
+	*id = ObjectId(obj)
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (id *ObjectId) DeepCopyInto(out *ObjectId) {
+	*out = *id
+}
+
+// DeepCopy copies the receiver into a new ObjectId.
+func (id *ObjectId) DeepCopy() *ObjectId {
+	if id == nil {
+		return nil
+	}
+	out := new(ObjectId)
+	id.DeepCopyInto(out)
+	return out
+}
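A minimal sketch of round-tripping the ObjectId format through JSON; the 24-character hex string is an arbitrary example value.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/strfmt"
)

func main() {
	fmt.Println(strfmt.IsBSONObjectID("507f1f77bcf86cd799439011")) // true

	// NewObjectId panics on invalid hex, so only pass validated input.
	id := strfmt.NewObjectId("507f1f77bcf86cd799439011")
	out, _ := json.Marshal(id)
	fmt.Println(string(out)) // "507f1f77bcf86cd799439011"

	var back strfmt.ObjectId
	_ = json.Unmarshal(out, &back)
	fmt.Println(back.String()) // ObjectID("507f1f77bcf86cd799439011")
}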
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson/objectid.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson/objectid.go
new file mode 100644
index 0000000000..824534b288
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson/objectid.go
@@ -0,0 +1,122 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package bson
+
+import (
+	"bytes"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+)
+
+// ErrInvalidHex indicates that a hex string cannot be converted to an ObjectID.
+var ErrInvalidHex = errors.New("the provided hex string is not a valid ObjectID")
+
+// ObjectID is the BSON ObjectID type.
+type ObjectID [12]byte
+
+// NilObjectID is the zero value for ObjectID.
+var NilObjectID ObjectID
+
+// Hex returns the hex encoding of the ObjectID as a string.
+func (id ObjectID) Hex() string {
+	return hex.EncodeToString(id[:])
+}
+
+func (id ObjectID) String() string {
+	return fmt.Sprintf("ObjectID(%q)", id.Hex())
+}
+
+// IsZero returns true if id is the empty ObjectID.
+func (id ObjectID) IsZero() bool {
+	return bytes.Equal(id[:], NilObjectID[:])
+}
+
+// ObjectIDFromHex creates a new ObjectID from a hex string. It returns an error if the hex string is not a
+// valid ObjectID.
+func ObjectIDFromHex(s string) (ObjectID, error) {
+	b, err := hex.DecodeString(s)
+	if err != nil {
+		return NilObjectID, err
+	}
+
+	if len(b) != 12 {
+		return NilObjectID, ErrInvalidHex
+	}
+
+	var oid [12]byte
+	copy(oid[:], b[:])
+
+	return oid, nil
+}
+
+// MarshalJSON returns the ObjectID as a string
+func (id ObjectID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(id.Hex())
+}
+
+// UnmarshalJSON populates the ObjectID from the given JSON bytes. If the input is twelve bytes
+// long, it is copied in as the raw BSON representation of the ObjectID. Otherwise the input is
+// decoded as JSON: a 24-character hex string (or an extended-JSON {"$oid": ...} document) is
+// parsed into the ObjectID, the empty string decodes as NilObjectID, and any other input returns an error.
+func (id *ObjectID) UnmarshalJSON(b []byte) error {
+	// Ignore "null" to keep parity with the standard library. Decoding a JSON null into a non-pointer ObjectID field
+	// will leave the field unchanged. For pointer values, encoding/json will set the pointer to nil and will not
+	// enter the UnmarshalJSON hook.
+	if string(b) == "null" {
+		return nil
+	}
+
+	var err error
+	switch len(b) {
+	case 12:
+		copy(id[:], b)
+	default:
+		// Extended JSON
+		var res interface{}
+		err := json.Unmarshal(b, &res)
+		if err != nil {
+			return err
+		}
+		str, ok := res.(string)
+		if !ok {
+			m, ok := res.(map[string]interface{})
+			if !ok {
+				return errors.New("not an extended JSON ObjectID")
+			}
+			oid, ok := m["$oid"]
+			if !ok {
+				return errors.New("not an extended JSON ObjectID")
+			}
+			str, ok = oid.(string)
+			if !ok {
+				return errors.New("not an extended JSON ObjectID")
+			}
+		}
+
+		// An empty string is not a valid ObjectID, but we treat it as a special value that decodes as NilObjectID.
+		if len(str) == 0 {
+			copy(id[:], NilObjectID[:])
+			return nil
+		}
+
+		if len(str) != 24 {
+			return fmt.Errorf("cannot unmarshal into an ObjectID, the length must be 24 but it is %d", len(str))
+		}
+
+		_, err = hex.Decode(id[:], []byte(str))
+		if err != nil {
+			return err
+		}
+	}
+
+	return err
+}
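The UnmarshalJSON branches above accept either raw BSON bytes or an extended-JSON wrapper; a short sketch of the extended-JSON path and the error path, with an arbitrary example ID:

package main

import (
	"fmt"

	bsonprim "k8s.io/kube-openapi/pkg/validation/strfmt/bson"
)

func main() {
	var id bsonprim.ObjectID
	// Extended JSON form: {"$oid": "<24 hex chars>"}.
	_ = id.UnmarshalJSON([]byte(`{"$oid": "507f1f77bcf86cd799439011"}`))
	fmt.Println(id.Hex()) // 507f1f77bcf86cd799439011

	// Invalid hex yields NilObjectID and an error.
	oid, err := bsonprim.ObjectIDFromHex("not-hex")
	fmt.Println(oid.IsZero(), err != nil) // true true
}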
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/date.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/date.go
new file mode 100644
index 0000000000..74ce5b6cb6
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/date.go
@@ -0,0 +1,103 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"encoding/json"
+	"time"
+)
+
+func init() {
+	d := Date{}
+	// register this format in the default registry
+	Default.Add("date", &d, IsDate)
+}
+
+// IsDate returns true when the string is a valid date
+func IsDate(str string) bool {
+	_, err := time.Parse(RFC3339FullDate, str)
+	return err == nil
+}
+
+const (
+	// RFC3339FullDate represents a full-date as specified by RFC3339
+	// See: http://goo.gl/xXOvVd
+	RFC3339FullDate = "2006-01-02"
+)
+
+// Date represents a date from the API
+//
+// swagger:strfmt date
+type Date time.Time
+
+// String converts this date into a string
+func (d Date) String() string {
+	return time.Time(d).Format(RFC3339FullDate)
+}
+
+// UnmarshalText parses a text representation into a date type
+func (d *Date) UnmarshalText(text []byte) error {
+	if len(text) == 0 {
+		return nil
+	}
+	dd, err := time.Parse(RFC3339FullDate, string(text))
+	if err != nil {
+		return err
+	}
+	*d = Date(dd)
+	return nil
+}
+
+// MarshalText serializes this date type to string
+func (d Date) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// MarshalJSON returns the Date as JSON
+func (d Date) MarshalJSON() ([]byte, error) {
+	return json.Marshal(time.Time(d).Format(RFC3339FullDate))
+}
+
+// UnmarshalJSON sets the Date from JSON
+func (d *Date) UnmarshalJSON(data []byte) error {
+	if string(data) == jsonNull {
+		return nil
+	}
+	var strdate string
+	if err := json.Unmarshal(data, &strdate); err != nil {
+		return err
+	}
+	tt, err := time.Parse(RFC3339FullDate, strdate)
+	if err != nil {
+		return err
+	}
+	*d = Date(tt)
+	return nil
+}
+
+// DeepCopyInto copies the receiver and writes its value into out.
+func (d *Date) DeepCopyInto(out *Date) {
+	*out = *d
+}
+
+// DeepCopy copies the receiver into a new Date.
+func (d *Date) DeepCopy() *Date {
+	if d == nil {
+		return nil
+	}
+	out := new(Date)
+	d.DeepCopyInto(out)
+	return out
+}
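A brief sketch of the RFC3339 full-date round trip the Date type implements; the leap-day value is an arbitrary example.

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kube-openapi/pkg/validation/strfmt"
)

func main() {
	fmt.Println(strfmt.IsDate("2024-02-29")) // true: valid leap day

	var d strfmt.Date
	_ = json.Unmarshal([]byte(`"2024-02-29"`), &d)
	fmt.Println(d.String()) // 2024-02-29

	out, _ := json.Marshal(d)
	fmt.Println(string(out)) // "2024-02-29"
}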
diff --git a/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go
new file mode 100644
index 0000000000..e85b0f1b46
--- /dev/null
+++ b/hack/tools/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go
@@ -0,0 +1,1562 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"net/mail"
+	"regexp"
+	"strings"
+
+	"github.com/asaskevich/govalidator"
+
+	netutils "k8s.io/utils/net"
+)
+
+const (
+	// HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114
+	//  A string instance is valid against this attribute if it is a valid
+	//  representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
+	//  http://tools.ietf.org/html/rfc1034#section-3.5
+	//  <digit> ::= any one of the ten digits 0 through 9
+	//  var digit = /[0-9]/;
+	//  <letter> ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case
+	//  var letter = /[a-zA-Z]/;
+	//  <let-dig> ::= <letter> | <digit>
+	//  var letDig = /[0-9a-zA-Z]/;
+	//  <let-dig-hyp> ::= <let-dig> | "-"
+	//  var letDigHyp = /[-0-9a-zA-Z]/;
+	//  <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
+	//  var ldhStr = /[-0-9a-zA-Z]+/;
+	//